Index: lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- lib/Target/X86/X86ISelLowering.cpp
+++ lib/Target/X86/X86ISelLowering.cpp
@@ -34976,6 +34976,32 @@
   return DAG.getNode(ISD::ADD, DL, VT, Sad, Phi);
 }
 
+/// Convert vector increment or decrement to sub/add with an all-ones constant:
+/// add X, <1, 1...> --> sub X, <-1, -1...>
+/// sub X, <1, 1...> --> add X, <-1, -1...>
+/// The all-ones vector constant can be materialized using a pcmpeq instruction
+/// that is commonly recognized as an idiom (has no register dependency and/or
+/// has no latency), so that's better than loading a splat 1 constant.
+static SDValue combineIncDecVector(SDNode *N, SelectionDAG &DAG) {
+  assert((N->getOpcode() == ISD::ADD || N->getOpcode() == ISD::SUB) &&
+         "Unexpected opcode for increment/decrement transform");
+
+  // Pseudo-legality check: getOnesVector() expects one of these types, so bail
+  // out and wait for legalization if we have an unsupported vector length.
+  EVT VT = N->getValueType(0);
+  if (!VT.is128BitVector() && !VT.is256BitVector() && !VT.is512BitVector())
+    return SDValue();
+
+  SDNode *N1 = N->getOperand(1).getNode();
+  APInt SplatVal;
+  if (!ISD::isConstantSplatVector(N1, SplatVal) || SplatVal != 1)
+    return SDValue();
+
+  SDValue AllOnesVec = getOnesVector(VT, DAG, SDLoc(N));
+  unsigned NewOpcode = N->getOpcode() == ISD::ADD ? ISD::SUB : ISD::ADD;
+  return DAG.getNode(NewOpcode, SDLoc(N), VT, N->getOperand(0), AllOnesVec);
+}
+
 static SDValue combineAdd(SDNode *N, SelectionDAG &DAG,
                           const X86Subtarget &Subtarget) {
   const SDNodeFlags Flags = N->getFlags();
@@ -34995,6 +35021,9 @@
       isHorizontalBinOp(Op0, Op1, true))
     return DAG.getNode(X86ISD::HADD, SDLoc(N), VT, Op0, Op1);
 
+  if (SDValue V = combineIncDecVector(N, DAG))
+    return V;
+
   return combineAddOrSubToADCOrSBB(N, DAG);
 }
 
@@ -35028,6 +35057,9 @@
      isHorizontalBinOp(Op0, Op1, false))
    return DAG.getNode(X86ISD::HSUB, SDLoc(N), VT, Op0, Op1);
 
+  if (SDValue V = combineIncDecVector(N, DAG))
+    return V;
+
   return combineAddOrSubToADCOrSBB(N, DAG);
 }
 
Index: test/CodeGen/X86/avg.ll
===================================================================
--- test/CodeGen/X86/avg.ll
+++ test/CodeGen/X86/avg.ll
@@ -184,15 +184,15 @@
 ; SSE2-NEXT:    paddd %xmm10, %xmm7
 ; SSE2-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
 ; SSE2-NEXT:    paddd %xmm8, %xmm1
-; SSE2-NEXT:    movdqa {{.*#+}} xmm4 = [1,1,1,1]
-; SSE2-NEXT:    paddd %xmm4, %xmm9
-; SSE2-NEXT:    paddd %xmm4, %xmm2
-; SSE2-NEXT:    paddd %xmm4, %xmm5
-; SSE2-NEXT:    paddd %xmm4, %xmm0
-; SSE2-NEXT:    paddd %xmm4, %xmm6
-; SSE2-NEXT:    paddd %xmm4, %xmm3
-; SSE2-NEXT:    paddd %xmm4, %xmm7
-; SSE2-NEXT:    paddd %xmm4, %xmm1
+; SSE2-NEXT:    pcmpeqd %xmm4, %xmm4
+; SSE2-NEXT:    psubd %xmm4, %xmm9
+; SSE2-NEXT:    psubd %xmm4, %xmm2
+; SSE2-NEXT:    psubd %xmm4, %xmm5
+; SSE2-NEXT:    psubd %xmm4, %xmm0
+; SSE2-NEXT:    psubd %xmm4, %xmm6
+; SSE2-NEXT:    psubd %xmm4, %xmm3
+; SSE2-NEXT:    psubd %xmm4, %xmm7
+; SSE2-NEXT:    psubd %xmm4, %xmm1
 ; SSE2-NEXT:    psrld $1, %xmm1
 ; SSE2-NEXT:    psrld $1, %xmm7
 ; SSE2-NEXT:    psrld $1, %xmm3
@@ -361,28 +361,28 @@
 ; SSE2-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
 ; SSE2-NEXT:    punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm0[0],xmm7[1],xmm0[1],xmm7[2],xmm0[2],xmm7[3],xmm0[3]
 ; SSE2-NEXT:    paddd %xmm2, %xmm7
-; SSE2-NEXT:    movdqa {{.*#+}} xmm0 = [1,1,1,1]
+; SSE2-NEXT:    pcmpeqd %xmm0, %xmm0
 ; SSE2-NEXT:    movdqa -{{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
-; SSE2-NEXT:    paddd 
%xmm0, %xmm1 +; SSE2-NEXT: psubd %xmm0, %xmm1 ; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) # 16-byte Spill -; SSE2-NEXT: paddd %xmm0, %xmm10 +; SSE2-NEXT: psubd %xmm0, %xmm10 ; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload -; SSE2-NEXT: paddd %xmm0, %xmm1 +; SSE2-NEXT: psubd %xmm0, %xmm1 ; SSE2-NEXT: movdqa %xmm1, -{{[0-9]+}}(%rsp) # 16-byte Spill ; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm2 # 16-byte Reload -; SSE2-NEXT: paddd %xmm0, %xmm2 -; SSE2-NEXT: paddd %xmm0, %xmm12 -; SSE2-NEXT: paddd %xmm0, %xmm4 -; SSE2-NEXT: paddd %xmm0, %xmm15 -; SSE2-NEXT: paddd %xmm0, %xmm13 -; SSE2-NEXT: paddd %xmm0, %xmm9 -; SSE2-NEXT: paddd %xmm0, %xmm6 -; SSE2-NEXT: paddd %xmm0, %xmm14 -; SSE2-NEXT: paddd %xmm0, %xmm11 -; SSE2-NEXT: paddd %xmm0, %xmm8 -; SSE2-NEXT: paddd %xmm0, %xmm3 -; SSE2-NEXT: paddd %xmm0, %xmm5 -; SSE2-NEXT: paddd %xmm0, %xmm7 +; SSE2-NEXT: psubd %xmm0, %xmm2 +; SSE2-NEXT: psubd %xmm0, %xmm12 +; SSE2-NEXT: psubd %xmm0, %xmm4 +; SSE2-NEXT: psubd %xmm0, %xmm15 +; SSE2-NEXT: psubd %xmm0, %xmm13 +; SSE2-NEXT: psubd %xmm0, %xmm9 +; SSE2-NEXT: psubd %xmm0, %xmm6 +; SSE2-NEXT: psubd %xmm0, %xmm14 +; SSE2-NEXT: psubd %xmm0, %xmm11 +; SSE2-NEXT: psubd %xmm0, %xmm8 +; SSE2-NEXT: psubd %xmm0, %xmm3 +; SSE2-NEXT: psubd %xmm0, %xmm5 +; SSE2-NEXT: psubd %xmm0, %xmm7 ; SSE2-NEXT: psrld $1, %xmm10 ; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload ; SSE2-NEXT: psrld $1, %xmm1 @@ -463,15 +463,15 @@ ; AVX2-NEXT: vpaddd %ymm8, %ymm6, %ymm6 ; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero ; AVX2-NEXT: vpaddd %ymm8, %ymm7, %ymm7 -; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm8 -; AVX2-NEXT: vpaddd %ymm8, %ymm0, %ymm9 -; AVX2-NEXT: vpaddd %ymm8, %ymm1, %ymm10 -; AVX2-NEXT: vpaddd %ymm8, %ymm2, %ymm2 -; AVX2-NEXT: vpaddd %ymm8, %ymm3, %ymm3 -; AVX2-NEXT: vpaddd %ymm8, %ymm4, %ymm4 -; AVX2-NEXT: vpaddd %ymm8, %ymm5, %ymm5 -; AVX2-NEXT: vpaddd %ymm8, %ymm6, %ymm1 -; AVX2-NEXT: vpaddd %ymm8, %ymm7, %ymm0 +; AVX2-NEXT: vpcmpeqd %ymm8, %ymm8, %ymm8 +; AVX2-NEXT: vpsubd %ymm8, %ymm0, %ymm9 +; AVX2-NEXT: vpsubd %ymm8, %ymm1, %ymm10 +; AVX2-NEXT: vpsubd %ymm8, %ymm2, %ymm2 +; AVX2-NEXT: vpsubd %ymm8, %ymm3, %ymm3 +; AVX2-NEXT: vpsubd %ymm8, %ymm4, %ymm4 +; AVX2-NEXT: vpsubd %ymm8, %ymm5, %ymm5 +; AVX2-NEXT: vpsubd %ymm8, %ymm6, %ymm1 +; AVX2-NEXT: vpsubd %ymm8, %ymm7, %ymm0 ; AVX2-NEXT: vpsrld $1, %ymm0, %ymm11 ; AVX2-NEXT: vpsrld $1, %ymm1, %ymm12 ; AVX2-NEXT: vpsrld $1, %ymm5, %ymm5 @@ -531,11 +531,11 @@ ; AVX512F-NEXT: vpaddd %zmm4, %zmm2, %zmm2 ; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero ; AVX512F-NEXT: vpaddd %zmm4, %zmm3, %zmm3 -; AVX512F-NEXT: vpbroadcastd {{.*}}(%rip), %zmm4 -; AVX512F-NEXT: vpaddd %zmm4, %zmm0, %zmm0 -; AVX512F-NEXT: vpaddd %zmm4, %zmm1, %zmm1 -; AVX512F-NEXT: vpaddd %zmm4, %zmm2, %zmm2 -; AVX512F-NEXT: vpaddd %zmm4, %zmm3, %zmm3 +; AVX512F-NEXT: vpternlogd $255, %zmm4, %zmm4, %zmm4 +; AVX512F-NEXT: vpsubd %zmm4, %zmm0, %zmm0 +; AVX512F-NEXT: vpsubd %zmm4, %zmm1, %zmm1 +; AVX512F-NEXT: vpsubd %zmm4, %zmm2, %zmm2 +; AVX512F-NEXT: vpsubd %zmm4, %zmm3, 
%zmm3 ; AVX512F-NEXT: vpsrld $1, %zmm3, %zmm3 ; AVX512F-NEXT: vpsrld $1, %zmm2, %zmm2 ; AVX512F-NEXT: vpsrld $1, %zmm1, %zmm1 @@ -678,11 +678,11 @@ ; SSE2-NEXT: paddd %xmm7, %xmm2 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3] ; SSE2-NEXT: paddd %xmm4, %xmm1 -; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [1,1,1,1] -; SSE2-NEXT: paddd %xmm4, %xmm3 -; SSE2-NEXT: paddd %xmm4, %xmm0 -; SSE2-NEXT: paddd %xmm4, %xmm2 -; SSE2-NEXT: paddd %xmm4, %xmm1 +; SSE2-NEXT: pcmpeqd %xmm4, %xmm4 +; SSE2-NEXT: psubd %xmm4, %xmm3 +; SSE2-NEXT: psubd %xmm4, %xmm0 +; SSE2-NEXT: psubd %xmm4, %xmm2 +; SSE2-NEXT: psubd %xmm4, %xmm1 ; SSE2-NEXT: psrld $1, %xmm1 ; SSE2-NEXT: psrld $1, %xmm2 ; SSE2-NEXT: psrld $1, %xmm0 @@ -780,15 +780,15 @@ ; SSE2-NEXT: paddd %xmm13, %xmm4 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3] ; SSE2-NEXT: paddd %xmm8, %xmm3 -; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [1,1,1,1] -; SSE2-NEXT: paddd %xmm0, %xmm7 -; SSE2-NEXT: paddd %xmm0, %xmm9 -; SSE2-NEXT: paddd %xmm0, %xmm6 -; SSE2-NEXT: paddd %xmm0, %xmm1 -; SSE2-NEXT: paddd %xmm0, %xmm5 -; SSE2-NEXT: paddd %xmm0, %xmm2 -; SSE2-NEXT: paddd %xmm0, %xmm4 -; SSE2-NEXT: paddd %xmm0, %xmm3 +; SSE2-NEXT: pcmpeqd %xmm0, %xmm0 +; SSE2-NEXT: psubd %xmm0, %xmm7 +; SSE2-NEXT: psubd %xmm0, %xmm9 +; SSE2-NEXT: psubd %xmm0, %xmm6 +; SSE2-NEXT: psubd %xmm0, %xmm1 +; SSE2-NEXT: psubd %xmm0, %xmm5 +; SSE2-NEXT: psubd %xmm0, %xmm2 +; SSE2-NEXT: psubd %xmm0, %xmm4 +; SSE2-NEXT: psubd %xmm0, %xmm3 ; SSE2-NEXT: psrld $1, %xmm3 ; SSE2-NEXT: psrld $1, %xmm4 ; SSE2-NEXT: psrld $1, %xmm2 @@ -837,11 +837,11 @@ ; AVX2-NEXT: vpaddd %ymm4, %ymm2, %ymm2 ; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero ; AVX2-NEXT: vpaddd %ymm4, %ymm3, %ymm3 -; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm4 -; AVX2-NEXT: vpaddd %ymm4, %ymm0, %ymm0 -; AVX2-NEXT: vpaddd %ymm4, %ymm1, %ymm1 -; AVX2-NEXT: vpaddd %ymm4, %ymm2, %ymm2 -; AVX2-NEXT: vpaddd %ymm4, %ymm3, %ymm3 +; AVX2-NEXT: vpcmpeqd %ymm4, %ymm4, %ymm4 +; AVX2-NEXT: vpsubd %ymm4, %ymm0, %ymm0 +; AVX2-NEXT: vpsubd %ymm4, %ymm1, %ymm1 +; AVX2-NEXT: vpsubd %ymm4, %ymm2, %ymm2 +; AVX2-NEXT: vpsubd %ymm4, %ymm3, %ymm3 ; AVX2-NEXT: vpsrld $1, %ymm3, %ymm3 ; AVX2-NEXT: vpsrld $1, %ymm2, %ymm2 ; AVX2-NEXT: vpsrld $1, %ymm1, %ymm1 @@ -870,9 +870,9 @@ ; AVX512F-NEXT: vpaddd %zmm2, %zmm0, %zmm0 ; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero ; AVX512F-NEXT: vpaddd %zmm2, %zmm1, %zmm1 -; AVX512F-NEXT: vpbroadcastd {{.*}}(%rip), %zmm2 -; AVX512F-NEXT: vpaddd %zmm2, %zmm0, %zmm0 -; AVX512F-NEXT: vpaddd %zmm2, %zmm1, %zmm1 +; AVX512F-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 +; AVX512F-NEXT: vpsubd %zmm2, %zmm0, %zmm0 +; AVX512F-NEXT: vpsubd %zmm2, %zmm1, %zmm1 ; AVX512F-NEXT: vpsrld $1, %zmm1, %zmm1 ; AVX512F-NEXT: vpsrld $1, %zmm0, %zmm0 ; AVX512F-NEXT: vpmovdw %zmm0, (%rax) @@ -1079,15 +1079,15 @@ ; SSE2-NEXT: paddd %xmm10, %xmm7 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3] ; SSE2-NEXT: paddd %xmm8, %xmm1 -; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [1,1,1,1] -; SSE2-NEXT: paddd %xmm4, %xmm9 -; SSE2-NEXT: paddd %xmm4, %xmm2 -; SSE2-NEXT: paddd %xmm4, %xmm5 -; SSE2-NEXT: paddd %xmm4, %xmm0 -; SSE2-NEXT: paddd %xmm4, %xmm6 -; 
SSE2-NEXT: paddd %xmm4, %xmm3 -; SSE2-NEXT: paddd %xmm4, %xmm7 -; SSE2-NEXT: paddd %xmm4, %xmm1 +; SSE2-NEXT: pcmpeqd %xmm4, %xmm4 +; SSE2-NEXT: psubd %xmm4, %xmm9 +; SSE2-NEXT: psubd %xmm4, %xmm2 +; SSE2-NEXT: psubd %xmm4, %xmm5 +; SSE2-NEXT: psubd %xmm4, %xmm0 +; SSE2-NEXT: psubd %xmm4, %xmm6 +; SSE2-NEXT: psubd %xmm4, %xmm3 +; SSE2-NEXT: psubd %xmm4, %xmm7 +; SSE2-NEXT: psubd %xmm4, %xmm1 ; SSE2-NEXT: psrld $1, %xmm1 ; SSE2-NEXT: psrld $1, %xmm7 ; SSE2-NEXT: psrld $1, %xmm3 @@ -1213,25 +1213,25 @@ ; SSE2-NEXT: paddd %xmm8, %xmm8 ; SSE2-NEXT: paddd %xmm7, %xmm7 ; SSE2-NEXT: paddd %xmm15, %xmm15 -; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [1,1,1,1] -; SSE2-NEXT: paddd %xmm0, %xmm15 -; SSE2-NEXT: paddd %xmm0, %xmm7 -; SSE2-NEXT: paddd %xmm0, %xmm8 -; SSE2-NEXT: paddd %xmm0, %xmm14 -; SSE2-NEXT: paddd %xmm0, %xmm13 -; SSE2-NEXT: paddd %xmm0, %xmm6 -; SSE2-NEXT: paddd %xmm0, %xmm9 -; SSE2-NEXT: paddd %xmm0, %xmm12 -; SSE2-NEXT: paddd %xmm0, %xmm11 -; SSE2-NEXT: paddd %xmm0, %xmm5 -; SSE2-NEXT: paddd %xmm0, %xmm10 -; SSE2-NEXT: paddd %xmm0, %xmm2 -; SSE2-NEXT: paddd %xmm0, %xmm3 +; SSE2-NEXT: pcmpeqd %xmm0, %xmm0 +; SSE2-NEXT: psubd %xmm0, %xmm15 +; SSE2-NEXT: psubd %xmm0, %xmm7 +; SSE2-NEXT: psubd %xmm0, %xmm8 +; SSE2-NEXT: psubd %xmm0, %xmm14 +; SSE2-NEXT: psubd %xmm0, %xmm13 +; SSE2-NEXT: psubd %xmm0, %xmm6 +; SSE2-NEXT: psubd %xmm0, %xmm9 +; SSE2-NEXT: psubd %xmm0, %xmm12 +; SSE2-NEXT: psubd %xmm0, %xmm11 +; SSE2-NEXT: psubd %xmm0, %xmm5 +; SSE2-NEXT: psubd %xmm0, %xmm10 +; SSE2-NEXT: psubd %xmm0, %xmm2 +; SSE2-NEXT: psubd %xmm0, %xmm3 ; SSE2-NEXT: movdqa %xmm3, -{{[0-9]+}}(%rsp) # 16-byte Spill -; SSE2-NEXT: paddd %xmm0, %xmm4 +; SSE2-NEXT: psubd %xmm0, %xmm4 ; SSE2-NEXT: movdqa -{{[0-9]+}}(%rsp), %xmm3 # 16-byte Reload -; SSE2-NEXT: paddd %xmm0, %xmm3 -; SSE2-NEXT: paddd %xmm0, %xmm1 +; SSE2-NEXT: psubd %xmm0, %xmm3 +; SSE2-NEXT: psubd %xmm0, %xmm1 ; SSE2-NEXT: psrld $1, %xmm7 ; SSE2-NEXT: psrld $1, %xmm15 ; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0] @@ -1303,15 +1303,15 @@ ; AVX2-NEXT: vpaddd %ymm2, %ymm2, %ymm2 ; AVX2-NEXT: vpaddd %ymm1, %ymm1, %ymm1 ; AVX2-NEXT: vpaddd %ymm0, %ymm0, %ymm0 -; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm8 -; AVX2-NEXT: vpaddd %ymm8, %ymm0, %ymm9 -; AVX2-NEXT: vpaddd %ymm8, %ymm1, %ymm10 -; AVX2-NEXT: vpaddd %ymm8, %ymm2, %ymm2 -; AVX2-NEXT: vpaddd %ymm8, %ymm3, %ymm3 -; AVX2-NEXT: vpaddd %ymm8, %ymm4, %ymm4 -; AVX2-NEXT: vpaddd %ymm8, %ymm5, %ymm5 -; AVX2-NEXT: vpaddd %ymm8, %ymm6, %ymm1 -; AVX2-NEXT: vpaddd %ymm8, %ymm7, %ymm0 +; AVX2-NEXT: vpcmpeqd %ymm8, %ymm8, %ymm8 +; AVX2-NEXT: vpsubd %ymm8, %ymm0, %ymm9 +; AVX2-NEXT: vpsubd %ymm8, %ymm1, %ymm10 +; AVX2-NEXT: vpsubd %ymm8, %ymm2, %ymm2 +; AVX2-NEXT: vpsubd %ymm8, %ymm3, %ymm3 +; AVX2-NEXT: vpsubd %ymm8, %ymm4, %ymm4 +; AVX2-NEXT: vpsubd %ymm8, %ymm5, %ymm5 +; AVX2-NEXT: vpsubd %ymm8, %ymm6, %ymm1 +; AVX2-NEXT: vpsubd %ymm8, %ymm7, %ymm0 ; AVX2-NEXT: vpsrld $1, %ymm0, %ymm11 ; AVX2-NEXT: vpsrld $1, %ymm1, %ymm12 ; AVX2-NEXT: vpsrld $1, %ymm5, %ymm5 @@ -1367,11 +1367,11 @@ ; AVX512F-NEXT: vpaddd %zmm2, %zmm2, %zmm2 ; AVX512F-NEXT: vpaddd %zmm1, %zmm1, %zmm1 ; AVX512F-NEXT: vpaddd %zmm0, %zmm0, %zmm0 -; AVX512F-NEXT: vpbroadcastd {{.*}}(%rip), %zmm4 -; AVX512F-NEXT: vpaddd %zmm4, %zmm0, %zmm0 -; AVX512F-NEXT: vpaddd %zmm4, %zmm1, %zmm1 -; AVX512F-NEXT: vpaddd %zmm4, %zmm2, %zmm2 -; AVX512F-NEXT: vpaddd %zmm4, %zmm3, %zmm3 +; AVX512F-NEXT: vpternlogd $255, %zmm4, %zmm4, %zmm4 +; AVX512F-NEXT: vpsubd %zmm4, %zmm0, %zmm0 +; AVX512F-NEXT: vpsubd %zmm4, %zmm1, %zmm1 +; 
AVX512F-NEXT: vpsubd %zmm4, %zmm2, %zmm2 +; AVX512F-NEXT: vpsubd %zmm4, %zmm3, %zmm3 ; AVX512F-NEXT: vpsrld $1, %zmm3, %zmm3 ; AVX512F-NEXT: vpsrld $1, %zmm2, %zmm2 ; AVX512F-NEXT: vpsrld $1, %zmm1, %zmm1 @@ -1515,11 +1515,11 @@ ; SSE2-NEXT: paddd %xmm7, %xmm2 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3] ; SSE2-NEXT: paddd %xmm4, %xmm1 -; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [1,1,1,1] -; SSE2-NEXT: paddd %xmm4, %xmm3 -; SSE2-NEXT: paddd %xmm4, %xmm0 -; SSE2-NEXT: paddd %xmm4, %xmm2 -; SSE2-NEXT: paddd %xmm4, %xmm1 +; SSE2-NEXT: pcmpeqd %xmm4, %xmm4 +; SSE2-NEXT: psubd %xmm4, %xmm3 +; SSE2-NEXT: psubd %xmm4, %xmm0 +; SSE2-NEXT: psubd %xmm4, %xmm2 +; SSE2-NEXT: psubd %xmm4, %xmm1 ; SSE2-NEXT: psrld $1, %xmm1 ; SSE2-NEXT: psrld $1, %xmm2 ; SSE2-NEXT: psrld $1, %xmm0 @@ -1617,15 +1617,15 @@ ; SSE2-NEXT: paddd %xmm13, %xmm4 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3] ; SSE2-NEXT: paddd %xmm8, %xmm3 -; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [1,1,1,1] -; SSE2-NEXT: paddd %xmm0, %xmm7 -; SSE2-NEXT: paddd %xmm0, %xmm9 -; SSE2-NEXT: paddd %xmm0, %xmm6 -; SSE2-NEXT: paddd %xmm0, %xmm1 -; SSE2-NEXT: paddd %xmm0, %xmm5 -; SSE2-NEXT: paddd %xmm0, %xmm2 -; SSE2-NEXT: paddd %xmm0, %xmm4 -; SSE2-NEXT: paddd %xmm0, %xmm3 +; SSE2-NEXT: pcmpeqd %xmm0, %xmm0 +; SSE2-NEXT: psubd %xmm0, %xmm7 +; SSE2-NEXT: psubd %xmm0, %xmm9 +; SSE2-NEXT: psubd %xmm0, %xmm6 +; SSE2-NEXT: psubd %xmm0, %xmm1 +; SSE2-NEXT: psubd %xmm0, %xmm5 +; SSE2-NEXT: psubd %xmm0, %xmm2 +; SSE2-NEXT: psubd %xmm0, %xmm4 +; SSE2-NEXT: psubd %xmm0, %xmm3 ; SSE2-NEXT: psrld $1, %xmm3 ; SSE2-NEXT: psrld $1, %xmm4 ; SSE2-NEXT: psrld $1, %xmm2 @@ -1674,11 +1674,11 @@ ; AVX2-NEXT: vpaddd %ymm4, %ymm2, %ymm2 ; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero ; AVX2-NEXT: vpaddd %ymm4, %ymm3, %ymm3 -; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm4 -; AVX2-NEXT: vpaddd %ymm4, %ymm0, %ymm0 -; AVX2-NEXT: vpaddd %ymm4, %ymm1, %ymm1 -; AVX2-NEXT: vpaddd %ymm4, %ymm2, %ymm2 -; AVX2-NEXT: vpaddd %ymm4, %ymm3, %ymm3 +; AVX2-NEXT: vpcmpeqd %ymm4, %ymm4, %ymm4 +; AVX2-NEXT: vpsubd %ymm4, %ymm0, %ymm0 +; AVX2-NEXT: vpsubd %ymm4, %ymm1, %ymm1 +; AVX2-NEXT: vpsubd %ymm4, %ymm2, %ymm2 +; AVX2-NEXT: vpsubd %ymm4, %ymm3, %ymm3 ; AVX2-NEXT: vpsrld $1, %ymm3, %ymm3 ; AVX2-NEXT: vpsrld $1, %ymm2, %ymm2 ; AVX2-NEXT: vpsrld $1, %ymm1, %ymm1 @@ -1707,9 +1707,9 @@ ; AVX512F-NEXT: vpaddd %zmm2, %zmm0, %zmm0 ; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero ; AVX512F-NEXT: vpaddd %zmm2, %zmm1, %zmm1 -; AVX512F-NEXT: vpbroadcastd {{.*}}(%rip), %zmm2 -; AVX512F-NEXT: vpaddd %zmm2, %zmm0, %zmm0 -; AVX512F-NEXT: vpaddd %zmm2, %zmm1, %zmm1 +; AVX512F-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 +; AVX512F-NEXT: vpsubd %zmm2, %zmm0, %zmm0 +; AVX512F-NEXT: vpsubd %zmm2, %zmm1, %zmm1 ; AVX512F-NEXT: vpsrld $1, %zmm1, %zmm1 ; AVX512F-NEXT: vpsrld $1, %zmm0, %zmm0 ; AVX512F-NEXT: vpmovdw %zmm0, (%rax) Index: test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll =================================================================== --- test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll +++ test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll @@ -388,7 +388,8 @@ ; CHECK-LABEL: test_x86_sse2_storeu_dq: ; CHECK: ## BB#0: ; CHECK-NEXT: movl 
{{[0-9]+}}(%esp), %eax -; CHECK-NEXT: vpaddb LCPI34_0, %xmm0, %xmm0 +; CHECK-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1 +; CHECK-NEXT: vpsubb %xmm1, %xmm0, %xmm0 ; CHECK-NEXT: vmovdqu %xmm0, (%eax) ; CHECK-NEXT: retl %a2 = add <16 x i8> %a1, @@ -434,9 +435,9 @@ ; CHECK: ## BB#0: ; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax ; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm1 -; CHECK-NEXT: vmovdqa {{.*#+}} xmm2 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1] -; CHECK-NEXT: vpaddb %xmm2, %xmm1, %xmm1 -; CHECK-NEXT: vpaddb %xmm2, %xmm0, %xmm0 +; CHECK-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 +; CHECK-NEXT: vpsubb %xmm2, %xmm1, %xmm1 +; CHECK-NEXT: vpsubb %xmm2, %xmm0, %xmm0 ; CHECK-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; CHECK-NEXT: vmovups %ymm0, (%eax) ; CHECK-NEXT: vzeroupper Index: test/CodeGen/X86/avx-intrinsics-x86.ll =================================================================== --- test/CodeGen/X86/avx-intrinsics-x86.ll +++ test/CodeGen/X86/avx-intrinsics-x86.ll @@ -930,8 +930,8 @@ ; AVX-LABEL: movnt_dq: ; AVX: ## BB#0: ; AVX-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04] -; AVX-NEXT: vpaddq LCPI65_0, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xd4,0x05,A,A,A,A] -; AVX-NEXT: ## fixup A - offset: 4, value: LCPI65_0, kind: FK_Data_4 +; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf1,0x76,0xc9] +; AVX-NEXT: vpsubq %xmm1, %xmm0, %xmm0 ## encoding: [0xc5,0xf9,0xfb,0xc1] ; AVX-NEXT: vmovntdq %ymm0, (%eax) ## encoding: [0xc5,0xfd,0xe7,0x00] ; AVX-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77] ; AVX-NEXT: retl ## encoding: [0xc3] @@ -939,8 +939,8 @@ ; AVX512VL-LABEL: movnt_dq: ; AVX512VL: ## BB#0: ; AVX512VL-NEXT: movl {{[0-9]+}}(%esp), %eax ## encoding: [0x8b,0x44,0x24,0x04] -; AVX512VL-NEXT: vpaddq LCPI65_0, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd4,0x05,A,A,A,A] -; AVX512VL-NEXT: ## fixup A - offset: 4, value: LCPI65_0, kind: FK_Data_4 +; AVX512VL-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1 ## encoding: [0xc5,0xf1,0x76,0xc9] +; AVX512VL-NEXT: vpsubq %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfb,0xc1] ; AVX512VL-NEXT: vmovntdq %ymm0, (%eax) ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe7,0x00] ; AVX512VL-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77] ; AVX512VL-NEXT: retl ## encoding: [0xc3] Index: test/CodeGen/X86/avx-logic.ll =================================================================== --- test/CodeGen/X86/avx-logic.ll +++ test/CodeGen/X86/avx-logic.ll @@ -247,7 +247,8 @@ define <2 x i64> @vpandn(<2 x i64> %a, <2 x i64> %b) nounwind uwtable readnone ssp { ; CHECK-LABEL: vpandn: ; CHECK: # BB#0: # %entry -; CHECK-NEXT: vpaddq {{.*}}(%rip), %xmm0, %xmm1 +; CHECK-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1 +; CHECK-NEXT: vpsubq %xmm1, %xmm0, %xmm1 ; CHECK-NEXT: vpandn %xmm0, %xmm1, %xmm0 ; CHECK-NEXT: retq entry: @@ -261,7 +262,8 @@ define <2 x i64> @vpand(<2 x i64> %a, <2 x i64> %b) nounwind uwtable readnone ssp { ; CHECK-LABEL: vpand: ; CHECK: # BB#0: # %entry -; CHECK-NEXT: vpaddq {{.*}}(%rip), %xmm0, %xmm0 +; CHECK-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 +; CHECK-NEXT: vpsubq %xmm2, %xmm0, %xmm0 ; CHECK-NEXT: vpand %xmm1, %xmm0, %xmm0 ; CHECK-NEXT: retq entry: Index: test/CodeGen/X86/avx-vperm2x128.ll =================================================================== --- test/CodeGen/X86/avx-vperm2x128.ll +++ test/CodeGen/X86/avx-vperm2x128.ll @@ -97,14 +97,16 @@ ; AVX1-LABEL: shuffle_v32i8_2323_domain: ; AVX1: ## BB#0: ## %entry ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0 -; AVX1-NEXT: vpaddb {{.*}}(%rip), %xmm0, %xmm0 +; AVX1-NEXT: vpcmpeqd 
%xmm1, %xmm1, %xmm1 +; AVX1-NEXT: vpsubb %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0 ; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3] ; AVX1-NEXT: retq ; ; AVX2-LABEL: shuffle_v32i8_2323_domain: ; AVX2: ## BB#0: ## %entry -; AVX2-NEXT: vpaddb {{.*}}(%rip), %ymm0, %ymm0 +; AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 +; AVX2-NEXT: vpsubb %ymm1, %ymm0, %ymm0 ; AVX2-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3,2,3] ; AVX2-NEXT: retq entry: @@ -127,14 +129,15 @@ define <4 x i64> @shuffle_v4i64_6701_domain(<4 x i64> %a, <4 x i64> %b) nounwind uwtable readnone ssp { ; AVX1-LABEL: shuffle_v4i64_6701_domain: ; AVX1: ## BB#0: ## %entry -; AVX1-NEXT: vpaddq {{.*}}(%rip), %xmm0, %xmm0 +; AVX1-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 +; AVX1-NEXT: vpsubq %xmm2, %xmm0, %xmm0 ; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[0,1] ; AVX1-NEXT: retq ; ; AVX2-LABEL: shuffle_v4i64_6701_domain: ; AVX2: ## BB#0: ## %entry -; AVX2-NEXT: vpbroadcastq {{.*}}(%rip), %ymm2 -; AVX2-NEXT: vpaddq %ymm2, %ymm0, %ymm0 +; AVX2-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2 +; AVX2-NEXT: vpsubq %ymm2, %ymm0, %ymm0 ; AVX2-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[0,1] ; AVX2-NEXT: retq entry: @@ -148,15 +151,16 @@ ; AVX1-LABEL: shuffle_v8i32_u5u7cdef: ; AVX1: ## BB#0: ## %entry ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0 -; AVX1-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0 +; AVX1-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 +; AVX1-NEXT: vpsubd %xmm2, %xmm0, %xmm0 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0 ; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3] ; AVX1-NEXT: retq ; ; AVX2-LABEL: shuffle_v8i32_u5u7cdef: ; AVX2: ## BB#0: ## %entry -; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm2 -; AVX2-NEXT: vpaddd %ymm2, %ymm0, %ymm0 +; AVX2-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2 +; AVX2-NEXT: vpsubd %ymm2, %ymm0, %ymm0 ; AVX2-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3] ; AVX2-NEXT: retq entry: @@ -169,13 +173,15 @@ define <16 x i16> @shuffle_v16i16_4501(<16 x i16> %a, <16 x i16> %b) nounwind uwtable readnone ssp { ; AVX1-LABEL: shuffle_v16i16_4501: ; AVX1: ## BB#0: ## %entry -; AVX1-NEXT: vpaddw {{.*}}(%rip), %xmm0, %xmm0 +; AVX1-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 +; AVX1-NEXT: vpsubw %xmm2, %xmm0, %xmm0 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0 ; AVX1-NEXT: retq ; ; AVX2-LABEL: shuffle_v16i16_4501: ; AVX2: ## BB#0: ## %entry -; AVX2-NEXT: vpaddw {{.*}}(%rip), %ymm0, %ymm0 +; AVX2-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2 +; AVX2-NEXT: vpsubw %ymm2, %ymm0, %ymm0 ; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0 ; AVX2-NEXT: retq entry: @@ -189,14 +195,16 @@ ; AVX1-LABEL: shuffle_v16i16_4501_mem: ; AVX1: ## BB#0: ## %entry ; AVX1-NEXT: vmovdqa (%rdi), %ymm0 -; AVX1-NEXT: vpaddw {{.*}}(%rip), %xmm0, %xmm0 +; AVX1-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1 +; AVX1-NEXT: vpsubw %xmm1, %xmm0, %xmm0 ; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = mem[0,1],ymm0[0,1] ; AVX1-NEXT: retq ; ; AVX2-LABEL: shuffle_v16i16_4501_mem: ; AVX2: ## BB#0: ## %entry ; AVX2-NEXT: vmovdqa (%rdi), %ymm0 -; AVX2-NEXT: vpaddw {{.*}}(%rip), %ymm0, %ymm0 +; AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 +; AVX2-NEXT: vpsubw %ymm1, %ymm0, %ymm0 ; AVX2-NEXT: vperm2i128 {{.*#+}} ymm0 = mem[0,1],ymm0[0,1] ; AVX2-NEXT: retq entry: Index: test/CodeGen/X86/avx2-intrinsics-x86-upgrade.ll =================================================================== --- test/CodeGen/X86/avx2-intrinsics-x86-upgrade.ll +++ test/CodeGen/X86/avx2-intrinsics-x86-upgrade.ll @@ -382,7 +382,8 @@ ; CHECK-LABEL: test_x86_avx_storeu_dq_256: ; CHECK: ## BB#0: ; CHECK-NEXT: movl 
{{[0-9]+}}(%esp), %eax -; CHECK-NEXT: vpaddb LCPI34_0, %ymm0, %ymm0 +; CHECK-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 +; CHECK-NEXT: vpsubb %ymm1, %ymm0, %ymm0 ; CHECK-NEXT: vmovdqu %ymm0, (%eax) ; CHECK-NEXT: vzeroupper ; CHECK-NEXT: retl Index: test/CodeGen/X86/avx2-logic.ll =================================================================== --- test/CodeGen/X86/avx2-logic.ll +++ test/CodeGen/X86/avx2-logic.ll @@ -5,14 +5,15 @@ define <4 x i64> @vpandn(<4 x i64> %a, <4 x i64> %b) nounwind uwtable readnone ssp { ; X32-LABEL: vpandn: ; X32: ## BB#0: ## %entry -; X32-NEXT: vpaddq LCPI0_0, %ymm0, %ymm1 +; X32-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 +; X32-NEXT: vpsubq %ymm1, %ymm0, %ymm1 ; X32-NEXT: vpandn %ymm0, %ymm1, %ymm0 ; X32-NEXT: retl ; ; X64-LABEL: vpandn: ; X64: ## BB#0: ## %entry -; X64-NEXT: vpbroadcastq {{.*}}(%rip), %ymm1 -; X64-NEXT: vpaddq %ymm1, %ymm0, %ymm1 +; X64-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 +; X64-NEXT: vpsubq %ymm1, %ymm0, %ymm1 ; X64-NEXT: vpandn %ymm0, %ymm1, %ymm0 ; X64-NEXT: retq entry: @@ -26,14 +27,15 @@ define <4 x i64> @vpand(<4 x i64> %a, <4 x i64> %b) nounwind uwtable readnone ssp { ; X32-LABEL: vpand: ; X32: ## BB#0: ## %entry -; X32-NEXT: vpaddq LCPI1_0, %ymm0, %ymm0 +; X32-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2 +; X32-NEXT: vpsubq %ymm2, %ymm0, %ymm0 ; X32-NEXT: vpand %ymm1, %ymm0, %ymm0 ; X32-NEXT: retl ; ; X64-LABEL: vpand: ; X64: ## BB#0: ## %entry -; X64-NEXT: vpbroadcastq {{.*}}(%rip), %ymm2 -; X64-NEXT: vpaddq %ymm2, %ymm0, %ymm0 +; X64-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2 +; X64-NEXT: vpsubq %ymm2, %ymm0, %ymm0 ; X64-NEXT: vpand %ymm1, %ymm0, %ymm0 ; X64-NEXT: retq entry: @@ -46,14 +48,15 @@ define <4 x i64> @vpor(<4 x i64> %a, <4 x i64> %b) nounwind uwtable readnone ssp { ; X32-LABEL: vpor: ; X32: ## BB#0: ## %entry -; X32-NEXT: vpaddq LCPI2_0, %ymm0, %ymm0 +; X32-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2 +; X32-NEXT: vpsubq %ymm2, %ymm0, %ymm0 ; X32-NEXT: vpor %ymm1, %ymm0, %ymm0 ; X32-NEXT: retl ; ; X64-LABEL: vpor: ; X64: ## BB#0: ## %entry -; X64-NEXT: vpbroadcastq {{.*}}(%rip), %ymm2 -; X64-NEXT: vpaddq %ymm2, %ymm0, %ymm0 +; X64-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2 +; X64-NEXT: vpsubq %ymm2, %ymm0, %ymm0 ; X64-NEXT: vpor %ymm1, %ymm0, %ymm0 ; X64-NEXT: retq entry: @@ -66,14 +69,15 @@ define <4 x i64> @vpxor(<4 x i64> %a, <4 x i64> %b) nounwind uwtable readnone ssp { ; X32-LABEL: vpxor: ; X32: ## BB#0: ## %entry -; X32-NEXT: vpaddq LCPI3_0, %ymm0, %ymm0 +; X32-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2 +; X32-NEXT: vpsubq %ymm2, %ymm0, %ymm0 ; X32-NEXT: vpxor %ymm1, %ymm0, %ymm0 ; X32-NEXT: retl ; ; X64-LABEL: vpxor: ; X64: ## BB#0: ## %entry -; X64-NEXT: vpbroadcastq {{.*}}(%rip), %ymm2 -; X64-NEXT: vpaddq %ymm2, %ymm0, %ymm0 +; X64-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2 +; X64-NEXT: vpsubq %ymm2, %ymm0, %ymm0 ; X64-NEXT: vpxor %ymm1, %ymm0, %ymm0 ; X64-NEXT: retq entry: Index: test/CodeGen/X86/select.ll =================================================================== --- test/CodeGen/X86/select.ll +++ test/CodeGen/X86/select.ll @@ -321,8 +321,9 @@ ; GENERIC-NEXT: LBB7_6: ; GENERIC-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1] ; GENERIC-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0] -; GENERIC-NEXT: psubd {{.*}}(%rip), %xmm1 -; GENERIC-NEXT: psubd {{.*}}(%rip), %xmm0 +; GENERIC-NEXT: pcmpeqd %xmm2, %xmm2 +; GENERIC-NEXT: paddd %xmm2, %xmm1 +; GENERIC-NEXT: paddd %xmm2, %xmm0 ; GENERIC-NEXT: movq %xmm0, 16(%rsi) ; GENERIC-NEXT: movdqa %xmm1, (%rsi) ; GENERIC-NEXT: retq @@ -361,8 +362,9 @@ ; ATOM-NEXT: punpckldq {{.*#+}} xmm1 = 
xmm1[0],xmm3[0],xmm1[1],xmm3[1] ; ATOM-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0] ; ATOM-NEXT: LBB7_6: -; ATOM-NEXT: psubd {{.*}}(%rip), %xmm0 -; ATOM-NEXT: psubd {{.*}}(%rip), %xmm1 +; ATOM-NEXT: pcmpeqd %xmm2, %xmm2 +; ATOM-NEXT: paddd %xmm2, %xmm0 +; ATOM-NEXT: paddd %xmm2, %xmm1 ; ATOM-NEXT: movq %xmm0, 16(%rsi) ; ATOM-NEXT: movdqa %xmm1, (%rsi) ; ATOM-NEXT: retq Index: test/CodeGen/X86/sse2-intrinsics-x86-upgrade.ll =================================================================== --- test/CodeGen/X86/sse2-intrinsics-x86-upgrade.ll +++ test/CodeGen/X86/sse2-intrinsics-x86-upgrade.ll @@ -83,7 +83,8 @@ ; CHECK-LABEL: test_x86_sse2_storeu_dq: ; CHECK: ## BB#0: ; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax -; CHECK-NEXT: paddb LCPI7_0, %xmm0 +; CHECK-NEXT: pcmpeqd %xmm1, %xmm1 +; CHECK-NEXT: psubb %xmm1, %xmm0 ; CHECK-NEXT: movdqu %xmm0, (%eax) ; CHECK-NEXT: retl %a2 = add <16 x i8> %a1, Index: test/CodeGen/X86/vec_ctbits.ll =================================================================== --- test/CodeGen/X86/vec_ctbits.ll +++ test/CodeGen/X86/vec_ctbits.ll @@ -12,20 +12,21 @@ ; CHECK-NEXT: pxor %xmm2, %xmm2 ; CHECK-NEXT: psubq %xmm0, %xmm2 ; CHECK-NEXT: pand %xmm0, %xmm2 -; CHECK-NEXT: psubq {{.*}}(%rip), %xmm2 -; CHECK-NEXT: movdqa %xmm2, %xmm0 +; CHECK-NEXT: pcmpeqd %xmm3, %xmm3 +; CHECK-NEXT: paddq %xmm2, %xmm3 +; CHECK-NEXT: movdqa %xmm3, %xmm0 ; CHECK-NEXT: psrlq $1, %xmm0 ; CHECK-NEXT: pand {{.*}}(%rip), %xmm0 -; CHECK-NEXT: psubq %xmm0, %xmm2 +; CHECK-NEXT: psubq %xmm0, %xmm3 ; CHECK-NEXT: movdqa {{.*#+}} xmm0 = [3689348814741910323,3689348814741910323] -; CHECK-NEXT: movdqa %xmm2, %xmm3 -; CHECK-NEXT: pand %xmm0, %xmm3 -; CHECK-NEXT: psrlq $2, %xmm2 +; CHECK-NEXT: movdqa %xmm3, %xmm2 ; CHECK-NEXT: pand %xmm0, %xmm2 -; CHECK-NEXT: paddq %xmm3, %xmm2 -; CHECK-NEXT: movdqa %xmm2, %xmm0 +; CHECK-NEXT: psrlq $2, %xmm3 +; CHECK-NEXT: pand %xmm0, %xmm3 +; CHECK-NEXT: paddq %xmm2, %xmm3 +; CHECK-NEXT: movdqa %xmm3, %xmm0 ; CHECK-NEXT: psrlq $4, %xmm0 -; CHECK-NEXT: paddq %xmm2, %xmm0 +; CHECK-NEXT: paddq %xmm3, %xmm0 ; CHECK-NEXT: pand {{.*}}(%rip), %xmm0 ; CHECK-NEXT: psadbw %xmm1, %xmm0 ; CHECK-NEXT: retq @@ -115,20 +116,21 @@ ; CHECK-NEXT: pxor %xmm2, %xmm2 ; CHECK-NEXT: psubq %xmm0, %xmm2 ; CHECK-NEXT: pand %xmm0, %xmm2 -; CHECK-NEXT: psubq {{.*}}(%rip), %xmm2 -; CHECK-NEXT: movdqa %xmm2, %xmm0 +; CHECK-NEXT: pcmpeqd %xmm3, %xmm3 +; CHECK-NEXT: paddq %xmm2, %xmm3 +; CHECK-NEXT: movdqa %xmm3, %xmm0 ; CHECK-NEXT: psrlq $1, %xmm0 ; CHECK-NEXT: pand {{.*}}(%rip), %xmm0 -; CHECK-NEXT: psubq %xmm0, %xmm2 +; CHECK-NEXT: psubq %xmm0, %xmm3 ; CHECK-NEXT: movdqa {{.*#+}} xmm0 = [3689348814741910323,3689348814741910323] -; CHECK-NEXT: movdqa %xmm2, %xmm3 -; CHECK-NEXT: pand %xmm0, %xmm3 -; CHECK-NEXT: psrlq $2, %xmm2 +; CHECK-NEXT: movdqa %xmm3, %xmm2 ; CHECK-NEXT: pand %xmm0, %xmm2 -; CHECK-NEXT: paddq %xmm3, %xmm2 -; CHECK-NEXT: movdqa %xmm2, %xmm0 +; CHECK-NEXT: psrlq $2, %xmm3 +; CHECK-NEXT: pand %xmm0, %xmm3 +; CHECK-NEXT: paddq %xmm2, %xmm3 +; CHECK-NEXT: movdqa %xmm3, %xmm0 ; CHECK-NEXT: psrlq $4, %xmm0 -; CHECK-NEXT: paddq %xmm2, %xmm0 +; CHECK-NEXT: paddq %xmm3, %xmm0 ; CHECK-NEXT: pand {{.*}}(%rip), %xmm0 ; CHECK-NEXT: psadbw %xmm1, %xmm0 ; CHECK-NEXT: retq Index: test/CodeGen/X86/vector-tzcnt-128.ll =================================================================== --- test/CodeGen/X86/vector-tzcnt-128.ll +++ test/CodeGen/X86/vector-tzcnt-128.ll @@ -19,20 +19,21 @@ ; SSE2-NEXT: pxor %xmm2, %xmm2 ; SSE2-NEXT: psubq %xmm0, %xmm2 ; SSE2-NEXT: pand %xmm0, %xmm2 -; SSE2-NEXT: 
psubq {{.*}}(%rip), %xmm2 -; SSE2-NEXT: movdqa %xmm2, %xmm0 +; SSE2-NEXT: pcmpeqd %xmm3, %xmm3 +; SSE2-NEXT: paddq %xmm2, %xmm3 +; SSE2-NEXT: movdqa %xmm3, %xmm0 ; SSE2-NEXT: psrlq $1, %xmm0 ; SSE2-NEXT: pand {{.*}}(%rip), %xmm0 -; SSE2-NEXT: psubq %xmm0, %xmm2 +; SSE2-NEXT: psubq %xmm0, %xmm3 ; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [3689348814741910323,3689348814741910323] -; SSE2-NEXT: movdqa %xmm2, %xmm3 -; SSE2-NEXT: pand %xmm0, %xmm3 -; SSE2-NEXT: psrlq $2, %xmm2 +; SSE2-NEXT: movdqa %xmm3, %xmm2 ; SSE2-NEXT: pand %xmm0, %xmm2 -; SSE2-NEXT: paddq %xmm3, %xmm2 -; SSE2-NEXT: movdqa %xmm2, %xmm0 +; SSE2-NEXT: psrlq $2, %xmm3 +; SSE2-NEXT: pand %xmm0, %xmm3 +; SSE2-NEXT: paddq %xmm2, %xmm3 +; SSE2-NEXT: movdqa %xmm3, %xmm0 ; SSE2-NEXT: psrlq $4, %xmm0 -; SSE2-NEXT: paddq %xmm2, %xmm0 +; SSE2-NEXT: paddq %xmm3, %xmm0 ; SSE2-NEXT: pand {{.*}}(%rip), %xmm0 ; SSE2-NEXT: psadbw %xmm1, %xmm0 ; SSE2-NEXT: retq @@ -43,20 +44,21 @@ ; SSE3-NEXT: pxor %xmm2, %xmm2 ; SSE3-NEXT: psubq %xmm0, %xmm2 ; SSE3-NEXT: pand %xmm0, %xmm2 -; SSE3-NEXT: psubq {{.*}}(%rip), %xmm2 -; SSE3-NEXT: movdqa %xmm2, %xmm0 +; SSE3-NEXT: pcmpeqd %xmm3, %xmm3 +; SSE3-NEXT: paddq %xmm2, %xmm3 +; SSE3-NEXT: movdqa %xmm3, %xmm0 ; SSE3-NEXT: psrlq $1, %xmm0 ; SSE3-NEXT: pand {{.*}}(%rip), %xmm0 -; SSE3-NEXT: psubq %xmm0, %xmm2 +; SSE3-NEXT: psubq %xmm0, %xmm3 ; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [3689348814741910323,3689348814741910323] -; SSE3-NEXT: movdqa %xmm2, %xmm3 -; SSE3-NEXT: pand %xmm0, %xmm3 -; SSE3-NEXT: psrlq $2, %xmm2 +; SSE3-NEXT: movdqa %xmm3, %xmm2 ; SSE3-NEXT: pand %xmm0, %xmm2 -; SSE3-NEXT: paddq %xmm3, %xmm2 -; SSE3-NEXT: movdqa %xmm2, %xmm0 +; SSE3-NEXT: psrlq $2, %xmm3 +; SSE3-NEXT: pand %xmm0, %xmm3 +; SSE3-NEXT: paddq %xmm2, %xmm3 +; SSE3-NEXT: movdqa %xmm3, %xmm0 ; SSE3-NEXT: psrlq $4, %xmm0 -; SSE3-NEXT: paddq %xmm2, %xmm0 +; SSE3-NEXT: paddq %xmm3, %xmm0 ; SSE3-NEXT: pand {{.*}}(%rip), %xmm0 ; SSE3-NEXT: psadbw %xmm1, %xmm0 ; SSE3-NEXT: retq @@ -67,16 +69,17 @@ ; SSSE3-NEXT: pxor %xmm2, %xmm2 ; SSSE3-NEXT: psubq %xmm0, %xmm2 ; SSSE3-NEXT: pand %xmm0, %xmm2 -; SSSE3-NEXT: psubq {{.*}}(%rip), %xmm2 -; SSSE3-NEXT: movdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] -; SSSE3-NEXT: movdqa %xmm2, %xmm4 -; SSSE3-NEXT: pand %xmm3, %xmm4 +; SSSE3-NEXT: pcmpeqd %xmm3, %xmm3 +; SSSE3-NEXT: paddq %xmm2, %xmm3 +; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; SSSE3-NEXT: movdqa %xmm3, %xmm4 +; SSSE3-NEXT: pand %xmm2, %xmm4 ; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] ; SSSE3-NEXT: movdqa %xmm0, %xmm5 ; SSSE3-NEXT: pshufb %xmm4, %xmm5 -; SSSE3-NEXT: psrlw $4, %xmm2 -; SSSE3-NEXT: pand %xmm3, %xmm2 -; SSSE3-NEXT: pshufb %xmm2, %xmm0 +; SSSE3-NEXT: psrlw $4, %xmm3 +; SSSE3-NEXT: pand %xmm2, %xmm3 +; SSSE3-NEXT: pshufb %xmm3, %xmm0 ; SSSE3-NEXT: paddb %xmm5, %xmm0 ; SSSE3-NEXT: psadbw %xmm1, %xmm0 ; SSSE3-NEXT: retq @@ -87,16 +90,17 @@ ; SSE41-NEXT: pxor %xmm2, %xmm2 ; SSE41-NEXT: psubq %xmm0, %xmm2 ; SSE41-NEXT: pand %xmm0, %xmm2 -; SSE41-NEXT: psubq {{.*}}(%rip), %xmm2 -; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] -; SSE41-NEXT: movdqa %xmm2, %xmm4 -; SSE41-NEXT: pand %xmm3, %xmm4 +; SSE41-NEXT: pcmpeqd %xmm3, %xmm3 +; SSE41-NEXT: paddq %xmm2, %xmm3 +; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; SSE41-NEXT: movdqa %xmm3, %xmm4 +; SSE41-NEXT: pand %xmm2, %xmm4 ; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] ; SSE41-NEXT: movdqa 
%xmm0, %xmm5 ; SSE41-NEXT: pshufb %xmm4, %xmm5 -; SSE41-NEXT: psrlw $4, %xmm2 -; SSE41-NEXT: pand %xmm3, %xmm2 -; SSE41-NEXT: pshufb %xmm2, %xmm0 +; SSE41-NEXT: psrlw $4, %xmm3 +; SSE41-NEXT: pand %xmm2, %xmm3 +; SSE41-NEXT: pshufb %xmm3, %xmm0 ; SSE41-NEXT: paddb %xmm5, %xmm0 ; SSE41-NEXT: psadbw %xmm1, %xmm0 ; SSE41-NEXT: retq @@ -106,7 +110,8 @@ ; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; AVX-NEXT: vpsubq %xmm0, %xmm1, %xmm2 ; AVX-NEXT: vpand %xmm2, %xmm0, %xmm0 -; AVX-NEXT: vpsubq {{.*}}(%rip), %xmm0, %xmm0 +; AVX-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 +; AVX-NEXT: vpaddq %xmm2, %xmm0, %xmm0 ; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] ; AVX-NEXT: vpand %xmm2, %xmm0, %xmm3 ; AVX-NEXT: vmovdqa {{.*#+}} xmm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] @@ -123,7 +128,8 @@ ; AVX512VPOPCNTDQ-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; AVX512VPOPCNTDQ-NEXT: vpsubq %xmm0, %xmm1, %xmm1 ; AVX512VPOPCNTDQ-NEXT: vpand %xmm1, %xmm0, %xmm0 -; AVX512VPOPCNTDQ-NEXT: vpsubq {{.*}}(%rip), %xmm0, %xmm0 +; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1 +; AVX512VPOPCNTDQ-NEXT: vpaddq %xmm1, %xmm0, %xmm0 ; AVX512VPOPCNTDQ-NEXT: vpopcntq %zmm0, %zmm0 ; AVX512VPOPCNTDQ-NEXT: # kill: %XMM0 %XMM0 %ZMM0 ; AVX512VPOPCNTDQ-NEXT: vzeroupper @@ -159,20 +165,21 @@ ; SSE2-NEXT: pxor %xmm2, %xmm2 ; SSE2-NEXT: psubq %xmm0, %xmm2 ; SSE2-NEXT: pand %xmm0, %xmm2 -; SSE2-NEXT: psubq {{.*}}(%rip), %xmm2 -; SSE2-NEXT: movdqa %xmm2, %xmm0 +; SSE2-NEXT: pcmpeqd %xmm3, %xmm3 +; SSE2-NEXT: paddq %xmm2, %xmm3 +; SSE2-NEXT: movdqa %xmm3, %xmm0 ; SSE2-NEXT: psrlq $1, %xmm0 ; SSE2-NEXT: pand {{.*}}(%rip), %xmm0 -; SSE2-NEXT: psubq %xmm0, %xmm2 +; SSE2-NEXT: psubq %xmm0, %xmm3 ; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [3689348814741910323,3689348814741910323] -; SSE2-NEXT: movdqa %xmm2, %xmm3 -; SSE2-NEXT: pand %xmm0, %xmm3 -; SSE2-NEXT: psrlq $2, %xmm2 +; SSE2-NEXT: movdqa %xmm3, %xmm2 ; SSE2-NEXT: pand %xmm0, %xmm2 -; SSE2-NEXT: paddq %xmm3, %xmm2 -; SSE2-NEXT: movdqa %xmm2, %xmm0 +; SSE2-NEXT: psrlq $2, %xmm3 +; SSE2-NEXT: pand %xmm0, %xmm3 +; SSE2-NEXT: paddq %xmm2, %xmm3 +; SSE2-NEXT: movdqa %xmm3, %xmm0 ; SSE2-NEXT: psrlq $4, %xmm0 -; SSE2-NEXT: paddq %xmm2, %xmm0 +; SSE2-NEXT: paddq %xmm3, %xmm0 ; SSE2-NEXT: pand {{.*}}(%rip), %xmm0 ; SSE2-NEXT: psadbw %xmm1, %xmm0 ; SSE2-NEXT: retq @@ -183,20 +190,21 @@ ; SSE3-NEXT: pxor %xmm2, %xmm2 ; SSE3-NEXT: psubq %xmm0, %xmm2 ; SSE3-NEXT: pand %xmm0, %xmm2 -; SSE3-NEXT: psubq {{.*}}(%rip), %xmm2 -; SSE3-NEXT: movdqa %xmm2, %xmm0 +; SSE3-NEXT: pcmpeqd %xmm3, %xmm3 +; SSE3-NEXT: paddq %xmm2, %xmm3 +; SSE3-NEXT: movdqa %xmm3, %xmm0 ; SSE3-NEXT: psrlq $1, %xmm0 ; SSE3-NEXT: pand {{.*}}(%rip), %xmm0 -; SSE3-NEXT: psubq %xmm0, %xmm2 +; SSE3-NEXT: psubq %xmm0, %xmm3 ; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [3689348814741910323,3689348814741910323] -; SSE3-NEXT: movdqa %xmm2, %xmm3 -; SSE3-NEXT: pand %xmm0, %xmm3 -; SSE3-NEXT: psrlq $2, %xmm2 +; SSE3-NEXT: movdqa %xmm3, %xmm2 ; SSE3-NEXT: pand %xmm0, %xmm2 -; SSE3-NEXT: paddq %xmm3, %xmm2 -; SSE3-NEXT: movdqa %xmm2, %xmm0 +; SSE3-NEXT: psrlq $2, %xmm3 +; SSE3-NEXT: pand %xmm0, %xmm3 +; SSE3-NEXT: paddq %xmm2, %xmm3 +; SSE3-NEXT: movdqa %xmm3, %xmm0 ; SSE3-NEXT: psrlq $4, %xmm0 -; SSE3-NEXT: paddq %xmm2, %xmm0 +; SSE3-NEXT: paddq %xmm3, %xmm0 ; SSE3-NEXT: pand {{.*}}(%rip), %xmm0 ; SSE3-NEXT: psadbw %xmm1, %xmm0 ; SSE3-NEXT: retq @@ -207,16 +215,17 @@ ; SSSE3-NEXT: pxor %xmm2, %xmm2 ; SSSE3-NEXT: psubq %xmm0, %xmm2 ; SSSE3-NEXT: pand %xmm0, %xmm2 -; SSSE3-NEXT: psubq {{.*}}(%rip), %xmm2 -; SSSE3-NEXT: movdqa {{.*#+}} xmm3 = 
[15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] -; SSSE3-NEXT: movdqa %xmm2, %xmm4 -; SSSE3-NEXT: pand %xmm3, %xmm4 +; SSSE3-NEXT: pcmpeqd %xmm3, %xmm3 +; SSSE3-NEXT: paddq %xmm2, %xmm3 +; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; SSSE3-NEXT: movdqa %xmm3, %xmm4 +; SSSE3-NEXT: pand %xmm2, %xmm4 ; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] ; SSSE3-NEXT: movdqa %xmm0, %xmm5 ; SSSE3-NEXT: pshufb %xmm4, %xmm5 -; SSSE3-NEXT: psrlw $4, %xmm2 -; SSSE3-NEXT: pand %xmm3, %xmm2 -; SSSE3-NEXT: pshufb %xmm2, %xmm0 +; SSSE3-NEXT: psrlw $4, %xmm3 +; SSSE3-NEXT: pand %xmm2, %xmm3 +; SSSE3-NEXT: pshufb %xmm3, %xmm0 ; SSSE3-NEXT: paddb %xmm5, %xmm0 ; SSSE3-NEXT: psadbw %xmm1, %xmm0 ; SSSE3-NEXT: retq @@ -227,16 +236,17 @@ ; SSE41-NEXT: pxor %xmm2, %xmm2 ; SSE41-NEXT: psubq %xmm0, %xmm2 ; SSE41-NEXT: pand %xmm0, %xmm2 -; SSE41-NEXT: psubq {{.*}}(%rip), %xmm2 -; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] -; SSE41-NEXT: movdqa %xmm2, %xmm4 -; SSE41-NEXT: pand %xmm3, %xmm4 +; SSE41-NEXT: pcmpeqd %xmm3, %xmm3 +; SSE41-NEXT: paddq %xmm2, %xmm3 +; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; SSE41-NEXT: movdqa %xmm3, %xmm4 +; SSE41-NEXT: pand %xmm2, %xmm4 ; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] ; SSE41-NEXT: movdqa %xmm0, %xmm5 ; SSE41-NEXT: pshufb %xmm4, %xmm5 -; SSE41-NEXT: psrlw $4, %xmm2 -; SSE41-NEXT: pand %xmm3, %xmm2 -; SSE41-NEXT: pshufb %xmm2, %xmm0 +; SSE41-NEXT: psrlw $4, %xmm3 +; SSE41-NEXT: pand %xmm2, %xmm3 +; SSE41-NEXT: pshufb %xmm3, %xmm0 ; SSE41-NEXT: paddb %xmm5, %xmm0 ; SSE41-NEXT: psadbw %xmm1, %xmm0 ; SSE41-NEXT: retq @@ -246,7 +256,8 @@ ; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; AVX1-NEXT: vpsubq %xmm0, %xmm1, %xmm2 ; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0 -; AVX1-NEXT: vpsubq {{.*}}(%rip), %xmm0, %xmm0 +; AVX1-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 +; AVX1-NEXT: vpaddq %xmm2, %xmm0, %xmm0 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] ; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm3 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] @@ -263,7 +274,8 @@ ; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; AVX2-NEXT: vpsubq %xmm0, %xmm1, %xmm2 ; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm0 -; AVX2-NEXT: vpsubq {{.*}}(%rip), %xmm0, %xmm0 +; AVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 +; AVX2-NEXT: vpaddq %xmm2, %xmm0, %xmm0 ; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] ; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm3 ; AVX2-NEXT: vmovdqa {{.*#+}} xmm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] @@ -301,7 +313,8 @@ ; AVX512VPOPCNTDQ-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; AVX512VPOPCNTDQ-NEXT: vpsubq %xmm0, %xmm1, %xmm1 ; AVX512VPOPCNTDQ-NEXT: vpand %xmm1, %xmm0, %xmm0 -; AVX512VPOPCNTDQ-NEXT: vpsubq {{.*}}(%rip), %xmm0, %xmm0 +; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1 +; AVX512VPOPCNTDQ-NEXT: vpaddq %xmm1, %xmm0, %xmm0 ; AVX512VPOPCNTDQ-NEXT: vpopcntq %zmm0, %zmm0 ; AVX512VPOPCNTDQ-NEXT: # kill: %XMM0 %XMM0 %ZMM0 ; AVX512VPOPCNTDQ-NEXT: vzeroupper @@ -337,20 +350,21 @@ ; SSE2-NEXT: pxor %xmm2, %xmm2 ; SSE2-NEXT: psubd %xmm0, %xmm2 ; SSE2-NEXT: pand %xmm0, %xmm2 -; SSE2-NEXT: psubd {{.*}}(%rip), %xmm2 -; SSE2-NEXT: movdqa %xmm2, %xmm0 +; SSE2-NEXT: pcmpeqd %xmm3, %xmm3 +; SSE2-NEXT: paddd %xmm2, %xmm3 +; SSE2-NEXT: movdqa %xmm3, %xmm0 ; SSE2-NEXT: psrld $1, %xmm0 ; SSE2-NEXT: pand {{.*}}(%rip), %xmm0 -; SSE2-NEXT: psubd %xmm0, %xmm2 +; SSE2-NEXT: psubd %xmm0, 
%xmm3 ; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [858993459,858993459,858993459,858993459] -; SSE2-NEXT: movdqa %xmm2, %xmm3 -; SSE2-NEXT: pand %xmm0, %xmm3 -; SSE2-NEXT: psrld $2, %xmm2 +; SSE2-NEXT: movdqa %xmm3, %xmm2 ; SSE2-NEXT: pand %xmm0, %xmm2 -; SSE2-NEXT: paddd %xmm3, %xmm2 -; SSE2-NEXT: movdqa %xmm2, %xmm0 +; SSE2-NEXT: psrld $2, %xmm3 +; SSE2-NEXT: pand %xmm0, %xmm3 +; SSE2-NEXT: paddd %xmm2, %xmm3 +; SSE2-NEXT: movdqa %xmm3, %xmm0 ; SSE2-NEXT: psrld $4, %xmm0 -; SSE2-NEXT: paddd %xmm2, %xmm0 +; SSE2-NEXT: paddd %xmm3, %xmm0 ; SSE2-NEXT: pand {{.*}}(%rip), %xmm0 ; SSE2-NEXT: movdqa %xmm0, %xmm2 ; SSE2-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm1[2],xmm2[3],xmm1[3] @@ -366,20 +380,21 @@ ; SSE3-NEXT: pxor %xmm2, %xmm2 ; SSE3-NEXT: psubd %xmm0, %xmm2 ; SSE3-NEXT: pand %xmm0, %xmm2 -; SSE3-NEXT: psubd {{.*}}(%rip), %xmm2 -; SSE3-NEXT: movdqa %xmm2, %xmm0 +; SSE3-NEXT: pcmpeqd %xmm3, %xmm3 +; SSE3-NEXT: paddd %xmm2, %xmm3 +; SSE3-NEXT: movdqa %xmm3, %xmm0 ; SSE3-NEXT: psrld $1, %xmm0 ; SSE3-NEXT: pand {{.*}}(%rip), %xmm0 -; SSE3-NEXT: psubd %xmm0, %xmm2 +; SSE3-NEXT: psubd %xmm0, %xmm3 ; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [858993459,858993459,858993459,858993459] -; SSE3-NEXT: movdqa %xmm2, %xmm3 -; SSE3-NEXT: pand %xmm0, %xmm3 -; SSE3-NEXT: psrld $2, %xmm2 +; SSE3-NEXT: movdqa %xmm3, %xmm2 ; SSE3-NEXT: pand %xmm0, %xmm2 -; SSE3-NEXT: paddd %xmm3, %xmm2 -; SSE3-NEXT: movdqa %xmm2, %xmm0 +; SSE3-NEXT: psrld $2, %xmm3 +; SSE3-NEXT: pand %xmm0, %xmm3 +; SSE3-NEXT: paddd %xmm2, %xmm3 +; SSE3-NEXT: movdqa %xmm3, %xmm0 ; SSE3-NEXT: psrld $4, %xmm0 -; SSE3-NEXT: paddd %xmm2, %xmm0 +; SSE3-NEXT: paddd %xmm3, %xmm0 ; SSE3-NEXT: pand {{.*}}(%rip), %xmm0 ; SSE3-NEXT: movdqa %xmm0, %xmm2 ; SSE3-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm1[2],xmm2[3],xmm1[3] @@ -395,16 +410,17 @@ ; SSSE3-NEXT: pxor %xmm2, %xmm2 ; SSSE3-NEXT: psubd %xmm0, %xmm2 ; SSSE3-NEXT: pand %xmm0, %xmm2 -; SSSE3-NEXT: psubd {{.*}}(%rip), %xmm2 -; SSSE3-NEXT: movdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] -; SSSE3-NEXT: movdqa %xmm2, %xmm4 -; SSSE3-NEXT: pand %xmm3, %xmm4 +; SSSE3-NEXT: pcmpeqd %xmm3, %xmm3 +; SSSE3-NEXT: paddd %xmm2, %xmm3 +; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; SSSE3-NEXT: movdqa %xmm3, %xmm4 +; SSSE3-NEXT: pand %xmm2, %xmm4 ; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] ; SSSE3-NEXT: movdqa %xmm0, %xmm5 ; SSSE3-NEXT: pshufb %xmm4, %xmm5 -; SSSE3-NEXT: psrlw $4, %xmm2 -; SSSE3-NEXT: pand %xmm3, %xmm2 -; SSSE3-NEXT: pshufb %xmm2, %xmm0 +; SSSE3-NEXT: psrlw $4, %xmm3 +; SSSE3-NEXT: pand %xmm2, %xmm3 +; SSSE3-NEXT: pshufb %xmm3, %xmm0 ; SSSE3-NEXT: paddb %xmm5, %xmm0 ; SSSE3-NEXT: movdqa %xmm0, %xmm2 ; SSSE3-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm1[2],xmm2[3],xmm1[3] @@ -420,16 +436,17 @@ ; SSE41-NEXT: pxor %xmm2, %xmm2 ; SSE41-NEXT: psubd %xmm0, %xmm2 ; SSE41-NEXT: pand %xmm0, %xmm2 -; SSE41-NEXT: psubd {{.*}}(%rip), %xmm2 -; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] -; SSE41-NEXT: movdqa %xmm2, %xmm3 -; SSE41-NEXT: pand %xmm0, %xmm3 +; SSE41-NEXT: pcmpeqd %xmm0, %xmm0 +; SSE41-NEXT: paddd %xmm2, %xmm0 +; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; SSE41-NEXT: movdqa %xmm0, %xmm3 +; SSE41-NEXT: pand %xmm2, %xmm3 ; SSE41-NEXT: movdqa {{.*#+}} xmm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] ; SSE41-NEXT: movdqa %xmm4, %xmm5 ; SSE41-NEXT: pshufb %xmm3, %xmm5 -; SSE41-NEXT: psrlw $4, %xmm2 -; SSE41-NEXT: pand %xmm0, %xmm2 -; SSE41-NEXT: 
pshufb %xmm2, %xmm4 +; SSE41-NEXT: psrlw $4, %xmm0 +; SSE41-NEXT: pand %xmm2, %xmm0 +; SSE41-NEXT: pshufb %xmm0, %xmm4 ; SSE41-NEXT: paddb %xmm5, %xmm4 ; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm4[0],zero,xmm4[1],zero ; SSE41-NEXT: punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm1[2],xmm4[3],xmm1[3] @@ -443,7 +460,8 @@ ; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; AVX1-NEXT: vpsubd %xmm0, %xmm1, %xmm2 ; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0 -; AVX1-NEXT: vpsubd {{.*}}(%rip), %xmm0, %xmm0 +; AVX1-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 +; AVX1-NEXT: vpaddd %xmm2, %xmm0, %xmm0 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] ; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm3 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] @@ -464,8 +482,8 @@ ; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; AVX2-NEXT: vpsubd %xmm0, %xmm1, %xmm2 ; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm0 -; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm2 -; AVX2-NEXT: vpsubd %xmm2, %xmm0, %xmm0 +; AVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 +; AVX2-NEXT: vpaddd %xmm2, %xmm0, %xmm0 ; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] ; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm3 ; AVX2-NEXT: vmovdqa {{.*#+}} xmm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] @@ -486,7 +504,8 @@ ; AVX512CDVL-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; AVX512CDVL-NEXT: vpsubd %xmm0, %xmm1, %xmm2 ; AVX512CDVL-NEXT: vpand %xmm2, %xmm0, %xmm0 -; AVX512CDVL-NEXT: vpsubd {{.*}}(%rip){1to4}, %xmm0, %xmm0 +; AVX512CDVL-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 +; AVX512CDVL-NEXT: vpaddd %xmm2, %xmm0, %xmm0 ; AVX512CDVL-NEXT: vmovdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] ; AVX512CDVL-NEXT: vpand %xmm2, %xmm0, %xmm3 ; AVX512CDVL-NEXT: vmovdqa {{.*#+}} xmm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] @@ -507,8 +526,8 @@ ; AVX512CD-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; AVX512CD-NEXT: vpsubd %xmm0, %xmm1, %xmm2 ; AVX512CD-NEXT: vpand %xmm2, %xmm0, %xmm0 -; AVX512CD-NEXT: vpbroadcastd {{.*}}(%rip), %xmm2 -; AVX512CD-NEXT: vpsubd %xmm2, %xmm0, %xmm0 +; AVX512CD-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 +; AVX512CD-NEXT: vpaddd %xmm2, %xmm0, %xmm0 ; AVX512CD-NEXT: vmovdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] ; AVX512CD-NEXT: vpand %xmm2, %xmm0, %xmm3 ; AVX512CD-NEXT: vmovdqa {{.*#+}} xmm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] @@ -529,8 +548,8 @@ ; AVX512VPOPCNTDQ-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; AVX512VPOPCNTDQ-NEXT: vpsubd %xmm0, %xmm1, %xmm1 ; AVX512VPOPCNTDQ-NEXT: vpand %xmm1, %xmm0, %xmm0 -; AVX512VPOPCNTDQ-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1 -; AVX512VPOPCNTDQ-NEXT: vpsubd %xmm1, %xmm0, %xmm0 +; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1 +; AVX512VPOPCNTDQ-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; AVX512VPOPCNTDQ-NEXT: vpopcntd %zmm0, %zmm0 ; AVX512VPOPCNTDQ-NEXT: # kill: %XMM0 %XMM0 %ZMM0 ; AVX512VPOPCNTDQ-NEXT: vzeroupper @@ -542,16 +561,17 @@ ; X32-SSE-NEXT: pxor %xmm2, %xmm2 ; X32-SSE-NEXT: psubd %xmm0, %xmm2 ; X32-SSE-NEXT: pand %xmm0, %xmm2 -; X32-SSE-NEXT: psubd {{\.LCPI.*}}, %xmm2 -; X32-SSE-NEXT: movdqa {{.*#+}} xmm0 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] -; X32-SSE-NEXT: movdqa %xmm2, %xmm3 -; X32-SSE-NEXT: pand %xmm0, %xmm3 +; X32-SSE-NEXT: pcmpeqd %xmm0, %xmm0 +; X32-SSE-NEXT: paddd %xmm2, %xmm0 +; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; X32-SSE-NEXT: movdqa %xmm0, %xmm3 +; X32-SSE-NEXT: pand %xmm2, %xmm3 ; X32-SSE-NEXT: movdqa {{.*#+}} xmm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] ; X32-SSE-NEXT: movdqa %xmm4, %xmm5 ; X32-SSE-NEXT: pshufb 
%xmm3, %xmm5 -; X32-SSE-NEXT: psrlw $4, %xmm2 -; X32-SSE-NEXT: pand %xmm0, %xmm2 -; X32-SSE-NEXT: pshufb %xmm2, %xmm4 +; X32-SSE-NEXT: psrlw $4, %xmm0 +; X32-SSE-NEXT: pand %xmm2, %xmm0 +; X32-SSE-NEXT: pshufb %xmm0, %xmm4 ; X32-SSE-NEXT: paddb %xmm5, %xmm4 ; X32-SSE-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm4[0],zero,xmm4[1],zero ; X32-SSE-NEXT: punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm1[2],xmm4[3],xmm1[3] @@ -570,20 +590,21 @@ ; SSE2-NEXT: pxor %xmm2, %xmm2 ; SSE2-NEXT: psubd %xmm0, %xmm2 ; SSE2-NEXT: pand %xmm0, %xmm2 -; SSE2-NEXT: psubd {{.*}}(%rip), %xmm2 -; SSE2-NEXT: movdqa %xmm2, %xmm0 +; SSE2-NEXT: pcmpeqd %xmm3, %xmm3 +; SSE2-NEXT: paddd %xmm2, %xmm3 +; SSE2-NEXT: movdqa %xmm3, %xmm0 ; SSE2-NEXT: psrld $1, %xmm0 ; SSE2-NEXT: pand {{.*}}(%rip), %xmm0 -; SSE2-NEXT: psubd %xmm0, %xmm2 +; SSE2-NEXT: psubd %xmm0, %xmm3 ; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [858993459,858993459,858993459,858993459] -; SSE2-NEXT: movdqa %xmm2, %xmm3 -; SSE2-NEXT: pand %xmm0, %xmm3 -; SSE2-NEXT: psrld $2, %xmm2 +; SSE2-NEXT: movdqa %xmm3, %xmm2 ; SSE2-NEXT: pand %xmm0, %xmm2 -; SSE2-NEXT: paddd %xmm3, %xmm2 -; SSE2-NEXT: movdqa %xmm2, %xmm0 +; SSE2-NEXT: psrld $2, %xmm3 +; SSE2-NEXT: pand %xmm0, %xmm3 +; SSE2-NEXT: paddd %xmm2, %xmm3 +; SSE2-NEXT: movdqa %xmm3, %xmm0 ; SSE2-NEXT: psrld $4, %xmm0 -; SSE2-NEXT: paddd %xmm2, %xmm0 +; SSE2-NEXT: paddd %xmm3, %xmm0 ; SSE2-NEXT: pand {{.*}}(%rip), %xmm0 ; SSE2-NEXT: movdqa %xmm0, %xmm2 ; SSE2-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm1[2],xmm2[3],xmm1[3] @@ -599,20 +620,21 @@ ; SSE3-NEXT: pxor %xmm2, %xmm2 ; SSE3-NEXT: psubd %xmm0, %xmm2 ; SSE3-NEXT: pand %xmm0, %xmm2 -; SSE3-NEXT: psubd {{.*}}(%rip), %xmm2 -; SSE3-NEXT: movdqa %xmm2, %xmm0 +; SSE3-NEXT: pcmpeqd %xmm3, %xmm3 +; SSE3-NEXT: paddd %xmm2, %xmm3 +; SSE3-NEXT: movdqa %xmm3, %xmm0 ; SSE3-NEXT: psrld $1, %xmm0 ; SSE3-NEXT: pand {{.*}}(%rip), %xmm0 -; SSE3-NEXT: psubd %xmm0, %xmm2 +; SSE3-NEXT: psubd %xmm0, %xmm3 ; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [858993459,858993459,858993459,858993459] -; SSE3-NEXT: movdqa %xmm2, %xmm3 -; SSE3-NEXT: pand %xmm0, %xmm3 -; SSE3-NEXT: psrld $2, %xmm2 +; SSE3-NEXT: movdqa %xmm3, %xmm2 ; SSE3-NEXT: pand %xmm0, %xmm2 -; SSE3-NEXT: paddd %xmm3, %xmm2 -; SSE3-NEXT: movdqa %xmm2, %xmm0 +; SSE3-NEXT: psrld $2, %xmm3 +; SSE3-NEXT: pand %xmm0, %xmm3 +; SSE3-NEXT: paddd %xmm2, %xmm3 +; SSE3-NEXT: movdqa %xmm3, %xmm0 ; SSE3-NEXT: psrld $4, %xmm0 -; SSE3-NEXT: paddd %xmm2, %xmm0 +; SSE3-NEXT: paddd %xmm3, %xmm0 ; SSE3-NEXT: pand {{.*}}(%rip), %xmm0 ; SSE3-NEXT: movdqa %xmm0, %xmm2 ; SSE3-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm1[2],xmm2[3],xmm1[3] @@ -628,16 +650,17 @@ ; SSSE3-NEXT: pxor %xmm2, %xmm2 ; SSSE3-NEXT: psubd %xmm0, %xmm2 ; SSSE3-NEXT: pand %xmm0, %xmm2 -; SSSE3-NEXT: psubd {{.*}}(%rip), %xmm2 -; SSSE3-NEXT: movdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] -; SSSE3-NEXT: movdqa %xmm2, %xmm4 -; SSSE3-NEXT: pand %xmm3, %xmm4 +; SSSE3-NEXT: pcmpeqd %xmm3, %xmm3 +; SSSE3-NEXT: paddd %xmm2, %xmm3 +; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; SSSE3-NEXT: movdqa %xmm3, %xmm4 +; SSSE3-NEXT: pand %xmm2, %xmm4 ; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] ; SSSE3-NEXT: movdqa %xmm0, %xmm5 ; SSSE3-NEXT: pshufb %xmm4, %xmm5 -; SSSE3-NEXT: psrlw $4, %xmm2 -; SSSE3-NEXT: pand %xmm3, %xmm2 -; SSSE3-NEXT: pshufb %xmm2, %xmm0 +; SSSE3-NEXT: psrlw $4, %xmm3 +; SSSE3-NEXT: pand %xmm2, %xmm3 +; SSSE3-NEXT: pshufb %xmm3, %xmm0 ; SSSE3-NEXT: paddb %xmm5, %xmm0 ; SSSE3-NEXT: movdqa %xmm0, %xmm2 ; SSSE3-NEXT: 
punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm1[2],xmm2[3],xmm1[3] @@ -653,16 +676,17 @@ ; SSE41-NEXT: pxor %xmm2, %xmm2 ; SSE41-NEXT: psubd %xmm0, %xmm2 ; SSE41-NEXT: pand %xmm0, %xmm2 -; SSE41-NEXT: psubd {{.*}}(%rip), %xmm2 -; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] -; SSE41-NEXT: movdqa %xmm2, %xmm3 -; SSE41-NEXT: pand %xmm0, %xmm3 +; SSE41-NEXT: pcmpeqd %xmm0, %xmm0 +; SSE41-NEXT: paddd %xmm2, %xmm0 +; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; SSE41-NEXT: movdqa %xmm0, %xmm3 +; SSE41-NEXT: pand %xmm2, %xmm3 ; SSE41-NEXT: movdqa {{.*#+}} xmm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] ; SSE41-NEXT: movdqa %xmm4, %xmm5 ; SSE41-NEXT: pshufb %xmm3, %xmm5 -; SSE41-NEXT: psrlw $4, %xmm2 -; SSE41-NEXT: pand %xmm0, %xmm2 -; SSE41-NEXT: pshufb %xmm2, %xmm4 +; SSE41-NEXT: psrlw $4, %xmm0 +; SSE41-NEXT: pand %xmm2, %xmm0 +; SSE41-NEXT: pshufb %xmm0, %xmm4 ; SSE41-NEXT: paddb %xmm5, %xmm4 ; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm4[0],zero,xmm4[1],zero ; SSE41-NEXT: punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm1[2],xmm4[3],xmm1[3] @@ -676,7 +700,8 @@ ; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; AVX1-NEXT: vpsubd %xmm0, %xmm1, %xmm2 ; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0 -; AVX1-NEXT: vpsubd {{.*}}(%rip), %xmm0, %xmm0 +; AVX1-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 +; AVX1-NEXT: vpaddd %xmm2, %xmm0, %xmm0 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] ; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm3 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] @@ -697,8 +722,8 @@ ; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; AVX2-NEXT: vpsubd %xmm0, %xmm1, %xmm2 ; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm0 -; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm2 -; AVX2-NEXT: vpsubd %xmm2, %xmm0, %xmm0 +; AVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 +; AVX2-NEXT: vpaddd %xmm2, %xmm0, %xmm0 ; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] ; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm3 ; AVX2-NEXT: vmovdqa {{.*#+}} xmm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] @@ -740,8 +765,8 @@ ; AVX512VPOPCNTDQ-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; AVX512VPOPCNTDQ-NEXT: vpsubd %xmm0, %xmm1, %xmm1 ; AVX512VPOPCNTDQ-NEXT: vpand %xmm1, %xmm0, %xmm0 -; AVX512VPOPCNTDQ-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1 -; AVX512VPOPCNTDQ-NEXT: vpsubd %xmm1, %xmm0, %xmm0 +; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1 +; AVX512VPOPCNTDQ-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; AVX512VPOPCNTDQ-NEXT: vpopcntd %zmm0, %zmm0 ; AVX512VPOPCNTDQ-NEXT: # kill: %XMM0 %XMM0 %ZMM0 ; AVX512VPOPCNTDQ-NEXT: vzeroupper @@ -753,16 +778,17 @@ ; X32-SSE-NEXT: pxor %xmm2, %xmm2 ; X32-SSE-NEXT: psubd %xmm0, %xmm2 ; X32-SSE-NEXT: pand %xmm0, %xmm2 -; X32-SSE-NEXT: psubd {{\.LCPI.*}}, %xmm2 -; X32-SSE-NEXT: movdqa {{.*#+}} xmm0 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] -; X32-SSE-NEXT: movdqa %xmm2, %xmm3 -; X32-SSE-NEXT: pand %xmm0, %xmm3 +; X32-SSE-NEXT: pcmpeqd %xmm0, %xmm0 +; X32-SSE-NEXT: paddd %xmm2, %xmm0 +; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; X32-SSE-NEXT: movdqa %xmm0, %xmm3 +; X32-SSE-NEXT: pand %xmm2, %xmm3 ; X32-SSE-NEXT: movdqa {{.*#+}} xmm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] ; X32-SSE-NEXT: movdqa %xmm4, %xmm5 ; X32-SSE-NEXT: pshufb %xmm3, %xmm5 -; X32-SSE-NEXT: psrlw $4, %xmm2 -; X32-SSE-NEXT: pand %xmm0, %xmm2 -; X32-SSE-NEXT: pshufb %xmm2, %xmm4 +; X32-SSE-NEXT: psrlw $4, %xmm0 +; X32-SSE-NEXT: pand %xmm2, %xmm0 +; X32-SSE-NEXT: pshufb %xmm0, %xmm4 ; X32-SSE-NEXT: paddb %xmm5, %xmm4 
; X32-SSE-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm4[0],zero,xmm4[1],zero ; X32-SSE-NEXT: punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm1[2],xmm4[3],xmm1[3] @@ -780,24 +806,25 @@ ; SSE2-NEXT: pxor %xmm1, %xmm1 ; SSE2-NEXT: psubw %xmm0, %xmm1 ; SSE2-NEXT: pand %xmm0, %xmm1 -; SSE2-NEXT: psubw {{.*}}(%rip), %xmm1 +; SSE2-NEXT: pcmpeqd %xmm0, %xmm0 +; SSE2-NEXT: paddw %xmm1, %xmm0 +; SSE2-NEXT: movdqa %xmm0, %xmm1 +; SSE2-NEXT: psrlw $1, %xmm1 +; SSE2-NEXT: pand {{.*}}(%rip), %xmm1 +; SSE2-NEXT: psubw %xmm1, %xmm0 +; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [13107,13107,13107,13107,13107,13107,13107,13107] +; SSE2-NEXT: movdqa %xmm0, %xmm2 +; SSE2-NEXT: pand %xmm1, %xmm2 +; SSE2-NEXT: psrlw $2, %xmm0 +; SSE2-NEXT: pand %xmm1, %xmm0 +; SSE2-NEXT: paddw %xmm2, %xmm0 +; SSE2-NEXT: movdqa %xmm0, %xmm1 +; SSE2-NEXT: psrlw $4, %xmm1 +; SSE2-NEXT: paddw %xmm0, %xmm1 +; SSE2-NEXT: pand {{.*}}(%rip), %xmm1 ; SSE2-NEXT: movdqa %xmm1, %xmm0 -; SSE2-NEXT: psrlw $1, %xmm0 -; SSE2-NEXT: pand {{.*}}(%rip), %xmm0 -; SSE2-NEXT: psubw %xmm0, %xmm1 -; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [13107,13107,13107,13107,13107,13107,13107,13107] -; SSE2-NEXT: movdqa %xmm1, %xmm2 -; SSE2-NEXT: pand %xmm0, %xmm2 -; SSE2-NEXT: psrlw $2, %xmm1 -; SSE2-NEXT: pand %xmm0, %xmm1 -; SSE2-NEXT: paddw %xmm2, %xmm1 -; SSE2-NEXT: movdqa %xmm1, %xmm2 -; SSE2-NEXT: psrlw $4, %xmm2 -; SSE2-NEXT: paddw %xmm1, %xmm2 -; SSE2-NEXT: pand {{.*}}(%rip), %xmm2 -; SSE2-NEXT: movdqa %xmm2, %xmm0 ; SSE2-NEXT: psllw $8, %xmm0 -; SSE2-NEXT: paddb %xmm2, %xmm0 +; SSE2-NEXT: paddb %xmm1, %xmm0 ; SSE2-NEXT: psrlw $8, %xmm0 ; SSE2-NEXT: retq ; @@ -806,24 +833,25 @@ ; SSE3-NEXT: pxor %xmm1, %xmm1 ; SSE3-NEXT: psubw %xmm0, %xmm1 ; SSE3-NEXT: pand %xmm0, %xmm1 -; SSE3-NEXT: psubw {{.*}}(%rip), %xmm1 +; SSE3-NEXT: pcmpeqd %xmm0, %xmm0 +; SSE3-NEXT: paddw %xmm1, %xmm0 +; SSE3-NEXT: movdqa %xmm0, %xmm1 +; SSE3-NEXT: psrlw $1, %xmm1 +; SSE3-NEXT: pand {{.*}}(%rip), %xmm1 +; SSE3-NEXT: psubw %xmm1, %xmm0 +; SSE3-NEXT: movdqa {{.*#+}} xmm1 = [13107,13107,13107,13107,13107,13107,13107,13107] +; SSE3-NEXT: movdqa %xmm0, %xmm2 +; SSE3-NEXT: pand %xmm1, %xmm2 +; SSE3-NEXT: psrlw $2, %xmm0 +; SSE3-NEXT: pand %xmm1, %xmm0 +; SSE3-NEXT: paddw %xmm2, %xmm0 +; SSE3-NEXT: movdqa %xmm0, %xmm1 +; SSE3-NEXT: psrlw $4, %xmm1 +; SSE3-NEXT: paddw %xmm0, %xmm1 +; SSE3-NEXT: pand {{.*}}(%rip), %xmm1 ; SSE3-NEXT: movdqa %xmm1, %xmm0 -; SSE3-NEXT: psrlw $1, %xmm0 -; SSE3-NEXT: pand {{.*}}(%rip), %xmm0 -; SSE3-NEXT: psubw %xmm0, %xmm1 -; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [13107,13107,13107,13107,13107,13107,13107,13107] -; SSE3-NEXT: movdqa %xmm1, %xmm2 -; SSE3-NEXT: pand %xmm0, %xmm2 -; SSE3-NEXT: psrlw $2, %xmm1 -; SSE3-NEXT: pand %xmm0, %xmm1 -; SSE3-NEXT: paddw %xmm2, %xmm1 -; SSE3-NEXT: movdqa %xmm1, %xmm2 -; SSE3-NEXT: psrlw $4, %xmm2 -; SSE3-NEXT: paddw %xmm1, %xmm2 -; SSE3-NEXT: pand {{.*}}(%rip), %xmm2 -; SSE3-NEXT: movdqa %xmm2, %xmm0 ; SSE3-NEXT: psllw $8, %xmm0 -; SSE3-NEXT: paddb %xmm2, %xmm0 +; SSE3-NEXT: paddb %xmm1, %xmm0 ; SSE3-NEXT: psrlw $8, %xmm0 ; SSE3-NEXT: retq ; @@ -832,16 +860,17 @@ ; SSSE3-NEXT: pxor %xmm1, %xmm1 ; SSSE3-NEXT: psubw %xmm0, %xmm1 ; SSSE3-NEXT: pand %xmm0, %xmm1 -; SSSE3-NEXT: psubw {{.*}}(%rip), %xmm1 -; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] -; SSSE3-NEXT: movdqa %xmm1, %xmm2 -; SSSE3-NEXT: pand %xmm0, %xmm2 +; SSSE3-NEXT: pcmpeqd %xmm0, %xmm0 +; SSSE3-NEXT: paddw %xmm1, %xmm0 +; SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; SSSE3-NEXT: movdqa %xmm0, %xmm2 +; SSSE3-NEXT: pand 
%xmm1, %xmm2 ; SSSE3-NEXT: movdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] ; SSSE3-NEXT: movdqa %xmm3, %xmm4 ; SSSE3-NEXT: pshufb %xmm2, %xmm4 -; SSSE3-NEXT: psrlw $4, %xmm1 -; SSSE3-NEXT: pand %xmm0, %xmm1 -; SSSE3-NEXT: pshufb %xmm1, %xmm3 +; SSSE3-NEXT: psrlw $4, %xmm0 +; SSSE3-NEXT: pand %xmm1, %xmm0 +; SSSE3-NEXT: pshufb %xmm0, %xmm3 ; SSSE3-NEXT: paddb %xmm4, %xmm3 ; SSSE3-NEXT: movdqa %xmm3, %xmm0 ; SSSE3-NEXT: psllw $8, %xmm0 @@ -854,16 +883,17 @@ ; SSE41-NEXT: pxor %xmm1, %xmm1 ; SSE41-NEXT: psubw %xmm0, %xmm1 ; SSE41-NEXT: pand %xmm0, %xmm1 -; SSE41-NEXT: psubw {{.*}}(%rip), %xmm1 -; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] -; SSE41-NEXT: movdqa %xmm1, %xmm2 -; SSE41-NEXT: pand %xmm0, %xmm2 +; SSE41-NEXT: pcmpeqd %xmm0, %xmm0 +; SSE41-NEXT: paddw %xmm1, %xmm0 +; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; SSE41-NEXT: movdqa %xmm0, %xmm2 +; SSE41-NEXT: pand %xmm1, %xmm2 ; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] ; SSE41-NEXT: movdqa %xmm3, %xmm4 ; SSE41-NEXT: pshufb %xmm2, %xmm4 -; SSE41-NEXT: psrlw $4, %xmm1 -; SSE41-NEXT: pand %xmm0, %xmm1 -; SSE41-NEXT: pshufb %xmm1, %xmm3 +; SSE41-NEXT: psrlw $4, %xmm0 +; SSE41-NEXT: pand %xmm1, %xmm0 +; SSE41-NEXT: pshufb %xmm0, %xmm3 ; SSE41-NEXT: paddb %xmm4, %xmm3 ; SSE41-NEXT: movdqa %xmm3, %xmm0 ; SSE41-NEXT: psllw $8, %xmm0 @@ -876,7 +906,8 @@ ; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; AVX-NEXT: vpsubw %xmm0, %xmm1, %xmm1 ; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0 -; AVX-NEXT: vpsubw {{.*}}(%rip), %xmm0, %xmm0 +; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1 +; AVX-NEXT: vpaddw %xmm1, %xmm0, %xmm0 ; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] ; AVX-NEXT: vpand %xmm1, %xmm0, %xmm2 ; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] @@ -895,7 +926,8 @@ ; AVX512VPOPCNTDQ-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; AVX512VPOPCNTDQ-NEXT: vpsubw %xmm0, %xmm1, %xmm1 ; AVX512VPOPCNTDQ-NEXT: vpand %xmm1, %xmm0, %xmm0 -; AVX512VPOPCNTDQ-NEXT: vpsubw {{.*}}(%rip), %xmm0, %xmm0 +; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1 +; AVX512VPOPCNTDQ-NEXT: vpaddw %xmm1, %xmm0, %xmm0 ; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] ; AVX512VPOPCNTDQ-NEXT: vpand %xmm1, %xmm0, %xmm2 ; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] @@ -914,16 +946,17 @@ ; X32-SSE-NEXT: pxor %xmm1, %xmm1 ; X32-SSE-NEXT: psubw %xmm0, %xmm1 ; X32-SSE-NEXT: pand %xmm0, %xmm1 -; X32-SSE-NEXT: psubw {{\.LCPI.*}}, %xmm1 -; X32-SSE-NEXT: movdqa {{.*#+}} xmm0 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] -; X32-SSE-NEXT: movdqa %xmm1, %xmm2 -; X32-SSE-NEXT: pand %xmm0, %xmm2 +; X32-SSE-NEXT: pcmpeqd %xmm0, %xmm0 +; X32-SSE-NEXT: paddw %xmm1, %xmm0 +; X32-SSE-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; X32-SSE-NEXT: movdqa %xmm0, %xmm2 +; X32-SSE-NEXT: pand %xmm1, %xmm2 ; X32-SSE-NEXT: movdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] ; X32-SSE-NEXT: movdqa %xmm3, %xmm4 ; X32-SSE-NEXT: pshufb %xmm2, %xmm4 -; X32-SSE-NEXT: psrlw $4, %xmm1 -; X32-SSE-NEXT: pand %xmm0, %xmm1 -; X32-SSE-NEXT: pshufb %xmm1, %xmm3 +; X32-SSE-NEXT: psrlw $4, %xmm0 +; X32-SSE-NEXT: pand %xmm1, %xmm0 +; X32-SSE-NEXT: pshufb %xmm0, %xmm3 ; X32-SSE-NEXT: paddb %xmm4, %xmm3 ; X32-SSE-NEXT: movdqa %xmm3, %xmm0 ; X32-SSE-NEXT: psllw $8, %xmm0 @@ -940,24 +973,25 @@ ; SSE2-NEXT: pxor %xmm1, %xmm1 ; SSE2-NEXT: psubw %xmm0, 
%xmm1 ; SSE2-NEXT: pand %xmm0, %xmm1 -; SSE2-NEXT: psubw {{.*}}(%rip), %xmm1 +; SSE2-NEXT: pcmpeqd %xmm0, %xmm0 +; SSE2-NEXT: paddw %xmm1, %xmm0 +; SSE2-NEXT: movdqa %xmm0, %xmm1 +; SSE2-NEXT: psrlw $1, %xmm1 +; SSE2-NEXT: pand {{.*}}(%rip), %xmm1 +; SSE2-NEXT: psubw %xmm1, %xmm0 +; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [13107,13107,13107,13107,13107,13107,13107,13107] +; SSE2-NEXT: movdqa %xmm0, %xmm2 +; SSE2-NEXT: pand %xmm1, %xmm2 +; SSE2-NEXT: psrlw $2, %xmm0 +; SSE2-NEXT: pand %xmm1, %xmm0 +; SSE2-NEXT: paddw %xmm2, %xmm0 +; SSE2-NEXT: movdqa %xmm0, %xmm1 +; SSE2-NEXT: psrlw $4, %xmm1 +; SSE2-NEXT: paddw %xmm0, %xmm1 +; SSE2-NEXT: pand {{.*}}(%rip), %xmm1 ; SSE2-NEXT: movdqa %xmm1, %xmm0 -; SSE2-NEXT: psrlw $1, %xmm0 -; SSE2-NEXT: pand {{.*}}(%rip), %xmm0 -; SSE2-NEXT: psubw %xmm0, %xmm1 -; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [13107,13107,13107,13107,13107,13107,13107,13107] -; SSE2-NEXT: movdqa %xmm1, %xmm2 -; SSE2-NEXT: pand %xmm0, %xmm2 -; SSE2-NEXT: psrlw $2, %xmm1 -; SSE2-NEXT: pand %xmm0, %xmm1 -; SSE2-NEXT: paddw %xmm2, %xmm1 -; SSE2-NEXT: movdqa %xmm1, %xmm2 -; SSE2-NEXT: psrlw $4, %xmm2 -; SSE2-NEXT: paddw %xmm1, %xmm2 -; SSE2-NEXT: pand {{.*}}(%rip), %xmm2 -; SSE2-NEXT: movdqa %xmm2, %xmm0 ; SSE2-NEXT: psllw $8, %xmm0 -; SSE2-NEXT: paddb %xmm2, %xmm0 +; SSE2-NEXT: paddb %xmm1, %xmm0 ; SSE2-NEXT: psrlw $8, %xmm0 ; SSE2-NEXT: retq ; @@ -966,24 +1000,25 @@ ; SSE3-NEXT: pxor %xmm1, %xmm1 ; SSE3-NEXT: psubw %xmm0, %xmm1 ; SSE3-NEXT: pand %xmm0, %xmm1 -; SSE3-NEXT: psubw {{.*}}(%rip), %xmm1 +; SSE3-NEXT: pcmpeqd %xmm0, %xmm0 +; SSE3-NEXT: paddw %xmm1, %xmm0 +; SSE3-NEXT: movdqa %xmm0, %xmm1 +; SSE3-NEXT: psrlw $1, %xmm1 +; SSE3-NEXT: pand {{.*}}(%rip), %xmm1 +; SSE3-NEXT: psubw %xmm1, %xmm0 +; SSE3-NEXT: movdqa {{.*#+}} xmm1 = [13107,13107,13107,13107,13107,13107,13107,13107] +; SSE3-NEXT: movdqa %xmm0, %xmm2 +; SSE3-NEXT: pand %xmm1, %xmm2 +; SSE3-NEXT: psrlw $2, %xmm0 +; SSE3-NEXT: pand %xmm1, %xmm0 +; SSE3-NEXT: paddw %xmm2, %xmm0 +; SSE3-NEXT: movdqa %xmm0, %xmm1 +; SSE3-NEXT: psrlw $4, %xmm1 +; SSE3-NEXT: paddw %xmm0, %xmm1 +; SSE3-NEXT: pand {{.*}}(%rip), %xmm1 ; SSE3-NEXT: movdqa %xmm1, %xmm0 -; SSE3-NEXT: psrlw $1, %xmm0 -; SSE3-NEXT: pand {{.*}}(%rip), %xmm0 -; SSE3-NEXT: psubw %xmm0, %xmm1 -; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [13107,13107,13107,13107,13107,13107,13107,13107] -; SSE3-NEXT: movdqa %xmm1, %xmm2 -; SSE3-NEXT: pand %xmm0, %xmm2 -; SSE3-NEXT: psrlw $2, %xmm1 -; SSE3-NEXT: pand %xmm0, %xmm1 -; SSE3-NEXT: paddw %xmm2, %xmm1 -; SSE3-NEXT: movdqa %xmm1, %xmm2 -; SSE3-NEXT: psrlw $4, %xmm2 -; SSE3-NEXT: paddw %xmm1, %xmm2 -; SSE3-NEXT: pand {{.*}}(%rip), %xmm2 -; SSE3-NEXT: movdqa %xmm2, %xmm0 ; SSE3-NEXT: psllw $8, %xmm0 -; SSE3-NEXT: paddb %xmm2, %xmm0 +; SSE3-NEXT: paddb %xmm1, %xmm0 ; SSE3-NEXT: psrlw $8, %xmm0 ; SSE3-NEXT: retq ; @@ -992,16 +1027,17 @@ ; SSSE3-NEXT: pxor %xmm1, %xmm1 ; SSSE3-NEXT: psubw %xmm0, %xmm1 ; SSSE3-NEXT: pand %xmm0, %xmm1 -; SSSE3-NEXT: psubw {{.*}}(%rip), %xmm1 -; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] -; SSSE3-NEXT: movdqa %xmm1, %xmm2 -; SSSE3-NEXT: pand %xmm0, %xmm2 +; SSSE3-NEXT: pcmpeqd %xmm0, %xmm0 +; SSSE3-NEXT: paddw %xmm1, %xmm0 +; SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; SSSE3-NEXT: movdqa %xmm0, %xmm2 +; SSSE3-NEXT: pand %xmm1, %xmm2 ; SSSE3-NEXT: movdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] ; SSSE3-NEXT: movdqa %xmm3, %xmm4 ; SSSE3-NEXT: pshufb %xmm2, %xmm4 -; SSSE3-NEXT: psrlw $4, %xmm1 -; SSSE3-NEXT: pand %xmm0, %xmm1 -; 
SSSE3-NEXT: pshufb %xmm1, %xmm3 +; SSSE3-NEXT: psrlw $4, %xmm0 +; SSSE3-NEXT: pand %xmm1, %xmm0 +; SSSE3-NEXT: pshufb %xmm0, %xmm3 ; SSSE3-NEXT: paddb %xmm4, %xmm3 ; SSSE3-NEXT: movdqa %xmm3, %xmm0 ; SSSE3-NEXT: psllw $8, %xmm0 @@ -1014,16 +1050,17 @@ ; SSE41-NEXT: pxor %xmm1, %xmm1 ; SSE41-NEXT: psubw %xmm0, %xmm1 ; SSE41-NEXT: pand %xmm0, %xmm1 -; SSE41-NEXT: psubw {{.*}}(%rip), %xmm1 -; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] -; SSE41-NEXT: movdqa %xmm1, %xmm2 -; SSE41-NEXT: pand %xmm0, %xmm2 +; SSE41-NEXT: pcmpeqd %xmm0, %xmm0 +; SSE41-NEXT: paddw %xmm1, %xmm0 +; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; SSE41-NEXT: movdqa %xmm0, %xmm2 +; SSE41-NEXT: pand %xmm1, %xmm2 ; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] ; SSE41-NEXT: movdqa %xmm3, %xmm4 ; SSE41-NEXT: pshufb %xmm2, %xmm4 -; SSE41-NEXT: psrlw $4, %xmm1 -; SSE41-NEXT: pand %xmm0, %xmm1 -; SSE41-NEXT: pshufb %xmm1, %xmm3 +; SSE41-NEXT: psrlw $4, %xmm0 +; SSE41-NEXT: pand %xmm1, %xmm0 +; SSE41-NEXT: pshufb %xmm0, %xmm3 ; SSE41-NEXT: paddb %xmm4, %xmm3 ; SSE41-NEXT: movdqa %xmm3, %xmm0 ; SSE41-NEXT: psllw $8, %xmm0 @@ -1036,7 +1073,8 @@ ; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; AVX-NEXT: vpsubw %xmm0, %xmm1, %xmm1 ; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0 -; AVX-NEXT: vpsubw {{.*}}(%rip), %xmm0, %xmm0 +; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1 +; AVX-NEXT: vpaddw %xmm1, %xmm0, %xmm0 ; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] ; AVX-NEXT: vpand %xmm1, %xmm0, %xmm2 ; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] @@ -1055,7 +1093,8 @@ ; AVX512VPOPCNTDQ-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; AVX512VPOPCNTDQ-NEXT: vpsubw %xmm0, %xmm1, %xmm1 ; AVX512VPOPCNTDQ-NEXT: vpand %xmm1, %xmm0, %xmm0 -; AVX512VPOPCNTDQ-NEXT: vpsubw {{.*}}(%rip), %xmm0, %xmm0 +; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1 +; AVX512VPOPCNTDQ-NEXT: vpaddw %xmm1, %xmm0, %xmm0 ; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] ; AVX512VPOPCNTDQ-NEXT: vpand %xmm1, %xmm0, %xmm2 ; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] @@ -1074,16 +1113,17 @@ ; X32-SSE-NEXT: pxor %xmm1, %xmm1 ; X32-SSE-NEXT: psubw %xmm0, %xmm1 ; X32-SSE-NEXT: pand %xmm0, %xmm1 -; X32-SSE-NEXT: psubw {{\.LCPI.*}}, %xmm1 -; X32-SSE-NEXT: movdqa {{.*#+}} xmm0 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] -; X32-SSE-NEXT: movdqa %xmm1, %xmm2 -; X32-SSE-NEXT: pand %xmm0, %xmm2 +; X32-SSE-NEXT: pcmpeqd %xmm0, %xmm0 +; X32-SSE-NEXT: paddw %xmm1, %xmm0 +; X32-SSE-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; X32-SSE-NEXT: movdqa %xmm0, %xmm2 +; X32-SSE-NEXT: pand %xmm1, %xmm2 ; X32-SSE-NEXT: movdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] ; X32-SSE-NEXT: movdqa %xmm3, %xmm4 ; X32-SSE-NEXT: pshufb %xmm2, %xmm4 -; X32-SSE-NEXT: psrlw $4, %xmm1 -; X32-SSE-NEXT: pand %xmm0, %xmm1 -; X32-SSE-NEXT: pshufb %xmm1, %xmm3 +; X32-SSE-NEXT: psrlw $4, %xmm0 +; X32-SSE-NEXT: pand %xmm1, %xmm0 +; X32-SSE-NEXT: pshufb %xmm0, %xmm3 ; X32-SSE-NEXT: paddb %xmm4, %xmm3 ; X32-SSE-NEXT: movdqa %xmm3, %xmm0 ; X32-SSE-NEXT: psllw $8, %xmm0 @@ -1100,20 +1140,21 @@ ; SSE2-NEXT: pxor %xmm1, %xmm1 ; SSE2-NEXT: psubb %xmm0, %xmm1 ; SSE2-NEXT: pand %xmm0, %xmm1 -; SSE2-NEXT: psubb {{.*}}(%rip), %xmm1 -; SSE2-NEXT: movdqa %xmm1, %xmm0 +; SSE2-NEXT: pcmpeqd %xmm2, %xmm2 +; SSE2-NEXT: paddb %xmm1, %xmm2 +; SSE2-NEXT: movdqa %xmm2, %xmm0 
; SSE2-NEXT: psrlw $1, %xmm0 ; SSE2-NEXT: pand {{.*}}(%rip), %xmm0 -; SSE2-NEXT: psubb %xmm0, %xmm1 +; SSE2-NEXT: psubb %xmm0, %xmm2 ; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51] -; SSE2-NEXT: movdqa %xmm1, %xmm2 -; SSE2-NEXT: pand %xmm0, %xmm2 -; SSE2-NEXT: psrlw $2, %xmm1 +; SSE2-NEXT: movdqa %xmm2, %xmm1 ; SSE2-NEXT: pand %xmm0, %xmm1 -; SSE2-NEXT: paddb %xmm2, %xmm1 -; SSE2-NEXT: movdqa %xmm1, %xmm0 +; SSE2-NEXT: psrlw $2, %xmm2 +; SSE2-NEXT: pand %xmm0, %xmm2 +; SSE2-NEXT: paddb %xmm1, %xmm2 +; SSE2-NEXT: movdqa %xmm2, %xmm0 ; SSE2-NEXT: psrlw $4, %xmm0 -; SSE2-NEXT: paddb %xmm1, %xmm0 +; SSE2-NEXT: paddb %xmm2, %xmm0 ; SSE2-NEXT: pand {{.*}}(%rip), %xmm0 ; SSE2-NEXT: retq ; @@ -1122,20 +1163,21 @@ ; SSE3-NEXT: pxor %xmm1, %xmm1 ; SSE3-NEXT: psubb %xmm0, %xmm1 ; SSE3-NEXT: pand %xmm0, %xmm1 -; SSE3-NEXT: psubb {{.*}}(%rip), %xmm1 -; SSE3-NEXT: movdqa %xmm1, %xmm0 +; SSE3-NEXT: pcmpeqd %xmm2, %xmm2 +; SSE3-NEXT: paddb %xmm1, %xmm2 +; SSE3-NEXT: movdqa %xmm2, %xmm0 ; SSE3-NEXT: psrlw $1, %xmm0 ; SSE3-NEXT: pand {{.*}}(%rip), %xmm0 -; SSE3-NEXT: psubb %xmm0, %xmm1 +; SSE3-NEXT: psubb %xmm0, %xmm2 ; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51] -; SSE3-NEXT: movdqa %xmm1, %xmm2 -; SSE3-NEXT: pand %xmm0, %xmm2 -; SSE3-NEXT: psrlw $2, %xmm1 +; SSE3-NEXT: movdqa %xmm2, %xmm1 ; SSE3-NEXT: pand %xmm0, %xmm1 -; SSE3-NEXT: paddb %xmm2, %xmm1 -; SSE3-NEXT: movdqa %xmm1, %xmm0 +; SSE3-NEXT: psrlw $2, %xmm2 +; SSE3-NEXT: pand %xmm0, %xmm2 +; SSE3-NEXT: paddb %xmm1, %xmm2 +; SSE3-NEXT: movdqa %xmm2, %xmm0 ; SSE3-NEXT: psrlw $4, %xmm0 -; SSE3-NEXT: paddb %xmm1, %xmm0 +; SSE3-NEXT: paddb %xmm2, %xmm0 ; SSE3-NEXT: pand {{.*}}(%rip), %xmm0 ; SSE3-NEXT: retq ; @@ -1144,16 +1186,17 @@ ; SSSE3-NEXT: pxor %xmm1, %xmm1 ; SSSE3-NEXT: psubb %xmm0, %xmm1 ; SSSE3-NEXT: pand %xmm0, %xmm1 -; SSSE3-NEXT: psubb {{.*}}(%rip), %xmm1 -; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] -; SSSE3-NEXT: movdqa %xmm1, %xmm3 -; SSSE3-NEXT: pand %xmm2, %xmm3 +; SSSE3-NEXT: pcmpeqd %xmm2, %xmm2 +; SSSE3-NEXT: paddb %xmm1, %xmm2 +; SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; SSSE3-NEXT: movdqa %xmm2, %xmm3 +; SSSE3-NEXT: pand %xmm1, %xmm3 ; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] ; SSSE3-NEXT: movdqa %xmm0, %xmm4 ; SSSE3-NEXT: pshufb %xmm3, %xmm4 -; SSSE3-NEXT: psrlw $4, %xmm1 -; SSSE3-NEXT: pand %xmm2, %xmm1 -; SSSE3-NEXT: pshufb %xmm1, %xmm0 +; SSSE3-NEXT: psrlw $4, %xmm2 +; SSSE3-NEXT: pand %xmm1, %xmm2 +; SSSE3-NEXT: pshufb %xmm2, %xmm0 ; SSSE3-NEXT: paddb %xmm4, %xmm0 ; SSSE3-NEXT: retq ; @@ -1162,16 +1205,17 @@ ; SSE41-NEXT: pxor %xmm1, %xmm1 ; SSE41-NEXT: psubb %xmm0, %xmm1 ; SSE41-NEXT: pand %xmm0, %xmm1 -; SSE41-NEXT: psubb {{.*}}(%rip), %xmm1 -; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] -; SSE41-NEXT: movdqa %xmm1, %xmm3 -; SSE41-NEXT: pand %xmm2, %xmm3 +; SSE41-NEXT: pcmpeqd %xmm2, %xmm2 +; SSE41-NEXT: paddb %xmm1, %xmm2 +; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; SSE41-NEXT: movdqa %xmm2, %xmm3 +; SSE41-NEXT: pand %xmm1, %xmm3 ; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] ; SSE41-NEXT: movdqa %xmm0, %xmm4 ; SSE41-NEXT: pshufb %xmm3, %xmm4 -; SSE41-NEXT: psrlw $4, %xmm1 -; SSE41-NEXT: pand %xmm2, %xmm1 -; SSE41-NEXT: pshufb %xmm1, %xmm0 +; SSE41-NEXT: psrlw $4, %xmm2 +; SSE41-NEXT: pand %xmm1, %xmm2 +; SSE41-NEXT: pshufb 
%xmm2, %xmm0 ; SSE41-NEXT: paddb %xmm4, %xmm0 ; SSE41-NEXT: retq ; @@ -1180,7 +1224,8 @@ ; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; AVX-NEXT: vpsubb %xmm0, %xmm1, %xmm1 ; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0 -; AVX-NEXT: vpsubb {{.*}}(%rip), %xmm0, %xmm0 +; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1 +; AVX-NEXT: vpaddb %xmm1, %xmm0, %xmm0 ; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] ; AVX-NEXT: vpand %xmm1, %xmm0, %xmm2 ; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] @@ -1196,7 +1241,8 @@ ; AVX512VPOPCNTDQ-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; AVX512VPOPCNTDQ-NEXT: vpsubb %xmm0, %xmm1, %xmm1 ; AVX512VPOPCNTDQ-NEXT: vpand %xmm1, %xmm0, %xmm0 -; AVX512VPOPCNTDQ-NEXT: vpsubb {{.*}}(%rip), %xmm0, %xmm0 +; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1 +; AVX512VPOPCNTDQ-NEXT: vpaddb %xmm1, %xmm0, %xmm0 ; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] ; AVX512VPOPCNTDQ-NEXT: vpand %xmm1, %xmm0, %xmm2 ; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] @@ -1212,16 +1258,17 @@ ; X32-SSE-NEXT: pxor %xmm1, %xmm1 ; X32-SSE-NEXT: psubb %xmm0, %xmm1 ; X32-SSE-NEXT: pand %xmm0, %xmm1 -; X32-SSE-NEXT: psubb {{\.LCPI.*}}, %xmm1 -; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] -; X32-SSE-NEXT: movdqa %xmm1, %xmm3 -; X32-SSE-NEXT: pand %xmm2, %xmm3 +; X32-SSE-NEXT: pcmpeqd %xmm2, %xmm2 +; X32-SSE-NEXT: paddb %xmm1, %xmm2 +; X32-SSE-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; X32-SSE-NEXT: movdqa %xmm2, %xmm3 +; X32-SSE-NEXT: pand %xmm1, %xmm3 ; X32-SSE-NEXT: movdqa {{.*#+}} xmm0 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] ; X32-SSE-NEXT: movdqa %xmm0, %xmm4 ; X32-SSE-NEXT: pshufb %xmm3, %xmm4 -; X32-SSE-NEXT: psrlw $4, %xmm1 -; X32-SSE-NEXT: pand %xmm2, %xmm1 -; X32-SSE-NEXT: pshufb %xmm1, %xmm0 +; X32-SSE-NEXT: psrlw $4, %xmm2 +; X32-SSE-NEXT: pand %xmm1, %xmm2 +; X32-SSE-NEXT: pshufb %xmm2, %xmm0 ; X32-SSE-NEXT: paddb %xmm4, %xmm0 ; X32-SSE-NEXT: retl %out = call <16 x i8> @llvm.cttz.v16i8(<16 x i8> %in, i1 0) @@ -1234,20 +1281,21 @@ ; SSE2-NEXT: pxor %xmm1, %xmm1 ; SSE2-NEXT: psubb %xmm0, %xmm1 ; SSE2-NEXT: pand %xmm0, %xmm1 -; SSE2-NEXT: psubb {{.*}}(%rip), %xmm1 -; SSE2-NEXT: movdqa %xmm1, %xmm0 +; SSE2-NEXT: pcmpeqd %xmm2, %xmm2 +; SSE2-NEXT: paddb %xmm1, %xmm2 +; SSE2-NEXT: movdqa %xmm2, %xmm0 ; SSE2-NEXT: psrlw $1, %xmm0 ; SSE2-NEXT: pand {{.*}}(%rip), %xmm0 -; SSE2-NEXT: psubb %xmm0, %xmm1 +; SSE2-NEXT: psubb %xmm0, %xmm2 ; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51] -; SSE2-NEXT: movdqa %xmm1, %xmm2 -; SSE2-NEXT: pand %xmm0, %xmm2 -; SSE2-NEXT: psrlw $2, %xmm1 +; SSE2-NEXT: movdqa %xmm2, %xmm1 ; SSE2-NEXT: pand %xmm0, %xmm1 -; SSE2-NEXT: paddb %xmm2, %xmm1 -; SSE2-NEXT: movdqa %xmm1, %xmm0 +; SSE2-NEXT: psrlw $2, %xmm2 +; SSE2-NEXT: pand %xmm0, %xmm2 +; SSE2-NEXT: paddb %xmm1, %xmm2 +; SSE2-NEXT: movdqa %xmm2, %xmm0 ; SSE2-NEXT: psrlw $4, %xmm0 -; SSE2-NEXT: paddb %xmm1, %xmm0 +; SSE2-NEXT: paddb %xmm2, %xmm0 ; SSE2-NEXT: pand {{.*}}(%rip), %xmm0 ; SSE2-NEXT: retq ; @@ -1256,20 +1304,21 @@ ; SSE3-NEXT: pxor %xmm1, %xmm1 ; SSE3-NEXT: psubb %xmm0, %xmm1 ; SSE3-NEXT: pand %xmm0, %xmm1 -; SSE3-NEXT: psubb {{.*}}(%rip), %xmm1 -; SSE3-NEXT: movdqa %xmm1, %xmm0 +; SSE3-NEXT: pcmpeqd %xmm2, %xmm2 +; SSE3-NEXT: paddb %xmm1, %xmm2 +; SSE3-NEXT: movdqa %xmm2, %xmm0 ; SSE3-NEXT: psrlw $1, %xmm0 ; SSE3-NEXT: pand {{.*}}(%rip), %xmm0 -; SSE3-NEXT: psubb %xmm0, %xmm1 +; 
SSE3-NEXT: psubb %xmm0, %xmm2 ; SSE3-NEXT: movdqa {{.*#+}} xmm0 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51] -; SSE3-NEXT: movdqa %xmm1, %xmm2 -; SSE3-NEXT: pand %xmm0, %xmm2 -; SSE3-NEXT: psrlw $2, %xmm1 +; SSE3-NEXT: movdqa %xmm2, %xmm1 ; SSE3-NEXT: pand %xmm0, %xmm1 -; SSE3-NEXT: paddb %xmm2, %xmm1 -; SSE3-NEXT: movdqa %xmm1, %xmm0 +; SSE3-NEXT: psrlw $2, %xmm2 +; SSE3-NEXT: pand %xmm0, %xmm2 +; SSE3-NEXT: paddb %xmm1, %xmm2 +; SSE3-NEXT: movdqa %xmm2, %xmm0 ; SSE3-NEXT: psrlw $4, %xmm0 -; SSE3-NEXT: paddb %xmm1, %xmm0 +; SSE3-NEXT: paddb %xmm2, %xmm0 ; SSE3-NEXT: pand {{.*}}(%rip), %xmm0 ; SSE3-NEXT: retq ; @@ -1278,16 +1327,17 @@ ; SSSE3-NEXT: pxor %xmm1, %xmm1 ; SSSE3-NEXT: psubb %xmm0, %xmm1 ; SSSE3-NEXT: pand %xmm0, %xmm1 -; SSSE3-NEXT: psubb {{.*}}(%rip), %xmm1 -; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] -; SSSE3-NEXT: movdqa %xmm1, %xmm3 -; SSSE3-NEXT: pand %xmm2, %xmm3 +; SSSE3-NEXT: pcmpeqd %xmm2, %xmm2 +; SSSE3-NEXT: paddb %xmm1, %xmm2 +; SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; SSSE3-NEXT: movdqa %xmm2, %xmm3 +; SSSE3-NEXT: pand %xmm1, %xmm3 ; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] ; SSSE3-NEXT: movdqa %xmm0, %xmm4 ; SSSE3-NEXT: pshufb %xmm3, %xmm4 -; SSSE3-NEXT: psrlw $4, %xmm1 -; SSSE3-NEXT: pand %xmm2, %xmm1 -; SSSE3-NEXT: pshufb %xmm1, %xmm0 +; SSSE3-NEXT: psrlw $4, %xmm2 +; SSSE3-NEXT: pand %xmm1, %xmm2 +; SSSE3-NEXT: pshufb %xmm2, %xmm0 ; SSSE3-NEXT: paddb %xmm4, %xmm0 ; SSSE3-NEXT: retq ; @@ -1296,16 +1346,17 @@ ; SSE41-NEXT: pxor %xmm1, %xmm1 ; SSE41-NEXT: psubb %xmm0, %xmm1 ; SSE41-NEXT: pand %xmm0, %xmm1 -; SSE41-NEXT: psubb {{.*}}(%rip), %xmm1 -; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] -; SSE41-NEXT: movdqa %xmm1, %xmm3 -; SSE41-NEXT: pand %xmm2, %xmm3 +; SSE41-NEXT: pcmpeqd %xmm2, %xmm2 +; SSE41-NEXT: paddb %xmm1, %xmm2 +; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; SSE41-NEXT: movdqa %xmm2, %xmm3 +; SSE41-NEXT: pand %xmm1, %xmm3 ; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] ; SSE41-NEXT: movdqa %xmm0, %xmm4 ; SSE41-NEXT: pshufb %xmm3, %xmm4 -; SSE41-NEXT: psrlw $4, %xmm1 -; SSE41-NEXT: pand %xmm2, %xmm1 -; SSE41-NEXT: pshufb %xmm1, %xmm0 +; SSE41-NEXT: psrlw $4, %xmm2 +; SSE41-NEXT: pand %xmm1, %xmm2 +; SSE41-NEXT: pshufb %xmm2, %xmm0 ; SSE41-NEXT: paddb %xmm4, %xmm0 ; SSE41-NEXT: retq ; @@ -1314,7 +1365,8 @@ ; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; AVX-NEXT: vpsubb %xmm0, %xmm1, %xmm1 ; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0 -; AVX-NEXT: vpsubb {{.*}}(%rip), %xmm0, %xmm0 +; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1 +; AVX-NEXT: vpaddb %xmm1, %xmm0, %xmm0 ; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] ; AVX-NEXT: vpand %xmm1, %xmm0, %xmm2 ; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] @@ -1330,7 +1382,8 @@ ; AVX512VPOPCNTDQ-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; AVX512VPOPCNTDQ-NEXT: vpsubb %xmm0, %xmm1, %xmm1 ; AVX512VPOPCNTDQ-NEXT: vpand %xmm1, %xmm0, %xmm0 -; AVX512VPOPCNTDQ-NEXT: vpsubb {{.*}}(%rip), %xmm0, %xmm0 +; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1 +; AVX512VPOPCNTDQ-NEXT: vpaddb %xmm1, %xmm0, %xmm0 ; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] ; AVX512VPOPCNTDQ-NEXT: vpand %xmm1, %xmm0, %xmm2 ; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] @@ -1346,16 +1399,17 @@ ; 
X32-SSE-NEXT: pxor %xmm1, %xmm1 ; X32-SSE-NEXT: psubb %xmm0, %xmm1 ; X32-SSE-NEXT: pand %xmm0, %xmm1 -; X32-SSE-NEXT: psubb {{\.LCPI.*}}, %xmm1 -; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] -; X32-SSE-NEXT: movdqa %xmm1, %xmm3 -; X32-SSE-NEXT: pand %xmm2, %xmm3 +; X32-SSE-NEXT: pcmpeqd %xmm2, %xmm2 +; X32-SSE-NEXT: paddb %xmm1, %xmm2 +; X32-SSE-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; X32-SSE-NEXT: movdqa %xmm2, %xmm3 +; X32-SSE-NEXT: pand %xmm1, %xmm3 ; X32-SSE-NEXT: movdqa {{.*#+}} xmm0 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] ; X32-SSE-NEXT: movdqa %xmm0, %xmm4 ; X32-SSE-NEXT: pshufb %xmm3, %xmm4 -; X32-SSE-NEXT: psrlw $4, %xmm1 -; X32-SSE-NEXT: pand %xmm2, %xmm1 -; X32-SSE-NEXT: pshufb %xmm1, %xmm0 +; X32-SSE-NEXT: psrlw $4, %xmm2 +; X32-SSE-NEXT: pand %xmm1, %xmm2 +; X32-SSE-NEXT: pshufb %xmm2, %xmm0 ; X32-SSE-NEXT: paddb %xmm4, %xmm0 ; X32-SSE-NEXT: retl %out = call <16 x i8> @llvm.cttz.v16i8(<16 x i8> %in, i1 -1) Index: test/CodeGen/X86/vector-tzcnt-256.ll =================================================================== --- test/CodeGen/X86/vector-tzcnt-256.ll +++ test/CodeGen/X86/vector-tzcnt-256.ll @@ -15,8 +15,8 @@ ; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2 ; AVX1-NEXT: vpsubq %xmm1, %xmm2, %xmm3 ; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1] -; AVX1-NEXT: vpsubq %xmm3, %xmm1, %xmm1 +; AVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3 +; AVX1-NEXT: vpaddq %xmm3, %xmm1, %xmm1 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] ; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm5 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] @@ -28,7 +28,7 @@ ; AVX1-NEXT: vpsadbw %xmm2, %xmm1, %xmm1 ; AVX1-NEXT: vpsubq %xmm0, %xmm2, %xmm5 ; AVX1-NEXT: vpand %xmm5, %xmm0, %xmm0 -; AVX1-NEXT: vpsubq %xmm3, %xmm0, %xmm0 +; AVX1-NEXT: vpaddq %xmm3, %xmm0, %xmm0 ; AVX1-NEXT: vpand %xmm4, %xmm0, %xmm3 ; AVX1-NEXT: vpshufb %xmm3, %xmm6, %xmm3 ; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm0 @@ -44,8 +44,8 @@ ; AVX2-NEXT: vpxor %ymm1, %ymm1, %ymm1 ; AVX2-NEXT: vpsubq %ymm0, %ymm1, %ymm2 ; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0 -; AVX2-NEXT: vpbroadcastq {{.*}}(%rip), %ymm2 -; AVX2-NEXT: vpsubq %ymm2, %ymm0, %ymm0 +; AVX2-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2 +; AVX2-NEXT: vpaddq %ymm2, %ymm0, %ymm0 ; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] ; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm3 ; AVX2-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] @@ -62,7 +62,8 @@ ; AVX512CDVL-NEXT: vpxor %ymm1, %ymm1, %ymm1 ; AVX512CDVL-NEXT: vpsubq %ymm0, %ymm1, %ymm2 ; AVX512CDVL-NEXT: vpand %ymm2, %ymm0, %ymm0 -; AVX512CDVL-NEXT: vpsubq {{.*}}(%rip){1to4}, %ymm0, %ymm0 +; AVX512CDVL-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2 +; AVX512CDVL-NEXT: vpaddq %ymm2, %ymm0, %ymm0 ; AVX512CDVL-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] ; AVX512CDVL-NEXT: vpand %ymm2, %ymm0, %ymm3 ; AVX512CDVL-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] @@ -79,8 +80,8 @@ ; AVX512CD-NEXT: vpxor %ymm1, %ymm1, %ymm1 ; AVX512CD-NEXT: vpsubq %ymm0, %ymm1, %ymm2 ; AVX512CD-NEXT: vpand %ymm2, %ymm0, %ymm0 -; AVX512CD-NEXT: vpbroadcastq {{.*}}(%rip), %ymm2 -; AVX512CD-NEXT: vpsubq %ymm2, %ymm0, %ymm0 +; AVX512CD-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2 +; AVX512CD-NEXT: vpaddq 
%ymm2, %ymm0, %ymm0 ; AVX512CD-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] ; AVX512CD-NEXT: vpand %ymm2, %ymm0, %ymm3 ; AVX512CD-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] @@ -97,8 +98,8 @@ ; AVX512VPOPCNTDQ-NEXT: vpxor %ymm1, %ymm1, %ymm1 ; AVX512VPOPCNTDQ-NEXT: vpsubq %ymm0, %ymm1, %ymm1 ; AVX512VPOPCNTDQ-NEXT: vpand %ymm1, %ymm0, %ymm0 -; AVX512VPOPCNTDQ-NEXT: vpbroadcastq {{.*}}(%rip), %ymm1 -; AVX512VPOPCNTDQ-NEXT: vpsubq %ymm1, %ymm0, %ymm0 +; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 +; AVX512VPOPCNTDQ-NEXT: vpaddq %ymm1, %ymm0, %ymm0 ; AVX512VPOPCNTDQ-NEXT: vpopcntq %zmm0, %zmm0 ; AVX512VPOPCNTDQ-NEXT: # kill: %YMM0 %YMM0 %ZMM0 ; AVX512VPOPCNTDQ-NEXT: retq @@ -130,8 +131,8 @@ ; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2 ; AVX1-NEXT: vpsubq %xmm1, %xmm2, %xmm3 ; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1] -; AVX1-NEXT: vpsubq %xmm3, %xmm1, %xmm1 +; AVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3 +; AVX1-NEXT: vpaddq %xmm3, %xmm1, %xmm1 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] ; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm5 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] @@ -143,7 +144,7 @@ ; AVX1-NEXT: vpsadbw %xmm2, %xmm1, %xmm1 ; AVX1-NEXT: vpsubq %xmm0, %xmm2, %xmm5 ; AVX1-NEXT: vpand %xmm5, %xmm0, %xmm0 -; AVX1-NEXT: vpsubq %xmm3, %xmm0, %xmm0 +; AVX1-NEXT: vpaddq %xmm3, %xmm0, %xmm0 ; AVX1-NEXT: vpand %xmm4, %xmm0, %xmm3 ; AVX1-NEXT: vpshufb %xmm3, %xmm6, %xmm3 ; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm0 @@ -159,8 +160,8 @@ ; AVX2-NEXT: vpxor %ymm1, %ymm1, %ymm1 ; AVX2-NEXT: vpsubq %ymm0, %ymm1, %ymm2 ; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0 -; AVX2-NEXT: vpbroadcastq {{.*}}(%rip), %ymm2 -; AVX2-NEXT: vpsubq %ymm2, %ymm0, %ymm0 +; AVX2-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2 +; AVX2-NEXT: vpaddq %ymm2, %ymm0, %ymm0 ; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] ; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm3 ; AVX2-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] @@ -197,8 +198,8 @@ ; AVX512VPOPCNTDQ-NEXT: vpxor %ymm1, %ymm1, %ymm1 ; AVX512VPOPCNTDQ-NEXT: vpsubq %ymm0, %ymm1, %ymm1 ; AVX512VPOPCNTDQ-NEXT: vpand %ymm1, %ymm0, %ymm0 -; AVX512VPOPCNTDQ-NEXT: vpbroadcastq {{.*}}(%rip), %ymm1 -; AVX512VPOPCNTDQ-NEXT: vpsubq %ymm1, %ymm0, %ymm0 +; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 +; AVX512VPOPCNTDQ-NEXT: vpaddq %ymm1, %ymm0, %ymm0 ; AVX512VPOPCNTDQ-NEXT: vpopcntq %zmm0, %zmm0 ; AVX512VPOPCNTDQ-NEXT: # kill: %YMM0 %YMM0 %ZMM0 ; AVX512VPOPCNTDQ-NEXT: retq @@ -230,8 +231,8 @@ ; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2 ; AVX1-NEXT: vpsubd %xmm1, %xmm2, %xmm3 ; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1,1,1] -; AVX1-NEXT: vpsubd %xmm3, %xmm1, %xmm1 +; AVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3 +; AVX1-NEXT: vpaddd %xmm3, %xmm1, %xmm1 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] ; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm5 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] @@ -247,7 +248,7 @@ ; AVX1-NEXT: vpackuswb %xmm5, %xmm1, %xmm1 ; AVX1-NEXT: vpsubd %xmm0, %xmm2, %xmm5 ; AVX1-NEXT: vpand %xmm5, %xmm0, %xmm0 -; AVX1-NEXT: vpsubd %xmm3, %xmm0, %xmm0 +; AVX1-NEXT: vpaddd %xmm3, %xmm0, %xmm0 ; AVX1-NEXT: vpand %xmm4, %xmm0, %xmm3 ; AVX1-NEXT: vpshufb 
%xmm3, %xmm6, %xmm3 ; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm0 @@ -267,8 +268,8 @@ ; AVX2-NEXT: vpxor %ymm1, %ymm1, %ymm1 ; AVX2-NEXT: vpsubd %ymm0, %ymm1, %ymm2 ; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0 -; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm2 -; AVX2-NEXT: vpsubd %ymm2, %ymm0, %ymm0 +; AVX2-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2 +; AVX2-NEXT: vpaddd %ymm2, %ymm0, %ymm0 ; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] ; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm3 ; AVX2-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] @@ -289,7 +290,8 @@ ; AVX512CDVL-NEXT: vpxor %ymm1, %ymm1, %ymm1 ; AVX512CDVL-NEXT: vpsubd %ymm0, %ymm1, %ymm2 ; AVX512CDVL-NEXT: vpand %ymm2, %ymm0, %ymm0 -; AVX512CDVL-NEXT: vpsubd {{.*}}(%rip){1to8}, %ymm0, %ymm0 +; AVX512CDVL-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2 +; AVX512CDVL-NEXT: vpaddd %ymm2, %ymm0, %ymm0 ; AVX512CDVL-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] ; AVX512CDVL-NEXT: vpand %ymm2, %ymm0, %ymm3 ; AVX512CDVL-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] @@ -310,8 +312,8 @@ ; AVX512CD-NEXT: vpxor %ymm1, %ymm1, %ymm1 ; AVX512CD-NEXT: vpsubd %ymm0, %ymm1, %ymm2 ; AVX512CD-NEXT: vpand %ymm2, %ymm0, %ymm0 -; AVX512CD-NEXT: vpbroadcastd {{.*}}(%rip), %ymm2 -; AVX512CD-NEXT: vpsubd %ymm2, %ymm0, %ymm0 +; AVX512CD-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2 +; AVX512CD-NEXT: vpaddd %ymm2, %ymm0, %ymm0 ; AVX512CD-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] ; AVX512CD-NEXT: vpand %ymm2, %ymm0, %ymm3 ; AVX512CD-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] @@ -332,8 +334,8 @@ ; AVX512VPOPCNTDQ-NEXT: vpxor %ymm1, %ymm1, %ymm1 ; AVX512VPOPCNTDQ-NEXT: vpsubd %ymm0, %ymm1, %ymm1 ; AVX512VPOPCNTDQ-NEXT: vpand %ymm1, %ymm0, %ymm0 -; AVX512VPOPCNTDQ-NEXT: vpbroadcastd {{.*}}(%rip), %ymm1 -; AVX512VPOPCNTDQ-NEXT: vpsubd %ymm1, %ymm0, %ymm0 +; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 +; AVX512VPOPCNTDQ-NEXT: vpaddd %ymm1, %ymm0, %ymm0 ; AVX512VPOPCNTDQ-NEXT: vpopcntd %zmm0, %zmm0 ; AVX512VPOPCNTDQ-NEXT: # kill: %YMM0 %YMM0 %ZMM0 ; AVX512VPOPCNTDQ-NEXT: retq @@ -343,8 +345,8 @@ ; X32-AVX-NEXT: vpxor %ymm1, %ymm1, %ymm1 ; X32-AVX-NEXT: vpsubd %ymm0, %ymm1, %ymm2 ; X32-AVX-NEXT: vpand %ymm2, %ymm0, %ymm0 -; X32-AVX-NEXT: vpbroadcastd {{\.LCPI.*}}, %ymm2 -; X32-AVX-NEXT: vpsubd %ymm2, %ymm0, %ymm0 +; X32-AVX-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2 +; X32-AVX-NEXT: vpaddd %ymm2, %ymm0, %ymm0 ; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] ; X32-AVX-NEXT: vpand %ymm2, %ymm0, %ymm3 ; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] @@ -370,8 +372,8 @@ ; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2 ; AVX1-NEXT: vpsubd %xmm1, %xmm2, %xmm3 ; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1,1,1] -; AVX1-NEXT: vpsubd %xmm3, %xmm1, %xmm1 +; AVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3 +; AVX1-NEXT: vpaddd %xmm3, %xmm1, %xmm1 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] ; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm5 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] @@ -387,7 +389,7 @@ ; AVX1-NEXT: 
vpackuswb %xmm5, %xmm1, %xmm1 ; AVX1-NEXT: vpsubd %xmm0, %xmm2, %xmm5 ; AVX1-NEXT: vpand %xmm5, %xmm0, %xmm0 -; AVX1-NEXT: vpsubd %xmm3, %xmm0, %xmm0 +; AVX1-NEXT: vpaddd %xmm3, %xmm0, %xmm0 ; AVX1-NEXT: vpand %xmm4, %xmm0, %xmm3 ; AVX1-NEXT: vpshufb %xmm3, %xmm6, %xmm3 ; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm0 @@ -407,8 +409,8 @@ ; AVX2-NEXT: vpxor %ymm1, %ymm1, %ymm1 ; AVX2-NEXT: vpsubd %ymm0, %ymm1, %ymm2 ; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0 -; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %ymm2 -; AVX2-NEXT: vpsubd %ymm2, %ymm0, %ymm0 +; AVX2-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2 +; AVX2-NEXT: vpaddd %ymm2, %ymm0, %ymm0 ; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] ; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm3 ; AVX2-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] @@ -449,8 +451,8 @@ ; AVX512VPOPCNTDQ-NEXT: vpxor %ymm1, %ymm1, %ymm1 ; AVX512VPOPCNTDQ-NEXT: vpsubd %ymm0, %ymm1, %ymm1 ; AVX512VPOPCNTDQ-NEXT: vpand %ymm1, %ymm0, %ymm0 -; AVX512VPOPCNTDQ-NEXT: vpbroadcastd {{.*}}(%rip), %ymm1 -; AVX512VPOPCNTDQ-NEXT: vpsubd %ymm1, %ymm0, %ymm0 +; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 +; AVX512VPOPCNTDQ-NEXT: vpaddd %ymm1, %ymm0, %ymm0 ; AVX512VPOPCNTDQ-NEXT: vpopcntd %zmm0, %zmm0 ; AVX512VPOPCNTDQ-NEXT: # kill: %YMM0 %YMM0 %ZMM0 ; AVX512VPOPCNTDQ-NEXT: retq @@ -460,8 +462,8 @@ ; X32-AVX-NEXT: vpxor %ymm1, %ymm1, %ymm1 ; X32-AVX-NEXT: vpsubd %ymm0, %ymm1, %ymm2 ; X32-AVX-NEXT: vpand %ymm2, %ymm0, %ymm0 -; X32-AVX-NEXT: vpbroadcastd {{\.LCPI.*}}, %ymm2 -; X32-AVX-NEXT: vpsubd %ymm2, %ymm0, %ymm0 +; X32-AVX-NEXT: vpcmpeqd %ymm2, %ymm2, %ymm2 +; X32-AVX-NEXT: vpaddd %ymm2, %ymm0, %ymm0 ; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] ; X32-AVX-NEXT: vpand %ymm2, %ymm0, %ymm3 ; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] @@ -486,8 +488,8 @@ ; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; AVX1-NEXT: vpsubw %xmm0, %xmm1, %xmm2 ; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm2 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1] -; AVX1-NEXT: vpsubw %xmm3, %xmm2, %xmm2 +; AVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3 +; AVX1-NEXT: vpaddw %xmm3, %xmm2, %xmm2 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] ; AVX1-NEXT: vpand %xmm4, %xmm2, %xmm5 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] @@ -502,7 +504,7 @@ ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0 ; AVX1-NEXT: vpsubw %xmm0, %xmm1, %xmm1 ; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: vpsubw %xmm3, %xmm0, %xmm0 +; AVX1-NEXT: vpaddw %xmm3, %xmm0, %xmm0 ; AVX1-NEXT: vpand %xmm4, %xmm0, %xmm1 ; AVX1-NEXT: vpshufb %xmm1, %xmm6, %xmm1 ; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm0 @@ -520,7 +522,8 @@ ; AVX2-NEXT: vpxor %ymm1, %ymm1, %ymm1 ; AVX2-NEXT: vpsubw %ymm0, %ymm1, %ymm1 ; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0 -; AVX2-NEXT: vpsubw {{.*}}(%rip), %ymm0, %ymm0 +; AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 +; AVX2-NEXT: vpaddw %ymm1, %ymm0, %ymm0 ; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] ; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2 ; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] @@ -539,7 +542,8 @@ ; AVX512CDVL-NEXT: vpxor %ymm1, %ymm1, %ymm1 ; AVX512CDVL-NEXT: vpsubw %ymm0, %ymm1, %ymm1 ; 
AVX512CDVL-NEXT: vpand %ymm1, %ymm0, %ymm0 -; AVX512CDVL-NEXT: vpsubw {{.*}}(%rip), %ymm0, %ymm0 +; AVX512CDVL-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 +; AVX512CDVL-NEXT: vpaddw %ymm1, %ymm0, %ymm0 ; AVX512CDVL-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] ; AVX512CDVL-NEXT: vpand %ymm1, %ymm0, %ymm2 ; AVX512CDVL-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] @@ -558,7 +562,8 @@ ; AVX512CD-NEXT: vpxor %ymm1, %ymm1, %ymm1 ; AVX512CD-NEXT: vpsubw %ymm0, %ymm1, %ymm1 ; AVX512CD-NEXT: vpand %ymm1, %ymm0, %ymm0 -; AVX512CD-NEXT: vpsubw {{.*}}(%rip), %ymm0, %ymm0 +; AVX512CD-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 +; AVX512CD-NEXT: vpaddw %ymm1, %ymm0, %ymm0 ; AVX512CD-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] ; AVX512CD-NEXT: vpand %ymm1, %ymm0, %ymm2 ; AVX512CD-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] @@ -577,7 +582,8 @@ ; AVX512VPOPCNTDQ-NEXT: vpxor %ymm1, %ymm1, %ymm1 ; AVX512VPOPCNTDQ-NEXT: vpsubw %ymm0, %ymm1, %ymm1 ; AVX512VPOPCNTDQ-NEXT: vpand %ymm1, %ymm0, %ymm0 -; AVX512VPOPCNTDQ-NEXT: vpsubw {{.*}}(%rip), %ymm0, %ymm0 +; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 +; AVX512VPOPCNTDQ-NEXT: vpaddw %ymm1, %ymm0, %ymm0 ; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] ; AVX512VPOPCNTDQ-NEXT: vpand %ymm1, %ymm0, %ymm2 ; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] @@ -596,7 +602,8 @@ ; X32-AVX-NEXT: vpxor %ymm1, %ymm1, %ymm1 ; X32-AVX-NEXT: vpsubw %ymm0, %ymm1, %ymm1 ; X32-AVX-NEXT: vpand %ymm1, %ymm0, %ymm0 -; X32-AVX-NEXT: vpsubw {{\.LCPI.*}}, %ymm0, %ymm0 +; X32-AVX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 +; X32-AVX-NEXT: vpaddw %ymm1, %ymm0, %ymm0 ; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] ; X32-AVX-NEXT: vpand %ymm1, %ymm0, %ymm2 ; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] @@ -619,8 +626,8 @@ ; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; AVX1-NEXT: vpsubw %xmm0, %xmm1, %xmm2 ; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm2 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1] -; AVX1-NEXT: vpsubw %xmm3, %xmm2, %xmm2 +; AVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3 +; AVX1-NEXT: vpaddw %xmm3, %xmm2, %xmm2 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] ; AVX1-NEXT: vpand %xmm4, %xmm2, %xmm5 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] @@ -635,7 +642,7 @@ ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0 ; AVX1-NEXT: vpsubw %xmm0, %xmm1, %xmm1 ; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0 -; AVX1-NEXT: vpsubw %xmm3, %xmm0, %xmm0 +; AVX1-NEXT: vpaddw %xmm3, %xmm0, %xmm0 ; AVX1-NEXT: vpand %xmm4, %xmm0, %xmm1 ; AVX1-NEXT: vpshufb %xmm1, %xmm6, %xmm1 ; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm0 @@ -653,7 +660,8 @@ ; AVX2-NEXT: vpxor %ymm1, %ymm1, %ymm1 ; AVX2-NEXT: vpsubw %ymm0, %ymm1, %ymm1 ; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0 -; AVX2-NEXT: vpsubw {{.*}}(%rip), %ymm0, %ymm0 +; AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 +; AVX2-NEXT: vpaddw %ymm1, %ymm0, %ymm0 ; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] ; 
AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2 ; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] @@ -672,7 +680,8 @@ ; AVX512CDVL-NEXT: vpxor %ymm1, %ymm1, %ymm1 ; AVX512CDVL-NEXT: vpsubw %ymm0, %ymm1, %ymm1 ; AVX512CDVL-NEXT: vpand %ymm1, %ymm0, %ymm0 -; AVX512CDVL-NEXT: vpsubw {{.*}}(%rip), %ymm0, %ymm0 +; AVX512CDVL-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 +; AVX512CDVL-NEXT: vpaddw %ymm1, %ymm0, %ymm0 ; AVX512CDVL-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] ; AVX512CDVL-NEXT: vpand %ymm1, %ymm0, %ymm2 ; AVX512CDVL-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] @@ -691,7 +700,8 @@ ; AVX512CD-NEXT: vpxor %ymm1, %ymm1, %ymm1 ; AVX512CD-NEXT: vpsubw %ymm0, %ymm1, %ymm1 ; AVX512CD-NEXT: vpand %ymm1, %ymm0, %ymm0 -; AVX512CD-NEXT: vpsubw {{.*}}(%rip), %ymm0, %ymm0 +; AVX512CD-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 +; AVX512CD-NEXT: vpaddw %ymm1, %ymm0, %ymm0 ; AVX512CD-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] ; AVX512CD-NEXT: vpand %ymm1, %ymm0, %ymm2 ; AVX512CD-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] @@ -710,7 +720,8 @@ ; AVX512VPOPCNTDQ-NEXT: vpxor %ymm1, %ymm1, %ymm1 ; AVX512VPOPCNTDQ-NEXT: vpsubw %ymm0, %ymm1, %ymm1 ; AVX512VPOPCNTDQ-NEXT: vpand %ymm1, %ymm0, %ymm0 -; AVX512VPOPCNTDQ-NEXT: vpsubw {{.*}}(%rip), %ymm0, %ymm0 +; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 +; AVX512VPOPCNTDQ-NEXT: vpaddw %ymm1, %ymm0, %ymm0 ; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] ; AVX512VPOPCNTDQ-NEXT: vpand %ymm1, %ymm0, %ymm2 ; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] @@ -729,7 +740,8 @@ ; X32-AVX-NEXT: vpxor %ymm1, %ymm1, %ymm1 ; X32-AVX-NEXT: vpsubw %ymm0, %ymm1, %ymm1 ; X32-AVX-NEXT: vpand %ymm1, %ymm0, %ymm0 -; X32-AVX-NEXT: vpsubw {{\.LCPI.*}}, %ymm0, %ymm0 +; X32-AVX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 +; X32-AVX-NEXT: vpaddw %ymm1, %ymm0, %ymm0 ; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] ; X32-AVX-NEXT: vpand %ymm1, %ymm0, %ymm2 ; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] @@ -753,8 +765,8 @@ ; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2 ; AVX1-NEXT: vpsubb %xmm1, %xmm2, %xmm3 ; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1] -; AVX1-NEXT: vpsubb %xmm3, %xmm1, %xmm1 +; AVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3 +; AVX1-NEXT: vpaddb %xmm3, %xmm1, %xmm1 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] ; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm5 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] @@ -765,7 +777,7 @@ ; AVX1-NEXT: vpaddb %xmm5, %xmm1, %xmm1 ; AVX1-NEXT: vpsubb %xmm0, %xmm2, %xmm2 ; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0 -; AVX1-NEXT: vpsubb %xmm3, %xmm0, %xmm0 +; AVX1-NEXT: vpaddb %xmm3, %xmm0, %xmm0 ; AVX1-NEXT: vpand %xmm4, %xmm0, %xmm2 ; AVX1-NEXT: vpshufb %xmm2, %xmm6, %xmm2 ; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm0 @@ -780,7 +792,8 @@ ; AVX2-NEXT: vpxor %ymm1, %ymm1, %ymm1 ; AVX2-NEXT: vpsubb %ymm0, %ymm1, %ymm1 ; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0 -; 
AVX2-NEXT: vpsubb {{.*}}(%rip), %ymm0, %ymm0 +; AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 +; AVX2-NEXT: vpaddb %ymm1, %ymm0, %ymm0 ; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] ; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2 ; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] @@ -796,7 +809,8 @@ ; AVX512CDVL-NEXT: vpxor %ymm1, %ymm1, %ymm1 ; AVX512CDVL-NEXT: vpsubb %ymm0, %ymm1, %ymm1 ; AVX512CDVL-NEXT: vpand %ymm1, %ymm0, %ymm0 -; AVX512CDVL-NEXT: vpsubb {{.*}}(%rip), %ymm0, %ymm0 +; AVX512CDVL-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 +; AVX512CDVL-NEXT: vpaddb %ymm1, %ymm0, %ymm0 ; AVX512CDVL-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] ; AVX512CDVL-NEXT: vpand %ymm1, %ymm0, %ymm2 ; AVX512CDVL-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] @@ -812,7 +826,8 @@ ; AVX512CD-NEXT: vpxor %ymm1, %ymm1, %ymm1 ; AVX512CD-NEXT: vpsubb %ymm0, %ymm1, %ymm1 ; AVX512CD-NEXT: vpand %ymm1, %ymm0, %ymm0 -; AVX512CD-NEXT: vpsubb {{.*}}(%rip), %ymm0, %ymm0 +; AVX512CD-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 +; AVX512CD-NEXT: vpaddb %ymm1, %ymm0, %ymm0 ; AVX512CD-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] ; AVX512CD-NEXT: vpand %ymm1, %ymm0, %ymm2 ; AVX512CD-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] @@ -828,7 +843,8 @@ ; AVX512VPOPCNTDQ-NEXT: vpxor %ymm1, %ymm1, %ymm1 ; AVX512VPOPCNTDQ-NEXT: vpsubb %ymm0, %ymm1, %ymm1 ; AVX512VPOPCNTDQ-NEXT: vpand %ymm1, %ymm0, %ymm0 -; AVX512VPOPCNTDQ-NEXT: vpsubb {{.*}}(%rip), %ymm0, %ymm0 +; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 +; AVX512VPOPCNTDQ-NEXT: vpaddb %ymm1, %ymm0, %ymm0 ; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] ; AVX512VPOPCNTDQ-NEXT: vpand %ymm1, %ymm0, %ymm2 ; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] @@ -844,7 +860,8 @@ ; X32-AVX-NEXT: vpxor %ymm1, %ymm1, %ymm1 ; X32-AVX-NEXT: vpsubb %ymm0, %ymm1, %ymm1 ; X32-AVX-NEXT: vpand %ymm1, %ymm0, %ymm0 -; X32-AVX-NEXT: vpsubb {{\.LCPI.*}}, %ymm0, %ymm0 +; X32-AVX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 +; X32-AVX-NEXT: vpaddb %ymm1, %ymm0, %ymm0 ; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] ; X32-AVX-NEXT: vpand %ymm1, %ymm0, %ymm2 ; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] @@ -865,8 +882,8 @@ ; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2 ; AVX1-NEXT: vpsubb %xmm1, %xmm2, %xmm3 ; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1] -; AVX1-NEXT: vpsubb %xmm3, %xmm1, %xmm1 +; AVX1-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3 +; AVX1-NEXT: vpaddb %xmm3, %xmm1, %xmm1 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] ; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm5 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] @@ -877,7 +894,7 @@ ; AVX1-NEXT: vpaddb %xmm5, %xmm1, %xmm1 ; AVX1-NEXT: vpsubb %xmm0, %xmm2, %xmm2 ; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0 -; AVX1-NEXT: vpsubb %xmm3, %xmm0, %xmm0 +; AVX1-NEXT: vpaddb %xmm3, 
%xmm0, %xmm0 ; AVX1-NEXT: vpand %xmm4, %xmm0, %xmm2 ; AVX1-NEXT: vpshufb %xmm2, %xmm6, %xmm2 ; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm0 @@ -892,7 +909,8 @@ ; AVX2-NEXT: vpxor %ymm1, %ymm1, %ymm1 ; AVX2-NEXT: vpsubb %ymm0, %ymm1, %ymm1 ; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0 -; AVX2-NEXT: vpsubb {{.*}}(%rip), %ymm0, %ymm0 +; AVX2-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 +; AVX2-NEXT: vpaddb %ymm1, %ymm0, %ymm0 ; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] ; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm2 ; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] @@ -908,7 +926,8 @@ ; AVX512CDVL-NEXT: vpxor %ymm1, %ymm1, %ymm1 ; AVX512CDVL-NEXT: vpsubb %ymm0, %ymm1, %ymm1 ; AVX512CDVL-NEXT: vpand %ymm1, %ymm0, %ymm0 -; AVX512CDVL-NEXT: vpsubb {{.*}}(%rip), %ymm0, %ymm0 +; AVX512CDVL-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 +; AVX512CDVL-NEXT: vpaddb %ymm1, %ymm0, %ymm0 ; AVX512CDVL-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] ; AVX512CDVL-NEXT: vpand %ymm1, %ymm0, %ymm2 ; AVX512CDVL-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] @@ -924,7 +943,8 @@ ; AVX512CD-NEXT: vpxor %ymm1, %ymm1, %ymm1 ; AVX512CD-NEXT: vpsubb %ymm0, %ymm1, %ymm1 ; AVX512CD-NEXT: vpand %ymm1, %ymm0, %ymm0 -; AVX512CD-NEXT: vpsubb {{.*}}(%rip), %ymm0, %ymm0 +; AVX512CD-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 +; AVX512CD-NEXT: vpaddb %ymm1, %ymm0, %ymm0 ; AVX512CD-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] ; AVX512CD-NEXT: vpand %ymm1, %ymm0, %ymm2 ; AVX512CD-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] @@ -940,7 +960,8 @@ ; AVX512VPOPCNTDQ-NEXT: vpxor %ymm1, %ymm1, %ymm1 ; AVX512VPOPCNTDQ-NEXT: vpsubb %ymm0, %ymm1, %ymm1 ; AVX512VPOPCNTDQ-NEXT: vpand %ymm1, %ymm0, %ymm0 -; AVX512VPOPCNTDQ-NEXT: vpsubb {{.*}}(%rip), %ymm0, %ymm0 +; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 +; AVX512VPOPCNTDQ-NEXT: vpaddb %ymm1, %ymm0, %ymm0 ; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] ; AVX512VPOPCNTDQ-NEXT: vpand %ymm1, %ymm0, %ymm2 ; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] @@ -956,7 +977,8 @@ ; X32-AVX-NEXT: vpxor %ymm1, %ymm1, %ymm1 ; X32-AVX-NEXT: vpsubb %ymm0, %ymm1, %ymm1 ; X32-AVX-NEXT: vpand %ymm1, %ymm0, %ymm0 -; X32-AVX-NEXT: vpsubb {{\.LCPI.*}}, %ymm0, %ymm0 +; X32-AVX-NEXT: vpcmpeqd %ymm1, %ymm1, %ymm1 +; X32-AVX-NEXT: vpaddb %ymm1, %ymm0, %ymm0 ; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] ; X32-AVX-NEXT: vpand %ymm1, %ymm0, %ymm2 ; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] Index: test/CodeGen/X86/vector-tzcnt-512.ll =================================================================== --- test/CodeGen/X86/vector-tzcnt-512.ll +++ test/CodeGen/X86/vector-tzcnt-512.ll @@ -10,7 +10,8 @@ ; AVX512CD-NEXT: vpxord %zmm1, %zmm1, %zmm1 ; AVX512CD-NEXT: vpsubq %zmm0, %zmm1, %zmm1 ; AVX512CD-NEXT: vpandq %zmm1, %zmm0, %zmm0 -; AVX512CD-NEXT: vpsubq {{.*}}(%rip){1to8}, %zmm0, %zmm0 +; AVX512CD-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 +; 
AVX512CD-NEXT: vpaddq %zmm1, %zmm0, %zmm0 ; AVX512CD-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512CD-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] ; AVX512CD-NEXT: vpand %ymm2, %ymm1, %ymm3 @@ -37,7 +38,8 @@ ; AVX512CDBW-NEXT: vpxord %zmm1, %zmm1, %zmm1 ; AVX512CDBW-NEXT: vpsubq %zmm0, %zmm1, %zmm2 ; AVX512CDBW-NEXT: vpandq %zmm2, %zmm0, %zmm0 -; AVX512CDBW-NEXT: vpsubq {{.*}}(%rip){1to8}, %zmm0, %zmm0 +; AVX512CDBW-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 +; AVX512CDBW-NEXT: vpaddq %zmm2, %zmm0, %zmm0 ; AVX512CDBW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] ; AVX512CDBW-NEXT: vpandq %zmm2, %zmm0, %zmm3 ; AVX512CDBW-NEXT: vmovdqu8 {{.*#+}} zmm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] @@ -54,7 +56,8 @@ ; AVX512BW-NEXT: vpxord %zmm1, %zmm1, %zmm1 ; AVX512BW-NEXT: vpsubq %zmm0, %zmm1, %zmm2 ; AVX512BW-NEXT: vpandq %zmm2, %zmm0, %zmm0 -; AVX512BW-NEXT: vpsubq {{.*}}(%rip){1to8}, %zmm0, %zmm0 +; AVX512BW-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 +; AVX512BW-NEXT: vpaddq %zmm2, %zmm0, %zmm0 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] ; AVX512BW-NEXT: vpandq %zmm2, %zmm0, %zmm3 ; AVX512BW-NEXT: vmovdqu8 {{.*#+}} zmm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] @@ -71,7 +74,8 @@ ; AVX512VPOPCNTDQ-NEXT: vpxord %zmm1, %zmm1, %zmm1 ; AVX512VPOPCNTDQ-NEXT: vpsubq %zmm0, %zmm1, %zmm1 ; AVX512VPOPCNTDQ-NEXT: vpandq %zmm1, %zmm0, %zmm0 -; AVX512VPOPCNTDQ-NEXT: vpsubq {{.*}}(%rip){1to8}, %zmm0, %zmm0 +; AVX512VPOPCNTDQ-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 +; AVX512VPOPCNTDQ-NEXT: vpaddq %zmm1, %zmm0, %zmm0 ; AVX512VPOPCNTDQ-NEXT: vpopcntq %zmm0, %zmm0 ; AVX512VPOPCNTDQ-NEXT: retq %out = call <8 x i64> @llvm.cttz.v8i64(<8 x i64> %in, i1 0) @@ -104,7 +108,8 @@ ; AVX512BW-NEXT: vpxord %zmm1, %zmm1, %zmm1 ; AVX512BW-NEXT: vpsubq %zmm0, %zmm1, %zmm2 ; AVX512BW-NEXT: vpandq %zmm2, %zmm0, %zmm0 -; AVX512BW-NEXT: vpsubq {{.*}}(%rip){1to8}, %zmm0, %zmm0 +; AVX512BW-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2 +; AVX512BW-NEXT: vpaddq %zmm2, %zmm0, %zmm0 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] ; AVX512BW-NEXT: vpandq %zmm2, %zmm0, %zmm3 ; AVX512BW-NEXT: vmovdqu8 {{.*#+}} zmm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4] @@ -121,7 +126,8 @@ ; AVX512VPOPCNTDQ-NEXT: vpxord %zmm1, %zmm1, %zmm1 ; AVX512VPOPCNTDQ-NEXT: vpsubq %zmm0, %zmm1, %zmm1 ; AVX512VPOPCNTDQ-NEXT: vpandq %zmm1, %zmm0, %zmm0 -; AVX512VPOPCNTDQ-NEXT: vpsubq {{.*}}(%rip){1to8}, %zmm0, %zmm0 +; AVX512VPOPCNTDQ-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1 +; AVX512VPOPCNTDQ-NEXT: vpaddq %zmm1, %zmm0, %zmm0 ; AVX512VPOPCNTDQ-NEXT: vpopcntq %zmm0, %zmm0 ; AVX512VPOPCNTDQ-NEXT: retq %out = call <8 x i64> @llvm.cttz.v8i64(<8 x i64> %in, i1 -1) @@ 
-134,7 +140,8 @@
 ; AVX512CD-NEXT: vpxord %zmm1, %zmm1, %zmm1
 ; AVX512CD-NEXT: vpsubd %zmm0, %zmm1, %zmm1
 ; AVX512CD-NEXT: vpandd %zmm1, %zmm0, %zmm0
-; AVX512CD-NEXT: vpsubd {{.*}}(%rip){1to16}, %zmm0, %zmm0
+; AVX512CD-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512CD-NEXT: vpaddd %zmm1, %zmm0, %zmm0
 ; AVX512CD-NEXT: vextracti64x4 $1, %zmm0, %ymm1
 ; AVX512CD-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
 ; AVX512CD-NEXT: vpand %ymm2, %ymm1, %ymm3
@@ -169,7 +176,8 @@
 ; AVX512CDBW-NEXT: vpxord %zmm1, %zmm1, %zmm1
 ; AVX512CDBW-NEXT: vpsubd %zmm0, %zmm1, %zmm2
 ; AVX512CDBW-NEXT: vpandd %zmm2, %zmm0, %zmm0
-; AVX512CDBW-NEXT: vpsubd {{.*}}(%rip){1to16}, %zmm0, %zmm0
+; AVX512CDBW-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2
+; AVX512CDBW-NEXT: vpaddd %zmm2, %zmm0, %zmm0
 ; AVX512CDBW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
 ; AVX512CDBW-NEXT: vpandq %zmm2, %zmm0, %zmm3
 ; AVX512CDBW-NEXT: vmovdqu8 {{.*#+}} zmm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -190,7 +198,8 @@
 ; AVX512BW-NEXT: vpxord %zmm1, %zmm1, %zmm1
 ; AVX512BW-NEXT: vpsubd %zmm0, %zmm1, %zmm2
 ; AVX512BW-NEXT: vpandd %zmm2, %zmm0, %zmm0
-; AVX512BW-NEXT: vpsubd {{.*}}(%rip){1to16}, %zmm0, %zmm0
+; AVX512BW-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2
+; AVX512BW-NEXT: vpaddd %zmm2, %zmm0, %zmm0
 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
 ; AVX512BW-NEXT: vpandq %zmm2, %zmm0, %zmm3
 ; AVX512BW-NEXT: vmovdqu8 {{.*#+}} zmm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -211,7 +220,8 @@
 ; AVX512VPOPCNTDQ-NEXT: vpxord %zmm1, %zmm1, %zmm1
 ; AVX512VPOPCNTDQ-NEXT: vpsubd %zmm0, %zmm1, %zmm1
 ; AVX512VPOPCNTDQ-NEXT: vpandd %zmm1, %zmm0, %zmm0
-; AVX512VPOPCNTDQ-NEXT: vpsubd {{.*}}(%rip){1to16}, %zmm0, %zmm0
+; AVX512VPOPCNTDQ-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512VPOPCNTDQ-NEXT: vpaddd %zmm1, %zmm0, %zmm0
 ; AVX512VPOPCNTDQ-NEXT: vpopcntd %zmm0, %zmm0
 ; AVX512VPOPCNTDQ-NEXT: retq
 %out = call <16 x i32> @llvm.cttz.v16i32(<16 x i32> %in, i1 0)
@@ -244,7 +254,8 @@
 ; AVX512BW-NEXT: vpxord %zmm1, %zmm1, %zmm1
 ; AVX512BW-NEXT: vpsubd %zmm0, %zmm1, %zmm2
 ; AVX512BW-NEXT: vpandd %zmm2, %zmm0, %zmm0
-; AVX512BW-NEXT: vpsubd {{.*}}(%rip){1to16}, %zmm0, %zmm0
+; AVX512BW-NEXT: vpternlogd $255, %zmm2, %zmm2, %zmm2
+; AVX512BW-NEXT: vpaddd %zmm2, %zmm0, %zmm0
 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
 ; AVX512BW-NEXT: vpandq %zmm2, %zmm0, %zmm3
 ; AVX512BW-NEXT: vmovdqu8 {{.*#+}} zmm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -265,7 +276,8 @@
 ; AVX512VPOPCNTDQ-NEXT: vpxord %zmm1, %zmm1, %zmm1
 ; AVX512VPOPCNTDQ-NEXT: vpsubd %zmm0, %zmm1, %zmm1
 ; AVX512VPOPCNTDQ-NEXT: vpandd %zmm1, %zmm0, %zmm0
-; AVX512VPOPCNTDQ-NEXT: vpsubd {{.*}}(%rip){1to16}, %zmm0, %zmm0
+; AVX512VPOPCNTDQ-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512VPOPCNTDQ-NEXT: vpaddd %zmm1, %zmm0, %zmm0
 ; AVX512VPOPCNTDQ-NEXT: vpopcntd %zmm0, %zmm0
 ; AVX512VPOPCNTDQ-NEXT: retq
 %out = call <16 x i32> @llvm.cttz.v16i32(<16 x i32> %in, i1 -1)
@@ -278,8 +290,8 @@
 ; AVX512CD-NEXT: vpxor %ymm2, %ymm2, %ymm2
 ; AVX512CD-NEXT: vpsubw %ymm0, %ymm2, %ymm3
 ; AVX512CD-NEXT: vpand %ymm3, %ymm0, %ymm0
-; AVX512CD-NEXT: vmovdqa {{.*#+}} ymm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512CD-NEXT: vpsubw %ymm3, %ymm0, %ymm0
+; AVX512CD-NEXT: vpcmpeqd %ymm3, %ymm3, %ymm3
+; AVX512CD-NEXT: vpaddw %ymm3, %ymm0, %ymm0
 ; AVX512CD-NEXT: vmovdqa {{.*#+}} ymm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
 ; AVX512CD-NEXT: vpand %ymm4, %ymm0, %ymm5
 ; AVX512CD-NEXT: vmovdqa {{.*#+}} ymm6 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -293,7 +305,7 @@
 ; AVX512CD-NEXT: vpsrlw $8, %ymm0, %ymm0
 ; AVX512CD-NEXT: vpsubw %ymm1, %ymm2, %ymm2
 ; AVX512CD-NEXT: vpand %ymm2, %ymm1, %ymm1
-; AVX512CD-NEXT: vpsubw %ymm3, %ymm1, %ymm1
+; AVX512CD-NEXT: vpaddw %ymm3, %ymm1, %ymm1
 ; AVX512CD-NEXT: vpand %ymm4, %ymm1, %ymm2
 ; AVX512CD-NEXT: vpshufb %ymm2, %ymm6, %ymm2
 ; AVX512CD-NEXT: vpsrlw $4, %ymm1, %ymm1
@@ -310,7 +322,8 @@
 ; AVX512CDBW-NEXT: vpxord %zmm1, %zmm1, %zmm1
 ; AVX512CDBW-NEXT: vpsubw %zmm0, %zmm1, %zmm1
 ; AVX512CDBW-NEXT: vpandq %zmm1, %zmm0, %zmm0
-; AVX512CDBW-NEXT: vpsubw {{.*}}(%rip), %zmm0, %zmm0
+; AVX512CDBW-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512CDBW-NEXT: vpaddw %zmm1, %zmm0, %zmm0
 ; AVX512CDBW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
 ; AVX512CDBW-NEXT: vpandq %zmm1, %zmm0, %zmm2
 ; AVX512CDBW-NEXT: vmovdqu8 {{.*#+}} zmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -329,7 +342,8 @@
 ; AVX512BW-NEXT: vpxord %zmm1, %zmm1, %zmm1
 ; AVX512BW-NEXT: vpsubw %zmm0, %zmm1, %zmm1
 ; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vpsubw {{.*}}(%rip), %zmm0, %zmm0
+; AVX512BW-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512BW-NEXT: vpaddw %zmm1, %zmm0, %zmm0
 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
 ; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm2
 ; AVX512BW-NEXT: vmovdqu8 {{.*#+}} zmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -348,8 +362,8 @@
 ; AVX512VPOPCNTDQ-NEXT: vpxor %ymm2, %ymm2, %ymm2
 ; AVX512VPOPCNTDQ-NEXT: vpsubw %ymm0, %ymm2, %ymm3
 ; AVX512VPOPCNTDQ-NEXT: vpand %ymm3, %ymm0, %ymm0
-; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} ymm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512VPOPCNTDQ-NEXT: vpsubw %ymm3, %ymm0, %ymm0
+; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %ymm3, %ymm3, %ymm3
+; AVX512VPOPCNTDQ-NEXT: vpaddw %ymm3, %ymm0, %ymm0
 ; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} ymm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
 ; AVX512VPOPCNTDQ-NEXT: vpand %ymm4, %ymm0, %ymm5
 ; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} ymm6 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -363,7 +377,7 @@
 ; AVX512VPOPCNTDQ-NEXT: vpsrlw $8, %ymm0, %ymm0
 ; AVX512VPOPCNTDQ-NEXT: vpsubw %ymm1, %ymm2, %ymm2
 ; AVX512VPOPCNTDQ-NEXT: vpand %ymm2, %ymm1, %ymm1
-; AVX512VPOPCNTDQ-NEXT: vpsubw %ymm3, %ymm1, %ymm1
+; AVX512VPOPCNTDQ-NEXT: vpaddw %ymm3, %ymm1, %ymm1
 ; AVX512VPOPCNTDQ-NEXT: vpand %ymm4, %ymm1, %ymm2
 ; AVX512VPOPCNTDQ-NEXT: vpshufb %ymm2, %ymm6, %ymm2
 ; AVX512VPOPCNTDQ-NEXT: vpsrlw $4, %ymm1, %ymm1
@@ -384,8 +398,8 @@
 ; AVX512CD-NEXT: vpxor %ymm2, %ymm2, %ymm2
 ; AVX512CD-NEXT: vpsubw %ymm0, %ymm2, %ymm3
 ; AVX512CD-NEXT: vpand %ymm3, %ymm0, %ymm0
-; AVX512CD-NEXT: vmovdqa {{.*#+}} ymm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512CD-NEXT: vpsubw %ymm3, %ymm0, %ymm0
+; AVX512CD-NEXT: vpcmpeqd %ymm3, %ymm3, %ymm3
+; AVX512CD-NEXT: vpaddw %ymm3, %ymm0, %ymm0
 ; AVX512CD-NEXT: vmovdqa {{.*#+}} ymm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
 ; AVX512CD-NEXT: vpand %ymm4, %ymm0, %ymm5
 ; AVX512CD-NEXT: vmovdqa {{.*#+}} ymm6 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -399,7 +413,7 @@
 ; AVX512CD-NEXT: vpsrlw $8, %ymm0, %ymm0
 ; AVX512CD-NEXT: vpsubw %ymm1, %ymm2, %ymm2
 ; AVX512CD-NEXT: vpand %ymm2, %ymm1, %ymm1
-; AVX512CD-NEXT: vpsubw %ymm3, %ymm1, %ymm1
+; AVX512CD-NEXT: vpaddw %ymm3, %ymm1, %ymm1
 ; AVX512CD-NEXT: vpand %ymm4, %ymm1, %ymm2
 ; AVX512CD-NEXT: vpshufb %ymm2, %ymm6, %ymm2
 ; AVX512CD-NEXT: vpsrlw $4, %ymm1, %ymm1
@@ -416,7 +430,8 @@
 ; AVX512CDBW-NEXT: vpxord %zmm1, %zmm1, %zmm1
 ; AVX512CDBW-NEXT: vpsubw %zmm0, %zmm1, %zmm1
 ; AVX512CDBW-NEXT: vpandq %zmm1, %zmm0, %zmm0
-; AVX512CDBW-NEXT: vpsubw {{.*}}(%rip), %zmm0, %zmm0
+; AVX512CDBW-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512CDBW-NEXT: vpaddw %zmm1, %zmm0, %zmm0
 ; AVX512CDBW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
 ; AVX512CDBW-NEXT: vpandq %zmm1, %zmm0, %zmm2
 ; AVX512CDBW-NEXT: vmovdqu8 {{.*#+}} zmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -435,7 +450,8 @@
 ; AVX512BW-NEXT: vpxord %zmm1, %zmm1, %zmm1
 ; AVX512BW-NEXT: vpsubw %zmm0, %zmm1, %zmm1
 ; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vpsubw {{.*}}(%rip), %zmm0, %zmm0
+; AVX512BW-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512BW-NEXT: vpaddw %zmm1, %zmm0, %zmm0
 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
 ; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm2
 ; AVX512BW-NEXT: vmovdqu8 {{.*#+}} zmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -454,8 +470,8 @@
 ; AVX512VPOPCNTDQ-NEXT: vpxor %ymm2, %ymm2, %ymm2
 ; AVX512VPOPCNTDQ-NEXT: vpsubw %ymm0, %ymm2, %ymm3
 ; AVX512VPOPCNTDQ-NEXT: vpand %ymm3, %ymm0, %ymm0
-; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} ymm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512VPOPCNTDQ-NEXT: vpsubw %ymm3, %ymm0, %ymm0
+; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %ymm3, %ymm3, %ymm3
+; AVX512VPOPCNTDQ-NEXT: vpaddw %ymm3, %ymm0, %ymm0
 ; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} ymm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
 ; AVX512VPOPCNTDQ-NEXT: vpand %ymm4, %ymm0, %ymm5
 ; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} ymm6 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -469,7 +485,7 @@
 ; AVX512VPOPCNTDQ-NEXT: vpsrlw $8, %ymm0, %ymm0
 ; AVX512VPOPCNTDQ-NEXT: vpsubw %ymm1, %ymm2, %ymm2
 ; AVX512VPOPCNTDQ-NEXT: vpand %ymm2, %ymm1, %ymm1
-; AVX512VPOPCNTDQ-NEXT: vpsubw %ymm3, %ymm1, %ymm1
+; AVX512VPOPCNTDQ-NEXT: vpaddw %ymm3, %ymm1, %ymm1
 ; AVX512VPOPCNTDQ-NEXT: vpand %ymm4, %ymm1, %ymm2
 ; AVX512VPOPCNTDQ-NEXT: vpshufb %ymm2, %ymm6, %ymm2
 ; AVX512VPOPCNTDQ-NEXT: vpsrlw $4, %ymm1, %ymm1
@@ -490,8 +506,8 @@
 ; AVX512CD-NEXT: vpxor %ymm2, %ymm2, %ymm2
 ; AVX512CD-NEXT: vpsubb %ymm0, %ymm2, %ymm3
 ; AVX512CD-NEXT: vpand %ymm3, %ymm0, %ymm0
-; AVX512CD-NEXT: vmovdqa {{.*#+}} ymm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512CD-NEXT: vpsubb %ymm3, %ymm0, %ymm0
+; AVX512CD-NEXT: vpcmpeqd %ymm3, %ymm3, %ymm3
+; AVX512CD-NEXT: vpaddb %ymm3, %ymm0, %ymm0
 ; AVX512CD-NEXT: vmovdqa {{.*#+}} ymm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
 ; AVX512CD-NEXT: vpand %ymm4, %ymm0, %ymm5
 ; AVX512CD-NEXT: vmovdqa {{.*#+}} ymm6 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -502,7 +518,7 @@
 ; AVX512CD-NEXT: vpaddb %ymm5, %ymm0, %ymm0
 ; AVX512CD-NEXT: vpsubb %ymm1, %ymm2, %ymm2
 ; AVX512CD-NEXT: vpand %ymm2, %ymm1, %ymm1
-; AVX512CD-NEXT: vpsubb %ymm3, %ymm1, %ymm1
+; AVX512CD-NEXT: vpaddb %ymm3, %ymm1, %ymm1
 ; AVX512CD-NEXT: vpand %ymm4, %ymm1, %ymm2
 ; AVX512CD-NEXT: vpshufb %ymm2, %ymm6, %ymm2
 ; AVX512CD-NEXT: vpsrlw $4, %ymm1, %ymm1
@@ -516,7 +532,8 @@
 ; AVX512CDBW-NEXT: vpxord %zmm1, %zmm1, %zmm1
 ; AVX512CDBW-NEXT: vpsubb %zmm0, %zmm1, %zmm1
 ; AVX512CDBW-NEXT: vpandq %zmm1, %zmm0, %zmm0
-; AVX512CDBW-NEXT: vpsubb {{.*}}(%rip), %zmm0, %zmm0
+; AVX512CDBW-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512CDBW-NEXT: vpaddb %zmm1, %zmm0, %zmm0
 ; AVX512CDBW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
 ; AVX512CDBW-NEXT: vpandq %zmm1, %zmm0, %zmm2
 ; AVX512CDBW-NEXT: vmovdqu8 {{.*#+}} zmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -532,7 +549,8 @@
 ; AVX512BW-NEXT: vpxord %zmm1, %zmm1, %zmm1
 ; AVX512BW-NEXT: vpsubb %zmm0, %zmm1, %zmm1
 ; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vpsubb {{.*}}(%rip), %zmm0, %zmm0
+; AVX512BW-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512BW-NEXT: vpaddb %zmm1, %zmm0, %zmm0
 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
 ; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm2
 ; AVX512BW-NEXT: vmovdqu8 {{.*#+}} zmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -548,8 +566,8 @@
 ; AVX512VPOPCNTDQ-NEXT: vpxor %ymm2, %ymm2, %ymm2
 ; AVX512VPOPCNTDQ-NEXT: vpsubb %ymm0, %ymm2, %ymm3
 ; AVX512VPOPCNTDQ-NEXT: vpand %ymm3, %ymm0, %ymm0
-; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} ymm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512VPOPCNTDQ-NEXT: vpsubb %ymm3, %ymm0, %ymm0
+; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %ymm3, %ymm3, %ymm3
+; AVX512VPOPCNTDQ-NEXT: vpaddb %ymm3, %ymm0, %ymm0
 ; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} ymm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
 ; AVX512VPOPCNTDQ-NEXT: vpand %ymm4, %ymm0, %ymm5
 ; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} ymm6 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -560,7 +578,7 @@
 ; AVX512VPOPCNTDQ-NEXT: vpaddb %ymm5, %ymm0, %ymm0
 ; AVX512VPOPCNTDQ-NEXT: vpsubb %ymm1, %ymm2, %ymm2
 ; AVX512VPOPCNTDQ-NEXT: vpand %ymm2, %ymm1, %ymm1
-; AVX512VPOPCNTDQ-NEXT: vpsubb %ymm3, %ymm1, %ymm1
+; AVX512VPOPCNTDQ-NEXT: vpaddb %ymm3, %ymm1, %ymm1
 ; AVX512VPOPCNTDQ-NEXT: vpand %ymm4, %ymm1, %ymm2
 ; AVX512VPOPCNTDQ-NEXT: vpshufb %ymm2, %ymm6, %ymm2
 ; AVX512VPOPCNTDQ-NEXT: vpsrlw $4, %ymm1, %ymm1
@@ -578,8 +596,8 @@
 ; AVX512CD-NEXT: vpxor %ymm2, %ymm2, %ymm2
 ; AVX512CD-NEXT: vpsubb %ymm0, %ymm2, %ymm3
 ; AVX512CD-NEXT: vpand %ymm3, %ymm0, %ymm0
-; AVX512CD-NEXT: vmovdqa {{.*#+}} ymm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512CD-NEXT: vpsubb %ymm3, %ymm0, %ymm0
+; AVX512CD-NEXT: vpcmpeqd %ymm3, %ymm3, %ymm3
+; AVX512CD-NEXT: vpaddb %ymm3, %ymm0, %ymm0
 ; AVX512CD-NEXT: vmovdqa {{.*#+}} ymm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
 ; AVX512CD-NEXT: vpand %ymm4, %ymm0, %ymm5
 ; AVX512CD-NEXT: vmovdqa {{.*#+}} ymm6 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -590,7 +608,7 @@
 ; AVX512CD-NEXT: vpaddb %ymm5, %ymm0, %ymm0
 ; AVX512CD-NEXT: vpsubb %ymm1, %ymm2, %ymm2
 ; AVX512CD-NEXT: vpand %ymm2, %ymm1, %ymm1
-; AVX512CD-NEXT: vpsubb %ymm3, %ymm1, %ymm1
+; AVX512CD-NEXT: vpaddb %ymm3, %ymm1, %ymm1
 ; AVX512CD-NEXT: vpand %ymm4, %ymm1, %ymm2
 ; AVX512CD-NEXT: vpshufb %ymm2, %ymm6, %ymm2
 ; AVX512CD-NEXT: vpsrlw $4, %ymm1, %ymm1
@@ -604,7 +622,8 @@
 ; AVX512CDBW-NEXT: vpxord %zmm1, %zmm1, %zmm1
 ; AVX512CDBW-NEXT: vpsubb %zmm0, %zmm1, %zmm1
 ; AVX512CDBW-NEXT: vpandq %zmm1, %zmm0, %zmm0
-; AVX512CDBW-NEXT: vpsubb {{.*}}(%rip), %zmm0, %zmm0
+; AVX512CDBW-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512CDBW-NEXT: vpaddb %zmm1, %zmm0, %zmm0
 ; AVX512CDBW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
 ; AVX512CDBW-NEXT: vpandq %zmm1, %zmm0, %zmm2
 ; AVX512CDBW-NEXT: vmovdqu8 {{.*#+}} zmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -620,7 +639,8 @@
 ; AVX512BW-NEXT: vpxord %zmm1, %zmm1, %zmm1
 ; AVX512BW-NEXT: vpsubb %zmm0, %zmm1, %zmm1
 ; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm0
-; AVX512BW-NEXT: vpsubb {{.*}}(%rip), %zmm0, %zmm0
+; AVX512BW-NEXT: vpternlogd $255, %zmm1, %zmm1, %zmm1
+; AVX512BW-NEXT: vpaddb %zmm1, %zmm0, %zmm0
 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
 ; AVX512BW-NEXT: vpandq %zmm1, %zmm0, %zmm2
 ; AVX512BW-NEXT: vmovdqu8 {{.*#+}} zmm3 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -636,8 +656,8 @@
 ; AVX512VPOPCNTDQ-NEXT: vpxor %ymm2, %ymm2, %ymm2
 ; AVX512VPOPCNTDQ-NEXT: vpsubb %ymm0, %ymm2, %ymm3
 ; AVX512VPOPCNTDQ-NEXT: vpand %ymm3, %ymm0, %ymm0
-; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} ymm3 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512VPOPCNTDQ-NEXT: vpsubb %ymm3, %ymm0, %ymm0
+; AVX512VPOPCNTDQ-NEXT: vpcmpeqd %ymm3, %ymm3, %ymm3
+; AVX512VPOPCNTDQ-NEXT: vpaddb %ymm3, %ymm0, %ymm0
 ; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} ymm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
 ; AVX512VPOPCNTDQ-NEXT: vpand %ymm4, %ymm0, %ymm5
 ; AVX512VPOPCNTDQ-NEXT: vmovdqa {{.*#+}} ymm6 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
@@ -648,7 +668,7 @@
 ; AVX512VPOPCNTDQ-NEXT: vpaddb %ymm5, %ymm0, %ymm0
 ; AVX512VPOPCNTDQ-NEXT: vpsubb %ymm1, %ymm2, %ymm2
 ; AVX512VPOPCNTDQ-NEXT: vpand %ymm2, %ymm1, %ymm1
-; AVX512VPOPCNTDQ-NEXT: vpsubb %ymm3, %ymm1, %ymm1
+; AVX512VPOPCNTDQ-NEXT: vpaddb %ymm3, %ymm1, %ymm1
 ; AVX512VPOPCNTDQ-NEXT: vpand %ymm4, %ymm1, %ymm2
 ; AVX512VPOPCNTDQ-NEXT: vpshufb %ymm2, %ymm6, %ymm2
 ; AVX512VPOPCNTDQ-NEXT: vpsrlw $4, %ymm1, %ymm1
Index: test/CodeGen/X86/widen_arith-1.ll
===================================================================
--- test/CodeGen/X86/widen_arith-1.ll
+++ test/CodeGen/X86/widen_arith-1.ll
@@ -6,7 +6,7 @@
 ; CHECK: # BB#0: # %entry
 ; CHECK-NEXT: subl $12, %esp
 ; CHECK-NEXT: movl $0, (%esp)
-; CHECK-NEXT: movdqa {{.*#+}} xmm0 = <1,1,1,u>
+; CHECK-NEXT: pcmpeqd %xmm0, %xmm0
 ; CHECK-NEXT: movdqa {{.*#+}} xmm1 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
 ; CHECK-NEXT: jmp .LBB0_1
 ; CHECK-NEXT: .p2align 4, 0x90
@@ -16,7 +16,7 @@
 ; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
 ; CHECK-NEXT: movl {{[0-9]+}}(%esp), %edx
 ; CHECK-NEXT: pmovzxbd {{.*#+}} xmm2 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
-; CHECK-NEXT: paddd %xmm0, %xmm2
+; CHECK-NEXT: psubd %xmm0, %xmm2
 ; CHECK-NEXT: pextrb $8, %xmm2, 2(%ecx,%eax,4)
 ; CHECK-NEXT: pshufb %xmm1, %xmm2
 ; CHECK-NEXT: pextrw $0, %xmm2, (%ecx,%eax,4)
Index: test/CodeGen/X86/widen_arith-2.ll
===================================================================
--- test/CodeGen/X86/widen_arith-2.ll
+++ test/CodeGen/X86/widen_arith-2.ll
@@ -8,7 +8,7 @@
 ; CHECK: # BB#0: # %entry
 ; CHECK-NEXT: subl $12, %esp
 ; CHECK-NEXT: movl $0, (%esp)
-; CHECK-NEXT: movdqa {{.*#+}} xmm0 = [1,1,1,1,1,1,1,1]
+; CHECK-NEXT: pcmpeqd %xmm0, %xmm0
 ; CHECK-NEXT: movdqa {{.*#+}} xmm1 = [4,4,4,4,4,4,4,4]
 ; CHECK-NEXT: movdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
 ; CHECK-NEXT: jmp .LBB0_1
@@ -26,7 +26,7 @@
 ; CHECK-NEXT: movl (%esp), %ecx
 ; CHECK-NEXT: movl {{[0-9]+}}(%esp), %edx
 ; CHECK-NEXT: pmovzxbw {{.*#+}} xmm3 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
-; CHECK-NEXT: paddw %xmm0, %xmm3
+; CHECK-NEXT: psubw %xmm0, %xmm3
 ; CHECK-NEXT: pand %xmm1, %xmm3
 ; CHECK-NEXT: pshufb %xmm2, %xmm3
 ; CHECK-NEXT: movq %xmm3, (%edx,%ecx,8)
Index: test/CodeGen/X86/widen_arith-3.ll
===================================================================
--- test/CodeGen/X86/widen_arith-3.ll
+++ test/CodeGen/X86/widen_arith-3.ll
@@ -14,8 +14,8 @@
 ; CHECK-NEXT: andl $-8, %esp
 ; CHECK-NEXT: subl $40, %esp
 ; CHECK-NEXT: movl {{\.LCPI.*}}, %eax
-; CHECK-NEXT: movdqa {{.*#+}} xmm0 = <1,1,1,u>
 ; CHECK-NEXT: movdqa {{.*#+}} xmm1 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; CHECK-NEXT: pcmpeqd %xmm0, %xmm0
 ; CHECK-NEXT: movl $0, {{[0-9]+}}(%esp)
 ; CHECK-NEXT: movl %eax, {{[0-9]+}}(%esp)
 ; CHECK-NEXT: movw $1, {{[0-9]+}}(%esp)
@@ -29,7 +29,7 @@
 ; CHECK-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
 ; CHECK-NEXT: pmovzxwd {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
 ; CHECK-NEXT: pinsrd $2, 4(%edx,%eax,8), %xmm2
-; CHECK-NEXT: paddd %xmm0, %xmm2
+; CHECK-NEXT: psubd %xmm0, %xmm2
 ; CHECK-NEXT: pextrw $4, %xmm2, 4(%ecx,%eax,8)
 ; CHECK-NEXT: pshufb %xmm1, %xmm2
 ; CHECK-NEXT: movd %xmm2, (%ecx,%eax,8)
Index: test/CodeGen/X86/widen_cast-2.ll
===================================================================
--- test/CodeGen/X86/widen_cast-2.ll
+++ test/CodeGen/X86/widen_cast-2.ll
@@ -7,8 +7,7 @@
 ; CHECK: # BB#0: # %entry
 ; CHECK-NEXT: pushl %eax
 ; CHECK-NEXT: movl $0, (%esp)
-; CHECK-NEXT: movdqa {{.*#+}} xmm0 = [1,1,1,1,1,1,1,1]
-; CHECK-NEXT: movdqa {{.*#+}} xmm1 = <1,1,1,1,1,1,u,u>
+; CHECK-NEXT: pcmpeqd %xmm0, %xmm0
 ; CHECK-NEXT: cmpl $3, (%esp)
 ; CHECK-NEXT: jg .LBB0_3
 ; CHECK-NEXT: .p2align 4, 0x90
@@ -18,14 +17,14 @@
 ; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
 ; CHECK-NEXT: shll $5, %eax
 ; CHECK-NEXT: movl {{[0-9]+}}(%esp), %edx
-; CHECK-NEXT: movdqa (%edx,%eax), %xmm2
-; CHECK-NEXT: paddw %xmm0, %xmm2
-; CHECK-NEXT: movdqa 16(%edx,%eax), %xmm3
-; CHECK-NEXT: paddw %xmm1, %xmm3
-; CHECK-NEXT: pextrd $2, %xmm3, 24(%ecx,%eax)
-; CHECK-NEXT: pextrd $1, %xmm3, 20(%ecx,%eax)
-; CHECK-NEXT: movd %xmm3, 16(%ecx,%eax)
-; CHECK-NEXT: movdqa %xmm2, (%ecx,%eax)
+; CHECK-NEXT: movdqa (%edx,%eax), %xmm1
+; CHECK-NEXT: movdqa 16(%edx,%eax), %xmm2
+; CHECK-NEXT: psubw %xmm0, %xmm1
+; CHECK-NEXT: psubw %xmm0, %xmm2
+; CHECK-NEXT: pextrd $2, %xmm2, 24(%ecx,%eax)
+; CHECK-NEXT: pextrd $1, %xmm2, 20(%ecx,%eax)
+; CHECK-NEXT: movd %xmm2, 16(%ecx,%eax)
+; CHECK-NEXT: movdqa %xmm1, (%ecx,%eax)
 ; CHECK-NEXT: incl (%esp)
 ; CHECK-NEXT: cmpl $3, (%esp)
 ; CHECK-NEXT: jle .LBB0_2
Index: test/CodeGen/X86/widen_cast-3.ll
===================================================================
--- test/CodeGen/X86/widen_cast-3.ll
+++ test/CodeGen/X86/widen_cast-3.ll
@@ -8,7 +8,8 @@
 ; X86-LABEL: convert:
 ; X86: # BB#0:
 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: paddd {{\.LCPI.*}}, %xmm0
+; X86-NEXT: pcmpeqd %xmm1, %xmm1
+; X86-NEXT: psubd %xmm1, %xmm0
 ; X86-NEXT: pextrd $2, %xmm0, 8(%eax)
 ; X86-NEXT: pextrd $1, %xmm0, 4(%eax)
 ; X86-NEXT: movd %xmm0, (%eax)
@@ -16,7 +17,8 @@
 ;
 ; X64-LABEL: convert:
 ; X64: # BB#0:
-; X64-NEXT: paddd {{.*}}(%rip), %xmm0
+; X64-NEXT: pcmpeqd %xmm1, %xmm1
+; X64-NEXT: psubd %xmm1, %xmm0
 ; X64-NEXT: pextrd $2, %xmm0, 8(%rdi)
 ; X64-NEXT: movq %xmm0, (%rdi)
 ; X64-NEXT: retq
Index: test/CodeGen/X86/widen_cast-4.ll
===================================================================
--- test/CodeGen/X86/widen_cast-4.ll
+++ test/CodeGen/X86/widen_cast-4.ll
@@ -9,7 +9,7 @@
 ; NARROW: # BB#0: # %entry
 ; NARROW-NEXT: subl $12, %esp
 ; NARROW-NEXT: movl $0, (%esp)
-; NARROW-NEXT: movdqa {{.*#+}} xmm0 = [1,1,1,1,1,1,1,1]
+; NARROW-NEXT: pcmpeqd %xmm0, %xmm0
 ; NARROW-NEXT: movdqa {{.*#+}} xmm1 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
 ; NARROW-NEXT: jmp .LBB0_1
 ; NARROW-NEXT: .p2align 4, 0x90
@@ -26,7 +26,7 @@
 ; NARROW-NEXT: movl (%esp), %ecx
 ; NARROW-NEXT: movl {{[0-9]+}}(%esp), %edx
 ; NARROW-NEXT: pmovzxbw {{.*#+}} xmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
-; NARROW-NEXT: paddw %xmm0, %xmm2
+; NARROW-NEXT: psubw %xmm0, %xmm2
 ; NARROW-NEXT: psllw $8, %xmm2
 ; NARROW-NEXT: psraw $8, %xmm2
 ; NARROW-NEXT: psraw $2, %xmm2
@@ -46,7 +46,7 @@
 ; WIDE: # BB#0: # %entry
 ; WIDE-NEXT: subl $12, %esp
 ; WIDE-NEXT: movl $0, (%esp)
-; WIDE-NEXT: movdqa {{.*#+}} xmm0 = <1,1,1,1,1,1,1,1,u,u,u,u,u,u,u,u>
+; WIDE-NEXT: pcmpeqd %xmm0, %xmm0
 ; WIDE-NEXT: movdqa {{.*#+}} xmm1 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
 ; WIDE-NEXT: movdqa {{.*#+}} xmm2 = [32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32]
 ; WIDE-NEXT: jmp .LBB0_1
@@ -65,7 +65,7 @@
 ; WIDE-NEXT: movl {{[0-9]+}}(%esp), %edx
 ; WIDE-NEXT: movd {{.*#+}} xmm3 = mem[0],zero,zero,zero
 ; WIDE-NEXT: pinsrd $1, 4(%eax,%ecx,8), %xmm3
-; WIDE-NEXT: paddb %xmm0, %xmm3
+; WIDE-NEXT: psubb %xmm0, %xmm3
 ; WIDE-NEXT: psrlw $2, %xmm3
 ; WIDE-NEXT: pand %xmm1, %xmm3
 ; WIDE-NEXT: pxor %xmm2, %xmm3
Index: test/CodeGen/X86/widen_conv-1.ll
===================================================================
--- test/CodeGen/X86/widen_conv-1.ll
+++ test/CodeGen/X86/widen_conv-1.ll
@@ -35,7 +35,8 @@
 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT: movdqa (%ecx), %xmm0
-; X86-NEXT: paddd {{\.LCPI.*}}, %xmm0
+; X86-NEXT: pcmpeqd %xmm1, %xmm1
+; X86-NEXT: psubd %xmm1, %xmm0
 ; X86-NEXT: pextrb $8, %xmm0, 2(%eax)
 ; X86-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
 ; X86-NEXT: pextrw $0, %xmm0, (%eax)
@@ -45,7 +46,8 @@
 ; X64-LABEL: convert_v3i32_to_v3i8:
 ; X64: # BB#0: # %entry
 ; X64-NEXT: movdqa (%rsi), %xmm0
-; X64-NEXT: paddd {{.*}}(%rip), %xmm0
+; X64-NEXT: pcmpeqd %xmm1, %xmm1
+; X64-NEXT: psubd %xmm1, %xmm0
 ; X64-NEXT: pextrb $8, %xmm0, 2(%rdi)
 ; X64-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
 ; X64-NEXT: pextrw $0, %xmm0, (%rdi)
@@ -70,7 +72,8 @@
 ; X86-NEXT: movl 8(%ebp), %eax
 ; X86-NEXT: movl 12(%ebp), %ecx
 ; X86-NEXT: movdqa (%ecx), %xmm0
-; X86-NEXT: paddw {{\.LCPI.*}}, %xmm0
+; X86-NEXT: pcmpeqd %xmm1, %xmm1
+; X86-NEXT: psubw %xmm1, %xmm0
 ; X86-NEXT: pextrb $8, %xmm0, 4(%eax)
 ; X86-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
 ; X86-NEXT: movd %xmm0, (%eax)
@@ -81,7 +84,8 @@
 ; X64-LABEL: convert_v5i16_to_v5i8:
 ; X64: # BB#0: # %entry
 ; X64-NEXT: movdqa (%rsi), %xmm0
-; X64-NEXT: paddw {{.*}}(%rip), %xmm0
+; X64-NEXT: pcmpeqd %xmm1, %xmm1
+; X64-NEXT: psubw %xmm1, %xmm0
 ; X64-NEXT: pextrb $8, %xmm0, 4(%rdi)
 ; X64-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
 ; X64-NEXT: movd %xmm0, (%rdi)