Index: lib/CodeGen/SelectionDAG/DAGCombiner.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -6005,7 +6005,7 @@
   // Don't do anything if the mask is constant. This should not be reachable.
   // InstCombine should have already unfolded this pattern, and DAGCombiner
   // probably shouldn't produce it, too.
-  if (isa<ConstantSDNode>(M.getNode()))
+  if (isa<ConstantSDNode>(M.getNode()) || ISD::isBuildVectorOfConstantSDNodes(M.getNode()))
     return SDValue();
 
   // We can transform if the target has AndNot
Index: lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- lib/Target/X86/X86ISelLowering.cpp
+++ lib/Target/X86/X86ISelLowering.cpp
@@ -30045,6 +30045,27 @@
     Known = Known.trunc(BitWidth);
     break;
   }
+  case X86ISD::ANDNP: {
+    KnownBits Known2;
+    Known = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
+    Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
+
+    // ANDNP = (~X & Y);
+    Known.One &= Known2.Zero;
+    Known.Zero |= Known2.One;
+    break;
+  }
+  case X86ISD::FOR: {
+    KnownBits Known2;
+    Known = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
+    Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
+
+    // Output known-0 bits are only known if clear in both the LHS & RHS.
+    Known.Zero &= Known2.Zero;
+    // Output known-1 are known to be set if set in either the LHS | RHS.
+    Known.One |= Known2.One;
+    break;
+  }
   case X86ISD::CMOV: {
     Known = DAG.computeKnownBits(Op.getOperand(1), Depth+1);
     // If we don't know any bits, early out.
@@ -36316,6 +36337,47 @@
   return SDValue();
 }
 
+// Canonicalize OR(AND(X,C),AND(Y,~C)) -> OR(AND(X,C),ANDNP(C,Y))
+static SDValue canonicalizeBitSelect(SDNode *N, SelectionDAG &DAG,
+                                     const X86Subtarget &Subtarget) {
+  assert(N->getOpcode() == ISD::OR && "Unexpected Opcode");
+
+  EVT VT = N->getValueType(0);
+  if (!VT.isVector())
+    return SDValue();
+
+  SDValue N0 = peekThroughOneUseBitcasts(N->getOperand(0));
+  SDValue N1 = peekThroughOneUseBitcasts(N->getOperand(1));
+
+  if (N0.getOpcode() != ISD::AND || N1.getOpcode() != ISD::AND)
+    return SDValue();
+
+  // Attempt to extract constant byte masks.
+  APInt UndefElts0, UndefElts1;
+  SmallVector<APInt, 32> EltBits0, EltBits1;
+  if (!getTargetConstantBitsFromNode(N0.getOperand(1), 8, UndefElts0, EltBits0,
+                                     false, false))
+    return SDValue();
+  if (!getTargetConstantBitsFromNode(N1.getOperand(1), 8, UndefElts1, EltBits1,
+                                     false, false))
+    return SDValue();
+
+  for (unsigned i = 0, e = EltBits0.size(); i != e; ++i) {
+    // TODO - add UNDEF elts support.
+    if (UndefElts0[i] || UndefElts1[i])
+      return SDValue();
+    if (EltBits0[i] != ~EltBits1[i])
+      return SDValue();
+  }
+
+  SDLoc DL(N);
+  SDValue X = N->getOperand(0);
+  SDValue Y =
+      DAG.getNode(X86ISD::ANDNP, DL, VT, DAG.getBitcast(VT, N0.getOperand(1)),
+                  DAG.getBitcast(VT, N1.getOperand(0)));
+  return DAG.getNode(ISD::OR, DL, VT, X, Y);
+}
+
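Aside (not part of the patch): the combine above relies on two bitwise facts. First, OR(AND(X,C),AND(Y,~C)) is value-equivalent to OR(AND(X,C),ANDNP(C,Y)), since ANDNP(A,B) computes ~A & B. Second, for ANDNP the known-one bits of the result are B.One & A.Zero and the known-zero bits are B.Zero | A.One, which is what the computeKnownBits case above implements. The standalone C++ sketch below brute-forces both identities over 8-bit values; the helper name and harness are illustrative assumptions, not LLVM code.

#include <cassert>
#include <cstdint>

// Models X86ISD::ANDNP(A, B) = ~A & B on plain 8-bit values.
static uint8_t andnp(uint8_t A, uint8_t B) {
  return static_cast<uint8_t>(~A & B);
}

int main() {
  // Identity 1: the bit-select pattern and its ANDNP canonicalization agree.
  for (unsigned X = 0; X != 256; ++X)
    for (unsigned Y = 0; Y != 256; ++Y)
      for (unsigned C = 0; C != 256; ++C) {
        uint8_t Blend = static_cast<uint8_t>((X & C) | (Y & ~C));
        uint8_t Canon = static_cast<uint8_t>(
            (X & C) | andnp(static_cast<uint8_t>(C), static_cast<uint8_t>(Y)));
        assert(Blend == Canon);
      }

  // Identity 2: with both inputs fully known, the ANDNP known-bits rule
  // (One = B.One & A.Zero, Zero = B.Zero | A.One) reproduces ~A & B exactly.
  for (unsigned A = 0; A != 256; ++A)
    for (unsigned B = 0; B != 256; ++B) {
      uint8_t One = static_cast<uint8_t>(B & ~A);
      uint8_t Zero = static_cast<uint8_t>(~B | A);
      assert(One == andnp(static_cast<uint8_t>(A), static_cast<uint8_t>(B)));
      assert(Zero == static_cast<uint8_t>(~One));
    }
  return 0;
}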
 // Try to match OR(AND(~MASK,X),AND(MASK,Y)) logic pattern.
 static bool matchLogicBlend(SDNode *N, SDValue &X, SDValue &Y, SDValue &Mask) {
   if (N->getOpcode() != ISD::OR)
@@ -36578,6 +36640,9 @@
   if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, Subtarget))
     return FPLogic;
 
+  if (SDValue R = canonicalizeBitSelect(N, DAG, Subtarget))
+    return R;
+
   if (SDValue R = combineLogicBlendIntoPBLENDV(N, DAG, Subtarget))
     return R;
 
Index: test/CodeGen/X86/bitreverse.ll
===================================================================
--- test/CodeGen/X86/bitreverse.ll
+++ test/CodeGen/X86/bitreverse.ll
@@ -61,18 +61,17 @@
 ; X64-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
 ; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
 ; X64-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
-; X64-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
-; X64-NEXT: packuswb %xmm2, %xmm0
-; X64-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; X64-NEXT: movdqa %xmm0, %xmm2
-; X64-NEXT: pand %xmm1, %xmm2
+; X64-NEXT: pshufhw {{.*#+}} xmm1 = xmm0[0,1,2,3,7,6,5,4]
+; X64-NEXT: packuswb %xmm2, %xmm1
+; X64-NEXT: movdqa {{.*#+}} xmm0 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; X64-NEXT: movdqa %xmm1, %xmm2
+; X64-NEXT: pand %xmm0, %xmm2
 ; X64-NEXT: psllw $4, %xmm2
-; X64-NEXT: movdqa {{.*#+}} xmm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; X64-NEXT: pand %xmm3, %xmm2
-; X64-NEXT: pand %xmm3, %xmm0
-; X64-NEXT: psrlw $4, %xmm0
-; X64-NEXT: pand %xmm1, %xmm0
-; X64-NEXT: por %xmm2, %xmm0
+; X64-NEXT: pand {{.*}}(%rip), %xmm1
+; X64-NEXT: psrlw $4, %xmm1
+; X64-NEXT: pand %xmm0, %xmm1
+; X64-NEXT: pandn %xmm2, %xmm0
+; X64-NEXT: por %xmm1, %xmm0
 ; X64-NEXT: movdqa {{.*#+}} xmm1 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51]
 ; X64-NEXT: pand %xmm0, %xmm1
 ; X64-NEXT: psllw $2, %xmm1
Index: test/CodeGen/X86/combine-fcopysign.ll
===================================================================
--- test/CodeGen/X86/combine-fcopysign.ll
+++ test/CodeGen/X86/combine-fcopysign.ll
@@ -105,16 +105,16 @@
 define <4 x float> @combine_vec_fcopysign_fabs_mag(<4 x float> %x, <4 x float> %y) {
 ; SSE-LABEL: combine_vec_fcopysign_fabs_mag:
 ; SSE: # %bb.0:
-; SSE-NEXT: andps {{.*}}(%rip), %xmm1
-; SSE-NEXT: andps {{.*}}(%rip), %xmm0
-; SSE-NEXT: orps %xmm1, %xmm0
+; SSE-NEXT: movaps {{.*}}(%rip), %xmm2
+; SSE-NEXT: andps %xmm2, %xmm0
+; SSE-NEXT: andnps %xmm1, %xmm2
+; SSE-NEXT: orps %xmm2, %xmm0
 ; SSE-NEXT: retq
 ;
 ; AVX-LABEL: combine_vec_fcopysign_fabs_mag:
 ; AVX: # %bb.0:
-; AVX-NEXT: vbroadcastss {{.*#+}} xmm2 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
-; AVX-NEXT: vandps %xmm2, %xmm1, %xmm1
 ; AVX-NEXT: vbroadcastss {{.*}}(%rip), %xmm2
+; AVX-NEXT: vandnps %xmm1, %xmm2, %xmm1
 ; AVX-NEXT: vandps %xmm2, %xmm0, %xmm0
 ; AVX-NEXT: vorps %xmm1, %xmm0, %xmm0
 ; AVX-NEXT: retq
@@ -127,16 +127,16 @@
 define <4 x float> @combine_vec_fcopysign_fneg_mag(<4 x float> %x, <4 x float> %y) {
 ; SSE-LABEL: combine_vec_fcopysign_fneg_mag:
 ; SSE: # %bb.0:
-; SSE-NEXT: andps {{.*}}(%rip), %xmm1
-; SSE-NEXT: andps {{.*}}(%rip), %xmm0
-; SSE-NEXT: orps %xmm1, %xmm0
+; SSE-NEXT: movaps {{.*}}(%rip), %xmm2
+; SSE-NEXT: andps %xmm2, %xmm0
+; SSE-NEXT: andnps %xmm1, %xmm2
+; SSE-NEXT: orps %xmm2, %xmm0
 ; SSE-NEXT: retq
 ;
 ; AVX-LABEL: combine_vec_fcopysign_fneg_mag:
 ; AVX: # %bb.0:
-; AVX-NEXT: vbroadcastss {{.*#+}} xmm2 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
-; AVX-NEXT: vandps %xmm2, %xmm1, %xmm1
 ; AVX-NEXT: vbroadcastss {{.*}}(%rip), %xmm2
+;
AVX-NEXT: vandnps %xmm1, %xmm2, %xmm1 ; AVX-NEXT: vandps %xmm2, %xmm0, %xmm0 ; AVX-NEXT: vorps %xmm1, %xmm0, %xmm0 ; AVX-NEXT: retq @@ -149,16 +149,16 @@ define <4 x float> @combine_vec_fcopysign_fcopysign_mag(<4 x float> %x, <4 x float> %y, <4 x float> %z) { ; SSE-LABEL: combine_vec_fcopysign_fcopysign_mag: ; SSE: # %bb.0: -; SSE-NEXT: andps {{.*}}(%rip), %xmm1 -; SSE-NEXT: andps {{.*}}(%rip), %xmm0 -; SSE-NEXT: orps %xmm1, %xmm0 +; SSE-NEXT: movaps {{.*}}(%rip), %xmm2 +; SSE-NEXT: andps %xmm2, %xmm0 +; SSE-NEXT: andnps %xmm1, %xmm2 +; SSE-NEXT: orps %xmm2, %xmm0 ; SSE-NEXT: retq ; ; AVX-LABEL: combine_vec_fcopysign_fcopysign_mag: ; AVX: # %bb.0: -; AVX-NEXT: vbroadcastss {{.*#+}} xmm2 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0] -; AVX-NEXT: vandps %xmm2, %xmm1, %xmm1 ; AVX-NEXT: vbroadcastss {{.*}}(%rip), %xmm2 +; AVX-NEXT: vandnps %xmm1, %xmm2, %xmm1 ; AVX-NEXT: vandps %xmm2, %xmm0, %xmm0 ; AVX-NEXT: vorps %xmm1, %xmm0, %xmm0 ; AVX-NEXT: retq @@ -171,18 +171,18 @@ define <4 x float> @combine_vec_fcopysign_fcopysign_sgn(<4 x float> %x, <4 x float> %y, <4 x float> %z) { ; SSE-LABEL: combine_vec_fcopysign_fcopysign_sgn: ; SSE: # %bb.0: -; SSE-NEXT: andps {{.*}}(%rip), %xmm2 -; SSE-NEXT: andps {{.*}}(%rip), %xmm0 -; SSE-NEXT: orps %xmm2, %xmm0 +; SSE-NEXT: movaps {{.*}}(%rip), %xmm1 +; SSE-NEXT: andps %xmm1, %xmm0 +; SSE-NEXT: andnps %xmm2, %xmm1 +; SSE-NEXT: orps %xmm1, %xmm0 ; SSE-NEXT: retq ; ; AVX-LABEL: combine_vec_fcopysign_fcopysign_sgn: ; AVX: # %bb.0: -; AVX-NEXT: vbroadcastss {{.*#+}} xmm1 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0] -; AVX-NEXT: vandps %xmm1, %xmm2, %xmm1 -; AVX-NEXT: vbroadcastss {{.*}}(%rip), %xmm2 -; AVX-NEXT: vandps %xmm2, %xmm0, %xmm0 -; AVX-NEXT: vorps %xmm1, %xmm0, %xmm0 +; AVX-NEXT: vbroadcastss {{.*}}(%rip), %xmm1 +; AVX-NEXT: vandnps %xmm2, %xmm1, %xmm2 +; AVX-NEXT: vandps %xmm1, %xmm0, %xmm0 +; AVX-NEXT: vorps %xmm2, %xmm0, %xmm0 ; AVX-NEXT: retq %1 = call <4 x float> @llvm.copysign.v4f32(<4 x float> %y, <4 x float> %z) %2 = call <4 x float> @llvm.copysign.v4f32(<4 x float> %x, <4 x float> %1) @@ -194,36 +194,37 @@ ; SSE-LABEL: combine_vec_fcopysign_fpext_sgn: ; SSE: # %bb.0: ; SSE-NEXT: movaps %xmm2, %xmm3 -; SSE-NEXT: cvtss2sd %xmm2, %xmm4 +; SSE-NEXT: cvtss2sd %xmm2, %xmm8 ; SSE-NEXT: movshdup {{.*#+}} xmm5 = xmm2[1,1,3,3] ; SSE-NEXT: movaps %xmm2, %xmm6 ; SSE-NEXT: unpckhpd {{.*#+}} xmm6 = xmm6[1],xmm2[1] ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1],xmm2[2,3] -; SSE-NEXT: movaps {{.*#+}} xmm7 +; SSE-NEXT: movaps {{.*}}(%rip), %xmm4 ; SSE-NEXT: movaps %xmm0, %xmm2 -; SSE-NEXT: andps %xmm7, %xmm2 -; SSE-NEXT: movaps {{.*#+}} xmm8 = [-0.0E+0,-0.0E+0] -; SSE-NEXT: andps %xmm8, %xmm4 -; SSE-NEXT: orps %xmm4, %xmm2 +; SSE-NEXT: andps %xmm4, %xmm2 +; SSE-NEXT: movaps %xmm4, %xmm7 +; SSE-NEXT: andnps %xmm8, %xmm7 +; SSE-NEXT: orps %xmm7, %xmm2 ; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1] -; SSE-NEXT: andps %xmm7, %xmm0 -; SSE-NEXT: xorps %xmm4, %xmm4 -; SSE-NEXT: cvtss2sd %xmm5, %xmm4 -; SSE-NEXT: andps %xmm8, %xmm4 -; SSE-NEXT: orps %xmm0, %xmm4 -; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm4[0] +; SSE-NEXT: andps %xmm4, %xmm0 +; SSE-NEXT: cvtss2sd %xmm5, %xmm5 +; SSE-NEXT: movaps %xmm4, %xmm7 +; SSE-NEXT: andnps %xmm5, %xmm7 +; SSE-NEXT: orps %xmm0, %xmm7 +; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm7[0] ; SSE-NEXT: movaps %xmm1, %xmm0 ; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1] -; SSE-NEXT: andps %xmm7, %xmm0 +; SSE-NEXT: andps %xmm4, %xmm0 ; SSE-NEXT: cvtss2sd %xmm3, %xmm3 -; SSE-NEXT: andps %xmm8, %xmm3 -; SSE-NEXT: orps %xmm0, %xmm3 -; SSE-NEXT: andps 
%xmm7, %xmm1 +; SSE-NEXT: movaps %xmm4, %xmm5 +; SSE-NEXT: andnps %xmm3, %xmm5 +; SSE-NEXT: orps %xmm0, %xmm5 +; SSE-NEXT: andps %xmm4, %xmm1 ; SSE-NEXT: xorps %xmm0, %xmm0 ; SSE-NEXT: cvtss2sd %xmm6, %xmm0 -; SSE-NEXT: andps %xmm8, %xmm0 -; SSE-NEXT: orps %xmm0, %xmm1 -; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm3[0] +; SSE-NEXT: andnps %xmm0, %xmm4 +; SSE-NEXT: orps %xmm4, %xmm1 +; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm5[0] ; SSE-NEXT: movaps %xmm2, %xmm0 ; SSE-NEXT: retq ; @@ -232,8 +233,7 @@ ; AVX-NEXT: vbroadcastsd {{.*}}(%rip), %ymm2 ; AVX-NEXT: vandps %ymm2, %ymm0, %ymm0 ; AVX-NEXT: vcvtps2pd %xmm1, %ymm1 -; AVX-NEXT: vbroadcastsd {{.*#+}} ymm2 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0] -; AVX-NEXT: vandps %ymm2, %ymm1, %ymm1 +; AVX-NEXT: vandnps %ymm1, %ymm2, %ymm1 ; AVX-NEXT: vorps %ymm1, %ymm0, %ymm0 ; AVX-NEXT: retq %1 = fpext <4 x float> %y to <4 x double> @@ -246,35 +246,37 @@ ; SSE-LABEL: combine_vec_fcopysign_fptrunc_sgn: ; SSE: # %bb.0: ; SSE-NEXT: movaps %xmm0, %xmm3 -; SSE-NEXT: movaps {{.*#+}} xmm5 -; SSE-NEXT: andps %xmm5, %xmm0 -; SSE-NEXT: cvtsd2ss %xmm1, %xmm6 -; SSE-NEXT: movaps {{.*#+}} xmm4 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0] -; SSE-NEXT: andps %xmm4, %xmm6 +; SSE-NEXT: movaps {{.*}}(%rip), %xmm4 +; SSE-NEXT: andps %xmm4, %xmm0 +; SSE-NEXT: cvtsd2ss %xmm1, %xmm5 +; SSE-NEXT: movaps %xmm4, %xmm6 +; SSE-NEXT: andnps %xmm5, %xmm6 ; SSE-NEXT: orps %xmm6, %xmm0 -; SSE-NEXT: movshdup {{.*#+}} xmm6 = xmm3[1,1,3,3] -; SSE-NEXT: andps %xmm5, %xmm6 +; SSE-NEXT: movshdup {{.*#+}} xmm5 = xmm3[1,1,3,3] +; SSE-NEXT: andps %xmm4, %xmm5 ; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1] ; SSE-NEXT: cvtsd2ss %xmm1, %xmm1 -; SSE-NEXT: andps %xmm4, %xmm1 -; SSE-NEXT: orps %xmm6, %xmm1 -; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; SSE-NEXT: movaps %xmm4, %xmm6 +; SSE-NEXT: andnps %xmm1, %xmm6 +; SSE-NEXT: orps %xmm5, %xmm6 +; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1] ; SSE-NEXT: movaps %xmm3, %xmm1 ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm3[1] -; SSE-NEXT: andps %xmm5, %xmm1 -; SSE-NEXT: xorps %xmm6, %xmm6 -; SSE-NEXT: cvtsd2ss %xmm2, %xmm6 -; SSE-NEXT: andps %xmm4, %xmm6 +; SSE-NEXT: andps %xmm4, %xmm1 +; SSE-NEXT: xorps %xmm5, %xmm5 +; SSE-NEXT: cvtsd2ss %xmm2, %xmm5 +; SSE-NEXT: movaps %xmm4, %xmm6 +; SSE-NEXT: andnps %xmm5, %xmm6 ; SSE-NEXT: orps %xmm1, %xmm6 ; SSE-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],xmm6[0],xmm0[3] ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1,2,3] -; SSE-NEXT: andps %xmm5, %xmm3 +; SSE-NEXT: andps %xmm4, %xmm3 ; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1] ; SSE-NEXT: xorps %xmm1, %xmm1 ; SSE-NEXT: cvtsd2ss %xmm2, %xmm1 -; SSE-NEXT: andps %xmm4, %xmm1 -; SSE-NEXT: orps %xmm3, %xmm1 -; SSE-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0] +; SSE-NEXT: andnps %xmm1, %xmm4 +; SSE-NEXT: orps %xmm3, %xmm4 +; SSE-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm4[0] ; SSE-NEXT: retq ; ; AVX-LABEL: combine_vec_fcopysign_fptrunc_sgn: @@ -282,8 +284,7 @@ ; AVX-NEXT: vbroadcastss {{.*}}(%rip), %xmm2 ; AVX-NEXT: vandpd %xmm2, %xmm0, %xmm0 ; AVX-NEXT: vcvtpd2ps %ymm1, %xmm1 -; AVX-NEXT: vbroadcastss {{.*#+}} xmm2 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0] -; AVX-NEXT: vandpd %xmm2, %xmm1, %xmm1 +; AVX-NEXT: vandnpd %xmm1, %xmm2, %xmm1 ; AVX-NEXT: vorpd %xmm1, %xmm0, %xmm0 ; AVX-NEXT: vzeroupper ; AVX-NEXT: retq Index: test/CodeGen/X86/combine-udiv.ll =================================================================== --- test/CodeGen/X86/combine-udiv.ll +++ test/CodeGen/X86/combine-udiv.ll @@ -644,20 +644,20 @@ define <16 x 
i8> @combine_vec_udiv_nonuniform4(<16 x i8> %x) { ; SSE2-LABEL: combine_vec_udiv_nonuniform4: ; SSE2: # %bb.0: -; SSE2-NEXT: pxor %xmm1, %xmm1 -; SSE2-NEXT: movdqa %xmm0, %xmm2 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7] +; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [0,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] +; SSE2-NEXT: movdqa %xmm0, %xmm1 +; SSE2-NEXT: pand %xmm2, %xmm1 +; SSE2-NEXT: pxor %xmm3, %xmm3 +; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7] ; SSE2-NEXT: movl $171, %eax -; SSE2-NEXT: movd %eax, %xmm1 -; SSE2-NEXT: pmullw %xmm2, %xmm1 -; SSE2-NEXT: psrlw $8, %xmm1 -; SSE2-NEXT: pmullw {{.*}}(%rip), %xmm1 -; SSE2-NEXT: psrlw $8, %xmm1 -; SSE2-NEXT: movl $255, %eax -; SSE2-NEXT: movd %eax, %xmm2 -; SSE2-NEXT: pand %xmm1, %xmm2 -; SSE2-NEXT: pand {{.*}}(%rip), %xmm0 -; SSE2-NEXT: por %xmm2, %xmm0 +; SSE2-NEXT: movd %eax, %xmm3 +; SSE2-NEXT: pmullw %xmm0, %xmm3 +; SSE2-NEXT: psrlw $8, %xmm3 +; SSE2-NEXT: pmullw {{.*}}(%rip), %xmm3 +; SSE2-NEXT: psrlw $8, %xmm3 +; SSE2-NEXT: pandn %xmm3, %xmm2 +; SSE2-NEXT: por %xmm2, %xmm1 +; SSE2-NEXT: movdqa %xmm1, %xmm0 ; SSE2-NEXT: retq ; ; SSE41-LABEL: combine_vec_udiv_nonuniform4: Index: test/CodeGen/X86/sse-fcopysign.ll =================================================================== --- test/CodeGen/X86/sse-fcopysign.ll +++ test/CodeGen/X86/sse-fcopysign.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2 | FileCheck %s --check-prefix=ALL --check-prefix=X32 -; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2 | FileCheck %s --check-prefix=ALL --check-prefix=X64 +; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=ALL,X32 +; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=ALL,X64 ; ; Library Functions @@ -66,19 +66,22 @@ ; X32-NEXT: pushl %eax ; X32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero ; X32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero -; X32-NEXT: andps {{\.LCPI.*}}, %xmm1 -; X32-NEXT: andps {{\.LCPI.*}}, %xmm0 -; X32-NEXT: orps %xmm1, %xmm0 -; X32-NEXT: movss %xmm0, (%esp) +; X32-NEXT: movaps {{.*#+}} xmm2 +; X32-NEXT: andps %xmm2, %xmm0 +; X32-NEXT: andnps %xmm1, %xmm2 +; X32-NEXT: orps %xmm0, %xmm2 +; X32-NEXT: movss %xmm2, (%esp) ; X32-NEXT: flds (%esp) ; X32-NEXT: popl %eax ; X32-NEXT: retl ; ; X64-LABEL: int1: ; X64: # %bb.0: -; X64-NEXT: andps {{.*}}(%rip), %xmm0 -; X64-NEXT: andps {{.*}}(%rip), %xmm1 -; X64-NEXT: orps %xmm1, %xmm0 +; X64-NEXT: movaps {{.*#+}} xmm2 +; X64-NEXT: andps %xmm2, %xmm1 +; X64-NEXT: andnps %xmm0, %xmm2 +; X64-NEXT: orps %xmm1, %xmm2 +; X64-NEXT: movaps %xmm2, %xmm0 ; X64-NEXT: retq %tmp = tail call float @llvm.copysign.f32( float %b, float %a ) ret float %tmp @@ -94,11 +97,12 @@ ; X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero ; X32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero ; X32-NEXT: addss 20(%ebp), %xmm1 -; X32-NEXT: andps {{\.LCPI.*}}, %xmm0 +; X32-NEXT: movaps {{.*#+}} xmm2 +; X32-NEXT: andps %xmm2, %xmm0 ; X32-NEXT: cvtss2sd %xmm1, %xmm1 -; X32-NEXT: andps {{\.LCPI.*}}, %xmm1 -; X32-NEXT: orps %xmm0, %xmm1 -; X32-NEXT: movlps %xmm1, (%esp) +; X32-NEXT: andnps %xmm1, %xmm2 +; X32-NEXT: orps %xmm0, %xmm2 +; X32-NEXT: movlps %xmm2, (%esp) ; X32-NEXT: fldl (%esp) ; 
X32-NEXT: movl %ebp, %esp ; X32-NEXT: popl %ebp @@ -107,10 +111,11 @@ ; X64-LABEL: int2: ; X64: # %bb.0: ; X64-NEXT: addss %xmm2, %xmm1 +; X64-NEXT: movaps {{.*#+}} xmm2 +; X64-NEXT: andps %xmm2, %xmm0 ; X64-NEXT: cvtss2sd %xmm1, %xmm1 -; X64-NEXT: andps {{.*}}(%rip), %xmm1 -; X64-NEXT: andps {{.*}}(%rip), %xmm0 -; X64-NEXT: orps %xmm1, %xmm0 +; X64-NEXT: andnps %xmm1, %xmm2 +; X64-NEXT: orps %xmm2, %xmm0 ; X64-NEXT: retq %tmp1 = fadd float %b, %c %tmp2 = fpext float %tmp1 to double Index: test/CodeGen/X86/vec-copysign-avx512.ll =================================================================== --- test/CodeGen/X86/vec-copysign-avx512.ll +++ test/CodeGen/X86/vec-copysign-avx512.ll @@ -3,37 +3,25 @@ ; RUN: llc < %s -mtriple=x86_64-apple-macosx10.10.0 -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512VLDQ define <4 x float> @v4f32(<4 x float> %a, <4 x float> %b) nounwind { -; AVX512VL-LABEL: v4f32: -; AVX512VL: ## %bb.0: -; AVX512VL-NEXT: vpandd {{.*}}(%rip){1to4}, %xmm1, %xmm1 -; AVX512VL-NEXT: vpandd {{.*}}(%rip){1to4}, %xmm0, %xmm0 -; AVX512VL-NEXT: vpor %xmm1, %xmm0, %xmm0 -; AVX512VL-NEXT: retq -; -; AVX512VLDQ-LABEL: v4f32: -; AVX512VLDQ: ## %bb.0: -; AVX512VLDQ-NEXT: vandps {{.*}}(%rip){1to4}, %xmm1, %xmm1 -; AVX512VLDQ-NEXT: vandps {{.*}}(%rip){1to4}, %xmm0, %xmm0 -; AVX512VLDQ-NEXT: vorps %xmm1, %xmm0, %xmm0 -; AVX512VLDQ-NEXT: retq +; CHECK-LABEL: v4f32: +; CHECK: ## %bb.0: +; CHECK-NEXT: vbroadcastss {{.*#+}} xmm2 +; CHECK-NEXT: vandnps %xmm1, %xmm2, %xmm1 +; CHECK-NEXT: vandps %xmm2, %xmm0, %xmm0 +; CHECK-NEXT: vorps %xmm1, %xmm0, %xmm0 +; CHECK-NEXT: retq %tmp = tail call <4 x float> @llvm.copysign.v4f32( <4 x float> %a, <4 x float> %b ) ret <4 x float> %tmp } define <8 x float> @v8f32(<8 x float> %a, <8 x float> %b) nounwind { -; AVX512VL-LABEL: v8f32: -; AVX512VL: ## %bb.0: -; AVX512VL-NEXT: vpandd {{.*}}(%rip){1to8}, %ymm1, %ymm1 -; AVX512VL-NEXT: vpandd {{.*}}(%rip){1to8}, %ymm0, %ymm0 -; AVX512VL-NEXT: vpor %ymm1, %ymm0, %ymm0 -; AVX512VL-NEXT: retq -; -; AVX512VLDQ-LABEL: v8f32: -; AVX512VLDQ: ## %bb.0: -; AVX512VLDQ-NEXT: vandps {{.*}}(%rip){1to8}, %ymm1, %ymm1 -; AVX512VLDQ-NEXT: vandps {{.*}}(%rip){1to8}, %ymm0, %ymm0 -; AVX512VLDQ-NEXT: vorps %ymm1, %ymm0, %ymm0 -; AVX512VLDQ-NEXT: retq +; CHECK-LABEL: v8f32: +; CHECK: ## %bb.0: +; CHECK-NEXT: vbroadcastss {{.*#+}} ymm2 +; CHECK-NEXT: vandnps %ymm1, %ymm2, %ymm1 +; CHECK-NEXT: vandps %ymm2, %ymm0, %ymm0 +; CHECK-NEXT: vorps %ymm1, %ymm0, %ymm0 +; CHECK-NEXT: retq %tmp = tail call <8 x float> @llvm.copysign.v8f32( <8 x float> %a, <8 x float> %b ) ret <8 x float> %tmp } @@ -41,15 +29,17 @@ define <16 x float> @v16f32(<16 x float> %a, <16 x float> %b) nounwind { ; AVX512VL-LABEL: v16f32: ; AVX512VL: ## %bb.0: -; AVX512VL-NEXT: vpandd {{.*}}(%rip){1to16}, %zmm1, %zmm1 -; AVX512VL-NEXT: vpandd {{.*}}(%rip){1to16}, %zmm0, %zmm0 +; AVX512VL-NEXT: vpbroadcastd {{.*#+}} zmm2 +; AVX512VL-NEXT: vpandnd %zmm1, %zmm2, %zmm1 +; AVX512VL-NEXT: vpandd %zmm2, %zmm0, %zmm0 ; AVX512VL-NEXT: vpord %zmm1, %zmm0, %zmm0 ; AVX512VL-NEXT: retq ; ; AVX512VLDQ-LABEL: v16f32: ; AVX512VLDQ: ## %bb.0: -; AVX512VLDQ-NEXT: vandps {{.*}}(%rip){1to16}, %zmm1, %zmm1 -; AVX512VLDQ-NEXT: vandps {{.*}}(%rip){1to16}, %zmm0, %zmm0 +; AVX512VLDQ-NEXT: vbroadcastss {{.*#+}} zmm2 +; AVX512VLDQ-NEXT: vandnps %zmm1, %zmm2, %zmm1 +; AVX512VLDQ-NEXT: vandps %zmm2, %zmm0, %zmm0 ; AVX512VLDQ-NEXT: vorps %zmm1, %zmm0, %zmm0 ; AVX512VLDQ-NEXT: retq %tmp = tail call <16 x float> @llvm.copysign.v16f32( <16 x float> %a, <16 x float> %b ) 
@@ -59,8 +49,9 @@ define <2 x double> @v2f64(<2 x double> %a, <2 x double> %b) nounwind { ; CHECK-LABEL: v2f64: ; CHECK: ## %bb.0: -; CHECK-NEXT: vandps {{.*}}(%rip), %xmm1, %xmm1 -; CHECK-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0 +; CHECK-NEXT: vmovaps {{.*#+}} xmm2 +; CHECK-NEXT: vandnps %xmm1, %xmm2, %xmm1 +; CHECK-NEXT: vandps %xmm2, %xmm0, %xmm0 ; CHECK-NEXT: vorps %xmm1, %xmm0, %xmm0 ; CHECK-NEXT: retq %tmp = tail call <2 x double> @llvm.copysign.v2f64( <2 x double> %a, <2 x double> %b ) @@ -68,19 +59,13 @@ } define <4 x double> @v4f64(<4 x double> %a, <4 x double> %b) nounwind { -; AVX512VL-LABEL: v4f64: -; AVX512VL: ## %bb.0: -; AVX512VL-NEXT: vpandq {{.*}}(%rip){1to4}, %ymm1, %ymm1 -; AVX512VL-NEXT: vpandq {{.*}}(%rip){1to4}, %ymm0, %ymm0 -; AVX512VL-NEXT: vpor %ymm1, %ymm0, %ymm0 -; AVX512VL-NEXT: retq -; -; AVX512VLDQ-LABEL: v4f64: -; AVX512VLDQ: ## %bb.0: -; AVX512VLDQ-NEXT: vandpd {{.*}}(%rip){1to4}, %ymm1, %ymm1 -; AVX512VLDQ-NEXT: vandpd {{.*}}(%rip){1to4}, %ymm0, %ymm0 -; AVX512VLDQ-NEXT: vorpd %ymm1, %ymm0, %ymm0 -; AVX512VLDQ-NEXT: retq +; CHECK-LABEL: v4f64: +; CHECK: ## %bb.0: +; CHECK-NEXT: vbroadcastsd {{.*#+}} ymm2 +; CHECK-NEXT: vandnps %ymm1, %ymm2, %ymm1 +; CHECK-NEXT: vandps %ymm2, %ymm0, %ymm0 +; CHECK-NEXT: vorps %ymm1, %ymm0, %ymm0 +; CHECK-NEXT: retq %tmp = tail call <4 x double> @llvm.copysign.v4f64( <4 x double> %a, <4 x double> %b ) ret <4 x double> %tmp } @@ -88,16 +73,18 @@ define <8 x double> @v8f64(<8 x double> %a, <8 x double> %b) nounwind { ; AVX512VL-LABEL: v8f64: ; AVX512VL: ## %bb.0: -; AVX512VL-NEXT: vpandq {{.*}}(%rip){1to8}, %zmm1, %zmm1 -; AVX512VL-NEXT: vpandq {{.*}}(%rip){1to8}, %zmm0, %zmm0 +; AVX512VL-NEXT: vpbroadcastq {{.*#+}} zmm2 +; AVX512VL-NEXT: vpandnq %zmm1, %zmm2, %zmm1 +; AVX512VL-NEXT: vpandq %zmm2, %zmm0, %zmm0 ; AVX512VL-NEXT: vporq %zmm1, %zmm0, %zmm0 ; AVX512VL-NEXT: retq ; ; AVX512VLDQ-LABEL: v8f64: ; AVX512VLDQ: ## %bb.0: -; AVX512VLDQ-NEXT: vandpd {{.*}}(%rip){1to8}, %zmm1, %zmm1 -; AVX512VLDQ-NEXT: vandpd {{.*}}(%rip){1to8}, %zmm0, %zmm0 -; AVX512VLDQ-NEXT: vorpd %zmm1, %zmm0, %zmm0 +; AVX512VLDQ-NEXT: vbroadcastsd {{.*#+}} zmm2 +; AVX512VLDQ-NEXT: vandnps %zmm1, %zmm2, %zmm1 +; AVX512VLDQ-NEXT: vandps %zmm2, %zmm0, %zmm0 +; AVX512VLDQ-NEXT: vorps %zmm1, %zmm0, %zmm0 ; AVX512VLDQ-NEXT: retq %tmp = tail call <8 x double> @llvm.copysign.v8f64( <8 x double> %a, <8 x double> %b ) ret <8 x double> %tmp Index: test/CodeGen/X86/vec-copysign.ll =================================================================== --- test/CodeGen/X86/vec-copysign.ll +++ test/CodeGen/X86/vec-copysign.ll @@ -1,15 +1,9 @@ -; RUN: llc < %s -mtriple=x86_64-apple-macosx10.10.0 -mattr=+sse2 | FileCheck %s --check-prefix=SSE2 --check-prefix=CHECK -; RUN: llc < %s -mtriple=x86_64-apple-macosx10.10.0 -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=CHECK +; RUN: llc < %s -mtriple=x86_64-apple-macosx10.10.0 -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,SSE2 +; RUN: llc < %s -mtriple=x86_64-apple-macosx10.10.0 -mattr=+avx | FileCheck %s --check-prefixes=CHECK,AVX ; Assertions have been enhanced from utils/update_llc_test_checks.py to show the constant pool values. ; Use a macosx triple to make sure the format of those constant strings is exact. 
-; CHECK: [[SIGNMASK1:L.+]]: -; CHECK-NEXT: .long 2147483648 -; CHECK-NEXT: .long 2147483648 -; CHECK-NEXT: .long 2147483648 -; CHECK-NEXT: .long 2147483648 - ; CHECK: [[MAGMASK1:L.+]]: ; CHECK-NEXT: .long 2147483647 ; CHECK-NEXT: .long 2147483647 @@ -18,16 +12,18 @@ define <4 x float> @v4f32(<4 x float> %a, <4 x float> %b) nounwind { ; SSE2-LABEL: v4f32: -; SSE2: # %bb.0: -; SSE2-NEXT: andps [[SIGNMASK1]](%rip), %xmm1 -; SSE2-NEXT: andps [[MAGMASK1]](%rip), %xmm0 -; SSE2-NEXT: orps %xmm1, %xmm0 +; SSE2: ## %bb.0: +; SSE2-NEXT: movaps [[MAGMASK1]](%rip), %xmm2 +; SSE2-NEXT: andps %xmm2, %xmm0 +; SSE2-NEXT: andnps %xmm1, %xmm2 +; SSE2-NEXT: orps %xmm2, %xmm0 ; SSE2-NEXT: retq ; ; AVX-LABEL: v4f32: -; AVX: # %bb.0: -; AVX-NEXT: vandps [[SIGNMASK1]](%rip), %xmm1, %xmm1 -; AVX-NEXT: vandps [[MAGMASK1]](%rip), %xmm0, %xmm0 +; AVX: ## %bb.0: +; AVX-NEXT: vmovaps [[MAGMASK1]](%rip), %xmm2 +; AVX-NEXT: vandnps %xmm1, %xmm2, %xmm1 +; AVX-NEXT: vandps %xmm2, %xmm0, %xmm0 ; AVX-NEXT: vorps %xmm1, %xmm0, %xmm0 ; AVX-NEXT: retq ; @@ -35,28 +31,12 @@ ret <4 x float> %tmp } -; SSE2: [[SIGNMASK2:L.+]]: -; SSE2-NEXT: .long 2147483648 -; SSE2-NEXT: .long 2147483648 -; SSE2-NEXT: .long 2147483648 -; SSE2-NEXT: .long 2147483648 - ; SSE2: [[MAGMASK2:L.+]]: ; SSE2-NEXT: .long 2147483647 ; SSE2-NEXT: .long 2147483647 ; SSE2-NEXT: .long 2147483647 ; SSE2-NEXT: .long 2147483647 -; AVX: [[SIGNMASK2:L.+]]: -; AVX-NEXT: .long 2147483648 -; AVX-NEXT: .long 2147483648 -; AVX-NEXT: .long 2147483648 -; AVX-NEXT: .long 2147483648 -; AVX-NEXT: .long 2147483648 -; AVX-NEXT: .long 2147483648 -; AVX-NEXT: .long 2147483648 -; AVX-NEXT: .long 2147483648 - ; AVX: [[MAGMASK2:L.+]]: ; AVX-NEXT: .long 2147483647 ; AVX-NEXT: .long 2147483647 @@ -69,21 +49,22 @@ define <8 x float> @v8f32(<8 x float> %a, <8 x float> %b) nounwind { ; SSE2-LABEL: v8f32: -; SSE2: # %bb.0: -; SSE2-NEXT: movaps [[SIGNMASK2]](%rip), %xmm4 -; SSE2-NEXT: andps %xmm4, %xmm2 -; SSE2-NEXT: movaps [[MAGMASK2]](%rip), %xmm5 -; SSE2-NEXT: andps %xmm5, %xmm0 -; SSE2-NEXT: orps %xmm2, %xmm0 -; SSE2-NEXT: andps %xmm4, %xmm3 -; SSE2-NEXT: andps %xmm5, %xmm1 -; SSE2-NEXT: orps %xmm3, %xmm1 +; SSE2: ## %bb.0: +; SSE2-NEXT: movaps [[MAGMASK2]](%rip), %xmm4 +; SSE2-NEXT: movaps %xmm4, %xmm5 +; SSE2-NEXT: andnps %xmm2, %xmm5 +; SSE2-NEXT: andps %xmm4, %xmm0 +; SSE2-NEXT: orps %xmm5, %xmm0 +; SSE2-NEXT: andps %xmm4, %xmm1 +; SSE2-NEXT: andnps %xmm3, %xmm4 +; SSE2-NEXT: orps %xmm4, %xmm1 ; SSE2-NEXT: retq ; ; AVX-LABEL: v8f32: -; AVX: # %bb.0: -; AVX-NEXT: vandps [[SIGNMASK2]](%rip), %ymm1, %ymm1 -; AVX-NEXT: vandps [[MAGMASK2]](%rip), %ymm0, %ymm0 +; AVX: ## %bb.0: +; AVX-NEXT: vmovaps [[MAGMASK2]](%rip), %ymm2 +; AVX-NEXT: vandnps %ymm1, %ymm2, %ymm1 +; AVX-NEXT: vandps %ymm2, %ymm0, %ymm0 ; AVX-NEXT: vorps %ymm1, %ymm0, %ymm0 ; AVX-NEXT: retq ; @@ -91,26 +72,24 @@ ret <8 x float> %tmp } -; CHECK: [[SIGNMASK3:L.+]]: -; CHECK-NEXT: .quad -9223372036854775808 -; CHECK-NEXT: .quad -9223372036854775808 - ; CHECK: [[MAGMASK3:L.+]]: ; CHECK-NEXT: .quad 9223372036854775807 ; CHECK-NEXT: .quad 9223372036854775807 define <2 x double> @v2f64(<2 x double> %a, <2 x double> %b) nounwind { ; SSE2-LABEL: v2f64: -; SSE2: # %bb.0: -; SSE2-NEXT: andps [[SIGNMASK3]](%rip), %xmm1 -; SSE2-NEXT: andps [[MAGMASK3]](%rip), %xmm0 -; SSE2-NEXT: orps %xmm1, %xmm0 +; SSE2: ## %bb.0: +; SSE2-NEXT: movaps [[MAGMASK3]](%rip), %xmm2 +; SSE2-NEXT: andps %xmm2, %xmm0 +; SSE2-NEXT: andnps %xmm1, %xmm2 +; SSE2-NEXT: orps %xmm2, %xmm0 ; SSE2-NEXT: retq ; ; AVX-LABEL: v2f64: -; AVX: # %bb.0: -; AVX-NEXT: vandps 
[[SIGNMASK3]](%rip), %xmm1, %xmm1 -; AVX-NEXT: vandps [[MAGMASK3]](%rip), %xmm0, %xmm0 +; AVX: ## %bb.0: +; AVX-NEXT: vmovaps [[MAGMASK3]](%rip), %xmm2 +; AVX-NEXT: vandnps %xmm1, %xmm2, %xmm1 +; AVX-NEXT: vandps %xmm2, %xmm0, %xmm0 ; AVX-NEXT: vorps %xmm1, %xmm0, %xmm0 ; AVX-NEXT: retq ; @@ -118,20 +97,10 @@ ret <2 x double> %tmp } -; SSE2: [[SIGNMASK4:L.+]]: -; SSE2-NEXT: .quad -9223372036854775808 -; SSE2-NEXT: .quad -9223372036854775808 - ; SSE2: [[MAGMASK4:L.+]]: ; SSE2-NEXT: .quad 9223372036854775807 ; SSE2-NEXT: .quad 9223372036854775807 -; AVX: [[SIGNMASK4:L.+]]: -; AVX-NEXT: .quad -9223372036854775808 -; AVX-NEXT: .quad -9223372036854775808 -; AVX-NEXT: .quad -9223372036854775808 -; AVX-NEXT: .quad -9223372036854775808 - ; AVX: [[MAGMASK4:L.+]]: ; AVX-NEXT: .quad 9223372036854775807 ; AVX-NEXT: .quad 9223372036854775807 @@ -140,21 +109,22 @@ define <4 x double> @v4f64(<4 x double> %a, <4 x double> %b) nounwind { ; SSE2-LABEL: v4f64: -; SSE2: # %bb.0: -; SSE2-NEXT: movaps [[SIGNMASK4]](%rip), %xmm4 -; SSE2-NEXT: andps %xmm4, %xmm2 -; SSE2-NEXT: movaps [[MAGMASK4]](%rip), %xmm5 -; SSE2-NEXT: andps %xmm5, %xmm0 -; SSE2-NEXT: orps %xmm2, %xmm0 -; SSE2-NEXT: andps %xmm4, %xmm3 -; SSE2-NEXT: andps %xmm5, %xmm1 -; SSE2-NEXT: orps %xmm3, %xmm1 +; SSE2: ## %bb.0: +; SSE2-NEXT: movaps [[MAGMASK4]](%rip), %xmm4 +; SSE2-NEXT: movaps %xmm4, %xmm5 +; SSE2-NEXT: andnps %xmm2, %xmm5 +; SSE2-NEXT: andps %xmm4, %xmm0 +; SSE2-NEXT: orps %xmm5, %xmm0 +; SSE2-NEXT: andps %xmm4, %xmm1 +; SSE2-NEXT: andnps %xmm3, %xmm4 +; SSE2-NEXT: orps %xmm4, %xmm1 ; SSE2-NEXT: retq ; ; AVX-LABEL: v4f64: -; AVX: # %bb.0: -; AVX-NEXT: vandps [[SIGNMASK4]](%rip), %ymm1, %ymm1 -; AVX-NEXT: vandps [[MAGMASK4]](%rip), %ymm0, %ymm0 +; AVX: ## %bb.0: +; AVX-NEXT: vmovaps [[MAGMASK4]](%rip), %ymm2 +; AVX-NEXT: vandnps %ymm1, %ymm2, %ymm1 +; AVX-NEXT: vandps %ymm2, %ymm0, %ymm0 ; AVX-NEXT: vorps %ymm1, %ymm0, %ymm0 ; AVX-NEXT: retq ; Index: test/CodeGen/X86/vector-bitreverse.ll =================================================================== --- test/CodeGen/X86/vector-bitreverse.ll +++ test/CodeGen/X86/vector-bitreverse.ll @@ -236,16 +236,16 @@ define <16 x i8> @test_bitreverse_v16i8(<16 x i8> %a) nounwind { ; SSE2-LABEL: test_bitreverse_v16i8: ; SSE2: # %bb.0: -; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] -; SSE2-NEXT: movdqa %xmm0, %xmm2 -; SSE2-NEXT: pand %xmm1, %xmm2 +; SSE2-NEXT: movdqa %xmm0, %xmm1 +; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; SSE2-NEXT: movdqa %xmm1, %xmm2 +; SSE2-NEXT: pand %xmm0, %xmm2 ; SSE2-NEXT: psllw $4, %xmm2 -; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] -; SSE2-NEXT: pand %xmm3, %xmm2 -; SSE2-NEXT: pand %xmm3, %xmm0 -; SSE2-NEXT: psrlw $4, %xmm0 -; SSE2-NEXT: pand %xmm1, %xmm0 -; SSE2-NEXT: por %xmm2, %xmm0 +; SSE2-NEXT: pand {{.*}}(%rip), %xmm1 +; SSE2-NEXT: psrlw $4, %xmm1 +; SSE2-NEXT: pand %xmm0, %xmm1 +; SSE2-NEXT: pandn %xmm2, %xmm0 +; SSE2-NEXT: por %xmm1, %xmm0 ; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51] ; SSE2-NEXT: pand %xmm0, %xmm1 ; SSE2-NEXT: psllw $2, %xmm1 @@ -309,18 +309,17 @@ ; SSE2-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,4,7,6] ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] ; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,0,3,2,4,5,6,7] -; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = 
xmm0[0,1,2,3,5,4,7,6] -; SSE2-NEXT: packuswb %xmm2, %xmm0 -; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] -; SSE2-NEXT: movdqa %xmm0, %xmm2 -; SSE2-NEXT: pand %xmm1, %xmm2 +; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm0[0,1,2,3,5,4,7,6] +; SSE2-NEXT: packuswb %xmm2, %xmm1 +; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; SSE2-NEXT: movdqa %xmm1, %xmm2 +; SSE2-NEXT: pand %xmm0, %xmm2 ; SSE2-NEXT: psllw $4, %xmm2 -; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] -; SSE2-NEXT: pand %xmm3, %xmm2 -; SSE2-NEXT: pand %xmm3, %xmm0 -; SSE2-NEXT: psrlw $4, %xmm0 -; SSE2-NEXT: pand %xmm1, %xmm0 -; SSE2-NEXT: por %xmm2, %xmm0 +; SSE2-NEXT: pand {{.*}}(%rip), %xmm1 +; SSE2-NEXT: psrlw $4, %xmm1 +; SSE2-NEXT: pand %xmm0, %xmm1 +; SSE2-NEXT: pandn %xmm2, %xmm0 +; SSE2-NEXT: por %xmm1, %xmm0 ; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51] ; SSE2-NEXT: pand %xmm0, %xmm1 ; SSE2-NEXT: psllw $2, %xmm1 @@ -386,18 +385,17 @@ ; SSE2-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,6,5,4] ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] ; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7] -; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4] -; SSE2-NEXT: packuswb %xmm2, %xmm0 -; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] -; SSE2-NEXT: movdqa %xmm0, %xmm2 -; SSE2-NEXT: pand %xmm1, %xmm2 +; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm0[0,1,2,3,7,6,5,4] +; SSE2-NEXT: packuswb %xmm2, %xmm1 +; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; SSE2-NEXT: movdqa %xmm1, %xmm2 +; SSE2-NEXT: pand %xmm0, %xmm2 ; SSE2-NEXT: psllw $4, %xmm2 -; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] -; SSE2-NEXT: pand %xmm3, %xmm2 -; SSE2-NEXT: pand %xmm3, %xmm0 -; SSE2-NEXT: psrlw $4, %xmm0 -; SSE2-NEXT: pand %xmm1, %xmm0 -; SSE2-NEXT: por %xmm2, %xmm0 +; SSE2-NEXT: pand {{.*}}(%rip), %xmm1 +; SSE2-NEXT: psrlw $4, %xmm1 +; SSE2-NEXT: pand %xmm0, %xmm1 +; SSE2-NEXT: pandn %xmm2, %xmm0 +; SSE2-NEXT: por %xmm1, %xmm0 ; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51] ; SSE2-NEXT: pand %xmm0, %xmm1 ; SSE2-NEXT: psllw $2, %xmm1 @@ -465,18 +463,17 @@ ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] ; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7] -; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4] -; SSE2-NEXT: packuswb %xmm2, %xmm0 -; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] -; SSE2-NEXT: movdqa %xmm0, %xmm2 -; SSE2-NEXT: pand %xmm1, %xmm2 +; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm0[0,1,2,3,7,6,5,4] +; SSE2-NEXT: packuswb %xmm2, %xmm1 +; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; SSE2-NEXT: movdqa %xmm1, %xmm2 +; SSE2-NEXT: pand %xmm0, %xmm2 ; SSE2-NEXT: psllw $4, %xmm2 -; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] -; SSE2-NEXT: pand %xmm3, %xmm2 -; SSE2-NEXT: pand %xmm3, %xmm0 -; SSE2-NEXT: psrlw $4, %xmm0 -; SSE2-NEXT: pand %xmm1, %xmm0 -; SSE2-NEXT: por %xmm2, %xmm0 +; 
SSE2-NEXT: pand {{.*}}(%rip), %xmm1 +; SSE2-NEXT: psrlw $4, %xmm1 +; SSE2-NEXT: pand %xmm0, %xmm1 +; SSE2-NEXT: pandn %xmm2, %xmm0 +; SSE2-NEXT: por %xmm1, %xmm0 ; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51] ; SSE2-NEXT: pand %xmm0, %xmm1 ; SSE2-NEXT: psllw $2, %xmm1 @@ -535,57 +532,59 @@ define <32 x i8> @test_bitreverse_v32i8(<32 x i8> %a) nounwind { ; SSE2-LABEL: test_bitreverse_v32i8: ; SSE2: # %bb.0: -; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; SSE2-NEXT: movdqa %xmm1, %xmm2 +; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] ; SSE2-NEXT: movdqa %xmm0, %xmm3 -; SSE2-NEXT: pand %xmm2, %xmm3 +; SSE2-NEXT: pand %xmm1, %xmm3 ; SSE2-NEXT: psllw $4, %xmm3 -; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] -; SSE2-NEXT: pand %xmm5, %xmm3 -; SSE2-NEXT: pand %xmm5, %xmm0 +; SSE2-NEXT: movdqa %xmm1, %xmm4 +; SSE2-NEXT: pandn %xmm3, %xmm4 +; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; SSE2-NEXT: pand %xmm8, %xmm0 ; SSE2-NEXT: psrlw $4, %xmm0 -; SSE2-NEXT: pand %xmm2, %xmm0 -; SSE2-NEXT: por %xmm3, %xmm0 +; SSE2-NEXT: pand %xmm1, %xmm0 +; SSE2-NEXT: por %xmm4, %xmm0 ; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51] ; SSE2-NEXT: movdqa %xmm0, %xmm4 ; SSE2-NEXT: pand %xmm3, %xmm4 ; SSE2-NEXT: psllw $2, %xmm4 -; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252] -; SSE2-NEXT: pand %xmm8, %xmm4 -; SSE2-NEXT: movdqa {{.*#+}} xmm9 = [204,204,204,204,204,204,204,204,204,204,204,204,204,204,204,204] -; SSE2-NEXT: pand %xmm9, %xmm0 -; SSE2-NEXT: psrlw $2, %xmm0 -; SSE2-NEXT: movdqa {{.*#+}} xmm10 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63] +; SSE2-NEXT: movdqa {{.*#+}} xmm9 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252] +; SSE2-NEXT: pand %xmm9, %xmm4 +; SSE2-NEXT: movdqa {{.*#+}} xmm10 = [204,204,204,204,204,204,204,204,204,204,204,204,204,204,204,204] ; SSE2-NEXT: pand %xmm10, %xmm0 +; SSE2-NEXT: psrlw $2, %xmm0 +; SSE2-NEXT: movdqa {{.*#+}} xmm11 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63] +; SSE2-NEXT: pand %xmm11, %xmm0 ; SSE2-NEXT: por %xmm4, %xmm0 ; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [170,170,170,170,170,170,170,170,170,170,170,170,170,170,170,170] -; SSE2-NEXT: movdqa %xmm0, %xmm7 -; SSE2-NEXT: pand %xmm4, %xmm7 -; SSE2-NEXT: psrlw $1, %xmm7 -; SSE2-NEXT: movdqa {{.*#+}} xmm11 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127] -; SSE2-NEXT: pand %xmm11, %xmm7 -; SSE2-NEXT: movdqa {{.*#+}} xmm6 = [85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85] -; SSE2-NEXT: pand %xmm6, %xmm0 +; SSE2-NEXT: movdqa %xmm0, %xmm5 +; SSE2-NEXT: pand %xmm4, %xmm5 +; SSE2-NEXT: psrlw $1, %xmm5 +; SSE2-NEXT: movdqa {{.*#+}} xmm6 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127] +; SSE2-NEXT: pand %xmm6, %xmm5 +; SSE2-NEXT: movdqa {{.*#+}} xmm7 = [85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85] +; SSE2-NEXT: pand %xmm7, %xmm0 ; SSE2-NEXT: paddb %xmm0, %xmm0 -; SSE2-NEXT: por %xmm7, %xmm0 -; SSE2-NEXT: movdqa %xmm1, %xmm7 -; SSE2-NEXT: pand %xmm2, %xmm7 -; SSE2-NEXT: psllw $4, %xmm7 -; SSE2-NEXT: pand %xmm5, %xmm7 -; SSE2-NEXT: pand %xmm5, %xmm1 -; SSE2-NEXT: psrlw $4, %xmm1 -; SSE2-NEXT: pand %xmm2, %xmm1 -; SSE2-NEXT: por %xmm7, %xmm1 +; SSE2-NEXT: por %xmm5, %xmm0 +; SSE2-NEXT: movdqa %xmm2, %xmm5 +; SSE2-NEXT: pand %xmm1, %xmm5 +; 
SSE2-NEXT: psllw $4, %xmm5 +; SSE2-NEXT: pand %xmm8, %xmm2 +; SSE2-NEXT: psrlw $4, %xmm2 +; SSE2-NEXT: pand %xmm1, %xmm2 +; SSE2-NEXT: pandn %xmm5, %xmm1 +; SSE2-NEXT: por %xmm2, %xmm1 ; SSE2-NEXT: pand %xmm1, %xmm3 ; SSE2-NEXT: psllw $2, %xmm3 -; SSE2-NEXT: pand %xmm8, %xmm3 -; SSE2-NEXT: pand %xmm9, %xmm1 -; SSE2-NEXT: psrlw $2, %xmm1 +; SSE2-NEXT: pand %xmm9, %xmm3 ; SSE2-NEXT: pand %xmm10, %xmm1 +; SSE2-NEXT: psrlw $2, %xmm1 +; SSE2-NEXT: pand %xmm11, %xmm1 ; SSE2-NEXT: por %xmm3, %xmm1 ; SSE2-NEXT: pand %xmm1, %xmm4 ; SSE2-NEXT: psrlw $1, %xmm4 -; SSE2-NEXT: pand %xmm11, %xmm4 -; SSE2-NEXT: pand %xmm6, %xmm1 +; SSE2-NEXT: pand %xmm6, %xmm4 +; SSE2-NEXT: pand %xmm7, %xmm1 ; SSE2-NEXT: paddb %xmm1, %xmm1 ; SSE2-NEXT: por %xmm4, %xmm1 ; SSE2-NEXT: retq @@ -686,74 +685,76 @@ define <16 x i16> @test_bitreverse_v16i16(<16 x i16> %a) nounwind { ; SSE2-LABEL: test_bitreverse_v16i16: ; SSE2: # %bb.0: +; SSE2-NEXT: movdqa %xmm1, %xmm2 ; SSE2-NEXT: pxor %xmm4, %xmm4 -; SSE2-NEXT: movdqa %xmm0, %xmm2 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm4[8],xmm2[9],xmm4[9],xmm2[10],xmm4[10],xmm2[11],xmm4[11],xmm2[12],xmm4[12],xmm2[13],xmm4[13],xmm2[14],xmm4[14],xmm2[15],xmm4[15] -; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,0,3,2,4,5,6,7] -; SSE2-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,4,7,6] +; SSE2-NEXT: movdqa %xmm0, %xmm1 +; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm4[8],xmm1[9],xmm4[9],xmm1[10],xmm4[10],xmm1[11],xmm4[11],xmm1[12],xmm4[12],xmm1[13],xmm4[13],xmm1[14],xmm4[14],xmm1[15],xmm4[15] +; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,0,3,2,4,5,6,7] +; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,4,7,6] ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7] ; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,0,3,2,4,5,6,7] ; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,4,7,6] -; SSE2-NEXT: packuswb %xmm2, %xmm0 -; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; SSE2-NEXT: packuswb %xmm1, %xmm0 +; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] ; SSE2-NEXT: movdqa %xmm0, %xmm3 -; SSE2-NEXT: pand %xmm2, %xmm3 +; SSE2-NEXT: pand %xmm1, %xmm3 ; SSE2-NEXT: psllw $4, %xmm3 -; SSE2-NEXT: movdqa {{.*#+}} xmm6 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] -; SSE2-NEXT: pand %xmm6, %xmm3 -; SSE2-NEXT: pand %xmm6, %xmm0 +; SSE2-NEXT: movdqa %xmm1, %xmm5 +; SSE2-NEXT: pandn %xmm3, %xmm5 +; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; SSE2-NEXT: pand %xmm8, %xmm0 ; SSE2-NEXT: psrlw $4, %xmm0 -; SSE2-NEXT: pand %xmm2, %xmm0 -; SSE2-NEXT: por %xmm3, %xmm0 +; SSE2-NEXT: pand %xmm1, %xmm0 +; SSE2-NEXT: por %xmm5, %xmm0 ; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51] ; SSE2-NEXT: movdqa %xmm0, %xmm5 ; SSE2-NEXT: pand %xmm3, %xmm5 ; SSE2-NEXT: psllw $2, %xmm5 -; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252] -; SSE2-NEXT: pand %xmm8, %xmm5 -; SSE2-NEXT: movdqa {{.*#+}} xmm9 = [204,204,204,204,204,204,204,204,204,204,204,204,204,204,204,204] -; SSE2-NEXT: pand %xmm9, %xmm0 -; SSE2-NEXT: psrlw $2, %xmm0 -; SSE2-NEXT: movdqa {{.*#+}} xmm10 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63] +; SSE2-NEXT: movdqa {{.*#+}} xmm9 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252] +; SSE2-NEXT: pand %xmm9, %xmm5 +; SSE2-NEXT: movdqa 
{{.*#+}} xmm10 = [204,204,204,204,204,204,204,204,204,204,204,204,204,204,204,204] ; SSE2-NEXT: pand %xmm10, %xmm0 +; SSE2-NEXT: psrlw $2, %xmm0 +; SSE2-NEXT: movdqa {{.*#+}} xmm11 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63] +; SSE2-NEXT: pand %xmm11, %xmm0 ; SSE2-NEXT: por %xmm5, %xmm0 ; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [170,170,170,170,170,170,170,170,170,170,170,170,170,170,170,170] -; SSE2-NEXT: movdqa %xmm0, %xmm7 -; SSE2-NEXT: pand %xmm5, %xmm7 -; SSE2-NEXT: psrlw $1, %xmm7 -; SSE2-NEXT: movdqa {{.*#+}} xmm11 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127] -; SSE2-NEXT: pand %xmm11, %xmm7 -; SSE2-NEXT: movdqa {{.*#+}} xmm12 = [85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85] -; SSE2-NEXT: pand %xmm12, %xmm0 +; SSE2-NEXT: movdqa %xmm0, %xmm6 +; SSE2-NEXT: pand %xmm5, %xmm6 +; SSE2-NEXT: psrlw $1, %xmm6 +; SSE2-NEXT: movdqa {{.*#+}} xmm12 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127] +; SSE2-NEXT: pand %xmm12, %xmm6 +; SSE2-NEXT: movdqa {{.*#+}} xmm7 = [85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85] +; SSE2-NEXT: pand %xmm7, %xmm0 ; SSE2-NEXT: paddb %xmm0, %xmm0 -; SSE2-NEXT: por %xmm7, %xmm0 -; SSE2-NEXT: movdqa %xmm1, %xmm7 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm4[8],xmm7[9],xmm4[9],xmm7[10],xmm4[10],xmm7[11],xmm4[11],xmm7[12],xmm4[12],xmm7[13],xmm4[13],xmm7[14],xmm4[14],xmm7[15],xmm4[15] -; SSE2-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[1,0,3,2,4,5,6,7] -; SSE2-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,5,4,7,6] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7] -; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,0,3,2,4,5,6,7] -; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,4,7,6] -; SSE2-NEXT: packuswb %xmm7, %xmm1 -; SSE2-NEXT: movdqa %xmm1, %xmm4 -; SSE2-NEXT: pand %xmm2, %xmm4 +; SSE2-NEXT: por %xmm6, %xmm0 +; SSE2-NEXT: movdqa %xmm2, %xmm6 +; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm4[8],xmm6[9],xmm4[9],xmm6[10],xmm4[10],xmm6[11],xmm4[11],xmm6[12],xmm4[12],xmm6[13],xmm4[13],xmm6[14],xmm4[14],xmm6[15],xmm4[15] +; SSE2-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[1,0,3,2,4,5,6,7] +; SSE2-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,5,4,7,6] +; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7] +; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,0,3,2,4,5,6,7] +; SSE2-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,4,7,6] +; SSE2-NEXT: packuswb %xmm6, %xmm2 +; SSE2-NEXT: movdqa %xmm2, %xmm4 +; SSE2-NEXT: pand %xmm1, %xmm4 ; SSE2-NEXT: psllw $4, %xmm4 -; SSE2-NEXT: pand %xmm6, %xmm4 -; SSE2-NEXT: pand %xmm6, %xmm1 -; SSE2-NEXT: psrlw $4, %xmm1 -; SSE2-NEXT: pand %xmm2, %xmm1 -; SSE2-NEXT: por %xmm4, %xmm1 +; SSE2-NEXT: pand %xmm8, %xmm2 +; SSE2-NEXT: psrlw $4, %xmm2 +; SSE2-NEXT: pand %xmm1, %xmm2 +; SSE2-NEXT: pandn %xmm4, %xmm1 +; SSE2-NEXT: por %xmm2, %xmm1 ; SSE2-NEXT: pand %xmm1, %xmm3 ; SSE2-NEXT: psllw $2, %xmm3 -; SSE2-NEXT: pand %xmm8, %xmm3 -; SSE2-NEXT: pand %xmm9, %xmm1 -; SSE2-NEXT: psrlw $2, %xmm1 +; SSE2-NEXT: pand %xmm9, %xmm3 ; SSE2-NEXT: pand %xmm10, %xmm1 +; SSE2-NEXT: psrlw $2, %xmm1 +; SSE2-NEXT: pand %xmm11, %xmm1 ; SSE2-NEXT: por %xmm3, %xmm1 ; SSE2-NEXT: pand %xmm1, %xmm5 ; SSE2-NEXT: psrlw $1, %xmm5 -; SSE2-NEXT: pand %xmm11, %xmm5 -; SSE2-NEXT: pand %xmm12, %xmm1 +; SSE2-NEXT: pand %xmm12, %xmm5 +; SSE2-NEXT: pand %xmm7, %xmm1 ; SSE2-NEXT: paddb %xmm1, %xmm1 ; SSE2-NEXT: por %xmm5, %xmm1 
; SSE2-NEXT: retq @@ -862,74 +863,76 @@ define <8 x i32> @test_bitreverse_v8i32(<8 x i32> %a) nounwind { ; SSE2-LABEL: test_bitreverse_v8i32: ; SSE2: # %bb.0: +; SSE2-NEXT: movdqa %xmm1, %xmm2 ; SSE2-NEXT: pxor %xmm4, %xmm4 -; SSE2-NEXT: movdqa %xmm0, %xmm2 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm4[8],xmm2[9],xmm4[9],xmm2[10],xmm4[10],xmm2[11],xmm4[11],xmm2[12],xmm4[12],xmm2[13],xmm4[13],xmm2[14],xmm4[14],xmm2[15],xmm4[15] -; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,2,1,0,4,5,6,7] -; SSE2-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,6,5,4] +; SSE2-NEXT: movdqa %xmm0, %xmm1 +; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm4[8],xmm1[9],xmm4[9],xmm1[10],xmm4[10],xmm1[11],xmm4[11],xmm1[12],xmm4[12],xmm1[13],xmm4[13],xmm1[14],xmm4[14],xmm1[15],xmm4[15] +; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[3,2,1,0,4,5,6,7] +; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,6,5,4] ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7] ; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7] ; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4] -; SSE2-NEXT: packuswb %xmm2, %xmm0 -; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; SSE2-NEXT: packuswb %xmm1, %xmm0 +; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] ; SSE2-NEXT: movdqa %xmm0, %xmm3 -; SSE2-NEXT: pand %xmm2, %xmm3 +; SSE2-NEXT: pand %xmm1, %xmm3 ; SSE2-NEXT: psllw $4, %xmm3 -; SSE2-NEXT: movdqa {{.*#+}} xmm6 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] -; SSE2-NEXT: pand %xmm6, %xmm3 -; SSE2-NEXT: pand %xmm6, %xmm0 +; SSE2-NEXT: movdqa %xmm1, %xmm5 +; SSE2-NEXT: pandn %xmm3, %xmm5 +; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; SSE2-NEXT: pand %xmm8, %xmm0 ; SSE2-NEXT: psrlw $4, %xmm0 -; SSE2-NEXT: pand %xmm2, %xmm0 -; SSE2-NEXT: por %xmm3, %xmm0 +; SSE2-NEXT: pand %xmm1, %xmm0 +; SSE2-NEXT: por %xmm5, %xmm0 ; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51] ; SSE2-NEXT: movdqa %xmm0, %xmm5 ; SSE2-NEXT: pand %xmm3, %xmm5 ; SSE2-NEXT: psllw $2, %xmm5 -; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252] -; SSE2-NEXT: pand %xmm8, %xmm5 -; SSE2-NEXT: movdqa {{.*#+}} xmm9 = [204,204,204,204,204,204,204,204,204,204,204,204,204,204,204,204] -; SSE2-NEXT: pand %xmm9, %xmm0 -; SSE2-NEXT: psrlw $2, %xmm0 -; SSE2-NEXT: movdqa {{.*#+}} xmm10 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63] +; SSE2-NEXT: movdqa {{.*#+}} xmm9 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252] +; SSE2-NEXT: pand %xmm9, %xmm5 +; SSE2-NEXT: movdqa {{.*#+}} xmm10 = [204,204,204,204,204,204,204,204,204,204,204,204,204,204,204,204] ; SSE2-NEXT: pand %xmm10, %xmm0 +; SSE2-NEXT: psrlw $2, %xmm0 +; SSE2-NEXT: movdqa {{.*#+}} xmm11 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63] +; SSE2-NEXT: pand %xmm11, %xmm0 ; SSE2-NEXT: por %xmm5, %xmm0 ; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [170,170,170,170,170,170,170,170,170,170,170,170,170,170,170,170] -; SSE2-NEXT: movdqa %xmm0, %xmm7 -; SSE2-NEXT: pand %xmm5, %xmm7 -; SSE2-NEXT: psrlw $1, %xmm7 -; SSE2-NEXT: movdqa {{.*#+}} xmm11 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127] -; SSE2-NEXT: pand %xmm11, %xmm7 -; SSE2-NEXT: movdqa {{.*#+}} xmm12 = [85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85] -; SSE2-NEXT: pand %xmm12, %xmm0 +; 
SSE2-NEXT: movdqa %xmm0, %xmm6 +; SSE2-NEXT: pand %xmm5, %xmm6 +; SSE2-NEXT: psrlw $1, %xmm6 +; SSE2-NEXT: movdqa {{.*#+}} xmm12 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127] +; SSE2-NEXT: pand %xmm12, %xmm6 +; SSE2-NEXT: movdqa {{.*#+}} xmm7 = [85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85] +; SSE2-NEXT: pand %xmm7, %xmm0 ; SSE2-NEXT: paddb %xmm0, %xmm0 -; SSE2-NEXT: por %xmm7, %xmm0 -; SSE2-NEXT: movdqa %xmm1, %xmm7 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm4[8],xmm7[9],xmm4[9],xmm7[10],xmm4[10],xmm7[11],xmm4[11],xmm7[12],xmm4[12],xmm7[13],xmm4[13],xmm7[14],xmm4[14],xmm7[15],xmm4[15] -; SSE2-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[3,2,1,0,4,5,6,7] -; SSE2-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,7,6,5,4] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7] -; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[3,2,1,0,4,5,6,7] -; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,6,5,4] -; SSE2-NEXT: packuswb %xmm7, %xmm1 -; SSE2-NEXT: movdqa %xmm1, %xmm4 -; SSE2-NEXT: pand %xmm2, %xmm4 +; SSE2-NEXT: por %xmm6, %xmm0 +; SSE2-NEXT: movdqa %xmm2, %xmm6 +; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm4[8],xmm6[9],xmm4[9],xmm6[10],xmm4[10],xmm6[11],xmm4[11],xmm6[12],xmm4[12],xmm6[13],xmm4[13],xmm6[14],xmm4[14],xmm6[15],xmm4[15] +; SSE2-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[3,2,1,0,4,5,6,7] +; SSE2-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,7,6,5,4] +; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7] +; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,2,1,0,4,5,6,7] +; SSE2-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,6,5,4] +; SSE2-NEXT: packuswb %xmm6, %xmm2 +; SSE2-NEXT: movdqa %xmm2, %xmm4 +; SSE2-NEXT: pand %xmm1, %xmm4 ; SSE2-NEXT: psllw $4, %xmm4 -; SSE2-NEXT: pand %xmm6, %xmm4 -; SSE2-NEXT: pand %xmm6, %xmm1 -; SSE2-NEXT: psrlw $4, %xmm1 -; SSE2-NEXT: pand %xmm2, %xmm1 -; SSE2-NEXT: por %xmm4, %xmm1 +; SSE2-NEXT: pand %xmm8, %xmm2 +; SSE2-NEXT: psrlw $4, %xmm2 +; SSE2-NEXT: pand %xmm1, %xmm2 +; SSE2-NEXT: pandn %xmm4, %xmm1 +; SSE2-NEXT: por %xmm2, %xmm1 ; SSE2-NEXT: pand %xmm1, %xmm3 ; SSE2-NEXT: psllw $2, %xmm3 -; SSE2-NEXT: pand %xmm8, %xmm3 -; SSE2-NEXT: pand %xmm9, %xmm1 -; SSE2-NEXT: psrlw $2, %xmm1 +; SSE2-NEXT: pand %xmm9, %xmm3 ; SSE2-NEXT: pand %xmm10, %xmm1 +; SSE2-NEXT: psrlw $2, %xmm1 +; SSE2-NEXT: pand %xmm11, %xmm1 ; SSE2-NEXT: por %xmm3, %xmm1 ; SSE2-NEXT: pand %xmm1, %xmm5 ; SSE2-NEXT: psrlw $1, %xmm5 -; SSE2-NEXT: pand %xmm11, %xmm5 -; SSE2-NEXT: pand %xmm12, %xmm1 +; SSE2-NEXT: pand %xmm12, %xmm5 +; SSE2-NEXT: pand %xmm7, %xmm1 ; SSE2-NEXT: paddb %xmm1, %xmm1 ; SSE2-NEXT: por %xmm5, %xmm1 ; SSE2-NEXT: retq @@ -1038,78 +1041,80 @@ define <4 x i64> @test_bitreverse_v4i64(<4 x i64> %a) nounwind { ; SSE2-LABEL: test_bitreverse_v4i64: ; SSE2: # %bb.0: +; SSE2-NEXT: movdqa %xmm1, %xmm2 ; SSE2-NEXT: pxor %xmm4, %xmm4 -; SSE2-NEXT: movdqa %xmm0, %xmm2 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm4[8],xmm2[9],xmm4[9],xmm2[10],xmm4[10],xmm2[11],xmm4[11],xmm2[12],xmm4[12],xmm2[13],xmm4[13],xmm2[14],xmm4[14],xmm2[15],xmm4[15] -; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1] -; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,2,1,0,4,5,6,7] -; SSE2-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,6,5,4] +; SSE2-NEXT: movdqa %xmm0, %xmm1 +; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = 
xmm1[8],xmm4[8],xmm1[9],xmm4[9],xmm1[10],xmm4[10],xmm1[11],xmm4[11],xmm1[12],xmm4[12],xmm1[13],xmm4[13],xmm1[14],xmm4[14],xmm1[15],xmm4[15] +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] +; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[3,2,1,0,4,5,6,7] +; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,6,5,4] ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7] ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] ; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7] ; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4] -; SSE2-NEXT: packuswb %xmm2, %xmm0 -; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; SSE2-NEXT: packuswb %xmm1, %xmm0 +; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] ; SSE2-NEXT: movdqa %xmm0, %xmm3 -; SSE2-NEXT: pand %xmm2, %xmm3 +; SSE2-NEXT: pand %xmm1, %xmm3 ; SSE2-NEXT: psllw $4, %xmm3 -; SSE2-NEXT: movdqa {{.*#+}} xmm6 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] -; SSE2-NEXT: pand %xmm6, %xmm3 -; SSE2-NEXT: pand %xmm6, %xmm0 +; SSE2-NEXT: movdqa %xmm1, %xmm5 +; SSE2-NEXT: pandn %xmm3, %xmm5 +; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; SSE2-NEXT: pand %xmm8, %xmm0 ; SSE2-NEXT: psrlw $4, %xmm0 -; SSE2-NEXT: pand %xmm2, %xmm0 -; SSE2-NEXT: por %xmm3, %xmm0 +; SSE2-NEXT: pand %xmm1, %xmm0 +; SSE2-NEXT: por %xmm5, %xmm0 ; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51] ; SSE2-NEXT: movdqa %xmm0, %xmm5 ; SSE2-NEXT: pand %xmm3, %xmm5 ; SSE2-NEXT: psllw $2, %xmm5 -; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252] -; SSE2-NEXT: pand %xmm8, %xmm5 -; SSE2-NEXT: movdqa {{.*#+}} xmm9 = [204,204,204,204,204,204,204,204,204,204,204,204,204,204,204,204] -; SSE2-NEXT: pand %xmm9, %xmm0 -; SSE2-NEXT: psrlw $2, %xmm0 -; SSE2-NEXT: movdqa {{.*#+}} xmm10 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63] +; SSE2-NEXT: movdqa {{.*#+}} xmm9 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252] +; SSE2-NEXT: pand %xmm9, %xmm5 +; SSE2-NEXT: movdqa {{.*#+}} xmm10 = [204,204,204,204,204,204,204,204,204,204,204,204,204,204,204,204] ; SSE2-NEXT: pand %xmm10, %xmm0 +; SSE2-NEXT: psrlw $2, %xmm0 +; SSE2-NEXT: movdqa {{.*#+}} xmm11 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63] +; SSE2-NEXT: pand %xmm11, %xmm0 ; SSE2-NEXT: por %xmm5, %xmm0 ; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [170,170,170,170,170,170,170,170,170,170,170,170,170,170,170,170] -; SSE2-NEXT: movdqa %xmm0, %xmm7 -; SSE2-NEXT: pand %xmm5, %xmm7 -; SSE2-NEXT: psrlw $1, %xmm7 -; SSE2-NEXT: movdqa {{.*#+}} xmm11 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127] -; SSE2-NEXT: pand %xmm11, %xmm7 -; SSE2-NEXT: movdqa {{.*#+}} xmm12 = [85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85] -; SSE2-NEXT: pand %xmm12, %xmm0 +; SSE2-NEXT: movdqa %xmm0, %xmm6 +; SSE2-NEXT: pand %xmm5, %xmm6 +; SSE2-NEXT: psrlw $1, %xmm6 +; SSE2-NEXT: movdqa {{.*#+}} xmm12 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127] +; SSE2-NEXT: pand %xmm12, %xmm6 +; SSE2-NEXT: movdqa {{.*#+}} xmm7 = [85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85] +; SSE2-NEXT: pand %xmm7, %xmm0 ; SSE2-NEXT: paddb %xmm0, %xmm0 -; SSE2-NEXT: por %xmm7, %xmm0 -; SSE2-NEXT: movdqa %xmm1, %xmm7 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm7 = 
xmm7[8],xmm4[8],xmm7[9],xmm4[9],xmm7[10],xmm4[10],xmm7[11],xmm4[11],xmm7[12],xmm4[12],xmm7[13],xmm4[13],xmm7[14],xmm4[14],xmm7[15],xmm4[15] -; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm7[2,3,0,1] -; SSE2-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[3,2,1,0,4,5,6,7] -; SSE2-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,7,6,5,4] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7] -; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] -; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[3,2,1,0,4,5,6,7] -; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,6,5,4] -; SSE2-NEXT: packuswb %xmm7, %xmm1 -; SSE2-NEXT: movdqa %xmm1, %xmm4 -; SSE2-NEXT: pand %xmm2, %xmm4 +; SSE2-NEXT: por %xmm6, %xmm0 +; SSE2-NEXT: movdqa %xmm2, %xmm6 +; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm4[8],xmm6[9],xmm4[9],xmm6[10],xmm4[10],xmm6[11],xmm4[11],xmm6[12],xmm4[12],xmm6[13],xmm4[13],xmm6[14],xmm4[14],xmm6[15],xmm4[15] +; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm6[2,3,0,1] +; SSE2-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[3,2,1,0,4,5,6,7] +; SSE2-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,7,6,5,4] +; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7] +; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1] +; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,2,1,0,4,5,6,7] +; SSE2-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,6,5,4] +; SSE2-NEXT: packuswb %xmm6, %xmm2 +; SSE2-NEXT: movdqa %xmm2, %xmm4 +; SSE2-NEXT: pand %xmm1, %xmm4 ; SSE2-NEXT: psllw $4, %xmm4 -; SSE2-NEXT: pand %xmm6, %xmm4 -; SSE2-NEXT: pand %xmm6, %xmm1 -; SSE2-NEXT: psrlw $4, %xmm1 -; SSE2-NEXT: pand %xmm2, %xmm1 -; SSE2-NEXT: por %xmm4, %xmm1 +; SSE2-NEXT: pand %xmm8, %xmm2 +; SSE2-NEXT: psrlw $4, %xmm2 +; SSE2-NEXT: pand %xmm1, %xmm2 +; SSE2-NEXT: pandn %xmm4, %xmm1 +; SSE2-NEXT: por %xmm2, %xmm1 ; SSE2-NEXT: pand %xmm1, %xmm3 ; SSE2-NEXT: psllw $2, %xmm3 -; SSE2-NEXT: pand %xmm8, %xmm3 -; SSE2-NEXT: pand %xmm9, %xmm1 -; SSE2-NEXT: psrlw $2, %xmm1 +; SSE2-NEXT: pand %xmm9, %xmm3 ; SSE2-NEXT: pand %xmm10, %xmm1 +; SSE2-NEXT: psrlw $2, %xmm1 +; SSE2-NEXT: pand %xmm11, %xmm1 ; SSE2-NEXT: por %xmm3, %xmm1 ; SSE2-NEXT: pand %xmm1, %xmm5 ; SSE2-NEXT: psrlw $1, %xmm5 -; SSE2-NEXT: pand %xmm11, %xmm5 -; SSE2-NEXT: pand %xmm12, %xmm1 +; SSE2-NEXT: pand %xmm12, %xmm5 +; SSE2-NEXT: pand %xmm7, %xmm1 ; SSE2-NEXT: paddb %xmm1, %xmm1 ; SSE2-NEXT: por %xmm5, %xmm1 ; SSE2-NEXT: retq @@ -1218,103 +1223,107 @@ define <64 x i8> @test_bitreverse_v64i8(<64 x i8> %a) nounwind { ; SSE2-LABEL: test_bitreverse_v64i8: ; SSE2: # %bb.0: -; SSE2-NEXT: movdqa {{.*#+}} xmm13 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; SSE2-NEXT: movdqa %xmm3, %xmm14 +; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] ; SSE2-NEXT: movdqa %xmm0, %xmm5 -; SSE2-NEXT: pand %xmm13, %xmm5 +; SSE2-NEXT: pand %xmm3, %xmm5 ; SSE2-NEXT: psllw $4, %xmm5 -; SSE2-NEXT: movdqa {{.*#+}} xmm7 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] -; SSE2-NEXT: pand %xmm7, %xmm5 -; SSE2-NEXT: pand %xmm7, %xmm0 +; SSE2-NEXT: movdqa %xmm3, %xmm6 +; SSE2-NEXT: pandn %xmm5, %xmm6 +; SSE2-NEXT: movdqa {{.*#+}} xmm9 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; SSE2-NEXT: pand %xmm9, %xmm0 ; SSE2-NEXT: psrlw $4, %xmm0 -; SSE2-NEXT: pand %xmm13, %xmm0 -; SSE2-NEXT: por %xmm5, %xmm0 +; SSE2-NEXT: pand %xmm3, %xmm0 +; SSE2-NEXT: por %xmm6, %xmm0 ; SSE2-NEXT: 
movdqa {{.*#+}} xmm5 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51] ; SSE2-NEXT: movdqa %xmm0, %xmm6 ; SSE2-NEXT: pand %xmm5, %xmm6 ; SSE2-NEXT: psllw $2, %xmm6 ; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252] ; SSE2-NEXT: pand %xmm8, %xmm6 -; SSE2-NEXT: movdqa {{.*#+}} xmm9 = [204,204,204,204,204,204,204,204,204,204,204,204,204,204,204,204] -; SSE2-NEXT: pand %xmm9, %xmm0 -; SSE2-NEXT: psrlw $2, %xmm0 -; SSE2-NEXT: movdqa {{.*#+}} xmm10 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63] +; SSE2-NEXT: movdqa {{.*#+}} xmm10 = [204,204,204,204,204,204,204,204,204,204,204,204,204,204,204,204] ; SSE2-NEXT: pand %xmm10, %xmm0 +; SSE2-NEXT: psrlw $2, %xmm0 +; SSE2-NEXT: movdqa {{.*#+}} xmm11 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63] +; SSE2-NEXT: pand %xmm11, %xmm0 ; SSE2-NEXT: por %xmm6, %xmm0 ; SSE2-NEXT: movdqa {{.*#+}} xmm6 = [170,170,170,170,170,170,170,170,170,170,170,170,170,170,170,170] -; SSE2-NEXT: movdqa %xmm0, %xmm4 -; SSE2-NEXT: pand %xmm6, %xmm4 -; SSE2-NEXT: psrlw $1, %xmm4 -; SSE2-NEXT: movdqa {{.*#+}} xmm11 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127] -; SSE2-NEXT: pand %xmm11, %xmm4 -; SSE2-NEXT: movdqa {{.*#+}} xmm12 = [85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85] -; SSE2-NEXT: pand %xmm12, %xmm0 +; SSE2-NEXT: movdqa %xmm0, %xmm7 +; SSE2-NEXT: pand %xmm6, %xmm7 +; SSE2-NEXT: psrlw $1, %xmm7 +; SSE2-NEXT: movdqa {{.*#+}} xmm12 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127] +; SSE2-NEXT: pand %xmm12, %xmm7 +; SSE2-NEXT: movdqa {{.*#+}} xmm13 = [85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85] +; SSE2-NEXT: pand %xmm13, %xmm0 ; SSE2-NEXT: paddb %xmm0, %xmm0 -; SSE2-NEXT: por %xmm4, %xmm0 -; SSE2-NEXT: movdqa %xmm1, %xmm4 -; SSE2-NEXT: pand %xmm13, %xmm4 -; SSE2-NEXT: psllw $4, %xmm4 -; SSE2-NEXT: pand %xmm7, %xmm4 -; SSE2-NEXT: pand %xmm7, %xmm1 +; SSE2-NEXT: por %xmm7, %xmm0 +; SSE2-NEXT: movdqa %xmm1, %xmm7 +; SSE2-NEXT: pand %xmm3, %xmm7 +; SSE2-NEXT: psllw $4, %xmm7 +; SSE2-NEXT: movdqa %xmm3, %xmm4 +; SSE2-NEXT: pandn %xmm7, %xmm4 +; SSE2-NEXT: pand %xmm9, %xmm1 ; SSE2-NEXT: psrlw $4, %xmm1 -; SSE2-NEXT: pand %xmm13, %xmm1 +; SSE2-NEXT: pand %xmm3, %xmm1 ; SSE2-NEXT: por %xmm4, %xmm1 ; SSE2-NEXT: movdqa %xmm1, %xmm4 ; SSE2-NEXT: pand %xmm5, %xmm4 ; SSE2-NEXT: psllw $2, %xmm4 ; SSE2-NEXT: pand %xmm8, %xmm4 -; SSE2-NEXT: pand %xmm9, %xmm1 -; SSE2-NEXT: psrlw $2, %xmm1 ; SSE2-NEXT: pand %xmm10, %xmm1 +; SSE2-NEXT: psrlw $2, %xmm1 +; SSE2-NEXT: pand %xmm11, %xmm1 ; SSE2-NEXT: por %xmm4, %xmm1 ; SSE2-NEXT: movdqa %xmm1, %xmm4 ; SSE2-NEXT: pand %xmm6, %xmm4 ; SSE2-NEXT: psrlw $1, %xmm4 -; SSE2-NEXT: pand %xmm11, %xmm4 -; SSE2-NEXT: pand %xmm12, %xmm1 +; SSE2-NEXT: pand %xmm12, %xmm4 +; SSE2-NEXT: pand %xmm13, %xmm1 ; SSE2-NEXT: paddb %xmm1, %xmm1 ; SSE2-NEXT: por %xmm4, %xmm1 ; SSE2-NEXT: movdqa %xmm2, %xmm4 -; SSE2-NEXT: pand %xmm13, %xmm4 +; SSE2-NEXT: pand %xmm3, %xmm4 ; SSE2-NEXT: psllw $4, %xmm4 -; SSE2-NEXT: pand %xmm7, %xmm4 -; SSE2-NEXT: pand %xmm7, %xmm2 +; SSE2-NEXT: movdqa %xmm3, %xmm7 +; SSE2-NEXT: pandn %xmm4, %xmm7 +; SSE2-NEXT: pand %xmm9, %xmm2 ; SSE2-NEXT: psrlw $4, %xmm2 -; SSE2-NEXT: pand %xmm13, %xmm2 -; SSE2-NEXT: por %xmm4, %xmm2 +; SSE2-NEXT: pand %xmm3, %xmm2 +; SSE2-NEXT: por %xmm7, %xmm2 ; SSE2-NEXT: movdqa %xmm2, %xmm4 ; SSE2-NEXT: pand %xmm5, %xmm4 ; SSE2-NEXT: psllw $2, %xmm4 ; SSE2-NEXT: pand %xmm8, %xmm4 -; SSE2-NEXT: pand %xmm9, %xmm2 -; SSE2-NEXT: psrlw $2, %xmm2 ; SSE2-NEXT: pand %xmm10, %xmm2 +; SSE2-NEXT: psrlw $2, %xmm2 +; 
SSE2-NEXT: pand %xmm11, %xmm2 ; SSE2-NEXT: por %xmm4, %xmm2 ; SSE2-NEXT: movdqa %xmm2, %xmm4 ; SSE2-NEXT: pand %xmm6, %xmm4 ; SSE2-NEXT: psrlw $1, %xmm4 -; SSE2-NEXT: pand %xmm11, %xmm4 -; SSE2-NEXT: pand %xmm12, %xmm2 +; SSE2-NEXT: pand %xmm12, %xmm4 +; SSE2-NEXT: pand %xmm13, %xmm2 ; SSE2-NEXT: paddb %xmm2, %xmm2 ; SSE2-NEXT: por %xmm4, %xmm2 -; SSE2-NEXT: movdqa %xmm3, %xmm4 -; SSE2-NEXT: pand %xmm13, %xmm4 +; SSE2-NEXT: movdqa %xmm14, %xmm4 +; SSE2-NEXT: pand %xmm3, %xmm4 ; SSE2-NEXT: psllw $4, %xmm4 -; SSE2-NEXT: pand %xmm7, %xmm4 -; SSE2-NEXT: pand %xmm7, %xmm3 -; SSE2-NEXT: psrlw $4, %xmm3 -; SSE2-NEXT: pand %xmm13, %xmm3 -; SSE2-NEXT: por %xmm4, %xmm3 +; SSE2-NEXT: pand %xmm9, %xmm14 +; SSE2-NEXT: psrlw $4, %xmm14 +; SSE2-NEXT: pand %xmm3, %xmm14 +; SSE2-NEXT: pandn %xmm4, %xmm3 +; SSE2-NEXT: por %xmm14, %xmm3 ; SSE2-NEXT: pand %xmm3, %xmm5 ; SSE2-NEXT: psllw $2, %xmm5 ; SSE2-NEXT: pand %xmm8, %xmm5 -; SSE2-NEXT: pand %xmm9, %xmm3 -; SSE2-NEXT: psrlw $2, %xmm3 ; SSE2-NEXT: pand %xmm10, %xmm3 +; SSE2-NEXT: psrlw $2, %xmm3 +; SSE2-NEXT: pand %xmm11, %xmm3 ; SSE2-NEXT: por %xmm5, %xmm3 ; SSE2-NEXT: pand %xmm3, %xmm6 ; SSE2-NEXT: psrlw $1, %xmm6 -; SSE2-NEXT: pand %xmm11, %xmm6 -; SSE2-NEXT: pand %xmm12, %xmm3 +; SSE2-NEXT: pand %xmm12, %xmm6 +; SSE2-NEXT: pand %xmm13, %xmm3 ; SSE2-NEXT: paddb %xmm3, %xmm3 ; SSE2-NEXT: por %xmm6, %xmm3 ; SSE2-NEXT: retq @@ -1481,25 +1490,27 @@ define <32 x i16> @test_bitreverse_v32i16(<32 x i16> %a) nounwind { ; SSE2-LABEL: test_bitreverse_v32i16: ; SSE2: # %bb.0: +; SSE2-NEXT: movdqa %xmm3, %xmm15 ; SSE2-NEXT: pxor %xmm14, %xmm14 -; SSE2-NEXT: movdqa %xmm0, %xmm4 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm14[8],xmm4[9],xmm14[9],xmm4[10],xmm14[10],xmm4[11],xmm14[11],xmm4[12],xmm14[12],xmm4[13],xmm14[13],xmm4[14],xmm14[14],xmm4[15],xmm14[15] -; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[1,0,3,2,4,5,6,7] -; SSE2-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,4,7,6] +; SSE2-NEXT: movdqa %xmm0, %xmm3 +; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm14[8],xmm3[9],xmm14[9],xmm3[10],xmm14[10],xmm3[11],xmm14[11],xmm3[12],xmm14[12],xmm3[13],xmm14[13],xmm3[14],xmm14[14],xmm3[15],xmm14[15] +; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[1,0,3,2,4,5,6,7] +; SSE2-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,4,7,6] ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm14[0],xmm0[1],xmm14[1],xmm0[2],xmm14[2],xmm0[3],xmm14[3],xmm0[4],xmm14[4],xmm0[5],xmm14[5],xmm0[6],xmm14[6],xmm0[7],xmm14[7] ; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,0,3,2,4,5,6,7] ; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,4,7,6] -; SSE2-NEXT: packuswb %xmm4, %xmm0 -; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; SSE2-NEXT: packuswb %xmm3, %xmm0 +; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] ; SSE2-NEXT: movdqa %xmm0, %xmm5 -; SSE2-NEXT: pand %xmm8, %xmm5 +; SSE2-NEXT: pand %xmm3, %xmm5 ; SSE2-NEXT: psllw $4, %xmm5 -; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] -; SSE2-NEXT: pand %xmm4, %xmm5 -; SSE2-NEXT: pand %xmm4, %xmm0 -; SSE2-NEXT: psrlw $4, %xmm0 +; SSE2-NEXT: movdqa %xmm3, %xmm7 +; SSE2-NEXT: pandn %xmm5, %xmm7 +; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] ; SSE2-NEXT: pand %xmm8, %xmm0 -; SSE2-NEXT: por %xmm5, %xmm0 +; SSE2-NEXT: psrlw $4, %xmm0 +; SSE2-NEXT: pand %xmm3, %xmm0 +; SSE2-NEXT: por %xmm7, %xmm0 ; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51] ; 
SSE2-NEXT: movdqa %xmm0, %xmm7 ; SSE2-NEXT: pand %xmm5, %xmm7 @@ -1531,74 +1542,76 @@ ; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,4,7,6] ; SSE2-NEXT: packuswb %xmm6, %xmm1 ; SSE2-NEXT: movdqa %xmm1, %xmm6 -; SSE2-NEXT: pand %xmm8, %xmm6 +; SSE2-NEXT: pand %xmm3, %xmm6 ; SSE2-NEXT: psllw $4, %xmm6 -; SSE2-NEXT: pand %xmm4, %xmm6 -; SSE2-NEXT: pand %xmm4, %xmm1 -; SSE2-NEXT: psrlw $4, %xmm1 +; SSE2-NEXT: movdqa %xmm3, %xmm4 +; SSE2-NEXT: pandn %xmm6, %xmm4 ; SSE2-NEXT: pand %xmm8, %xmm1 -; SSE2-NEXT: por %xmm6, %xmm1 -; SSE2-NEXT: movdqa %xmm1, %xmm6 -; SSE2-NEXT: pand %xmm5, %xmm6 -; SSE2-NEXT: psllw $2, %xmm6 -; SSE2-NEXT: pand %xmm9, %xmm6 +; SSE2-NEXT: psrlw $4, %xmm1 +; SSE2-NEXT: pand %xmm3, %xmm1 +; SSE2-NEXT: por %xmm4, %xmm1 +; SSE2-NEXT: movdqa %xmm1, %xmm4 +; SSE2-NEXT: pand %xmm5, %xmm4 +; SSE2-NEXT: psllw $2, %xmm4 +; SSE2-NEXT: pand %xmm9, %xmm4 ; SSE2-NEXT: pand %xmm10, %xmm1 ; SSE2-NEXT: psrlw $2, %xmm1 ; SSE2-NEXT: pand %xmm11, %xmm1 -; SSE2-NEXT: por %xmm6, %xmm1 -; SSE2-NEXT: movdqa %xmm1, %xmm6 -; SSE2-NEXT: pand %xmm7, %xmm6 -; SSE2-NEXT: psrlw $1, %xmm6 -; SSE2-NEXT: pand %xmm12, %xmm6 +; SSE2-NEXT: por %xmm4, %xmm1 +; SSE2-NEXT: movdqa %xmm1, %xmm4 +; SSE2-NEXT: pand %xmm7, %xmm4 +; SSE2-NEXT: psrlw $1, %xmm4 +; SSE2-NEXT: pand %xmm12, %xmm4 ; SSE2-NEXT: pand %xmm13, %xmm1 ; SSE2-NEXT: paddb %xmm1, %xmm1 -; SSE2-NEXT: por %xmm6, %xmm1 -; SSE2-NEXT: movdqa %xmm2, %xmm6 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm14[8],xmm6[9],xmm14[9],xmm6[10],xmm14[10],xmm6[11],xmm14[11],xmm6[12],xmm14[12],xmm6[13],xmm14[13],xmm6[14],xmm14[14],xmm6[15],xmm14[15] -; SSE2-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[1,0,3,2,4,5,6,7] -; SSE2-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,5,4,7,6] +; SSE2-NEXT: por %xmm4, %xmm1 +; SSE2-NEXT: movdqa %xmm2, %xmm4 +; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm14[8],xmm4[9],xmm14[9],xmm4[10],xmm14[10],xmm4[11],xmm14[11],xmm4[12],xmm14[12],xmm4[13],xmm14[13],xmm4[14],xmm14[14],xmm4[15],xmm14[15] +; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[1,0,3,2,4,5,6,7] +; SSE2-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,4,7,6] ; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm14[0],xmm2[1],xmm14[1],xmm2[2],xmm14[2],xmm2[3],xmm14[3],xmm2[4],xmm14[4],xmm2[5],xmm14[5],xmm2[6],xmm14[6],xmm2[7],xmm14[7] ; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,0,3,2,4,5,6,7] ; SSE2-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,4,7,6] -; SSE2-NEXT: packuswb %xmm6, %xmm2 -; SSE2-NEXT: movdqa %xmm2, %xmm6 -; SSE2-NEXT: pand %xmm8, %xmm6 -; SSE2-NEXT: psllw $4, %xmm6 -; SSE2-NEXT: pand %xmm4, %xmm6 -; SSE2-NEXT: pand %xmm4, %xmm2 -; SSE2-NEXT: psrlw $4, %xmm2 +; SSE2-NEXT: packuswb %xmm4, %xmm2 +; SSE2-NEXT: movdqa %xmm2, %xmm4 +; SSE2-NEXT: pand %xmm3, %xmm4 +; SSE2-NEXT: psllw $4, %xmm4 +; SSE2-NEXT: movdqa %xmm3, %xmm6 +; SSE2-NEXT: pandn %xmm4, %xmm6 ; SSE2-NEXT: pand %xmm8, %xmm2 +; SSE2-NEXT: psrlw $4, %xmm2 +; SSE2-NEXT: pand %xmm3, %xmm2 ; SSE2-NEXT: por %xmm6, %xmm2 -; SSE2-NEXT: movdqa %xmm2, %xmm6 -; SSE2-NEXT: pand %xmm5, %xmm6 -; SSE2-NEXT: psllw $2, %xmm6 -; SSE2-NEXT: pand %xmm9, %xmm6 +; SSE2-NEXT: movdqa %xmm2, %xmm4 +; SSE2-NEXT: pand %xmm5, %xmm4 +; SSE2-NEXT: psllw $2, %xmm4 +; SSE2-NEXT: pand %xmm9, %xmm4 ; SSE2-NEXT: pand %xmm10, %xmm2 ; SSE2-NEXT: psrlw $2, %xmm2 ; SSE2-NEXT: pand %xmm11, %xmm2 -; SSE2-NEXT: por %xmm6, %xmm2 -; SSE2-NEXT: movdqa %xmm2, %xmm6 -; SSE2-NEXT: pand %xmm7, %xmm6 -; SSE2-NEXT: psrlw $1, %xmm6 -; SSE2-NEXT: pand %xmm12, %xmm6 +; SSE2-NEXT: por %xmm4, %xmm2 +; SSE2-NEXT: movdqa %xmm2, %xmm4 +; SSE2-NEXT: pand %xmm7, %xmm4 +; 
SSE2-NEXT: psrlw $1, %xmm4 +; SSE2-NEXT: pand %xmm12, %xmm4 ; SSE2-NEXT: pand %xmm13, %xmm2 ; SSE2-NEXT: paddb %xmm2, %xmm2 -; SSE2-NEXT: por %xmm6, %xmm2 -; SSE2-NEXT: movdqa %xmm3, %xmm6 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm14[8],xmm6[9],xmm14[9],xmm6[10],xmm14[10],xmm6[11],xmm14[11],xmm6[12],xmm14[12],xmm6[13],xmm14[13],xmm6[14],xmm14[14],xmm6[15],xmm14[15] -; SSE2-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[1,0,3,2,4,5,6,7] +; SSE2-NEXT: por %xmm4, %xmm2 +; SSE2-NEXT: movdqa %xmm15, %xmm4 +; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm14[8],xmm4[9],xmm14[9],xmm4[10],xmm14[10],xmm4[11],xmm14[11],xmm4[12],xmm14[12],xmm4[13],xmm14[13],xmm4[14],xmm14[14],xmm4[15],xmm14[15] +; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[1,0,3,2,4,5,6,7] +; SSE2-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,4,7,6] +; SSE2-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm14[0],xmm15[1],xmm14[1],xmm15[2],xmm14[2],xmm15[3],xmm14[3],xmm15[4],xmm14[4],xmm15[5],xmm14[5],xmm15[6],xmm14[6],xmm15[7],xmm14[7] +; SSE2-NEXT: pshuflw {{.*#+}} xmm6 = xmm15[1,0,3,2,4,5,6,7] ; SSE2-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,5,4,7,6] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm14[0],xmm3[1],xmm14[1],xmm3[2],xmm14[2],xmm3[3],xmm14[3],xmm3[4],xmm14[4],xmm3[5],xmm14[5],xmm3[6],xmm14[6],xmm3[7],xmm14[7] -; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[1,0,3,2,4,5,6,7] -; SSE2-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,4,7,6] -; SSE2-NEXT: packuswb %xmm6, %xmm3 -; SSE2-NEXT: movdqa %xmm3, %xmm6 +; SSE2-NEXT: packuswb %xmm4, %xmm6 +; SSE2-NEXT: movdqa %xmm6, %xmm4 +; SSE2-NEXT: pand %xmm3, %xmm4 +; SSE2-NEXT: psllw $4, %xmm4 ; SSE2-NEXT: pand %xmm8, %xmm6 -; SSE2-NEXT: psllw $4, %xmm6 -; SSE2-NEXT: pand %xmm4, %xmm6 -; SSE2-NEXT: pand %xmm4, %xmm3 -; SSE2-NEXT: psrlw $4, %xmm3 -; SSE2-NEXT: pand %xmm8, %xmm3 +; SSE2-NEXT: psrlw $4, %xmm6 +; SSE2-NEXT: pand %xmm3, %xmm6 +; SSE2-NEXT: pandn %xmm4, %xmm3 ; SSE2-NEXT: por %xmm6, %xmm3 ; SSE2-NEXT: pand %xmm3, %xmm5 ; SSE2-NEXT: psllw $2, %xmm5 @@ -1795,25 +1808,27 @@ define <16 x i32> @test_bitreverse_v16i32(<16 x i32> %a) nounwind { ; SSE2-LABEL: test_bitreverse_v16i32: ; SSE2: # %bb.0: +; SSE2-NEXT: movdqa %xmm3, %xmm15 ; SSE2-NEXT: pxor %xmm14, %xmm14 -; SSE2-NEXT: movdqa %xmm0, %xmm4 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm14[8],xmm4[9],xmm14[9],xmm4[10],xmm14[10],xmm4[11],xmm14[11],xmm4[12],xmm14[12],xmm4[13],xmm14[13],xmm4[14],xmm14[14],xmm4[15],xmm14[15] -; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[3,2,1,0,4,5,6,7] -; SSE2-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,7,6,5,4] +; SSE2-NEXT: movdqa %xmm0, %xmm3 +; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm14[8],xmm3[9],xmm14[9],xmm3[10],xmm14[10],xmm3[11],xmm14[11],xmm3[12],xmm14[12],xmm3[13],xmm14[13],xmm3[14],xmm14[14],xmm3[15],xmm14[15] +; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[3,2,1,0,4,5,6,7] +; SSE2-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,6,5,4] ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm14[0],xmm0[1],xmm14[1],xmm0[2],xmm14[2],xmm0[3],xmm14[3],xmm0[4],xmm14[4],xmm0[5],xmm14[5],xmm0[6],xmm14[6],xmm0[7],xmm14[7] ; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7] ; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4] -; SSE2-NEXT: packuswb %xmm4, %xmm0 -; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; SSE2-NEXT: packuswb %xmm3, %xmm0 +; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] ; SSE2-NEXT: movdqa %xmm0, %xmm5 -; SSE2-NEXT: pand %xmm8, %xmm5 +; SSE2-NEXT: pand %xmm3, %xmm5 ; SSE2-NEXT: psllw $4, %xmm5 -; 
SSE2-NEXT: movdqa {{.*#+}} xmm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] -; SSE2-NEXT: pand %xmm4, %xmm5 -; SSE2-NEXT: pand %xmm4, %xmm0 -; SSE2-NEXT: psrlw $4, %xmm0 +; SSE2-NEXT: movdqa %xmm3, %xmm7 +; SSE2-NEXT: pandn %xmm5, %xmm7 +; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] ; SSE2-NEXT: pand %xmm8, %xmm0 -; SSE2-NEXT: por %xmm5, %xmm0 +; SSE2-NEXT: psrlw $4, %xmm0 +; SSE2-NEXT: pand %xmm3, %xmm0 +; SSE2-NEXT: por %xmm7, %xmm0 ; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51] ; SSE2-NEXT: movdqa %xmm0, %xmm7 ; SSE2-NEXT: pand %xmm5, %xmm7 @@ -1845,74 +1860,76 @@ ; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,6,5,4] ; SSE2-NEXT: packuswb %xmm6, %xmm1 ; SSE2-NEXT: movdqa %xmm1, %xmm6 -; SSE2-NEXT: pand %xmm8, %xmm6 +; SSE2-NEXT: pand %xmm3, %xmm6 ; SSE2-NEXT: psllw $4, %xmm6 -; SSE2-NEXT: pand %xmm4, %xmm6 -; SSE2-NEXT: pand %xmm4, %xmm1 -; SSE2-NEXT: psrlw $4, %xmm1 +; SSE2-NEXT: movdqa %xmm3, %xmm4 +; SSE2-NEXT: pandn %xmm6, %xmm4 ; SSE2-NEXT: pand %xmm8, %xmm1 -; SSE2-NEXT: por %xmm6, %xmm1 -; SSE2-NEXT: movdqa %xmm1, %xmm6 -; SSE2-NEXT: pand %xmm5, %xmm6 -; SSE2-NEXT: psllw $2, %xmm6 -; SSE2-NEXT: pand %xmm9, %xmm6 +; SSE2-NEXT: psrlw $4, %xmm1 +; SSE2-NEXT: pand %xmm3, %xmm1 +; SSE2-NEXT: por %xmm4, %xmm1 +; SSE2-NEXT: movdqa %xmm1, %xmm4 +; SSE2-NEXT: pand %xmm5, %xmm4 +; SSE2-NEXT: psllw $2, %xmm4 +; SSE2-NEXT: pand %xmm9, %xmm4 ; SSE2-NEXT: pand %xmm10, %xmm1 ; SSE2-NEXT: psrlw $2, %xmm1 ; SSE2-NEXT: pand %xmm11, %xmm1 -; SSE2-NEXT: por %xmm6, %xmm1 -; SSE2-NEXT: movdqa %xmm1, %xmm6 -; SSE2-NEXT: pand %xmm7, %xmm6 -; SSE2-NEXT: psrlw $1, %xmm6 -; SSE2-NEXT: pand %xmm12, %xmm6 +; SSE2-NEXT: por %xmm4, %xmm1 +; SSE2-NEXT: movdqa %xmm1, %xmm4 +; SSE2-NEXT: pand %xmm7, %xmm4 +; SSE2-NEXT: psrlw $1, %xmm4 +; SSE2-NEXT: pand %xmm12, %xmm4 ; SSE2-NEXT: pand %xmm13, %xmm1 ; SSE2-NEXT: paddb %xmm1, %xmm1 -; SSE2-NEXT: por %xmm6, %xmm1 -; SSE2-NEXT: movdqa %xmm2, %xmm6 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm14[8],xmm6[9],xmm14[9],xmm6[10],xmm14[10],xmm6[11],xmm14[11],xmm6[12],xmm14[12],xmm6[13],xmm14[13],xmm6[14],xmm14[14],xmm6[15],xmm14[15] -; SSE2-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[3,2,1,0,4,5,6,7] -; SSE2-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,7,6,5,4] +; SSE2-NEXT: por %xmm4, %xmm1 +; SSE2-NEXT: movdqa %xmm2, %xmm4 +; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm14[8],xmm4[9],xmm14[9],xmm4[10],xmm14[10],xmm4[11],xmm14[11],xmm4[12],xmm14[12],xmm4[13],xmm14[13],xmm4[14],xmm14[14],xmm4[15],xmm14[15] +; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[3,2,1,0,4,5,6,7] +; SSE2-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,7,6,5,4] ; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm14[0],xmm2[1],xmm14[1],xmm2[2],xmm14[2],xmm2[3],xmm14[3],xmm2[4],xmm14[4],xmm2[5],xmm14[5],xmm2[6],xmm14[6],xmm2[7],xmm14[7] ; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,2,1,0,4,5,6,7] ; SSE2-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,6,5,4] -; SSE2-NEXT: packuswb %xmm6, %xmm2 -; SSE2-NEXT: movdqa %xmm2, %xmm6 -; SSE2-NEXT: pand %xmm8, %xmm6 -; SSE2-NEXT: psllw $4, %xmm6 -; SSE2-NEXT: pand %xmm4, %xmm6 -; SSE2-NEXT: pand %xmm4, %xmm2 -; SSE2-NEXT: psrlw $4, %xmm2 +; SSE2-NEXT: packuswb %xmm4, %xmm2 +; SSE2-NEXT: movdqa %xmm2, %xmm4 +; SSE2-NEXT: pand %xmm3, %xmm4 +; SSE2-NEXT: psllw $4, %xmm4 +; SSE2-NEXT: movdqa %xmm3, %xmm6 +; SSE2-NEXT: pandn %xmm4, %xmm6 ; SSE2-NEXT: pand %xmm8, %xmm2 +; SSE2-NEXT: psrlw $4, %xmm2 +; SSE2-NEXT: pand %xmm3, %xmm2 ; SSE2-NEXT: por %xmm6, %xmm2 -; 
SSE2-NEXT: movdqa %xmm2, %xmm6 -; SSE2-NEXT: pand %xmm5, %xmm6 -; SSE2-NEXT: psllw $2, %xmm6 -; SSE2-NEXT: pand %xmm9, %xmm6 +; SSE2-NEXT: movdqa %xmm2, %xmm4 +; SSE2-NEXT: pand %xmm5, %xmm4 +; SSE2-NEXT: psllw $2, %xmm4 +; SSE2-NEXT: pand %xmm9, %xmm4 ; SSE2-NEXT: pand %xmm10, %xmm2 ; SSE2-NEXT: psrlw $2, %xmm2 ; SSE2-NEXT: pand %xmm11, %xmm2 -; SSE2-NEXT: por %xmm6, %xmm2 -; SSE2-NEXT: movdqa %xmm2, %xmm6 -; SSE2-NEXT: pand %xmm7, %xmm6 -; SSE2-NEXT: psrlw $1, %xmm6 -; SSE2-NEXT: pand %xmm12, %xmm6 +; SSE2-NEXT: por %xmm4, %xmm2 +; SSE2-NEXT: movdqa %xmm2, %xmm4 +; SSE2-NEXT: pand %xmm7, %xmm4 +; SSE2-NEXT: psrlw $1, %xmm4 +; SSE2-NEXT: pand %xmm12, %xmm4 ; SSE2-NEXT: pand %xmm13, %xmm2 ; SSE2-NEXT: paddb %xmm2, %xmm2 -; SSE2-NEXT: por %xmm6, %xmm2 -; SSE2-NEXT: movdqa %xmm3, %xmm6 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm14[8],xmm6[9],xmm14[9],xmm6[10],xmm14[10],xmm6[11],xmm14[11],xmm6[12],xmm14[12],xmm6[13],xmm14[13],xmm6[14],xmm14[14],xmm6[15],xmm14[15] -; SSE2-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[3,2,1,0,4,5,6,7] +; SSE2-NEXT: por %xmm4, %xmm2 +; SSE2-NEXT: movdqa %xmm15, %xmm4 +; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm14[8],xmm4[9],xmm14[9],xmm4[10],xmm14[10],xmm4[11],xmm14[11],xmm4[12],xmm14[12],xmm4[13],xmm14[13],xmm4[14],xmm14[14],xmm4[15],xmm14[15] +; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[3,2,1,0,4,5,6,7] +; SSE2-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,7,6,5,4] +; SSE2-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm14[0],xmm15[1],xmm14[1],xmm15[2],xmm14[2],xmm15[3],xmm14[3],xmm15[4],xmm14[4],xmm15[5],xmm14[5],xmm15[6],xmm14[6],xmm15[7],xmm14[7] +; SSE2-NEXT: pshuflw {{.*#+}} xmm6 = xmm15[3,2,1,0,4,5,6,7] ; SSE2-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,7,6,5,4] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm14[0],xmm3[1],xmm14[1],xmm3[2],xmm14[2],xmm3[3],xmm14[3],xmm3[4],xmm14[4],xmm3[5],xmm14[5],xmm3[6],xmm14[6],xmm3[7],xmm14[7] -; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[3,2,1,0,4,5,6,7] -; SSE2-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,6,5,4] -; SSE2-NEXT: packuswb %xmm6, %xmm3 -; SSE2-NEXT: movdqa %xmm3, %xmm6 +; SSE2-NEXT: packuswb %xmm4, %xmm6 +; SSE2-NEXT: movdqa %xmm6, %xmm4 +; SSE2-NEXT: pand %xmm3, %xmm4 +; SSE2-NEXT: psllw $4, %xmm4 ; SSE2-NEXT: pand %xmm8, %xmm6 -; SSE2-NEXT: psllw $4, %xmm6 -; SSE2-NEXT: pand %xmm4, %xmm6 -; SSE2-NEXT: pand %xmm4, %xmm3 -; SSE2-NEXT: psrlw $4, %xmm3 -; SSE2-NEXT: pand %xmm8, %xmm3 +; SSE2-NEXT: psrlw $4, %xmm6 +; SSE2-NEXT: pand %xmm3, %xmm6 +; SSE2-NEXT: pandn %xmm4, %xmm3 ; SSE2-NEXT: por %xmm6, %xmm3 ; SSE2-NEXT: pand %xmm3, %xmm5 ; SSE2-NEXT: psllw $2, %xmm5 @@ -2115,27 +2132,29 @@ define <8 x i64> @test_bitreverse_v8i64(<8 x i64> %a) nounwind { ; SSE2-LABEL: test_bitreverse_v8i64: ; SSE2: # %bb.0: +; SSE2-NEXT: movdqa %xmm3, %xmm15 ; SSE2-NEXT: pxor %xmm14, %xmm14 -; SSE2-NEXT: movdqa %xmm0, %xmm4 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm14[8],xmm4[9],xmm14[9],xmm4[10],xmm14[10],xmm4[11],xmm14[11],xmm4[12],xmm14[12],xmm4[13],xmm14[13],xmm4[14],xmm14[14],xmm4[15],xmm14[15] -; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[2,3,0,1] -; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[3,2,1,0,4,5,6,7] -; SSE2-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,7,6,5,4] +; SSE2-NEXT: movdqa %xmm0, %xmm3 +; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm14[8],xmm3[9],xmm14[9],xmm3[10],xmm14[10],xmm3[11],xmm14[11],xmm3[12],xmm14[12],xmm3[13],xmm14[13],xmm3[14],xmm14[14],xmm3[15],xmm14[15] +; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,3,0,1] +; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[3,2,1,0,4,5,6,7] +; SSE2-NEXT: pshufhw 
{{.*#+}} xmm3 = xmm3[0,1,2,3,7,6,5,4] ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm14[0],xmm0[1],xmm14[1],xmm0[2],xmm14[2],xmm0[3],xmm14[3],xmm0[4],xmm14[4],xmm0[5],xmm14[5],xmm0[6],xmm14[6],xmm0[7],xmm14[7] ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] ; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7] ; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4] -; SSE2-NEXT: packuswb %xmm4, %xmm0 -; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] +; SSE2-NEXT: packuswb %xmm3, %xmm0 +; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] ; SSE2-NEXT: movdqa %xmm0, %xmm5 -; SSE2-NEXT: pand %xmm8, %xmm5 +; SSE2-NEXT: pand %xmm3, %xmm5 ; SSE2-NEXT: psllw $4, %xmm5 -; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] -; SSE2-NEXT: pand %xmm4, %xmm5 -; SSE2-NEXT: pand %xmm4, %xmm0 -; SSE2-NEXT: psrlw $4, %xmm0 +; SSE2-NEXT: movdqa %xmm3, %xmm7 +; SSE2-NEXT: pandn %xmm5, %xmm7 +; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] ; SSE2-NEXT: pand %xmm8, %xmm0 -; SSE2-NEXT: por %xmm5, %xmm0 +; SSE2-NEXT: psrlw $4, %xmm0 +; SSE2-NEXT: pand %xmm3, %xmm0 +; SSE2-NEXT: por %xmm7, %xmm0 ; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51] ; SSE2-NEXT: movdqa %xmm0, %xmm7 ; SSE2-NEXT: pand %xmm5, %xmm7 @@ -2169,78 +2188,80 @@ ; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,6,5,4] ; SSE2-NEXT: packuswb %xmm6, %xmm1 ; SSE2-NEXT: movdqa %xmm1, %xmm6 -; SSE2-NEXT: pand %xmm8, %xmm6 +; SSE2-NEXT: pand %xmm3, %xmm6 ; SSE2-NEXT: psllw $4, %xmm6 -; SSE2-NEXT: pand %xmm4, %xmm6 -; SSE2-NEXT: pand %xmm4, %xmm1 -; SSE2-NEXT: psrlw $4, %xmm1 +; SSE2-NEXT: movdqa %xmm3, %xmm4 +; SSE2-NEXT: pandn %xmm6, %xmm4 ; SSE2-NEXT: pand %xmm8, %xmm1 -; SSE2-NEXT: por %xmm6, %xmm1 -; SSE2-NEXT: movdqa %xmm1, %xmm6 -; SSE2-NEXT: pand %xmm5, %xmm6 -; SSE2-NEXT: psllw $2, %xmm6 -; SSE2-NEXT: pand %xmm9, %xmm6 +; SSE2-NEXT: psrlw $4, %xmm1 +; SSE2-NEXT: pand %xmm3, %xmm1 +; SSE2-NEXT: por %xmm4, %xmm1 +; SSE2-NEXT: movdqa %xmm1, %xmm4 +; SSE2-NEXT: pand %xmm5, %xmm4 +; SSE2-NEXT: psllw $2, %xmm4 +; SSE2-NEXT: pand %xmm9, %xmm4 ; SSE2-NEXT: pand %xmm10, %xmm1 ; SSE2-NEXT: psrlw $2, %xmm1 ; SSE2-NEXT: pand %xmm11, %xmm1 -; SSE2-NEXT: por %xmm6, %xmm1 -; SSE2-NEXT: movdqa %xmm1, %xmm6 -; SSE2-NEXT: pand %xmm7, %xmm6 -; SSE2-NEXT: psrlw $1, %xmm6 -; SSE2-NEXT: pand %xmm12, %xmm6 +; SSE2-NEXT: por %xmm4, %xmm1 +; SSE2-NEXT: movdqa %xmm1, %xmm4 +; SSE2-NEXT: pand %xmm7, %xmm4 +; SSE2-NEXT: psrlw $1, %xmm4 +; SSE2-NEXT: pand %xmm12, %xmm4 ; SSE2-NEXT: pand %xmm13, %xmm1 ; SSE2-NEXT: paddb %xmm1, %xmm1 -; SSE2-NEXT: por %xmm6, %xmm1 -; SSE2-NEXT: movdqa %xmm2, %xmm6 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm14[8],xmm6[9],xmm14[9],xmm6[10],xmm14[10],xmm6[11],xmm14[11],xmm6[12],xmm14[12],xmm6[13],xmm14[13],xmm6[14],xmm14[14],xmm6[15],xmm14[15] -; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm6[2,3,0,1] -; SSE2-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[3,2,1,0,4,5,6,7] -; SSE2-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,7,6,5,4] +; SSE2-NEXT: por %xmm4, %xmm1 +; SSE2-NEXT: movdqa %xmm2, %xmm4 +; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm14[8],xmm4[9],xmm14[9],xmm4[10],xmm14[10],xmm4[11],xmm14[11],xmm4[12],xmm14[12],xmm4[13],xmm14[13],xmm4[14],xmm14[14],xmm4[15],xmm14[15] +; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[2,3,0,1] +; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[3,2,1,0,4,5,6,7] +; SSE2-NEXT: pshufhw {{.*#+}} xmm4 = 
xmm4[0,1,2,3,7,6,5,4] ; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm14[0],xmm2[1],xmm14[1],xmm2[2],xmm14[2],xmm2[3],xmm14[3],xmm2[4],xmm14[4],xmm2[5],xmm14[5],xmm2[6],xmm14[6],xmm2[7],xmm14[7] ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1] ; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,2,1,0,4,5,6,7] ; SSE2-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,6,5,4] -; SSE2-NEXT: packuswb %xmm6, %xmm2 -; SSE2-NEXT: movdqa %xmm2, %xmm6 -; SSE2-NEXT: pand %xmm8, %xmm6 -; SSE2-NEXT: psllw $4, %xmm6 -; SSE2-NEXT: pand %xmm4, %xmm6 -; SSE2-NEXT: pand %xmm4, %xmm2 -; SSE2-NEXT: psrlw $4, %xmm2 +; SSE2-NEXT: packuswb %xmm4, %xmm2 +; SSE2-NEXT: movdqa %xmm2, %xmm4 +; SSE2-NEXT: pand %xmm3, %xmm4 +; SSE2-NEXT: psllw $4, %xmm4 +; SSE2-NEXT: movdqa %xmm3, %xmm6 +; SSE2-NEXT: pandn %xmm4, %xmm6 ; SSE2-NEXT: pand %xmm8, %xmm2 +; SSE2-NEXT: psrlw $4, %xmm2 +; SSE2-NEXT: pand %xmm3, %xmm2 ; SSE2-NEXT: por %xmm6, %xmm2 -; SSE2-NEXT: movdqa %xmm2, %xmm6 -; SSE2-NEXT: pand %xmm5, %xmm6 -; SSE2-NEXT: psllw $2, %xmm6 -; SSE2-NEXT: pand %xmm9, %xmm6 +; SSE2-NEXT: movdqa %xmm2, %xmm4 +; SSE2-NEXT: pand %xmm5, %xmm4 +; SSE2-NEXT: psllw $2, %xmm4 +; SSE2-NEXT: pand %xmm9, %xmm4 ; SSE2-NEXT: pand %xmm10, %xmm2 ; SSE2-NEXT: psrlw $2, %xmm2 ; SSE2-NEXT: pand %xmm11, %xmm2 -; SSE2-NEXT: por %xmm6, %xmm2 -; SSE2-NEXT: movdqa %xmm2, %xmm6 -; SSE2-NEXT: pand %xmm7, %xmm6 -; SSE2-NEXT: psrlw $1, %xmm6 -; SSE2-NEXT: pand %xmm12, %xmm6 +; SSE2-NEXT: por %xmm4, %xmm2 +; SSE2-NEXT: movdqa %xmm2, %xmm4 +; SSE2-NEXT: pand %xmm7, %xmm4 +; SSE2-NEXT: psrlw $1, %xmm4 +; SSE2-NEXT: pand %xmm12, %xmm4 ; SSE2-NEXT: pand %xmm13, %xmm2 ; SSE2-NEXT: paddb %xmm2, %xmm2 -; SSE2-NEXT: por %xmm6, %xmm2 -; SSE2-NEXT: movdqa %xmm3, %xmm6 -; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm14[8],xmm6[9],xmm14[9],xmm6[10],xmm14[10],xmm6[11],xmm14[11],xmm6[12],xmm14[12],xmm6[13],xmm14[13],xmm6[14],xmm14[14],xmm6[15],xmm14[15] -; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm6[2,3,0,1] +; SSE2-NEXT: por %xmm4, %xmm2 +; SSE2-NEXT: movdqa %xmm15, %xmm4 +; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm14[8],xmm4[9],xmm14[9],xmm4[10],xmm14[10],xmm4[11],xmm14[11],xmm4[12],xmm14[12],xmm4[13],xmm14[13],xmm4[14],xmm14[14],xmm4[15],xmm14[15] +; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm4[2,3,0,1] +; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[3,2,1,0,4,5,6,7] +; SSE2-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,7,6,5,4] +; SSE2-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm14[0],xmm15[1],xmm14[1],xmm15[2],xmm14[2],xmm15[3],xmm14[3],xmm15[4],xmm14[4],xmm15[5],xmm14[5],xmm15[6],xmm14[6],xmm15[7],xmm14[7] +; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm15[2,3,0,1] ; SSE2-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[3,2,1,0,4,5,6,7] ; SSE2-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,7,6,5,4] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm14[0],xmm3[1],xmm14[1],xmm3[2],xmm14[2],xmm3[3],xmm14[3],xmm3[4],xmm14[4],xmm3[5],xmm14[5],xmm3[6],xmm14[6],xmm3[7],xmm14[7] -; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,3,0,1] -; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[3,2,1,0,4,5,6,7] -; SSE2-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,6,5,4] -; SSE2-NEXT: packuswb %xmm6, %xmm3 -; SSE2-NEXT: movdqa %xmm3, %xmm6 +; SSE2-NEXT: packuswb %xmm4, %xmm6 +; SSE2-NEXT: movdqa %xmm6, %xmm4 +; SSE2-NEXT: pand %xmm3, %xmm4 +; SSE2-NEXT: psllw $4, %xmm4 ; SSE2-NEXT: pand %xmm8, %xmm6 -; SSE2-NEXT: psllw $4, %xmm6 -; SSE2-NEXT: pand %xmm4, %xmm6 -; SSE2-NEXT: pand %xmm4, %xmm3 -; SSE2-NEXT: psrlw $4, %xmm3 -; SSE2-NEXT: pand %xmm8, %xmm3 +; SSE2-NEXT: psrlw $4, %xmm6 +; SSE2-NEXT: pand %xmm3, %xmm6 +; SSE2-NEXT: pandn %xmm4, 
%xmm3 ; SSE2-NEXT: por %xmm6, %xmm3 ; SSE2-NEXT: pand %xmm3, %xmm5 ; SSE2-NEXT: psllw $2, %xmm5 Index: test/CodeGen/X86/vector-fshl-128.ll =================================================================== --- test/CodeGen/X86/vector-fshl-128.ll +++ test/CodeGen/X86/vector-fshl-128.ll @@ -3007,27 +3007,30 @@ ; SSE-LABEL: splatconstant_funnnel_v16i8: ; SSE: # %bb.0: ; SSE-NEXT: psrlw $4, %xmm1 -; SSE-NEXT: pand {{.*}}(%rip), %xmm1 +; SSE-NEXT: movdqa {{.*#+}} xmm2 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] ; SSE-NEXT: psllw $4, %xmm0 -; SSE-NEXT: pand {{.*}}(%rip), %xmm0 -; SSE-NEXT: por %xmm1, %xmm0 +; SSE-NEXT: pand %xmm2, %xmm0 +; SSE-NEXT: pandn %xmm1, %xmm2 +; SSE-NEXT: por %xmm2, %xmm0 ; SSE-NEXT: retq ; ; AVX-LABEL: splatconstant_funnnel_v16i8: ; AVX: # %bb.0: ; AVX-NEXT: vpsrlw $4, %xmm1, %xmm1 -; AVX-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1 +; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; AVX-NEXT: vpandn %xmm1, %xmm2, %xmm1 ; AVX-NEXT: vpsllw $4, %xmm0, %xmm0 -; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0 +; AVX-NEXT: vpand %xmm2, %xmm0, %xmm0 ; AVX-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX-NEXT: retq ; ; AVX512-LABEL: splatconstant_funnnel_v16i8: ; AVX512: # %bb.0: ; AVX512-NEXT: vpsrlw $4, %xmm1, %xmm1 -; AVX512-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1 +; AVX512-NEXT: vmovdqa {{.*#+}} xmm2 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; AVX512-NEXT: vpandn %xmm1, %xmm2, %xmm1 ; AVX512-NEXT: vpsllw $4, %xmm0, %xmm0 -; AVX512-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0 +; AVX512-NEXT: vpand %xmm2, %xmm0, %xmm0 ; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: retq ; @@ -3041,10 +3044,11 @@ ; X32-SSE-LABEL: splatconstant_funnnel_v16i8: ; X32-SSE: # %bb.0: ; X32-SSE-NEXT: psrlw $4, %xmm1 -; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm1 +; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] ; X32-SSE-NEXT: psllw $4, %xmm0 -; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0 -; X32-SSE-NEXT: por %xmm1, %xmm0 +; X32-SSE-NEXT: pand %xmm2, %xmm0 +; X32-SSE-NEXT: pandn %xmm1, %xmm2 +; X32-SSE-NEXT: por %xmm2, %xmm0 ; X32-SSE-NEXT: retl %res = call <16 x i8> @llvm.fshl.v16i8(<16 x i8> %x, <16 x i8> %y, <16 x i8> ) ret <16 x i8> %res Index: test/CodeGen/X86/vector-fshl-256.ll =================================================================== --- test/CodeGen/X86/vector-fshl-256.ll +++ test/CodeGen/X86/vector-fshl-256.ll @@ -2523,18 +2523,20 @@ ; AVX2-LABEL: splatconstant_funnnel_v32i8: ; AVX2: # %bb.0: ; AVX2-NEXT: vpsrlw $4, %ymm1, %ymm1 -; AVX2-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1 +; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; AVX2-NEXT: vpandn %ymm1, %ymm2, %ymm1 ; AVX2-NEXT: vpsllw $4, %ymm0, %ymm0 -; AVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0 +; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0 ; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0 ; AVX2-NEXT: retq ; ; AVX512-LABEL: splatconstant_funnnel_v32i8: ; AVX512: # %bb.0: ; AVX512-NEXT: vpsrlw $4, %ymm1, %ymm1 -; AVX512-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1 +; AVX512-NEXT: vmovdqa {{.*#+}} ymm2 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; AVX512-NEXT: vpandn %ymm1, %ymm2, %ymm1 ; AVX512-NEXT: vpsllw $4, %ymm0, %ymm0 -; AVX512-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0 +; AVX512-NEXT: vpand %ymm2, %ymm0, %ymm0 ; 
AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0 ; AVX512-NEXT: retq ; @@ -2556,10 +2558,8 @@ ; XOPAVX2-LABEL: splatconstant_funnnel_v32i8: ; XOPAVX2: # %bb.0: ; XOPAVX2-NEXT: vpsrlw $4, %ymm1, %ymm1 -; XOPAVX2-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1 ; XOPAVX2-NEXT: vpsllw $4, %ymm0, %ymm0 -; XOPAVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0 -; XOPAVX2-NEXT: vpor %ymm1, %ymm0, %ymm0 +; XOPAVX2-NEXT: vpcmov {{.*}}(%rip), %ymm1, %ymm0, %ymm0 ; XOPAVX2-NEXT: retq %res = call <32 x i8> @llvm.fshl.v32i8(<32 x i8> %x, <32 x i8> %y, <32 x i8> ) ret <32 x i8> %res Index: test/CodeGen/X86/vector-fshl-512.ll =================================================================== --- test/CodeGen/X86/vector-fshl-512.ll +++ test/CodeGen/X86/vector-fshl-512.ll @@ -1504,68 +1504,70 @@ ; AVX512F-LABEL: splatconstant_funnnel_v64i8: ; AVX512F: # %bb.0: ; AVX512F-NEXT: vpsrlw $4, %ymm2, %ymm2 -; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] -; AVX512F-NEXT: vpand %ymm4, %ymm2, %ymm2 +; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; AVX512F-NEXT: vpandn %ymm2, %ymm4, %ymm2 ; AVX512F-NEXT: vpsllw $4, %ymm0, %ymm0 -; AVX512F-NEXT: vmovdqa {{.*#+}} ymm5 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] -; AVX512F-NEXT: vpand %ymm5, %ymm0, %ymm0 +; AVX512F-NEXT: vpand %ymm4, %ymm0, %ymm0 ; AVX512F-NEXT: vpor %ymm2, %ymm0, %ymm0 ; AVX512F-NEXT: vpsrlw $4, %ymm3, %ymm2 -; AVX512F-NEXT: vpand %ymm4, %ymm2, %ymm2 +; AVX512F-NEXT: vpandn %ymm2, %ymm4, %ymm2 ; AVX512F-NEXT: vpsllw $4, %ymm1, %ymm1 -; AVX512F-NEXT: vpand %ymm5, %ymm1, %ymm1 +; AVX512F-NEXT: vpand %ymm4, %ymm1, %ymm1 ; AVX512F-NEXT: vpor %ymm2, %ymm1, %ymm1 ; AVX512F-NEXT: retq ; ; AVX512VL-LABEL: splatconstant_funnnel_v64i8: ; AVX512VL: # %bb.0: ; AVX512VL-NEXT: vpsrlw $4, %ymm2, %ymm2 -; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] -; AVX512VL-NEXT: vpand %ymm4, %ymm2, %ymm2 +; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; AVX512VL-NEXT: vpandn %ymm2, %ymm4, %ymm2 ; AVX512VL-NEXT: vpsllw $4, %ymm0, %ymm0 -; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm5 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] -; AVX512VL-NEXT: vpand %ymm5, %ymm0, %ymm0 +; AVX512VL-NEXT: vpand %ymm4, %ymm0, %ymm0 ; AVX512VL-NEXT: vpor %ymm2, %ymm0, %ymm0 ; AVX512VL-NEXT: vpsrlw $4, %ymm3, %ymm2 -; AVX512VL-NEXT: vpand %ymm4, %ymm2, %ymm2 +; AVX512VL-NEXT: vpandn %ymm2, %ymm4, %ymm2 ; AVX512VL-NEXT: vpsllw $4, %ymm1, %ymm1 -; AVX512VL-NEXT: vpand %ymm5, %ymm1, %ymm1 +; AVX512VL-NEXT: vpand %ymm4, %ymm1, %ymm1 ; AVX512VL-NEXT: vpor %ymm2, %ymm1, %ymm1 ; AVX512VL-NEXT: retq ; ; AVX512BW-LABEL: splatconstant_funnnel_v64i8: ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: vpsrlw $4, %zmm1, %zmm1 -; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm1, %zmm1 +; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = 
[240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; AVX512BW-NEXT: vpandnq %zmm1, %zmm2, %zmm1 ; AVX512BW-NEXT: vpsllw $4, %zmm0, %zmm0 -; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm0, %zmm0 +; AVX512BW-NEXT: vpandq %zmm2, %zmm0, %zmm0 ; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 ; AVX512BW-NEXT: retq ; ; AVX512VBMI2-LABEL: splatconstant_funnnel_v64i8: ; AVX512VBMI2: # %bb.0: ; AVX512VBMI2-NEXT: vpsrlw $4, %zmm1, %zmm1 -; AVX512VBMI2-NEXT: vpandq {{.*}}(%rip), %zmm1, %zmm1 +; AVX512VBMI2-NEXT: vmovdqa64 {{.*#+}} zmm2 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; AVX512VBMI2-NEXT: vpandnq %zmm1, %zmm2, %zmm1 ; AVX512VBMI2-NEXT: vpsllw $4, %zmm0, %zmm0 -; AVX512VBMI2-NEXT: vpandq {{.*}}(%rip), %zmm0, %zmm0 +; AVX512VBMI2-NEXT: vpandq %zmm2, %zmm0, %zmm0 ; AVX512VBMI2-NEXT: vporq %zmm1, %zmm0, %zmm0 ; AVX512VBMI2-NEXT: retq ; ; AVX512VLBW-LABEL: splatconstant_funnnel_v64i8: ; AVX512VLBW: # %bb.0: ; AVX512VLBW-NEXT: vpsrlw $4, %zmm1, %zmm1 -; AVX512VLBW-NEXT: vpandq {{.*}}(%rip), %zmm1, %zmm1 +; AVX512VLBW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; AVX512VLBW-NEXT: vpandnq %zmm1, %zmm2, %zmm1 ; AVX512VLBW-NEXT: vpsllw $4, %zmm0, %zmm0 -; AVX512VLBW-NEXT: vpandq {{.*}}(%rip), %zmm0, %zmm0 +; AVX512VLBW-NEXT: vpandq %zmm2, %zmm0, %zmm0 ; AVX512VLBW-NEXT: vporq %zmm1, %zmm0, %zmm0 ; AVX512VLBW-NEXT: retq ; ; AVX512VLVBMI2-LABEL: splatconstant_funnnel_v64i8: ; AVX512VLVBMI2: # %bb.0: ; AVX512VLVBMI2-NEXT: vpsrlw $4, %zmm1, %zmm1 -; AVX512VLVBMI2-NEXT: vpandq {{.*}}(%rip), %zmm1, %zmm1 +; AVX512VLVBMI2-NEXT: vmovdqa64 {{.*#+}} zmm2 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; AVX512VLVBMI2-NEXT: vpandnq %zmm1, %zmm2, %zmm1 ; AVX512VLVBMI2-NEXT: vpsllw $4, %zmm0, %zmm0 -; AVX512VLVBMI2-NEXT: vpandq {{.*}}(%rip), %zmm0, %zmm0 +; AVX512VLVBMI2-NEXT: vpandq %zmm2, %zmm0, %zmm0 ; AVX512VLVBMI2-NEXT: vporq %zmm1, %zmm0, %zmm0 ; AVX512VLVBMI2-NEXT: retq %res = call <64 x i8> @llvm.fshl.v64i8(<64 x i8> %x, <64 x i8> %y, <64 x i8> ) Index: test/CodeGen/X86/vector-fshl-rot-128.ll =================================================================== --- test/CodeGen/X86/vector-fshl-rot-128.ll +++ test/CodeGen/X86/vector-fshl-rot-128.ll @@ -448,33 +448,34 @@ ; SSE2-LABEL: var_funnnel_v16i8: ; SSE2: # %bb.0: ; SSE2-NEXT: movdqa %xmm0, %xmm2 +; SSE2-NEXT: psrlw $4, %xmm0 +; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; SSE2-NEXT: movdqa %xmm2, %xmm4 +; SSE2-NEXT: psllw $4, %xmm4 +; SSE2-NEXT: pand %xmm3, %xmm4 +; SSE2-NEXT: pandn %xmm0, %xmm3 +; SSE2-NEXT: por %xmm4, %xmm3 ; SSE2-NEXT: psllw $5, %xmm1 ; SSE2-NEXT: pxor %xmm0, %xmm0 -; SSE2-NEXT: pxor %xmm3, %xmm3 -; SSE2-NEXT: pcmpgtb 
%xmm1, %xmm3 -; SSE2-NEXT: movdqa %xmm2, %xmm4 -; SSE2-NEXT: psrlw $4, %xmm4 -; SSE2-NEXT: pand {{.*}}(%rip), %xmm4 -; SSE2-NEXT: movdqa %xmm2, %xmm5 -; SSE2-NEXT: psllw $4, %xmm5 -; SSE2-NEXT: pand {{.*}}(%rip), %xmm5 -; SSE2-NEXT: por %xmm4, %xmm5 +; SSE2-NEXT: pxor %xmm4, %xmm4 +; SSE2-NEXT: pcmpgtb %xmm1, %xmm4 +; SSE2-NEXT: pand %xmm4, %xmm3 +; SSE2-NEXT: pandn %xmm2, %xmm4 +; SSE2-NEXT: por %xmm3, %xmm4 +; SSE2-NEXT: movdqa %xmm4, %xmm2 +; SSE2-NEXT: psrlw $6, %xmm2 +; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252] +; SSE2-NEXT: movdqa %xmm4, %xmm5 +; SSE2-NEXT: psllw $2, %xmm5 ; SSE2-NEXT: pand %xmm3, %xmm5 ; SSE2-NEXT: pandn %xmm2, %xmm3 ; SSE2-NEXT: por %xmm5, %xmm3 -; SSE2-NEXT: movdqa %xmm3, %xmm2 -; SSE2-NEXT: psrlw $6, %xmm2 -; SSE2-NEXT: pand {{.*}}(%rip), %xmm2 -; SSE2-NEXT: movdqa %xmm3, %xmm4 -; SSE2-NEXT: psllw $2, %xmm4 -; SSE2-NEXT: pand {{.*}}(%rip), %xmm4 -; SSE2-NEXT: por %xmm2, %xmm4 ; SSE2-NEXT: paddb %xmm1, %xmm1 ; SSE2-NEXT: pxor %xmm2, %xmm2 ; SSE2-NEXT: pcmpgtb %xmm1, %xmm2 -; SSE2-NEXT: pand %xmm2, %xmm4 -; SSE2-NEXT: pandn %xmm3, %xmm2 -; SSE2-NEXT: por %xmm4, %xmm2 +; SSE2-NEXT: pand %xmm2, %xmm3 +; SSE2-NEXT: pandn %xmm4, %xmm2 +; SSE2-NEXT: por %xmm3, %xmm2 ; SSE2-NEXT: movdqa %xmm2, %xmm3 ; SSE2-NEXT: paddb %xmm2, %xmm3 ; SSE2-NEXT: movdqa %xmm2, %xmm4 @@ -493,21 +494,23 @@ ; SSE41-NEXT: movdqa %xmm1, %xmm2 ; SSE41-NEXT: movdqa %xmm0, %xmm1 ; SSE41-NEXT: psrlw $4, %xmm0 -; SSE41-NEXT: pand {{.*}}(%rip), %xmm0 -; SSE41-NEXT: movdqa %xmm1, %xmm3 -; SSE41-NEXT: psllw $4, %xmm3 -; SSE41-NEXT: pand {{.*}}(%rip), %xmm3 -; SSE41-NEXT: por %xmm0, %xmm3 +; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; SSE41-NEXT: movdqa %xmm1, %xmm4 +; SSE41-NEXT: psllw $4, %xmm4 +; SSE41-NEXT: pand %xmm3, %xmm4 +; SSE41-NEXT: pandn %xmm0, %xmm3 +; SSE41-NEXT: por %xmm4, %xmm3 ; SSE41-NEXT: psllw $5, %xmm2 ; SSE41-NEXT: movdqa %xmm2, %xmm0 ; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm1 ; SSE41-NEXT: movdqa %xmm1, %xmm0 ; SSE41-NEXT: psrlw $6, %xmm0 -; SSE41-NEXT: pand {{.*}}(%rip), %xmm0 -; SSE41-NEXT: movdqa %xmm1, %xmm3 -; SSE41-NEXT: psllw $2, %xmm3 -; SSE41-NEXT: pand {{.*}}(%rip), %xmm3 -; SSE41-NEXT: por %xmm0, %xmm3 +; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252] +; SSE41-NEXT: movdqa %xmm1, %xmm4 +; SSE41-NEXT: psllw $2, %xmm4 +; SSE41-NEXT: pand %xmm3, %xmm4 +; SSE41-NEXT: pandn %xmm0, %xmm3 +; SSE41-NEXT: por %xmm4, %xmm3 ; SSE41-NEXT: paddb %xmm2, %xmm2 ; SSE41-NEXT: movdqa %xmm2, %xmm0 ; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm1 @@ -526,16 +529,18 @@ ; AVX-LABEL: var_funnnel_v16i8: ; AVX: # %bb.0: ; AVX-NEXT: vpsrlw $4, %xmm0, %xmm2 -; AVX-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2 -; AVX-NEXT: vpsllw $4, %xmm0, %xmm3 -; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3 +; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; AVX-NEXT: vpandn %xmm2, %xmm3, %xmm2 +; AVX-NEXT: vpsllw $4, %xmm0, %xmm4 +; AVX-NEXT: vpand %xmm3, %xmm4, %xmm3 ; AVX-NEXT: vpor %xmm2, %xmm3, %xmm2 ; AVX-NEXT: vpsllw $5, %xmm1, %xmm1 ; AVX-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0 ; AVX-NEXT: vpsrlw $6, %xmm0, %xmm2 -; AVX-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2 -; AVX-NEXT: vpsllw $2, %xmm0, %xmm3 -; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3 +; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252] +; AVX-NEXT: vpandn %xmm2, %xmm3, %xmm2 +; AVX-NEXT: 
vpsllw $2, %xmm0, %xmm4 +; AVX-NEXT: vpand %xmm3, %xmm4, %xmm3 ; AVX-NEXT: vpor %xmm2, %xmm3, %xmm2 ; AVX-NEXT: vpaddb %xmm1, %xmm1, %xmm1 ; AVX-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0 @@ -624,33 +629,34 @@ ; X32-SSE-LABEL: var_funnnel_v16i8: ; X32-SSE: # %bb.0: ; X32-SSE-NEXT: movdqa %xmm0, %xmm2 +; X32-SSE-NEXT: psrlw $4, %xmm0 +; X32-SSE-NEXT: movdqa {{.*#+}} xmm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; X32-SSE-NEXT: movdqa %xmm2, %xmm4 +; X32-SSE-NEXT: psllw $4, %xmm4 +; X32-SSE-NEXT: pand %xmm3, %xmm4 +; X32-SSE-NEXT: pandn %xmm0, %xmm3 +; X32-SSE-NEXT: por %xmm4, %xmm3 ; X32-SSE-NEXT: psllw $5, %xmm1 ; X32-SSE-NEXT: pxor %xmm0, %xmm0 -; X32-SSE-NEXT: pxor %xmm3, %xmm3 -; X32-SSE-NEXT: pcmpgtb %xmm1, %xmm3 -; X32-SSE-NEXT: movdqa %xmm2, %xmm4 -; X32-SSE-NEXT: psrlw $4, %xmm4 -; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm4 -; X32-SSE-NEXT: movdqa %xmm2, %xmm5 -; X32-SSE-NEXT: psllw $4, %xmm5 -; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm5 -; X32-SSE-NEXT: por %xmm4, %xmm5 +; X32-SSE-NEXT: pxor %xmm4, %xmm4 +; X32-SSE-NEXT: pcmpgtb %xmm1, %xmm4 +; X32-SSE-NEXT: pand %xmm4, %xmm3 +; X32-SSE-NEXT: pandn %xmm2, %xmm4 +; X32-SSE-NEXT: por %xmm3, %xmm4 +; X32-SSE-NEXT: movdqa %xmm4, %xmm2 +; X32-SSE-NEXT: psrlw $6, %xmm2 +; X32-SSE-NEXT: movdqa {{.*#+}} xmm3 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252] +; X32-SSE-NEXT: movdqa %xmm4, %xmm5 +; X32-SSE-NEXT: psllw $2, %xmm5 ; X32-SSE-NEXT: pand %xmm3, %xmm5 ; X32-SSE-NEXT: pandn %xmm2, %xmm3 ; X32-SSE-NEXT: por %xmm5, %xmm3 -; X32-SSE-NEXT: movdqa %xmm3, %xmm2 -; X32-SSE-NEXT: psrlw $6, %xmm2 -; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm2 -; X32-SSE-NEXT: movdqa %xmm3, %xmm4 -; X32-SSE-NEXT: psllw $2, %xmm4 -; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm4 -; X32-SSE-NEXT: por %xmm2, %xmm4 ; X32-SSE-NEXT: paddb %xmm1, %xmm1 ; X32-SSE-NEXT: pxor %xmm2, %xmm2 ; X32-SSE-NEXT: pcmpgtb %xmm1, %xmm2 -; X32-SSE-NEXT: pand %xmm2, %xmm4 -; X32-SSE-NEXT: pandn %xmm3, %xmm2 -; X32-SSE-NEXT: por %xmm4, %xmm2 +; X32-SSE-NEXT: pand %xmm2, %xmm3 +; X32-SSE-NEXT: pandn %xmm4, %xmm2 +; X32-SSE-NEXT: por %xmm3, %xmm2 ; X32-SSE-NEXT: movdqa %xmm2, %xmm3 ; X32-SSE-NEXT: paddb %xmm2, %xmm3 ; X32-SSE-NEXT: movdqa %xmm2, %xmm4 @@ -1822,27 +1828,30 @@ ; SSE: # %bb.0: ; SSE-NEXT: movdqa %xmm0, %xmm1 ; SSE-NEXT: psrlw $4, %xmm1 -; SSE-NEXT: pand {{.*}}(%rip), %xmm1 +; SSE-NEXT: movdqa {{.*#+}} xmm2 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] ; SSE-NEXT: psllw $4, %xmm0 -; SSE-NEXT: pand {{.*}}(%rip), %xmm0 -; SSE-NEXT: por %xmm1, %xmm0 +; SSE-NEXT: pand %xmm2, %xmm0 +; SSE-NEXT: pandn %xmm1, %xmm2 +; SSE-NEXT: por %xmm2, %xmm0 ; SSE-NEXT: retq ; ; AVX-LABEL: splatconstant_funnnel_v16i8: ; AVX: # %bb.0: ; AVX-NEXT: vpsrlw $4, %xmm0, %xmm1 -; AVX-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1 +; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; AVX-NEXT: vpandn %xmm1, %xmm2, %xmm1 ; AVX-NEXT: vpsllw $4, %xmm0, %xmm0 -; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0 +; AVX-NEXT: vpand %xmm2, %xmm0, %xmm0 ; AVX-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX-NEXT: retq ; ; AVX512-LABEL: splatconstant_funnnel_v16i8: ; AVX512: # %bb.0: ; AVX512-NEXT: vpsrlw $4, %xmm0, %xmm1 -; AVX512-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1 +; AVX512-NEXT: vmovdqa {{.*#+}} xmm2 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; AVX512-NEXT: vpandn %xmm1, %xmm2, %xmm1 ; AVX512-NEXT: vpsllw $4, %xmm0, %xmm0 -; AVX512-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0 +; AVX512-NEXT: vpand %xmm2, %xmm0, 
%xmm0 ; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: retq ; @@ -1855,10 +1864,11 @@ ; X32-SSE: # %bb.0: ; X32-SSE-NEXT: movdqa %xmm0, %xmm1 ; X32-SSE-NEXT: psrlw $4, %xmm1 -; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm1 +; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] ; X32-SSE-NEXT: psllw $4, %xmm0 -; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0 -; X32-SSE-NEXT: por %xmm1, %xmm0 +; X32-SSE-NEXT: pand %xmm2, %xmm0 +; X32-SSE-NEXT: pandn %xmm1, %xmm2 +; X32-SSE-NEXT: por %xmm2, %xmm0 ; X32-SSE-NEXT: retl %res = call <16 x i8> @llvm.fshl.v16i8(<16 x i8> %x, <16 x i8> %x, <16 x i8> ) ret <16 x i8> %res Index: test/CodeGen/X86/vector-fshl-rot-256.ll =================================================================== --- test/CodeGen/X86/vector-fshl-rot-256.ll +++ test/CodeGen/X86/vector-fshl-rot-256.ll @@ -341,47 +341,45 @@ ; AVX1: # %bb.0: ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2 ; AVX1-NEXT: vpsrlw $4, %xmm2, %xmm3 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm8 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] -; AVX1-NEXT: vpand %xmm8, %xmm3, %xmm3 +; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; AVX1-NEXT: vpandn %xmm3, %xmm4, %xmm3 ; AVX1-NEXT: vpsllw $4, %xmm2, %xmm5 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm9 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] -; AVX1-NEXT: vpand %xmm9, %xmm5, %xmm5 +; AVX1-NEXT: vpand %xmm4, %xmm5, %xmm5 ; AVX1-NEXT: vpor %xmm3, %xmm5, %xmm3 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5 ; AVX1-NEXT: vpsllw $5, %xmm5, %xmm5 ; AVX1-NEXT: vpblendvb %xmm5, %xmm3, %xmm2, %xmm2 ; AVX1-NEXT: vpsrlw $6, %xmm2, %xmm3 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm10 = [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3] -; AVX1-NEXT: vpand %xmm10, %xmm3, %xmm3 -; AVX1-NEXT: vpsllw $2, %xmm2, %xmm4 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252] -; AVX1-NEXT: vpand %xmm6, %xmm4, %xmm4 -; AVX1-NEXT: vpor %xmm3, %xmm4, %xmm3 -; AVX1-NEXT: vpaddb %xmm5, %xmm5, %xmm4 -; AVX1-NEXT: vpblendvb %xmm4, %xmm3, %xmm2, %xmm2 +; AVX1-NEXT: vpandn %xmm3, %xmm6, %xmm3 +; AVX1-NEXT: vpsllw $2, %xmm2, %xmm7 +; AVX1-NEXT: vpand %xmm6, %xmm7, %xmm7 +; AVX1-NEXT: vpor %xmm3, %xmm7, %xmm3 +; AVX1-NEXT: vpaddb %xmm5, %xmm5, %xmm5 +; AVX1-NEXT: vpblendvb %xmm5, %xmm3, %xmm2, %xmm2 ; AVX1-NEXT: vpsrlw $7, %xmm2, %xmm3 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1] -; AVX1-NEXT: vpand %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vmovdqa {{.*#+}} xmm8 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1] +; AVX1-NEXT: vpand %xmm8, %xmm3, %xmm3 ; AVX1-NEXT: vpaddb %xmm2, %xmm2, %xmm7 ; AVX1-NEXT: vpor %xmm3, %xmm7, %xmm3 -; AVX1-NEXT: vpaddb %xmm4, %xmm4, %xmm4 -; AVX1-NEXT: vpblendvb %xmm4, %xmm3, %xmm2, %xmm2 +; AVX1-NEXT: vpaddb %xmm5, %xmm5, %xmm5 +; AVX1-NEXT: vpblendvb %xmm5, %xmm3, %xmm2, %xmm2 ; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm3 -; AVX1-NEXT: vpand %xmm8, %xmm3, %xmm3 -; AVX1-NEXT: vpsllw $4, %xmm0, %xmm4 -; AVX1-NEXT: vpand %xmm9, %xmm4, %xmm4 +; AVX1-NEXT: vpandn %xmm3, %xmm4, %xmm3 +; AVX1-NEXT: vpsllw $4, %xmm0, %xmm5 +; AVX1-NEXT: vpand %xmm4, %xmm5, %xmm4 ; AVX1-NEXT: vpor %xmm3, %xmm4, %xmm3 ; AVX1-NEXT: vpsllw $5, %xmm1, %xmm1 ; AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 ; AVX1-NEXT: vpsrlw $6, %xmm0, %xmm3 -; AVX1-NEXT: vpand %xmm10, %xmm3, %xmm3 +; AVX1-NEXT: vpandn %xmm3, %xmm6, %xmm3 ; AVX1-NEXT: vpsllw $2, %xmm0, %xmm4 ; AVX1-NEXT: vpand %xmm6, %xmm4, %xmm4 ; AVX1-NEXT: vpor %xmm3, %xmm4, %xmm3 ; AVX1-NEXT: vpaddb %xmm1, %xmm1, %xmm1 ; 
AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 ; AVX1-NEXT: vpsrlw $7, %xmm0, %xmm3 -; AVX1-NEXT: vpand %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vpand %xmm8, %xmm3, %xmm3 ; AVX1-NEXT: vpaddb %xmm0, %xmm0, %xmm4 ; AVX1-NEXT: vpor %xmm3, %xmm4, %xmm3 ; AVX1-NEXT: vpaddb %xmm1, %xmm1, %xmm1 @@ -392,16 +390,18 @@ ; AVX2-LABEL: var_funnnel_v32i8: ; AVX2: # %bb.0: ; AVX2-NEXT: vpsrlw $4, %ymm0, %ymm2 -; AVX2-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2 -; AVX2-NEXT: vpsllw $4, %ymm0, %ymm3 -; AVX2-NEXT: vpand {{.*}}(%rip), %ymm3, %ymm3 +; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; AVX2-NEXT: vpandn %ymm2, %ymm3, %ymm2 +; AVX2-NEXT: vpsllw $4, %ymm0, %ymm4 +; AVX2-NEXT: vpand %ymm3, %ymm4, %ymm3 ; AVX2-NEXT: vpor %ymm2, %ymm3, %ymm2 ; AVX2-NEXT: vpsllw $5, %ymm1, %ymm1 ; AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 ; AVX2-NEXT: vpsrlw $6, %ymm0, %ymm2 -; AVX2-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2 -; AVX2-NEXT: vpsllw $2, %ymm0, %ymm3 -; AVX2-NEXT: vpand {{.*}}(%rip), %ymm3, %ymm3 +; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252] +; AVX2-NEXT: vpandn %ymm2, %ymm3, %ymm2 +; AVX2-NEXT: vpsllw $2, %ymm0, %ymm4 +; AVX2-NEXT: vpand %ymm3, %ymm4, %ymm3 ; AVX2-NEXT: vpor %ymm2, %ymm3, %ymm2 ; AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm1 ; AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 @@ -416,16 +416,18 @@ ; AVX512F-LABEL: var_funnnel_v32i8: ; AVX512F: # %bb.0: ; AVX512F-NEXT: vpsrlw $4, %ymm0, %ymm2 -; AVX512F-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2 -; AVX512F-NEXT: vpsllw $4, %ymm0, %ymm3 -; AVX512F-NEXT: vpand {{.*}}(%rip), %ymm3, %ymm3 +; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; AVX512F-NEXT: vpandn %ymm2, %ymm3, %ymm2 +; AVX512F-NEXT: vpsllw $4, %ymm0, %ymm4 +; AVX512F-NEXT: vpand %ymm3, %ymm4, %ymm3 ; AVX512F-NEXT: vpor %ymm2, %ymm3, %ymm2 ; AVX512F-NEXT: vpsllw $5, %ymm1, %ymm1 ; AVX512F-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 ; AVX512F-NEXT: vpsrlw $6, %ymm0, %ymm2 -; AVX512F-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2 -; AVX512F-NEXT: vpsllw $2, %ymm0, %ymm3 -; AVX512F-NEXT: vpand {{.*}}(%rip), %ymm3, %ymm3 +; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252] +; AVX512F-NEXT: vpandn %ymm2, %ymm3, %ymm2 +; AVX512F-NEXT: vpsllw $2, %ymm0, %ymm4 +; AVX512F-NEXT: vpand %ymm3, %ymm4, %ymm3 ; AVX512F-NEXT: vpor %ymm2, %ymm3, %ymm2 ; AVX512F-NEXT: vpaddb %ymm1, %ymm1, %ymm1 ; AVX512F-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 @@ -440,16 +442,18 @@ ; AVX512VL-LABEL: var_funnnel_v32i8: ; AVX512VL: # %bb.0: ; AVX512VL-NEXT: vpsrlw $4, %ymm0, %ymm2 -; AVX512VL-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2 -; AVX512VL-NEXT: vpsllw $4, %ymm0, %ymm3 -; AVX512VL-NEXT: vpand {{.*}}(%rip), %ymm3, %ymm3 +; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; AVX512VL-NEXT: vpandn %ymm2, %ymm3, %ymm2 +; AVX512VL-NEXT: vpsllw $4, %ymm0, %ymm4 +; AVX512VL-NEXT: vpand %ymm3, %ymm4, %ymm3 ; AVX512VL-NEXT: vpor %ymm2, %ymm3, %ymm2 ; AVX512VL-NEXT: vpsllw $5, %ymm1, %ymm1 ; AVX512VL-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 ; AVX512VL-NEXT: 
vpsrlw $6, %ymm0, %ymm2 -; AVX512VL-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2 -; AVX512VL-NEXT: vpsllw $2, %ymm0, %ymm3 -; AVX512VL-NEXT: vpand {{.*}}(%rip), %ymm3, %ymm3 +; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252] +; AVX512VL-NEXT: vpandn %ymm2, %ymm3, %ymm2 +; AVX512VL-NEXT: vpsllw $2, %ymm0, %ymm4 +; AVX512VL-NEXT: vpand %ymm3, %ymm4, %ymm3 ; AVX512VL-NEXT: vpor %ymm2, %ymm3, %ymm2 ; AVX512VL-NEXT: vpaddb %ymm1, %ymm1, %ymm1 ; AVX512VL-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 @@ -1475,16 +1479,15 @@ ; AVX1: # %bb.0: ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1 ; AVX1-NEXT: vpsrlw $4, %xmm1, %xmm2 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] -; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2 +; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; AVX1-NEXT: vpandn %xmm2, %xmm3, %xmm2 ; AVX1-NEXT: vpsllw $4, %xmm1, %xmm1 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] -; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm1 +; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1 ; AVX1-NEXT: vpor %xmm2, %xmm1, %xmm1 ; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm2 -; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2 +; AVX1-NEXT: vpandn %xmm2, %xmm3, %xmm2 ; AVX1-NEXT: vpsllw $4, %xmm0, %xmm0 -; AVX1-NEXT: vpand %xmm4, %xmm0, %xmm0 +; AVX1-NEXT: vpand %xmm3, %xmm0, %xmm0 ; AVX1-NEXT: vpor %xmm2, %xmm0, %xmm0 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; AVX1-NEXT: retq @@ -1492,18 +1495,20 @@ ; AVX2-LABEL: splatconstant_funnnel_v32i8: ; AVX2: # %bb.0: ; AVX2-NEXT: vpsrlw $4, %ymm0, %ymm1 -; AVX2-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1 +; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; AVX2-NEXT: vpandn %ymm1, %ymm2, %ymm1 ; AVX2-NEXT: vpsllw $4, %ymm0, %ymm0 -; AVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0 +; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0 ; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0 ; AVX2-NEXT: retq ; ; AVX512-LABEL: splatconstant_funnnel_v32i8: ; AVX512: # %bb.0: ; AVX512-NEXT: vpsrlw $4, %ymm0, %ymm1 -; AVX512-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1 +; AVX512-NEXT: vmovdqa {{.*#+}} ymm2 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; AVX512-NEXT: vpandn %ymm1, %ymm2, %ymm1 ; AVX512-NEXT: vpsllw $4, %ymm0, %ymm0 -; AVX512-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0 +; AVX512-NEXT: vpand %ymm2, %ymm0, %ymm0 ; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0 ; AVX512-NEXT: retq ; Index: test/CodeGen/X86/vector-fshl-rot-512.ll =================================================================== --- test/CodeGen/X86/vector-fshl-rot-512.ll +++ test/CodeGen/X86/vector-fshl-rot-512.ll @@ -111,49 +111,47 @@ ; AVX512F-LABEL: var_funnnel_v64i8: ; AVX512F: # %bb.0: ; AVX512F-NEXT: vpsrlw $4, %ymm0, %ymm4 -; AVX512F-NEXT: vmovdqa {{.*#+}} ymm5 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] -; AVX512F-NEXT: vpand %ymm5, %ymm4, %ymm4 +; AVX512F-NEXT: vmovdqa {{.*#+}} ymm5 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; AVX512F-NEXT: vpandn %ymm4, %ymm5, %ymm4 ; AVX512F-NEXT: vpsllw $4, %ymm0, %ymm6 -; AVX512F-NEXT: vmovdqa {{.*#+}} ymm7 = 
[240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] -; AVX512F-NEXT: vpand %ymm7, %ymm6, %ymm6 +; AVX512F-NEXT: vpand %ymm5, %ymm6, %ymm6 ; AVX512F-NEXT: vpor %ymm4, %ymm6, %ymm4 ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm6 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7] ; AVX512F-NEXT: vpand %ymm6, %ymm2, %ymm2 ; AVX512F-NEXT: vpsllw $5, %ymm2, %ymm2 ; AVX512F-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0 ; AVX512F-NEXT: vpsrlw $6, %ymm0, %ymm4 -; AVX512F-NEXT: vmovdqa {{.*#+}} ymm8 = [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3] -; AVX512F-NEXT: vpand %ymm8, %ymm4, %ymm4 -; AVX512F-NEXT: vpsllw $2, %ymm0, %ymm9 -; AVX512F-NEXT: vmovdqa {{.*#+}} ymm10 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252] -; AVX512F-NEXT: vpand %ymm10, %ymm9, %ymm9 -; AVX512F-NEXT: vpor %ymm4, %ymm9, %ymm4 +; AVX512F-NEXT: vmovdqa {{.*#+}} ymm7 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252] +; AVX512F-NEXT: vpandn %ymm4, %ymm7, %ymm4 +; AVX512F-NEXT: vpsllw $2, %ymm0, %ymm8 +; AVX512F-NEXT: vpand %ymm7, %ymm8, %ymm8 +; AVX512F-NEXT: vpor %ymm4, %ymm8, %ymm4 ; AVX512F-NEXT: vpaddb %ymm2, %ymm2, %ymm2 ; AVX512F-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0 ; AVX512F-NEXT: vpsrlw $7, %ymm0, %ymm4 -; AVX512F-NEXT: vmovdqa {{.*#+}} ymm9 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1] -; AVX512F-NEXT: vpand %ymm9, %ymm4, %ymm4 -; AVX512F-NEXT: vpaddb %ymm0, %ymm0, %ymm11 -; AVX512F-NEXT: vpor %ymm4, %ymm11, %ymm4 +; AVX512F-NEXT: vmovdqa {{.*#+}} ymm8 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1] +; AVX512F-NEXT: vpand %ymm8, %ymm4, %ymm4 +; AVX512F-NEXT: vpaddb %ymm0, %ymm0, %ymm9 +; AVX512F-NEXT: vpor %ymm4, %ymm9, %ymm4 ; AVX512F-NEXT: vpaddb %ymm2, %ymm2, %ymm2 ; AVX512F-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0 ; AVX512F-NEXT: vpsrlw $4, %ymm1, %ymm2 -; AVX512F-NEXT: vpand %ymm5, %ymm2, %ymm2 +; AVX512F-NEXT: vpandn %ymm2, %ymm5, %ymm2 ; AVX512F-NEXT: vpsllw $4, %ymm1, %ymm4 -; AVX512F-NEXT: vpand %ymm7, %ymm4, %ymm4 +; AVX512F-NEXT: vpand %ymm5, %ymm4, %ymm4 ; AVX512F-NEXT: vpor %ymm2, %ymm4, %ymm2 ; AVX512F-NEXT: vpand %ymm6, %ymm3, %ymm3 ; AVX512F-NEXT: vpsllw $5, %ymm3, %ymm3 ; AVX512F-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1 ; AVX512F-NEXT: vpsrlw $6, %ymm1, %ymm2 -; AVX512F-NEXT: vpand %ymm8, %ymm2, %ymm2 +; AVX512F-NEXT: vpandn %ymm2, %ymm7, %ymm2 ; AVX512F-NEXT: vpsllw $2, %ymm1, %ymm4 -; AVX512F-NEXT: vpand %ymm10, %ymm4, %ymm4 +; AVX512F-NEXT: vpand %ymm7, %ymm4, %ymm4 ; AVX512F-NEXT: vpor %ymm2, %ymm4, %ymm2 ; AVX512F-NEXT: vpaddb %ymm3, %ymm3, %ymm3 ; AVX512F-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1 ; AVX512F-NEXT: vpsrlw $7, %ymm1, %ymm2 -; AVX512F-NEXT: vpand %ymm9, %ymm2, %ymm2 +; AVX512F-NEXT: vpand %ymm8, %ymm2, %ymm2 ; AVX512F-NEXT: vpaddb %ymm1, %ymm1, %ymm4 ; AVX512F-NEXT: vpor %ymm2, %ymm4, %ymm2 ; AVX512F-NEXT: vpaddb %ymm3, %ymm3, %ymm3 @@ -163,49 +161,47 @@ ; AVX512VL-LABEL: var_funnnel_v64i8: ; AVX512VL: # %bb.0: ; AVX512VL-NEXT: vpsrlw $4, %ymm0, %ymm4 -; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm5 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] -; AVX512VL-NEXT: vpand %ymm5, %ymm4, %ymm4 +; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm5 = 
[240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; AVX512VL-NEXT: vpandn %ymm4, %ymm5, %ymm4 ; AVX512VL-NEXT: vpsllw $4, %ymm0, %ymm6 -; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm7 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] -; AVX512VL-NEXT: vpand %ymm7, %ymm6, %ymm6 +; AVX512VL-NEXT: vpand %ymm5, %ymm6, %ymm6 ; AVX512VL-NEXT: vpor %ymm4, %ymm6, %ymm4 ; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm6 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7] ; AVX512VL-NEXT: vpand %ymm6, %ymm2, %ymm2 ; AVX512VL-NEXT: vpsllw $5, %ymm2, %ymm2 ; AVX512VL-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0 ; AVX512VL-NEXT: vpsrlw $6, %ymm0, %ymm4 -; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm8 = [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3] -; AVX512VL-NEXT: vpand %ymm8, %ymm4, %ymm4 -; AVX512VL-NEXT: vpsllw $2, %ymm0, %ymm9 -; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm10 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252] -; AVX512VL-NEXT: vpand %ymm10, %ymm9, %ymm9 -; AVX512VL-NEXT: vpor %ymm4, %ymm9, %ymm4 +; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm7 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252] +; AVX512VL-NEXT: vpandn %ymm4, %ymm7, %ymm4 +; AVX512VL-NEXT: vpsllw $2, %ymm0, %ymm8 +; AVX512VL-NEXT: vpand %ymm7, %ymm8, %ymm8 +; AVX512VL-NEXT: vpor %ymm4, %ymm8, %ymm4 ; AVX512VL-NEXT: vpaddb %ymm2, %ymm2, %ymm2 ; AVX512VL-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0 ; AVX512VL-NEXT: vpsrlw $7, %ymm0, %ymm4 -; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm9 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1] -; AVX512VL-NEXT: vpand %ymm9, %ymm4, %ymm4 -; AVX512VL-NEXT: vpaddb %ymm0, %ymm0, %ymm11 -; AVX512VL-NEXT: vpor %ymm4, %ymm11, %ymm4 +; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm8 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1] +; AVX512VL-NEXT: vpand %ymm8, %ymm4, %ymm4 +; AVX512VL-NEXT: vpaddb %ymm0, %ymm0, %ymm9 +; AVX512VL-NEXT: vpor %ymm4, %ymm9, %ymm4 ; AVX512VL-NEXT: vpaddb %ymm2, %ymm2, %ymm2 ; AVX512VL-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0 ; AVX512VL-NEXT: vpsrlw $4, %ymm1, %ymm2 -; AVX512VL-NEXT: vpand %ymm5, %ymm2, %ymm2 +; AVX512VL-NEXT: vpandn %ymm2, %ymm5, %ymm2 ; AVX512VL-NEXT: vpsllw $4, %ymm1, %ymm4 -; AVX512VL-NEXT: vpand %ymm7, %ymm4, %ymm4 +; AVX512VL-NEXT: vpand %ymm5, %ymm4, %ymm4 ; AVX512VL-NEXT: vpor %ymm2, %ymm4, %ymm2 ; AVX512VL-NEXT: vpand %ymm6, %ymm3, %ymm3 ; AVX512VL-NEXT: vpsllw $5, %ymm3, %ymm3 ; AVX512VL-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1 ; AVX512VL-NEXT: vpsrlw $6, %ymm1, %ymm2 -; AVX512VL-NEXT: vpand %ymm8, %ymm2, %ymm2 +; AVX512VL-NEXT: vpandn %ymm2, %ymm7, %ymm2 ; AVX512VL-NEXT: vpsllw $2, %ymm1, %ymm4 -; AVX512VL-NEXT: vpand %ymm10, %ymm4, %ymm4 +; AVX512VL-NEXT: vpand %ymm7, %ymm4, %ymm4 ; AVX512VL-NEXT: vpor %ymm2, %ymm4, %ymm2 ; AVX512VL-NEXT: vpaddb %ymm3, %ymm3, %ymm3 ; AVX512VL-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1 ; AVX512VL-NEXT: vpsrlw $7, %ymm1, %ymm2 -; AVX512VL-NEXT: vpand %ymm9, %ymm2, %ymm2 +; AVX512VL-NEXT: vpand %ymm8, %ymm2, %ymm2 ; AVX512VL-NEXT: vpaddb %ymm1, %ymm1, %ymm4 ; AVX512VL-NEXT: vpor %ymm2, %ymm4, %ymm2 ; AVX512VL-NEXT: vpaddb %ymm3, %ymm3, %ymm3 @@ -776,50 +772,50 @@ ; AVX512F-LABEL: splatconstant_funnnel_v64i8: ; AVX512F: # %bb.0: ; AVX512F-NEXT: vpsrlw $4, %ymm0, %ymm2 -; 
AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] -; AVX512F-NEXT: vpand %ymm3, %ymm2, %ymm2 +; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; AVX512F-NEXT: vpandn %ymm2, %ymm3, %ymm2 ; AVX512F-NEXT: vpsllw $4, %ymm0, %ymm0 -; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] -; AVX512F-NEXT: vpand %ymm4, %ymm0, %ymm0 +; AVX512F-NEXT: vpand %ymm3, %ymm0, %ymm0 ; AVX512F-NEXT: vpor %ymm2, %ymm0, %ymm0 ; AVX512F-NEXT: vpsrlw $4, %ymm1, %ymm2 -; AVX512F-NEXT: vpand %ymm3, %ymm2, %ymm2 +; AVX512F-NEXT: vpandn %ymm2, %ymm3, %ymm2 ; AVX512F-NEXT: vpsllw $4, %ymm1, %ymm1 -; AVX512F-NEXT: vpand %ymm4, %ymm1, %ymm1 +; AVX512F-NEXT: vpand %ymm3, %ymm1, %ymm1 ; AVX512F-NEXT: vpor %ymm2, %ymm1, %ymm1 ; AVX512F-NEXT: retq ; ; AVX512VL-LABEL: splatconstant_funnnel_v64i8: ; AVX512VL: # %bb.0: ; AVX512VL-NEXT: vpsrlw $4, %ymm0, %ymm2 -; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] -; AVX512VL-NEXT: vpand %ymm3, %ymm2, %ymm2 +; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; AVX512VL-NEXT: vpandn %ymm2, %ymm3, %ymm2 ; AVX512VL-NEXT: vpsllw $4, %ymm0, %ymm0 -; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] -; AVX512VL-NEXT: vpand %ymm4, %ymm0, %ymm0 +; AVX512VL-NEXT: vpand %ymm3, %ymm0, %ymm0 ; AVX512VL-NEXT: vpor %ymm2, %ymm0, %ymm0 ; AVX512VL-NEXT: vpsrlw $4, %ymm1, %ymm2 -; AVX512VL-NEXT: vpand %ymm3, %ymm2, %ymm2 +; AVX512VL-NEXT: vpandn %ymm2, %ymm3, %ymm2 ; AVX512VL-NEXT: vpsllw $4, %ymm1, %ymm1 -; AVX512VL-NEXT: vpand %ymm4, %ymm1, %ymm1 +; AVX512VL-NEXT: vpand %ymm3, %ymm1, %ymm1 ; AVX512VL-NEXT: vpor %ymm2, %ymm1, %ymm1 ; AVX512VL-NEXT: retq ; ; AVX512BW-LABEL: splatconstant_funnnel_v64i8: ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm1 -; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm1, %zmm1 +; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; AVX512BW-NEXT: vpandnq %zmm1, %zmm2, %zmm1 ; AVX512BW-NEXT: vpsllw $4, %zmm0, %zmm0 -; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm0, %zmm0 +; AVX512BW-NEXT: vpandq %zmm2, %zmm0, %zmm0 ; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 ; AVX512BW-NEXT: retq ; ; AVX512VLBW-LABEL: splatconstant_funnnel_v64i8: ; AVX512VLBW: # %bb.0: ; AVX512VLBW-NEXT: vpsrlw $4, %zmm0, %zmm1 -; AVX512VLBW-NEXT: vpandq {{.*}}(%rip), %zmm1, %zmm1 +; AVX512VLBW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; AVX512VLBW-NEXT: vpandnq %zmm1, %zmm2, %zmm1 ; AVX512VLBW-NEXT: vpsllw $4, %zmm0, %zmm0 -; AVX512VLBW-NEXT: vpandq {{.*}}(%rip), %zmm0, 
%zmm0 +; AVX512VLBW-NEXT: vpandq %zmm2, %zmm0, %zmm0 ; AVX512VLBW-NEXT: vporq %zmm1, %zmm0, %zmm0 ; AVX512VLBW-NEXT: retq %res = call <64 x i8> @llvm.fshl.v64i8(<64 x i8> %x, <64 x i8> %x, <64 x i8> ) Index: test/CodeGen/X86/vector-fshr-128.ll =================================================================== --- test/CodeGen/X86/vector-fshr-128.ll +++ test/CodeGen/X86/vector-fshr-128.ll @@ -3026,27 +3026,30 @@ ; SSE-LABEL: splatconstant_funnnel_v16i8: ; SSE: # %bb.0: ; SSE-NEXT: psrlw $4, %xmm1 -; SSE-NEXT: pand {{.*}}(%rip), %xmm1 +; SSE-NEXT: movdqa {{.*#+}} xmm2 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] ; SSE-NEXT: psllw $4, %xmm0 -; SSE-NEXT: pand {{.*}}(%rip), %xmm0 -; SSE-NEXT: por %xmm1, %xmm0 +; SSE-NEXT: pand %xmm2, %xmm0 +; SSE-NEXT: pandn %xmm1, %xmm2 +; SSE-NEXT: por %xmm2, %xmm0 ; SSE-NEXT: retq ; ; AVX-LABEL: splatconstant_funnnel_v16i8: ; AVX: # %bb.0: ; AVX-NEXT: vpsrlw $4, %xmm1, %xmm1 -; AVX-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1 +; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; AVX-NEXT: vpandn %xmm1, %xmm2, %xmm1 ; AVX-NEXT: vpsllw $4, %xmm0, %xmm0 -; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0 +; AVX-NEXT: vpand %xmm2, %xmm0, %xmm0 ; AVX-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX-NEXT: retq ; ; AVX512-LABEL: splatconstant_funnnel_v16i8: ; AVX512: # %bb.0: ; AVX512-NEXT: vpsrlw $4, %xmm1, %xmm1 -; AVX512-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1 +; AVX512-NEXT: vmovdqa {{.*#+}} xmm2 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; AVX512-NEXT: vpandn %xmm1, %xmm2, %xmm1 ; AVX512-NEXT: vpsllw $4, %xmm0, %xmm0 -; AVX512-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0 +; AVX512-NEXT: vpand %xmm2, %xmm0, %xmm0 ; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: retq ; @@ -3060,10 +3063,11 @@ ; X32-SSE-LABEL: splatconstant_funnnel_v16i8: ; X32-SSE: # %bb.0: ; X32-SSE-NEXT: psrlw $4, %xmm1 -; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm1 +; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] ; X32-SSE-NEXT: psllw $4, %xmm0 -; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0 -; X32-SSE-NEXT: por %xmm1, %xmm0 +; X32-SSE-NEXT: pand %xmm2, %xmm0 +; X32-SSE-NEXT: pandn %xmm1, %xmm2 +; X32-SSE-NEXT: por %xmm2, %xmm0 ; X32-SSE-NEXT: retl %res = call <16 x i8> @llvm.fshr.v16i8(<16 x i8> %x, <16 x i8> %y, <16 x i8> ) ret <16 x i8> %res Index: test/CodeGen/X86/vector-fshr-256.ll =================================================================== --- test/CodeGen/X86/vector-fshr-256.ll +++ test/CodeGen/X86/vector-fshr-256.ll @@ -2536,18 +2536,20 @@ ; AVX2-LABEL: splatconstant_funnnel_v32i8: ; AVX2: # %bb.0: ; AVX2-NEXT: vpsrlw $4, %ymm1, %ymm1 -; AVX2-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1 +; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; AVX2-NEXT: vpandn %ymm1, %ymm2, %ymm1 ; AVX2-NEXT: vpsllw $4, %ymm0, %ymm0 -; AVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0 +; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0 ; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0 ; AVX2-NEXT: retq ; ; AVX512-LABEL: splatconstant_funnnel_v32i8: ; AVX512: # %bb.0: ; AVX512-NEXT: vpsrlw $4, %ymm1, %ymm1 -; AVX512-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1 +; AVX512-NEXT: vmovdqa {{.*#+}} ymm2 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; AVX512-NEXT: vpandn %ymm1, %ymm2, %ymm1 ; AVX512-NEXT: vpsllw $4, %ymm0, 
%ymm0 -; AVX512-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0 +; AVX512-NEXT: vpand %ymm2, %ymm0, %ymm0 ; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0 ; AVX512-NEXT: retq ; @@ -2569,10 +2571,8 @@ ; XOPAVX2-LABEL: splatconstant_funnnel_v32i8: ; XOPAVX2: # %bb.0: ; XOPAVX2-NEXT: vpsrlw $4, %ymm1, %ymm1 -; XOPAVX2-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1 ; XOPAVX2-NEXT: vpsllw $4, %ymm0, %ymm0 -; XOPAVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0 -; XOPAVX2-NEXT: vpor %ymm1, %ymm0, %ymm0 +; XOPAVX2-NEXT: vpcmov {{.*}}(%rip), %ymm1, %ymm0, %ymm0 ; XOPAVX2-NEXT: retq %res = call <32 x i8> @llvm.fshr.v32i8(<32 x i8> %x, <32 x i8> %y, <32 x i8> ) ret <32 x i8> %res Index: test/CodeGen/X86/vector-fshr-512.ll =================================================================== --- test/CodeGen/X86/vector-fshr-512.ll +++ test/CodeGen/X86/vector-fshr-512.ll @@ -1496,68 +1496,70 @@ ; AVX512F-LABEL: splatconstant_funnnel_v64i8: ; AVX512F: # %bb.0: ; AVX512F-NEXT: vpsrlw $4, %ymm2, %ymm2 -; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] -; AVX512F-NEXT: vpand %ymm4, %ymm2, %ymm2 +; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; AVX512F-NEXT: vpandn %ymm2, %ymm4, %ymm2 ; AVX512F-NEXT: vpsllw $4, %ymm0, %ymm0 -; AVX512F-NEXT: vmovdqa {{.*#+}} ymm5 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] -; AVX512F-NEXT: vpand %ymm5, %ymm0, %ymm0 +; AVX512F-NEXT: vpand %ymm4, %ymm0, %ymm0 ; AVX512F-NEXT: vpor %ymm2, %ymm0, %ymm0 ; AVX512F-NEXT: vpsrlw $4, %ymm3, %ymm2 -; AVX512F-NEXT: vpand %ymm4, %ymm2, %ymm2 +; AVX512F-NEXT: vpandn %ymm2, %ymm4, %ymm2 ; AVX512F-NEXT: vpsllw $4, %ymm1, %ymm1 -; AVX512F-NEXT: vpand %ymm5, %ymm1, %ymm1 +; AVX512F-NEXT: vpand %ymm4, %ymm1, %ymm1 ; AVX512F-NEXT: vpor %ymm2, %ymm1, %ymm1 ; AVX512F-NEXT: retq ; ; AVX512VL-LABEL: splatconstant_funnnel_v64i8: ; AVX512VL: # %bb.0: ; AVX512VL-NEXT: vpsrlw $4, %ymm2, %ymm2 -; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] -; AVX512VL-NEXT: vpand %ymm4, %ymm2, %ymm2 +; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; AVX512VL-NEXT: vpandn %ymm2, %ymm4, %ymm2 ; AVX512VL-NEXT: vpsllw $4, %ymm0, %ymm0 -; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm5 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] -; AVX512VL-NEXT: vpand %ymm5, %ymm0, %ymm0 +; AVX512VL-NEXT: vpand %ymm4, %ymm0, %ymm0 ; AVX512VL-NEXT: vpor %ymm2, %ymm0, %ymm0 ; AVX512VL-NEXT: vpsrlw $4, %ymm3, %ymm2 -; AVX512VL-NEXT: vpand %ymm4, %ymm2, %ymm2 +; AVX512VL-NEXT: vpandn %ymm2, %ymm4, %ymm2 ; AVX512VL-NEXT: vpsllw $4, %ymm1, %ymm1 -; AVX512VL-NEXT: vpand %ymm5, %ymm1, %ymm1 +; AVX512VL-NEXT: vpand %ymm4, %ymm1, %ymm1 ; AVX512VL-NEXT: vpor %ymm2, %ymm1, %ymm1 ; AVX512VL-NEXT: retq ; ; AVX512BW-LABEL: splatconstant_funnnel_v64i8: ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: vpsrlw $4, %zmm1, %zmm1 -; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm1, %zmm1 +; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = 
[240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; AVX512BW-NEXT: vpandnq %zmm1, %zmm2, %zmm1 ; AVX512BW-NEXT: vpsllw $4, %zmm0, %zmm0 -; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm0, %zmm0 +; AVX512BW-NEXT: vpandq %zmm2, %zmm0, %zmm0 ; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 ; AVX512BW-NEXT: retq ; ; AVX512VBMI2-LABEL: splatconstant_funnnel_v64i8: ; AVX512VBMI2: # %bb.0: ; AVX512VBMI2-NEXT: vpsrlw $4, %zmm1, %zmm1 -; AVX512VBMI2-NEXT: vpandq {{.*}}(%rip), %zmm1, %zmm1 +; AVX512VBMI2-NEXT: vmovdqa64 {{.*#+}} zmm2 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; AVX512VBMI2-NEXT: vpandnq %zmm1, %zmm2, %zmm1 ; AVX512VBMI2-NEXT: vpsllw $4, %zmm0, %zmm0 -; AVX512VBMI2-NEXT: vpandq {{.*}}(%rip), %zmm0, %zmm0 +; AVX512VBMI2-NEXT: vpandq %zmm2, %zmm0, %zmm0 ; AVX512VBMI2-NEXT: vporq %zmm1, %zmm0, %zmm0 ; AVX512VBMI2-NEXT: retq ; ; AVX512VLBW-LABEL: splatconstant_funnnel_v64i8: ; AVX512VLBW: # %bb.0: ; AVX512VLBW-NEXT: vpsrlw $4, %zmm1, %zmm1 -; AVX512VLBW-NEXT: vpandq {{.*}}(%rip), %zmm1, %zmm1 +; AVX512VLBW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; AVX512VLBW-NEXT: vpandnq %zmm1, %zmm2, %zmm1 ; AVX512VLBW-NEXT: vpsllw $4, %zmm0, %zmm0 -; AVX512VLBW-NEXT: vpandq {{.*}}(%rip), %zmm0, %zmm0 +; AVX512VLBW-NEXT: vpandq %zmm2, %zmm0, %zmm0 ; AVX512VLBW-NEXT: vporq %zmm1, %zmm0, %zmm0 ; AVX512VLBW-NEXT: retq ; ; AVX512VLVBMI2-LABEL: splatconstant_funnnel_v64i8: ; AVX512VLVBMI2: # %bb.0: ; AVX512VLVBMI2-NEXT: vpsrlw $4, %zmm1, %zmm1 -; AVX512VLVBMI2-NEXT: vpandq {{.*}}(%rip), %zmm1, %zmm1 +; AVX512VLVBMI2-NEXT: vmovdqa64 {{.*#+}} zmm2 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; AVX512VLVBMI2-NEXT: vpandnq %zmm1, %zmm2, %zmm1 ; AVX512VLVBMI2-NEXT: vpsllw $4, %zmm0, %zmm0 -; AVX512VLVBMI2-NEXT: vpandq {{.*}}(%rip), %zmm0, %zmm0 +; AVX512VLVBMI2-NEXT: vpandq %zmm2, %zmm0, %zmm0 ; AVX512VLVBMI2-NEXT: vporq %zmm1, %zmm0, %zmm0 ; AVX512VLVBMI2-NEXT: retq %res = call <64 x i8> @llvm.fshr.v64i8(<64 x i8> %x, <64 x i8> %y, <64 x i8> ) Index: test/CodeGen/X86/vector-fshr-rot-128.ll =================================================================== --- test/CodeGen/X86/vector-fshr-rot-128.ll +++ test/CodeGen/X86/vector-fshr-rot-128.ll @@ -483,29 +483,30 @@ ; SSE2-LABEL: var_funnnel_v16i8: ; SSE2: # %bb.0: ; SSE2-NEXT: movdqa %xmm0, %xmm2 +; SSE2-NEXT: psrlw $4, %xmm0 +; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; SSE2-NEXT: movdqa %xmm2, %xmm3 +; SSE2-NEXT: psllw $4, %xmm3 +; SSE2-NEXT: pand %xmm4, %xmm3 +; SSE2-NEXT: pandn %xmm0, %xmm4 +; SSE2-NEXT: por %xmm3, %xmm4 ; SSE2-NEXT: pxor %xmm0, %xmm0 ; SSE2-NEXT: pxor %xmm3, %xmm3 ; SSE2-NEXT: psubb %xmm1, %xmm3 ; SSE2-NEXT: psllw 
$5, %xmm3 ; SSE2-NEXT: pxor %xmm1, %xmm1 ; SSE2-NEXT: pcmpgtb %xmm3, %xmm1 -; SSE2-NEXT: movdqa %xmm2, %xmm4 -; SSE2-NEXT: psrlw $4, %xmm4 -; SSE2-NEXT: pand {{.*}}(%rip), %xmm4 -; SSE2-NEXT: movdqa %xmm2, %xmm5 -; SSE2-NEXT: psllw $4, %xmm5 -; SSE2-NEXT: pand {{.*}}(%rip), %xmm5 -; SSE2-NEXT: por %xmm4, %xmm5 -; SSE2-NEXT: pand %xmm1, %xmm5 +; SSE2-NEXT: pand %xmm1, %xmm4 ; SSE2-NEXT: pandn %xmm2, %xmm1 -; SSE2-NEXT: por %xmm5, %xmm1 +; SSE2-NEXT: por %xmm4, %xmm1 ; SSE2-NEXT: movdqa %xmm1, %xmm2 ; SSE2-NEXT: psrlw $6, %xmm2 -; SSE2-NEXT: pand {{.*}}(%rip), %xmm2 -; SSE2-NEXT: movdqa %xmm1, %xmm4 -; SSE2-NEXT: psllw $2, %xmm4 -; SSE2-NEXT: pand {{.*}}(%rip), %xmm4 -; SSE2-NEXT: por %xmm2, %xmm4 +; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252] +; SSE2-NEXT: movdqa %xmm1, %xmm5 +; SSE2-NEXT: psllw $2, %xmm5 +; SSE2-NEXT: pand %xmm4, %xmm5 +; SSE2-NEXT: pandn %xmm2, %xmm4 +; SSE2-NEXT: por %xmm5, %xmm4 ; SSE2-NEXT: paddb %xmm3, %xmm3 ; SSE2-NEXT: pxor %xmm2, %xmm2 ; SSE2-NEXT: pcmpgtb %xmm3, %xmm2 @@ -529,22 +530,24 @@ ; SSE41: # %bb.0: ; SSE41-NEXT: movdqa %xmm0, %xmm2 ; SSE41-NEXT: psrlw $4, %xmm0 -; SSE41-NEXT: pand {{.*}}(%rip), %xmm0 -; SSE41-NEXT: movdqa %xmm2, %xmm3 -; SSE41-NEXT: psllw $4, %xmm3 -; SSE41-NEXT: pand {{.*}}(%rip), %xmm3 -; SSE41-NEXT: por %xmm0, %xmm3 +; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; SSE41-NEXT: movdqa %xmm2, %xmm4 +; SSE41-NEXT: psllw $4, %xmm4 +; SSE41-NEXT: pand %xmm3, %xmm4 +; SSE41-NEXT: pandn %xmm0, %xmm3 +; SSE41-NEXT: por %xmm4, %xmm3 ; SSE41-NEXT: pxor %xmm0, %xmm0 ; SSE41-NEXT: psubb %xmm1, %xmm0 ; SSE41-NEXT: psllw $5, %xmm0 ; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2 ; SSE41-NEXT: movdqa %xmm2, %xmm1 ; SSE41-NEXT: psrlw $6, %xmm1 -; SSE41-NEXT: pand {{.*}}(%rip), %xmm1 -; SSE41-NEXT: movdqa %xmm2, %xmm3 -; SSE41-NEXT: psllw $2, %xmm3 -; SSE41-NEXT: pand {{.*}}(%rip), %xmm3 -; SSE41-NEXT: por %xmm1, %xmm3 +; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252] +; SSE41-NEXT: movdqa %xmm2, %xmm4 +; SSE41-NEXT: psllw $2, %xmm4 +; SSE41-NEXT: pand %xmm3, %xmm4 +; SSE41-NEXT: pandn %xmm1, %xmm3 +; SSE41-NEXT: por %xmm4, %xmm3 ; SSE41-NEXT: paddb %xmm0, %xmm0 ; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2 ; SSE41-NEXT: movdqa %xmm2, %xmm1 @@ -561,18 +564,20 @@ ; AVX-LABEL: var_funnnel_v16i8: ; AVX: # %bb.0: ; AVX-NEXT: vpsrlw $4, %xmm0, %xmm2 -; AVX-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2 -; AVX-NEXT: vpsllw $4, %xmm0, %xmm3 -; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3 +; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; AVX-NEXT: vpandn %xmm2, %xmm3, %xmm2 +; AVX-NEXT: vpsllw $4, %xmm0, %xmm4 +; AVX-NEXT: vpand %xmm3, %xmm4, %xmm3 ; AVX-NEXT: vpor %xmm2, %xmm3, %xmm2 ; AVX-NEXT: vpxor %xmm3, %xmm3, %xmm3 ; AVX-NEXT: vpsubb %xmm1, %xmm3, %xmm1 ; AVX-NEXT: vpsllw $5, %xmm1, %xmm1 ; AVX-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0 ; AVX-NEXT: vpsrlw $6, %xmm0, %xmm2 -; AVX-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2 -; AVX-NEXT: vpsllw $2, %xmm0, %xmm3 -; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3 +; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252] +; AVX-NEXT: vpandn %xmm2, %xmm3, %xmm2 +; AVX-NEXT: vpsllw $2, %xmm0, %xmm4 +; AVX-NEXT: vpand %xmm3, %xmm4, %xmm3 ; AVX-NEXT: vpor %xmm2, %xmm3, %xmm2 ; AVX-NEXT: vpaddb %xmm1, %xmm1, %xmm1 ; AVX-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0 @@ -663,29 
+668,30 @@ ; X32-SSE-LABEL: var_funnnel_v16i8: ; X32-SSE: # %bb.0: ; X32-SSE-NEXT: movdqa %xmm0, %xmm2 +; X32-SSE-NEXT: psrlw $4, %xmm0 +; X32-SSE-NEXT: movdqa {{.*#+}} xmm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; X32-SSE-NEXT: movdqa %xmm2, %xmm3 +; X32-SSE-NEXT: psllw $4, %xmm3 +; X32-SSE-NEXT: pand %xmm4, %xmm3 +; X32-SSE-NEXT: pandn %xmm0, %xmm4 +; X32-SSE-NEXT: por %xmm3, %xmm4 ; X32-SSE-NEXT: pxor %xmm0, %xmm0 ; X32-SSE-NEXT: pxor %xmm3, %xmm3 ; X32-SSE-NEXT: psubb %xmm1, %xmm3 ; X32-SSE-NEXT: psllw $5, %xmm3 ; X32-SSE-NEXT: pxor %xmm1, %xmm1 ; X32-SSE-NEXT: pcmpgtb %xmm3, %xmm1 -; X32-SSE-NEXT: movdqa %xmm2, %xmm4 -; X32-SSE-NEXT: psrlw $4, %xmm4 -; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm4 -; X32-SSE-NEXT: movdqa %xmm2, %xmm5 -; X32-SSE-NEXT: psllw $4, %xmm5 -; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm5 -; X32-SSE-NEXT: por %xmm4, %xmm5 -; X32-SSE-NEXT: pand %xmm1, %xmm5 +; X32-SSE-NEXT: pand %xmm1, %xmm4 ; X32-SSE-NEXT: pandn %xmm2, %xmm1 -; X32-SSE-NEXT: por %xmm5, %xmm1 +; X32-SSE-NEXT: por %xmm4, %xmm1 ; X32-SSE-NEXT: movdqa %xmm1, %xmm2 ; X32-SSE-NEXT: psrlw $6, %xmm2 -; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm2 -; X32-SSE-NEXT: movdqa %xmm1, %xmm4 -; X32-SSE-NEXT: psllw $2, %xmm4 -; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm4 -; X32-SSE-NEXT: por %xmm2, %xmm4 +; X32-SSE-NEXT: movdqa {{.*#+}} xmm4 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252] +; X32-SSE-NEXT: movdqa %xmm1, %xmm5 +; X32-SSE-NEXT: psllw $2, %xmm5 +; X32-SSE-NEXT: pand %xmm4, %xmm5 +; X32-SSE-NEXT: pandn %xmm2, %xmm4 +; X32-SSE-NEXT: por %xmm5, %xmm4 ; X32-SSE-NEXT: paddb %xmm3, %xmm3 ; X32-SSE-NEXT: pxor %xmm2, %xmm2 ; X32-SSE-NEXT: pcmpgtb %xmm3, %xmm2 @@ -1905,27 +1911,30 @@ ; SSE: # %bb.0: ; SSE-NEXT: movdqa %xmm0, %xmm1 ; SSE-NEXT: psrlw $4, %xmm1 -; SSE-NEXT: pand {{.*}}(%rip), %xmm1 +; SSE-NEXT: movdqa {{.*#+}} xmm2 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] ; SSE-NEXT: psllw $4, %xmm0 -; SSE-NEXT: pand {{.*}}(%rip), %xmm0 -; SSE-NEXT: por %xmm1, %xmm0 +; SSE-NEXT: pand %xmm2, %xmm0 +; SSE-NEXT: pandn %xmm1, %xmm2 +; SSE-NEXT: por %xmm2, %xmm0 ; SSE-NEXT: retq ; ; AVX-LABEL: splatconstant_funnnel_v16i8: ; AVX: # %bb.0: ; AVX-NEXT: vpsrlw $4, %xmm0, %xmm1 -; AVX-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1 +; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; AVX-NEXT: vpandn %xmm1, %xmm2, %xmm1 ; AVX-NEXT: vpsllw $4, %xmm0, %xmm0 -; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0 +; AVX-NEXT: vpand %xmm2, %xmm0, %xmm0 ; AVX-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX-NEXT: retq ; ; AVX512-LABEL: splatconstant_funnnel_v16i8: ; AVX512: # %bb.0: ; AVX512-NEXT: vpsrlw $4, %xmm0, %xmm1 -; AVX512-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1 +; AVX512-NEXT: vmovdqa {{.*#+}} xmm2 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; AVX512-NEXT: vpandn %xmm1, %xmm2, %xmm1 ; AVX512-NEXT: vpsllw $4, %xmm0, %xmm0 -; AVX512-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0 +; AVX512-NEXT: vpand %xmm2, %xmm0, %xmm0 ; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: retq ; @@ -1938,10 +1947,11 @@ ; X32-SSE: # %bb.0: ; X32-SSE-NEXT: movdqa %xmm0, %xmm1 ; X32-SSE-NEXT: psrlw $4, %xmm1 -; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm1 +; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] ; X32-SSE-NEXT: psllw $4, %xmm0 -; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0 -; X32-SSE-NEXT: por %xmm1, %xmm0 +; X32-SSE-NEXT: pand %xmm2, %xmm0 +; X32-SSE-NEXT: pandn %xmm1, %xmm2 +; X32-SSE-NEXT: 
por %xmm2, %xmm0 ; X32-SSE-NEXT: retl %res = call <16 x i8> @llvm.fshr.v16i8(<16 x i8> %x, <16 x i8> %x, <16 x i8> ) ret <16 x i8> %res Index: test/CodeGen/X86/vector-fshr-rot-256.ll =================================================================== --- test/CodeGen/X86/vector-fshr-rot-256.ll +++ test/CodeGen/X86/vector-fshr-rot-256.ll @@ -372,50 +372,48 @@ ; AVX1: # %bb.0: ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2 ; AVX1-NEXT: vpsrlw $4, %xmm2, %xmm3 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm8 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] -; AVX1-NEXT: vpand %xmm8, %xmm3, %xmm3 +; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; AVX1-NEXT: vpandn %xmm3, %xmm4, %xmm3 ; AVX1-NEXT: vpsllw $4, %xmm2, %xmm5 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm9 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] -; AVX1-NEXT: vpand %xmm9, %xmm5, %xmm5 +; AVX1-NEXT: vpand %xmm4, %xmm5, %xmm5 ; AVX1-NEXT: vpor %xmm3, %xmm5, %xmm3 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5 -; AVX1-NEXT: vpxor %xmm10, %xmm10, %xmm10 -; AVX1-NEXT: vpsubb %xmm5, %xmm10, %xmm5 +; AVX1-NEXT: vpxor %xmm8, %xmm8, %xmm8 +; AVX1-NEXT: vpsubb %xmm5, %xmm8, %xmm5 ; AVX1-NEXT: vpsllw $5, %xmm5, %xmm5 ; AVX1-NEXT: vpblendvb %xmm5, %xmm3, %xmm2, %xmm2 ; AVX1-NEXT: vpsrlw $6, %xmm2, %xmm3 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm11 = [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3] -; AVX1-NEXT: vpand %xmm11, %xmm3, %xmm3 -; AVX1-NEXT: vpsllw $2, %xmm2, %xmm6 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm7 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252] +; AVX1-NEXT: vpandn %xmm3, %xmm7, %xmm3 +; AVX1-NEXT: vpsllw $2, %xmm2, %xmm6 ; AVX1-NEXT: vpand %xmm7, %xmm6, %xmm6 ; AVX1-NEXT: vpor %xmm3, %xmm6, %xmm3 ; AVX1-NEXT: vpaddb %xmm5, %xmm5, %xmm5 ; AVX1-NEXT: vpblendvb %xmm5, %xmm3, %xmm2, %xmm2 ; AVX1-NEXT: vpsrlw $7, %xmm2, %xmm3 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1] -; AVX1-NEXT: vpand %xmm6, %xmm3, %xmm3 -; AVX1-NEXT: vpaddb %xmm2, %xmm2, %xmm4 -; AVX1-NEXT: vpor %xmm3, %xmm4, %xmm3 -; AVX1-NEXT: vpaddb %xmm5, %xmm5, %xmm4 -; AVX1-NEXT: vpblendvb %xmm4, %xmm3, %xmm2, %xmm2 +; AVX1-NEXT: vmovdqa {{.*#+}} xmm9 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1] +; AVX1-NEXT: vpand %xmm9, %xmm3, %xmm3 +; AVX1-NEXT: vpaddb %xmm2, %xmm2, %xmm6 +; AVX1-NEXT: vpor %xmm3, %xmm6, %xmm3 +; AVX1-NEXT: vpaddb %xmm5, %xmm5, %xmm5 +; AVX1-NEXT: vpblendvb %xmm5, %xmm3, %xmm2, %xmm2 ; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm3 -; AVX1-NEXT: vpand %xmm8, %xmm3, %xmm3 -; AVX1-NEXT: vpsllw $4, %xmm0, %xmm4 -; AVX1-NEXT: vpand %xmm9, %xmm4, %xmm4 +; AVX1-NEXT: vpandn %xmm3, %xmm4, %xmm3 +; AVX1-NEXT: vpsllw $4, %xmm0, %xmm5 +; AVX1-NEXT: vpand %xmm4, %xmm5, %xmm4 ; AVX1-NEXT: vpor %xmm3, %xmm4, %xmm3 -; AVX1-NEXT: vpsubb %xmm1, %xmm10, %xmm1 +; AVX1-NEXT: vpsubb %xmm1, %xmm8, %xmm1 ; AVX1-NEXT: vpsllw $5, %xmm1, %xmm1 ; AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 ; AVX1-NEXT: vpsrlw $6, %xmm0, %xmm3 -; AVX1-NEXT: vpand %xmm11, %xmm3, %xmm3 +; AVX1-NEXT: vpandn %xmm3, %xmm7, %xmm3 ; AVX1-NEXT: vpsllw $2, %xmm0, %xmm4 ; AVX1-NEXT: vpand %xmm7, %xmm4, %xmm4 ; AVX1-NEXT: vpor %xmm3, %xmm4, %xmm3 ; AVX1-NEXT: vpaddb %xmm1, %xmm1, %xmm1 ; AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0 ; AVX1-NEXT: vpsrlw $7, %xmm0, %xmm3 -; AVX1-NEXT: vpand %xmm6, %xmm3, %xmm3 +; AVX1-NEXT: vpand %xmm9, %xmm3, %xmm3 ; AVX1-NEXT: vpaddb %xmm0, %xmm0, %xmm4 ; AVX1-NEXT: vpor %xmm3, %xmm4, %xmm3 ; AVX1-NEXT: vpaddb %xmm1, %xmm1, %xmm1 @@ -426,18 +424,20 @@ ; AVX2-LABEL: var_funnnel_v32i8: ; AVX2: # %bb.0: ; AVX2-NEXT: 
vpsrlw $4, %ymm0, %ymm2 -; AVX2-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2 -; AVX2-NEXT: vpsllw $4, %ymm0, %ymm3 -; AVX2-NEXT: vpand {{.*}}(%rip), %ymm3, %ymm3 +; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; AVX2-NEXT: vpandn %ymm2, %ymm3, %ymm2 +; AVX2-NEXT: vpsllw $4, %ymm0, %ymm4 +; AVX2-NEXT: vpand %ymm3, %ymm4, %ymm3 ; AVX2-NEXT: vpor %ymm2, %ymm3, %ymm2 ; AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3 ; AVX2-NEXT: vpsubb %ymm1, %ymm3, %ymm1 ; AVX2-NEXT: vpsllw $5, %ymm1, %ymm1 ; AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 ; AVX2-NEXT: vpsrlw $6, %ymm0, %ymm2 -; AVX2-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2 -; AVX2-NEXT: vpsllw $2, %ymm0, %ymm3 -; AVX2-NEXT: vpand {{.*}}(%rip), %ymm3, %ymm3 +; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252] +; AVX2-NEXT: vpandn %ymm2, %ymm3, %ymm2 +; AVX2-NEXT: vpsllw $2, %ymm0, %ymm4 +; AVX2-NEXT: vpand %ymm3, %ymm4, %ymm3 ; AVX2-NEXT: vpor %ymm2, %ymm3, %ymm2 ; AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm1 ; AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 @@ -452,18 +452,20 @@ ; AVX512F-LABEL: var_funnnel_v32i8: ; AVX512F: # %bb.0: ; AVX512F-NEXT: vpsrlw $4, %ymm0, %ymm2 -; AVX512F-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2 -; AVX512F-NEXT: vpsllw $4, %ymm0, %ymm3 -; AVX512F-NEXT: vpand {{.*}}(%rip), %ymm3, %ymm3 +; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; AVX512F-NEXT: vpandn %ymm2, %ymm3, %ymm2 +; AVX512F-NEXT: vpsllw $4, %ymm0, %ymm4 +; AVX512F-NEXT: vpand %ymm3, %ymm4, %ymm3 ; AVX512F-NEXT: vpor %ymm2, %ymm3, %ymm2 ; AVX512F-NEXT: vpxor %xmm3, %xmm3, %xmm3 ; AVX512F-NEXT: vpsubb %ymm1, %ymm3, %ymm1 ; AVX512F-NEXT: vpsllw $5, %ymm1, %ymm1 ; AVX512F-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 ; AVX512F-NEXT: vpsrlw $6, %ymm0, %ymm2 -; AVX512F-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2 -; AVX512F-NEXT: vpsllw $2, %ymm0, %ymm3 -; AVX512F-NEXT: vpand {{.*}}(%rip), %ymm3, %ymm3 +; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252] +; AVX512F-NEXT: vpandn %ymm2, %ymm3, %ymm2 +; AVX512F-NEXT: vpsllw $2, %ymm0, %ymm4 +; AVX512F-NEXT: vpand %ymm3, %ymm4, %ymm3 ; AVX512F-NEXT: vpor %ymm2, %ymm3, %ymm2 ; AVX512F-NEXT: vpaddb %ymm1, %ymm1, %ymm1 ; AVX512F-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 @@ -478,18 +480,20 @@ ; AVX512VL-LABEL: var_funnnel_v32i8: ; AVX512VL: # %bb.0: ; AVX512VL-NEXT: vpsrlw $4, %ymm0, %ymm2 -; AVX512VL-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2 -; AVX512VL-NEXT: vpsllw $4, %ymm0, %ymm3 -; AVX512VL-NEXT: vpand {{.*}}(%rip), %ymm3, %ymm3 +; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; AVX512VL-NEXT: vpandn %ymm2, %ymm3, %ymm2 +; AVX512VL-NEXT: vpsllw $4, %ymm0, %ymm4 +; AVX512VL-NEXT: vpand %ymm3, %ymm4, %ymm3 ; AVX512VL-NEXT: vpor %ymm2, %ymm3, %ymm2 ; AVX512VL-NEXT: vpxor %xmm3, %xmm3, %xmm3 ; AVX512VL-NEXT: vpsubb %ymm1, %ymm3, %ymm1 ; AVX512VL-NEXT: vpsllw $5, %ymm1, %ymm1 ; AVX512VL-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 ; AVX512VL-NEXT: vpsrlw $6, %ymm0, %ymm2 -; AVX512VL-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2 -; AVX512VL-NEXT: vpsllw $2, %ymm0, %ymm3 -; 
AVX512VL-NEXT: vpand {{.*}}(%rip), %ymm3, %ymm3 +; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252] +; AVX512VL-NEXT: vpandn %ymm2, %ymm3, %ymm2 +; AVX512VL-NEXT: vpsllw $2, %ymm0, %ymm4 +; AVX512VL-NEXT: vpand %ymm3, %ymm4, %ymm3 ; AVX512VL-NEXT: vpor %ymm2, %ymm3, %ymm2 ; AVX512VL-NEXT: vpaddb %ymm1, %ymm1, %ymm1 ; AVX512VL-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0 @@ -1552,16 +1556,15 @@ ; AVX1: # %bb.0: ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1 ; AVX1-NEXT: vpsrlw $4, %xmm1, %xmm2 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] -; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2 +; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; AVX1-NEXT: vpandn %xmm2, %xmm3, %xmm2 ; AVX1-NEXT: vpsllw $4, %xmm1, %xmm1 -; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] -; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm1 +; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1 ; AVX1-NEXT: vpor %xmm2, %xmm1, %xmm1 ; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm2 -; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2 +; AVX1-NEXT: vpandn %xmm2, %xmm3, %xmm2 ; AVX1-NEXT: vpsllw $4, %xmm0, %xmm0 -; AVX1-NEXT: vpand %xmm4, %xmm0, %xmm0 +; AVX1-NEXT: vpand %xmm3, %xmm0, %xmm0 ; AVX1-NEXT: vpor %xmm2, %xmm0, %xmm0 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; AVX1-NEXT: retq @@ -1569,18 +1572,20 @@ ; AVX2-LABEL: splatconstant_funnnel_v32i8: ; AVX2: # %bb.0: ; AVX2-NEXT: vpsrlw $4, %ymm0, %ymm1 -; AVX2-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1 +; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; AVX2-NEXT: vpandn %ymm1, %ymm2, %ymm1 ; AVX2-NEXT: vpsllw $4, %ymm0, %ymm0 -; AVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0 +; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0 ; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0 ; AVX2-NEXT: retq ; ; AVX512-LABEL: splatconstant_funnnel_v32i8: ; AVX512: # %bb.0: ; AVX512-NEXT: vpsrlw $4, %ymm0, %ymm1 -; AVX512-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1 +; AVX512-NEXT: vmovdqa {{.*#+}} ymm2 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; AVX512-NEXT: vpandn %ymm1, %ymm2, %ymm1 ; AVX512-NEXT: vpsllw $4, %ymm0, %ymm0 -; AVX512-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0 +; AVX512-NEXT: vpand %ymm2, %ymm0, %ymm0 ; AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0 ; AVX512-NEXT: retq ; Index: test/CodeGen/X86/vector-fshr-rot-512.ll =================================================================== --- test/CodeGen/X86/vector-fshr-rot-512.ll +++ test/CodeGen/X86/vector-fshr-rot-512.ll @@ -117,52 +117,50 @@ ; AVX512F-LABEL: var_funnnel_v64i8: ; AVX512F: # %bb.0: ; AVX512F-NEXT: vpsrlw $4, %ymm0, %ymm4 -; AVX512F-NEXT: vmovdqa {{.*#+}} ymm5 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] -; AVX512F-NEXT: vpand %ymm5, %ymm4, %ymm4 +; AVX512F-NEXT: vmovdqa {{.*#+}} ymm5 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; AVX512F-NEXT: vpandn %ymm4, %ymm5, %ymm4 ; AVX512F-NEXT: vpsllw $4, %ymm0, %ymm6 -; AVX512F-NEXT: vmovdqa {{.*#+}} ymm7 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] -; AVX512F-NEXT: vpand 
%ymm7, %ymm6, %ymm6 +; AVX512F-NEXT: vpand %ymm5, %ymm6, %ymm6 ; AVX512F-NEXT: vpor %ymm4, %ymm6, %ymm4 ; AVX512F-NEXT: vpxor %xmm6, %xmm6, %xmm6 ; AVX512F-NEXT: vpsubb %ymm2, %ymm6, %ymm2 -; AVX512F-NEXT: vmovdqa {{.*#+}} ymm8 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7] -; AVX512F-NEXT: vpand %ymm8, %ymm2, %ymm2 +; AVX512F-NEXT: vmovdqa {{.*#+}} ymm7 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7] +; AVX512F-NEXT: vpand %ymm7, %ymm2, %ymm2 ; AVX512F-NEXT: vpsllw $5, %ymm2, %ymm2 ; AVX512F-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0 ; AVX512F-NEXT: vpsrlw $6, %ymm0, %ymm4 -; AVX512F-NEXT: vmovdqa {{.*#+}} ymm9 = [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3] -; AVX512F-NEXT: vpand %ymm9, %ymm4, %ymm4 -; AVX512F-NEXT: vpsllw $2, %ymm0, %ymm10 -; AVX512F-NEXT: vmovdqa {{.*#+}} ymm11 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252] -; AVX512F-NEXT: vpand %ymm11, %ymm10, %ymm10 -; AVX512F-NEXT: vpor %ymm4, %ymm10, %ymm4 +; AVX512F-NEXT: vmovdqa {{.*#+}} ymm8 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252] +; AVX512F-NEXT: vpandn %ymm4, %ymm8, %ymm4 +; AVX512F-NEXT: vpsllw $2, %ymm0, %ymm9 +; AVX512F-NEXT: vpand %ymm8, %ymm9, %ymm9 +; AVX512F-NEXT: vpor %ymm4, %ymm9, %ymm4 ; AVX512F-NEXT: vpaddb %ymm2, %ymm2, %ymm2 ; AVX512F-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0 ; AVX512F-NEXT: vpsrlw $7, %ymm0, %ymm4 -; AVX512F-NEXT: vmovdqa {{.*#+}} ymm10 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1] -; AVX512F-NEXT: vpand %ymm10, %ymm4, %ymm4 -; AVX512F-NEXT: vpaddb %ymm0, %ymm0, %ymm12 -; AVX512F-NEXT: vpor %ymm4, %ymm12, %ymm4 +; AVX512F-NEXT: vmovdqa {{.*#+}} ymm9 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1] +; AVX512F-NEXT: vpand %ymm9, %ymm4, %ymm4 +; AVX512F-NEXT: vpaddb %ymm0, %ymm0, %ymm10 +; AVX512F-NEXT: vpor %ymm4, %ymm10, %ymm4 ; AVX512F-NEXT: vpaddb %ymm2, %ymm2, %ymm2 ; AVX512F-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0 ; AVX512F-NEXT: vpsrlw $4, %ymm1, %ymm2 -; AVX512F-NEXT: vpand %ymm5, %ymm2, %ymm2 +; AVX512F-NEXT: vpandn %ymm2, %ymm5, %ymm2 ; AVX512F-NEXT: vpsllw $4, %ymm1, %ymm4 -; AVX512F-NEXT: vpand %ymm7, %ymm4, %ymm4 +; AVX512F-NEXT: vpand %ymm5, %ymm4, %ymm4 ; AVX512F-NEXT: vpor %ymm2, %ymm4, %ymm2 ; AVX512F-NEXT: vpsubb %ymm3, %ymm6, %ymm3 -; AVX512F-NEXT: vpand %ymm8, %ymm3, %ymm3 +; AVX512F-NEXT: vpand %ymm7, %ymm3, %ymm3 ; AVX512F-NEXT: vpsllw $5, %ymm3, %ymm3 ; AVX512F-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1 ; AVX512F-NEXT: vpsrlw $6, %ymm1, %ymm2 -; AVX512F-NEXT: vpand %ymm9, %ymm2, %ymm2 +; AVX512F-NEXT: vpandn %ymm2, %ymm8, %ymm2 ; AVX512F-NEXT: vpsllw $2, %ymm1, %ymm4 -; AVX512F-NEXT: vpand %ymm11, %ymm4, %ymm4 +; AVX512F-NEXT: vpand %ymm8, %ymm4, %ymm4 ; AVX512F-NEXT: vpor %ymm2, %ymm4, %ymm2 ; AVX512F-NEXT: vpaddb %ymm3, %ymm3, %ymm3 ; AVX512F-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1 ; AVX512F-NEXT: vpsrlw $7, %ymm1, %ymm2 -; AVX512F-NEXT: vpand %ymm10, %ymm2, %ymm2 +; AVX512F-NEXT: vpand %ymm9, %ymm2, %ymm2 ; AVX512F-NEXT: vpaddb %ymm1, %ymm1, %ymm4 ; AVX512F-NEXT: vpor %ymm2, %ymm4, %ymm2 ; AVX512F-NEXT: vpaddb %ymm3, %ymm3, %ymm3 @@ -172,52 +170,50 @@ ; AVX512VL-LABEL: var_funnnel_v64i8: ; AVX512VL: # %bb.0: ; AVX512VL-NEXT: vpsrlw $4, %ymm0, %ymm4 -; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm5 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] -; 
AVX512VL-NEXT: vpand %ymm5, %ymm4, %ymm4 +; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm5 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; AVX512VL-NEXT: vpandn %ymm4, %ymm5, %ymm4 ; AVX512VL-NEXT: vpsllw $4, %ymm0, %ymm6 -; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm7 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] -; AVX512VL-NEXT: vpand %ymm7, %ymm6, %ymm6 +; AVX512VL-NEXT: vpand %ymm5, %ymm6, %ymm6 ; AVX512VL-NEXT: vpor %ymm4, %ymm6, %ymm4 ; AVX512VL-NEXT: vpxor %xmm6, %xmm6, %xmm6 ; AVX512VL-NEXT: vpsubb %ymm2, %ymm6, %ymm2 -; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm8 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7] -; AVX512VL-NEXT: vpand %ymm8, %ymm2, %ymm2 +; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm7 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7] +; AVX512VL-NEXT: vpand %ymm7, %ymm2, %ymm2 ; AVX512VL-NEXT: vpsllw $5, %ymm2, %ymm2 ; AVX512VL-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0 ; AVX512VL-NEXT: vpsrlw $6, %ymm0, %ymm4 -; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm9 = [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3] -; AVX512VL-NEXT: vpand %ymm9, %ymm4, %ymm4 -; AVX512VL-NEXT: vpsllw $2, %ymm0, %ymm10 -; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm11 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252] -; AVX512VL-NEXT: vpand %ymm11, %ymm10, %ymm10 -; AVX512VL-NEXT: vpor %ymm4, %ymm10, %ymm4 +; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm8 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252] +; AVX512VL-NEXT: vpandn %ymm4, %ymm8, %ymm4 +; AVX512VL-NEXT: vpsllw $2, %ymm0, %ymm9 +; AVX512VL-NEXT: vpand %ymm8, %ymm9, %ymm9 +; AVX512VL-NEXT: vpor %ymm4, %ymm9, %ymm4 ; AVX512VL-NEXT: vpaddb %ymm2, %ymm2, %ymm2 ; AVX512VL-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0 ; AVX512VL-NEXT: vpsrlw $7, %ymm0, %ymm4 -; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm10 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1] -; AVX512VL-NEXT: vpand %ymm10, %ymm4, %ymm4 -; AVX512VL-NEXT: vpaddb %ymm0, %ymm0, %ymm12 -; AVX512VL-NEXT: vpor %ymm4, %ymm12, %ymm4 +; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm9 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1] +; AVX512VL-NEXT: vpand %ymm9, %ymm4, %ymm4 +; AVX512VL-NEXT: vpaddb %ymm0, %ymm0, %ymm10 +; AVX512VL-NEXT: vpor %ymm4, %ymm10, %ymm4 ; AVX512VL-NEXT: vpaddb %ymm2, %ymm2, %ymm2 ; AVX512VL-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0 ; AVX512VL-NEXT: vpsrlw $4, %ymm1, %ymm2 -; AVX512VL-NEXT: vpand %ymm5, %ymm2, %ymm2 +; AVX512VL-NEXT: vpandn %ymm2, %ymm5, %ymm2 ; AVX512VL-NEXT: vpsllw $4, %ymm1, %ymm4 -; AVX512VL-NEXT: vpand %ymm7, %ymm4, %ymm4 +; AVX512VL-NEXT: vpand %ymm5, %ymm4, %ymm4 ; AVX512VL-NEXT: vpor %ymm2, %ymm4, %ymm2 ; AVX512VL-NEXT: vpsubb %ymm3, %ymm6, %ymm3 -; AVX512VL-NEXT: vpand %ymm8, %ymm3, %ymm3 +; AVX512VL-NEXT: vpand %ymm7, %ymm3, %ymm3 ; AVX512VL-NEXT: vpsllw $5, %ymm3, %ymm3 ; AVX512VL-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1 ; AVX512VL-NEXT: vpsrlw $6, %ymm1, %ymm2 -; AVX512VL-NEXT: vpand %ymm9, %ymm2, %ymm2 +; AVX512VL-NEXT: vpandn %ymm2, %ymm8, %ymm2 ; AVX512VL-NEXT: vpsllw $2, %ymm1, %ymm4 -; AVX512VL-NEXT: vpand %ymm11, %ymm4, %ymm4 +; AVX512VL-NEXT: vpand %ymm8, %ymm4, %ymm4 ; AVX512VL-NEXT: vpor %ymm2, %ymm4, %ymm2 ; AVX512VL-NEXT: vpaddb %ymm3, %ymm3, %ymm3 ; AVX512VL-NEXT: 
vpblendvb %ymm3, %ymm2, %ymm1, %ymm1 ; AVX512VL-NEXT: vpsrlw $7, %ymm1, %ymm2 -; AVX512VL-NEXT: vpand %ymm10, %ymm2, %ymm2 +; AVX512VL-NEXT: vpand %ymm9, %ymm2, %ymm2 ; AVX512VL-NEXT: vpaddb %ymm1, %ymm1, %ymm4 ; AVX512VL-NEXT: vpor %ymm2, %ymm4, %ymm2 ; AVX512VL-NEXT: vpaddb %ymm3, %ymm3, %ymm3 @@ -796,50 +792,50 @@ ; AVX512F-LABEL: splatconstant_funnnel_v64i8: ; AVX512F: # %bb.0: ; AVX512F-NEXT: vpsrlw $4, %ymm0, %ymm2 -; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] -; AVX512F-NEXT: vpand %ymm3, %ymm2, %ymm2 +; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; AVX512F-NEXT: vpandn %ymm2, %ymm3, %ymm2 ; AVX512F-NEXT: vpsllw $4, %ymm0, %ymm0 -; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] -; AVX512F-NEXT: vpand %ymm4, %ymm0, %ymm0 +; AVX512F-NEXT: vpand %ymm3, %ymm0, %ymm0 ; AVX512F-NEXT: vpor %ymm2, %ymm0, %ymm0 ; AVX512F-NEXT: vpsrlw $4, %ymm1, %ymm2 -; AVX512F-NEXT: vpand %ymm3, %ymm2, %ymm2 +; AVX512F-NEXT: vpandn %ymm2, %ymm3, %ymm2 ; AVX512F-NEXT: vpsllw $4, %ymm1, %ymm1 -; AVX512F-NEXT: vpand %ymm4, %ymm1, %ymm1 +; AVX512F-NEXT: vpand %ymm3, %ymm1, %ymm1 ; AVX512F-NEXT: vpor %ymm2, %ymm1, %ymm1 ; AVX512F-NEXT: retq ; ; AVX512VL-LABEL: splatconstant_funnnel_v64i8: ; AVX512VL: # %bb.0: ; AVX512VL-NEXT: vpsrlw $4, %ymm0, %ymm2 -; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15] -; AVX512VL-NEXT: vpand %ymm3, %ymm2, %ymm2 +; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; AVX512VL-NEXT: vpandn %ymm2, %ymm3, %ymm2 ; AVX512VL-NEXT: vpsllw $4, %ymm0, %ymm0 -; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] -; AVX512VL-NEXT: vpand %ymm4, %ymm0, %ymm0 +; AVX512VL-NEXT: vpand %ymm3, %ymm0, %ymm0 ; AVX512VL-NEXT: vpor %ymm2, %ymm0, %ymm0 ; AVX512VL-NEXT: vpsrlw $4, %ymm1, %ymm2 -; AVX512VL-NEXT: vpand %ymm3, %ymm2, %ymm2 +; AVX512VL-NEXT: vpandn %ymm2, %ymm3, %ymm2 ; AVX512VL-NEXT: vpsllw $4, %ymm1, %ymm1 -; AVX512VL-NEXT: vpand %ymm4, %ymm1, %ymm1 +; AVX512VL-NEXT: vpand %ymm3, %ymm1, %ymm1 ; AVX512VL-NEXT: vpor %ymm2, %ymm1, %ymm1 ; AVX512VL-NEXT: retq ; ; AVX512BW-LABEL: splatconstant_funnnel_v64i8: ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm1 -; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm1, %zmm1 +; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; AVX512BW-NEXT: vpandnq %zmm1, %zmm2, %zmm1 ; AVX512BW-NEXT: vpsllw $4, %zmm0, %zmm0 -; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm0, %zmm0 +; AVX512BW-NEXT: vpandq %zmm2, %zmm0, %zmm0 ; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0 ; AVX512BW-NEXT: retq ; ; AVX512VLBW-LABEL: splatconstant_funnnel_v64i8: ; AVX512VLBW: # %bb.0: ; AVX512VLBW-NEXT: vpsrlw $4, %zmm0, %zmm1 -; AVX512VLBW-NEXT: vpandq {{.*}}(%rip), %zmm1, %zmm1 +; 
AVX512VLBW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; AVX512VLBW-NEXT: vpandnq %zmm1, %zmm2, %zmm1 ; AVX512VLBW-NEXT: vpsllw $4, %zmm0, %zmm0 -; AVX512VLBW-NEXT: vpandq {{.*}}(%rip), %zmm0, %zmm0 +; AVX512VLBW-NEXT: vpandq %zmm2, %zmm0, %zmm0 ; AVX512VLBW-NEXT: vporq %zmm1, %zmm0, %zmm0 ; AVX512VLBW-NEXT: retq %res = call <64 x i8> @llvm.fshr.v64i8(<64 x i8> %x, <64 x i8> %x, <64 x i8> ) Index: test/CodeGen/X86/vector-rotate-128.ll =================================================================== --- test/CodeGen/X86/vector-rotate-128.ll +++ test/CodeGen/X86/vector-rotate-128.ll @@ -437,33 +437,34 @@ ; SSE2-LABEL: var_rotate_v16i8: ; SSE2: # %bb.0: ; SSE2-NEXT: movdqa %xmm0, %xmm2 +; SSE2-NEXT: psrlw $4, %xmm0 +; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; SSE2-NEXT: movdqa %xmm2, %xmm4 +; SSE2-NEXT: psllw $4, %xmm4 +; SSE2-NEXT: pand %xmm3, %xmm4 +; SSE2-NEXT: pandn %xmm0, %xmm3 +; SSE2-NEXT: por %xmm4, %xmm3 ; SSE2-NEXT: psllw $5, %xmm1 ; SSE2-NEXT: pxor %xmm0, %xmm0 -; SSE2-NEXT: pxor %xmm3, %xmm3 -; SSE2-NEXT: pcmpgtb %xmm1, %xmm3 -; SSE2-NEXT: movdqa %xmm2, %xmm4 -; SSE2-NEXT: psrlw $4, %xmm4 -; SSE2-NEXT: pand {{.*}}(%rip), %xmm4 -; SSE2-NEXT: movdqa %xmm2, %xmm5 -; SSE2-NEXT: psllw $4, %xmm5 -; SSE2-NEXT: pand {{.*}}(%rip), %xmm5 -; SSE2-NEXT: por %xmm4, %xmm5 +; SSE2-NEXT: pxor %xmm4, %xmm4 +; SSE2-NEXT: pcmpgtb %xmm1, %xmm4 +; SSE2-NEXT: pand %xmm4, %xmm3 +; SSE2-NEXT: pandn %xmm2, %xmm4 +; SSE2-NEXT: por %xmm3, %xmm4 +; SSE2-NEXT: movdqa %xmm4, %xmm2 +; SSE2-NEXT: psrlw $6, %xmm2 +; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252] +; SSE2-NEXT: movdqa %xmm4, %xmm5 +; SSE2-NEXT: psllw $2, %xmm5 ; SSE2-NEXT: pand %xmm3, %xmm5 ; SSE2-NEXT: pandn %xmm2, %xmm3 ; SSE2-NEXT: por %xmm5, %xmm3 -; SSE2-NEXT: movdqa %xmm3, %xmm2 -; SSE2-NEXT: psrlw $6, %xmm2 -; SSE2-NEXT: pand {{.*}}(%rip), %xmm2 -; SSE2-NEXT: movdqa %xmm3, %xmm4 -; SSE2-NEXT: psllw $2, %xmm4 -; SSE2-NEXT: pand {{.*}}(%rip), %xmm4 -; SSE2-NEXT: por %xmm2, %xmm4 ; SSE2-NEXT: paddb %xmm1, %xmm1 ; SSE2-NEXT: pxor %xmm2, %xmm2 ; SSE2-NEXT: pcmpgtb %xmm1, %xmm2 -; SSE2-NEXT: pand %xmm2, %xmm4 -; SSE2-NEXT: pandn %xmm3, %xmm2 -; SSE2-NEXT: por %xmm4, %xmm2 +; SSE2-NEXT: pand %xmm2, %xmm3 +; SSE2-NEXT: pandn %xmm4, %xmm2 +; SSE2-NEXT: por %xmm3, %xmm2 ; SSE2-NEXT: movdqa %xmm2, %xmm3 ; SSE2-NEXT: paddb %xmm2, %xmm3 ; SSE2-NEXT: movdqa %xmm2, %xmm4 @@ -482,21 +483,23 @@ ; SSE41-NEXT: movdqa %xmm1, %xmm2 ; SSE41-NEXT: movdqa %xmm0, %xmm1 ; SSE41-NEXT: psrlw $4, %xmm0 -; SSE41-NEXT: pand {{.*}}(%rip), %xmm0 -; SSE41-NEXT: movdqa %xmm1, %xmm3 -; SSE41-NEXT: psllw $4, %xmm3 -; SSE41-NEXT: pand {{.*}}(%rip), %xmm3 -; SSE41-NEXT: por %xmm0, %xmm3 +; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; SSE41-NEXT: movdqa %xmm1, %xmm4 +; SSE41-NEXT: psllw $4, %xmm4 +; SSE41-NEXT: pand %xmm3, %xmm4 +; SSE41-NEXT: pandn %xmm0, %xmm3 +; SSE41-NEXT: por %xmm4, %xmm3 ; SSE41-NEXT: psllw $5, %xmm2 ; SSE41-NEXT: movdqa %xmm2, %xmm0 ; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm1 ; SSE41-NEXT: movdqa %xmm1, %xmm0 ; SSE41-NEXT: psrlw $6, %xmm0 -; SSE41-NEXT: pand {{.*}}(%rip), %xmm0 -; SSE41-NEXT: movdqa %xmm1, %xmm3 -; SSE41-NEXT: psllw $2, %xmm3 
-; SSE41-NEXT: pand {{.*}}(%rip), %xmm3 -; SSE41-NEXT: por %xmm0, %xmm3 +; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252] +; SSE41-NEXT: movdqa %xmm1, %xmm4 +; SSE41-NEXT: psllw $2, %xmm4 +; SSE41-NEXT: pand %xmm3, %xmm4 +; SSE41-NEXT: pandn %xmm0, %xmm3 +; SSE41-NEXT: por %xmm4, %xmm3 ; SSE41-NEXT: paddb %xmm2, %xmm2 ; SSE41-NEXT: movdqa %xmm2, %xmm0 ; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm1 @@ -515,16 +518,18 @@ ; AVX-LABEL: var_rotate_v16i8: ; AVX: # %bb.0: ; AVX-NEXT: vpsrlw $4, %xmm0, %xmm2 -; AVX-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2 -; AVX-NEXT: vpsllw $4, %xmm0, %xmm3 -; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3 +; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; AVX-NEXT: vpandn %xmm2, %xmm3, %xmm2 +; AVX-NEXT: vpsllw $4, %xmm0, %xmm4 +; AVX-NEXT: vpand %xmm3, %xmm4, %xmm3 ; AVX-NEXT: vpor %xmm2, %xmm3, %xmm2 ; AVX-NEXT: vpsllw $5, %xmm1, %xmm1 ; AVX-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0 ; AVX-NEXT: vpsrlw $6, %xmm0, %xmm2 -; AVX-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2 -; AVX-NEXT: vpsllw $2, %xmm0, %xmm3 -; AVX-NEXT: vpand {{.*}}(%rip), %xmm3, %xmm3 +; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252] +; AVX-NEXT: vpandn %xmm2, %xmm3, %xmm2 +; AVX-NEXT: vpsllw $2, %xmm0, %xmm4 +; AVX-NEXT: vpand %xmm3, %xmm4, %xmm3 ; AVX-NEXT: vpor %xmm2, %xmm3, %xmm2 ; AVX-NEXT: vpaddb %xmm1, %xmm1, %xmm1 ; AVX-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0 @@ -601,33 +606,34 @@ ; X32-SSE-LABEL: var_rotate_v16i8: ; X32-SSE: # %bb.0: ; X32-SSE-NEXT: movdqa %xmm0, %xmm2 +; X32-SSE-NEXT: psrlw $4, %xmm0 +; X32-SSE-NEXT: movdqa {{.*#+}} xmm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; X32-SSE-NEXT: movdqa %xmm2, %xmm4 +; X32-SSE-NEXT: psllw $4, %xmm4 +; X32-SSE-NEXT: pand %xmm3, %xmm4 +; X32-SSE-NEXT: pandn %xmm0, %xmm3 +; X32-SSE-NEXT: por %xmm4, %xmm3 ; X32-SSE-NEXT: psllw $5, %xmm1 ; X32-SSE-NEXT: pxor %xmm0, %xmm0 -; X32-SSE-NEXT: pxor %xmm3, %xmm3 -; X32-SSE-NEXT: pcmpgtb %xmm1, %xmm3 -; X32-SSE-NEXT: movdqa %xmm2, %xmm4 -; X32-SSE-NEXT: psrlw $4, %xmm4 -; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm4 -; X32-SSE-NEXT: movdqa %xmm2, %xmm5 -; X32-SSE-NEXT: psllw $4, %xmm5 -; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm5 -; X32-SSE-NEXT: por %xmm4, %xmm5 +; X32-SSE-NEXT: pxor %xmm4, %xmm4 +; X32-SSE-NEXT: pcmpgtb %xmm1, %xmm4 +; X32-SSE-NEXT: pand %xmm4, %xmm3 +; X32-SSE-NEXT: pandn %xmm2, %xmm4 +; X32-SSE-NEXT: por %xmm3, %xmm4 +; X32-SSE-NEXT: movdqa %xmm4, %xmm2 +; X32-SSE-NEXT: psrlw $6, %xmm2 +; X32-SSE-NEXT: movdqa {{.*#+}} xmm3 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252] +; X32-SSE-NEXT: movdqa %xmm4, %xmm5 +; X32-SSE-NEXT: psllw $2, %xmm5 ; X32-SSE-NEXT: pand %xmm3, %xmm5 ; X32-SSE-NEXT: pandn %xmm2, %xmm3 ; X32-SSE-NEXT: por %xmm5, %xmm3 -; X32-SSE-NEXT: movdqa %xmm3, %xmm2 -; X32-SSE-NEXT: psrlw $6, %xmm2 -; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm2 -; X32-SSE-NEXT: movdqa %xmm3, %xmm4 -; X32-SSE-NEXT: psllw $2, %xmm4 -; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm4 -; X32-SSE-NEXT: por %xmm2, %xmm4 ; X32-SSE-NEXT: paddb %xmm1, %xmm1 ; X32-SSE-NEXT: pxor %xmm2, %xmm2 ; X32-SSE-NEXT: pcmpgtb %xmm1, %xmm2 -; X32-SSE-NEXT: pand %xmm2, %xmm4 -; X32-SSE-NEXT: pandn %xmm3, %xmm2 -; X32-SSE-NEXT: por %xmm4, %xmm2 +; X32-SSE-NEXT: pand %xmm2, %xmm3 +; X32-SSE-NEXT: pandn %xmm4, %xmm2 +; X32-SSE-NEXT: por %xmm3, %xmm2 ; X32-SSE-NEXT: movdqa %xmm2, %xmm3 ; X32-SSE-NEXT: paddb %xmm2, %xmm3 ; X32-SSE-NEXT: movdqa 
%xmm2, %xmm4 @@ -1788,27 +1794,30 @@ ; SSE: # %bb.0: ; SSE-NEXT: movdqa %xmm0, %xmm1 ; SSE-NEXT: psrlw $4, %xmm1 -; SSE-NEXT: pand {{.*}}(%rip), %xmm1 +; SSE-NEXT: movdqa {{.*#+}} xmm2 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] ; SSE-NEXT: psllw $4, %xmm0 -; SSE-NEXT: pand {{.*}}(%rip), %xmm0 -; SSE-NEXT: por %xmm1, %xmm0 +; SSE-NEXT: pand %xmm2, %xmm0 +; SSE-NEXT: pandn %xmm1, %xmm2 +; SSE-NEXT: por %xmm2, %xmm0 ; SSE-NEXT: retq ; ; AVX-LABEL: splatconstant_rotate_v16i8: ; AVX: # %bb.0: ; AVX-NEXT: vpsrlw $4, %xmm0, %xmm1 -; AVX-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1 +; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; AVX-NEXT: vpandn %xmm1, %xmm2, %xmm1 ; AVX-NEXT: vpsllw $4, %xmm0, %xmm0 -; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0 +; AVX-NEXT: vpand %xmm2, %xmm0, %xmm0 ; AVX-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX-NEXT: retq ; ; AVX512-LABEL: splatconstant_rotate_v16i8: ; AVX512: # %bb.0: ; AVX512-NEXT: vpsllw $4, %xmm0, %xmm1 -; AVX512-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1 +; AVX512-NEXT: vmovdqa {{.*#+}} xmm2 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; AVX512-NEXT: vpand %xmm2, %xmm1, %xmm1 ; AVX512-NEXT: vpsrlw $4, %xmm0, %xmm0 -; AVX512-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0 +; AVX512-NEXT: vpandn %xmm0, %xmm2, %xmm0 ; AVX512-NEXT: vpor %xmm0, %xmm1, %xmm0 ; AVX512-NEXT: retq ; @@ -1821,10 +1830,11 @@ ; X32-SSE: # %bb.0: ; X32-SSE-NEXT: movdqa %xmm0, %xmm1 ; X32-SSE-NEXT: psrlw $4, %xmm1 -; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm1 +; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] ; X32-SSE-NEXT: psllw $4, %xmm0 -; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0 -; X32-SSE-NEXT: por %xmm1, %xmm0 +; X32-SSE-NEXT: pand %xmm2, %xmm0 +; X32-SSE-NEXT: pandn %xmm1, %xmm2 +; X32-SSE-NEXT: por %xmm2, %xmm0 ; X32-SSE-NEXT: retl %shl = shl <16 x i8> %a, %lshr = lshr <16 x i8> %a, @@ -2017,19 +2027,21 @@ ; SSE: # %bb.0: ; SSE-NEXT: movdqa %xmm0, %xmm1 ; SSE-NEXT: psrlw $4, %xmm1 -; SSE-NEXT: pand {{.*}}(%rip), %xmm1 +; SSE-NEXT: movdqa {{.*#+}} xmm2 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] ; SSE-NEXT: psllw $4, %xmm0 -; SSE-NEXT: pand {{.*}}(%rip), %xmm0 -; SSE-NEXT: por %xmm1, %xmm0 +; SSE-NEXT: pand %xmm2, %xmm0 +; SSE-NEXT: pandn %xmm1, %xmm2 +; SSE-NEXT: por %xmm2, %xmm0 ; SSE-NEXT: pand {{.*}}(%rip), %xmm0 ; SSE-NEXT: retq ; ; AVX-LABEL: splatconstant_rotate_mask_v16i8: ; AVX: # %bb.0: ; AVX-NEXT: vpsrlw $4, %xmm0, %xmm1 -; AVX-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1 +; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] +; AVX-NEXT: vpandn %xmm1, %xmm2, %xmm1 ; AVX-NEXT: vpsllw $4, %xmm0, %xmm0 -; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0 +; AVX-NEXT: vpand %xmm2, %xmm0, %xmm0 ; AVX-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0 ; AVX-NEXT: retq @@ -2053,10 +2065,11 @@ ; X32-SSE: # %bb.0: ; X32-SSE-NEXT: movdqa %xmm0, %xmm1 ; X32-SSE-NEXT: psrlw $4, %xmm1 -; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm1 +; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240] ; X32-SSE-NEXT: psllw $4, %xmm0 -; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0 -; X32-SSE-NEXT: por %xmm1, %xmm0 +; X32-SSE-NEXT: pand %xmm2, %xmm0 +; X32-SSE-NEXT: pandn %xmm1, %xmm2 +; X32-SSE-NEXT: por %xmm2, %xmm0 ; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0 ; X32-SSE-NEXT: retl %shl = shl <16 x i8> %a, Index: test/CodeGen/X86/vector-rotate-256.ll 
===================================================================
--- test/CodeGen/X86/vector-rotate-256.ll
+++ test/CodeGen/X86/vector-rotate-256.ll
@@ -337,47 +337,45 @@
 ; AVX1: # %bb.0:
 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
 ; AVX1-NEXT: vpsrlw $4, %xmm2, %xmm3
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm8 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX1-NEXT: vpand %xmm8, %xmm3, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX1-NEXT: vpandn %xmm3, %xmm4, %xmm3
 ; AVX1-NEXT: vpsllw $4, %xmm2, %xmm5
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm9 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; AVX1-NEXT: vpand %xmm9, %xmm5, %xmm5
+; AVX1-NEXT: vpand %xmm4, %xmm5, %xmm5
 ; AVX1-NEXT: vpor %xmm3, %xmm5, %xmm3
 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
 ; AVX1-NEXT: vpsllw $5, %xmm5, %xmm5
 ; AVX1-NEXT: vpblendvb %xmm5, %xmm3, %xmm2, %xmm2
 ; AVX1-NEXT: vpsrlw $6, %xmm2, %xmm3
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm10 = [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3]
-; AVX1-NEXT: vpand %xmm10, %xmm3, %xmm3
-; AVX1-NEXT: vpsllw $2, %xmm2, %xmm4
 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
-; AVX1-NEXT: vpand %xmm6, %xmm4, %xmm4
-; AVX1-NEXT: vpor %xmm3, %xmm4, %xmm3
-; AVX1-NEXT: vpaddb %xmm5, %xmm5, %xmm4
-; AVX1-NEXT: vpblendvb %xmm4, %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpandn %xmm3, %xmm6, %xmm3
+; AVX1-NEXT: vpsllw $2, %xmm2, %xmm7
+; AVX1-NEXT: vpand %xmm6, %xmm7, %xmm7
+; AVX1-NEXT: vpor %xmm3, %xmm7, %xmm3
+; AVX1-NEXT: vpaddb %xmm5, %xmm5, %xmm5
+; AVX1-NEXT: vpblendvb %xmm5, %xmm3, %xmm2, %xmm2
 ; AVX1-NEXT: vpsrlw $7, %xmm2, %xmm3
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX1-NEXT: vpand %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm8 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX1-NEXT: vpand %xmm8, %xmm3, %xmm3
 ; AVX1-NEXT: vpaddb %xmm2, %xmm2, %xmm7
 ; AVX1-NEXT: vpor %xmm3, %xmm7, %xmm3
-; AVX1-NEXT: vpaddb %xmm4, %xmm4, %xmm4
-; AVX1-NEXT: vpblendvb %xmm4, %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpaddb %xmm5, %xmm5, %xmm5
+; AVX1-NEXT: vpblendvb %xmm5, %xmm3, %xmm2, %xmm2
 ; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm3
-; AVX1-NEXT: vpand %xmm8, %xmm3, %xmm3
-; AVX1-NEXT: vpsllw $4, %xmm0, %xmm4
-; AVX1-NEXT: vpand %xmm9, %xmm4, %xmm4
+; AVX1-NEXT: vpandn %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpsllw $4, %xmm0, %xmm5
+; AVX1-NEXT: vpand %xmm4, %xmm5, %xmm4
 ; AVX1-NEXT: vpor %xmm3, %xmm4, %xmm3
 ; AVX1-NEXT: vpsllw $5, %xmm1, %xmm1
 ; AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
 ; AVX1-NEXT: vpsrlw $6, %xmm0, %xmm3
-; AVX1-NEXT: vpand %xmm10, %xmm3, %xmm3
+; AVX1-NEXT: vpandn %xmm3, %xmm6, %xmm3
 ; AVX1-NEXT: vpsllw $2, %xmm0, %xmm4
 ; AVX1-NEXT: vpand %xmm6, %xmm4, %xmm4
 ; AVX1-NEXT: vpor %xmm3, %xmm4, %xmm3
 ; AVX1-NEXT: vpaddb %xmm1, %xmm1, %xmm1
 ; AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
 ; AVX1-NEXT: vpsrlw $7, %xmm0, %xmm3
-; AVX1-NEXT: vpand %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vpand %xmm8, %xmm3, %xmm3
 ; AVX1-NEXT: vpaddb %xmm0, %xmm0, %xmm4
 ; AVX1-NEXT: vpor %xmm3, %xmm4, %xmm3
 ; AVX1-NEXT: vpaddb %xmm1, %xmm1, %xmm1
@@ -388,16 +386,18 @@
 ; AVX2-LABEL: var_rotate_v32i8:
 ; AVX2: # %bb.0:
 ; AVX2-NEXT: vpsrlw $4, %ymm0, %ymm2
-; AVX2-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX2-NEXT: vpsllw $4, %ymm0, %ymm3
-; AVX2-NEXT: vpand {{.*}}(%rip), %ymm3, %ymm3
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX2-NEXT: vpandn %ymm2, %ymm3, %ymm2
+; AVX2-NEXT: vpsllw $4, %ymm0, %ymm4
+; AVX2-NEXT: vpand %ymm3, %ymm4, %ymm3
 ; AVX2-NEXT: vpor %ymm2, %ymm3, %ymm2
 ; AVX2-NEXT: vpsllw $5, %ymm1, %ymm1
 ; AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT: vpsrlw $6, %ymm0, %ymm2
-; AVX2-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX2-NEXT: vpsllw $2, %ymm0, %ymm3
-; AVX2-NEXT: vpand {{.*}}(%rip), %ymm3, %ymm3
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; AVX2-NEXT: vpandn %ymm2, %ymm3, %ymm2
+; AVX2-NEXT: vpsllw $2, %ymm0, %ymm4
+; AVX2-NEXT: vpand %ymm3, %ymm4, %ymm3
 ; AVX2-NEXT: vpor %ymm2, %ymm3, %ymm2
 ; AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm1
 ; AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
@@ -412,16 +412,18 @@
 ; AVX512F-LABEL: var_rotate_v32i8:
 ; AVX512F: # %bb.0:
 ; AVX512F-NEXT: vpsrlw $4, %ymm0, %ymm2
-; AVX512F-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX512F-NEXT: vpsllw $4, %ymm0, %ymm3
-; AVX512F-NEXT: vpand {{.*}}(%rip), %ymm3, %ymm3
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512F-NEXT: vpandn %ymm2, %ymm3, %ymm2
+; AVX512F-NEXT: vpsllw $4, %ymm0, %ymm4
+; AVX512F-NEXT: vpand %ymm3, %ymm4, %ymm3
 ; AVX512F-NEXT: vpor %ymm2, %ymm3, %ymm2
 ; AVX512F-NEXT: vpsllw $5, %ymm1, %ymm1
 ; AVX512F-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
 ; AVX512F-NEXT: vpsrlw $6, %ymm0, %ymm2
-; AVX512F-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX512F-NEXT: vpsllw $2, %ymm0, %ymm3
-; AVX512F-NEXT: vpand {{.*}}(%rip), %ymm3, %ymm3
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; AVX512F-NEXT: vpandn %ymm2, %ymm3, %ymm2
+; AVX512F-NEXT: vpsllw $2, %ymm0, %ymm4
+; AVX512F-NEXT: vpand %ymm3, %ymm4, %ymm3
 ; AVX512F-NEXT: vpor %ymm2, %ymm3, %ymm2
 ; AVX512F-NEXT: vpaddb %ymm1, %ymm1, %ymm1
 ; AVX512F-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
@@ -436,16 +438,18 @@
 ; AVX512VL-LABEL: var_rotate_v32i8:
 ; AVX512VL: # %bb.0:
 ; AVX512VL-NEXT: vpsrlw $4, %ymm0, %ymm2
-; AVX512VL-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX512VL-NEXT: vpsllw $4, %ymm0, %ymm3
-; AVX512VL-NEXT: vpand {{.*}}(%rip), %ymm3, %ymm3
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512VL-NEXT: vpandn %ymm2, %ymm3, %ymm2
+; AVX512VL-NEXT: vpsllw $4, %ymm0, %ymm4
+; AVX512VL-NEXT: vpand %ymm3, %ymm4, %ymm3
 ; AVX512VL-NEXT: vpor %ymm2, %ymm3, %ymm2
 ; AVX512VL-NEXT: vpsllw $5, %ymm1, %ymm1
 ; AVX512VL-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
 ; AVX512VL-NEXT: vpsrlw $6, %ymm0, %ymm2
-; AVX512VL-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
-; AVX512VL-NEXT: vpsllw $2, %ymm0, %ymm3
-; AVX512VL-NEXT: vpand {{.*}}(%rip), %ymm3, %ymm3
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; AVX512VL-NEXT: vpandn %ymm2, %ymm3, %ymm2
+; AVX512VL-NEXT: vpsllw $2, %ymm0, %ymm4
+; AVX512VL-NEXT: vpand %ymm3, %ymm4, %ymm3
 ; AVX512VL-NEXT: vpor %ymm2, %ymm3, %ymm2
 ; AVX512VL-NEXT: vpaddb %ymm1, %ymm1, %ymm1
 ; AVX512VL-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
@@ -1476,16 +1480,15 @@
 ; AVX1: # %bb.0:
 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT: vpsrlw $4, %xmm1, %xmm2
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX1-NEXT: vpandn %xmm2, %xmm3, %xmm2
 ; AVX1-NEXT: vpsllw $4, %xmm1, %xmm1
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1
 ; AVX1-NEXT: vpor %xmm2, %xmm1, %xmm1
 ; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm2
-; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpandn %xmm2, %xmm3, %xmm2
 ; AVX1-NEXT: vpsllw $4, %xmm0, %xmm0
-; AVX1-NEXT: vpand %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm3, %xmm0, %xmm0
 ; AVX1-NEXT: vpor %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX1-NEXT: retq
@@ -1493,45 +1496,50 @@
 ; AVX2-LABEL: splatconstant_rotate_v32i8:
 ; AVX2: # %bb.0:
 ; AVX2-NEXT: vpsrlw $4, %ymm0, %ymm1
-; AVX2-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX2-NEXT: vpandn %ymm1, %ymm2, %ymm1
 ; AVX2-NEXT: vpsllw $4, %ymm0, %ymm0
-; AVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT: retq
 ;
 ; AVX512F-LABEL: splatconstant_rotate_v32i8:
 ; AVX512F: # %bb.0:
 ; AVX512F-NEXT: vpsrlw $4, %ymm0, %ymm1
-; AVX512F-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm2 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512F-NEXT: vpandn %ymm1, %ymm2, %ymm1
 ; AVX512F-NEXT: vpsllw $4, %ymm0, %ymm0
-; AVX512F-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX512F-NEXT: vpand %ymm2, %ymm0, %ymm0
 ; AVX512F-NEXT: vpor %ymm1, %ymm0, %ymm0
 ; AVX512F-NEXT: retq
 ;
 ; AVX512VL-LABEL: splatconstant_rotate_v32i8:
 ; AVX512VL: # %bb.0:
 ; AVX512VL-NEXT: vpsrlw $4, %ymm0, %ymm1
-; AVX512VL-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512VL-NEXT: vpandn %ymm1, %ymm2, %ymm1
 ; AVX512VL-NEXT: vpsllw $4, %ymm0, %ymm0
-; AVX512VL-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX512VL-NEXT: vpand %ymm2, %ymm0, %ymm0
 ; AVX512VL-NEXT: vpor %ymm1, %ymm0, %ymm0
 ; AVX512VL-NEXT: retq
 ;
 ; AVX512BW-LABEL: splatconstant_rotate_v32i8:
 ; AVX512BW: # %bb.0:
 ; AVX512BW-NEXT: vpsllw $4, %ymm0, %ymm1
-; AVX512BW-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm2 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512BW-NEXT: vpand %ymm2, %ymm1, %ymm1
 ; AVX512BW-NEXT: vpsrlw $4, %ymm0, %ymm0
-; AVX512BW-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX512BW-NEXT: vpandn %ymm0, %ymm2, %ymm0
 ; AVX512BW-NEXT: vpor %ymm0, %ymm1, %ymm0
 ; AVX512BW-NEXT: retq
 ;
 ; AVX512VLBW-LABEL: splatconstant_rotate_v32i8:
 ; AVX512VLBW: # %bb.0:
 ; AVX512VLBW-NEXT: vpsllw $4, %ymm0, %ymm1
-; AVX512VLBW-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX512VLBW-NEXT: vmovdqa {{.*#+}} ymm2 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512VLBW-NEXT: vpand %ymm2, %ymm1, %ymm1
 ; AVX512VLBW-NEXT: vpsrlw $4, %ymm0, %ymm0
-; AVX512VLBW-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX512VLBW-NEXT: vpandn %ymm0, %ymm2, %ymm0
 ; AVX512VLBW-NEXT: vpor %ymm0, %ymm1, %ymm0
 ; AVX512VLBW-NEXT: retq
 ;
@@ -1760,16 +1768,15 @@
 ; AVX1: # %bb.0:
 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
 ; AVX1-NEXT: vpsrlw $4, %xmm1, %xmm2
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX1-NEXT: vpandn %xmm2, %xmm3, %xmm2
 ; AVX1-NEXT: vpsllw $4, %xmm1, %xmm1
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1
 ; AVX1-NEXT: vpor %xmm2, %xmm1, %xmm1
 ; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm2
-; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpandn %xmm2, %xmm3, %xmm2
 ; AVX1-NEXT: vpsllw $4, %xmm0, %xmm0
-; AVX1-NEXT: vpand %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm3, %xmm0, %xmm0
 ; AVX1-NEXT: vpor %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX1-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
@@ -1778,9 +1785,10 @@
 ; AVX2-LABEL: splatconstant_rotate_mask_v32i8:
 ; AVX2: # %bb.0:
 ; AVX2-NEXT: vpsrlw $4, %ymm0, %ymm1
-; AVX2-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX2-NEXT: vpandn %ymm1, %ymm2, %ymm1
 ; AVX2-NEXT: vpsllw $4, %ymm0, %ymm0
-; AVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
 ; AVX2-NEXT: retq
@@ -1788,9 +1796,10 @@
 ; AVX512F-LABEL: splatconstant_rotate_mask_v32i8:
 ; AVX512F: # %bb.0:
 ; AVX512F-NEXT: vpsrlw $4, %ymm0, %ymm1
-; AVX512F-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm2 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512F-NEXT: vpandn %ymm1, %ymm2, %ymm1
 ; AVX512F-NEXT: vpsllw $4, %ymm0, %ymm0
-; AVX512F-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX512F-NEXT: vpand %ymm2, %ymm0, %ymm0
 ; AVX512F-NEXT: vpor %ymm1, %ymm0, %ymm0
 ; AVX512F-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
 ; AVX512F-NEXT: retq
@@ -1798,9 +1807,10 @@
 ; AVX512VL-LABEL: splatconstant_rotate_mask_v32i8:
 ; AVX512VL: # %bb.0:
 ; AVX512VL-NEXT: vpsrlw $4, %ymm0, %ymm1
-; AVX512VL-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512VL-NEXT: vpandn %ymm1, %ymm2, %ymm1
 ; AVX512VL-NEXT: vpsllw $4, %ymm0, %ymm0
-; AVX512VL-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX512VL-NEXT: vpand %ymm2, %ymm0, %ymm0
 ; AVX512VL-NEXT: vpor %ymm1, %ymm0, %ymm0
 ; AVX512VL-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
 ; AVX512VL-NEXT: retq
Index: test/CodeGen/X86/vector-rotate-512.ll
===================================================================
--- test/CodeGen/X86/vector-rotate-512.ll
+++ test/CodeGen/X86/vector-rotate-512.ll
@@ -109,46 +109,44 @@
 ; AVX512F-LABEL: var_rotate_v64i8:
 ; AVX512F: # %bb.0:
 ; AVX512F-NEXT: vpsrlw $4, %ymm0, %ymm4
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm5 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX512F-NEXT: vpand %ymm5, %ymm4, %ymm4
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm5 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512F-NEXT: vpandn %ymm4, %ymm5, %ymm4
 ; AVX512F-NEXT: vpsllw $4, %ymm0, %ymm6
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm7 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; AVX512F-NEXT: vpand %ymm7, %ymm6, %ymm6
+; AVX512F-NEXT: vpand %ymm5, %ymm6, %ymm6
 ; AVX512F-NEXT: vpor %ymm4, %ymm6, %ymm4
 ; AVX512F-NEXT: vpsllw $5, %ymm2, %ymm2
 ; AVX512F-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
 ; AVX512F-NEXT: vpsrlw $6, %ymm0, %ymm4
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm6 = [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3]
-; AVX512F-NEXT: vpand %ymm6, %ymm4, %ymm4
-; AVX512F-NEXT: vpsllw $2, %ymm0, %ymm8
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm9 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
-; AVX512F-NEXT: vpand %ymm9, %ymm8, %ymm8
-; AVX512F-NEXT: vpor %ymm4, %ymm8, %ymm4
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm6 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; AVX512F-NEXT: vpandn %ymm4, %ymm6, %ymm4
+; AVX512F-NEXT: vpsllw $2, %ymm0, %ymm7
+; AVX512F-NEXT: vpand %ymm6, %ymm7, %ymm7
+; AVX512F-NEXT: vpor %ymm4, %ymm7, %ymm4
 ; AVX512F-NEXT: vpaddb %ymm2, %ymm2, %ymm2
 ; AVX512F-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
 ; AVX512F-NEXT: vpsrlw $7, %ymm0, %ymm4
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm8 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512F-NEXT: vpand %ymm8, %ymm4, %ymm4
-; AVX512F-NEXT: vpaddb %ymm0, %ymm0, %ymm10
-; AVX512F-NEXT: vpor %ymm4, %ymm10, %ymm4
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm7 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512F-NEXT: vpand %ymm7, %ymm4, %ymm4
+; AVX512F-NEXT: vpaddb %ymm0, %ymm0, %ymm8
+; AVX512F-NEXT: vpor %ymm4, %ymm8, %ymm4
 ; AVX512F-NEXT: vpaddb %ymm2, %ymm2, %ymm2
 ; AVX512F-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
 ; AVX512F-NEXT: vpsrlw $4, %ymm1, %ymm2
-; AVX512F-NEXT: vpand %ymm5, %ymm2, %ymm2
+; AVX512F-NEXT: vpandn %ymm2, %ymm5, %ymm2
 ; AVX512F-NEXT: vpsllw $4, %ymm1, %ymm4
-; AVX512F-NEXT: vpand %ymm7, %ymm4, %ymm4
+; AVX512F-NEXT: vpand %ymm5, %ymm4, %ymm4
 ; AVX512F-NEXT: vpor %ymm2, %ymm4, %ymm2
 ; AVX512F-NEXT: vpsllw $5, %ymm3, %ymm3
 ; AVX512F-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
 ; AVX512F-NEXT: vpsrlw $6, %ymm1, %ymm2
-; AVX512F-NEXT: vpand %ymm6, %ymm2, %ymm2
+; AVX512F-NEXT: vpandn %ymm2, %ymm6, %ymm2
 ; AVX512F-NEXT: vpsllw $2, %ymm1, %ymm4
-; AVX512F-NEXT: vpand %ymm9, %ymm4, %ymm4
+; AVX512F-NEXT: vpand %ymm6, %ymm4, %ymm4
 ; AVX512F-NEXT: vpor %ymm2, %ymm4, %ymm2
 ; AVX512F-NEXT: vpaddb %ymm3, %ymm3, %ymm3
 ; AVX512F-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
 ; AVX512F-NEXT: vpsrlw $7, %ymm1, %ymm2
-; AVX512F-NEXT: vpand %ymm8, %ymm2, %ymm2
+; AVX512F-NEXT: vpand %ymm7, %ymm2, %ymm2
 ; AVX512F-NEXT: vpaddb %ymm1, %ymm1, %ymm4
 ; AVX512F-NEXT: vpor %ymm2, %ymm4, %ymm2
 ; AVX512F-NEXT: vpaddb %ymm3, %ymm3, %ymm3
@@ -158,46 +156,44 @@
 ; AVX512VL-LABEL: var_rotate_v64i8:
 ; AVX512VL: # %bb.0:
 ; AVX512VL-NEXT: vpsrlw $4, %ymm0, %ymm4
-; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm5 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX512VL-NEXT: vpand %ymm5, %ymm4, %ymm4
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm5 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512VL-NEXT: vpandn %ymm4, %ymm5, %ymm4
 ; AVX512VL-NEXT: vpsllw $4, %ymm0, %ymm6
-; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm7 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; AVX512VL-NEXT: vpand %ymm7, %ymm6, %ymm6
+; AVX512VL-NEXT: vpand %ymm5, %ymm6, %ymm6
 ; AVX512VL-NEXT: vpor %ymm4, %ymm6, %ymm4
 ; AVX512VL-NEXT: vpsllw $5, %ymm2, %ymm2
 ; AVX512VL-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
 ; AVX512VL-NEXT: vpsrlw $6, %ymm0, %ymm4
-; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm6 = [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3]
-; AVX512VL-NEXT: vpand %ymm6, %ymm4, %ymm4
-; AVX512VL-NEXT: vpsllw $2, %ymm0, %ymm8
-; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm9 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
-; AVX512VL-NEXT: vpand %ymm9, %ymm8, %ymm8
-; AVX512VL-NEXT: vpor %ymm4, %ymm8, %ymm4
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm6 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
+; AVX512VL-NEXT: vpandn %ymm4, %ymm6, %ymm4
+; AVX512VL-NEXT: vpsllw $2, %ymm0, %ymm7
+; AVX512VL-NEXT: vpand %ymm6, %ymm7, %ymm7
+; AVX512VL-NEXT: vpor %ymm4, %ymm7, %ymm4
 ; AVX512VL-NEXT: vpaddb %ymm2, %ymm2, %ymm2
 ; AVX512VL-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
 ; AVX512VL-NEXT: vpsrlw $7, %ymm0, %ymm4
-; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm8 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
-; AVX512VL-NEXT: vpand %ymm8, %ymm4, %ymm4
-; AVX512VL-NEXT: vpaddb %ymm0, %ymm0, %ymm10
-; AVX512VL-NEXT: vpor %ymm4, %ymm10, %ymm4
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm7 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512VL-NEXT: vpand %ymm7, %ymm4, %ymm4
+; AVX512VL-NEXT: vpaddb %ymm0, %ymm0, %ymm8
+; AVX512VL-NEXT: vpor %ymm4, %ymm8, %ymm4
 ; AVX512VL-NEXT: vpaddb %ymm2, %ymm2, %ymm2
 ; AVX512VL-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
 ; AVX512VL-NEXT: vpsrlw $4, %ymm1, %ymm2
-; AVX512VL-NEXT: vpand %ymm5, %ymm2, %ymm2
+; AVX512VL-NEXT: vpandn %ymm2, %ymm5, %ymm2
 ; AVX512VL-NEXT: vpsllw $4, %ymm1, %ymm4
-; AVX512VL-NEXT: vpand %ymm7, %ymm4, %ymm4
+; AVX512VL-NEXT: vpand %ymm5, %ymm4, %ymm4
 ; AVX512VL-NEXT: vpor %ymm2, %ymm4, %ymm2
 ; AVX512VL-NEXT: vpsllw $5, %ymm3, %ymm3
 ; AVX512VL-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
 ; AVX512VL-NEXT: vpsrlw $6, %ymm1, %ymm2
-; AVX512VL-NEXT: vpand %ymm6, %ymm2, %ymm2
+; AVX512VL-NEXT: vpandn %ymm2, %ymm6, %ymm2
 ; AVX512VL-NEXT: vpsllw $2, %ymm1, %ymm4
-; AVX512VL-NEXT: vpand %ymm9, %ymm4, %ymm4
+; AVX512VL-NEXT: vpand %ymm6, %ymm4, %ymm4
 ; AVX512VL-NEXT: vpor %ymm2, %ymm4, %ymm2
 ; AVX512VL-NEXT: vpaddb %ymm3, %ymm3, %ymm3
 ; AVX512VL-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
 ; AVX512VL-NEXT: vpsrlw $7, %ymm1, %ymm2
-; AVX512VL-NEXT: vpand %ymm8, %ymm2, %ymm2
+; AVX512VL-NEXT: vpand %ymm7, %ymm2, %ymm2
 ; AVX512VL-NEXT: vpaddb %ymm1, %ymm1, %ymm4
 ; AVX512VL-NEXT: vpor %ymm2, %ymm4, %ymm2
 ; AVX512VL-NEXT: vpaddb %ymm3, %ymm3, %ymm3
@@ -767,50 +763,50 @@
 ; AVX512F-LABEL: splatconstant_rotate_v64i8:
 ; AVX512F: # %bb.0:
 ; AVX512F-NEXT: vpsrlw $4, %ymm0, %ymm2
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX512F-NEXT: vpand %ymm3, %ymm2, %ymm2
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512F-NEXT: vpandn %ymm2, %ymm3, %ymm2
 ; AVX512F-NEXT: vpsllw $4, %ymm0, %ymm0
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; AVX512F-NEXT: vpand %ymm4, %ymm0, %ymm0
+; AVX512F-NEXT: vpand %ymm3, %ymm0, %ymm0
 ; AVX512F-NEXT: vpor %ymm2, %ymm0, %ymm0
 ; AVX512F-NEXT: vpsrlw $4, %ymm1, %ymm2
-; AVX512F-NEXT: vpand %ymm3, %ymm2, %ymm2
+; AVX512F-NEXT: vpandn %ymm2, %ymm3, %ymm2
 ; AVX512F-NEXT: vpsllw $4, %ymm1, %ymm1
-; AVX512F-NEXT: vpand %ymm4, %ymm1, %ymm1
+; AVX512F-NEXT: vpand %ymm3, %ymm1, %ymm1
 ; AVX512F-NEXT: vpor %ymm2, %ymm1, %ymm1
 ; AVX512F-NEXT: retq
 ;
 ; AVX512VL-LABEL: splatconstant_rotate_v64i8:
 ; AVX512VL: # %bb.0:
 ; AVX512VL-NEXT: vpsrlw $4, %ymm0, %ymm2
-; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX512VL-NEXT: vpand %ymm3, %ymm2, %ymm2
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512VL-NEXT: vpandn %ymm2, %ymm3, %ymm2
 ; AVX512VL-NEXT: vpsllw $4, %ymm0, %ymm0
-; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; AVX512VL-NEXT: vpand %ymm4, %ymm0, %ymm0
+; AVX512VL-NEXT: vpand %ymm3, %ymm0, %ymm0
 ; AVX512VL-NEXT: vpor %ymm2, %ymm0, %ymm0
 ; AVX512VL-NEXT: vpsrlw $4, %ymm1, %ymm2
-; AVX512VL-NEXT: vpand %ymm3, %ymm2, %ymm2
+; AVX512VL-NEXT: vpandn %ymm2, %ymm3, %ymm2
 ; AVX512VL-NEXT: vpsllw $4, %ymm1, %ymm1
-; AVX512VL-NEXT: vpand %ymm4, %ymm1, %ymm1
+; AVX512VL-NEXT: vpand %ymm3, %ymm1, %ymm1
 ; AVX512VL-NEXT: vpor %ymm2, %ymm1, %ymm1
 ; AVX512VL-NEXT: retq
 ;
 ; AVX512BW-LABEL: splatconstant_rotate_v64i8:
 ; AVX512BW: # %bb.0:
 ; AVX512BW-NEXT: vpsllw $4, %zmm0, %zmm1
-; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm1, %zmm1
+; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512BW-NEXT: vpandq %zmm2, %zmm1, %zmm1
 ; AVX512BW-NEXT: vpsrlw $4, %zmm0, %zmm0
-; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm0, %zmm0
+; AVX512BW-NEXT: vpandnq %zmm0, %zmm2, %zmm0
 ; AVX512BW-NEXT: vporq %zmm0, %zmm1, %zmm0
 ; AVX512BW-NEXT: retq
 ;
 ; AVX512VLBW-LABEL: splatconstant_rotate_v64i8:
 ; AVX512VLBW: # %bb.0:
 ; AVX512VLBW-NEXT: vpsllw $4, %zmm0, %zmm1
-; AVX512VLBW-NEXT: vpandq {{.*}}(%rip), %zmm1, %zmm1
+; AVX512VLBW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512VLBW-NEXT: vpandq %zmm2, %zmm1, %zmm1
 ; AVX512VLBW-NEXT: vpsrlw $4, %zmm0, %zmm0
-; AVX512VLBW-NEXT: vpandq {{.*}}(%rip), %zmm0, %zmm0
+; AVX512VLBW-NEXT: vpandnq %zmm0, %zmm2, %zmm0
 ; AVX512VLBW-NEXT: vporq %zmm0, %zmm1, %zmm0
 ; AVX512VLBW-NEXT: retq
 %shl = shl <64 x i8> %a,
@@ -907,38 +903,36 @@
 ; AVX512F-LABEL: splatconstant_rotate_mask_v64i8:
 ; AVX512F: # %bb.0:
 ; AVX512F-NEXT: vpsrlw $4, %ymm0, %ymm2
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX512F-NEXT: vpand %ymm3, %ymm2, %ymm2
+; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512F-NEXT: vpandn %ymm2, %ymm3, %ymm2
 ; AVX512F-NEXT: vpsllw $4, %ymm0, %ymm0
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; AVX512F-NEXT: vpand %ymm4, %ymm0, %ymm0
+; AVX512F-NEXT: vpand %ymm3, %ymm0, %ymm0
 ; AVX512F-NEXT: vpor %ymm2, %ymm0, %ymm0
 ; AVX512F-NEXT: vmovdqa {{.*#+}} ymm2 = [39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39]
 ; AVX512F-NEXT: vpand %ymm2, %ymm0, %ymm0
-; AVX512F-NEXT: vpsrlw $4, %ymm1, %ymm5
-; AVX512F-NEXT: vpand %ymm3, %ymm5, %ymm3
+; AVX512F-NEXT: vpsrlw $4, %ymm1, %ymm4
+; AVX512F-NEXT: vpandn %ymm4, %ymm3, %ymm4
 ; AVX512F-NEXT: vpsllw $4, %ymm1, %ymm1
-; AVX512F-NEXT: vpand %ymm4, %ymm1, %ymm1
-; AVX512F-NEXT: vpor %ymm3, %ymm1, %ymm1
+; AVX512F-NEXT: vpand %ymm3, %ymm1, %ymm1
+; AVX512F-NEXT: vpor %ymm4, %ymm1, %ymm1
 ; AVX512F-NEXT: vpand %ymm2, %ymm1, %ymm1
 ; AVX512F-NEXT: retq
 ;
 ; AVX512VL-LABEL: splatconstant_rotate_mask_v64i8:
 ; AVX512VL: # %bb.0:
 ; AVX512VL-NEXT: vpsrlw $4, %ymm0, %ymm2
-; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX512VL-NEXT: vpand %ymm3, %ymm2, %ymm2
+; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
+; AVX512VL-NEXT: vpandn %ymm2, %ymm3, %ymm2
 ; AVX512VL-NEXT: vpsllw $4, %ymm0, %ymm0
-; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; AVX512VL-NEXT: vpand %ymm4, %ymm0, %ymm0
+; AVX512VL-NEXT: vpand %ymm3, %ymm0, %ymm0
 ; AVX512VL-NEXT: vpor %ymm2, %ymm0, %ymm0
 ; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = [39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39]
 ; AVX512VL-NEXT: vpand %ymm2, %ymm0, %ymm0
-; AVX512VL-NEXT: vpsrlw $4, %ymm1, %ymm5
-; AVX512VL-NEXT: vpand %ymm3, %ymm5, %ymm3
+; AVX512VL-NEXT: vpsrlw $4, %ymm1, %ymm4
+; AVX512VL-NEXT: vpandn %ymm4, %ymm3, %ymm4
 ; AVX512VL-NEXT: vpsllw $4, %ymm1, %ymm1
-; AVX512VL-NEXT: vpand %ymm4, %ymm1, %ymm1
-; AVX512VL-NEXT: vpor %ymm3, %ymm1, %ymm1
+; AVX512VL-NEXT: vpand %ymm3, %ymm1, %ymm1
+; AVX512VL-NEXT: vpor %ymm4, %ymm1, %ymm1
 ; AVX512VL-NEXT: vpand %ymm2, %ymm1, %ymm1
 ; AVX512VL-NEXT: retq
 ;
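
For reference only (an illustration, not part of the patch): the splatconstant_rotate tests above boil down to a byte-wise rotate-by-4, sketched in C below with a hypothetical helper name. Because the 0x0F and 0xF0 nibble masks are bitwise complements, the OR of the two masked, shifted halves is a bit-select that needs only the single 0xF0 constant plus an and-not, which is exactly the vpand/vpandn (and vpandq/vpandnq) pairing the updated CHECK lines expect instead of two constant-pool loads.

#include <stdint.h>

/* Scalar model of the byte-wise rotate-by-4 exercised by the
   splatconstant_rotate tests (rotl4_u8 is a made-up name for this sketch).
   hi is masked with C = 0xF0, lo with its complement ~C = 0x0F; a backend
   can reuse the one constant and form the ~C side with an and-not. */
static inline uint8_t rotl4_u8(uint8_t b) {
  uint8_t hi = (uint8_t)(b << 4) & 0xF0; /* shifted-left half, masked with C  */
  uint8_t lo = (uint8_t)(b >> 4) & 0x0F; /* shifted-right half, masked with ~C */
  return hi | lo;                        /* OR(AND(x,C),AND(y,~C)) bit-select  */
}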