Index: lib/Target/X86/X86ISelLowering.cpp =================================================================== --- lib/Target/X86/X86ISelLowering.cpp +++ lib/Target/X86/X86ISelLowering.cpp @@ -834,6 +834,8 @@ setOperationAction(ISD::MUL, MVT::v2i64, Custom); setOperationAction(ISD::UMUL_LOHI, MVT::v4i32, Custom); setOperationAction(ISD::SMUL_LOHI, MVT::v4i32, Custom); + setOperationAction(ISD::MULHU, MVT::v16i8, Custom); + setOperationAction(ISD::MULHS, MVT::v16i8, Custom); setOperationAction(ISD::MULHU, MVT::v8i16, Legal); setOperationAction(ISD::MULHS, MVT::v8i16, Legal); setOperationAction(ISD::SUB, MVT::v16i8, Legal); @@ -1193,8 +1195,11 @@ setOperationAction(ISD::UMUL_LOHI, MVT::v8i32, Custom); setOperationAction(ISD::SMUL_LOHI, MVT::v8i32, Custom); + setOperationAction(ISD::MULHU, MVT::v16i16, Legal); setOperationAction(ISD::MULHS, MVT::v16i16, Legal); + setOperationAction(ISD::MULHU, MVT::v32i8, Custom); + setOperationAction(ISD::MULHS, MVT::v32i8, Custom); setOperationAction(ISD::SMAX, MVT::v32i8, Legal); setOperationAction(ISD::SMAX, MVT::v16i16, Legal); @@ -1247,6 +1252,11 @@ setOperationAction(ISD::MUL, MVT::v16i16, Custom); setOperationAction(ISD::MUL, MVT::v32i8, Custom); + setOperationAction(ISD::MULHU, MVT::v16i16, Custom); + setOperationAction(ISD::MULHS, MVT::v16i16, Custom); + setOperationAction(ISD::MULHU, MVT::v32i8, Custom); + setOperationAction(ISD::MULHS, MVT::v32i8, Custom); + setOperationAction(ISD::SMAX, MVT::v32i8, Custom); setOperationAction(ISD::SMAX, MVT::v16i16, Custom); setOperationAction(ISD::SMAX, MVT::v8i32, Custom); @@ -19036,6 +19046,119 @@ return DAG.getNode(ISD::ADD, dl, VT, Res, AhiBlo); } +static SDValue LowerMULH(SDValue Op, const X86Subtarget &Subtarget, + SelectionDAG &DAG) { + SDLoc dl(Op); + MVT VT = Op.getSimpleValueType(); + + // Decompose 256-bit ops into smaller 128-bit ops. + if (VT.is256BitVector() && !Subtarget.hasInt256()) + return Lower256IntArith(Op, DAG); + + // Only i8 vectors should need custom lowering after this. + assert((VT == MVT::v16i8 || (VT == MVT::v32i8 && Subtarget.hasInt256())) && + "Unsupported vector type"); + + // Lower v16i8/v32i8 as extension to v8i16/v16i16 vector pairs, multiply, + // logical shift down the upper half and pack back to i8. + SDValue A = Op.getOperand(0); + SDValue B = Op.getOperand(1); + + // With SSE41 we can use sign/zero extend, but for pre-SSE41 we unpack + // and then ashr/lshr the upper bits down to the lower bits before multiply. + unsigned Opcode = Op.getOpcode(); + unsigned ExShift = (ISD::MULHU == Opcode ? ISD::SRL : ISD::SRA); + unsigned ExSSE41 = (ISD::MULHU == Opcode ? X86ISD::VZEXT : X86ISD::VSEXT); + + // AVX2 implementations - extend xmm subvectors to ymm. 
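+  // (v16i8 extends to a single v16i16 multiply; v32i8 is split into two
+  // v16i8 halves that are extended to v16i16, multiplied and repacked.)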
+ if (Subtarget.hasInt256()) { + SDValue Lo = DAG.getIntPtrConstant(0, dl); + SDValue Hi = DAG.getIntPtrConstant(VT.getVectorNumElements() / 2, dl); + + if (VT == MVT::v32i8) { + SDValue ALo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v16i8, A, Lo); + SDValue BLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v16i8, B, Lo); + SDValue AHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v16i8, A, Hi); + SDValue BHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v16i8, B, Hi); + ALo = DAG.getNode(ExSSE41, dl, MVT::v16i16, ALo); + BLo = DAG.getNode(ExSSE41, dl, MVT::v16i16, BLo); + AHi = DAG.getNode(ExSSE41, dl, MVT::v16i16, AHi); + BHi = DAG.getNode(ExSSE41, dl, MVT::v16i16, BHi); + Lo = DAG.getNode(ISD::SRL, dl, MVT::v16i16, + DAG.getNode(ISD::MUL, dl, MVT::v16i16, ALo, BLo), + DAG.getConstant(8, dl, MVT::v16i16)); + Hi = DAG.getNode(ISD::SRL, dl, MVT::v16i16, + DAG.getNode(ISD::MUL, dl, MVT::v16i16, AHi, BHi), + DAG.getConstant(8, dl, MVT::v16i16)); + // Before using ymm PACKUS we need to permute inputs to lower/upper xmm. + const int LoMask[] = {0, 1, 2, 3, 4, 5, 6, 7, + 16, 17, 18, 19, 20, 21, 22, 23}; + const int HiMask[] = {8, 9, 10, 11, 12, 13, 14, 15, + 24, 25, 26, 27, 28, 29, 30, 31}; + return DAG.getNode(X86ISD::PACKUS, dl, VT, + DAG.getVectorShuffle(MVT::v16i16, dl, Lo, Hi, LoMask), + DAG.getVectorShuffle(MVT::v16i16, dl, Lo, Hi, HiMask)); + } + + SDValue ExA = DAG.getNode(ExSSE41, dl, MVT::v16i16, A); + SDValue ExB = DAG.getNode(ExSSE41, dl, MVT::v16i16, B); + SDValue Mul = DAG.getNode(ISD::MUL, dl, MVT::v16i16, ExA, ExB); + SDValue MulH = DAG.getNode(ISD::SRL, dl, MVT::v16i16, Mul, + DAG.getConstant(8, dl, MVT::v16i16)); + Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v8i16, MulH, Lo); + Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v8i16, MulH, Hi); + return DAG.getNode(X86ISD::PACKUS, dl, VT, Lo, Hi); + } + + assert(VT == MVT::v16i8 && + "Pre-AVX2 lowering only supports v16i8 multiplication"); + MVT ExVT = MVT::v8i16; + + // Extract the lo parts and zero/sign extend to i16. + SDValue ALo, BLo; + if (Subtarget.hasSSE41()) { + ALo = DAG.getNode(ExSSE41, dl, ExVT, A); + BLo = DAG.getNode(ExSSE41, dl, ExVT, B); + } else { + const int ShufMask[] = {-1, 0, -1, 1, -1, 2, -1, 3, + -1, 4, -1, 5, -1, 6, -1, 7}; + ALo = DAG.getVectorShuffle(VT, dl, A, A, ShufMask); + BLo = DAG.getVectorShuffle(VT, dl, B, B, ShufMask); + ALo = DAG.getBitcast(ExVT, ALo); + BLo = DAG.getBitcast(ExVT, BLo); + ALo = DAG.getNode(ExShift, dl, ExVT, ALo, DAG.getConstant(8, dl, ExVT)); + BLo = DAG.getNode(ExShift, dl, ExVT, BLo, DAG.getConstant(8, dl, ExVT)); + } + + // Extract the hi parts and zero/sign extend to i16. + SDValue AHi, BHi; + if (Subtarget.hasSSE41()) { + const int ShufMask[] = {8, 9, 10, 11, 12, 13, 14, 15, + -1, -1, -1, -1, -1, -1, -1, -1}; + AHi = DAG.getVectorShuffle(VT, dl, A, A, ShufMask); + BHi = DAG.getVectorShuffle(VT, dl, B, B, ShufMask); + AHi = DAG.getNode(ExSSE41, dl, ExVT, AHi); + BHi = DAG.getNode(ExSSE41, dl, ExVT, BHi); + } else { + const int ShufMask[] = {-1, 8, -1, 9, -1, 10, -1, 11, + -1, 12, -1, 13, -1, 14, -1, 15}; + AHi = DAG.getVectorShuffle(VT, dl, A, A, ShufMask); + BHi = DAG.getVectorShuffle(VT, dl, B, B, ShufMask); + AHi = DAG.getBitcast(ExVT, AHi); + BHi = DAG.getBitcast(ExVT, BHi); + AHi = DAG.getNode(ExShift, dl, ExVT, AHi, DAG.getConstant(8, dl, ExVT)); + BHi = DAG.getNode(ExShift, dl, ExVT, BHi, DAG.getConstant(8, dl, ExVT)); + } + + // Multiply, lshr the upper 8 bits down to the lower 8 bits of the lo/hi + // results and pack back to v16i8.
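+  // PACKUS saturates to [0, 255], but after the logical shift every i16 lane
+  // already holds a value in [0, 255], so no lane is clamped for MULHU or
+  // MULHS.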
+ SDValue RLo = DAG.getNode(ISD::MUL, dl, ExVT, ALo, BLo); + SDValue RHi = DAG.getNode(ISD::MUL, dl, ExVT, AHi, BHi); + RLo = DAG.getNode(ISD::SRL, dl, ExVT, RLo, DAG.getConstant(8, dl, ExVT)); + RHi = DAG.getNode(ISD::SRL, dl, ExVT, RHi, DAG.getConstant(8, dl, ExVT)); + return DAG.getNode(X86ISD::PACKUS, dl, VT, RLo, RHi); +} + SDValue X86TargetLowering::LowerWin64_i128OP(SDValue Op, SelectionDAG &DAG) const { assert(Subtarget.isTargetWin64() && "Unexpected target"); EVT VT = Op.getValueType(); @@ -21075,6 +21198,8 @@ case ISD::CTTZ: case ISD::CTTZ_ZERO_UNDEF: return LowerCTTZ(Op, DAG); case ISD::MUL: return LowerMUL(Op, Subtarget, DAG); + case ISD::MULHS: + case ISD::MULHU: return LowerMULH(Op, Subtarget, DAG); case ISD::UMUL_LOHI: case ISD::SMUL_LOHI: return LowerMUL_LOHI(Op, Subtarget, DAG); case ISD::ROTL: return LowerRotate(Op, Subtarget, DAG); Index: test/CodeGen/X86/vector-idiv-sdiv-128.ll =================================================================== --- test/CodeGen/X86/vector-idiv-sdiv-128.ll +++ test/CodeGen/X86/vector-idiv-sdiv-128.ll @@ -175,550 +175,97 @@ define <16 x i8> @test_div7_16i8(<16 x i8> %a) nounwind { ; SSE2-LABEL: test_div7_16i8: ; SSE2: # BB#0: -; SSE2-NEXT: pushq %rbp -; SSE2-NEXT: pushq %r14 -; SSE2-NEXT: pushq %rbx -; SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp) -; SSE2-NEXT: movsbl -{{[0-9]+}}(%rsp), %eax -; SSE2-NEXT: imull $-109, %eax, %ecx -; SSE2-NEXT: shrl $8, %ecx -; SSE2-NEXT: addb %al, %cl -; SSE2-NEXT: movb %cl, %al -; SSE2-NEXT: shrb $7, %al -; SSE2-NEXT: sarb $2, %cl -; SSE2-NEXT: addb %al, %cl -; SSE2-NEXT: movzbl %cl, %eax -; SSE2-NEXT: movd %eax, %xmm0 -; SSE2-NEXT: movsbl -{{[0-9]+}}(%rsp), %r14d -; SSE2-NEXT: movsbl -{{[0-9]+}}(%rsp), %edx -; SSE2-NEXT: movsbl -{{[0-9]+}}(%rsp), %r9d -; SSE2-NEXT: movsbl -{{[0-9]+}}(%rsp), %eax -; SSE2-NEXT: movsbl -{{[0-9]+}}(%rsp), %r11d -; SSE2-NEXT: movsbl -{{[0-9]+}}(%rsp), %ecx -; SSE2-NEXT: movsbl -{{[0-9]+}}(%rsp), %r8d -; SSE2-NEXT: movsbl -{{[0-9]+}}(%rsp), %esi -; SSE2-NEXT: imull $-109, %esi, %edi -; SSE2-NEXT: shrl $8, %edi -; SSE2-NEXT: addb %sil, %dil -; SSE2-NEXT: movb %dil, %bl -; SSE2-NEXT: shrb $7, %bl -; SSE2-NEXT: sarb $2, %dil -; SSE2-NEXT: addb %bl, %dil -; SSE2-NEXT: movzbl %dil, %esi -; SSE2-NEXT: movd %esi, %xmm1 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] -; SSE2-NEXT: imull $-109, %eax, %esi -; SSE2-NEXT: shrl $8, %esi -; SSE2-NEXT: addb %al, %sil -; SSE2-NEXT: movb %sil, %al -; SSE2-NEXT: shrb $7, %al -; SSE2-NEXT: sarb $2, %sil -; SSE2-NEXT: addb %al, %sil -; SSE2-NEXT: movzbl %sil, %eax -; SSE2-NEXT: movd %eax, %xmm2 -; SSE2-NEXT: movsbl -{{[0-9]+}}(%rsp), %ebp -; SSE2-NEXT: movsbl -{{[0-9]+}}(%rsp), %esi -; SSE2-NEXT: movsbl -{{[0-9]+}}(%rsp), %r10d -; SSE2-NEXT: movsbl -{{[0-9]+}}(%rsp), %edi -; SSE2-NEXT: imull $-109, %edi, %ebx -; SSE2-NEXT: shrl $8, %ebx -; SSE2-NEXT: addb %dil, %bl -; SSE2-NEXT: movb %bl, %al -; SSE2-NEXT: shrb $7, %al -; SSE2-NEXT: sarb $2, %bl -; SSE2-NEXT: addb %al, %bl -; SSE2-NEXT: movzbl %bl, %eax -; SSE2-NEXT: movd %eax, %xmm0 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] -; SSE2-NEXT: imull $-109, %edx, %eax -; SSE2-NEXT: shrl $8, %eax -; 
SSE2-NEXT: addb %dl, %al -; SSE2-NEXT: movb %al, %dl -; SSE2-NEXT: shrb $7, %dl -; SSE2-NEXT: sarb $2, %al -; SSE2-NEXT: addb %dl, %al -; SSE2-NEXT: movzbl %al, %eax -; SSE2-NEXT: movd %eax, %xmm1 -; SSE2-NEXT: imull $-109, %esi, %eax -; SSE2-NEXT: shrl $8, %eax -; SSE2-NEXT: addb %sil, %al -; SSE2-NEXT: movb %al, %dl -; SSE2-NEXT: shrb $7, %dl -; SSE2-NEXT: sarb $2, %al -; SSE2-NEXT: addb %dl, %al -; SSE2-NEXT: movzbl %al, %eax -; SSE2-NEXT: movd %eax, %xmm2 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7] -; SSE2-NEXT: imull $-109, %ecx, %eax -; SSE2-NEXT: shrl $8, %eax -; SSE2-NEXT: addb %cl, %al -; SSE2-NEXT: movb %al, %cl -; SSE2-NEXT: shrb $7, %cl -; SSE2-NEXT: sarb $2, %al -; SSE2-NEXT: addb %cl, %al -; SSE2-NEXT: movzbl %al, %eax -; SSE2-NEXT: movd %eax, %xmm3 -; SSE2-NEXT: movsbl -{{[0-9]+}}(%rsp), %ecx -; SSE2-NEXT: movsbl -{{[0-9]+}}(%rsp), %eax -; SSE2-NEXT: imull $-109, %eax, %edx -; SSE2-NEXT: shrl $8, %edx -; SSE2-NEXT: addb %al, %dl -; SSE2-NEXT: movb %dl, %al -; SSE2-NEXT: shrb $7, %al -; SSE2-NEXT: sarb $2, %dl -; SSE2-NEXT: addb %al, %dl -; SSE2-NEXT: movzbl %dl, %eax -; SSE2-NEXT: movd %eax, %xmm1 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] -; SSE2-NEXT: imull $-109, %r14d, %eax -; SSE2-NEXT: shrl $8, %eax -; SSE2-NEXT: addb %r14b, %al -; SSE2-NEXT: movb %al, %dl -; SSE2-NEXT: shrb $7, %dl -; SSE2-NEXT: sarb $2, %al -; SSE2-NEXT: addb %dl, %al -; SSE2-NEXT: movzbl %al, %eax -; SSE2-NEXT: movd %eax, %xmm2 -; SSE2-NEXT: imull $-109, %ebp, %eax -; SSE2-NEXT: shrl $8, %eax -; SSE2-NEXT: addb %bpl, %al -; SSE2-NEXT: movb %al, %dl -; SSE2-NEXT: shrb $7, %dl -; SSE2-NEXT: sarb $2, %al -; SSE2-NEXT: addb %dl, %al -; SSE2-NEXT: movzbl %al, %eax -; SSE2-NEXT: movd %eax, %xmm0 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7] -; SSE2-NEXT: imull $-109, %r11d, %eax -; SSE2-NEXT: shrl $8, %eax -; SSE2-NEXT: addb %r11b, %al -; SSE2-NEXT: movb %al, %dl -; SSE2-NEXT: shrb $7, %dl -; SSE2-NEXT: sarb $2, %al -; SSE2-NEXT: addb %dl, %al -; SSE2-NEXT: movzbl %al, %eax -; SSE2-NEXT: movd %eax, %xmm3 -; SSE2-NEXT: imull $-109, %ecx, %eax -; SSE2-NEXT: shrl $8, %eax -; SSE2-NEXT: addb %cl, %al -; SSE2-NEXT: movb %al, %cl -; SSE2-NEXT: shrb $7, %cl -; SSE2-NEXT: sarb $2, %al -; SSE2-NEXT: addb %cl, %al -; SSE2-NEXT: movzbl %al, %eax -; SSE2-NEXT: movd %eax, %xmm2 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] -; SSE2-NEXT: imull $-109, %r9d, %eax -; SSE2-NEXT: shrl $8, %eax -; SSE2-NEXT: addb %r9b, %al -; SSE2-NEXT: movb %al, %cl -; SSE2-NEXT: shrb $7, %cl -; SSE2-NEXT: sarb $2, %al -; SSE2-NEXT: addb %cl, %al -; SSE2-NEXT: 
movzbl %al, %eax -; SSE2-NEXT: movd %eax, %xmm0 -; SSE2-NEXT: imull $-109, %r10d, %eax -; SSE2-NEXT: shrl $8, %eax -; SSE2-NEXT: addb %r10b, %al -; SSE2-NEXT: movb %al, %cl -; SSE2-NEXT: shrb $7, %cl -; SSE2-NEXT: sarb $2, %al -; SSE2-NEXT: addb %cl, %al -; SSE2-NEXT: movzbl %al, %eax -; SSE2-NEXT: movd %eax, %xmm3 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7] -; SSE2-NEXT: imull $-109, %r8d, %eax -; SSE2-NEXT: shrl $8, %eax -; SSE2-NEXT: addb %r8b, %al -; SSE2-NEXT: movb %al, %cl -; SSE2-NEXT: shrb $7, %cl -; SSE2-NEXT: sarb $2, %al -; SSE2-NEXT: addb %cl, %al -; SSE2-NEXT: movzbl %al, %eax -; SSE2-NEXT: movd %eax, %xmm4 -; SSE2-NEXT: movsbl -{{[0-9]+}}(%rsp), %eax -; SSE2-NEXT: imull $-109, %eax, %ecx -; SSE2-NEXT: shrl $8, %ecx -; SSE2-NEXT: addb %al, %cl -; SSE2-NEXT: movb %cl, %al -; SSE2-NEXT: shrb $7, %al -; SSE2-NEXT: sarb $2, %cl -; SSE2-NEXT: addb %al, %cl -; SSE2-NEXT: movzbl %cl, %eax -; SSE2-NEXT: movd %eax, %xmm0 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] -; SSE2-NEXT: popq %rbx -; SSE2-NEXT: popq %r14 -; SSE2-NEXT: popq %rbp +; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147] +; SSE2-NEXT: psraw $8, %xmm2 +; SSE2-NEXT: movdqa %xmm0, %xmm3 +; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] +; SSE2-NEXT: psraw $8, %xmm3 +; SSE2-NEXT: pmullw %xmm2, %xmm3 +; SSE2-NEXT: psrlw $8, %xmm3 +; SSE2-NEXT: movdqa %xmm0, %xmm1 +; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] +; SSE2-NEXT: psraw $8, %xmm1 +; SSE2-NEXT: pmullw %xmm2, %xmm1 +; SSE2-NEXT: psrlw $8, %xmm1 +; SSE2-NEXT: packuswb %xmm3, %xmm1 +; SSE2-NEXT: paddb %xmm0, %xmm1 +; SSE2-NEXT: movdqa %xmm1, %xmm0 +; SSE2-NEXT: psrlw $2, %xmm0 +; SSE2-NEXT: pand {{.*}}(%rip), %xmm0 +; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32] +; SSE2-NEXT: pxor %xmm2, %xmm0 +; SSE2-NEXT: psubb %xmm2, %xmm0 +; SSE2-NEXT: psrlw $7, %xmm1 +; SSE2-NEXT: pand {{.*}}(%rip), %xmm1 +; SSE2-NEXT: paddb %xmm0, %xmm1 +; SSE2-NEXT: movdqa %xmm1, %xmm0 ; SSE2-NEXT: retq ; ; SSE41-LABEL: test_div7_16i8: ; SSE41: # BB#0: -; SSE41-NEXT: pextrb $1, %xmm0, %eax -; SSE41-NEXT: movsbl %al, %eax -; SSE41-NEXT: imull $-109, %eax, %ecx -; SSE41-NEXT: shrl $8, %ecx -; SSE41-NEXT: addb %cl, %al -; SSE41-NEXT: movb %al, %cl -; SSE41-NEXT: shrb $7, %cl -; SSE41-NEXT: sarb $2, %al -; SSE41-NEXT: addb %cl, %al -; SSE41-NEXT: movzbl %al, %eax -; SSE41-NEXT: pextrb $0, %xmm0, %ecx -; SSE41-NEXT: movsbl %cl, %ecx -; SSE41-NEXT: imull $-109, %ecx, %edx -; SSE41-NEXT: shrl $8, %edx -; SSE41-NEXT: addb %dl, %cl -; SSE41-NEXT: movb %cl, %dl -; SSE41-NEXT: shrb $7, %dl -; SSE41-NEXT: sarb $2, %cl -; SSE41-NEXT: addb %dl, %cl -; SSE41-NEXT: movzbl %cl, %ecx -; SSE41-NEXT: movd %ecx, %xmm1 -; 
SSE41-NEXT: pinsrb $1, %eax, %xmm1 -; SSE41-NEXT: pextrb $2, %xmm0, %eax -; SSE41-NEXT: movsbl %al, %eax -; SSE41-NEXT: imull $-109, %eax, %ecx -; SSE41-NEXT: shrl $8, %ecx -; SSE41-NEXT: addb %cl, %al -; SSE41-NEXT: movb %al, %cl -; SSE41-NEXT: shrb $7, %cl -; SSE41-NEXT: sarb $2, %al -; SSE41-NEXT: addb %cl, %al -; SSE41-NEXT: movzbl %al, %eax -; SSE41-NEXT: pinsrb $2, %eax, %xmm1 -; SSE41-NEXT: pextrb $3, %xmm0, %eax -; SSE41-NEXT: movsbl %al, %eax -; SSE41-NEXT: imull $-109, %eax, %ecx -; SSE41-NEXT: shrl $8, %ecx -; SSE41-NEXT: addb %cl, %al -; SSE41-NEXT: movb %al, %cl -; SSE41-NEXT: shrb $7, %cl -; SSE41-NEXT: sarb $2, %al -; SSE41-NEXT: addb %cl, %al -; SSE41-NEXT: movzbl %al, %eax -; SSE41-NEXT: pinsrb $3, %eax, %xmm1 -; SSE41-NEXT: pextrb $4, %xmm0, %eax -; SSE41-NEXT: movsbl %al, %eax -; SSE41-NEXT: imull $-109, %eax, %ecx -; SSE41-NEXT: shrl $8, %ecx -; SSE41-NEXT: addb %cl, %al -; SSE41-NEXT: movb %al, %cl -; SSE41-NEXT: shrb $7, %cl -; SSE41-NEXT: sarb $2, %al -; SSE41-NEXT: addb %cl, %al -; SSE41-NEXT: movzbl %al, %eax -; SSE41-NEXT: pinsrb $4, %eax, %xmm1 -; SSE41-NEXT: pextrb $5, %xmm0, %eax -; SSE41-NEXT: movsbl %al, %eax -; SSE41-NEXT: imull $-109, %eax, %ecx -; SSE41-NEXT: shrl $8, %ecx -; SSE41-NEXT: addb %cl, %al -; SSE41-NEXT: movb %al, %cl -; SSE41-NEXT: shrb $7, %cl -; SSE41-NEXT: sarb $2, %al -; SSE41-NEXT: addb %cl, %al -; SSE41-NEXT: movzbl %al, %eax -; SSE41-NEXT: pinsrb $5, %eax, %xmm1 -; SSE41-NEXT: pextrb $6, %xmm0, %eax -; SSE41-NEXT: movsbl %al, %eax -; SSE41-NEXT: imull $-109, %eax, %ecx -; SSE41-NEXT: shrl $8, %ecx -; SSE41-NEXT: addb %cl, %al -; SSE41-NEXT: movb %al, %cl -; SSE41-NEXT: shrb $7, %cl -; SSE41-NEXT: sarb $2, %al -; SSE41-NEXT: addb %cl, %al -; SSE41-NEXT: movzbl %al, %eax -; SSE41-NEXT: pinsrb $6, %eax, %xmm1 -; SSE41-NEXT: pextrb $7, %xmm0, %eax -; SSE41-NEXT: movsbl %al, %eax -; SSE41-NEXT: imull $-109, %eax, %ecx -; SSE41-NEXT: shrl $8, %ecx -; SSE41-NEXT: addb %cl, %al -; SSE41-NEXT: movb %al, %cl -; SSE41-NEXT: shrb $7, %cl -; SSE41-NEXT: sarb $2, %al -; SSE41-NEXT: addb %cl, %al -; SSE41-NEXT: movzbl %al, %eax -; SSE41-NEXT: pinsrb $7, %eax, %xmm1 -; SSE41-NEXT: pextrb $8, %xmm0, %eax -; SSE41-NEXT: movsbl %al, %eax -; SSE41-NEXT: imull $-109, %eax, %ecx -; SSE41-NEXT: shrl $8, %ecx -; SSE41-NEXT: addb %cl, %al -; SSE41-NEXT: movb %al, %cl -; SSE41-NEXT: shrb $7, %cl -; SSE41-NEXT: sarb $2, %al -; SSE41-NEXT: addb %cl, %al -; SSE41-NEXT: movzbl %al, %eax -; SSE41-NEXT: pinsrb $8, %eax, %xmm1 -; SSE41-NEXT: pextrb $9, %xmm0, %eax -; SSE41-NEXT: movsbl %al, %eax -; SSE41-NEXT: imull $-109, %eax, %ecx -; SSE41-NEXT: shrl $8, %ecx -; SSE41-NEXT: addb %cl, %al -; SSE41-NEXT: movb %al, %cl -; SSE41-NEXT: shrb $7, %cl -; SSE41-NEXT: sarb $2, %al -; SSE41-NEXT: addb %cl, %al -; SSE41-NEXT: movzbl %al, %eax -; SSE41-NEXT: pinsrb $9, %eax, %xmm1 -; SSE41-NEXT: pextrb $10, %xmm0, %eax -; SSE41-NEXT: movsbl %al, %eax -; SSE41-NEXT: imull $-109, %eax, %ecx -; SSE41-NEXT: shrl $8, %ecx -; SSE41-NEXT: addb %cl, %al -; SSE41-NEXT: movb %al, %cl -; SSE41-NEXT: shrb $7, %cl -; SSE41-NEXT: sarb $2, %al -; SSE41-NEXT: addb %cl, %al -; SSE41-NEXT: movzbl %al, %eax -; SSE41-NEXT: pinsrb $10, %eax, %xmm1 -; SSE41-NEXT: pextrb $11, %xmm0, %eax -; SSE41-NEXT: movsbl %al, %eax -; SSE41-NEXT: imull $-109, %eax, %ecx -; SSE41-NEXT: shrl $8, %ecx -; SSE41-NEXT: addb %cl, %al -; SSE41-NEXT: movb %al, %cl -; SSE41-NEXT: shrb $7, %cl -; SSE41-NEXT: sarb $2, %al -; SSE41-NEXT: addb %cl, %al -; SSE41-NEXT: movzbl %al, %eax -; SSE41-NEXT: pinsrb $11, %eax, %xmm1 -; 
SSE41-NEXT: pextrb $12, %xmm0, %eax -; SSE41-NEXT: movsbl %al, %eax -; SSE41-NEXT: imull $-109, %eax, %ecx -; SSE41-NEXT: shrl $8, %ecx -; SSE41-NEXT: addb %cl, %al -; SSE41-NEXT: movb %al, %cl -; SSE41-NEXT: shrb $7, %cl -; SSE41-NEXT: sarb $2, %al -; SSE41-NEXT: addb %cl, %al -; SSE41-NEXT: movzbl %al, %eax -; SSE41-NEXT: pinsrb $12, %eax, %xmm1 -; SSE41-NEXT: pextrb $13, %xmm0, %eax -; SSE41-NEXT: movsbl %al, %eax -; SSE41-NEXT: imull $-109, %eax, %ecx -; SSE41-NEXT: shrl $8, %ecx -; SSE41-NEXT: addb %cl, %al -; SSE41-NEXT: movb %al, %cl -; SSE41-NEXT: shrb $7, %cl -; SSE41-NEXT: sarb $2, %al -; SSE41-NEXT: addb %cl, %al -; SSE41-NEXT: movzbl %al, %eax -; SSE41-NEXT: pinsrb $13, %eax, %xmm1 -; SSE41-NEXT: pextrb $14, %xmm0, %eax -; SSE41-NEXT: movsbl %al, %eax -; SSE41-NEXT: imull $-109, %eax, %ecx -; SSE41-NEXT: shrl $8, %ecx -; SSE41-NEXT: addb %cl, %al -; SSE41-NEXT: movb %al, %cl -; SSE41-NEXT: shrb $7, %cl -; SSE41-NEXT: sarb $2, %al -; SSE41-NEXT: addb %cl, %al -; SSE41-NEXT: movzbl %al, %eax -; SSE41-NEXT: pinsrb $14, %eax, %xmm1 -; SSE41-NEXT: pextrb $15, %xmm0, %eax -; SSE41-NEXT: movsbl %al, %eax -; SSE41-NEXT: imull $-109, %eax, %ecx -; SSE41-NEXT: shrl $8, %ecx -; SSE41-NEXT: addb %cl, %al -; SSE41-NEXT: movb %al, %cl -; SSE41-NEXT: shrb $7, %cl -; SSE41-NEXT: sarb $2, %al -; SSE41-NEXT: addb %cl, %al -; SSE41-NEXT: movzbl %al, %eax -; SSE41-NEXT: pinsrb $15, %eax, %xmm1 +; SSE41-NEXT: pmovsxbw %xmm0, %xmm1 +; SSE41-NEXT: pmovsxbw {{.*}}(%rip), %xmm2 +; SSE41-NEXT: pmullw %xmm2, %xmm1 +; SSE41-NEXT: psrlw $8, %xmm1 +; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1] +; SSE41-NEXT: pmovsxbw %xmm3, %xmm3 +; SSE41-NEXT: pmullw %xmm2, %xmm3 +; SSE41-NEXT: psrlw $8, %xmm3 +; SSE41-NEXT: packuswb %xmm3, %xmm1 +; SSE41-NEXT: paddb %xmm0, %xmm1 +; SSE41-NEXT: movdqa %xmm1, %xmm0 +; SSE41-NEXT: psrlw $2, %xmm0 +; SSE41-NEXT: pand {{.*}}(%rip), %xmm0 +; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32] +; SSE41-NEXT: pxor %xmm2, %xmm0 +; SSE41-NEXT: psubb %xmm2, %xmm0 +; SSE41-NEXT: psrlw $7, %xmm1 +; SSE41-NEXT: pand {{.*}}(%rip), %xmm1 +; SSE41-NEXT: paddb %xmm0, %xmm1 ; SSE41-NEXT: movdqa %xmm1, %xmm0 ; SSE41-NEXT: retq ; -; AVX-LABEL: test_div7_16i8: -; AVX: # BB#0: -; AVX-NEXT: vpextrb $1, %xmm0, %eax -; AVX-NEXT: movsbl %al, %eax -; AVX-NEXT: imull $-109, %eax, %ecx -; AVX-NEXT: shrl $8, %ecx -; AVX-NEXT: addb %cl, %al -; AVX-NEXT: movb %al, %cl -; AVX-NEXT: shrb $7, %cl -; AVX-NEXT: sarb $2, %al -; AVX-NEXT: addb %cl, %al -; AVX-NEXT: movzbl %al, %eax -; AVX-NEXT: vpextrb $0, %xmm0, %ecx -; AVX-NEXT: movsbl %cl, %ecx -; AVX-NEXT: imull $-109, %ecx, %edx -; AVX-NEXT: shrl $8, %edx -; AVX-NEXT: addb %dl, %cl -; AVX-NEXT: movb %cl, %dl -; AVX-NEXT: shrb $7, %dl -; AVX-NEXT: sarb $2, %cl -; AVX-NEXT: addb %dl, %cl -; AVX-NEXT: movzbl %cl, %ecx -; AVX-NEXT: vmovd %ecx, %xmm1 -; AVX-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrb $2, %xmm0, %eax -; AVX-NEXT: movsbl %al, %eax -; AVX-NEXT: imull $-109, %eax, %ecx -; AVX-NEXT: shrl $8, %ecx -; AVX-NEXT: addb %cl, %al -; AVX-NEXT: movb %al, %cl -; AVX-NEXT: shrb $7, %cl -; AVX-NEXT: sarb $2, %al -; AVX-NEXT: addb %cl, %al -; AVX-NEXT: movzbl %al, %eax -; AVX-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrb $3, %xmm0, %eax -; AVX-NEXT: movsbl %al, %eax -; AVX-NEXT: imull $-109, %eax, %ecx -; AVX-NEXT: shrl $8, %ecx -; AVX-NEXT: addb %cl, %al -; AVX-NEXT: movb %al, %cl -; AVX-NEXT: shrb $7, %cl -; AVX-NEXT: sarb $2, %al -; AVX-NEXT: addb %cl, %al -; AVX-NEXT: movzbl %al, %eax -; 
AVX-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrb $4, %xmm0, %eax -; AVX-NEXT: movsbl %al, %eax -; AVX-NEXT: imull $-109, %eax, %ecx -; AVX-NEXT: shrl $8, %ecx -; AVX-NEXT: addb %cl, %al -; AVX-NEXT: movb %al, %cl -; AVX-NEXT: shrb $7, %cl -; AVX-NEXT: sarb $2, %al -; AVX-NEXT: addb %cl, %al -; AVX-NEXT: movzbl %al, %eax -; AVX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrb $5, %xmm0, %eax -; AVX-NEXT: movsbl %al, %eax -; AVX-NEXT: imull $-109, %eax, %ecx -; AVX-NEXT: shrl $8, %ecx -; AVX-NEXT: addb %cl, %al -; AVX-NEXT: movb %al, %cl -; AVX-NEXT: shrb $7, %cl -; AVX-NEXT: sarb $2, %al -; AVX-NEXT: addb %cl, %al -; AVX-NEXT: movzbl %al, %eax -; AVX-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrb $6, %xmm0, %eax -; AVX-NEXT: movsbl %al, %eax -; AVX-NEXT: imull $-109, %eax, %ecx -; AVX-NEXT: shrl $8, %ecx -; AVX-NEXT: addb %cl, %al -; AVX-NEXT: movb %al, %cl -; AVX-NEXT: shrb $7, %cl -; AVX-NEXT: sarb $2, %al -; AVX-NEXT: addb %cl, %al -; AVX-NEXT: movzbl %al, %eax -; AVX-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrb $7, %xmm0, %eax -; AVX-NEXT: movsbl %al, %eax -; AVX-NEXT: imull $-109, %eax, %ecx -; AVX-NEXT: shrl $8, %ecx -; AVX-NEXT: addb %cl, %al -; AVX-NEXT: movb %al, %cl -; AVX-NEXT: shrb $7, %cl -; AVX-NEXT: sarb $2, %al -; AVX-NEXT: addb %cl, %al -; AVX-NEXT: movzbl %al, %eax -; AVX-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrb $8, %xmm0, %eax -; AVX-NEXT: movsbl %al, %eax -; AVX-NEXT: imull $-109, %eax, %ecx -; AVX-NEXT: shrl $8, %ecx -; AVX-NEXT: addb %cl, %al -; AVX-NEXT: movb %al, %cl -; AVX-NEXT: shrb $7, %cl -; AVX-NEXT: sarb $2, %al -; AVX-NEXT: addb %cl, %al -; AVX-NEXT: movzbl %al, %eax -; AVX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrb $9, %xmm0, %eax -; AVX-NEXT: movsbl %al, %eax -; AVX-NEXT: imull $-109, %eax, %ecx -; AVX-NEXT: shrl $8, %ecx -; AVX-NEXT: addb %cl, %al -; AVX-NEXT: movb %al, %cl -; AVX-NEXT: shrb $7, %cl -; AVX-NEXT: sarb $2, %al -; AVX-NEXT: addb %cl, %al -; AVX-NEXT: movzbl %al, %eax -; AVX-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrb $10, %xmm0, %eax -; AVX-NEXT: movsbl %al, %eax -; AVX-NEXT: imull $-109, %eax, %ecx -; AVX-NEXT: shrl $8, %ecx -; AVX-NEXT: addb %cl, %al -; AVX-NEXT: movb %al, %cl -; AVX-NEXT: shrb $7, %cl -; AVX-NEXT: sarb $2, %al -; AVX-NEXT: addb %cl, %al -; AVX-NEXT: movzbl %al, %eax -; AVX-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrb $11, %xmm0, %eax -; AVX-NEXT: movsbl %al, %eax -; AVX-NEXT: imull $-109, %eax, %ecx -; AVX-NEXT: shrl $8, %ecx -; AVX-NEXT: addb %cl, %al -; AVX-NEXT: movb %al, %cl -; AVX-NEXT: shrb $7, %cl -; AVX-NEXT: sarb $2, %al -; AVX-NEXT: addb %cl, %al -; AVX-NEXT: movzbl %al, %eax -; AVX-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrb $12, %xmm0, %eax -; AVX-NEXT: movsbl %al, %eax -; AVX-NEXT: imull $-109, %eax, %ecx -; AVX-NEXT: shrl $8, %ecx -; AVX-NEXT: addb %cl, %al -; AVX-NEXT: movb %al, %cl -; AVX-NEXT: shrb $7, %cl -; AVX-NEXT: sarb $2, %al -; AVX-NEXT: addb %cl, %al -; AVX-NEXT: movzbl %al, %eax -; AVX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrb $13, %xmm0, %eax -; AVX-NEXT: movsbl %al, %eax -; AVX-NEXT: imull $-109, %eax, %ecx -; AVX-NEXT: shrl $8, %ecx -; AVX-NEXT: addb %cl, %al -; AVX-NEXT: movb %al, %cl -; AVX-NEXT: shrb $7, %cl -; AVX-NEXT: sarb $2, %al -; AVX-NEXT: addb %cl, %al -; AVX-NEXT: movzbl %al, %eax -; AVX-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrb $14, %xmm0, %eax -; AVX-NEXT: movsbl %al, %eax -; AVX-NEXT: imull $-109, %eax, %ecx -; AVX-NEXT: shrl 
$8, %ecx -; AVX-NEXT: addb %cl, %al -; AVX-NEXT: movb %al, %cl -; AVX-NEXT: shrb $7, %cl -; AVX-NEXT: sarb $2, %al -; AVX-NEXT: addb %cl, %al -; AVX-NEXT: movzbl %al, %eax -; AVX-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrb $15, %xmm0, %eax -; AVX-NEXT: movsbl %al, %eax -; AVX-NEXT: imull $-109, %eax, %ecx -; AVX-NEXT: shrl $8, %ecx -; AVX-NEXT: addb %cl, %al -; AVX-NEXT: movb %al, %cl -; AVX-NEXT: shrb $7, %cl -; AVX-NEXT: sarb $2, %al -; AVX-NEXT: addb %cl, %al -; AVX-NEXT: movzbl %al, %eax -; AVX-NEXT: vpinsrb $15, %eax, %xmm1, %xmm0 -; AVX-NEXT: retq +; AVX1-LABEL: test_div7_16i8: +; AVX1: # BB#0: +; AVX1-NEXT: vpmovsxbw %xmm0, %xmm1 +; AVX1-NEXT: vpmovsxbw {{.*}}(%rip), %xmm2 +; AVX1-NEXT: vpmullw %xmm2, %xmm1, %xmm1 +; AVX1-NEXT: vpsrlw $8, %xmm1, %xmm1 +; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[2,3,0,1] +; AVX1-NEXT: vpmovsxbw %xmm3, %xmm3 +; AVX1-NEXT: vpmullw %xmm2, %xmm3, %xmm2 +; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2 +; AVX1-NEXT: vpackuswb %xmm2, %xmm1, %xmm1 +; AVX1-NEXT: vpaddb %xmm0, %xmm1, %xmm0 +; AVX1-NEXT: vpsrlw $2, %xmm0, %xmm1 +; AVX1-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1 +; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32] +; AVX1-NEXT: vpxor %xmm2, %xmm1, %xmm1 +; AVX1-NEXT: vpsubb %xmm2, %xmm1, %xmm1 +; AVX1-NEXT: vpsrlw $7, %xmm0, %xmm0 +; AVX1-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0 +; AVX1-NEXT: vpaddb %xmm0, %xmm1, %xmm0 +; AVX1-NEXT: retq +; +; AVX2-LABEL: test_div7_16i8: +; AVX2: # BB#0: +; AVX2-NEXT: vpmovsxbw %xmm0, %ymm1 +; AVX2-NEXT: vpmovsxbw {{.*}}(%rip), %ymm2 +; AVX2-NEXT: vpmullw %ymm2, %ymm1, %ymm1 +; AVX2-NEXT: vpsrlw $8, %ymm1, %ymm1 +; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2 +; AVX2-NEXT: vpackuswb %xmm2, %xmm1, %xmm1 +; AVX2-NEXT: vpaddb %xmm0, %xmm1, %xmm0 +; AVX2-NEXT: vpsrlw $2, %xmm0, %xmm1 +; AVX2-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1 +; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32] +; AVX2-NEXT: vpxor %xmm2, %xmm1, %xmm1 +; AVX2-NEXT: vpsubb %xmm2, %xmm1, %xmm1 +; AVX2-NEXT: vpsrlw $7, %xmm0, %xmm0 +; AVX2-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0 +; AVX2-NEXT: vpaddb %xmm0, %xmm1, %xmm0 +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq %res = sdiv <16 x i8> %a, ret <16 x i8> %res } @@ -936,651 +483,140 @@ define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind { ; SSE2-LABEL: test_rem7_16i8: ; SSE2: # BB#0: -; SSE2-NEXT: pushq %rbp -; SSE2-NEXT: pushq %r15 -; SSE2-NEXT: pushq %r14 -; SSE2-NEXT: pushq %rbx -; SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp) -; SSE2-NEXT: movsbl -{{[0-9]+}}(%rsp), %ecx -; SSE2-NEXT: imull $-109, %ecx, %eax -; SSE2-NEXT: shrl $8, %eax -; SSE2-NEXT: addb %cl, %al -; SSE2-NEXT: movb %al, %dl -; SSE2-NEXT: shrb $7, %dl -; SSE2-NEXT: sarb $2, %al -; SSE2-NEXT: addb %dl, %al -; SSE2-NEXT: movb $7, %r11b -; SSE2-NEXT: mulb %r11b -; SSE2-NEXT: subb %al, %cl -; SSE2-NEXT: movzbl %cl, %eax -; SSE2-NEXT: movd %eax, %xmm0 -; SSE2-NEXT: movsbl -{{[0-9]+}}(%rsp), %r15d -; SSE2-NEXT: movsbl -{{[0-9]+}}(%rsp), %ebx -; SSE2-NEXT: movsbl -{{[0-9]+}}(%rsp), %r9d -; SSE2-NEXT: movsbl -{{[0-9]+}}(%rsp), %edi -; SSE2-NEXT: movsbl -{{[0-9]+}}(%rsp), %r14d -; SSE2-NEXT: movsbl -{{[0-9]+}}(%rsp), %esi -; SSE2-NEXT: movsbl -{{[0-9]+}}(%rsp), %r8d -; SSE2-NEXT: movsbl -{{[0-9]+}}(%rsp), %ebp -; SSE2-NEXT: imull $-109, %ebp, %eax -; SSE2-NEXT: shrl $8, %eax -; SSE2-NEXT: addb %bpl, %al -; SSE2-NEXT: movb %al, %dl -; SSE2-NEXT: shrb $7, %dl -; SSE2-NEXT: sarb $2, %al -; SSE2-NEXT: addb %dl, %al -; SSE2-NEXT: mulb %r11b -; SSE2-NEXT: subb %al, %bpl -; SSE2-NEXT: 
movzbl %bpl, %eax -; SSE2-NEXT: movd %eax, %xmm1 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] -; SSE2-NEXT: imull $-109, %edi, %eax -; SSE2-NEXT: shrl $8, %eax -; SSE2-NEXT: addb %dil, %al -; SSE2-NEXT: movb %al, %dl -; SSE2-NEXT: shrb $7, %dl -; SSE2-NEXT: sarb $2, %al -; SSE2-NEXT: addb %dl, %al -; SSE2-NEXT: mulb %r11b -; SSE2-NEXT: subb %al, %dil -; SSE2-NEXT: movzbl %dil, %eax -; SSE2-NEXT: movd %eax, %xmm2 -; SSE2-NEXT: movsbl -{{[0-9]+}}(%rsp), %edi -; SSE2-NEXT: movsbl -{{[0-9]+}}(%rsp), %ebp -; SSE2-NEXT: movsbl -{{[0-9]+}}(%rsp), %r10d -; SSE2-NEXT: movsbl -{{[0-9]+}}(%rsp), %edx -; SSE2-NEXT: imull $-109, %edx, %eax -; SSE2-NEXT: shrl $8, %eax -; SSE2-NEXT: addb %dl, %al -; SSE2-NEXT: movb %al, %cl -; SSE2-NEXT: shrb $7, %cl -; SSE2-NEXT: sarb $2, %al -; SSE2-NEXT: addb %cl, %al -; SSE2-NEXT: mulb %r11b -; SSE2-NEXT: subb %al, %dl -; SSE2-NEXT: movzbl %dl, %eax -; SSE2-NEXT: movd %eax, %xmm0 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] -; SSE2-NEXT: imull $-109, %ebx, %eax -; SSE2-NEXT: shrl $8, %eax -; SSE2-NEXT: addb %bl, %al -; SSE2-NEXT: movb %al, %cl -; SSE2-NEXT: shrb $7, %cl -; SSE2-NEXT: sarb $2, %al -; SSE2-NEXT: addb %cl, %al -; SSE2-NEXT: mulb %r11b -; SSE2-NEXT: subb %al, %bl -; SSE2-NEXT: movzbl %bl, %eax -; SSE2-NEXT: movd %eax, %xmm1 -; SSE2-NEXT: imull $-109, %ebp, %eax -; SSE2-NEXT: shrl $8, %eax -; SSE2-NEXT: addb %bpl, %al -; SSE2-NEXT: movb %al, %cl -; SSE2-NEXT: shrb $7, %cl -; SSE2-NEXT: sarb $2, %al -; SSE2-NEXT: addb %cl, %al -; SSE2-NEXT: mulb %r11b -; SSE2-NEXT: subb %al, %bpl -; SSE2-NEXT: movzbl %bpl, %eax -; SSE2-NEXT: movd %eax, %xmm2 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7] -; SSE2-NEXT: imull $-109, %esi, %eax -; SSE2-NEXT: shrl $8, %eax -; SSE2-NEXT: addb %sil, %al -; SSE2-NEXT: movb %al, %cl -; SSE2-NEXT: shrb $7, %cl -; SSE2-NEXT: sarb $2, %al -; SSE2-NEXT: addb %cl, %al -; SSE2-NEXT: mulb %r11b -; SSE2-NEXT: subb %al, %sil -; SSE2-NEXT: movzbl %sil, %eax -; SSE2-NEXT: movd %eax, %xmm3 -; SSE2-NEXT: movsbl -{{[0-9]+}}(%rsp), %ecx -; SSE2-NEXT: movsbl -{{[0-9]+}}(%rsp), %edx -; SSE2-NEXT: imull $-109, %edx, %eax -; SSE2-NEXT: shrl $8, %eax -; SSE2-NEXT: addb %dl, %al -; SSE2-NEXT: movb %al, %bl -; SSE2-NEXT: shrb $7, %bl -; SSE2-NEXT: sarb $2, %al -; SSE2-NEXT: addb %bl, %al -; SSE2-NEXT: mulb %r11b -; SSE2-NEXT: subb %al, %dl -; SSE2-NEXT: movzbl %dl, %eax -; SSE2-NEXT: movd %eax, %xmm1 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] -; SSE2-NEXT: imull $-109, %r15d, %eax -; SSE2-NEXT: shrl $8, %eax -; SSE2-NEXT: addb %r15b, %al -; SSE2-NEXT: movb %al, %dl -; 
SSE2-NEXT: shrb $7, %dl -; SSE2-NEXT: sarb $2, %al -; SSE2-NEXT: addb %dl, %al -; SSE2-NEXT: mulb %r11b -; SSE2-NEXT: subb %al, %r15b -; SSE2-NEXT: movzbl %r15b, %eax -; SSE2-NEXT: movd %eax, %xmm2 -; SSE2-NEXT: imull $-109, %edi, %eax -; SSE2-NEXT: shrl $8, %eax -; SSE2-NEXT: addb %dil, %al -; SSE2-NEXT: movb %al, %dl -; SSE2-NEXT: shrb $7, %dl -; SSE2-NEXT: sarb $2, %al -; SSE2-NEXT: addb %dl, %al -; SSE2-NEXT: mulb %r11b -; SSE2-NEXT: subb %al, %dil -; SSE2-NEXT: movzbl %dil, %eax -; SSE2-NEXT: movd %eax, %xmm0 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7] -; SSE2-NEXT: imull $-109, %r14d, %eax -; SSE2-NEXT: shrl $8, %eax -; SSE2-NEXT: addb %r14b, %al -; SSE2-NEXT: movb %al, %dl -; SSE2-NEXT: shrb $7, %dl -; SSE2-NEXT: sarb $2, %al -; SSE2-NEXT: addb %dl, %al -; SSE2-NEXT: mulb %r11b -; SSE2-NEXT: subb %al, %r14b -; SSE2-NEXT: movzbl %r14b, %eax -; SSE2-NEXT: movd %eax, %xmm3 -; SSE2-NEXT: imull $-109, %ecx, %eax -; SSE2-NEXT: shrl $8, %eax -; SSE2-NEXT: addb %cl, %al -; SSE2-NEXT: movb %al, %dl -; SSE2-NEXT: shrb $7, %dl -; SSE2-NEXT: sarb $2, %al -; SSE2-NEXT: addb %dl, %al -; SSE2-NEXT: mulb %r11b -; SSE2-NEXT: subb %al, %cl -; SSE2-NEXT: movzbl %cl, %eax -; SSE2-NEXT: movd %eax, %xmm2 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] -; SSE2-NEXT: imull $-109, %r9d, %eax -; SSE2-NEXT: shrl $8, %eax -; SSE2-NEXT: addb %r9b, %al -; SSE2-NEXT: movb %al, %cl -; SSE2-NEXT: shrb $7, %cl -; SSE2-NEXT: sarb $2, %al -; SSE2-NEXT: addb %cl, %al -; SSE2-NEXT: mulb %r11b -; SSE2-NEXT: subb %al, %r9b -; SSE2-NEXT: movzbl %r9b, %eax -; SSE2-NEXT: movd %eax, %xmm0 -; SSE2-NEXT: imull $-109, %r10d, %eax -; SSE2-NEXT: shrl $8, %eax -; SSE2-NEXT: addb %r10b, %al -; SSE2-NEXT: movb %al, %cl -; SSE2-NEXT: shrb $7, %cl -; SSE2-NEXT: sarb $2, %al -; SSE2-NEXT: addb %cl, %al -; SSE2-NEXT: mulb %r11b -; SSE2-NEXT: subb %al, %r10b -; SSE2-NEXT: movzbl %r10b, %eax -; SSE2-NEXT: movd %eax, %xmm3 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7] -; SSE2-NEXT: imull $-109, %r8d, %eax -; SSE2-NEXT: shrl $8, %eax -; SSE2-NEXT: addb %r8b, %al -; SSE2-NEXT: movb %al, %cl -; SSE2-NEXT: shrb $7, %cl -; SSE2-NEXT: sarb $2, %al -; SSE2-NEXT: addb %cl, %al -; SSE2-NEXT: mulb %r11b -; SSE2-NEXT: subb %al, %r8b -; SSE2-NEXT: movzbl %r8b, %eax -; SSE2-NEXT: movd %eax, %xmm4 -; SSE2-NEXT: movsbl -{{[0-9]+}}(%rsp), %ecx -; SSE2-NEXT: imull $-109, %ecx, %eax -; SSE2-NEXT: shrl $8, %eax -; SSE2-NEXT: addb %cl, %al -; SSE2-NEXT: movb %al, %dl -; SSE2-NEXT: shrb $7, %dl -; SSE2-NEXT: sarb $2, %al -; SSE2-NEXT: addb %dl, %al -; SSE2-NEXT: mulb %r11b -; SSE2-NEXT: subb %al, %cl -; SSE2-NEXT: movzbl %cl, %eax -; SSE2-NEXT: movd %eax, %xmm0 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = 
xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] -; SSE2-NEXT: popq %rbx -; SSE2-NEXT: popq %r14 -; SSE2-NEXT: popq %r15 -; SSE2-NEXT: popq %rbp +; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147] +; SSE2-NEXT: psraw $8, %xmm2 +; SSE2-NEXT: movdqa %xmm0, %xmm3 +; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] +; SSE2-NEXT: psraw $8, %xmm3 +; SSE2-NEXT: pmullw %xmm2, %xmm3 +; SSE2-NEXT: psrlw $8, %xmm3 +; SSE2-NEXT: movdqa %xmm0, %xmm1 +; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] +; SSE2-NEXT: psraw $8, %xmm1 +; SSE2-NEXT: pmullw %xmm2, %xmm1 +; SSE2-NEXT: psrlw $8, %xmm1 +; SSE2-NEXT: packuswb %xmm3, %xmm1 +; SSE2-NEXT: paddb %xmm0, %xmm1 +; SSE2-NEXT: movdqa %xmm1, %xmm2 +; SSE2-NEXT: psrlw $2, %xmm2 +; SSE2-NEXT: pand {{.*}}(%rip), %xmm2 +; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32] +; SSE2-NEXT: pxor %xmm3, %xmm2 +; SSE2-NEXT: psubb %xmm3, %xmm2 +; SSE2-NEXT: psrlw $7, %xmm1 +; SSE2-NEXT: pand {{.*}}(%rip), %xmm1 +; SSE2-NEXT: paddb %xmm2, %xmm1 +; SSE2-NEXT: movdqa %xmm1, %xmm2 +; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] +; SSE2-NEXT: psraw $8, %xmm2 +; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7] +; SSE2-NEXT: psraw $8, %xmm3 +; SSE2-NEXT: pmullw %xmm3, %xmm2 +; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255] +; SSE2-NEXT: pand %xmm4, %xmm2 +; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] +; SSE2-NEXT: psraw $8, %xmm1 +; SSE2-NEXT: pmullw %xmm3, %xmm1 +; SSE2-NEXT: pand %xmm4, %xmm1 +; SSE2-NEXT: packuswb %xmm2, %xmm1 +; SSE2-NEXT: psubb %xmm1, %xmm0 ; SSE2-NEXT: retq ; ; SSE41-LABEL: test_rem7_16i8: ; SSE41: # BB#0: -; SSE41-NEXT: pextrb $1, %xmm0, %eax -; SSE41-NEXT: movsbl %al, %edx -; SSE41-NEXT: imull $-109, %edx, %eax -; SSE41-NEXT: shrl $8, %eax -; SSE41-NEXT: addb %dl, %al -; SSE41-NEXT: movb %al, %cl -; SSE41-NEXT: shrb $7, %cl -; SSE41-NEXT: sarb $2, %al -; SSE41-NEXT: addb %cl, %al -; SSE41-NEXT: movb $7, %dil -; SSE41-NEXT: mulb %dil -; SSE41-NEXT: subb %al, %dl -; SSE41-NEXT: movzbl %dl, %edx -; SSE41-NEXT: pextrb $0, %xmm0, %eax -; SSE41-NEXT: movsbl %al, %esi -; SSE41-NEXT: imull $-109, %esi, %eax -; SSE41-NEXT: shrl $8, %eax -; SSE41-NEXT: addb %sil, %al -; SSE41-NEXT: movb %al, %cl -; SSE41-NEXT: shrb $7, %cl -; SSE41-NEXT: sarb $2, %al -; SSE41-NEXT: addb %cl, %al -; SSE41-NEXT: mulb %dil -; SSE41-NEXT: subb %al, %sil -; SSE41-NEXT: movzbl %sil, %eax -; SSE41-NEXT: movd %eax, %xmm1 -; SSE41-NEXT: pinsrb $1, %edx, %xmm1 -; SSE41-NEXT: pextrb $2, %xmm0, %eax -; SSE41-NEXT: movsbl %al, %ecx -; SSE41-NEXT: imull $-109, %ecx, %eax -; SSE41-NEXT: shrl $8, %eax -; SSE41-NEXT: addb %cl, %al -; SSE41-NEXT: movb %al, %dl -; SSE41-NEXT: shrb $7, %dl -; SSE41-NEXT: sarb $2, %al -; SSE41-NEXT: addb %dl, %al -; SSE41-NEXT: mulb %dil -; SSE41-NEXT: subb %al, %cl -; SSE41-NEXT: movzbl %cl, %eax -; SSE41-NEXT: pinsrb $2, %eax, %xmm1 -; SSE41-NEXT: pextrb $3, %xmm0, %eax -; SSE41-NEXT: movsbl %al, %ecx -; SSE41-NEXT: imull $-109, %ecx, %eax -; SSE41-NEXT: shrl $8, %eax -; SSE41-NEXT: addb %cl, %al -; SSE41-NEXT: movb %al, %dl -; SSE41-NEXT: shrb $7, %dl -; 
SSE41-NEXT: sarb $2, %al -; SSE41-NEXT: addb %dl, %al -; SSE41-NEXT: mulb %dil -; SSE41-NEXT: subb %al, %cl -; SSE41-NEXT: movzbl %cl, %eax -; SSE41-NEXT: pinsrb $3, %eax, %xmm1 -; SSE41-NEXT: pextrb $4, %xmm0, %eax -; SSE41-NEXT: movsbl %al, %ecx -; SSE41-NEXT: imull $-109, %ecx, %eax -; SSE41-NEXT: shrl $8, %eax -; SSE41-NEXT: addb %cl, %al -; SSE41-NEXT: movb %al, %dl -; SSE41-NEXT: shrb $7, %dl -; SSE41-NEXT: sarb $2, %al -; SSE41-NEXT: addb %dl, %al -; SSE41-NEXT: mulb %dil -; SSE41-NEXT: subb %al, %cl -; SSE41-NEXT: movzbl %cl, %eax -; SSE41-NEXT: pinsrb $4, %eax, %xmm1 -; SSE41-NEXT: pextrb $5, %xmm0, %eax -; SSE41-NEXT: movsbl %al, %ecx -; SSE41-NEXT: imull $-109, %ecx, %eax -; SSE41-NEXT: shrl $8, %eax -; SSE41-NEXT: addb %cl, %al -; SSE41-NEXT: movb %al, %dl -; SSE41-NEXT: shrb $7, %dl -; SSE41-NEXT: sarb $2, %al -; SSE41-NEXT: addb %dl, %al -; SSE41-NEXT: mulb %dil -; SSE41-NEXT: subb %al, %cl -; SSE41-NEXT: movzbl %cl, %eax -; SSE41-NEXT: pinsrb $5, %eax, %xmm1 -; SSE41-NEXT: pextrb $6, %xmm0, %eax -; SSE41-NEXT: movsbl %al, %ecx -; SSE41-NEXT: imull $-109, %ecx, %eax -; SSE41-NEXT: shrl $8, %eax -; SSE41-NEXT: addb %cl, %al -; SSE41-NEXT: movb %al, %dl -; SSE41-NEXT: shrb $7, %dl -; SSE41-NEXT: sarb $2, %al -; SSE41-NEXT: addb %dl, %al -; SSE41-NEXT: mulb %dil -; SSE41-NEXT: subb %al, %cl -; SSE41-NEXT: movzbl %cl, %eax -; SSE41-NEXT: pinsrb $6, %eax, %xmm1 -; SSE41-NEXT: pextrb $7, %xmm0, %eax -; SSE41-NEXT: movsbl %al, %ecx -; SSE41-NEXT: imull $-109, %ecx, %eax -; SSE41-NEXT: shrl $8, %eax -; SSE41-NEXT: addb %cl, %al -; SSE41-NEXT: movb %al, %dl -; SSE41-NEXT: shrb $7, %dl -; SSE41-NEXT: sarb $2, %al -; SSE41-NEXT: addb %dl, %al -; SSE41-NEXT: mulb %dil -; SSE41-NEXT: subb %al, %cl -; SSE41-NEXT: movzbl %cl, %eax -; SSE41-NEXT: pinsrb $7, %eax, %xmm1 -; SSE41-NEXT: pextrb $8, %xmm0, %eax -; SSE41-NEXT: movsbl %al, %ecx -; SSE41-NEXT: imull $-109, %ecx, %eax -; SSE41-NEXT: shrl $8, %eax -; SSE41-NEXT: addb %cl, %al -; SSE41-NEXT: movb %al, %dl -; SSE41-NEXT: shrb $7, %dl -; SSE41-NEXT: sarb $2, %al -; SSE41-NEXT: addb %dl, %al -; SSE41-NEXT: mulb %dil -; SSE41-NEXT: subb %al, %cl -; SSE41-NEXT: movzbl %cl, %eax -; SSE41-NEXT: pinsrb $8, %eax, %xmm1 -; SSE41-NEXT: pextrb $9, %xmm0, %eax -; SSE41-NEXT: movsbl %al, %ecx -; SSE41-NEXT: imull $-109, %ecx, %eax -; SSE41-NEXT: shrl $8, %eax -; SSE41-NEXT: addb %cl, %al -; SSE41-NEXT: movb %al, %dl -; SSE41-NEXT: shrb $7, %dl -; SSE41-NEXT: sarb $2, %al -; SSE41-NEXT: addb %dl, %al -; SSE41-NEXT: mulb %dil -; SSE41-NEXT: subb %al, %cl -; SSE41-NEXT: movzbl %cl, %eax -; SSE41-NEXT: pinsrb $9, %eax, %xmm1 -; SSE41-NEXT: pextrb $10, %xmm0, %eax -; SSE41-NEXT: movsbl %al, %ecx -; SSE41-NEXT: imull $-109, %ecx, %eax -; SSE41-NEXT: shrl $8, %eax -; SSE41-NEXT: addb %cl, %al -; SSE41-NEXT: movb %al, %dl -; SSE41-NEXT: shrb $7, %dl -; SSE41-NEXT: sarb $2, %al -; SSE41-NEXT: addb %dl, %al -; SSE41-NEXT: mulb %dil -; SSE41-NEXT: subb %al, %cl -; SSE41-NEXT: movzbl %cl, %eax -; SSE41-NEXT: pinsrb $10, %eax, %xmm1 -; SSE41-NEXT: pextrb $11, %xmm0, %eax -; SSE41-NEXT: movsbl %al, %ecx -; SSE41-NEXT: imull $-109, %ecx, %eax -; SSE41-NEXT: shrl $8, %eax -; SSE41-NEXT: addb %cl, %al -; SSE41-NEXT: movb %al, %dl -; SSE41-NEXT: shrb $7, %dl -; SSE41-NEXT: sarb $2, %al -; SSE41-NEXT: addb %dl, %al -; SSE41-NEXT: mulb %dil -; SSE41-NEXT: subb %al, %cl -; SSE41-NEXT: movzbl %cl, %eax -; SSE41-NEXT: pinsrb $11, %eax, %xmm1 -; SSE41-NEXT: pextrb $12, %xmm0, %eax -; SSE41-NEXT: movsbl %al, %ecx -; SSE41-NEXT: imull $-109, %ecx, %eax -; SSE41-NEXT: shrl 
$8, %eax -; SSE41-NEXT: addb %cl, %al -; SSE41-NEXT: movb %al, %dl -; SSE41-NEXT: shrb $7, %dl -; SSE41-NEXT: sarb $2, %al -; SSE41-NEXT: addb %dl, %al -; SSE41-NEXT: mulb %dil -; SSE41-NEXT: subb %al, %cl -; SSE41-NEXT: movzbl %cl, %eax -; SSE41-NEXT: pinsrb $12, %eax, %xmm1 -; SSE41-NEXT: pextrb $13, %xmm0, %eax -; SSE41-NEXT: movsbl %al, %ecx -; SSE41-NEXT: imull $-109, %ecx, %eax -; SSE41-NEXT: shrl $8, %eax -; SSE41-NEXT: addb %cl, %al -; SSE41-NEXT: movb %al, %dl -; SSE41-NEXT: shrb $7, %dl -; SSE41-NEXT: sarb $2, %al -; SSE41-NEXT: addb %dl, %al -; SSE41-NEXT: mulb %dil -; SSE41-NEXT: subb %al, %cl -; SSE41-NEXT: movzbl %cl, %eax -; SSE41-NEXT: pinsrb $13, %eax, %xmm1 -; SSE41-NEXT: pextrb $14, %xmm0, %eax -; SSE41-NEXT: movsbl %al, %ecx -; SSE41-NEXT: imull $-109, %ecx, %eax -; SSE41-NEXT: shrl $8, %eax -; SSE41-NEXT: addb %cl, %al -; SSE41-NEXT: movb %al, %dl -; SSE41-NEXT: shrb $7, %dl -; SSE41-NEXT: sarb $2, %al -; SSE41-NEXT: addb %dl, %al -; SSE41-NEXT: mulb %dil -; SSE41-NEXT: subb %al, %cl -; SSE41-NEXT: movzbl %cl, %eax -; SSE41-NEXT: pinsrb $14, %eax, %xmm1 -; SSE41-NEXT: pextrb $15, %xmm0, %eax -; SSE41-NEXT: movsbl %al, %ecx -; SSE41-NEXT: imull $-109, %ecx, %eax -; SSE41-NEXT: shrl $8, %eax -; SSE41-NEXT: addb %cl, %al -; SSE41-NEXT: movb %al, %dl -; SSE41-NEXT: shrb $7, %dl -; SSE41-NEXT: sarb $2, %al -; SSE41-NEXT: addb %dl, %al -; SSE41-NEXT: mulb %dil -; SSE41-NEXT: subb %al, %cl -; SSE41-NEXT: movzbl %cl, %eax -; SSE41-NEXT: pinsrb $15, %eax, %xmm1 -; SSE41-NEXT: movdqa %xmm1, %xmm0 +; SSE41-NEXT: pmovsxbw %xmm0, %xmm1 +; SSE41-NEXT: pmovsxbw {{.*}}(%rip), %xmm2 +; SSE41-NEXT: pmullw %xmm2, %xmm1 +; SSE41-NEXT: psrlw $8, %xmm1 +; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1] +; SSE41-NEXT: pmovsxbw %xmm3, %xmm3 +; SSE41-NEXT: pmullw %xmm2, %xmm3 +; SSE41-NEXT: psrlw $8, %xmm3 +; SSE41-NEXT: packuswb %xmm3, %xmm1 +; SSE41-NEXT: paddb %xmm0, %xmm1 +; SSE41-NEXT: movdqa %xmm1, %xmm2 +; SSE41-NEXT: psrlw $2, %xmm2 +; SSE41-NEXT: pand {{.*}}(%rip), %xmm2 +; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32] +; SSE41-NEXT: pxor %xmm3, %xmm2 +; SSE41-NEXT: psubb %xmm3, %xmm2 +; SSE41-NEXT: psrlw $7, %xmm1 +; SSE41-NEXT: pand {{.*}}(%rip), %xmm1 +; SSE41-NEXT: paddb %xmm2, %xmm1 +; SSE41-NEXT: pmovsxbw %xmm1, %xmm2 +; SSE41-NEXT: pmovsxbw {{.*}}(%rip), %xmm3 +; SSE41-NEXT: pmullw %xmm3, %xmm2 +; SSE41-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255] +; SSE41-NEXT: pand %xmm4, %xmm2 +; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] +; SSE41-NEXT: pmovsxbw %xmm1, %xmm1 +; SSE41-NEXT: pmullw %xmm3, %xmm1 +; SSE41-NEXT: pand %xmm4, %xmm1 +; SSE41-NEXT: packuswb %xmm1, %xmm2 +; SSE41-NEXT: psubb %xmm2, %xmm0 ; SSE41-NEXT: retq ; -; AVX-LABEL: test_rem7_16i8: -; AVX: # BB#0: -; AVX-NEXT: vpextrb $1, %xmm0, %eax -; AVX-NEXT: movsbl %al, %edx -; AVX-NEXT: imull $-109, %edx, %eax -; AVX-NEXT: shrl $8, %eax -; AVX-NEXT: addb %dl, %al -; AVX-NEXT: movb %al, %cl -; AVX-NEXT: shrb $7, %cl -; AVX-NEXT: sarb $2, %al -; AVX-NEXT: addb %cl, %al -; AVX-NEXT: movb $7, %dil -; AVX-NEXT: mulb %dil -; AVX-NEXT: subb %al, %dl -; AVX-NEXT: movzbl %dl, %edx -; AVX-NEXT: vpextrb $0, %xmm0, %eax -; AVX-NEXT: movsbl %al, %esi -; AVX-NEXT: imull $-109, %esi, %eax -; AVX-NEXT: shrl $8, %eax -; AVX-NEXT: addb %sil, %al -; AVX-NEXT: movb %al, %cl -; AVX-NEXT: shrb $7, %cl -; AVX-NEXT: sarb $2, %al -; AVX-NEXT: addb %cl, %al -; AVX-NEXT: mulb %dil -; AVX-NEXT: subb %al, %sil -; AVX-NEXT: movzbl %sil, %eax -; AVX-NEXT: vmovd %eax, %xmm1 -; AVX-NEXT: 
vpinsrb $1, %edx, %xmm1, %xmm1 -; AVX-NEXT: vpextrb $2, %xmm0, %eax -; AVX-NEXT: movsbl %al, %ecx -; AVX-NEXT: imull $-109, %ecx, %eax -; AVX-NEXT: shrl $8, %eax -; AVX-NEXT: addb %cl, %al -; AVX-NEXT: movb %al, %dl -; AVX-NEXT: shrb $7, %dl -; AVX-NEXT: sarb $2, %al -; AVX-NEXT: addb %dl, %al -; AVX-NEXT: mulb %dil -; AVX-NEXT: subb %al, %cl -; AVX-NEXT: movzbl %cl, %eax -; AVX-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrb $3, %xmm0, %eax -; AVX-NEXT: movsbl %al, %ecx -; AVX-NEXT: imull $-109, %ecx, %eax -; AVX-NEXT: shrl $8, %eax -; AVX-NEXT: addb %cl, %al -; AVX-NEXT: movb %al, %dl -; AVX-NEXT: shrb $7, %dl -; AVX-NEXT: sarb $2, %al -; AVX-NEXT: addb %dl, %al -; AVX-NEXT: mulb %dil -; AVX-NEXT: subb %al, %cl -; AVX-NEXT: movzbl %cl, %eax -; AVX-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrb $4, %xmm0, %eax -; AVX-NEXT: movsbl %al, %ecx -; AVX-NEXT: imull $-109, %ecx, %eax -; AVX-NEXT: shrl $8, %eax -; AVX-NEXT: addb %cl, %al -; AVX-NEXT: movb %al, %dl -; AVX-NEXT: shrb $7, %dl -; AVX-NEXT: sarb $2, %al -; AVX-NEXT: addb %dl, %al -; AVX-NEXT: mulb %dil -; AVX-NEXT: subb %al, %cl -; AVX-NEXT: movzbl %cl, %eax -; AVX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrb $5, %xmm0, %eax -; AVX-NEXT: movsbl %al, %ecx -; AVX-NEXT: imull $-109, %ecx, %eax -; AVX-NEXT: shrl $8, %eax -; AVX-NEXT: addb %cl, %al -; AVX-NEXT: movb %al, %dl -; AVX-NEXT: shrb $7, %dl -; AVX-NEXT: sarb $2, %al -; AVX-NEXT: addb %dl, %al -; AVX-NEXT: mulb %dil -; AVX-NEXT: subb %al, %cl -; AVX-NEXT: movzbl %cl, %eax -; AVX-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrb $6, %xmm0, %eax -; AVX-NEXT: movsbl %al, %ecx -; AVX-NEXT: imull $-109, %ecx, %eax -; AVX-NEXT: shrl $8, %eax -; AVX-NEXT: addb %cl, %al -; AVX-NEXT: movb %al, %dl -; AVX-NEXT: shrb $7, %dl -; AVX-NEXT: sarb $2, %al -; AVX-NEXT: addb %dl, %al -; AVX-NEXT: mulb %dil -; AVX-NEXT: subb %al, %cl -; AVX-NEXT: movzbl %cl, %eax -; AVX-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrb $7, %xmm0, %eax -; AVX-NEXT: movsbl %al, %ecx -; AVX-NEXT: imull $-109, %ecx, %eax -; AVX-NEXT: shrl $8, %eax -; AVX-NEXT: addb %cl, %al -; AVX-NEXT: movb %al, %dl -; AVX-NEXT: shrb $7, %dl -; AVX-NEXT: sarb $2, %al -; AVX-NEXT: addb %dl, %al -; AVX-NEXT: mulb %dil -; AVX-NEXT: subb %al, %cl -; AVX-NEXT: movzbl %cl, %eax -; AVX-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrb $8, %xmm0, %eax -; AVX-NEXT: movsbl %al, %ecx -; AVX-NEXT: imull $-109, %ecx, %eax -; AVX-NEXT: shrl $8, %eax -; AVX-NEXT: addb %cl, %al -; AVX-NEXT: movb %al, %dl -; AVX-NEXT: shrb $7, %dl -; AVX-NEXT: sarb $2, %al -; AVX-NEXT: addb %dl, %al -; AVX-NEXT: mulb %dil -; AVX-NEXT: subb %al, %cl -; AVX-NEXT: movzbl %cl, %eax -; AVX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrb $9, %xmm0, %eax -; AVX-NEXT: movsbl %al, %ecx -; AVX-NEXT: imull $-109, %ecx, %eax -; AVX-NEXT: shrl $8, %eax -; AVX-NEXT: addb %cl, %al -; AVX-NEXT: movb %al, %dl -; AVX-NEXT: shrb $7, %dl -; AVX-NEXT: sarb $2, %al -; AVX-NEXT: addb %dl, %al -; AVX-NEXT: mulb %dil -; AVX-NEXT: subb %al, %cl -; AVX-NEXT: movzbl %cl, %eax -; AVX-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrb $10, %xmm0, %eax -; AVX-NEXT: movsbl %al, %ecx -; AVX-NEXT: imull $-109, %ecx, %eax -; AVX-NEXT: shrl $8, %eax -; AVX-NEXT: addb %cl, %al -; AVX-NEXT: movb %al, %dl -; AVX-NEXT: shrb $7, %dl -; AVX-NEXT: sarb $2, %al -; AVX-NEXT: addb %dl, %al -; AVX-NEXT: mulb %dil -; AVX-NEXT: subb %al, %cl -; AVX-NEXT: movzbl %cl, %eax -; AVX-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrb $11, 
%xmm0, %eax -; AVX-NEXT: movsbl %al, %ecx -; AVX-NEXT: imull $-109, %ecx, %eax -; AVX-NEXT: shrl $8, %eax -; AVX-NEXT: addb %cl, %al -; AVX-NEXT: movb %al, %dl -; AVX-NEXT: shrb $7, %dl -; AVX-NEXT: sarb $2, %al -; AVX-NEXT: addb %dl, %al -; AVX-NEXT: mulb %dil -; AVX-NEXT: subb %al, %cl -; AVX-NEXT: movzbl %cl, %eax -; AVX-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrb $12, %xmm0, %eax -; AVX-NEXT: movsbl %al, %ecx -; AVX-NEXT: imull $-109, %ecx, %eax -; AVX-NEXT: shrl $8, %eax -; AVX-NEXT: addb %cl, %al -; AVX-NEXT: movb %al, %dl -; AVX-NEXT: shrb $7, %dl -; AVX-NEXT: sarb $2, %al -; AVX-NEXT: addb %dl, %al -; AVX-NEXT: mulb %dil -; AVX-NEXT: subb %al, %cl -; AVX-NEXT: movzbl %cl, %eax -; AVX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrb $13, %xmm0, %eax -; AVX-NEXT: movsbl %al, %ecx -; AVX-NEXT: imull $-109, %ecx, %eax -; AVX-NEXT: shrl $8, %eax -; AVX-NEXT: addb %cl, %al -; AVX-NEXT: movb %al, %dl -; AVX-NEXT: shrb $7, %dl -; AVX-NEXT: sarb $2, %al -; AVX-NEXT: addb %dl, %al -; AVX-NEXT: mulb %dil -; AVX-NEXT: subb %al, %cl -; AVX-NEXT: movzbl %cl, %eax -; AVX-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrb $14, %xmm0, %eax -; AVX-NEXT: movsbl %al, %ecx -; AVX-NEXT: imull $-109, %ecx, %eax -; AVX-NEXT: shrl $8, %eax -; AVX-NEXT: addb %cl, %al -; AVX-NEXT: movb %al, %dl -; AVX-NEXT: shrb $7, %dl -; AVX-NEXT: sarb $2, %al -; AVX-NEXT: addb %dl, %al -; AVX-NEXT: mulb %dil -; AVX-NEXT: subb %al, %cl -; AVX-NEXT: movzbl %cl, %eax -; AVX-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrb $15, %xmm0, %eax -; AVX-NEXT: movsbl %al, %ecx -; AVX-NEXT: imull $-109, %ecx, %eax -; AVX-NEXT: shrl $8, %eax -; AVX-NEXT: addb %cl, %al -; AVX-NEXT: movb %al, %dl -; AVX-NEXT: shrb $7, %dl -; AVX-NEXT: sarb $2, %al -; AVX-NEXT: addb %dl, %al -; AVX-NEXT: mulb %dil -; AVX-NEXT: subb %al, %cl -; AVX-NEXT: movzbl %cl, %eax -; AVX-NEXT: vpinsrb $15, %eax, %xmm1, %xmm0 -; AVX-NEXT: retq +; AVX1-LABEL: test_rem7_16i8: +; AVX1: # BB#0: +; AVX1-NEXT: vpmovsxbw %xmm0, %xmm1 +; AVX1-NEXT: vpmovsxbw {{.*}}(%rip), %xmm2 +; AVX1-NEXT: vpmullw %xmm2, %xmm1, %xmm1 +; AVX1-NEXT: vpsrlw $8, %xmm1, %xmm1 +; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[2,3,0,1] +; AVX1-NEXT: vpmovsxbw %xmm3, %xmm3 +; AVX1-NEXT: vpmullw %xmm2, %xmm3, %xmm2 +; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2 +; AVX1-NEXT: vpackuswb %xmm2, %xmm1, %xmm1 +; AVX1-NEXT: vpaddb %xmm0, %xmm1, %xmm1 +; AVX1-NEXT: vpsrlw $2, %xmm1, %xmm2 +; AVX1-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2 +; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32] +; AVX1-NEXT: vpxor %xmm3, %xmm2, %xmm2 +; AVX1-NEXT: vpsubb %xmm3, %xmm2, %xmm2 +; AVX1-NEXT: vpsrlw $7, %xmm1, %xmm1 +; AVX1-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1 +; AVX1-NEXT: vpaddb %xmm1, %xmm2, %xmm1 +; AVX1-NEXT: vpmovsxbw %xmm1, %xmm2 +; AVX1-NEXT: vpmovsxbw {{.*}}(%rip), %xmm3 +; AVX1-NEXT: vpmullw %xmm3, %xmm2, %xmm2 +; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255] +; AVX1-NEXT: vpand %xmm4, %xmm2, %xmm2 +; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] +; AVX1-NEXT: vpmovsxbw %xmm1, %xmm1 +; AVX1-NEXT: vpmullw %xmm3, %xmm1, %xmm1 +; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm1 +; AVX1-NEXT: vpackuswb %xmm1, %xmm2, %xmm1 +; AVX1-NEXT: vpsubb %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: retq +; +; AVX2-LABEL: test_rem7_16i8: +; AVX2: # BB#0: +; AVX2-NEXT: vpmovsxbw %xmm0, %ymm1 +; AVX2-NEXT: vpmovsxbw {{.*}}(%rip), %ymm2 +; AVX2-NEXT: vpmullw %ymm2, %ymm1, %ymm1 +; AVX2-NEXT: vpsrlw $8, %ymm1, %ymm1 +; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2 +; 
AVX2-NEXT: vpackuswb %xmm2, %xmm1, %xmm1 +; AVX2-NEXT: vpaddb %xmm0, %xmm1, %xmm1 +; AVX2-NEXT: vpsrlw $2, %xmm1, %xmm2 +; AVX2-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2 +; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = [32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32] +; AVX2-NEXT: vpxor %xmm3, %xmm2, %xmm2 +; AVX2-NEXT: vpsubb %xmm3, %xmm2, %xmm2 +; AVX2-NEXT: vpsrlw $7, %xmm1, %xmm1 +; AVX2-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1 +; AVX2-NEXT: vpaddb %xmm1, %xmm2, %xmm1 +; AVX2-NEXT: vpmovsxbw %xmm1, %ymm1 +; AVX2-NEXT: vpmovsxbw {{.*}}(%rip), %ymm2 +; AVX2-NEXT: vpmullw %ymm2, %ymm1, %ymm1 +; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2 +; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u> +; AVX2-NEXT: vpshufb %xmm3, %xmm2, %xmm2 +; AVX2-NEXT: vpshufb %xmm3, %xmm1, %xmm1 +; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0] +; AVX2-NEXT: vpsubb %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq %res = srem <16 x i8> %a, ret <16 x i8> %res } Index: test/CodeGen/X86/vector-idiv-sdiv-256.ll =================================================================== --- test/CodeGen/X86/vector-idiv-sdiv-256.ll +++ test/CodeGen/X86/vector-idiv-sdiv-256.ll @@ -193,150 +193,15 @@ ; AVX1-LABEL: test_div7_16i16: ; AVX1: # BB#0: ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1 -; AVX1-NEXT: vpextrw $1, %xmm1, %eax -; AVX1-NEXT: cwtl -; AVX1-NEXT: imull $18725, %eax, %eax # imm = 0x4925 -; AVX1-NEXT: movl %eax, %ecx -; AVX1-NEXT: shrl $16, %ecx -; AVX1-NEXT: sarw %cx -; AVX1-NEXT: shrl $31, %eax -; AVX1-NEXT: addl %ecx, %eax -; AVX1-NEXT: vmovd %xmm1, %ecx -; AVX1-NEXT: movswl %cx, %ecx -; AVX1-NEXT: imull $18725, %ecx, %ecx # imm = 0x4925 -; AVX1-NEXT: movl %ecx, %edx -; AVX1-NEXT: shrl $16, %edx -; AVX1-NEXT: sarw %dx -; AVX1-NEXT: shrl $31, %ecx -; AVX1-NEXT: addl %edx, %ecx -; AVX1-NEXT: vmovd %ecx, %xmm2 -; AVX1-NEXT: vpinsrw $1, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrw $2, %xmm1, %eax -; AVX1-NEXT: cwtl -; AVX1-NEXT: imull $18725, %eax, %eax # imm = 0x4925 -; AVX1-NEXT: movl %eax, %ecx -; AVX1-NEXT: shrl $16, %ecx -; AVX1-NEXT: sarw %cx -; AVX1-NEXT: shrl $31, %eax -; AVX1-NEXT: addl %ecx, %eax -; AVX1-NEXT: vpinsrw $2, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrw $3, %xmm1, %eax -; AVX1-NEXT: cwtl -; AVX1-NEXT: imull $18725, %eax, %eax # imm = 0x4925 -; AVX1-NEXT: movl %eax, %ecx -; AVX1-NEXT: shrl $16, %ecx -; AVX1-NEXT: sarw %cx -; AVX1-NEXT: shrl $31, %eax -; AVX1-NEXT: addl %ecx, %eax -; AVX1-NEXT: vpinsrw $3, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrw $4, %xmm1, %eax -; AVX1-NEXT: cwtl -; AVX1-NEXT: imull $18725, %eax, %eax # imm = 0x4925 -; AVX1-NEXT: movl %eax, %ecx -; AVX1-NEXT: shrl $16, %ecx -; AVX1-NEXT: sarw %cx -; AVX1-NEXT: shrl $31, %eax -; AVX1-NEXT: addl %ecx, %eax -; AVX1-NEXT: vpinsrw $4, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrw $5, %xmm1, %eax -; AVX1-NEXT: cwtl -; AVX1-NEXT: imull $18725, %eax, %eax # imm = 0x4925 -; AVX1-NEXT: movl %eax, %ecx -; AVX1-NEXT: shrl $16, %ecx -; AVX1-NEXT: sarw %cx -; AVX1-NEXT: shrl $31, %eax -; AVX1-NEXT: addl %ecx, %eax -; AVX1-NEXT: vpinsrw $5, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrw $6, %xmm1, %eax -; AVX1-NEXT: cwtl -; AVX1-NEXT: imull $18725, %eax, %eax # imm = 0x4925 -; AVX1-NEXT: movl %eax, %ecx -; AVX1-NEXT: shrl $16, %ecx -; AVX1-NEXT: sarw %cx -; AVX1-NEXT: shrl $31, %eax -; AVX1-NEXT: addl %ecx, %eax -; AVX1-NEXT: vpinsrw $6, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrw $7, %xmm1, %eax -; AVX1-NEXT: cwtl -; AVX1-NEXT: imull $18725, %eax, %eax # imm = 0x4925 -; AVX1-NEXT: movl %eax, %ecx -; AVX1-NEXT: shrl $16, %ecx -; AVX1-NEXT: 
sarw %cx -; AVX1-NEXT: shrl $31, %eax -; AVX1-NEXT: addl %ecx, %eax -; AVX1-NEXT: vpinsrw $7, %eax, %xmm2, %xmm1 -; AVX1-NEXT: vpextrw $1, %xmm0, %eax -; AVX1-NEXT: cwtl -; AVX1-NEXT: imull $18725, %eax, %eax # imm = 0x4925 -; AVX1-NEXT: movl %eax, %ecx -; AVX1-NEXT: shrl $16, %ecx -; AVX1-NEXT: sarw %cx -; AVX1-NEXT: shrl $31, %eax -; AVX1-NEXT: addl %ecx, %eax -; AVX1-NEXT: vmovd %xmm0, %ecx -; AVX1-NEXT: movswl %cx, %ecx -; AVX1-NEXT: imull $18725, %ecx, %ecx # imm = 0x4925 -; AVX1-NEXT: movl %ecx, %edx -; AVX1-NEXT: shrl $16, %edx -; AVX1-NEXT: sarw %dx -; AVX1-NEXT: shrl $31, %ecx -; AVX1-NEXT: addl %edx, %ecx -; AVX1-NEXT: vmovd %ecx, %xmm2 -; AVX1-NEXT: vpinsrw $1, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrw $2, %xmm0, %eax -; AVX1-NEXT: cwtl -; AVX1-NEXT: imull $18725, %eax, %eax # imm = 0x4925 -; AVX1-NEXT: movl %eax, %ecx -; AVX1-NEXT: shrl $16, %ecx -; AVX1-NEXT: sarw %cx -; AVX1-NEXT: shrl $31, %eax -; AVX1-NEXT: addl %ecx, %eax -; AVX1-NEXT: vpinsrw $2, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrw $3, %xmm0, %eax -; AVX1-NEXT: cwtl -; AVX1-NEXT: imull $18725, %eax, %eax # imm = 0x4925 -; AVX1-NEXT: movl %eax, %ecx -; AVX1-NEXT: shrl $16, %ecx -; AVX1-NEXT: sarw %cx -; AVX1-NEXT: shrl $31, %eax -; AVX1-NEXT: addl %ecx, %eax -; AVX1-NEXT: vpinsrw $3, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrw $4, %xmm0, %eax -; AVX1-NEXT: cwtl -; AVX1-NEXT: imull $18725, %eax, %eax # imm = 0x4925 -; AVX1-NEXT: movl %eax, %ecx -; AVX1-NEXT: shrl $16, %ecx -; AVX1-NEXT: sarw %cx -; AVX1-NEXT: shrl $31, %eax -; AVX1-NEXT: addl %ecx, %eax -; AVX1-NEXT: vpinsrw $4, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrw $5, %xmm0, %eax -; AVX1-NEXT: cwtl -; AVX1-NEXT: imull $18725, %eax, %eax # imm = 0x4925 -; AVX1-NEXT: movl %eax, %ecx -; AVX1-NEXT: shrl $16, %ecx -; AVX1-NEXT: sarw %cx -; AVX1-NEXT: shrl $31, %eax -; AVX1-NEXT: addl %ecx, %eax -; AVX1-NEXT: vpinsrw $5, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrw $6, %xmm0, %eax -; AVX1-NEXT: cwtl -; AVX1-NEXT: imull $18725, %eax, %eax # imm = 0x4925 -; AVX1-NEXT: movl %eax, %ecx -; AVX1-NEXT: shrl $16, %ecx -; AVX1-NEXT: sarw %cx -; AVX1-NEXT: shrl $31, %eax -; AVX1-NEXT: addl %ecx, %eax -; AVX1-NEXT: vpinsrw $6, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrw $7, %xmm0, %eax -; AVX1-NEXT: cwtl -; AVX1-NEXT: imull $18725, %eax, %eax # imm = 0x4925 -; AVX1-NEXT: movl %eax, %ecx -; AVX1-NEXT: shrl $16, %ecx -; AVX1-NEXT: sarw %cx -; AVX1-NEXT: shrl $31, %eax -; AVX1-NEXT: addl %ecx, %eax -; AVX1-NEXT: vpinsrw $7, %eax, %xmm2, %xmm0 +; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [18725,18725,18725,18725,18725,18725,18725,18725] +; AVX1-NEXT: vpmulhw %xmm2, %xmm1, %xmm1 +; AVX1-NEXT: vpsrlw $15, %xmm1, %xmm3 +; AVX1-NEXT: vpsraw $1, %xmm1, %xmm1 +; AVX1-NEXT: vpaddw %xmm3, %xmm1, %xmm1 +; AVX1-NEXT: vpmulhw %xmm2, %xmm0, %xmm0 +; AVX1-NEXT: vpsrlw $15, %xmm0, %xmm2 +; AVX1-NEXT: vpsraw $1, %xmm0, %xmm0 +; AVX1-NEXT: vpaddw %xmm2, %xmm0, %xmm0 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; AVX1-NEXT: retq ; @@ -355,717 +220,70 @@ ; AVX1-LABEL: test_div7_32i8: ; AVX1: # BB#0: ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1 -; AVX1-NEXT: vpextrb $1, %xmm1, %eax -; AVX1-NEXT: movsbl %al, %eax -; AVX1-NEXT: imull $-109, %eax, %ecx -; AVX1-NEXT: shrl $8, %ecx -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movb %al, %cl -; AVX1-NEXT: shrb $7, %cl -; AVX1-NEXT: sarb $2, %al -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movzbl %al, %eax -; AVX1-NEXT: vpextrb $0, %xmm1, %ecx -; AVX1-NEXT: movsbl %cl, %ecx -; AVX1-NEXT: imull $-109, %ecx, %edx -; AVX1-NEXT: shrl $8, %edx -; AVX1-NEXT: addb %dl, %cl -; AVX1-NEXT: 
movb %cl, %dl -; AVX1-NEXT: shrb $7, %dl -; AVX1-NEXT: sarb $2, %cl -; AVX1-NEXT: addb %dl, %cl -; AVX1-NEXT: movzbl %cl, %ecx -; AVX1-NEXT: vmovd %ecx, %xmm2 -; AVX1-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $2, %xmm1, %eax -; AVX1-NEXT: movsbl %al, %eax -; AVX1-NEXT: imull $-109, %eax, %ecx -; AVX1-NEXT: shrl $8, %ecx -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movb %al, %cl -; AVX1-NEXT: shrb $7, %cl -; AVX1-NEXT: sarb $2, %al -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movzbl %al, %eax -; AVX1-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $3, %xmm1, %eax -; AVX1-NEXT: movsbl %al, %eax -; AVX1-NEXT: imull $-109, %eax, %ecx -; AVX1-NEXT: shrl $8, %ecx -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movb %al, %cl -; AVX1-NEXT: shrb $7, %cl -; AVX1-NEXT: sarb $2, %al -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movzbl %al, %eax -; AVX1-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $4, %xmm1, %eax -; AVX1-NEXT: movsbl %al, %eax -; AVX1-NEXT: imull $-109, %eax, %ecx -; AVX1-NEXT: shrl $8, %ecx -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movb %al, %cl -; AVX1-NEXT: shrb $7, %cl -; AVX1-NEXT: sarb $2, %al -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movzbl %al, %eax -; AVX1-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $5, %xmm1, %eax -; AVX1-NEXT: movsbl %al, %eax -; AVX1-NEXT: imull $-109, %eax, %ecx -; AVX1-NEXT: shrl $8, %ecx -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movb %al, %cl -; AVX1-NEXT: shrb $7, %cl -; AVX1-NEXT: sarb $2, %al -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movzbl %al, %eax -; AVX1-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $6, %xmm1, %eax -; AVX1-NEXT: movsbl %al, %eax -; AVX1-NEXT: imull $-109, %eax, %ecx -; AVX1-NEXT: shrl $8, %ecx -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movb %al, %cl -; AVX1-NEXT: shrb $7, %cl -; AVX1-NEXT: sarb $2, %al -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movzbl %al, %eax -; AVX1-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $7, %xmm1, %eax -; AVX1-NEXT: movsbl %al, %eax -; AVX1-NEXT: imull $-109, %eax, %ecx -; AVX1-NEXT: shrl $8, %ecx -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movb %al, %cl -; AVX1-NEXT: shrb $7, %cl -; AVX1-NEXT: sarb $2, %al -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movzbl %al, %eax -; AVX1-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $8, %xmm1, %eax -; AVX1-NEXT: movsbl %al, %eax -; AVX1-NEXT: imull $-109, %eax, %ecx -; AVX1-NEXT: shrl $8, %ecx -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movb %al, %cl -; AVX1-NEXT: shrb $7, %cl -; AVX1-NEXT: sarb $2, %al -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movzbl %al, %eax -; AVX1-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $9, %xmm1, %eax -; AVX1-NEXT: movsbl %al, %eax -; AVX1-NEXT: imull $-109, %eax, %ecx -; AVX1-NEXT: shrl $8, %ecx -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movb %al, %cl -; AVX1-NEXT: shrb $7, %cl -; AVX1-NEXT: sarb $2, %al -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movzbl %al, %eax -; AVX1-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $10, %xmm1, %eax -; AVX1-NEXT: movsbl %al, %eax -; AVX1-NEXT: imull $-109, %eax, %ecx -; AVX1-NEXT: shrl $8, %ecx -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movb %al, %cl -; AVX1-NEXT: shrb $7, %cl -; AVX1-NEXT: sarb $2, %al -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movzbl %al, %eax -; AVX1-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $11, %xmm1, %eax -; AVX1-NEXT: movsbl %al, %eax -; AVX1-NEXT: imull $-109, %eax, %ecx -; AVX1-NEXT: shrl $8, %ecx -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movb %al, %cl -; AVX1-NEXT: 
shrb $7, %cl -; AVX1-NEXT: sarb $2, %al -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movzbl %al, %eax -; AVX1-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $12, %xmm1, %eax -; AVX1-NEXT: movsbl %al, %eax -; AVX1-NEXT: imull $-109, %eax, %ecx -; AVX1-NEXT: shrl $8, %ecx -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movb %al, %cl -; AVX1-NEXT: shrb $7, %cl -; AVX1-NEXT: sarb $2, %al -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movzbl %al, %eax -; AVX1-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $13, %xmm1, %eax -; AVX1-NEXT: movsbl %al, %eax -; AVX1-NEXT: imull $-109, %eax, %ecx -; AVX1-NEXT: shrl $8, %ecx -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movb %al, %cl -; AVX1-NEXT: shrb $7, %cl -; AVX1-NEXT: sarb $2, %al -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movzbl %al, %eax -; AVX1-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $14, %xmm1, %eax -; AVX1-NEXT: movsbl %al, %eax -; AVX1-NEXT: imull $-109, %eax, %ecx -; AVX1-NEXT: shrl $8, %ecx -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movb %al, %cl -; AVX1-NEXT: shrb $7, %cl -; AVX1-NEXT: sarb $2, %al -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movzbl %al, %eax -; AVX1-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $15, %xmm1, %eax -; AVX1-NEXT: movsbl %al, %eax -; AVX1-NEXT: imull $-109, %eax, %ecx -; AVX1-NEXT: shrl $8, %ecx -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movb %al, %cl -; AVX1-NEXT: shrb $7, %cl -; AVX1-NEXT: sarb $2, %al -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movzbl %al, %eax -; AVX1-NEXT: vpinsrb $15, %eax, %xmm2, %xmm1 -; AVX1-NEXT: vpextrb $1, %xmm0, %eax -; AVX1-NEXT: movsbl %al, %eax -; AVX1-NEXT: imull $-109, %eax, %ecx -; AVX1-NEXT: shrl $8, %ecx -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movb %al, %cl -; AVX1-NEXT: shrb $7, %cl -; AVX1-NEXT: sarb $2, %al -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movzbl %al, %eax -; AVX1-NEXT: vpextrb $0, %xmm0, %ecx -; AVX1-NEXT: movsbl %cl, %ecx -; AVX1-NEXT: imull $-109, %ecx, %edx -; AVX1-NEXT: shrl $8, %edx -; AVX1-NEXT: addb %dl, %cl -; AVX1-NEXT: movb %cl, %dl -; AVX1-NEXT: shrb $7, %dl -; AVX1-NEXT: sarb $2, %cl -; AVX1-NEXT: addb %dl, %cl -; AVX1-NEXT: movzbl %cl, %ecx -; AVX1-NEXT: vmovd %ecx, %xmm2 -; AVX1-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $2, %xmm0, %eax -; AVX1-NEXT: movsbl %al, %eax -; AVX1-NEXT: imull $-109, %eax, %ecx -; AVX1-NEXT: shrl $8, %ecx -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movb %al, %cl -; AVX1-NEXT: shrb $7, %cl -; AVX1-NEXT: sarb $2, %al -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movzbl %al, %eax -; AVX1-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $3, %xmm0, %eax -; AVX1-NEXT: movsbl %al, %eax -; AVX1-NEXT: imull $-109, %eax, %ecx -; AVX1-NEXT: shrl $8, %ecx -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movb %al, %cl -; AVX1-NEXT: shrb $7, %cl -; AVX1-NEXT: sarb $2, %al -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movzbl %al, %eax -; AVX1-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $4, %xmm0, %eax -; AVX1-NEXT: movsbl %al, %eax -; AVX1-NEXT: imull $-109, %eax, %ecx -; AVX1-NEXT: shrl $8, %ecx -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movb %al, %cl -; AVX1-NEXT: shrb $7, %cl -; AVX1-NEXT: sarb $2, %al -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movzbl %al, %eax -; AVX1-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $5, %xmm0, %eax -; AVX1-NEXT: movsbl %al, %eax -; AVX1-NEXT: imull $-109, %eax, %ecx -; AVX1-NEXT: shrl $8, %ecx -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movb %al, %cl -; AVX1-NEXT: shrb $7, %cl -; AVX1-NEXT: sarb $2, %al -; AVX1-NEXT: addb %cl, %al 
-; AVX1-NEXT: movzbl %al, %eax -; AVX1-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $6, %xmm0, %eax -; AVX1-NEXT: movsbl %al, %eax -; AVX1-NEXT: imull $-109, %eax, %ecx -; AVX1-NEXT: shrl $8, %ecx -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movb %al, %cl -; AVX1-NEXT: shrb $7, %cl -; AVX1-NEXT: sarb $2, %al -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movzbl %al, %eax -; AVX1-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $7, %xmm0, %eax -; AVX1-NEXT: movsbl %al, %eax -; AVX1-NEXT: imull $-109, %eax, %ecx -; AVX1-NEXT: shrl $8, %ecx -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movb %al, %cl -; AVX1-NEXT: shrb $7, %cl -; AVX1-NEXT: sarb $2, %al -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movzbl %al, %eax -; AVX1-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $8, %xmm0, %eax -; AVX1-NEXT: movsbl %al, %eax -; AVX1-NEXT: imull $-109, %eax, %ecx -; AVX1-NEXT: shrl $8, %ecx -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movb %al, %cl -; AVX1-NEXT: shrb $7, %cl -; AVX1-NEXT: sarb $2, %al -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movzbl %al, %eax -; AVX1-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $9, %xmm0, %eax -; AVX1-NEXT: movsbl %al, %eax -; AVX1-NEXT: imull $-109, %eax, %ecx -; AVX1-NEXT: shrl $8, %ecx -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movb %al, %cl -; AVX1-NEXT: shrb $7, %cl -; AVX1-NEXT: sarb $2, %al -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movzbl %al, %eax -; AVX1-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $10, %xmm0, %eax -; AVX1-NEXT: movsbl %al, %eax -; AVX1-NEXT: imull $-109, %eax, %ecx -; AVX1-NEXT: shrl $8, %ecx -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movb %al, %cl -; AVX1-NEXT: shrb $7, %cl -; AVX1-NEXT: sarb $2, %al -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movzbl %al, %eax -; AVX1-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $11, %xmm0, %eax -; AVX1-NEXT: movsbl %al, %eax -; AVX1-NEXT: imull $-109, %eax, %ecx -; AVX1-NEXT: shrl $8, %ecx -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movb %al, %cl -; AVX1-NEXT: shrb $7, %cl -; AVX1-NEXT: sarb $2, %al -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movzbl %al, %eax -; AVX1-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $12, %xmm0, %eax -; AVX1-NEXT: movsbl %al, %eax -; AVX1-NEXT: imull $-109, %eax, %ecx -; AVX1-NEXT: shrl $8, %ecx -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movb %al, %cl -; AVX1-NEXT: shrb $7, %cl -; AVX1-NEXT: sarb $2, %al -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movzbl %al, %eax -; AVX1-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $13, %xmm0, %eax -; AVX1-NEXT: movsbl %al, %eax -; AVX1-NEXT: imull $-109, %eax, %ecx -; AVX1-NEXT: shrl $8, %ecx -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movb %al, %cl -; AVX1-NEXT: shrb $7, %cl -; AVX1-NEXT: sarb $2, %al -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movzbl %al, %eax -; AVX1-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $14, %xmm0, %eax -; AVX1-NEXT: movsbl %al, %eax -; AVX1-NEXT: imull $-109, %eax, %ecx -; AVX1-NEXT: shrl $8, %ecx -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movb %al, %cl -; AVX1-NEXT: shrb $7, %cl -; AVX1-NEXT: sarb $2, %al -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movzbl %al, %eax -; AVX1-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $15, %xmm0, %eax -; AVX1-NEXT: movsbl %al, %eax -; AVX1-NEXT: imull $-109, %eax, %ecx -; AVX1-NEXT: shrl $8, %ecx -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movb %al, %cl -; AVX1-NEXT: shrb $7, %cl -; AVX1-NEXT: sarb $2, %al -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movzbl %al, %eax -; AVX1-NEXT: vpinsrb 
$15, %eax, %xmm2, %xmm0 +; AVX1-NEXT: vpmovsxbw %xmm1, %xmm2 +; AVX1-NEXT: vpmovsxbw {{.*}}(%rip), %xmm3 +; AVX1-NEXT: vpmullw %xmm3, %xmm2, %xmm2 +; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2 +; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[2,3,0,1] +; AVX1-NEXT: vpmovsxbw %xmm4, %xmm4 +; AVX1-NEXT: vpmullw %xmm3, %xmm4, %xmm4 +; AVX1-NEXT: vpsrlw $8, %xmm4, %xmm4 +; AVX1-NEXT: vpackuswb %xmm4, %xmm2, %xmm2 +; AVX1-NEXT: vpaddb %xmm1, %xmm2, %xmm1 +; AVX1-NEXT: vpsrlw $7, %xmm1, %xmm2 +; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1] +; AVX1-NEXT: vpand %xmm4, %xmm2, %xmm2 +; AVX1-NEXT: vpsrlw $2, %xmm1, %xmm1 +; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63] +; AVX1-NEXT: vpand %xmm5, %xmm1, %xmm1 +; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32] +; AVX1-NEXT: vpxor %xmm6, %xmm1, %xmm1 +; AVX1-NEXT: vpsubb %xmm6, %xmm1, %xmm1 +; AVX1-NEXT: vpaddb %xmm2, %xmm1, %xmm1 +; AVX1-NEXT: vpmovsxbw %xmm0, %xmm2 +; AVX1-NEXT: vpmullw %xmm3, %xmm2, %xmm2 +; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2 +; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm0[2,3,0,1] +; AVX1-NEXT: vpmovsxbw %xmm7, %xmm7 +; AVX1-NEXT: vpmullw %xmm3, %xmm7, %xmm3 +; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3 +; AVX1-NEXT: vpackuswb %xmm3, %xmm2, %xmm2 +; AVX1-NEXT: vpaddb %xmm0, %xmm2, %xmm0 +; AVX1-NEXT: vpsrlw $7, %xmm0, %xmm2 +; AVX1-NEXT: vpand %xmm4, %xmm2, %xmm2 +; AVX1-NEXT: vpsrlw $2, %xmm0, %xmm0 +; AVX1-NEXT: vpand %xmm5, %xmm0, %xmm0 +; AVX1-NEXT: vpxor %xmm6, %xmm0, %xmm0 +; AVX1-NEXT: vpsubb %xmm6, %xmm0, %xmm0 +; AVX1-NEXT: vpaddb %xmm2, %xmm0, %xmm0 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; AVX1-NEXT: retq ; ; AVX2-LABEL: test_div7_32i8: ; AVX2: # BB#0: -; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX2-NEXT: vpextrb $1, %xmm1, %eax -; AVX2-NEXT: movsbl %al, %eax -; AVX2-NEXT: imull $-109, %eax, %ecx -; AVX2-NEXT: shrl $8, %ecx -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movb %al, %cl -; AVX2-NEXT: shrb $7, %cl -; AVX2-NEXT: sarb $2, %al -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movzbl %al, %eax -; AVX2-NEXT: vpextrb $0, %xmm1, %ecx -; AVX2-NEXT: movsbl %cl, %ecx -; AVX2-NEXT: imull $-109, %ecx, %edx -; AVX2-NEXT: shrl $8, %edx -; AVX2-NEXT: addb %dl, %cl -; AVX2-NEXT: movb %cl, %dl -; AVX2-NEXT: shrb $7, %dl -; AVX2-NEXT: sarb $2, %cl -; AVX2-NEXT: addb %dl, %cl -; AVX2-NEXT: movzbl %cl, %ecx -; AVX2-NEXT: vmovd %ecx, %xmm2 -; AVX2-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $2, %xmm1, %eax -; AVX2-NEXT: movsbl %al, %eax -; AVX2-NEXT: imull $-109, %eax, %ecx -; AVX2-NEXT: shrl $8, %ecx -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movb %al, %cl -; AVX2-NEXT: shrb $7, %cl -; AVX2-NEXT: sarb $2, %al -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movzbl %al, %eax -; AVX2-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $3, %xmm1, %eax -; AVX2-NEXT: movsbl %al, %eax -; AVX2-NEXT: imull $-109, %eax, %ecx -; AVX2-NEXT: shrl $8, %ecx -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movb %al, %cl -; AVX2-NEXT: shrb $7, %cl -; AVX2-NEXT: sarb $2, %al -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movzbl %al, %eax -; AVX2-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $4, %xmm1, %eax -; AVX2-NEXT: movsbl %al, %eax -; AVX2-NEXT: imull $-109, %eax, %ecx -; AVX2-NEXT: shrl $8, %ecx -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movb %al, %cl -; AVX2-NEXT: shrb $7, %cl -; AVX2-NEXT: sarb $2, %al -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movzbl %al, %eax -; AVX2-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $5, %xmm1, %eax -; 
AVX2-NEXT: movsbl %al, %eax -; AVX2-NEXT: imull $-109, %eax, %ecx -; AVX2-NEXT: shrl $8, %ecx -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movb %al, %cl -; AVX2-NEXT: shrb $7, %cl -; AVX2-NEXT: sarb $2, %al -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movzbl %al, %eax -; AVX2-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $6, %xmm1, %eax -; AVX2-NEXT: movsbl %al, %eax -; AVX2-NEXT: imull $-109, %eax, %ecx -; AVX2-NEXT: shrl $8, %ecx -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movb %al, %cl -; AVX2-NEXT: shrb $7, %cl -; AVX2-NEXT: sarb $2, %al -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movzbl %al, %eax -; AVX2-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $7, %xmm1, %eax -; AVX2-NEXT: movsbl %al, %eax -; AVX2-NEXT: imull $-109, %eax, %ecx -; AVX2-NEXT: shrl $8, %ecx -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movb %al, %cl -; AVX2-NEXT: shrb $7, %cl -; AVX2-NEXT: sarb $2, %al -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movzbl %al, %eax -; AVX2-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $8, %xmm1, %eax -; AVX2-NEXT: movsbl %al, %eax -; AVX2-NEXT: imull $-109, %eax, %ecx -; AVX2-NEXT: shrl $8, %ecx -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movb %al, %cl -; AVX2-NEXT: shrb $7, %cl -; AVX2-NEXT: sarb $2, %al -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movzbl %al, %eax -; AVX2-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $9, %xmm1, %eax -; AVX2-NEXT: movsbl %al, %eax -; AVX2-NEXT: imull $-109, %eax, %ecx -; AVX2-NEXT: shrl $8, %ecx -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movb %al, %cl -; AVX2-NEXT: shrb $7, %cl -; AVX2-NEXT: sarb $2, %al -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movzbl %al, %eax -; AVX2-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $10, %xmm1, %eax -; AVX2-NEXT: movsbl %al, %eax -; AVX2-NEXT: imull $-109, %eax, %ecx -; AVX2-NEXT: shrl $8, %ecx -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movb %al, %cl -; AVX2-NEXT: shrb $7, %cl -; AVX2-NEXT: sarb $2, %al -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movzbl %al, %eax -; AVX2-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $11, %xmm1, %eax -; AVX2-NEXT: movsbl %al, %eax -; AVX2-NEXT: imull $-109, %eax, %ecx -; AVX2-NEXT: shrl $8, %ecx -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movb %al, %cl -; AVX2-NEXT: shrb $7, %cl -; AVX2-NEXT: sarb $2, %al -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movzbl %al, %eax -; AVX2-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $12, %xmm1, %eax -; AVX2-NEXT: movsbl %al, %eax -; AVX2-NEXT: imull $-109, %eax, %ecx -; AVX2-NEXT: shrl $8, %ecx -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movb %al, %cl -; AVX2-NEXT: shrb $7, %cl -; AVX2-NEXT: sarb $2, %al -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movzbl %al, %eax -; AVX2-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $13, %xmm1, %eax -; AVX2-NEXT: movsbl %al, %eax -; AVX2-NEXT: imull $-109, %eax, %ecx -; AVX2-NEXT: shrl $8, %ecx -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movb %al, %cl -; AVX2-NEXT: shrb $7, %cl -; AVX2-NEXT: sarb $2, %al -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movzbl %al, %eax -; AVX2-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $14, %xmm1, %eax -; AVX2-NEXT: movsbl %al, %eax -; AVX2-NEXT: imull $-109, %eax, %ecx -; AVX2-NEXT: shrl $8, %ecx -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movb %al, %cl -; AVX2-NEXT: shrb $7, %cl -; AVX2-NEXT: sarb $2, %al -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movzbl %al, %eax -; AVX2-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $15, %xmm1, %eax -; AVX2-NEXT: movsbl %al, %eax -; AVX2-NEXT: imull 
$-109, %eax, %ecx -; AVX2-NEXT: shrl $8, %ecx -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movb %al, %cl -; AVX2-NEXT: shrb $7, %cl -; AVX2-NEXT: sarb $2, %al -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movzbl %al, %eax -; AVX2-NEXT: vpinsrb $15, %eax, %xmm2, %xmm1 -; AVX2-NEXT: vpextrb $1, %xmm0, %eax -; AVX2-NEXT: movsbl %al, %eax -; AVX2-NEXT: imull $-109, %eax, %ecx -; AVX2-NEXT: shrl $8, %ecx -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movb %al, %cl -; AVX2-NEXT: shrb $7, %cl -; AVX2-NEXT: sarb $2, %al -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movzbl %al, %eax -; AVX2-NEXT: vpextrb $0, %xmm0, %ecx -; AVX2-NEXT: movsbl %cl, %ecx -; AVX2-NEXT: imull $-109, %ecx, %edx -; AVX2-NEXT: shrl $8, %edx -; AVX2-NEXT: addb %dl, %cl -; AVX2-NEXT: movb %cl, %dl -; AVX2-NEXT: shrb $7, %dl -; AVX2-NEXT: sarb $2, %cl -; AVX2-NEXT: addb %dl, %cl -; AVX2-NEXT: movzbl %cl, %ecx -; AVX2-NEXT: vmovd %ecx, %xmm2 -; AVX2-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $2, %xmm0, %eax -; AVX2-NEXT: movsbl %al, %eax -; AVX2-NEXT: imull $-109, %eax, %ecx -; AVX2-NEXT: shrl $8, %ecx -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movb %al, %cl -; AVX2-NEXT: shrb $7, %cl -; AVX2-NEXT: sarb $2, %al -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movzbl %al, %eax -; AVX2-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $3, %xmm0, %eax -; AVX2-NEXT: movsbl %al, %eax -; AVX2-NEXT: imull $-109, %eax, %ecx -; AVX2-NEXT: shrl $8, %ecx -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movb %al, %cl -; AVX2-NEXT: shrb $7, %cl -; AVX2-NEXT: sarb $2, %al -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movzbl %al, %eax -; AVX2-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $4, %xmm0, %eax -; AVX2-NEXT: movsbl %al, %eax -; AVX2-NEXT: imull $-109, %eax, %ecx -; AVX2-NEXT: shrl $8, %ecx -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movb %al, %cl -; AVX2-NEXT: shrb $7, %cl -; AVX2-NEXT: sarb $2, %al -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movzbl %al, %eax -; AVX2-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $5, %xmm0, %eax -; AVX2-NEXT: movsbl %al, %eax -; AVX2-NEXT: imull $-109, %eax, %ecx -; AVX2-NEXT: shrl $8, %ecx -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movb %al, %cl -; AVX2-NEXT: shrb $7, %cl -; AVX2-NEXT: sarb $2, %al -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movzbl %al, %eax -; AVX2-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $6, %xmm0, %eax -; AVX2-NEXT: movsbl %al, %eax -; AVX2-NEXT: imull $-109, %eax, %ecx -; AVX2-NEXT: shrl $8, %ecx -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movb %al, %cl -; AVX2-NEXT: shrb $7, %cl -; AVX2-NEXT: sarb $2, %al -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movzbl %al, %eax -; AVX2-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $7, %xmm0, %eax -; AVX2-NEXT: movsbl %al, %eax -; AVX2-NEXT: imull $-109, %eax, %ecx -; AVX2-NEXT: shrl $8, %ecx -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movb %al, %cl -; AVX2-NEXT: shrb $7, %cl -; AVX2-NEXT: sarb $2, %al -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movzbl %al, %eax -; AVX2-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $8, %xmm0, %eax -; AVX2-NEXT: movsbl %al, %eax -; AVX2-NEXT: imull $-109, %eax, %ecx -; AVX2-NEXT: shrl $8, %ecx -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movb %al, %cl -; AVX2-NEXT: shrb $7, %cl -; AVX2-NEXT: sarb $2, %al -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movzbl %al, %eax -; AVX2-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $9, %xmm0, %eax -; AVX2-NEXT: movsbl %al, %eax -; AVX2-NEXT: imull $-109, %eax, %ecx -; AVX2-NEXT: shrl $8, %ecx -; AVX2-NEXT: addb %cl, %al -; 
AVX2-NEXT: movb %al, %cl -; AVX2-NEXT: shrb $7, %cl -; AVX2-NEXT: sarb $2, %al -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movzbl %al, %eax -; AVX2-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $10, %xmm0, %eax -; AVX2-NEXT: movsbl %al, %eax -; AVX2-NEXT: imull $-109, %eax, %ecx -; AVX2-NEXT: shrl $8, %ecx -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movb %al, %cl -; AVX2-NEXT: shrb $7, %cl -; AVX2-NEXT: sarb $2, %al -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movzbl %al, %eax -; AVX2-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $11, %xmm0, %eax -; AVX2-NEXT: movsbl %al, %eax -; AVX2-NEXT: imull $-109, %eax, %ecx -; AVX2-NEXT: shrl $8, %ecx -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movb %al, %cl -; AVX2-NEXT: shrb $7, %cl -; AVX2-NEXT: sarb $2, %al -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movzbl %al, %eax -; AVX2-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $12, %xmm0, %eax -; AVX2-NEXT: movsbl %al, %eax -; AVX2-NEXT: imull $-109, %eax, %ecx -; AVX2-NEXT: shrl $8, %ecx -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movb %al, %cl -; AVX2-NEXT: shrb $7, %cl -; AVX2-NEXT: sarb $2, %al -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movzbl %al, %eax -; AVX2-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $13, %xmm0, %eax -; AVX2-NEXT: movsbl %al, %eax -; AVX2-NEXT: imull $-109, %eax, %ecx -; AVX2-NEXT: shrl $8, %ecx -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movb %al, %cl -; AVX2-NEXT: shrb $7, %cl -; AVX2-NEXT: sarb $2, %al -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movzbl %al, %eax -; AVX2-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $14, %xmm0, %eax -; AVX2-NEXT: movsbl %al, %eax -; AVX2-NEXT: imull $-109, %eax, %ecx -; AVX2-NEXT: shrl $8, %ecx -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movb %al, %cl -; AVX2-NEXT: shrb $7, %cl -; AVX2-NEXT: sarb $2, %al -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movzbl %al, %eax -; AVX2-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $15, %xmm0, %eax -; AVX2-NEXT: movsbl %al, %eax -; AVX2-NEXT: imull $-109, %eax, %ecx -; AVX2-NEXT: shrl $8, %ecx -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movb %al, %cl -; AVX2-NEXT: shrb $7, %cl -; AVX2-NEXT: sarb $2, %al -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movzbl %al, %eax -; AVX2-NEXT: vpinsrb $15, %eax, %xmm2, %xmm0 -; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0 +; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147] +; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2 +; AVX2-NEXT: vpmovsxbw %xmm2, %ymm2 +; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm3 +; AVX2-NEXT: vpmovsxbw %xmm3, %ymm3 +; AVX2-NEXT: vpmullw %ymm2, %ymm3, %ymm2 +; AVX2-NEXT: vpsrlw $8, %ymm2, %ymm2 +; AVX2-NEXT: vpmovsxbw %xmm1, %ymm1 +; AVX2-NEXT: vpmovsxbw %xmm0, %ymm3 +; AVX2-NEXT: vpmullw %ymm1, %ymm3, %ymm1 +; AVX2-NEXT: vpsrlw $8, %ymm1, %ymm1 +; AVX2-NEXT: vperm2i128 {{.*#+}} ymm3 = ymm1[2,3],ymm2[2,3] +; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1 +; AVX2-NEXT: vpackuswb %ymm3, %ymm1, %ymm1 +; AVX2-NEXT: vpaddb %ymm0, %ymm1, %ymm0 +; AVX2-NEXT: vpsrlw $2, %ymm0, %ymm1 +; AVX2-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1 +; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32] +; AVX2-NEXT: vpxor %ymm2, %ymm1, %ymm1 +; AVX2-NEXT: vpsubb %ymm2, %ymm1, %ymm1 +; AVX2-NEXT: vpsrlw $7, %ymm0, %ymm0 +; AVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0 +; AVX2-NEXT: vpaddb %ymm0, %ymm1, %ymm0 ; AVX2-NEXT: retq %res = 
sdiv <32 x i8> %a, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7> ret <32 x i8> %res @@ -1321,198 +539,20 @@ ; AVX1-LABEL: test_rem7_16i16: ; AVX1: # BB#0: ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1 -; AVX1-NEXT: vpextrw $1, %xmm1, %eax -; AVX1-NEXT: movswl %ax, %ecx -; AVX1-NEXT: imull $18725, %ecx, %ecx # imm = 0x4925 -; AVX1-NEXT: movl %ecx, %edx -; AVX1-NEXT: shrl $16, %edx -; AVX1-NEXT: sarw %dx -; AVX1-NEXT: shrl $31, %ecx -; AVX1-NEXT: addl %edx, %ecx -; AVX1-NEXT: leal (,%rcx,8), %edx -; AVX1-NEXT: subl %ecx, %edx -; AVX1-NEXT: subl %edx, %eax -; AVX1-NEXT: vmovd %xmm1, %ecx -; AVX1-NEXT: movswl %cx, %edx -; AVX1-NEXT: imull $18725, %edx, %edx # imm = 0x4925 -; AVX1-NEXT: movl %edx, %esi -; AVX1-NEXT: shrl $16, %esi -; AVX1-NEXT: sarw %si -; AVX1-NEXT: shrl $31, %edx -; AVX1-NEXT: addl %esi, %edx -; AVX1-NEXT: leal (,%rdx,8), %esi -; AVX1-NEXT: subl %edx, %esi -; AVX1-NEXT: subl %esi, %ecx -; AVX1-NEXT: vmovd %ecx, %xmm2 -; AVX1-NEXT: vpinsrw $1, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrw $2, %xmm1, %eax -; AVX1-NEXT: movswl %ax, %ecx -; AVX1-NEXT: imull $18725, %ecx, %ecx # imm = 0x4925 -; AVX1-NEXT: movl %ecx, %edx -; AVX1-NEXT: shrl $16, %edx -; AVX1-NEXT: sarw %dx -; AVX1-NEXT: shrl $31, %ecx -; AVX1-NEXT: addl %edx, %ecx -; AVX1-NEXT: leal (,%rcx,8), %edx -; AVX1-NEXT: subl %ecx, %edx -; AVX1-NEXT: subl %edx, %eax -; AVX1-NEXT: vpinsrw $2, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrw $3, %xmm1, %eax -; AVX1-NEXT: movswl %ax, %ecx -; AVX1-NEXT: imull $18725, %ecx, %ecx # imm = 0x4925 -; AVX1-NEXT: movl %ecx, %edx -; AVX1-NEXT: shrl $16, %edx -; AVX1-NEXT: sarw %dx -; AVX1-NEXT: shrl $31, %ecx -; AVX1-NEXT: addl %edx, %ecx -; AVX1-NEXT: leal (,%rcx,8), %edx -; AVX1-NEXT: subl %ecx, %edx -; AVX1-NEXT: subl %edx, %eax -; AVX1-NEXT: vpinsrw $3, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrw $4, %xmm1, %eax -; AVX1-NEXT: movswl %ax, %ecx -; AVX1-NEXT: imull $18725, %ecx, %ecx # imm = 0x4925 -; AVX1-NEXT: movl %ecx, %edx -; AVX1-NEXT: shrl $16, %edx -; AVX1-NEXT: sarw %dx -; AVX1-NEXT: shrl $31, %ecx -; AVX1-NEXT: addl %edx, %ecx -; AVX1-NEXT: leal (,%rcx,8), %edx -; AVX1-NEXT: subl %ecx, %edx -; AVX1-NEXT: subl %edx, %eax -; AVX1-NEXT: vpinsrw $4, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrw $5, %xmm1, %eax -; AVX1-NEXT: movswl %ax, %ecx -; AVX1-NEXT: imull $18725, %ecx, %ecx # imm = 0x4925 -; AVX1-NEXT: movl %ecx, %edx -; AVX1-NEXT: shrl $16, %edx -; AVX1-NEXT: sarw %dx -; AVX1-NEXT: shrl $31, %ecx -; AVX1-NEXT: addl %edx, %ecx -; AVX1-NEXT: leal (,%rcx,8), %edx -; AVX1-NEXT: subl %ecx, %edx -; AVX1-NEXT: subl %edx, %eax -; AVX1-NEXT: vpinsrw $5, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrw $6, %xmm1, %eax -; AVX1-NEXT: movswl %ax, %ecx -; AVX1-NEXT: imull $18725, %ecx, %ecx # imm = 0x4925 -; AVX1-NEXT: movl %ecx, %edx -; AVX1-NEXT: shrl $16, %edx -; AVX1-NEXT: sarw %dx -; AVX1-NEXT: shrl $31, %ecx -; AVX1-NEXT: addl %edx, %ecx -; AVX1-NEXT: leal (,%rcx,8), %edx -; AVX1-NEXT: subl %ecx, %edx -; AVX1-NEXT: subl %edx, %eax -; AVX1-NEXT: vpinsrw $6, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrw $7, %xmm1, %eax -; AVX1-NEXT: movswl %ax, %ecx -; AVX1-NEXT: imull $18725, %ecx, %ecx # imm = 0x4925 -; AVX1-NEXT: movl %ecx, %edx -; AVX1-NEXT: shrl $16, %edx -; AVX1-NEXT: sarw %dx -; AVX1-NEXT: shrl $31, %ecx -; AVX1-NEXT: addl %edx, %ecx -; AVX1-NEXT: leal (,%rcx,8), %edx -; AVX1-NEXT: subl %ecx, %edx -; AVX1-NEXT: subl %edx, %eax -; AVX1-NEXT: vpinsrw $7, %eax, %xmm2, %xmm1 -; AVX1-NEXT: vpextrw $1, %xmm0, %eax -; AVX1-NEXT: movswl %ax, %ecx -; AVX1-NEXT: imull $18725, %ecx, %ecx # imm = 0x4925 -; AVX1-NEXT: movl %ecx, %edx -; AVX1-NEXT: shrl $16, %edx -;
AVX1-NEXT: sarw %dx -; AVX1-NEXT: shrl $31, %ecx -; AVX1-NEXT: addl %edx, %ecx -; AVX1-NEXT: leal (,%rcx,8), %edx -; AVX1-NEXT: subl %ecx, %edx -; AVX1-NEXT: subl %edx, %eax -; AVX1-NEXT: vmovd %xmm0, %ecx -; AVX1-NEXT: movswl %cx, %edx -; AVX1-NEXT: imull $18725, %edx, %edx # imm = 0x4925 -; AVX1-NEXT: movl %edx, %esi -; AVX1-NEXT: shrl $16, %esi -; AVX1-NEXT: sarw %si -; AVX1-NEXT: shrl $31, %edx -; AVX1-NEXT: addl %esi, %edx -; AVX1-NEXT: leal (,%rdx,8), %esi -; AVX1-NEXT: subl %edx, %esi -; AVX1-NEXT: subl %esi, %ecx -; AVX1-NEXT: vmovd %ecx, %xmm2 -; AVX1-NEXT: vpinsrw $1, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrw $2, %xmm0, %eax -; AVX1-NEXT: movswl %ax, %ecx -; AVX1-NEXT: imull $18725, %ecx, %ecx # imm = 0x4925 -; AVX1-NEXT: movl %ecx, %edx -; AVX1-NEXT: shrl $16, %edx -; AVX1-NEXT: sarw %dx -; AVX1-NEXT: shrl $31, %ecx -; AVX1-NEXT: addl %edx, %ecx -; AVX1-NEXT: leal (,%rcx,8), %edx -; AVX1-NEXT: subl %ecx, %edx -; AVX1-NEXT: subl %edx, %eax -; AVX1-NEXT: vpinsrw $2, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrw $3, %xmm0, %eax -; AVX1-NEXT: movswl %ax, %ecx -; AVX1-NEXT: imull $18725, %ecx, %ecx # imm = 0x4925 -; AVX1-NEXT: movl %ecx, %edx -; AVX1-NEXT: shrl $16, %edx -; AVX1-NEXT: sarw %dx -; AVX1-NEXT: shrl $31, %ecx -; AVX1-NEXT: addl %edx, %ecx -; AVX1-NEXT: leal (,%rcx,8), %edx -; AVX1-NEXT: subl %ecx, %edx -; AVX1-NEXT: subl %edx, %eax -; AVX1-NEXT: vpinsrw $3, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrw $4, %xmm0, %eax -; AVX1-NEXT: movswl %ax, %ecx -; AVX1-NEXT: imull $18725, %ecx, %ecx # imm = 0x4925 -; AVX1-NEXT: movl %ecx, %edx -; AVX1-NEXT: shrl $16, %edx -; AVX1-NEXT: sarw %dx -; AVX1-NEXT: shrl $31, %ecx -; AVX1-NEXT: addl %edx, %ecx -; AVX1-NEXT: leal (,%rcx,8), %edx -; AVX1-NEXT: subl %ecx, %edx -; AVX1-NEXT: subl %edx, %eax -; AVX1-NEXT: vpinsrw $4, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrw $5, %xmm0, %eax -; AVX1-NEXT: movswl %ax, %ecx -; AVX1-NEXT: imull $18725, %ecx, %ecx # imm = 0x4925 -; AVX1-NEXT: movl %ecx, %edx -; AVX1-NEXT: shrl $16, %edx -; AVX1-NEXT: sarw %dx -; AVX1-NEXT: shrl $31, %ecx -; AVX1-NEXT: addl %edx, %ecx -; AVX1-NEXT: leal (,%rcx,8), %edx -; AVX1-NEXT: subl %ecx, %edx -; AVX1-NEXT: subl %edx, %eax -; AVX1-NEXT: vpinsrw $5, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrw $6, %xmm0, %eax -; AVX1-NEXT: movswl %ax, %ecx -; AVX1-NEXT: imull $18725, %ecx, %ecx # imm = 0x4925 -; AVX1-NEXT: movl %ecx, %edx -; AVX1-NEXT: shrl $16, %edx -; AVX1-NEXT: sarw %dx -; AVX1-NEXT: shrl $31, %ecx -; AVX1-NEXT: addl %edx, %ecx -; AVX1-NEXT: leal (,%rcx,8), %edx -; AVX1-NEXT: subl %ecx, %edx -; AVX1-NEXT: subl %edx, %eax -; AVX1-NEXT: vpinsrw $6, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrw $7, %xmm0, %eax -; AVX1-NEXT: movswl %ax, %ecx -; AVX1-NEXT: imull $18725, %ecx, %ecx # imm = 0x4925 -; AVX1-NEXT: movl %ecx, %edx -; AVX1-NEXT: shrl $16, %edx -; AVX1-NEXT: sarw %dx -; AVX1-NEXT: shrl $31, %ecx -; AVX1-NEXT: addl %edx, %ecx -; AVX1-NEXT: leal (,%rcx,8), %edx -; AVX1-NEXT: subl %ecx, %edx -; AVX1-NEXT: subl %edx, %eax -; AVX1-NEXT: vpinsrw $7, %eax, %xmm2, %xmm0 +; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [18725,18725,18725,18725,18725,18725,18725,18725] +; AVX1-NEXT: vpmulhw %xmm2, %xmm1, %xmm3 +; AVX1-NEXT: vpsrlw $15, %xmm3, %xmm4 +; AVX1-NEXT: vpsraw $1, %xmm3, %xmm3 +; AVX1-NEXT: vpaddw %xmm4, %xmm3, %xmm3 +; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [7,7,7,7,7,7,7,7] +; AVX1-NEXT: vpmullw %xmm4, %xmm3, %xmm3 +; AVX1-NEXT: vpsubw %xmm3, %xmm1, %xmm1 +; AVX1-NEXT: vpmulhw %xmm2, %xmm0, %xmm2 +; AVX1-NEXT: vpsrlw $15, %xmm2, %xmm3 +; AVX1-NEXT: vpsraw $1, %xmm2, %xmm2 +; AVX1-NEXT: vpaddw 
%xmm3, %xmm2, %xmm2 +; AVX1-NEXT: vpmullw %xmm4, %xmm2, %xmm2 +; AVX1-NEXT: vpsubw %xmm2, %xmm0, %xmm0 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; AVX1-NEXT: retq ; @@ -1532,848 +572,108 @@ define <32 x i8> @test_rem7_32i8(<32 x i8> %a) nounwind { ; AVX1-LABEL: test_rem7_32i8: ; AVX1: # BB#0: -; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1 -; AVX1-NEXT: vpextrb $1, %xmm1, %eax -; AVX1-NEXT: movsbl %al, %edx -; AVX1-NEXT: imull $-109, %edx, %eax -; AVX1-NEXT: shrl $8, %eax -; AVX1-NEXT: addb %dl, %al -; AVX1-NEXT: movb %al, %cl -; AVX1-NEXT: shrb $7, %cl -; AVX1-NEXT: sarb $2, %al -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movb $7, %dil -; AVX1-NEXT: mulb %dil -; AVX1-NEXT: subb %al, %dl -; AVX1-NEXT: movzbl %dl, %edx -; AVX1-NEXT: vpextrb $0, %xmm1, %eax -; AVX1-NEXT: movsbl %al, %esi -; AVX1-NEXT: imull $-109, %esi, %eax -; AVX1-NEXT: shrl $8, %eax -; AVX1-NEXT: addb %sil, %al -; AVX1-NEXT: movb %al, %cl -; AVX1-NEXT: shrb $7, %cl -; AVX1-NEXT: sarb $2, %al -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: mulb %dil -; AVX1-NEXT: subb %al, %sil -; AVX1-NEXT: movzbl %sil, %eax -; AVX1-NEXT: vmovd %eax, %xmm2 -; AVX1-NEXT: vpinsrb $1, %edx, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $2, %xmm1, %eax -; AVX1-NEXT: movsbl %al, %ecx -; AVX1-NEXT: imull $-109, %ecx, %eax -; AVX1-NEXT: shrl $8, %eax -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movb %al, %dl -; AVX1-NEXT: shrb $7, %dl -; AVX1-NEXT: sarb $2, %al -; AVX1-NEXT: addb %dl, %al -; AVX1-NEXT: mulb %dil -; AVX1-NEXT: subb %al, %cl -; AVX1-NEXT: movzbl %cl, %eax -; AVX1-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $3, %xmm1, %eax -; AVX1-NEXT: movsbl %al, %ecx -; AVX1-NEXT: imull $-109, %ecx, %eax -; AVX1-NEXT: shrl $8, %eax -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movb %al, %dl -; AVX1-NEXT: shrb $7, %dl -; AVX1-NEXT: sarb $2, %al -; AVX1-NEXT: addb %dl, %al -; AVX1-NEXT: mulb %dil -; AVX1-NEXT: subb %al, %cl -; AVX1-NEXT: movzbl %cl, %eax -; AVX1-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $4, %xmm1, %eax -; AVX1-NEXT: movsbl %al, %ecx -; AVX1-NEXT: imull $-109, %ecx, %eax -; AVX1-NEXT: shrl $8, %eax -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movb %al, %dl -; AVX1-NEXT: shrb $7, %dl -; AVX1-NEXT: sarb $2, %al -; AVX1-NEXT: addb %dl, %al -; AVX1-NEXT: mulb %dil -; AVX1-NEXT: subb %al, %cl -; AVX1-NEXT: movzbl %cl, %eax -; AVX1-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $5, %xmm1, %eax -; AVX1-NEXT: movsbl %al, %ecx -; AVX1-NEXT: imull $-109, %ecx, %eax -; AVX1-NEXT: shrl $8, %eax -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movb %al, %dl -; AVX1-NEXT: shrb $7, %dl -; AVX1-NEXT: sarb $2, %al -; AVX1-NEXT: addb %dl, %al -; AVX1-NEXT: mulb %dil -; AVX1-NEXT: subb %al, %cl -; AVX1-NEXT: movzbl %cl, %eax -; AVX1-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $6, %xmm1, %eax -; AVX1-NEXT: movsbl %al, %ecx -; AVX1-NEXT: imull $-109, %ecx, %eax -; AVX1-NEXT: shrl $8, %eax -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movb %al, %dl -; AVX1-NEXT: shrb $7, %dl -; AVX1-NEXT: sarb $2, %al -; AVX1-NEXT: addb %dl, %al -; AVX1-NEXT: mulb %dil -; AVX1-NEXT: subb %al, %cl -; AVX1-NEXT: movzbl %cl, %eax -; AVX1-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $7, %xmm1, %eax -; AVX1-NEXT: movsbl %al, %ecx -; AVX1-NEXT: imull $-109, %ecx, %eax -; AVX1-NEXT: shrl $8, %eax -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movb %al, %dl -; AVX1-NEXT: shrb $7, %dl -; AVX1-NEXT: sarb $2, %al -; AVX1-NEXT: addb %dl, %al -; AVX1-NEXT: mulb %dil -; AVX1-NEXT: subb %al, %cl -; AVX1-NEXT: movzbl %cl, %eax -; AVX1-NEXT: vpinsrb $7, 
%eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $8, %xmm1, %eax -; AVX1-NEXT: movsbl %al, %ecx -; AVX1-NEXT: imull $-109, %ecx, %eax -; AVX1-NEXT: shrl $8, %eax -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movb %al, %dl -; AVX1-NEXT: shrb $7, %dl -; AVX1-NEXT: sarb $2, %al -; AVX1-NEXT: addb %dl, %al -; AVX1-NEXT: mulb %dil -; AVX1-NEXT: subb %al, %cl -; AVX1-NEXT: movzbl %cl, %eax -; AVX1-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $9, %xmm1, %eax -; AVX1-NEXT: movsbl %al, %ecx -; AVX1-NEXT: imull $-109, %ecx, %eax -; AVX1-NEXT: shrl $8, %eax -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movb %al, %dl -; AVX1-NEXT: shrb $7, %dl -; AVX1-NEXT: sarb $2, %al -; AVX1-NEXT: addb %dl, %al -; AVX1-NEXT: mulb %dil -; AVX1-NEXT: subb %al, %cl -; AVX1-NEXT: movzbl %cl, %eax -; AVX1-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $10, %xmm1, %eax -; AVX1-NEXT: movsbl %al, %ecx -; AVX1-NEXT: imull $-109, %ecx, %eax -; AVX1-NEXT: shrl $8, %eax -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movb %al, %dl -; AVX1-NEXT: shrb $7, %dl -; AVX1-NEXT: sarb $2, %al -; AVX1-NEXT: addb %dl, %al -; AVX1-NEXT: mulb %dil -; AVX1-NEXT: subb %al, %cl -; AVX1-NEXT: movzbl %cl, %eax -; AVX1-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $11, %xmm1, %eax -; AVX1-NEXT: movsbl %al, %ecx -; AVX1-NEXT: imull $-109, %ecx, %eax -; AVX1-NEXT: shrl $8, %eax -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movb %al, %dl -; AVX1-NEXT: shrb $7, %dl -; AVX1-NEXT: sarb $2, %al -; AVX1-NEXT: addb %dl, %al -; AVX1-NEXT: mulb %dil -; AVX1-NEXT: subb %al, %cl -; AVX1-NEXT: movzbl %cl, %eax -; AVX1-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $12, %xmm1, %eax -; AVX1-NEXT: movsbl %al, %ecx -; AVX1-NEXT: imull $-109, %ecx, %eax -; AVX1-NEXT: shrl $8, %eax -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movb %al, %dl -; AVX1-NEXT: shrb $7, %dl -; AVX1-NEXT: sarb $2, %al -; AVX1-NEXT: addb %dl, %al -; AVX1-NEXT: mulb %dil -; AVX1-NEXT: subb %al, %cl -; AVX1-NEXT: movzbl %cl, %eax -; AVX1-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $13, %xmm1, %eax -; AVX1-NEXT: movsbl %al, %ecx -; AVX1-NEXT: imull $-109, %ecx, %eax -; AVX1-NEXT: shrl $8, %eax -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movb %al, %dl -; AVX1-NEXT: shrb $7, %dl -; AVX1-NEXT: sarb $2, %al -; AVX1-NEXT: addb %dl, %al -; AVX1-NEXT: mulb %dil -; AVX1-NEXT: subb %al, %cl -; AVX1-NEXT: movzbl %cl, %eax -; AVX1-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $14, %xmm1, %eax -; AVX1-NEXT: movsbl %al, %ecx -; AVX1-NEXT: imull $-109, %ecx, %eax -; AVX1-NEXT: shrl $8, %eax -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movb %al, %dl -; AVX1-NEXT: shrb $7, %dl -; AVX1-NEXT: sarb $2, %al -; AVX1-NEXT: addb %dl, %al -; AVX1-NEXT: mulb %dil -; AVX1-NEXT: subb %al, %cl -; AVX1-NEXT: movzbl %cl, %eax -; AVX1-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $15, %xmm1, %eax -; AVX1-NEXT: movsbl %al, %ecx -; AVX1-NEXT: imull $-109, %ecx, %eax -; AVX1-NEXT: shrl $8, %eax -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movb %al, %dl -; AVX1-NEXT: shrb $7, %dl -; AVX1-NEXT: sarb $2, %al -; AVX1-NEXT: addb %dl, %al -; AVX1-NEXT: mulb %dil -; AVX1-NEXT: subb %al, %cl -; AVX1-NEXT: movzbl %cl, %eax -; AVX1-NEXT: vpinsrb $15, %eax, %xmm2, %xmm1 -; AVX1-NEXT: vpextrb $1, %xmm0, %eax -; AVX1-NEXT: movsbl %al, %ecx -; AVX1-NEXT: imull $-109, %ecx, %eax -; AVX1-NEXT: shrl $8, %eax -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movb %al, %dl -; AVX1-NEXT: shrb $7, %dl -; AVX1-NEXT: sarb $2, %al -; AVX1-NEXT: addb %dl, %al -; AVX1-NEXT: mulb %dil -; AVX1-NEXT: subb 
%al, %cl -; AVX1-NEXT: movzbl %cl, %esi -; AVX1-NEXT: vpextrb $0, %xmm0, %eax -; AVX1-NEXT: movsbl %al, %edx -; AVX1-NEXT: imull $-109, %edx, %eax -; AVX1-NEXT: shrl $8, %eax -; AVX1-NEXT: addb %dl, %al -; AVX1-NEXT: movb %al, %cl -; AVX1-NEXT: shrb $7, %cl -; AVX1-NEXT: sarb $2, %al -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: mulb %dil -; AVX1-NEXT: subb %al, %dl -; AVX1-NEXT: movzbl %dl, %eax -; AVX1-NEXT: vmovd %eax, %xmm2 -; AVX1-NEXT: vpinsrb $1, %esi, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $2, %xmm0, %eax -; AVX1-NEXT: movsbl %al, %ecx -; AVX1-NEXT: imull $-109, %ecx, %eax -; AVX1-NEXT: shrl $8, %eax -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movb %al, %dl -; AVX1-NEXT: shrb $7, %dl -; AVX1-NEXT: sarb $2, %al -; AVX1-NEXT: addb %dl, %al -; AVX1-NEXT: mulb %dil -; AVX1-NEXT: subb %al, %cl -; AVX1-NEXT: movzbl %cl, %eax -; AVX1-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $3, %xmm0, %eax -; AVX1-NEXT: movsbl %al, %ecx -; AVX1-NEXT: imull $-109, %ecx, %eax -; AVX1-NEXT: shrl $8, %eax -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movb %al, %dl -; AVX1-NEXT: shrb $7, %dl -; AVX1-NEXT: sarb $2, %al -; AVX1-NEXT: addb %dl, %al -; AVX1-NEXT: mulb %dil -; AVX1-NEXT: subb %al, %cl -; AVX1-NEXT: movzbl %cl, %eax -; AVX1-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $4, %xmm0, %eax -; AVX1-NEXT: movsbl %al, %ecx -; AVX1-NEXT: imull $-109, %ecx, %eax -; AVX1-NEXT: shrl $8, %eax -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movb %al, %dl -; AVX1-NEXT: shrb $7, %dl -; AVX1-NEXT: sarb $2, %al -; AVX1-NEXT: addb %dl, %al -; AVX1-NEXT: mulb %dil -; AVX1-NEXT: subb %al, %cl -; AVX1-NEXT: movzbl %cl, %eax -; AVX1-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $5, %xmm0, %eax -; AVX1-NEXT: movsbl %al, %ecx -; AVX1-NEXT: imull $-109, %ecx, %eax -; AVX1-NEXT: shrl $8, %eax -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movb %al, %dl -; AVX1-NEXT: shrb $7, %dl -; AVX1-NEXT: sarb $2, %al -; AVX1-NEXT: addb %dl, %al -; AVX1-NEXT: mulb %dil -; AVX1-NEXT: subb %al, %cl -; AVX1-NEXT: movzbl %cl, %eax -; AVX1-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $6, %xmm0, %eax -; AVX1-NEXT: movsbl %al, %ecx -; AVX1-NEXT: imull $-109, %ecx, %eax -; AVX1-NEXT: shrl $8, %eax -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movb %al, %dl -; AVX1-NEXT: shrb $7, %dl -; AVX1-NEXT: sarb $2, %al -; AVX1-NEXT: addb %dl, %al -; AVX1-NEXT: mulb %dil -; AVX1-NEXT: subb %al, %cl -; AVX1-NEXT: movzbl %cl, %eax -; AVX1-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $7, %xmm0, %eax -; AVX1-NEXT: movsbl %al, %ecx -; AVX1-NEXT: imull $-109, %ecx, %eax -; AVX1-NEXT: shrl $8, %eax -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movb %al, %dl -; AVX1-NEXT: shrb $7, %dl -; AVX1-NEXT: sarb $2, %al -; AVX1-NEXT: addb %dl, %al -; AVX1-NEXT: mulb %dil -; AVX1-NEXT: subb %al, %cl -; AVX1-NEXT: movzbl %cl, %eax -; AVX1-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $8, %xmm0, %eax -; AVX1-NEXT: movsbl %al, %ecx -; AVX1-NEXT: imull $-109, %ecx, %eax -; AVX1-NEXT: shrl $8, %eax -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movb %al, %dl -; AVX1-NEXT: shrb $7, %dl -; AVX1-NEXT: sarb $2, %al -; AVX1-NEXT: addb %dl, %al -; AVX1-NEXT: mulb %dil -; AVX1-NEXT: subb %al, %cl -; AVX1-NEXT: movzbl %cl, %eax -; AVX1-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $9, %xmm0, %eax -; AVX1-NEXT: movsbl %al, %ecx -; AVX1-NEXT: imull $-109, %ecx, %eax -; AVX1-NEXT: shrl $8, %eax -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movb %al, %dl -; AVX1-NEXT: shrb $7, %dl -; AVX1-NEXT: sarb $2, %al -; AVX1-NEXT: addb %dl, %al 
-; AVX1-NEXT: mulb %dil -; AVX1-NEXT: subb %al, %cl -; AVX1-NEXT: movzbl %cl, %eax -; AVX1-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $10, %xmm0, %eax -; AVX1-NEXT: movsbl %al, %ecx -; AVX1-NEXT: imull $-109, %ecx, %eax -; AVX1-NEXT: shrl $8, %eax -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movb %al, %dl -; AVX1-NEXT: shrb $7, %dl -; AVX1-NEXT: sarb $2, %al -; AVX1-NEXT: addb %dl, %al -; AVX1-NEXT: mulb %dil -; AVX1-NEXT: subb %al, %cl -; AVX1-NEXT: movzbl %cl, %eax -; AVX1-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $11, %xmm0, %eax -; AVX1-NEXT: movsbl %al, %ecx -; AVX1-NEXT: imull $-109, %ecx, %eax -; AVX1-NEXT: shrl $8, %eax -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movb %al, %dl -; AVX1-NEXT: shrb $7, %dl -; AVX1-NEXT: sarb $2, %al -; AVX1-NEXT: addb %dl, %al -; AVX1-NEXT: mulb %dil -; AVX1-NEXT: subb %al, %cl -; AVX1-NEXT: movzbl %cl, %eax -; AVX1-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $12, %xmm0, %eax -; AVX1-NEXT: movsbl %al, %ecx -; AVX1-NEXT: imull $-109, %ecx, %eax -; AVX1-NEXT: shrl $8, %eax -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movb %al, %dl -; AVX1-NEXT: shrb $7, %dl -; AVX1-NEXT: sarb $2, %al -; AVX1-NEXT: addb %dl, %al -; AVX1-NEXT: mulb %dil -; AVX1-NEXT: subb %al, %cl -; AVX1-NEXT: movzbl %cl, %eax -; AVX1-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $13, %xmm0, %eax -; AVX1-NEXT: movsbl %al, %ecx -; AVX1-NEXT: imull $-109, %ecx, %eax -; AVX1-NEXT: shrl $8, %eax -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movb %al, %dl -; AVX1-NEXT: shrb $7, %dl -; AVX1-NEXT: sarb $2, %al -; AVX1-NEXT: addb %dl, %al -; AVX1-NEXT: mulb %dil -; AVX1-NEXT: subb %al, %cl -; AVX1-NEXT: movzbl %cl, %eax -; AVX1-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $14, %xmm0, %eax -; AVX1-NEXT: movsbl %al, %ecx -; AVX1-NEXT: imull $-109, %ecx, %eax -; AVX1-NEXT: shrl $8, %eax -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movb %al, %dl -; AVX1-NEXT: shrb $7, %dl -; AVX1-NEXT: sarb $2, %al -; AVX1-NEXT: addb %dl, %al -; AVX1-NEXT: mulb %dil -; AVX1-NEXT: subb %al, %cl -; AVX1-NEXT: movzbl %cl, %eax -; AVX1-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $15, %xmm0, %eax -; AVX1-NEXT: movsbl %al, %ecx -; AVX1-NEXT: imull $-109, %ecx, %eax -; AVX1-NEXT: shrl $8, %eax -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: movb %al, %dl -; AVX1-NEXT: shrb $7, %dl -; AVX1-NEXT: sarb $2, %al -; AVX1-NEXT: addb %dl, %al -; AVX1-NEXT: mulb %dil -; AVX1-NEXT: subb %al, %cl -; AVX1-NEXT: movzbl %cl, %eax -; AVX1-NEXT: vpinsrb $15, %eax, %xmm2, %xmm0 -; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2 +; AVX1-NEXT: vpmovsxbw %xmm2, %xmm3 +; AVX1-NEXT: vpmovsxbw {{.*}}(%rip), %xmm1 +; AVX1-NEXT: vpmullw %xmm1, %xmm3, %xmm3 +; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3 +; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm2[2,3,0,1] +; AVX1-NEXT: vpmovsxbw %xmm4, %xmm4 +; AVX1-NEXT: vpmullw %xmm1, %xmm4, %xmm4 +; AVX1-NEXT: vpsrlw $8, %xmm4, %xmm4 +; AVX1-NEXT: vpackuswb %xmm4, %xmm3, %xmm3 +; AVX1-NEXT: vpaddb %xmm2, %xmm3, %xmm3 +; AVX1-NEXT: vpsrlw $7, %xmm3, %xmm4 +; AVX1-NEXT: vmovdqa {{.*#+}} xmm8 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1] +; AVX1-NEXT: vpand %xmm8, %xmm4, %xmm4 +; AVX1-NEXT: vpsrlw $2, %xmm3, %xmm3 +; AVX1-NEXT: vmovdqa {{.*#+}} xmm9 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63] +; AVX1-NEXT: vpand %xmm9, %xmm3, %xmm3 +; AVX1-NEXT: vmovdqa {{.*#+}} xmm7 = [32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32] +; AVX1-NEXT: vpxor %xmm7, %xmm3, %xmm3 +; AVX1-NEXT: vpsubb %xmm7, %xmm3, %xmm3 +; AVX1-NEXT: 
vpaddb %xmm4, %xmm3, %xmm3 +; AVX1-NEXT: vpmovsxbw %xmm3, %xmm4 +; AVX1-NEXT: vpmovsxbw {{.*}}(%rip), %xmm5 +; AVX1-NEXT: vpmullw %xmm5, %xmm4, %xmm4 +; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [255,255,255,255,255,255,255,255] +; AVX1-NEXT: vpand %xmm6, %xmm4, %xmm4 +; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,3,0,1] +; AVX1-NEXT: vpmovsxbw %xmm3, %xmm3 +; AVX1-NEXT: vpmullw %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vpand %xmm6, %xmm3, %xmm3 +; AVX1-NEXT: vpackuswb %xmm3, %xmm4, %xmm3 +; AVX1-NEXT: vpsubb %xmm3, %xmm2, %xmm2 +; AVX1-NEXT: vpmovsxbw %xmm0, %xmm3 +; AVX1-NEXT: vpmullw %xmm1, %xmm3, %xmm3 +; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3 +; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[2,3,0,1] +; AVX1-NEXT: vpmovsxbw %xmm4, %xmm4 +; AVX1-NEXT: vpmullw %xmm1, %xmm4, %xmm1 +; AVX1-NEXT: vpsrlw $8, %xmm1, %xmm1 +; AVX1-NEXT: vpackuswb %xmm1, %xmm3, %xmm1 +; AVX1-NEXT: vpaddb %xmm0, %xmm1, %xmm1 +; AVX1-NEXT: vpsrlw $7, %xmm1, %xmm3 +; AVX1-NEXT: vpand %xmm8, %xmm3, %xmm3 +; AVX1-NEXT: vpsrlw $2, %xmm1, %xmm1 +; AVX1-NEXT: vpand %xmm9, %xmm1, %xmm1 +; AVX1-NEXT: vpxor %xmm7, %xmm1, %xmm1 +; AVX1-NEXT: vpsubb %xmm7, %xmm1, %xmm1 +; AVX1-NEXT: vpaddb %xmm3, %xmm1, %xmm1 +; AVX1-NEXT: vpmovsxbw %xmm1, %xmm3 +; AVX1-NEXT: vpmullw %xmm5, %xmm3, %xmm3 +; AVX1-NEXT: vpand %xmm6, %xmm3, %xmm3 +; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] +; AVX1-NEXT: vpmovsxbw %xmm1, %xmm1 +; AVX1-NEXT: vpmullw %xmm5, %xmm1, %xmm1 +; AVX1-NEXT: vpand %xmm6, %xmm1, %xmm1 +; AVX1-NEXT: vpackuswb %xmm1, %xmm3, %xmm1 +; AVX1-NEXT: vpsubb %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 ; AVX1-NEXT: retq ; ; AVX2-LABEL: test_rem7_32i8: ; AVX2: # BB#0: -; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX2-NEXT: vpextrb $1, %xmm1, %eax -; AVX2-NEXT: movsbl %al, %edx -; AVX2-NEXT: imull $-109, %edx, %eax -; AVX2-NEXT: shrl $8, %eax -; AVX2-NEXT: addb %dl, %al -; AVX2-NEXT: movb %al, %cl -; AVX2-NEXT: shrb $7, %cl -; AVX2-NEXT: sarb $2, %al -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movb $7, %dil -; AVX2-NEXT: mulb %dil -; AVX2-NEXT: subb %al, %dl -; AVX2-NEXT: movzbl %dl, %edx -; AVX2-NEXT: vpextrb $0, %xmm1, %eax -; AVX2-NEXT: movsbl %al, %esi -; AVX2-NEXT: imull $-109, %esi, %eax -; AVX2-NEXT: shrl $8, %eax -; AVX2-NEXT: addb %sil, %al -; AVX2-NEXT: movb %al, %cl -; AVX2-NEXT: shrb $7, %cl -; AVX2-NEXT: sarb $2, %al -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: mulb %dil -; AVX2-NEXT: subb %al, %sil -; AVX2-NEXT: movzbl %sil, %eax -; AVX2-NEXT: vmovd %eax, %xmm2 -; AVX2-NEXT: vpinsrb $1, %edx, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $2, %xmm1, %eax -; AVX2-NEXT: movsbl %al, %ecx -; AVX2-NEXT: imull $-109, %ecx, %eax -; AVX2-NEXT: shrl $8, %eax -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movb %al, %dl -; AVX2-NEXT: shrb $7, %dl -; AVX2-NEXT: sarb $2, %al -; AVX2-NEXT: addb %dl, %al -; AVX2-NEXT: mulb %dil -; AVX2-NEXT: subb %al, %cl -; AVX2-NEXT: movzbl %cl, %eax -; AVX2-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $3, %xmm1, %eax -; AVX2-NEXT: movsbl %al, %ecx -; AVX2-NEXT: imull $-109, %ecx, %eax -; AVX2-NEXT: shrl $8, %eax -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movb %al, %dl -; AVX2-NEXT: shrb $7, %dl -; AVX2-NEXT: sarb $2, %al -; AVX2-NEXT: addb %dl, %al -; AVX2-NEXT: mulb %dil -; AVX2-NEXT: subb %al, %cl -; AVX2-NEXT: movzbl %cl, %eax -; AVX2-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $4, %xmm1, %eax -; AVX2-NEXT: movsbl %al, %ecx -; AVX2-NEXT: imull $-109, %ecx, %eax -; AVX2-NEXT: shrl $8, %eax -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movb %al, %dl -; AVX2-NEXT: shrb $7, %dl -; 
AVX2-NEXT: sarb $2, %al -; AVX2-NEXT: addb %dl, %al -; AVX2-NEXT: mulb %dil -; AVX2-NEXT: subb %al, %cl -; AVX2-NEXT: movzbl %cl, %eax -; AVX2-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $5, %xmm1, %eax -; AVX2-NEXT: movsbl %al, %ecx -; AVX2-NEXT: imull $-109, %ecx, %eax -; AVX2-NEXT: shrl $8, %eax -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movb %al, %dl -; AVX2-NEXT: shrb $7, %dl -; AVX2-NEXT: sarb $2, %al -; AVX2-NEXT: addb %dl, %al -; AVX2-NEXT: mulb %dil -; AVX2-NEXT: subb %al, %cl -; AVX2-NEXT: movzbl %cl, %eax -; AVX2-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $6, %xmm1, %eax -; AVX2-NEXT: movsbl %al, %ecx -; AVX2-NEXT: imull $-109, %ecx, %eax -; AVX2-NEXT: shrl $8, %eax -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movb %al, %dl -; AVX2-NEXT: shrb $7, %dl -; AVX2-NEXT: sarb $2, %al -; AVX2-NEXT: addb %dl, %al -; AVX2-NEXT: mulb %dil -; AVX2-NEXT: subb %al, %cl -; AVX2-NEXT: movzbl %cl, %eax -; AVX2-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $7, %xmm1, %eax -; AVX2-NEXT: movsbl %al, %ecx -; AVX2-NEXT: imull $-109, %ecx, %eax -; AVX2-NEXT: shrl $8, %eax -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movb %al, %dl -; AVX2-NEXT: shrb $7, %dl -; AVX2-NEXT: sarb $2, %al -; AVX2-NEXT: addb %dl, %al -; AVX2-NEXT: mulb %dil -; AVX2-NEXT: subb %al, %cl -; AVX2-NEXT: movzbl %cl, %eax -; AVX2-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $8, %xmm1, %eax -; AVX2-NEXT: movsbl %al, %ecx -; AVX2-NEXT: imull $-109, %ecx, %eax -; AVX2-NEXT: shrl $8, %eax -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movb %al, %dl -; AVX2-NEXT: shrb $7, %dl -; AVX2-NEXT: sarb $2, %al -; AVX2-NEXT: addb %dl, %al -; AVX2-NEXT: mulb %dil -; AVX2-NEXT: subb %al, %cl -; AVX2-NEXT: movzbl %cl, %eax -; AVX2-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $9, %xmm1, %eax -; AVX2-NEXT: movsbl %al, %ecx -; AVX2-NEXT: imull $-109, %ecx, %eax -; AVX2-NEXT: shrl $8, %eax -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movb %al, %dl -; AVX2-NEXT: shrb $7, %dl -; AVX2-NEXT: sarb $2, %al -; AVX2-NEXT: addb %dl, %al -; AVX2-NEXT: mulb %dil -; AVX2-NEXT: subb %al, %cl -; AVX2-NEXT: movzbl %cl, %eax -; AVX2-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $10, %xmm1, %eax -; AVX2-NEXT: movsbl %al, %ecx -; AVX2-NEXT: imull $-109, %ecx, %eax -; AVX2-NEXT: shrl $8, %eax -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movb %al, %dl -; AVX2-NEXT: shrb $7, %dl -; AVX2-NEXT: sarb $2, %al -; AVX2-NEXT: addb %dl, %al -; AVX2-NEXT: mulb %dil -; AVX2-NEXT: subb %al, %cl -; AVX2-NEXT: movzbl %cl, %eax -; AVX2-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $11, %xmm1, %eax -; AVX2-NEXT: movsbl %al, %ecx -; AVX2-NEXT: imull $-109, %ecx, %eax -; AVX2-NEXT: shrl $8, %eax -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movb %al, %dl -; AVX2-NEXT: shrb $7, %dl -; AVX2-NEXT: sarb $2, %al -; AVX2-NEXT: addb %dl, %al -; AVX2-NEXT: mulb %dil -; AVX2-NEXT: subb %al, %cl -; AVX2-NEXT: movzbl %cl, %eax -; AVX2-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $12, %xmm1, %eax -; AVX2-NEXT: movsbl %al, %ecx -; AVX2-NEXT: imull $-109, %ecx, %eax -; AVX2-NEXT: shrl $8, %eax -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movb %al, %dl -; AVX2-NEXT: shrb $7, %dl -; AVX2-NEXT: sarb $2, %al -; AVX2-NEXT: addb %dl, %al -; AVX2-NEXT: mulb %dil -; AVX2-NEXT: subb %al, %cl -; AVX2-NEXT: movzbl %cl, %eax -; AVX2-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $13, %xmm1, %eax -; AVX2-NEXT: movsbl %al, %ecx -; AVX2-NEXT: imull $-109, %ecx, %eax -; AVX2-NEXT: shrl $8, %eax -; AVX2-NEXT: addb %cl, 
%al -; AVX2-NEXT: movb %al, %dl -; AVX2-NEXT: shrb $7, %dl -; AVX2-NEXT: sarb $2, %al -; AVX2-NEXT: addb %dl, %al -; AVX2-NEXT: mulb %dil -; AVX2-NEXT: subb %al, %cl -; AVX2-NEXT: movzbl %cl, %eax -; AVX2-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $14, %xmm1, %eax -; AVX2-NEXT: movsbl %al, %ecx -; AVX2-NEXT: imull $-109, %ecx, %eax -; AVX2-NEXT: shrl $8, %eax -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movb %al, %dl -; AVX2-NEXT: shrb $7, %dl -; AVX2-NEXT: sarb $2, %al -; AVX2-NEXT: addb %dl, %al -; AVX2-NEXT: mulb %dil -; AVX2-NEXT: subb %al, %cl -; AVX2-NEXT: movzbl %cl, %eax -; AVX2-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $15, %xmm1, %eax -; AVX2-NEXT: movsbl %al, %ecx -; AVX2-NEXT: imull $-109, %ecx, %eax -; AVX2-NEXT: shrl $8, %eax -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movb %al, %dl -; AVX2-NEXT: shrb $7, %dl -; AVX2-NEXT: sarb $2, %al -; AVX2-NEXT: addb %dl, %al -; AVX2-NEXT: mulb %dil -; AVX2-NEXT: subb %al, %cl -; AVX2-NEXT: movzbl %cl, %eax -; AVX2-NEXT: vpinsrb $15, %eax, %xmm2, %xmm1 -; AVX2-NEXT: vpextrb $1, %xmm0, %eax -; AVX2-NEXT: movsbl %al, %ecx -; AVX2-NEXT: imull $-109, %ecx, %eax -; AVX2-NEXT: shrl $8, %eax -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movb %al, %dl -; AVX2-NEXT: shrb $7, %dl -; AVX2-NEXT: sarb $2, %al -; AVX2-NEXT: addb %dl, %al -; AVX2-NEXT: mulb %dil -; AVX2-NEXT: subb %al, %cl -; AVX2-NEXT: movzbl %cl, %esi -; AVX2-NEXT: vpextrb $0, %xmm0, %eax -; AVX2-NEXT: movsbl %al, %edx -; AVX2-NEXT: imull $-109, %edx, %eax -; AVX2-NEXT: shrl $8, %eax -; AVX2-NEXT: addb %dl, %al -; AVX2-NEXT: movb %al, %cl -; AVX2-NEXT: shrb $7, %cl -; AVX2-NEXT: sarb $2, %al -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: mulb %dil -; AVX2-NEXT: subb %al, %dl -; AVX2-NEXT: movzbl %dl, %eax -; AVX2-NEXT: vmovd %eax, %xmm2 -; AVX2-NEXT: vpinsrb $1, %esi, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $2, %xmm0, %eax -; AVX2-NEXT: movsbl %al, %ecx -; AVX2-NEXT: imull $-109, %ecx, %eax -; AVX2-NEXT: shrl $8, %eax -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movb %al, %dl -; AVX2-NEXT: shrb $7, %dl -; AVX2-NEXT: sarb $2, %al -; AVX2-NEXT: addb %dl, %al -; AVX2-NEXT: mulb %dil -; AVX2-NEXT: subb %al, %cl -; AVX2-NEXT: movzbl %cl, %eax -; AVX2-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $3, %xmm0, %eax -; AVX2-NEXT: movsbl %al, %ecx -; AVX2-NEXT: imull $-109, %ecx, %eax -; AVX2-NEXT: shrl $8, %eax -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movb %al, %dl -; AVX2-NEXT: shrb $7, %dl -; AVX2-NEXT: sarb $2, %al -; AVX2-NEXT: addb %dl, %al -; AVX2-NEXT: mulb %dil -; AVX2-NEXT: subb %al, %cl -; AVX2-NEXT: movzbl %cl, %eax -; AVX2-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $4, %xmm0, %eax -; AVX2-NEXT: movsbl %al, %ecx -; AVX2-NEXT: imull $-109, %ecx, %eax -; AVX2-NEXT: shrl $8, %eax -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movb %al, %dl -; AVX2-NEXT: shrb $7, %dl -; AVX2-NEXT: sarb $2, %al -; AVX2-NEXT: addb %dl, %al -; AVX2-NEXT: mulb %dil -; AVX2-NEXT: subb %al, %cl -; AVX2-NEXT: movzbl %cl, %eax -; AVX2-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $5, %xmm0, %eax -; AVX2-NEXT: movsbl %al, %ecx -; AVX2-NEXT: imull $-109, %ecx, %eax -; AVX2-NEXT: shrl $8, %eax -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movb %al, %dl -; AVX2-NEXT: shrb $7, %dl -; AVX2-NEXT: sarb $2, %al -; AVX2-NEXT: addb %dl, %al -; AVX2-NEXT: mulb %dil -; AVX2-NEXT: subb %al, %cl -; AVX2-NEXT: movzbl %cl, %eax -; AVX2-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $6, %xmm0, %eax -; AVX2-NEXT: movsbl %al, %ecx -; AVX2-NEXT: imull $-109, %ecx, %eax -; 
AVX2-NEXT: shrl $8, %eax -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movb %al, %dl -; AVX2-NEXT: shrb $7, %dl -; AVX2-NEXT: sarb $2, %al -; AVX2-NEXT: addb %dl, %al -; AVX2-NEXT: mulb %dil -; AVX2-NEXT: subb %al, %cl -; AVX2-NEXT: movzbl %cl, %eax -; AVX2-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $7, %xmm0, %eax -; AVX2-NEXT: movsbl %al, %ecx -; AVX2-NEXT: imull $-109, %ecx, %eax -; AVX2-NEXT: shrl $8, %eax -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movb %al, %dl -; AVX2-NEXT: shrb $7, %dl -; AVX2-NEXT: sarb $2, %al -; AVX2-NEXT: addb %dl, %al -; AVX2-NEXT: mulb %dil -; AVX2-NEXT: subb %al, %cl -; AVX2-NEXT: movzbl %cl, %eax -; AVX2-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $8, %xmm0, %eax -; AVX2-NEXT: movsbl %al, %ecx -; AVX2-NEXT: imull $-109, %ecx, %eax -; AVX2-NEXT: shrl $8, %eax -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movb %al, %dl -; AVX2-NEXT: shrb $7, %dl -; AVX2-NEXT: sarb $2, %al -; AVX2-NEXT: addb %dl, %al -; AVX2-NEXT: mulb %dil -; AVX2-NEXT: subb %al, %cl -; AVX2-NEXT: movzbl %cl, %eax -; AVX2-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $9, %xmm0, %eax -; AVX2-NEXT: movsbl %al, %ecx -; AVX2-NEXT: imull $-109, %ecx, %eax -; AVX2-NEXT: shrl $8, %eax -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movb %al, %dl -; AVX2-NEXT: shrb $7, %dl -; AVX2-NEXT: sarb $2, %al -; AVX2-NEXT: addb %dl, %al -; AVX2-NEXT: mulb %dil -; AVX2-NEXT: subb %al, %cl -; AVX2-NEXT: movzbl %cl, %eax -; AVX2-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $10, %xmm0, %eax -; AVX2-NEXT: movsbl %al, %ecx -; AVX2-NEXT: imull $-109, %ecx, %eax -; AVX2-NEXT: shrl $8, %eax -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movb %al, %dl -; AVX2-NEXT: shrb $7, %dl -; AVX2-NEXT: sarb $2, %al -; AVX2-NEXT: addb %dl, %al -; AVX2-NEXT: mulb %dil -; AVX2-NEXT: subb %al, %cl -; AVX2-NEXT: movzbl %cl, %eax -; AVX2-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $11, %xmm0, %eax -; AVX2-NEXT: movsbl %al, %ecx -; AVX2-NEXT: imull $-109, %ecx, %eax -; AVX2-NEXT: shrl $8, %eax -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movb %al, %dl -; AVX2-NEXT: shrb $7, %dl -; AVX2-NEXT: sarb $2, %al -; AVX2-NEXT: addb %dl, %al -; AVX2-NEXT: mulb %dil -; AVX2-NEXT: subb %al, %cl -; AVX2-NEXT: movzbl %cl, %eax -; AVX2-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $12, %xmm0, %eax -; AVX2-NEXT: movsbl %al, %ecx -; AVX2-NEXT: imull $-109, %ecx, %eax -; AVX2-NEXT: shrl $8, %eax -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movb %al, %dl -; AVX2-NEXT: shrb $7, %dl -; AVX2-NEXT: sarb $2, %al -; AVX2-NEXT: addb %dl, %al -; AVX2-NEXT: mulb %dil -; AVX2-NEXT: subb %al, %cl -; AVX2-NEXT: movzbl %cl, %eax -; AVX2-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $13, %xmm0, %eax -; AVX2-NEXT: movsbl %al, %ecx -; AVX2-NEXT: imull $-109, %ecx, %eax -; AVX2-NEXT: shrl $8, %eax -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movb %al, %dl -; AVX2-NEXT: shrb $7, %dl -; AVX2-NEXT: sarb $2, %al -; AVX2-NEXT: addb %dl, %al -; AVX2-NEXT: mulb %dil -; AVX2-NEXT: subb %al, %cl -; AVX2-NEXT: movzbl %cl, %eax -; AVX2-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $14, %xmm0, %eax -; AVX2-NEXT: movsbl %al, %ecx -; AVX2-NEXT: imull $-109, %ecx, %eax -; AVX2-NEXT: shrl $8, %eax -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: movb %al, %dl -; AVX2-NEXT: shrb $7, %dl -; AVX2-NEXT: sarb $2, %al -; AVX2-NEXT: addb %dl, %al -; AVX2-NEXT: mulb %dil -; AVX2-NEXT: subb %al, %cl -; AVX2-NEXT: movzbl %cl, %eax -; AVX2-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $15, %xmm0, %eax -; 
AVX2-NEXT: movsbl %al, %ecx
-; AVX2-NEXT: imull $-109, %ecx, %eax
-; AVX2-NEXT: shrl $8, %eax
-; AVX2-NEXT: addb %cl, %al
-; AVX2-NEXT: movb %al, %dl
-; AVX2-NEXT: shrb $7, %dl
-; AVX2-NEXT: sarb $2, %al
-; AVX2-NEXT: addb %dl, %al
-; AVX2-NEXT: mulb %dil
-; AVX2-NEXT: subb %al, %cl
-; AVX2-NEXT: movzbl %cl, %eax
-; AVX2-NEXT: vpinsrb $15, %eax, %xmm2, %xmm0
-; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147,147]
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX2-NEXT: vpmovsxbw %xmm2, %ymm2
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm3
+; AVX2-NEXT: vpmovsxbw %xmm3, %ymm3
+; AVX2-NEXT: vpmullw %ymm2, %ymm3, %ymm2
+; AVX2-NEXT: vpsrlw $8, %ymm2, %ymm2
+; AVX2-NEXT: vpmovsxbw %xmm1, %ymm1
+; AVX2-NEXT: vpmovsxbw %xmm0, %ymm3
+; AVX2-NEXT: vpmullw %ymm1, %ymm3, %ymm1
+; AVX2-NEXT: vpsrlw $8, %ymm1, %ymm1
+; AVX2-NEXT: vperm2i128 {{.*#+}} ymm3 = ymm1[2,3],ymm2[2,3]
+; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
+; AVX2-NEXT: vpackuswb %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpaddb %ymm0, %ymm1, %ymm1
+; AVX2-NEXT: vpsrlw $2, %ymm1, %ymm2
+; AVX2-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32]
+; AVX2-NEXT: vpxor %ymm3, %ymm2, %ymm2
+; AVX2-NEXT: vpsubb %ymm3, %ymm2, %ymm2
+; AVX2-NEXT: vpsrlw $7, %ymm1, %ymm1
+; AVX2-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX2-NEXT: vpaddb %ymm1, %ymm2, %ymm1
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX2-NEXT: vpmovsxbw %xmm2, %ymm2
+; AVX2-NEXT: vpmovsxbw {{.*}}(%rip), %ymm3
+; AVX2-NEXT: vpmullw %ymm3, %ymm2, %ymm2
+; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm4
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm5 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX2-NEXT: vpshufb %xmm5, %xmm4, %xmm4
+; AVX2-NEXT: vpshufb %xmm5, %xmm2, %xmm2
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm4[0]
+; AVX2-NEXT: vpmovsxbw %xmm1, %ymm1
+; AVX2-NEXT: vpmullw %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm3
+; AVX2-NEXT: vpshufb %xmm5, %xmm3, %xmm3
+; AVX2-NEXT: vpshufb %xmm5, %xmm1, %xmm1
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
+; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
+; AVX2-NEXT: vpsubb %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT: retq
  %res = srem <32 x i8> %a, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
  ret <32 x i8> %res
Index: test/CodeGen/X86/vector-idiv-udiv-128.ll
===================================================================
--- test/CodeGen/X86/vector-idiv-udiv-128.ll
+++ test/CodeGen/X86/vector-idiv-udiv-128.ll
@@ -172,468 +172,81 @@
 define <16 x i8> @test_div7_16i8(<16 x i8> %a) nounwind {
 ; SSE2-LABEL: test_div7_16i8:
 ; SSE2: # BB#0:
-; SSE2-NEXT: pushq %rbp
-; SSE2-NEXT: pushq %rbx
-; SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
-; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; SSE2-NEXT: imull $37, %eax, %ecx
-; SSE2-NEXT: shrl $8, %ecx
-; SSE2-NEXT: subb %cl, %al
-; SSE2-NEXT: shrb %al
-; SSE2-NEXT: addb %cl, %al
-; SSE2-NEXT: shrb $2, %al
-; SSE2-NEXT: movzbl %al, %eax
-; SSE2-NEXT: movd %eax, %xmm0
-; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ebx
-; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
-; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r9d
-; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
-; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r11d
-; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
-; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r8d
-; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %esi
-; SSE2-NEXT: imull $37, %esi, %edi
-;
SSE2-NEXT: shrl $8, %edi -; SSE2-NEXT: subb %dil, %sil -; SSE2-NEXT: shrb %sil -; SSE2-NEXT: addb %dil, %sil -; SSE2-NEXT: shrb $2, %sil -; SSE2-NEXT: movzbl %sil, %esi -; SSE2-NEXT: movd %esi, %xmm1 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] -; SSE2-NEXT: imull $37, %eax, %esi -; SSE2-NEXT: shrl $8, %esi -; SSE2-NEXT: subb %sil, %al -; SSE2-NEXT: shrb %al -; SSE2-NEXT: addb %sil, %al -; SSE2-NEXT: shrb $2, %al -; SSE2-NEXT: movzbl %al, %eax -; SSE2-NEXT: movd %eax, %xmm2 -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %esi -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r10d -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edi -; SSE2-NEXT: imull $37, %edi, %ebp -; SSE2-NEXT: shrl $8, %ebp -; SSE2-NEXT: subb %bpl, %dil -; SSE2-NEXT: shrb %dil -; SSE2-NEXT: addb %bpl, %dil -; SSE2-NEXT: shrb $2, %dil -; SSE2-NEXT: movzbl %dil, %edi -; SSE2-NEXT: movd %edi, %xmm0 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] -; SSE2-NEXT: imull $37, %edx, %edi -; SSE2-NEXT: shrl $8, %edi -; SSE2-NEXT: subb %dil, %dl -; SSE2-NEXT: shrb %dl -; SSE2-NEXT: addb %dil, %dl -; SSE2-NEXT: shrb $2, %dl -; SSE2-NEXT: movzbl %dl, %edx -; SSE2-NEXT: movd %edx, %xmm1 -; SSE2-NEXT: imull $37, %esi, %edx -; SSE2-NEXT: shrl $8, %edx -; SSE2-NEXT: subb %dl, %sil -; SSE2-NEXT: shrb %sil -; SSE2-NEXT: addb %dl, %sil -; SSE2-NEXT: shrb $2, %sil -; SSE2-NEXT: movzbl %sil, %edx -; SSE2-NEXT: movd %edx, %xmm2 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7] -; SSE2-NEXT: imull $37, %ecx, %edx -; SSE2-NEXT: shrl $8, %edx -; SSE2-NEXT: subb %dl, %cl -; SSE2-NEXT: shrb %cl -; SSE2-NEXT: addb %dl, %cl -; SSE2-NEXT: shrb $2, %cl -; SSE2-NEXT: movzbl %cl, %ecx -; SSE2-NEXT: movd %ecx, %xmm3 -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx -; SSE2-NEXT: imull $37, %edx, %esi -; SSE2-NEXT: shrl $8, %esi -; SSE2-NEXT: subb %sil, %dl -; SSE2-NEXT: shrb %dl -; SSE2-NEXT: addb %sil, %dl -; SSE2-NEXT: shrb $2, %dl -; SSE2-NEXT: movzbl %dl, %edx -; SSE2-NEXT: movd %edx, %xmm1 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] -; SSE2-NEXT: imull $37, %ebx, %edx -; SSE2-NEXT: shrl $8, %edx -; SSE2-NEXT: subb %dl, %bl -; SSE2-NEXT: shrb %bl -; SSE2-NEXT: addb %dl, %bl -; SSE2-NEXT: shrb $2, %bl -; SSE2-NEXT: movzbl %bl, %edx -; SSE2-NEXT: movd %edx, %xmm2 -; SSE2-NEXT: imull $37, %eax, %edx -; SSE2-NEXT: shrl $8, %edx -; SSE2-NEXT: subb %dl, %al -; SSE2-NEXT: shrb %al -; SSE2-NEXT: addb %dl, %al -; SSE2-NEXT: shrb $2, %al -; SSE2-NEXT: movzbl %al, %eax -; SSE2-NEXT: movd %eax, %xmm0 -; SSE2-NEXT: punpcklbw {{.*#+}} 
xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7] -; SSE2-NEXT: imull $37, %r11d, %eax -; SSE2-NEXT: shrl $8, %eax -; SSE2-NEXT: subb %al, %r11b -; SSE2-NEXT: shrb %r11b -; SSE2-NEXT: addb %al, %r11b -; SSE2-NEXT: shrb $2, %r11b -; SSE2-NEXT: movzbl %r11b, %eax -; SSE2-NEXT: movd %eax, %xmm3 -; SSE2-NEXT: imull $37, %ecx, %eax -; SSE2-NEXT: shrl $8, %eax -; SSE2-NEXT: subb %al, %cl -; SSE2-NEXT: shrb %cl -; SSE2-NEXT: addb %al, %cl -; SSE2-NEXT: shrb $2, %cl -; SSE2-NEXT: movzbl %cl, %eax -; SSE2-NEXT: movd %eax, %xmm2 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] -; SSE2-NEXT: imull $37, %r9d, %eax -; SSE2-NEXT: shrl $8, %eax -; SSE2-NEXT: subb %al, %r9b -; SSE2-NEXT: shrb %r9b -; SSE2-NEXT: addb %al, %r9b -; SSE2-NEXT: shrb $2, %r9b -; SSE2-NEXT: movzbl %r9b, %eax -; SSE2-NEXT: movd %eax, %xmm0 -; SSE2-NEXT: imull $37, %r10d, %eax -; SSE2-NEXT: shrl $8, %eax -; SSE2-NEXT: subb %al, %r10b -; SSE2-NEXT: shrb %r10b -; SSE2-NEXT: addb %al, %r10b -; SSE2-NEXT: shrb $2, %r10b -; SSE2-NEXT: movzbl %r10b, %eax -; SSE2-NEXT: movd %eax, %xmm3 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7] -; SSE2-NEXT: imull $37, %r8d, %eax -; SSE2-NEXT: shrl $8, %eax -; SSE2-NEXT: subb %al, %r8b -; SSE2-NEXT: shrb %r8b -; SSE2-NEXT: addb %al, %r8b -; SSE2-NEXT: shrb $2, %r8b -; SSE2-NEXT: movzbl %r8b, %eax -; SSE2-NEXT: movd %eax, %xmm4 -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax -; SSE2-NEXT: imull $37, %eax, %ecx -; SSE2-NEXT: shrl $8, %ecx -; SSE2-NEXT: subb %cl, %al -; SSE2-NEXT: shrb %al -; SSE2-NEXT: addb %cl, %al -; SSE2-NEXT: shrb $2, %al -; SSE2-NEXT: movzbl %al, %eax -; SSE2-NEXT: movd %eax, %xmm0 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] -; SSE2-NEXT: popq %rbx -; SSE2-NEXT: popq %rbp +; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37] +; SSE2-NEXT: psrlw $8, %xmm1 +; SSE2-NEXT: movdqa %xmm0, %xmm2 +; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] +; SSE2-NEXT: psrlw $8, %xmm2 +; SSE2-NEXT: pmullw %xmm1, %xmm2 +; SSE2-NEXT: psrlw $8, %xmm2 +; SSE2-NEXT: movdqa %xmm0, %xmm3 +; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] +; SSE2-NEXT: psrlw $8, %xmm3 +; SSE2-NEXT: pmullw %xmm1, %xmm3 +; SSE2-NEXT: psrlw $8, %xmm3 +; SSE2-NEXT: packuswb %xmm2, %xmm3 +; SSE2-NEXT: psubb %xmm3, %xmm0 +; SSE2-NEXT: psrlw $1, %xmm0 +; SSE2-NEXT: pand {{.*}}(%rip), %xmm0 +; SSE2-NEXT: paddb %xmm3, %xmm0 +; SSE2-NEXT: 
psrlw $2, %xmm0 +; SSE2-NEXT: pand {{.*}}(%rip), %xmm0 ; SSE2-NEXT: retq ; ; SSE41-LABEL: test_div7_16i8: ; SSE41: # BB#0: -; SSE41-NEXT: pextrb $1, %xmm0, %eax -; SSE41-NEXT: imull $37, %eax, %ecx -; SSE41-NEXT: shrl $8, %ecx -; SSE41-NEXT: subb %cl, %al -; SSE41-NEXT: shrb %al -; SSE41-NEXT: addb %cl, %al -; SSE41-NEXT: shrb $2, %al -; SSE41-NEXT: movzbl %al, %eax -; SSE41-NEXT: pextrb $0, %xmm0, %ecx -; SSE41-NEXT: imull $37, %ecx, %edx -; SSE41-NEXT: shrl $8, %edx -; SSE41-NEXT: subb %dl, %cl -; SSE41-NEXT: shrb %cl -; SSE41-NEXT: addb %dl, %cl -; SSE41-NEXT: shrb $2, %cl -; SSE41-NEXT: movzbl %cl, %ecx -; SSE41-NEXT: movd %ecx, %xmm1 -; SSE41-NEXT: pinsrb $1, %eax, %xmm1 -; SSE41-NEXT: pextrb $2, %xmm0, %eax -; SSE41-NEXT: imull $37, %eax, %ecx -; SSE41-NEXT: shrl $8, %ecx -; SSE41-NEXT: subb %cl, %al -; SSE41-NEXT: shrb %al -; SSE41-NEXT: addb %cl, %al -; SSE41-NEXT: shrb $2, %al -; SSE41-NEXT: movzbl %al, %eax -; SSE41-NEXT: pinsrb $2, %eax, %xmm1 -; SSE41-NEXT: pextrb $3, %xmm0, %eax -; SSE41-NEXT: imull $37, %eax, %ecx -; SSE41-NEXT: shrl $8, %ecx -; SSE41-NEXT: subb %cl, %al -; SSE41-NEXT: shrb %al -; SSE41-NEXT: addb %cl, %al -; SSE41-NEXT: shrb $2, %al -; SSE41-NEXT: movzbl %al, %eax -; SSE41-NEXT: pinsrb $3, %eax, %xmm1 -; SSE41-NEXT: pextrb $4, %xmm0, %eax -; SSE41-NEXT: imull $37, %eax, %ecx -; SSE41-NEXT: shrl $8, %ecx -; SSE41-NEXT: subb %cl, %al -; SSE41-NEXT: shrb %al -; SSE41-NEXT: addb %cl, %al -; SSE41-NEXT: shrb $2, %al -; SSE41-NEXT: movzbl %al, %eax -; SSE41-NEXT: pinsrb $4, %eax, %xmm1 -; SSE41-NEXT: pextrb $5, %xmm0, %eax -; SSE41-NEXT: imull $37, %eax, %ecx -; SSE41-NEXT: shrl $8, %ecx -; SSE41-NEXT: subb %cl, %al -; SSE41-NEXT: shrb %al -; SSE41-NEXT: addb %cl, %al -; SSE41-NEXT: shrb $2, %al -; SSE41-NEXT: movzbl %al, %eax -; SSE41-NEXT: pinsrb $5, %eax, %xmm1 -; SSE41-NEXT: pextrb $6, %xmm0, %eax -; SSE41-NEXT: imull $37, %eax, %ecx -; SSE41-NEXT: shrl $8, %ecx -; SSE41-NEXT: subb %cl, %al -; SSE41-NEXT: shrb %al -; SSE41-NEXT: addb %cl, %al -; SSE41-NEXT: shrb $2, %al -; SSE41-NEXT: movzbl %al, %eax -; SSE41-NEXT: pinsrb $6, %eax, %xmm1 -; SSE41-NEXT: pextrb $7, %xmm0, %eax -; SSE41-NEXT: imull $37, %eax, %ecx -; SSE41-NEXT: shrl $8, %ecx -; SSE41-NEXT: subb %cl, %al -; SSE41-NEXT: shrb %al -; SSE41-NEXT: addb %cl, %al -; SSE41-NEXT: shrb $2, %al -; SSE41-NEXT: movzbl %al, %eax -; SSE41-NEXT: pinsrb $7, %eax, %xmm1 -; SSE41-NEXT: pextrb $8, %xmm0, %eax -; SSE41-NEXT: imull $37, %eax, %ecx -; SSE41-NEXT: shrl $8, %ecx -; SSE41-NEXT: subb %cl, %al -; SSE41-NEXT: shrb %al -; SSE41-NEXT: addb %cl, %al -; SSE41-NEXT: shrb $2, %al -; SSE41-NEXT: movzbl %al, %eax -; SSE41-NEXT: pinsrb $8, %eax, %xmm1 -; SSE41-NEXT: pextrb $9, %xmm0, %eax -; SSE41-NEXT: imull $37, %eax, %ecx -; SSE41-NEXT: shrl $8, %ecx -; SSE41-NEXT: subb %cl, %al -; SSE41-NEXT: shrb %al -; SSE41-NEXT: addb %cl, %al -; SSE41-NEXT: shrb $2, %al -; SSE41-NEXT: movzbl %al, %eax -; SSE41-NEXT: pinsrb $9, %eax, %xmm1 -; SSE41-NEXT: pextrb $10, %xmm0, %eax -; SSE41-NEXT: imull $37, %eax, %ecx -; SSE41-NEXT: shrl $8, %ecx -; SSE41-NEXT: subb %cl, %al -; SSE41-NEXT: shrb %al -; SSE41-NEXT: addb %cl, %al -; SSE41-NEXT: shrb $2, %al -; SSE41-NEXT: movzbl %al, %eax -; SSE41-NEXT: pinsrb $10, %eax, %xmm1 -; SSE41-NEXT: pextrb $11, %xmm0, %eax -; SSE41-NEXT: imull $37, %eax, %ecx -; SSE41-NEXT: shrl $8, %ecx -; SSE41-NEXT: subb %cl, %al -; SSE41-NEXT: shrb %al -; SSE41-NEXT: addb %cl, %al -; SSE41-NEXT: shrb $2, %al -; SSE41-NEXT: movzbl %al, %eax -; SSE41-NEXT: pinsrb $11, %eax, %xmm1 -; SSE41-NEXT: pextrb 
$12, %xmm0, %eax -; SSE41-NEXT: imull $37, %eax, %ecx -; SSE41-NEXT: shrl $8, %ecx -; SSE41-NEXT: subb %cl, %al -; SSE41-NEXT: shrb %al -; SSE41-NEXT: addb %cl, %al -; SSE41-NEXT: shrb $2, %al -; SSE41-NEXT: movzbl %al, %eax -; SSE41-NEXT: pinsrb $12, %eax, %xmm1 -; SSE41-NEXT: pextrb $13, %xmm0, %eax -; SSE41-NEXT: imull $37, %eax, %ecx -; SSE41-NEXT: shrl $8, %ecx -; SSE41-NEXT: subb %cl, %al -; SSE41-NEXT: shrb %al -; SSE41-NEXT: addb %cl, %al -; SSE41-NEXT: shrb $2, %al -; SSE41-NEXT: movzbl %al, %eax -; SSE41-NEXT: pinsrb $13, %eax, %xmm1 -; SSE41-NEXT: pextrb $14, %xmm0, %eax -; SSE41-NEXT: imull $37, %eax, %ecx -; SSE41-NEXT: shrl $8, %ecx -; SSE41-NEXT: subb %cl, %al -; SSE41-NEXT: shrb %al -; SSE41-NEXT: addb %cl, %al -; SSE41-NEXT: shrb $2, %al -; SSE41-NEXT: movzbl %al, %eax -; SSE41-NEXT: pinsrb $14, %eax, %xmm1 -; SSE41-NEXT: pextrb $15, %xmm0, %eax -; SSE41-NEXT: imull $37, %eax, %ecx -; SSE41-NEXT: shrl $8, %ecx -; SSE41-NEXT: subb %cl, %al -; SSE41-NEXT: shrb %al -; SSE41-NEXT: addb %cl, %al -; SSE41-NEXT: shrb $2, %al -; SSE41-NEXT: movzbl %al, %eax -; SSE41-NEXT: pinsrb $15, %eax, %xmm1 -; SSE41-NEXT: movdqa %xmm1, %xmm0 +; SSE41-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero +; SSE41-NEXT: pmovzxbw {{.*#+}} xmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero +; SSE41-NEXT: pmullw %xmm2, %xmm1 +; SSE41-NEXT: psrlw $8, %xmm1 +; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1] +; SSE41-NEXT: pmovzxbw {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero +; SSE41-NEXT: pmullw %xmm2, %xmm3 +; SSE41-NEXT: psrlw $8, %xmm3 +; SSE41-NEXT: packuswb %xmm3, %xmm1 +; SSE41-NEXT: psubb %xmm1, %xmm0 +; SSE41-NEXT: psrlw $1, %xmm0 +; SSE41-NEXT: pand {{.*}}(%rip), %xmm0 +; SSE41-NEXT: paddb %xmm1, %xmm0 +; SSE41-NEXT: psrlw $2, %xmm0 +; SSE41-NEXT: pand {{.*}}(%rip), %xmm0 ; SSE41-NEXT: retq ; -; AVX-LABEL: test_div7_16i8: -; AVX: # BB#0: -; AVX-NEXT: vpextrb $1, %xmm0, %eax -; AVX-NEXT: imull $37, %eax, %ecx -; AVX-NEXT: shrl $8, %ecx -; AVX-NEXT: subb %cl, %al -; AVX-NEXT: shrb %al -; AVX-NEXT: addb %cl, %al -; AVX-NEXT: shrb $2, %al -; AVX-NEXT: movzbl %al, %eax -; AVX-NEXT: vpextrb $0, %xmm0, %ecx -; AVX-NEXT: imull $37, %ecx, %edx -; AVX-NEXT: shrl $8, %edx -; AVX-NEXT: subb %dl, %cl -; AVX-NEXT: shrb %cl -; AVX-NEXT: addb %dl, %cl -; AVX-NEXT: shrb $2, %cl -; AVX-NEXT: movzbl %cl, %ecx -; AVX-NEXT: vmovd %ecx, %xmm1 -; AVX-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrb $2, %xmm0, %eax -; AVX-NEXT: imull $37, %eax, %ecx -; AVX-NEXT: shrl $8, %ecx -; AVX-NEXT: subb %cl, %al -; AVX-NEXT: shrb %al -; AVX-NEXT: addb %cl, %al -; AVX-NEXT: shrb $2, %al -; AVX-NEXT: movzbl %al, %eax -; AVX-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrb $3, %xmm0, %eax -; AVX-NEXT: imull $37, %eax, %ecx -; AVX-NEXT: shrl $8, %ecx -; AVX-NEXT: subb %cl, %al -; AVX-NEXT: shrb %al -; AVX-NEXT: addb %cl, %al -; AVX-NEXT: shrb $2, %al -; AVX-NEXT: movzbl %al, %eax -; AVX-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrb $4, %xmm0, %eax -; AVX-NEXT: imull $37, %eax, %ecx -; AVX-NEXT: shrl $8, %ecx -; AVX-NEXT: subb %cl, %al -; AVX-NEXT: shrb %al -; AVX-NEXT: addb %cl, %al -; AVX-NEXT: shrb $2, %al -; AVX-NEXT: movzbl %al, %eax -; AVX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrb $5, %xmm0, %eax -; AVX-NEXT: imull $37, %eax, %ecx -; AVX-NEXT: shrl $8, %ecx -; AVX-NEXT: 
subb %cl, %al -; AVX-NEXT: shrb %al -; AVX-NEXT: addb %cl, %al -; AVX-NEXT: shrb $2, %al -; AVX-NEXT: movzbl %al, %eax -; AVX-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrb $6, %xmm0, %eax -; AVX-NEXT: imull $37, %eax, %ecx -; AVX-NEXT: shrl $8, %ecx -; AVX-NEXT: subb %cl, %al -; AVX-NEXT: shrb %al -; AVX-NEXT: addb %cl, %al -; AVX-NEXT: shrb $2, %al -; AVX-NEXT: movzbl %al, %eax -; AVX-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrb $7, %xmm0, %eax -; AVX-NEXT: imull $37, %eax, %ecx -; AVX-NEXT: shrl $8, %ecx -; AVX-NEXT: subb %cl, %al -; AVX-NEXT: shrb %al -; AVX-NEXT: addb %cl, %al -; AVX-NEXT: shrb $2, %al -; AVX-NEXT: movzbl %al, %eax -; AVX-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrb $8, %xmm0, %eax -; AVX-NEXT: imull $37, %eax, %ecx -; AVX-NEXT: shrl $8, %ecx -; AVX-NEXT: subb %cl, %al -; AVX-NEXT: shrb %al -; AVX-NEXT: addb %cl, %al -; AVX-NEXT: shrb $2, %al -; AVX-NEXT: movzbl %al, %eax -; AVX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrb $9, %xmm0, %eax -; AVX-NEXT: imull $37, %eax, %ecx -; AVX-NEXT: shrl $8, %ecx -; AVX-NEXT: subb %cl, %al -; AVX-NEXT: shrb %al -; AVX-NEXT: addb %cl, %al -; AVX-NEXT: shrb $2, %al -; AVX-NEXT: movzbl %al, %eax -; AVX-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrb $10, %xmm0, %eax -; AVX-NEXT: imull $37, %eax, %ecx -; AVX-NEXT: shrl $8, %ecx -; AVX-NEXT: subb %cl, %al -; AVX-NEXT: shrb %al -; AVX-NEXT: addb %cl, %al -; AVX-NEXT: shrb $2, %al -; AVX-NEXT: movzbl %al, %eax -; AVX-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrb $11, %xmm0, %eax -; AVX-NEXT: imull $37, %eax, %ecx -; AVX-NEXT: shrl $8, %ecx -; AVX-NEXT: subb %cl, %al -; AVX-NEXT: shrb %al -; AVX-NEXT: addb %cl, %al -; AVX-NEXT: shrb $2, %al -; AVX-NEXT: movzbl %al, %eax -; AVX-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrb $12, %xmm0, %eax -; AVX-NEXT: imull $37, %eax, %ecx -; AVX-NEXT: shrl $8, %ecx -; AVX-NEXT: subb %cl, %al -; AVX-NEXT: shrb %al -; AVX-NEXT: addb %cl, %al -; AVX-NEXT: shrb $2, %al -; AVX-NEXT: movzbl %al, %eax -; AVX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrb $13, %xmm0, %eax -; AVX-NEXT: imull $37, %eax, %ecx -; AVX-NEXT: shrl $8, %ecx -; AVX-NEXT: subb %cl, %al -; AVX-NEXT: shrb %al -; AVX-NEXT: addb %cl, %al -; AVX-NEXT: shrb $2, %al -; AVX-NEXT: movzbl %al, %eax -; AVX-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrb $14, %xmm0, %eax -; AVX-NEXT: imull $37, %eax, %ecx -; AVX-NEXT: shrl $8, %ecx -; AVX-NEXT: subb %cl, %al -; AVX-NEXT: shrb %al -; AVX-NEXT: addb %cl, %al -; AVX-NEXT: shrb $2, %al -; AVX-NEXT: movzbl %al, %eax -; AVX-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrb $15, %xmm0, %eax -; AVX-NEXT: imull $37, %eax, %ecx -; AVX-NEXT: shrl $8, %ecx -; AVX-NEXT: subb %cl, %al -; AVX-NEXT: shrb %al -; AVX-NEXT: addb %cl, %al -; AVX-NEXT: shrb $2, %al -; AVX-NEXT: movzbl %al, %eax -; AVX-NEXT: vpinsrb $15, %eax, %xmm1, %xmm0 -; AVX-NEXT: retq +; AVX1-LABEL: test_div7_16i8: +; AVX1: # BB#0: +; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero +; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero +; AVX1-NEXT: vpmullw %xmm2, %xmm1, %xmm1 +; AVX1-NEXT: vpsrlw $8, %xmm1, %xmm1 +; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[2,3,0,1] +; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero +; 
AVX1-NEXT: vpmullw %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
+; AVX1-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpsubb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX1-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $2, %xmm0, %xmm0
+; AVX1-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_div7_16i8:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
+; AVX2-NEXT: vpmullw %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpsrlw $8, %ymm1, %ymm1
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX2-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
+; AVX2-NEXT: vpsubb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX2-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsrlw $2, %xmm0, %xmm0
+; AVX2-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
  %res = udiv <16 x i8> %a, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
  ret <16 x i8> %res
 }
@@ -852,619 +465,128 @@
 define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind {
 ; SSE2-LABEL: test_rem7_16i8:
 ; SSE2: # BB#0:
-; SSE2-NEXT: pushq %rbp
-; SSE2-NEXT: pushq %r15
-; SSE2-NEXT: pushq %r14
-; SSE2-NEXT: pushq %rbx
-; SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
-; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
-; SSE2-NEXT: imull $37, %ecx, %edx
-; SSE2-NEXT: shrl $8, %edx
-; SSE2-NEXT: movb %cl, %al
-; SSE2-NEXT: subb %dl, %al
-; SSE2-NEXT: shrb %al
-; SSE2-NEXT: addb %dl, %al
-; SSE2-NEXT: shrb $2, %al
-; SSE2-NEXT: movb $7, %r11b
-; SSE2-NEXT: mulb %r11b
-; SSE2-NEXT: subb %al, %cl
-; SSE2-NEXT: movzbl %cl, %eax
-; SSE2-NEXT: movd %eax, %xmm0
-; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r15d
-; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ebx
-; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r9d
-; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edi
-; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r14d
-; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
-; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r8d
-; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ebp
-; SSE2-NEXT: imull $37, %ebp, %edx
-; SSE2-NEXT: shrl $8, %edx
-; SSE2-NEXT: movb %bpl, %al
-; SSE2-NEXT: subb %dl, %al
-; SSE2-NEXT: shrb %al
-; SSE2-NEXT: addb %dl, %al
-; SSE2-NEXT: shrb $2, %al
-; SSE2-NEXT: mulb %r11b
-; SSE2-NEXT: subb %al, %bpl
-; SSE2-NEXT: movzbl %bpl, %eax
-; SSE2-NEXT: movd %eax, %xmm1
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-NEXT: imull $37, %edi, %edx
-; SSE2-NEXT: shrl $8, %edx
-; SSE2-NEXT: movb %dil, %al
-; SSE2-NEXT: subb %dl, %al
-; SSE2-NEXT: shrb %al
-; SSE2-NEXT: addb %dl, %al
-; SSE2-NEXT: shrb $2, %al
-; SSE2-NEXT: mulb %r11b
-; SSE2-NEXT: subb %al, %dil
-; SSE2-NEXT: movzbl %dil, %eax
-; SSE2-NEXT: movd %eax, %xmm2
-; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edi
-; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ebp
-; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r10d
-; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx
-; SSE2-NEXT: imull $37, %edx, %esi
-; SSE2-NEXT: shrl $8, %esi
-; SSE2-NEXT: movb %dl, %al
-;
SSE2-NEXT: subb %sil, %al -; SSE2-NEXT: shrb %al -; SSE2-NEXT: addb %sil, %al -; SSE2-NEXT: shrb $2, %al -; SSE2-NEXT: mulb %r11b -; SSE2-NEXT: subb %al, %dl -; SSE2-NEXT: movzbl %dl, %eax -; SSE2-NEXT: movd %eax, %xmm0 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] -; SSE2-NEXT: imull $37, %ebx, %edx -; SSE2-NEXT: shrl $8, %edx -; SSE2-NEXT: movb %bl, %al -; SSE2-NEXT: subb %dl, %al -; SSE2-NEXT: shrb %al -; SSE2-NEXT: addb %dl, %al -; SSE2-NEXT: shrb $2, %al -; SSE2-NEXT: mulb %r11b -; SSE2-NEXT: subb %al, %bl -; SSE2-NEXT: movzbl %bl, %eax -; SSE2-NEXT: movd %eax, %xmm1 -; SSE2-NEXT: imull $37, %ebp, %edx -; SSE2-NEXT: shrl $8, %edx -; SSE2-NEXT: movb %bpl, %al -; SSE2-NEXT: subb %dl, %al -; SSE2-NEXT: shrb %al -; SSE2-NEXT: addb %dl, %al -; SSE2-NEXT: shrb $2, %al -; SSE2-NEXT: mulb %r11b -; SSE2-NEXT: subb %al, %bpl -; SSE2-NEXT: movzbl %bpl, %eax -; SSE2-NEXT: movd %eax, %xmm2 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7] -; SSE2-NEXT: imull $37, %ecx, %edx -; SSE2-NEXT: shrl $8, %edx -; SSE2-NEXT: movb %cl, %al -; SSE2-NEXT: subb %dl, %al -; SSE2-NEXT: shrb %al -; SSE2-NEXT: addb %dl, %al -; SSE2-NEXT: shrb $2, %al -; SSE2-NEXT: mulb %r11b -; SSE2-NEXT: subb %al, %cl -; SSE2-NEXT: movzbl %cl, %eax -; SSE2-NEXT: movd %eax, %xmm3 -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx -; SSE2-NEXT: imull $37, %edx, %esi -; SSE2-NEXT: shrl $8, %esi -; SSE2-NEXT: movb %dl, %al -; SSE2-NEXT: subb %sil, %al -; SSE2-NEXT: shrb %al -; SSE2-NEXT: addb %sil, %al -; SSE2-NEXT: shrb $2, %al -; SSE2-NEXT: mulb %r11b -; SSE2-NEXT: subb %al, %dl -; SSE2-NEXT: movzbl %dl, %eax -; SSE2-NEXT: movd %eax, %xmm1 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] -; SSE2-NEXT: imull $37, %r15d, %edx -; SSE2-NEXT: shrl $8, %edx -; SSE2-NEXT: movb %r15b, %al -; SSE2-NEXT: subb %dl, %al -; SSE2-NEXT: shrb %al -; SSE2-NEXT: addb %dl, %al -; SSE2-NEXT: shrb $2, %al -; SSE2-NEXT: mulb %r11b -; SSE2-NEXT: subb %al, %r15b -; SSE2-NEXT: movzbl %r15b, %eax -; SSE2-NEXT: movd %eax, %xmm2 -; SSE2-NEXT: imull $37, %edi, %edx -; SSE2-NEXT: shrl $8, %edx -; SSE2-NEXT: movb %dil, %al -; SSE2-NEXT: subb %dl, %al -; SSE2-NEXT: shrb %al -; SSE2-NEXT: addb %dl, %al -; SSE2-NEXT: shrb $2, %al -; SSE2-NEXT: mulb %r11b -; SSE2-NEXT: subb %al, %dil -; SSE2-NEXT: movzbl %dil, %eax -; SSE2-NEXT: movd %eax, %xmm0 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7] -; SSE2-NEXT: imull $37, %r14d, %edx -; SSE2-NEXT: shrl $8, %edx -; SSE2-NEXT: movb %r14b, %al -; SSE2-NEXT: subb %dl, %al -; SSE2-NEXT: shrb %al -; SSE2-NEXT: addb %dl, %al 
-; SSE2-NEXT: shrb $2, %al -; SSE2-NEXT: mulb %r11b -; SSE2-NEXT: subb %al, %r14b -; SSE2-NEXT: movzbl %r14b, %eax -; SSE2-NEXT: movd %eax, %xmm3 -; SSE2-NEXT: imull $37, %ecx, %edx -; SSE2-NEXT: shrl $8, %edx -; SSE2-NEXT: movb %cl, %al -; SSE2-NEXT: subb %dl, %al -; SSE2-NEXT: shrb %al -; SSE2-NEXT: addb %dl, %al -; SSE2-NEXT: shrb $2, %al -; SSE2-NEXT: mulb %r11b -; SSE2-NEXT: subb %al, %cl -; SSE2-NEXT: movzbl %cl, %eax -; SSE2-NEXT: movd %eax, %xmm2 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7] -; SSE2-NEXT: imull $37, %r9d, %ecx -; SSE2-NEXT: shrl $8, %ecx -; SSE2-NEXT: movb %r9b, %al -; SSE2-NEXT: subb %cl, %al -; SSE2-NEXT: shrb %al -; SSE2-NEXT: addb %cl, %al -; SSE2-NEXT: shrb $2, %al -; SSE2-NEXT: mulb %r11b -; SSE2-NEXT: subb %al, %r9b -; SSE2-NEXT: movzbl %r9b, %eax -; SSE2-NEXT: movd %eax, %xmm0 -; SSE2-NEXT: imull $37, %r10d, %ecx -; SSE2-NEXT: shrl $8, %ecx -; SSE2-NEXT: movb %r10b, %al -; SSE2-NEXT: subb %cl, %al -; SSE2-NEXT: shrb %al -; SSE2-NEXT: addb %cl, %al -; SSE2-NEXT: shrb $2, %al -; SSE2-NEXT: mulb %r11b -; SSE2-NEXT: subb %al, %r10b -; SSE2-NEXT: movzbl %r10b, %eax -; SSE2-NEXT: movd %eax, %xmm3 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7] -; SSE2-NEXT: imull $37, %r8d, %ecx -; SSE2-NEXT: shrl $8, %ecx -; SSE2-NEXT: movb %r8b, %al -; SSE2-NEXT: subb %cl, %al -; SSE2-NEXT: shrb %al -; SSE2-NEXT: addb %cl, %al -; SSE2-NEXT: shrb $2, %al -; SSE2-NEXT: mulb %r11b -; SSE2-NEXT: subb %al, %r8b -; SSE2-NEXT: movzbl %r8b, %eax -; SSE2-NEXT: movd %eax, %xmm4 -; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx -; SSE2-NEXT: imull $37, %ecx, %edx -; SSE2-NEXT: shrl $8, %edx -; SSE2-NEXT: movb %cl, %al -; SSE2-NEXT: subb %dl, %al -; SSE2-NEXT: shrb %al -; SSE2-NEXT: addb %dl, %al -; SSE2-NEXT: shrb $2, %al -; SSE2-NEXT: mulb %r11b -; SSE2-NEXT: subb %al, %cl -; SSE2-NEXT: movzbl %cl, %eax -; SSE2-NEXT: movd %eax, %xmm0 -; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7] -; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] -; SSE2-NEXT: popq %rbx -; SSE2-NEXT: popq %r14 -; SSE2-NEXT: popq %r15 -; SSE2-NEXT: popq %rbp +; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37] +; SSE2-NEXT: psrlw $8, %xmm1 +; SSE2-NEXT: movdqa %xmm0, %xmm2 +; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] +; SSE2-NEXT: psrlw $8, %xmm2 +; SSE2-NEXT: pmullw %xmm1, %xmm2 +; SSE2-NEXT: psrlw $8, %xmm2 +; SSE2-NEXT: movdqa %xmm0, %xmm3 +; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] +; SSE2-NEXT: psrlw $8, %xmm3 +; SSE2-NEXT: pmullw %xmm1, %xmm3 +; 
SSE2-NEXT: psrlw $8, %xmm3 +; SSE2-NEXT: packuswb %xmm2, %xmm3 +; SSE2-NEXT: movdqa %xmm0, %xmm1 +; SSE2-NEXT: psubb %xmm3, %xmm1 +; SSE2-NEXT: psrlw $1, %xmm1 +; SSE2-NEXT: pand {{.*}}(%rip), %xmm1 +; SSE2-NEXT: paddb %xmm3, %xmm1 +; SSE2-NEXT: psrlw $2, %xmm1 +; SSE2-NEXT: pand {{.*}}(%rip), %xmm1 +; SSE2-NEXT: movdqa %xmm1, %xmm2 +; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] +; SSE2-NEXT: psraw $8, %xmm2 +; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7] +; SSE2-NEXT: psraw $8, %xmm3 +; SSE2-NEXT: pmullw %xmm3, %xmm2 +; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255] +; SSE2-NEXT: pand %xmm4, %xmm2 +; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] +; SSE2-NEXT: psraw $8, %xmm1 +; SSE2-NEXT: pmullw %xmm3, %xmm1 +; SSE2-NEXT: pand %xmm4, %xmm1 +; SSE2-NEXT: packuswb %xmm2, %xmm1 +; SSE2-NEXT: psubb %xmm1, %xmm0 ; SSE2-NEXT: retq ; ; SSE41-LABEL: test_rem7_16i8: ; SSE41: # BB#0: -; SSE41-NEXT: pextrb $1, %xmm0, %edx -; SSE41-NEXT: imull $37, %edx, %ecx -; SSE41-NEXT: shrl $8, %ecx -; SSE41-NEXT: movb %dl, %al -; SSE41-NEXT: subb %cl, %al -; SSE41-NEXT: shrb %al -; SSE41-NEXT: addb %cl, %al -; SSE41-NEXT: shrb $2, %al -; SSE41-NEXT: movb $7, %cl -; SSE41-NEXT: mulb %cl -; SSE41-NEXT: subb %al, %dl -; SSE41-NEXT: movzbl %dl, %edx -; SSE41-NEXT: pextrb $0, %xmm0, %esi -; SSE41-NEXT: imull $37, %esi, %edi -; SSE41-NEXT: shrl $8, %edi -; SSE41-NEXT: movb %sil, %al -; SSE41-NEXT: subb %dil, %al -; SSE41-NEXT: shrb %al -; SSE41-NEXT: addb %dil, %al -; SSE41-NEXT: shrb $2, %al -; SSE41-NEXT: mulb %cl -; SSE41-NEXT: subb %al, %sil -; SSE41-NEXT: movzbl %sil, %eax -; SSE41-NEXT: movd %eax, %xmm1 -; SSE41-NEXT: pinsrb $1, %edx, %xmm1 -; SSE41-NEXT: pextrb $2, %xmm0, %edx -; SSE41-NEXT: imull $37, %edx, %esi -; SSE41-NEXT: shrl $8, %esi -; SSE41-NEXT: movb %dl, %al -; SSE41-NEXT: subb %sil, %al -; SSE41-NEXT: shrb %al -; SSE41-NEXT: addb %sil, %al -; SSE41-NEXT: shrb $2, %al -; SSE41-NEXT: mulb %cl -; SSE41-NEXT: subb %al, %dl -; SSE41-NEXT: movzbl %dl, %eax -; SSE41-NEXT: pinsrb $2, %eax, %xmm1 -; SSE41-NEXT: pextrb $3, %xmm0, %edx -; SSE41-NEXT: imull $37, %edx, %esi -; SSE41-NEXT: shrl $8, %esi -; SSE41-NEXT: movb %dl, %al -; SSE41-NEXT: subb %sil, %al -; SSE41-NEXT: shrb %al -; SSE41-NEXT: addb %sil, %al -; SSE41-NEXT: shrb $2, %al -; SSE41-NEXT: mulb %cl -; SSE41-NEXT: subb %al, %dl -; SSE41-NEXT: movzbl %dl, %eax -; SSE41-NEXT: pinsrb $3, %eax, %xmm1 -; SSE41-NEXT: pextrb $4, %xmm0, %edx -; SSE41-NEXT: imull $37, %edx, %esi -; SSE41-NEXT: shrl $8, %esi -; SSE41-NEXT: movb %dl, %al -; SSE41-NEXT: subb %sil, %al -; SSE41-NEXT: shrb %al -; SSE41-NEXT: addb %sil, %al -; SSE41-NEXT: shrb $2, %al -; SSE41-NEXT: mulb %cl -; SSE41-NEXT: subb %al, %dl -; SSE41-NEXT: movzbl %dl, %eax -; SSE41-NEXT: pinsrb $4, %eax, %xmm1 -; SSE41-NEXT: pextrb $5, %xmm0, %edx -; SSE41-NEXT: imull $37, %edx, %esi -; SSE41-NEXT: shrl $8, %esi -; SSE41-NEXT: movb %dl, %al -; SSE41-NEXT: subb %sil, %al -; SSE41-NEXT: shrb %al -; SSE41-NEXT: addb %sil, %al -; SSE41-NEXT: shrb $2, %al -; SSE41-NEXT: mulb %cl -; SSE41-NEXT: subb %al, %dl -; SSE41-NEXT: movzbl %dl, %eax -; SSE41-NEXT: pinsrb $5, %eax, %xmm1 -; SSE41-NEXT: pextrb $6, %xmm0, %edx -; SSE41-NEXT: imull $37, %edx, %esi -; SSE41-NEXT: shrl $8, %esi -; SSE41-NEXT: movb %dl, %al -; SSE41-NEXT: subb %sil, %al -; SSE41-NEXT: shrb %al -; SSE41-NEXT: addb %sil, %al -; SSE41-NEXT: shrb $2, %al -; SSE41-NEXT: mulb %cl -; SSE41-NEXT: subb %al, %dl -; SSE41-NEXT: movzbl 
%dl, %eax -; SSE41-NEXT: pinsrb $6, %eax, %xmm1 -; SSE41-NEXT: pextrb $7, %xmm0, %edx -; SSE41-NEXT: imull $37, %edx, %esi -; SSE41-NEXT: shrl $8, %esi -; SSE41-NEXT: movb %dl, %al -; SSE41-NEXT: subb %sil, %al -; SSE41-NEXT: shrb %al -; SSE41-NEXT: addb %sil, %al -; SSE41-NEXT: shrb $2, %al -; SSE41-NEXT: mulb %cl -; SSE41-NEXT: subb %al, %dl -; SSE41-NEXT: movzbl %dl, %eax -; SSE41-NEXT: pinsrb $7, %eax, %xmm1 -; SSE41-NEXT: pextrb $8, %xmm0, %edx -; SSE41-NEXT: imull $37, %edx, %esi -; SSE41-NEXT: shrl $8, %esi -; SSE41-NEXT: movb %dl, %al -; SSE41-NEXT: subb %sil, %al -; SSE41-NEXT: shrb %al -; SSE41-NEXT: addb %sil, %al -; SSE41-NEXT: shrb $2, %al -; SSE41-NEXT: mulb %cl -; SSE41-NEXT: subb %al, %dl -; SSE41-NEXT: movzbl %dl, %eax -; SSE41-NEXT: pinsrb $8, %eax, %xmm1 -; SSE41-NEXT: pextrb $9, %xmm0, %edx -; SSE41-NEXT: imull $37, %edx, %esi -; SSE41-NEXT: shrl $8, %esi -; SSE41-NEXT: movb %dl, %al -; SSE41-NEXT: subb %sil, %al -; SSE41-NEXT: shrb %al -; SSE41-NEXT: addb %sil, %al -; SSE41-NEXT: shrb $2, %al -; SSE41-NEXT: mulb %cl -; SSE41-NEXT: subb %al, %dl -; SSE41-NEXT: movzbl %dl, %eax -; SSE41-NEXT: pinsrb $9, %eax, %xmm1 -; SSE41-NEXT: pextrb $10, %xmm0, %edx -; SSE41-NEXT: imull $37, %edx, %esi -; SSE41-NEXT: shrl $8, %esi -; SSE41-NEXT: movb %dl, %al -; SSE41-NEXT: subb %sil, %al -; SSE41-NEXT: shrb %al -; SSE41-NEXT: addb %sil, %al -; SSE41-NEXT: shrb $2, %al -; SSE41-NEXT: mulb %cl -; SSE41-NEXT: subb %al, %dl -; SSE41-NEXT: movzbl %dl, %eax -; SSE41-NEXT: pinsrb $10, %eax, %xmm1 -; SSE41-NEXT: pextrb $11, %xmm0, %edx -; SSE41-NEXT: imull $37, %edx, %esi -; SSE41-NEXT: shrl $8, %esi -; SSE41-NEXT: movb %dl, %al -; SSE41-NEXT: subb %sil, %al -; SSE41-NEXT: shrb %al -; SSE41-NEXT: addb %sil, %al -; SSE41-NEXT: shrb $2, %al -; SSE41-NEXT: mulb %cl -; SSE41-NEXT: subb %al, %dl -; SSE41-NEXT: movzbl %dl, %eax -; SSE41-NEXT: pinsrb $11, %eax, %xmm1 -; SSE41-NEXT: pextrb $12, %xmm0, %edx -; SSE41-NEXT: imull $37, %edx, %esi -; SSE41-NEXT: shrl $8, %esi -; SSE41-NEXT: movb %dl, %al -; SSE41-NEXT: subb %sil, %al -; SSE41-NEXT: shrb %al -; SSE41-NEXT: addb %sil, %al -; SSE41-NEXT: shrb $2, %al -; SSE41-NEXT: mulb %cl -; SSE41-NEXT: subb %al, %dl -; SSE41-NEXT: movzbl %dl, %eax -; SSE41-NEXT: pinsrb $12, %eax, %xmm1 -; SSE41-NEXT: pextrb $13, %xmm0, %edx -; SSE41-NEXT: imull $37, %edx, %esi -; SSE41-NEXT: shrl $8, %esi -; SSE41-NEXT: movb %dl, %al -; SSE41-NEXT: subb %sil, %al -; SSE41-NEXT: shrb %al -; SSE41-NEXT: addb %sil, %al -; SSE41-NEXT: shrb $2, %al -; SSE41-NEXT: mulb %cl -; SSE41-NEXT: subb %al, %dl -; SSE41-NEXT: movzbl %dl, %eax -; SSE41-NEXT: pinsrb $13, %eax, %xmm1 -; SSE41-NEXT: pextrb $14, %xmm0, %edx -; SSE41-NEXT: imull $37, %edx, %esi -; SSE41-NEXT: shrl $8, %esi -; SSE41-NEXT: movb %dl, %al -; SSE41-NEXT: subb %sil, %al -; SSE41-NEXT: shrb %al -; SSE41-NEXT: addb %sil, %al -; SSE41-NEXT: shrb $2, %al -; SSE41-NEXT: mulb %cl -; SSE41-NEXT: subb %al, %dl -; SSE41-NEXT: movzbl %dl, %eax -; SSE41-NEXT: pinsrb $14, %eax, %xmm1 -; SSE41-NEXT: pextrb $15, %xmm0, %edx -; SSE41-NEXT: imull $37, %edx, %esi -; SSE41-NEXT: shrl $8, %esi -; SSE41-NEXT: movb %dl, %al -; SSE41-NEXT: subb %sil, %al -; SSE41-NEXT: shrb %al -; SSE41-NEXT: addb %sil, %al -; SSE41-NEXT: shrb $2, %al -; SSE41-NEXT: mulb %cl -; SSE41-NEXT: subb %al, %dl -; SSE41-NEXT: movzbl %dl, %eax -; SSE41-NEXT: pinsrb $15, %eax, %xmm1 -; SSE41-NEXT: movdqa %xmm1, %xmm0 +; SSE41-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero +; 
SSE41-NEXT: pmovzxbw {{.*#+}} xmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero +; SSE41-NEXT: pmullw %xmm2, %xmm1 +; SSE41-NEXT: psrlw $8, %xmm1 +; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1] +; SSE41-NEXT: pmovzxbw {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero +; SSE41-NEXT: pmullw %xmm2, %xmm3 +; SSE41-NEXT: psrlw $8, %xmm3 +; SSE41-NEXT: packuswb %xmm3, %xmm1 +; SSE41-NEXT: movdqa %xmm0, %xmm2 +; SSE41-NEXT: psubb %xmm1, %xmm2 +; SSE41-NEXT: psrlw $1, %xmm2 +; SSE41-NEXT: pand {{.*}}(%rip), %xmm2 +; SSE41-NEXT: paddb %xmm1, %xmm2 +; SSE41-NEXT: psrlw $2, %xmm2 +; SSE41-NEXT: pand {{.*}}(%rip), %xmm2 +; SSE41-NEXT: pmovsxbw %xmm2, %xmm1 +; SSE41-NEXT: pmovsxbw {{.*}}(%rip), %xmm3 +; SSE41-NEXT: pmullw %xmm3, %xmm1 +; SSE41-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255] +; SSE41-NEXT: pand %xmm4, %xmm1 +; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1] +; SSE41-NEXT: pmovsxbw %xmm2, %xmm2 +; SSE41-NEXT: pmullw %xmm3, %xmm2 +; SSE41-NEXT: pand %xmm4, %xmm2 +; SSE41-NEXT: packuswb %xmm2, %xmm1 +; SSE41-NEXT: psubb %xmm1, %xmm0 ; SSE41-NEXT: retq ; -; AVX-LABEL: test_rem7_16i8: -; AVX: # BB#0: -; AVX-NEXT: vpextrb $1, %xmm0, %edx -; AVX-NEXT: imull $37, %edx, %ecx -; AVX-NEXT: shrl $8, %ecx -; AVX-NEXT: movb %dl, %al -; AVX-NEXT: subb %cl, %al -; AVX-NEXT: shrb %al -; AVX-NEXT: addb %cl, %al -; AVX-NEXT: shrb $2, %al -; AVX-NEXT: movb $7, %cl -; AVX-NEXT: mulb %cl -; AVX-NEXT: subb %al, %dl -; AVX-NEXT: movzbl %dl, %edx -; AVX-NEXT: vpextrb $0, %xmm0, %esi -; AVX-NEXT: imull $37, %esi, %edi -; AVX-NEXT: shrl $8, %edi -; AVX-NEXT: movb %sil, %al -; AVX-NEXT: subb %dil, %al -; AVX-NEXT: shrb %al -; AVX-NEXT: addb %dil, %al -; AVX-NEXT: shrb $2, %al -; AVX-NEXT: mulb %cl -; AVX-NEXT: subb %al, %sil -; AVX-NEXT: movzbl %sil, %eax -; AVX-NEXT: vmovd %eax, %xmm1 -; AVX-NEXT: vpinsrb $1, %edx, %xmm1, %xmm1 -; AVX-NEXT: vpextrb $2, %xmm0, %edx -; AVX-NEXT: imull $37, %edx, %esi -; AVX-NEXT: shrl $8, %esi -; AVX-NEXT: movb %dl, %al -; AVX-NEXT: subb %sil, %al -; AVX-NEXT: shrb %al -; AVX-NEXT: addb %sil, %al -; AVX-NEXT: shrb $2, %al -; AVX-NEXT: mulb %cl -; AVX-NEXT: subb %al, %dl -; AVX-NEXT: movzbl %dl, %eax -; AVX-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrb $3, %xmm0, %edx -; AVX-NEXT: imull $37, %edx, %esi -; AVX-NEXT: shrl $8, %esi -; AVX-NEXT: movb %dl, %al -; AVX-NEXT: subb %sil, %al -; AVX-NEXT: shrb %al -; AVX-NEXT: addb %sil, %al -; AVX-NEXT: shrb $2, %al -; AVX-NEXT: mulb %cl -; AVX-NEXT: subb %al, %dl -; AVX-NEXT: movzbl %dl, %eax -; AVX-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrb $4, %xmm0, %edx -; AVX-NEXT: imull $37, %edx, %esi -; AVX-NEXT: shrl $8, %esi -; AVX-NEXT: movb %dl, %al -; AVX-NEXT: subb %sil, %al -; AVX-NEXT: shrb %al -; AVX-NEXT: addb %sil, %al -; AVX-NEXT: shrb $2, %al -; AVX-NEXT: mulb %cl -; AVX-NEXT: subb %al, %dl -; AVX-NEXT: movzbl %dl, %eax -; AVX-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrb $5, %xmm0, %edx -; AVX-NEXT: imull $37, %edx, %esi -; AVX-NEXT: shrl $8, %esi -; AVX-NEXT: movb %dl, %al -; AVX-NEXT: subb %sil, %al -; AVX-NEXT: shrb %al -; AVX-NEXT: addb %sil, %al -; AVX-NEXT: shrb $2, %al -; AVX-NEXT: mulb %cl -; AVX-NEXT: subb %al, %dl -; AVX-NEXT: movzbl %dl, %eax -; AVX-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrb $6, %xmm0, %edx -; AVX-NEXT: imull $37, %edx, %esi -; AVX-NEXT: shrl $8, %esi -; AVX-NEXT: movb %dl, %al -; AVX-NEXT: subb %sil, %al -; 
AVX-NEXT: shrb %al -; AVX-NEXT: addb %sil, %al -; AVX-NEXT: shrb $2, %al -; AVX-NEXT: mulb %cl -; AVX-NEXT: subb %al, %dl -; AVX-NEXT: movzbl %dl, %eax -; AVX-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrb $7, %xmm0, %edx -; AVX-NEXT: imull $37, %edx, %esi -; AVX-NEXT: shrl $8, %esi -; AVX-NEXT: movb %dl, %al -; AVX-NEXT: subb %sil, %al -; AVX-NEXT: shrb %al -; AVX-NEXT: addb %sil, %al -; AVX-NEXT: shrb $2, %al -; AVX-NEXT: mulb %cl -; AVX-NEXT: subb %al, %dl -; AVX-NEXT: movzbl %dl, %eax -; AVX-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrb $8, %xmm0, %edx -; AVX-NEXT: imull $37, %edx, %esi -; AVX-NEXT: shrl $8, %esi -; AVX-NEXT: movb %dl, %al -; AVX-NEXT: subb %sil, %al -; AVX-NEXT: shrb %al -; AVX-NEXT: addb %sil, %al -; AVX-NEXT: shrb $2, %al -; AVX-NEXT: mulb %cl -; AVX-NEXT: subb %al, %dl -; AVX-NEXT: movzbl %dl, %eax -; AVX-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrb $9, %xmm0, %edx -; AVX-NEXT: imull $37, %edx, %esi -; AVX-NEXT: shrl $8, %esi -; AVX-NEXT: movb %dl, %al -; AVX-NEXT: subb %sil, %al -; AVX-NEXT: shrb %al -; AVX-NEXT: addb %sil, %al -; AVX-NEXT: shrb $2, %al -; AVX-NEXT: mulb %cl -; AVX-NEXT: subb %al, %dl -; AVX-NEXT: movzbl %dl, %eax -; AVX-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrb $10, %xmm0, %edx -; AVX-NEXT: imull $37, %edx, %esi -; AVX-NEXT: shrl $8, %esi -; AVX-NEXT: movb %dl, %al -; AVX-NEXT: subb %sil, %al -; AVX-NEXT: shrb %al -; AVX-NEXT: addb %sil, %al -; AVX-NEXT: shrb $2, %al -; AVX-NEXT: mulb %cl -; AVX-NEXT: subb %al, %dl -; AVX-NEXT: movzbl %dl, %eax -; AVX-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrb $11, %xmm0, %edx -; AVX-NEXT: imull $37, %edx, %esi -; AVX-NEXT: shrl $8, %esi -; AVX-NEXT: movb %dl, %al -; AVX-NEXT: subb %sil, %al -; AVX-NEXT: shrb %al -; AVX-NEXT: addb %sil, %al -; AVX-NEXT: shrb $2, %al -; AVX-NEXT: mulb %cl -; AVX-NEXT: subb %al, %dl -; AVX-NEXT: movzbl %dl, %eax -; AVX-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrb $12, %xmm0, %edx -; AVX-NEXT: imull $37, %edx, %esi -; AVX-NEXT: shrl $8, %esi -; AVX-NEXT: movb %dl, %al -; AVX-NEXT: subb %sil, %al -; AVX-NEXT: shrb %al -; AVX-NEXT: addb %sil, %al -; AVX-NEXT: shrb $2, %al -; AVX-NEXT: mulb %cl -; AVX-NEXT: subb %al, %dl -; AVX-NEXT: movzbl %dl, %eax -; AVX-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrb $13, %xmm0, %edx -; AVX-NEXT: imull $37, %edx, %esi -; AVX-NEXT: shrl $8, %esi -; AVX-NEXT: movb %dl, %al -; AVX-NEXT: subb %sil, %al -; AVX-NEXT: shrb %al -; AVX-NEXT: addb %sil, %al -; AVX-NEXT: shrb $2, %al -; AVX-NEXT: mulb %cl -; AVX-NEXT: subb %al, %dl -; AVX-NEXT: movzbl %dl, %eax -; AVX-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrb $14, %xmm0, %edx -; AVX-NEXT: imull $37, %edx, %esi -; AVX-NEXT: shrl $8, %esi -; AVX-NEXT: movb %dl, %al -; AVX-NEXT: subb %sil, %al -; AVX-NEXT: shrb %al -; AVX-NEXT: addb %sil, %al -; AVX-NEXT: shrb $2, %al -; AVX-NEXT: mulb %cl -; AVX-NEXT: subb %al, %dl -; AVX-NEXT: movzbl %dl, %eax -; AVX-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrb $15, %xmm0, %edx -; AVX-NEXT: imull $37, %edx, %esi -; AVX-NEXT: shrl $8, %esi -; AVX-NEXT: movb %dl, %al -; AVX-NEXT: subb %sil, %al -; AVX-NEXT: shrb %al -; AVX-NEXT: addb %sil, %al -; AVX-NEXT: shrb $2, %al -; AVX-NEXT: mulb %cl -; AVX-NEXT: subb %al, %dl -; AVX-NEXT: movzbl %dl, %eax -; AVX-NEXT: vpinsrb $15, %eax, %xmm1, %xmm0 -; AVX-NEXT: retq +; AVX1-LABEL: test_rem7_16i8: +; AVX1: # BB#0: +; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = 
xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; AVX1-NEXT: vpmullw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlw $8, %xmm1, %xmm1
+; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[2,3,0,1]
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero
+; AVX1-NEXT: vpmullw %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
+; AVX1-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpsubb %xmm1, %xmm0, %xmm2
+; AVX1-NEXT: vpsrlw $1, %xmm2, %xmm2
+; AVX1-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX1-NEXT: vpaddb %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vpsrlw $2, %xmm1, %xmm1
+; AVX1-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT: vpmovsxbw %xmm1, %xmm2
+; AVX1-NEXT: vpmovsxbw {{.*}}(%rip), %xmm3
+; AVX1-NEXT: vpmullw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
+; AVX1-NEXT: vpand %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; AVX1-NEXT: vpmovsxbw %xmm1, %xmm1
+; AVX1-NEXT: vpmullw %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vpackuswb %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vpsubb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_rem7_16i8:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero
+; AVX2-NEXT: vpmullw %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vpsrlw $8, %ymm1, %ymm1
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX2-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
+; AVX2-NEXT: vpsubb %xmm1, %xmm0, %xmm2
+; AVX2-NEXT: vpsrlw $1, %xmm2, %xmm2
+; AVX2-NEXT: vpand {{.*}}(%rip), %xmm2, %xmm2
+; AVX2-NEXT: vpaddb %xmm1, %xmm2, %xmm1
+; AVX2-NEXT: vpsrlw $2, %xmm1, %xmm1
+; AVX2-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
+; AVX2-NEXT: vpmovsxbw %xmm1, %ymm1
+; AVX2-NEXT: vpmovsxbw {{.*}}(%rip), %ymm2
+; AVX2-NEXT: vpmullw %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX2-NEXT: vpshufb %xmm3, %xmm2, %xmm2
+; AVX2-NEXT: vpshufb %xmm3, %xmm1, %xmm1
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; AVX2-NEXT: vpsubb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
  %res = urem <16 x i8> %a, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
  ret <16 x i8> %res
 }
Index: test/CodeGen/X86/vector-idiv-udiv-256.ll
===================================================================
--- test/CodeGen/X86/vector-idiv-udiv-256.ll
+++ test/CodeGen/X86/vector-idiv-udiv-256.ll
@@ -184,154 +184,19 @@
 define <16 x i16> @test_div7_16i16(<16 x i16> %a) nounwind {
 ; AVX1-LABEL: test_div7_16i16:
 ; AVX1: # BB#0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpextrw $1, %xmm1, %eax
-; AVX1-NEXT: imull $9363, %eax, %ecx # imm = 0x2493
-; AVX1-NEXT: shrl $16, %ecx
-; AVX1-NEXT: subl %ecx, %eax
-; AVX1-NEXT: andl $65534, %eax # imm = 0xFFFE
-; AVX1-NEXT: shrl %eax
-; AVX1-NEXT: addl %ecx, %eax
-; AVX1-NEXT: shrl $2, %eax
-; AVX1-NEXT:
vmovd %xmm1, %ecx -; AVX1-NEXT: movzwl %cx, %edx -; AVX1-NEXT: imull $9363, %edx, %edx # imm = 0x2493 -; AVX1-NEXT: shrl $16, %edx -; AVX1-NEXT: subl %edx, %ecx -; AVX1-NEXT: andl $65534, %ecx # imm = 0xFFFE -; AVX1-NEXT: shrl %ecx -; AVX1-NEXT: addl %edx, %ecx -; AVX1-NEXT: shrl $2, %ecx -; AVX1-NEXT: vmovd %ecx, %xmm2 -; AVX1-NEXT: vpinsrw $1, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrw $2, %xmm1, %eax -; AVX1-NEXT: imull $9363, %eax, %ecx # imm = 0x2493 -; AVX1-NEXT: shrl $16, %ecx -; AVX1-NEXT: subl %ecx, %eax -; AVX1-NEXT: andl $65534, %eax # imm = 0xFFFE -; AVX1-NEXT: shrl %eax -; AVX1-NEXT: addl %ecx, %eax -; AVX1-NEXT: shrl $2, %eax -; AVX1-NEXT: vpinsrw $2, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrw $3, %xmm1, %eax -; AVX1-NEXT: imull $9363, %eax, %ecx # imm = 0x2493 -; AVX1-NEXT: shrl $16, %ecx -; AVX1-NEXT: subl %ecx, %eax -; AVX1-NEXT: andl $65534, %eax # imm = 0xFFFE -; AVX1-NEXT: shrl %eax -; AVX1-NEXT: addl %ecx, %eax -; AVX1-NEXT: shrl $2, %eax -; AVX1-NEXT: vpinsrw $3, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrw $4, %xmm1, %eax -; AVX1-NEXT: imull $9363, %eax, %ecx # imm = 0x2493 -; AVX1-NEXT: shrl $16, %ecx -; AVX1-NEXT: subl %ecx, %eax -; AVX1-NEXT: andl $65534, %eax # imm = 0xFFFE -; AVX1-NEXT: shrl %eax -; AVX1-NEXT: addl %ecx, %eax -; AVX1-NEXT: shrl $2, %eax -; AVX1-NEXT: vpinsrw $4, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrw $5, %xmm1, %eax -; AVX1-NEXT: imull $9363, %eax, %ecx # imm = 0x2493 -; AVX1-NEXT: shrl $16, %ecx -; AVX1-NEXT: subl %ecx, %eax -; AVX1-NEXT: andl $65534, %eax # imm = 0xFFFE -; AVX1-NEXT: shrl %eax -; AVX1-NEXT: addl %ecx, %eax -; AVX1-NEXT: shrl $2, %eax -; AVX1-NEXT: vpinsrw $5, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrw $6, %xmm1, %eax -; AVX1-NEXT: imull $9363, %eax, %ecx # imm = 0x2493 -; AVX1-NEXT: shrl $16, %ecx -; AVX1-NEXT: subl %ecx, %eax -; AVX1-NEXT: andl $65534, %eax # imm = 0xFFFE -; AVX1-NEXT: shrl %eax -; AVX1-NEXT: addl %ecx, %eax -; AVX1-NEXT: shrl $2, %eax -; AVX1-NEXT: vpinsrw $6, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrw $7, %xmm1, %eax -; AVX1-NEXT: imull $9363, %eax, %ecx # imm = 0x2493 -; AVX1-NEXT: shrl $16, %ecx -; AVX1-NEXT: subl %ecx, %eax -; AVX1-NEXT: andl $65534, %eax # imm = 0xFFFE -; AVX1-NEXT: shrl %eax -; AVX1-NEXT: addl %ecx, %eax -; AVX1-NEXT: shrl $2, %eax -; AVX1-NEXT: vpinsrw $7, %eax, %xmm2, %xmm1 -; AVX1-NEXT: vpextrw $1, %xmm0, %eax -; AVX1-NEXT: imull $9363, %eax, %ecx # imm = 0x2493 -; AVX1-NEXT: shrl $16, %ecx -; AVX1-NEXT: subl %ecx, %eax -; AVX1-NEXT: andl $65534, %eax # imm = 0xFFFE -; AVX1-NEXT: shrl %eax -; AVX1-NEXT: addl %ecx, %eax -; AVX1-NEXT: shrl $2, %eax -; AVX1-NEXT: vmovd %xmm0, %ecx -; AVX1-NEXT: movzwl %cx, %edx -; AVX1-NEXT: imull $9363, %edx, %edx # imm = 0x2493 -; AVX1-NEXT: shrl $16, %edx -; AVX1-NEXT: subl %edx, %ecx -; AVX1-NEXT: andl $65534, %ecx # imm = 0xFFFE -; AVX1-NEXT: shrl %ecx -; AVX1-NEXT: addl %edx, %ecx -; AVX1-NEXT: shrl $2, %ecx -; AVX1-NEXT: vmovd %ecx, %xmm2 -; AVX1-NEXT: vpinsrw $1, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrw $2, %xmm0, %eax -; AVX1-NEXT: imull $9363, %eax, %ecx # imm = 0x2493 -; AVX1-NEXT: shrl $16, %ecx -; AVX1-NEXT: subl %ecx, %eax -; AVX1-NEXT: andl $65534, %eax # imm = 0xFFFE -; AVX1-NEXT: shrl %eax -; AVX1-NEXT: addl %ecx, %eax -; AVX1-NEXT: shrl $2, %eax -; AVX1-NEXT: vpinsrw $2, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrw $3, %xmm0, %eax -; AVX1-NEXT: imull $9363, %eax, %ecx # imm = 0x2493 -; AVX1-NEXT: shrl $16, %ecx -; AVX1-NEXT: subl %ecx, %eax -; AVX1-NEXT: andl $65534, %eax # imm = 0xFFFE -; AVX1-NEXT: shrl %eax -; AVX1-NEXT: addl %ecx, %eax -; 
AVX1-NEXT: shrl $2, %eax
-; AVX1-NEXT: vpinsrw $3, %eax, %xmm2, %xmm2
-; AVX1-NEXT: vpextrw $4, %xmm0, %eax
-; AVX1-NEXT: imull $9363, %eax, %ecx # imm = 0x2493
-; AVX1-NEXT: shrl $16, %ecx
-; AVX1-NEXT: subl %ecx, %eax
-; AVX1-NEXT: andl $65534, %eax # imm = 0xFFFE
-; AVX1-NEXT: shrl %eax
-; AVX1-NEXT: addl %ecx, %eax
-; AVX1-NEXT: shrl $2, %eax
-; AVX1-NEXT: vpinsrw $4, %eax, %xmm2, %xmm2
-; AVX1-NEXT: vpextrw $5, %xmm0, %eax
-; AVX1-NEXT: imull $9363, %eax, %ecx # imm = 0x2493
-; AVX1-NEXT: shrl $16, %ecx
-; AVX1-NEXT: subl %ecx, %eax
-; AVX1-NEXT: andl $65534, %eax # imm = 0xFFFE
-; AVX1-NEXT: shrl %eax
-; AVX1-NEXT: addl %ecx, %eax
-; AVX1-NEXT: shrl $2, %eax
-; AVX1-NEXT: vpinsrw $5, %eax, %xmm2, %xmm2
-; AVX1-NEXT: vpextrw $6, %xmm0, %eax
-; AVX1-NEXT: imull $9363, %eax, %ecx # imm = 0x2493
-; AVX1-NEXT: shrl $16, %ecx
-; AVX1-NEXT: subl %ecx, %eax
-; AVX1-NEXT: andl $65534, %eax # imm = 0xFFFE
-; AVX1-NEXT: shrl %eax
-; AVX1-NEXT: addl %ecx, %eax
-; AVX1-NEXT: shrl $2, %eax
-; AVX1-NEXT: vpinsrw $6, %eax, %xmm2, %xmm2
-; AVX1-NEXT: vpextrw $7, %xmm0, %eax
-; AVX1-NEXT: imull $9363, %eax, %ecx # imm = 0x2493
-; AVX1-NEXT: shrl $16, %ecx
-; AVX1-NEXT: subl %ecx, %eax
-; AVX1-NEXT: andl $65534, %eax # imm = 0xFFFE
-; AVX1-NEXT: shrl %eax
-; AVX1-NEXT: addl %ecx, %eax
-; AVX1-NEXT: shrl $2, %eax
-; AVX1-NEXT: vpinsrw $7, %eax, %xmm2, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [9363,9363,9363,9363,9363,9363,9363,9363]
+; AVX1-NEXT: vpmulhuw %xmm1, %xmm0, %xmm2
+; AVX1-NEXT: vpsubw %xmm2, %xmm0, %xmm3
+; AVX1-NEXT: vpsrlw $1, %xmm3, %xmm3
+; AVX1-NEXT: vpaddw %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpsrlw $2, %xmm2, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpmulhuw %xmm1, %xmm0, %xmm1
+; AVX1-NEXT: vpsubw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX1-NEXT: vpaddw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $2, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: test_div7_16i16:
@@ -350,589 +215,62 @@
 ; AVX1-LABEL: test_div7_32i8:
 ; AVX1: # BB#0:
 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpextrb $1, %xmm1, %eax
-; AVX1-NEXT: imull $37, %eax, %ecx
-; AVX1-NEXT: shrl $8, %ecx
-; AVX1-NEXT: subb %cl, %al
-; AVX1-NEXT: shrb %al
-; AVX1-NEXT: addb %cl, %al
-; AVX1-NEXT: shrb $2, %al
-; AVX1-NEXT: movzbl %al, %eax
-; AVX1-NEXT: vpextrb $0, %xmm1, %ecx
-; AVX1-NEXT: imull $37, %ecx, %edx
-; AVX1-NEXT: shrl $8, %edx
-; AVX1-NEXT: subb %dl, %cl
-; AVX1-NEXT: shrb %cl
-; AVX1-NEXT: addb %dl, %cl
-; AVX1-NEXT: shrb $2, %cl
-; AVX1-NEXT: movzbl %cl, %ecx
-; AVX1-NEXT: vmovd %ecx, %xmm2
-; AVX1-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2
-; AVX1-NEXT: vpextrb $2, %xmm1, %eax
-; AVX1-NEXT: imull $37, %eax, %ecx
-; AVX1-NEXT: shrl $8, %ecx
-; AVX1-NEXT: subb %cl, %al
-; AVX1-NEXT: shrb %al
-; AVX1-NEXT: addb %cl, %al
-; AVX1-NEXT: shrb $2, %al
-; AVX1-NEXT: movzbl %al, %eax
-; AVX1-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2
-; AVX1-NEXT: vpextrb $3, %xmm1, %eax
-; AVX1-NEXT: imull $37, %eax, %ecx
-; AVX1-NEXT: shrl $8, %ecx
-; AVX1-NEXT: subb %cl, %al
-; AVX1-NEXT: shrb %al
-; AVX1-NEXT: addb %cl, %al
-; AVX1-NEXT: shrb $2, %al
-; AVX1-NEXT: movzbl %al, %eax
-; AVX1-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2
-; AVX1-NEXT: vpextrb $4, %xmm1, %eax
-; AVX1-NEXT: imull $37, %eax, %ecx
-; AVX1-NEXT: shrl $8, %ecx
-; AVX1-NEXT: subb %cl, %al
-; AVX1-NEXT: shrb %al
-; AVX1-NEXT: addb %cl, %al
-; AVX1-NEXT: shrb $2, %al
-; AVX1-NEXT: movzbl %al, %eax
-; AVX1-NEXT: vpinsrb $4, %eax,
%xmm2, %xmm2 -; AVX1-NEXT: vpextrb $5, %xmm1, %eax -; AVX1-NEXT: imull $37, %eax, %ecx -; AVX1-NEXT: shrl $8, %ecx -; AVX1-NEXT: subb %cl, %al -; AVX1-NEXT: shrb %al -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: shrb $2, %al -; AVX1-NEXT: movzbl %al, %eax -; AVX1-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $6, %xmm1, %eax -; AVX1-NEXT: imull $37, %eax, %ecx -; AVX1-NEXT: shrl $8, %ecx -; AVX1-NEXT: subb %cl, %al -; AVX1-NEXT: shrb %al -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: shrb $2, %al -; AVX1-NEXT: movzbl %al, %eax -; AVX1-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $7, %xmm1, %eax -; AVX1-NEXT: imull $37, %eax, %ecx -; AVX1-NEXT: shrl $8, %ecx -; AVX1-NEXT: subb %cl, %al -; AVX1-NEXT: shrb %al -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: shrb $2, %al -; AVX1-NEXT: movzbl %al, %eax -; AVX1-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $8, %xmm1, %eax -; AVX1-NEXT: imull $37, %eax, %ecx -; AVX1-NEXT: shrl $8, %ecx -; AVX1-NEXT: subb %cl, %al -; AVX1-NEXT: shrb %al -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: shrb $2, %al -; AVX1-NEXT: movzbl %al, %eax -; AVX1-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $9, %xmm1, %eax -; AVX1-NEXT: imull $37, %eax, %ecx -; AVX1-NEXT: shrl $8, %ecx -; AVX1-NEXT: subb %cl, %al -; AVX1-NEXT: shrb %al -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: shrb $2, %al -; AVX1-NEXT: movzbl %al, %eax -; AVX1-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $10, %xmm1, %eax -; AVX1-NEXT: imull $37, %eax, %ecx -; AVX1-NEXT: shrl $8, %ecx -; AVX1-NEXT: subb %cl, %al -; AVX1-NEXT: shrb %al -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: shrb $2, %al -; AVX1-NEXT: movzbl %al, %eax -; AVX1-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $11, %xmm1, %eax -; AVX1-NEXT: imull $37, %eax, %ecx -; AVX1-NEXT: shrl $8, %ecx -; AVX1-NEXT: subb %cl, %al -; AVX1-NEXT: shrb %al -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: shrb $2, %al -; AVX1-NEXT: movzbl %al, %eax -; AVX1-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $12, %xmm1, %eax -; AVX1-NEXT: imull $37, %eax, %ecx -; AVX1-NEXT: shrl $8, %ecx -; AVX1-NEXT: subb %cl, %al -; AVX1-NEXT: shrb %al -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: shrb $2, %al -; AVX1-NEXT: movzbl %al, %eax -; AVX1-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $13, %xmm1, %eax -; AVX1-NEXT: imull $37, %eax, %ecx -; AVX1-NEXT: shrl $8, %ecx -; AVX1-NEXT: subb %cl, %al -; AVX1-NEXT: shrb %al -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: shrb $2, %al -; AVX1-NEXT: movzbl %al, %eax -; AVX1-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $14, %xmm1, %eax -; AVX1-NEXT: imull $37, %eax, %ecx -; AVX1-NEXT: shrl $8, %ecx -; AVX1-NEXT: subb %cl, %al -; AVX1-NEXT: shrb %al -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: shrb $2, %al -; AVX1-NEXT: movzbl %al, %eax -; AVX1-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $15, %xmm1, %eax -; AVX1-NEXT: imull $37, %eax, %ecx -; AVX1-NEXT: shrl $8, %ecx -; AVX1-NEXT: subb %cl, %al -; AVX1-NEXT: shrb %al -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: shrb $2, %al -; AVX1-NEXT: movzbl %al, %eax -; AVX1-NEXT: vpinsrb $15, %eax, %xmm2, %xmm1 -; AVX1-NEXT: vpextrb $1, %xmm0, %eax -; AVX1-NEXT: imull $37, %eax, %ecx -; AVX1-NEXT: shrl $8, %ecx -; AVX1-NEXT: subb %cl, %al -; AVX1-NEXT: shrb %al -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: shrb $2, %al -; AVX1-NEXT: movzbl %al, %eax -; AVX1-NEXT: vpextrb $0, %xmm0, %ecx -; AVX1-NEXT: imull $37, %ecx, %edx -; AVX1-NEXT: shrl $8, %edx -; AVX1-NEXT: subb %dl, %cl -; AVX1-NEXT: shrb %cl -; AVX1-NEXT: 
addb %dl, %cl -; AVX1-NEXT: shrb $2, %cl -; AVX1-NEXT: movzbl %cl, %ecx -; AVX1-NEXT: vmovd %ecx, %xmm2 -; AVX1-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $2, %xmm0, %eax -; AVX1-NEXT: imull $37, %eax, %ecx -; AVX1-NEXT: shrl $8, %ecx -; AVX1-NEXT: subb %cl, %al -; AVX1-NEXT: shrb %al -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: shrb $2, %al -; AVX1-NEXT: movzbl %al, %eax -; AVX1-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $3, %xmm0, %eax -; AVX1-NEXT: imull $37, %eax, %ecx -; AVX1-NEXT: shrl $8, %ecx -; AVX1-NEXT: subb %cl, %al -; AVX1-NEXT: shrb %al -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: shrb $2, %al -; AVX1-NEXT: movzbl %al, %eax -; AVX1-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $4, %xmm0, %eax -; AVX1-NEXT: imull $37, %eax, %ecx -; AVX1-NEXT: shrl $8, %ecx -; AVX1-NEXT: subb %cl, %al -; AVX1-NEXT: shrb %al -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: shrb $2, %al -; AVX1-NEXT: movzbl %al, %eax -; AVX1-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $5, %xmm0, %eax -; AVX1-NEXT: imull $37, %eax, %ecx -; AVX1-NEXT: shrl $8, %ecx -; AVX1-NEXT: subb %cl, %al -; AVX1-NEXT: shrb %al -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: shrb $2, %al -; AVX1-NEXT: movzbl %al, %eax -; AVX1-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $6, %xmm0, %eax -; AVX1-NEXT: imull $37, %eax, %ecx -; AVX1-NEXT: shrl $8, %ecx -; AVX1-NEXT: subb %cl, %al -; AVX1-NEXT: shrb %al -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: shrb $2, %al -; AVX1-NEXT: movzbl %al, %eax -; AVX1-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $7, %xmm0, %eax -; AVX1-NEXT: imull $37, %eax, %ecx -; AVX1-NEXT: shrl $8, %ecx -; AVX1-NEXT: subb %cl, %al -; AVX1-NEXT: shrb %al -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: shrb $2, %al -; AVX1-NEXT: movzbl %al, %eax -; AVX1-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $8, %xmm0, %eax -; AVX1-NEXT: imull $37, %eax, %ecx -; AVX1-NEXT: shrl $8, %ecx -; AVX1-NEXT: subb %cl, %al -; AVX1-NEXT: shrb %al -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: shrb $2, %al -; AVX1-NEXT: movzbl %al, %eax -; AVX1-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $9, %xmm0, %eax -; AVX1-NEXT: imull $37, %eax, %ecx -; AVX1-NEXT: shrl $8, %ecx -; AVX1-NEXT: subb %cl, %al -; AVX1-NEXT: shrb %al -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: shrb $2, %al -; AVX1-NEXT: movzbl %al, %eax -; AVX1-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $10, %xmm0, %eax -; AVX1-NEXT: imull $37, %eax, %ecx -; AVX1-NEXT: shrl $8, %ecx -; AVX1-NEXT: subb %cl, %al -; AVX1-NEXT: shrb %al -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: shrb $2, %al -; AVX1-NEXT: movzbl %al, %eax -; AVX1-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $11, %xmm0, %eax -; AVX1-NEXT: imull $37, %eax, %ecx -; AVX1-NEXT: shrl $8, %ecx -; AVX1-NEXT: subb %cl, %al -; AVX1-NEXT: shrb %al -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: shrb $2, %al -; AVX1-NEXT: movzbl %al, %eax -; AVX1-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $12, %xmm0, %eax -; AVX1-NEXT: imull $37, %eax, %ecx -; AVX1-NEXT: shrl $8, %ecx -; AVX1-NEXT: subb %cl, %al -; AVX1-NEXT: shrb %al -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: shrb $2, %al -; AVX1-NEXT: movzbl %al, %eax -; AVX1-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $13, %xmm0, %eax -; AVX1-NEXT: imull $37, %eax, %ecx -; AVX1-NEXT: shrl $8, %ecx -; AVX1-NEXT: subb %cl, %al -; AVX1-NEXT: shrb %al -; AVX1-NEXT: addb %cl, %al -; AVX1-NEXT: shrb $2, %al -; AVX1-NEXT: movzbl %al, %eax -; AVX1-NEXT: vpinsrb $13, %eax, 
%xmm2, %xmm2
-; AVX1-NEXT: vpextrb $14, %xmm0, %eax
-; AVX1-NEXT: imull $37, %eax, %ecx
-; AVX1-NEXT: shrl $8, %ecx
-; AVX1-NEXT: subb %cl, %al
-; AVX1-NEXT: shrb %al
-; AVX1-NEXT: addb %cl, %al
-; AVX1-NEXT: shrb $2, %al
-; AVX1-NEXT: movzbl %al, %eax
-; AVX1-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2
-; AVX1-NEXT: vpextrb $15, %xmm0, %eax
-; AVX1-NEXT: imull $37, %eax, %ecx
-; AVX1-NEXT: shrl $8, %ecx
-; AVX1-NEXT: subb %cl, %al
-; AVX1-NEXT: shrb %al
-; AVX1-NEXT: addb %cl, %al
-; AVX1-NEXT: shrb $2, %al
-; AVX1-NEXT: movzbl %al, %eax
-; AVX1-NEXT: vpinsrb $15, %eax, %xmm2, %xmm0
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm3 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; AVX1-NEXT: vpmullw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
+; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[2,3,0,1]
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero
+; AVX1-NEXT: vpmullw %xmm3, %xmm4, %xmm4
+; AVX1-NEXT: vpsrlw $8, %xmm4, %xmm4
+; AVX1-NEXT: vpackuswb %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpsubb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm1
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vpaddb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlw $2, %xmm1, %xmm1
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
+; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm5 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX1-NEXT: vpmullw %xmm3, %xmm5, %xmm5
+; AVX1-NEXT: vpsrlw $8, %xmm5, %xmm5
+; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm0[2,3,0,1]
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm6 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero,xmm6[4],zero,xmm6[5],zero,xmm6[6],zero,xmm6[7],zero
+; AVX1-NEXT: vpmullw %xmm3, %xmm6, %xmm3
+; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
+; AVX1-NEXT: vpackuswb %xmm3, %xmm5, %xmm3
+; AVX1-NEXT: vpsubb %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $1, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vpaddb %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $2, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: test_div7_32i8:
 ; AVX2: # BB#0:
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpextrb $1, %xmm1, %eax
-; AVX2-NEXT: imull $37, %eax, %ecx
-; AVX2-NEXT: shrl $8, %ecx
-; AVX2-NEXT: subb %cl, %al
-; AVX2-NEXT: shrb %al
-; AVX2-NEXT: addb %cl, %al
-; AVX2-NEXT: shrb $2, %al
-; AVX2-NEXT: movzbl %al, %eax
-; AVX2-NEXT: vpextrb $0, %xmm1, %ecx
-; AVX2-NEXT: imull $37, %ecx, %edx
-; AVX2-NEXT: shrl $8, %edx
-; AVX2-NEXT: subb %dl, %cl
-; AVX2-NEXT: shrb %cl
-; AVX2-NEXT: addb %dl, %cl
-; AVX2-NEXT: shrb $2, %cl
-; AVX2-NEXT: movzbl %cl, %ecx
-; AVX2-NEXT: vmovd %ecx, %xmm2
-; AVX2-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2
-; AVX2-NEXT: vpextrb $2, %xmm1, %eax
-; AVX2-NEXT: imull $37, %eax, %ecx
-; AVX2-NEXT: shrl $8, %ecx
-; AVX2-NEXT: subb %cl, %al
-; AVX2-NEXT: shrb %al
-; AVX2-NEXT: addb %cl, %al
-; AVX2-NEXT: shrb $2, %al
-; AVX2-NEXT: movzbl %al, %eax
-; AVX2-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2
-; AVX2-NEXT: vpextrb $3, %xmm1, %eax
-; AVX2-NEXT: imull $37, %eax, %ecx
-;
AVX2-NEXT: shrl $8, %ecx -; AVX2-NEXT: subb %cl, %al -; AVX2-NEXT: shrb %al -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: shrb $2, %al -; AVX2-NEXT: movzbl %al, %eax -; AVX2-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $4, %xmm1, %eax -; AVX2-NEXT: imull $37, %eax, %ecx -; AVX2-NEXT: shrl $8, %ecx -; AVX2-NEXT: subb %cl, %al -; AVX2-NEXT: shrb %al -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: shrb $2, %al -; AVX2-NEXT: movzbl %al, %eax -; AVX2-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $5, %xmm1, %eax -; AVX2-NEXT: imull $37, %eax, %ecx -; AVX2-NEXT: shrl $8, %ecx -; AVX2-NEXT: subb %cl, %al -; AVX2-NEXT: shrb %al -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: shrb $2, %al -; AVX2-NEXT: movzbl %al, %eax -; AVX2-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $6, %xmm1, %eax -; AVX2-NEXT: imull $37, %eax, %ecx -; AVX2-NEXT: shrl $8, %ecx -; AVX2-NEXT: subb %cl, %al -; AVX2-NEXT: shrb %al -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: shrb $2, %al -; AVX2-NEXT: movzbl %al, %eax -; AVX2-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $7, %xmm1, %eax -; AVX2-NEXT: imull $37, %eax, %ecx -; AVX2-NEXT: shrl $8, %ecx -; AVX2-NEXT: subb %cl, %al -; AVX2-NEXT: shrb %al -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: shrb $2, %al -; AVX2-NEXT: movzbl %al, %eax -; AVX2-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $8, %xmm1, %eax -; AVX2-NEXT: imull $37, %eax, %ecx -; AVX2-NEXT: shrl $8, %ecx -; AVX2-NEXT: subb %cl, %al -; AVX2-NEXT: shrb %al -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: shrb $2, %al -; AVX2-NEXT: movzbl %al, %eax -; AVX2-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $9, %xmm1, %eax -; AVX2-NEXT: imull $37, %eax, %ecx -; AVX2-NEXT: shrl $8, %ecx -; AVX2-NEXT: subb %cl, %al -; AVX2-NEXT: shrb %al -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: shrb $2, %al -; AVX2-NEXT: movzbl %al, %eax -; AVX2-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $10, %xmm1, %eax -; AVX2-NEXT: imull $37, %eax, %ecx -; AVX2-NEXT: shrl $8, %ecx -; AVX2-NEXT: subb %cl, %al -; AVX2-NEXT: shrb %al -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: shrb $2, %al -; AVX2-NEXT: movzbl %al, %eax -; AVX2-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $11, %xmm1, %eax -; AVX2-NEXT: imull $37, %eax, %ecx -; AVX2-NEXT: shrl $8, %ecx -; AVX2-NEXT: subb %cl, %al -; AVX2-NEXT: shrb %al -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: shrb $2, %al -; AVX2-NEXT: movzbl %al, %eax -; AVX2-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $12, %xmm1, %eax -; AVX2-NEXT: imull $37, %eax, %ecx -; AVX2-NEXT: shrl $8, %ecx -; AVX2-NEXT: subb %cl, %al -; AVX2-NEXT: shrb %al -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: shrb $2, %al -; AVX2-NEXT: movzbl %al, %eax -; AVX2-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $13, %xmm1, %eax -; AVX2-NEXT: imull $37, %eax, %ecx -; AVX2-NEXT: shrl $8, %ecx -; AVX2-NEXT: subb %cl, %al -; AVX2-NEXT: shrb %al -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: shrb $2, %al -; AVX2-NEXT: movzbl %al, %eax -; AVX2-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $14, %xmm1, %eax -; AVX2-NEXT: imull $37, %eax, %ecx -; AVX2-NEXT: shrl $8, %ecx -; AVX2-NEXT: subb %cl, %al -; AVX2-NEXT: shrb %al -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: shrb $2, %al -; AVX2-NEXT: movzbl %al, %eax -; AVX2-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $15, %xmm1, %eax -; AVX2-NEXT: imull $37, %eax, %ecx -; AVX2-NEXT: shrl $8, %ecx -; AVX2-NEXT: subb %cl, %al -; AVX2-NEXT: shrb %al -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: shrb $2, %al -; 
AVX2-NEXT: movzbl %al, %eax -; AVX2-NEXT: vpinsrb $15, %eax, %xmm2, %xmm1 -; AVX2-NEXT: vpextrb $1, %xmm0, %eax -; AVX2-NEXT: imull $37, %eax, %ecx -; AVX2-NEXT: shrl $8, %ecx -; AVX2-NEXT: subb %cl, %al -; AVX2-NEXT: shrb %al -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: shrb $2, %al -; AVX2-NEXT: movzbl %al, %eax -; AVX2-NEXT: vpextrb $0, %xmm0, %ecx -; AVX2-NEXT: imull $37, %ecx, %edx -; AVX2-NEXT: shrl $8, %edx -; AVX2-NEXT: subb %dl, %cl -; AVX2-NEXT: shrb %cl -; AVX2-NEXT: addb %dl, %cl -; AVX2-NEXT: shrb $2, %cl -; AVX2-NEXT: movzbl %cl, %ecx -; AVX2-NEXT: vmovd %ecx, %xmm2 -; AVX2-NEXT: vpinsrb $1, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $2, %xmm0, %eax -; AVX2-NEXT: imull $37, %eax, %ecx -; AVX2-NEXT: shrl $8, %ecx -; AVX2-NEXT: subb %cl, %al -; AVX2-NEXT: shrb %al -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: shrb $2, %al -; AVX2-NEXT: movzbl %al, %eax -; AVX2-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $3, %xmm0, %eax -; AVX2-NEXT: imull $37, %eax, %ecx -; AVX2-NEXT: shrl $8, %ecx -; AVX2-NEXT: subb %cl, %al -; AVX2-NEXT: shrb %al -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: shrb $2, %al -; AVX2-NEXT: movzbl %al, %eax -; AVX2-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $4, %xmm0, %eax -; AVX2-NEXT: imull $37, %eax, %ecx -; AVX2-NEXT: shrl $8, %ecx -; AVX2-NEXT: subb %cl, %al -; AVX2-NEXT: shrb %al -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: shrb $2, %al -; AVX2-NEXT: movzbl %al, %eax -; AVX2-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $5, %xmm0, %eax -; AVX2-NEXT: imull $37, %eax, %ecx -; AVX2-NEXT: shrl $8, %ecx -; AVX2-NEXT: subb %cl, %al -; AVX2-NEXT: shrb %al -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: shrb $2, %al -; AVX2-NEXT: movzbl %al, %eax -; AVX2-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $6, %xmm0, %eax -; AVX2-NEXT: imull $37, %eax, %ecx -; AVX2-NEXT: shrl $8, %ecx -; AVX2-NEXT: subb %cl, %al -; AVX2-NEXT: shrb %al -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: shrb $2, %al -; AVX2-NEXT: movzbl %al, %eax -; AVX2-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $7, %xmm0, %eax -; AVX2-NEXT: imull $37, %eax, %ecx -; AVX2-NEXT: shrl $8, %ecx -; AVX2-NEXT: subb %cl, %al -; AVX2-NEXT: shrb %al -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: shrb $2, %al -; AVX2-NEXT: movzbl %al, %eax -; AVX2-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $8, %xmm0, %eax -; AVX2-NEXT: imull $37, %eax, %ecx -; AVX2-NEXT: shrl $8, %ecx -; AVX2-NEXT: subb %cl, %al -; AVX2-NEXT: shrb %al -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: shrb $2, %al -; AVX2-NEXT: movzbl %al, %eax -; AVX2-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $9, %xmm0, %eax -; AVX2-NEXT: imull $37, %eax, %ecx -; AVX2-NEXT: shrl $8, %ecx -; AVX2-NEXT: subb %cl, %al -; AVX2-NEXT: shrb %al -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: shrb $2, %al -; AVX2-NEXT: movzbl %al, %eax -; AVX2-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $10, %xmm0, %eax -; AVX2-NEXT: imull $37, %eax, %ecx -; AVX2-NEXT: shrl $8, %ecx -; AVX2-NEXT: subb %cl, %al -; AVX2-NEXT: shrb %al -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: shrb $2, %al -; AVX2-NEXT: movzbl %al, %eax -; AVX2-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $11, %xmm0, %eax -; AVX2-NEXT: imull $37, %eax, %ecx -; AVX2-NEXT: shrl $8, %ecx -; AVX2-NEXT: subb %cl, %al -; AVX2-NEXT: shrb %al -; AVX2-NEXT: addb %cl, %al -; AVX2-NEXT: shrb $2, %al -; AVX2-NEXT: movzbl %al, %eax -; AVX2-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $12, %xmm0, %eax -; AVX2-NEXT: imull $37, %eax, %ecx -; 
AVX2-NEXT: shrl $8, %ecx
-; AVX2-NEXT: subb %cl, %al
-; AVX2-NEXT: shrb %al
-; AVX2-NEXT: addb %cl, %al
-; AVX2-NEXT: shrb $2, %al
-; AVX2-NEXT: movzbl %al, %eax
-; AVX2-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2
-; AVX2-NEXT: vpextrb $13, %xmm0, %eax
-; AVX2-NEXT: imull $37, %eax, %ecx
-; AVX2-NEXT: shrl $8, %ecx
-; AVX2-NEXT: subb %cl, %al
-; AVX2-NEXT: shrb %al
-; AVX2-NEXT: addb %cl, %al
-; AVX2-NEXT: shrb $2, %al
-; AVX2-NEXT: movzbl %al, %eax
-; AVX2-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2
-; AVX2-NEXT: vpextrb $14, %xmm0, %eax
-; AVX2-NEXT: imull $37, %eax, %ecx
-; AVX2-NEXT: shrl $8, %ecx
-; AVX2-NEXT: subb %cl, %al
-; AVX2-NEXT: shrb %al
-; AVX2-NEXT: addb %cl, %al
-; AVX2-NEXT: shrb $2, %al
-; AVX2-NEXT: movzbl %al, %eax
-; AVX2-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2
-; AVX2-NEXT: vpextrb $15, %xmm0, %eax
-; AVX2-NEXT: imull $37, %eax, %ecx
-; AVX2-NEXT: shrl $8, %ecx
-; AVX2-NEXT: subb %cl, %al
-; AVX2-NEXT: shrb %al
-; AVX2-NEXT: addb %cl, %al
-; AVX2-NEXT: shrb $2, %al
-; AVX2-NEXT: movzbl %al, %eax
-; AVX2-NEXT: vpinsrb $15, %eax, %xmm2, %xmm0
-; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37]
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm3
+; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero
+; AVX2-NEXT: vpmullw %ymm2, %ymm3, %ymm2
+; AVX2-NEXT: vpsrlw $8, %ymm2, %ymm2
+; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX2-NEXT: vpmullw %ymm1, %ymm3, %ymm1
+; AVX2-NEXT: vpsrlw $8, %ymm1, %ymm1
+; AVX2-NEXT: vperm2i128 {{.*#+}} ymm3 = ymm1[2,3],ymm2[2,3]
+; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
+; AVX2-NEXT: vpackuswb %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpsubb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlw $1, %ymm0, %ymm0
+; AVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT: vpaddb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlw $2, %ymm0, %ymm0
+; AVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
 ; AVX2-NEXT: retq
 %res = udiv <32 x i8> %a, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
 ret <32 x i8> %res
@@ -1188,216 +526,22 @@
 ; AVX1-LABEL: test_rem7_16i16:
 ; AVX1: # BB#0:
 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpextrw $1, %xmm1, %eax
-; AVX1-NEXT: imull $9363, %eax, %ecx # imm = 0x2493
-; AVX1-NEXT: shrl $16, %ecx
-; AVX1-NEXT: movl %eax, %edx
-; AVX1-NEXT: subl %ecx, %edx
-; AVX1-NEXT: andl $65534, %edx # imm = 0xFFFE
-; AVX1-NEXT: shrl %edx
-; AVX1-NEXT: addl %ecx, %edx
-; AVX1-NEXT: shrl $2, %edx
-; AVX1-NEXT: leal (,%rdx,8), %ecx
-; AVX1-NEXT: subl %edx, %ecx
-; AVX1-NEXT: subl %ecx, %eax
-; AVX1-NEXT: vmovd %xmm1, %ecx
-; AVX1-NEXT:
movzwl %cx, %edx -; AVX1-NEXT: imull $9363, %edx, %edx # imm = 0x2493 -; AVX1-NEXT: shrl $16, %edx -; AVX1-NEXT: movl %ecx, %esi -; AVX1-NEXT: subl %edx, %esi -; AVX1-NEXT: andl $65534, %esi # imm = 0xFFFE -; AVX1-NEXT: shrl %esi -; AVX1-NEXT: addl %edx, %esi -; AVX1-NEXT: shrl $2, %esi -; AVX1-NEXT: leal (,%rsi,8), %edx -; AVX1-NEXT: subl %esi, %edx -; AVX1-NEXT: subl %edx, %ecx -; AVX1-NEXT: vmovd %ecx, %xmm2 -; AVX1-NEXT: vpinsrw $1, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrw $2, %xmm1, %eax -; AVX1-NEXT: imull $9363, %eax, %ecx # imm = 0x2493 -; AVX1-NEXT: shrl $16, %ecx -; AVX1-NEXT: movl %eax, %edx -; AVX1-NEXT: subl %ecx, %edx -; AVX1-NEXT: andl $65534, %edx # imm = 0xFFFE -; AVX1-NEXT: shrl %edx -; AVX1-NEXT: addl %ecx, %edx -; AVX1-NEXT: shrl $2, %edx -; AVX1-NEXT: leal (,%rdx,8), %ecx -; AVX1-NEXT: subl %edx, %ecx -; AVX1-NEXT: subl %ecx, %eax -; AVX1-NEXT: vpinsrw $2, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrw $3, %xmm1, %eax -; AVX1-NEXT: imull $9363, %eax, %ecx # imm = 0x2493 -; AVX1-NEXT: shrl $16, %ecx -; AVX1-NEXT: movl %eax, %edx -; AVX1-NEXT: subl %ecx, %edx -; AVX1-NEXT: andl $65534, %edx # imm = 0xFFFE -; AVX1-NEXT: shrl %edx -; AVX1-NEXT: addl %ecx, %edx -; AVX1-NEXT: shrl $2, %edx -; AVX1-NEXT: leal (,%rdx,8), %ecx -; AVX1-NEXT: subl %edx, %ecx -; AVX1-NEXT: subl %ecx, %eax -; AVX1-NEXT: vpinsrw $3, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrw $4, %xmm1, %eax -; AVX1-NEXT: imull $9363, %eax, %ecx # imm = 0x2493 -; AVX1-NEXT: shrl $16, %ecx -; AVX1-NEXT: movl %eax, %edx -; AVX1-NEXT: subl %ecx, %edx -; AVX1-NEXT: andl $65534, %edx # imm = 0xFFFE -; AVX1-NEXT: shrl %edx -; AVX1-NEXT: addl %ecx, %edx -; AVX1-NEXT: shrl $2, %edx -; AVX1-NEXT: leal (,%rdx,8), %ecx -; AVX1-NEXT: subl %edx, %ecx -; AVX1-NEXT: subl %ecx, %eax -; AVX1-NEXT: vpinsrw $4, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrw $5, %xmm1, %eax -; AVX1-NEXT: imull $9363, %eax, %ecx # imm = 0x2493 -; AVX1-NEXT: shrl $16, %ecx -; AVX1-NEXT: movl %eax, %edx -; AVX1-NEXT: subl %ecx, %edx -; AVX1-NEXT: andl $65534, %edx # imm = 0xFFFE -; AVX1-NEXT: shrl %edx -; AVX1-NEXT: addl %ecx, %edx -; AVX1-NEXT: shrl $2, %edx -; AVX1-NEXT: leal (,%rdx,8), %ecx -; AVX1-NEXT: subl %edx, %ecx -; AVX1-NEXT: subl %ecx, %eax -; AVX1-NEXT: vpinsrw $5, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrw $6, %xmm1, %eax -; AVX1-NEXT: imull $9363, %eax, %ecx # imm = 0x2493 -; AVX1-NEXT: shrl $16, %ecx -; AVX1-NEXT: movl %eax, %edx -; AVX1-NEXT: subl %ecx, %edx -; AVX1-NEXT: andl $65534, %edx # imm = 0xFFFE -; AVX1-NEXT: shrl %edx -; AVX1-NEXT: addl %ecx, %edx -; AVX1-NEXT: shrl $2, %edx -; AVX1-NEXT: leal (,%rdx,8), %ecx -; AVX1-NEXT: subl %edx, %ecx -; AVX1-NEXT: subl %ecx, %eax -; AVX1-NEXT: vpinsrw $6, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrw $7, %xmm1, %eax -; AVX1-NEXT: imull $9363, %eax, %ecx # imm = 0x2493 -; AVX1-NEXT: shrl $16, %ecx -; AVX1-NEXT: movl %eax, %edx -; AVX1-NEXT: subl %ecx, %edx -; AVX1-NEXT: andl $65534, %edx # imm = 0xFFFE -; AVX1-NEXT: shrl %edx -; AVX1-NEXT: addl %ecx, %edx -; AVX1-NEXT: shrl $2, %edx -; AVX1-NEXT: leal (,%rdx,8), %ecx -; AVX1-NEXT: subl %edx, %ecx -; AVX1-NEXT: subl %ecx, %eax -; AVX1-NEXT: vpinsrw $7, %eax, %xmm2, %xmm1 -; AVX1-NEXT: vpextrw $1, %xmm0, %eax -; AVX1-NEXT: imull $9363, %eax, %ecx # imm = 0x2493 -; AVX1-NEXT: shrl $16, %ecx -; AVX1-NEXT: movl %eax, %edx -; AVX1-NEXT: subl %ecx, %edx -; AVX1-NEXT: andl $65534, %edx # imm = 0xFFFE -; AVX1-NEXT: shrl %edx -; AVX1-NEXT: addl %ecx, %edx -; AVX1-NEXT: shrl $2, %edx -; AVX1-NEXT: leal (,%rdx,8), %ecx -; AVX1-NEXT: subl %edx, %ecx -; AVX1-NEXT: subl %ecx, 
%eax
-; AVX1-NEXT: vmovd %xmm0, %ecx
-; AVX1-NEXT: movzwl %cx, %edx
-; AVX1-NEXT: imull $9363, %edx, %edx # imm = 0x2493
-; AVX1-NEXT: shrl $16, %edx
-; AVX1-NEXT: movl %ecx, %esi
-; AVX1-NEXT: subl %edx, %esi
-; AVX1-NEXT: andl $65534, %esi # imm = 0xFFFE
-; AVX1-NEXT: shrl %esi
-; AVX1-NEXT: addl %edx, %esi
-; AVX1-NEXT: shrl $2, %esi
-; AVX1-NEXT: leal (,%rsi,8), %edx
-; AVX1-NEXT: subl %esi, %edx
-; AVX1-NEXT: subl %edx, %ecx
-; AVX1-NEXT: vmovd %ecx, %xmm2
-; AVX1-NEXT: vpinsrw $1, %eax, %xmm2, %xmm2
-; AVX1-NEXT: vpextrw $2, %xmm0, %eax
-; AVX1-NEXT: imull $9363, %eax, %ecx # imm = 0x2493
-; AVX1-NEXT: shrl $16, %ecx
-; AVX1-NEXT: movl %eax, %edx
-; AVX1-NEXT: subl %ecx, %edx
-; AVX1-NEXT: andl $65534, %edx # imm = 0xFFFE
-; AVX1-NEXT: shrl %edx
-; AVX1-NEXT: addl %ecx, %edx
-; AVX1-NEXT: shrl $2, %edx
-; AVX1-NEXT: leal (,%rdx,8), %ecx
-; AVX1-NEXT: subl %edx, %ecx
-; AVX1-NEXT: subl %ecx, %eax
-; AVX1-NEXT: vpinsrw $2, %eax, %xmm2, %xmm2
-; AVX1-NEXT: vpextrw $3, %xmm0, %eax
-; AVX1-NEXT: imull $9363, %eax, %ecx # imm = 0x2493
-; AVX1-NEXT: shrl $16, %ecx
-; AVX1-NEXT: movl %eax, %edx
-; AVX1-NEXT: subl %ecx, %edx
-; AVX1-NEXT: andl $65534, %edx # imm = 0xFFFE
-; AVX1-NEXT: shrl %edx
-; AVX1-NEXT: addl %ecx, %edx
-; AVX1-NEXT: shrl $2, %edx
-; AVX1-NEXT: leal (,%rdx,8), %ecx
-; AVX1-NEXT: subl %edx, %ecx
-; AVX1-NEXT: subl %ecx, %eax
-; AVX1-NEXT: vpinsrw $3, %eax, %xmm2, %xmm2
-; AVX1-NEXT: vpextrw $4, %xmm0, %eax
-; AVX1-NEXT: imull $9363, %eax, %ecx # imm = 0x2493
-; AVX1-NEXT: shrl $16, %ecx
-; AVX1-NEXT: movl %eax, %edx
-; AVX1-NEXT: subl %ecx, %edx
-; AVX1-NEXT: andl $65534, %edx # imm = 0xFFFE
-; AVX1-NEXT: shrl %edx
-; AVX1-NEXT: addl %ecx, %edx
-; AVX1-NEXT: shrl $2, %edx
-; AVX1-NEXT: leal (,%rdx,8), %ecx
-; AVX1-NEXT: subl %edx, %ecx
-; AVX1-NEXT: subl %ecx, %eax
-; AVX1-NEXT: vpinsrw $4, %eax, %xmm2, %xmm2
-; AVX1-NEXT: vpextrw $5, %xmm0, %eax
-; AVX1-NEXT: imull $9363, %eax, %ecx # imm = 0x2493
-; AVX1-NEXT: shrl $16, %ecx
-; AVX1-NEXT: movl %eax, %edx
-; AVX1-NEXT: subl %ecx, %edx
-; AVX1-NEXT: andl $65534, %edx # imm = 0xFFFE
-; AVX1-NEXT: shrl %edx
-; AVX1-NEXT: addl %ecx, %edx
-; AVX1-NEXT: shrl $2, %edx
-; AVX1-NEXT: leal (,%rdx,8), %ecx
-; AVX1-NEXT: subl %edx, %ecx
-; AVX1-NEXT: subl %ecx, %eax
-; AVX1-NEXT: vpinsrw $5, %eax, %xmm2, %xmm2
-; AVX1-NEXT: vpextrw $6, %xmm0, %eax
-; AVX1-NEXT: imull $9363, %eax, %ecx # imm = 0x2493
-; AVX1-NEXT: shrl $16, %ecx
-; AVX1-NEXT: movl %eax, %edx
-; AVX1-NEXT: subl %ecx, %edx
-; AVX1-NEXT: andl $65534, %edx # imm = 0xFFFE
-; AVX1-NEXT: shrl %edx
-; AVX1-NEXT: addl %ecx, %edx
-; AVX1-NEXT: shrl $2, %edx
-; AVX1-NEXT: leal (,%rdx,8), %ecx
-; AVX1-NEXT: subl %edx, %ecx
-; AVX1-NEXT: subl %ecx, %eax
-; AVX1-NEXT: vpinsrw $6, %eax, %xmm2, %xmm2
-; AVX1-NEXT: vpextrw $7, %xmm0, %eax
-; AVX1-NEXT: imull $9363, %eax, %ecx # imm = 0x2493
-; AVX1-NEXT: shrl $16, %ecx
-; AVX1-NEXT: movl %eax, %edx
-; AVX1-NEXT: subl %ecx, %edx
-; AVX1-NEXT: andl $65534, %edx # imm = 0xFFFE
-; AVX1-NEXT: shrl %edx
-; AVX1-NEXT: addl %ecx, %edx
-; AVX1-NEXT: shrl $2, %edx
-; AVX1-NEXT: leal (,%rdx,8), %ecx
-; AVX1-NEXT: subl %edx, %ecx
-; AVX1-NEXT: subl %ecx, %eax
-; AVX1-NEXT: vpinsrw $7, %eax, %xmm2, %xmm0
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [9363,9363,9363,9363,9363,9363,9363,9363]
+; AVX1-NEXT: vpmulhuw %xmm2, %xmm1, %xmm3
+; AVX1-NEXT: vpsubw %xmm3, %xmm1, %xmm4
+; AVX1-NEXT: vpsrlw $1, %xmm4, %xmm4
+; AVX1-NEXT: vpaddw %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpsrlw $2, %xmm3, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [7,7,7,7,7,7,7,7]
+; AVX1-NEXT: vpmullw %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vpsubw %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpmulhuw %xmm2, %xmm0, %xmm2
+; AVX1-NEXT: vpsubw %xmm2, %xmm0, %xmm3
+; AVX1-NEXT: vpsrlw $1, %xmm3, %xmm3
+; AVX1-NEXT: vpaddw %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpsrlw $2, %xmm2, %xmm2
+; AVX1-NEXT: vpmullw %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpsubw %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX1-NEXT: retq
 ;
@@ -1418,784 +562,100 @@
 define <32 x i8> @test_rem7_32i8(<32 x i8> %a) nounwind {
 ; AVX1-LABEL: test_rem7_32i8:
 ; AVX1: # BB#0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpextrb $1, %xmm1, %edx
-; AVX1-NEXT: imull $37, %edx, %ecx
-; AVX1-NEXT: shrl $8, %ecx
-; AVX1-NEXT: movb %dl, %al
-; AVX1-NEXT: subb %cl, %al
-; AVX1-NEXT: shrb %al
-; AVX1-NEXT: addb %cl, %al
-; AVX1-NEXT: shrb $2, %al
-; AVX1-NEXT: movb $7, %cl
-; AVX1-NEXT: mulb %cl
-; AVX1-NEXT: subb %al, %dl
-; AVX1-NEXT: movzbl %dl, %edx
-; AVX1-NEXT: vpextrb $0, %xmm1, %esi
-; AVX1-NEXT: imull $37, %esi, %edi
-; AVX1-NEXT: shrl $8, %edi
-; AVX1-NEXT: movb %sil, %al
-; AVX1-NEXT: subb %dil, %al
-; AVX1-NEXT: shrb %al
-; AVX1-NEXT: addb %dil, %al
-; AVX1-NEXT: shrb $2, %al
-; AVX1-NEXT: mulb %cl
-; AVX1-NEXT: subb %al, %sil
-; AVX1-NEXT: movzbl %sil, %eax
-; AVX1-NEXT: vmovd %eax, %xmm2
-; AVX1-NEXT: vpinsrb $1, %edx, %xmm2, %xmm2
-; AVX1-NEXT: vpextrb $2, %xmm1, %edx
-; AVX1-NEXT: imull $37, %edx, %esi
-; AVX1-NEXT: shrl $8, %esi
-; AVX1-NEXT: movb %dl, %al
-; AVX1-NEXT: subb %sil, %al
-; AVX1-NEXT: shrb %al
-; AVX1-NEXT: addb %sil, %al
-; AVX1-NEXT: shrb $2, %al
-; AVX1-NEXT: mulb %cl
-; AVX1-NEXT: subb %al, %dl
-; AVX1-NEXT: movzbl %dl, %eax
-; AVX1-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2
-; AVX1-NEXT: vpextrb $3, %xmm1, %edx
-; AVX1-NEXT: imull $37, %edx, %esi
-; AVX1-NEXT: shrl $8, %esi
-; AVX1-NEXT: movb %dl, %al
-; AVX1-NEXT: subb %sil, %al
-; AVX1-NEXT: shrb %al
-; AVX1-NEXT: addb %sil, %al
-; AVX1-NEXT: shrb $2, %al
-; AVX1-NEXT: mulb %cl
-; AVX1-NEXT: subb %al, %dl
-; AVX1-NEXT: movzbl %dl, %eax
-; AVX1-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2
-; AVX1-NEXT: vpextrb $4, %xmm1, %edx
-; AVX1-NEXT: imull $37, %edx, %esi
-; AVX1-NEXT: shrl $8, %esi
-; AVX1-NEXT: movb %dl, %al
-; AVX1-NEXT: subb %sil, %al
-; AVX1-NEXT: shrb %al
-; AVX1-NEXT: addb %sil, %al
-; AVX1-NEXT: shrb $2, %al
-; AVX1-NEXT: mulb %cl
-; AVX1-NEXT: subb %al, %dl
-; AVX1-NEXT: movzbl %dl, %eax
-; AVX1-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2
-; AVX1-NEXT: vpextrb $5, %xmm1, %edx
-; AVX1-NEXT: imull $37, %edx, %esi
-; AVX1-NEXT: shrl $8, %esi
-; AVX1-NEXT: movb %dl, %al
-; AVX1-NEXT: subb %sil, %al
-; AVX1-NEXT: shrb %al
-; AVX1-NEXT: addb %sil, %al
-; AVX1-NEXT: shrb $2, %al
-; AVX1-NEXT: mulb %cl
-; AVX1-NEXT: subb %al, %dl
-; AVX1-NEXT: movzbl %dl, %eax
-; AVX1-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2
-; AVX1-NEXT: vpextrb $6, %xmm1, %edx
-; AVX1-NEXT: imull $37, %edx, %esi
-; AVX1-NEXT: shrl $8, %esi
-; AVX1-NEXT: movb %dl, %al
-; AVX1-NEXT: subb %sil, %al
-; AVX1-NEXT: shrb %al
-; AVX1-NEXT: addb %sil, %al
-; AVX1-NEXT: shrb $2, %al
-; AVX1-NEXT: mulb %cl
-; AVX1-NEXT: subb %al, %dl
-; AVX1-NEXT: movzbl %dl, %eax
-; AVX1-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2
-; AVX1-NEXT: vpextrb $7, %xmm1, %edx
-; AVX1-NEXT: imull $37, %edx, %esi
-; AVX1-NEXT: shrl $8, %esi
-; AVX1-NEXT: movb %dl, %al
-; AVX1-NEXT: subb %sil, %al
-; AVX1-NEXT: shrb %al
-; AVX1-NEXT: addb %sil, %al
-; AVX1-NEXT: shrb $2, %al
-; AVX1-NEXT: mulb %cl
-; AVX1-NEXT: subb %al, %dl
-; AVX1-NEXT: movzbl %dl, %eax
-; AVX1-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2
-; AVX1-NEXT:
vpextrb $8, %xmm1, %edx -; AVX1-NEXT: imull $37, %edx, %esi -; AVX1-NEXT: shrl $8, %esi -; AVX1-NEXT: movb %dl, %al -; AVX1-NEXT: subb %sil, %al -; AVX1-NEXT: shrb %al -; AVX1-NEXT: addb %sil, %al -; AVX1-NEXT: shrb $2, %al -; AVX1-NEXT: mulb %cl -; AVX1-NEXT: subb %al, %dl -; AVX1-NEXT: movzbl %dl, %eax -; AVX1-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $9, %xmm1, %edx -; AVX1-NEXT: imull $37, %edx, %esi -; AVX1-NEXT: shrl $8, %esi -; AVX1-NEXT: movb %dl, %al -; AVX1-NEXT: subb %sil, %al -; AVX1-NEXT: shrb %al -; AVX1-NEXT: addb %sil, %al -; AVX1-NEXT: shrb $2, %al -; AVX1-NEXT: mulb %cl -; AVX1-NEXT: subb %al, %dl -; AVX1-NEXT: movzbl %dl, %eax -; AVX1-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $10, %xmm1, %edx -; AVX1-NEXT: imull $37, %edx, %esi -; AVX1-NEXT: shrl $8, %esi -; AVX1-NEXT: movb %dl, %al -; AVX1-NEXT: subb %sil, %al -; AVX1-NEXT: shrb %al -; AVX1-NEXT: addb %sil, %al -; AVX1-NEXT: shrb $2, %al -; AVX1-NEXT: mulb %cl -; AVX1-NEXT: subb %al, %dl -; AVX1-NEXT: movzbl %dl, %eax -; AVX1-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $11, %xmm1, %edx -; AVX1-NEXT: imull $37, %edx, %esi -; AVX1-NEXT: shrl $8, %esi -; AVX1-NEXT: movb %dl, %al -; AVX1-NEXT: subb %sil, %al -; AVX1-NEXT: shrb %al -; AVX1-NEXT: addb %sil, %al -; AVX1-NEXT: shrb $2, %al -; AVX1-NEXT: mulb %cl -; AVX1-NEXT: subb %al, %dl -; AVX1-NEXT: movzbl %dl, %eax -; AVX1-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $12, %xmm1, %edx -; AVX1-NEXT: imull $37, %edx, %esi -; AVX1-NEXT: shrl $8, %esi -; AVX1-NEXT: movb %dl, %al -; AVX1-NEXT: subb %sil, %al -; AVX1-NEXT: shrb %al -; AVX1-NEXT: addb %sil, %al -; AVX1-NEXT: shrb $2, %al -; AVX1-NEXT: mulb %cl -; AVX1-NEXT: subb %al, %dl -; AVX1-NEXT: movzbl %dl, %eax -; AVX1-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $13, %xmm1, %edx -; AVX1-NEXT: imull $37, %edx, %esi -; AVX1-NEXT: shrl $8, %esi -; AVX1-NEXT: movb %dl, %al -; AVX1-NEXT: subb %sil, %al -; AVX1-NEXT: shrb %al -; AVX1-NEXT: addb %sil, %al -; AVX1-NEXT: shrb $2, %al -; AVX1-NEXT: mulb %cl -; AVX1-NEXT: subb %al, %dl -; AVX1-NEXT: movzbl %dl, %eax -; AVX1-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $14, %xmm1, %edx -; AVX1-NEXT: imull $37, %edx, %esi -; AVX1-NEXT: shrl $8, %esi -; AVX1-NEXT: movb %dl, %al -; AVX1-NEXT: subb %sil, %al -; AVX1-NEXT: shrb %al -; AVX1-NEXT: addb %sil, %al -; AVX1-NEXT: shrb $2, %al -; AVX1-NEXT: mulb %cl -; AVX1-NEXT: subb %al, %dl -; AVX1-NEXT: movzbl %dl, %eax -; AVX1-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $15, %xmm1, %edx -; AVX1-NEXT: imull $37, %edx, %esi -; AVX1-NEXT: shrl $8, %esi -; AVX1-NEXT: movb %dl, %al -; AVX1-NEXT: subb %sil, %al -; AVX1-NEXT: shrb %al -; AVX1-NEXT: addb %sil, %al -; AVX1-NEXT: shrb $2, %al -; AVX1-NEXT: mulb %cl -; AVX1-NEXT: subb %al, %dl -; AVX1-NEXT: movzbl %dl, %eax -; AVX1-NEXT: vpinsrb $15, %eax, %xmm2, %xmm1 -; AVX1-NEXT: vpextrb $1, %xmm0, %edx -; AVX1-NEXT: imull $37, %edx, %esi -; AVX1-NEXT: shrl $8, %esi -; AVX1-NEXT: movb %dl, %al -; AVX1-NEXT: subb %sil, %al -; AVX1-NEXT: shrb %al -; AVX1-NEXT: addb %sil, %al -; AVX1-NEXT: shrb $2, %al -; AVX1-NEXT: mulb %cl -; AVX1-NEXT: subb %al, %dl -; AVX1-NEXT: movzbl %dl, %edx -; AVX1-NEXT: vpextrb $0, %xmm0, %esi -; AVX1-NEXT: imull $37, %esi, %edi -; AVX1-NEXT: shrl $8, %edi -; AVX1-NEXT: movb %sil, %al -; AVX1-NEXT: subb %dil, %al -; AVX1-NEXT: shrb %al -; AVX1-NEXT: addb %dil, %al -; AVX1-NEXT: shrb $2, %al -; AVX1-NEXT: mulb %cl -; AVX1-NEXT: subb %al, %sil -; AVX1-NEXT: movzbl 
%sil, %eax -; AVX1-NEXT: vmovd %eax, %xmm2 -; AVX1-NEXT: vpinsrb $1, %edx, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $2, %xmm0, %edx -; AVX1-NEXT: imull $37, %edx, %esi -; AVX1-NEXT: shrl $8, %esi -; AVX1-NEXT: movb %dl, %al -; AVX1-NEXT: subb %sil, %al -; AVX1-NEXT: shrb %al -; AVX1-NEXT: addb %sil, %al -; AVX1-NEXT: shrb $2, %al -; AVX1-NEXT: mulb %cl -; AVX1-NEXT: subb %al, %dl -; AVX1-NEXT: movzbl %dl, %eax -; AVX1-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $3, %xmm0, %edx -; AVX1-NEXT: imull $37, %edx, %esi -; AVX1-NEXT: shrl $8, %esi -; AVX1-NEXT: movb %dl, %al -; AVX1-NEXT: subb %sil, %al -; AVX1-NEXT: shrb %al -; AVX1-NEXT: addb %sil, %al -; AVX1-NEXT: shrb $2, %al -; AVX1-NEXT: mulb %cl -; AVX1-NEXT: subb %al, %dl -; AVX1-NEXT: movzbl %dl, %eax -; AVX1-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $4, %xmm0, %edx -; AVX1-NEXT: imull $37, %edx, %esi -; AVX1-NEXT: shrl $8, %esi -; AVX1-NEXT: movb %dl, %al -; AVX1-NEXT: subb %sil, %al -; AVX1-NEXT: shrb %al -; AVX1-NEXT: addb %sil, %al -; AVX1-NEXT: shrb $2, %al -; AVX1-NEXT: mulb %cl -; AVX1-NEXT: subb %al, %dl -; AVX1-NEXT: movzbl %dl, %eax -; AVX1-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $5, %xmm0, %edx -; AVX1-NEXT: imull $37, %edx, %esi -; AVX1-NEXT: shrl $8, %esi -; AVX1-NEXT: movb %dl, %al -; AVX1-NEXT: subb %sil, %al -; AVX1-NEXT: shrb %al -; AVX1-NEXT: addb %sil, %al -; AVX1-NEXT: shrb $2, %al -; AVX1-NEXT: mulb %cl -; AVX1-NEXT: subb %al, %dl -; AVX1-NEXT: movzbl %dl, %eax -; AVX1-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $6, %xmm0, %edx -; AVX1-NEXT: imull $37, %edx, %esi -; AVX1-NEXT: shrl $8, %esi -; AVX1-NEXT: movb %dl, %al -; AVX1-NEXT: subb %sil, %al -; AVX1-NEXT: shrb %al -; AVX1-NEXT: addb %sil, %al -; AVX1-NEXT: shrb $2, %al -; AVX1-NEXT: mulb %cl -; AVX1-NEXT: subb %al, %dl -; AVX1-NEXT: movzbl %dl, %eax -; AVX1-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $7, %xmm0, %edx -; AVX1-NEXT: imull $37, %edx, %esi -; AVX1-NEXT: shrl $8, %esi -; AVX1-NEXT: movb %dl, %al -; AVX1-NEXT: subb %sil, %al -; AVX1-NEXT: shrb %al -; AVX1-NEXT: addb %sil, %al -; AVX1-NEXT: shrb $2, %al -; AVX1-NEXT: mulb %cl -; AVX1-NEXT: subb %al, %dl -; AVX1-NEXT: movzbl %dl, %eax -; AVX1-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $8, %xmm0, %edx -; AVX1-NEXT: imull $37, %edx, %esi -; AVX1-NEXT: shrl $8, %esi -; AVX1-NEXT: movb %dl, %al -; AVX1-NEXT: subb %sil, %al -; AVX1-NEXT: shrb %al -; AVX1-NEXT: addb %sil, %al -; AVX1-NEXT: shrb $2, %al -; AVX1-NEXT: mulb %cl -; AVX1-NEXT: subb %al, %dl -; AVX1-NEXT: movzbl %dl, %eax -; AVX1-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $9, %xmm0, %edx -; AVX1-NEXT: imull $37, %edx, %esi -; AVX1-NEXT: shrl $8, %esi -; AVX1-NEXT: movb %dl, %al -; AVX1-NEXT: subb %sil, %al -; AVX1-NEXT: shrb %al -; AVX1-NEXT: addb %sil, %al -; AVX1-NEXT: shrb $2, %al -; AVX1-NEXT: mulb %cl -; AVX1-NEXT: subb %al, %dl -; AVX1-NEXT: movzbl %dl, %eax -; AVX1-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $10, %xmm0, %edx -; AVX1-NEXT: imull $37, %edx, %esi -; AVX1-NEXT: shrl $8, %esi -; AVX1-NEXT: movb %dl, %al -; AVX1-NEXT: subb %sil, %al -; AVX1-NEXT: shrb %al -; AVX1-NEXT: addb %sil, %al -; AVX1-NEXT: shrb $2, %al -; AVX1-NEXT: mulb %cl -; AVX1-NEXT: subb %al, %dl -; AVX1-NEXT: movzbl %dl, %eax -; AVX1-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2 -; AVX1-NEXT: vpextrb $11, %xmm0, %edx -; AVX1-NEXT: imull $37, %edx, %esi -; AVX1-NEXT: shrl $8, %esi -; AVX1-NEXT: movb %dl, %al -; AVX1-NEXT: subb %sil, %al -; AVX1-NEXT: shrb 
%al
-; AVX1-NEXT: addb %sil, %al
-; AVX1-NEXT: shrb $2, %al
-; AVX1-NEXT: mulb %cl
-; AVX1-NEXT: subb %al, %dl
-; AVX1-NEXT: movzbl %dl, %eax
-; AVX1-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2
-; AVX1-NEXT: vpextrb $12, %xmm0, %edx
-; AVX1-NEXT: imull $37, %edx, %esi
-; AVX1-NEXT: shrl $8, %esi
-; AVX1-NEXT: movb %dl, %al
-; AVX1-NEXT: subb %sil, %al
-; AVX1-NEXT: shrb %al
-; AVX1-NEXT: addb %sil, %al
-; AVX1-NEXT: shrb $2, %al
-; AVX1-NEXT: mulb %cl
-; AVX1-NEXT: subb %al, %dl
-; AVX1-NEXT: movzbl %dl, %eax
-; AVX1-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2
-; AVX1-NEXT: vpextrb $13, %xmm0, %edx
-; AVX1-NEXT: imull $37, %edx, %esi
-; AVX1-NEXT: shrl $8, %esi
-; AVX1-NEXT: movb %dl, %al
-; AVX1-NEXT: subb %sil, %al
-; AVX1-NEXT: shrb %al
-; AVX1-NEXT: addb %sil, %al
-; AVX1-NEXT: shrb $2, %al
-; AVX1-NEXT: mulb %cl
-; AVX1-NEXT: subb %al, %dl
-; AVX1-NEXT: movzbl %dl, %eax
-; AVX1-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2
-; AVX1-NEXT: vpextrb $14, %xmm0, %edx
-; AVX1-NEXT: imull $37, %edx, %esi
-; AVX1-NEXT: shrl $8, %esi
-; AVX1-NEXT: movb %dl, %al
-; AVX1-NEXT: subb %sil, %al
-; AVX1-NEXT: shrb %al
-; AVX1-NEXT: addb %sil, %al
-; AVX1-NEXT: shrb $2, %al
-; AVX1-NEXT: mulb %cl
-; AVX1-NEXT: subb %al, %dl
-; AVX1-NEXT: movzbl %dl, %eax
-; AVX1-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2
-; AVX1-NEXT: vpextrb $15, %xmm0, %edx
-; AVX1-NEXT: imull $37, %edx, %esi
-; AVX1-NEXT: shrl $8, %esi
-; AVX1-NEXT: movb %dl, %al
-; AVX1-NEXT: subb %sil, %al
-; AVX1-NEXT: shrb %al
-; AVX1-NEXT: addb %sil, %al
-; AVX1-NEXT: shrb $2, %al
-; AVX1-NEXT: mulb %cl
-; AVX1-NEXT: subb %al, %dl
-; AVX1-NEXT: movzbl %dl, %eax
-; AVX1-NEXT: vpinsrb $15, %eax, %xmm2, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; AVX1-NEXT: vpmullw %xmm1, %xmm3, %xmm3
+; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
+; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm2[2,3,0,1]
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero
+; AVX1-NEXT: vpmullw %xmm1, %xmm4, %xmm4
+; AVX1-NEXT: vpsrlw $8, %xmm4, %xmm4
+; AVX1-NEXT: vpackuswb %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vpsubb %xmm3, %xmm2, %xmm4
+; AVX1-NEXT: vpsrlw $1, %xmm4, %xmm4
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm8 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
+; AVX1-NEXT: vpand %xmm8, %xmm4, %xmm4
+; AVX1-NEXT: vpaddb %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpsrlw $2, %xmm3, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
+; AVX1-NEXT: vpand %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vpmovsxbw %xmm3, %xmm6
+; AVX1-NEXT: vpmovsxbw {{.*}}(%rip), %xmm7
+; AVX1-NEXT: vpmullw %xmm7, %xmm6, %xmm6
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [255,255,255,255,255,255,255,255]
+; AVX1-NEXT: vpand %xmm5, %xmm6, %xmm6
+; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,3,0,1]
+; AVX1-NEXT: vpmovsxbw %xmm3, %xmm3
+; AVX1-NEXT: vpmullw %xmm7, %xmm3, %xmm3
+; AVX1-NEXT: vpand %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vpackuswb %xmm3, %xmm6, %xmm3
+; AVX1-NEXT: vpsubb %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX1-NEXT: vpmullw %xmm1, %xmm3, %xmm3
+; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
+; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm0[2,3,0,1]
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm6 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero,xmm6[4],zero,xmm6[5],zero,xmm6[6],zero,xmm6[7],zero
+; AVX1-NEXT: vpmullw %xmm1, %xmm6, %xmm1
+; AVX1-NEXT: vpsrlw $8, %xmm1, %xmm1
+; AVX1-NEXT: vpackuswb %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpsubb %xmm1, %xmm0, %xmm3
+; AVX1-NEXT: vpsrlw $1, %xmm3, %xmm3
+; AVX1-NEXT: vpand %xmm8, %xmm3, %xmm3
+; AVX1-NEXT: vpaddb %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpsrlw $2, %xmm1, %xmm1
+; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vpmovsxbw %xmm1, %xmm3
+; AVX1-NEXT: vpmullw %xmm7, %xmm3, %xmm3
+; AVX1-NEXT: vpand %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; AVX1-NEXT: vpmovsxbw %xmm1, %xmm1
+; AVX1-NEXT: vpmullw %xmm7, %xmm1, %xmm1
+; AVX1-NEXT: vpand %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vpackuswb %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpsubb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: test_rem7_32i8:
 ; AVX2: # BB#0:
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vpextrb $1, %xmm1, %edx
-; AVX2-NEXT: imull $37, %edx, %ecx
-; AVX2-NEXT: shrl $8, %ecx
-; AVX2-NEXT: movb %dl, %al
-; AVX2-NEXT: subb %cl, %al
-; AVX2-NEXT: shrb %al
-; AVX2-NEXT: addb %cl, %al
-; AVX2-NEXT: shrb $2, %al
-; AVX2-NEXT: movb $7, %cl
-; AVX2-NEXT: mulb %cl
-; AVX2-NEXT: subb %al, %dl
-; AVX2-NEXT: movzbl %dl, %edx
-; AVX2-NEXT: vpextrb $0, %xmm1, %esi
-; AVX2-NEXT: imull $37, %esi, %edi
-; AVX2-NEXT: shrl $8, %edi
-; AVX2-NEXT: movb %sil, %al
-; AVX2-NEXT: subb %dil, %al
-; AVX2-NEXT: shrb %al
-; AVX2-NEXT: addb %dil, %al
-; AVX2-NEXT: shrb $2, %al
-; AVX2-NEXT: mulb %cl
-; AVX2-NEXT: subb %al, %sil
-; AVX2-NEXT: movzbl %sil, %eax
-; AVX2-NEXT: vmovd %eax, %xmm2
-; AVX2-NEXT: vpinsrb $1, %edx, %xmm2, %xmm2
-; AVX2-NEXT: vpextrb $2, %xmm1, %edx
-; AVX2-NEXT: imull $37, %edx, %esi
-; AVX2-NEXT: shrl $8, %esi
-; AVX2-NEXT: movb %dl, %al
-; AVX2-NEXT: subb %sil, %al
-; AVX2-NEXT: shrb %al
-; AVX2-NEXT: addb %sil, %al
-; AVX2-NEXT: shrb
$2, %al -; AVX2-NEXT: mulb %cl -; AVX2-NEXT: subb %al, %dl -; AVX2-NEXT: movzbl %dl, %eax -; AVX2-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $7, %xmm1, %edx -; AVX2-NEXT: imull $37, %edx, %esi -; AVX2-NEXT: shrl $8, %esi -; AVX2-NEXT: movb %dl, %al -; AVX2-NEXT: subb %sil, %al -; AVX2-NEXT: shrb %al -; AVX2-NEXT: addb %sil, %al -; AVX2-NEXT: shrb $2, %al -; AVX2-NEXT: mulb %cl -; AVX2-NEXT: subb %al, %dl -; AVX2-NEXT: movzbl %dl, %eax -; AVX2-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $8, %xmm1, %edx -; AVX2-NEXT: imull $37, %edx, %esi -; AVX2-NEXT: shrl $8, %esi -; AVX2-NEXT: movb %dl, %al -; AVX2-NEXT: subb %sil, %al -; AVX2-NEXT: shrb %al -; AVX2-NEXT: addb %sil, %al -; AVX2-NEXT: shrb $2, %al -; AVX2-NEXT: mulb %cl -; AVX2-NEXT: subb %al, %dl -; AVX2-NEXT: movzbl %dl, %eax -; AVX2-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $9, %xmm1, %edx -; AVX2-NEXT: imull $37, %edx, %esi -; AVX2-NEXT: shrl $8, %esi -; AVX2-NEXT: movb %dl, %al -; AVX2-NEXT: subb %sil, %al -; AVX2-NEXT: shrb %al -; AVX2-NEXT: addb %sil, %al -; AVX2-NEXT: shrb $2, %al -; AVX2-NEXT: mulb %cl -; AVX2-NEXT: subb %al, %dl -; AVX2-NEXT: movzbl %dl, %eax -; AVX2-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $10, %xmm1, %edx -; AVX2-NEXT: imull $37, %edx, %esi -; AVX2-NEXT: shrl $8, %esi -; AVX2-NEXT: movb %dl, %al -; AVX2-NEXT: subb %sil, %al -; AVX2-NEXT: shrb %al -; AVX2-NEXT: addb %sil, %al -; AVX2-NEXT: shrb $2, %al -; AVX2-NEXT: mulb %cl -; AVX2-NEXT: subb %al, %dl -; AVX2-NEXT: movzbl %dl, %eax -; AVX2-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $11, %xmm1, %edx -; AVX2-NEXT: imull $37, %edx, %esi -; AVX2-NEXT: shrl $8, %esi -; AVX2-NEXT: movb %dl, %al -; AVX2-NEXT: subb %sil, %al -; AVX2-NEXT: shrb %al -; AVX2-NEXT: addb %sil, %al -; AVX2-NEXT: shrb $2, %al -; AVX2-NEXT: mulb %cl -; AVX2-NEXT: subb %al, %dl -; AVX2-NEXT: movzbl %dl, %eax -; AVX2-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $12, %xmm1, %edx -; AVX2-NEXT: imull $37, %edx, %esi -; AVX2-NEXT: shrl $8, %esi -; AVX2-NEXT: movb %dl, %al -; AVX2-NEXT: subb %sil, %al -; AVX2-NEXT: shrb %al -; AVX2-NEXT: addb %sil, %al -; AVX2-NEXT: shrb $2, %al -; AVX2-NEXT: mulb %cl -; AVX2-NEXT: subb %al, %dl -; AVX2-NEXT: movzbl %dl, %eax -; AVX2-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $13, %xmm1, %edx -; AVX2-NEXT: imull $37, %edx, %esi -; AVX2-NEXT: shrl $8, %esi -; AVX2-NEXT: movb %dl, %al -; AVX2-NEXT: subb %sil, %al -; AVX2-NEXT: shrb %al -; AVX2-NEXT: addb %sil, %al -; AVX2-NEXT: shrb $2, %al -; AVX2-NEXT: mulb %cl -; AVX2-NEXT: subb %al, %dl -; AVX2-NEXT: movzbl %dl, %eax -; AVX2-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $14, %xmm1, %edx -; AVX2-NEXT: imull $37, %edx, %esi -; AVX2-NEXT: shrl $8, %esi -; AVX2-NEXT: movb %dl, %al -; AVX2-NEXT: subb %sil, %al -; AVX2-NEXT: shrb %al -; AVX2-NEXT: addb %sil, %al -; AVX2-NEXT: shrb $2, %al -; AVX2-NEXT: mulb %cl -; AVX2-NEXT: subb %al, %dl -; AVX2-NEXT: movzbl %dl, %eax -; AVX2-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $15, %xmm1, %edx -; AVX2-NEXT: imull $37, %edx, %esi -; AVX2-NEXT: shrl $8, %esi -; AVX2-NEXT: movb %dl, %al -; AVX2-NEXT: subb %sil, %al -; AVX2-NEXT: shrb %al -; AVX2-NEXT: addb %sil, %al -; AVX2-NEXT: shrb $2, %al -; AVX2-NEXT: mulb %cl -; AVX2-NEXT: subb %al, %dl -; AVX2-NEXT: movzbl %dl, %eax -; AVX2-NEXT: vpinsrb $15, %eax, %xmm2, %xmm1 -; AVX2-NEXT: vpextrb $1, %xmm0, %edx -; AVX2-NEXT: imull $37, %edx, %esi -; AVX2-NEXT: shrl $8, %esi -; AVX2-NEXT: movb 
%dl, %al -; AVX2-NEXT: subb %sil, %al -; AVX2-NEXT: shrb %al -; AVX2-NEXT: addb %sil, %al -; AVX2-NEXT: shrb $2, %al -; AVX2-NEXT: mulb %cl -; AVX2-NEXT: subb %al, %dl -; AVX2-NEXT: movzbl %dl, %edx -; AVX2-NEXT: vpextrb $0, %xmm0, %esi -; AVX2-NEXT: imull $37, %esi, %edi -; AVX2-NEXT: shrl $8, %edi -; AVX2-NEXT: movb %sil, %al -; AVX2-NEXT: subb %dil, %al -; AVX2-NEXT: shrb %al -; AVX2-NEXT: addb %dil, %al -; AVX2-NEXT: shrb $2, %al -; AVX2-NEXT: mulb %cl -; AVX2-NEXT: subb %al, %sil -; AVX2-NEXT: movzbl %sil, %eax -; AVX2-NEXT: vmovd %eax, %xmm2 -; AVX2-NEXT: vpinsrb $1, %edx, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $2, %xmm0, %edx -; AVX2-NEXT: imull $37, %edx, %esi -; AVX2-NEXT: shrl $8, %esi -; AVX2-NEXT: movb %dl, %al -; AVX2-NEXT: subb %sil, %al -; AVX2-NEXT: shrb %al -; AVX2-NEXT: addb %sil, %al -; AVX2-NEXT: shrb $2, %al -; AVX2-NEXT: mulb %cl -; AVX2-NEXT: subb %al, %dl -; AVX2-NEXT: movzbl %dl, %eax -; AVX2-NEXT: vpinsrb $2, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $3, %xmm0, %edx -; AVX2-NEXT: imull $37, %edx, %esi -; AVX2-NEXT: shrl $8, %esi -; AVX2-NEXT: movb %dl, %al -; AVX2-NEXT: subb %sil, %al -; AVX2-NEXT: shrb %al -; AVX2-NEXT: addb %sil, %al -; AVX2-NEXT: shrb $2, %al -; AVX2-NEXT: mulb %cl -; AVX2-NEXT: subb %al, %dl -; AVX2-NEXT: movzbl %dl, %eax -; AVX2-NEXT: vpinsrb $3, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $4, %xmm0, %edx -; AVX2-NEXT: imull $37, %edx, %esi -; AVX2-NEXT: shrl $8, %esi -; AVX2-NEXT: movb %dl, %al -; AVX2-NEXT: subb %sil, %al -; AVX2-NEXT: shrb %al -; AVX2-NEXT: addb %sil, %al -; AVX2-NEXT: shrb $2, %al -; AVX2-NEXT: mulb %cl -; AVX2-NEXT: subb %al, %dl -; AVX2-NEXT: movzbl %dl, %eax -; AVX2-NEXT: vpinsrb $4, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $5, %xmm0, %edx -; AVX2-NEXT: imull $37, %edx, %esi -; AVX2-NEXT: shrl $8, %esi -; AVX2-NEXT: movb %dl, %al -; AVX2-NEXT: subb %sil, %al -; AVX2-NEXT: shrb %al -; AVX2-NEXT: addb %sil, %al -; AVX2-NEXT: shrb $2, %al -; AVX2-NEXT: mulb %cl -; AVX2-NEXT: subb %al, %dl -; AVX2-NEXT: movzbl %dl, %eax -; AVX2-NEXT: vpinsrb $5, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $6, %xmm0, %edx -; AVX2-NEXT: imull $37, %edx, %esi -; AVX2-NEXT: shrl $8, %esi -; AVX2-NEXT: movb %dl, %al -; AVX2-NEXT: subb %sil, %al -; AVX2-NEXT: shrb %al -; AVX2-NEXT: addb %sil, %al -; AVX2-NEXT: shrb $2, %al -; AVX2-NEXT: mulb %cl -; AVX2-NEXT: subb %al, %dl -; AVX2-NEXT: movzbl %dl, %eax -; AVX2-NEXT: vpinsrb $6, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $7, %xmm0, %edx -; AVX2-NEXT: imull $37, %edx, %esi -; AVX2-NEXT: shrl $8, %esi -; AVX2-NEXT: movb %dl, %al -; AVX2-NEXT: subb %sil, %al -; AVX2-NEXT: shrb %al -; AVX2-NEXT: addb %sil, %al -; AVX2-NEXT: shrb $2, %al -; AVX2-NEXT: mulb %cl -; AVX2-NEXT: subb %al, %dl -; AVX2-NEXT: movzbl %dl, %eax -; AVX2-NEXT: vpinsrb $7, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $8, %xmm0, %edx -; AVX2-NEXT: imull $37, %edx, %esi -; AVX2-NEXT: shrl $8, %esi -; AVX2-NEXT: movb %dl, %al -; AVX2-NEXT: subb %sil, %al -; AVX2-NEXT: shrb %al -; AVX2-NEXT: addb %sil, %al -; AVX2-NEXT: shrb $2, %al -; AVX2-NEXT: mulb %cl -; AVX2-NEXT: subb %al, %dl -; AVX2-NEXT: movzbl %dl, %eax -; AVX2-NEXT: vpinsrb $8, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $9, %xmm0, %edx -; AVX2-NEXT: imull $37, %edx, %esi -; AVX2-NEXT: shrl $8, %esi -; AVX2-NEXT: movb %dl, %al -; AVX2-NEXT: subb %sil, %al -; AVX2-NEXT: shrb %al -; AVX2-NEXT: addb %sil, %al -; AVX2-NEXT: shrb $2, %al -; AVX2-NEXT: mulb %cl -; AVX2-NEXT: subb %al, %dl -; AVX2-NEXT: movzbl %dl, %eax -; AVX2-NEXT: vpinsrb $9, %eax, %xmm2, %xmm2 -; AVX2-NEXT: vpextrb $10, %xmm0, 
%edx
-; AVX2-NEXT: imull $37, %edx, %esi
-; AVX2-NEXT: shrl $8, %esi
-; AVX2-NEXT: movb %dl, %al
-; AVX2-NEXT: subb %sil, %al
-; AVX2-NEXT: shrb %al
-; AVX2-NEXT: addb %sil, %al
-; AVX2-NEXT: shrb $2, %al
-; AVX2-NEXT: mulb %cl
-; AVX2-NEXT: subb %al, %dl
-; AVX2-NEXT: movzbl %dl, %eax
-; AVX2-NEXT: vpinsrb $10, %eax, %xmm2, %xmm2
-; AVX2-NEXT: vpextrb $11, %xmm0, %edx
-; AVX2-NEXT: imull $37, %edx, %esi
-; AVX2-NEXT: shrl $8, %esi
-; AVX2-NEXT: movb %dl, %al
-; AVX2-NEXT: subb %sil, %al
-; AVX2-NEXT: shrb %al
-; AVX2-NEXT: addb %sil, %al
-; AVX2-NEXT: shrb $2, %al
-; AVX2-NEXT: mulb %cl
-; AVX2-NEXT: subb %al, %dl
-; AVX2-NEXT: movzbl %dl, %eax
-; AVX2-NEXT: vpinsrb $11, %eax, %xmm2, %xmm2
-; AVX2-NEXT: vpextrb $12, %xmm0, %edx
-; AVX2-NEXT: imull $37, %edx, %esi
-; AVX2-NEXT: shrl $8, %esi
-; AVX2-NEXT: movb %dl, %al
-; AVX2-NEXT: subb %sil, %al
-; AVX2-NEXT: shrb %al
-; AVX2-NEXT: addb %sil, %al
-; AVX2-NEXT: shrb $2, %al
-; AVX2-NEXT: mulb %cl
-; AVX2-NEXT: subb %al, %dl
-; AVX2-NEXT: movzbl %dl, %eax
-; AVX2-NEXT: vpinsrb $12, %eax, %xmm2, %xmm2
-; AVX2-NEXT: vpextrb $13, %xmm0, %edx
-; AVX2-NEXT: imull $37, %edx, %esi
-; AVX2-NEXT: shrl $8, %esi
-; AVX2-NEXT: movb %dl, %al
-; AVX2-NEXT: subb %sil, %al
-; AVX2-NEXT: shrb %al
-; AVX2-NEXT: addb %sil, %al
-; AVX2-NEXT: shrb $2, %al
-; AVX2-NEXT: mulb %cl
-; AVX2-NEXT: subb %al, %dl
-; AVX2-NEXT: movzbl %dl, %eax
-; AVX2-NEXT: vpinsrb $13, %eax, %xmm2, %xmm2
-; AVX2-NEXT: vpextrb $14, %xmm0, %edx
-; AVX2-NEXT: imull $37, %edx, %esi
-; AVX2-NEXT: shrl $8, %esi
-; AVX2-NEXT: movb %dl, %al
-; AVX2-NEXT: subb %sil, %al
-; AVX2-NEXT: shrb %al
-; AVX2-NEXT: addb %sil, %al
-; AVX2-NEXT: shrb $2, %al
-; AVX2-NEXT: mulb %cl
-; AVX2-NEXT: subb %al, %dl
-; AVX2-NEXT: movzbl %dl, %eax
-; AVX2-NEXT: vpinsrb $14, %eax, %xmm2, %xmm2
-; AVX2-NEXT: vpextrb $15, %xmm0, %edx
-; AVX2-NEXT: imull $37, %edx, %esi
-; AVX2-NEXT: shrl $8, %esi
-; AVX2-NEXT: movb %dl, %al
-; AVX2-NEXT: subb %sil, %al
-; AVX2-NEXT: shrb %al
-; AVX2-NEXT: addb %sil, %al
-; AVX2-NEXT: shrb $2, %al
-; AVX2-NEXT: mulb %cl
-; AVX2-NEXT: subb %al, %dl
-; AVX2-NEXT: movzbl %dl, %eax
-; AVX2-NEXT: vpinsrb $15, %eax, %xmm2, %xmm0
-; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37]
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm3
+; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero,xmm3[4],zero,xmm3[5],zero,xmm3[6],zero,xmm3[7],zero,xmm3[8],zero,xmm3[9],zero,xmm3[10],zero,xmm3[11],zero,xmm3[12],zero,xmm3[13],zero,xmm3[14],zero,xmm3[15],zero
+; AVX2-NEXT: vpmullw %ymm2, %ymm3, %ymm2
+; AVX2-NEXT: vpsrlw $8, %ymm2, %ymm2
+; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
+; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX2-NEXT: vpmullw %ymm1, %ymm3, %ymm1
+; AVX2-NEXT: vpsrlw $8, %ymm1, %ymm1
+; AVX2-NEXT: vperm2i128 {{.*#+}} ymm3 = ymm1[2,3],ymm2[2,3]
+; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
+; AVX2-NEXT: vpackuswb %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vpsubb %ymm1, %ymm0, %ymm2
+; AVX2-NEXT: vpsrlw $1, %ymm2, %ymm2
+; AVX2-NEXT: vpand {{.*}}(%rip), %ymm2, %ymm2
+; AVX2-NEXT: vpaddb %ymm1, %ymm2, %ymm1
+; AVX2-NEXT: vpsrlw $2, %ymm1, %ymm1
+; AVX2-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX2-NEXT: vpmovsxbw %xmm2, %ymm2
+; AVX2-NEXT: vpmovsxbw {{.*}}(%rip), %ymm3
+; AVX2-NEXT: vpmullw %ymm3, %ymm2, %ymm2
+; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm4
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm5 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX2-NEXT: vpshufb %xmm5, %xmm4, %xmm4
+; AVX2-NEXT: vpshufb %xmm5, %xmm2, %xmm2
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm4[0]
+; AVX2-NEXT: vpmovsxbw %xmm1, %ymm1
+; AVX2-NEXT: vpmullw %ymm3, %ymm1, %ymm1
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm3
+; AVX2-NEXT: vpshufb %xmm5, %xmm3, %xmm3
+; AVX2-NEXT: vpshufb %xmm5, %xmm1, %xmm1
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
+; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
+; AVX2-NEXT: vpsubb %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT: retq
 %res = urem <32 x i8> %a, <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
 ret <32 x i8> %res
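
For reference (this note and the sketch below are not part of the patch): every new CHECK block above encodes the same round-up-multiplier division by 7 that the scalar expansion used per element, just kept in vector registers. A scalar C++ model of what each lane computes follows; the function names (udiv7_u16 etc.) and the exhaustive main() harness are mine, purely illustrative.

// Scalar model of the vectorized udiv/urem-by-7 sequences checked above.
#include <cassert>
#include <cstdint>

// 16-bit lanes: hi = (x * 9363) >> 16 (9363 = 0x2493), then
// q = (hi + ((x - hi) >> 1)) >> 2. Every step has a cheap vector form:
// vpmulhuw, vpsubw, vpsrlw $1, vpaddw, vpsrlw $2.
static uint16_t udiv7_u16(uint16_t x) {
  uint16_t hi = (uint16_t)(((uint32_t)x * 9363u) >> 16); // vpmulhuw
  uint16_t t = (uint16_t)((uint16_t)(x - hi) >> 1);      // vpsubw + vpsrlw $1
  return (uint16_t)((uint16_t)(hi + t) >> 2);            // vpaddw + vpsrlw $2
}

// 8-bit lanes: there is no byte-wide mulhi instruction, so the lanes are
// widened to i16, multiplied by 37, and the high byte is taken
// (vpmovzxbw + vpmullw + vpsrlw $8 + vpackuswb in the CHECK lines).
static uint8_t udiv7_u8(uint8_t x) {
  uint8_t hi = (uint8_t)(((uint16_t)x * 37u) >> 8);
  uint8_t t = (uint8_t)((uint8_t)(x - hi) >> 1);
  return (uint8_t)((uint8_t)(hi + t) >> 2);
}

// The rem7 tests compute the quotient and subtract it back: r = x - 7*q.
static uint16_t urem7_u16(uint16_t x) { return (uint16_t)(x - 7u * udiv7_u16(x)); }
static uint8_t urem7_u8(uint8_t x) { return (uint8_t)(x - 7u * udiv7_u8(x)); }

int main() {
  // Exhaustive check over both lane widths.
  for (uint32_t x = 0; x <= 0xFFFFu; ++x) {
    assert(udiv7_u16((uint16_t)x) == x / 7);
    assert(urem7_u16((uint16_t)x) == x % 7);
  }
  for (uint32_t x = 0; x <= 0xFFu; ++x) {
    assert(udiv7_u8((uint8_t)x) == x / 7);
    assert(urem7_u8((uint8_t)x) == x % 7);
  }
  return 0;
}

In the byte-lane vector form the ">> 1" and ">> 2" steps are done with word shifts plus a mask (vpsrlw $1 then vpand with 127, vpsrlw $2 then vpand with 63), since x86 has no byte-granular shift; those are exactly the 127/63 splat constants visible in the new CHECK lines.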