Index: lib/CodeGen/SelectionDAG/TargetLowering.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -3510,7 +3510,9 @@
                                   SmallVectorImpl<SDNode *> &Created) const {
   SDLoc dl(N);
   EVT VT = N->getValueType(0);
+  EVT SVT = VT.getScalarType();
   EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout());
+  EVT ShSVT = ShVT.getScalarType();
   unsigned EltBits = VT.getScalarSizeInBits();
 
   // Check to see if we can do this.
@@ -3522,48 +3524,72 @@
   if (N->getFlags().hasExact())
     return BuildExactSDIV(*this, N, dl, DAG, Created);
 
+  SmallVector<SDValue, 16> MagicFactors, Factors, Shifts;
+
+  auto BuildSDIVPattern = [&](ConstantSDNode *C) {
+    if (C->isNullValue())
+      return false;
+
+    const APInt &Divisor = C->getAPIntValue();
+    APInt::ms magics = Divisor.magic();
+    int Factor = 0;
+
+    // If d > 0 and m < 0, add the numerator.
+    if (Divisor.isStrictlyPositive() && magics.m.isNegative())
+      Factor = 1;
+    // If d < 0 and m > 0, subtract the numerator.
+    else if (Divisor.isNegative() && magics.m.isStrictlyPositive())
+      Factor = -1;
+
+    MagicFactors.push_back(DAG.getConstant(magics.m, dl, SVT));
+    Factors.push_back(DAG.getConstant(Factor, dl, SVT));
+    Shifts.push_back(DAG.getConstant(magics.s, dl, ShSVT));
+    return true;
+  };
+
   SDValue N0 = N->getOperand(0);
   SDValue N1 = N->getOperand(1);
 
-  // TODO: Add non-uniform constant support.
-  ConstantSDNode *C = isConstOrConstSplat(N1);
-  if (!C || C->isNullValue())
+  // Collect the shifts/magic values from each element.
+  if (!ISD::matchUnaryPredicate(N1, BuildSDIVPattern))
     return SDValue();
 
-  const APInt &Divisor = C->getAPIntValue();
-  APInt::ms magics = Divisor.magic();
+  SDValue MagicFactor, Factor, Shift;
+  if (VT.isVector()) {
+    MagicFactor = DAG.getBuildVector(VT, dl, MagicFactors);
+    Factor = DAG.getBuildVector(VT, dl, Factors);
+    Shift = DAG.getBuildVector(ShVT, dl, Shifts);
+  } else {
+    MagicFactor = MagicFactors[0];
+    Factor = Factors[0];
+    Shift = Shifts[0];
+  }
 
   // Multiply the numerator (operand 0) by the magic value
   // FIXME: We should support doing a MUL in a wider type
   SDValue Q;
   if (IsAfterLegalization ? isOperationLegal(ISD::MULHS, VT)
                           : isOperationLegalOrCustom(ISD::MULHS, VT))
-    Q = DAG.getNode(ISD::MULHS, dl, VT, N0, DAG.getConstant(magics.m, dl, VT));
+    Q = DAG.getNode(ISD::MULHS, dl, VT, N0, MagicFactor);
   else if (IsAfterLegalization ? isOperationLegal(ISD::SMUL_LOHI, VT)
-                               : isOperationLegalOrCustom(ISD::SMUL_LOHI, VT))
-    Q = SDValue(DAG.getNode(ISD::SMUL_LOHI, dl, DAG.getVTList(VT, VT), N0,
-                            DAG.getConstant(magics.m, dl, VT))
-                    .getNode(), 1);
-  else
-    return SDValue(); // No mulhs or equvialent
+                               : isOperationLegalOrCustom(ISD::SMUL_LOHI, VT)) {
+    SDValue LoHi =
+        DAG.getNode(ISD::SMUL_LOHI, dl, DAG.getVTList(VT, VT), N0, MagicFactor);
+    Q = SDValue(LoHi.getNode(), 1);
+  } else
+    return SDValue(); // No mulhs or equivalent
+
+  Created.push_back(Q.getNode());
+
+  // (Optionally) Add/subtract the numerator using Factor.
+  Factor = DAG.getNode(ISD::MUL, dl, VT, N0, Factor);
+  Created.push_back(Factor.getNode());
+  Q = DAG.getNode(ISD::ADD, dl, VT, Q, Factor);
   Created.push_back(Q.getNode());
 
-  // If d > 0 and m < 0, add the numerator
-  if (Divisor.isStrictlyPositive() && magics.m.isNegative()) {
-    Q = DAG.getNode(ISD::ADD, dl, VT, Q, N0);
-    Created.push_back(Q.getNode());
-  }
-  // If d < 0 and m > 0, subtract the numerator.
-  if (Divisor.isNegative() && magics.m.isStrictlyPositive()) {
-    Q = DAG.getNode(ISD::SUB, dl, VT, Q, N0);
-    Created.push_back(Q.getNode());
-  }
   // Shift right algebraic if shift value is nonzero
-  if (magics.s > 0) {
-    Q = DAG.getNode(ISD::SRA, dl, VT, Q, DAG.getConstant(magics.s, dl, ShVT));
-    Created.push_back(Q.getNode());
-  }
+  Q = DAG.getNode(ISD::SRA, dl, VT, Q, Shift);
+  Created.push_back(Q.getNode());
+
   // Extract the sign bit and add it to the quotient
   SDValue T = DAG.getNode(ISD::SRL, dl, VT, Q,
                           DAG.getConstant(EltBits - 1, dl, ShVT));
Index: test/CodeGen/X86/combine-sdiv.ll
===================================================================
--- test/CodeGen/X86/combine-sdiv.ll
+++ test/CodeGen/X86/combine-sdiv.ll
@@ -2168,139 +2168,18 @@
 define <8 x i16> @combine_vec_sdiv_nonuniform(<8 x i16> %x) {
 ; SSE-LABEL: combine_vec_sdiv_nonuniform:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    pextrw $1, %xmm0, %eax
-; SSE-NEXT:    cwtl
-; SSE-NEXT:    imull $21846, %eax, %eax # imm = 0x5556
-; SSE-NEXT:    movl %eax, %ecx
-; SSE-NEXT:    shrl $31, %ecx
-; SSE-NEXT:    shrl $16, %eax
-; SSE-NEXT:    addl %ecx, %eax
-; SSE-NEXT:    movd %xmm0, %ecx
-; SSE-NEXT:    movswl %cx, %ecx
-; SSE-NEXT:    imull $21846, %ecx, %ecx # imm = 0x5556
-; SSE-NEXT:    movl %ecx, %edx
-; SSE-NEXT:    shrl $31, %edx
-; SSE-NEXT:    shrl $16, %ecx
-; SSE-NEXT:    addl %edx, %ecx
-; SSE-NEXT:    movd %ecx, %xmm1
-; SSE-NEXT:    pinsrw $1, %eax, %xmm1
-; SSE-NEXT:    pextrw $2, %xmm0, %eax
-; SSE-NEXT:    cwtl
-; SSE-NEXT:    imull $21846, %eax, %eax # imm = 0x5556
-; SSE-NEXT:    movl %eax, %ecx
-; SSE-NEXT:    shrl $31, %ecx
-; SSE-NEXT:    shrl $16, %eax
-; SSE-NEXT:    addl %ecx, %eax
-; SSE-NEXT:    pinsrw $2, %eax, %xmm1
-; SSE-NEXT:    pextrw $3, %xmm0, %eax
-; SSE-NEXT:    cwtl
-; SSE-NEXT:    imull $21846, %eax, %eax # imm = 0x5556
-; SSE-NEXT:    movl %eax, %ecx
-; SSE-NEXT:    shrl $31, %ecx
-; SSE-NEXT:    shrl $16, %eax
-; SSE-NEXT:    addl %ecx, %eax
-; SSE-NEXT:    pinsrw $3, %eax, %xmm1
-; SSE-NEXT:    pextrw $4, %xmm0, %eax
-; SSE-NEXT:    cwtl
-; SSE-NEXT:    imull $2979, %eax, %eax # imm = 0xBA3
-; SSE-NEXT:    movl %eax, %ecx
-; SSE-NEXT:    shrl $31, %ecx
-; SSE-NEXT:    shrl $16, %eax
-; SSE-NEXT:    addl %ecx, %eax
-; SSE-NEXT:    pinsrw $4, %eax, %xmm1
-; SSE-NEXT:    pextrw $5, %xmm0, %eax
-; SSE-NEXT:    cwtl
-; SSE-NEXT:    imull $2979, %eax, %eax # imm = 0xBA3
-; SSE-NEXT:    movl %eax, %ecx
-; SSE-NEXT:    shrl $31, %ecx
-; SSE-NEXT:    shrl $16, %eax
-; SSE-NEXT:    addl %ecx, %eax
-; SSE-NEXT:    pinsrw $5, %eax, %xmm1
-; SSE-NEXT:    pextrw $6, %xmm0, %eax
-; SSE-NEXT:    cwtl
-; SSE-NEXT:    imull $2979, %eax, %eax # imm = 0xBA3
-; SSE-NEXT:    movl %eax, %ecx
-; SSE-NEXT:    shrl $31, %ecx
-; SSE-NEXT:    shrl $16, %eax
-; SSE-NEXT:    addl %ecx, %eax
-; SSE-NEXT:    pinsrw $6, %eax, %xmm1
-; SSE-NEXT:    pextrw $7, %xmm0, %eax
-; SSE-NEXT:    cwtl
-; SSE-NEXT:    imull $2979, %eax, %eax # imm = 0xBA3
-; SSE-NEXT:    movl %eax, %ecx
-; SSE-NEXT:    shrl $31, %ecx
-; SSE-NEXT:    shrl $16, %eax
-; SSE-NEXT:    addl %ecx, %eax
-; SSE-NEXT:    pinsrw $7, %eax, %xmm1
+; SSE-NEXT:    pmulhw {{.*}}(%rip), %xmm0
+; SSE-NEXT:    movdqa %xmm0, %xmm1
+; SSE-NEXT:    psrlw $15, %xmm1
+; SSE-NEXT:    paddw %xmm0, %xmm1
 ; SSE-NEXT:    movdqa %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_sdiv_nonuniform:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vpextrw $1, %xmm0, %eax
-; AVX-NEXT:    cwtl
-; AVX-NEXT:    imull $21846, %eax, %eax # imm = 0x5556
-; AVX-NEXT:    movl %eax, %ecx
-; AVX-NEXT:    shrl $31, %ecx
-; AVX-NEXT:    shrl $16, %eax
-; AVX-NEXT:    addl %ecx, %eax
-; AVX-NEXT:    vmovd %xmm0, %ecx
-; AVX-NEXT:    movswl %cx, %ecx
-; AVX-NEXT:    imull $21846, %ecx, %ecx # imm = 0x5556
-; AVX-NEXT:    movl %ecx, %edx
-; AVX-NEXT:    shrl
$31, %edx -; AVX-NEXT: shrl $16, %ecx -; AVX-NEXT: addl %edx, %ecx -; AVX-NEXT: vmovd %ecx, %xmm1 -; AVX-NEXT: vpinsrw $1, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrw $2, %xmm0, %eax -; AVX-NEXT: cwtl -; AVX-NEXT: imull $21846, %eax, %eax # imm = 0x5556 -; AVX-NEXT: movl %eax, %ecx -; AVX-NEXT: shrl $31, %ecx -; AVX-NEXT: shrl $16, %eax -; AVX-NEXT: addl %ecx, %eax -; AVX-NEXT: vpinsrw $2, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrw $3, %xmm0, %eax -; AVX-NEXT: cwtl -; AVX-NEXT: imull $21846, %eax, %eax # imm = 0x5556 -; AVX-NEXT: movl %eax, %ecx -; AVX-NEXT: shrl $31, %ecx -; AVX-NEXT: shrl $16, %eax -; AVX-NEXT: addl %ecx, %eax -; AVX-NEXT: vpinsrw $3, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrw $4, %xmm0, %eax -; AVX-NEXT: cwtl -; AVX-NEXT: imull $2979, %eax, %eax # imm = 0xBA3 -; AVX-NEXT: movl %eax, %ecx -; AVX-NEXT: shrl $31, %ecx -; AVX-NEXT: shrl $16, %eax -; AVX-NEXT: addl %ecx, %eax -; AVX-NEXT: vpinsrw $4, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrw $5, %xmm0, %eax -; AVX-NEXT: cwtl -; AVX-NEXT: imull $2979, %eax, %eax # imm = 0xBA3 -; AVX-NEXT: movl %eax, %ecx -; AVX-NEXT: shrl $31, %ecx -; AVX-NEXT: shrl $16, %eax -; AVX-NEXT: addl %ecx, %eax -; AVX-NEXT: vpinsrw $5, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrw $6, %xmm0, %eax -; AVX-NEXT: cwtl -; AVX-NEXT: imull $2979, %eax, %eax # imm = 0xBA3 -; AVX-NEXT: movl %eax, %ecx -; AVX-NEXT: shrl $31, %ecx -; AVX-NEXT: shrl $16, %eax -; AVX-NEXT: addl %ecx, %eax -; AVX-NEXT: vpinsrw $6, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrw $7, %xmm0, %eax -; AVX-NEXT: cwtl -; AVX-NEXT: imull $2979, %eax, %eax # imm = 0xBA3 -; AVX-NEXT: movl %eax, %ecx -; AVX-NEXT: shrl $31, %ecx -; AVX-NEXT: shrl $16, %eax -; AVX-NEXT: addl %ecx, %eax -; AVX-NEXT: vpinsrw $7, %eax, %xmm1, %xmm0 +; AVX-NEXT: vpmulhw {{.*}}(%rip), %xmm0, %xmm0 +; AVX-NEXT: vpsrlw $15, %xmm0, %xmm1 +; AVX-NEXT: vpaddw %xmm1, %xmm0, %xmm0 ; AVX-NEXT: retq %1 = sdiv <8 x i16> %x, ret <8 x i16> %1 @@ -2309,140 +2188,61 @@ define <8 x i16> @combine_vec_sdiv_nonuniform2(<8 x i16> %x) { ; SSE-LABEL: combine_vec_sdiv_nonuniform2: ; SSE: # %bb.0: -; SSE-NEXT: pextrw $1, %xmm0, %eax -; SSE-NEXT: cwtl -; SSE-NEXT: imull $10923, %eax, %eax # imm = 0x2AAB -; SSE-NEXT: movl %eax, %ecx -; SSE-NEXT: shrl $31, %ecx -; SSE-NEXT: sarl $18, %eax -; SSE-NEXT: addl %ecx, %eax -; SSE-NEXT: movd %xmm0, %ecx -; SSE-NEXT: movswl %cx, %ecx -; SSE-NEXT: imull $10923, %ecx, %ecx # imm = 0x2AAB -; SSE-NEXT: movl %ecx, %edx -; SSE-NEXT: shrl $31, %edx -; SSE-NEXT: sarl $18, %ecx -; SSE-NEXT: addl %edx, %ecx -; SSE-NEXT: movd %ecx, %xmm1 -; SSE-NEXT: pinsrw $1, %eax, %xmm1 -; SSE-NEXT: pextrw $2, %xmm0, %eax -; SSE-NEXT: cwtl -; SSE-NEXT: imull $10923, %eax, %eax # imm = 0x2AAB -; SSE-NEXT: movl %eax, %ecx -; SSE-NEXT: shrl $31, %ecx -; SSE-NEXT: sarl $18, %eax -; SSE-NEXT: addl %ecx, %eax -; SSE-NEXT: pinsrw $2, %eax, %xmm1 -; SSE-NEXT: pextrw $3, %xmm0, %eax -; SSE-NEXT: cwtl -; SSE-NEXT: imull $10923, %eax, %eax # imm = 0x2AAB -; SSE-NEXT: movl %eax, %ecx -; SSE-NEXT: shrl $31, %ecx -; SSE-NEXT: sarl $18, %eax -; SSE-NEXT: addl %ecx, %eax -; SSE-NEXT: pinsrw $3, %eax, %xmm1 -; SSE-NEXT: pextrw $4, %xmm0, %eax -; SSE-NEXT: cwtl -; SSE-NEXT: imull $5243, %eax, %eax # imm = 0x147B -; SSE-NEXT: movl %eax, %ecx -; SSE-NEXT: shrl $31, %ecx -; SSE-NEXT: sarl $17, %eax -; SSE-NEXT: addl %ecx, %eax -; SSE-NEXT: pinsrw $4, %eax, %xmm1 -; SSE-NEXT: pextrw $5, %xmm0, %eax -; SSE-NEXT: cwtl -; SSE-NEXT: imull $5243, %eax, %eax # imm = 0x147B -; SSE-NEXT: movl %eax, %ecx -; SSE-NEXT: shrl $31, %ecx -; SSE-NEXT: sarl $17, %eax -; SSE-NEXT: addl 
%ecx, %eax -; SSE-NEXT: pinsrw $5, %eax, %xmm1 -; SSE-NEXT: pextrw $6, %xmm0, %eax -; SSE-NEXT: cwtl -; SSE-NEXT: imull $5243, %eax, %eax # imm = 0x147B -; SSE-NEXT: movl %eax, %ecx -; SSE-NEXT: shrl $31, %ecx -; SSE-NEXT: sarl $17, %eax -; SSE-NEXT: addl %ecx, %eax -; SSE-NEXT: pinsrw $6, %eax, %xmm1 -; SSE-NEXT: pextrw $7, %xmm0, %eax -; SSE-NEXT: cwtl -; SSE-NEXT: imull $5243, %eax, %eax # imm = 0x147B -; SSE-NEXT: movl %eax, %ecx -; SSE-NEXT: shrl $31, %ecx -; SSE-NEXT: sarl $17, %eax -; SSE-NEXT: addl %ecx, %eax -; SSE-NEXT: pinsrw $7, %eax, %xmm1 -; SSE-NEXT: movdqa %xmm1, %xmm0 +; SSE-NEXT: pmulhw {{.*}}(%rip), %xmm0 +; SSE-NEXT: movdqa %xmm0, %xmm1 +; SSE-NEXT: psraw $1, %xmm1 +; SSE-NEXT: movdqa %xmm0, %xmm2 +; SSE-NEXT: psraw $2, %xmm2 +; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7] +; SSE-NEXT: psrlw $15, %xmm0 +; SSE-NEXT: paddw %xmm2, %xmm0 ; SSE-NEXT: retq ; -; AVX-LABEL: combine_vec_sdiv_nonuniform2: -; AVX: # %bb.0: -; AVX-NEXT: vpextrw $1, %xmm0, %eax -; AVX-NEXT: cwtl -; AVX-NEXT: imull $10923, %eax, %eax # imm = 0x2AAB -; AVX-NEXT: movl %eax, %ecx -; AVX-NEXT: shrl $31, %ecx -; AVX-NEXT: sarl $18, %eax -; AVX-NEXT: addl %ecx, %eax -; AVX-NEXT: vmovd %xmm0, %ecx -; AVX-NEXT: movswl %cx, %ecx -; AVX-NEXT: imull $10923, %ecx, %ecx # imm = 0x2AAB -; AVX-NEXT: movl %ecx, %edx -; AVX-NEXT: shrl $31, %edx -; AVX-NEXT: sarl $18, %ecx -; AVX-NEXT: addl %edx, %ecx -; AVX-NEXT: vmovd %ecx, %xmm1 -; AVX-NEXT: vpinsrw $1, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrw $2, %xmm0, %eax -; AVX-NEXT: cwtl -; AVX-NEXT: imull $10923, %eax, %eax # imm = 0x2AAB -; AVX-NEXT: movl %eax, %ecx -; AVX-NEXT: shrl $31, %ecx -; AVX-NEXT: sarl $18, %eax -; AVX-NEXT: addl %ecx, %eax -; AVX-NEXT: vpinsrw $2, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrw $3, %xmm0, %eax -; AVX-NEXT: cwtl -; AVX-NEXT: imull $10923, %eax, %eax # imm = 0x2AAB -; AVX-NEXT: movl %eax, %ecx -; AVX-NEXT: shrl $31, %ecx -; AVX-NEXT: sarl $18, %eax -; AVX-NEXT: addl %ecx, %eax -; AVX-NEXT: vpinsrw $3, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrw $4, %xmm0, %eax -; AVX-NEXT: cwtl -; AVX-NEXT: imull $5243, %eax, %eax # imm = 0x147B -; AVX-NEXT: movl %eax, %ecx -; AVX-NEXT: shrl $31, %ecx -; AVX-NEXT: sarl $17, %eax -; AVX-NEXT: addl %ecx, %eax -; AVX-NEXT: vpinsrw $4, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrw $5, %xmm0, %eax -; AVX-NEXT: cwtl -; AVX-NEXT: imull $5243, %eax, %eax # imm = 0x147B -; AVX-NEXT: movl %eax, %ecx -; AVX-NEXT: shrl $31, %ecx -; AVX-NEXT: sarl $17, %eax -; AVX-NEXT: addl %ecx, %eax -; AVX-NEXT: vpinsrw $5, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrw $6, %xmm0, %eax -; AVX-NEXT: cwtl -; AVX-NEXT: imull $5243, %eax, %eax # imm = 0x147B -; AVX-NEXT: movl %eax, %ecx -; AVX-NEXT: shrl $31, %ecx -; AVX-NEXT: sarl $17, %eax -; AVX-NEXT: addl %ecx, %eax -; AVX-NEXT: vpinsrw $6, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrw $7, %xmm0, %eax -; AVX-NEXT: cwtl -; AVX-NEXT: imull $5243, %eax, %eax # imm = 0x147B -; AVX-NEXT: movl %eax, %ecx -; AVX-NEXT: shrl $31, %ecx -; AVX-NEXT: sarl $17, %eax -; AVX-NEXT: addl %ecx, %eax -; AVX-NEXT: vpinsrw $7, %eax, %xmm1, %xmm0 -; AVX-NEXT: retq +; AVX1-LABEL: combine_vec_sdiv_nonuniform2: +; AVX1: # %bb.0: +; AVX1-NEXT: vpmulhw {{.*}}(%rip), %xmm0, %xmm0 +; AVX1-NEXT: vpsraw $1, %xmm0, %xmm1 +; AVX1-NEXT: vpsraw $2, %xmm0, %xmm2 +; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7] +; AVX1-NEXT: vpsrlw $15, %xmm0, %xmm0 +; AVX1-NEXT: vpaddw %xmm0, %xmm1, %xmm0 +; AVX1-NEXT: retq +; +; AVX2-LABEL: combine_vec_sdiv_nonuniform2: +; AVX2: # %bb.0: +; AVX2-NEXT: vpmulhw {{.*}}(%rip), 
%xmm0, %xmm0 +; AVX2-NEXT: vpsraw $1, %xmm0, %xmm1 +; AVX2-NEXT: vpsraw $2, %xmm0, %xmm2 +; AVX2-NEXT: vpblendd {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3] +; AVX2-NEXT: vpsrlw $15, %xmm0, %xmm0 +; AVX2-NEXT: vpaddw %xmm0, %xmm1, %xmm0 +; AVX2-NEXT: retq +; +; AVX512F-LABEL: combine_vec_sdiv_nonuniform2: +; AVX512F: # %bb.0: +; AVX512F-NEXT: vpmulhw {{.*}}(%rip), %xmm0, %xmm0 +; AVX512F-NEXT: vpsraw $1, %xmm0, %xmm1 +; AVX512F-NEXT: vpsraw $2, %xmm0, %xmm2 +; AVX512F-NEXT: vpblendd {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3] +; AVX512F-NEXT: vpsrlw $15, %xmm0, %xmm0 +; AVX512F-NEXT: vpaddw %xmm0, %xmm1, %xmm0 +; AVX512F-NEXT: retq +; +; AVX512BW-LABEL: combine_vec_sdiv_nonuniform2: +; AVX512BW: # %bb.0: +; AVX512BW-NEXT: vpmulhw {{.*}}(%rip), %xmm0, %xmm0 +; AVX512BW-NEXT: vpsrlw $15, %xmm0, %xmm1 +; AVX512BW-NEXT: vpsravw {{.*}}(%rip), %xmm0, %xmm0 +; AVX512BW-NEXT: vpaddw %xmm1, %xmm0, %xmm0 +; AVX512BW-NEXT: retq +; +; XOP-LABEL: combine_vec_sdiv_nonuniform2: +; XOP: # %bb.0: +; XOP-NEXT: vpmulhw {{.*}}(%rip), %xmm0, %xmm0 +; XOP-NEXT: vpsrlw $15, %xmm0, %xmm1 +; XOP-NEXT: vpshaw {{.*}}(%rip), %xmm0, %xmm0 +; XOP-NEXT: vpaddw %xmm1, %xmm0, %xmm0 +; XOP-NEXT: retq %1 = sdiv <8 x i16> %x, ret <8 x i16> %1 } @@ -2450,172 +2250,69 @@ define <8 x i16> @combine_vec_sdiv_nonuniform3(<8 x i16> %x) { ; SSE-LABEL: combine_vec_sdiv_nonuniform3: ; SSE: # %bb.0: -; SSE-NEXT: pextrw $1, %xmm0, %eax -; SSE-NEXT: movswl %ax, %ecx -; SSE-NEXT: imull $-19945, %ecx, %ecx # imm = 0xB217 -; SSE-NEXT: shrl $16, %ecx -; SSE-NEXT: addl %eax, %ecx -; SSE-NEXT: movzwl %cx, %eax -; SSE-NEXT: sarw $4, %cx -; SSE-NEXT: shrl $15, %eax -; SSE-NEXT: addl %ecx, %eax -; SSE-NEXT: movd %xmm0, %ecx -; SSE-NEXT: movswl %cx, %edx -; SSE-NEXT: imull $-19945, %edx, %edx # imm = 0xB217 -; SSE-NEXT: shrl $16, %edx -; SSE-NEXT: addl %ecx, %edx -; SSE-NEXT: movzwl %dx, %ecx -; SSE-NEXT: sarw $4, %dx -; SSE-NEXT: shrl $15, %ecx -; SSE-NEXT: addl %edx, %ecx -; SSE-NEXT: movd %ecx, %xmm1 -; SSE-NEXT: pinsrw $1, %eax, %xmm1 -; SSE-NEXT: pextrw $2, %xmm0, %eax -; SSE-NEXT: movswl %ax, %ecx -; SSE-NEXT: imull $-19945, %ecx, %ecx # imm = 0xB217 -; SSE-NEXT: shrl $16, %ecx -; SSE-NEXT: addl %eax, %ecx -; SSE-NEXT: movzwl %cx, %eax -; SSE-NEXT: sarw $4, %cx -; SSE-NEXT: shrl $15, %eax -; SSE-NEXT: addl %ecx, %eax -; SSE-NEXT: pinsrw $2, %eax, %xmm1 -; SSE-NEXT: pextrw $3, %xmm0, %eax -; SSE-NEXT: movswl %ax, %ecx -; SSE-NEXT: imull $-19945, %ecx, %ecx # imm = 0xB217 -; SSE-NEXT: shrl $16, %ecx -; SSE-NEXT: addl %eax, %ecx -; SSE-NEXT: movzwl %cx, %eax -; SSE-NEXT: sarw $4, %cx -; SSE-NEXT: shrl $15, %eax -; SSE-NEXT: addl %ecx, %eax -; SSE-NEXT: pinsrw $3, %eax, %xmm1 -; SSE-NEXT: pextrw $4, %xmm0, %eax -; SSE-NEXT: movswl %ax, %ecx -; SSE-NEXT: imull $-32703, %ecx, %ecx # imm = 0x8041 -; SSE-NEXT: shrl $16, %ecx -; SSE-NEXT: addl %eax, %ecx -; SSE-NEXT: movzwl %cx, %eax -; SSE-NEXT: sarw $8, %cx -; SSE-NEXT: shrl $15, %eax -; SSE-NEXT: addl %ecx, %eax -; SSE-NEXT: pinsrw $4, %eax, %xmm1 -; SSE-NEXT: pextrw $5, %xmm0, %eax -; SSE-NEXT: movswl %ax, %ecx -; SSE-NEXT: imull $-32703, %ecx, %ecx # imm = 0x8041 -; SSE-NEXT: shrl $16, %ecx -; SSE-NEXT: addl %eax, %ecx -; SSE-NEXT: movzwl %cx, %eax -; SSE-NEXT: sarw $8, %cx -; SSE-NEXT: shrl $15, %eax -; SSE-NEXT: addl %ecx, %eax -; SSE-NEXT: pinsrw $5, %eax, %xmm1 -; SSE-NEXT: pextrw $6, %xmm0, %eax -; SSE-NEXT: movswl %ax, %ecx -; SSE-NEXT: imull $-32703, %ecx, %ecx # imm = 0x8041 -; SSE-NEXT: shrl $16, %ecx -; SSE-NEXT: addl %eax, %ecx -; SSE-NEXT: movzwl %cx, %eax -; SSE-NEXT: sarw $8, %cx -; SSE-NEXT: shrl 
$15, %eax -; SSE-NEXT: addl %ecx, %eax -; SSE-NEXT: pinsrw $6, %eax, %xmm1 -; SSE-NEXT: pextrw $7, %xmm0, %eax -; SSE-NEXT: movswl %ax, %ecx -; SSE-NEXT: imull $-32703, %ecx, %ecx # imm = 0x8041 -; SSE-NEXT: shrl $16, %ecx -; SSE-NEXT: addl %eax, %ecx -; SSE-NEXT: movzwl %cx, %eax -; SSE-NEXT: sarw $8, %cx -; SSE-NEXT: shrl $15, %eax -; SSE-NEXT: addl %ecx, %eax -; SSE-NEXT: pinsrw $7, %eax, %xmm1 +; SSE-NEXT: movdqa {{.*#+}} xmm1 = [45591,45591,45591,45591,32833,32833,32833,32833] +; SSE-NEXT: pmulhw %xmm0, %xmm1 +; SSE-NEXT: paddw %xmm0, %xmm1 +; SSE-NEXT: movdqa %xmm1, %xmm0 +; SSE-NEXT: psraw $8, %xmm0 +; SSE-NEXT: movdqa %xmm1, %xmm2 +; SSE-NEXT: psraw $4, %xmm2 +; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm0[4,5,6,7] +; SSE-NEXT: psrlw $15, %xmm1 +; SSE-NEXT: paddw %xmm2, %xmm1 ; SSE-NEXT: movdqa %xmm1, %xmm0 ; SSE-NEXT: retq ; -; AVX-LABEL: combine_vec_sdiv_nonuniform3: -; AVX: # %bb.0: -; AVX-NEXT: vpextrw $1, %xmm0, %eax -; AVX-NEXT: movswl %ax, %ecx -; AVX-NEXT: imull $-19945, %ecx, %ecx # imm = 0xB217 -; AVX-NEXT: shrl $16, %ecx -; AVX-NEXT: addl %eax, %ecx -; AVX-NEXT: movzwl %cx, %eax -; AVX-NEXT: sarw $4, %cx -; AVX-NEXT: shrl $15, %eax -; AVX-NEXT: addl %ecx, %eax -; AVX-NEXT: vmovd %xmm0, %ecx -; AVX-NEXT: movswl %cx, %edx -; AVX-NEXT: imull $-19945, %edx, %edx # imm = 0xB217 -; AVX-NEXT: shrl $16, %edx -; AVX-NEXT: addl %ecx, %edx -; AVX-NEXT: movzwl %dx, %ecx -; AVX-NEXT: sarw $4, %dx -; AVX-NEXT: shrl $15, %ecx -; AVX-NEXT: addl %edx, %ecx -; AVX-NEXT: vmovd %ecx, %xmm1 -; AVX-NEXT: vpinsrw $1, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrw $2, %xmm0, %eax -; AVX-NEXT: movswl %ax, %ecx -; AVX-NEXT: imull $-19945, %ecx, %ecx # imm = 0xB217 -; AVX-NEXT: shrl $16, %ecx -; AVX-NEXT: addl %eax, %ecx -; AVX-NEXT: movzwl %cx, %eax -; AVX-NEXT: sarw $4, %cx -; AVX-NEXT: shrl $15, %eax -; AVX-NEXT: addl %ecx, %eax -; AVX-NEXT: vpinsrw $2, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrw $3, %xmm0, %eax -; AVX-NEXT: movswl %ax, %ecx -; AVX-NEXT: imull $-19945, %ecx, %ecx # imm = 0xB217 -; AVX-NEXT: shrl $16, %ecx -; AVX-NEXT: addl %eax, %ecx -; AVX-NEXT: movzwl %cx, %eax -; AVX-NEXT: sarw $4, %cx -; AVX-NEXT: shrl $15, %eax -; AVX-NEXT: addl %ecx, %eax -; AVX-NEXT: vpinsrw $3, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrw $4, %xmm0, %eax -; AVX-NEXT: movswl %ax, %ecx -; AVX-NEXT: imull $-32703, %ecx, %ecx # imm = 0x8041 -; AVX-NEXT: shrl $16, %ecx -; AVX-NEXT: addl %eax, %ecx -; AVX-NEXT: movzwl %cx, %eax -; AVX-NEXT: sarw $8, %cx -; AVX-NEXT: shrl $15, %eax -; AVX-NEXT: addl %ecx, %eax -; AVX-NEXT: vpinsrw $4, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrw $5, %xmm0, %eax -; AVX-NEXT: movswl %ax, %ecx -; AVX-NEXT: imull $-32703, %ecx, %ecx # imm = 0x8041 -; AVX-NEXT: shrl $16, %ecx -; AVX-NEXT: addl %eax, %ecx -; AVX-NEXT: movzwl %cx, %eax -; AVX-NEXT: sarw $8, %cx -; AVX-NEXT: shrl $15, %eax -; AVX-NEXT: addl %ecx, %eax -; AVX-NEXT: vpinsrw $5, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrw $6, %xmm0, %eax -; AVX-NEXT: movswl %ax, %ecx -; AVX-NEXT: imull $-32703, %ecx, %ecx # imm = 0x8041 -; AVX-NEXT: shrl $16, %ecx -; AVX-NEXT: addl %eax, %ecx -; AVX-NEXT: movzwl %cx, %eax -; AVX-NEXT: sarw $8, %cx -; AVX-NEXT: shrl $15, %eax -; AVX-NEXT: addl %ecx, %eax -; AVX-NEXT: vpinsrw $6, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrw $7, %xmm0, %eax -; AVX-NEXT: movswl %ax, %ecx -; AVX-NEXT: imull $-32703, %ecx, %ecx # imm = 0x8041 -; AVX-NEXT: shrl $16, %ecx -; AVX-NEXT: addl %eax, %ecx -; AVX-NEXT: movzwl %cx, %eax -; AVX-NEXT: sarw $8, %cx -; AVX-NEXT: shrl $15, %eax -; AVX-NEXT: addl %ecx, %eax -; AVX-NEXT: vpinsrw 
$7, %eax, %xmm1, %xmm0 -; AVX-NEXT: retq +; AVX1-LABEL: combine_vec_sdiv_nonuniform3: +; AVX1: # %bb.0: +; AVX1-NEXT: vpmulhw {{.*}}(%rip), %xmm0, %xmm1 +; AVX1-NEXT: vpaddw %xmm0, %xmm1, %xmm0 +; AVX1-NEXT: vpsraw $8, %xmm0, %xmm1 +; AVX1-NEXT: vpsraw $4, %xmm0, %xmm2 +; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7] +; AVX1-NEXT: vpsrlw $15, %xmm0, %xmm0 +; AVX1-NEXT: vpaddw %xmm0, %xmm1, %xmm0 +; AVX1-NEXT: retq +; +; AVX2-LABEL: combine_vec_sdiv_nonuniform3: +; AVX2: # %bb.0: +; AVX2-NEXT: vpmulhw {{.*}}(%rip), %xmm0, %xmm1 +; AVX2-NEXT: vpaddw %xmm0, %xmm1, %xmm0 +; AVX2-NEXT: vpsraw $8, %xmm0, %xmm1 +; AVX2-NEXT: vpsraw $4, %xmm0, %xmm2 +; AVX2-NEXT: vpblendd {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3] +; AVX2-NEXT: vpsrlw $15, %xmm0, %xmm0 +; AVX2-NEXT: vpaddw %xmm0, %xmm1, %xmm0 +; AVX2-NEXT: retq +; +; AVX512F-LABEL: combine_vec_sdiv_nonuniform3: +; AVX512F: # %bb.0: +; AVX512F-NEXT: vpmulhw {{.*}}(%rip), %xmm0, %xmm1 +; AVX512F-NEXT: vpaddw %xmm0, %xmm1, %xmm0 +; AVX512F-NEXT: vpsraw $8, %xmm0, %xmm1 +; AVX512F-NEXT: vpsraw $4, %xmm0, %xmm2 +; AVX512F-NEXT: vpblendd {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3] +; AVX512F-NEXT: vpsrlw $15, %xmm0, %xmm0 +; AVX512F-NEXT: vpaddw %xmm0, %xmm1, %xmm0 +; AVX512F-NEXT: retq +; +; AVX512BW-LABEL: combine_vec_sdiv_nonuniform3: +; AVX512BW: # %bb.0: +; AVX512BW-NEXT: vpmulhw {{.*}}(%rip), %xmm0, %xmm1 +; AVX512BW-NEXT: vpaddw %xmm0, %xmm1, %xmm0 +; AVX512BW-NEXT: vpsrlw $15, %xmm0, %xmm1 +; AVX512BW-NEXT: vpsravw {{.*}}(%rip), %xmm0, %xmm0 +; AVX512BW-NEXT: vpaddw %xmm1, %xmm0, %xmm0 +; AVX512BW-NEXT: retq +; +; XOP-LABEL: combine_vec_sdiv_nonuniform3: +; XOP: # %bb.0: +; XOP-NEXT: vpmulhw {{.*}}(%rip), %xmm0, %xmm1 +; XOP-NEXT: vpaddw %xmm0, %xmm1, %xmm0 +; XOP-NEXT: vpsrlw $15, %xmm0, %xmm1 +; XOP-NEXT: vpshaw {{.*}}(%rip), %xmm0, %xmm0 +; XOP-NEXT: vpaddw %xmm1, %xmm0, %xmm0 +; XOP-NEXT: retq %1 = sdiv <8 x i16> %x, ret <8 x i16> %1 } @@ -2623,172 +2320,69 @@ define <8 x i16> @combine_vec_sdiv_nonuniform4(<8 x i16> %x) { ; SSE-LABEL: combine_vec_sdiv_nonuniform4: ; SSE: # %bb.0: -; SSE-NEXT: pextrw $1, %xmm0, %eax -; SSE-NEXT: movswl %ax, %ecx -; SSE-NEXT: imull $19945, %ecx, %ecx # imm = 0x4DE9 -; SSE-NEXT: shrl $16, %ecx -; SSE-NEXT: subl %eax, %ecx -; SSE-NEXT: movzwl %cx, %eax -; SSE-NEXT: sarw $4, %cx -; SSE-NEXT: shrl $15, %eax -; SSE-NEXT: addl %ecx, %eax -; SSE-NEXT: movd %xmm0, %ecx -; SSE-NEXT: movswl %cx, %edx -; SSE-NEXT: imull $19945, %edx, %edx # imm = 0x4DE9 -; SSE-NEXT: shrl $16, %edx -; SSE-NEXT: subl %ecx, %edx -; SSE-NEXT: movzwl %dx, %ecx -; SSE-NEXT: sarw $4, %dx -; SSE-NEXT: shrl $15, %ecx -; SSE-NEXT: addl %edx, %ecx -; SSE-NEXT: movd %ecx, %xmm1 -; SSE-NEXT: pinsrw $1, %eax, %xmm1 -; SSE-NEXT: pextrw $2, %xmm0, %eax -; SSE-NEXT: movswl %ax, %ecx -; SSE-NEXT: imull $19945, %ecx, %ecx # imm = 0x4DE9 -; SSE-NEXT: shrl $16, %ecx -; SSE-NEXT: subl %eax, %ecx -; SSE-NEXT: movzwl %cx, %eax -; SSE-NEXT: sarw $4, %cx -; SSE-NEXT: shrl $15, %eax -; SSE-NEXT: addl %ecx, %eax -; SSE-NEXT: pinsrw $2, %eax, %xmm1 -; SSE-NEXT: pextrw $3, %xmm0, %eax -; SSE-NEXT: movswl %ax, %ecx -; SSE-NEXT: imull $19945, %ecx, %ecx # imm = 0x4DE9 -; SSE-NEXT: shrl $16, %ecx -; SSE-NEXT: subl %eax, %ecx -; SSE-NEXT: movzwl %cx, %eax -; SSE-NEXT: sarw $4, %cx -; SSE-NEXT: shrl $15, %eax -; SSE-NEXT: addl %ecx, %eax -; SSE-NEXT: pinsrw $3, %eax, %xmm1 -; SSE-NEXT: pextrw $4, %xmm0, %eax -; SSE-NEXT: movswl %ax, %ecx -; SSE-NEXT: imull $32639, %ecx, %ecx # imm = 0x7F7F -; SSE-NEXT: shrl $16, %ecx -; SSE-NEXT: subl %eax, %ecx -; SSE-NEXT: movzwl 
%cx, %eax -; SSE-NEXT: sarw $8, %cx -; SSE-NEXT: shrl $15, %eax -; SSE-NEXT: addl %ecx, %eax -; SSE-NEXT: pinsrw $4, %eax, %xmm1 -; SSE-NEXT: pextrw $5, %xmm0, %eax -; SSE-NEXT: movswl %ax, %ecx -; SSE-NEXT: imull $32639, %ecx, %ecx # imm = 0x7F7F -; SSE-NEXT: shrl $16, %ecx -; SSE-NEXT: subl %eax, %ecx -; SSE-NEXT: movzwl %cx, %eax -; SSE-NEXT: sarw $8, %cx -; SSE-NEXT: shrl $15, %eax -; SSE-NEXT: addl %ecx, %eax -; SSE-NEXT: pinsrw $5, %eax, %xmm1 -; SSE-NEXT: pextrw $6, %xmm0, %eax -; SSE-NEXT: movswl %ax, %ecx -; SSE-NEXT: imull $32639, %ecx, %ecx # imm = 0x7F7F -; SSE-NEXT: shrl $16, %ecx -; SSE-NEXT: subl %eax, %ecx -; SSE-NEXT: movzwl %cx, %eax -; SSE-NEXT: sarw $8, %cx -; SSE-NEXT: shrl $15, %eax -; SSE-NEXT: addl %ecx, %eax -; SSE-NEXT: pinsrw $6, %eax, %xmm1 -; SSE-NEXT: pextrw $7, %xmm0, %eax -; SSE-NEXT: movswl %ax, %ecx -; SSE-NEXT: imull $32639, %ecx, %ecx # imm = 0x7F7F -; SSE-NEXT: shrl $16, %ecx -; SSE-NEXT: subl %eax, %ecx -; SSE-NEXT: movzwl %cx, %eax -; SSE-NEXT: sarw $8, %cx -; SSE-NEXT: shrl $15, %eax -; SSE-NEXT: addl %ecx, %eax -; SSE-NEXT: pinsrw $7, %eax, %xmm1 +; SSE-NEXT: movdqa {{.*#+}} xmm1 = [19945,19945,19945,19945,32639,32639,32639,32639] +; SSE-NEXT: pmulhw %xmm0, %xmm1 +; SSE-NEXT: psubw %xmm0, %xmm1 +; SSE-NEXT: movdqa %xmm1, %xmm0 +; SSE-NEXT: psraw $8, %xmm0 +; SSE-NEXT: movdqa %xmm1, %xmm2 +; SSE-NEXT: psraw $4, %xmm2 +; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm0[4,5,6,7] +; SSE-NEXT: psrlw $15, %xmm1 +; SSE-NEXT: paddw %xmm2, %xmm1 ; SSE-NEXT: movdqa %xmm1, %xmm0 ; SSE-NEXT: retq ; -; AVX-LABEL: combine_vec_sdiv_nonuniform4: -; AVX: # %bb.0: -; AVX-NEXT: vpextrw $1, %xmm0, %eax -; AVX-NEXT: movswl %ax, %ecx -; AVX-NEXT: imull $19945, %ecx, %ecx # imm = 0x4DE9 -; AVX-NEXT: shrl $16, %ecx -; AVX-NEXT: subl %eax, %ecx -; AVX-NEXT: movzwl %cx, %eax -; AVX-NEXT: sarw $4, %cx -; AVX-NEXT: shrl $15, %eax -; AVX-NEXT: addl %ecx, %eax -; AVX-NEXT: vmovd %xmm0, %ecx -; AVX-NEXT: movswl %cx, %edx -; AVX-NEXT: imull $19945, %edx, %edx # imm = 0x4DE9 -; AVX-NEXT: shrl $16, %edx -; AVX-NEXT: subl %ecx, %edx -; AVX-NEXT: movzwl %dx, %ecx -; AVX-NEXT: sarw $4, %dx -; AVX-NEXT: shrl $15, %ecx -; AVX-NEXT: addl %edx, %ecx -; AVX-NEXT: vmovd %ecx, %xmm1 -; AVX-NEXT: vpinsrw $1, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrw $2, %xmm0, %eax -; AVX-NEXT: movswl %ax, %ecx -; AVX-NEXT: imull $19945, %ecx, %ecx # imm = 0x4DE9 -; AVX-NEXT: shrl $16, %ecx -; AVX-NEXT: subl %eax, %ecx -; AVX-NEXT: movzwl %cx, %eax -; AVX-NEXT: sarw $4, %cx -; AVX-NEXT: shrl $15, %eax -; AVX-NEXT: addl %ecx, %eax -; AVX-NEXT: vpinsrw $2, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrw $3, %xmm0, %eax -; AVX-NEXT: movswl %ax, %ecx -; AVX-NEXT: imull $19945, %ecx, %ecx # imm = 0x4DE9 -; AVX-NEXT: shrl $16, %ecx -; AVX-NEXT: subl %eax, %ecx -; AVX-NEXT: movzwl %cx, %eax -; AVX-NEXT: sarw $4, %cx -; AVX-NEXT: shrl $15, %eax -; AVX-NEXT: addl %ecx, %eax -; AVX-NEXT: vpinsrw $3, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrw $4, %xmm0, %eax -; AVX-NEXT: movswl %ax, %ecx -; AVX-NEXT: imull $32639, %ecx, %ecx # imm = 0x7F7F -; AVX-NEXT: shrl $16, %ecx -; AVX-NEXT: subl %eax, %ecx -; AVX-NEXT: movzwl %cx, %eax -; AVX-NEXT: sarw $8, %cx -; AVX-NEXT: shrl $15, %eax -; AVX-NEXT: addl %ecx, %eax -; AVX-NEXT: vpinsrw $4, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrw $5, %xmm0, %eax -; AVX-NEXT: movswl %ax, %ecx -; AVX-NEXT: imull $32639, %ecx, %ecx # imm = 0x7F7F -; AVX-NEXT: shrl $16, %ecx -; AVX-NEXT: subl %eax, %ecx -; AVX-NEXT: movzwl %cx, %eax -; AVX-NEXT: sarw $8, %cx -; AVX-NEXT: shrl $15, %eax -; AVX-NEXT: addl %ecx, 
%eax -; AVX-NEXT: vpinsrw $5, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrw $6, %xmm0, %eax -; AVX-NEXT: movswl %ax, %ecx -; AVX-NEXT: imull $32639, %ecx, %ecx # imm = 0x7F7F -; AVX-NEXT: shrl $16, %ecx -; AVX-NEXT: subl %eax, %ecx -; AVX-NEXT: movzwl %cx, %eax -; AVX-NEXT: sarw $8, %cx -; AVX-NEXT: shrl $15, %eax -; AVX-NEXT: addl %ecx, %eax -; AVX-NEXT: vpinsrw $6, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrw $7, %xmm0, %eax -; AVX-NEXT: movswl %ax, %ecx -; AVX-NEXT: imull $32639, %ecx, %ecx # imm = 0x7F7F -; AVX-NEXT: shrl $16, %ecx -; AVX-NEXT: subl %eax, %ecx -; AVX-NEXT: movzwl %cx, %eax -; AVX-NEXT: sarw $8, %cx -; AVX-NEXT: shrl $15, %eax -; AVX-NEXT: addl %ecx, %eax -; AVX-NEXT: vpinsrw $7, %eax, %xmm1, %xmm0 -; AVX-NEXT: retq +; AVX1-LABEL: combine_vec_sdiv_nonuniform4: +; AVX1: # %bb.0: +; AVX1-NEXT: vpmulhw {{.*}}(%rip), %xmm0, %xmm1 +; AVX1-NEXT: vpsubw %xmm0, %xmm1, %xmm0 +; AVX1-NEXT: vpsraw $8, %xmm0, %xmm1 +; AVX1-NEXT: vpsraw $4, %xmm0, %xmm2 +; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7] +; AVX1-NEXT: vpsrlw $15, %xmm0, %xmm0 +; AVX1-NEXT: vpaddw %xmm0, %xmm1, %xmm0 +; AVX1-NEXT: retq +; +; AVX2-LABEL: combine_vec_sdiv_nonuniform4: +; AVX2: # %bb.0: +; AVX2-NEXT: vpmulhw {{.*}}(%rip), %xmm0, %xmm1 +; AVX2-NEXT: vpsubw %xmm0, %xmm1, %xmm0 +; AVX2-NEXT: vpsraw $8, %xmm0, %xmm1 +; AVX2-NEXT: vpsraw $4, %xmm0, %xmm2 +; AVX2-NEXT: vpblendd {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3] +; AVX2-NEXT: vpsrlw $15, %xmm0, %xmm0 +; AVX2-NEXT: vpaddw %xmm0, %xmm1, %xmm0 +; AVX2-NEXT: retq +; +; AVX512F-LABEL: combine_vec_sdiv_nonuniform4: +; AVX512F: # %bb.0: +; AVX512F-NEXT: vpmulhw {{.*}}(%rip), %xmm0, %xmm1 +; AVX512F-NEXT: vpsubw %xmm0, %xmm1, %xmm0 +; AVX512F-NEXT: vpsraw $8, %xmm0, %xmm1 +; AVX512F-NEXT: vpsraw $4, %xmm0, %xmm2 +; AVX512F-NEXT: vpblendd {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3] +; AVX512F-NEXT: vpsrlw $15, %xmm0, %xmm0 +; AVX512F-NEXT: vpaddw %xmm0, %xmm1, %xmm0 +; AVX512F-NEXT: retq +; +; AVX512BW-LABEL: combine_vec_sdiv_nonuniform4: +; AVX512BW: # %bb.0: +; AVX512BW-NEXT: vpmulhw {{.*}}(%rip), %xmm0, %xmm1 +; AVX512BW-NEXT: vpsubw %xmm0, %xmm1, %xmm0 +; AVX512BW-NEXT: vpsrlw $15, %xmm0, %xmm1 +; AVX512BW-NEXT: vpsravw {{.*}}(%rip), %xmm0, %xmm0 +; AVX512BW-NEXT: vpaddw %xmm1, %xmm0, %xmm0 +; AVX512BW-NEXT: retq +; +; XOP-LABEL: combine_vec_sdiv_nonuniform4: +; XOP: # %bb.0: +; XOP-NEXT: vpmulhw {{.*}}(%rip), %xmm0, %xmm1 +; XOP-NEXT: vpsubw %xmm0, %xmm1, %xmm0 +; XOP-NEXT: vpsrlw $15, %xmm0, %xmm1 +; XOP-NEXT: vpshaw {{.*}}(%rip), %xmm0, %xmm0 +; XOP-NEXT: vpaddw %xmm1, %xmm0, %xmm0 +; XOP-NEXT: retq %1 = sdiv <8 x i16> %x, ret <8 x i16> %1 } @@ -2796,160 +2390,88 @@ define <8 x i16> @combine_vec_sdiv_nonuniform5(<8 x i16> %x) { ; SSE-LABEL: combine_vec_sdiv_nonuniform5: ; SSE: # %bb.0: -; SSE-NEXT: pextrw $2, %xmm0, %eax -; SSE-NEXT: movswl %ax, %ecx -; SSE-NEXT: imull $19945, %ecx, %ecx # imm = 0x4DE9 -; SSE-NEXT: shrl $16, %ecx -; SSE-NEXT: subl %eax, %ecx -; SSE-NEXT: movzwl %cx, %eax -; SSE-NEXT: sarw $4, %cx -; SSE-NEXT: shrl $15, %eax -; SSE-NEXT: addl %ecx, %eax -; SSE-NEXT: movd %xmm0, %ecx -; SSE-NEXT: movswl %cx, %edx -; SSE-NEXT: imull $32639, %edx, %edx # imm = 0x7F7F -; SSE-NEXT: shrl $16, %edx -; SSE-NEXT: subl %ecx, %edx -; SSE-NEXT: movzwl %dx, %ecx -; SSE-NEXT: sarw $8, %dx -; SSE-NEXT: shrl $15, %ecx -; SSE-NEXT: addl %edx, %ecx -; SSE-NEXT: movd %ecx, %xmm1 -; SSE-NEXT: pextrw $1, %xmm0, %ecx -; SSE-NEXT: movswl %cx, %ecx -; SSE-NEXT: imull $-10923, %ecx, %ecx # imm = 0xD555 -; SSE-NEXT: movl %ecx, %edx -; SSE-NEXT: shrl $16, %edx -; SSE-NEXT: 
sarw $2, %dx -; SSE-NEXT: shrl $31, %ecx -; SSE-NEXT: addl %edx, %ecx -; SSE-NEXT: pinsrw $1, %ecx, %xmm1 -; SSE-NEXT: pinsrw $2, %eax, %xmm1 -; SSE-NEXT: pextrw $3, %xmm0, %eax -; SSE-NEXT: cwtl -; SSE-NEXT: imull $21846, %eax, %eax # imm = 0x5556 -; SSE-NEXT: movl %eax, %ecx -; SSE-NEXT: shrl $31, %ecx -; SSE-NEXT: shrl $16, %eax -; SSE-NEXT: addl %ecx, %eax -; SSE-NEXT: pinsrw $3, %eax, %xmm1 -; SSE-NEXT: pextrw $4, %xmm0, %eax -; SSE-NEXT: cwtl -; SSE-NEXT: imull $2979, %eax, %eax # imm = 0xBA3 -; SSE-NEXT: movl %eax, %ecx -; SSE-NEXT: shrl $31, %ecx -; SSE-NEXT: shrl $16, %eax -; SSE-NEXT: addl %ecx, %eax -; SSE-NEXT: pinsrw $4, %eax, %xmm1 -; SSE-NEXT: pextrw $5, %xmm0, %eax -; SSE-NEXT: cwtl -; SSE-NEXT: imull $5243, %eax, %eax # imm = 0x147B -; SSE-NEXT: movl %eax, %ecx -; SSE-NEXT: shrl $16, %ecx -; SSE-NEXT: sarw %cx -; SSE-NEXT: shrl $31, %eax -; SSE-NEXT: addl %ecx, %eax -; SSE-NEXT: pinsrw $5, %eax, %xmm1 -; SSE-NEXT: pextrw $6, %xmm0, %eax -; SSE-NEXT: movswl %ax, %ecx -; SSE-NEXT: imull $-32639, %ecx, %ecx # imm = 0x8081 -; SSE-NEXT: shrl $16, %ecx -; SSE-NEXT: addl %eax, %ecx -; SSE-NEXT: movzwl %cx, %eax -; SSE-NEXT: sarw $7, %cx -; SSE-NEXT: shrl $15, %eax -; SSE-NEXT: addl %ecx, %eax -; SSE-NEXT: pinsrw $6, %eax, %xmm1 -; SSE-NEXT: pextrw $7, %xmm0, %eax -; SSE-NEXT: movswl %ax, %ecx -; SSE-NEXT: imull $-32703, %ecx, %ecx # imm = 0x8041 -; SSE-NEXT: shrl $16, %ecx -; SSE-NEXT: addl %eax, %ecx -; SSE-NEXT: movzwl %cx, %eax -; SSE-NEXT: sarw $8, %cx -; SSE-NEXT: shrl $15, %eax -; SSE-NEXT: addl %ecx, %eax -; SSE-NEXT: pinsrw $7, %eax, %xmm1 -; SSE-NEXT: movdqa %xmm1, %xmm0 +; SSE-NEXT: movdqa {{.*#+}} xmm1 = [65535,0,65535,0,0,0,1,1] +; SSE-NEXT: pmullw %xmm0, %xmm1 +; SSE-NEXT: pmulhw {{.*}}(%rip), %xmm0 +; SSE-NEXT: paddw %xmm1, %xmm0 +; SSE-NEXT: movdqa %xmm0, %xmm1 +; SSE-NEXT: psraw $8, %xmm1 +; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0],xmm0[1,2,3,4,5,6],xmm1[7] +; SSE-NEXT: movdqa %xmm1, %xmm2 +; SSE-NEXT: psraw $4, %xmm2 +; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm1[0,1],xmm2[2],xmm1[3,4,5],xmm2[6],xmm1[7] +; SSE-NEXT: movdqa %xmm2, %xmm1 +; SSE-NEXT: psraw $2, %xmm1 +; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1],xmm2[2,3,4,5],xmm1[6],xmm2[7] +; SSE-NEXT: movdqa %xmm1, %xmm2 +; SSE-NEXT: psraw $1, %xmm2 +; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm1[0,1,2,3,4],xmm2[5,6],xmm1[7] +; SSE-NEXT: psrlw $15, %xmm0 +; SSE-NEXT: paddw %xmm2, %xmm0 ; SSE-NEXT: retq ; -; AVX-LABEL: combine_vec_sdiv_nonuniform5: -; AVX: # %bb.0: -; AVX-NEXT: vpextrw $2, %xmm0, %eax -; AVX-NEXT: movswl %ax, %ecx -; AVX-NEXT: imull $19945, %ecx, %ecx # imm = 0x4DE9 -; AVX-NEXT: shrl $16, %ecx -; AVX-NEXT: subl %eax, %ecx -; AVX-NEXT: movzwl %cx, %eax -; AVX-NEXT: sarw $4, %cx -; AVX-NEXT: shrl $15, %eax -; AVX-NEXT: addl %ecx, %eax -; AVX-NEXT: vmovd %xmm0, %ecx -; AVX-NEXT: movswl %cx, %edx -; AVX-NEXT: imull $32639, %edx, %edx # imm = 0x7F7F -; AVX-NEXT: shrl $16, %edx -; AVX-NEXT: subl %ecx, %edx -; AVX-NEXT: movzwl %dx, %ecx -; AVX-NEXT: sarw $8, %dx -; AVX-NEXT: shrl $15, %ecx -; AVX-NEXT: addl %edx, %ecx -; AVX-NEXT: vmovd %ecx, %xmm1 -; AVX-NEXT: vpextrw $1, %xmm0, %ecx -; AVX-NEXT: movswl %cx, %ecx -; AVX-NEXT: imull $-10923, %ecx, %ecx # imm = 0xD555 -; AVX-NEXT: movl %ecx, %edx -; AVX-NEXT: shrl $16, %edx -; AVX-NEXT: sarw $2, %dx -; AVX-NEXT: shrl $31, %ecx -; AVX-NEXT: addl %edx, %ecx -; AVX-NEXT: vpinsrw $1, %ecx, %xmm1, %xmm1 -; AVX-NEXT: vpinsrw $2, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrw $3, %xmm0, %eax -; AVX-NEXT: cwtl -; AVX-NEXT: imull $21846, %eax, %eax # imm = 0x5556 -; 
AVX-NEXT: movl %eax, %ecx -; AVX-NEXT: shrl $31, %ecx -; AVX-NEXT: shrl $16, %eax -; AVX-NEXT: addl %ecx, %eax -; AVX-NEXT: vpinsrw $3, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrw $4, %xmm0, %eax -; AVX-NEXT: cwtl -; AVX-NEXT: imull $2979, %eax, %eax # imm = 0xBA3 -; AVX-NEXT: movl %eax, %ecx -; AVX-NEXT: shrl $31, %ecx -; AVX-NEXT: shrl $16, %eax -; AVX-NEXT: addl %ecx, %eax -; AVX-NEXT: vpinsrw $4, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrw $5, %xmm0, %eax -; AVX-NEXT: cwtl -; AVX-NEXT: imull $5243, %eax, %eax # imm = 0x147B -; AVX-NEXT: movl %eax, %ecx -; AVX-NEXT: shrl $16, %ecx -; AVX-NEXT: sarw %cx -; AVX-NEXT: shrl $31, %eax -; AVX-NEXT: addl %ecx, %eax -; AVX-NEXT: vpinsrw $5, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrw $6, %xmm0, %eax -; AVX-NEXT: movswl %ax, %ecx -; AVX-NEXT: imull $-32639, %ecx, %ecx # imm = 0x8081 -; AVX-NEXT: shrl $16, %ecx -; AVX-NEXT: addl %eax, %ecx -; AVX-NEXT: movzwl %cx, %eax -; AVX-NEXT: sarw $7, %cx -; AVX-NEXT: shrl $15, %eax -; AVX-NEXT: addl %ecx, %eax -; AVX-NEXT: vpinsrw $6, %eax, %xmm1, %xmm1 -; AVX-NEXT: vpextrw $7, %xmm0, %eax -; AVX-NEXT: movswl %ax, %ecx -; AVX-NEXT: imull $-32703, %ecx, %ecx # imm = 0x8041 -; AVX-NEXT: shrl $16, %ecx -; AVX-NEXT: addl %eax, %ecx -; AVX-NEXT: movzwl %cx, %eax -; AVX-NEXT: sarw $8, %cx -; AVX-NEXT: shrl $15, %eax -; AVX-NEXT: addl %ecx, %eax -; AVX-NEXT: vpinsrw $7, %eax, %xmm1, %xmm0 -; AVX-NEXT: retq +; AVX1-LABEL: combine_vec_sdiv_nonuniform5: +; AVX1: # %bb.0: +; AVX1-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm1 +; AVX1-NEXT: vpmulhw {{.*}}(%rip), %xmm0, %xmm0 +; AVX1-NEXT: vpaddw %xmm1, %xmm0, %xmm0 +; AVX1-NEXT: vpsraw $8, %xmm0, %xmm1 +; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm0[1,2,3,4,5,6],xmm1[7] +; AVX1-NEXT: vpsraw $4, %xmm1, %xmm2 +; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2],xmm1[3,4,5],xmm2[6],xmm1[7] +; AVX1-NEXT: vpsraw $2, %xmm1, %xmm2 +; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3,4,5],xmm2[6],xmm1[7] +; AVX1-NEXT: vpsraw $1, %xmm1, %xmm2 +; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4],xmm2[5,6],xmm1[7] +; AVX1-NEXT: vpsrlw $15, %xmm0, %xmm0 +; AVX1-NEXT: vpaddw %xmm0, %xmm1, %xmm0 +; AVX1-NEXT: retq +; +; AVX2-LABEL: combine_vec_sdiv_nonuniform5: +; AVX2: # %bb.0: +; AVX2-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm1 +; AVX2-NEXT: vpmulhw {{.*}}(%rip), %xmm0, %xmm0 +; AVX2-NEXT: vpaddw %xmm1, %xmm0, %xmm0 +; AVX2-NEXT: vpmovsxwd %xmm0, %ymm1 +; AVX2-NEXT: vpsravd {{.*}}(%rip), %ymm1, %ymm1 +; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2 +; AVX2-NEXT: vpackssdw %xmm2, %xmm1, %xmm1 +; AVX2-NEXT: vpsrlw $15, %xmm0, %xmm0 +; AVX2-NEXT: vpaddw %xmm0, %xmm1, %xmm0 +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq +; +; AVX512F-LABEL: combine_vec_sdiv_nonuniform5: +; AVX512F: # %bb.0: +; AVX512F-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm1 +; AVX512F-NEXT: vpmulhw {{.*}}(%rip), %xmm0, %xmm0 +; AVX512F-NEXT: vpaddw %xmm1, %xmm0, %xmm0 +; AVX512F-NEXT: vpsrlw $15, %xmm0, %xmm1 +; AVX512F-NEXT: vpmovsxwd %xmm0, %ymm0 +; AVX512F-NEXT: vpsravd {{.*}}(%rip), %ymm0, %ymm0 +; AVX512F-NEXT: vpmovdw %zmm0, %ymm0 +; AVX512F-NEXT: vpaddw %xmm1, %xmm0, %xmm0 +; AVX512F-NEXT: vzeroupper +; AVX512F-NEXT: retq +; +; AVX512BW-LABEL: combine_vec_sdiv_nonuniform5: +; AVX512BW: # %bb.0: +; AVX512BW-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm1 +; AVX512BW-NEXT: vpmulhw {{.*}}(%rip), %xmm0, %xmm0 +; AVX512BW-NEXT: vpaddw %xmm1, %xmm0, %xmm0 +; AVX512BW-NEXT: vpsrlw $15, %xmm0, %xmm1 +; AVX512BW-NEXT: vpsravw {{.*}}(%rip), %xmm0, %xmm0 +; AVX512BW-NEXT: vpaddw %xmm1, %xmm0, %xmm0 +; AVX512BW-NEXT: retq +; +; 
XOP-LABEL: combine_vec_sdiv_nonuniform5:
+; XOP:       # %bb.0:
+; XOP-NEXT:    vpmulhw {{.*}}(%rip), %xmm0, %xmm1
+; XOP-NEXT:    vpmacsww %xmm1, {{.*}}(%rip), %xmm0, %xmm0
+; XOP-NEXT:    vpsrlw $15, %xmm0, %xmm1
+; XOP-NEXT:    vpshaw {{.*}}(%rip), %xmm0, %xmm0
+; XOP-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
+; XOP-NEXT:    retq
 %1 = sdiv <8 x i16> %x,
 ret <8 x i16> %1
 }
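
Reviewer note (not part of the patch): the lowering exercised above is the standard signed magic-number division, now built per element: ISD::MULHS by magics.m, an optional add/subtract of the numerator via the new Factor vector, ISD::SRA by magics.s, then ISD::SRL of the sign bit and a final ISD::ADD. The scalar C++ sketch below only double-checks that sequence for a divisor of 3 on i16, using the constants visible in the combine_vec_sdiv_nonuniform SSE checks (magic 21846 = 0x5556, shift 0, factor 0); the helper name is illustrative and it assumes arithmetic right shift of negative int32_t, as on typical hosts.

// Standalone sanity check mirroring the emitted node sequence for d = 3, i16.
#include <cassert>
#include <cstdint>

static int16_t sdiv_by_3_via_magic(int16_t N) {
  const int32_t MagicFactor = 21846; // magics.m for divisor 3 at 16 bits (0x5556)
  const int Shift = 0;               // magics.s for divisor 3
  int32_t Q = (int32_t(N) * MagicFactor) >> 16; // ISD::MULHS: high 16 bits of the product
  // Factor is 0 for this divisor, so no ISD::ADD/SUB of the numerator is needed.
  Q >>= Shift;                       // ISD::SRA by the per-element shift
  Q += uint16_t(Q) >> 15;            // ISD::SRL of the sign bit + ISD::ADD
  return int16_t(Q);
}

int main() {
  // Exhaustively compare against truncating division over the full i16 range.
  for (int N = INT16_MIN; N <= INT16_MAX; ++N)
    assert(sdiv_by_3_via_magic(int16_t(N)) == int16_t(N / 3));
  return 0;
}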