Index: lib/CodeGen/SelectionDAG/TargetLowering.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -3432,34 +3432,74 @@
 
 /// Given an exact SDIV by a constant, create a multiplication
 /// with the multiplicative inverse of the constant.
-static SDValue BuildExactSDIV(const TargetLowering &TLI, SDValue Op1, APInt d,
+static SDValue BuildExactSDIV(const TargetLowering &TLI, SDNode *N,
                               const SDLoc &dl, SelectionDAG &DAG,
                               SmallVectorImpl<SDNode *> &Created) {
-  assert(d != 0 && "Division by zero!");
+  SDValue Op0 = N->getOperand(0);
+  SDValue Op1 = N->getOperand(1);
+  EVT VT = N->getValueType(0);
+  EVT ShVT = TLI.getShiftAmountTy(Op0.getValueType(), DAG.getDataLayout());
+
+  auto BuildSDIVPattern = [](APInt Divisor, unsigned &Shift, APInt &Factor) {
+    bool UseSRA = false;
+    Shift = Divisor.countTrailingZeros();
+    if (Shift) {
+      Divisor.ashrInPlace(Shift);
+      UseSRA = true;
+    }
+    // Calculate the multiplicative inverse, using Newton's method.
+    APInt t;
+    Factor = Divisor;
+    while ((t = Divisor * Factor) != 1)
+      Factor *= APInt(Divisor.getBitWidth(), 2) - t;
+    return UseSRA;
+  };
+
+  bool UseSRA = false;
+  SDValue Shift, Factor;
+  if (VT.isVector()) {
+    if (ISD::BUILD_VECTOR != Op1.getOpcode())
+      return SDValue();
+    EVT SVT = VT.getScalarType();
+    EVT ShSVT = ShVT.getScalarType();
+    unsigned EltBits = VT.getScalarSizeInBits();
+    unsigned NumElts = VT.getVectorNumElements();
+    SmallVector<SDValue, 16> Shifts, Factors;
+    for (unsigned i = 0; i != NumElts; ++i) {
+      auto *C = dyn_cast<ConstantSDNode>(Op1.getOperand(i));
+      if (!C || C->isNullValue() || C->getAPIntValue().getBitWidth() != EltBits)
+        return SDValue();
+      APInt FactorVal;
+      unsigned ShiftVal;
+      UseSRA |= BuildSDIVPattern(C->getAPIntValue(), ShiftVal, FactorVal);
+      Shifts.push_back(DAG.getConstant(ShiftVal, dl, ShSVT));
+      Factors.push_back(DAG.getConstant(FactorVal, dl, SVT));
+    }
+    Shift = DAG.getBuildVector(ShVT, dl, Shifts);
+    Factor = DAG.getBuildVector(VT, dl, Factors);
+  } else {
+    auto *C = dyn_cast<ConstantSDNode>(Op1);
+    if (!C || C->isNullValue())
+      return SDValue();
+    APInt FactorVal;
+    unsigned ShiftVal;
+    UseSRA = BuildSDIVPattern(C->getAPIntValue(), ShiftVal, FactorVal);
+    Shift = DAG.getConstant(ShiftVal, dl, ShVT);
+    Factor = DAG.getConstant(FactorVal, dl, VT);
+  }
+
+  SDValue Res = Op0;
 
   // Shift the value upfront if it is even, so the LSB is one.
-  unsigned ShAmt = d.countTrailingZeros();
-  if (ShAmt) {
+  if (UseSRA) {
     // TODO: For UDIV use SRL instead of SRA.
-    SDValue Amt =
-        DAG.getConstant(ShAmt, dl, TLI.getShiftAmountTy(Op1.getValueType(),
-                                                        DAG.getDataLayout()));
     SDNodeFlags Flags;
     Flags.setExact(true);
-    Op1 = DAG.getNode(ISD::SRA, dl, Op1.getValueType(), Op1, Amt, Flags);
-    Created.push_back(Op1.getNode());
-    d.ashrInPlace(ShAmt);
+    Res = DAG.getNode(ISD::SRA, dl, VT, Res, Shift, Flags);
+    Created.push_back(Res.getNode());
   }
 
-  // Calculate the multiplicative inverse, using Newton's method.
-  APInt t, xn = d;
-  while ((t = d*xn) != 1)
-    xn *= APInt(d.getBitWidth(), 2) - t;
-
-  SDValue Op2 = DAG.getConstant(xn, dl, Op1.getValueType());
-  SDValue Mul = DAG.getNode(ISD::MUL, dl, Op1.getValueType(), Op1, Op2);
-  Created.push_back(Mul.getNode());
-  return Mul;
+  return DAG.getNode(ISD::MUL, dl, VT, Res, Factor);
 }
 
 SDValue TargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
@@ -3487,16 +3527,16 @@
   if (!isTypeLegal(VT))
     return SDValue();
 
+  // If the sdiv has an 'exact' bit we can use a simpler lowering.
+  if (N->getFlags().hasExact())
+    return BuildExactSDIV(*this, N, dl, DAG, Created);
+
   // TODO: Add non-uniform constant support.
   ConstantSDNode *C = isConstOrConstSplat(N->getOperand(1));
   if (!C || C->isNullValue())
     return SDValue();
   const APInt &Divisor = C->getAPIntValue();
 
-  // If the sdiv has an 'exact' bit we can use a simpler lowering.
-  if (N->getFlags().hasExact())
-    return BuildExactSDIV(*this, N->getOperand(0), Divisor, dl, DAG, Created);
-
   APInt::ms magics = Divisor.magic();
 
   // Multiply the numerator (operand 0) by the magic value
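For reference, the BuildSDIVPattern lambda above splits each divisor into an odd part and a power of two, then computes the odd part's multiplicative inverse modulo 2^BitWidth with Newton's method on APInt. A minimal standalone sketch of the same iteration, specialized to uint32_t (the MultiplicativeInverse helper name is illustrative, not part of the patch):

```cpp
#include <cassert>
#include <cstdint>

// Newton's iteration x <- x * (2 - d * x) on 2-adic integers: starting from
// x = d (correct in the low 3 bits for any odd d), each step doubles the
// number of correct low bits, so a few iterations reach d^-1 mod 2^32.
static uint32_t MultiplicativeInverse(uint32_t Divisor) {
  assert((Divisor & 1) && "only odd values are invertible modulo 2^32");
  uint32_t Factor = Divisor;
  uint32_t T;
  while ((T = Divisor * Factor) != 1)
    Factor *= 2 - T;
  return Factor;
}
```

For example, MultiplicativeInverse(25) is 0xC28F5C29 (-1030792151 as a signed imull immediate), which is the constant that appears in the sdiv-exact.ll checks below; an exact sdiv by 24 = 3 * 2^3 instead becomes an exact SRA by 3 followed by a multiply by MultiplicativeInverse(3) = 0xAAAAAAAB.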
Index: test/CodeGen/X86/sdiv-exact.ll
===================================================================
--- test/CodeGen/X86/sdiv-exact.ll
+++ test/CodeGen/X86/sdiv-exact.ll
@@ -80,45 +80,25 @@
 define <4 x i32> @test5(<4 x i32> %x) {
 ; X86-LABEL: test5:
 ; X86:       # %bb.0:
-; X86-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
-; X86-NEXT:    movd %xmm1, %eax
-; X86-NEXT:    imull $-1030792151, %eax, %eax # imm = 0xC28F5C29
-; X86-NEXT:    movd %eax, %xmm1
-; X86-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
-; X86-NEXT:    movd %xmm2, %eax
-; X86-NEXT:    imull $-1030792151, %eax, %eax # imm = 0xC28F5C29
-; X86-NEXT:    movd %eax, %xmm2
-; X86-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; X86-NEXT:    movd %xmm0, %eax
-; X86-NEXT:    sarl $3, %eax
-; X86-NEXT:    imull $-1431655765, %eax, %eax # imm = 0xAAAAAAAB
-; X86-NEXT:    movd %eax, %xmm1
-; X86-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; X86-NEXT:    movd %xmm0, %eax
-; X86-NEXT:    sarl $3, %eax
-; X86-NEXT:    imull $-1431655765, %eax, %eax # imm = 0xAAAAAAAB
-; X86-NEXT:    movd %eax, %xmm0
+; X86-NEXT:    movdqa %xmm0, %xmm1
+; X86-NEXT:    psrad $3, %xmm1
+; X86-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; X86-NEXT:    movdqa {{.*#+}} xmm2 = [2863311531,2863311531,3264175145,3264175145]
+; X86-NEXT:    movapd %xmm0, %xmm1
+; X86-NEXT:    pmuludq %xmm2, %xmm1
+; X86-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; X86-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; X86-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; X86-NEXT:    pmuludq %xmm0, %xmm2
+; X86-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
 ; X86-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; X86-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
 ; X86-NEXT:    movdqa %xmm1, %xmm0
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test5:
 ; X64:       # %bb.0:
-; X64-NEXT:    vpextrd $1, %xmm0, %eax
-; X64-NEXT:    sarl $3, %eax
-; X64-NEXT:    imull $-1431655765, %eax, %eax # imm = 0xAAAAAAAB
-; X64-NEXT:    vmovd %xmm0, %ecx
-; X64-NEXT:    sarl $3, %ecx
-; X64-NEXT:    imull $-1431655765, %ecx, %ecx # imm = 0xAAAAAAAB
-; X64-NEXT:    vmovd %ecx, %xmm1
-; X64-NEXT:    vpinsrd $1, %eax, %xmm1, %xmm1
-; X64-NEXT:    vpextrd $2, %xmm0, %eax
-; X64-NEXT:    imull $-1030792151, %eax, %eax # imm = 0xC28F5C29
-; X64-NEXT:    vpinsrd $2, %eax, %xmm1, %xmm1
-; X64-NEXT:    vpextrd $3, %xmm0, %eax
-; X64-NEXT:    imull $-1030792151, %eax, %eax # imm = 0xC28F5C29
-; X64-NEXT:    vpinsrd $3, %eax, %xmm1, %xmm0
+; X64-NEXT:    vpsravd {{.*}}(%rip), %xmm0, %xmm0
+; X64-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm0
 ; X64-NEXT:    retq
   %div = sdiv exact <4 x i32> %x, <i32 24, i32 24, i32 25, i32 25>
   ret <4 x i32> %div
@@ -127,49 +107,26 @@
 define <4 x i32> @test6(<4 x i32> %x) {
 ; X86-LABEL: test6:
 ; X86:       # %bb.0:
-; X86-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
-; X86-NEXT:    movd %xmm1, %eax
-; X86-NEXT:    sarl %eax
-; X86-NEXT:    imull $-991146299, %eax, %eax # imm = 0xC4EC4EC5
-; X86-NEXT:    movd %eax, %xmm1
-; X86-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
-; X86-NEXT:    movd %xmm2, %eax
-; X86-NEXT:    sarl %eax
-; X86-NEXT:    imull $-991146299, %eax, %eax # imm = 0xC4EC4EC5
-; X86-NEXT:    movd %eax, %xmm2
-; X86-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; X86-NEXT:    movd %xmm0, %eax
-; X86-NEXT:    sarl $3, %eax
-; X86-NEXT:    imull $-1431655765, %eax, %eax # imm = 0xAAAAAAAB
-; X86-NEXT:    movd %eax, %xmm1
-; X86-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; X86-NEXT:    movd %xmm0, %eax
-; X86-NEXT:    sarl $3, %eax
-; X86-NEXT:    imull $-1431655765, %eax, %eax # imm = 0xAAAAAAAB
-; X86-NEXT:    movd %eax, %xmm0
+; X86-NEXT:    movdqa %xmm0, %xmm1
+; X86-NEXT:    psrad $3, %xmm1
+; X86-NEXT:    psrad $1, %xmm0
+; X86-NEXT:    movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; X86-NEXT:    movdqa {{.*#+}} xmm2 = [2863311531,2863311531,3303820997,3303820997]
+; X86-NEXT:    movapd %xmm0, %xmm1
+; X86-NEXT:    pmuludq %xmm2, %xmm1
+; X86-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; X86-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; X86-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; X86-NEXT:    pmuludq %xmm0, %xmm2
+; X86-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
 ; X86-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; X86-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
 ; X86-NEXT:    movdqa %xmm1, %xmm0
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test6:
 ; X64:       # %bb.0:
-; X64-NEXT:    vpextrd $1, %xmm0, %eax
-; X64-NEXT:    sarl $3, %eax
-; X64-NEXT:    imull $-1431655765, %eax, %eax # imm = 0xAAAAAAAB
-; X64-NEXT:    vmovd %xmm0, %ecx
-; X64-NEXT:    sarl $3, %ecx
-; X64-NEXT:    imull $-1431655765, %ecx, %ecx # imm = 0xAAAAAAAB
-; X64-NEXT:    vmovd %ecx, %xmm1
-; X64-NEXT:    vpinsrd $1, %eax, %xmm1, %xmm1
-; X64-NEXT:    vpextrd $2, %xmm0, %eax
-; X64-NEXT:    sarl %eax
-; X64-NEXT:    imull $-991146299, %eax, %eax # imm = 0xC4EC4EC5
-; X64-NEXT:    vpinsrd $2, %eax, %xmm1, %xmm1
-; X64-NEXT:    vpextrd $3, %xmm0, %eax
-; X64-NEXT:    sarl %eax
-; X64-NEXT:    imull $-991146299, %eax, %eax # imm = 0xC4EC4EC5
-; X64-NEXT:    vpinsrd $3, %eax, %xmm1, %xmm0
+; X64-NEXT:    vpsravd {{.*}}(%rip), %xmm0, %xmm0
+; X64-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm0
 ; X64-NEXT:    retq
   %div = sdiv exact <4 x i32> %x, <i32 24, i32 24, i32 26, i32 26>
   ret <4 x i32> %div
@@ -178,41 +135,19 @@
 define <4 x i32> @test7(<4 x i32> %x) {
 ; X86-LABEL: test7:
 ; X86:       # %bb.0:
-; X86-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
-; X86-NEXT:    movd %xmm1, %eax
-; X86-NEXT:    imull $1749801491, %eax, %eax # imm = 0x684BDA13
-; X86-NEXT:    movd %eax, %xmm1
-; X86-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
-; X86-NEXT:    movd %xmm2, %eax
-; X86-NEXT:    imull $1749801491, %eax, %eax # imm = 0x684BDA13
-; X86-NEXT:    movd %eax, %xmm2
-; X86-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; X86-NEXT:    movd %xmm0, %eax
-; X86-NEXT:    imull $-1030792151, %eax, %eax # imm = 0xC28F5C29
-; X86-NEXT:    movd %eax, %xmm1
-; X86-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; X86-NEXT:    movd %xmm0, %eax
-; X86-NEXT:    imull $-1030792151, %eax, %eax # imm = 0xC28F5C29
-; X86-NEXT:    movd %eax, %xmm0
-; X86-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; X86-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; X86-NEXT:    movdqa %xmm1, %xmm0
+; X86-NEXT:    movdqa {{.*#+}} xmm1 = [3264175145,3264175145,1749801491,1749801491]
+; X86-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; X86-NEXT:    pmuludq %xmm1, %xmm0
+; X86-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; X86-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; X86-NEXT:    pmuludq %xmm2, %xmm1
+; X86-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; X86-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
 ; X86-NEXT:    retl
 ;
 ; X64-LABEL: test7:
 ; X64:       # %bb.0:
-; X64-NEXT:    vpextrd $1, %xmm0, %eax
-; X64-NEXT:    imull $-1030792151, %eax, %eax # imm = 0xC28F5C29
-; X64-NEXT:    vmovd %xmm0, %ecx
-; X64-NEXT:    imull $-1030792151, %ecx, %ecx # imm = 0xC28F5C29
-; X64-NEXT:    vmovd %ecx, %xmm1
-; X64-NEXT:    vpinsrd $1, %eax, %xmm1, %xmm1
-; X64-NEXT:    vpextrd $2, %xmm0, %eax
-; X64-NEXT:    imull $1749801491, %eax, %eax # imm = 0x684BDA13
-; X64-NEXT:    vpinsrd $2, %eax, %xmm1, %xmm1
-; X64-NEXT:    vpextrd $3, %xmm0, %eax
-; X64-NEXT:    imull $1749801491, %eax, %eax # imm = 0x684BDA13
-; X64-NEXT:    vpinsrd $3, %eax, %xmm1, %xmm0
+; X64-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm0
 ; X64-NEXT:    retq
   %div = sdiv exact <4 x i32> %x, <i32 25, i32 25, i32 27, i32 27>
   ret <4 x i32> %div
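As a sanity check on the new CHECK-line constants, each vector multiplier is the modulo-2^32 inverse of the odd part of the corresponding divisor (24 = 3 * 2^3 and 26 = 13 * 2^1 are shifted first; 25 and 27 are already odd). A small compile-time verification sketch, not part of the test itself:

```cpp
#include <cstdint>

// Multiplier from the CHECK lines times the odd part of its divisor must be
// 1 modulo 2^32 for the shift+multiply lowering of an exact sdiv to hold.
static_assert(uint32_t(uint64_t{3}  * 0xAAAAAAABu) == 1, "2863311531 inverts 3");
static_assert(uint32_t(uint64_t{25} * 0xC28F5C29u) == 1, "3264175145 inverts 25");
static_assert(uint32_t(uint64_t{13} * 0xC4EC4EC5u) == 1, "3303820997 inverts 13");
static_assert(uint32_t(uint64_t{27} * 0x684BDA13u) == 1, "1749801491 inverts 27");
```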