diff --git a/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp b/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp --- a/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp @@ -1701,85 +1701,136 @@ // Variety of transform for (urem/srem (mul/shl X, Y), (mul/shl X, Z)) static Instruction *simplifyIRemMulShl(BinaryOperator &I, InstCombinerImpl &IC) { - Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1), *X; - const APInt *Y, *Z; + Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1), *X, *Y, *Z; bool ShiftX = false, ShiftY = false, ShiftZ = false; - if ((match(Op0, m_Mul(m_Value(X), m_APInt(Y))) && - match(Op1, m_c_Mul(m_Specific(X), m_APInt(Z)))) || - (match(Op0, m_Mul(m_APInt(Y), m_Value(X))) && - match(Op1, m_c_Mul(m_Specific(X), m_APInt(Z))))) { + if ((match(Op0, m_Mul(m_Value(X), m_Value(Y))) && + match(Op1, m_c_Mul(m_Specific(X), m_Value(Z)))) || + (match(Op0, m_Mul(m_Value(Y), m_Value(X))) && + match(Op1, m_c_Mul(m_Specific(X), m_Value(Z))))) { // Pass - } else if (match(Op0, m_Shl(m_Value(X), m_APInt(Y))) && - match(Op1, m_c_Mul(m_Specific(X), m_APInt(Z)))) { + } else if (match(Op0, m_Shl(m_Value(X), m_Value(Y))) && + match(Op1, m_c_Mul(m_Specific(X), m_Value(Z)))) { ShiftY = true; - } else if (match(Op1, m_Shl(m_Value(X), m_APInt(Z))) && - match(Op0, m_c_Mul(m_Specific(X), m_APInt(Y)))) { + } else if (match(Op1, m_Shl(m_Value(X), m_Value(Z))) && + match(Op0, m_c_Mul(m_Specific(X), m_Value(Y)))) { ShiftZ = true; - } else if (match(Op0, m_Shl(m_Value(X), m_APInt(Y))) && - match(Op1, m_Shl(m_Specific(X), m_APInt(Z)))) { + } else if (match(Op0, m_Shl(m_Value(X), m_Value(Y))) && + match(Op1, m_Shl(m_Specific(X), m_Value(Z)))) { ShiftZ = true; ShiftY = true; - } else if (match(Op0, m_Shl(m_APInt(Y), m_Value(X))) && - match(Op1, m_Shl(m_APInt(Z), m_Specific(X)))) { + } else if (match(Op0, m_Shl(m_Value(Y), m_Value(X))) && + match(Op1, m_Shl(m_Value(Z), m_Specific(X)))) { ShiftX = 
true; } else { return nullptr; } - APInt AdjustedY = *Y; - APInt AdjustedZ = *Z; - // Just treat the shifts as mul, we may end up returning a mul by power - // of 2 but that will be cleaned up later. - if (ShiftY) - AdjustedY = APInt(AdjustedY.getBitWidth(), 1) << AdjustedY; - if (ShiftZ) - AdjustedZ = APInt(AdjustedZ.getBitWidth(), 1) << AdjustedZ; + OverflowingBinaryOperator *BO0 = cast<OverflowingBinaryOperator>(Op0); + OverflowingBinaryOperator *BO1 = cast<OverflowingBinaryOperator>(Op1); + + // If X is constant 1, then we avoid both in the mul and shl case. + auto CX = dyn_cast<Constant>(X); + if (CX && CX->isOneValue()) + return nullptr; + + auto GetOperandAsConstantInt = [](Value *Op) -> ConstantInt * { + if (Op->getType()->isVectorTy()) + if (auto *COp = dyn_cast<Constant>(Op)) { + auto *CSplat = COp->getSplatValue(); + return CSplat ? dyn_cast<ConstantInt>(CSplat) : nullptr; + } + return dyn_cast<ConstantInt>(Op); + }; + + ConstantInt *ConstY = GetOperandAsConstantInt(Y); + ConstantInt *ConstZ = GetOperandAsConstantInt(Z); bool IsSRem = I.getOpcode() == Instruction::SRem; - OverflowingBinaryOperator *BO0 = cast<OverflowingBinaryOperator>(Op0); // TODO: We may be able to deduce more about nsw/nuw of BO0/BO1 based on Y >= // Z or Z >= Y. bool BO0HasNSW = BO0->hasNoSignedWrap(); bool BO0HasNUW = BO0->hasNoUnsignedWrap(); - bool BO0NoWrap = IsSRem ? BO0HasNSW : BO0HasNUW; - - APInt RemYZ = IsSRem ? AdjustedY.srem(AdjustedZ) : AdjustedY.urem(AdjustedZ); - // (rem (mul nuw/nsw X, Y), (mul X, Z)) - // if (rem Y, Z) == 0 - // -> 0 - if (RemYZ.isZero() && BO0NoWrap) - return IC.replaceInstUsesWith(I, ConstantInt::getNullValue(I.getType())); + bool BO1HasNSW = BO1->hasNoSignedWrap(); + bool BO1HasNUW = BO1->hasNoUnsignedWrap(); auto GetBinOpOut = [&](Value *RemSimplification) -> BinaryOperator * { return ShiftX ? BinaryOperator::CreateShl(RemSimplification, X) : BinaryOperator::CreateMul(X, RemSimplification); }; - OverflowingBinaryOperator *BO1 = cast<OverflowingBinaryOperator>(Op1); - bool BO1HasNSW = BO1->hasNoSignedWrap(); - bool BO1HasNUW = BO1->hasNoUnsignedWrap(); - bool BO1NoWrap = IsSRem ? 
BO1HasNSW : BO1HasNUW; - // (rem (mul X, Y), (mul nuw/nsw X, Z)) - // if (rem Y, Z) == Y - // -> (mul nuw/nsw X, Y) - if (RemYZ == AdjustedY && BO1NoWrap) { - BinaryOperator *BO = GetBinOpOut(ConstantInt::get(I.getType(), AdjustedY)); - // Copy any overflow flags from Op0. - BO->setHasNoSignedWrap(IsSRem || BO0HasNSW); - BO->setHasNoUnsignedWrap(!IsSRem || BO0HasNUW); - return BO; - } - - // (rem (mul nuw/nsw X, Y), (mul {nsw} X, Z)) - // if Y >= Z - // -> (mul {nuw} nsw X, (rem Y, Z)) - if (AdjustedY.uge(AdjustedZ) && - (IsSRem ? (BO0HasNSW && BO1HasNSW) : BO0HasNUW)) { - BinaryOperator *BO = GetBinOpOut(ConstantInt::get(I.getType(), RemYZ)); - BO->setHasNoSignedWrap(); - BO->setHasNoUnsignedWrap(BO0HasNUW); - return BO; + if (ConstY && ConstZ) { + APInt AdjustedY = ConstY->getValue(); + APInt AdjustedZ = ConstZ->getValue(); + + // Just treat the shifts as mul, we may end up returning a mul by power + // of 2 but that will be cleaned up later. + if (ShiftY) + AdjustedY = APInt(AdjustedY.getBitWidth(), 1) << AdjustedY; + if (ShiftZ) + AdjustedZ = APInt(AdjustedZ.getBitWidth(), 1) << AdjustedZ; + + bool BO0NoWrap = IsSRem ? BO0HasNSW : BO0HasNUW; + + APInt RemYZ = + IsSRem ? AdjustedY.srem(AdjustedZ) : AdjustedY.urem(AdjustedZ); + // (rem (mul nuw/nsw X, Y), (mul X, Z)) + // if (rem Y, Z) == 0 + // -> 0 + if (RemYZ.isZero() && BO0NoWrap) + return IC.replaceInstUsesWith(I, ConstantInt::getNullValue(I.getType())); + + bool BO1NoWrap = IsSRem ? BO1HasNSW : BO1HasNUW; + // (rem (mul X, Y), (mul nuw/nsw X, Z)) + // if (rem Y, Z) == Y + // -> (mul nuw/nsw X, Y) + if (RemYZ == AdjustedY && BO1NoWrap) { + BinaryOperator *BO = + GetBinOpOut(ConstantInt::get(I.getType(), AdjustedY)); + // Copy any overflow flags from Op0. 
+ BO->setHasNoSignedWrap(IsSRem || BO0HasNSW); + BO->setHasNoUnsignedWrap(!IsSRem || BO0HasNUW); + return BO; + } + + // (rem (mul nuw/nsw X, Y), (mul {nsw} X, Z)) + // if Y >= Z + // -> (mul {nuw} nsw X, (rem Y, Z)) + if (AdjustedY.uge(AdjustedZ) && + (IsSRem ? (BO0HasNSW && BO1HasNSW) : BO0HasNUW)) { + BinaryOperator *BO = GetBinOpOut(ConstantInt::get(I.getType(), RemYZ)); + BO->setHasNoSignedWrap(); + BO->setHasNoUnsignedWrap(BO0HasNUW); + return BO; + } + } + // Check if desirable to do generic replacement. + // NB: It may be beneficial to do this if we have X << Z even if there are + // multiple uses of Op0/Op1 as it will eliminate the urem (urem of a power + // of 2 is converted to add/and) and urem is pretty expensive (maybe more + // sense in DAGCombiner). + if ((ConstY && ConstZ) || + (Op0->hasOneUse() && Op1->hasOneUse() && + (IsSRem ? (!ShiftY && !ShiftZ) : (!ShiftY || ShiftZ)))) { + + // (rem (mul nuw/nsw X, Y), (mul nuw {nsw} X, Z) + // -> (mul nuw/nsw X, (rem Y, Z)) + if (IsSRem ? (BO0HasNSW && BO1HasNSW && BO1HasNUW) + : (BO0HasNUW && BO1HasNUW)) { + if (ShiftY) + Y = IC.Builder.CreateShl(ConstantInt::get(I.getType(), 1), Y); + if (ShiftZ) + Z = IC.Builder.CreateShl(ConstantInt::get(I.getType(), 1), Z); + + BinaryOperator *BO = GetBinOpOut(IsSRem ? 
IC.Builder.CreateSRem(Y, Z) + : IC.Builder.CreateURem(Y, Z)); + + if (BO0HasNSW || BO1HasNSW) + BO->setHasNoSignedWrap(); + if (!IsSRem || (BO0HasNUW && BO1HasNUW)) + BO->setHasNoUnsignedWrap(); + return BO; + } } return nullptr; diff --git a/llvm/test/Transforms/InstCombine/rem-mul-shl.ll b/llvm/test/Transforms/InstCombine/rem-mul-shl.ll --- a/llvm/test/Transforms/InstCombine/rem-mul-shl.ll +++ b/llvm/test/Transforms/InstCombine/rem-mul-shl.ll @@ -134,9 +134,8 @@ define i8 @urem_XY_XZ_with_Y_Z_is_mul_X_RemYZ(i8 %X, i8 %Y, i8 %Z) { ; CHECK-LABEL: @urem_XY_XZ_with_Y_Z_is_mul_X_RemYZ( -; CHECK-NEXT: [[BO0:%.*]] = mul nuw i8 [[X:%.*]], [[Y:%.*]] -; CHECK-NEXT: [[BO1:%.*]] = mul nuw i8 [[Z:%.*]], [[X]] -; CHECK-NEXT: [[R:%.*]] = urem i8 [[BO0]], [[BO1]] +; CHECK-NEXT: [[TMP1:%.*]] = urem i8 [[Y:%.*]], [[Z:%.*]] +; CHECK-NEXT: [[R:%.*]] = mul nuw i8 [[TMP1]], [[X:%.*]] ; CHECK-NEXT: ret i8 [[R]] ; %BO0 = mul nuw i8 %X, %Y @@ -147,9 +146,10 @@ define i8 @urem_XY_XZ_with_CX_Y_Z_is_mul_X_RemYZ(i8 %Y, i8 %Z) { ; CHECK-LABEL: @urem_XY_XZ_with_CX_Y_Z_is_mul_X_RemYZ( -; CHECK-NEXT: [[BO0:%.*]] = mul nuw i8 [[Y:%.*]], 10 -; CHECK-NEXT: [[BO1:%.*]] = shl nuw i8 10, [[Z:%.*]] -; CHECK-NEXT: [[R:%.*]] = urem i8 [[BO0]], [[BO1]] +; CHECK-NEXT: [[NOTMASK:%.*]] = shl nsw i8 -1, [[Z:%.*]] +; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[NOTMASK]], -1 +; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[Y:%.*]] +; CHECK-NEXT: [[R:%.*]] = mul nuw i8 [[TMP2]], 10 ; CHECK-NEXT: ret i8 [[R]] ; %BO0 = mul nuw i8 10, %Y @@ -160,9 +160,10 @@ define i8 @urem_XY_XZ_with_Y_Z_is_mul_X_RemYZ_with_nsw_out1(i8 %X, i8 %Y, i8 %Z) { ; CHECK-LABEL: @urem_XY_XZ_with_Y_Z_is_mul_X_RemYZ_with_nsw_out1( -; CHECK-NEXT: [[BO0:%.*]] = mul nuw nsw i8 [[X:%.*]], [[Y:%.*]] -; CHECK-NEXT: [[BO1:%.*]] = shl nuw i8 [[X]], [[Z:%.*]] -; CHECK-NEXT: [[R:%.*]] = urem i8 [[BO0]], [[BO1]] +; CHECK-NEXT: [[NOTMASK:%.*]] = shl nsw i8 -1, [[Z:%.*]] +; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[NOTMASK]], -1 +; CHECK-NEXT: [[TMP2:%.*]] = and i8 
[[TMP1]], [[Y:%.*]] +; CHECK-NEXT: [[R:%.*]] = mul nuw nsw i8 [[TMP2]], [[X:%.*]] ; CHECK-NEXT: ret i8 [[R]] ; %BO0 = mul nuw nsw i8 %X, %Y @@ -173,9 +174,8 @@ define <2 x i8> @urem_XY_XZ_with_Y_Z_is_mul_X_RemYZ_with_nsw_out2(<2 x i8> %X, <2 x i8> %Y, <2 x i8> %Z) { ; CHECK-LABEL: @urem_XY_XZ_with_Y_Z_is_mul_X_RemYZ_with_nsw_out2( -; CHECK-NEXT: [[BO0:%.*]] = shl nuw <2 x i8> [[Y:%.*]], [[X:%.*]] -; CHECK-NEXT: [[BO1:%.*]] = shl nuw nsw <2 x i8> [[Z:%.*]], [[X]] -; CHECK-NEXT: [[R:%.*]] = urem <2 x i8> [[BO0]], [[BO1]] +; CHECK-NEXT: [[TMP1:%.*]] = urem <2 x i8> [[Y:%.*]], [[Z:%.*]] +; CHECK-NEXT: [[R:%.*]] = shl nuw nsw <2 x i8> [[TMP1]], [[X:%.*]] ; CHECK-NEXT: ret <2 x i8> [[R]] ; %BO0 = shl nuw <2 x i8> %Y, %X @@ -366,9 +366,8 @@ define i8 @srem_XY_XZ_with_Y_Z_is_mul_X_RemYZ(i8 %X, i8 %Y, i8 %Z) { ; CHECK-LABEL: @srem_XY_XZ_with_Y_Z_is_mul_X_RemYZ( -; CHECK-NEXT: [[BO0:%.*]] = mul nsw i8 [[Y:%.*]], [[X:%.*]] -; CHECK-NEXT: [[BO1:%.*]] = mul nuw nsw i8 [[X]], [[Z:%.*]] -; CHECK-NEXT: [[R:%.*]] = srem i8 [[BO0]], [[BO1]] +; CHECK-NEXT: [[TMP1:%.*]] = srem i8 [[Y:%.*]], [[Z:%.*]] +; CHECK-NEXT: [[R:%.*]] = mul nsw i8 [[TMP1]], [[X:%.*]] ; CHECK-NEXT: ret i8 [[R]] ; %BO0 = mul nsw i8 %Y, %X @@ -379,9 +378,8 @@ define i8 @srem_XY_XZ_with_Y_Z_is_mul_X_RemYZ_with_nuw_out(i8 %X, i8 %Y, i8 %Z) { ; CHECK-LABEL: @srem_XY_XZ_with_Y_Z_is_mul_X_RemYZ_with_nuw_out( -; CHECK-NEXT: [[BO0:%.*]] = mul nuw nsw i8 [[Y:%.*]], [[X:%.*]] -; CHECK-NEXT: [[BO1:%.*]] = mul nuw nsw i8 [[Z:%.*]], [[X]] -; CHECK-NEXT: [[R:%.*]] = srem i8 [[BO0]], [[BO1]] +; CHECK-NEXT: [[TMP1:%.*]] = srem i8 [[Y:%.*]], [[Z:%.*]] +; CHECK-NEXT: [[R:%.*]] = mul nuw nsw i8 [[TMP1]], [[X:%.*]] ; CHECK-NEXT: ret i8 [[R]] ; %BO0 = mul nsw nuw i8 %Y, %X @@ -614,9 +612,8 @@ define i8 @urem_shl_XX_shl_ZX(i8 %X, i8 %Z) { ; CHECK-LABEL: @urem_shl_XX_shl_ZX( -; CHECK-NEXT: [[BO0:%.*]] = shl nuw nsw i8 [[X:%.*]], [[X]] -; CHECK-NEXT: [[BO1:%.*]] = shl nuw nsw i8 [[Z:%.*]], [[X]] -; CHECK-NEXT: [[R:%.*]] = urem i8 
[[BO0]], [[BO1]] +; CHECK-NEXT: [[TMP1:%.*]] = urem i8 [[X:%.*]], [[Z:%.*]] +; CHECK-NEXT: [[R:%.*]] = shl nuw nsw i8 [[TMP1]], [[X]] ; CHECK-NEXT: ret i8 [[R]] ; %BO0 = shl nuw nsw i8 %X, %X @@ -627,9 +624,8 @@ define i8 @urem_shl_YX_shl_XX(i8 %X, i8 %Y) { ; CHECK-LABEL: @urem_shl_YX_shl_XX( -; CHECK-NEXT: [[BO0:%.*]] = shl nuw nsw i8 [[Y:%.*]], [[X:%.*]] -; CHECK-NEXT: [[BO1:%.*]] = shl nuw nsw i8 [[X]], [[X]] -; CHECK-NEXT: [[R:%.*]] = urem i8 [[BO0]], [[BO1]] +; CHECK-NEXT: [[TMP1:%.*]] = urem i8 [[Y:%.*]], [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = shl nuw nsw i8 [[TMP1]], [[X]] ; CHECK-NEXT: ret i8 [[R]] ; %BO0 = shl nuw nsw i8 %Y, %X @@ -640,9 +636,11 @@ define i8 @urem_shl_XX_shl_XZ(i8 %X, i8 %Z) { ; CHECK-LABEL: @urem_shl_XX_shl_XZ( -; CHECK-NEXT: [[BO0:%.*]] = shl nuw nsw i8 [[X:%.*]], [[X]] -; CHECK-NEXT: [[BO1:%.*]] = shl nuw nsw i8 [[X]], [[Z:%.*]] -; CHECK-NEXT: [[R:%.*]] = urem i8 [[BO0]], [[BO1]] +; CHECK-NEXT: [[TMP1:%.*]] = shl nuw i8 1, [[X:%.*]] +; CHECK-NEXT: [[NOTMASK:%.*]] = shl nsw i8 -1, [[Z:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = xor i8 [[NOTMASK]], -1 +; CHECK-NEXT: [[TMP3:%.*]] = and i8 [[TMP1]], [[TMP2]] +; CHECK-NEXT: [[R:%.*]] = mul nuw nsw i8 [[TMP3]], [[X]] ; CHECK-NEXT: ret i8 [[R]] ; %BO0 = shl nuw nsw i8 %X, %X @@ -653,9 +651,11 @@ define i8 @urem_shl_XY_shl_XX(i8 %X, i8 %Y) { ; CHECK-LABEL: @urem_shl_XY_shl_XX( -; CHECK-NEXT: [[BO0:%.*]] = shl nuw nsw i8 [[X:%.*]], [[Y:%.*]] -; CHECK-NEXT: [[BO1:%.*]] = shl nuw nsw i8 [[X]], [[X]] -; CHECK-NEXT: [[R:%.*]] = urem i8 [[BO0]], [[BO1]] +; CHECK-NEXT: [[TMP1:%.*]] = shl nuw i8 1, [[Y:%.*]] +; CHECK-NEXT: [[NOTMASK:%.*]] = shl nsw i8 -1, [[X:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = xor i8 [[NOTMASK]], -1 +; CHECK-NEXT: [[TMP3:%.*]] = and i8 [[TMP1]], [[TMP2]] +; CHECK-NEXT: [[R:%.*]] = mul nuw nsw i8 [[TMP3]], [[X]] ; CHECK-NEXT: ret i8 [[R]] ; %BO0 = shl nuw nsw i8 %X, %Y @@ -679,9 +679,10 @@ define i8 @urem_mul_YX_shl_XX(i8 %X, i8 %Y) { ; CHECK-LABEL: @urem_mul_YX_shl_XX( -; CHECK-NEXT: 
[[BO0:%.*]] = mul nuw nsw i8 [[Y:%.*]], [[X:%.*]] -; CHECK-NEXT: [[BO1:%.*]] = shl nuw nsw i8 [[X]], [[X]] -; CHECK-NEXT: [[R:%.*]] = urem i8 [[BO0]], [[BO1]] +; CHECK-NEXT: [[NOTMASK:%.*]] = shl nsw i8 -1, [[X:%.*]] +; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[NOTMASK]], -1 +; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[Y:%.*]] +; CHECK-NEXT: [[R:%.*]] = mul nuw nsw i8 [[TMP2]], [[X]] ; CHECK-NEXT: ret i8 [[R]] ; %BO0 = mul nuw nsw i8 %Y, %X @@ -692,9 +693,10 @@ define i8 @urem_mul_XX_shl_XZ(i8 %X, i8 %Z) { ; CHECK-LABEL: @urem_mul_XX_shl_XZ( -; CHECK-NEXT: [[BO0:%.*]] = mul nuw nsw i8 [[X:%.*]], [[X]] -; CHECK-NEXT: [[BO1:%.*]] = shl nuw nsw i8 [[X]], [[Z:%.*]] -; CHECK-NEXT: [[R:%.*]] = urem i8 [[BO0]], [[BO1]] +; CHECK-NEXT: [[NOTMASK:%.*]] = shl nsw i8 -1, [[Z:%.*]] +; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[NOTMASK]], -1 +; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = mul nuw nsw i8 [[TMP2]], [[X]] ; CHECK-NEXT: ret i8 [[R]] ; %BO0 = mul nuw nsw i8 %X, %X @@ -705,9 +707,10 @@ define i8 @urem_mul_XY_shl_XX(i8 %X, i8 %Y) { ; CHECK-LABEL: @urem_mul_XY_shl_XX( -; CHECK-NEXT: [[BO0:%.*]] = mul nuw nsw i8 [[X:%.*]], [[Y:%.*]] -; CHECK-NEXT: [[BO1:%.*]] = shl nuw nsw i8 [[X]], [[X]] -; CHECK-NEXT: [[R:%.*]] = urem i8 [[BO0]], [[BO1]] +; CHECK-NEXT: [[NOTMASK:%.*]] = shl nsw i8 -1, [[X:%.*]] +; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[NOTMASK]], -1 +; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[Y:%.*]] +; CHECK-NEXT: [[R:%.*]] = mul nuw nsw i8 [[TMP2]], [[X]] ; CHECK-NEXT: ret i8 [[R]] ; %BO0 = mul nuw nsw i8 %X, %Y @@ -770,9 +773,10 @@ define i8 @urem_mul_XX_shl_XX(i8 %X) { ; CHECK-LABEL: @urem_mul_XX_shl_XX( -; CHECK-NEXT: [[BO0:%.*]] = mul nuw nsw i8 [[X:%.*]], [[X]] -; CHECK-NEXT: [[BO1:%.*]] = shl nuw nsw i8 [[X]], [[X]] -; CHECK-NEXT: [[R:%.*]] = urem i8 [[BO0]], [[BO1]] +; CHECK-NEXT: [[NOTMASK:%.*]] = shl nsw i8 -1, [[X:%.*]] +; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[NOTMASK]], -1 +; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[X]] +; 
CHECK-NEXT: [[R:%.*]] = mul nuw nsw i8 [[TMP2]], [[X]] ; CHECK-NEXT: ret i8 [[R]] ; %BO0 = mul nuw nsw i8 %X, %X