Index: include/llvm/Analysis/ValueTracking.h =================================================================== --- include/llvm/Analysis/ValueTracking.h +++ include/llvm/Analysis/ValueTracking.h @@ -380,6 +380,11 @@ AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT); + OverflowResult computeOverflowForSignedMul(const Value *LHS, const Value *RHS, + const DataLayout &DL, + AssumptionCache *AC, + const Instruction *CxtI, + const DominatorTree *DT); OverflowResult computeOverflowForUnsignedAdd(const Value *LHS, const Value *RHS, const DataLayout &DL, @@ -397,6 +402,16 @@ AssumptionCache *AC = nullptr, const Instruction *CxtI = nullptr, const DominatorTree *DT = nullptr); + OverflowResult computeOverflowForUnsignedSub(const Value *LHS, const Value *RHS, + const DataLayout &DL, + AssumptionCache *AC, + const Instruction *CxtI, + const DominatorTree *DT); + OverflowResult computeOverflowForSignedSub(const Value *LHS, const Value *RHS, + const DataLayout &DL, + AssumptionCache *AC, + const Instruction *CxtI, + const DominatorTree *DT); /// Returns true if the arithmetic part of the \p II 's result is /// used only along the paths control dependent on the computation Index: lib/Analysis/InstructionSimplify.cpp =================================================================== --- lib/Analysis/InstructionSimplify.cpp +++ lib/Analysis/InstructionSimplify.cpp @@ -3422,6 +3422,86 @@ return ::SimplifyFCmpInst(Predicate, LHS, RHS, FMF, Q, RecursionLimit); } +static bool +IsSafeOverflowingBinaryOperator(const OverflowingBinaryOperator *OBO, + const SimplifyQuery &Q) { + bool NUW = OBO->hasNoUnsignedWrap(), NSW = OBO->hasNoSignedWrap(); + if (!NUW && !NSW) + return true; + const Instruction *I = dyn_cast_or_null<Instruction>(OBO); + if (!I) + return false; + Value *LHS = OBO->getOperand(0), *RHS = OBO->getOperand(1); + switch (OBO->getOpcode()) { + default: + return false; + case Instruction::Add: + if (NUW && (computeOverflowForUnsignedAdd(LHS, RHS, Q.DL, Q.AC, I, Q.DT) 
!= + OverflowResult::NeverOverflows)) + return false; + if (NSW && (computeOverflowForSignedAdd(LHS, RHS, Q.DL, Q.AC, I, Q.DT) != + OverflowResult::NeverOverflows)) + return false; + return true; + case Instruction::Sub: + if (NUW && (computeOverflowForUnsignedSub(LHS, RHS, Q.DL, Q.AC, I, Q.DT) != + OverflowResult::NeverOverflows)) + return false; + if (NSW && (computeOverflowForSignedSub(LHS, RHS, Q.DL, Q.AC, I, Q.DT) != + OverflowResult::NeverOverflows)) + return false; + return true; + case Instruction::Mul: + if (NUW && (computeOverflowForUnsignedMul(LHS, RHS, Q.DL, Q.AC, I, Q.DT) != + OverflowResult::NeverOverflows)) + return false; + if (NSW && (computeOverflowForSignedMul(LHS, RHS, Q.DL, Q.AC, I, Q.DT) != + OverflowResult::NeverOverflows)) + return false; + return true; + case Instruction::Shl: + const APInt *ShAmtAPInt; + if (!match(RHS, m_APInt(ShAmtAPInt))) + return false; + + unsigned ShAmt = ShAmtAPInt->getZExtValue(); + unsigned BitWidth = OBO->getType()->getScalarSizeInBits(); + + if (NUW && !MaskedValueIsZero(LHS, APInt::getHighBitsSet(BitWidth, ShAmt), + Q.DL, 0, Q.AC, I, Q.DT)) + return false; + if (NSW && ComputeNumSignBits(LHS, Q.DL, 0, Q.AC, I, Q.DT) <= ShAmt) + return false; + return true; + } +} + +static bool IsSafePossiblyExactOperator(const PossiblyExactOperator *PEO, + const SimplifyQuery &Q) { + if (!PEO->isExact()) + return true; + const Instruction *I = dyn_cast_or_null<Instruction>(PEO); + if (!I) + return false; + switch (PEO->getOpcode()) { + default: + return false; + case Instruction::LShr: + case Instruction::AShr: + Value *Op0 = I->getOperand(0), *Op1 = I->getOperand(1); + + const APInt *ShAmtAPInt; + if (!match(Op1, m_APInt(ShAmtAPInt))) + return false; + + unsigned ShAmt = ShAmtAPInt->getZExtValue(); + unsigned BitWidth = I->getType()->getScalarSizeInBits(); + + return MaskedValueIsZero(Op0, APInt::getLowBitsSet(BitWidth, ShAmt), Q.DL, + 0, Q.AC, I, Q.DT); + } +} + +/// See if V simplifies when its operand Op is replaced with RepOp. 
static const Value *SimplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp, const SimplifyQuery &Q, @@ -3446,11 +3526,11 @@ // %sel = select i1 %cmp, i32 -2147483648, i32 %add // // We can't replace %sel with %add unless we strip away the flags. - if (isa<OverflowingBinaryOperator>(B)) - if (B->hasNoSignedWrap() || B->hasNoUnsignedWrap()) + if (OverflowingBinaryOperator *OBO = dyn_cast<OverflowingBinaryOperator>(B)) + if (!IsSafeOverflowingBinaryOperator(OBO, Q)) return nullptr; - if (isa<PossiblyExactOperator>(B)) - if (B->isExact()) + if (PossiblyExactOperator *PEO = dyn_cast<PossiblyExactOperator>(B)) + if (!IsSafePossiblyExactOperator(PEO, Q)) return nullptr; if (MaxRecurse) { @@ -3475,6 +3555,17 @@ } } + if (CastInst *C = dyn_cast<CastInst>(I)) { + if (MaxRecurse) { + const Value *CastInstWithReplacement = SimplifyWithOpReplaced( + I->getOperand(0), Op, RepOp, Q, MaxRecurse - 1); + if (const Constant *Const = + dyn_cast_or_null<Constant>(CastInstWithReplacement)) + return ConstantExpr::getCast( + C->getOpcode(), const_cast<Constant *>(Const), C->getType()); + } + } + // TODO: We could hand off more cases to instsimplify here. // If all operands are constant after substituting Op for RepOp then we can Index: lib/Analysis/ValueTracking.cpp =================================================================== --- lib/Analysis/ValueTracking.cpp +++ lib/Analysis/ValueTracking.cpp @@ -3658,6 +3658,48 @@ return OverflowResult::MayOverflow; } +OverflowResult llvm::computeOverflowForSignedMul(const Value *LHS, + const Value *RHS, + const DataLayout &DL, + AssumptionCache *AC, + const Instruction *CxtI, + const DominatorTree *DT) { + // Multiplying n * m significant bits yields a result of n + m significant + // bits. If the total number of significant bits does not exceed the + // result bit width (minus 1), there is no overflow. + // This means if we have enough leading sign bits in the operands + // we can guarantee that the result does not overflow. 
+ // Ref: "Hacker's Delight" by Henry Warren + unsigned BitWidth = LHS->getType()->getScalarSizeInBits(); + + // Note that underestimating the number of sign bits gives a more + // conservative answer. + unsigned SignBits = ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) + + ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT); + + // First handle the easy case: if we have enough sign bits there's + // definitely no overflow. + if (SignBits > BitWidth + 1) + return OverflowResult::NeverOverflows; + + // There are two ambiguous cases where there can be no overflow: + // SignBits == BitWidth + 1 and + // SignBits == BitWidth + // The second case is difficult to check, therefore we only handle the + // first case. + if (SignBits == BitWidth + 1) { + // It overflows only when both arguments are negative and the true + // product is exactly the minimum negative number. + // E.g. mul i16 with 17 sign bits: 0xff00 * 0xff80 = 0x8000 + // For simplicity we just check if at least one side is not negative. + KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT); + KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT); + if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative()) + return OverflowResult::NeverOverflows; + } + return OverflowResult::MayOverflow; +} + OverflowResult llvm::computeOverflowForUnsignedAdd(const Value *LHS, const Value *RHS, const DataLayout &DL, @@ -3787,6 +3829,47 @@ return OverflowResult::MayOverflow; } +OverflowResult llvm::computeOverflowForUnsignedSub(const Value *LHS, + const Value *RHS, + const DataLayout &DL, + AssumptionCache *AC, + const Instruction *CxtI, + const DominatorTree *DT) { + // If the LHS is negative and the RHS is non-negative, no unsigned wrap. 
+ KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT); + KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT); + if (LHSKnown.isNegative() && RHSKnown.isNonNegative()) + return OverflowResult::NeverOverflows; + + return OverflowResult::MayOverflow; +} + +OverflowResult llvm::computeOverflowForSignedSub(const Value *LHS, + const Value *RHS, + const DataLayout &DL, + AssumptionCache *AC, + const Instruction *CxtI, + const DominatorTree *DT) { + // If LHS and RHS each have at least two sign bits, the subtraction + // cannot overflow. + if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 && + ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1) + return OverflowResult::NeverOverflows; + + KnownBits LHSKnown = computeKnownBits(LHS, DL, 0, AC, CxtI, DT); + + KnownBits RHSKnown = computeKnownBits(RHS, DL, 0, AC, CxtI, DT); + + // Subtraction of two 2's complement numbers having identical signs will + // never overflow. + if ((LHSKnown.isNegative() && RHSKnown.isNegative()) || + (LHSKnown.isNonNegative() && RHSKnown.isNonNegative())) + return OverflowResult::NeverOverflows; + + // TODO: implement logic similar to checkRippleForAdd + return OverflowResult::MayOverflow; +} + bool llvm::isOverflowIntrinsicNoWrap(const IntrinsicInst *II, const DominatorTree &DT) { #ifndef NDEBUG Index: lib/Transforms/InstCombine/InstCombineAddSub.cpp =================================================================== --- lib/Transforms/InstCombine/InstCombineAddSub.cpp +++ lib/Transforms/InstCombine/InstCombineAddSub.cpp @@ -855,48 +855,6 @@ return createFMul(OpndVal, Coeff.getValue(Instr->getType())); } -/// \brief Return true if we can prove that: -/// (sub LHS, RHS) === (sub nsw LHS, RHS) -/// This basically requires proving that the add in the original type would not -/// overflow to change the sign bit or have a carry out. -/// TODO: Handle this for Vectors. 
-bool InstCombiner::willNotOverflowSignedSub(const Value *LHS, - const Value *RHS, - const Instruction &CxtI) const { - // If LHS and RHS each have at least two sign bits, the subtraction - // cannot overflow. - if (ComputeNumSignBits(LHS, 0, &CxtI) > 1 && - ComputeNumSignBits(RHS, 0, &CxtI) > 1) - return true; - - KnownBits LHSKnown = computeKnownBits(LHS, 0, &CxtI); - - KnownBits RHSKnown = computeKnownBits(RHS, 0, &CxtI); - - // Subtraction of two 2's complement numbers having identical signs will - // never overflow. - if ((LHSKnown.isNegative() && RHSKnown.isNegative()) || - (LHSKnown.isNonNegative() && RHSKnown.isNonNegative())) - return true; - - // TODO: implement logic similar to checkRippleForAdd - return false; -} - -/// \brief Return true if we can prove that: -/// (sub LHS, RHS) === (sub nuw LHS, RHS) -bool InstCombiner::willNotOverflowUnsignedSub(const Value *LHS, - const Value *RHS, - const Instruction &CxtI) const { - // If the LHS is negative and the RHS is non-negative, no unsigned wrap. - KnownBits LHSKnown = computeKnownBits(LHS, /*Depth=*/0, &CxtI); - KnownBits RHSKnown = computeKnownBits(RHS, /*Depth=*/0, &CxtI); - if (LHSKnown.isNegative() && RHSKnown.isNonNegative()) - return true; - - return false; -} - // Checks if any operand is negative and we can convert add to sub. 
// This function checks for following negative patterns // ADD(XOR(OR(Z, NOT(C)), C)), 1) == NEG(AND(Z, C)) Index: lib/Transforms/InstCombine/InstCombineInternal.h =================================================================== --- lib/Transforms/InstCombine/InstCombineInternal.h +++ lib/Transforms/InstCombine/InstCombineInternal.h @@ -445,11 +445,22 @@ } bool willNotOverflowSignedSub(const Value *LHS, const Value *RHS, - const Instruction &CxtI) const; + const Instruction &CxtI) const { + return computeOverflowForSignedSub(LHS, RHS, &CxtI) == + OverflowResult::NeverOverflows; + } + bool willNotOverflowUnsignedSub(const Value *LHS, const Value *RHS, - const Instruction &CxtI) const; + const Instruction &CxtI) const { + return computeOverflowForUnsignedSub(LHS, RHS, &CxtI) == + OverflowResult::NeverOverflows; + } + bool willNotOverflowSignedMul(const Value *LHS, const Value *RHS, - const Instruction &CxtI) const; + const Instruction &CxtI) const { + return computeOverflowForSignedMul(LHS, RHS, &CxtI) == + OverflowResult::NeverOverflows; + } bool willNotOverflowUnsignedMul(const Value *LHS, const Value *RHS, const Instruction &CxtI) const { @@ -599,6 +610,12 @@ return llvm::computeOverflowForUnsignedMul(LHS, RHS, DL, &AC, CxtI, &DT); } + OverflowResult computeOverflowForSignedMul(const Value *LHS, + const Value *RHS, + const Instruction *CxtI) const { + return llvm::computeOverflowForSignedMul(LHS, RHS, DL, &AC, CxtI, &DT); + } + OverflowResult computeOverflowForUnsignedAdd(const Value *LHS, const Value *RHS, const Instruction *CxtI) const { @@ -611,6 +628,17 @@ return llvm::computeOverflowForSignedAdd(LHS, RHS, DL, &AC, CxtI, &DT); } + OverflowResult computeOverflowForUnsignedSub(const Value *LHS, + const Value *RHS, + const Instruction *CxtI) const { + return llvm::computeOverflowForUnsignedSub(LHS, RHS, DL, &AC, CxtI, &DT); + } + + OverflowResult computeOverflowForSignedSub(const Value *LHS, const Value *RHS, + const Instruction *CxtI) const { + return 
llvm::computeOverflowForSignedSub(LHS, RHS, DL, &AC, CxtI, &DT); + } + /// Maximum size of array considered when transforming. uint64_t MaxArraySizeForCombine; Index: lib/Transforms/InstCombine/InstCombineMulDivRem.cpp =================================================================== --- lib/Transforms/InstCombine/InstCombineMulDivRem.cpp +++ lib/Transforms/InstCombine/InstCombineMulDivRem.cpp @@ -148,47 +148,6 @@ return ConstantVector::get(Elts); } -/// \brief Return true if we can prove that: -/// (mul LHS, RHS) === (mul nsw LHS, RHS) -bool InstCombiner::willNotOverflowSignedMul(const Value *LHS, - const Value *RHS, - const Instruction &CxtI) const { - // Multiplying n * m significant bits yields a result of n + m significant - // bits. If the total number of significant bits does not exceed the - // result bit width (minus 1), there is no overflow. - // This means if we have enough leading sign bits in the operands - // we can guarantee that the result does not overflow. - // Ref: "Hacker's Delight" by Henry Warren - unsigned BitWidth = LHS->getType()->getScalarSizeInBits(); - - // Note that underestimating the number of sign bits gives a more - // conservative answer. - unsigned SignBits = - ComputeNumSignBits(LHS, 0, &CxtI) + ComputeNumSignBits(RHS, 0, &CxtI); - - // First handle the easy case: if we have enough sign bits there's - // definitely no overflow. - if (SignBits > BitWidth + 1) - return true; - - // There are two ambiguous cases where there can be no overflow: - // SignBits == BitWidth + 1 and - // SignBits == BitWidth - // The second case is difficult to check, therefore we only handle the - // first case. - if (SignBits == BitWidth + 1) { - // It overflows only when both arguments are negative and the true - // product is exactly the minimum negative number. - // E.g. mul i16 with 17 sign bits: 0xff00 * 0xff80 = 0x8000 - // For simplicity we just check if at least one side is not negative. 
- KnownBits LHSKnown = computeKnownBits(LHS, /*Depth=*/0, &CxtI); - KnownBits RHSKnown = computeKnownBits(RHS, /*Depth=*/0, &CxtI); - if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative()) - return true; - } - return false; -} - Instruction *InstCombiner::visitMul(BinaryOperator &I) { bool Changed = SimplifyAssociativeOrCommutative(I); Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1); Index: test/Transforms/InstCombine/select-bitext-bitwise-ops.ll =================================================================== --- test/Transforms/InstCombine/select-bitext-bitwise-ops.ll +++ /dev/null @@ -1,112 +0,0 @@ -; RUN: opt -S -instcombine < %s | FileCheck %s - -define i64 @sel_false_val_is_a_masked_shl_of_true_val1(i32 %x, i64 %y) { -; CHECK-LABEL: @sel_false_val_is_a_masked_shl_of_true_val1( -; CHECK-NEXT: [[TMP1:%.*]] = and i32 %x, 15 -; CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i32 [[TMP1]], 2 -; CHECK-NEXT: [[TMP3:%.*]] = zext i32 [[TMP2]] to i64 -; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i32 [[TMP1]], 0 -; CHECK-NEXT: [[TMP5:%.*]] = select i1 [[TMP4]], i64 0, i64 [[TMP3]] -; CHECK-NEXT: [[TMP6:%.*]] = ashr i64 %y, [[TMP5]] -; CHECK-NEXT: ret i64 [[TMP6]] -; - %1 = and i32 %x, 15 - %2 = shl nuw nsw i32 %1, 2 - %3 = zext i32 %2 to i64 - %4 = icmp eq i32 %1, 0 - %5 = ashr i64 %y, %3 - %6 = select i1 %4, i64 %y, i64 %5 - ret i64 %6 -} - -define i64 @sel_false_val_is_a_masked_shl_of_true_val2(i32 %x, i64 %y) { -; CHECK-LABEL: @sel_false_val_is_a_masked_shl_of_true_val2( -; CHECK-NEXT: [[TMP1:%.*]] = and i32 %x, 15 -; CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i32 [[TMP1]], 2 -; CHECK-NEXT: [[TMP3:%.*]] = zext i32 [[TMP2]] to i64 -; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i32 [[TMP1]], 0 -; CHECK-NEXT: [[TMP5:%.*]] = select i1 [[TMP4]], i64 0, i64 [[TMP3]] -; CHECK-NEXT: [[TMP6:%.*]] = ashr i64 %y, [[TMP5]] -; CHECK-NEXT: ret i64 [[TMP6]] -; - %1 = and i32 %x, 15 - %2 = shl nuw nsw i32 %1, 2 - %3 = zext i32 %2 to i64 - %4 = icmp eq i32 %2, 0 - %5 = ashr i64 %y, %3 - %6 = select i1 
%4, i64 %y, i64 %5 - ret i64 %6 -} - -define i64 @sel_false_val_is_a_masked_lshr_of_true_val1(i32 %x, i64 %y) { -; CHECK-LABEL: @sel_false_val_is_a_masked_lshr_of_true_val1( -; CHECK-NEXT: [[TMP1:%.*]] = and i32 %x, 60 -; CHECK-NEXT: [[TMP2:%.*]] = lshr exact i32 [[TMP1]], 2 -; CHECK-NEXT: [[TMP3:%.*]] = zext i32 [[TMP2]] to i64 -; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i32 [[TMP1]], 0 -; CHECK-NEXT: [[TMP5:%.*]] = select i1 [[TMP4]], i64 0, i64 [[TMP3]] -; CHECK-NEXT: [[TMP6:%.*]] = ashr i64 %y, [[TMP5]] -; CHECK-NEXT: ret i64 [[TMP6]] -; - %1 = and i32 %x, 60 - %2 = lshr i32 %1, 2 - %3 = zext i32 %2 to i64 - %4 = icmp eq i32 %1, 0 - %5 = ashr i64 %y, %3 - %6 = select i1 %4, i64 %y, i64 %5 - ret i64 %6 -} - -define i64 @sel_false_val_is_a_masked_lshr_of_true_val2(i32 %x, i64 %y) { -; CHECK-LABEL: @sel_false_val_is_a_masked_lshr_of_true_val2( -; CHECK-NEXT: [[TMP1:%.*]] = lshr i32 %x, 2 -; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], 15 -; CHECK-NEXT: [[TMP3:%.*]] = zext i32 [[TMP2]] to i64 -; CHECK-NEXT: [[TMP4:%.*]] = ashr i64 %y, [[TMP3]] -; CHECK-NEXT: ret i64 [[TMP4]] -; - %1 = and i32 %x, 60 - %2 = lshr i32 %1, 2 - %3 = zext i32 %2 to i64 - %4 = icmp eq i32 %2, 0 - %5 = ashr i64 %y, %3 - %6 = select i1 %4, i64 %y, i64 %5 - ret i64 %6 -} - -define i64 @sel_false_val_is_a_masked_ashr_of_true_val1(i32 %x, i64 %y) { -; CHECK-LABEL: @sel_false_val_is_a_masked_ashr_of_true_val1( -; CHECK-NEXT: [[TMP1:%.*]] = and i32 %x, -2147483588 -; CHECK-NEXT: [[TMP2:%.*]] = ashr exact i32 [[TMP1]], 2 -; CHECK-NEXT: [[TMP3:%.*]] = zext i32 [[TMP2]] to i64 -; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i32 [[TMP1]], 0 -; CHECK-NEXT: [[TMP5:%.*]] = select i1 [[TMP4]], i64 0, i64 [[TMP3]] -; CHECK-NEXT: [[TMP6:%.*]] = ashr i64 %y, [[TMP5]] -; CHECK-NEXT: ret i64 [[TMP6]] -; - %1 = and i32 %x, -2147483588 - %2 = ashr i32 %1, 2 - %3 = zext i32 %2 to i64 - %4 = icmp eq i32 %1, 0 - %5 = ashr i64 %y, %3 - %6 = select i1 %4, i64 %y, i64 %5 - ret i64 %6 -} - -define i64 
@sel_false_val_is_a_masked_ashr_of_true_val2(i32 %x, i64 %y) { -; CHECK-LABEL: @sel_false_val_is_a_masked_ashr_of_true_val2( -; CHECK-NEXT: [[TMP1:%.*]] = ashr i32 %x, 2 -; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], -536870897 -; CHECK-NEXT: [[TMP3:%.*]] = zext i32 [[TMP2]] to i64 -; CHECK-NEXT: [[TMP4:%.*]] = ashr i64 %y, [[TMP3]] -; CHECK-NEXT: ret i64 [[TMP4]] -; - %1 = and i32 %x, -2147483588 - %2 = ashr i32 %1, 2 - %3 = zext i32 %2 to i64 - %4 = icmp eq i32 %2, 0 - %5 = ashr i64 %y, %3 - %6 = select i1 %4, i64 %y, i64 %5 - ret i64 %6 -} - Index: test/Transforms/InstCombine/select-obo-peo-ops.ll =================================================================== --- /dev/null +++ test/Transforms/InstCombine/select-obo-peo-ops.ll @@ -0,0 +1,1070 @@ +; RUN: opt -S -instcombine < %s | FileCheck %s + +define i64 @test_shl_nuw_nsw__all_are_safe(i32 %x, i64 %y) { +; CHECK-LABEL: @test_shl_nuw_nsw__all_are_safe( +; CHECK-NEXT: [[TMP1:%.*]] = and i32 %x, 15 +; CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i32 [[TMP1]], 2 +; CHECK-NEXT: [[TMP3:%.*]] = zext i32 [[TMP2]] to i64 +; CHECK-NEXT: [[TMP4:%.*]] = ashr i64 %y, [[TMP3]] +; CHECK-NEXT: ret i64 [[TMP4]] +; + %1 = and i32 %x, 15 + %2 = shl nuw nsw i32 %1, 2 + %3 = zext i32 %2 to i64 + %4 = icmp eq i32 %1, 0 + %5 = ashr i64 %y, %3 + %6 = select i1 %4, i64 %y, i64 %5 + ret i64 %6 +} + +define i64 @test_shl_nuw__all_are_safe(i32 %x, i64 %y) { +; CHECK-LABEL: @test_shl_nuw__all_are_safe( +; CHECK-NEXT: [[TMP1:%.*]] = and i32 %x, 15 +; CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i32 [[TMP1]], 2 +; CHECK-NEXT: [[TMP3:%.*]] = zext i32 [[TMP2]] to i64 +; CHECK-NEXT: [[TMP4:%.*]] = ashr i64 %y, [[TMP3]] +; CHECK-NEXT: ret i64 [[TMP4]] +; + %1 = and i32 %x, 15 + %2 = shl nuw i32 %1, 2 + %3 = zext i32 %2 to i64 + %4 = icmp eq i32 %1, 0 + %5 = ashr i64 %y, %3 + %6 = select i1 %4, i64 %y, i64 %5 + ret i64 %6 +} + +define i64 @test_shl_nsw__all_are_safe(i32 %x, i64 %y) { +; CHECK-LABEL: @test_shl_nsw__all_are_safe( +; CHECK-NEXT: 
[[TMP1:%.*]] = and i32 %x, 15 +; CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i32 [[TMP1]], 2 +; CHECK-NEXT: [[TMP3:%.*]] = zext i32 [[TMP2]] to i64 +; CHECK-NEXT: [[TMP4:%.*]] = ashr i64 %y, [[TMP3]] +; CHECK-NEXT: ret i64 [[TMP4]] +; + %1 = and i32 %x, 15 + %2 = shl nsw i32 %1, 2 + %3 = zext i32 %2 to i64 + %4 = icmp eq i32 %1, 0 + %5 = ashr i64 %y, %3 + %6 = select i1 %4, i64 %y, i64 %5 + ret i64 %6 +} + +define i64 @test_shl__all_are_safe(i32 %x, i64 %y) { +; CHECK-LABEL: @test_shl__all_are_safe( +; CHECK-NEXT: [[TMP1:%.*]] = and i32 %x, 15 +; CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i32 [[TMP1]], 2 +; CHECK-NEXT: [[TMP3:%.*]] = zext i32 [[TMP2]] to i64 +; CHECK-NEXT: [[TMP4:%.*]] = ashr i64 %y, [[TMP3]] +; CHECK-NEXT: ret i64 [[TMP4]] +; + %1 = and i32 %x, 15 + %2 = shl i32 %1, 2 + %3 = zext i32 %2 to i64 + %4 = icmp eq i32 %1, 0 + %5 = ashr i64 %y, %3 + %6 = select i1 %4, i64 %y, i64 %5 + ret i64 %6 +} + +define i64 @test_shl_nuw_nsw__nuw_is_safe(i32 %x, i64 %y) { +; CHECK-LABEL: @test_shl_nuw_nsw__nuw_is_safe( +; CHECK-NEXT: [[TMP1:%.*]] = and i32 %x, 1073741822 +; CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i32 [[TMP1]], 2 +; CHECK-NEXT: [[TMP3:%.*]] = zext i32 [[TMP2]] to i64 +; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i32 [[TMP1]], 0 +; CHECK-NEXT: [[TMP5:%.*]] = select i1 [[TMP4]], i64 0, i64 [[TMP3]] +; CHECK-NEXT: [[TMP6:%.*]] = ashr i64 %y, [[TMP5]] +; CHECK-NEXT: ret i64 [[TMP6]] +; + %1 = and i32 %x, 1073741822 + %2 = shl nuw nsw i32 %1, 2 + %3 = zext i32 %2 to i64 + %4 = icmp eq i32 %1, 0 + %5 = ashr i64 %y, %3 + %6 = select i1 %4, i64 %y, i64 %5 + ret i64 %6 +} + +define i64 @test_shl_nuw__nuw_is_safe(i32 %x, i64 %y) { +; CHECK-LABEL: @test_shl_nuw__nuw_is_safe( +; CHECK-NEXT: [[TMP1:%.*]] = and i32 %x, 1073741822 +; CHECK-NEXT: [[TMP2:%.*]] = shl nuw i32 [[TMP1]], 2 +; CHECK-NEXT: [[TMP3:%.*]] = zext i32 [[TMP2]] to i64 +; CHECK-NEXT: [[TMP4:%.*]] = ashr i64 %y, [[TMP3]] +; CHECK-NEXT: ret i64 [[TMP4]] +; + %1 = and i32 %x, 1073741822 + %2 = shl nuw i32 %1, 2 + %3 
= zext i32 %2 to i64 + %4 = icmp eq i32 %1, 0 + %5 = ashr i64 %y, %3 + %6 = select i1 %4, i64 %y, i64 %5 + ret i64 %6 +} + +define i64 @test_shl_nsw__nuw_is_safe(i32 %x, i64 %y) { +; CHECK-LABEL: @test_shl_nsw__nuw_is_safe( +; CHECK-NEXT: [[TMP1:%.*]] = and i32 %x, 1073741822 +; CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i32 [[TMP1]], 2 +; CHECK-NEXT: [[TMP3:%.*]] = zext i32 [[TMP2]] to i64 +; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i32 [[TMP1]], 0 +; CHECK-NEXT: [[TMP5:%.*]] = select i1 [[TMP4]], i64 0, i64 [[TMP3]] +; CHECK-NEXT: [[TMP6:%.*]] = ashr i64 %y, [[TMP5]] +; CHECK-NEXT: ret i64 [[TMP6]] +; + %1 = and i32 %x, 1073741822 + %2 = shl nsw i32 %1, 2 + %3 = zext i32 %2 to i64 + %4 = icmp eq i32 %1, 0 + %5 = ashr i64 %y, %3 + %6 = select i1 %4, i64 %y, i64 %5 + ret i64 %6 +} + +define i64 @test_shl__nuw_is_safe(i32 %x, i64 %y) { +; CHECK-LABEL: @test_shl__nuw_is_safe( +; CHECK-NEXT: [[TMP1:%.*]] = and i32 %x, 1073741822 +; CHECK-NEXT: [[TMP2:%.*]] = shl nuw i32 [[TMP1]], 2 +; CHECK-NEXT: [[TMP3:%.*]] = zext i32 [[TMP2]] to i64 +; CHECK-NEXT: [[TMP4:%.*]] = ashr i64 %y, [[TMP3]] +; CHECK-NEXT: ret i64 [[TMP4]] +; + %1 = and i32 %x, 1073741822 + %2 = shl i32 %1, 2 + %3 = zext i32 %2 to i64 + %4 = icmp eq i32 %1, 0 + %5 = ashr i64 %y, %3 + %6 = select i1 %4, i64 %y, i64 %5 + ret i64 %6 +} + +define i32 @test_shl_nuw_nsw__nsw_is_safe(i32 %x) { +; CHECK-LABEL: @test_shl_nuw_nsw__nsw_is_safe( +; CHECK-NEXT: [[TMP1:%.*]] = or i32 %x, -83886080 +; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i32 [[TMP1]], -83886079 +; CHECK-NEXT: [[TMP3:%.*]] = shl nuw nsw i32 [[TMP1]], 2 +; CHECK-NEXT: [[TMP4:%.*]] = select i1 [[TMP2]], i32 -335544316, i32 [[TMP3]] +; CHECK-NEXT: [[TMP5:%.*]] = mul i32 [[TMP4]], [[TMP1]] +; CHECK-NEXT: [[TMP6:%.*]] = mul i32 [[TMP5]], [[TMP3]] +; CHECK-NEXT: ret i32 [[TMP6]] +; + %1 = or i32 %x, -83886080 + %2 = icmp eq i32 %1, -83886079 + %3 = shl nuw nsw i32 %1, 2 + %4 = select i1 %2, i32 -335544316, i32 %3 + %5 = mul i32 %4, %1 + %6 = mul i32 %5, %3 + ret i32 %6 +} 
+ +define i32 @test_shl_nuw__nsw_is_safe(i32 %x) { +; CHECK-LABEL: @test_shl_nuw__nsw_is_safe( +; CHECK-NEXT: [[TMP1:%.*]] = or i32 %x, -83886080 +; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i32 [[TMP1]], -83886079 +; CHECK-NEXT: [[TMP3:%.*]] = shl nuw nsw i32 [[TMP1]], 2 +; CHECK-NEXT: [[TMP4:%.*]] = select i1 [[TMP2]], i32 -335544316, i32 [[TMP3]] +; CHECK-NEXT: [[TMP5:%.*]] = mul i32 [[TMP4]], [[TMP1]] +; CHECK-NEXT: [[TMP6:%.*]] = mul i32 [[TMP5]], [[TMP3]] +; CHECK-NEXT: ret i32 [[TMP6]] +; + %1 = or i32 %x, -83886080 + %2 = icmp eq i32 %1, -83886079 + %3 = shl nuw i32 %1, 2 + %4 = select i1 %2, i32 -335544316, i32 %3 + %5 = mul i32 %4, %1 + %6 = mul i32 %5, %3 + ret i32 %6 +} + +define i32 @test_shl_nsw__nsw_is_safe(i32 %x) { +; CHECK-LABEL: @test_shl_nsw__nsw_is_safe( +; CHECK-NEXT: [[TMP1:%.*]] = or i32 %x, -83886080 +; CHECK-NEXT: [[TMP2:%.*]] = shl nsw i32 [[TMP1]], 2 +; CHECK-NEXT: [[TMP3:%.*]] = mul i32 [[TMP2]], [[TMP1]] +; CHECK-NEXT: [[TMP4:%.*]] = mul i32 [[TMP3]], [[TMP2]] +; CHECK-NEXT: ret i32 [[TMP4]] +; + %1 = or i32 %x, -83886080 + %2 = icmp eq i32 %1, -83886079 + %3 = shl nsw i32 %1, 2 + %4 = select i1 %2, i32 -335544316, i32 %3 + %5 = mul i32 %4, %1 + %6 = mul i32 %5, %3 + ret i32 %6 +} + +define i32 @test_shl__nsw_is_safe(i32 %x) { +; CHECK-LABEL: @test_shl__nsw_is_safe( +; CHECK-NEXT: [[TMP1:%.*]] = or i32 %x, -83886080 +; CHECK-NEXT: [[TMP2:%.*]] = shl nsw i32 [[TMP1]], 2 +; CHECK-NEXT: [[TMP3:%.*]] = mul i32 [[TMP2]], [[TMP1]] +; CHECK-NEXT: [[TMP4:%.*]] = mul i32 [[TMP3]], [[TMP2]] +; CHECK-NEXT: ret i32 [[TMP4]] +; + %1 = or i32 %x, -83886080 + %2 = icmp eq i32 %1, -83886079 + %3 = shl i32 %1, 2 + %4 = select i1 %2, i32 -335544316, i32 %3 + %5 = mul i32 %4, %1 + %6 = mul i32 %5, %3 + ret i32 %6 +} + + +define i64 @test_shl_nuw_nsw__none_are_safe(i32 %x, i64 %y) { +; CHECK-LABEL: @test_shl_nuw_nsw__none_are_safe( +; CHECK-NEXT: [[TMP1:%.*]] = and i32 %x, -2 +; CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i32 [[TMP1]], 2 +; CHECK-NEXT: [[TMP3:%.*]] = 
zext i32 [[TMP2]] to i64 +; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i32 [[TMP1]], 0 +; CHECK-NEXT: [[TMP5:%.*]] = select i1 [[TMP4]], i64 0, i64 [[TMP3]] +; CHECK-NEXT: [[TMP6:%.*]] = ashr i64 %y, [[TMP5]] +; CHECK-NEXT: ret i64 [[TMP6]] +; + %1 = and i32 %x, 4294967294 + %2 = shl nuw nsw i32 %1, 2 + %3 = zext i32 %2 to i64 + %4 = icmp eq i32 %1, 0 + %5 = ashr i64 %y, %3 + %6 = select i1 %4, i64 %y, i64 %5 + ret i64 %6 +} + +define i64 @test_shl_nuw__none_are_safe(i32 %x, i64 %y) { +; CHECK-LABEL: @test_shl_nuw__none_are_safe( +; CHECK-NEXT: [[TMP1:%.*]] = and i32 %x, -2 +; CHECK-NEXT: [[TMP2:%.*]] = shl nuw i32 [[TMP1]], 2 +; CHECK-NEXT: [[TMP3:%.*]] = zext i32 [[TMP2]] to i64 +; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i32 [[TMP1]], 0 +; CHECK-NEXT: [[TMP5:%.*]] = select i1 [[TMP4]], i64 0, i64 [[TMP3]] +; CHECK-NEXT: [[TMP6:%.*]] = ashr i64 %y, [[TMP5]] +; CHECK-NEXT: ret i64 [[TMP6]] +; + %1 = and i32 %x, 4294967294 + %2 = shl nuw i32 %1, 2 + %3 = zext i32 %2 to i64 + %4 = icmp eq i32 %1, 0 + %5 = ashr i64 %y, %3 + %6 = select i1 %4, i64 %y, i64 %5 + ret i64 %6 +} + +define i64 @test_shl_nsw__none_are_safe(i32 %x, i64 %y) { +; CHECK-LABEL: @test_shl_nsw__none_are_safe( +; CHECK-NEXT: [[TMP1:%.*]] = and i32 %x, -2 +; CHECK-NEXT: [[TMP2:%.*]] = shl nsw i32 [[TMP1]], 2 +; CHECK-NEXT: [[TMP3:%.*]] = zext i32 [[TMP2]] to i64 +; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i32 [[TMP1]], 0 +; CHECK-NEXT: [[TMP5:%.*]] = select i1 [[TMP4]], i64 0, i64 [[TMP3]] +; CHECK-NEXT: [[TMP6:%.*]] = ashr i64 %y, [[TMP5]] +; CHECK-NEXT: ret i64 [[TMP6]] +; + %1 = and i32 %x, 4294967294 + %2 = shl nsw i32 %1, 2 + %3 = zext i32 %2 to i64 + %4 = icmp eq i32 %1, 0 + %5 = ashr i64 %y, %3 + %6 = select i1 %4, i64 %y, i64 %5 + ret i64 %6 +} + +define i64 @test_shl__none_are_safe(i32 %x, i64 %y) { +; CHECK-LABEL: @test_shl__none_are_safe( +; CHECK-NEXT: [[TMP1:%.*]] = and i32 %x, 1073741822 +; CHECK-NEXT: [[TMP2:%.*]] = shl nuw i32 [[TMP1]], 2 +; CHECK-NEXT: [[TMP3:%.*]] = zext i32 [[TMP2]] to i64 +; 
CHECK-NEXT: [[TMP4:%.*]] = ashr i64 %y, [[TMP3]] +; CHECK-NEXT: ret i64 [[TMP4]] +; + %1 = and i32 %x, 4294967294 + %2 = shl i32 %1, 2 + %3 = zext i32 %2 to i64 + %4 = icmp eq i32 %1, 0 + %5 = ashr i64 %y, %3 + %6 = select i1 %4, i64 %y, i64 %5 + ret i64 %6 +} + +define i64 @test_lshr_exact__exact_is_safe(i32 %x, i64 %y) { +; CHECK-LABEL: @test_lshr_exact__exact_is_safe( +; CHECK-NEXT: [[TMP1:%.*]] = lshr i32 %x, 2 +; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], 15 +; CHECK-NEXT: [[TMP3:%.*]] = zext i32 [[TMP2]] to i64 +; CHECK-NEXT: [[TMP4:%.*]] = ashr i64 %y, [[TMP3]] +; CHECK-NEXT: ret i64 [[TMP4]] +; + %1 = and i32 %x, 60 + %2 = lshr exact i32 %1, 2 + %3 = zext i32 %2 to i64 + %4 = icmp eq i32 %1, 0 + %5 = ashr i64 %y, %3 + %6 = select i1 %4, i64 %y, i64 %5 + ret i64 %6 +} + +define i64 @test_lshr__exact_is_safe(i32 %x, i64 %y) { +; CHECK-LABEL: @test_lshr__exact_is_safe( +; CHECK-NEXT: [[TMP1:%.*]] = lshr i32 %x, 2 +; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], 15 +; CHECK-NEXT: [[TMP3:%.*]] = zext i32 [[TMP2]] to i64 +; CHECK-NEXT: [[TMP4:%.*]] = ashr i64 %y, [[TMP3]] +; CHECK-NEXT: ret i64 [[TMP4]] +; + %1 = and i32 %x, 60 + %2 = lshr i32 %1, 2 + %3 = zext i32 %2 to i64 + %4 = icmp eq i32 %1, 0 + %5 = ashr i64 %y, %3 + %6 = select i1 %4, i64 %y, i64 %5 + ret i64 %6 +} + +define i64 @test_lshr_exact__exact_is_unsafe(i32 %x, i64 %y) { +; CHECK-LABEL: @test_lshr_exact__exact_is_unsafe( +; CHECK-NEXT: [[TMP1:%.*]] = and i32 %x, 63 +; CHECK-NEXT: [[TMP2:%.*]] = lshr exact i32 [[TMP1]], 2 +; CHECK-NEXT: [[TMP3:%.*]] = zext i32 [[TMP2]] to i64 +; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i32 [[TMP1]], 0 +; CHECK-NEXT: [[TMP5:%.*]] = select i1 [[TMP4]], i64 0, i64 [[TMP3]] +; CHECK-NEXT: [[TMP6:%.*]] = ashr i64 %y, [[TMP5]] +; CHECK-NEXT: ret i64 [[TMP6]] +; + %1 = and i32 %x, 63 + %2 = lshr exact i32 %1, 2 + %3 = zext i32 %2 to i64 + %4 = icmp eq i32 %1, 0 + %5 = ashr i64 %y, %3 + %6 = select i1 %4, i64 %y, i64 %5 + ret i64 %6 +} + +define i64 
@test_lshr__exact_is_unsafe(i32 %x, i64 %y) { +; CHECK-LABEL: @test_lshr__exact_is_unsafe( +; CHECK-NEXT: [[TMP1:%.*]] = lshr i32 %x, 2 +; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], 15 +; CHECK-NEXT: [[TMP3:%.*]] = zext i32 [[TMP2]] to i64 +; CHECK-NEXT: [[TMP4:%.*]] = ashr i64 %y, [[TMP3]] +; CHECK-NEXT: ret i64 [[TMP4]] +; + %1 = and i32 %x, 63 + %2 = lshr i32 %1, 2 + %3 = zext i32 %2 to i64 + %4 = icmp eq i32 %1, 0 + %5 = ashr i64 %y, %3 + %6 = select i1 %4, i64 %y, i64 %5 + ret i64 %6 +} + +define i64 @test_ashr_exact__exact_is_safe(i32 %x, i64 %y) { +; CHECK-LABEL: @test_ashr_exact__exact_is_safe( +; CHECK-NEXT: [[TMP1:%.*]] = ashr i32 %x, 2 +; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], -536870897 +; CHECK-NEXT: [[TMP3:%.*]] = zext i32 [[TMP2]] to i64 +; CHECK-NEXT: [[TMP4:%.*]] = ashr i64 %y, [[TMP3]] +; CHECK-NEXT: ret i64 [[TMP4]] +; + %1 = and i32 %x, -2147483588 + %2 = ashr exact i32 %1, 2 + %3 = zext i32 %2 to i64 + %4 = icmp eq i32 %1, 0 + %5 = ashr i64 %y, %3 + %6 = select i1 %4, i64 %y, i64 %5 + ret i64 %6 +} + +define i64 @test_ashr__exact_is_safe(i32 %x, i64 %y) { +; CHECK-LABEL: @test_ashr__exact_is_safe( +; CHECK-NEXT: [[TMP1:%.*]] = ashr i32 %x, 2 +; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], -536870897 +; CHECK-NEXT: [[TMP3:%.*]] = zext i32 [[TMP2]] to i64 +; CHECK-NEXT: [[TMP4:%.*]] = ashr i64 %y, [[TMP3]] +; CHECK-NEXT: ret i64 [[TMP4]] +; + %1 = and i32 %x, -2147483588 + %2 = ashr i32 %1, 2 + %3 = zext i32 %2 to i64 + %4 = icmp eq i32 %1, 0 + %5 = ashr i64 %y, %3 + %6 = select i1 %4, i64 %y, i64 %5 + ret i64 %6 +} + +define i64 @test_ashr_exact__exact_is_unsafe(i32 %x, i64 %y) { +; CHECK-LABEL: @test_ashr_exact__exact_is_unsafe( +; CHECK-NEXT: [[TMP1:%.*]] = and i32 %x, -2147483585 +; CHECK-NEXT: [[TMP2:%.*]] = ashr exact i32 [[TMP1]], 2 +; CHECK-NEXT: [[TMP3:%.*]] = zext i32 [[TMP2]] to i64 +; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i32 [[TMP1]], 0 +; CHECK-NEXT: [[TMP5:%.*]] = select i1 [[TMP4]], i64 0, i64 [[TMP3]] +; CHECK-NEXT: 
[[TMP6:%.*]] = ashr i64 %y, [[TMP5]] +; CHECK-NEXT: ret i64 [[TMP6]] +; + %1 = and i32 %x, -2147483585 + %2 = ashr exact i32 %1, 2 + %3 = zext i32 %2 to i64 + %4 = icmp eq i32 %1, 0 + %5 = ashr i64 %y, %3 + %6 = select i1 %4, i64 %y, i64 %5 + ret i64 %6 +} + +define i64 @test_ashr__exact_is_unsafe(i32 %x, i64 %y) { +; CHECK-LABEL: @test_ashr__exact_is_unsafe( +; CHECK-NEXT: [[TMP1:%.*]] = ashr i32 %x, 2 +; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], -536870897 +; CHECK-NEXT: [[TMP3:%.*]] = zext i32 [[TMP2]] to i64 +; CHECK-NEXT: [[TMP4:%.*]] = ashr i64 %y, [[TMP3]] +; CHECK-NEXT: ret i64 [[TMP4]] +; + %1 = and i32 %x, -2147483585 + %2 = ashr i32 %1, 2 + %3 = zext i32 %2 to i64 + %4 = icmp eq i32 %1, 0 + %5 = ashr i64 %y, %3 + %6 = select i1 %4, i64 %y, i64 %5 + ret i64 %6 +} + +define i32 @test_add_nuw_nsw__all_are_safe(i32 %x) { +; CHECK-LABEL: @test_add_nuw_nsw__all_are_safe( +; CHECK-NEXT: [[AND:%.*]] = and i32 %x, 1073741823 +; CHECK-NEXT: [[ADD:%.*]] = add nuw nsw i32 [[AND]], 1 +; CHECK-NEXT: ret i32 [[ADD]] +; + %and = and i32 %x, 1073741823 + %cmp = icmp eq i32 %and, 3 + %add = add nuw nsw i32 %and, 1 + %sel = select i1 %cmp, i32 4, i32 %add + ret i32 %sel +} + +define i32 @test_add_nuw__all_are_safe(i32 %x) { +; CHECK-LABEL: @test_add_nuw__all_are_safe( +; CHECK-NEXT: [[AND:%.*]] = and i32 %x, 1073741823 +; CHECK-NEXT: [[ADD:%.*]] = add nuw nsw i32 [[AND]], 1 +; CHECK-NEXT: ret i32 [[ADD]] +; + %and = and i32 %x, 1073741823 + %cmp = icmp eq i32 %and, 3 + %add = add nuw i32 %and, 1 + %sel = select i1 %cmp, i32 4, i32 %add + ret i32 %sel +} + +define i32 @test_add_nsw__all_are_safe(i32 %x) { +; CHECK-LABEL: @test_add_nsw__all_are_safe( +; CHECK-NEXT: [[AND:%.*]] = and i32 %x, 1073741823 +; CHECK-NEXT: [[ADD:%.*]] = add nuw nsw i32 [[AND]], 1 +; CHECK-NEXT: ret i32 [[ADD]] +; + %and = and i32 %x, 1073741823 + %cmp = icmp eq i32 %and, 3 + %add = add nsw i32 %and, 1 + %sel = select i1 %cmp, i32 4, i32 %add + ret i32 %sel +} + +define i32 
@test_add__all_are_safe(i32 %x) { +; CHECK-LABEL: @test_add__all_are_safe( +; CHECK-NEXT: [[AND:%.*]] = and i32 %x, 1073741823 +; CHECK-NEXT: [[ADD:%.*]] = add nuw nsw i32 [[AND]], 1 +; CHECK-NEXT: ret i32 [[ADD]] +; + %and = and i32 %x, 1073741823 + %cmp = icmp eq i32 %and, 3 + %add = add i32 %and, 1 + %sel = select i1 %cmp, i32 4, i32 %add + ret i32 %sel +} + +define i32 @test_add_nuw_nsw__nuw_is_safe(i32 %x) { +; CHECK-LABEL: @test_add_nuw_nsw__nuw_is_safe( +; CHECK-NEXT: [[AND:%.*]] = and i32 %x, 2147483647 +; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[AND]], 2147483647 +; CHECK-NEXT: [[ADD:%.*]] = add nuw nsw i32 [[AND]], 1 +; CHECK-NEXT: [[SEL:%.*]] = select i1 [[CMP]], i32 -2147483648, i32 [[ADD]] +; CHECK-NEXT: ret i32 [[SEL]] +; + %and = and i32 %x, 2147483647 + %cmp = icmp eq i32 %and, 2147483647 + %add = add nuw nsw i32 %and, 1 + %sel = select i1 %cmp, i32 -2147483648, i32 %add + ret i32 %sel +} + +define i32 @test_add_nuw__nuw_is_safe(i32 %x) { +; CHECK-LABEL: @test_add_nuw__nuw_is_safe( +; CHECK-NEXT: [[AND:%.*]] = and i32 %x, 2147483647 +; CHECK-NEXT: [[ADD:%.*]] = add nuw i32 [[AND]], 1 +; CHECK-NEXT: ret i32 [[ADD]] +; + %and = and i32 %x, 2147483647 + %cmp = icmp eq i32 %and, 2147483647 + %add = add nuw i32 %and, 1 + %sel = select i1 %cmp, i32 -2147483648, i32 %add + ret i32 %sel +} + +define i32 @test_add_nsw__nuw_is_safe(i32 %x) { +; CHECK-LABEL: @test_add_nsw__nuw_is_safe( +; CHECK-NEXT: [[AND:%.*]] = and i32 %x, 2147483647 +; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[AND]], 2147483647 +; CHECK-NEXT: [[ADD:%.*]] = add nuw nsw i32 [[AND]], 1 +; CHECK-NEXT: [[SEL:%.*]] = select i1 [[CMP]], i32 -2147483648, i32 [[ADD]] +; CHECK-NEXT: ret i32 [[SEL]] +; + %and = and i32 %x, 2147483647 + %cmp = icmp eq i32 %and, 2147483647 + %add = add nsw i32 %and, 1 + %sel = select i1 %cmp, i32 -2147483648, i32 %add + ret i32 %sel +} + +define i32 @test_add__nuw_is_safe(i32 %x) { +; CHECK-LABEL: @test_add__nuw_is_safe( +; CHECK-NEXT: [[AND:%.*]] = and i32 %x, 2147483647 
+; CHECK-NEXT: [[ADD:%.*]] = add nuw i32 [[AND]], 1 +; CHECK-NEXT: ret i32 [[ADD]] +; + %and = and i32 %x, 2147483647 + %cmp = icmp eq i32 %and, 2147483647 + %add = add i32 %and, 1 + %sel = select i1 %cmp, i32 -2147483648, i32 %add + ret i32 %sel +} + +define i32 @test_add_nuw_nsw__nsw_is_safe(i32 %x) { +; CHECK-LABEL: @test_add_nuw_nsw__nsw_is_safe( +; CHECK-NEXT: [[OR:%.*]] = or i32 %x, -2147483648 +; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[OR]], -1 +; CHECK-NEXT: [[ADD:%.*]] = add nuw nsw i32 [[OR]], 1 +; CHECK-NEXT: [[SEL:%.*]] = select i1 [[CMP]], i32 0, i32 [[ADD]] +; CHECK-NEXT: ret i32 [[SEL]] +; + %or = or i32 %x, -2147483648 + %cmp = icmp eq i32 %or, -1 + %add = add nuw nsw i32 %or, 1 + %sel = select i1 %cmp, i32 0, i32 %add + ret i32 %sel +} + +define i32 @test_add_nuw__nsw_is_safe(i32 %x) { +; CHECK-LABEL: @test_add_nuw__nsw_is_safe( +; CHECK-NEXT: [[OR:%.*]] = or i32 %x, -2147483648 +; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[OR]], -1 +; CHECK-NEXT: [[ADD:%.*]] = add nuw nsw i32 [[OR]], 1 +; CHECK-NEXT: [[SEL:%.*]] = select i1 [[CMP]], i32 0, i32 [[ADD]] +; CHECK-NEXT: ret i32 [[SEL]] +; + %or = or i32 %x, -2147483648 + %cmp = icmp eq i32 %or, -1 + %add = add nuw i32 %or, 1 + %sel = select i1 %cmp, i32 0, i32 %add + ret i32 %sel +} + +define i32 @test_add_nsw__nsw_is_safe(i32 %x) { +; CHECK-LABEL: @test_add_nsw__nsw_is_safe( +; CHECK-NEXT: [[OR:%.*]] = or i32 %x, -2147483648 +; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[OR]], 1 +; CHECK-NEXT: ret i32 [[ADD]] +; + %or = or i32 %x, -2147483648 + %cmp = icmp eq i32 %or, -1 + %add = add nsw i32 %or, 1 + %sel = select i1 %cmp, i32 0, i32 %add + ret i32 %sel +} + +define i32 @test_add__nsw_is_safe(i32 %x) { +; CHECK-LABEL: @test_add__nsw_is_safe( +; CHECK-NEXT: [[OR:%.*]] = or i32 %x, -2147483648 +; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[OR]], 1 +; CHECK-NEXT: ret i32 [[ADD]] +; + %or = or i32 %x, -2147483648 + %cmp = icmp eq i32 %or, -1 + %add = add i32 %or, 1 + %sel = select i1 %cmp, i32 0, i32 %add + ret i32 
%sel +} + +define i32 @test_add_nuw_nsw__none_are_safe(i32 %x) { +; CHECK-LABEL: @test_add_nuw_nsw__none_are_safe( +; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 %x, 3 +; CHECK-NEXT: [[ADD:%.*]] = add nuw nsw i32 %x, 1 +; CHECK-NEXT: [[SEL:%.*]] = select i1 [[CMP]], i32 4, i32 [[ADD]] +; CHECK-NEXT: ret i32 [[SEL]] +; + %cmp = icmp eq i32 %x, 3 + %add = add nuw nsw i32 %x, 1 + %sel = select i1 %cmp, i32 4, i32 %add + ret i32 %sel +} + +define i32 @test_add_nuw__none_are_safe(i32 %x) { +; CHECK-LABEL: @test_add_nuw__none_are_safe( +; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 %x, 3 +; CHECK-NEXT: [[ADD:%.*]] = add nuw i32 %x, 1 +; CHECK-NEXT: [[SEL:%.*]] = select i1 [[CMP]], i32 4, i32 [[ADD]] +; CHECK-NEXT: ret i32 [[SEL]] +; + %cmp = icmp eq i32 %x, 3 + %add = add nuw i32 %x, 1 + %sel = select i1 %cmp, i32 4, i32 %add + ret i32 %sel +} + +define i32 @test_add_nsw__none_are_safe(i32 %x) { +; CHECK-LABEL: @test_add_nsw__none_are_safe( +; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 %x, 3 +; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 %x, 1 +; CHECK-NEXT: [[SEL:%.*]] = select i1 [[CMP]], i32 4, i32 [[ADD]] +; CHECK-NEXT: ret i32 [[SEL]] +; + %cmp = icmp eq i32 %x, 3 + %add = add nsw i32 %x, 1 + %sel = select i1 %cmp, i32 4, i32 %add + ret i32 %sel +} + +define i32 @test_add__none_are_safe(i32 %x) { +; CHECK-LABEL: @test_add__none_are_safe( +; CHECK-NEXT: [[ADD:%.*]] = add i32 %x, 1 +; CHECK-NEXT: ret i32 [[ADD]] +; + %cmp = icmp eq i32 %x, 3 + %add = add i32 %x, 1 + %sel = select i1 %cmp, i32 4, i32 %add + ret i32 %sel +} + +define i32 @test_sub_nuw_nsw__all_are_safe(i32 %x) { +; CHECK-LABEL: @test_sub_nuw_nsw__all_are_safe( +; CHECK-NEXT: [[AND:%.*]] = and i32 %x, 255 +; CHECK-NEXT: [[SUB:%.*]] = sub nuw nsw i32 -254, [[AND]] +; CHECK-NEXT: ret i32 [[SUB]] +; + %and = and i32 %x, 255 + %cmp = icmp eq i32 %and, 6 + %sub = sub nuw nsw i32 -254, %and + %sel = select i1 %cmp, i32 -260, i32 %sub + ret i32 %sel +} + +define i32 @test_sub_nuw__all_are_safe(i32 %x) { +; CHECK-LABEL: 
@test_sub_nuw__all_are_safe( +; CHECK-NEXT: [[AND:%.*]] = and i32 %x, 255 +; CHECK-NEXT: [[SUB:%.*]] = sub nuw nsw i32 -254, [[AND]] +; CHECK-NEXT: ret i32 [[SUB]] +; + %and = and i32 %x, 255 + %cmp = icmp eq i32 %and, 6 + %sub = sub nuw i32 -254, %and + %sel = select i1 %cmp, i32 -260, i32 %sub + ret i32 %sel +} + +define i32 @test_sub_nsw__all_are_safe(i32 %x) { +; CHECK-LABEL: @test_sub_nsw__all_are_safe( +; CHECK-NEXT: [[AND:%.*]] = and i32 %x, 255 +; CHECK-NEXT: [[SUB:%.*]] = sub nuw nsw i32 -254, [[AND]] +; CHECK-NEXT: ret i32 [[SUB]] +; + %and = and i32 %x, 255 + %cmp = icmp eq i32 %and, 6 + %sub = sub nsw i32 -254, %and + %sel = select i1 %cmp, i32 -260, i32 %sub + ret i32 %sel +} + +define i32 @test_sub__all_are_safe(i32 %x) { +; CHECK-LABEL: @test_sub__all_are_safe( +; CHECK-NEXT: [[AND:%.*]] = and i32 %x, 255 +; CHECK-NEXT: [[SUB:%.*]] = sub nuw nsw i32 -254, [[AND]] +; CHECK-NEXT: ret i32 [[SUB]] +; + %and = and i32 %x, 255 + %cmp = icmp eq i32 %and, 6 + %sub = sub i32 -254, %and + %sel = select i1 %cmp, i32 -260, i32 %sub + ret i32 %sel +} + +define i32 @test_sub_nuw_nsw__nuw_is_safe(i32 %x) { +; CHECK-LABEL: @test_sub_nuw_nsw__nuw_is_safe( +; CHECK-NEXT: [[AND:%.*]] = and i32 %x, 2147483647 +; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[AND]], 1073741824 +; CHECK-NEXT: [[SUB:%.*]] = sub nuw nsw i32 -2147483648, [[AND]] +; CHECK-NEXT: [[SEL:%.*]] = select i1 [[CMP]], i32 1073741824, i32 [[SUB]] +; CHECK-NEXT: ret i32 [[SEL]] +; + %and = and i32 %x, 2147483647 + %cmp = icmp eq i32 %and, 1073741824 + %sub = sub nuw nsw i32 -2147483648, %and + %sel = select i1 %cmp, i32 1073741824, i32 %sub + ret i32 %sel +} + +define i32 @test_sub_nuw__nuw_is_safe(i32 %x) { +; CHECK-LABEL: @test_sub_nuw__nuw_is_safe( +; CHECK-NEXT: [[AND:%.*]] = and i32 %x, 2147483647 +; CHECK-NEXT: [[SUB:%.*]] = sub nuw i32 -2147483648, [[AND]] +; CHECK-NEXT: ret i32 [[SUB]] +; + %and = and i32 %x, 2147483647 + %cmp = icmp eq i32 %and, 1073741824 + %sub = sub nuw i32 -2147483648, %and + 
%sel = select i1 %cmp, i32 1073741824, i32 %sub + ret i32 %sel +} + +define i32 @test_sub_nsw__nuw_is_safe(i32 %x) { +; CHECK-LABEL: @test_sub_nsw__nuw_is_safe( +; CHECK-NEXT: [[AND:%.*]] = and i32 %x, 2147483647 +; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[AND]], 1073741824 +; CHECK-NEXT: [[SUB:%.*]] = sub nuw nsw i32 -2147483648, [[AND]] +; CHECK-NEXT: [[SEL:%.*]] = select i1 [[CMP]], i32 1073741824, i32 [[SUB]] +; CHECK-NEXT: ret i32 [[SEL]] +; + %and = and i32 %x, 2147483647 + %cmp = icmp eq i32 %and, 1073741824 + %sub = sub nsw i32 -2147483648, %and + %sel = select i1 %cmp, i32 1073741824, i32 %sub + ret i32 %sel +} + +define i32 @test_sub__nuw_is_safe(i32 %x) { +; CHECK-LABEL: @test_sub__nuw_is_safe( +; CHECK-NEXT: [[AND:%.*]] = and i32 %x, 2147483647 +; CHECK-NEXT: [[SUB:%.*]] = sub nuw i32 -2147483648, [[AND]] +; CHECK-NEXT: ret i32 [[SUB]] +; + %and = and i32 %x, 2147483647 + %cmp = icmp eq i32 %and, 1073741824 + %sub = sub i32 -2147483648, %and + %sel = select i1 %cmp, i32 1073741824, i32 %sub + ret i32 %sel +} + +define i32 @test_sub_nuw_nsw__nsw_is_safe(i32 %x) { +; CHECK-LABEL: @test_sub_nuw_nsw__nsw_is_safe( +; CHECK-NEXT: [[OR:%.*]] = or i32 %x, -2147483648 +; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[OR]], -2147483647 +; CHECK-NEXT: [[SUB:%.*]] = sub nuw nsw i32 -2147483648, [[OR]] +; CHECK-NEXT: [[SEL:%.*]] = select i1 [[CMP]], i32 -1, i32 [[SUB]] +; CHECK-NEXT: ret i32 [[SEL]] +; + %or = or i32 %x, -2147483648 + %cmp = icmp eq i32 %or, -2147483647 + %sub = sub nuw nsw i32 -2147483648, %or + %sel = select i1 %cmp, i32 -1, i32 %sub + ret i32 %sel +} + +define i32 @test_sub_nuw__nsw_is_safe(i32 %x) { +; CHECK-LABEL: @test_sub_nuw__nsw_is_safe( +; CHECK-NEXT: [[OR:%.*]] = or i32 %x, -2147483648 +; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[OR]], -2147483647 +; CHECK-NEXT: [[SUB:%.*]] = sub nuw nsw i32 -2147483648, [[OR]] +; CHECK-NEXT: [[SEL:%.*]] = select i1 [[CMP]], i32 -1, i32 [[SUB]] +; CHECK-NEXT: ret i32 [[SEL]] +; + %or = or i32 %x, -2147483648 + %cmp 
= icmp eq i32 %or, -2147483647 + %sub = sub nuw i32 -2147483648, %or + %sel = select i1 %cmp, i32 -1, i32 %sub + ret i32 %sel +} + +define i32 @test_sub_nsw__nsw_is_safe(i32 %x) { +; CHECK-LABEL: @test_sub_nsw__nsw_is_safe( +; CHECK-NEXT: [[OR:%.*]] = or i32 %x, -2147483648 +; CHECK-NEXT: [[SUB:%.*]] = sub nsw i32 -2147483648, [[OR]] +; CHECK-NEXT: ret i32 [[SUB]] +; + %or = or i32 %x, -2147483648 + %cmp = icmp eq i32 %or, -2147483647 + %sub = sub nsw i32 -2147483648, %or + %sel = select i1 %cmp, i32 -1, i32 %sub + ret i32 %sel +} + +define i32 @test_sub__nsw_is_safe(i32 %x) { +; CHECK-LABEL: @test_sub__nsw_is_safe( +; CHECK-NEXT: [[OR:%.*]] = or i32 %x, -2147483648 +; CHECK-NEXT: [[SUB:%.*]] = sub nsw i32 -2147483648, [[OR]] +; CHECK-NEXT: ret i32 [[SUB]] +; + %or = or i32 %x, -2147483648 + %cmp = icmp eq i32 %or, -2147483647 + %sub = sub i32 -2147483648, %or + %sel = select i1 %cmp, i32 -1, i32 %sub + ret i32 %sel +} + +define i32 @test_sub_nuw_nsw__none_are_safe(i32 %x) { +; CHECK-LABEL: @test_sub_nuw_nsw__none_are_safe( +; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 %x, 1 +; CHECK-NEXT: [[SUB:%.*]] = sub nuw nsw i32 -2147483648, %x +; CHECK-NEXT: [[SEL:%.*]] = select i1 [[CMP]], i32 2147483647, i32 [[SUB]] +; CHECK-NEXT: ret i32 [[SEL]] +; + %cmp = icmp eq i32 %x, 1 + %sub = sub nuw nsw i32 -2147483648, %x + %sel = select i1 %cmp, i32 2147483647, i32 %sub + ret i32 %sel +} + +define i32 @test_sub_nuw__none_are_safe(i32 %x) { +; CHECK-LABEL: @test_sub_nuw__none_are_safe( +; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 %x, 1 +; CHECK-NEXT: [[SUB:%.*]] = sub nuw i32 -2147483648, %x +; CHECK-NEXT: [[SEL:%.*]] = select i1 [[CMP]], i32 2147483647, i32 [[SUB]] +; CHECK-NEXT: ret i32 [[SEL]] +; + %cmp = icmp eq i32 %x, 1 + %sub = sub nuw i32 -2147483648, %x + %sel = select i1 %cmp, i32 2147483647, i32 %sub + ret i32 %sel +} + +define i32 @test_sub_nsw__none_are_safe(i32 %x) { +; CHECK-LABEL: @test_sub_nsw__none_are_safe( +; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 %x, 1 +; CHECK-NEXT: 
[[SUB:%.*]] = sub nsw i32 -2147483648, %x +; CHECK-NEXT: [[SEL:%.*]] = select i1 [[CMP]], i32 2147483647, i32 [[SUB]] +; CHECK-NEXT: ret i32 [[SEL]] +; + %cmp = icmp eq i32 %x, 1 + %sub = sub nsw i32 -2147483648, %x + %sel = select i1 %cmp, i32 2147483647, i32 %sub + ret i32 %sel +} + +define i32 @test_sub__none_are_safe(i32 %x) { +; CHECK-LABEL: @test_sub__none_are_safe( +; CHECK-NEXT: [[SUB:%.*]] = sub i32 -2147483648, %x +; CHECK-NEXT: ret i32 [[SUB]] +; + %cmp = icmp eq i32 %x, 1 + %sub = sub i32 -2147483648, %x + %sel = select i1 %cmp, i32 2147483647, i32 %sub + ret i32 %sel +} + +define i32 @test_mul_nuw_nsw__all_are_safe(i32 %x) { +; CHECK-LABEL: @test_mul_nuw_nsw__all_are_safe( +; CHECK-NEXT: [[AND:%.*]] = and i32 %x, 255 +; CHECK-NEXT: [[MUL:%.*]] = mul nuw nsw i32 [[AND]], 9 +; CHECK-NEXT: ret i32 [[MUL]] +; + %and = and i32 %x, 255 + %cmp = icmp eq i32 %and, 17 + %mul = mul nuw nsw i32 %and, 9 + %sel = select i1 %cmp, i32 153, i32 %mul + ret i32 %sel +} + +define i32 @test_mul_nuw__all_are_safe(i32 %x) { +; CHECK-LABEL: @test_mul_nuw__all_are_safe( +; CHECK-NEXT: [[AND:%.*]] = and i32 %x, 255 +; CHECK-NEXT: [[MUL:%.*]] = mul nuw nsw i32 [[AND]], 9 +; CHECK-NEXT: ret i32 [[MUL]] +; + %and = and i32 %x, 255 + %cmp = icmp eq i32 %and, 17 + %mul = mul nuw i32 %and, 9 + %sel = select i1 %cmp, i32 153, i32 %mul + ret i32 %sel +} + +define i32 @test_mul_nsw__all_are_safe(i32 %x) { +; CHECK-LABEL: @test_mul_nsw__all_are_safe( +; CHECK-NEXT: [[AND:%.*]] = and i32 %x, 255 +; CHECK-NEXT: [[MUL:%.*]] = mul nuw nsw i32 [[AND]], 9 +; CHECK-NEXT: ret i32 [[MUL]] +; + %and = and i32 %x, 255 + %cmp = icmp eq i32 %and, 17 + %mul = mul nsw i32 %and, 9 + %sel = select i1 %cmp, i32 153, i32 %mul + ret i32 %sel +} + +define i32 @test_mul__all_are_safe(i32 %x) { +; CHECK-LABEL: @test_mul__all_are_safe( +; CHECK-NEXT: [[AND:%.*]] = and i32 %x, 255 +; CHECK-NEXT: [[MUL:%.*]] = mul nuw nsw i32 [[AND]], 9 +; CHECK-NEXT: ret i32 [[MUL]] +; + %and = and i32 %x, 255 + %cmp = icmp eq 
i32 %and, 17 + %mul = mul i32 %and, 9 + %sel = select i1 %cmp, i32 153, i32 %mul + ret i32 %sel +} + +define i32 @test_mul_nuw_nsw__nuw_is_safe(i32 %x) { +; CHECK-LABEL: @test_mul_nuw_nsw__nuw_is_safe( +; CHECK-NEXT: [[AND:%.*]] = and i32 %x, 268435457 +; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[AND]], 268435456 +; CHECK-NEXT: [[MUL:%.*]] = mul nuw nsw i32 [[AND]], 9 +; CHECK-NEXT: [[SEL:%.*]] = select i1 [[CMP]], i32 -1879048192, i32 [[MUL]] +; CHECK-NEXT: ret i32 [[SEL]] +; + %and = and i32 %x, 268435457 + %cmp = icmp eq i32 %and, 268435456 + %mul = mul nuw nsw i32 %and, 9 + %sel = select i1 %cmp, i32 -1879048192, i32 %mul + ret i32 %sel +} + +define i32 @test_mul_nuw__nuw_is_safe(i32 %x) { +; CHECK-LABEL: @test_mul_nuw__nuw_is_safe( +; CHECK-NEXT: [[AND:%.*]] = and i32 %x, 268435457 +; CHECK-NEXT: [[MUL:%.*]] = mul nuw i32 [[AND]], 9 +; CHECK-NEXT: ret i32 [[MUL]] +; + %and = and i32 %x, 268435457 + %cmp = icmp eq i32 %and, 268435456 + %mul = mul nuw i32 %and, 9 + %sel = select i1 %cmp, i32 -1879048192, i32 %mul + ret i32 %sel +} + +define i32 @test_mul_nsw__nuw_is_safe(i32 %x) { +; CHECK-LABEL: @test_mul_nsw__nuw_is_safe( +; CHECK-NEXT: [[AND:%.*]] = and i32 %x, 268435457 +; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[AND]], 268435456 +; CHECK-NEXT: [[MUL:%.*]] = mul nuw nsw i32 [[AND]], 9 +; CHECK-NEXT: [[SEL:%.*]] = select i1 [[CMP]], i32 -1879048192, i32 [[MUL]] +; CHECK-NEXT: ret i32 [[SEL]] +; + %and = and i32 %x, 268435457 + %cmp = icmp eq i32 %and, 268435456 + %mul = mul nsw i32 %and, 9 + %sel = select i1 %cmp, i32 -1879048192, i32 %mul + ret i32 %sel +} + +define i32 @test_mul__nuw_is_safe(i32 %x) { +; CHECK-LABEL: @test_mul__nuw_is_safe( +; CHECK-NEXT: [[AND:%.*]] = and i32 %x, 268435457 +; CHECK-NEXT: [[MUL:%.*]] = mul nuw i32 [[AND]], 9 +; CHECK-NEXT: ret i32 [[MUL]] +; + %and = and i32 %x, 268435457 + %cmp = icmp eq i32 %and, 268435456 + %mul = mul i32 %and, 9 + %sel = select i1 %cmp, i32 -1879048192, i32 %mul + ret i32 %sel +} + +define i32 
@test_mul_nuw_nsw__nsw_is_safe(i32 %x) { +; CHECK-LABEL: @test_mul_nuw_nsw__nsw_is_safe( +; CHECK-NEXT: [[AND:%.*]] = or i32 %x, -83886080 +; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[AND]], -83886079 +; CHECK-NEXT: [[MUL:%.*]] = mul nuw nsw i32 [[AND]], 9 +; CHECK-NEXT: [[SEL:%.*]] = select i1 [[CMP]], i32 -754974711, i32 [[MUL]] +; CHECK-NEXT: ret i32 [[SEL]] +; + %and = or i32 %x, -83886080 + %cmp = icmp eq i32 %and, -83886079 + %mul = mul nuw nsw i32 %and, 9 + %sel = select i1 %cmp, i32 -754974711, i32 %mul + ret i32 %sel +} + +define i32 @test_mul_nuw__nsw_is_safe(i32 %x) { +; CHECK-LABEL: @test_mul_nuw__nsw_is_safe( +; CHECK-NEXT: [[AND:%.*]] = or i32 %x, -83886080 +; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[AND]], -83886079 +; CHECK-NEXT: [[MUL:%.*]] = mul nuw nsw i32 [[AND]], 9 +; CHECK-NEXT: [[SEL:%.*]] = select i1 [[CMP]], i32 -754974711, i32 [[MUL]] +; CHECK-NEXT: ret i32 [[SEL]] +; + %and = or i32 %x, -83886080 + %cmp = icmp eq i32 %and, -83886079 + %mul = mul nuw i32 %and, 9 + %sel = select i1 %cmp, i32 -754974711, i32 %mul + ret i32 %sel +} + +define i32 @test_mul_nsw__nsw_is_safe(i32 %x) { +; CHECK-LABEL: @test_mul_nsw__nsw_is_safe( +; CHECK-NEXT: [[AND:%.*]] = or i32 %x, -83886080 +; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[AND]], 9 +; CHECK-NEXT: ret i32 [[MUL]] +; + %and = or i32 %x, -83886080 + %cmp = icmp eq i32 %and, -83886079 + %mul = mul nsw i32 %and, 9 + %sel = select i1 %cmp, i32 -754974711, i32 %mul + ret i32 %sel +} + +define i32 @test_mul__nsw_is_safe(i32 %x) { +; CHECK-LABEL: @test_mul__nsw_is_safe( +; CHECK-NEXT: [[AND:%.*]] = or i32 %x, -83886080 +; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[AND]], 9 +; CHECK-NEXT: ret i32 [[MUL]] +; + %and = or i32 %x, -83886080 + %cmp = icmp eq i32 %and, -83886079 + %mul = mul i32 %and, 9 + %sel = select i1 %cmp, i32 -754974711, i32 %mul + ret i32 %sel +} + +define i32 @test_mul_nuw_nsw__none_are_safe(i32 %x) { +; CHECK-LABEL: @test_mul_nuw_nsw__none_are_safe( +; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 %x, 
805306368 +; CHECK-NEXT: [[MUL:%.*]] = mul nuw nsw i32 %x, 9 +; CHECK-NEXT: [[SEL:%.*]] = select i1 [[CMP]], i32 -1342177280, i32 [[MUL]] +; CHECK-NEXT: ret i32 [[SEL]] +; + %cmp = icmp eq i32 %x, 805306368 + %mul = mul nuw nsw i32 %x, 9 + %sel = select i1 %cmp, i32 -1342177280, i32 %mul + ret i32 %sel +} + +define i32 @test_mul_nuw__none_are_safe(i32 %x) { +; CHECK-LABEL: @test_mul_nuw__none_are_safe( +; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 %x, 805306368 +; CHECK-NEXT: [[MUL:%.*]] = mul nuw i32 %x, 9 +; CHECK-NEXT: [[SEL:%.*]] = select i1 [[CMP]], i32 -1342177280, i32 [[MUL]] +; CHECK-NEXT: ret i32 [[SEL]] +; + %cmp = icmp eq i32 %x, 805306368 + %mul = mul nuw i32 %x, 9 + %sel = select i1 %cmp, i32 -1342177280, i32 %mul + ret i32 %sel +} + +define i32 @test_mul_nsw__none_are_safe(i32 %x) { +; CHECK-LABEL: @test_mul_nsw__none_are_safe( +; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 %x, 805306368 +; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 %x, 9 +; CHECK-NEXT: [[SEL:%.*]] = select i1 [[CMP]], i32 -1342177280, i32 [[MUL]] +; CHECK-NEXT: ret i32 [[SEL]] +; + %cmp = icmp eq i32 %x, 805306368 + %mul = mul nsw i32 %x, 9 + %sel = select i1 %cmp, i32 -1342177280, i32 %mul + ret i32 %sel +} + +define i32 @test_mul__none_are_safe(i32 %x) { +; CHECK-LABEL: @test_mul__none_are_safe( +; CHECK-NEXT: [[MUL:%.*]] = mul i32 %x, 9 +; CHECK-NEXT: ret i32 [[MUL]] +; + %cmp = icmp eq i32 %x, 805306368 + %mul = mul i32 %x, 9 + %sel = select i1 %cmp, i32 -1342177280, i32 %mul + ret i32 %sel +}