Index: include/llvm/Analysis/ValueTracking.h
===================================================================
--- include/llvm/Analysis/ValueTracking.h
+++ include/llvm/Analysis/ValueTracking.h
@@ -384,6 +384,11 @@
                                                AssumptionCache *AC,
                                                const Instruction *CxtI,
                                                const DominatorTree *DT);
+  OverflowResult computeOverflowForSignedMul(const Value *LHS, const Value *RHS,
+                                             const DataLayout &DL,
+                                             AssumptionCache *AC,
+                                             const Instruction *CxtI,
+                                             const DominatorTree *DT);
   OverflowResult computeOverflowForUnsignedAdd(const Value *LHS,
                                                const Value *RHS,
                                                const DataLayout &DL,
@@ -401,6 +406,16 @@
                                              AssumptionCache *AC = nullptr,
                                              const Instruction *CxtI = nullptr,
                                              const DominatorTree *DT = nullptr);
+  OverflowResult computeOverflowForUnsignedSub(const Value *LHS, const Value *RHS,
+                                               const DataLayout &DL,
+                                               AssumptionCache *AC,
+                                               const Instruction *CxtI,
+                                               const DominatorTree *DT);
+  OverflowResult computeOverflowForSignedSub(const Value *LHS, const Value *RHS,
+                                             const DataLayout &DL,
+                                             AssumptionCache *AC,
+                                             const Instruction *CxtI,
+                                             const DominatorTree *DT);
 
   /// Returns true if the arithmetic part of the \p II 's result is
   /// used only along the paths control dependent on the computation
Index: lib/Analysis/InstructionSimplify.cpp
===================================================================
--- lib/Analysis/InstructionSimplify.cpp
+++ lib/Analysis/InstructionSimplify.cpp
@@ -3477,6 +3477,86 @@
   return ::SimplifyFCmpInst(Predicate, LHS, RHS, FMF, Q, RecursionLimit);
 }
 
+static bool
+IsSafeOverflowingBinaryOperator(const OverflowingBinaryOperator *OBO,
+                                const SimplifyQuery &Q) {
+  bool NUW = OBO->hasNoUnsignedWrap(), NSW = OBO->hasNoSignedWrap();
+  if (!NUW && !NSW)
+    return true;
+  const Instruction *I = dyn_cast_or_null<Instruction>(OBO);
+  if (!I)
+    return false;
+  Value *LHS = OBO->getOperand(0), *RHS = OBO->getOperand(1);
+  switch (OBO->getOpcode()) {
+  default:
+    return false;
+  case Instruction::Add:
+    if (NUW && (computeOverflowForUnsignedAdd(LHS, RHS, Q.DL, Q.AC, I, Q.DT) !=
+                OverflowResult::NeverOverflows))
+      return false;
+    if (NSW && (computeOverflowForSignedAdd(LHS, RHS, Q.DL, Q.AC, I, Q.DT) !=
+                OverflowResult::NeverOverflows))
+      return false;
+    return true;
+  case Instruction::Sub:
+    if (NUW && (computeOverflowForUnsignedSub(LHS, RHS, Q.DL, Q.AC, I, Q.DT) !=
+                OverflowResult::NeverOverflows))
+      return false;
+    if (NSW && (computeOverflowForSignedSub(LHS, RHS, Q.DL, Q.AC, I, Q.DT) !=
+                OverflowResult::NeverOverflows))
+      return false;
+    return true;
+  case Instruction::Mul:
+    if (NUW && (computeOverflowForUnsignedMul(LHS, RHS, Q.DL, Q.AC, I, Q.DT) !=
+                OverflowResult::NeverOverflows))
+      return false;
+    if (NSW && (computeOverflowForSignedMul(LHS, RHS, Q.DL, Q.AC, I, Q.DT) !=
+                OverflowResult::NeverOverflows))
+      return false;
+    return true;
+  case Instruction::Shl:
+    const APInt *ShAmtAPInt;
+    if (!match(RHS, m_APInt(ShAmtAPInt)))
+      return false;
+
+    unsigned ShAmt = ShAmtAPInt->getZExtValue();
+    unsigned BitWidth = OBO->getType()->getScalarSizeInBits();
+
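+    // nuw holds when no set bits are shifted out, i.e. the high ShAmt bits
+    // of the operand are known to be zero; nsw additionally requires more
+    // than ShAmt sign bits, so the sign bit is unchanged by the shift.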
+    if (NUW && !MaskedValueIsZero(LHS, APInt::getHighBitsSet(BitWidth, ShAmt),
+                                  Q.DL, 0, Q.AC, I, Q.DT))
+      return false;
+    if (NSW && ComputeNumSignBits(LHS, Q.DL, 0, Q.AC, I, Q.DT) <= ShAmt)
+      return false;
+    return true;
+  }
+}
+
+static bool IsSafePossiblyExactOperator(const PossiblyExactOperator *PEO,
+                                        const SimplifyQuery &Q) {
+  if (!PEO->isExact())
+    return true;
+  const Instruction *I = dyn_cast_or_null<Instruction>(PEO);
+  if (!I)
+    return false;
+  switch (PEO->getOpcode()) {
+  default:
+    return false;
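+  // An lshr/ashr is genuinely exact when the low bits that would be shifted
+  // out are known to be zero.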
+  case Instruction::LShr:
+  case Instruction::AShr:
+    Value *Op0 = I->getOperand(0), *Op1 = I->getOperand(1);
+
+    const APInt *ShAmtAPInt;
+    if (!match(Op1, m_APInt(ShAmtAPInt)))
+      return false;
+
+    unsigned ShAmt = ShAmtAPInt->getZExtValue();
+    unsigned BitWidth = I->getType()->getScalarSizeInBits();
+
+    return MaskedValueIsZero(Op0, APInt::getLowBitsSet(BitWidth, ShAmt), Q.DL,
+                             0, Q.AC, I, Q.DT);
+  }
+}
+
 /// See if V simplifies when its operand Op is replaced with RepOp.
 static const Value *SimplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
                                            const SimplifyQuery &Q,
@@ -3501,11 +3581,11 @@
       //   %sel = select i1 %cmp, i32 -2147483648, i32 %add
       //
       // We can't replace %sel with %add unless we strip away the flags.
-      if (isa<OverflowingBinaryOperator>(B))
-        if (B->hasNoSignedWrap() || B->hasNoUnsignedWrap())
+      if (OverflowingBinaryOperator *OBO = dyn_cast<OverflowingBinaryOperator>(B))
+        if (!IsSafeOverflowingBinaryOperator(OBO, Q))
           return nullptr;
-      if (isa<PossiblyExactOperator>(B))
-        if (B->isExact())
+      if (PossiblyExactOperator *PEO = dyn_cast<PossiblyExactOperator>(B))
+        if (!IsSafePossiblyExactOperator(PEO, Q))
           return nullptr;
 
       if (MaxRecurse) {
@@ -3541,6 +3621,17 @@
     }
   }
 
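+  // For a cast, simplify the cast's input with the operand replaced; if that
+  // folds to a constant, constant-fold the cast of the constant as well.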
+  if (CastInst *C = dyn_cast<CastInst>(I)) {
+    if (MaxRecurse) {
+      const Value *CastInstWithReplacement = SimplifyWithOpReplaced(
+          I->getOperand(0), Op, RepOp, Q, MaxRecurse - 1);
+      if (const Constant *Const =
+              dyn_cast_or_null<Constant>(CastInstWithReplacement))
+        return ConstantExpr::getCast(
+            C->getOpcode(), const_cast<Constant *>(Const), C->getType());
+    }
+  }
+
   // TODO: We could hand off more cases to instsimplify here.
 
   // If all operands are constant after substituting Op for RepOp then we can
Index: lib/Analysis/ValueTracking.cpp
===================================================================
--- lib/Analysis/ValueTracking.cpp
+++ lib/Analysis/ValueTracking.cpp
@@ -3703,6 +3703,48 @@
   return OverflowResult::MayOverflow;
 }
 
+OverflowResult llvm::computeOverflowForSignedMul(const Value *LHS,
+                                                 const Value *RHS,
+                                                 const DataLayout &DL,
+                                                 AssumptionCache *AC,
+                                                 const Instruction *CxtI,
+                                                 const DominatorTree *DT) {
+  // Multiplying n * m significant bits yields a result of n + m significant
+  // bits. If the total number of significant bits does not exceed the
+  // result bit width (minus 1), there is no overflow.
+  // This means if we have enough leading sign bits in the operands
+  // we can guarantee that the result does not overflow.
+  // Ref: "Hacker's Delight" by Henry Warren
+  unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
+
+  // Note that underestimating the number of sign bits gives a more
+  // conservative answer.
+  unsigned SignBits = ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) +
+                      ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT);
+
+  // First handle the easy case: if we have enough sign bits there's
+  // definitely no overflow.
+  if (SignBits > BitWidth + 1)
+    return OverflowResult::NeverOverflows;
+
+  // There are two ambiguous cases where there can be no overflow:
+  //   SignBits == BitWidth + 1   and
+  //   SignBits == BitWidth
+  // The second case is difficult to check, therefore we only handle the
+  // first case.
+  if (SignBits == BitWidth + 1) {
+    // It overflows only when both arguments are negative and the true
+    // product is exactly the minimum negative number.
+    // E.g. mul i16 with 17 sign bits: 0xff00 * 0xff80 = 0x8000
+    // For simplicity we just check if at least one side is not negative.
+    KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT);
+    KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT);
+    if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative())
+      return OverflowResult::NeverOverflows;
+  }
+  return OverflowResult::MayOverflow;
+}
+
 OverflowResult llvm::computeOverflowForUnsignedAdd(const Value *LHS,
                                                    const Value *RHS,
                                                    const DataLayout &DL,
@@ -3832,6 +3874,47 @@
   return OverflowResult::MayOverflow;
 }
 
+OverflowResult llvm::computeOverflowForUnsignedSub(const Value *LHS,
+                                                   const Value *RHS,
+                                                   const DataLayout &DL,
+                                                   AssumptionCache *AC,
+                                                   const Instruction *CxtI,
+                                                   const DominatorTree *DT) {
+  // If the LHS is negative and the RHS is non-negative, no unsigned wrap.
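+  // (A set sign bit makes LHS unsigned-greater than any value whose sign
+  // bit is clear, so the subtraction cannot borrow.)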
+  KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT);
+  KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT);
+  if (LHSKnown.isNegative() && RHSKnown.isNonNegative())
+    return OverflowResult::NeverOverflows;
+
+  return OverflowResult::MayOverflow;
+}
+
+OverflowResult llvm::computeOverflowForSignedSub(const Value *LHS,
+                                                 const Value *RHS,
+                                                 const DataLayout &DL,
+                                                 AssumptionCache *AC,
+                                                 const Instruction *CxtI,
+                                                 const DominatorTree *DT) {
+  // If LHS and RHS each have at least two sign bits, the subtraction
+  // cannot overflow.
+  if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
+      ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
+    return OverflowResult::NeverOverflows;
+
+  KnownBits LHSKnown = computeKnownBits(LHS, DL, 0, AC, CxtI, DT);
+
+  KnownBits RHSKnown = computeKnownBits(RHS, DL, 0, AC, CxtI, DT);
+
+  // Subtraction of two 2's complement numbers having identical signs will
+  // never overflow.
+  if ((LHSKnown.isNegative() && RHSKnown.isNegative()) ||
+      (LHSKnown.isNonNegative() && RHSKnown.isNonNegative()))
+    return OverflowResult::NeverOverflows;
+
+  // TODO: implement logic similar to checkRippleForAdd
+  return OverflowResult::MayOverflow;
+}
+
 bool llvm::isOverflowIntrinsicNoWrap(const IntrinsicInst *II,
                                      const DominatorTree &DT) {
 #ifndef NDEBUG
Index: lib/Transforms/InstCombine/InstCombineAddSub.cpp
===================================================================
--- lib/Transforms/InstCombine/InstCombineAddSub.cpp
+++ lib/Transforms/InstCombine/InstCombineAddSub.cpp
@@ -856,48 +856,6 @@
   return createFMul(OpndVal, Coeff.getValue(Instr->getType()));
 }
 
-/// Return true if we can prove that:
-///    (sub LHS, RHS) === (sub nsw LHS, RHS)
-/// This basically requires proving that the add in the original type would not
-/// overflow to change the sign bit or have a carry out.
-/// TODO: Handle this for Vectors.
-bool InstCombiner::willNotOverflowSignedSub(const Value *LHS,
-                                            const Value *RHS,
-                                            const Instruction &CxtI) const {
-  // If LHS and RHS each have at least two sign bits, the subtraction
-  // cannot overflow.
-  if (ComputeNumSignBits(LHS, 0, &CxtI) > 1 &&
-      ComputeNumSignBits(RHS, 0, &CxtI) > 1)
-    return true;
-
-  KnownBits LHSKnown = computeKnownBits(LHS, 0, &CxtI);
-
-  KnownBits RHSKnown = computeKnownBits(RHS, 0, &CxtI);
-
-  // Subtraction of two 2's complement numbers having identical signs will
-  // never overflow.
-  if ((LHSKnown.isNegative() && RHSKnown.isNegative()) ||
-      (LHSKnown.isNonNegative() && RHSKnown.isNonNegative()))
-    return true;
-
-  // TODO: implement logic similar to checkRippleForAdd
-  return false;
-}
-
-/// Return true if we can prove that:
-///    (sub LHS, RHS) === (sub nuw LHS, RHS)
-bool InstCombiner::willNotOverflowUnsignedSub(const Value *LHS,
-                                              const Value *RHS,
-                                              const Instruction &CxtI) const {
-  // If the LHS is negative and the RHS is non-negative, no unsigned wrap.
-  KnownBits LHSKnown = computeKnownBits(LHS, /*Depth=*/0, &CxtI);
-  KnownBits RHSKnown = computeKnownBits(RHS, /*Depth=*/0, &CxtI);
-  if (LHSKnown.isNegative() && RHSKnown.isNonNegative())
-    return true;
-
-  return false;
-}
-
 // Checks if any operand is negative and we can convert add to sub.
 // This function checks for following negative patterns
 //   ADD(XOR(OR(Z, NOT(C)), C)), 1) == NEG(AND(Z, C))
Index: lib/Transforms/InstCombine/InstCombineInternal.h
===================================================================
--- lib/Transforms/InstCombine/InstCombineInternal.h
+++ lib/Transforms/InstCombine/InstCombineInternal.h
@@ -442,11 +442,22 @@
   }
 
   bool willNotOverflowSignedSub(const Value *LHS, const Value *RHS,
-                                const Instruction &CxtI) const;
+                                const Instruction &CxtI) const {
+    return computeOverflowForSignedSub(LHS, RHS, &CxtI) ==
+           OverflowResult::NeverOverflows;
+  }
+
   bool willNotOverflowUnsignedSub(const Value *LHS, const Value *RHS,
-                                  const Instruction &CxtI) const;
+                                  const Instruction &CxtI) const {
+    return computeOverflowForUnsignedSub(LHS, RHS, &CxtI) ==
+           OverflowResult::NeverOverflows;
+  }
+
   bool willNotOverflowSignedMul(const Value *LHS, const Value *RHS,
-                                const Instruction &CxtI) const;
+                                const Instruction &CxtI) const {
+    return computeOverflowForSignedMul(LHS, RHS, &CxtI) ==
+           OverflowResult::NeverOverflows;
+  }
+
   bool willNotOverflowUnsignedMul(const Value *LHS, const Value *RHS,
                                   const Instruction &CxtI) const {
@@ -597,6 +608,12 @@
     return llvm::computeOverflowForUnsignedMul(LHS, RHS, DL, &AC, CxtI, &DT);
   }
 
+  OverflowResult computeOverflowForSignedMul(const Value *LHS,
+                                             const Value *RHS,
+                                             const Instruction *CxtI) const {
+    return llvm::computeOverflowForSignedMul(LHS, RHS, DL, &AC, CxtI, &DT);
+  }
+
   OverflowResult computeOverflowForUnsignedAdd(const Value *LHS,
                                                const Value *RHS,
                                                const Instruction *CxtI) const {
@@ -609,6 +626,17 @@
     return llvm::computeOverflowForSignedAdd(LHS, RHS, DL, &AC, CxtI, &DT);
   }
 
+  OverflowResult computeOverflowForUnsignedSub(const Value *LHS,
+                                               const Value *RHS,
+                                               const Instruction *CxtI) const {
+    return llvm::computeOverflowForUnsignedSub(LHS, RHS, DL, &AC, CxtI, &DT);
+  }
+
+  OverflowResult computeOverflowForSignedSub(const Value *LHS, const Value *RHS,
+                                             const Instruction *CxtI) const {
+    return llvm::computeOverflowForSignedSub(LHS, RHS, DL, &AC, CxtI, &DT);
+  }
+
   /// Maximum size of array considered when transforming.
   uint64_t MaxArraySizeForCombine;
Index: lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
===================================================================
--- lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
+++ lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
@@ -125,47 +125,6 @@
   return ConstantVector::get(Elts);
 }
 
-/// Return true if we can prove that:
-///    (mul LHS, RHS) === (mul nsw LHS, RHS)
-bool InstCombiner::willNotOverflowSignedMul(const Value *LHS,
-                                            const Value *RHS,
-                                            const Instruction &CxtI) const {
-  // Multiplying n * m significant bits yields a result of n + m significant
-  // bits. If the total number of significant bits does not exceed the
-  // result bit width (minus 1), there is no overflow.
-  // This means if we have enough leading sign bits in the operands
-  // we can guarantee that the result does not overflow.
-  // Ref: "Hacker's Delight" by Henry Warren
-  unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
-
-  // Note that underestimating the number of sign bits gives a more
-  // conservative answer.
-  unsigned SignBits =
-      ComputeNumSignBits(LHS, 0, &CxtI) + ComputeNumSignBits(RHS, 0, &CxtI);
-
-  // First handle the easy case: if we have enough sign bits there's
-  // definitely no overflow.
-  if (SignBits > BitWidth + 1)
-    return true;
-
-  // There are two ambiguous cases where there can be no overflow:
-  //   SignBits == BitWidth + 1   and
-  //   SignBits == BitWidth
-  // The second case is difficult to check, therefore we only handle the
-  // first case.
-  if (SignBits == BitWidth + 1) {
-    // It overflows only when both arguments are negative and the true
-    // product is exactly the minimum negative number.
-    // E.g. mul i16 with 17 sign bits: 0xff00 * 0xff80 = 0x8000
-    // For simplicity we just check if at least one side is not negative.
-    KnownBits LHSKnown = computeKnownBits(LHS, /*Depth=*/0, &CxtI);
-    KnownBits RHSKnown = computeKnownBits(RHS, /*Depth=*/0, &CxtI);
-    if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative())
-      return true;
-  }
-  return false;
-}
-
 Instruction *InstCombiner::visitMul(BinaryOperator &I) {
   bool Changed = SimplifyAssociativeOrCommutative(I);
   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
Index: test/Transforms/InstCombine/select-bitext-bitwise-ops.ll
===================================================================
--- test/Transforms/InstCombine/select-bitext-bitwise-ops.ll
+++ test/Transforms/InstCombine/select-bitext-bitwise-ops.ll
@@ -1,14 +1,13 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -S -instcombine < %s | FileCheck %s
 
 define i64 @sel_false_val_is_a_masked_shl_of_true_val1(i32 %x, i64 %y) {
 ; CHECK-LABEL: @sel_false_val_is_a_masked_shl_of_true_val1(
-; CHECK-NEXT:    [[TMP1:%.*]] = and i32 %x, 15
-; CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i32 [[TMP1]], 2
+; CHECK-NEXT:    [[TMP1:%.*]] = shl i32 %x, 2
+; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], 60
 ; CHECK-NEXT:    [[TMP3:%.*]] = zext i32 [[TMP2]] to i64
-; CHECK-NEXT:    [[TMP4:%.*]] = icmp eq i32 [[TMP1]], 0
-; CHECK-NEXT:    [[TMP5:%.*]] = select i1 [[TMP4]], i64 0, i64 [[TMP3]]
-; CHECK-NEXT:    [[TMP6:%.*]] = ashr i64 %y, [[TMP5]]
-; CHECK-NEXT:    ret i64 [[TMP6]]
+; CHECK-NEXT:    [[TMP4:%.*]] = ashr i64 %y, [[TMP3]]
+; CHECK-NEXT:    ret i64 [[TMP4]]
 ;
   %1 = and i32 %x, 15
   %2 = shl nuw nsw i32 %1, 2
@@ -38,13 +37,11 @@
 
 define i64 @sel_false_val_is_a_masked_lshr_of_true_val1(i32 %x, i64 %y) {
 ; CHECK-LABEL: @sel_false_val_is_a_masked_lshr_of_true_val1(
-; CHECK-NEXT:    [[TMP1:%.*]] = and i32 %x, 60
-; CHECK-NEXT:    [[TMP2:%.*]] = lshr exact i32 [[TMP1]], 2
+; CHECK-NEXT:    [[TMP1:%.*]] = lshr i32 %x, 2
+; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], 15
 ; CHECK-NEXT:    [[TMP3:%.*]] = zext i32 [[TMP2]] to i64
-; CHECK-NEXT:    [[TMP4:%.*]] = icmp eq i32 [[TMP1]], 0
-; CHECK-NEXT:    [[TMP5:%.*]] = select i1 [[TMP4]], i64 0, i64 [[TMP3]]
-; CHECK-NEXT:    [[TMP6:%.*]] = ashr i64 %y, [[TMP5]]
-; CHECK-NEXT:    ret i64 [[TMP6]]
+; CHECK-NEXT:    [[TMP4:%.*]] = ashr i64 %y, [[TMP3]]
+; CHECK-NEXT:    ret i64 [[TMP4]]
 ;
   %1 = and i32 %x, 60
   %2 = lshr i32 %1, 2
@@ -74,13 +71,11 @@
 
 define i64 @sel_false_val_is_a_masked_ashr_of_true_val1(i32 %x, i64 %y) {
 ; CHECK-LABEL: @sel_false_val_is_a_masked_ashr_of_true_val1(
-; CHECK-NEXT:    [[TMP1:%.*]] = and i32 %x, -2147483588
-; CHECK-NEXT:    [[TMP2:%.*]] = ashr exact i32 [[TMP1]], 2
+; CHECK-NEXT:    [[TMP1:%.*]] = ashr i32 %x, 2
+; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], -536870897
 ; CHECK-NEXT:    [[TMP3:%.*]] = zext i32 [[TMP2]] to i64
-; CHECK-NEXT:    [[TMP4:%.*]] = icmp eq i32 [[TMP1]], 0
-; CHECK-NEXT:    [[TMP5:%.*]] = select i1 [[TMP4]], i64 0, i64 [[TMP3]]
-; CHECK-NEXT:    [[TMP6:%.*]] = ashr i64 %y, [[TMP5]]
-; CHECK-NEXT:    ret i64 [[TMP6]]
+; CHECK-NEXT:    [[TMP4:%.*]] = ashr i64 %y, [[TMP3]]
+; CHECK-NEXT:    ret i64 [[TMP4]]
 ;
   %1 = and i32 %x, -2147483588
   %2 = ashr i32 %1, 2
Index: test/Transforms/InstCombine/select-obo-peo-ops.ll
===================================================================
--- test/Transforms/InstCombine/select-obo-peo-ops.ll
+++ test/Transforms/InstCombine/select-obo-peo-ops.ll
@@ -3,13 +3,11 @@
 
 define i64 @test_shl_nuw_nsw__all_are_safe(i32 %x, i64 %y) {
 ; CHECK-LABEL: @test_shl_nuw_nsw__all_are_safe(
-; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[X:%.*]], 15
-; CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i32 [[TMP1]], 2
+; CHECK-NEXT:    [[TMP1:%.*]] = shl i32 [[X:%.*]], 2
+; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], 60
 ; CHECK-NEXT:    [[TMP3:%.*]] = zext i32 [[TMP2]] to i64
-; CHECK-NEXT:    [[TMP4:%.*]] = icmp eq i32 [[TMP1]], 0
-; CHECK-NEXT:    [[TMP5:%.*]] = select i1 [[TMP4]], i64 0, i64 [[TMP3]]
-; CHECK-NEXT:    [[TMP6:%.*]] = ashr i64 [[Y:%.*]], [[TMP5]]
-; CHECK-NEXT:    ret i64 [[TMP6]]
+; CHECK-NEXT:    [[TMP4:%.*]] = ashr i64 [[Y:%.*]], [[TMP3]]
+; CHECK-NEXT:    ret i64 [[TMP4]]
 ;
   %1 = and i32 %x, 15
   %2 = shl nuw nsw i32 %1, 2
@@ -22,13 +20,11 @@
 
 define i64 @test_shl_nuw__all_are_safe(i32 %x, i64 %y) {
 ; CHECK-LABEL: @test_shl_nuw__all_are_safe(
-; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[X:%.*]], 15
-; CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i32 [[TMP1]], 2
+; CHECK-NEXT:    [[TMP1:%.*]] = shl i32 [[X:%.*]], 2
+; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], 60
 ; CHECK-NEXT:    [[TMP3:%.*]] = zext i32 [[TMP2]] to i64
-; CHECK-NEXT:    [[TMP4:%.*]] = icmp eq i32 [[TMP1]], 0
-; CHECK-NEXT:    [[TMP5:%.*]] = select i1 [[TMP4]], i64 0, i64 [[TMP3]]
-; CHECK-NEXT:    [[TMP6:%.*]] = ashr i64 [[Y:%.*]], [[TMP5]]
-; CHECK-NEXT:    ret i64 [[TMP6]]
+; CHECK-NEXT:    [[TMP4:%.*]] = ashr i64 [[Y:%.*]], [[TMP3]]
+; CHECK-NEXT:    ret i64 [[TMP4]]
 ;
   %1 = and i32 %x, 15
   %2 = shl nuw i32 %1, 2
@@ -41,13 +37,11 @@
 
 define i64 @test_shl_nsw__all_are_safe(i32 %x, i64 %y) {
 ; CHECK-LABEL: @test_shl_nsw__all_are_safe(
-; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[X:%.*]], 15
-; CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i32 [[TMP1]], 2
+; CHECK-NEXT:    [[TMP1:%.*]] = shl i32 [[X:%.*]], 2
+; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], 60
 ; CHECK-NEXT:    [[TMP3:%.*]] = zext i32 [[TMP2]] to i64
-; CHECK-NEXT:    [[TMP4:%.*]] = icmp eq i32 [[TMP1]], 0
-; CHECK-NEXT:    [[TMP5:%.*]] = select i1 [[TMP4]], i64 0, i64 [[TMP3]]
-; CHECK-NEXT:    [[TMP6:%.*]] = ashr i64 [[Y:%.*]], [[TMP5]]
-; CHECK-NEXT:    ret i64 [[TMP6]]
+; CHECK-NEXT:    [[TMP4:%.*]] = ashr i64 [[Y:%.*]], [[TMP3]]
+; CHECK-NEXT:    ret i64 [[TMP4]]
 ;
   %1 = and i32 %x, 15
   %2 = shl nsw i32 %1, 2
@@ -60,13 +54,11 @@
 
 define i64 @test_shl__all_are_safe(i32 %x, i64 %y) {
 ; CHECK-LABEL: @test_shl__all_are_safe(
-; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[X:%.*]], 15
-; CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i32 [[TMP1]], 2
+; CHECK-NEXT:    [[TMP1:%.*]] = shl i32 [[X:%.*]], 2
+; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], 60
 ; CHECK-NEXT:    [[TMP3:%.*]] = zext i32 [[TMP2]] to i64
-; CHECK-NEXT:    [[TMP4:%.*]] = icmp eq i32 [[TMP1]], 0
-; CHECK-NEXT:    [[TMP5:%.*]] = select i1 [[TMP4]], i64 0, i64 [[TMP3]]
-; CHECK-NEXT:    [[TMP6:%.*]] = ashr i64 [[Y:%.*]], [[TMP5]]
-; CHECK-NEXT:    ret i64 [[TMP6]]
+; CHECK-NEXT:    [[TMP4:%.*]] = ashr i64 [[Y:%.*]], [[TMP3]]
+; CHECK-NEXT:    ret i64 [[TMP4]]
 ;
   %1 = and i32 %x, 15
   %2 = shl i32 %1, 2
@@ -98,13 +90,11 @@
 
 define i64 @test_shl_nuw__nuw_is_safe(i32 %x, i64 %y) {
 ; CHECK-LABEL: @test_shl_nuw__nuw_is_safe(
-; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[X:%.*]], 1073741822
-; CHECK-NEXT:    [[TMP2:%.*]] = shl nuw i32 [[TMP1]], 2
+; CHECK-NEXT:    [[TMP1:%.*]] = shl i32 [[X:%.*]], 2
+; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], -8
 ; CHECK-NEXT:    [[TMP3:%.*]] = zext i32 [[TMP2]] to i64
-; CHECK-NEXT:    [[TMP4:%.*]] = icmp eq i32 [[TMP1]], 0
-; CHECK-NEXT:    [[TMP5:%.*]] = select i1 [[TMP4]], i64 0, i64 [[TMP3]]
-; CHECK-NEXT:    [[TMP6:%.*]] = ashr i64 [[Y:%.*]], [[TMP5]]
-; CHECK-NEXT:    ret i64 [[TMP6]]
+; CHECK-NEXT:    [[TMP4:%.*]] = ashr i64 [[Y:%.*]], [[TMP3]]
+; CHECK-NEXT:    ret i64 [[TMP4]]
 ;
   %1 = and i32 %x, 1073741822
   %2 = shl nuw i32 %1, 2
@@ -136,13 +126,11 @@
 
 define i64 @test_shl__nuw_is_safe(i32 %x, i64 %y) {
 ; CHECK-LABEL: @test_shl__nuw_is_safe(
-; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[X:%.*]], 1073741822
-; CHECK-NEXT:    [[TMP2:%.*]] = shl nuw i32 [[TMP1]], 2
+; CHECK-NEXT:    [[TMP1:%.*]] = shl i32 [[X:%.*]], 2
+; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], -8
 ; CHECK-NEXT:    [[TMP3:%.*]] = zext i32 [[TMP2]] to i64
-; CHECK-NEXT:    [[TMP4:%.*]] = icmp eq i32 [[TMP1]], 0
-; CHECK-NEXT:    [[TMP5:%.*]] = select i1 [[TMP4]], i64 0, i64 [[TMP3]]
-; CHECK-NEXT:    [[TMP6:%.*]] = ashr i64 [[Y:%.*]], [[TMP5]]
-; CHECK-NEXT:    ret i64 [[TMP6]]
+; CHECK-NEXT:    [[TMP4:%.*]] = ashr i64 [[Y:%.*]], [[TMP3]]
+; CHECK-NEXT:    ret i64 [[TMP4]]
 ;
   %1 = and i32 %x, 1073741822
   %2 = shl i32 %1, 2
@@ -194,12 +182,10 @@
 define i32 @test_shl_nsw__nsw_is_safe(i32 %x) {
 ; CHECK-LABEL: @test_shl_nsw__nsw_is_safe(
 ; CHECK-NEXT:    [[TMP1:%.*]] = or i32 [[X:%.*]], -83886080
-; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq i32 [[TMP1]], -83886079
-; CHECK-NEXT:    [[TMP3:%.*]] = shl nsw i32 [[TMP1]], 2
-; CHECK-NEXT:    [[TMP4:%.*]] = select i1 [[TMP2]], i32 -335544316, i32 [[TMP3]]
-; CHECK-NEXT:    [[TMP5:%.*]] = mul i32 [[TMP4]], [[TMP1]]
-; CHECK-NEXT:    [[TMP6:%.*]] = mul i32 [[TMP5]], [[TMP3]]
-; CHECK-NEXT:    ret i32 [[TMP6]]
+; CHECK-NEXT:    [[TMP2:%.*]] = shl nsw i32 [[TMP1]], 2
+; CHECK-NEXT:    [[TMP3:%.*]] = mul i32 [[TMP2]], [[TMP1]]
+; CHECK-NEXT:    [[TMP4:%.*]] = mul i32 [[TMP3]], [[TMP2]]
+; CHECK-NEXT:    ret i32 [[TMP4]]
 ;
   %1 = or i32 %x, -83886080
   %2 = icmp eq i32 %1, -83886079
@@ -213,12 +199,10 @@
 define i32 @test_shl__nsw_is_safe(i32 %x) {
 ; CHECK-LABEL: @test_shl__nsw_is_safe(
 ; CHECK-NEXT:    [[TMP1:%.*]] = or i32 [[X:%.*]], -83886080
-; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq i32 [[TMP1]], -83886079
-; CHECK-NEXT:    [[TMP3:%.*]] = shl nsw i32 [[TMP1]], 2
-; CHECK-NEXT:    [[TMP4:%.*]] = select i1 [[TMP2]], i32 -335544316, i32 [[TMP3]]
-; CHECK-NEXT:    [[TMP5:%.*]] = mul i32 [[TMP4]], [[TMP1]]
-; CHECK-NEXT:    [[TMP6:%.*]] = mul i32 [[TMP5]], [[TMP3]]
-; CHECK-NEXT:    ret i32 [[TMP6]]
+; CHECK-NEXT:    [[TMP2:%.*]] = shl nsw i32 [[TMP1]], 2
+; CHECK-NEXT:    [[TMP3:%.*]] = mul i32 [[TMP2]], [[TMP1]]
+; CHECK-NEXT:    [[TMP4:%.*]] = mul i32 [[TMP3]], [[TMP2]]
+; CHECK-NEXT:    ret i32 [[TMP4]]
 ;
   %1 = or i32 %x, -83886080
   %2 = icmp eq i32 %1, -83886079
@@ -289,13 +273,11 @@
 
 define i64 @test_shl__none_are_safe(i32 %x, i64 %y) {
 ; CHECK-LABEL: @test_shl__none_are_safe(
-; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[X:%.*]], -2
-; CHECK-NEXT:    [[TMP2:%.*]] = shl i32 [[TMP1]], 2
+; CHECK-NEXT:    [[TMP1:%.*]] = shl i32 [[X:%.*]], 2
+; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], -8
 ; CHECK-NEXT:    [[TMP3:%.*]] = zext i32 [[TMP2]] to i64
-; CHECK-NEXT:    [[TMP4:%.*]] = icmp eq i32 [[TMP1]], 0
-; CHECK-NEXT:    [[TMP5:%.*]] = select i1 [[TMP4]], i64 0, i64 [[TMP3]]
-; CHECK-NEXT:    [[TMP6:%.*]] = ashr i64 [[Y:%.*]], [[TMP5]]
-; CHECK-NEXT:    ret i64 [[TMP6]]
+; CHECK-NEXT:    [[TMP4:%.*]] = ashr i64 [[Y:%.*]], [[TMP3]]
+; CHECK-NEXT:    ret i64 [[TMP4]]
 ;
   %1 = and i32 %x, 4294967294
   %2 = shl i32 %1, 2
@@ -308,13 +290,11 @@
 
 define i64 @test_lshr_exact__exact_is_safe(i32 %x, i64 %y) {
 ; CHECK-LABEL: @test_lshr_exact__exact_is_safe(
-; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[X:%.*]], 60
-; CHECK-NEXT:    [[TMP2:%.*]] = lshr exact i32 [[TMP1]], 2
+; CHECK-NEXT:    [[TMP1:%.*]] = lshr i32 [[X:%.*]], 2
+; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], 15
 ; CHECK-NEXT:    [[TMP3:%.*]] = zext i32 [[TMP2]] to i64
-; CHECK-NEXT:    [[TMP4:%.*]] = icmp eq i32 [[TMP1]], 0
-; CHECK-NEXT:    [[TMP5:%.*]] = select i1 [[TMP4]], i64 0, i64 [[TMP3]]
-; CHECK-NEXT:    [[TMP6:%.*]] = ashr i64 [[Y:%.*]], [[TMP5]]
-; CHECK-NEXT:    ret i64 [[TMP6]]
+; CHECK-NEXT:    [[TMP4:%.*]] = ashr i64 [[Y:%.*]], [[TMP3]]
+; CHECK-NEXT:    ret i64 [[TMP4]]
 ;
   %1 = and i32 %x, 60
   %2 = lshr exact i32 %1, 2
@@ -327,13 +307,11 @@
 
 define i64 @test_lshr__exact_is_safe(i32 %x, i64 %y) {
 ; CHECK-LABEL: @test_lshr__exact_is_safe(
-; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[X:%.*]], 60
-; CHECK-NEXT:    [[TMP2:%.*]] = lshr exact i32 [[TMP1]], 2
+; CHECK-NEXT:    [[TMP1:%.*]] = lshr i32 [[X:%.*]], 2
+; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], 15
 ; CHECK-NEXT:    [[TMP3:%.*]] = zext i32 [[TMP2]] to i64
-; CHECK-NEXT:    [[TMP4:%.*]] = icmp eq i32 [[TMP1]], 0
-; CHECK-NEXT:    [[TMP5:%.*]] = select i1 [[TMP4]], i64 0, i64 [[TMP3]]
-; CHECK-NEXT:    [[TMP6:%.*]] = ashr i64 [[Y:%.*]], [[TMP5]]
-; CHECK-NEXT:    ret i64 [[TMP6]]
+; CHECK-NEXT:    [[TMP4:%.*]] = ashr i64 [[Y:%.*]], [[TMP3]]
+; CHECK-NEXT:    ret i64 [[TMP4]]
 ;
   %1 = and i32 %x, 60
   %2 = lshr i32 %1, 2
@@ -365,13 +343,11 @@
 
 define i64 @test_lshr__exact_is_unsafe(i32 %x, i64 %y) {
 ; CHECK-LABEL: @test_lshr__exact_is_unsafe(
-; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[X:%.*]], 63
-; CHECK-NEXT:    [[TMP2:%.*]] = lshr i32 [[TMP1]], 2
+; CHECK-NEXT:    [[TMP1:%.*]] = lshr i32 [[X:%.*]], 2
+; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], 15
 ; CHECK-NEXT:    [[TMP3:%.*]] = zext i32 [[TMP2]] to i64
-; CHECK-NEXT:    [[TMP4:%.*]] = icmp eq i32 [[TMP1]], 0
-; CHECK-NEXT:    [[TMP5:%.*]] = select i1 [[TMP4]], i64 0, i64 [[TMP3]]
-; CHECK-NEXT:    [[TMP6:%.*]] = ashr i64 [[Y:%.*]], [[TMP5]]
-; CHECK-NEXT:    ret i64 [[TMP6]]
+; CHECK-NEXT:    [[TMP4:%.*]] = ashr i64 [[Y:%.*]], [[TMP3]]
+; CHECK-NEXT:    ret i64 [[TMP4]]
 ;
   %1 = and i32 %x, 63
   %2 = lshr i32 %1, 2
@@ -384,13 +360,11 @@
 
 define i64 @test_ashr_exact__exact_is_safe(i32 %x, i64 %y) {
 ; CHECK-LABEL: @test_ashr_exact__exact_is_safe(
-; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[X:%.*]], -2147483588
-; CHECK-NEXT:    [[TMP2:%.*]] = ashr exact i32 [[TMP1]], 2
+; CHECK-NEXT:    [[TMP1:%.*]] = ashr i32 [[X:%.*]], 2
+; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], -536870897
 ; CHECK-NEXT:    [[TMP3:%.*]] = zext i32 [[TMP2]] to i64
-; CHECK-NEXT:    [[TMP4:%.*]] = icmp eq i32 [[TMP1]], 0
-; CHECK-NEXT:    [[TMP5:%.*]] = select i1 [[TMP4]], i64 0, i64 [[TMP3]]
-; CHECK-NEXT:    [[TMP6:%.*]] = ashr i64 [[Y:%.*]], [[TMP5]]
-; CHECK-NEXT:    ret i64 [[TMP6]]
+; CHECK-NEXT:    [[TMP4:%.*]] = ashr i64 [[Y:%.*]], [[TMP3]]
+; CHECK-NEXT:    ret i64 [[TMP4]]
 ;
   %1 = and i32 %x, -2147483588
   %2 = ashr exact i32 %1, 2
@@ -403,13 +377,11 @@
 
 define i64 @test_ashr__exact_is_safe(i32 %x, i64 %y) {
 ; CHECK-LABEL: @test_ashr__exact_is_safe(
-; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[X:%.*]], -2147483588
-; CHECK-NEXT:    [[TMP2:%.*]] = ashr exact i32 [[TMP1]], 2
+; CHECK-NEXT:    [[TMP1:%.*]] = ashr i32 [[X:%.*]], 2
+; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], -536870897
 ; CHECK-NEXT:    [[TMP3:%.*]] = zext i32 [[TMP2]] to i64
-; CHECK-NEXT:    [[TMP4:%.*]] = icmp eq i32 [[TMP1]], 0
-; CHECK-NEXT:    [[TMP5:%.*]] = select i1 [[TMP4]], i64 0, i64 [[TMP3]]
-; CHECK-NEXT:    [[TMP6:%.*]] = ashr i64 [[Y:%.*]], [[TMP5]]
-; CHECK-NEXT:    ret i64 [[TMP6]]
+; CHECK-NEXT:    [[TMP4:%.*]] = ashr i64 [[Y:%.*]], [[TMP3]]
+; CHECK-NEXT:    ret i64 [[TMP4]]
 ;
   %1 = and i32 %x, -2147483588
   %2 = ashr i32 %1, 2
@@ -441,13 +413,11 @@
 
 define i64 @test_ashr__exact_is_unsafe(i32 %x, i64 %y) {
 ; CHECK-LABEL: @test_ashr__exact_is_unsafe(
-; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[X:%.*]], -2147483585
-; CHECK-NEXT:    [[TMP2:%.*]] = ashr i32 [[TMP1]], 2
+; CHECK-NEXT:    [[TMP1:%.*]] = ashr i32 [[X:%.*]], 2
+; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], -536870897
 ; CHECK-NEXT:    [[TMP3:%.*]] = zext i32 [[TMP2]] to i64
-; CHECK-NEXT:    [[TMP4:%.*]] = icmp eq i32 [[TMP1]], 0
-; CHECK-NEXT:    [[TMP5:%.*]] = select i1 [[TMP4]], i64 0, i64 [[TMP3]]
-; CHECK-NEXT:    [[TMP6:%.*]] = ashr i64 [[Y:%.*]], [[TMP5]]
-; CHECK-NEXT:    ret i64 [[TMP6]]
+; CHECK-NEXT:    [[TMP4:%.*]] = ashr i64 [[Y:%.*]], [[TMP3]]
+; CHECK-NEXT:    ret i64 [[TMP4]]
 ;
   %1 = and i32 %x, -2147483585
   %2 = ashr i32 %1, 2
@@ -461,10 +431,8 @@
 define i32 @test_add_nuw_nsw__all_are_safe(i32 %x) {
 ; CHECK-LABEL: @test_add_nuw_nsw__all_are_safe(
 ; CHECK-NEXT:    [[AND:%.*]] = and i32 [[X:%.*]], 1073741823
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[AND]], 3
 ; CHECK-NEXT:    [[ADD:%.*]] = add nuw nsw i32 [[AND]], 1
-; CHECK-NEXT:    [[SEL:%.*]] = select i1 [[CMP]], i32 4, i32 [[ADD]]
-; CHECK-NEXT:    ret i32 [[SEL]]
+; CHECK-NEXT:    ret i32 [[ADD]]
 ;
   %and = and i32 %x, 1073741823
   %cmp = icmp eq i32 %and, 3
@@ -476,10 +444,8 @@
 define i32 @test_add_nuw__all_are_safe(i32 %x) {
 ; CHECK-LABEL: @test_add_nuw__all_are_safe(
 ; CHECK-NEXT:    [[AND:%.*]] = and i32 [[X:%.*]], 1073741823
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[AND]], 3
 ; CHECK-NEXT:    [[ADD:%.*]] = add nuw nsw i32 [[AND]], 1
-; CHECK-NEXT:    [[SEL:%.*]] = select i1 [[CMP]], i32 4, i32 [[ADD]]
-; CHECK-NEXT:    ret i32 [[SEL]]
+; CHECK-NEXT:    ret i32 [[ADD]]
 ;
   %and = and i32 %x, 1073741823
   %cmp = icmp eq i32 %and, 3
@@ -491,10 +457,8 @@
 define i32 @test_add_nsw__all_are_safe(i32 %x) {
 ; CHECK-LABEL: @test_add_nsw__all_are_safe(
 ; CHECK-NEXT:    [[AND:%.*]] = and i32 [[X:%.*]], 1073741823
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[AND]], 3
 ; CHECK-NEXT:    [[ADD:%.*]] = add nuw nsw i32 [[AND]], 1
-; CHECK-NEXT:    [[SEL:%.*]] = select i1 [[CMP]], i32 4, i32 [[ADD]]
-; CHECK-NEXT:    ret i32 [[SEL]]
+; CHECK-NEXT:    ret i32 [[ADD]]
 ;
   %and = and i32 %x, 1073741823
   %cmp = icmp eq i32 %and, 3
@@ -506,10 +470,8 @@
 define i32 @test_add__all_are_safe(i32 %x) {
 ; CHECK-LABEL: @test_add__all_are_safe(
 ; CHECK-NEXT:    [[AND:%.*]] = and i32 [[X:%.*]], 1073741823
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[AND]], 3
 ; CHECK-NEXT:    [[ADD:%.*]] = add nuw nsw i32 [[AND]], 1
-; CHECK-NEXT:    [[SEL:%.*]] = select i1 [[CMP]], i32 4, i32 [[ADD]]
-; CHECK-NEXT:    ret i32 [[SEL]]
+; CHECK-NEXT:    ret i32 [[ADD]]
 ;
   %and = and i32 %x, 1073741823
   %cmp = icmp eq i32 %and, 3
@@ -536,10 +498,8 @@
 define i32 @test_add_nuw__nuw_is_safe(i32 %x) {
 ; CHECK-LABEL: @test_add_nuw__nuw_is_safe(
 ; CHECK-NEXT:    [[AND:%.*]] = and i32 [[X:%.*]], 2147483647
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[AND]], 2147483647
 ; CHECK-NEXT:    [[ADD:%.*]] = add nuw i32 [[AND]], 1
-; CHECK-NEXT:    [[SEL:%.*]] = select i1 [[CMP]], i32 -2147483648, i32 [[ADD]]
-; CHECK-NEXT:    ret i32 [[SEL]]
+; CHECK-NEXT:    ret i32 [[ADD]]
 ;
   %and = and i32 %x, 2147483647
   %cmp = icmp eq i32 %and, 2147483647
@@ -566,10 +526,8 @@
 define i32 @test_add__nuw_is_safe(i32 %x) {
 ; CHECK-LABEL: @test_add__nuw_is_safe(
 ; CHECK-NEXT:    [[AND:%.*]] = and i32 [[X:%.*]], 2147483647
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[AND]], 2147483647
 ; CHECK-NEXT:    [[ADD:%.*]] = add nuw i32 [[AND]], 1
-; CHECK-NEXT:    [[SEL:%.*]] = select i1 [[CMP]], i32 -2147483648, i32 [[ADD]]
-; CHECK-NEXT:    ret i32 [[SEL]]
+; CHECK-NEXT:    ret i32 [[ADD]]
 ;
   %and = and i32 %x, 2147483647
   %cmp = icmp eq i32 %and, 2147483647
@@ -611,10 +569,8 @@
 define i32 @test_add_nsw__nsw_is_safe(i32 %x) {
 ; CHECK-LABEL: @test_add_nsw__nsw_is_safe(
 ; CHECK-NEXT:    [[OR:%.*]] = or i32 [[X:%.*]], -2147483648
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[OR]], -1
 ; CHECK-NEXT:    [[ADD:%.*]] = add nsw i32 [[OR]], 1
-; CHECK-NEXT:    [[SEL:%.*]] = select i1 [[CMP]], i32 0, i32 [[ADD]]
-; CHECK-NEXT:    ret i32 [[SEL]]
+; CHECK-NEXT:    ret i32 [[ADD]]
 ;
   %or = or i32 %x, -2147483648
   %cmp = icmp eq i32 %or, -1
@@ -626,10 +582,8 @@
 define i32 @test_add__nsw_is_safe(i32 %x) {
 ; CHECK-LABEL: @test_add__nsw_is_safe(
 ; CHECK-NEXT:    [[OR:%.*]] = or i32 [[X:%.*]], -2147483648
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[OR]], -1
 ; CHECK-NEXT:    [[ADD:%.*]] = add nsw i32 [[OR]], 1
-; CHECK-NEXT:    [[SEL:%.*]] = select i1 [[CMP]], i32 0, i32 [[ADD]]
-; CHECK-NEXT:    ret i32 [[SEL]]
+; CHECK-NEXT:    ret i32 [[ADD]]
 ;
   %or = or i32 %x, -2147483648
   %cmp = icmp eq i32 %or, -1
@@ -691,10 +645,8 @@
 define i32 @test_sub_nuw_nsw__all_are_safe(i32 %x) {
 ; CHECK-LABEL: @test_sub_nuw_nsw__all_are_safe(
 ; CHECK-NEXT:    [[AND:%.*]] = and i32 [[X:%.*]], 255
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[AND]], 6
 ; CHECK-NEXT:    [[SUB:%.*]] = sub nuw nsw i32 -254, [[AND]]
-; CHECK-NEXT:    [[SEL:%.*]] = select i1 [[CMP]], i32 -260, i32 [[SUB]]
-; CHECK-NEXT:    ret i32 [[SEL]]
+; CHECK-NEXT:    ret i32 [[SUB]]
 ;
   %and = and i32 %x, 255
   %cmp = icmp eq i32 %and, 6
@@ -706,10 +658,8 @@
 define i32 @test_sub_nuw__all_are_safe(i32 %x) {
 ; CHECK-LABEL: @test_sub_nuw__all_are_safe(
 ; CHECK-NEXT:    [[AND:%.*]] = and i32 [[X:%.*]], 255
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[AND]], 6
 ; CHECK-NEXT:    [[SUB:%.*]] = sub nuw nsw i32 -254, [[AND]]
-; CHECK-NEXT:    [[SEL:%.*]] = select i1 [[CMP]], i32 -260, i32 [[SUB]]
-; CHECK-NEXT:    ret i32 [[SEL]]
+; CHECK-NEXT:    ret i32 [[SUB]]
 ;
   %and = and i32 %x, 255
   %cmp = icmp eq i32 %and, 6
@@ -721,10 +671,8 @@
 define i32 @test_sub_nsw__all_are_safe(i32 %x) {
 ; CHECK-LABEL: @test_sub_nsw__all_are_safe(
 ; CHECK-NEXT:    [[AND:%.*]] = and i32 [[X:%.*]], 255
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[AND]], 6
 ; CHECK-NEXT:    [[SUB:%.*]] = sub nuw nsw i32 -254, [[AND]]
-; CHECK-NEXT:    [[SEL:%.*]] = select i1 [[CMP]], i32 -260, i32 [[SUB]]
-; CHECK-NEXT:    ret i32 [[SEL]]
+; CHECK-NEXT:    ret i32 [[SUB]]
 ;
   %and = and i32 %x, 255
   %cmp = icmp eq i32 %and, 6
@@ -736,10 +684,8 @@
 define i32 @test_sub__all_are_safe(i32 %x) {
 ; CHECK-LABEL: @test_sub__all_are_safe(
 ; CHECK-NEXT:    [[AND:%.*]] = and i32 [[X:%.*]], 255
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[AND]], 6
 ; CHECK-NEXT:    [[SUB:%.*]] = sub nuw nsw i32 -254, [[AND]]
-; CHECK-NEXT:    [[SEL:%.*]] = select i1 [[CMP]], i32 -260, i32 [[SUB]]
-; CHECK-NEXT:    ret i32 [[SEL]]
+; CHECK-NEXT:    ret i32 [[SUB]]
 ;
   %and = and i32 %x, 255
   %cmp = icmp eq i32 %and, 6
@@ -766,10 +712,8 @@
 define i32 @test_sub_nuw__nuw_is_safe(i32 %x) {
 ; CHECK-LABEL: @test_sub_nuw__nuw_is_safe(
 ; CHECK-NEXT:    [[AND:%.*]] = and i32 [[X:%.*]], 2147483647
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[AND]], 1073741824
 ; CHECK-NEXT:    [[SUB:%.*]] = sub nuw i32 -2147483648, [[AND]]
-; CHECK-NEXT:    [[SEL:%.*]] = select i1 [[CMP]], i32 1073741824, i32 [[SUB]]
-; CHECK-NEXT:    ret i32 [[SEL]]
+; CHECK-NEXT:    ret i32 [[SUB]]
 ;
   %and = and i32 %x, 2147483647
   %cmp = icmp eq i32 %and, 1073741824
@@ -796,10 +740,8 @@
 define i32 @test_sub__nuw_is_safe(i32 %x) {
 ; CHECK-LABEL: @test_sub__nuw_is_safe(
 ; CHECK-NEXT:    [[AND:%.*]] = and i32 [[X:%.*]], 2147483647
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[AND]], 1073741824
 ; CHECK-NEXT:    [[SUB:%.*]] = sub nuw i32 -2147483648, [[AND]]
-; CHECK-NEXT:    [[SEL:%.*]] = select i1 [[CMP]], i32 1073741824, i32 [[SUB]]
-; CHECK-NEXT:    ret i32 [[SEL]]
+; CHECK-NEXT:    ret i32 [[SUB]]
 ;
   %and = and i32 %x, 2147483647
   %cmp = icmp eq i32 %and, 1073741824
@@ -841,10 +783,8 @@
 define i32 @test_sub_nsw__nsw_is_safe(i32 %x) {
 ; CHECK-LABEL: @test_sub_nsw__nsw_is_safe(
 ; CHECK-NEXT:    [[OR:%.*]] = or i32 [[X:%.*]], -2147483648
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[OR]], -2147483647
 ; CHECK-NEXT:    [[SUB:%.*]] = sub nsw i32 -2147483648, [[OR]]
-; CHECK-NEXT:    [[SEL:%.*]] = select i1 [[CMP]], i32 -1, i32 [[SUB]]
-; CHECK-NEXT:    ret i32 [[SEL]]
+; CHECK-NEXT:    ret i32 [[SUB]]
 ;
   %or = or i32 %x, -2147483648
   %cmp = icmp eq i32 %or, -2147483647
@@ -856,10 +796,8 @@
 define i32 @test_sub__nsw_is_safe(i32 %x) {
 ; CHECK-LABEL: @test_sub__nsw_is_safe(
 ; CHECK-NEXT:    [[OR:%.*]] = or i32 [[X:%.*]], -2147483648
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[OR]], -2147483647
 ; CHECK-NEXT:    [[SUB:%.*]] = sub nsw i32 -2147483648, [[OR]]
-; CHECK-NEXT:    [[SEL:%.*]] = select i1 [[CMP]], i32 -1, i32 [[SUB]]
-; CHECK-NEXT:    ret i32 [[SEL]]
+; CHECK-NEXT:    ret i32 [[SUB]]
 ;
   %or = or i32 %x, -2147483648
   %cmp = icmp eq i32 %or, -2147483647
@@ -921,10 +859,8 @@
 define i32 @test_mul_nuw_nsw__all_are_safe(i32 %x) {
 ; CHECK-LABEL: @test_mul_nuw_nsw__all_are_safe(
 ; CHECK-NEXT:    [[AND:%.*]] = and i32 [[X:%.*]], 255
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[AND]], 17
 ; CHECK-NEXT:    [[MUL:%.*]] = mul nuw nsw i32 [[AND]], 9
-; CHECK-NEXT:    [[SEL:%.*]] = select i1 [[CMP]], i32 153, i32 [[MUL]]
-; CHECK-NEXT:    ret i32 [[SEL]]
+; CHECK-NEXT:    ret i32 [[MUL]]
 ;
   %and = and i32 %x, 255
   %cmp = icmp eq i32 %and, 17
@@ -936,10 +872,8 @@
 define i32 @test_mul_nuw__all_are_safe(i32 %x) {
 ; CHECK-LABEL: @test_mul_nuw__all_are_safe(
 ; CHECK-NEXT:    [[AND:%.*]] = and i32 [[X:%.*]], 255
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[AND]], 17
 ; CHECK-NEXT:    [[MUL:%.*]] = mul nuw nsw i32 [[AND]], 9
-; CHECK-NEXT:    [[SEL:%.*]] = select i1 [[CMP]], i32 153, i32 [[MUL]]
-; CHECK-NEXT:    ret i32 [[SEL]]
+; CHECK-NEXT:    ret i32 [[MUL]]
 ;
   %and = and i32 %x, 255
   %cmp = icmp eq i32 %and, 17
@@ -951,10 +885,8 @@
 define i32 @test_mul_nsw__all_are_safe(i32 %x) {
 ; CHECK-LABEL: @test_mul_nsw__all_are_safe(
 ; CHECK-NEXT:    [[AND:%.*]] = and i32 [[X:%.*]], 255
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[AND]], 17
 ; CHECK-NEXT:    [[MUL:%.*]] = mul nuw nsw i32 [[AND]], 9
-; CHECK-NEXT:    [[SEL:%.*]] = select i1 [[CMP]], i32 153, i32 [[MUL]]
-; CHECK-NEXT:    ret i32 [[SEL]]
+; CHECK-NEXT:    ret i32 [[MUL]]
 ;
   %and = and i32 %x, 255
   %cmp = icmp eq i32 %and, 17
@@ -966,10 +898,8 @@
 define i32 @test_mul__all_are_safe(i32 %x) {
 ; CHECK-LABEL: @test_mul__all_are_safe(
 ; CHECK-NEXT:    [[AND:%.*]] = and i32 [[X:%.*]], 255
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[AND]], 17
 ; CHECK-NEXT:    [[MUL:%.*]] = mul nuw nsw i32 [[AND]], 9
-; CHECK-NEXT:    [[SEL:%.*]] = select i1 [[CMP]], i32 153, i32 [[MUL]]
-; CHECK-NEXT:    ret i32 [[SEL]]
+; CHECK-NEXT:    ret i32 [[MUL]]
 ;
   %and = and i32 %x, 255
   %cmp = icmp eq i32 %and, 17
@@ -996,10 +926,8 @@
 define i32 @test_mul_nuw__nuw_is_safe(i32 %x) {
 ; CHECK-LABEL: @test_mul_nuw__nuw_is_safe(
 ; CHECK-NEXT:    [[AND:%.*]] = and i32 [[X:%.*]], 268435457
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[AND]], 268435456
 ; CHECK-NEXT:    [[MUL:%.*]] = mul nuw i32 [[AND]], 9
-; CHECK-NEXT:    [[SEL:%.*]] = select i1 [[CMP]], i32 -1879048192, i32 [[MUL]]
-; CHECK-NEXT:    ret i32 [[SEL]]
+; CHECK-NEXT:    ret i32 [[MUL]]
 ;
   %and = and i32 %x, 268435457
   %cmp = icmp eq i32 %and, 268435456
@@ -1026,10 +954,8 @@
 define i32 @test_mul__nuw_is_safe(i32 %x) {
 ; CHECK-LABEL: @test_mul__nuw_is_safe(
 ; CHECK-NEXT:    [[AND:%.*]] = and i32 [[X:%.*]], 268435457
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[AND]], 268435456
 ; CHECK-NEXT:    [[MUL:%.*]] = mul nuw i32 [[AND]], 9
-; CHECK-NEXT:    [[SEL:%.*]] = select i1 [[CMP]], i32 -1879048192, i32 [[MUL]]
-; CHECK-NEXT:    ret i32 [[SEL]]
+; CHECK-NEXT:    ret i32 [[MUL]]
 ;
   %and = and i32 %x, 268435457
   %cmp = icmp eq i32 %and, 268435456
@@ -1071,10 +997,8 @@
 define i32 @test_mul_nsw__nsw_is_safe(i32 %x) {
 ; CHECK-LABEL: @test_mul_nsw__nsw_is_safe(
 ; CHECK-NEXT:    [[AND:%.*]] = or i32 [[X:%.*]], -83886080
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[AND]], -83886079
 ; CHECK-NEXT:    [[MUL:%.*]] = mul nsw i32 [[AND]], 9
-; CHECK-NEXT:    [[SEL:%.*]] = select i1 [[CMP]], i32 -754974711, i32 [[MUL]]
-; CHECK-NEXT:    ret i32 [[SEL]]
+; CHECK-NEXT:    ret i32 [[MUL]]
 ;
   %and = or i32 %x, -83886080
   %cmp = icmp eq i32 %and, -83886079
@@ -1086,10 +1010,8 @@
 define i32 @test_mul__nsw_is_safe(i32 %x) {
 ; CHECK-LABEL: @test_mul__nsw_is_safe(
 ; CHECK-NEXT:    [[AND:%.*]] = or i32 [[X:%.*]], -83886080
-; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[AND]], -83886079
 ; CHECK-NEXT:    [[MUL:%.*]] = mul nsw i32 [[AND]], 9
-; CHECK-NEXT:    [[SEL:%.*]] = select i1 [[CMP]], i32 -754974711, i32 [[MUL]]
-; CHECK-NEXT:    ret i32 [[SEL]]
+; CHECK-NEXT:    ret i32 [[MUL]]
 ;
   %and = or i32 %x, -83886080
   %cmp = icmp eq i32 %and, -83886079