diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp
--- a/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -2541,6 +2541,48 @@
     }
   }
 
+  if (Ops.size() == 2) {
+    // Check if we have an expression of the form ((X + C1) - C2), where C1 and
+    // C2 can be folded in a way that allows retaining wrapping flags of (X +
+    // C1).
+    const SCEV *A = Ops[0];
+    const SCEV *B = Ops[1];
+    auto *AddExpr = dyn_cast<SCEVAddExpr>(B);
+    auto *C = dyn_cast<SCEVConstant>(A);
+    if (AddExpr && C && isa<SCEVConstant>(AddExpr->getOperand(0))) {
+      auto C1 = cast<SCEVConstant>(AddExpr->getOperand(0))->getAPInt();
+      auto C2 = C->getAPInt();
+      SCEV::NoWrapFlags PreservedFlags = SCEV::FlagAnyWrap;
+
+      APInt ConstAdd = C1 + C2;
+      auto AddFlags = AddExpr->getNoWrapFlags();
+      // Adding a smaller constant is NUW if the original AddExpr was NUW.
+      if (ScalarEvolution::maskFlags(AddFlags, SCEV::FlagNUW) ==
+              SCEV::FlagNUW &&
+          ConstAdd.ule(C1)) {
+        PreservedFlags =
+            ScalarEvolution::setFlags(PreservedFlags, SCEV::FlagNUW);
+      }
+
+      // Adding a constant with the same sign and small magnitude is NSW, if the
+      // original AddExpr was NSW.
+      if (ScalarEvolution::maskFlags(AddFlags, SCEV::FlagNSW) ==
+              SCEV::FlagNSW &&
+          C1.isSignBitSet() == ConstAdd.isSignBitSet() &&
+          ConstAdd.abs().ule(C1.abs())) {
+        PreservedFlags =
+            ScalarEvolution::setFlags(PreservedFlags, SCEV::FlagNSW);
+      }
+
+      if (PreservedFlags != SCEV::FlagAnyWrap) {
+        SmallVector<const SCEV *, 4> NewOps(AddExpr->op_begin(),
+                                            AddExpr->op_end());
+        NewOps[0] = getConstant(ConstAdd);
+        return getAddExpr(NewOps, PreservedFlags);
+      }
+    }
+  }
+
   // Skip past any other cast SCEVs.
   while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr)
     ++Idx;
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/reverse-memcheck-bounds.ll b/llvm/test/Analysis/LoopAccessAnalysis/reverse-memcheck-bounds.ll
--- a/llvm/test/Analysis/LoopAccessAnalysis/reverse-memcheck-bounds.ll
+++ b/llvm/test/Analysis/LoopAccessAnalysis/reverse-memcheck-bounds.ll
@@ -16,7 +16,7 @@
 target triple = "aarch64--linux-gnueabi"
 
 ; CHECK: function 'f':
-; CHECK: (Low: (20000 + %a) High: (60004 + %a))
+; CHECK: (Low: (20000 + %a) High: (60004 + %a)<nuw>)
 
 @B = common global i32* null, align 8
 @A = common global i32* null, align 8
diff --git a/llvm/test/Transforms/IndVarSimplify/eliminate-exit-no-dl.ll b/llvm/test/Transforms/IndVarSimplify/eliminate-exit-no-dl.ll
--- a/llvm/test/Transforms/IndVarSimplify/eliminate-exit-no-dl.ll
+++ b/llvm/test/Transforms/IndVarSimplify/eliminate-exit-no-dl.ll
@@ -12,15 +12,12 @@
 define void @foo() {
 ; CHECK-LABEL: @foo(
 ; CHECK-NEXT:  bb:
-; CHECK-NEXT:    [[UMIN:%.*]] = call i64 @llvm.umin.i64(i64 add (i64 ptrtoint ([0 x i8]* @global to i64), i64 500), i64 add (i64 ptrtoint ([0 x i8]* @global to i64), i64 1))
-; CHECK-NEXT:    [[TMP0:%.*]] = sub i64 add (i64 ptrtoint ([0 x i8]* @global to i64), i64 1), [[UMIN]]
-; CHECK-NEXT:    [[TMP1:%.*]] = icmp ne i64 [[TMP0]], 0
 ; CHECK-NEXT:    br label [[BB3:%.*]]
 ; CHECK:       bb3:
 ; CHECK-NEXT:    [[TMP:%.*]] = phi i8* [ [[TMP4:%.*]], [[BB7:%.*]] ], [ getelementptr inbounds ([0 x i8], [0 x i8]* @global, i64 0, i64 2), [[BB:%.*]] ]
 ; CHECK-NEXT:    [[TMP4]] = getelementptr inbounds i8, i8* [[TMP]], i64 -1
 ; CHECK-NEXT:    [[TMP6:%.*]] = load i8, i8* [[TMP4]], align 1
-; CHECK-NEXT:    br i1 [[TMP1]], label [[BB7]], label [[BB11:%.*]]
+; CHECK-NEXT:    br i1 false, label [[BB7]], label [[BB11:%.*]]
 ; CHECK:       bb7:
 ; CHECK-NEXT:    [[TMP8:%.*]] = zext i8 [[TMP6]] to i64
 ; CHECK-NEXT:    br i1 true, label [[BB11]], label [[BB3]]