Index: lib/Analysis/ScalarEvolution.cpp
===================================================================
--- lib/Analysis/ScalarEvolution.cpp
+++ lib/Analysis/ScalarEvolution.cpp
@@ -2280,7 +2280,10 @@
       SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(), AddRec->op_end());
-      AddRecOps[0] = getAddExpr(LIOps);
+      // This follows from the fact that the no-wrap flags on the outer add
+      // expression are applicable on the 0th iteration, when the add recurrence
+      // will be equal to its start value.
+      AddRecOps[0] = getAddExpr(LIOps, Flags);
       // Build the new addrec. Propagate the NUW and NSW flags if both the
       // outer add and the inner addrec are guaranteed to have no overflow.
Index: test/Analysis/LoopAccessAnalysis/number-of-memchecks.ll
===================================================================
--- test/Analysis/LoopAccessAnalysis/number-of-memchecks.ll
+++ test/Analysis/LoopAccessAnalysis/number-of-memchecks.ll
@@ -96,11 +96,11 @@
 ; CHECK-NEXT: Grouped accesses:
 ; CHECK-NEXT: Group {{.*}}[[ZERO]]:
 ; CHECK-NEXT: (Low: %c High: (78 + %c))
-; CHECK-NEXT: Member: {(2 + %c),+,4}
+; CHECK-NEXT: Member: {(2 + %c),+,4}
 ; CHECK-NEXT: Member: {%c,+,4}
 ; CHECK-NEXT: Group {{.*}}[[ONE]]:
 ; CHECK-NEXT: (Low: %a High: (40 + %a))
-; CHECK-NEXT: Member: {(2 + %a),+,2}
+; CHECK-NEXT: Member: {(2 + %a),+,2}
 ; CHECK-NEXT: Member: {%a,+,2}
 ; CHECK-NEXT: Group {{.*}}[[TWO]]:
 ; CHECK-NEXT: (Low: %b High: (38 + %b))
@@ -168,7 +168,7 @@
 ; CHECK-NEXT: Grouped accesses:
 ; CHECK-NEXT: Group {{.*}}[[ZERO]]:
 ; CHECK-NEXT: (Low: %c High: (78 + %c))
-; CHECK-NEXT: Member: {(2 + %c),+,4}
+; CHECK-NEXT: Member: {(2 + %c),+,4}
 ; CHECK-NEXT: Member: {%c,+,4}
 ; CHECK-NEXT: Group {{.*}}[[ONE]]:
 ; CHECK-NEXT: (Low: %a High: (40 + %a))
@@ -246,8 +246,8 @@
 ; CHECK-NEXT: %arrayidxA2 = getelementptr i16, i16* %a, i64 %ind2
 ; CHECK-NEXT: Grouped accesses:
 ; CHECK-NEXT: Group {{.*}}[[ZERO]]:
-; CHECK-NEXT: (Low: ((2 * %offset) + %a) High: (9998 + (2 * %offset) + %a))
-; CHECK-NEXT: Member: {((2 * %offset) + %a),+,2}<%for.body>
+; CHECK-NEXT: (Low: ((2 * %offset) + %a) High: (9998 + (2 * %offset) + %a))
+; CHECK-NEXT: Member: {((2 * %offset) + %a),+,2}<%for.body>
 ; CHECK-NEXT: Group {{.*}}[[ONE]]:
 ; CHECK-NEXT: (Low: %a High: (9998 + %a))
 ; CHECK-NEXT: Member: {%a,+,2}<%for.body>
Index: test/Analysis/LoopAccessAnalysis/reverse-memcheck-bounds.ll
===================================================================
--- test/Analysis/LoopAccessAnalysis/reverse-memcheck-bounds.ll
+++ test/Analysis/LoopAccessAnalysis/reverse-memcheck-bounds.ll
@@ -15,7 +15,7 @@
 target triple = "aarch64--linux-gnueabi"
 ; CHECK: function 'f':
-; CHECK: (Low: (20000 + %a) High: (60000 + %a))
+; CHECK: (Low: (20000 + %a) High: (60000 + %a))
 @B = common global i32* null, align 8
 @A = common global i32* null, align 8
@@ -58,7 +58,7 @@
 ; Here it is not obvious what the limits are, since 'step' could be negative.
 ; CHECK: Low: (-1 + (-1 * ((-60001 + (-1 * %a)) umax (-60001 + (40000 * %step) + (-1 * %a)))))
-; CHECK: High: ((60000 + %a) umax (60000 + (-40000 * %step) + %a))
+; CHECK: High: ((60000 + %a) umax (60000 + (-40000 * %step) + %a))
 define void @g(i64 %step) {
 entry:
Index: test/Analysis/ScalarEvolution/flags-from-poison.ll
===================================================================
--- test/Analysis/ScalarEvolution/flags-from-poison.ll
+++ test/Analysis/ScalarEvolution/flags-from-poison.ll
@@ -346,7 +346,7 @@
   %j = add nsw i32 %i, 1
 ; CHECK: %index32 =
-; CHECK: --> {(1 + %offset),+,1}
+; CHECK: --> {(1 + %offset),+,1}
   %index32 = add nsw i32 %j, %offset
   %ptr = getelementptr inbounds float, float* %input, i32 %index32
@@ -488,7 +488,7 @@
   %i = phi i32 [ %nexti, %loop ], [ %start, %entry ]
 ; CHECK: %index32 =
-; CHECK: --> {((-1 * %halfsub) + %start),+,1}
+; CHECK: --> {((-1 * %halfsub) + %start),+,1}
   %index32 = sub nsw i32 %i, %halfsub
   %index64 = sext i32 %index32 to i64
@@ -547,7 +547,7 @@
   %j = add nsw i32 %i, 1
 ; CHECK: %index32 =
-; CHECK: --> {(1 + (-1 * %offset)),+,1}
+; CHECK: --> {(1 + (-1 * %offset)),+,1}
   %index32 = sub nsw i32 %j, %offset
   %ptr = getelementptr inbounds float, float* %input, i32 %index32
Index: test/Analysis/ScalarEvolution/nsw-offset-assume.ll
===================================================================
--- test/Analysis/ScalarEvolution/nsw-offset-assume.ll
+++ test/Analysis/ScalarEvolution/nsw-offset-assume.ll
@@ -39,7 +39,7 @@
   %8 = sext i32 %7 to i64                         ; <i64> [#uses=1]
 ; CHECK: %9 = getelementptr inbounds double, double* %q, i64 %8
-; CHECK: {(8 + %q),+,16}<%bb>
+; CHECK: {(8 + %q),+,16}<%bb>
   %9 = getelementptr inbounds double, double* %q, i64 %8 ; <double*> [#uses=1]
 ; Artificially repeat the above three instructions, this time using
@@ -51,7 +51,7 @@
   %t8 = sext i32 %t7 to i64                       ; <i64> [#uses=1]
 ; CHECK: %t9 = getelementptr inbounds double, double* %q, i64 %t8
-; CHECK: {(8 + %q),+,16}<%bb>
+; CHECK: {(8 + %q),+,16}<%bb>
   %t9 = getelementptr inbounds double, double* %q, i64 %t8 ; <double*> [#uses=1]
   %10 = load double, double* %9, align 8          ; <double> [#uses=1]
Index: test/Analysis/ScalarEvolution/nsw-offset.ll
===================================================================
--- test/Analysis/ScalarEvolution/nsw-offset.ll
+++ test/Analysis/ScalarEvolution/nsw-offset.ll
@@ -37,7 +37,7 @@
   %8 = sext i32 %7 to i64                         ; <i64> [#uses=1]
 ; CHECK: %9 = getelementptr inbounds double, double* %q, i64 %8
-; CHECK: {(8 + %q),+,16}<%bb>
+; CHECK: {(8 + %q),+,16}<%bb>
   %9 = getelementptr inbounds double, double* %q, i64 %8 ; <double*> [#uses=1]
 ; Artificially repeat the above three instructions, this time using
@@ -49,7 +49,7 @@
   %t8 = sext i32 %t7 to i64                       ; <i64> [#uses=1]
 ; CHECK: %t9 = getelementptr inbounds double, double* %q, i64 %t8
-; CHECK: {(8 + %q),+,16}<%bb>
+; CHECK: {(8 + %q),+,16}<%bb>
   %t9 = getelementptr inbounds double, double* %q, i64 %t8 ; <double*> [#uses=1]
   %10 = load double, double* %9, align 8          ; <double> [#uses=1]
Index: test/Analysis/ScalarEvolution/nsw.ll
===================================================================
--- test/Analysis/ScalarEvolution/nsw.ll
+++ test/Analysis/ScalarEvolution/nsw.ll
@@ -66,7 +66,7 @@
   store i32 0, i32* %__first.addr.02.i.i, align 4
   %ptrincdec.i.i = getelementptr inbounds i32, i32* %__first.addr.02.i.i, i64 1
 ; CHECK: %ptrincdec.i.i
-; CHECK-NEXT: --> {(4 + %begin),+,4}<%for.body.i.i>
+; CHECK-NEXT: --> {(4 + %begin),+,4}<%for.body.i.i>
   %cmp.i.i = icmp eq i32* %ptrincdec.i.i, %end
   br i1 %cmp.i.i, label %for.cond.for.end_crit_edge.i.i, label %for.body.i.i
@@ -92,7 +92,7 @@
 ; CHECK: {1,+,1}<%for.body.i.i>
   %ptrincdec.i.i = getelementptr inbounds i32, i32* %begin, i64 %tmp
 ; CHECK: %ptrincdec.i.i =
-; CHECK: {(4 + %begin),+,4}<%for.body.i.i>
+; CHECK: {(4 + %begin),+,4}<%for.body.i.i>
   %__first.addr.08.i.i = getelementptr inbounds i32, i32* %begin, i64 %indvar.i.i
 ; CHECK: %__first.addr.08.i.i
 ; CHECK: {%begin,+,4}<%for.body.i.i>
@@ -124,7 +124,7 @@
 }
 ; CHECK-LABEL: PR12375
-; CHECK: --> {(4 + %arg),+,4}<%bb1>{{ U: [^ ]+ S: [^ ]+}}{{ *}}Exits: (8 + %arg)
+; CHECK: --> {(4 + %arg),+,4}<%bb1>{{ U: [^ ]+ S: [^ ]+}}{{ *}}Exits: (8 + %arg)
 define i32 @PR12375(i32* readnone %arg) {
 bb:
   %tmp = getelementptr inbounds i32, i32* %arg, i64 2
@@ -143,7 +143,7 @@
 }
 ; CHECK-LABEL: PR12376
-; CHECK: --> {(4 + %arg),+,4}<%bb2>{{ U: [^ ]+ S: [^ ]+}}{{ *}}Exits: (4 + (4 * ((3 + (-1 * %arg) + (%arg umax %arg1)) /u 4)) + %arg)
+; CHECK: --> {(4 + %arg),+,4}<%bb2>{{ U: [^ ]+ S: [^ ]+}}{{ *}}Exits: (4 + (4 * ((3 + (-1 * %arg) + (%arg umax %arg1)) /u 4)) + %arg)
 define void @PR12376(i32* nocapture %arg, i32* nocapture %arg1) {
 bb:
   br label %bb2
@@ -177,3 +177,27 @@
 for.end:
   ret void
 }
+
+; This test checks that no-wrap flags are propagated when folding {S,+,X}+T ==> {S+T,+,X}
+; CHECK-LABEL: test4
+; CHECK: %idxprom
+; CHECK-NEXT: --> {(-2 + (sext i32 %arg to i64)),+,1}<%for.body>
+define void @test4(i32 %arg) {
+entry:
+  %array = alloca [10 x i32], align 4
+  br label %for.body
+
+for.body:
+  %index = phi i32 [ %inc5, %for.body ], [ %arg, %entry ]
+  %sub = add nsw i32 %index, -2
+  %idxprom = sext i32 %sub to i64
+  %arrayidx = getelementptr inbounds [10 x i32], [10 x i32]* %array, i64 0, i64 %idxprom
+  %data = load i32, i32* %arrayidx, align 4
+  %inc5 = add nsw i32 %index, 1
+  %cmp2 = icmp slt i32 %inc5, 10
+  br i1 %cmp2, label %for.body, label %for.end
+
+for.end:
+  ret void
+}
+
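To make the reasoning in the new ScalarEvolution.cpp comment concrete: the fold rewrites {S,+,X} + T into {S+T,+,X}, and on the 0th iteration the recurrence contributes exactly S, so no-wrap facts established for the outer add also cover the freshly built start expression S + T. The standalone C++ sketch below is illustrative only and not part of the patch; AffineAddRec and evaluateAt are hypothetical helpers, not SCEV APIs.

```cpp
#include <cassert>
#include <cstdint>

// Hypothetical stand-in for an affine add recurrence {Start,+,Step}:
// its value on iteration I is Start + I * Step. Not a SCEV API.
struct AffineAddRec {
  int64_t Start;
  int64_t Step;
  int64_t evaluateAt(int64_t I) const { return Start + I * Step; }
};

int main() {
  const AffineAddRec Rec{/*Start=*/7, /*Step=*/3}; // {7,+,3}
  const int64_t T = -2;                            // loop-invariant addend

  // The fold performed in getAddExpr: {S,+,X} + T ==> {S+T,+,X}.
  const AffineAddRec Folded{Rec.Start + T, Rec.Step};

  // The two forms agree on every iteration.
  for (int64_t I = 0; I < 10; ++I)
    assert(Folded.evaluateAt(I) == Rec.evaluateAt(I) + T);

  // On iteration 0 the recurrence is exactly its start value, which is the
  // observation behind passing the outer add's no-wrap flags to
  // getAddExpr(LIOps, Flags) when building the new start expression.
  assert(Rec.evaluateAt(0) == Rec.Start);
  return 0;
}
```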