Index: lib/Transforms/Scalar/LoopStrengthReduce.cpp
===================================================================
--- lib/Transforms/Scalar/LoopStrengthReduce.cpp
+++ lib/Transforms/Scalar/LoopStrengthReduce.cpp
@@ -2088,7 +2088,26 @@
 LSRInstance::OptimizeLoopTermCond() {
   SmallPtrSet<Instruction *, 4> PostIncs;
 
+  // We need a different set of heuristics for rotated and non-rotated loops.
+  // If a loop is rotated then the latch is also the backedge, so inserting
+  // post-inc expressions just before the latch is ideal. To reduce live ranges
+  // it also makes sense to rewrite terminating conditions to use post-inc
+  // expressions.
+  //
+  // If the loop is not rotated then the latch is not a backedge; the latch
+  // check is done in the loop head. Adding post-inc expressions before the
+  // latch will cause overlapping live-ranges of pre-inc and post-inc expressions
+  // in the loop body. In this case we do *not* want to use post-inc expressions
+  // in the latch check, and we want to insert post-inc expressions before
+  // the backedge.
   BasicBlock *LatchBlock = L->getLoopLatch();
+  if (L->getExitingBlock() && LatchBlock != L->getExitingBlock()) {
+    // The backedge doesn't exit the loop; treat this as a head-tested loop.
+    IVIncInsertPos = LatchBlock->getTerminator();
+    return;
+  }
+
+  // Otherwise treat this as a rotated loop.
   SmallVector<BasicBlock*, 8> ExitingBlocks;
   L->getExitingBlocks(ExitingBlocks);
 
Index: test/CodeGen/Hexagon/hwloop-loop1.ll
===================================================================
--- test/CodeGen/Hexagon/hwloop-loop1.ll
+++ test/CodeGen/Hexagon/hwloop-loop1.ll
@@ -2,8 +2,6 @@
 ;
 ; Generate loop1 instruction for double loop sequence.
 
-; CHECK: loop0(.LBB{{.}}_{{.}}, #100)
-; CHECK: endloop0
 ; CHECK: loop1(.LBB{{.}}_{{.}}, #100)
 ; CHECK: loop0(.LBB{{.}}_{{.}}, #100)
 ; CHECK: endloop0
Index: test/CodeGen/X86/lsr-loop-exit-cond.ll
===================================================================
--- test/CodeGen/X86/lsr-loop-exit-cond.ll
+++ test/CodeGen/X86/lsr-loop-exit-cond.ll
@@ -3,12 +3,12 @@
 
 ; CHECK-LABEL: t:
 ; CHECK: movl (%r9,%rax,4), %e{{..}}
-; CHECK-NEXT: decq
+; CHECK-NEXT: testq
 ; CHECK-NEXT: jne
 
 ; ATOM-LABEL: t:
 ; ATOM: movl (%r9,%r{{.+}},4), %e{{..}}
-; ATOM-NEXT: decq
+; ATOM-NEXT: testq
 ; ATOM-NEXT: jne
 
 @Te0 = external global [256 x i32]	; <[256 x i32]*> [#uses=5]
Index: test/Transforms/LoopStrengthReduce/post-inc-optsize.ll
===================================================================
--- /dev/null
+++ test/Transforms/LoopStrengthReduce/post-inc-optsize.ll
@@ -0,0 +1,41 @@
+; RUN: opt < %s -loop-reduce -S | FileCheck %s
+
+target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
+target triple = "thumbv7m-arm-none-eabi"
+
+; Check that the PHIs are not transformed to post-inc, and that they are inserted in the
+; latch block. Inserting them in the header block doubles register pressure and adds
+; moves.
+
+; CHECK-LABEL: @f
+; CHECK: while.body:
+; CHECK: incdec.ptr =
+; CHECK: incdec.ptr1 =
+; CHECK: incdec.ptr2 =
+; CHECK: dec =
+define void @f(float* nocapture readonly %a, float* nocapture readonly %b, float* nocapture %c, i32 %n) {
+entry:
+  br label %while.cond
+
+while.cond:                                       ; preds = %while.body, %entry
+  %a.addr.0 = phi float* [ %a, %entry ], [ %incdec.ptr, %while.body ]
+  %b.addr.0 = phi float* [ %b, %entry ], [ %incdec.ptr1, %while.body ]
+  %c.addr.0 = phi float* [ %c, %entry ], [ %incdec.ptr2, %while.body ]
+  %n.addr.0 = phi i32 [ %n, %entry ], [ %dec, %while.body ]
+  %cmp = icmp sgt i32 %n.addr.0, 0
+  br i1 %cmp, label %while.body, label %while.end
+
+while.body:                                       ; preds = %while.cond
+  %incdec.ptr = getelementptr inbounds float, float* %a.addr.0, i32 1
+  %0 = load float, float* %a.addr.0, align 4
+  %incdec.ptr1 = getelementptr inbounds float, float* %b.addr.0, i32 1
+  %1 = load float, float* %b.addr.0, align 4
+  %add = fadd float %0, %1
+  %incdec.ptr2 = getelementptr inbounds float, float* %c.addr.0, i32 1
+  store float %add, float* %c.addr.0, align 4
+  %dec = add nsw i32 %n.addr.0, -1
+  br label %while.cond
+
+while.end:                                        ; preds = %while.cond
+  ret void
+}