Index: llvm/trunk/lib/Transforms/Scalar/LoopStrengthReduce.cpp
===================================================================
--- llvm/trunk/lib/Transforms/Scalar/LoopStrengthReduce.cpp
+++ llvm/trunk/lib/Transforms/Scalar/LoopStrengthReduce.cpp
@@ -3638,32 +3638,62 @@
 void LSRInstance::GenerateCombinations(LSRUse &LU, unsigned LUIdx,
                                        Formula Base) {
   // This method is only interesting on a plurality of registers.
-  if (Base.BaseRegs.size() + (Base.Scale == 1) <= 1)
+  if (Base.BaseRegs.size() + (Base.Scale == 1) +
+      (Base.UnfoldedOffset != 0) <= 1)
     return;
 
   // Flatten the representation, i.e., reg1 + 1*reg2 => reg1 + reg2, before
   // processing the formula.
   Base.unscale();
-  Formula F = Base;
-  F.BaseRegs.clear();
   SmallVector<const SCEV *, 4> Ops;
+  Formula NewBase = Base;
+  NewBase.BaseRegs.clear();
+  Type *CombinedIntegerType = nullptr;
   for (const SCEV *BaseReg : Base.BaseRegs) {
     if (SE.properlyDominates(BaseReg, L->getHeader()) &&
-        !SE.hasComputableLoopEvolution(BaseReg, L))
+        !SE.hasComputableLoopEvolution(BaseReg, L)) {
+      if (!CombinedIntegerType)
+        CombinedIntegerType = SE.getEffectiveSCEVType(BaseReg->getType());
       Ops.push_back(BaseReg);
+    }
     else
-      F.BaseRegs.push_back(BaseReg);
+      NewBase.BaseRegs.push_back(BaseReg);
   }
-  if (Ops.size() > 1) {
-    const SCEV *Sum = SE.getAddExpr(Ops);
+
+  // If no register is relevant, we're done.
+  if (Ops.size() == 0)
+    return;
+
+  // Utility function for generating the required variants of the combined
+  // registers.
+  auto GenerateFormula = [&](const SCEV *Sum) {
+    Formula F = NewBase;
+
     // TODO: If Sum is zero, it probably means ScalarEvolution missed an
     // opportunity to fold something. For now, just ignore such cases
     // rather than proceed with zero in a register.
-    if (!Sum->isZero()) {
-      F.BaseRegs.push_back(Sum);
-      F.canonicalize(*L);
-      (void)InsertFormula(LU, LUIdx, F);
-    }
+    if (Sum->isZero())
+      return;
+
+    F.BaseRegs.push_back(Sum);
+    F.canonicalize(*L);
+    (void)InsertFormula(LU, LUIdx, F);
+  };
+
+  // If we collected at least two registers, generate a formula combining them.
+  if (Ops.size() > 1) {
+    SmallVector<const SCEV *, 4> OpsCopy(Ops); // Don't let SE modify Ops.
+    GenerateFormula(SE.getAddExpr(OpsCopy));
+  }
+
+  // If we have an unfolded offset, generate a formula combining it with the
+  // registers collected.
+  if (NewBase.UnfoldedOffset) {
+    assert(CombinedIntegerType && "Missing a type for the unfolded offset");
+    Ops.push_back(SE.getConstant(CombinedIntegerType, NewBase.UnfoldedOffset,
+                                 true));
+    NewBase.UnfoldedOffset = 0;
+    GenerateFormula(SE.getAddExpr(Ops));
   }
 }
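The SmallVector copy guarding the first GenerateFormula call matters:
ScalarEvolution::getAddExpr takes its operand list by non-const reference and
may rewrite it while folding, so combining Ops directly and then reusing Ops
for the unfolded-offset variant builds the second formula from stale operands.
That is the r345114 bug the two-combinations-bug.ll test below covers. A
minimal standalone sketch of the hazard, with std::vector<int> and a fake
getAddExpr standing in for SmallVector<const SCEV *, 4> and the real
ScalarEvolution API (nothing in this sketch is LLVM code):

    #include <iostream>
    #include <numeric>
    #include <vector>

    // Toy stand-in for ScalarEvolution::getAddExpr: like the real one, it
    // takes its operand list by non-const reference and may clobber it while
    // folding, so callers that need the operands again must pass a copy.
    int getAddExpr(std::vector<int> &Ops) {
      int Sum = std::accumulate(Ops.begin(), Ops.end(), 0);
      Ops.assign({0}); // simulate use of Ops as scratch space while folding
      return Sum;
    }

    int main() {
      std::vector<int> Ops = {10, 20}; // two loop-invariant "registers"

      // First combination: the registers alone. Combine a copy, as the patch
      // does with OpsCopy, so Ops still holds the original operands after.
      std::vector<int> OpsCopy(Ops);
      std::cout << "regs:          " << getAddExpr(OpsCopy) << '\n'; // 30

      // Second combination: the registers plus the unfolded offset. Had the
      // first call consumed Ops directly, this would sum the stale scratch
      // contents (28 here) instead of 10 + 20 + 28 = 58.
      Ops.push_back(28);
      std::cout << "regs + offset: " << getAddExpr(Ops) << '\n'; // 58
    }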
Index: llvm/trunk/test/Transforms/LoopStrengthReduce/AArch64/small-constant.ll
===================================================================
--- llvm/trunk/test/Transforms/LoopStrengthReduce/AArch64/small-constant.ll
+++ llvm/trunk/test/Transforms/LoopStrengthReduce/AArch64/small-constant.ll
@@ -2,45 +2,10 @@
 ; RUN: llc < %s -mtriple=aarch64-unknown-unknown | FileCheck %s
 
-; LSR doesn't consider bumping a pointer by constants outside the loop when the
-; constants fit as immediate add operands. The constants are re-associated as an
-; unfolded offset rather than a register and are not combined later with
-; loop-invariant registers. For large-enough constants LSR produces better
-; solutions for these test cases, with test1 switching from:
-;
-; The chosen solution requires 2 instructions 2 regs, with addrec cost 1, plus 1 scale cost, plus 4 imm cost, plus 1 setup cost:
-; LSR Use: Kind=ICmpZero, Offsets={0}, widest fixup type: i64
-;   -7 + reg({(7 + %start),+,1}<%for.body>)
-; LSR Use: Kind=Address of float in addrspace(0), Offsets={0}, widest fixup type: float*
-;   reg(%arr) + 4*reg({(7 + %start),+,1}<%for.body>)
-;
-; to:
-;
-; The chosen solution requires 1 instruction 2 regs, with addrec cost 1, plus 1 scale cost, plus 1 setup cost:
-; LSR Use: Kind=ICmpZero, Offsets={0}, widest fixup type: i64
-;   reg({%start,+,1}<%for.body>)
-; LSR Use: Kind=Address of float in addrspace(0), Offsets={0}, widest fixup type: float*
-;   reg((88888 + %arr)) + 4*reg({%start,+,1}<%for.body>)
-;
-; and test2 switching from:
-;
-; The chosen solution requires 2 instructions 2 regs, with addrec cost 1, plus 1 base add, plus 1 scale cost:
-; LSR Use: Kind=ICmpZero, Offsets={0}, widest fixup type: i64
-;   reg({%start,+,1}<%for.body>)
-; LSR Use: Kind=Basic, Offsets={0}, widest fixup type: i64
-;   reg({%start,+,1}<%for.body>)
-; LSR Use: Kind=Address of float in addrspace(0), Offsets={0}, widest fixup type: float*
-;   reg(%arr) + 4*reg({%start,+,1}<%for.body>) + imm(28)
-;
-; to:
-;
-; The chosen solution requires 1 instruction 2 regs, with addrec cost 1, plus 1 scale cost, plus 1 setup cost:
-; LSR Use: Kind=ICmpZero, Offsets={0}, widest fixup type: i64
-;   reg({%start,+,1}<%for.body>)
-; LSR Use: Kind=Basic, Offsets={0}, widest fixup type: i64
-;   reg({%start,+,1}<%for.body>)
-; LSR Use: Kind=Address of float in addrspace(0), Offsets={0}, widest fixup type: float*
-;   reg((88888 + %arr)) + 4*reg({%start,+,1}<%for.body>)
+; Test that LSR gives small constants, which get re-associated as an unfolded
+; offset, a chance to be combined with loop-invariant registers, just as it
+; does for large constants that do not fit as add immediate operands. With
+; that option, LSR here prefers to bump the base pointer outside the loop.
 
 ; float test(float *arr, long long start, float threshold) {
 ;   for (long long i = start; i != 0; ++i) {
@@ -56,17 +21,16 @@
 ; CHECK-NEXT:    fmov s2, #-7.00000000
 ; CHECK-NEXT:    cbz x1, .LBB0_5
 ; CHECK-NEXT:  // %bb.1: // %for.body.preheader
-; CHECK-NEXT:    add x8, x1, #7 // =7
+; CHECK-NEXT:    add x8, x0, #28 // =28
 ; CHECK-NEXT:  .LBB0_2: // %for.body
 ; CHECK-NEXT:  // =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    ldr s1, [x0, x8, lsl #2]
+; CHECK-NEXT:    ldr s1, [x8, x1, lsl #2]
 ; CHECK-NEXT:    fcmp s1, s0
 ; CHECK-NEXT:    b.gt .LBB0_6
 ; CHECK-NEXT:  // %bb.3: // %for.cond
 ; CHECK-NEXT:  // in Loop: Header=BB0_2 Depth=1
-; CHECK-NEXT:    add x8, x8, #1 // =1
-; CHECK-NEXT:    cmp x8, #7 // =7
-; CHECK-NEXT:    b.ne .LBB0_2
+; CHECK-NEXT:    add x1, x1, #1 // =1
+; CHECK-NEXT:    cbnz x1, .LBB0_2
 ; CHECK-NEXT:  // %bb.4:
 ; CHECK-NEXT:    mov v0.16b, v2.16b
 ; CHECK-NEXT:    ret
@@ -104,26 +68,27 @@
 ; CHECK-LABEL: test2:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    fmov s2, #-7.00000000
-; CHECK-NEXT:    cbz x1, .LBB1_4
-; CHECK-NEXT:  .LBB1_1: // %for.body
+; CHECK-NEXT:    cbz x1, .LBB1_5
+; CHECK-NEXT:  // %bb.1: // %for.body.preheader
+; CHECK-NEXT:    add x8, x0, #28 // =28
+; CHECK-NEXT:  .LBB1_2: // %for.body
 ; CHECK-NEXT:  // =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    add x8, x0, x1, lsl #2
-; CHECK-NEXT:    ldr s1, [x8, #28]
+; CHECK-NEXT:    ldr s1, [x8, x1, lsl #2]
 ; CHECK-NEXT:    scvtf s3, x1
 ; CHECK-NEXT:    fadd s3, s3, s0
 ; CHECK-NEXT:    fcmp s1, s3
-; CHECK-NEXT:    b.gt .LBB1_5
-; CHECK-NEXT:  // %bb.2: // %for.cond
-; CHECK-NEXT:  // in Loop: Header=BB1_1 Depth=1
+; CHECK-NEXT:    b.gt .LBB1_6
+; CHECK-NEXT:  // %bb.3: // %for.cond
+; CHECK-NEXT:  // in Loop: Header=BB1_2 Depth=1
 ; CHECK-NEXT:    add x1, x1, #1 // =1
-; CHECK-NEXT:    cbnz x1, .LBB1_1
-; CHECK-NEXT:  // %bb.3:
+; CHECK-NEXT:    cbnz x1, .LBB1_2
+; CHECK-NEXT:  // %bb.4:
 ; CHECK-NEXT:    mov v0.16b, v2.16b
 ; CHECK-NEXT:    ret
-; CHECK-NEXT:  .LBB1_4:
+; CHECK-NEXT:  .LBB1_5:
 ; CHECK-NEXT:    mov v0.16b, v2.16b
 ; CHECK-NEXT:    ret
-; CHECK-NEXT:  .LBB1_5: // %cleanup4
+; CHECK-NEXT:  .LBB1_6: // %cleanup4
 ; CHECK-NEXT:    mov v0.16b, v1.16b
 ; CHECK-NEXT:    ret
 entry:
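For reference, a plausible source for test1 above, pieced together from the
comment lines kept in the first hunk and from the CHECK lines; only the
signature and loop header appear in the hunk, so the loop body here is an
inferred sketch, not the test file verbatim. The arr[i + 7] access is where
the 28-byte (7 * sizeof(float)) unfolded offset comes from, now folded into
the base pointer by the "add x8, x0, #28" before the loop:

    // Inferred source for test1; only the first two lines appear verbatim in
    // the test's comment. test2 differs by comparing against a value that
    // depends on i (hence the scvtf/fadd in its CHECK lines).
    float test(float *arr, long long start, float threshold) {
      for (long long i = start; i != 0; ++i) {
        float x = arr[i + 7]; // +7 floats = 28 bytes, the unfolded offset
        if (x > threshold)
          return x;           // the b.gt exit to %cleanup4
      }
      return -7;              // the fmov s2, #-7.00000000 result
    }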
Index: llvm/trunk/test/Transforms/LoopStrengthReduce/two-combinations-bug.ll
===================================================================
--- llvm/trunk/test/Transforms/LoopStrengthReduce/two-combinations-bug.ll
+++ llvm/trunk/test/Transforms/LoopStrengthReduce/two-combinations-bug.ll
@@ -0,0 +1,55 @@
+; RUN: opt < %s -loop-reduce -S | FileCheck %s
+
+; This test is adapted from the n-body test of the LLVM test-suite. A bug in
+; r345114 caused LSR to generate incorrect code. The test verifies that the
+; induction variable generated for the inner loop depends on the induction
+; variable of the outer loop.
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+%struct.planet.0.3.6.11.12.15.16.17.24.25.26.33.44 = type { double, double, double, double, double, double, double }
+
+; Function Attrs: nounwind uwtable
+define dso_local void @advance(i32 %nbodies, %struct.planet.0.3.6.11.12.15.16.17.24.25.26.33.44* nocapture %bodies) local_unnamed_addr #0 {
+; CHECK-LABEL: @advance(
+; CHECK: for.cond.loopexit:
+; CHECK:   [[LSR_IV_NEXT:%.*]] = add i64 [[LSR_IV:%.*]], -1
+; CHECK:   br label %for.body
+; CHECK: for.body:
+; CHECK:   [[LSR_IV]] = phi i64 [ [[LSR_IV_NEXT]]
+; CHECK:   br label %for.body3
+; CHECK: for.body3:
+; CHECK:   [[LSR_IV1:%.*]] = phi i64 [ [[LSR_IV_NEXT2:%.*]], %for.body3 ], [ [[LSR_IV]], %for.body ]
+; CHECK:   [[LSR_IV_NEXT2]] = add i64 [[LSR_IV1]], -1
+; CHECK:   [[EXITCOND:%.*]] = icmp eq i64 [[LSR_IV_NEXT2]], 0
+; CHECK:   br i1 [[EXITCOND]], label %for.cond.loopexit, label %for.body3
+;
+entry:
+  %wide.trip.count = zext i32 %nbodies to i64
+  br label %for.body
+
+for.cond.loopexit:                                ; preds = %for.body3
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+  br label %for.body
+
+for.body:                                         ; preds = %for.cond.loopexit, %entry
+  %indvars.iv = phi i64 [ 1, %entry ], [ %indvars.iv.next, %for.cond.loopexit ]
+  br label %for.body3
+
+for.body3:                                        ; preds = %for.body3, %for.body
+  %indvars.iv98 = phi i64 [ %indvars.iv, %for.body ], [ %indvars.iv.next99, %for.body3 ]
+  %z9 = getelementptr inbounds %struct.planet.0.3.6.11.12.15.16.17.24.25.26.33.44, %struct.planet.0.3.6.11.12.15.16.17.24.25.26.33.44* %bodies, i64 %indvars.iv98, i32 2
+  %tmp = load double, double* %z9, align 8, !tbaa !0
+  %indvars.iv.next99 = add nuw nsw i64 %indvars.iv98, 1
+  %exitcond = icmp eq i64 %indvars.iv.next99, %wide.trip.count
+  br i1 %exitcond, label %for.cond.loopexit, label %for.body3
+}
+
+attributes #0 = { nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
+
+!0 = !{!1, !2, i64 16}
+!1 = !{!"planet", !2, i64 0, !2, i64 8, !2, i64 16, !2, i64 24, !2, i64 32, !2, i64 40, !2, i64 48}
+!2 = !{!"double", !3, i64 0}
+!3 = !{!"omnipotent char", !4, i64 0}
+!4 = !{!"Simple C/C++ TBAA"}
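To make the reduced IR easier to read, a rough C++ analogue follows. This is
a hypothetical reconstruction, not the original n-body source: the reduction
kept only the z-field load, and the outer loop has no exit because its exit
test was stripped along with the rest of the kernel. The inner loop runs
nbodies - i iterations, so the down-counting IV that LSR builds for the icmp
against zero must stay dependent on the outer IV, which is exactly what the
CHECK lines assert via the phi of [[LSR_IV]] into [[LSR_IV1]]:

    // Rough analogue of the IR above, for orientation only; not the original
    // n-body source. The outer loop never exits because the reduction
    // stripped the exit test along with the rest of the computation.
    struct planet { double x, y, z, vx, vy, vz, mass; };

    void advance(int nbodies, planet *bodies) {
      for (long long i = 1;; ++i) {          // for.body (outer, infinite)
        long long j = i;                     // inner IV starts at the outer IV
        do {                                 // for.body3 (bottom-tested)
          double z = bodies[j].z;            // the only load the test keeps
          (void)z;
          ++j;
        } while (j != (long long)nbodies);   // %exitcond
      }
    }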