Index: lib/Transforms/Scalar/LoopStrengthReduce.cpp =================================================================== --- lib/Transforms/Scalar/LoopStrengthReduce.cpp +++ lib/Transforms/Scalar/LoopStrengthReduce.cpp @@ -3638,32 +3638,62 @@ void LSRInstance::GenerateCombinations(LSRUse &LU, unsigned LUIdx, Formula Base) { // This method is only interesting on a plurality of registers. - if (Base.BaseRegs.size() + (Base.Scale == 1) <= 1) + if (Base.BaseRegs.size() + (Base.Scale == 1) + + (Base.UnfoldedOffset != 0) <= 1) return; // Flatten the representation, i.e., reg1 + 1*reg2 => reg1 + reg2, before // processing the formula. Base.unscale(); - Formula F = Base; - F.BaseRegs.clear(); SmallVector<const SCEV *, 4> Ops; + Formula NewBase = Base; + NewBase.BaseRegs.clear(); + Type *CombinedIntegerType = nullptr; for (const SCEV *BaseReg : Base.BaseRegs) { if (SE.properlyDominates(BaseReg, L->getHeader()) && - !SE.hasComputableLoopEvolution(BaseReg, L)) + !SE.hasComputableLoopEvolution(BaseReg, L)) { + if (!CombinedIntegerType) + CombinedIntegerType = SE.getEffectiveSCEVType(BaseReg->getType()); Ops.push_back(BaseReg); + } else - F.BaseRegs.push_back(BaseReg); + NewBase.BaseRegs.push_back(BaseReg); } - if (Ops.size() > 1) { - const SCEV *Sum = SE.getAddExpr(Ops); + + // If no register is relevant, we're done. + if (Ops.size() == 0) + return; + + // Utility function for generating the required variants of the combined + // registers. + auto GenerateFormula = [&](const SCEV *Sum) { + Formula F = NewBase; + // TODO: If Sum is zero, it probably means ScalarEvolution missed an // opportunity to fold something. For now, just ignore such cases // rather than proceed with zero in a register. 
- if (!Sum->isZero()) { - F.BaseRegs.push_back(Sum); - F.canonicalize(*L); - (void)InsertFormula(LU, LUIdx, F); - } + if (Sum->isZero()) + return; + + F.BaseRegs.push_back(Sum); + F.canonicalize(*L); + (void)InsertFormula(LU, LUIdx, F); + }; + + // If we collected at least two registers, generate a formula combining them. + if (Ops.size() > 1) { + SmallVector<const SCEV *, 4> OpsCopy(Ops); // Don't let SE modify Ops. + GenerateFormula(SE.getAddExpr(OpsCopy)); + } + + // If we have an unfolded offset, generate a formula combining it with the + // registers collected. + if (NewBase.UnfoldedOffset) { + assert(CombinedIntegerType && "Missing a type for the unfolded offset"); + Ops.push_back(SE.getConstant(CombinedIntegerType, NewBase.UnfoldedOffset, + true)); + NewBase.UnfoldedOffset = 0; + GenerateFormula(SE.getAddExpr(Ops)); } } Index: test/Transforms/LoopStrengthReduce/AArch64/small-constant.ll =================================================================== --- test/Transforms/LoopStrengthReduce/AArch64/small-constant.ll +++ test/Transforms/LoopStrengthReduce/AArch64/small-constant.ll @@ -2,45 +2,10 @@ ; RUN: llc < %s -mtriple=aarch64-unknown-unknown | FileCheck %s -; LSR doesn't consider bumping a pointer by constants outside the loop when the -; constants fit as immediate add operands. The constants are re-associated as an -; unfolded offset rather than a register and are not combined later with -; loop-invariant registers. 
For large-enough constants LSR produces better -; solutions for these test cases, with test1 switching from: -; -; The chosen solution requires 2 instructions 2 regs, with addrec cost 1, plus 1 scale cost, plus 4 imm cost, plus 1 setup cost: -; LSR Use: Kind=ICmpZero, Offsets={0}, widest fixup type: i64 -; -7 + reg({(7 + %start),+,1}<%for.body>) -; LSR Use: Kind=Address of float in addrspace(0), Offsets={0}, widest fixup type: float* -; reg(%arr) + 4*reg({(7 + %start),+,1}<%for.body>) -; -; to: -; -; The chosen solution requires 1 instruction 2 regs, with addrec cost 1, plus 1 scale cost, plus 1 setup cost: -; LSR Use: Kind=ICmpZero, Offsets={0}, widest fixup type: i64 -; reg({%start,+,1}<%for.body>) -; LSR Use: Kind=Address of float in addrspace(0), Offsets={0}, widest fixup type: float* -; reg((88888 + %arr)) + 4*reg({%start,+,1}<%for.body>) -; -; and test2 switching from: -; -; The chosen solution requires 2 instructions 2 regs, with addrec cost 1, plus 1 base add, plus 1 scale cost: -; LSR Use: Kind=ICmpZero, Offsets={0}, widest fixup type: i64 -; reg({%start,+,1}<%for.body>) -; LSR Use: Kind=Basic, Offsets={0}, widest fixup type: i64 -; reg({%start,+,1}<%for.body>) -; LSR Use: Kind=Address of float in addrspace(0), Offsets={0}, widest fixup type: float* -; reg(%arr) + 4*reg({%start,+,1}<%for.body>) + imm(28) -; -; to: -; -; The chosen solution requires 1 instruction 2 regs, with addrec cost 1, plus 1 scale cost, plus 1 setup cost: -; LSR Use: Kind=ICmpZero, Offsets={0}, widest fixup type: i64 -; reg({%start,+,1}<%for.body>) -; LSR Use: Kind=Basic, Offsets={0}, widest fixup type: i64 -; reg({%start,+,1}<%for.body>) -; LSR Use: Kind=Address of float in addrspace(0), Offsets={0}, widest fixup type: float* -; reg((88888 + %arr)) + 4*reg({%start,+,1}<%for.body>) +; Test LSR for giving small constants, which get re-associated as unfolded +; offset, a chance to get combined with loop-invariant registers (same as +; large constants which do not fit as add immediate 
operands). LSR +; favors here to bump the base pointer outside the loop. ; float test(float *arr, long long start, float threshold) { ; for (long long i = start; i != 0; ++i) { @@ -56,17 +21,16 @@ ; CHECK-NEXT: fmov s2, #-7.00000000 ; CHECK-NEXT: cbz x1, .LBB0_5 ; CHECK-NEXT: // %bb.1: // %for.body.preheader -; CHECK-NEXT: add x8, x1, #7 // =7 +; CHECK-NEXT: add x8, x0, #28 // =28 ; CHECK-NEXT: .LBB0_2: // %for.body ; CHECK-NEXT: // =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: ldr s1, [x0, x8, lsl #2] +; CHECK-NEXT: ldr s1, [x8, x1, lsl #2] ; CHECK-NEXT: fcmp s1, s0 ; CHECK-NEXT: b.gt .LBB0_6 ; CHECK-NEXT: // %bb.3: // %for.cond ; CHECK-NEXT: // in Loop: Header=BB0_2 Depth=1 -; CHECK-NEXT: add x8, x8, #1 // =1 -; CHECK-NEXT: cmp x8, #7 // =7 -; CHECK-NEXT: b.ne .LBB0_2 +; CHECK-NEXT: add x1, x1, #1 // =1 +; CHECK-NEXT: cbnz x1, .LBB0_2 ; CHECK-NEXT: // %bb.4: ; CHECK-NEXT: mov v0.16b, v2.16b ; CHECK-NEXT: ret @@ -104,26 +68,27 @@ ; CHECK-LABEL: test2: ; CHECK: // %bb.0: // %entry ; CHECK-NEXT: fmov s2, #-7.00000000 -; CHECK-NEXT: cbz x1, .LBB1_4 -; CHECK-NEXT: .LBB1_1: // %for.body +; CHECK-NEXT: cbz x1, .LBB1_5 +; CHECK-NEXT: // %bb.1: // %for.body.preheader +; CHECK-NEXT: add x8, x0, #28 // =28 +; CHECK-NEXT: .LBB1_2: // %for.body ; CHECK-NEXT: // =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: add x8, x0, x1, lsl #2 -; CHECK-NEXT: ldr s1, [x8, #28] +; CHECK-NEXT: ldr s1, [x8, x1, lsl #2] ; CHECK-NEXT: scvtf s3, x1 ; CHECK-NEXT: fadd s3, s3, s0 ; CHECK-NEXT: fcmp s1, s3 -; CHECK-NEXT: b.gt .LBB1_5 -; CHECK-NEXT: // %bb.2: // %for.cond -; CHECK-NEXT: // in Loop: Header=BB1_1 Depth=1 +; CHECK-NEXT: b.gt .LBB1_6 +; CHECK-NEXT: // %bb.3: // %for.cond +; CHECK-NEXT: // in Loop: Header=BB1_2 Depth=1 ; CHECK-NEXT: add x1, x1, #1 // =1 -; CHECK-NEXT: cbnz x1, .LBB1_1 -; CHECK-NEXT: // %bb.3: +; CHECK-NEXT: cbnz x1, .LBB1_2 +; CHECK-NEXT: // %bb.4: ; CHECK-NEXT: mov v0.16b, v2.16b ; CHECK-NEXT: ret -; CHECK-NEXT: .LBB1_4: +; CHECK-NEXT: .LBB1_5: ; CHECK-NEXT: mov 
v0.16b, v2.16b ; CHECK-NEXT: ret -; CHECK-NEXT: .LBB1_5: // %cleanup4 +; CHECK-NEXT: .LBB1_6: // %cleanup4 ; CHECK-NEXT: mov v0.16b, v1.16b ; CHECK-NEXT: ret entry: Index: test/Transforms/LoopStrengthReduce/two-combinations-bug.ll =================================================================== --- test/Transforms/LoopStrengthReduce/two-combinations-bug.ll +++ test/Transforms/LoopStrengthReduce/two-combinations-bug.ll @@ -0,0 +1,133 @@ +; RUN: opt < %s -loop-reduce -disable-output -debug-only=loop-reduce |& FileCheck %s +; REQUIRES: asserts + +; This test is adopted from the n-body test of the LLVM test-suite: A bug in +; r345114 caused LSR to generate incorrect code. The test verifies that the +; incorrect formula is not selected for the ICmpZero use. + +; CHECK: The chosen solution +; CHECK-NEXT: LSR Use: Kind=ICmpZero +; CHECK-NOT: reg({1,+,1}<%for.body>) + -1*reg({0,+,-1}<%for.body3>) +; CHECK: LSR Use: Kind=Address + +target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-unknown-linux-gnu" + +%struct.planet = type { double, double, double, double, double, double, double } + +; Function Attrs: nounwind uwtable +define dso_local void @advance(i32 %nbodies, %struct.planet* nocapture %bodies, double %dt) local_unnamed_addr #0 { +entry: + %cmp96 = icmp sgt i32 %nbodies, 0 + br i1 %cmp96, label %for.body.preheader, label %for.end45 + +for.body.preheader: ; preds = %entry + %0 = sext i32 %nbodies to i64 + %wide.trip.count = zext i32 %nbodies to i64 + %wide.trip.count102.pre-phi = zext i32 %nbodies to i64 + br label %for.body + +for.cond.loopexit: ; preds = %for.body3, %for.body + %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1 + %exitcond103 = icmp eq i64 %indvars.iv.next101, %wide.trip.count102.pre-phi + br i1 %exitcond103, label %for.end45, label %for.body + +for.body: ; preds = %for.cond.loopexit, %for.body.preheader + %indvars.iv100 = phi i64 [ 0, %for.body.preheader ], [ %indvars.iv.next101, %for.cond.loopexit ] + %indvars.iv 
= phi i64 [ 1, %for.body.preheader ], [ %indvars.iv.next, %for.cond.loopexit ] + %indvars.iv.next101 = add nuw nsw i64 %indvars.iv100, 1 + %cmp294 = icmp slt i64 %indvars.iv.next101, %0 + br i1 %cmp294, label %for.body3.lr.ph, label %for.cond.loopexit + +for.body3.lr.ph: ; preds = %for.body + %x = getelementptr inbounds %struct.planet, %struct.planet* %bodies, i64 %indvars.iv100, i32 0 + %z = getelementptr inbounds %struct.planet, %struct.planet* %bodies, i64 %indvars.iv100, i32 2 + %vx = getelementptr inbounds %struct.planet, %struct.planet* %bodies, i64 %indvars.iv100, i32 3 + %vz = getelementptr inbounds %struct.planet, %struct.planet* %bodies, i64 %indvars.iv100, i32 5 + %mass28 = getelementptr inbounds %struct.planet, %struct.planet* %bodies, i64 %indvars.iv100, i32 6 + %1 = bitcast double* %x to <2 x double>* + %2 = bitcast double* %vx to <2 x double>* + %3 = bitcast double* %vx to <2 x double>* + br label %for.body3 + +for.body3: ; preds = %for.body3, %for.body3.lr.ph + %indvars.iv98 = phi i64 [ %indvars.iv, %for.body3.lr.ph ], [ %indvars.iv.next99, %for.body3 ] + %x6 = getelementptr inbounds %struct.planet, %struct.planet* %bodies, i64 %indvars.iv98, i32 0 + %4 = load <2 x double>, <2 x double>* %1, align 8, !tbaa !2 + %5 = bitcast double* %x6 to <2 x double>* + %6 = load <2 x double>, <2 x double>* %5, align 8, !tbaa !2 + %7 = fsub <2 x double> %4, %6 + %8 = load double, double* %z, align 8, !tbaa !6 + %z9 = getelementptr inbounds %struct.planet, %struct.planet* %bodies, i64 %indvars.iv98, i32 2 + %9 = load double, double* %z9, align 8, !tbaa !6 + %sub10 = fsub double %8, %9 + %10 = extractelement <2 x double> %7, i32 0 + %mul = fmul double %10, %10 + %11 = extractelement <2 x double> %7, i32 1 + %mul11 = fmul double %11, %11 + %add12 = fadd double %mul, %mul11 + %mul13 = fmul double %sub10, %sub10 + %add14 = fadd double %add12, %mul13 + %call = tail call double @sqrt(double %add14) #2 + %mul15 = fmul double %call, %call + %mul16 = fmul double %call, 
%mul15 + %div = fdiv double %dt, %mul16 + %mass = getelementptr inbounds %struct.planet, %struct.planet* %bodies, i64 %indvars.iv98, i32 6 + %12 = load double, double* %mass, align 8, !tbaa !8 + %13 = insertelement <2 x double> undef, double %12, i32 0 + %14 = shufflevector <2 x double> %13, <2 x double> undef, <2 x i32> zeroinitializer + %15 = fmul <2 x double> %7, %14 + %16 = insertelement <2 x double> undef, double %div, i32 0 + %17 = shufflevector <2 x double> %16, <2 x double> undef, <2 x i32> zeroinitializer + %18 = fmul <2 x double> %15, %17 + %19 = load <2 x double>, <2 x double>* %2, align 8, !tbaa !2 + %20 = fsub <2 x double> %19, %18 + store <2 x double> %20, <2 x double>* %3, align 8, !tbaa !2 + %mul25 = fmul double %sub10, %12 + %mul26 = fmul double %mul25, %div + %21 = load double, double* %vz, align 8, !tbaa !9 + %sub27 = fsub double %21, %mul26 + store double %sub27, double* %vz, align 8, !tbaa !9 + %22 = load double, double* %mass28, align 8, !tbaa !8 + %vx31 = getelementptr inbounds %struct.planet, %struct.planet* %bodies, i64 %indvars.iv98, i32 3 + %23 = insertelement <2 x double> undef, double %22, i32 0 + %24 = shufflevector <2 x double> %23, <2 x double> undef, <2 x i32> zeroinitializer + %25 = fmul <2 x double> %7, %24 + %26 = fmul <2 x double> %17, %25 + %27 = bitcast double* %vx31 to <2 x double>* + %28 = load <2 x double>, <2 x double>* %27, align 8, !tbaa !2 + %29 = fadd <2 x double> %28, %26 + %30 = bitcast double* %vx31 to <2 x double>* + store <2 x double> %29, <2 x double>* %30, align 8, !tbaa !2 + %mul39 = fmul double %sub10, %22 + %mul40 = fmul double %div, %mul39 + %vz41 = getelementptr inbounds %struct.planet, %struct.planet* %bodies, i64 %indvars.iv98, i32 5 + %31 = load double, double* %vz41, align 8, !tbaa !9 + %add42 = fadd double %mul40, %31 + store double %add42, double* %vz41, align 8, !tbaa !9 + %indvars.iv.next99 = add nuw nsw i64 %indvars.iv98, 1 + %exitcond = icmp eq i64 %indvars.iv.next99, %wide.trip.count + br i1 
%exitcond, label %for.cond.loopexit, label %for.body3 + +for.end45: ; preds = %for.cond.loopexit, %entry + ret void +} + +; Function Attrs: nounwind +declare dso_local double @sqrt(double) local_unnamed_addr #1 + +attributes #0 = { nounwind uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" } +attributes #1 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+fxsr,+mmx,+sse,+sse2,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" } +attributes #2 = { nounwind } + +!llvm.module.flags = !{!0} + +!0 = !{i32 1, !"wchar_size", i32 4} +!2 = !{!3, !3, i64 0} +!3 = !{!"double", !4, i64 0} +!4 = !{!"omnipotent char", !5, i64 0} +!5 = !{!"Simple C/C++ TBAA"} +!6 = !{!7, !3, i64 16} +!7 = !{!"planet", !3, i64 0, !3, i64 8, !3, i64 16, !3, i64 24, !3, i64 32, !3, i64 40, !3, i64 48} +!8 = !{!7, !3, i64 48} +!9 = !{!7, !3, i64 40}