Index: lib/Transforms/Scalar/LoopStrengthReduce.cpp
===================================================================
--- lib/Transforms/Scalar/LoopStrengthReduce.cpp
+++ lib/Transforms/Scalar/LoopStrengthReduce.cpp
@@ -3423,6 +3423,12 @@
   if (AddOps.size() == 1)
     return;
 
+  // For AllFixupsOutsideLoop uses, their reassociations have less impact than
+  // those of normal uses, since they are outside of the current loop.
+  // Arbitrarily cap the size of AddOps to protect compile time.
+  if (AddOps.size() >= 5 && LU.AllFixupsOutsideLoop)
+    return;
+
   for (SmallVectorImpl<const SCEV *>::const_iterator J = AddOps.begin(),
        JE = AddOps.end();
        J != JE; ++J) {
Index: test/Transforms/LoopStrengthReduce/X86/reassociation-cap.ll
===================================================================
--- test/Transforms/LoopStrengthReduce/X86/reassociation-cap.ll
+++ test/Transforms/LoopStrengthReduce/X86/reassociation-cap.ll
@@ -0,0 +1,111 @@
+; REQUIRES: asserts
+; RUN: opt -mtriple=x86_64-unknown-linux-gnu -loop-reduce -disable-output -debug-only=loop-reduce -S < %s 2>&1 | FileCheck %s
+; PR32043. Check that LSR does not generate many formulae via reassociation for
+; an all-fixups-outside-loop type LSRUse.
+
+; CHECK: LSR on loop %bb15:
+; CHECK: After generating reuse formulae:
+; CHECK: LSR Use: Kind=Basic, Offsets={0}, all-fixups-outside-loop, widest fixup type: i64
+; CHECK-NEXT: reg((-4 * (%tmp34 + %tmp27 + %tmp20 + %tmp13 + %tmp6)))
+; CHECK-NEXT: reg((-4 * (%tmp34 + %tmp27 + %tmp20 + %tmp13 + %tmp6)))
+; CHECK-NEXT: reg((-4 * (%tmp34 + %tmp27 + %tmp20 + %tmp13 + %tmp6)))
+; CHECK-NEXT: LSR Use: Kind=Basic, Offsets={0}, all-fixups-outside-loop, widest fixup type: i64
+; CHECK: LSR on loop %bb12:
+; After putting a cap on reassociation for an all-fixups-outside-loop type
+; LSRUse, LSR only generates two reuse formulae for the LSRUse %tmp563 used
+; in the store.
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+
+define void @foo(i64* %arg) {
+bb:
+  br label %bb3
+bb2:
+  %tmp2 = add nuw nsw i64 %tmp1, 1
+  %tmp7 = icmp slt i64 %tmp1, 29
+  br i1 %tmp7, label %bb3, label %bb4
+bb3:
+  %tmp1 = phi i64 [ %tmp2, %bb2 ], [ 0, %bb ]
+  %tmp3 = phi i32 [ %tmp4, %bb2 ], [ 0, %bb ]
+  %tmp5 = getelementptr inbounds i64, i64* %arg, i64 %tmp1
+  %tmp6 = load i64, i64* %tmp5, align 1
+  %tmp4 = add nuw nsw i32 %tmp3, 1
+  br i1 false, label %bb4, label %bb2
+bb4:
+  %tmp90 = sext i32 %tmp4 to i64
+  br label %bb6
+
+bb5:
+  %tmp9 = add nuw nsw i64 %tmp8, 1
+  %tmp14 = icmp slt i64 %tmp8, 29
+  br i1 %tmp14, label %bb6, label %bb7
+bb6:
+  %tmp8 = phi i64 [ %tmp9, %bb5 ], [ 0, %bb4 ]
+  %tmp10 = phi i32 [ %tmp11, %bb5 ], [ 0, %bb4 ]
+  %tmp12 = getelementptr inbounds i64, i64* %arg, i64 %tmp8
+  %tmp13 = load i64, i64* %tmp12, align 1
+  %tmp11 = add nuw nsw i32 %tmp10, 1
+  br i1 false, label %bb7, label %bb5
+bb7:
+  %tmp91 = sext i32 %tmp11 to i64
+  br label %bb9
+
+bb8:
+  %tmp16 = add nuw nsw i64 %tmp15, 1
+  %tmp21 = icmp slt i64 %tmp15, 29
+  br i1 %tmp21, label %bb9, label %bb10
+bb9:
+  %tmp15 = phi i64 [ %tmp16, %bb8 ], [ 0, %bb7 ]
+  %tmp17 = phi i32 [ %tmp18, %bb8 ], [ 0, %bb7 ]
+  %tmp19 = getelementptr inbounds i64, i64* %arg, i64 %tmp15
+  %tmp20 = load i64, i64* %tmp19, align 1
+  %tmp18 = add nuw nsw i32 %tmp17, 1
+  br i1 false, label %bb10, label %bb8
+bb10:
+  %tmp92 = sext i32 %tmp18 to i64
+  br label %bb12
+
+bb11:
+  %tmp23 = add nuw nsw i64 %tmp22, 1
+  %tmp28 = icmp slt i64 %tmp22, 29
+  br i1 %tmp28, label %bb12, label %bb13
+bb12:
+  %tmp22 = phi i64 [ %tmp23, %bb11 ], [ 0, %bb10 ]
+  %tmp24 = phi i32 [ %tmp25, %bb11 ], [ 0, %bb10 ]
+  %tmp26 = getelementptr inbounds i64, i64* %arg, i64 %tmp22
+  %tmp27 = load i64, i64* %tmp26, align 1
+  %tmp25 = add nuw nsw i32 %tmp24, 1
+  br i1 false, label %bb13, label %bb11
+bb13:
+  %tmp93 = sext i32 %tmp25 to i64
+  br label %bb15
+
+bb14:
+  %tmp30 = add nuw nsw i64 %tmp29, 1
+  %tmp35 = icmp slt i64 %tmp29, 29
+  br i1 %tmp35, label %bb15, label %bb16
+bb15:
+  %tmp29 = phi i64 [ %tmp30, %bb14 ], [ 0, %bb13 ]
+  %tmp31 = phi i32 [ %tmp32, %bb14 ], [ 0, %bb13 ]
+  %tmp33 = getelementptr inbounds i64, i64* %arg, i64 %tmp29
+  %tmp34 = load i64, i64* %tmp33, align 1
+  %tmp32 = add nuw nsw i32 %tmp31, 1
+  br i1 false, label %bb16, label %bb14
+bb16:
+  %tmp94 = sext i32 %tmp32 to i64
+  br label %bb556
+
+bb556:
+  %tmp554 = add i64 %tmp6, %tmp13
+  %tmp555 = add i64 %tmp554, %tmp20
+  %tmp556 = add i64 %tmp555, %tmp27
+  %tmp557 = add i64 %tmp556, %tmp34
+  %tmp558 = mul i64 %tmp557, -4
+  %tmp559 = add i64 %tmp558, %tmp90
+  %tmp560 = add i64 %tmp559, %tmp91
+  %tmp561 = add i64 %tmp560, %tmp92
+  %tmp562 = add i64 %tmp561, %tmp93
+  %tmp563 = add i64 %tmp562, %tmp94
+  store i64 %tmp563, i64* %arg, align 8
+  ret void
+}
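
Note on why a small cap is enough: in the test, %tmp563 accumulates values from
all five loops into a single store outside them, so the SCEV add behind that
use has many operands, and each operand is a candidate split point when LSR
reassociates. The standalone C++ sketch below is a toy worst-case model of that
growth, not LLVM's actual GenerateReassociations implementation; the names
countReassociations and kAddOpsCap are hypothetical and chosen only to mirror
the "AddOps.size() >= 5" guard in the patch. In the model, splitting one
operand off an N-operand add and recursing on the remaining N - 1 operands
enumerates N! candidates, which is why even a modest operand count can blow up
compile time (PR32043) and why capping N at 5 for all-fixups-outside-loop uses
bounds the work at negligible cost.

// Toy model with hypothetical names; not LLVM API.
#include <cstddef>
#include <cstdint>
#include <iostream>

constexpr std::size_t kAddOpsCap = 5; // mirrors "AddOps.size() >= 5" above

// Worst case: split one operand off the N-operand add (N choices), then
// recurse on the remaining N - 1 operands. This enumerates N! candidates.
std::uint64_t countReassociations(std::size_t n) {
  if (n <= 1)
    return 1;
  std::uint64_t total = 0;
  for (std::size_t i = 0; i < n; ++i)
    total += countReassociations(n - 1);
  return total;
}

int main() {
  for (std::size_t n = 2; n <= 10; ++n) {
    std::uint64_t uncapped = countReassociations(n);
    // With the cap, an all-fixups-outside-loop use with n >= kAddOpsCap add
    // operands is not reassociated at all, so only its existing formulae
    // survive (modeled here as a single candidate).
    std::uint64_t capped = (n >= kAddOpsCap) ? 1 : uncapped;
    std::cout << "AddOps=" << n << "  worst-case uncapped=" << uncapped
              << "  capped=" << capped << '\n';
  }
  return 0;
}

Under this model, AddOps=10 already yields 3628800 worst-case candidates while
the capped path keeps a constant amount of work, which matches the intent
stated in the patch comment: reassociation for uses outside the current loop
is rarely profitable enough to justify that cost.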