diff --git a/llvm/lib/Transforms/Utils/LoopUnrollAndJam.cpp b/llvm/lib/Transforms/Utils/LoopUnrollAndJam.cpp
--- a/llvm/lib/Transforms/Utils/LoopUnrollAndJam.cpp
+++ b/llvm/lib/Transforms/Utils/LoopUnrollAndJam.cpp
@@ -260,7 +260,10 @@
   // if not outright eliminated.
   if (SE) {
     SE->forgetLoop(L);
-    SE->forgetLoop(SubLoop);
+    // forgetLoop(L) already visits the nested SubLoop; what must additionally
+    // be dropped are the cached block and loop dispositions, which may refer
+    // to blocks this transform rewrites or removes.
+    SE->forgetBlockAndLoopDispositions();
   }
 
   using namespace ore;
diff --git a/llvm/test/Transforms/LoopUnrollAndJam/se-forget.ll b/llvm/test/Transforms/LoopUnrollAndJam/se-forget.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/Transforms/LoopUnrollAndJam/se-forget.ll
@@ -0,0 +1,130 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt "-passes=print<scalar-evolution>,loop-unroll-and-jam" -S < %s | FileCheck %s
+; Running print<scalar-evolution> first populates SCEV's caches (including
+; block and loop dispositions) before loop-unroll-and-jam runs.
+
+target datalayout = "e-m:e-i64:64-p:64:64-n32:64-S128-v256:256:256-v512:512:512"
+
+define dso_local void @main() {
+; Check that we forget the transformed loops and the cached block/loop
+; dispositions in ScalarEvolution; stale dispositions would otherwise make
+; scalar evolution fail to verify after unroll-and-jam.
+
+; CHECK-LABEL: @main(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[X:%.*]] = alloca [216 x float], align 4
+; CHECK-NEXT:    [[Y:%.*]] = alloca [216 x float], align 4
+; CHECK-NEXT:    [[X_IX_DIM_0:%.*]] = getelementptr i8, ptr [[X]], i64 -52
+; CHECK-NEXT:    [[Y_IX_DIM_0:%.*]] = getelementptr i8, ptr [[Y]], i64 -52
+; CHECK-NEXT:    br label [[OUTER_HEADER:%.*]]
+; CHECK:       outer.header:
+; CHECK-NEXT:    [[X_IX_DIM_1:%.*]] = getelementptr i8, ptr [[X_IX_DIM_0]], i64 20
+; CHECK-NEXT:    [[Y_IX_DIM_1:%.*]] = getelementptr i8, ptr [[Y_IX_DIM_0]], i64 4
+; CHECK-NEXT:    [[X_IX_DIM_1_1:%.*]] = getelementptr i8, ptr [[X_IX_DIM_0]], i64 20
+; CHECK-NEXT:    [[Y_IX_DIM_1_1:%.*]] = getelementptr i8, ptr [[Y_IX_DIM_0]], i64 8
+; CHECK-NEXT:    [[X_IX_DIM_1_2:%.*]] = getelementptr i8, ptr [[X_IX_DIM_0]], i64 20
+; CHECK-NEXT:    [[Y_IX_DIM_1_2:%.*]] = getelementptr i8, ptr [[Y_IX_DIM_0]], i64 12
+; CHECK-NEXT:    [[X_IX_DIM_1_3:%.*]] = getelementptr i8, ptr [[X_IX_DIM_0]], i64 20
+; CHECK-NEXT:    [[Y_IX_DIM_1_3:%.*]] = getelementptr i8, ptr [[Y_IX_DIM_0]], i64 16
+; CHECK-NEXT:    [[X_IX_DIM_1_4:%.*]] = getelementptr i8, ptr [[X_IX_DIM_0]], i64 20
+; CHECK-NEXT:    [[Y_IX_DIM_1_4:%.*]] = getelementptr i8, ptr [[Y_IX_DIM_0]], i64 20
+; CHECK-NEXT:    [[X_IX_DIM_1_5:%.*]] = getelementptr i8, ptr [[X_IX_DIM_0]], i64 20
+; CHECK-NEXT:    [[Y_IX_DIM_1_5:%.*]] = getelementptr i8, ptr [[Y_IX_DIM_0]], i64 24
+; CHECK-NEXT:    br label [[INNER:%.*]]
+; CHECK:       inner:
+; CHECK-NEXT:    [[INNER_IV_1:%.*]] = phi i64 [ 1, [[OUTER_HEADER]] ], [ 0, [[INNER]] ]
+; CHECK-NEXT:    [[INNER_IV:%.*]] = phi i64 [ 1, [[OUTER_HEADER]] ], [ [[INNER_IV_NEXT:%.*]], [[INNER]] ]
+; CHECK-NEXT:    [[INNER_IV_1_1:%.*]] = phi i64 [ 1, [[OUTER_HEADER]] ], [ 0, [[INNER]] ]
+; CHECK-NEXT:    [[INNER_IV_11:%.*]] = phi i64 [ 1, [[OUTER_HEADER]] ], [ [[INNER_IV_NEXT_1:%.*]], [[INNER]] ]
+; CHECK-NEXT:    [[INNER_IV_1_2:%.*]] = phi i64 [ 1, [[OUTER_HEADER]] ], [ 0, [[INNER]] ]
+; CHECK-NEXT:    [[INNER_IV_2:%.*]] = phi i64 [ 1, [[OUTER_HEADER]] ], [ [[INNER_IV_NEXT_2:%.*]], [[INNER]] ]
+; CHECK-NEXT:    [[INNER_IV_1_3:%.*]] = phi i64 [ 1, [[OUTER_HEADER]] ], [ 0, [[INNER]] ]
+; CHECK-NEXT:    [[INNER_IV_3:%.*]] = phi i64 [ 1, [[OUTER_HEADER]] ], [ [[INNER_IV_NEXT_3:%.*]], [[INNER]] ]
+; CHECK-NEXT:    [[INNER_IV_1_4:%.*]] = phi i64 [ 1, [[OUTER_HEADER]] ], [ 0, [[INNER]] ]
+; CHECK-NEXT:    [[INNER_IV_4:%.*]] = phi i64 [ 1, [[OUTER_HEADER]] ], [ [[INNER_IV_NEXT_4:%.*]], [[INNER]] ]
+; CHECK-NEXT:    [[INNER_IV_1_5:%.*]] = phi i64 [ 1, [[OUTER_HEADER]] ], [ 0, [[INNER]] ]
+; CHECK-NEXT:    [[INNER_IV_5:%.*]] = phi i64 [ 1, [[OUTER_HEADER]] ], [ [[INNER_IV_NEXT_5:%.*]], [[INNER]] ]
+; CHECK-NEXT:    [[_IX_X_LEN:%.*]] = mul nuw nsw i64 [[INNER_IV_1]], 144
+; CHECK-NEXT:    [[X_IX_DIM_2:%.*]] = getelementptr i8, ptr [[X_IX_DIM_1]], i64 [[_IX_X_LEN]]
+; CHECK-NEXT:    [[Y_IX_DIM_2:%.*]] = getelementptr i8, ptr [[Y_IX_DIM_1]], i64 [[_IX_X_LEN]]
+; CHECK-NEXT:    [[_VAL_Y:%.*]] = load float, ptr [[Y_IX_DIM_2]], align 4
+; CHECK-NEXT:    store float [[_VAL_Y]], ptr [[X_IX_DIM_2]], align 8
+; CHECK-NEXT:    [[INNER_IV_NEXT]] = add nuw nsw i64 [[INNER_IV]], 1
+; CHECK-NEXT:    [[_IX_X_LEN_12:%.*]] = mul nuw nsw i64 [[INNER_IV_1_1]], 144
+; CHECK-NEXT:    [[X_IX_DIM_2_1:%.*]] = getelementptr i8, ptr [[X_IX_DIM_1_1]], i64 [[_IX_X_LEN_12]]
+; CHECK-NEXT:    [[Y_IX_DIM_2_1:%.*]] = getelementptr i8, ptr [[Y_IX_DIM_1_1]], i64 [[_IX_X_LEN_12]]
+; CHECK-NEXT:    [[_VAL_Y_1:%.*]] = load float, ptr [[Y_IX_DIM_2_1]], align 4
+; CHECK-NEXT:    store float [[_VAL_Y_1]], ptr [[X_IX_DIM_2_1]], align 8
+; CHECK-NEXT:    [[INNER_IV_NEXT_1]] = add nuw nsw i64 [[INNER_IV_11]], 1
+; CHECK-NEXT:    [[_IX_X_LEN_2:%.*]] = mul nuw nsw i64 [[INNER_IV_1_2]], 144
+; CHECK-NEXT:    [[X_IX_DIM_2_2:%.*]] = getelementptr i8, ptr [[X_IX_DIM_1_2]], i64 [[_IX_X_LEN_2]]
+; CHECK-NEXT:    [[Y_IX_DIM_2_2:%.*]] = getelementptr i8, ptr [[Y_IX_DIM_1_2]], i64 [[_IX_X_LEN_2]]
+; CHECK-NEXT:    [[_VAL_Y_2:%.*]] = load float, ptr [[Y_IX_DIM_2_2]], align 4
+; CHECK-NEXT:    store float [[_VAL_Y_2]], ptr [[X_IX_DIM_2_2]], align 8
+; CHECK-NEXT:    [[INNER_IV_NEXT_2]] = add nuw nsw i64 [[INNER_IV_2]], 1
+; CHECK-NEXT:    [[_IX_X_LEN_3:%.*]] = mul nuw nsw i64 [[INNER_IV_1_3]], 144
+; CHECK-NEXT:    [[X_IX_DIM_2_3:%.*]] = getelementptr i8, ptr [[X_IX_DIM_1_3]], i64 [[_IX_X_LEN_3]]
+; CHECK-NEXT:    [[Y_IX_DIM_2_3:%.*]] = getelementptr i8, ptr [[Y_IX_DIM_1_3]], i64 [[_IX_X_LEN_3]]
+; CHECK-NEXT:    [[_VAL_Y_3:%.*]] = load float, ptr [[Y_IX_DIM_2_3]], align 4
+; CHECK-NEXT:    store float [[_VAL_Y_3]], ptr [[X_IX_DIM_2_3]], align 8
+; CHECK-NEXT:    [[INNER_IV_NEXT_3]] = add nuw nsw i64 [[INNER_IV_3]], 1
+; CHECK-NEXT:    [[_IX_X_LEN_4:%.*]] = mul nuw nsw i64 [[INNER_IV_1_4]], 144
+; CHECK-NEXT:    [[X_IX_DIM_2_4:%.*]] = getelementptr i8, ptr [[X_IX_DIM_1_4]], i64 [[_IX_X_LEN_4]]
+; CHECK-NEXT:    [[Y_IX_DIM_2_4:%.*]] = getelementptr i8, ptr [[Y_IX_DIM_1_4]], i64 [[_IX_X_LEN_4]]
+; CHECK-NEXT:    [[_VAL_Y_4:%.*]] = load float, ptr [[Y_IX_DIM_2_4]], align 4
+; CHECK-NEXT:    store float [[_VAL_Y_4]], ptr [[X_IX_DIM_2_4]], align 8
+; CHECK-NEXT:    [[INNER_IV_NEXT_4]] = add nuw nsw i64 [[INNER_IV_4]], 1
+; CHECK-NEXT:    [[_IX_X_LEN_5:%.*]] = mul nuw nsw i64 [[INNER_IV_1_5]], 144
+; CHECK-NEXT:    [[X_IX_DIM_2_5:%.*]] = getelementptr i8, ptr [[X_IX_DIM_1_5]], i64 [[_IX_X_LEN_5]]
+; CHECK-NEXT:    [[Y_IX_DIM_2_5:%.*]] = getelementptr i8, ptr [[Y_IX_DIM_1_5]], i64 [[_IX_X_LEN_5]]
+; CHECK-NEXT:    [[_VAL_Y_5:%.*]] = load float, ptr [[Y_IX_DIM_2_5]], align 4
+; CHECK-NEXT:    store float [[_VAL_Y_5]], ptr [[X_IX_DIM_2_5]], align 8
+; CHECK-NEXT:    [[INNER_IV_NEXT_5]] = add nuw nsw i64 [[INNER_IV_5]], 1
+; CHECK-NEXT:    [[EXITCOND_NOT_5:%.*]] = icmp eq i64 [[INNER_IV_NEXT_5]], 7
+; CHECK-NEXT:    br i1 [[EXITCOND_NOT_5]], label [[OUTER_LATCH:%.*]], label [[INNER]]
+; CHECK:       outer.latch:
+; CHECK-NEXT:    br label [[EXIT:%.*]]
+; CHECK:       exit:
+; CHECK-NEXT:    ret void
+;
+; The outer loop (trip count 6) is fully unrolled and all six copies of its
+; body are jammed into the single remaining inner loop, as checked above.
+entry:
+  %x = alloca [216 x float], align 4
+  %y = alloca [216 x float], align 4
+  %x_ix_dim_0 = getelementptr i8, ptr %x, i64 -52
+  %y_ix_dim_0 = getelementptr i8, ptr %y, i64 -52
+  br label %outer.header
+
+outer.header:          ; preds = %outer.latch, %entry
+  %outer.iv = phi i64 [ 1, %entry ], [ %outer.iv.next, %outer.latch ]
+  %_ix_x_len.1 = shl nuw nsw i64 %outer.iv, 2
+  %x_ix_dim_1 = getelementptr i8, ptr %x_ix_dim_0, i64 20
+  %y_ix_dim_1 = getelementptr i8, ptr %y_ix_dim_0, i64 %_ix_x_len.1
+  br label %inner
+
+inner:                                      ; preds = %inner, %outer.header
+  %inner.iv.1 = phi i64 [ 1, %outer.header ], [ 0, %inner ]
+  %inner.iv = phi i64 [ 1, %outer.header ], [ %inner.iv.next, %inner ]
+  %_ix_x_len = mul nuw nsw i64 %inner.iv.1, 144
+  %x_ix_dim_2 = getelementptr i8, ptr %x_ix_dim_1, i64 %_ix_x_len
+  %y_ix_dim_2 = getelementptr i8, ptr %y_ix_dim_1, i64 %_ix_x_len
+  %_val_y = load float, ptr %y_ix_dim_2, align 4
+  %_add_tmp = fadd fast float %_val_y, 0.000000e+00
+  store float %_add_tmp, ptr %x_ix_dim_2, align 8
+  %inner.iv.next = add nuw nsw i64 %inner.iv, 1
+  %exitcond.not = icmp eq i64 %inner.iv.next, 7
+  br i1 %exitcond.not, label %outer.latch, label %inner
+
+outer.latch:                                    ; preds = %inner
+  %outer.iv.next = add nuw nsw i64 %outer.iv, 1
+  %exitcond.not.1 = icmp eq i64 %outer.iv.next, 7
+  br i1 %exitcond.not.1, label %exit, label %outer.header, !llvm.loop !1
+
+exit:                                    ; preds = %outer.latch
+  ret void
+}
+
+; Unroll-and-jam is explicitly requested on the outer loop via metadata.
+!1 = distinct !{!1, !2}
+!2 = !{!"llvm.loop.unroll_and_jam.enable"}