diff --git a/llvm/lib/Transforms/Vectorize/VPlanHCFGBuilder.cpp b/llvm/lib/Transforms/Vectorize/VPlanHCFGBuilder.cpp
--- a/llvm/lib/Transforms/Vectorize/VPlanHCFGBuilder.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanHCFGBuilder.cpp
@@ -119,7 +119,9 @@
   // Get or create a region for the loop containing BB.
   Loop *CurrentLoop = LI->getLoopFor(BB);
   VPRegionBlock *ParentR = nullptr;
-  if (CurrentLoop) {
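+  // Only create regions for TheLoop and for loops nested inside it; loops
+  // enclosing TheLoop (at a shallower nesting depth) get no region here.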
+  if (CurrentLoop && CurrentLoop->getLoopDepth() >= TheLoop->getLoopDepth()) {
     auto Iter = Loop2Region.insert({CurrentLoop, nullptr});
     if (Iter.second)
       Iter.first->second = new VPRegionBlock(
diff --git a/llvm/test/Transforms/LoopVectorize/outer_loop_test3.ll b/llvm/test/Transforms/LoopVectorize/outer_loop_test3.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/outer_loop_test3.ll
@@ -0,0 +1,150 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
+; RUN: opt -S -passes=loop-vectorize -enable-vplan-native-path -pass-remarks=loop-vectorize < %s 2>%t | FileCheck %s
+; RUN: cat %t | FileCheck %s --check-prefix=CHECK-REMARK
+
+; void test(int n, int **a)
+; {
+;   for (int k = 0; k < n; ++k) {
+;     a[k][0] = 0;
+;     #pragma clang loop vectorize_width(4)
+;     for (int i = 0; i < n; ++i) {
+;         for (int j = 0; j < n; ++j) {
+;             a[i][j] = 2 + k;
+;         }
+;     }
+;   }
+; }
+
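+; The loop being vectorized (the i loop, per the pragma above) is itself nested
+; inside the outer k loop, which stays scalar.
+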
+; CHECK-REMARK: remark: <unknown>:0:0: vectorized outer loop (vectorization width: 4, interleaved count: 1)
+
+define void @test(i64 %n, ptr %a) {
+; CHECK-LABEL: define void @test
+; CHECK-SAME: (i64 [[N:%.*]], ptr [[A:%.*]]) {
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[CMP34:%.*]] = icmp sgt i64 [[N]], 0
+; CHECK-NEXT:    br i1 [[CMP34]], label [[FOR_BODY_US_PREHEADER:%.*]], label [[FOR_COND_CLEANUP:%.*]]
+; CHECK:       for.body.us.preheader:
+; CHECK-NEXT:    br label [[FOR_BODY_US:%.*]]
+; CHECK:       for.body.us:
+; CHECK-NEXT:    [[INDVARS_IV42:%.*]] = phi i64 [ 0, [[FOR_BODY_US_PREHEADER]] ], [ [[INDVARS_IV_NEXT43:%.*]], [[FOR_COND2_FOR_COND_CLEANUP4_CRIT_EDGE_SPLIT_US_US:%.*]] ]
+; CHECK-NEXT:    [[ARRAYIDX_US:%.*]] = getelementptr inbounds ptr, ptr [[A]], i64 [[INDVARS_IV42]]
+; CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[ARRAYIDX_US]], align 8
+; CHECK-NEXT:    store i32 0, ptr [[TMP0]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc i64 [[INDVARS_IV42]] to i32
+; CHECK-NEXT:    [[TMP2:%.*]] = add i32 [[TMP1]], 2
+; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 4
+; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK:       vector.ph:
+; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[N]], 4
+; CHECK-NEXT:    [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[TMP2]], i64 0
+; CHECK-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT:    [[BROADCAST_SPLATINSERT2:%.*]] = insertelement <4 x i64> poison, i64 [[N]], i64 0
+; CHECK-NEXT:    [[BROADCAST_SPLAT3:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT2]], <4 x i64> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
+; CHECK:       vector.body:
+; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[FOR_COND6_FOR_COND_CLEANUP8_CRIT_EDGE_US_US4:%.*]] ]
+; CHECK-NEXT:    [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[FOR_COND6_FOR_COND_CLEANUP8_CRIT_EDGE_US_US4]] ]
+; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds ptr, ptr [[A]], <4 x i64> [[VEC_IND]]
+; CHECK-NEXT:    [[WIDE_MASKED_GATHER:%.*]] = call <4 x ptr> @llvm.masked.gather.v4p0.v4p0(<4 x ptr> [[TMP3]], i32 8, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x ptr> poison)
+; CHECK-NEXT:    br label [[FOR_BODY9_US_US1:%.*]]
+; CHECK:       for.body9.us.us1:
+; CHECK-NEXT:    [[VEC_PHI:%.*]] = phi <4 x i64> [ [[TMP5:%.*]], [[FOR_BODY9_US_US1]] ], [ zeroinitializer, [[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds i32, <4 x ptr> [[WIDE_MASKED_GATHER]], <4 x i64> [[VEC_PHI]]
+; CHECK-NEXT:    call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> [[BROADCAST_SPLAT]], <4 x ptr> [[TMP4]], i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>)
+; CHECK-NEXT:    [[TMP5]] = add nuw nsw <4 x i64> [[VEC_PHI]], <i64 1, i64 1, i64 1, i64 1>
+; CHECK-NEXT:    [[TMP6:%.*]] = icmp eq <4 x i64> [[TMP5]], [[BROADCAST_SPLAT3]]
+; CHECK-NEXT:    [[TMP7:%.*]] = extractelement <4 x i1> [[TMP6]], i32 0
+; CHECK-NEXT:    br i1 [[TMP7]], label [[FOR_COND6_FOR_COND_CLEANUP8_CRIT_EDGE_US_US4]], label [[FOR_BODY9_US_US1]]
+; CHECK:       for.cond6.for.cond.cleanup8_crit_edge.us.us4:
+; CHECK-NEXT:    [[TMP8:%.*]] = add nuw nsw <4 x i64> [[VEC_IND]], <i64 1, i64 1, i64 1, i64 1>
+; CHECK-NEXT:    [[TMP9:%.*]] = icmp eq <4 x i64> [[TMP8]], [[BROADCAST_SPLAT3]]
+; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; CHECK-NEXT:    [[VEC_IND_NEXT]] = add <4 x i64> [[VEC_IND]], <i64 4, i64 4, i64 4, i64 4>
+; CHECK-NEXT:    [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT:    br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK:       middle.block:
+; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT:    br i1 [[CMP_N]], label [[FOR_COND2_FOR_COND_CLEANUP4_CRIT_EDGE_SPLIT_US_US]], label [[SCALAR_PH]]
+; CHECK:       scalar.ph:
+; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_US]] ]
+; CHECK-NEXT:    br label [[FOR_COND6_PREHEADER_US_US:%.*]]
+; CHECK:       for.cond6.preheader.us.us:
+; CHECK-NEXT:    [[INDVARS_IV37:%.*]] = phi i64 [ [[INDVARS_IV_NEXT38:%.*]], [[FOR_COND6_FOR_COND_CLEANUP8_CRIT_EDGE_US_US:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
+; CHECK-NEXT:    [[ARRAYIDX11_US_US:%.*]] = getelementptr inbounds ptr, ptr [[A]], i64 [[INDVARS_IV37]]
+; CHECK-NEXT:    [[TMP11:%.*]] = load ptr, ptr [[ARRAYIDX11_US_US]], align 8
+; CHECK-NEXT:    br label [[FOR_BODY9_US_US:%.*]]
+; CHECK:       for.body9.us.us:
+; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY9_US_US]] ], [ 0, [[FOR_COND6_PREHEADER_US_US]] ]
+; CHECK-NEXT:    [[ARRAYIDX13_US_US:%.*]] = getelementptr inbounds i32, ptr [[TMP11]], i64 [[INDVARS_IV]]
+; CHECK-NEXT:    store i32 [[TMP2]], ptr [[ARRAYIDX13_US_US]], align 4
+; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; CHECK-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N]]
+; CHECK-NEXT:    br i1 [[EXITCOND_NOT]], label [[FOR_COND6_FOR_COND_CLEANUP8_CRIT_EDGE_US_US]], label [[FOR_BODY9_US_US]]
+; CHECK:       for.cond6.for.cond.cleanup8_crit_edge.us.us:
+; CHECK-NEXT:    [[INDVARS_IV_NEXT38]] = add nuw nsw i64 [[INDVARS_IV37]], 1
+; CHECK-NEXT:    [[EXITCOND41_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT38]], [[N]]
+; CHECK-NEXT:    br i1 [[EXITCOND41_NOT]], label [[FOR_COND2_FOR_COND_CLEANUP4_CRIT_EDGE_SPLIT_US_US]], label [[FOR_COND6_PREHEADER_US_US]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK:       for.cond2.for.cond.cleanup4_crit_edge.split.us.us:
+; CHECK-NEXT:    [[INDVARS_IV_NEXT43]] = add nuw nsw i64 [[INDVARS_IV42]], 1
+; CHECK-NEXT:    [[EXITCOND47_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT43]], [[N]]
+; CHECK-NEXT:    br i1 [[EXITCOND47_NOT]], label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]], label [[FOR_BODY_US]]
+; CHECK:       for.cond.cleanup.loopexit:
+; CHECK-NEXT:    br label [[FOR_COND_CLEANUP]]
+; CHECK:       for.cond.cleanup:
+; CHECK-NEXT:    ret void
+;
+entry:
+  %cmp34 = icmp sgt i64 %n, 0
+  br i1 %cmp34, label %for.body.us.preheader, label %for.cond.cleanup
+
+for.body.us.preheader:
+  br label %for.body.us
+
+for.body.us:
+  %indvars.iv42 = phi i64 [ 0, %for.body.us.preheader ], [ %indvars.iv.next43, %for.cond2.for.cond.cleanup4_crit_edge.split.us.us ]
+  %arrayidx.us = getelementptr inbounds ptr, ptr %a, i64 %indvars.iv42
+  %0 = load ptr, ptr %arrayidx.us, align 8
+  store i32 0, ptr %0, align 4
+  %1 = trunc i64 %indvars.iv42 to i32
+  %2 = add i32 %1, 2
+  br label %for.cond6.preheader.us.us
+
+for.cond6.preheader.us.us:
+  %indvars.iv37 = phi i64 [ %indvars.iv.next38, %for.cond6.for.cond.cleanup8_crit_edge.us.us ], [ 0, %for.body.us ]
+  %arrayidx11.us.us = getelementptr inbounds ptr, ptr %a, i64 %indvars.iv37
+  %3 = load ptr, ptr %arrayidx11.us.us, align 8
+  br label %for.body9.us.us
+
+for.body9.us.us:
+  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body9.us.us ], [ 0, %for.cond6.preheader.us.us ]
+  %arrayidx13.us.us = getelementptr inbounds i32, ptr %3, i64 %indvars.iv
+  store i32 %2, ptr %arrayidx13.us.us, align 4
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+  %exitcond.not = icmp eq i64 %indvars.iv.next, %n
+  br i1 %exitcond.not, label %for.cond6.for.cond.cleanup8_crit_edge.us.us, label %for.body9.us.us
+
+for.cond6.for.cond.cleanup8_crit_edge.us.us:
+  %indvars.iv.next38 = add nuw nsw i64 %indvars.iv37, 1
+  %exitcond41.not = icmp eq i64 %indvars.iv.next38, %n
+  br i1 %exitcond41.not, label %for.cond2.for.cond.cleanup4_crit_edge.split.us.us, label %for.cond6.preheader.us.us, !llvm.loop !3
+
+for.cond2.for.cond.cleanup4_crit_edge.split.us.us:
+  %indvars.iv.next43 = add nuw nsw i64 %indvars.iv42, 1
+  %exitcond47.not = icmp eq i64 %indvars.iv.next43, %n
+  br i1 %exitcond47.not, label %for.cond.cleanup.loopexit, label %for.body.us
+
+for.cond.cleanup.loopexit:
+  br label %for.cond.cleanup
+
+for.cond.cleanup:
+  ret void
+}
+
+!3 = distinct !{!3, !4, !5, !6}
+!4 = !{!"llvm.loop.vectorize.width", i32 4}
+!5 = !{!"llvm.loop.vectorize.scalable.enable", i1 false}
+!6 = !{!"llvm.loop.vectorize.enable", i1 true}