Index: llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
===================================================================
--- llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
+++ llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
@@ -773,7 +773,8 @@
   if (FirstSPAdjustAmount)
     StackSize = FirstSPAdjustAmount;
 
-  if (RVFI->isPushable(MF) && MBBI->getOpcode() == RISCV::CM_POP) {
+  if (RVFI->isPushable(MF) && MBBI != MBB.end() &&
+      MBBI->getOpcode() == RISCV::CM_POP) {
     // Use available stack adjustment in pop instruction to deallocate stack
     // space.
     unsigned PushStack = RVFI->getRVPushRegs() * (STI.getXLen() / 8);
Index: llvm/test/CodeGen/RISCV/zcmp-prolog-epilog-crash.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/RISCV/zcmp-prolog-epilog-crash.ll
@@ -0,0 +1,115 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+; RUN: llc -mattr=+zcmp -verify-machineinstrs  \
+; RUN: -mtriple=riscv32 -target-abi ilp32 < %s \
+; RUN: | FileCheck %s -check-prefixes=RV32IZCMP
+; RUN: llc -mattr=+zcmp -verify-machineinstrs  \
+; RUN: -mtriple=riscv64 -target-abi lp64 < %s  \
+; RUN: | FileCheck %s -check-prefixes=RV64IZCMP
+
+; This test case exposed a crash in RISCVFrameLowering while running the
+; Prologue/Epilogue Insertion & Frame Finalization pass. The root cause was a
+; missing bounds check before dereferencing a returned iterator.
+; NOTE: -mattr=+zcmp is essential to reproduce the issue.
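+; For reference, the fixed condition in RISCVFrameLowering.cpp guards the
+; iterator before dereferencing it (quoted from the change above):
+;   if (RVFI->isPushable(MF) && MBBI != MBB.end() &&
+;       MBBI->getOpcode() == RISCV::CM_POP)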
+
+declare void @f1() nounwind
+
+@x = external dso_local global i1, align 4
+@y = external dso_local global i1, align 4
+@a = external dso_local global i32, align 4
+@b = external dso_local global i32, align 4
+@c = external dso_local global i32, align 4
+@d = external dso_local global i32, align 4
+@z = external dso_local global i32, align 4
+
+define void @f0() nounwind {
+; RV32IZCMP-LABEL: f0:
+; RV32IZCMP:       # %bb.0: # %entry
+; RV32IZCMP-NEXT:    lui a0, %hi(x)
+; RV32IZCMP-NEXT:    lbu a1, %lo(x)(a0)
+; RV32IZCMP-NEXT:    beqz a1, .LBB0_2
+; RV32IZCMP-NEXT:  .LBB0_1: # %cleanup
+; RV32IZCMP-NEXT:    ret
+; RV32IZCMP-NEXT:  .LBB0_2: # %if.end
+; RV32IZCMP-NEXT:    lbu a0, %lo(x)(a0)
+; RV32IZCMP-NEXT:    beqz a0, .LBB0_6
+; RV32IZCMP-NEXT:  # %bb.3: # %if.end3
+; RV32IZCMP-NEXT:    lui a0, %hi(a)
+; RV32IZCMP-NEXT:    lui a1, %hi(b)
+; RV32IZCMP-NEXT:    lui a2, 912092
+; RV32IZCMP-NEXT:  .LBB0_4: # %for.cond
+; RV32IZCMP-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV32IZCMP-NEXT:    lw a3, %lo(a)(a0)
+; RV32IZCMP-NEXT:    lw a4, %lo(b)(a1)
+; RV32IZCMP-NEXT:    bge a3, a4, .LBB0_1
+; RV32IZCMP-NEXT:  # %bb.5: # %for.body
+; RV32IZCMP-NEXT:    # in Loop: Header=BB0_4 Depth=1
+; RV32IZCMP-NEXT:    sw zero, -273(a2)
+; RV32IZCMP-NEXT:    j .LBB0_4
+; RV32IZCMP-NEXT:  .LBB0_6: # %if.then2
+; RV32IZCMP-NEXT:    tail f1@plt
+;
+; RV64IZCMP-LABEL: f0:
+; RV64IZCMP:       # %bb.0: # %entry
+; RV64IZCMP-NEXT:    lui a0, %hi(x)
+; RV64IZCMP-NEXT:    lbu a1, %lo(x)(a0)
+; RV64IZCMP-NEXT:    beqz a1, .LBB0_2
+; RV64IZCMP-NEXT:  .LBB0_1: # %cleanup
+; RV64IZCMP-NEXT:    ret
+; RV64IZCMP-NEXT:  .LBB0_2: # %if.end
+; RV64IZCMP-NEXT:    lbu a0, %lo(x)(a0)
+; RV64IZCMP-NEXT:    beqz a0, .LBB0_6
+; RV64IZCMP-NEXT:  # %bb.3: # %if.end3
+; RV64IZCMP-NEXT:    lui a0, %hi(a)
+; RV64IZCMP-NEXT:    lui a1, %hi(b)
+; RV64IZCMP-NEXT:    lui a2, 228023
+; RV64IZCMP-NEXT:    slli a2, a2, 2
+; RV64IZCMP-NEXT:  .LBB0_4: # %for.cond
+; RV64IZCMP-NEXT:    # =>This Inner Loop Header: Depth=1
+; RV64IZCMP-NEXT:    lw a3, %lo(a)(a0)
+; RV64IZCMP-NEXT:    lw a4, %lo(b)(a1)
+; RV64IZCMP-NEXT:    bge a3, a4, .LBB0_1
+; RV64IZCMP-NEXT:  # %bb.5: # %for.body
+; RV64IZCMP-NEXT:    # in Loop: Header=BB0_4 Depth=1
+; RV64IZCMP-NEXT:    sw zero, -273(a2)
+; RV64IZCMP-NEXT:    j .LBB0_4
+; RV64IZCMP-NEXT:  .LBB0_6: # %if.then2
+; RV64IZCMP-NEXT:    tail f1@plt
+entry:
+  %0 = load i1, ptr @x, align 4
+  br i1 %0, label %cleanup, label %if.end
+
+if.end:
+  %1 = load i1, ptr @x, align 4
+  br i1 %1, label %if.end3, label %if.then2
+
+if.then2:
+  tail call void @f1()
+  br label %cleanup
+
+if.end3:
+  br label %for.cond
+
+for.cond:
+  %2 = load i32, ptr @a, align 4
+  %3 = load i32, ptr @b, align 4
+  %cmp6 = icmp slt i32 %2, %3
+  br i1 %cmp6, label %for.body, label %for.cond8
+
+for.body:
+  store i32 0, ptr inttoptr (i32 -559038737 to ptr), align 4
+  br label %for.cond
+
+for.cond8:
+  %4 = load i32, ptr @c, align 4
+  %5 = load i32, ptr @d, align 4
+  %cmp10 = icmp slt i32 %4, %5
+  br label %cleanup
+
+cleanup:
+  ret void
+}
+
Index: llvm/test/CodeGen/RISCV/zcmp-prolog-epilog-crash.mir
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/RISCV/zcmp-prolog-epilog-crash.mir
@@ -0,0 +1,158 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 2
+# REQUIRES: asserts
+# RUN: llc  %s -o - -mtriple=riscv32 -mattr=+zcmp -target-abi ilp32 -run-pass=prologepilog \
+# RUN:   -simplify-mir -verify-machineinstrs | FileCheck %s
+
+--- |
+  define hidden void @f(fp128 %a) local_unnamed_addr #0 {
+  entry:
+    %0 = bitcast fp128 %a to i128
+    %and.i = lshr i128 %0, 112
+    %1 = trunc i128 %and.i to i32
+    %2 = and i32 %1, 32767
+    %or.i = or i128 poison, 5192296858534827628530496329220096
+    br label %if.end.i
+
+  if.end.i:                                         ; preds = %entry
+    br i1 poison, label %exit, label %if.then12.i
+
+  if.then12.i:                                      ; preds = %if.end.i
+    %sub13.i = sub nuw nsw i32 16495, %2
+    %sh_prom.i = zext i32 %sub13.i to i128
+    %shr14.i = lshr i128 %or.i, %sh_prom.i
+    %conv15.i = trunc i128 %shr14.i to i32
+    br label %exit
+
+  exit:                                             ; preds = %if.then12.i, %if.end.i
+    %retval.0.i = phi i32 [ %conv15.i, %if.then12.i ], [ -1, %if.end.i ]
+    ret void
+  }
+...
+---
+name:            f
+alignment:       2
+tracksRegLiveness: true
+tracksDebugUserValues: true
+liveins:
+  - { reg: '$x10' }
+frameInfo:
+  maxAlignment:    1
+  localFrameSize:  32
+  savePoint:       '%bb.2'
+  restorePoint:    '%bb.2'
+stack:
+  - { id: 0, size: 32, alignment: 1, local-offset: -32 }
+machineFunctionInfo:
+  varArgsFrameIndex: 0
+  varArgsSaveSize: 0
+body:             |
+  ; CHECK-LABEL: name: f
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $x10
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   renamable $x10 = ADDI $x0, -1
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.1.if.end.i:
+  ; CHECK-NEXT:   liveins: $x10
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   BNE $x0, $x0, %bb.3
+  ; CHECK-NEXT:   PseudoBR %bb.2
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.2.if.then12.i:
+  ; CHECK-NEXT:   liveins: $x10
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   $x2 = frame-setup ADDI $x2, -32
+  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION def_cfa_offset 32
+  ; CHECK-NEXT:   SB $x0, $x2, 31 :: (store (s8) into %stack.0 + 31)
+  ; CHECK-NEXT:   SB $x0, $x2, 30 :: (store (s8) into %stack.0 + 30)
+  ; CHECK-NEXT:   SB $x0, $x2, 29 :: (store (s8) into %stack.0 + 29)
+  ; CHECK-NEXT:   SB $x0, $x2, 28 :: (store (s8) into %stack.0 + 28)
+  ; CHECK-NEXT:   SB $x0, $x2, 27 :: (store (s8) into %stack.0 + 27)
+  ; CHECK-NEXT:   SB $x0, $x2, 26 :: (store (s8) into %stack.0 + 26)
+  ; CHECK-NEXT:   SB $x0, $x2, 25 :: (store (s8) into %stack.0 + 25)
+  ; CHECK-NEXT:   SB $x0, $x2, 24 :: (store (s8) into %stack.0 + 24)
+  ; CHECK-NEXT:   SB $x0, $x2, 23 :: (store (s8) into %stack.0 + 23)
+  ; CHECK-NEXT:   SB $x0, $x2, 22 :: (store (s8) into %stack.0 + 22)
+  ; CHECK-NEXT:   SB $x0, $x2, 21 :: (store (s8) into %stack.0 + 21)
+  ; CHECK-NEXT:   SB $x0, $x2, 20 :: (store (s8) into %stack.0 + 20)
+  ; CHECK-NEXT:   SB $x0, $x2, 19 :: (store (s8) into %stack.0 + 19)
+  ; CHECK-NEXT:   SB $x0, $x2, 18 :: (store (s8) into %stack.0 + 18)
+  ; CHECK-NEXT:   SB $x0, $x2, 17 :: (store (s8) into %stack.0 + 17)
+  ; CHECK-NEXT:   SB $x0, $x2, 16 :: (store (s8) into %stack.0 + 16)
+  ; CHECK-NEXT:   SB renamable $x10, $x2, 0 :: (store (s8) into %stack.0)
+  ; CHECK-NEXT:   SB renamable $x10, $x2, 4 :: (store (s8) into %stack.0 + 4)
+  ; CHECK-NEXT:   renamable $x11 = SRLI renamable $x10, 24
+  ; CHECK-NEXT:   SB renamable $x11, $x2, 3 :: (store (s8) into %stack.0 + 3)
+  ; CHECK-NEXT:   renamable $x12 = SRLI renamable $x10, 16
+  ; CHECK-NEXT:   SB renamable $x12, $x2, 2 :: (store (s8) into %stack.0 + 2)
+  ; CHECK-NEXT:   renamable $x13 = SRLI renamable $x10, 8
+  ; CHECK-NEXT:   SB renamable $x13, $x2, 1 :: (store (s8) into %stack.0 + 1)
+  ; CHECK-NEXT:   SB renamable $x10, $x2, 8 :: (store (s8) into %stack.0 + 8)
+  ; CHECK-NEXT:   SB renamable $x11, $x2, 7 :: (store (s8) into %stack.0 + 7)
+  ; CHECK-NEXT:   SB renamable $x12, $x2, 6 :: (store (s8) into %stack.0 + 6)
+  ; CHECK-NEXT:   SB renamable $x13, $x2, 5 :: (store (s8) into %stack.0 + 5)
+  ; CHECK-NEXT:   SB killed renamable $x10, $x2, 12 :: (store (s8) into %stack.0 + 12)
+  ; CHECK-NEXT:   SB renamable $x11, $x2, 11 :: (store (s8) into %stack.0 + 11)
+  ; CHECK-NEXT:   SB renamable $x12, $x2, 10 :: (store (s8) into %stack.0 + 10)
+  ; CHECK-NEXT:   SB renamable $x13, $x2, 9 :: (store (s8) into %stack.0 + 9)
+  ; CHECK-NEXT:   SB killed renamable $x11, $x2, 15 :: (store (s8) into %stack.0 + 15)
+  ; CHECK-NEXT:   SB killed renamable $x12, $x2, 14 :: (store (s8) into %stack.0 + 14)
+  ; CHECK-NEXT:   SB killed renamable $x13, $x2, 13 :: (store (s8) into %stack.0 + 13)
+  ; CHECK-NEXT:   $x2 = frame-destroy ADDI $x2, 32
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT: bb.3.exit:
+  ; CHECK-NEXT:   PseudoRET
+  bb.0.entry:
+    liveins: $x10
+
+    renamable $x10 = ADDI $x0, -1
+
+  bb.1.if.end.i:
+    liveins: $x10
+
+    BNE $x0, $x0, %bb.3
+    PseudoBR %bb.2
+
+  bb.2.if.then12.i:
+    liveins: $x10
+
+    SB $x0, %stack.0, 31 :: (store (s8) into %stack.0 + 31)
+    SB $x0, %stack.0, 30 :: (store (s8) into %stack.0 + 30)
+    SB $x0, %stack.0, 29 :: (store (s8) into %stack.0 + 29)
+    SB $x0, %stack.0, 28 :: (store (s8) into %stack.0 + 28)
+    SB $x0, %stack.0, 27 :: (store (s8) into %stack.0 + 27)
+    SB $x0, %stack.0, 26 :: (store (s8) into %stack.0 + 26)
+    SB $x0, %stack.0, 25 :: (store (s8) into %stack.0 + 25)
+    SB $x0, %stack.0, 24 :: (store (s8) into %stack.0 + 24)
+    SB $x0, %stack.0, 23 :: (store (s8) into %stack.0 + 23)
+    SB $x0, %stack.0, 22 :: (store (s8) into %stack.0 + 22)
+    SB $x0, %stack.0, 21 :: (store (s8) into %stack.0 + 21)
+    SB $x0, %stack.0, 20 :: (store (s8) into %stack.0 + 20)
+    SB $x0, %stack.0, 19 :: (store (s8) into %stack.0 + 19)
+    SB $x0, %stack.0, 18 :: (store (s8) into %stack.0 + 18)
+    SB $x0, %stack.0, 17 :: (store (s8) into %stack.0 + 17)
+    SB $x0, %stack.0, 16 :: (store (s8) into %stack.0 + 16)
+    SB renamable $x10, %stack.0, 0 :: (store (s8) into %stack.0)
+    SB renamable $x10, %stack.0, 4 :: (store (s8) into %stack.0 + 4)
+    renamable $x11 = SRLI renamable $x10, 24
+    SB renamable $x11, %stack.0, 3 :: (store (s8) into %stack.0 + 3)
+    renamable $x12 = SRLI renamable $x10, 16
+    SB renamable $x12, %stack.0, 2 :: (store (s8) into %stack.0 + 2)
+    renamable $x13 = SRLI renamable $x10, 8
+    SB renamable $x13, %stack.0, 1 :: (store (s8) into %stack.0 + 1)
+    SB renamable $x10, %stack.0, 8 :: (store (s8) into %stack.0 + 8)
+    SB renamable $x11, %stack.0, 7 :: (store (s8) into %stack.0 + 7)
+    SB renamable $x12, %stack.0, 6 :: (store (s8) into %stack.0 + 6)
+    SB renamable $x13, %stack.0, 5 :: (store (s8) into %stack.0 + 5)
+    SB killed renamable $x10, %stack.0, 12 :: (store (s8) into %stack.0 + 12)
+    SB renamable $x11, %stack.0, 11 :: (store (s8) into %stack.0 + 11)
+    SB renamable $x12, %stack.0, 10 :: (store (s8) into %stack.0 + 10)
+    SB renamable $x13, %stack.0, 9 :: (store (s8) into %stack.0 + 9)
+    SB killed renamable $x11, %stack.0, 15 :: (store (s8) into %stack.0 + 15)
+    SB killed renamable $x12, %stack.0, 14 :: (store (s8) into %stack.0 + 14)
+    SB killed renamable $x13, %stack.0, 13 :: (store (s8) into %stack.0 + 13)
+
+  bb.3.exit:
+    PseudoRET
+
+...