diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -69,6 +69,9 @@
     setOperationAction(ISD::EH_DWARF_CFA, MVT::i64, Custom);
 
   setOperationAction(ISD::DYNAMIC_STACKALLOC, GRLenVT, Expand);
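+  // Expanding STACKSAVE/STACKRESTORE turns them into plain copies from/to
+  // the stack pointer register (see getStackPointerRegisterToSaveRestore).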
+  setOperationAction({ISD::STACKSAVE, ISD::STACKRESTORE}, MVT::Other, Expand);
   setOperationAction(ISD::VASTART, MVT::Other, Custom);
   setOperationAction({ISD::VAARG, ISD::VACOPY, ISD::VAEND}, MVT::Other, Expand);
 
diff --git a/llvm/test/CodeGen/LoongArch/alloca.ll b/llvm/test/CodeGen/LoongArch/alloca.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/alloca.ll
@@ -0,0 +1,197 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch32 --verify-machineinstrs < %s \
+; RUN:   | FileCheck %s --check-prefix=LA32
+; RUN: llc --mtriple=loongarch64 --verify-machineinstrs < %s \
+; RUN:   | FileCheck %s --check-prefix=LA64
+
+declare void @notdead(ptr)
+
+;; These tests must ensure the stack pointer is restored using the frame
+;; pointer, since a variable-sized alloca leaves $sp at an offset that is
+;; unknown at compile time.
+
+define void @simple_alloca(i32 %n) nounwind {
+; LA32-LABEL: simple_alloca:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $fp, $sp, 8 # 4-byte Folded Spill
+; LA32-NEXT:    addi.w $fp, $sp, 16
+; LA32-NEXT:    addi.w $a0, $a0, 15
+; LA32-NEXT:    addi.w $a1, $zero, -16
+; LA32-NEXT:    and $a0, $a0, $a1
+; LA32-NEXT:    sub.w $a0, $sp, $a0
+; LA32-NEXT:    move $sp, $a0
+; LA32-NEXT:    bl %plt(notdead)
+; LA32-NEXT:    addi.w $sp, $fp, -16
+; LA32-NEXT:    ld.w $fp, $sp, 8 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: simple_alloca:
+; LA64:       # %bb.0:
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    st.d $fp, $sp, 0 # 8-byte Folded Spill
+; LA64-NEXT:    addi.d $fp, $sp, 16
+; LA64-NEXT:    addi.w $a1, $zero, -16
+; LA64-NEXT:    lu32i.d $a1, 1
+; LA64-NEXT:    bstrpick.d $a0, $a0, 31, 0
+; LA64-NEXT:    addi.d $a0, $a0, 15
+; LA64-NEXT:    and $a0, $a0, $a1
+; LA64-NEXT:    sub.d $a0, $sp, $a0
+; LA64-NEXT:    move $sp, $a0
+; LA64-NEXT:    bl %plt(notdead)
+; LA64-NEXT:    addi.d $sp, $fp, -16
+; LA64-NEXT:    ld.d $fp, $sp, 0 # 8-byte Folded Reload
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+  %1 = alloca i8, i32 %n
+  call void @notdead(ptr %1)
+  ret void
+}
+
+declare ptr @llvm.stacksave()
+declare void @llvm.stackrestore(ptr)
+
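+;; With STACKSAVE/STACKRESTORE expanded, the saved stack pointer is just a
+;; copy of $sp into a callee-saved register ($s0 below), and the restore is
+;; a plain move back.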
+define void @scoped_alloca(i32 %n) nounwind {
+; LA32-LABEL: scoped_alloca:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $fp, $sp, 8 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $s0, $sp, 4 # 4-byte Folded Spill
+; LA32-NEXT:    addi.w $fp, $sp, 16
+; LA32-NEXT:    addi.w $a0, $a0, 15
+; LA32-NEXT:    addi.w $a1, $zero, -16
+; LA32-NEXT:    and $a0, $a0, $a1
+; LA32-NEXT:    move $s0, $sp
+; LA32-NEXT:    sub.w $a0, $sp, $a0
+; LA32-NEXT:    move $sp, $a0
+; LA32-NEXT:    bl %plt(notdead)
+; LA32-NEXT:    move $sp, $s0
+; LA32-NEXT:    addi.w $sp, $fp, -16
+; LA32-NEXT:    ld.w $s0, $sp, 4 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $fp, $sp, 8 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: scoped_alloca:
+; LA64:       # %bb.0:
+; LA64-NEXT:    addi.d $sp, $sp, -32
+; LA64-NEXT:    st.d $ra, $sp, 24 # 8-byte Folded Spill
+; LA64-NEXT:    st.d $fp, $sp, 16 # 8-byte Folded Spill
+; LA64-NEXT:    st.d $s0, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    addi.d $fp, $sp, 32
+; LA64-NEXT:    addi.w $a1, $zero, -16
+; LA64-NEXT:    lu32i.d $a1, 1
+; LA64-NEXT:    bstrpick.d $a0, $a0, 31, 0
+; LA64-NEXT:    addi.d $a0, $a0, 15
+; LA64-NEXT:    and $a0, $a0, $a1
+; LA64-NEXT:    move $s0, $sp
+; LA64-NEXT:    sub.d $a0, $sp, $a0
+; LA64-NEXT:    move $sp, $a0
+; LA64-NEXT:    bl %plt(notdead)
+; LA64-NEXT:    move $sp, $s0
+; LA64-NEXT:    addi.d $sp, $fp, -32
+; LA64-NEXT:    ld.d $s0, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    ld.d $fp, $sp, 16 # 8-byte Folded Reload
+; LA64-NEXT:    ld.d $ra, $sp, 24 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 32
+; LA64-NEXT:    ret
+  %sp = call ptr @llvm.stacksave()
+  %addr = alloca i8, i32 %n
+  call void @notdead(ptr %addr)
+  call void @llvm.stackrestore(ptr %sp)
+  ret void
+}
+
+declare void @func(ptr, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32)
+
+;; Check that outgoing arguments passed on the stack do not corrupt a
+;; variable-sized stack object.
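+;; The outgoing-argument area is carved out by a separate $sp adjustment
+;; after the dynamic allocation and freed again before the epilogue.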
+define void @alloca_callframe(i32 %n) nounwind {
+; LA32-LABEL: alloca_callframe:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    st.w $fp, $sp, 8 # 4-byte Folded Spill
+; LA32-NEXT:    addi.w $fp, $sp, 16
+; LA32-NEXT:    addi.w $a0, $a0, 15
+; LA32-NEXT:    addi.w $a1, $zero, -16
+; LA32-NEXT:    and $a0, $a0, $a1
+; LA32-NEXT:    sub.w $a0, $sp, $a0
+; LA32-NEXT:    move $sp, $a0
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    ori $a1, $zero, 12
+; LA32-NEXT:    st.w $a1, $sp, 12
+; LA32-NEXT:    ori $a1, $zero, 11
+; LA32-NEXT:    st.w $a1, $sp, 8
+; LA32-NEXT:    ori $a1, $zero, 10
+; LA32-NEXT:    st.w $a1, $sp, 4
+; LA32-NEXT:    ori $a1, $zero, 9
+; LA32-NEXT:    st.w $a1, $sp, 0
+; LA32-NEXT:    ori $a1, $zero, 2
+; LA32-NEXT:    ori $a2, $zero, 3
+; LA32-NEXT:    ori $a3, $zero, 4
+; LA32-NEXT:    ori $a4, $zero, 5
+; LA32-NEXT:    ori $a5, $zero, 6
+; LA32-NEXT:    ori $a6, $zero, 7
+; LA32-NEXT:    ori $a7, $zero, 8
+; LA32-NEXT:    bl %plt(func)
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    addi.w $sp, $fp, -16
+; LA32-NEXT:    ld.w $fp, $sp, 8 # 4-byte Folded Reload
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    ret
+;
+; LA64-LABEL: alloca_callframe:
+; LA64:       # %bb.0:
+; LA64-NEXT:    addi.d $sp, $sp, -16
+; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
+; LA64-NEXT:    st.d $fp, $sp, 0 # 8-byte Folded Spill
+; LA64-NEXT:    addi.d $fp, $sp, 16
+; LA64-NEXT:    addi.w $a1, $zero, -16
+; LA64-NEXT:    lu32i.d $a1, 1
+; LA64-NEXT:    bstrpick.d $a0, $a0, 31, 0
+; LA64-NEXT:    addi.d $a0, $a0, 15
+; LA64-NEXT:    and $a0, $a0, $a1
+; LA64-NEXT:    sub.d $a0, $sp, $a0
+; LA64-NEXT:    move $sp, $a0
+; LA64-NEXT:    addi.d $sp, $sp, -32
+; LA64-NEXT:    ori $a1, $zero, 12
+; LA64-NEXT:    st.d $a1, $sp, 24
+; LA64-NEXT:    ori $a1, $zero, 11
+; LA64-NEXT:    st.d $a1, $sp, 16
+; LA64-NEXT:    ori $a1, $zero, 10
+; LA64-NEXT:    st.d $a1, $sp, 8
+; LA64-NEXT:    ori $a1, $zero, 9
+; LA64-NEXT:    st.d $a1, $sp, 0
+; LA64-NEXT:    ori $a1, $zero, 2
+; LA64-NEXT:    ori $a2, $zero, 3
+; LA64-NEXT:    ori $a3, $zero, 4
+; LA64-NEXT:    ori $a4, $zero, 5
+; LA64-NEXT:    ori $a5, $zero, 6
+; LA64-NEXT:    ori $a6, $zero, 7
+; LA64-NEXT:    ori $a7, $zero, 8
+; LA64-NEXT:    bl %plt(func)
+; LA64-NEXT:    addi.d $sp, $sp, 32
+; LA64-NEXT:    addi.d $sp, $fp, -16
+; LA64-NEXT:    ld.d $fp, $sp, 0 # 8-byte Folded Reload
+; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
+; LA64-NEXT:    addi.d $sp, $sp, 16
+; LA64-NEXT:    ret
+  %1 = alloca i8, i32 %n
+  call void @func(ptr %1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8,
+                  i32 9, i32 10, i32 11, i32 12)
+  ret void
+}