Index: lib/Target/Mips/MipsFrameLowering.h
===================================================================
--- lib/Target/Mips/MipsFrameLowering.h
+++ lib/Target/Mips/MipsFrameLowering.h
@@ -30,13 +30,16 @@
 
   static const MipsFrameLowering *create(const MipsSubtarget &ST);
 
-  bool hasFP(const MachineFunction &MF) const override;
-
   void
   eliminateCallFramePseudoInstr(MachineFunction &MF,
                                 MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator I) const override;
 
+
+  bool hasFP(const MachineFunction &MF) const override;
+
+  bool hasBP(const MachineFunction &MF) const;
+
 protected:
   uint64_t estimateStackSize(const MachineFunction &MF) const;
 };
Index: lib/Target/Mips/MipsFrameLowering.cpp
===================================================================
--- lib/Target/Mips/MipsFrameLowering.cpp
+++ lib/Target/Mips/MipsFrameLowering.cpp
@@ -90,12 +90,23 @@
 }
 
 // hasFP - Return true if the specified function should have a dedicated frame
-// pointer register.  This is true if the function has variable sized allocas or
-// if frame pointer elimination is disabled.
+// pointer register.  This is true if the function has variable sized allocas,
+// if it needs dynamic stack realignment, if frame pointer elimination is
+// disabled, or if the frame address is taken.
 bool MipsFrameLowering::hasFP(const MachineFunction &MF) const {
   const MachineFrameInfo *MFI = MF.getFrameInfo();
+  const TargetRegisterInfo *TRI = STI.getRegisterInfo();
+
   return MF.getTarget().Options.DisableFramePointerElim(MF) ||
-      MFI->hasVarSizedObjects() || MFI->isFrameAddressTaken();
+      MFI->hasVarSizedObjects() || MFI->isFrameAddressTaken() ||
+      TRI->needsStackRealignment(MF);
+}
+
+bool MipsFrameLowering::hasBP(const MachineFunction &MF) const {
+  const MachineFrameInfo *MFI = MF.getFrameInfo();
+  const TargetRegisterInfo *TRI = STI.getRegisterInfo();
+
+  return MFI->hasVarSizedObjects() && TRI->needsStackRealignment(MF);
 }
 
 uint64_t MipsFrameLowering::estimateStackSize(const MachineFunction &MF) const {
Index: lib/Target/Mips/MipsRegisterInfo.h
===================================================================
--- lib/Target/Mips/MipsRegisterInfo.h
+++ lib/Target/Mips/MipsRegisterInfo.h
@@ -60,6 +60,11 @@
   void processFunctionBeforeFrameFinalized(MachineFunction &MF,
                                        RegScavenger *RS = nullptr) const;
 
+  // Stack realignment queries.
+  bool canRealignStack(const MachineFunction &MF) const;
+
+  bool needsStackRealignment(const MachineFunction &MF) const override;
+
   /// Debug information queries.
   unsigned getFrameRegister(const MachineFunction &MF) const override;
 
Index: lib/Target/Mips/MipsRegisterInfo.cpp
===================================================================
--- lib/Target/Mips/MipsRegisterInfo.cpp
+++ lib/Target/Mips/MipsRegisterInfo.cpp
@@ -21,6 +21,7 @@
 #include "llvm/ADT/BitVector.h"
 #include "llvm/ADT/STLExtras.h"
 #include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
 #include "llvm/CodeGen/MachineFunction.h"
 #include "llvm/CodeGen/MachineInstrBuilder.h"
 #include "llvm/IR/Constants.h"
@@ -178,6 +179,14 @@
     else {
       Reserved.set(Mips::FP);
       Reserved.set(Mips::FP_64);
+
+      // Reserve the base register if we need to both realign the stack and
+      // allocate variable-sized objects at runtime.
+      if (needsStackRealignment(MF) &&
+          MF.getFrameInfo()->hasVarSizedObjects()) {
+        Reserved.set(Mips::S7);
+        Reserved.set(Mips::S7_64);
+      }
     }
   }
 
@@ -271,6 +280,55 @@
   else
     return TFI->hasFP(MF) ? (IsN64 ? Mips::FP_64 : Mips::FP) :
                             (IsN64 ? Mips::SP_64 : Mips::SP);
+}
 
+bool MipsRegisterInfo::canRealignStack(const MachineFunction &MF) const {
+  const MipsSubtarget &Subtarget = MF.getSubtarget<MipsSubtarget>();
+  unsigned FP = Subtarget.isGP32bit() ? Mips::FP : Mips::FP_64;
+  unsigned BP = Subtarget.isGP32bit() ? Mips::S7 : Mips::S7_64;
+
+  // Support dynamic stack realignment only for targets with standard encoding.
+  if (!Subtarget.hasStandardEncoding())
+    return false;
+
+  // We can't perform dynamic stack realignment if we can't reserve the
+  // frame pointer register.
+  if (!MF.getRegInfo().canReserveReg(FP))
+    return false;
+
+  // We can realign the stack if we know the maximum call frame size and we
+  // don't have variable sized objects.
+  if (Subtarget.getFrameLowering()->hasReservedCallFrame(MF))
+    return true;
+
+  // We have to reserve the base pointer register in the presence of variable
+  // sized objects.
+  return MF.getRegInfo().canReserveReg(BP);
 }
 
+bool MipsRegisterInfo::needsStackRealignment(const MachineFunction &MF) const {
+  const MipsSubtarget &Subtarget = MF.getSubtarget<MipsSubtarget>();
+  const MachineFrameInfo *MFI = MF.getFrameInfo();
+
+  bool CanRealign = canRealignStack(MF);
+
+  // Avoid realigning functions that explicitly do not want to be realigned.
+  // Normally, we should report an error when a function should be dynamically
+  // realigned but also has the attribute no-realign-stack. Unfortunately,
+  // with this attribute, MachineFrameInfo clamps each new object's alignment
+  // to that of the stack's alignment as specified by the ABI. As a result,
+  // the information of whether we have objects with larger alignment
+  // requirement than the stack's alignment is already lost at this point.
+  if (MF.getFunction()->hasFnAttribute("no-realign-stack"))
+    return false;
+
+  const Function *F = MF.getFunction();
+  if (F->hasFnAttribute(Attribute::StackAlignment))
+    return CanRealign;
+
+  unsigned StackAlignment = Subtarget.getFrameLowering()->getStackAlignment();
+  if (MFI->getMaxAlignment() > StackAlignment)
+    return CanRealign;
+
+  return false;
+}
Index: lib/Target/Mips/MipsSEFrameLowering.cpp
===================================================================
--- lib/Target/Mips/MipsSEFrameLowering.cpp
+++ lib/Target/Mips/MipsSEFrameLowering.cpp
@@ -381,6 +381,11 @@
   unsigned FP = ABI.GetFramePtr();
   unsigned ZERO = ABI.GetNullPtr();
   unsigned ADDu = ABI.GetPtrAdduOp();
+  unsigned ADDiu = ABI.GetPtrAddiuOp();
+  unsigned AND = ABI.IsN64() ? Mips::AND64 : Mips::AND;
+
+  const TargetRegisterClass *RC = ABI.ArePtrs64bit() ?
+        &Mips::GPR64RegClass : &Mips::GPR32RegClass;
 
   // First, compute final stack size.
   uint64_t StackSize = MFI->getStackSize();
@@ -463,15 +468,12 @@
   }
 
   if (MipsFI->callsEhReturn()) {
-    const TargetRegisterClass *PtrRC =
-        ABI.ArePtrs64bit() ? &Mips::GPR64RegClass : &Mips::GPR32RegClass;
-
     // Insert instructions that spill eh data registers.
     for (int I = 0; I < 4; ++I) {
       if (!MBB.isLiveIn(ABI.GetEhDataReg(I)))
         MBB.addLiveIn(ABI.GetEhDataReg(I));
       TII.storeRegToStackSlot(MBB, MBBI, ABI.GetEhDataReg(I), false,
-                              MipsFI->getEhDataRegFI(I), PtrRC, &RegInfo);
+                              MipsFI->getEhDataRegFI(I), RC, &RegInfo);
     }
 
     // Emit .cfi_offset directives for eh data registers.
@@ -496,6 +498,26 @@
         nullptr, MRI->getDwarfRegNum(FP, true)));
     BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
         .addCFIIndex(CFIIndex);
+
+    if (RegInfo.needsStackRealignment(MF)) {
+      // addiu $Reg, $zero, -MaxAlignment
+      // and $sp, $sp, $Reg
+      unsigned VR = MF.getRegInfo().createVirtualRegister(RC);
+      assert(isInt<16>(MFI->getMaxAlignment()) &&
+             "Function's alignment size requirement is not supported.");
+      int MaxAlign = - (signed) MFI->getMaxAlignment();
+
+      BuildMI(MBB, MBBI, dl, TII.get(ADDiu), VR).addReg(ZERO) .addImm(MaxAlign);
+      BuildMI(MBB, MBBI, dl, TII.get(AND), SP).addReg(SP).addReg(VR);
+
+      if (hasBP(MF)) {
+        // move $s7, $sp
+        unsigned BP = STI.isABI_N64() ? Mips::S7_64 : Mips::S7;
+        BuildMI(MBB, MBBI, dl, TII.get(ADDu), BP)
+          .addReg(SP)
+          .addReg(ZERO);
+      }
+    }
   }
 }
 
@@ -605,10 +627,14 @@
   MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
   MipsABIInfo ABI = STI.getABI();
   unsigned FP = ABI.GetFramePtr();
+  unsigned BP = ABI.IsN64() ? Mips::S7_64 : Mips::S7;
 
   // Mark $fp as used if function has dedicated frame pointer.
   if (hasFP(MF))
     MRI.setPhysRegUsed(FP);
+  // Mark $s7 as used if function has dedicated base pointer.
+  if (hasBP(MF))
+    MRI.setPhysRegUsed(BP);
 
   // Create spill slots for eh data registers if function calls eh_return.
   if (MipsFI->callsEhReturn())
Index: lib/Target/Mips/MipsSERegisterInfo.cpp
===================================================================
--- lib/Target/Mips/MipsSERegisterInfo.cpp
+++ lib/Target/Mips/MipsSERegisterInfo.cpp
@@ -110,8 +110,11 @@
   MachineFunction &MF = *MI.getParent()->getParent();
   MachineFrameInfo *MFI = MF.getFrameInfo();
   MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
+
   MipsABIInfo ABI =
       static_cast<const MipsTargetMachine &>(MF.getTarget()).getABI();
+  const MipsRegisterInfo *RegInfo =
+    static_cast<const MipsRegisterInfo *>(MF.getSubtarget().getRegisterInfo());
 
   const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
   int MinCSFI = 0;
@@ -135,7 +138,14 @@
 
   if ((FrameIndex >= MinCSFI && FrameIndex <= MaxCSFI) || EhDataRegFI)
     FrameReg = ABI.GetStackPtr();
-  else
+  else if (RegInfo->needsStackRealignment(MF)) {
+    if (MFI->hasVarSizedObjects() && !MFI->isFixedObjectIndex(FrameIndex))
+      FrameReg = ABI.IsN64() ? Mips::S7_64 : Mips::S7;
+    else if (MFI->isFixedObjectIndex(FrameIndex))
+      FrameReg = getFrameRegister(MF);
+    else
+      FrameReg = ABI.IsN64() ? Mips::SP_64 : Mips::SP;
+  } else
     FrameReg = getFrameRegister(MF);
 
   // Calculate final offset.
Index: test/CodeGen/Mips/dynamic-stack-realignment.ll
===================================================================
--- /dev/null
+++ test/CodeGen/Mips/dynamic-stack-realignment.ll
@@ -0,0 +1,287 @@
+; RUN: llc < %s -march=mips -mcpu=mips2 | FileCheck %s \
+; RUN:    --check-prefix=ALL --check-prefix=GP32
+; RUN: llc < %s -march=mips -mcpu=mips32 | FileCheck %s \
+; RUN:    --check-prefix=ALL --check-prefix=GP32
+; RUN: llc < %s -march=mips -mcpu=mips32r6 | FileCheck %s \
+; RUN:    --check-prefix=ALL --check-prefix=GP32
+; RUN: llc < %s -march=mips64 -mcpu=mips3 | FileCheck %s \
+; RUN:    --check-prefix=ALL --check-prefix=GP64 -check-prefix=N64
+; RUN: llc < %s -march=mips64 -mcpu=mips64 | FileCheck %s \
+; RUN:    --check-prefix=ALL --check-prefix=GP64 -check-prefix=N64
+; RUN: llc < %s -march=mips64 -mcpu=mips64r6 | FileCheck %s \
+; RUN:    --check-prefix=ALL --check-prefix=GP64 -check-prefix=N64
+; RUN: llc < %s -march=mips64 -mcpu=mips3 -target-abi n32 | FileCheck %s \
+; RUN:    --check-prefix=ALL --check-prefix=GP64 -check-prefix=N32
+; RUN: llc < %s -march=mips64 -mcpu=mips64 -target-abi n32 | FileCheck %s \
+; RUN:    --check-prefix=ALL --check-prefix=GP64 -check-prefix=N32
+; RUN: llc < %s -march=mips64 -mcpu=mips64r6 -target-abi n32 | FileCheck %s \
+; RUN:    --check-prefix=ALL --check-prefix=GP64 -check-prefix=N32
+
+; Check dynamic stack realignment in functions without variable-sized objects.
+
+declare void @helper_01(i32, i32, i32, i32, i32*)
+
+; O32 ABI
+define void @func_01() {
+entry:
+; GP32-LABEL: func_01:
+
+  ; prologue
+  ; GP32:       addiu   $sp, $sp, -1024
+  ; GP32:       sw      $ra, 1020($sp)
+  ; GP32:       sw      $fp, 1016($sp)
+  ;
+  ; GP32:       move    $fp, $sp
+  ; GP32:       addiu   $[[T0:[0-9]+|ra|gp]], $zero, -512
+  ; GP32-NEXT:  and     $sp, $sp, $[[T0]]
+
+  ; body
+  ; GP32:       addiu   $[[T1:[0-9]+]], $sp, 512
+  ; GP32:       sw      $[[T1]], 16($sp)
+
+  ; epilogue
+  ; GP32:       move    $sp, $fp
+  ; GP32:       lw      $fp, 1016($sp)
+  ; GP32:       lw      $ra, 1020($sp)
+  ; GP32:       addiu   $sp, $sp, 1024
+
+  %a = alloca i32, align 512
+  call void @helper_01(i32 0, i32 0, i32 0, i32 0, i32* %a)
+  ret void
+}
+
+declare void @helper_02(i32, i32, i32, i32,
+                        i32, i32, i32, i32, i32*)
+
+; N32/N64 ABIs
+define void @func_02() {
+entry:
+; GP64-LABEL: func_02:
+
+  ; prologue
+  ; N32:        addiu   $sp, $sp, -1024
+  ; N64:        daddiu  $sp, $sp, -1024
+  ; GP64:       sd      $ra, 1016($sp)
+  ; GP64:       sd      $fp, 1008($sp)
+  ; N32:        sd      $gp, 1000($sp)
+  ;
+  ; GP64:       move    $fp, $sp
+  ; N32:        addiu   $[[T0:[0-9]+|ra]], $zero, -512
+  ; N64:        daddiu  $[[T0:[0-9]+|ra]], $zero, -512
+  ; GP64-NEXT:  and     $sp, $sp, $[[T0]]
+
+  ; body
+  ; N32:        addiu   $[[T1:[0-9]+]], $sp, 512
+  ; N64:        daddiu  $[[T1:[0-9]+]], $sp, 512
+  ; GP64:       sd      $[[T1]], 0($sp)
+
+  ; epilogue
+  ; GP64:       move    $sp, $fp
+  ; N32:        ld      $gp, 1000($sp)
+  ; GP64:       ld      $fp, 1008($sp)
+  ; GP64:       ld      $ra, 1016($sp)
+  ; N32:        addiu   $sp, $sp, 1024
+  ; N64:        daddiu  $sp, $sp, 1024
+
+  %a = alloca i32, align 512
+  call void @helper_02(i32 0, i32 0, i32 0, i32 0,
+                       i32 0, i32 0, i32 0, i32 0, i32* %a)
+  ret void
+}
+
+; Verify that we use $fp for referencing incoming arguments.
+
+declare void @helper_03(i32, i32, i32, i32, i32*, i32*)
+
+; O32 ABI
+define void @func_03(i32 %p0, i32 %p1, i32 %p2, i32 %p3, i32* %b) {
+entry:
+; GP32-LABEL: func_03:
+
+  ; body
+  ; GP32-DAG:   addiu   $[[T0:[0-9]+]], $sp, 512
+  ; GP32-DAG:   sw      $[[T0]], 16($sp)
+  ; GP32-DAG:   lw      $[[T1:[0-9]+]], 1040($fp)
+  ; GP32-DAG:   sw      $[[T1]], 20($sp)
+
+  %a = alloca i32, align 512
+  call void @helper_03(i32 0, i32 0, i32 0, i32 0, i32* %a, i32* %b)
+  ret void
+}
+
+declare void @helper_04(i32, i32, i32, i32,
+                        i32, i32, i32, i32, i32*, i32*)
+
+; N32/N64 ABIs
+define void @func_04(i32 %p0, i32 %p1, i32 %p2, i32 %p3,
+                     i32 %p4, i32 %p5, i32 %p6, i32 %p7,
+                     i32* %b) {
+entry:
+; GP64-LABEL: func_04:
+
+  ; body
+  ; N32-DAG:    addiu   $[[T0:[0-9]+]], $sp, 512
+  ; N64-DAG:    daddiu  $[[T0:[0-9]+]], $sp, 512
+  ; GP64-DAG:   sd      $[[T0]], 0($sp)
+  ; GP64-DAG:   ld      $[[T1:[0-9]+]], 1024($fp)
+  ; GP64-DAG:   sd      $[[T1]], 8($sp)
+
+  %a = alloca i32, align 512
+  call void @helper_04(i32 0, i32 0, i32 0, i32 0,
+                       i32 0, i32 0, i32 0, i32 0, i32* %a, i32* %b)
+  ret void
+}
+
+; Check dynamic stack realignment in functions with variable-sized objects.
+
+; O32 ABI
+define void @func_05(i32 %sz) {
+entry:
+; GP32-LABEL: func_05:
+
+  ; prologue
+  ; GP32:       addiu   $sp, $sp, -1024
+  ; GP32:       sw      $fp, 1020($sp)
+  ; GP32:       sw      $23, 1016($sp)
+  ;
+  ; GP32:       move    $fp, $sp
+  ; GP32:       addiu   $[[T0:[0-9]+|gp]], $zero, -512
+  ; GP32-NEXT:  and     $sp, $sp, $[[T0]]
+  ; GP32-NEXT:  move    $23, $sp
+
+  ; body
+  ; GP32:       addiu   $[[T1:[0-9]+]], $zero, 222
+  ; GP32:       sw      $[[T1]], 508($23)
+
+  ; epilogue
+  ; GP32:       move    $sp, $fp
+  ; GP32:       lw      $23, 1016($sp)
+  ; GP32:       lw      $fp, 1020($sp)
+  ; GP32:       addiu   $sp, $sp, 1024
+
+  %a0 = alloca i32, i32 %sz, align 512
+  %a1 = alloca i32, align 4
+
+  store volatile i32 111, i32* %a0, align 512
+  store volatile i32 222, i32* %a1, align 4
+
+  ret void
+}
+
+; N32/N64 ABIs
+define void @func_06(i32 %sz) {
+entry:
+; GP64-LABEL: func_06:
+
+  ; prologue
+  ; N32:        addiu   $sp, $sp, -1024
+  ; N64:        daddiu  $sp, $sp, -1024
+  ; GP64:       sd      $fp, 1016($sp)
+  ; GP64:       sd      $23, 1008($sp)
+  ;
+  ; GP64:       move    $fp, $sp
+  ; GP64:       addiu   $[[T0:[0-9]+|gp]], $zero, -512
+  ; GP64-NEXT:  and     $sp, $sp, $[[T0]]
+  ; GP64-NEXT:  move    $23, $sp
+
+  ; body
+  ; GP64:       addiu   $[[T1:[0-9]+]], $zero, 222
+  ; GP64:       sw      $[[T1]], 508($23)
+
+  ; epilogue
+  ; GP64:       move    $sp, $fp
+  ; GP64:       ld      $23, 1008($sp)
+  ; GP64:       ld      $fp, 1016($sp)
+  ; N32:        addiu   $sp, $sp, 1024
+  ; N64:        daddiu  $sp, $sp, 1024
+
+  %a0 = alloca i32, i32 %sz, align 512
+  %a1 = alloca i32, align 4
+
+  store volatile i32 111, i32* %a0, align 512
+  store volatile i32 222, i32* %a1, align 4
+
+  ret void
+}
+
+; Verify that we use $fp for referencing incoming arguments and $sp for
+; building outbound arguments for nested function calls.
+
+; O32 ABI
+define void @func_07(i32 %p0, i32 %p1, i32 %p2, i32 %p3, i32 %sz) {
+entry:
+; GP32-LABEL: func_07:
+
+  ; body
+  ; GP32-DAG:       lw      $[[T0:[0-9]+]], 1040($fp)
+  ;
+  ; GP32-DAG:       addiu   $[[T1:[0-9]+]], $zero, 222
+  ; GP32-DAG:       sw      $[[T1]], 508($23)
+  ;
+  ; GP32-DAG:       sw      $[[T2:[0-9]+]], 16($sp)
+
+  %a0 = alloca i32, i32 %sz, align 512
+  %a1 = alloca i32, align 4
+
+  store volatile i32 111, i32* %a0, align 512
+  store volatile i32 222, i32* %a1, align 4
+
+  call void @helper_01(i32 0, i32 0, i32 0, i32 0, i32* %a1)
+
+  ret void
+}
+
+; N32/N64 ABIs
+define void @func_08(i32 %p0, i32 %p1, i32 %p2, i32 %p3,
+                     i32 %p4, i32 %p5, i32 %p6, i32 %p7,
+                     i32 %sz) {
+entry:
+; GP64-LABEL: func_08:
+
+  ; body
+  ; N32-DAG:        lw      $[[T0:[0-9]+]], 1028($fp)
+  ; N64-DAG:        lwu     $[[T0:[0-9]+]], 1028($fp)
+  ;
+  ; GP64-DAG:       addiu   $[[T1:[0-9]+]], $zero, 222
+  ; GP64-DAG:       sw      $[[T1]], 508($23)
+  ;
+  ; GP64-DAG:       sd      $[[T2:[0-9]+]], 0($sp)
+
+  %a0 = alloca i32, i32 %sz, align 512
+  %a1 = alloca i32, align 4
+
+  store volatile i32 111, i32* %a0, align 512
+  store volatile i32 222, i32* %a1, align 4
+
+  call void @helper_02(i32 0, i32 0, i32 0, i32 0,
+                       i32 0, i32 0, i32 0, i32 0, i32* %a1)
+  ret void
+}
+
+; Check that we do not perform dynamic stack realignment in the presence of
+; the "no-realign-stack" function attribute.
+define void @func_09() "no-realign-stack" {
+entry:
+; ALL-LABEL: func_09:
+
+  ; ALL-NOT:  and     $sp, $sp, $[[T0:[0-9]+|ra|gp]]
+
+  %a = alloca i32, align 512
+  call void @helper_01(i32 0, i32 0, i32 0, i32 0, i32* %a)
+  ret void
+}
+
+define void @func_10(i32 %sz) "no-realign-stack" {
+entry:
+; ALL-LABEL: func_10:
+
+  ; ALL-NOT:  and     $sp, $sp, $[[T0:[0-9]+|ra|gp]]
+
+  %a0 = alloca i32, i32 %sz, align 512
+  %a1 = alloca i32, align 4
+
+  store volatile i32 111, i32* %a0, align 512
+  store volatile i32 222, i32* %a1, align 4
+
+  ret void
+}