Index: docs/LangRef.rst
===================================================================
--- docs/LangRef.rst
+++ docs/LangRef.rst
@@ -11907,7 +11907,7 @@
 
 ::
 
-      declare void @llvm.stackprotectorcheck(i8** <guard>)
+      declare void @llvm.stackprotectorcheck(i8* <guard>)
 
 Overview:
 """""""""
@@ -11937,6 +11937,32 @@
 intrinsic, we are able to generate the stack protector basic blocks late in
 codegen after the tail call decision has occurred.
 
+'``llvm.experimental.stackguardvalue``' Intrinsic
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Syntax:
+"""""""
+
+::
+
+      declare i8* @llvm.experimental.stackguardvalue()
+
+Overview:
+"""""""""
+
+The '``llvm.experimental.stackguardvalue``' intrinsic returns the value of the
+stack guard. It is only supported on platforms that do not expose the stack
+guard through an exported symbol.
+
+Arguments:
+""""""""""
+
+None.
+
+Semantics:
+""""""""""
+
+The '``llvm.experimental.stackguardvalue``' intrinsic returns the current
+value of the stack guard. It is lowered to the ``LOAD_STACK_GUARD`` pseudo
+instruction, which the backend is responsible for expanding.
+
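+For example, on a platform without an exported stack guard symbol, the stack
+protector pass emits a prologue along these lines (a sketch; value names are
+illustrative):
+
+::
+
+      %StackGuardSlot = alloca i8*
+      %StackGuard = call i8* @llvm.experimental.stackguardvalue()
+      call void @llvm.stackprotector(i8* %StackGuard, i8** %StackGuardSlot)
+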
 '``llvm.objectsize``' Intrinsic
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
Index: include/llvm/IR/Intrinsics.td
===================================================================
--- include/llvm/IR/Intrinsics.td
+++ include/llvm/IR/Intrinsics.td
@@ -323,9 +323,16 @@
 
 // Stack Protector Intrinsic - The stackprotector intrinsic writes the stack
 // guard to the correct place on the stack frame.
+//
+// stackprotectorcheck is only used when -enable-selectiondag-sp is on and the
+// target supports SSP in SelectionDAG. Otherwise the SSP pass generates
+// IR-level code, including llvm.experimental.stackguardvalue calls. Backends
+// may customize the lowering by overriding getStackGuardAddr and implementing
+// the LOAD_STACK_GUARD pseudo instruction.
 def int_stackprotector : Intrinsic<[], [llvm_ptr_ty, llvm_ptrptr_ty], []>;
-def int_stackprotectorcheck : Intrinsic<[], [llvm_ptrptr_ty],
+def int_stackprotectorcheck : Intrinsic<[], [llvm_ptr_ty],
                                         [IntrReadWriteArgMem]>;
+def int_experimental_stackguardvalue : Intrinsic<[llvm_ptr_ty], [], []>;
 
 // A counter increment for instrumentation based profiling.
 def int_instrprof_increment : Intrinsic<[],
Index: include/llvm/Target/TargetLowering.h
===================================================================
--- include/llvm/Target/TargetLowering.h
+++ include/llvm/Target/TargetLowering.h
@@ -1003,14 +1003,18 @@
     return PrefLoopAlignment;
   }
 
-  /// Return true if the target stores stack protector cookies at a fixed offset
-  /// in some non-standard address space, and populates the address space and
-  /// offset as appropriate.
-  virtual bool getStackCookieLocation(unsigned &/*AddressSpace*/,
-                                      unsigned &/*Offset*/) const {
-    return false;
+  /// Return whether the target supports the SelectionDAG-based stack
+  /// protector.
+  /// TODO: Adjust visitSPDescriptorFailure in SelectionDAGBuilder so that
+  /// SelectionDAG Stack Protector is always supported. Then remove this
+  /// function and possibly the flag -enable-selectiondag-sp.
+  virtual bool supportsSelectionDAGSP() const {
+    return !getTargetMachine().getTargetTriple().isOSOpenBSD();
   }
 
+  /// Return the address of the stack guard, or nullptr if the target wants to
+  /// lower LOAD_STACK_GUARD manually.
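+  ///
+  /// For example, a target whose guard lives in an ordinary global variable
+  /// could return it directly (a sketch; "__my_guard" is a hypothetical
+  /// symbol):
+  /// \code
+  ///   Value *MyTargetLowering::getStackGuardAddr(Module &M) const {
+  ///     return M.getOrInsertGlobal("__my_guard",
+  ///                                Type::getInt8PtrTy(M.getContext()));
+  ///   }
+  /// \endcode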
+  virtual Value *getStackGuardAddr(Module &M) const;
+
   /// If the target has a standard location for the unsafe stack pointer,
   /// returns the address of that location. Otherwise, returns nullptr.
   virtual Value *getSafeStackPointerLocation(IRBuilder<> &IRB) const;
@@ -2874,10 +2878,9 @@
   AdjustInstrPostInstrSelection(MachineInstr *MI, SDNode *Node) const;
 
   /// If this function returns true, SelectionDAGBuilder emits a
-  /// LOAD_STACK_GUARD node when it is lowering Intrinsic::stackprotector.
-  virtual bool useLoadStackGuardNode() const {
-    return false;
-  }
+  /// LOAD_STACK_GUARD node when it is lowering Intrinsic::stackprotector,
+  /// even if getStackGuardAddr returns a global variable.
+  virtual bool forceLoadStackGuardNode() const { return false; }
 
   /// Lower TLS global address SDNode for target independent emulated TLS model.
   virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA,
Index: lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
===================================================================
--- lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
+++ lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
@@ -479,9 +479,8 @@
 
     /// Initialize the stack protector descriptor structure for a new basic
     /// block.
-    void initialize(const BasicBlock *BB,
-                    MachineBasicBlock *MBB,
-                    const CallInst &StackProtCheckCall) {
+    void initialize(const BasicBlock *BB, MachineBasicBlock *MBB,
+                    const Value *G) {
       // Make sure we are not initialized yet.
       assert(!shouldEmitStackProtector() && "Stack Protector Descriptor is "
              "already initialized!");
@@ -489,7 +488,7 @@
       SuccessMBB = AddSuccessorMBB(BB, MBB, /* IsLikely */ true);
       FailureMBB = AddSuccessorMBB(BB, MBB, /* IsLikely */ false, FailureMBB);
       if (!Guard)
-        Guard = StackProtCheckCall.getArgOperand(0);
+        Guard = G;
     }
 
     /// Reset state that changes when we handle different basic blocks.
@@ -548,8 +547,8 @@
     /// contain a call to __stack_chk_fail().
     MachineBasicBlock *FailureMBB;
 
-    /// The guard variable which we will compare against the stored value in the
-    /// stack protector stack slot.
+    /// The guard: either the address of the stack guard variable, or the
+    /// llvm.experimental.stackguardvalue call that produces its value.
     const Value *Guard;
 
     /// The virtual register holding the stack guard value.
Index: lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -1935,27 +1935,27 @@
   int FI = MFI->getStackProtectorIndex();
 
   const Value *IRGuard = SPD.getGuard();
-  SDValue GuardPtr = getValue(IRGuard);
-  SDValue StackSlotPtr = DAG.getFrameIndex(FI, PtrTy);
-
   unsigned Align = DL->getPrefTypeAlignment(IRGuard->getType());
-
-  SDValue Guard;
   SDLoc dl = getCurSDLoc();
 
-  // If GuardReg is set and useLoadStackGuardNode returns true, retrieve the
-  // guard value from the virtual register holding the value. Otherwise, emit a
-  // volatile load to retrieve the stack guard value.
+  SDValue Guard;
+  // If GuardReg is set and forceLoadStackGuardNode returns true, retrieve the
+  // guard value from the virtual register holding it. If the guard is the
+  // result of a llvm.experimental.stackguardvalue call, use that value
+  // directly. Otherwise, emit a volatile load of the guard variable.
   unsigned GuardReg = SPD.getGuardReg();
-
-  if (GuardReg && TLI.useLoadStackGuardNode())
+  if (GuardReg && TLI.forceLoadStackGuardNode()) {
     Guard = DAG.getCopyFromReg(DAG.getEntryNode(), dl, GuardReg,
                                PtrTy);
-  else
+  } else if (isa<CallInst>(IRGuard)) {
+    Guard = getValue(IRGuard);
+  } else {
+    SDValue GuardPtr = getValue(IRGuard);
     Guard = DAG.getLoad(PtrTy, dl, DAG.getEntryNode(),
                         GuardPtr, MachinePointerInfo(IRGuard, 0),
                         true, false, false, Align);
+  }
 
+  SDValue StackSlotPtr = DAG.getFrameIndex(FI, PtrTy);
   SDValue StackSlot = DAG.getLoad(
       PtrTy, dl, DAG.getEntryNode(), StackSlotPtr,
       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), true,
@@ -5106,23 +5106,34 @@
     setValue(&I, Res);
     return nullptr;
   }
+  case Intrinsic::experimental_stackguardvalue: {
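+    // Lower the guard-value intrinsic to a LOAD_STACK_GUARD pseudo; the
+    // backend is responsible for expanding the pseudo instruction.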
+    EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
+    SDValue Chain = getRoot();
+    MachineSDNode *Node =
+        DAG.getMachineNode(TargetOpcode::LOAD_STACK_GUARD, sdl, PtrTy, Chain);
+    DAG.setRoot(Chain);
+    setValue(&I, SDValue(Node, 0));
+    return nullptr;
+  }
   case Intrinsic::stackprotector: {
     // Emit code into the DAG to store the stack guard onto the stack.
     MachineFunction &MF = DAG.getMachineFunction();
     MachineFrameInfo *MFI = MF.getFrameInfo();
     EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
     SDValue Src, Chain = getRoot();
-    const Value *Ptr = cast<LoadInst>(I.getArgOperand(0))->getPointerOperand();
-    const GlobalVariable *GV = dyn_cast<GlobalVariable>(Ptr);
-
-    // See if Ptr is a bitcast. If it is, look through it and see if we can get
-    // global variable __stack_chk_guard.
-    if (!GV)
-      if (const Operator *BC = dyn_cast<Operator>(Ptr))
-        if (BC->getOpcode() == Instruction::BitCast)
-          GV = dyn_cast<GlobalVariable>(BC->getOperand(0));
+    const GlobalVariable *GV = nullptr;
+    if (auto *Ptr = dyn_cast<LoadInst>(I.getArgOperand(0))) {
+      GV = dyn_cast<GlobalVariable>(Ptr->getPointerOperand());
+
+      // See if Ptr is a bitcast. If it is, look through it and see if we can
+      // get global variable __stack_chk_guard.
+      if (!GV)
+        if (const Operator *BC = dyn_cast<Operator>(Ptr->getPointerOperand()))
+          if (BC->getOpcode() == Instruction::BitCast)
+            GV = dyn_cast<GlobalVariable>(BC->getOperand(0));
+    }
 
-    if (GV && TLI.useLoadStackGuardNode()) {
+    if (GV && TLI.forceLoadStackGuardNode()) {
       // Emit a LOAD_STACK_GUARD node.
       MachineSDNode *Node = DAG.getMachineNode(TargetOpcode::LOAD_STACK_GUARD,
                                                sdl, PtrTy, Chain);
@@ -5135,17 +5146,21 @@
                                          DAG.getEVTAlignment(PtrTy));
       Node->setMemRefs(MemRefs, MemRefs + 1);
 
-      // Copy the guard value to a virtual register so that it can be
-      // retrieved in the epilogue.
       Src = SDValue(Node, 0);
+    } else {
+      Src = getValue(I.getArgOperand(0)); // The guard's value.
+    }
+    // We get a LOAD_STACK_GUARD either from forceLoadStackGuardNode or from
+    // lowering llvm.experimental.stackguardvalue. Copy the guard value to a
+    // virtual register so that it can be retrieved in the epilogue.
+    if ((GV && TLI.forceLoadStackGuardNode()) ||
+        isa<CallInst>(I.getArgOperand(0))) {
       const TargetRegisterClass *RC =
           TLI.getRegClassFor(Src.getSimpleValueType());
       unsigned Reg = MF.getRegInfo().createVirtualRegister(RC);
 
       SPDescriptor.setGuardReg(Reg);
       Chain = DAG.getCopyToReg(Chain, sdl, Reg, Src);
-    } else {
-      Src = getValue(I.getArgOperand(0));   // The guard's value.
     }
 
     AllocaInst *Slot = cast<AllocaInst>(I.getArgOperand(1));
@@ -5352,10 +5367,23 @@
     return nullptr;
   case Intrinsic::stackprotectorcheck: {
     // Do not actually emit anything for this basic block. Instead we initialize
-    // the stack protector descriptor and export the guard variable so we can
+    // the stack protector descriptor and export the guard value so we can
     // access it in FinishBasicBlock.
     const BasicBlock *BB = I.getParent();
-    SPDescriptor.initialize(BB, FuncInfo.MBBMap[BB], I);
+    // llvm.stackprotectorcheck may carry either a load of the guard address
+    // or a llvm.experimental.stackguardvalue call. SPDescriptor expects
+    // either an address or an intrinsic call, so strip the LoadInst if
+    // present.
+    if (auto *Load = dyn_cast<LoadInst>(I.getArgOperand(0))) {
+      SPDescriptor.initialize(BB, FuncInfo.MBBMap[BB],
+                              Load->getPointerOperand());
+    } else if (isa<CallInst>(I.getArgOperand(0))) {
+      SPDescriptor.initialize(BB, FuncInfo.MBBMap[BB], I.getArgOperand(0));
+    } else {
+      llvm_unreachable("llvm.stackprotectorcheck expects either a load of a "
+                       "global variable or a "
+                       "llvm.experimental.stackguardvalue call.");
+    }
     ExportFromCurrentBlock(SPDescriptor.getGuard());
 
     // Flush our exports since we are going to process a terminator.
Index: lib/CodeGen/StackProtector.cpp
===================================================================
--- lib/CodeGen/StackProtector.cpp
+++ lib/CodeGen/StackProtector.cpp
@@ -12,6 +12,32 @@
 // are allocated. Upon exiting the block, the stored value is checked. If it's
 // changed, then there was some sort of violation and the program aborts.
 //
+// Currently we have 3 code paths:
+// 1) IR without SupportsSelectionDAGSP -> FastISel -> Optional backend lowering
+// 2) IR with SupportsSelectionDAGSP -> SelectionDAG -> Optional backend
+//    lowering
+// 3) IR without SupportsSelectionDAGSP -> SelectionDAG -> Optional backend
+//    lowering
+//
+// For details on llvm.stackprotectorcheck, see comment on
+// StackProtectorDescriptor.
+//
+// We want to eliminate 3), since every case it handles can also be handled by
+// 2). The current blocker is that visitSPDescriptorFailure hardcodes the
+// failure handler, which isn't correct on OpenBSD. Once this is solved, 3)
+// will be removed.
+//
+// By combining TLI::getStackGuardAddr and TLI::forceLoadStackGuardNode,
+// backends can customize how the SSP guard is loaded:
+// * Some backends want to lower the global variable manually; in that case
+//   forceLoadStackGuardNode should return true and getStackGuardAddr should
+//   return the global variable.
+// * Other platforms (e.g. PowerPC 64-bit) want to lower the stack guard
+//   loading completely manually; there getStackGuardAddr should return
+//   nullptr (see the sketch below).
+// * For the rest, forceLoadStackGuardNode returns false and getStackGuardAddr
+//   returns the global variable, so guard loading is not customized.
+//
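+// For illustration only (a hypothetical target, not part of this patch), the
+// fully-manual route would be:
+//
+//   Value *MyTargetLowering::getStackGuardAddr(Module &M) const {
+//     // The guard load is emitted by this backend's custom lowering of the
+//     // LOAD_STACK_GUARD pseudo instruction.
+//     return nullptr;
+//   }
+//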
 //===----------------------------------------------------------------------===//
 
 #include "llvm/CodeGen/StackProtector.h"
@@ -318,45 +344,32 @@
   return nullptr;
 }
 
-/// Insert code into the entry block that stores the __stack_chk_guard
+/// Emit an instruction that produces the stack guard value: a load of the
+/// guard address if the target provides one, or a
+/// llvm.experimental.stackguardvalue call otherwise.
+static Value *CreateLoadStackGuard(IRBuilder<> &B, Module &M,
+                                   const TargetLoweringBase *TLI) {
+  if (auto *StackGuardAddr = TLI->getStackGuardAddr(M))
+    return B.CreateLoad(StackGuardAddr, "StackGuard");
+  return B.CreateCall(
+      Intrinsic::getDeclaration(&M, Intrinsic::experimental_stackguardvalue));
+}
+
-/// variable onto the stack:
+/// Insert code into the entry block that stores the stack guard value
+/// onto the stack:
 ///
 ///   entry:
 ///     StackGuardSlot = alloca i8*
-///     StackGuard = load __stack_chk_guard
-///     call void @llvm.stackprotect.create(StackGuard, StackGuardSlot)
-///
-/// Returns true if the platform/triple supports the stackprotectorcreate pseudo
-/// node.
-static bool CreatePrologue(Function *F, Module *M, ReturnInst *RI,
+///     StackGuard = <value of the stack guard>
+///     call void @llvm.stackprotector(StackGuard, StackGuardSlot)
+static void CreatePrologue(Function *F, Module *M, ReturnInst *RI,
                            const TargetLoweringBase *TLI, const Triple &TT,
-                           AllocaInst *&AI, Value *&StackGuardVar) {
-  bool SupportsSelectionDAGSP = false;
+                           AllocaInst *&AI, Value *&StackGuardVal) {
   PointerType *PtrTy = Type::getInt8PtrTy(RI->getContext());
-  unsigned AddressSpace, Offset;
-  if (TLI->getStackCookieLocation(AddressSpace, Offset)) {
-    Constant *OffsetVal =
-        ConstantInt::get(Type::getInt32Ty(RI->getContext()), Offset);
-
-    StackGuardVar =
-        ConstantExpr::getIntToPtr(OffsetVal, PointerType::get(PtrTy,
-                                                              AddressSpace));
-  } else if (TT.isOSOpenBSD()) {
-    StackGuardVar = M->getOrInsertGlobal("__guard_local", PtrTy);
-    cast<GlobalValue>(StackGuardVar)
-        ->setVisibility(GlobalValue::HiddenVisibility);
-  } else {
-    SupportsSelectionDAGSP = true;
-    StackGuardVar = M->getOrInsertGlobal("__stack_chk_guard", PtrTy);
-  }
 
   IRBuilder<> B(&F->getEntryBlock().front());
   AI = B.CreateAlloca(PtrTy, nullptr, "StackGuardSlot");
-  LoadInst *LI = B.CreateLoad(StackGuardVar, "StackGuard");
+  StackGuardVal = CreateLoadStackGuard(B, *M, TLI);
   B.CreateCall(Intrinsic::getDeclaration(M, Intrinsic::stackprotector),
-               {LI, AI});
-
-  return SupportsSelectionDAGSP;
+               {StackGuardVal, AI});
 }
 
 /// InsertStackProtectors - Insert code into the prologue and epilogue of the
@@ -367,10 +380,11 @@
 ///    value. It calls __stack_chk_fail if they differ.
 bool StackProtector::InsertStackProtectors() {
   bool HasPrologue = false;
-  bool SupportsSelectionDAGSP =
-      EnableSelectionDAGSP && !TM->Options.EnableFastISel;
+  bool SupportsSelectionDAGSP = EnableSelectionDAGSP &&
+                                !TM->Options.EnableFastISel &&
+                                TLI->supportsSelectionDAGSP();
   AllocaInst *AI = nullptr;       // Place on stack that stores the stack guard.
-  Value *StackGuardVar = nullptr; // The stack guard variable.
+  Value *StackGuardVal = nullptr; // The stack guard value.
 
   for (Function::iterator I = F->begin(), E = F->end(); I != E;) {
     BasicBlock *BB = &*I++;
@@ -380,8 +394,7 @@
 
     if (!HasPrologue) {
       HasPrologue = true;
-      SupportsSelectionDAGSP &=
-          CreatePrologue(F, M, RI, TLI, Trip, AI, StackGuardVar);
+      CreatePrologue(F, M, RI, TLI, Trip, AI, StackGuardVal);
     }
 
     if (SupportsSelectionDAGSP) {
@@ -400,7 +413,7 @@
 
       Function *Intrinsic =
           Intrinsic::getDeclaration(M, Intrinsic::stackprotectorcheck);
-      CallInst::Create(Intrinsic, StackGuardVar, "", InsertionPt);
+      CallInst::Create(Intrinsic, StackGuardVal, "", InsertionPt);
     } else {
       // If we do not support SelectionDAG based tail calls, generate IR level
       // tail calls.
@@ -415,7 +428,7 @@
       //
       //   return:
       //     ...
-      //     %1 = load __stack_chk_guard
+      //     %1 = <value of the stack guard>
       //     %2 = load StackGuardSlot
       //     %3 = cmp i1 %1, %2
       //     br i1 %3, label %SP_return, label %CallStackCheckFailBlk
@@ -450,7 +463,7 @@
 
       // Generate the stack protector instructions in the old basic block.
       IRBuilder<> B(BB);
-      LoadInst *LI1 = B.CreateLoad(StackGuardVar);
+      Value *LI1 = CreateLoadStackGuard(B, *M, TLI);
       LoadInst *LI2 = B.CreateLoad(AI);
       Value *Cmp = B.CreateICmpEQ(LI1, LI2);
       auto SuccessProb =
Index: lib/CodeGen/TargetLoweringBase.cpp
===================================================================
--- lib/CodeGen/TargetLoweringBase.cpp
+++ lib/CodeGen/TargetLoweringBase.cpp
@@ -1745,3 +1745,14 @@
 
   return true;
 }
+
+Value *TargetLoweringBase::getStackGuardAddr(Module &M) const {
+  PointerType *PtrTy = Type::getInt8PtrTy(M.getContext());
+  if (getTargetMachine().getTargetTriple().isOSOpenBSD()) {
+    auto *StackGuardAddr = M.getOrInsertGlobal("__guard_local", PtrTy);
+    cast<GlobalValue>(StackGuardAddr)
+        ->setVisibility(GlobalValue::HiddenVisibility);
+    return StackGuardAddr;
+  }
+  return M.getOrInsertGlobal("__stack_chk_guard", PtrTy);
+}
Index: lib/IR/AutoUpgrade.cpp
===================================================================
--- lib/IR/AutoUpgrade.cpp
+++ lib/IR/AutoUpgrade.cpp
@@ -159,6 +159,21 @@
     }
     break;
 
+  case 's':
+    // The old version takes the guard's address (i8**), while the new one
+    // takes the guard's value (i8*).
+    if (Name == "stackprotectorcheck") {
+      if (F->arg_size() == 1 &&
+          F->arg_begin()->getType() ==
+              PointerType::getUnqual(Type::getInt8PtrTy(F->getContext()))) {
+        F->setName(Name + ".old");
+        NewFn = Intrinsic::getDeclaration(F->getParent(),
+                                          Intrinsic::stackprotectorcheck);
+        return true;
+      }
+    }
+    break;
+
   case 'x': {
     if (Name.startswith("x86.sse2.pcmpeq.") ||
         Name.startswith("x86.sse2.pcmpgt.") ||
@@ -739,6 +754,14 @@
     return;
   }
 
+  case Intrinsic::stackprotectorcheck: {
+    // The old intrinsic took the guard's address; load the guard's value and
+    // pass that to the new intrinsic instead.
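+    // For example (value names illustrative):
+    //   call void @llvm.stackprotectorcheck(i8** @guard)
+    // becomes:
+    //   %0 = load i8*, i8** @guard
+    //   call void @llvm.stackprotectorcheck(i8* %0)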
+    CI->replaceAllUsesWith(
+        Builder.CreateCall(NewFn, {Builder.CreateLoad(CI->getArgOperand(0))}));
+    CI->eraseFromParent();
+    return;
+  }
+
   case Intrinsic::x86_xop_vfrcz_ss:
   case Intrinsic::x86_xop_vfrcz_sd:
     CI->replaceAllUsesWith(
Index: lib/Target/AArch64/AArch64ISelLowering.h
===================================================================
--- lib/Target/AArch64/AArch64ISelLowering.h
+++ lib/Target/AArch64/AArch64ISelLowering.h
@@ -354,7 +354,7 @@
 
   bool shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;
 
-  bool useLoadStackGuardNode() const override;
+  bool forceLoadStackGuardNode() const override;
   TargetLoweringBase::LegalizeTypeAction
   getPreferredVectorAction(EVT VT) const override;
 
Index: lib/Target/AArch64/AArch64ISelLowering.cpp
===================================================================
--- lib/Target/AArch64/AArch64ISelLowering.cpp
+++ lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -10101,9 +10101,7 @@
   }
 }
 
-bool AArch64TargetLowering::useLoadStackGuardNode() const {
-  return true;
-}
+bool AArch64TargetLowering::forceLoadStackGuardNode() const { return true; }
 
 unsigned AArch64TargetLowering::combineRepeatedFPDivisors() const {
   // Combine multiple FDIVs with the same divisor into multiple FMULs by the
Index: lib/Target/ARM/ARMISelLowering.h
===================================================================
--- lib/Target/ARM/ARMISelLowering.h
+++ lib/Target/ARM/ARMISelLowering.h
@@ -462,7 +462,7 @@
     shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;
     bool shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;
 
-    bool useLoadStackGuardNode() const override;
+    bool forceLoadStackGuardNode() const override;
 
     bool canCombineStoreAndExtract(Type *VectorTy, Value *Idx,
                                    unsigned &Cost) const override;
Index: lib/Target/ARM/ARMISelLowering.cpp
===================================================================
--- lib/Target/ARM/ARMISelLowering.cpp
+++ lib/Target/ARM/ARMISelLowering.cpp
@@ -12186,7 +12186,7 @@
 }
 
 // This has so far only been implemented for MachO.
-bool ARMTargetLowering::useLoadStackGuardNode() const {
+bool ARMTargetLowering::forceLoadStackGuardNode() const {
   return Subtarget->isTargetMachO();
 }
 
Index: lib/Target/X86/X86ISelLowering.h
===================================================================
--- lib/Target/X86/X86ISelLowering.h
+++ lib/Target/X86/X86ISelLowering.h
@@ -948,11 +948,9 @@
     FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                              const TargetLibraryInfo *libInfo) const override;
 
-    /// Return true if the target stores stack protector cookies at a fixed
-    /// offset in some non-standard address space, and populates the address
-    /// space and offset as appropriate.
-    bool getStackCookieLocation(unsigned &AddressSpace,
-                                unsigned &Offset) const override;
+    bool supportsSelectionDAGSP() const override;
+
+    Value *getStackGuardAddr(Module &M) const override;
 
     /// Return true if the target stores SafeStack pointer at a fixed offset in
     /// some non-standard address space, and populates the address space and
@@ -964,7 +962,7 @@
 
     bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override;
 
-    bool useLoadStackGuardNode() const override;
+    bool forceLoadStackGuardNode() const override;
     /// \brief Customize the preferred legalization strategy for certain types.
     LegalizeTypeAction getPreferredVectorAction(EVT VT) const override;
 
Index: lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- lib/Target/X86/X86ISelLowering.cpp
+++ lib/Target/X86/X86ISelLowering.cpp
@@ -1875,7 +1875,7 @@
 }
 
 // This has so far only been implemented for 64-bit MachO.
-bool X86TargetLowering::useLoadStackGuardNode() const {
+bool X86TargetLowering::forceLoadStackGuardNode() const {
   return Subtarget.isTargetMachO() && Subtarget.is64Bit();
 }
 
@@ -2147,16 +2147,29 @@
   return 256;
 }
 
-bool X86TargetLowering::getStackCookieLocation(unsigned &AddressSpace,
-                                               unsigned &Offset) const {
-  if (!Subtarget.isTargetLinux())
+bool X86TargetLowering::supportsSelectionDAGSP() const {
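+  // Linux keeps the stack guard at a fixed TLS offset (see getStackGuardAddr)
+  // and previously never took the SelectionDAG SP path, so keep it on the
+  // IR-level path.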
+  if (Subtarget.isTargetLinux())
     return false;
-
-  // %fs:0x28, unless we're using a Kernel code model, in which case it's %gs:
-  // %gs:0x14 on i386
-  Offset = (Subtarget.is64Bit()) ? 0x28 : 0x14;
-  AddressSpace = getAddressSpace();
-  return true;
+  return TargetLowering::supportsSelectionDAGSP();
+}
+
+Value *X86TargetLowering::getStackGuardAddr(Module &M) const {
+  // Linux stores the stack protector cookie at a fixed offset in a
+  // non-standard address space, so return a constant pointer into that
+  // address space instead of a global variable.
+  if (Subtarget.isTargetLinux()) {
+    unsigned AddressSpace = getAddressSpace();
+    // %fs:0x28 on x86-64 (%gs:0x28 with the kernel code model); %gs:0x14 on
+    // i386.
+    unsigned Offset = (Subtarget.is64Bit()) ? 0x28 : 0x14;
+    Constant *OffsetVal =
+        ConstantInt::get(Type::getInt32Ty(M.getContext()), Offset);
+
+    PointerType *PtrTy = Type::getInt8PtrTy(M.getContext());
+    return ConstantExpr::getIntToPtr(OffsetVal,
+                                     PointerType::get(PtrTy, AddressSpace));
+  }
+  return TargetLowering::getStackGuardAddr(M);
 }
 
 Value *X86TargetLowering::getSafeStackPointerLocation(IRBuilder<> &IRB) const {
Index: test/Bitcode/upgrade-stackprotectorcheck.ll
===================================================================
--- /dev/null
+++ test/Bitcode/upgrade-stackprotectorcheck.ll
@@ -0,0 +1,18 @@
+; RUN: llvm-as < %s | llvm-dis | FileCheck %s
+
+@user_defined_guard = external global i8*
+
+declare void @llvm.stackprotector(i8*, i8**)
+declare void @llvm.stackprotectorcheck(i8**)
+
+define void @test_upgrade() {
+entry:
+; CHECK: %0 = load i8*, i8** @user_defined_guard
+; CHECK: call void @llvm.stackprotectorcheck(i8* %0)
+  %StackGuardSlot = alloca i8*
+  %StackGuard = load i8*, i8** @user_defined_guard
+  call void @llvm.stackprotector(i8* %StackGuard, i8** %StackGuardSlot)
+  %container = alloca [32 x i8], align 1
+  call void @llvm.stackprotectorcheck(i8** @user_defined_guard)
+  ret void
+}