Index: lib/IR/SafepointIRVerifier.cpp
===================================================================
--- lib/IR/SafepointIRVerifier.cpp
+++ lib/IR/SafepointIRVerifier.cpp
@@ -36,6 +36,7 @@
 #include "llvm/ADT/SetOperations.h"
 #include "llvm/ADT/SetVector.h"
 #include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/Constants.h"
 #include "llvm/IR/Dominators.h"
 #include "llvm/IR/Function.h"
 #include "llvm/IR/Instructions.h"
@@ -318,8 +319,7 @@
   static void verifyFunction(GCPtrTracker &&Tracker,
                              InstructionVerifier &Verifier);
 
-  /// Returns true for reachable blocks that are verified, the other blocks are
-  /// ignored.
+  /// Returns true for reachable and live blocks.
   bool isMapped(const BasicBlock *BB) const {
     return BlockMap.find(BB) != BlockMap.end();
   }
@@ -376,12 +376,119 @@
 private:
   void reportInvalidUse(const Value &V, const Instruction &I);
 };
+
+/// A dead block calculator used by the SafepointIRVerifier to skip dead
+/// blocks. The algorithm starts from a given set of dead blocks and
+/// propagates deadness through foldable conditional branches, like GVN does,
+/// but without splitting critical edges, so the CFG is kept intact.
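+///
+/// For example, given
+///     br i1 true, label %live, label %dead
+/// the edge to %dead is dead; if that was the only live incoming edge of
+/// %dead, then %dead and every block it dominates are marked dead as well.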
+class DeadBlocks : public SetVector<const BasicBlock *> {
+  const DominatorTree &DT;
+  // Incomplete by design: edges leaving dead blocks are never recorded here,
+  // since a dead source block already makes the edge dead.
+  SetVector<const Use *> DeadEdges;
+
+public:
+  DeadBlocks(const DominatorTree &DT) : DT(DT) {}
+
+  bool isDeadBlock(const BasicBlock *BB) {
+    return count(BB);
+  }
+
+  bool isDeadEdge(const Use *U) {
+    return DeadEdges.count(U);
+  }
+
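+  /// Returns true if \p BB has at least one incoming edge that is neither
+  /// marked dead itself nor originates from a dead predecessor.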
+  bool hasLiveIncomingEdges(const BasicBlock *BB) {
+    // Look for at least one incoming edge that is not known to be dead.
+    for (const_pred_iterator PredIt(BB), End(BB, true); PredIt != End;
+         ++PredIt) {
+      auto &PU = PredIt.getUse();
+      const Use &U = PU.getUser()->getOperandUse(PU.getOperandNo());
+      if (!isDeadBlock(*PredIt) && !isDeadEdge(&U))
+        return true; // Found a live edge.
+    }
+    return false;
+  }
+
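+  /// Marks \p DeadEdge as dead. If its destination block is left without any
+  /// live incoming edge, that block and every block it dominates are marked
+  /// dead, and deadness keeps propagating to successors that lose their last
+  /// live incoming edge.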
+  void addDeadEdge(const Use &DeadEdge) {
+    if (!DeadEdges.insert(&DeadEdge))
+      return;
+
+    BasicBlock *BB = cast_or_null<BasicBlock>(DeadEdge.get());
+    if (hasLiveIncomingEdges(BB))
+      return;
+
+    SmallVector<const BasicBlock *, 4> NewDead;
+
+    NewDead.push_back(BB);
+    while (!NewDead.empty()) {
+      const BasicBlock *D = NewDead.pop_back_val();
+      if (isDeadBlock(D))
+        continue;
+
+      // All blocks dominated by D are dead.
+      SmallVector<BasicBlock *, 8> Dom;
+      DT.getDescendants(const_cast<BasicBlock*>(D), Dom);
+      // There is no need to mark the in and out edges of these blocks as
+      // dead: once a block is in the dead set, the rest of the analysis
+      // ignores its edges.
+      insert(Dom.begin(), Dom.end());
+
+      // Look at the dominance frontier of D: a successor of a dominated
+      // block that has lost all of its live incoming edges is dead as well.
+      for (BasicBlock *B : Dom)
+        for (BasicBlock *S : successors(B))
+          if (!isDeadBlock(S) && !hasLiveIncomingEdges(S))
+            NewDead.push_back(S);
+    }
+  }
+
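+  /// For a conditional branch with a constant condition, marks the edge to
+  /// the statically untaken successor as dead. Branches whose successors are
+  /// identical are skipped, since neither edge can be declared dead.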
+  void processFoldableCondBr(const BranchInst *BI) {
+    if (!BI || BI->isUnconditional())
+      return;
+
+    // If a branch has two identical successors, we cannot declare either dead.
+    if (BI->getSuccessor(0) == BI->getSuccessor(1))
+      return;
+
+    ConstantInt *Cond = dyn_cast<ConstantInt>(BI->getCondition());
+    if (!Cond)
+      return;
+
+    addDeadEdge(BI->getOperandUse(Cond->getZExtValue() ? 1 : 2));
+  }
+
+  void processInstruction(const Instruction *I) {
+    // For conditional branches, we can perform simple conditional propagation on
+    // the condition value itself.
+    const BranchInst *BI = dyn_cast<BranchInst>(I);
+    if (!BI || !BI->isConditional() || !isa<Constant>(BI->getCondition()))
+      return;
+
+    processFoldableCondBr(BI);
+  }
+
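+  /// Scans all instructions of \p F and propagates deadness from every
+  /// foldable conditional branch that is found.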
+  void processFunction(const Function &F) {
+    // Walk the function in reverse post-order, i.e. top-down, so that a
+    // block's dominators are visited before the block itself.
+    ReversePostOrderTraversal<const Function *> RPOT(&F);
+    for (const BasicBlock *BB : RPOT)
+      for (auto &BI : *BB)
+        processInstruction(&BI);
+  }
+};
 } // end anonymous namespace
 
 GCPtrTracker::GCPtrTracker(const Function &F, const DominatorTree &DT) : F(F) {
-  // First, calculate Contribution of each BB.
+  // Find dead and unreachable blocks to exclude them from verification.
+  DeadBlocks DB(DT);
   for (const BasicBlock &BB : F)
-    if (DT.isReachableFromEntry(&BB)) {
+    if (!DT.isReachableFromEntry(&BB))
+      DB.insert(&BB);
+
+  DB.processFunction(F);
+
+  // Calculate the Contribution of each live BB and allocate its state.
+  // Dead blocks get no state and are ignored during verification.
+  for (const BasicBlock &BB : F)
+    if (!DB.isDeadBlock(&BB)) {
       BasicBlockState *BBS = new (BSAllocator.Allocate()) BasicBlockState;
       for (const auto &I : BB)
         transferInstruction(I, BBS->Cleared, BBS->Contribution);
@@ -403,9 +510,7 @@
 
 BasicBlockState *GCPtrTracker::getBasicBlockState(const BasicBlock *BB) {
   auto it = BlockMap.find(BB);
-  assert(it != BlockMap.end() &&
-         "No such BB in BlockMap! Probably BB from another function");
-  return it->second;
+  return it != BlockMap.end() ? it->second : nullptr;
 }
 
 const BasicBlockState *GCPtrTracker::getBasicBlockState(
@@ -426,6 +531,9 @@
   ReversePostOrderTraversal<const Function *> RPOT(&Tracker.F);
   for (const BasicBlock *BB : RPOT) {
     BasicBlockState *BBS = Tracker.getBasicBlockState(BB);
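+    // Dead and unreachable blocks have no BasicBlockState and are skipped.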
+    if (!BBS)
+      continue;
+
     // We destructively modify AvailableIn as we traverse the block instruction
     // by instruction.
     AvailableValueSet &AvailableSet = BBS->AvailableIn;
@@ -455,7 +563,9 @@
   // The AvailableIn and AvailableOut sets decrease as we iterate.
   while (!Worklist.empty()) {
     const BasicBlock *BB = Worklist.pop_back_val();
-    BasicBlockState *BBS = BlockMap[BB];
+    BasicBlockState *BBS = getBasicBlockState(BB);
+    if (!BBS)
+      continue; // Ignore dead successors.
 
     size_t OldInCount = BBS->AvailableIn.size();
     for (const BasicBlock *PBB : predecessors(BB))
@@ -573,13 +683,15 @@
   assert(DTN && "Unreachable blocks are ignored");
   while (DTN->getIDom()) {
     DTN = DTN->getIDom();
-    const auto &Defs = BlockMap[DTN->getBlock()]->Contribution;
+    auto *BBS = getBasicBlockState(DTN->getBlock());
+    assert(BBS && "Immediate dominator cannot be dead for a live block");
+    const auto &Defs = BBS->Contribution;
     Result.insert(Defs.begin(), Defs.end());
     // If this block is 'Cleared', then nothing LiveIn to this block can be
     // available after this block completes.  Note: This turns out to be
     // really important for reducing memory consuption of the initial available
     // sets and thus peak memory usage by this verifier.
-    if (BlockMap[DTN->getBlock()]->Cleared)
+    if (BBS->Cleared)
       return;
   }
 
@@ -628,12 +740,13 @@
     if (containsGCPtrType(PN->getType()))
       for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
         const BasicBlock *InBB = PN->getIncomingBlock(i);
-        if (!Tracker->isMapped(InBB))
-            continue;
+        const BasicBlockState *InBBS = Tracker->getBasicBlockState(InBB);
+        if (!InBBS)
+          continue; // Skip dead block.
         const Value *InValue = PN->getIncomingValue(i);
 
         if (isNotExclusivelyConstantDerived(InValue) &&
-            !Tracker->getBasicBlockState(InBB)->AvailableOut.count(InValue))
+            !InBBS->AvailableOut.count(InValue))
           reportInvalidUse(*InValue, *PN);
       }
   } else if (isa<CmpInst>(I) &&
Index: test/SafepointIRVerifier/dead-block-tolerant.ll
===================================================================
--- test/SafepointIRVerifier/dead-block-tolerant.ll
+++ test/SafepointIRVerifier/dead-block-tolerant.ll
@@ -0,0 +1,94 @@
+; RUN: opt -safepoint-ir-verifier-print-only -verify-safepoint-ir -S %s 2>&1 | FileCheck %s
+
+%jObject = type { [8 x i8] }
+declare %jObject addrspace(1)* @llvm.experimental.gc.relocate.p1jObject(token, i32, i32) nounwind
+declare token @llvm.experimental.gc.statepoint.p0f_f64f64f(i64, i32, double (double)*, i32, i32, ...)
+declare token @llvm.experimental.gc.statepoint.p0f_isVoidf(i64, i32, void ()*, i32, i32, ...)
+
+; This test checks that dead branch (left) does not conflict with live branch (right).
+define void @test2(i8 addrspace(1)* %arg, i1 %cond) gc "statepoint-example" {
+; CHECK-LABEL: Verifying gc pointers in function: test2
+; CHECK: No illegal uses found by SafepointIRVerifier in: test2
+ begin:
+  %ptr = getelementptr i8, i8 addrspace(1)* %arg, i64 4
+  br i1 true, label %right, label %left
+
+ left:
+  %safepoint_token = call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* undef, i32 0, i32 0, i32 0, i32 5, i32 0, i32 -1, i32 0, i32 0, i32 0)
+  br label %merge
+
+ right:
+  br label %merge
+
+ merge:
+  %val.unrelocated = phi i8 addrspace(1)* [ %arg, %left ], [ %ptr, %right ]
+  %c = icmp eq i8 addrspace(1)* %val.unrelocated, %arg
+  ret void
+}
+
+; This test checks that dead branch (right) does not conflict with live branch (left).
+define void @test3(i8 addrspace(1)* %arg, i1 %cond) gc "statepoint-example" {
+; CHECK-LABEL: Verifying gc pointers in function: test3
+; CHECK: No illegal uses found by SafepointIRVerifier in: test3
+ begin:
+  %ptr = getelementptr i8, i8 addrspace(1)* %arg, i64 4
+  br i1 false, label %right, label %left
+
+ left:
+  br label %merge
+
+ right:
+  %safepoint_token = call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* undef, i32 0, i32 0, i32 0, i32 5, i32 0, i32 -1, i32 0, i32 0, i32 0)
+  br label %merge
+
+ merge:
+  %val.unrelocated = phi i8 addrspace(1)* [ %arg, %left ], [ %ptr, %right ]
+  %c = icmp eq i8 addrspace(1)* %val.unrelocated, %arg
+  ret void
+}
+
+; This test checks that two dead branches (left and right) do not conflict.
+define void @test4(i8 addrspace(1)* %arg, i1 %cond) gc "statepoint-example" {
+; CHECK-LABEL: Verifying gc pointers in function: test4
+; CHECK: No illegal uses found by SafepointIRVerifier in: test4
+ begin:
+  %ptr = getelementptr i8, i8 addrspace(1)* %arg, i64 4
+  br i1 true, label %next, label %right
+
+ next:
+  br i1 false, label %left, label %merge
+
+ left:
+  %safepoint_token1 = call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* undef, i32 0, i32 0, i32 0, i32 5, i32 0, i32 -1, i32 0, i32 0, i32 0)
+  br label %merge
+
+ right:
+  br label %merge
+
+ merge:
+  %val.unrelocated = phi i8 addrspace(1)* [ %arg, %left ], [ %ptr, %right ], [ %arg, %next ]
+  %c = icmp eq i8 addrspace(1)* %val.unrelocated, %arg
+  ret void
+}
+
+; This test checks that the two live branches (left and right) do conflict:
+; %arg is not available after the safepoint in %left, so its use in the phi
+; at %merge must be reported as an illegal use.
+define void @test5(i8 addrspace(1)* %arg, i1 %cond) gc "statepoint-example" {
+; CHECK-LABEL: Verifying gc pointers in function: test5
+; CHECK-NOT: No illegal uses found by SafepointIRVerifier in: test5
+ begin:
+  %ptr = getelementptr i8, i8 addrspace(1)* %arg, i64 4
+  br i1 %cond, label %left, label %right
+
+ left:
+  %safepoint_token = call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* undef, i32 0, i32 0, i32 0, i32 5, i32 0, i32 -1, i32 0, i32 0, i32 0)
+  br label %merge
+
+ right:
+  br label %merge
+
+ merge:
+  %val.unrelocated = phi i8 addrspace(1)* [ %arg, %left ], [ %ptr, %right ]
+  %c = icmp eq i8 addrspace(1)* %val.unrelocated, %arg
+  ret void
+}