Index: llvm/include/llvm/Transforms/Scalar/JumpThreading.h
===================================================================
--- llvm/include/llvm/Transforms/Scalar/JumpThreading.h
+++ llvm/include/llvm/Transforms/Scalar/JumpThreading.h
@@ -85,8 +85,10 @@
   bool HasGuards = false;
 #ifdef NDEBUG
   SmallPtrSet<const BasicBlock *, 16> LoopHeaders;
+  SmallPtrSet<const BasicBlock *, 16> UnreachableBlocks;
 #else
   SmallSet<AssertingVH<const BasicBlock>, 16> LoopHeaders;
+  SmallSet<AssertingVH<const BasicBlock>, 16> UnreachableBlocks;
 #endif
   DenseSet<std::pair<Value *, BasicBlock *>> RecursionSet;
Index: llvm/lib/Transforms/Scalar/JumpThreading.cpp
===================================================================
--- llvm/lib/Transforms/Scalar/JumpThreading.cpp
+++ llvm/lib/Transforms/Scalar/JumpThreading.cpp
@@ -357,39 +357,32 @@
     BFI = std::move(BFI_);
   }
 
-  // Remove unreachable blocks from function as they may result in infinite
-  // loop. We do threading if we found something profitable. Jump threading a
-  // branch can create other opportunities. If these opportunities form a cycle
-  // i.e. if any jump threading is undoing previous threading in the path, then
-  // we will loop forever. We take care of this issue by not jump threading for
-  // back edges. This works for normal cases but not for unreachable blocks as
-  // they may have cycle with no back edge.
-  bool EverChanged = false;
-  EverChanged |= removeUnreachableBlocks(F, LVI, DDT);
+  // JumpThreading must not process blocks unreachable from entry. It's a
+  // waste of compute time and can potentially lead to hangs.
+  UnreachableBlocks.clear();
+  DominatorTree &DT = DDT->flush();
+  for (auto &BB : F)
+    if (!DT.isReachableFromEntry(&BB))
+      UnreachableBlocks.insert(&BB);
 
   FindLoopHeaders(F);
-
-  bool Changed;
+  bool Changed, EverChanged = false;
   do {
     Changed = false;
-    for (Function::iterator I = F.begin(), E = F.end(); I != E;) {
+    for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I) {
       BasicBlock *BB = &*I;
-      // Thread all of the branches we can over this block.
-      while (ProcessBlock(BB))
+      if (UnreachableBlocks.count(BB))
+        continue;
+      while (ProcessBlock(BB)) // Thread all of the branches we can over BB.
        Changed = true;
-
-      ++I;
-
-      // Don't thread branches over a block that's slated for deletion.
-      if (DDT->pendingDeletedBB(BB))
+      if (BB == &F.getEntryBlock() || DDT->pendingDeletedBB(BB))
        continue;
 
-      // If the block is trivially dead, zap it.  This eliminates the successor
-      // edges which simplifies the CFG.
-      if (pred_empty(BB) &&
-          BB != &BB->getParent()->getEntryBlock()) {
+      if (pred_empty(BB)) {
+        // When ProcessBlock makes BB unreachable it doesn't bother to fix up
+        // the instructions in it. We must remove BB to prevent invalid IR.
        DEBUG(dbgs() << "  JT: Deleting dead block '" << BB->getName()
-              << "' with terminator: " << *BB->getTerminator() << '\n');
+                     << "' with terminator: " << *BB->getTerminator() << '\n');
        LoopHeaders.erase(BB);
        LVI->eraseBlock(BB);
        DeleteDeadBlock(BB, DDT);
@@ -397,33 +390,27 @@
        continue;
      }
 
+      // ProcessBlock doesn't thread BBs with unconditional TIs. However, if BB
+      // is "almost empty", we attempt to merge BB with its sole successor.
      BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
-
-      // Can't thread an unconditional jump, but if the block is "almost
-      // empty", we can replace uses of it with uses of the successor and make
-      // this dead.
-      // We should not eliminate the loop header or latch either, because
-      // eliminating a loop header or latch might later prevent LoopSimplify
-      // from transforming nested loops into simplified form. We will rely on
-      // later passes in backend to clean up empty blocks.
      if (BI && BI->isUnconditional() &&
-          BB != &BB->getParent()->getEntryBlock() &&
-          // If the terminator is the only non-phi instruction, try to nuke it.
-          BB->getFirstNonPHIOrDbg()->isTerminator() && !LoopHeaders.count(BB) &&
-          !LoopHeaders.count(BI->getSuccessor(0))) {
-        // FIXME: It is always conservatively correct to drop the info
-        // for a block even if it doesn't get erased. This isn't totally
-        // awesome, but it allows us to use AssertingVH to prevent nasty
-        // dangling pointer issues within LazyValueInfo.
+          // The terminator must be the only non-phi instruction.
+          BB->getFirstNonPHIOrDbg()->isTerminator() &&
+          // Don't alter Loop headers and latches to ensure LoopSimplify will
+          // transform nested loops later.
+          !LoopHeaders.count(BB) && !LoopHeaders.count(BI->getSuccessor(0)) &&
+          TryToSimplifyUncondBranchFromEmptyBlock(BB, DDT)) {
+        // We passed in DDT so BB's parent remains F until DDT->flush() is
+        // called. BB can still be used for cleanup.
        LVI->eraseBlock(BB);
-        if (TryToSimplifyUncondBranchFromEmptyBlock(BB, DDT))
-          Changed = true;
+        Changed = true;
      }
    }
    EverChanged |= Changed;
  } while (Changed);
 
  LoopHeaders.clear();
+  UnreachableBlocks.clear();
  DDT->flush();
  LVI->enableDT();
  return EverChanged;
Index: llvm/test/Transforms/JumpThreading/pr15851_hang.ll
===================================================================
--- llvm/test/Transforms/JumpThreading/pr15851_hang.ll
+++ llvm/test/Transforms/JumpThreading/pr15851_hang.ll
@@ -2,9 +2,19 @@
 
 ; CHECK-LABEL: @f(
 ; CHECK-LABEL: entry
-; CHECK: ret void
-; CHECK-NOT: for.cond1
-; CHECK-NOT: for.body
+; CHECK-NEXT: ret void
+;
+; JumpThreading must detect the next two blocks are unreachable from entry
+; and leave them alone. A subsequent pass will remove them from @f.
+;
+; CHECK: for.cond1:
+; CHECK-NEXT: phi
+; CHECK-NEXT: icmp
+; CHECK-NEXT: br i1 %cmp, label %for.body, label %for.cond1
+; CHECK: for.body:
+; CHECK-NEXT: add
+; CHECK-NEXT: icmp
+; CHECK-NEXT: br i1 %a, label %for.cond1, label %for.cond1
 
 define void @f() {
 entry:
Index: llvm/test/Transforms/JumpThreading/select.ll
===================================================================
--- llvm/test/Transforms/JumpThreading/select.ll
+++ llvm/test/Transforms/JumpThreading/select.ll
@@ -157,12 +157,13 @@
 ; CHECK: test_switch_default
 ; CHECK: entry:
 ; CHECK: load
-; CHECK: icmp
+; CHECK: switch
 ; CHECK: [[THREADED:[A-Za-z.0-9]+]]:
 ; CHECK: store
 ; CHECK: br
 ; CHECK: L2:
-; CHECK: icmp
+; CHECK-SAME: preds = %entry, %entry
+; CHECK-NEXT: phi i32
 define void @test_switch_default(i32* nocapture %status) nounwind {
 entry:
   %0 = load i32, i32* %status, align 4
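
For context on the reachability check the patch introduces, here is a minimal standalone sketch (not part of the patch) of the same idea: build a dominator tree for the function and record every block that is not reachable from entry so later processing can skip it. It constructs a fresh DominatorTree directly instead of flushing the pass's DeferredDominance (DDT) the way runImpl() does, and the helper name collectUnreachableBlocks is illustrative only.

// Sketch only: mirrors the UnreachableBlocks pre-computation added to
// JumpThreadingPass::runImpl(), outside of the pass machinery.
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"

using namespace llvm;

static SmallPtrSet<BasicBlock *, 16> collectUnreachableBlocks(Function &F) {
  DominatorTree DT(F); // Fresh tree; the patch reuses DDT->flush() instead.
  SmallPtrSet<BasicBlock *, 16> Unreachable;
  for (BasicBlock &BB : F)
    if (!DT.isReachableFromEntry(&BB))
      Unreachable.insert(&BB); // Blocks to skip during threading.
  return Unreachable;
}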