diff --git a/llvm/lib/Transforms/Scalar/GVNHoist.cpp b/llvm/lib/Transforms/Scalar/GVNHoist.cpp
--- a/llvm/lib/Transforms/Scalar/GVNHoist.cpp
+++ b/llvm/lib/Transforms/Scalar/GVNHoist.cpp
@@ -337,7 +337,8 @@
   // WL to HoistBB.
   bool safeToHoistScalar(const BasicBlock *HoistBB, const BasicBlock *BB,
                          int &NBBsOnAllPaths) {
-    return !hasEHOnPath(HoistBB, BB, NBBsOnAllPaths);
+    return !hasEHOnPath(HoistBB, BB, NBBsOnAllPaths) &&
+           !isa<CallBrInst>(HoistBB->getTerminator());
   }
 
   // In the inverse CFG, the dominance frontier of basic block (BB) is the
diff --git a/llvm/test/Transforms/GVNHoist/hoist-call.ll b/llvm/test/Transforms/GVNHoist/hoist-call.ll
--- a/llvm/test/Transforms/GVNHoist/hoist-call.ll
+++ b/llvm/test/Transforms/GVNHoist/hoist-call.ll
@@ -26,3 +26,24 @@
 }
 
 declare float @llvm.fabs.f32(float)
+
+; Check that the extractvalues are not hoisted into the callbr-terminated entry block.
+define void @foo() {
+; CHECK-LABEL: define void @foo(
+; CHECK-NEXT:  entry
+; CHECK-NEXT:    %0 = callbr
+; CHECK-NEXT:    to label
+; CHECK-EMPTY:
+; CHECK-NEXT:  asm.fallthrough:
+entry:
+  %0 = callbr { i32, i32 } asm sideeffect "somestuff", "=r,=r,!i"()
+          to label %asm.fallthrough [label %err.split]
+
+asm.fallthrough:                                  ; preds = %entry
+  %asmresult = extractvalue { i32, i32 } %0, 0
+  ret void
+
+err.split:                                        ; preds = %entry
+  %asmresult2 = extractvalue { i32, i32 } %0, 0
+  ret void
+}