Index: lib/Transforms/Coroutines/CoroFrame.cpp
===================================================================
--- lib/Transforms/Coroutines/CoroFrame.cpp
+++ lib/Transforms/Coroutines/CoroFrame.cpp
@@ -863,6 +863,15 @@
   splitBlockIfNotFirst(I->getNextNode(), "After" + Name);
 }
 
+// Values with users not dominated by CoroBegin cannot be spilled to the frame.
+static bool allUsersDominatedByCoroBegin(Value &V, Instruction &CBInst, Function &F) {
+  DominatorTree DT(F);
+  for (User *U : V.users())
+    if (isa<Instruction>(U) && !DT.dominates(&CBInst, cast<Instruction>(U)))
+      return false;
+  return true;
+}
+
 void coro::buildCoroutineFrame(Function &F, Shape &Shape) {
   // Lower coro.dbg.declare to coro.dbg.value, since we are going to rewrite
   // access to local variables.
@@ -915,7 +924,7 @@
   // Collect the spills for arguments and other not-materializable values.
   for (Argument &A : F.args())
     for (User *U : A.users())
-      if (Checker.isDefinitionAcrossSuspend(A, U))
+      if (Checker.isDefinitionAcrossSuspend(A, U) && allUsersDominatedByCoroBegin(A, *Shape.CoroBegin, F))
         Spills.emplace_back(&A, U);
 
   for (Instruction &I : instructions(F)) {
@@ -929,7 +938,7 @@
       continue;
 
     for (User *U : I.users())
-      if (Checker.isDefinitionAcrossSuspend(I, U)) {
+      if (Checker.isDefinitionAcrossSuspend(I, U) && allUsersDominatedByCoroBegin(I, *Shape.CoroBegin, F)) {
         // We cannot spill a token.
         if (I.getType()->isTokenTy())
           report_fatal_error(
Index: test/Transforms/Coroutines/coro-split-mem2reg.ll
===================================================================
--- /dev/null
+++ test/Transforms/Coroutines/coro-split-mem2reg.ll
@@ -0,0 +1,62 @@
+; Tests that coro-split can handle the case when a memory reference
+; that crosses a suspend point is promoted to a register reference
+; by mem2reg.
+; RUN: opt < %s -mem2reg -coro-early -coro-split -coro-elide -S | FileCheck %s
+
+%Allocator = type { void (%Allocator*)* }
+
+declare i8* @customalloc(void (%Allocator*)* nonnull, %Allocator* nonnull, i64)
+
+define i8* @amain(%Allocator*, i16*) {
+Entry:
+  %allocStore = alloca %Allocator*, align 8
+  %id = call token @llvm.coro.id(i32 16, i8* null, i8* null, i8* null)
+  %size = call i64 @llvm.coro.size.i64()
+  store %Allocator* %0, %Allocator** %allocStore, align 8
+  %allocArg = getelementptr inbounds %Allocator, %Allocator* %0, i32 0, i32 0
+  %allocPtr = load void (%Allocator*)*, void (%Allocator*)** %allocArg, align 8
+  %allocSuccess = call i8* @customalloc(void (%Allocator*)* %allocPtr, %Allocator* %0, i64 %size)
+  %coroBegin = call i8* @llvm.coro.begin(token %id, i8* %allocSuccess)
+  br label %CoroSuspend
+
+CoroSuspend:                                      ; preds = %Entry
+  %suspend = call i8 @llvm.coro.suspend(token none, i1 true)
+  switch i8 %suspend, label %Suspend [
+    i8 0, label %InvalidResume
+    i8 1, label %CheckFree
+  ]
+
+Suspend:                                          ; preds = %CheckFree, %CoroSuspend
+  %end = call i1 @llvm.coro.end(i8* null, i1 false)
+  ret i8* %coroBegin
+
+InvalidResume:                                    ; preds = %CoroSuspend
+  unreachable
+
+CheckFree:                                        ; preds = %CoroSuspend
+  %allocLoad = load %Allocator*, %Allocator** %allocStore, align 8
+  %allocLoadPtr = getelementptr inbounds %Allocator, %Allocator* %allocLoad, i32 0, i32 0
+  %allocLoadVoid = load void (%Allocator*)*, void (%Allocator*)** %allocLoadPtr, align 8
+  %dummy = sub nsw i64 %size, 0
+  call i8* @llvm.coro.free(token %id, i8* %coroBegin)
+  br label %Suspend
+}
+
+; CHECK-LABEL: @amain(
+; CHECK-NOT: %allocStore =
+; CHECK-NOT: store %Allocator* %0, %Allocator** %allocStore
+
+; CHECK-LABEL: @amain.destroy(
+; CHECK-NOT: %allocStore.reload.addr =
+; CHECK: ret void
+
+; CHECK-LABEL: @amain.cleanup(
+; CHECK-NOT: %allocStore.reload.addr =
+; CHECK: ret void
+
+declare token @llvm.coro.id(i32, i8* readnone, i8* nocapture readonly, i8*)
+declare i64 @llvm.coro.size.i64()
+declare i8* @llvm.coro.begin(token, i8* writeonly)
+declare i8 @llvm.coro.suspend(token, i1)
+declare i1 @llvm.coro.end(i8*, i1)
+declare i8* @llvm.coro.free(token, i8* nocapture readonly)