Index: lib/Transforms/Coroutines/CoroFrame.cpp
===================================================================
--- lib/Transforms/Coroutines/CoroFrame.cpp
+++ lib/Transforms/Coroutines/CoroFrame.cpp
@@ -797,6 +797,17 @@
   }
 }
 
+// Any value with a user not dominated by CoroBegin can't be spilled.
+static bool allUsersDominatedByCoroBegin(Value &V, Instruction &CBInst, Function &F) {
+  DominatorTree DT(F);
+  for (User *U : V.users()) {
+    auto *UserInst = dyn_cast_or_null<Instruction>(U);
+    if (UserInst && !DT.dominates(&CBInst, UserInst))
+      return false;
+  }
+  return true;
+}
+
 // Move early uses of spilled variable after CoroBegin.
 // For example, if a parameter had address taken, we may end up with the code
 // like:
@@ -827,13 +838,16 @@
         // TODO: Make this more robust. Currently if we run into a situation
         // where simple instruction move won't work we panic and
         // report_fatal_error.
-        for (User *UI : I->users()) {
-          if (!DT.dominates(CoroBegin, cast<Instruction>(UI)))
-            report_fatal_error("cannot move instruction since its users are not"
-                               " dominated by CoroBegin");
+        auto *Arg = dyn_cast_or_null<Argument>(CurrentValue);
+        if (!Arg || CurrentValue->getType()->isPointerTy()) {
+          for (User *UI : I->users()) {
+            if (!DT.dominates(CoroBegin, cast<Instruction>(UI))) {
+              report_fatal_error("cannot move instruction since its users are not"
+                                 " dominated by CoroBegin");
+            }
+          }
+          NeedsMoving.push_back(I);
         }
-
-        NeedsMoving.push_back(I);
       }
     }
   }
@@ -915,7 +929,7 @@
   // Collect the spills for arguments and other not-materializable values.
   for (Argument &A : F.args())
     for (User *U : A.users())
-      if (Checker.isDefinitionAcrossSuspend(A, U))
+      if (Checker.isDefinitionAcrossSuspend(A, U) && !A.getType()->isPointerTy())
         Spills.emplace_back(&A, U);
 
   for (Instruction &I : instructions(F)) {
@@ -929,7 +943,8 @@
       continue;
 
     for (User *U : I.users())
-      if (Checker.isDefinitionAcrossSuspend(I, U)) {
+      if (Checker.isDefinitionAcrossSuspend(I, U) &&
+          allUsersDominatedByCoroBegin(I, *Shape.CoroBegin, F)) {
         // We cannot spill a token.
         if (I.getType()->isTokenTy())
           report_fatal_error(
Index: test/Transforms/Coroutines/coro-split-mem2reg.ll
===================================================================
--- /dev/null
+++ test/Transforms/Coroutines/coro-split-mem2reg.ll
@@ -0,0 +1,64 @@
+; Tests that coro-split can handle the case when a memory reference
+; which crosses a suspend point is promoted to a register reference
+; by mem2reg.
+; RUN: opt < %s -mem2reg -coro-early -coro-split -coro-elide -S | FileCheck %s
+
+%Allocator = type { void (%Allocator*)* }
+
+declare i8* @customalloc(void (%Allocator*)* nonnull, %Allocator* nonnull, i32)
+
+define i8* @amain(%Allocator*, i32 %scalar, i16*) {
+Entry:
+  %allocStore = alloca %Allocator*, align 8
+  %id = call token @llvm.coro.id(i32 16, i8* null, i8* null, i8* null)
+  store %Allocator* %0, %Allocator** %allocStore, align 8
+  %allocArg = getelementptr inbounds %Allocator, %Allocator* %0, i32 0, i32 0
+  %allocPtr = load void (%Allocator*)*, void (%Allocator*)** %allocArg, align 8
+  %allocSuccess = call fastcc i8* @customalloc(void (%Allocator*)* %allocPtr, %Allocator* %0, i32 %scalar)
+  %coroBegin = call i8* @llvm.coro.begin(token %id, i8* %allocSuccess)
+  br label %CoroSuspend
+
+CoroSuspend:                                      ; preds = %Entry
+  %suspend = call i8 @llvm.coro.suspend(token none, i1 true)
+  switch i8 %suspend, label %Suspend [
+    i8 0, label %InvalidResume
+    i8 1, label %CheckFree
+  ]
+
+Suspend:                                          ; preds = %CheckFree, %CoroSuspend
+  %end = call i1 @llvm.coro.end(i8* null, i1 false)
+  ret i8* %coroBegin
+
+InvalidResume:                                    ; preds = %CoroSuspend
+  unreachable
+
+CheckFree:                                        ; preds = %CoroSuspend
+  %allocLoad = load %Allocator*, %Allocator** %allocStore, align 8
+  %allocLoadPtr = getelementptr inbounds %Allocator, %Allocator* %allocLoad, i32 0, i32 0
+  %allocLoadVoid = load void (%Allocator*)*, void (%Allocator*)** %allocLoadPtr, align 8
+  %scalarStore = alloca i32
+  store i32 %scalar, i32* %scalarStore
+  call i8* @llvm.coro.free(token %id, i8* %coroBegin)
+  br label %Suspend
+}
+
+; CHECK: %amain.Frame = type { void (%amain.Frame*)*, void (%amain.Frame*)*, i1, i1, i32 }
+
+; CHECK-LABEL: @amain(
+; CHECK-NOT: %allocStore =
+; CHECK-NOT: store %Allocator* %0, %Allocator** %allocStore
+
+; CHECK-LABEL: @amain.destroy(
+; CHECK-NOT: %allocStore.reload.addr =
+; CHECK: ret void
+
+; CHECK-LABEL: @amain.cleanup(
+; CHECK-NOT: %allocStore.reload.addr =
+; CHECK: ret void
+
+declare token @llvm.coro.id(i32, i8* readnone, i8* nocapture readonly, i8*)
+declare i64 @llvm.coro.size.i64()
+declare i8* @llvm.coro.begin(token, i8* writeonly)
+declare i8 @llvm.coro.suspend(token, i1)
+declare i1 @llvm.coro.end(i8*, i1)
+declare i8* @llvm.coro.free(token, i8* nocapture readonly)