Index: llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp
===================================================================
--- llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp
+++ llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp
@@ -489,6 +489,18 @@
 }
 
+/// Return true if value V represents the GC value. The behavior is conservative:
+/// if it is not certain that the value is not GC, the function returns true.
+static bool isGCValue(const Value *V, SelectionDAGBuilder &Builder) {
+  auto *Ty = V->getType();
+  if (!Ty->isPtrOrPtrVectorTy())
+    return false;
+  if (auto *GFI = Builder.GFI)
+    if (auto IsManaged = GFI->getStrategy().isGCManagedPointer(Ty))
+      return *IsManaged;
+  return true; // conservative
+}
+
 /// Lower deopt state and gc pointer arguments of the statepoint. The actual
 /// lowering is described in lowerIncomingStatepointValue. This function is
 /// responsible for lowering everything in the right position and playing some
@@ -607,18 +619,8 @@
 
   LLVM_DEBUG(dbgs() << LowerAsVReg.size() << " pointers will go in vregs\n");
 
-  auto isGCValue = [&](const Value *V) {
-    auto *Ty = V->getType();
-    if (!Ty->isPtrOrPtrVectorTy())
-      return false;
-    if (auto *GFI = Builder.GFI)
-      if (auto IsManaged = GFI->getStrategy().isGCManagedPointer(Ty))
-        return *IsManaged;
-    return true; // conservative
-  };
-
   auto requireSpillSlot = [&](const Value *V) {
-    if (isGCValue(V))
+    if (isGCValue(V, Builder))
       return !LowerAsVReg.count(Builder.getValue(V));
     return !(LiveInDeopt || UseRegistersForDeoptValues);
   };
@@ -727,8 +729,7 @@
   NumOfStatepoints++;
   // Clear state
   StatepointLowering.startNewStatepoint(*this);
-  assert(SI.Bases.size() == SI.Ptrs.size() &&
-         SI.Ptrs.size() <= SI.GCRelocates.size());
+  assert(SI.Bases.size() == SI.Ptrs.size());
 
   LLVM_DEBUG(dbgs() << "Lowering statepoint " << *SI.StatepointInstr << "\n");
 #ifndef NDEBUG
@@ -1042,6 +1043,21 @@
     }
   }
 
+  // If we find a deopt value which isn't explicitly added, we need to
+  // ensure it gets lowered such that gc cycles occurring before the
+  // deoptimization event during the lifetime of the call don't invalidate
+  // the pointer we're deopting with. Note that we assume that all
+  // pointers passed to deopt are base pointers; relaxing that assumption
+  // would require relatively large changes to how we represent relocations.
+  for (Value *V : I.deopt_operands()) {
+    if (!isGCValue(V, *this))
+      continue;
+    if (Seen.insert(getValue(V)).second) {
+      SI.Bases.push_back(V);
+      SI.Ptrs.push_back(V);
+    }
+  }
+
   SI.GCArgs = ArrayRef(I.gc_args_begin(), I.gc_args_end());
   SI.StatepointInstr = &I;
   SI.ID = I.getID();
Index: llvm/test/CodeGen/X86/statepoint-vreg-details.ll
===================================================================
--- llvm/test/CodeGen/X86/statepoint-vreg-details.ll
+++ llvm/test/CodeGen/X86/statepoint-vreg-details.ll
@@ -135,16 +135,15 @@
 ; CHECK-VREG-LABEL: name: test_deopt_gcpointer
 ; CHECK-VREG:   %1:gr64 = COPY $rsi
 ; CHECK-VREG:   %0:gr64 = COPY $rdi
-; CHECK-VREG:   MOV64mr %stack.0, 1, $noreg, 0, $noreg, %0 :: (store 8 into %stack.0)
-; CHECK-VREG:   %2:gr64 = STATEPOINT 0, 0, 0, @func, 2, 0, 2, 0, 2, 1, 1, 8, %stack.0, 0, 2, 1, %1(tied-def 0), 2, 0, 2, 1, 0, 0, csr_64, implicit-def $rsp, implicit-def $ssp :: (volatile load store 8 on %stack.0)
+; CHECK-VREG:   %2:gr64, %3:gr64 = STATEPOINT 0, 0, 0, @func, 2, 0, 2, 0, 2, 1, %0, 2, 2, %1(tied-def 0), %0(tied-def 1), 2, 0, 2, 2, 0, 0, 1, 1, csr_64, implicit-def $rsp, implicit-def $ssp
 ; CHECK-VREG:   $rdi = COPY %2
 ; CHECK-VREG:   CALL64pcrel32 @consume, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit-def $rsp, implicit-def $ssp
 ; CHECK-VREG:   RET 0
 
 ; CHECK-PREG-LABEL: name: test_deopt_gcpointer
 ; CHECK-PREG:   renamable $rbx = COPY $rsi
-; CHECK-PREG:   MOV64mr %stack.0, 1, $noreg, 0, $noreg, killed renamable $rdi :: (store 8 into %stack.0)
-; CHECK-PREG:   renamable $rbx = STATEPOINT 0, 0, 0, @func, 2, 0, 2, 0, 2, 1, 1, 8, %stack.0, 0, 2, 1, killed renamable $rbx(tied-def 0), 2, 0, 2, 1, 0, 0, csr_64, implicit-def $rsp, implicit-def $ssp :: (volatile load store 8 on %stack.0)
+; CHECK-PREG:   renamable $r14 = COPY $rdi
+; CHECK-PREG:   renamable $rbx, dead renamable $r14 = STATEPOINT 0, 0, 0, @func, 2, 0, 2, 0, 2, 1, killed renamable $r14, 2, 2, killed renamable $rbx(tied-def 0), renamable $r14(tied-def 1), 2, 0, 2, 2, 0, 0, 1, 1, csr_64, implicit-def $rsp, implicit-def $ssp
 ; CHECK-PREG:   $rdi = COPY killed renamable $rbx
 ; CHECK-PREG:   CALL64pcrel32 @consume, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit-def $rsp, implicit-def $ssp
Index: llvm/test/CodeGen/X86/statepoint-vreg-invoke.ll
===================================================================
--- llvm/test/CodeGen/X86/statepoint-vreg-invoke.ll
+++ llvm/test/CodeGen/X86/statepoint-vreg-invoke.ll
@@ -126,11 +126,13 @@
 ; CHECK:   STATEPOINT 1, 16, 5, undef renamable $rax, undef $edi, undef $rsi, undef $edx, undef $ecx, undef $r8d, 2, 0, 2, 0, 2, 0, 2, 1, 1, 8, %stack.0, 0, 2, 0, 2, 1, 0, 0, csr_64, implicit-def $rsp, implicit-def $ssp, implicit-def dead $eax :: (volatile load store 8 on %stack.0)
 ; CHECK:   JMP_1 %bb.1
 ; CHECK: bb.1.normal_continue:
+; CHECK:   renamable $rbx = MOV64rm %stack.0, 1, $noreg, 0, $noreg :: (load 8 from %stack.0)
 ; CHECK:   $edi = MOV32ri 10
-; CHECK:   STATEPOINT 2882400000, 0, 1, target-flags(x86-plt) @__llvm_deoptimize, killed $edi, 2, 0, 2, 2, 2, 2, 1, 8, %stack.0, 0, 1, 8, %stack.0, 0, 2, 0, 2, 0, 2, 0, csr_64, implicit-def $rsp, implicit-def $ssp
+; CHECK:   dead renamable $rbx = STATEPOINT 2882400000, 0, 1, target-flags(x86-plt) @__llvm_deoptimize, killed $edi, 2, 0, 2, 2, 2, 2, killed renamable $rbx, renamable $rbx, 2, 1, renamable $rbx(tied-def 0), 2, 0, 2, 1, 0, 0, csr_64, implicit-def $rsp, implicit-def $ssp
 ; CHECK: bb.2.exceptional_return (landing-pad):
+; CHECK:   renamable $rbx = MOV64rm %stack.0, 1, $noreg, 0, $noreg :: (load 8 from %stack.0)
 ; CHECK:   $edi = MOV32ri -271
-; CHECK:   STATEPOINT 2882400000, 0, 1, target-flags(x86-plt) @__llvm_deoptimize, killed $edi, 2, 0, 2, 0, 2, 1, 1, 8, %stack.0, 0, 2, 0, 2, 0, 2, 0, csr_64, implicit-def $rsp, implicit-def $ssp
+; CHECK:   dead renamable $rbx = STATEPOINT 2882400000, 0, 1, target-flags(x86-plt) @__llvm_deoptimize, killed $edi, 2, 0, 2, 0, 2, 1, killed renamable $rbx, 2, 1, renamable $rbx(tied-def 0), 2, 0, 2, 1, 0, 0, csr_64, implicit-def $rsp, implicit-def $ssp
 entry:
   %val1 = load i8 addrspace(1)*, i8 addrspace(1)* addrspace(1)* undef, align 8
   %val2 = load i8 addrspace(1)*, i8 addrspace(1)* addrspace(1)* undef, align 8
Index: llvm/test/CodeGen/X86/statepoint-vreg.ll
===================================================================
--- llvm/test/CodeGen/X86/statepoint-vreg.ll
+++ llvm/test/CodeGen/X86/statepoint-vreg.ll
@@ -159,24 +159,29 @@
   ret void
 }
 
-; deopt GC pointer not present in GC args must be spilled
+; deopt GC pointer not present in GC args goes in a register.
 define void @test_deopt_gcpointer(i32 addrspace(1)* %a, i32 addrspace(1)* %b) gc "statepoint-example" {
 ; CHECK-LABEL: test_deopt_gcpointer:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    pushq %rbx
+; CHECK-NEXT:    pushq %r14
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    subq $16, %rsp
+; CHECK-NEXT:    pushq %rbx
+; CHECK-NEXT:    .cfi_def_cfa_offset 24
+; CHECK-NEXT:    pushq %rax
 ; CHECK-NEXT:    .cfi_def_cfa_offset 32
-; CHECK-NEXT:    .cfi_offset %rbx, -16
+; CHECK-NEXT:    .cfi_offset %rbx, -24
+; CHECK-NEXT:    .cfi_offset %r14, -16
 ; CHECK-NEXT:    movq %rsi, %rbx
-; CHECK-NEXT:    movq %rdi, {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    movq %rdi, %r14
 ; CHECK-NEXT:    callq func
 ; CHECK-NEXT:  .Ltmp4:
 ; CHECK-NEXT:    movq %rbx, %rdi
 ; CHECK-NEXT:    callq consume
-; CHECK-NEXT:    addq $16, %rsp
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    addq $8, %rsp
+; CHECK-NEXT:    .cfi_def_cfa_offset 24
 ; CHECK-NEXT:    popq %rbx
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    popq %r14
 ; CHECK-NEXT:    .cfi_def_cfa_offset 8
 ; CHECK-NEXT:    retq
   %safepoint_token = tail call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* @func, i32 0, i32 0, i32 0, i32 0) ["deopt" (i32 addrspace(1)* %a), "gc-live" (i32 addrspace(1)* %b)]
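
Note (not part of the patch): a minimal IR sketch of the situation the change addresses, distilled from the test_deopt_gcpointer test above. The function name @deopt_only_gc_pointer and the comments are illustrative; the intrinsic declarations match the ones used by the existing tests. Here %a is live across the call only through the "deopt" bundle, so statepoint lowering now adds it to SI.Bases/SI.Ptrs itself and relocates it across the call like the explicit gc-live values (in the updated checks above, as a tied-def register operand rather than a spill slot).

declare token @llvm.experimental.gc.statepoint.p0f_isVoidf(i64, i32, void ()*, i32, i32, ...)
declare i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token, i32, i32)
declare void @func()
declare void @consume(i32 addrspace(1)*)

; %a appears only in the "deopt" bundle; %b is an explicit gc-live value.
define void @deopt_only_gc_pointer(i32 addrspace(1)* %a, i32 addrspace(1)* %b) gc "statepoint-example" {
entry:
  %tok = call token (i64, i32, void ()*, i32, i32, ...) @llvm.experimental.gc.statepoint.p0f_isVoidf(i64 0, i32 0, void ()* @func, i32 0, i32 0, i32 0, i32 0) ["deopt" (i32 addrspace(1)* %a), "gc-live" (i32 addrspace(1)* %b)]
  ; Only %b needs an explicit gc.relocate; %a is kept valid across a GC that may
  ; run during @func because lowering now treats deopt GC pointers as live.
  %b.rel = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(token %tok, i32 0, i32 0)
  call void @consume(i32 addrspace(1)* %b.rel)
  ret void
}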