diff --git a/compiler-rt/lib/gwp_asan/guarded_pool_allocator.h b/compiler-rt/lib/gwp_asan/guarded_pool_allocator.h
--- a/compiler-rt/lib/gwp_asan/guarded_pool_allocator.h
+++ b/compiler-rt/lib/gwp_asan/guarded_pool_allocator.h
@@ -196,6 +196,10 @@
   // A mutex to protect the guarded slot and metadata pool for this class.
   Mutex PoolMutex;
+  // Some unwinders can grab the libdl lock. In order to provide atfork
+  // protection, we need to ensure that we allow an unwinding thread to release
+  // the libdl lock before forking.
+  Mutex BacktraceMutex;
   // Record the number allocations that we've sampled. We store this amount so
   // that we don't randomly choose to recycle a slot that previously had an
   // allocation before all the slots have been utilised.
   size_t NumSampledAllocations = 0;
diff --git a/compiler-rt/lib/gwp_asan/guarded_pool_allocator.cpp b/compiler-rt/lib/gwp_asan/guarded_pool_allocator.cpp
--- a/compiler-rt/lib/gwp_asan/guarded_pool_allocator.cpp
+++ b/compiler-rt/lib/gwp_asan/guarded_pool_allocator.cpp
@@ -103,9 +103,15 @@
   installAtFork();
 }
 
-void GuardedPoolAllocator::disable() { PoolMutex.lock(); }
+void GuardedPoolAllocator::disable() {
+  PoolMutex.lock();
+  BacktraceMutex.lock();
+}
 
-void GuardedPoolAllocator::enable() { PoolMutex.unlock(); }
+void GuardedPoolAllocator::enable() {
+  PoolMutex.unlock();
+  BacktraceMutex.unlock();
+}
 
 void GuardedPoolAllocator::iterate(void *Base, size_t Size, iterate_callback Cb,
                                    void *Arg) {
@@ -232,7 +238,10 @@
                 roundUpTo(Size, PageSize));
 
   Meta->RecordAllocation(UserPtr, Size);
-  Meta->AllocationTrace.RecordBacktrace(Backtrace);
+  {
+    ScopedLock UL(BacktraceMutex);
+    Meta->AllocationTrace.RecordBacktrace(Backtrace);
+  }
 
   return reinterpret_cast<void *>(UserPtr);
 }
@@ -281,6 +290,7 @@
     // otherwise non-reentrant unwinders may deadlock.
     if (!getThreadLocals()->RecursiveGuard) {
       ScopedRecursiveGuard SRG;
+      ScopedLock UL(BacktraceMutex);
       Meta->DeallocationTrace.RecordBacktrace(Backtrace);
     }
   }
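For context on the ordering this enforces: GWP-ASan already registers atfork handlers via installAtFork(), and disable()/enable() are what those handlers call. Below is a minimal, self-contained sketch of that pthread_atfork discipline, not GWP-ASan code (the names PoolMutex/BacktraceMutex mirror the patch, everything else is illustrative). It shows why acquiring BacktraceMutex in the prepare handler guarantees that any in-flight unwind, and therefore the libdl lock it may hold, has completed before fork() snapshots the address space.

#include <cstdio>
#include <pthread.h>
#include <sys/wait.h>
#include <unistd.h>

namespace {
// Stand-ins for the PoolMutex and BacktraceMutex members added by the patch.
pthread_mutex_t PoolMutex = PTHREAD_MUTEX_INITIALIZER;
pthread_mutex_t BacktraceMutex = PTHREAD_MUTEX_INITIALIZER;

// Prepare handler: runs immediately before fork(). Acquiring BacktraceMutex
// here blocks until any thread currently inside RecordBacktrace() (and thus
// possibly holding the libdl lock) has finished, so the child cannot inherit
// a libdl lock with no owner thread left to release it.
void disable() {
  pthread_mutex_lock(&PoolMutex);
  pthread_mutex_lock(&BacktraceMutex);
}

// Parent/child handler: runs after fork() in both processes, mirroring the
// unlock order used by GuardedPoolAllocator::enable() in the patch.
void enable() {
  pthread_mutex_unlock(&PoolMutex);
  pthread_mutex_unlock(&BacktraceMutex);
}
} // namespace

int main() {
  // GWP-ASan performs this registration once in its platform-specific
  // installAtFork(); the handler triple is (prepare, parent, child).
  pthread_atfork(disable, enable, enable);

  pid_t Pid = fork();
  if (Pid == 0) {
    std::puts("child: both locks released, safe to take a backtrace");
    return 0;
  }
  waitpid(Pid, nullptr, 0);
  std::puts("parent: both locks released");
  return 0;
}

Note the asymmetry in the patch itself: allocate() wraps only the RecordBacktrace() call in a new scoped block so BacktraceMutex is held for the shortest possible window, while deallocate() takes the lock inside the existing RecursiveGuard scope, which already bounds the unwinder call.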