diff --git a/compiler-rt/lib/asan/asan_thread.cpp b/compiler-rt/lib/asan/asan_thread.cpp --- a/compiler-rt/lib/asan/asan_thread.cpp +++ b/compiler-rt/lib/asan/asan_thread.cpp @@ -507,15 +507,30 @@ void GetAllThreadAllocatorCachesLocked(InternalMmapVector<uptr> *caches) {} -void ForEachExtraStackRange(tid_t os_id, RangeIteratorCallback callback, - void *arg) { +void GetThreadExtraStackRangesLocked(tid_t os_id, + InternalMmapVector<Range> *ranges) { __asan::AsanThread *t = __asan::GetAsanThreadByOsIDLocked(os_id); if (!t) return; __asan::FakeStack *fake_stack = t->get_fake_stack(); if (!fake_stack) return; - fake_stack->ForEachFakeFrame(callback, arg); + + fake_stack->ForEachFakeFrame( + [](uptr begin, uptr end, void *arg) { + reinterpret_cast<InternalMmapVector<Range> *>(arg)->push_back( + {begin, end}); + }, + ranges); +} + +void GetThreadExtraStackRangesLocked(InternalMmapVector<Range> *ranges) { + GetAsanThreadRegistryLocked()->RunCallbackForEachThreadLocked( + [](ThreadContextBase *tctx, void *arg) { + GetThreadExtraStackRangesLocked( + tctx->os_id, reinterpret_cast<InternalMmapVector<Range> *>(arg)); + }, + ranges); } void GetAdditionalThreadContextPtrsLocked(InternalMmapVector<void *> *ptrs) { diff --git a/compiler-rt/lib/hwasan/hwasan_thread.cpp b/compiler-rt/lib/hwasan/hwasan_thread.cpp --- a/compiler-rt/lib/hwasan/hwasan_thread.cpp +++ b/compiler-rt/lib/hwasan/hwasan_thread.cpp @@ -194,8 +194,9 @@ void GetAllThreadAllocatorCachesLocked(InternalMmapVector<uptr> *caches) {} -void ForEachExtraStackRange(tid_t os_id, RangeIteratorCallback callback, - void *arg) {} +void GetThreadExtraStackRangesLocked(tid_t os_id, + InternalMmapVector<Range> *ranges) {} +void GetThreadExtraStackRangesLocked(InternalMmapVector<Range> *ranges) {} void GetAdditionalThreadContextPtrsLocked(InternalMmapVector<void *> *ptrs) {} void GetRunningThreadsLocked(InternalMmapVector<tid_t> *threads) {} diff --git a/compiler-rt/lib/lsan/lsan_common.h b/compiler-rt/lib/lsan/lsan_common.h --- a/compiler-rt/lib/lsan/lsan_common.h +++ b/compiler-rt/lib/lsan/lsan_common.h @@ -79,6 +79,11 @@ 
kIgnoreObjectInvalid }; +struct Range { + uptr begin; + uptr end; +}; + //// -------------------------------------------------------------------------- //// Poisoning prototypes. //// -------------------------------------------------------------------------- @@ -105,8 +110,9 @@ uptr *tls_begin, uptr *tls_end, uptr *cache_begin, uptr *cache_end, DTLS **dtls); void GetAllThreadAllocatorCachesLocked(InternalMmapVector<uptr> *caches); -void ForEachExtraStackRange(tid_t os_id, RangeIteratorCallback callback, - void *arg); +void GetThreadExtraStackRangesLocked(InternalMmapVector<Range> *ranges); +void GetThreadExtraStackRangesLocked(tid_t os_id, + InternalMmapVector<Range> *ranges); void GetAdditionalThreadContextPtrsLocked(InternalMmapVector<void *> *ptrs); void GetRunningThreadsLocked(InternalMmapVector<tid_t> *threads); @@ -250,7 +256,6 @@ InternalMmapVectorNoCtor<RootRegion> const *GetRootRegions(); void ScanRootRegion(Frontier *frontier, RootRegion const &region, uptr region_begin, uptr region_end, bool is_readable); -void ForEachExtraStackRangeCb(uptr begin, uptr end, void *arg); // Run stoptheworld while holding any platform-specific locks, as well as the // allocator and thread registry locks. void LockStuffAndStopTheWorld(StopTheWorldCallback callback, @@ -260,6 +265,7 @@ Frontier *frontier, const char *region_type, ChunkTag tag); void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier); +void ScanExtraStackRanges(const InternalMmapVector<Range> &ranges, Frontier *frontier); // Functions called from the parent tool. 
const char *MaybeCallLsanDefaultOptions(); diff --git a/compiler-rt/lib/lsan/lsan_common.cpp b/compiler-rt/lib/lsan/lsan_common.cpp --- a/compiler-rt/lib/lsan/lsan_common.cpp +++ b/compiler-rt/lib/lsan/lsan_common.cpp @@ -353,9 +353,12 @@ } } -void ForEachExtraStackRangeCb(uptr begin, uptr end, void *arg) { - Frontier *frontier = reinterpret_cast<Frontier *>(arg); - ScanRangeForPointers(begin, end, frontier, "FAKE STACK", kReachable); +void ScanExtraStackRanges(const InternalMmapVector<Range> &ranges, + Frontier *frontier) { + for (uptr i = 0; i < ranges.size(); i++) { + ScanRangeForPointers(ranges[i].begin, ranges[i].end, frontier, "FAKE STACK", + kReachable); + } } # if SANITIZER_FUCHSIA @@ -397,6 +400,7 @@ Frontier *frontier, tid_t caller_tid, uptr caller_sp) { InternalMmapVector<uptr> registers; + InternalMmapVector<Range> extra_ranges; for (uptr i = 0; i < suspended_threads.ThreadCount(); i++) { tid_t os_id = static_cast<tid_t>(suspended_threads.GetThreadID(i)); LOG_THREADS("Processing thread %llu.\n", os_id); @@ -457,7 +461,9 @@ } ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK", kReachable); - ForEachExtraStackRange(os_id, ForEachExtraStackRangeCb, frontier); + extra_ranges.clear(); + GetThreadExtraStackRangesLocked(os_id, &extra_ranges); + ScanExtraStackRanges(extra_ranges, frontier); } if (flags()->use_tls) { diff --git a/compiler-rt/lib/lsan/lsan_common_fuchsia.cpp b/compiler-rt/lib/lsan/lsan_common_fuchsia.cpp --- a/compiler-rt/lib/lsan/lsan_common_fuchsia.cpp +++ b/compiler-rt/lib/lsan/lsan_common_fuchsia.cpp @@ -144,17 +144,13 @@ // We don't use the thread registry at all for enumerating the threads // and their stacks, registers, and TLS regions. So use it separately - // just for the allocator cache, and to call ForEachExtraStackRange, + // just for the allocator cache, and to call ScanExtraStackRanges, // which ASan needs.
if (flags()->use_stacks) { - GetLsanThreadRegistryLocked()->RunCallbackForEachThreadLocked( - [](ThreadContextBase *tctx, void *arg) { - ForEachExtraStackRange(tctx->os_id, ForEachExtraStackRangeCb, - arg); - }, - &params->argument->frontier); + InternalMmapVector<Range> ranges; + GetThreadExtraStackRangesLocked(&ranges); + ScanExtraStackRanges(ranges, &params->argument->frontier); } - params->callback(SuspendedThreadsListFuchsia(), params->argument); }, &params); diff --git a/compiler-rt/lib/lsan/lsan_thread.cpp b/compiler-rt/lib/lsan/lsan_thread.cpp --- a/compiler-rt/lib/lsan/lsan_thread.cpp +++ b/compiler-rt/lib/lsan/lsan_thread.cpp @@ -75,8 +75,9 @@ ///// Interface to the common LSan module. ///// -void ForEachExtraStackRange(tid_t os_id, RangeIteratorCallback callback, - void *arg) {} +void GetThreadExtraStackRangesLocked(tid_t os_id, + InternalMmapVector<Range> *ranges) {} +void GetThreadExtraStackRangesLocked(InternalMmapVector<Range> *ranges) {} void LockThreadRegistry() { thread_registry->Lock(); }