diff --git a/compiler-rt/lib/asan/asan_stats.cpp b/compiler-rt/lib/asan/asan_stats.cpp --- a/compiler-rt/lib/asan/asan_stats.cpp +++ b/compiler-rt/lib/asan/asan_stats.cpp @@ -124,9 +124,9 @@ // Use lock to keep reports from mixing up. Lock lock(&print_lock); stats.Print(); - StackDepotStats *stack_depot_stats = StackDepotGetStats(); + StackDepotStats stack_depot_stats = StackDepotGetStats(); Printf("Stats: StackDepot: %zd ids; %zdM allocated\n", - stack_depot_stats->n_uniq_ids, stack_depot_stats->allocated >> 20); + stack_depot_stats.n_uniq_ids, stack_depot_stats.allocated >> 20); PrintInternalAllocatorStats(); } diff --git a/compiler-rt/lib/hwasan/hwasan.cpp b/compiler-rt/lib/hwasan/hwasan.cpp --- a/compiler-rt/lib/hwasan/hwasan.cpp +++ b/compiler-rt/lib/hwasan/hwasan.cpp @@ -141,7 +141,7 @@ static void HwasanFormatMemoryUsage(InternalScopedString &s) { HwasanThreadList &thread_list = hwasanThreadList(); auto thread_stats = thread_list.GetThreadStats(); - auto *sds = StackDepotGetStats(); + auto sds = StackDepotGetStats(); AllocatorStatCounters asc; GetAllocatorStats(asc); s.append( @@ -151,7 +151,7 @@ internal_getpid(), GetRSS(), thread_stats.n_live_threads, thread_stats.total_stack_size, thread_stats.n_live_threads * thread_list.MemoryUsedPerThread(), - sds->allocated, sds->n_uniq_ids, asc[AllocatorStatMapped]); + sds.allocated, sds.n_uniq_ids, asc[AllocatorStatMapped]); } #if SANITIZER_ANDROID diff --git a/compiler-rt/lib/memprof/memprof_stats.cpp b/compiler-rt/lib/memprof/memprof_stats.cpp --- a/compiler-rt/lib/memprof/memprof_stats.cpp +++ b/compiler-rt/lib/memprof/memprof_stats.cpp @@ -115,9 +115,9 @@ // Use lock to keep reports from mixing up. 
Lock lock(&print_lock); stats.Print(); - StackDepotStats *stack_depot_stats = StackDepotGetStats(); + StackDepotStats stack_depot_stats = StackDepotGetStats(); Printf("Stats: StackDepot: %zd ids; %zdM allocated\n", - stack_depot_stats->n_uniq_ids, stack_depot_stats->allocated >> 20); + stack_depot_stats.n_uniq_ids, stack_depot_stats.allocated >> 20); PrintInternalAllocatorStats(); } diff --git a/compiler-rt/lib/msan/msan_chained_origin_depot.h b/compiler-rt/lib/msan/msan_chained_origin_depot.h --- a/compiler-rt/lib/msan/msan_chained_origin_depot.h +++ b/compiler-rt/lib/msan/msan_chained_origin_depot.h @@ -19,7 +19,7 @@ namespace __msan { // Gets the statistic of the origin chain storage. -StackDepotStats *ChainedOriginDepotGetStats(); +StackDepotStats ChainedOriginDepotGetStats(); // Stores a chain with StackDepot ID here_id and previous chain ID prev_id. // If successful, returns true and the new chain id new_id. diff --git a/compiler-rt/lib/msan/msan_report.cpp b/compiler-rt/lib/msan/msan_report.cpp --- a/compiler-rt/lib/msan/msan_report.cpp +++ b/compiler-rt/lib/msan/msan_report.cpp @@ -122,17 +122,17 @@ ScopedErrorReportLock l; if (__msan_get_track_origins() > 0) { - StackDepotStats *stack_depot_stats = StackDepotGetStats(); + StackDepotStats stack_depot_stats = StackDepotGetStats(); // FIXME: we want this at normal exit, too! 
// FIXME: but only with verbosity=1 or something - Printf("Unique heap origins: %zu\n", stack_depot_stats->n_uniq_ids); - Printf("Stack depot allocated bytes: %zu\n", stack_depot_stats->allocated); + Printf("Unique heap origins: %zu\n", stack_depot_stats.n_uniq_ids); + Printf("Stack depot allocated bytes: %zu\n", stack_depot_stats.allocated); - StackDepotStats *chained_origin_depot_stats = ChainedOriginDepotGetStats(); + StackDepotStats chained_origin_depot_stats = ChainedOriginDepotGetStats(); Printf("Unique origin histories: %zu\n", - chained_origin_depot_stats->n_uniq_ids); + chained_origin_depot_stats.n_uniq_ids); Printf("History depot allocated bytes: %zu\n", - chained_origin_depot_stats->allocated); + chained_origin_depot_stats.allocated); } } diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_common_libcdep.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_common_libcdep.cpp --- a/compiler-rt/lib/sanitizer_common/sanitizer_common_libcdep.cpp +++ b/compiler-rt/lib/sanitizer_common/sanitizer_common_libcdep.cpp @@ -48,16 +48,12 @@ prev_reported_rss = current_rss_mb; } // If stack depot has grown 10% since last time, print it too. - StackDepotStats *stack_depot_stats = StackDepotGetStats(); - if (stack_depot_stats) { - if (prev_reported_stack_depot_size * 11 / 10 < - stack_depot_stats->allocated) { - Printf("%s: StackDepot: %zd ids; %zdM allocated\n", - SanitizerToolName, - stack_depot_stats->n_uniq_ids, - stack_depot_stats->allocated >> 20); - prev_reported_stack_depot_size = stack_depot_stats->allocated; - } + StackDepotStats stack_depot_stats = StackDepotGetStats(); + if (prev_reported_stack_depot_size * 11 / 10 < + stack_depot_stats.allocated) { + Printf("%s: StackDepot: %zd ids; %zdM allocated\n", SanitizerToolName, + stack_depot_stats.n_uniq_ids, stack_depot_stats.allocated >> 20); + prev_reported_stack_depot_size = stack_depot_stats.allocated; } } // Check RSS against the limit. 
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.h b/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.h --- a/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.h +++ b/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.h @@ -33,7 +33,7 @@ const int kStackDepotMaxUseCount = 1U << (SANITIZER_ANDROID ? 16 : 20); -StackDepotStats *StackDepotGetStats(); +StackDepotStats StackDepotGetStats(); u32 StackDepotPut(StackTrace stack); StackDepotHandle StackDepotPut_WithHandle(StackTrace stack); // Retrieves a stored stack trace by the id. diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp --- a/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp +++ b/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp @@ -90,9 +90,7 @@ StackDepot; static StackDepot theDepot; -StackDepotStats *StackDepotGetStats() { - return theDepot.GetStats(); -} +StackDepotStats StackDepotGetStats() { return theDepot.GetStats(); } u32 StackDepotPut(StackTrace stack) { StackDepotHandle h = theDepot.Put(stack); @@ -128,7 +126,7 @@ } StackDepotReverseMap::StackDepotReverseMap() { - map_.reserve(StackDepotGetStats()->n_uniq_ids + 100); + map_.reserve(StackDepotGetStats().n_uniq_ids + 100); for (int idx = 0; idx < StackDepot::kTabSize; idx++) { atomic_uintptr_t *p = &theDepot.tab[idx]; uptr v = atomic_load(p, memory_order_consume); diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_stackdepotbase.h b/compiler-rt/lib/sanitizer_common/sanitizer_stackdepotbase.h --- a/compiler-rt/lib/sanitizer_common/sanitizer_stackdepotbase.h +++ b/compiler-rt/lib/sanitizer_common/sanitizer_stackdepotbase.h @@ -16,9 +16,11 @@ #include #include "sanitizer_atomic.h" +#include "sanitizer_common.h" #include "sanitizer_internal_defs.h" #include "sanitizer_mutex.h" #include "sanitizer_persistent_allocator.h" +#include "sanitizer_stackdepot.h" namespace __sanitizer { @@ -32,7 +34,12 @@ // Retrieves a stored stack 
trace by the id. args_type Get(u32 id); - StackDepotStats *GetStats() { return &stats; } + StackDepotStats GetStats() const { + return { + atomic_load_relaxed(&n_uniq_ids), + atomic_load_relaxed(&allocated), + }; + } void LockAll(); void UnlockAll(); @@ -54,7 +61,8 @@ atomic_uintptr_t tab[kTabSize]; // Hash table of Node's. atomic_uint32_t seq[kPartCount]; // Unique id generators. - StackDepotStats stats; + atomic_uintptr_t n_uniq_ids; + atomic_uintptr_t allocated; friend class StackDepotReverseMap; }; @@ -119,14 +127,13 @@ } uptr part = (h % kTabSize) / kPartSize; u32 id = atomic_fetch_add(&seq[part], 1, memory_order_relaxed) + 1; - stats.n_uniq_ids++; - CHECK_LT(id, kMaxId); + atomic_fetch_add(&n_uniq_ids, 1, memory_order_relaxed); CHECK_LT(id, kMaxId); id |= part << kPartShift; CHECK_NE(id, 0); CHECK_EQ(id & (((u32)-1) >> kReservedBits), id); uptr memsz = Node::storage_size(args); s = (Node *)PersistentAlloc(memsz); - stats.allocated += memsz; + atomic_fetch_add(&allocated, memsz, memory_order_relaxed); s->id = id; s->store(args, h); s->link = s2; diff --git a/compiler-rt/lib/tsan/rtl/tsan_platform_linux.cpp b/compiler-rt/lib/tsan/rtl/tsan_platform_linux.cpp --- a/compiler-rt/lib/tsan/rtl/tsan_platform_linux.cpp +++ b/compiler-rt/lib/tsan/rtl/tsan_platform_linux.cpp @@ -124,13 +124,13 @@ internal_memset(mem, 0, sizeof(mem)); GetMemoryProfile(FillProfileCallback, mem, MemCount); auto meta = ctx->metamap.GetMemoryStats(); - StackDepotStats *stacks = StackDepotGetStats(); + StackDepotStats stacks = StackDepotGetStats(); uptr nthread, nlive; ctx->thread_registry.GetNumberOfThreads(&nthread, &nlive); uptr internal_stats[AllocatorStatCount]; internal_allocator()->GetStats(internal_stats); // All these are allocated from the common mmap region. 
- mem[MemMmap] -= meta.mem_block + meta.sync_obj + stacks->allocated + + mem[MemMmap] -= meta.mem_block + meta.sync_obj + stacks.allocated + internal_stats[AllocatorStatMapped]; if (s64(mem[MemMmap]) < 0) mem[MemMmap] = 0; @@ -143,8 +143,8 @@ mem[MemShadow] >> 20, mem[MemMeta] >> 20, mem[MemFile] >> 20, mem[MemMmap] >> 20, mem[MemTrace] >> 20, mem[MemHeap] >> 20, mem[MemOther] >> 20, internal_stats[AllocatorStatMapped] >> 20, - meta.mem_block >> 20, meta.sync_obj >> 20, stacks->allocated >> 20, - stacks->n_uniq_ids, nlive, nthread); + meta.mem_block >> 20, meta.sync_obj >> 20, stacks.allocated >> 20, + stacks.n_uniq_ids, nlive, nthread); } # if SANITIZER_LINUX diff --git a/compiler-rt/lib/tsan/rtl/tsan_platform_mac.cpp b/compiler-rt/lib/tsan/rtl/tsan_platform_mac.cpp --- a/compiler-rt/lib/tsan/rtl/tsan_platform_mac.cpp +++ b/compiler-rt/lib/tsan/rtl/tsan_platform_mac.cpp @@ -159,35 +159,35 @@ RegionMemUsage(LoAppMemBeg(), LoAppMemEnd(), &app_res, &app_dirty); #endif - StackDepotStats *stacks = StackDepotGetStats(); + StackDepotStats stacks = StackDepotGetStats(); uptr nthread, nlive; ctx->thread_registry.GetNumberOfThreads(&nthread, &nlive); - internal_snprintf(buf, buf_size, - "shadow (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n" - "meta (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n" - "traces (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n" -#if !SANITIZER_GO - "low app (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n" - "high app (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n" - "heap (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n" -#else // !SANITIZER_GO + internal_snprintf( + buf, buf_size, + "shadow (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n" + "meta (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n" + "traces (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n" +# if !SANITIZER_GO + "low app (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n" + "high app (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n" 
+ "heap (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n" +# else // !SANITIZER_GO "app (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n" -#endif - "stacks: %zd unique IDs, %zd kB allocated\n" - "threads: %zd total, %zd live\n" - "------------------------------\n", - ShadowBeg(), ShadowEnd(), shadow_res / 1024, shadow_dirty / 1024, - MetaShadowBeg(), MetaShadowEnd(), meta_res / 1024, meta_dirty / 1024, - TraceMemBeg(), TraceMemEnd(), trace_res / 1024, trace_dirty / 1024, -#if !SANITIZER_GO - LoAppMemBeg(), LoAppMemEnd(), low_res / 1024, low_dirty / 1024, - HiAppMemBeg(), HiAppMemEnd(), high_res / 1024, high_dirty / 1024, - HeapMemBeg(), HeapMemEnd(), heap_res / 1024, heap_dirty / 1024, -#else // !SANITIZER_GO +# endif + "stacks: %zd unique IDs, %zd kB allocated\n" + "threads: %zd total, %zd live\n" + "------------------------------\n", + ShadowBeg(), ShadowEnd(), shadow_res / 1024, shadow_dirty / 1024, + MetaShadowBeg(), MetaShadowEnd(), meta_res / 1024, meta_dirty / 1024, + TraceMemBeg(), TraceMemEnd(), trace_res / 1024, trace_dirty / 1024, +# if !SANITIZER_GO + LoAppMemBeg(), LoAppMemEnd(), low_res / 1024, low_dirty / 1024, + HiAppMemBeg(), HiAppMemEnd(), high_res / 1024, high_dirty / 1024, + HeapMemBeg(), HeapMemEnd(), heap_res / 1024, heap_dirty / 1024, +# else // !SANITIZER_GO LoAppMemBeg(), LoAppMemEnd(), app_res / 1024, app_dirty / 1024, -#endif - stacks->n_uniq_ids, stacks->allocated / 1024, - nthread, nlive); +# endif + stacks.n_uniq_ids, stacks.allocated / 1024, nthread, nlive); } # if !SANITIZER_GO