diff --git a/compiler-rt/lib/scudo/standalone/secondary.h b/compiler-rt/lib/scudo/standalone/secondary.h
--- a/compiler-rt/lib/scudo/standalone/secondary.h
+++ b/compiler-rt/lib/scudo/standalone/secondary.h
@@ -64,7 +64,7 @@
 
 } // namespace LargeBlock
 
-static void unmap(LargeBlock::Header *H) {
+static inline void unmap(LargeBlock::Header *H) {
   // Note that the `H->MapMap` is stored on the pages managed by itself. Take
   // over the ownership before unmap() so that any operation along with unmap()
   // won't touch inaccessible pages.
@@ -72,7 +72,7 @@
   MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
 }
 
-class MapAllocatorNoCache {
+template <typename Config> class MapAllocatorNoCache {
 public:
   void init(UNUSED s32 ReleaseToOsInterval) {}
   bool retrieve(UNUSED Options Options, UNUSED uptr Size, UNUSED uptr Alignment,
@@ -93,6 +93,10 @@
     // Not supported by the Secondary Cache, but not an error either.
     return true;
   }
+
+  void getStats(UNUSED ScopedString *Str) {
+    Str->append("Secondary Cache Disabled\n");
+  }
 };
 
 static const uptr MaxUnusedCachePages = 4U;
@@ -130,17 +134,32 @@
 
 template <typename Config> class MapAllocatorCache {
 public:
+  using CacheConfig = typename Config::Secondary::Cache;
+
+  void getStats(ScopedString *Str) {
+    ScopedLock L(Mutex);
+    Str->append(
+        "Stats: MapAllocatorCache: EntriesCount: %zu, "
+        "MaxEntriesCount: %zu, MaxEntrySize: %zu\n",
+        (uptr)EntriesCount, (uptr)atomic_load_relaxed(&MaxEntriesCount),
+        (uptr)atomic_load_relaxed(&MaxEntrySize));
+    for (CachedBlock Entry : Entries) {
+      Str->append("BlockAddress: 0x%x, BlockSize: %zu\n",
+                  (uint32_t)Entry.CommitBase, Entry.CommitSize);
+    }
+  }
+
   // Ensure the default maximum specified fits the array.
-  static_assert(Config::SecondaryCacheDefaultMaxEntriesCount <=
-                    Config::SecondaryCacheEntriesArraySize,
+  static_assert(CacheConfig::DefaultMaxEntriesCount <=
+                    CacheConfig::EntriesArraySize,
                 "");
 
   void init(s32 ReleaseToOsInterval) NO_THREAD_SAFETY_ANALYSIS {
     DCHECK_EQ(EntriesCount, 0U);
     setOption(Option::MaxCacheEntriesCount,
-              static_cast<sptr>(Config::SecondaryCacheDefaultMaxEntriesCount));
+              static_cast<sptr>(CacheConfig::DefaultMaxEntriesCount));
     setOption(Option::MaxCacheEntrySize,
-              static_cast<sptr>(Config::SecondaryCacheDefaultMaxEntrySize));
+              static_cast<sptr>(CacheConfig::DefaultMaxEntrySize));
     setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
   }
 
@@ -185,10 +204,9 @@
         // just unmap it.
         break;
       }
-      if (Config::SecondaryCacheQuarantineSize &&
-          useMemoryTagging<Config>(Options)) {
+      if (CacheConfig::QuarantineSize && useMemoryTagging<Config>(Options)) {
         QuarantinePos =
-            (QuarantinePos + 1) % Max(Config::SecondaryCacheQuarantineSize, 1u);
+            (QuarantinePos + 1) % Max(CacheConfig::QuarantineSize, 1u);
         if (!Quarantine[QuarantinePos].CommitBase) {
           Quarantine[QuarantinePos] = Entry;
           return;
@@ -291,16 +309,15 @@
 
   bool setOption(Option O, sptr Value) {
     if (O == Option::ReleaseInterval) {
-      const s32 Interval =
-          Max(Min(static_cast<s32>(Value),
-                  Config::SecondaryCacheMaxReleaseToOsIntervalMs),
-              Config::SecondaryCacheMinReleaseToOsIntervalMs);
+      const s32 Interval = Max(
+          Min(static_cast<s32>(Value), CacheConfig::MaxReleaseToOsIntervalMs),
+          CacheConfig::MinReleaseToOsIntervalMs);
       atomic_store_relaxed(&ReleaseToOsIntervalMs, Interval);
       return true;
     }
     if (O == Option::MaxCacheEntriesCount) {
       const u32 MaxCount = static_cast<u32>(Value);
-      if (MaxCount > Config::SecondaryCacheEntriesArraySize)
+      if (MaxCount > CacheConfig::EntriesArraySize)
         return false;
       atomic_store_relaxed(&MaxEntriesCount, MaxCount);
       return true;
@@ -317,7 +334,7 @@
 
   void disableMemoryTagging() EXCLUDES(Mutex) {
     ScopedLock L(Mutex);
-    for (u32 I = 0; I != Config::SecondaryCacheQuarantineSize; ++I) {
+    for (u32 I = 0; I != CacheConfig::QuarantineSize; ++I) {
       if (Quarantine[I].CommitBase) {
         MemMapT &MemMap = Quarantine[I].MemMap;
         MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
@@ -342,11 +359,11 @@
 
 private:
   void empty() {
-    MemMapT MapInfo[Config::SecondaryCacheEntriesArraySize];
+    MemMapT MapInfo[CacheConfig::EntriesArraySize];
     uptr N = 0;
     {
       ScopedLock L(Mutex);
-      for (uptr I = 0; I < Config::SecondaryCacheEntriesArraySize; I++) {
+      for (uptr I = 0; I < CacheConfig::EntriesArraySize; I++) {
         if (!Entries[I].CommitBase)
           continue;
         MapInfo[N] = Entries[I].MemMap;
@@ -387,9 +404,9 @@
     if (!EntriesCount || OldestTime == 0 || OldestTime > Time)
       return;
     OldestTime = 0;
-    for (uptr I = 0; I < Config::SecondaryCacheQuarantineSize; I++)
+    for (uptr I = 0; I < CacheConfig::QuarantineSize; I++)
       releaseIfOlderThan(Quarantine[I], Time);
-    for (uptr I = 0; I < Config::SecondaryCacheEntriesArraySize; I++)
+    for (uptr I = 0; I < CacheConfig::EntriesArraySize; I++)
       releaseIfOlderThan(Entries[I], Time);
   }
 
@@ -402,9 +419,8 @@
   u32 IsFullEvents GUARDED_BY(Mutex) = 0;
   atomic_s32 ReleaseToOsIntervalMs = {};
 
-  CachedBlock
-      Entries[Config::SecondaryCacheEntriesArraySize] GUARDED_BY(Mutex) = {};
-  NonZeroLengthArray<CachedBlock, Config::SecondaryCacheQuarantineSize>
+  CachedBlock Entries[CacheConfig::EntriesArraySize] GUARDED_BY(Mutex) = {};
+  NonZeroLengthArray<CachedBlock, CacheConfig::QuarantineSize>
       Quarantine GUARDED_BY(Mutex) = {};
 };
 
@@ -435,8 +451,6 @@
     return getBlockEnd(Ptr) - reinterpret_cast<uptr>(Ptr);
   }
 
-  void getStats(ScopedString *Str);
-
   void disable() NO_THREAD_SAFETY_ANALYSIS {
     Mutex.lock();
     Cache.disable();
@@ -468,8 +482,10 @@
 
   void unmapTestOnly() { Cache.unmapTestOnly(); }
 
-private:
-  typename Config::SecondaryCache Cache;
+  void getStats(ScopedString *Str);
+
+private:
+  typename Config::Secondary::template CacheT<Config> Cache;
 
   mutable HybridMutex Mutex;
   DoublyLinkedList<LargeBlock::Header> InUseBlocks GUARDED_BY(Mutex);
@@ -623,6 +639,7 @@
                 NumberOfAllocs, AllocatedBytes >> 10, NumberOfFrees,
                 FreedBytes >> 10, NumberOfAllocs - NumberOfFrees,
                 (AllocatedBytes - FreedBytes) >> 10, LargestSize >> 20);
+  Cache.getStats(Str);
 }
 
 } // namespace scudo