Index: include/sanitizer/allocator_interface.h
===================================================================
--- include/sanitizer/allocator_interface.h
+++ include/sanitizer/allocator_interface.h
@@ -76,6 +76,13 @@
       void (*malloc_hook)(const volatile void *, size_t),
       void (*free_hook)(const volatile void *));
 
+  /* Drains allocator quarantines (calling thread's and global ones), returns
+     freed memory back to the OS and releases other non-essential internal
+     allocator resources in an attempt to reduce process RSS.
+     Currently available with ASan only.
+  */
+  void __sanitizer_purge_allocator();
+
 #ifdef __cplusplus
 }  // extern "C"
 #endif
Index: lib/asan/asan_allocator.cc
===================================================================
--- lib/asan/asan_allocator.cc
+++ lib/asan/asan_allocator.cc
@@ -716,6 +716,22 @@
     return AsanChunkView(m1);
   }
 
+  void Purge() {
+    AsanThread *t = GetCurrentThread();
+    if (t) {
+      AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
+      quarantine.DrainAndRecycle(GetQuarantineCache(ms),
+                                 QuarantineCallback(GetAllocatorCache(ms)));
+    }
+    {
+      SpinMutexLock l(&fallback_mutex);
+      quarantine.DrainAndRecycle(&fallback_quarantine_cache,
+                                 QuarantineCallback(&fallback_allocator_cache));
+    }
+
+    allocator.ForceReleaseToOS();
+  }
+
   void PrintStats() {
     allocator.PrintStats();
     quarantine.PrintStats();
@@ -1011,6 +1027,10 @@
   return allocated_size;
 }
 
+void __sanitizer_purge_allocator() {
+  instance.Purge();
+}
+
 #if !SANITIZER_SUPPORTS_WEAK_HOOKS
 // Provide default (no-op) implementation of malloc hooks.
 SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_malloc_hook,
Index: lib/sanitizer_common/sanitizer_allocator_combined.h
===================================================================
--- lib/sanitizer_common/sanitizer_allocator_combined.h
+++ lib/sanitizer_common/sanitizer_allocator_combined.h
@@ -77,6 +77,10 @@
     primary_.SetReleaseToOSIntervalMs(release_to_os_interval_ms);
   }
 
+  void ForceReleaseToOS() {
+    primary_.ForceReleaseToOS();
+  }
+
   void Deallocate(AllocatorCache *cache, void *p) {
     if (!p) return;
     if (primary_.PointerIsMine(p))
Index: lib/sanitizer_common/sanitizer_allocator_interface.h
===================================================================
--- lib/sanitizer_common/sanitizer_allocator_interface.h
+++ lib/sanitizer_common/sanitizer_allocator_interface.h
@@ -38,6 +38,9 @@
 SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
 __sanitizer_free_hook(void *ptr);
 
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_purge_allocator();
+
 SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
 __sanitizer_print_memory_profile(uptr top_percent, uptr max_number_of_contexts);
 }  // extern "C"
Index: lib/sanitizer_common/sanitizer_allocator_primary32.h
===================================================================
--- lib/sanitizer_common/sanitizer_allocator_primary32.h
+++ lib/sanitizer_common/sanitizer_allocator_primary32.h
@@ -120,6 +120,10 @@
     // This is empty here. Currently only implemented in 64-bit allocator.
   }
 
+  void ForceReleaseToOS() {
+    // Currently implemented in 64-bit allocator only.
+  }
+
   void *MapWithCallback(uptr size) {
     void *res = MmapOrDie(size, "SizeClassAllocator32");
     MapUnmapCallback().OnMap((uptr)res, size);
Index: lib/sanitizer_common/sanitizer_allocator_primary64.h
===================================================================
--- lib/sanitizer_common/sanitizer_allocator_primary64.h
+++ lib/sanitizer_common/sanitizer_allocator_primary64.h
@@ -92,6 +92,13 @@
                  memory_order_relaxed);
   }
 
+  void ForceReleaseToOS() {
+    for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
+      BlockingMutexLock l(&GetRegionInfo(class_id)->mutex);
+      MaybeReleaseToOS(class_id, true /*force*/);
+    }
+  }
+
   static bool CanAllocate(uptr size, uptr alignment) {
     return size <= SizeClassMap::kMaxSize &&
            alignment <= SizeClassMap::kMaxSize;
@@ -116,7 +123,7 @@
     region->num_freed_chunks = new_num_freed_chunks;
     region->stats.n_freed += n_chunks;
 
-    MaybeReleaseToOS(class_id);
+    MaybeReleaseToOS(class_id, false /*force*/);
   }
 
   NOINLINE bool GetFromAllocator(AllocatorStats *stat, uptr class_id,
@@ -786,7 +793,7 @@
 
   // Attempts to release RAM occupied by freed chunks back to OS. The region is
   // expected to be locked.
-  void MaybeReleaseToOS(uptr class_id) {
+  void MaybeReleaseToOS(uptr class_id, bool force) {
     RegionInfo *region = GetRegionInfo(class_id);
     const uptr chunk_size = ClassIdToSize(class_id);
     const uptr page_size = GetPageSizeCached();
@@ -799,12 +806,16 @@
       return;  // Nothing new to release.
     }
 
-    s32 interval_ms = ReleaseToOSIntervalMs();
-    if (interval_ms < 0)
-      return;
+    if (!force) {
+      s32 interval_ms = ReleaseToOSIntervalMs();
+      if (interval_ms < 0)
+        return;
 
-    if (region->rtoi.last_release_at_ns + interval_ms * 1000000ULL > NanoTime())
-      return;  // Memory was returned recently.
+      if (region->rtoi.last_release_at_ns + interval_ms * 1000000ULL >
+          NanoTime()) {
+        return;  // Memory was returned recently.
+      }
+    }
 
     MemoryMapper memory_mapper(*this, class_id);
Index: lib/sanitizer_common/sanitizer_common_interface.inc
===================================================================
--- lib/sanitizer_common/sanitizer_common_interface.inc
+++ lib/sanitizer_common/sanitizer_common_interface.inc
@@ -34,6 +34,7 @@
 INTERFACE_FUNCTION(__sanitizer_get_ownership)
 INTERFACE_FUNCTION(__sanitizer_get_unmapped_bytes)
 INTERFACE_FUNCTION(__sanitizer_install_malloc_and_free_hooks)
+INTERFACE_FUNCTION(__sanitizer_purge_allocator)
 INTERFACE_FUNCTION(__sanitizer_print_memory_profile)
 INTERFACE_WEAK_FUNCTION(__sanitizer_free_hook)
 INTERFACE_WEAK_FUNCTION(__sanitizer_malloc_hook)
Index: lib/sanitizer_common/sanitizer_quarantine.h
===================================================================
--- lib/sanitizer_common/sanitizer_quarantine.h
+++ lib/sanitizer_common/sanitizer_quarantine.h
@@ -87,15 +87,14 @@
     // is zero (it allows us to perform just one atomic read per Put() call).
     CHECK((size == 0 && cache_size == 0) || cache_size != 0);
 
-    atomic_store(&max_size_, size, memory_order_relaxed);
-    atomic_store(&min_size_, size / 10 * 9,
-                 memory_order_relaxed);  // 90% of max size.
-    atomic_store(&max_cache_size_, cache_size, memory_order_relaxed);
+    atomic_store_relaxed(&max_size_, size);
+    atomic_store_relaxed(&min_size_, size / 10 * 9);  // 90% of max size.
+    atomic_store_relaxed(&max_cache_size_, cache_size);
   }
 
-  uptr GetSize() const { return atomic_load(&max_size_, memory_order_relaxed); }
+  uptr GetSize() const { return atomic_load_relaxed(&max_size_); }
   uptr GetCacheSize() const {
-    return atomic_load(&max_cache_size_, memory_order_relaxed);
+    return atomic_load_relaxed(&max_cache_size_);
   }
 
   void Put(Cache *c, Callback cb, Node *ptr, uptr size) {
@@ -117,7 +116,16 @@
       cache_.Transfer(c);
     }
     if (cache_.Size() > GetSize() && recycle_mutex_.TryLock())
-      Recycle(cb);
+      Recycle(atomic_load_relaxed(&min_size_), cb);
+  }
+
+  void NOINLINE DrainAndRecycle(Cache *c, Callback cb) {
+    {
+      SpinMutexLock l(&cache_mutex_);
+      cache_.Transfer(c);
+    }
+    recycle_mutex_.Lock();
+    Recycle(0, cb);
   }
 
   void PrintStats() const {
@@ -139,9 +147,8 @@
   Cache cache_;
   char pad2_[kCacheLineSize];
 
-  void NOINLINE Recycle(Callback cb) {
+  void NOINLINE Recycle(uptr min_size, Callback cb) {
     Cache tmp;
-    uptr min_size = atomic_load(&min_size_, memory_order_relaxed);
     {
       SpinMutexLock l(&cache_mutex_);
       // Go over the batches and merge partially filled ones to
@@ -201,7 +208,7 @@
 
   // Total memory used, including internal accounting.
   uptr Size() const {
-    return atomic_load(&size_, memory_order_relaxed);
+    return atomic_load_relaxed(&size_);
   }
 
   // Memory used for internal accounting.
@@ -225,7 +232,7 @@
     list_.append_back(&from_cache->list_);
     SizeAdd(from_cache->Size());
 
-    atomic_store(&from_cache->size_, 0, memory_order_relaxed);
+    atomic_store_relaxed(&from_cache->size_, 0);
   }
 
   void EnqueueBatch(QuarantineBatch *b) {
@@ -296,10 +303,10 @@
   atomic_uintptr_t size_;
 
   void SizeAdd(uptr add) {
-    atomic_store(&size_, Size() + add, memory_order_relaxed);
+    atomic_store_relaxed(&size_, Size() + add);
   }
   void SizeSub(uptr sub) {
-    atomic_store(&size_, Size() - sub, memory_order_relaxed);
+    atomic_store_relaxed(&size_, Size() - sub);
   }
 };
Index: test/asan/TestCases/Linux/release_to_os_test.cc
===================================================================
--- test/asan/TestCases/Linux/release_to_os_test.cc
+++ test/asan/TestCases/Linux/release_to_os_test.cc
@@ -1,18 +1,21 @@
 // Tests ASAN_OPTIONS=allocator_release_to_os=1
-//
 // RUN: %clangxx_asan -std=c++11 %s -o %t
 // RUN: %env_asan_opts=allocator_release_to_os_interval_ms=0 %run %t 2>&1 | FileCheck %s --check-prefix=RELEASE
 // RUN: %env_asan_opts=allocator_release_to_os_interval_ms=-1 %run %t 2>&1 | FileCheck %s --check-prefix=NO_RELEASE
-//
+// RUN: %env_asan_opts=allocator_release_to_os_interval_ms=-1 %run %t force 2>&1 | FileCheck %s --check-prefix=FORCE_RELEASE
+
 // REQUIRES: x86_64-target-arch
-#include <stdlib.h>
-#include <stdio.h>
+
 #include <algorithm>
-#include <assert.h>
 #include <random>
 #include <stdint.h>
+#include <assert.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sanitizer/allocator_interface.h>
 #include <sanitizer/asan_interface.h>
 
 void MallocReleaseStress() {
@@ -39,10 +42,13 @@
   delete[] p;
 }
 
-int main() {
+int main(int argc, char **argv) {
   MallocReleaseStress();
+  if (argc > 1 && !strcmp("force", argv[1]))
+    __sanitizer_purge_allocator();
   __asan_print_accumulated_stats();
 }
 
 // RELEASE: mapped:{{.*}}releases: {{[1-9]}}
 // NO_RELEASE: mapped:{{.*}}releases: 0
+// FORCE_RELEASE: mapped:{{.*}}releases: {{[1-9]}}
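
Usage sketch: the test above drives the new entry point through ASan's accumulated
stats. In a client, the call is intended for memory-usage low points, after a
burst of frees. The Churn() helper and the allocation sizes/counts below are
illustrative only, not part of the patch:

#include <sanitizer/allocator_interface.h>

#include <vector>

// Illustrative helper: allocate and free a burst of memory. Under ASan the
// freed chunks are parked in the quarantine and their pages stay mapped, so
// process RSS does not drop on its own (in particular when
// allocator_release_to_os_interval_ms=-1 disables the periodic release).
static void Churn() {
  std::vector<char *> blocks;
  for (int i = 0; i < 1024; i++)
    blocks.push_back(new char[64 * 1024]);
  for (char *p : blocks)
    delete[] p;
}

int main() {
  Churn();
  // Drains the calling thread's quarantine cache and the global (fallback)
  // one, recycles the quarantined chunks, and forces the primary allocator
  // to return freed pages to the OS. Per the header comment, only ASan
  // implements this for now, so build with -fsanitize=address.
  __sanitizer_purge_allocator();
  return 0;
}

Built with clang++ -std=c++11 -fsanitize=address, a run with
ASAN_OPTIONS=allocator_release_to_os_interval_ms=-1 should show the purge as a
non-zero "releases:" count in the __asan_print_accumulated_stats() output,
mirroring the FORCE_RELEASE check above.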