Index: include/sanitizer/allocator_interface.h
===================================================================
--- include/sanitizer/allocator_interface.h
+++ include/sanitizer/allocator_interface.h
@@ -76,6 +76,13 @@
       void (*malloc_hook)(const volatile void *, size_t),
       void (*free_hook)(const volatile void *));
 
+  /* Drains allocator quarantines (calling thread's and global ones), returns
+     freed memory back to the OS and releases other non-essential internal
+     allocator resources in an attempt to reduce process RSS.
+     Currently available with ASan only.
+  */
+  void __sanitizer_purge_allocator();
+
 #ifdef __cplusplus
 } // extern "C"
 #endif
Index: lib/asan/asan_allocator.cc
===================================================================
--- lib/asan/asan_allocator.cc
+++ lib/asan/asan_allocator.cc
@@ -716,6 +716,22 @@
     return AsanChunkView(m1);
   }
 
+  void Purge() {
+    AsanThread *t = GetCurrentThread();
+    if (t) {
+      AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
+      quarantine.DrainAndRecycle(GetQuarantineCache(ms),
+                                 QuarantineCallback(GetAllocatorCache(ms)));
+    }
+    {
+      SpinMutexLock l(&fallback_mutex);
+      quarantine.DrainAndRecycle(&fallback_quarantine_cache,
+                                 QuarantineCallback(&fallback_allocator_cache));
+    }
+
+    allocator.ForceReleaseToOS();
+  }
+
   void PrintStats() {
     allocator.PrintStats();
     quarantine.PrintStats();
@@ -1011,6 +1027,10 @@
   return allocated_size;
 }
 
+void __sanitizer_purge_allocator() {
+  instance.Purge();
+}
+
 #if !SANITIZER_SUPPORTS_WEAK_HOOKS
 // Provide default (no-op) implementation of malloc hooks.
 SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_malloc_hook,
Index: lib/fuzzer/FuzzerDriver.cpp
===================================================================
--- lib/fuzzer/FuzzerDriver.cpp
+++ lib/fuzzer/FuzzerDriver.cpp
@@ -578,6 +578,7 @@
   Options.ReloadIntervalSec = Flags.reload;
   Options.OnlyASCII = Flags.only_ascii;
   Options.DetectLeaks = Flags.detect_leaks;
+  Options.PurgeAllocatorIntervalSec = Flags.purge_allocator_interval;
   Options.TraceMalloc = Flags.trace_malloc;
   Options.RssLimitMb = Flags.rss_limit_mb;
   if (Flags.runs >= 0)
Index: lib/fuzzer/FuzzerExtFunctions.def
===================================================================
--- lib/fuzzer/FuzzerExtFunctions.def
+++ lib/fuzzer/FuzzerExtFunctions.def
@@ -33,6 +33,7 @@
          (void (*malloc_hook)(const volatile void *, size_t),
           void (*free_hook)(const volatile void *)),
          false);
+EXT_FUNC(__sanitizer_purge_allocator, void, (), false);
 EXT_FUNC(__sanitizer_print_memory_profile, int, (size_t, size_t), false);
 EXT_FUNC(__sanitizer_print_stack_trace, void, (), true);
 EXT_FUNC(__sanitizer_symbolize_pc, void,
Index: lib/fuzzer/FuzzerFlags.def
===================================================================
--- lib/fuzzer/FuzzerFlags.def
+++ lib/fuzzer/FuzzerFlags.def
@@ -114,6 +114,10 @@
     "Be careful, this will also close e.g. asan's stderr/stdout.")
 FUZZER_FLAG_INT(detect_leaks, 1, "If 1, and if LeakSanitizer is enabled "
     "try to detect memory leaks during fuzzing (i.e. not only at shut down).")
+FUZZER_FLAG_INT(purge_allocator_interval, 1, "Purge allocator caches and "
+    "quarantines every <N> seconds. When rss_limit_mb is specified (>0), "
+    "purging starts when RSS exceeds 50% of rss_limit_mb. Pass "
+    "purge_allocator_interval=-1 to disable this functionality.")
 FUZZER_FLAG_INT(trace_malloc, 0, "If >= 1 will print all mallocs/frees. "
     "If >= 2 will also print stack traces.")
 FUZZER_FLAG_INT(rss_limit_mb, 2048, "If non-zero, the fuzzer will exit upon"
Index: lib/fuzzer/FuzzerInternal.h
===================================================================
--- lib/fuzzer/FuzzerInternal.h
+++ lib/fuzzer/FuzzerInternal.h
@@ -96,6 +96,7 @@
   void CrashOnOverwrittenData();
   void InterruptCallback();
   void MutateAndTestOne();
+  void PurgeAllocator();
   void ReportNewCoverage(InputInfo *II, const Unit &U);
   void PrintPulseAndReportSlowInput(const uint8_t *Data, size_t Size);
   void WriteToOutputCorpus(const Unit &U);
@@ -124,6 +125,8 @@
   bool HasMoreMallocsThanFrees = false;
   size_t NumberOfLeakDetectionAttempts = 0;
 
+  system_clock::time_point LastAllocatorPurgeAttemptTime = system_clock::now();
+
   UserCallback CB;
   InputCorpus &Corpus;
   MutationDispatcher &MD;
Index: lib/fuzzer/FuzzerLoop.cpp
===================================================================
--- lib/fuzzer/FuzzerLoop.cpp
+++ lib/fuzzer/FuzzerLoop.cpp
@@ -587,7 +587,7 @@
     size_t NewSize = 0;
     NewSize = MD.Mutate(CurrentUnitData, Size, CurrentMaxMutationLen);
     assert(NewSize > 0 && "Mutator returned empty unit");
-    assert(NewSize <= CurrentMaxMutationLen && "Mutator return overisized unit");
+    assert(NewSize <= CurrentMaxMutationLen && "Mutator returned oversized unit");
     Size = NewSize;
     II.NumExecutedMutations++;
     if (RunOne(CurrentUnitData, Size, /*MayDeleteFile=*/true, &II))
@@ -598,6 +598,25 @@
   }
 }
 
+void Fuzzer::PurgeAllocator() {
+  if (Options.PurgeAllocatorIntervalSec < 0 ||
+      !EF->__sanitizer_purge_allocator) {
+    return;
+  }
+  if (duration_cast<seconds>(system_clock::now() -
+                             LastAllocatorPurgeAttemptTime).count() <
+      Options.PurgeAllocatorIntervalSec) {
+    return;
+  }
+
+  if (Options.RssLimitMb <= 0 ||
+      GetPeakRSSMb() > static_cast<size_t>(Options.RssLimitMb) / 2) {
+    EF->__sanitizer_purge_allocator();
+  }
+
+  LastAllocatorPurgeAttemptTime = system_clock::now();
+}
+
 void Fuzzer::ReadAndExecuteSeedCorpora(const Vector<std::string> &CorpusDirs) {
   const size_t kMaxSaneLen = 1 << 20;
   const size_t kMinDefaultLen = 4096;
@@ -699,6 +718,8 @@
     // Perform several mutations and runs.
     MutateAndTestOne();
+
+    PurgeAllocator();
   }
 
   PrintStats("DONE  ", "\n");
Index: lib/fuzzer/FuzzerOptions.h
===================================================================
--- lib/fuzzer/FuzzerOptions.h
+++ lib/fuzzer/FuzzerOptions.h
@@ -54,6 +54,7 @@
   bool DumpCoverage = false;
   bool UseClangCoverage = false;
   bool DetectLeaks = true;
+  int PurgeAllocatorIntervalSec = 1;
   int UseFeatureFrequency = false;
   int TraceMalloc = 0;
   bool HandleAbrt = false;
Index: lib/sanitizer_common/sanitizer_allocator_combined.h
===================================================================
--- lib/sanitizer_common/sanitizer_allocator_combined.h
+++ lib/sanitizer_common/sanitizer_allocator_combined.h
@@ -77,6 +77,10 @@
     primary_.SetReleaseToOSIntervalMs(release_to_os_interval_ms);
   }
 
+  void ForceReleaseToOS() {
+    primary_.ForceReleaseToOS();
+  }
+
   void Deallocate(AllocatorCache *cache, void *p) {
     if (!p) return;
     if (primary_.PointerIsMine(p))
Index: lib/sanitizer_common/sanitizer_allocator_interface.h
===================================================================
--- lib/sanitizer_common/sanitizer_allocator_interface.h
+++ lib/sanitizer_common/sanitizer_allocator_interface.h
@@ -38,6 +38,9 @@
 SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
 __sanitizer_free_hook(void *ptr);
 
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_purge_allocator();
+
 SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
 __sanitizer_print_memory_profile(uptr top_percent, uptr max_number_of_contexts);
 } // extern "C"
Index: lib/sanitizer_common/sanitizer_allocator_primary32.h
===================================================================
--- lib/sanitizer_common/sanitizer_allocator_primary32.h
+++ lib/sanitizer_common/sanitizer_allocator_primary32.h
@@ -120,6 +120,10 @@
     // This is empty here. Currently only implemented in 64-bit allocator.
   }
 
+  void ForceReleaseToOS() {
+    // Currently implemented in 64-bit allocator only.
+  }
+
   void *MapWithCallback(uptr size) {
     void *res = MmapOrDie(size, "SizeClassAllocator32");
     MapUnmapCallback().OnMap((uptr)res, size);
Index: lib/sanitizer_common/sanitizer_allocator_primary64.h
===================================================================
--- lib/sanitizer_common/sanitizer_allocator_primary64.h
+++ lib/sanitizer_common/sanitizer_allocator_primary64.h
@@ -92,6 +92,13 @@
                 memory_order_relaxed);
   }
 
+  void ForceReleaseToOS() {
+    for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
+      BlockingMutexLock l(&GetRegionInfo(class_id)->mutex);
+      MaybeReleaseToOS(class_id, true /*force*/);
+    }
+  }
+
   static bool CanAllocate(uptr size, uptr alignment) {
     return size <= SizeClassMap::kMaxSize &&
            alignment <= SizeClassMap::kMaxSize;
@@ -116,7 +123,7 @@
     region->num_freed_chunks = new_num_freed_chunks;
     region->stats.n_freed += n_chunks;
 
-    MaybeReleaseToOS(class_id);
+    MaybeReleaseToOS(class_id, false /*force*/);
   }
 
   NOINLINE bool GetFromAllocator(AllocatorStats *stat, uptr class_id,
@@ -786,7 +793,7 @@
   // Attempts to release RAM occupied by freed chunks back to OS. The region is
   // expected to be locked.
-  void MaybeReleaseToOS(uptr class_id) {
+  void MaybeReleaseToOS(uptr class_id, bool force) {
     RegionInfo *region = GetRegionInfo(class_id);
     const uptr chunk_size = ClassIdToSize(class_id);
     const uptr page_size = GetPageSizeCached();
@@ -799,12 +806,16 @@
       return;  // Nothing new to release.
     }
 
-    s32 interval_ms = ReleaseToOSIntervalMs();
-    if (interval_ms < 0)
-      return;
+    if (!force) {
+      s32 interval_ms = ReleaseToOSIntervalMs();
+      if (interval_ms < 0)
+        return;
 
-    if (region->rtoi.last_release_at_ns + interval_ms * 1000000ULL > NanoTime())
-      return;  // Memory was returned recently.
+      if (region->rtoi.last_release_at_ns + interval_ms * 1000000ULL >
+          NanoTime()) {
+        return;  // Memory was returned recently.
+      }
+    }
 
     MemoryMapper memory_mapper(*this, class_id);
Index: lib/sanitizer_common/sanitizer_common_interface.inc
===================================================================
--- lib/sanitizer_common/sanitizer_common_interface.inc
+++ lib/sanitizer_common/sanitizer_common_interface.inc
@@ -34,6 +34,7 @@
 INTERFACE_FUNCTION(__sanitizer_get_ownership)
 INTERFACE_FUNCTION(__sanitizer_get_unmapped_bytes)
 INTERFACE_FUNCTION(__sanitizer_install_malloc_and_free_hooks)
+INTERFACE_FUNCTION(__sanitizer_purge_allocator)
 INTERFACE_FUNCTION(__sanitizer_print_memory_profile)
 INTERFACE_WEAK_FUNCTION(__sanitizer_free_hook)
 INTERFACE_WEAK_FUNCTION(__sanitizer_malloc_hook)
Index: lib/sanitizer_common/sanitizer_quarantine.h
===================================================================
--- lib/sanitizer_common/sanitizer_quarantine.h
+++ lib/sanitizer_common/sanitizer_quarantine.h
@@ -117,7 +117,16 @@
       cache_.Transfer(c);
     }
     if (cache_.Size() > GetSize() && recycle_mutex_.TryLock())
-      Recycle(cb);
+      Recycle(atomic_load(&min_size_, memory_order_relaxed), cb);
+  }
+
+  void NOINLINE DrainAndRecycle(Cache *c, Callback cb) {
+    {
+      SpinMutexLock l(&cache_mutex_);
+      cache_.Transfer(c);
+    }
+    recycle_mutex_.Lock();
+    Recycle(0, cb);
   }
 
   void PrintStats() const {
@@ -139,9 +148,8 @@
   Cache cache_;
   char pad2_[kCacheLineSize];
 
-  void NOINLINE Recycle(Callback cb) {
+  void NOINLINE Recycle(uptr min_size, Callback cb) {
     Cache tmp;
-    uptr min_size = atomic_load(&min_size_, memory_order_relaxed);
     {
       SpinMutexLock l(&cache_mutex_);
       // Go over the batches and merge partially filled ones to
Index: test/asan/TestCases/Linux/release_to_os_test.cc
===================================================================
--- test/asan/TestCases/Linux/release_to_os_test.cc
+++ test/asan/TestCases/Linux/release_to_os_test.cc
@@ -1,18 +1,21 @@
 // Tests ASAN_OPTIONS=allocator_release_to_os=1
-//
 // RUN: %clangxx_asan -std=c++11 %s -o %t
 // RUN: %env_asan_opts=allocator_release_to_os_interval_ms=0 %run %t 2>&1 | FileCheck %s --check-prefix=RELEASE
 // RUN: %env_asan_opts=allocator_release_to_os_interval_ms=-1 %run %t 2>&1 | FileCheck %s --check-prefix=NO_RELEASE
-//
+// RUN: %env_asan_opts=allocator_release_to_os_interval_ms=-1 %run %t force 2>&1 | FileCheck %s --check-prefix=FORCE_RELEASE
+
 // REQUIRES: x86_64-target-arch
-#include <stdlib.h>
-#include <stdio.h>
+
 #include <algorithm>
-#include <stdint.h>
 #include <assert.h>
 #include <random>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sanitizer/allocator_interface.h>
 #include <sanitizer/asan_interface.h>
 
 void MallocReleaseStress() {
@@ -39,10 +42,13 @@
   delete[] p;
 }
 
-int main() {
+int main(int argc, char **argv) {
   MallocReleaseStress();
+  if (argc > 1 && !strcmp("force", argv[1]))
+    __sanitizer_purge_allocator();
   __asan_print_accumulated_stats();
 }
 
 // RELEASE: mapped:{{.*}}releases: {{[1-9]}}
 // NO_RELEASE: mapped:{{.*}}releases: 0
+// FORCE_RELEASE: mapped:{{.*}}releases: {{[1-9]}}
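
A usage sketch, not part of the patch: the standalone program below shows how a client might exercise the new entry point. The file name, loop bounds, and allocation size are illustrative; `<sanitizer/allocator_interface.h>` and `__sanitizer_purge_allocator()` are the interfaces added above, and the `ASAN_OPTIONS` setting reuses the existing `allocator_release_to_os_interval_ms` runtime flag.

// purge_demo.cc (hypothetical). Build and run, e.g.:
//   clang++ -fsanitize=address -std=c++11 purge_demo.cc -o purge_demo
//   ASAN_OPTIONS=allocator_release_to_os_interval_ms=-1 ./purge_demo
#include <sanitizer/allocator_interface.h>
#include <stdlib.h>

int main() {
  // Churn the heap: freed chunks accumulate in the ASan quarantine and in
  // per-size-class free lists instead of being returned to the OS.
  for (int i = 0; i < 100000; i++)
    free(malloc(16 * 1024));

  // Drain the calling thread's and the global quarantines, then force the
  // primary allocator to release freed pages, even though the periodic
  // release path is disabled by allocator_release_to_os_interval_ms=-1.
  __sanitizer_purge_allocator();
  return 0;
}

Under libFuzzer the call is made automatically: with the default -purge_allocator_interval=1, Fuzzer::PurgeAllocator() attempts a purge at most once per second, and only once peak RSS exceeds half of rss_limit_mb (or on every interval when no RSS limit is set).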