Index: lib/asan/asan_activation.cc
===================================================================
--- lib/asan/asan_activation.cc
+++ lib/asan/asan_activation.cc
@@ -79,11 +79,13 @@
     Report(
         "quarantine_size_mb %d, max_redzone %d, poison_heap %d, "
         "malloc_context_size %d, alloc_dealloc_mismatch %d, "
-        "allocator_may_return_null %d, coverage %d, coverage_dir %s\n",
+        "allocator_may_return_null %d, coverage %d, coverage_dir %s, "
+        "allocator_release_to_os_interval_ms %d\n",
         allocator_options.quarantine_size_mb, allocator_options.max_redzone,
         poison_heap, malloc_context_size,
         allocator_options.alloc_dealloc_mismatch,
-        allocator_options.may_return_null, coverage, coverage_dir);
+        allocator_options.may_return_null, coverage, coverage_dir,
+        allocator_options.release_to_os_interval_ms);
   }
 } asan_deactivated_flags;

Index: lib/asan/asan_activation_flags.inc
===================================================================
--- lib/asan/asan_activation_flags.inc
+++ lib/asan/asan_activation_flags.inc
@@ -33,3 +33,4 @@
 COMMON_ACTIVATION_FLAG(const char *, coverage_dir)
 COMMON_ACTIVATION_FLAG(int, verbosity)
 COMMON_ACTIVATION_FLAG(bool, help)
+COMMON_ACTIVATION_FLAG(s32, allocator_release_to_os_interval_ms)

Index: lib/asan/asan_allocator.h
===================================================================
--- lib/asan/asan_allocator.h
+++ lib/asan/asan_allocator.h
@@ -37,6 +37,7 @@
   u16 max_redzone;
   u8 may_return_null;
   u8 alloc_dealloc_mismatch;
+  s32 release_to_os_interval_ms;
 
   void SetFrom(const Flags *f, const CommonFlags *cf);
   void CopyTo(Flags *f, CommonFlags *cf);

Index: lib/asan/asan_allocator.cc
===================================================================
--- lib/asan/asan_allocator.cc
+++ lib/asan/asan_allocator.cc
@@ -211,6 +211,7 @@
   max_redzone = f->max_redzone;
   may_return_null = cf->allocator_may_return_null;
   alloc_dealloc_mismatch = f->alloc_dealloc_mismatch;
+  release_to_os_interval_ms = cf->allocator_release_to_os_interval_ms;
 }
 
 void AllocatorOptions::CopyTo(Flags *f, CommonFlags *cf) {
@@ -219,6 +220,7 @@
   f->max_redzone = max_redzone;
   cf->allocator_may_return_null = may_return_null;
   f->alloc_dealloc_mismatch = alloc_dealloc_mismatch;
+  cf->allocator_release_to_os_interval_ms = release_to_os_interval_ms;
 }
 
 struct Allocator {
@@ -262,7 +264,7 @@
   }
 
   void Initialize(const AllocatorOptions &options) {
-    allocator.Init(options.may_return_null);
+    allocator.Init(options.may_return_null, options.release_to_os_interval_ms);
     SharedInitCode(options);
   }
 
@@ -291,6 +293,7 @@
 
   void ReInitialize(const AllocatorOptions &options) {
     allocator.SetMayReturnNull(options.may_return_null);
+    allocator.SetReleaseToOSIntervalMs(options.release_to_os_interval_ms);
     SharedInitCode(options);
 
     // Poison all existing allocation's redzones.
@@ -312,6 +315,7 @@
     options->may_return_null = allocator.MayReturnNull();
     options->alloc_dealloc_mismatch =
         atomic_load(&alloc_dealloc_mismatch, memory_order_acquire);
+    options->release_to_os_interval_ms = allocator.ReleaseToOSIntervalMs();
   }
 
   // -------------------- Helper methods. -------------------------
@@ -687,8 +691,6 @@
     fallback_mutex.Unlock();
     allocator.ForceUnlock();
   }
-
-  void ReleaseToOS() { allocator.ReleaseToOS(); }
 };
 
 static Allocator instance(LINKER_INITIALIZED);
@@ -730,11 +732,8 @@
   return GetStackTraceFromId(GetFreeStackId());
 }
 
-void ReleaseToOS() { instance.ReleaseToOS(); }
-
 void InitializeAllocator(const AllocatorOptions &options) {
   instance.Initialize(options);
-  SetAllocatorReleaseToOSCallback(ReleaseToOS);
 }
 
 void ReInitializeAllocator(const AllocatorOptions &options) {

Index: lib/lsan/lsan_allocator.cc
===================================================================
--- lib/lsan/lsan_allocator.cc
+++ lib/lsan/lsan_allocator.cc
@@ -64,7 +64,9 @@
 static THREADLOCAL AllocatorCache cache;
 
 void InitializeAllocator() {
-  allocator.InitLinkerInitialized(common_flags()->allocator_may_return_null);
+  allocator.InitLinkerInitialized(
+      common_flags()->allocator_may_return_null,
+      common_flags()->allocator_release_to_os_interval_ms);
 }
 
 void AllocatorThreadFinish() {

Index: lib/msan/msan_allocator.cc
===================================================================
--- lib/msan/msan_allocator.cc
+++ lib/msan/msan_allocator.cc
@@ -103,7 +103,9 @@
 static SpinMutex fallback_mutex;
 
 void MsanAllocatorInit() {
-  allocator.Init(common_flags()->allocator_may_return_null);
+  allocator.Init(
+      common_flags()->allocator_may_return_null,
+      common_flags()->allocator_release_to_os_interval_ms);
 }
 
 AllocatorCache *GetAllocatorCache(MsanThreadLocalMallocStorage *ms) {

Index: lib/sanitizer_common/sanitizer_allocator.cc
===================================================================
--- lib/sanitizer_common/sanitizer_allocator.cc
+++ lib/sanitizer_common/sanitizer_allocator.cc
@@ -94,7 +94,8 @@
   SpinMutexLock l(&internal_alloc_init_mu);
   if (atomic_load(&internal_allocator_initialized, memory_order_relaxed) ==
       0) {
-    internal_allocator_instance->Init(/* may_return_null*/ false);
+    internal_allocator_instance->Init(
+        /* may_return_null */ false, kReleaseToOSIntervalNever);
     atomic_store(&internal_allocator_initialized, 1, memory_order_release);
   }
 }
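
Because CombinedAllocator::Init changes signature below, every caller has to pass the interval, as the asan/lsan/msan hunks above already do. A minimal sketch of the updated call pattern for a hypothetical out-of-tree tool (the InitToolAllocator helper and its template parameter are illustrative, not part of this patch; it assumes the in-tree sanitizer_common.h and sanitizer_flags.h headers):

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"

namespace __sanitizer {

// Illustrative only: a tool-side init now forwards both common flags.
// Passing kReleaseToOSIntervalNever (-1) keeps the old "never release"
// behavior.
template <class CombinedAllocatorT>
void InitToolAllocator(CombinedAllocatorT *allocator) {
  allocator->Init(common_flags()->allocator_may_return_null,
                  common_flags()->allocator_release_to_os_interval_ms);
}

}  // namespace __sanitizer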

Index: lib/sanitizer_common/sanitizer_allocator_combined.h
===================================================================
--- lib/sanitizer_common/sanitizer_allocator_combined.h
+++ lib/sanitizer_common/sanitizer_allocator_combined.h
@@ -24,21 +24,22 @@
           class SecondaryAllocator>  // NOLINT
 class CombinedAllocator {
  public:
-  void InitCommon(bool may_return_null) {
-    primary_.Init();
+  void InitCommon(bool may_return_null, s32 release_to_os_interval_ms) {
+    primary_.Init(release_to_os_interval_ms);
     atomic_store(&may_return_null_, may_return_null, memory_order_relaxed);
   }
 
-  void InitLinkerInitialized(bool may_return_null) {
+  void InitLinkerInitialized(
+      bool may_return_null, s32 release_to_os_interval_ms) {
     secondary_.InitLinkerInitialized(may_return_null);
     stats_.InitLinkerInitialized();
-    InitCommon(may_return_null);
+    InitCommon(may_return_null, release_to_os_interval_ms);
   }
 
-  void Init(bool may_return_null) {
+  void Init(bool may_return_null, s32 release_to_os_interval_ms) {
     secondary_.Init(may_return_null);
     stats_.Init();
-    InitCommon(may_return_null);
+    InitCommon(may_return_null, release_to_os_interval_ms);
   }
 
   void *Allocate(AllocatorCache *cache, uptr size, uptr alignment,
@@ -83,6 +84,14 @@
     atomic_store(&may_return_null_, may_return_null, memory_order_release);
   }
 
+  s32 ReleaseToOSIntervalMs() const {
+    return primary_.ReleaseToOSIntervalMs();
+  }
+
+  void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
+    primary_.SetReleaseToOSIntervalMs(release_to_os_interval_ms);
+  }
+
   bool RssLimitIsExceeded() {
     return atomic_load(&rss_limit_is_exceeded_, memory_order_acquire);
   }
@@ -193,8 +202,6 @@
     primary_.ForceUnlock();
   }
 
-  void ReleaseToOS() { primary_.ReleaseToOS(); }
-
   // Iterate over all existing chunks.
   // The allocator must be locked when calling this function.
   void ForEachChunk(ForEachChunkCallback callback, void *arg) {

Index: lib/sanitizer_common/sanitizer_allocator_primary32.h
===================================================================
--- lib/sanitizer_common/sanitizer_allocator_primary32.h
+++ lib/sanitizer_common/sanitizer_allocator_primary32.h
@@ -90,11 +90,19 @@
       SizeClassMap, kRegionSizeLog, ByteMap, MapUnmapCallback> ThisT;
   typedef SizeClassAllocator32LocalCache<ThisT> AllocatorCache;
 
-  void Init() {
+  void Init(s32 release_to_os_interval_ms) {
     possible_regions.TestOnlyInit();
     internal_memset(size_class_info_array, 0, sizeof(size_class_info_array));
   }
 
+  s32 ReleaseToOSIntervalMs() const {
+    return kReleaseToOSIntervalNever;
+  }
+
+  void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
+    // This is empty here. Currently only implemented in 64-bit allocator.
+  }
+
   void *MapWithCallback(uptr size) {
     size = RoundUpTo(size, GetPageSizeCached());
     void *res = MmapOrDie(size, "SizeClassAllocator32");
@@ -229,10 +237,6 @@
     return 0;
   }
 
-  // This is empty here. Currently only implemented in 64-bit allocator.
-  void ReleaseToOS() { }
-
-
   typedef SizeClassMap SizeClassMapT;
   static const uptr kNumClasses = SizeClassMap::kNumClasses;

Index: lib/sanitizer_common/sanitizer_allocator_primary64.h
===================================================================
--- lib/sanitizer_common/sanitizer_allocator_primary64.h
+++ lib/sanitizer_common/sanitizer_allocator_primary64.h
@@ -69,7 +69,7 @@
     return base + (static_cast<uptr>(ptr32) << kCompactPtrScale);
   }
 
-  void Init() {
+  void Init(s32 release_to_os_interval_ms) {
     uptr TotalSpaceSize = kSpaceSize + AdditionalSize();
     if (kUsingConstantSpaceBeg) {
       CHECK_EQ(kSpaceBeg, reinterpret_cast<uptr>(
@@ -79,9 +79,19 @@
           reinterpret_cast<uptr>(MmapNoAccess(TotalSpaceSize));
       CHECK_NE(NonConstSpaceBeg, ~(uptr)0);
     }
+    SetReleaseToOSIntervalMs(release_to_os_interval_ms);
     MapWithCallback(SpaceEnd(), AdditionalSize());
   }
 
+  s32 ReleaseToOSIntervalMs() const {
+    return atomic_load(&release_to_os_interval_ms_, memory_order_relaxed);
+  }
+
+  void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
+    atomic_store(&release_to_os_interval_ms_, release_to_os_interval_ms,
+                 memory_order_relaxed);
+  }
+
   void MapWithCallback(uptr beg, uptr size) {
     CHECK_EQ(beg, reinterpret_cast<uptr>(MmapFixedOrDie(beg, size)));
     MapUnmapCallback().OnMap(beg, size);
@@ -111,6 +121,8 @@
       free_array[old_num_chunks + i] = chunks[i];
     region->num_freed_chunks = new_num_freed_chunks;
     region->n_freed += n_chunks;
+
+    MaybeReleaseToOS(class_id);
   }
 
   NOINLINE void GetFromAllocator(AllocatorStats *stat, uptr class_id,
@@ -284,11 +296,6 @@
                    GetPageSizeCached());
   }
 
-  void ReleaseToOS() {
-    for (uptr class_id = 1; class_id < kNumClasses; class_id++)
-      ReleaseToOS(class_id);
-  }
-
   typedef SizeClassMap SizeClassMapT;
   static const uptr kNumClasses = SizeClassMap::kNumClasses;
   static const uptr kNumClassesRounded = SizeClassMap::kNumClassesRounded;
@@ -320,9 +327,12 @@
   // Granularity of ReleaseToOs (aka madvise).
   static const uptr kReleaseToOsGranularity = 1 << 12;
 
+  atomic_sint32_t release_to_os_interval_ms_;
+
   struct ReleaseToOsInfo {
     uptr n_freed_at_last_release;
     uptr num_releases;
+    u64 last_release_at_ns;
   };
 
   struct RegionInfo {
@@ -462,42 +472,53 @@
     return true;
   }
 
-  // Releases some RAM back to OS.
+  // Attempts to release some RAM back to OS. The region is expected to be
+  // locked.
   // Algorithm:
-  // * Lock the region.
   // * Sort the chunks.
   // * Find ranges fully covered by free-d chunks
   // * Release them to OS with madvise.
-  //
-  // TODO(kcc): make sure we don't do it too frequently.
-  void ReleaseToOS(uptr class_id) {
+  void MaybeReleaseToOS(uptr class_id) {
     RegionInfo *region = GetRegionInfo(class_id);
-    uptr region_beg = GetRegionBeginBySizeClass(class_id);
-    CompactPtrT *free_array = GetFreeArray(region_beg);
     uptr chunk_size = ClassIdToSize(class_id);
-    uptr scaled_chunk_size = chunk_size >> kCompactPtrScale;
-    const uptr kScaledGranularity = kReleaseToOsGranularity >> kCompactPtrScale;
-    BlockingMutexLock l(&region->mutex);
+
     uptr n = region->num_freed_chunks;
     if (n * chunk_size < kReleaseToOsGranularity)
-      return; // No chance to release anything.
-    if ((region->rtoi.n_freed_at_last_release - region->n_freed) * chunk_size <
-        kReleaseToOsGranularity)
+      return;  // No chance to release anything.
+    if ((region->n_freed - region->rtoi.n_freed_at_last_release) * chunk_size <
+        kReleaseToOsGranularity) {
       return;  // Nothing new to release.
+    }
+
+    s32 interval_ms = ReleaseToOSIntervalMs();
+    if (interval_ms < 0)
+      return;
+
+    u64 now_ns = NanoTime();
+    if (region->rtoi.last_release_at_ns + interval_ms * 1000000ULL > now_ns)
+      return;  // Memory was returned recently.
+    region->rtoi.last_release_at_ns = now_ns;
+
+    uptr region_beg = GetRegionBeginBySizeClass(class_id);
+    CompactPtrT *free_array = GetFreeArray(region_beg);
     SortArray(free_array, n);
-    uptr beg = free_array[0];
+
+    const uptr scaled_chunk_size = chunk_size >> kCompactPtrScale;
+    const uptr kScaledGranularity = kReleaseToOsGranularity >> kCompactPtrScale;
+
+    uptr range_beg = free_array[0];
     uptr prev = free_array[0];
     for (uptr i = 1; i < n; i++) {
       uptr chunk = free_array[i];
       CHECK_GT(chunk, prev);
       if (chunk - prev != scaled_chunk_size) {
         CHECK_GT(chunk - prev, scaled_chunk_size);
-        if (prev + scaled_chunk_size - beg >= kScaledGranularity) {
-          MaybeReleaseChunkRange(region_beg, chunk_size, beg, prev);
+        if (prev + scaled_chunk_size - range_beg >= kScaledGranularity) {
+          MaybeReleaseChunkRange(region_beg, chunk_size, range_beg, prev);
           region->rtoi.n_freed_at_last_release = region->n_freed;
           region->rtoi.num_releases++;
         }
-        beg = chunk;
+        range_beg = chunk;
       }
       prev = chunk;
     }
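
The interval check added above gives allocator_release_to_os_interval_ms three regimes per region: a negative value (the kReleaseToOSIntervalNever default) disables releasing, zero allows a release attempt on every qualifying free, and a positive value rate-limits releases. A standalone restatement of just that decision (an illustrative helper, not the allocator code itself):

#include <cstdint>

// Sketch of the throttling rule: interval_ms < 0 never releases, 0 never
// throttles, and N > 0 allows at most one release per N milliseconds.
inline bool ShouldAttemptRelease(int32_t interval_ms, uint64_t last_release_ns,
                                 uint64_t now_ns) {
  if (interval_ms < 0)
    return false;  // kReleaseToOSIntervalNever: releasing is disabled.
  return last_release_ns + interval_ms * 1000000ULL <= now_ns;
}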

Index: lib/sanitizer_common/sanitizer_atomic.h
===================================================================
--- lib/sanitizer_common/sanitizer_atomic.h
+++ lib/sanitizer_common/sanitizer_atomic.h
@@ -37,6 +37,11 @@
   volatile Type val_dont_use;
 };
 
+struct atomic_sint32_t {
+  typedef s32 Type;
+  volatile Type val_dont_use;
+};
+
 struct atomic_uint32_t {
   typedef u32 Type;
   volatile Type val_dont_use;

Index: lib/sanitizer_common/sanitizer_common.h
===================================================================
--- lib/sanitizer_common/sanitizer_common.h
+++ lib/sanitizer_common/sanitizer_common.h
@@ -375,12 +375,6 @@
 // The callback should be registered once at the tool init time.
 void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded));
 
-// Callback to be called when we want to try releasing unused allocator memory
-// back to the OS.
-typedef void (*AllocatorReleaseToOSCallback)();
-// The callback should be registered once at the tool init time.
-void SetAllocatorReleaseToOSCallback(AllocatorReleaseToOSCallback Callback);
-
 // Functions related to signal handling.
 typedef void (*SignalHandlerType)(int, void *, void *);
 bool IsHandledDeadlySignal(int signum);
@@ -842,6 +836,10 @@
   uptr allocated;
 };
 
+// The magic value for allocator_release_to_os_interval_ms common flag to
+// indicate that sanitizer allocator should not attempt to release memory to OS.
+const s32 kReleaseToOSIntervalNever = -1;
+
 }  // namespace __sanitizer
 
 inline void *operator new(__sanitizer::operator_new_size_type size,

Index: lib/sanitizer_common/sanitizer_common_libcdep.cc
===================================================================
--- lib/sanitizer_common/sanitizer_common_libcdep.cc
+++ lib/sanitizer_common/sanitizer_common_libcdep.cc
@@ -70,18 +70,11 @@
   SoftRssLimitExceededCallback = Callback;
 }
 
-static AllocatorReleaseToOSCallback ReleseCallback;
-void SetAllocatorReleaseToOSCallback(AllocatorReleaseToOSCallback Callback) {
-  CHECK_EQ(ReleseCallback, nullptr);
-  ReleseCallback = Callback;
-}
-
 #if SANITIZER_LINUX && !SANITIZER_GO
 void BackgroundThread(void *arg) {
   uptr hard_rss_limit_mb = common_flags()->hard_rss_limit_mb;
   uptr soft_rss_limit_mb = common_flags()->soft_rss_limit_mb;
   bool heap_profile = common_flags()->heap_profile;
-  bool allocator_release_to_os = common_flags()->allocator_release_to_os;
   uptr prev_reported_rss = 0;
   uptr prev_reported_stack_depot_size = 0;
   bool reached_soft_rss_limit = false;
@@ -127,7 +120,6 @@
         SoftRssLimitExceededCallback(false);
       }
     }
-    if (allocator_release_to_os && ReleseCallback) ReleseCallback();
     if (heap_profile &&
         current_rss_mb > rss_during_last_reported_profile * 1.1) {
       Printf("\n\nHEAP PROFILE at RSS %zdMb\n", current_rss_mb);
@@ -162,7 +154,6 @@
   // Start the background thread if one of the rss limits is given.
   if (!common_flags()->hard_rss_limit_mb &&
       !common_flags()->soft_rss_limit_mb &&
-      !common_flags()->allocator_release_to_os &&
       !common_flags()->heap_profile) return;
   if (!&real_pthread_create) return;  // Can't spawn the thread anyway.
   internal_start_thread(BackgroundThread, nullptr);

Index: lib/sanitizer_common/sanitizer_flags.inc
===================================================================
--- lib/sanitizer_common/sanitizer_flags.inc
+++ lib/sanitizer_common/sanitizer_flags.inc
@@ -119,9 +119,10 @@
             " This limit does not affect memory allocations other than"
             " malloc/new.")
 COMMON_FLAG(bool, heap_profile, false, "Experimental heap profiler, asan-only")
-COMMON_FLAG(bool, allocator_release_to_os, false,
-            "Experimental. If true, try to periodically release unused"
-            " memory to the OS.\n")
+COMMON_FLAG(s32, allocator_release_to_os_interval_ms, kReleaseToOSIntervalNever,
+            "Experimental. If set, tries to release unused memory to the OS, "
+            "but not more often than this interval (in milliseconds). Negative "
+            "values mean do not attempt to release memory to the OS.\n")
 COMMON_FLAG(bool, can_use_proc_maps_statm, true,
             "If false, do not attempt to read /proc/maps/statm."
             " Mostly useful for testing sanitizers.")

Index: lib/sanitizer_common/tests/sanitizer_allocator_test.cc
===================================================================
--- lib/sanitizer_common/tests/sanitizer_allocator_test.cc
+++ lib/sanitizer_common/tests/sanitizer_allocator_test.cc
@@ -141,7 +141,7 @@
 template <class Allocator>
 void TestSizeClassAllocator() {
   Allocator *a = new Allocator;
-  a->Init();
+  a->Init(kReleaseToOSIntervalNever);
   SizeClassAllocatorLocalCache<Allocator> cache;
   memset(&cache, 0, sizeof(cache));
   cache.Init(0);
@@ -238,7 +238,7 @@
 template <class Allocator>
 void SizeClassAllocatorMetadataStress() {
   Allocator *a = new Allocator;
-  a->Init();
+  a->Init(kReleaseToOSIntervalNever);
   SizeClassAllocatorLocalCache<Allocator> cache;
   memset(&cache, 0, sizeof(cache));
   cache.Init(0);
@@ -292,7 +292,7 @@
 template <class Allocator>
 void SizeClassAllocatorGetBlockBeginStress(u64 TotalSize) {
   Allocator *a = new Allocator;
-  a->Init();
+  a->Init(kReleaseToOSIntervalNever);
   SizeClassAllocatorLocalCache<Allocator> cache;
   memset(&cache, 0, sizeof(cache));
   cache.Init(0);
@@ -366,7 +366,7 @@
   TestMapUnmapCallback::unmap_count = 0;
   typedef SizeClassAllocator64 Allocator64WithCallBack;
   Allocator64WithCallBack *a = new Allocator64WithCallBack;
-  a->Init();
+  a->Init(kReleaseToOSIntervalNever);
   EXPECT_EQ(TestMapUnmapCallback::map_count, 1);  // Allocator state.
   SizeClassAllocatorLocalCache cache;
   memset(&cache, 0, sizeof(cache));
@@ -397,7 +397,7 @@
       TestMapUnmapCallback> Allocator32WithCallBack;
   Allocator32WithCallBack *a = new Allocator32WithCallBack;
-  a->Init();
+  a->Init(kReleaseToOSIntervalNever);
   EXPECT_EQ(TestMapUnmapCallback::map_count, 0);
   SizeClassAllocatorLocalCache cache;
   memset(&cache, 0, sizeof(cache));
@@ -430,7 +430,7 @@
 template <class Allocator>
 void FailInAssertionOnOOM() {
   Allocator a;
-  a.Init();
+  a.Init(kReleaseToOSIntervalNever);
   SizeClassAllocatorLocalCache<Allocator> cache;
   memset(&cache, 0, sizeof(cache));
   cache.Init(0);
@@ -538,7 +538,7 @@
       CombinedAllocator Allocator;
   Allocator *a = new Allocator;
-  a->Init(/* may_return_null */ true);
+  a->Init(/* may_return_null */ true, kReleaseToOSIntervalNever);
   AllocatorCache cache;
   memset(&cache, 0, sizeof(cache));
@@ -627,7 +627,7 @@
   typedef typename AllocatorCache::Allocator Allocator;
   Allocator *a = new Allocator();
-  a->Init();
+  a->Init(kReleaseToOSIntervalNever);
   memset(&cache, 0, sizeof(cache));
   cache.Init(0);
@@ -702,7 +702,7 @@
 TEST(SanitizerCommon, AllocatorLeakTest) {
   typedef AllocatorCache::Allocator Allocator;
   Allocator a;
-  a.Init();
+  a.Init(kReleaseToOSIntervalNever);
   uptr total_used_memory = 0;
   for (int i = 0; i < 100; i++) {
     pthread_t t;
@@ -735,7 +735,7 @@
 // able to call Deallocate on a zeroed cache, and it will self-initialize.
 TEST(Allocator, AllocatorCacheDeallocNewThread) {
   AllocatorCache::Allocator allocator;
-  allocator.Init();
+  allocator.Init(kReleaseToOSIntervalNever);
   AllocatorCache main_cache;
   AllocatorCache child_cache;
   memset(&main_cache, 0, sizeof(main_cache));
@@ -806,7 +806,7 @@
 template <class Allocator>
 void TestSizeClassAllocatorIteration() {
   Allocator *a = new Allocator;
-  a->Init();
+  a->Init(kReleaseToOSIntervalNever);
   SizeClassAllocatorLocalCache<Allocator> cache;
   memset(&cache, 0, sizeof(cache));
   cache.Init(0);
@@ -947,7 +947,7 @@
   const uptr kRegionSize =
       kAllocatorSize / SpecialSizeClassMap::kNumClassesRounded;
   SpecialAllocator64 *a = new SpecialAllocator64;
-  a->Init();
+  a->Init(kReleaseToOSIntervalNever);
   SizeClassAllocatorLocalCache cache;
   memset(&cache, 0, sizeof(cache));
   cache.Init(0);

Index: lib/scudo/scudo_allocator.h
===================================================================
--- lib/scudo/scudo_allocator.h
+++ lib/scudo/scudo_allocator.h
@@ -94,6 +94,7 @@
   u32 QuarantineSizeMb;
   u32 ThreadLocalQuarantineSizeKb;
   bool MayReturnNull;
+  s32 ReleaseToOSIntervalMs;
   bool DeallocationTypeMismatch;
   bool DeleteSizeMismatch;
   bool ZeroContents;

Index: lib/scudo/scudo_allocator.cpp
===================================================================
--- lib/scudo/scudo_allocator.cpp
+++ lib/scudo/scudo_allocator.cpp
@@ -212,6 +212,7 @@
 void AllocatorOptions::setFrom(const Flags *f, const CommonFlags *cf) {
   MayReturnNull = cf->allocator_may_return_null;
+  ReleaseToOSIntervalMs = cf->allocator_release_to_os_interval_ms;
   QuarantineSizeMb = f->QuarantineSizeMb;
   ThreadLocalQuarantineSizeKb = f->ThreadLocalQuarantineSizeKb;
   DeallocationTypeMismatch = f->DeallocationTypeMismatch;
@@ -221,6 +222,7 @@
 void AllocatorOptions::copyTo(Flags *f, CommonFlags *cf) const {
   cf->allocator_may_return_null = MayReturnNull;
+  cf->allocator_release_to_os_interval_ms = ReleaseToOSIntervalMs;
   f->QuarantineSizeMb = QuarantineSizeMb;
   f->ThreadLocalQuarantineSizeKb = ThreadLocalQuarantineSizeKb;
   f->DeallocationTypeMismatch = DeallocationTypeMismatch;
@@ -276,7 +278,7 @@
     DeallocationTypeMismatch = Options.DeallocationTypeMismatch;
     DeleteSizeMismatch = Options.DeleteSizeMismatch;
     ZeroContents = Options.ZeroContents;
-    BackendAllocator.Init(Options.MayReturnNull);
+    BackendAllocator.Init(Options.MayReturnNull, Options.ReleaseToOSIntervalMs);
     AllocatorQuarantine.Init(
         static_cast<uptr>(Options.QuarantineSizeMb) << 20,
         static_cast<uptr>(Options.ThreadLocalQuarantineSizeKb) << 10);

Index: lib/tsan/rtl/tsan_mman.cc
===================================================================
--- lib/tsan/rtl/tsan_mman.cc
+++ lib/tsan/rtl/tsan_mman.cc
@@ -111,7 +111,9 @@
 }
 
 void InitializeAllocator() {
-  allocator()->Init(common_flags()->allocator_may_return_null);
+  allocator()->Init(
+      common_flags()->allocator_may_return_null,
+      common_flags()->allocator_release_to_os_interval_ms);
 }
 
 void InitializeAllocatorLate() {
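
Besides the end-to-end lit test below, the new accessor pair could also be covered directly in sanitizer_allocator_test.cc. A sketch in the file's existing style of templated helpers (not part of this patch; it only holds for a 64-bit SizeClassAllocator64 instantiation, since the 32-bit SetReleaseToOSIntervalMs is a no-op and its getter always returns kReleaseToOSIntervalNever):

// Sketch only: exercises the interval accessors added by this patch.
template <class Allocator>
void TestReleaseToOSIntervalAccessors() {
  Allocator *a = new Allocator;
  a->Init(kReleaseToOSIntervalNever);
  EXPECT_EQ(kReleaseToOSIntervalNever, a->ReleaseToOSIntervalMs());
  a->SetReleaseToOSIntervalMs(100);
  EXPECT_EQ(100, a->ReleaseToOSIntervalMs());
  delete a;
}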

Index: test/asan/TestCases/Linux/release_to_os_test.cc
===================================================================
--- test/asan/TestCases/Linux/release_to_os_test.cc
+++ test/asan/TestCases/Linux/release_to_os_test.cc
@@ -2,8 +2,8 @@
 //
 // RUN: %clangxx_asan -std=c++11 %s -o %t
-// RUN: %env_asan_opts=allocator_release_to_os=1 %run %t 2>&1 | FileCheck %s --check-prefix=RELEASE
-// RUN: %env_asan_opts=allocator_release_to_os=0 %run %t 2>&1 | FileCheck %s --check-prefix=NO_RELEASE
+// RUN: %env_asan_opts=allocator_release_to_os_interval_ms=0 %run %t 2>&1 | FileCheck %s --check-prefix=RELEASE
+// RUN: %env_asan_opts=allocator_release_to_os_interval_ms=-1 %run %t 2>&1 | FileCheck %s --check-prefix=NO_RELEASE
 //
 // REQUIRES: x86_64-target-arch
 
 #include
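
The test above drives the flag through %env_asan_opts; outside of lit, the equivalent is setting the option in the tool's options environment variable, for example (illustrative command lines, binary name arbitrary):

  ASAN_OPTIONS=allocator_release_to_os_interval_ms=500 ./app   # release at most once per 500 ms per region
  ASAN_OPTIONS=allocator_release_to_os_interval_ms=-1  ./app   # never release (the default, kReleaseToOSIntervalNever)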