Index: compiler-rt/trunk/lib/asan/asan_allocator.cc
===================================================================
--- compiler-rt/trunk/lib/asan/asan_allocator.cc
+++ compiler-rt/trunk/lib/asan/asan_allocator.cc
@@ -235,6 +235,8 @@
   AllocatorCache fallback_allocator_cache;
   QuarantineCache fallback_quarantine_cache;
 
+  atomic_uint8_t rss_limit_exceeded;
+
   // ------------------- Options --------------------------
   atomic_uint16_t min_redzone;
   atomic_uint16_t max_redzone;
@@ -268,6 +270,14 @@
     SharedInitCode(options);
   }
 
+  bool RssLimitExceeded() {
+    return atomic_load(&rss_limit_exceeded, memory_order_relaxed);
+  }
+
+  void SetRssLimitExceeded(bool limit_exceeded) {
+    atomic_store(&rss_limit_exceeded, limit_exceeded, memory_order_relaxed);
+  }
+
   void RePoisonChunk(uptr chunk) {
     // This could be a user-facing chunk (with redzones), or some internal
     // housekeeping chunk, like TransferBatch. Start by assuming the former.
@@ -363,6 +373,8 @@
                  AllocType alloc_type, bool can_fill) {
     if (UNLIKELY(!asan_inited))
       AsanInitFromRtl();
+    if (RssLimitExceeded())
+      return allocator.ReturnNullOrDieOnOOM();
     Flags &fl = *flags();
     CHECK(stack);
     const uptr min_alignment = SHADOW_GRANULARITY;
@@ -400,16 +412,15 @@
 
     AsanThread *t = GetCurrentThread();
     void *allocated;
-    bool check_rss_limit = true;
     if (t) {
       AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
       allocated =
-          allocator.Allocate(cache, needed_size, 8, false, check_rss_limit);
+          allocator.Allocate(cache, needed_size, 8, false);
     } else {
       SpinMutexLock l(&fallback_mutex);
       AllocatorCache *cache = &fallback_allocator_cache;
       allocated =
-          allocator.Allocate(cache, needed_size, 8, false, check_rss_limit);
+          allocator.Allocate(cache, needed_size, 8, false);
     }
     if (!allocated)
       return allocator.ReturnNullOrDieOnOOM();
@@ -866,8 +877,8 @@
   instance.ForceUnlock();
 }
 
-void AsanSoftRssLimitExceededCallback(bool exceeded) {
-  instance.allocator.SetRssLimitIsExceeded(exceeded);
+void AsanSoftRssLimitExceededCallback(bool limit_exceeded) {
+  instance.SetRssLimitExceeded(limit_exceeded);
 }
 
 }  // namespace __asan
Index: compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_combined.h
===================================================================
--- compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_combined.h
+++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_combined.h
@@ -43,12 +43,12 @@
   }
 
   void *Allocate(AllocatorCache *cache, uptr size, uptr alignment,
-                 bool cleared = false, bool check_rss_limit = false) {
+                 bool cleared = false) {
     // Returning 0 on malloc(0) may break a lot of code.
     if (size == 0)
       size = 1;
-    if (size + alignment < size) return ReturnNullOrDieOnBadRequest();
-    if (check_rss_limit && RssLimitIsExceeded()) return ReturnNullOrDieOnOOM();
+    if (size + alignment < size)
+      return ReturnNullOrDieOnBadRequest();
     uptr original_size = size;
     // If alignment requirements are to be fulfilled by the frontend allocator
     // rather than by the primary or secondary, passing an alignment lower than
@@ -89,7 +89,8 @@
   }
 
   void *ReturnNullOrDieOnOOM() {
-    if (MayReturnNull()) return nullptr;
+    if (MayReturnNull())
+      return nullptr;
     ReportAllocatorCannotReturnNull(true);
   }
 
@@ -106,15 +107,6 @@
     primary_.SetReleaseToOSIntervalMs(release_to_os_interval_ms);
   }
 
-  bool RssLimitIsExceeded() {
-    return atomic_load(&rss_limit_is_exceeded_, memory_order_acquire);
-  }
-
-  void SetRssLimitIsExceeded(bool rss_limit_is_exceeded) {
-    atomic_store(&rss_limit_is_exceeded_, rss_limit_is_exceeded,
-                 memory_order_release);
-  }
-
   void Deallocate(AllocatorCache *cache, void *p) {
     if (!p) return;
     if (primary_.PointerIsMine(p))
@@ -228,6 +220,5 @@
   SecondaryAllocator secondary_;
   AllocatorGlobalStats stats_;
   atomic_uint8_t may_return_null_;
-  atomic_uint8_t rss_limit_is_exceeded_;
 };
 
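Note: the net effect of this patch is that the soft RSS limit flag moves out of the generic CombinedAllocator (where it was threaded through Allocate() via the check_rss_limit parameter) and into the ASan-specific allocator, where it is checked once at the top of Allocate(). The pattern can be sketched standalone, as below; this is an illustrative sketch only, using std::atomic in place of the sanitizer's internal atomic_uint8_t, and the names SoftLimitAllocator and rss_monitor_callback are hypothetical, not part of compiler-rt:

// Standalone sketch (hypothetical names; std::atomic instead of the
// sanitizer's atomics). A monitor thread flips an advisory flag; the
// allocation fast path polls it with relaxed ordering.
#include <atomic>
#include <cstdlib>

class SoftLimitAllocator {
 public:
  // Analogous to SetRssLimitExceeded(): called from the RSS monitor thread.
  void SetRssLimitExceeded(bool exceeded) {
    rss_limit_exceeded_.store(exceeded, std::memory_order_relaxed);
  }

  // Analogous to RssLimitExceeded(): polled on every allocation.
  bool RssLimitExceeded() const {
    return rss_limit_exceeded_.load(std::memory_order_relaxed);
  }

  // Analogous to the patched Allocate(): fail before doing any real work
  // once the soft limit has been reported as exceeded.
  void *Allocate(size_t size) {
    if (RssLimitExceeded())
      return nullptr;  // stand-in for ReturnNullOrDieOnOOM()
    return malloc(size == 0 ? 1 : size);
  }

 private:
  std::atomic<bool> rss_limit_exceeded_{false};
};

// Analogous to AsanSoftRssLimitExceededCallback(): wiring the monitor's
// notification to the allocator instance (`instance` is hypothetical here).
SoftLimitAllocator instance;
void rss_monitor_callback(bool limit_exceeded) {
  instance.SetRssLimitExceeded(limit_exceeded);
}

One detail visible in the diff itself: the removed combined-allocator code used acquire/release ordering on the flag, while the new ASan-side accessors use relaxed ordering. Since the flag only gates an allocation-failure decision and guards no other shared data, relaxed ordering appears sufficient, and it is cheaper on weakly ordered architectures.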