diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_local_cache.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_local_cache.h
--- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_local_cache.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_local_cache.h
@@ -54,7 +54,7 @@
     PerClass *c = &per_class_[class_id];
     InitCache(c);
     if (UNLIKELY(c->count == c->max_count))
-      Drain(c, allocator, class_id, c->max_count / 2);
+      Drain(c, allocator, class_id);
     CompactPtrT chunk = allocator->PointerToCompactPtr(
         allocator->GetRegionBeginBySizeClass(class_id),
         reinterpret_cast<uptr>(p));
@@ -63,9 +63,10 @@
   }
 
   void Drain(SizeClassAllocator *allocator) {
+    MemoryMapperT memory_mapper(*allocator);
     for (uptr i = 1; i < kNumClasses; i++) {
       PerClass *c = &per_class_[i];
-      while (c->count > 0) Drain(c, allocator, i, c->count);
+      while (c->count > 0) Drain(&memory_mapper, c, allocator, i, c->count);
     }
   }
 
@@ -106,12 +107,18 @@
     return true;
   }
 
-  NOINLINE void Drain(PerClass *c, SizeClassAllocator *allocator, uptr class_id,
-                      uptr count) {
+  NOINLINE void Drain(PerClass *c, SizeClassAllocator *allocator,
+                      uptr class_id) {
+    MemoryMapperT memory_mapper(*allocator);
+    Drain(&memory_mapper, c, allocator, class_id, c->max_count / 2);
+  }
+
+  void Drain(MemoryMapperT *memory_mapper, PerClass *c,
+             SizeClassAllocator *allocator, uptr class_id, uptr count) {
     CHECK_GE(c->count, count);
     const uptr first_idx_to_drain = c->count - count;
     c->count -= count;
-    allocator->ReturnToAllocator(&stats_, class_id,
+    allocator->ReturnToAllocator(memory_mapper, &stats_, class_id,
                                  &c->chunks[first_idx_to_drain], count);
   }
 };
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h
--- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h
@@ -47,40 +47,53 @@
  public:
   typedef typename Allocator::CompactPtrT CompactPtrT;
 
-  MemoryMapper(const Allocator &base_allocator, uptr class_id)
-      : allocator(base_allocator),
-        region_base(base_allocator.GetRegionBeginBySizeClass(class_id)) {}
+  explicit MemoryMapper(const Allocator &allocator) : allocator_(allocator) {}
 
-  uptr GetReleasedRangesCount() const { return released_ranges_count; }
+  ~MemoryMapper() {
+    if (buffer_)
+      UnmapOrDie(buffer_, buffer_size_);
+  }
 
-  uptr GetReleasedBytes() const { return released_bytes; }
+  bool GetAndResetStats(uptr &ranges, uptr &bytes) {
+    ranges = released_ranges_count_;
+    released_ranges_count_ = 0;
+    bytes = released_bytes_;
+    released_bytes_ = 0;
+    return ranges != 0;
+  }
 
   void *MapPackedCounterArrayBuffer(uptr buffer_size) {
     // TODO(alekseyshl): The idea to explore is to check if we have enough
     // space between num_freed_chunks*sizeof(CompactPtrT) and
     // mapped_free_array to fit buffer_size bytes and use that space instead
     // of mapping a temporary one.
-    return MmapOrDieOnFatalError(buffer_size, "ReleaseToOSPageCounters");
-  }
-
-  void UnmapPackedCounterArrayBuffer(void *buffer, uptr buffer_size) {
-    UnmapOrDie(buffer, buffer_size);
+    if (buffer_size_ < buffer_size) {
+      if (buffer_)
+        UnmapOrDie(buffer_, buffer_size_);
+      buffer_ = MmapOrDieOnFatalError(buffer_size, "ReleaseToOSPageCounters");
+      buffer_size_ = buffer_size;
+    } else {
+      internal_memset(buffer_, 0, buffer_size);
+    }
+    return buffer_;
   }
 
   // Releases [from, to) range of pages back to OS.
-  void ReleasePageRangeToOS(CompactPtrT from, CompactPtrT to) {
-    const uptr from_page = allocator.CompactPtrToPointer(region_base, from);
-    const uptr to_page = allocator.CompactPtrToPointer(region_base, to);
+  void ReleasePageRangeToOS(uptr class_id, CompactPtrT from, CompactPtrT to) {
+    const uptr region_base = allocator_.GetRegionBeginBySizeClass(class_id);
+    const uptr from_page = allocator_.CompactPtrToPointer(region_base, from);
+    const uptr to_page = allocator_.CompactPtrToPointer(region_base, to);
     ReleaseMemoryPagesToOS(from_page, to_page);
-    released_ranges_count++;
-    released_bytes += to_page - from_page;
+    released_ranges_count_++;
+    released_bytes_ += to_page - from_page;
   }
 
  private:
-  const Allocator &allocator;
-  const uptr region_base = 0;
-  uptr released_ranges_count = 0;
-  uptr released_bytes = 0;
+  const Allocator &allocator_;
+  uptr released_ranges_count_ = 0;
+  uptr released_bytes_ = 0;
+  void *buffer_ = nullptr;
+  uptr buffer_size_ = 0;
 };
 
 template <class Params>
@@ -162,9 +175,10 @@
   }
 
   void ForceReleaseToOS() {
+    MemoryMapperT memory_mapper(*this);
     for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
       BlockingMutexLock l(&GetRegionInfo(class_id)->mutex);
-      MaybeReleaseToOS(class_id, true /*force*/);
+      MaybeReleaseToOS(&memory_mapper, class_id, true /*force*/);
     }
   }
 
@@ -173,7 +187,8 @@
            alignment <= SizeClassMap::kMaxSize;
   }
 
-  NOINLINE void ReturnToAllocator(AllocatorStats *stat, uptr class_id,
+  NOINLINE void ReturnToAllocator(MemoryMapperT *memory_mapper,
+                                  AllocatorStats *stat, uptr class_id,
                                   const CompactPtrT *chunks, uptr n_chunks) {
     RegionInfo *region = GetRegionInfo(class_id);
     uptr region_beg = GetRegionBeginBySizeClass(class_id);
@@ -196,7 +211,7 @@
     region->num_freed_chunks = new_num_freed_chunks;
     region->stats.n_freed += n_chunks;
 
-    MaybeReleaseToOS(class_id, false /*force*/);
+    MaybeReleaseToOS(memory_mapper, class_id, false /*force*/);
   }
 
   NOINLINE bool GetFromAllocator(AllocatorStats *stat, uptr class_id,
@@ -431,11 +446,6 @@
       buffer = reinterpret_cast<u64 *>(
           memory_mapper->MapPackedCounterArrayBuffer(buffer_size));
     }
-    ~PackedCounterArray() {
-      if (buffer) {
-        memory_mapper->UnmapPackedCounterArrayBuffer(buffer, buffer_size);
-      }
-    }
 
     bool IsAllocated() const {
       return !!buffer;
@@ -480,8 +490,9 @@
 template <class MemoryMapperT>
 class FreePagesRangeTracker {
  public:
-  explicit FreePagesRangeTracker(MemoryMapperT *mapper)
+  explicit FreePagesRangeTracker(MemoryMapperT *mapper, uptr class_id)
       : memory_mapper(mapper),
+        class_id(class_id),
         page_size_scaled_log(Log2(GetPageSizeCached() >> kCompactPtrScale)),
         in_the_range(false),
         current_page(0),
@@ -507,13 +518,14 @@
   void CloseOpenedRange() {
     if (in_the_range) {
       memory_mapper->ReleasePageRangeToOS(
-          current_range_start_page << page_size_scaled_log,
+          class_id, current_range_start_page << page_size_scaled_log,
           current_page << page_size_scaled_log);
       in_the_range = false;
     }
   }
 
   MemoryMapperT *const memory_mapper;
+  const uptr class_id;
   const uptr page_size_scaled_log;
   bool in_the_range;
   uptr current_page;
@@ -528,7 +540,8 @@
   static void ReleaseFreeMemoryToOS(CompactPtrT *free_array,
                                     uptr free_array_count, uptr chunk_size,
                                     uptr allocated_pages_count,
-                                    MemoryMapper *memory_mapper) {
+                                    MemoryMapper *memory_mapper,
+                                    uptr class_id) {
     const uptr page_size = GetPageSizeCached();
 
     // Figure out the number of chunks per page and whether we can take a fast
@@ -590,7 +603,7 @@
 
     // Iterate over pages detecting ranges of pages with chunk counters equal
     // to the expected number of chunks for the particular page.
-    FreePagesRangeTracker<MemoryMapper> range_tracker(memory_mapper);
+    FreePagesRangeTracker<MemoryMapper> range_tracker(memory_mapper, class_id);
     if (same_chunk_count_per_page) {
       // Fast path, every page has the same number of chunks affecting it.
       for (uptr i = 0; i < counters.GetCount(); i++)
@@ -868,7 +881,8 @@
   //
   // TODO(morehouse): Support a callback on memory release so HWASan can release
   // aliases as well.
-  void MaybeReleaseToOS(uptr class_id, bool force) {
+  void MaybeReleaseToOS(MemoryMapperT *memory_mapper, uptr class_id,
+                        bool force) {
     RegionInfo *region = GetRegionInfo(class_id);
     const uptr chunk_size = ClassIdToSize(class_id);
     const uptr page_size = GetPageSizeCached();
@@ -892,17 +906,16 @@
       }
     }
 
-    MemoryMapper memory_mapper(*this, class_id);
-
     ReleaseFreeMemoryToOS(
         GetFreeArray(GetRegionBeginBySizeClass(class_id)), n, chunk_size,
-        RoundUpTo(region->allocated_user, page_size) / page_size,
-        &memory_mapper);
+        RoundUpTo(region->allocated_user, page_size) / page_size, memory_mapper,
+        class_id);
 
-    if (memory_mapper.GetReleasedRangesCount() > 0) {
+    uptr ranges, bytes;
+    if (memory_mapper->GetAndResetStats(ranges, bytes)) {
       region->rtoi.n_freed_at_last_release = region->stats.n_freed;
-      region->rtoi.num_releases += memory_mapper.GetReleasedRangesCount();
-      region->rtoi.last_released_bytes = memory_mapper.GetReleasedBytes();
+      region->rtoi.num_releases += ranges;
+      region->rtoi.last_released_bytes = bytes;
     }
     region->rtoi.last_release_at_ns = MonotonicNanoTime();
   }
diff --git a/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator_test.cpp b/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator_test.cpp
--- a/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator_test.cpp
+++ b/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator_test.cpp
@@ -1243,7 +1243,7 @@
             Log2(GetPageSizeCached() >> Allocator64::kCompactPtrScale)),
         last_page_reported(0) {}
 
-  void ReleasePageRangeToOS(u32 from, u32 to) {
+  void ReleasePageRangeToOS(u32 class_id, u32 from, u32 to) {
     from >>= page_size_scaled_log;
     to >>= page_size_scaled_log;
     ASSERT_LT(from, to);
@@ -1283,7 +1283,7 @@
 
   for (auto test_case : test_cases) {
     RangeRecorder range_recorder;
-    RangeTracker tracker(&range_recorder);
+    RangeTracker tracker(&range_recorder, 1);
     for (int i = 0; test_case[i] != 0; i++)
       tracker.NextPage(test_case[i] == 'x');
     tracker.Done();
@@ -1309,7 +1309,7 @@
     free(buffer);
   }
 
-  void ReleasePageRangeToOS(u32 from, u32 to) {
+  void ReleasePageRangeToOS(u32 class_id, u32 from, u32 to) {
     uptr page_size_scaled =
         GetPageSizeCached() >> Allocator64::kCompactPtrScale;
     for (u32 i = from; i < to; i += page_size_scaled)
@@ -1353,7 +1353,7 @@
 
   Allocator::ReleaseFreeMemoryToOS(&free_array[0], free_array.size(),
                                    chunk_size, kAllocatedPagesCount,
                                    &memory_mapper, class_id);
 
   // Verify that there are no released pages touched by used chunks and all
   // ranges of free chunks big enough to contain the entire memory pages had
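
For reference, a minimal standalone sketch of the usage pattern this patch moves to: one mapper object is created once, reused across every size class, owns a single lazily grown scratch buffer, and accumulates release statistics that the caller drains via GetAndResetStats(). The names below (Mapper, GetScratch, RecordRelease) are illustrative stand-ins, not sanitizer_common APIs; only GetAndResetStats mirrors the shape of the method added above.

// Standalone illustration of the "one mapper per drain, not per class_id" pattern.
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>

using uptr = uintptr_t;

class Mapper {
 public:
  ~Mapper() { free(buffer_); }

  // Grow the scratch buffer only when a larger one is needed; otherwise
  // clear and reuse the existing allocation (like the patched
  // MapPackedCounterArrayBuffer, which keeps the mapping alive).
  void *GetScratch(uptr size) {
    if (buffer_size_ < size) {
      free(buffer_);
      buffer_ = malloc(size);
      buffer_size_ = size;
    } else {
      memset(buffer_, 0, size);
    }
    return buffer_;
  }

  // Stand-in for ReleasePageRangeToOS: just account for the released bytes.
  void RecordRelease(uptr bytes) {
    released_ranges_++;
    released_bytes_ += bytes;
  }

  // Return accumulated stats and reset them, matching the shape of
  // MemoryMapper::GetAndResetStats introduced by the patch.
  bool GetAndResetStats(uptr &ranges, uptr &bytes) {
    ranges = released_ranges_;
    bytes = released_bytes_;
    released_ranges_ = released_bytes_ = 0;
    return ranges != 0;
  }

 private:
  void *buffer_ = nullptr;
  uptr buffer_size_ = 0;
  uptr released_ranges_ = 0;
  uptr released_bytes_ = 0;
};

int main() {
  Mapper mapper;  // one instance shared by the whole release loop
  for (uptr class_id = 1; class_id < 8; class_id++) {
    mapper.GetScratch(64 * class_id);  // buffer grows a few times, not per class
    mapper.RecordRelease(4096);        // pretend one page range was released
    uptr ranges, bytes;
    if (mapper.GetAndResetStats(ranges, bytes))
      printf("class %zu: %zu ranges, %zu bytes released\n", (size_t)class_id,
             (size_t)ranges, (size_t)bytes);
  }
  return 0;
}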