Index: lib/sanitizer_common/sanitizer_allocator_internal.h
===================================================================
--- lib/sanitizer_common/sanitizer_allocator_internal.h
+++ lib/sanitizer_common/sanitizer_allocator_internal.h
@@ -46,9 +46,12 @@
 typedef SizeClassAllocatorLocalCache<PrimaryInternalAllocator>
     InternalAllocatorCache;
 
+typedef LargeMmapAllocator<NoOpMapUnmapCallback,
+                           LargeMmapAllocatorPtrArrayStatic>
+    SecondaryInternalAllocator;
+
 typedef CombinedAllocator<PrimaryInternalAllocator, InternalAllocatorCache,
-                          LargeMmapAllocator<NoOpMapUnmapCallback>
-                          > InternalAllocator;
+                          SecondaryInternalAllocator> InternalAllocator;
 
 void *InternalAlloc(uptr size, InternalAllocatorCache *cache = nullptr,
                     uptr alignment = 0);
Index: lib/sanitizer_common/sanitizer_allocator_secondary.h
===================================================================
--- lib/sanitizer_common/sanitizer_allocator_secondary.h
+++ lib/sanitizer_common/sanitizer_allocator_secondary.h
@@ -14,14 +14,66 @@
 #error This file must be included inside sanitizer_allocator.h
 #endif
 
+// Fixed array to store LargeMmapAllocator chunks list, limited to 32K total
+// allocated chunks. To be used in memory constrained or not memory hungry cases
+// (currently, 32 bits and internal allocator).
+class LargeMmapAllocatorPtrArrayStatic {
+ public:
+  INLINE void *Init() { return &p_[0]; }
+  INLINE void EnsureSpace(uptr n) { CHECK_LT(n, kMaxNumChunks); }
+ private:
+  static const int kMaxNumChunks = 1 << 15;
+  uptr p_[kMaxNumChunks];
+};
+
+// Much less restricted LargeMmapAllocator chunks list (comparing to
+// PtrArrayStatic). Backed by mmaped memory region and can hold up to 1M chunks.
+// ReservedAddressRange was used instead of just MAP_NORESERVE to achieve the
+// same functionality in Fuchsia case, which does not support MAP_NORESERVE.
+class LargeMmapAllocatorPtrArrayDynamic {
+ public:
+  INLINE void *Init() {
+    uptr p = address_range_.Init(kMaxNumChunks * sizeof(uptr),
+                                 "sanitizer_large_allocator");
+    CHECK(p);
+    return reinterpret_cast<void*>(p);
+  }
+
+  INLINE void EnsureSpace(uptr n) {
+    CHECK_LT(n, kMaxNumChunks);
+    DCHECK(n <= n_reserved_);
+    if (UNLIKELY(n == n_reserved_)) {
+      address_range_.MapOrDie(
+          reinterpret_cast<uptr>(address_range_.base()) +
+              n_reserved_ * sizeof(uptr),
+          kChunksBlockCount * sizeof(uptr));
+      n_reserved_ += kChunksBlockCount;
+    }
+  }
+
+ private:
+  static const int kMaxNumChunks = 1 << 20;
+  static const int kChunksBlockCount = 1 << 14;
+  ReservedAddressRange address_range_;
+  uptr n_reserved_;
+};
+
+#if SANITIZER_WORDSIZE == 32
+typedef LargeMmapAllocatorPtrArrayStatic DefaultLargeMmapAllocatorPtrArray;
+#else
+typedef LargeMmapAllocatorPtrArrayDynamic DefaultLargeMmapAllocatorPtrArray;
+#endif
+
 // This class can (de)allocate only large chunks of memory using mmap/unmap.
 // The main purpose of this allocator is to cover large and rare allocation
 // sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
-template <class MapUnmapCallback = NoOpMapUnmapCallback>
+template <class MapUnmapCallback = NoOpMapUnmapCallback,
+          class PtrArrayT = DefaultLargeMmapAllocatorPtrArray>
 class LargeMmapAllocator {
  public:
   void InitLinkerInitialized() {
     page_size_ = GetPageSizeCached();
+    chunks_ = reinterpret_cast<Header**>(ptr_array_.Init());
   }
 
   void Init() {
@@ -63,11 +115,11 @@
     CHECK_LT(size_log, ARRAY_SIZE(stats.by_size_log));
     {
       SpinMutexLock l(&mutex_);
+      ptr_array_.EnsureSpace(n_chunks_);
       uptr idx = n_chunks_++;
-      chunks_sorted_ = false;
-      CHECK_LT(idx, kMaxNumChunks);
       h->chunk_idx = idx;
       chunks_[idx] = h;
+      chunks_sorted_ = false;
       stats.n_allocs++;
       stats.currently_allocated += map_size;
       stats.max_allocated = Max(stats.max_allocated, stats.currently_allocated);
@@ -85,9 +137,8 @@
     uptr idx = h->chunk_idx;
     CHECK_EQ(chunks_[idx], h);
     CHECK_LT(idx, n_chunks_);
-    chunks_[idx] = chunks_[n_chunks_ - 1];
+    chunks_[idx] = chunks_[--n_chunks_];
     chunks_[idx]->chunk_idx = idx;
-    n_chunks_--;
     chunks_sorted_ = false;
     stats.n_frees++;
     stats.currently_allocated -= h->map_size;
@@ -223,7 +274,7 @@
     EnsureSortedChunks();  // Avoid doing the sort while iterating.
     for (uptr i = 0; i < n_chunks_; i++) {
       auto t = chunks_[i];
-      callback(reinterpret_cast<uptr>(GetUser(chunks_[i])), arg);
+      callback(reinterpret_cast<uptr>(GetUser(t)), arg);
       // Consistency check: verify that the array did not change.
       CHECK_EQ(chunks_[i], t);
       CHECK_EQ(chunks_[i]->chunk_idx, i);
@@ -231,7 +282,6 @@
   }
 
  private:
-  static const int kMaxNumChunks = 1 << FIRST_32_SECOND_64(15, 18);
   struct Header {
     uptr map_beg;
     uptr map_size;
@@ -257,7 +307,8 @@
   }
 
   uptr page_size_;
-  Header *chunks_[kMaxNumChunks];
+  Header **chunks_;
+  PtrArrayT ptr_array_;
   uptr n_chunks_;
   bool chunks_sorted_;
   struct Stats {
@@ -266,4 +317,3 @@
   } stats;
   SpinMutex mutex_;
 };
-
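
Note (reviewer sketch, not part of the patch): the dynamic variant reserves
address space for the worst case up front and only commits it in
kChunksBlockCount-sized blocks as the chunk count grows, so a typical process
pays for a few pages rather than the full 1M-entry array. The standalone C++
sketch below illustrates that same reserve-then-commit pattern with raw POSIX
mmap/mprotect on Linux; the GrowablePtrArray name and its constants are made
up for illustration and the code is not the sanitizer's ReservedAddressRange
abstraction, which also handles platforms without MAP_NORESERVE such as
Fuchsia.

// Standalone illustration of reserve-then-commit growth (Linux, POSIX mmap).
#include <sys/mman.h>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdio>

class GrowablePtrArray {
 public:
  void Init() {
    // Reserve virtual address space only; no physical pages are committed yet.
    base_ = mmap(nullptr, kMaxNumChunks * sizeof(uintptr_t), PROT_NONE,
                 MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
    assert(base_ != MAP_FAILED);
    n_reserved_ = 0;
  }

  // Make sure slot n is backed by committed memory before it is written,
  // committing one block of slots at a time (analogous to MapOrDie on the
  // reserved range).
  void EnsureSpace(size_t n) {
    assert(n < kMaxNumChunks);
    if (n < n_reserved_) return;
    void *block =
        static_cast<char *>(base_) + n_reserved_ * sizeof(uintptr_t);
    int res = mprotect(block, kChunksBlockCount * sizeof(uintptr_t),
                       PROT_READ | PROT_WRITE);
    assert(res == 0);
    (void)res;
    n_reserved_ += kChunksBlockCount;
  }

  uintptr_t *data() { return static_cast<uintptr_t *>(base_); }

 private:
  static const size_t kMaxNumChunks = 1 << 20;     // 1M slots reserved
  static const size_t kChunksBlockCount = 1 << 14; // committed 16K at a time
  void *base_ = nullptr;
  size_t n_reserved_ = 0;
};

int main() {
  GrowablePtrArray arr;
  arr.Init();
  for (size_t i = 0; i < 100000; i++) {
    arr.EnsureSpace(i);  // commits memory lazily, block by block
    arr.data()[i] = i;
  }
  std::printf("last slot: %zu\n", static_cast<size_t>(arr.data()[99999]));
  return 0;
}

This is also why the patch can drop the kMaxNumChunks CHECK from Allocate():
EnsureSpace() now performs the bound check and, for the dynamic array, grows
the backing mapping before the new index is used.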