Index: compiler-rt/lib/asan/asan_allocator.h
===================================================================
--- compiler-rt/lib/asan/asan_allocator.h
+++ compiler-rt/lib/asan/asan_allocator.h
@@ -147,7 +147,6 @@
 const uptr kAllocatorSize = 0x40000000000ULL;  // 4T.
 typedef DefaultSizeClassMap SizeClassMap;
 # endif
-template <typename AddressSpaceViewTy>
 struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
   static const uptr kSpaceBeg = kAllocatorSpace;
   static const uptr kSpaceSize = kAllocatorSize;
@@ -155,56 +154,40 @@
   typedef __asan::SizeClassMap SizeClassMap;
   typedef AsanMapUnmapCallback MapUnmapCallback;
   static const uptr kFlags = 0;
-  using AddressSpaceView = AddressSpaceViewTy;
 };

-template <typename AddressSpaceView>
-using PrimaryAllocatorASVT = SizeClassAllocator64<AP64<AddressSpaceView>>;
-using PrimaryAllocator = PrimaryAllocatorASVT<LocalAddressSpaceView>;
+using PrimaryAllocator = SizeClassAllocator64<AP64>;
 #else  // Fallback to SizeClassAllocator32.
 static const uptr kRegionSizeLog = 20;
 static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
 # if SANITIZER_WORDSIZE == 32
-template <typename AddressSpaceView>
-using ByteMapASVT = FlatByteMap<kNumRegions, AddressSpaceView>;
+using ByteMap = FlatByteMap<kNumRegions>;
 # elif SANITIZER_WORDSIZE == 64
-template <typename AddressSpaceView>
-using ByteMapASVT =
-    TwoLevelByteMap<(kNumRegions >> 12), 1 << 12, AddressSpaceView>;
+using ByteMap = TwoLevelByteMap<(kNumRegions >> 12), 1 << 12>;
 # endif
 typedef CompactSizeClassMap SizeClassMap;
-template <typename AddressSpaceViewTy>
 struct AP32 {
   static const uptr kSpaceBeg = 0;
   static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
   static const uptr kMetadataSize = 16;
   typedef __asan::SizeClassMap SizeClassMap;
   static const uptr kRegionSizeLog = __asan::kRegionSizeLog;
-  using AddressSpaceView = AddressSpaceViewTy;
-  using ByteMap = __asan::ByteMapASVT<AddressSpaceView>;
+  using ByteMap = __asan::ByteMap;
   typedef AsanMapUnmapCallback MapUnmapCallback;
   static const uptr kFlags = 0;
 };
-template <typename AddressSpaceView>
-using PrimaryAllocatorASVT = SizeClassAllocator32<AP32<AddressSpaceView> >;
-using PrimaryAllocator = PrimaryAllocatorASVT<LocalAddressSpaceView>;
+using PrimaryAllocator = SizeClassAllocator32<AP32>;
 #endif  // SANITIZER_CAN_USE_ALLOCATOR64

 static const uptr kNumberOfSizeClasses = SizeClassMap::kNumClasses;
-template <typename AddressSpaceView>
-using AllocatorCacheASVT =
-    SizeClassAllocatorLocalCache<PrimaryAllocatorASVT<AddressSpaceView>>;
-using AllocatorCache = AllocatorCacheASVT<LocalAddressSpaceView>;
+using AllocatorCache = SizeClassAllocatorLocalCache<PrimaryAllocator>;
+
+using SecondaryAllocator =
+    LargeMmapAllocator<AsanMapUnmapCallback>;

-template <typename AddressSpaceView>
-using SecondaryAllocatorASVT =
-    LargeMmapAllocator<AsanMapUnmapCallback, DefaultLargeMmapAllocatorPtrArray,
-                       AddressSpaceView>;
 template <typename AddressSpaceView>
 using AsanAllocatorASVT =
-    CombinedAllocator<PrimaryAllocatorASVT<AddressSpaceView>,
-                      AllocatorCacheASVT<AddressSpaceView>,
-                      SecondaryAllocatorASVT<AddressSpaceView>,
+    CombinedAllocator<PrimaryAllocator, AllocatorCache, SecondaryAllocator,
                       AddressSpaceView>;
 using AsanAllocator = AsanAllocatorASVT<LocalAddressSpaceView>;
Index: compiler-rt/lib/hwasan/hwasan_allocator.h
===================================================================
--- compiler-rt/lib/hwasan/hwasan_allocator.h
+++ compiler-rt/lib/hwasan/hwasan_allocator.h
@@ -56,7 +56,6 @@
   static const uptr kSpaceSize = 0x2000000000ULL;
   static const uptr kMetadataSize = sizeof(Metadata);
   typedef __sanitizer::VeryDenseSizeClassMap SizeClassMap;
-  using AddressSpaceView = LocalAddressSpaceView;
   typedef HwasanMapUnmapCallback MapUnmapCallback;
   static const uptr kFlags = 0;
 };
Index: compiler-rt/lib/lsan/lsan_allocator.h
===================================================================
--- compiler-rt/lib/lsan/lsan_allocator.h
+++ compiler-rt/lib/lsan/lsan_allocator.h
@@ -53,25 +53,19 @@
     defined(__arm__)
 static const uptr kRegionSizeLog = 20;
 static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
-template <typename AddressSpaceView>
-using ByteMapASVT =
-    TwoLevelByteMap<(kNumRegions >> 12), 1 << 12, AddressSpaceView>;
+using ByteMap = TwoLevelByteMap<(kNumRegions >> 12), 1 << 12>;

-template <typename AddressSpaceViewTy>
 struct AP32 {
   static const uptr kSpaceBeg = 0;
   static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
   static const uptr kMetadataSize = sizeof(ChunkMetadata);
   typedef __sanitizer::CompactSizeClassMap SizeClassMap;
   static const uptr kRegionSizeLog = __lsan::kRegionSizeLog;
-  using AddressSpaceView = AddressSpaceViewTy;
-  using ByteMap = __lsan::ByteMapASVT<AddressSpaceView>;
+  using ByteMap = __lsan::ByteMap;
   typedef NoOpMapUnmapCallback MapUnmapCallback;
   static const uptr kFlags = 0;
 };
-template <typename AddressSpaceView>
-using PrimaryAllocatorASVT = SizeClassAllocator32<AP32<AddressSpaceView>>;
-using PrimaryAllocator = PrimaryAllocatorASVT<LocalAddressSpaceView>;
+using PrimaryAllocator = SizeClassAllocator32<AP32>;
 #elif defined(__x86_64__) || defined(__powerpc64__)
 # if defined(__powerpc64__)
 const uptr kAllocatorSpace = 0xa0000000000ULL;
@@ -80,7 +74,6 @@
 const uptr kAllocatorSpace = 0x600000000000ULL;
 const uptr kAllocatorSize = 0x40000000000ULL;  // 4T.
 # endif
-template <typename AddressSpaceViewTy>
 struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
   static const uptr kSpaceBeg = kAllocatorSpace;
   static const uptr kSpaceSize = kAllocatorSize;
@@ -88,30 +81,20 @@
   typedef DefaultSizeClassMap SizeClassMap;
   typedef NoOpMapUnmapCallback MapUnmapCallback;
   static const uptr kFlags = 0;
-  using AddressSpaceView = AddressSpaceViewTy;
 };
-template <typename AddressSpaceView>
-using PrimaryAllocatorASVT = SizeClassAllocator64<AP64<AddressSpaceView>>;
-using PrimaryAllocator = PrimaryAllocatorASVT<LocalAddressSpaceView>;
+using PrimaryAllocator = SizeClassAllocator64<AP64>;
 #endif
-template <typename AddressSpaceView>
-using AllocatorCacheASVT =
-    SizeClassAllocatorLocalCache<PrimaryAllocatorASVT<AddressSpaceView>>;
-using AllocatorCache = AllocatorCacheASVT<LocalAddressSpaceView>;
+using AllocatorCache = SizeClassAllocatorLocalCache<PrimaryAllocator>;

-template <typename AddressSpaceView>
-using SecondaryAllocatorASVT =
-    LargeMmapAllocator<NoOpMapUnmapCallback, DefaultLargeMmapAllocatorPtrArray,
-                       AddressSpaceView>;
+using SecondaryAllocator =
+    LargeMmapAllocator<NoOpMapUnmapCallback>;
 template <typename AddressSpaceView>
-using AllocatorASVT =
-    CombinedAllocator<PrimaryAllocatorASVT<AddressSpaceView>,
-                      AllocatorCacheASVT<AddressSpaceView>,
-                      SecondaryAllocatorASVT<AddressSpaceView>,
-                      AddressSpaceView>;
+using AllocatorASVT = CombinedAllocator<PrimaryAllocator, AllocatorCache,
+                                        SecondaryAllocator, AddressSpaceView>;
 using Allocator = AllocatorASVT<LocalAddressSpaceView>;

 AllocatorCache *GetAllocatorCache();
Index: compiler-rt/lib/msan/msan_allocator.cc
===================================================================
--- compiler-rt/lib/msan/msan_allocator.cc
+++ compiler-rt/lib/msan/msan_allocator.cc
@@ -56,7 +56,6 @@
   static const uptr kMetadataSize = sizeof(Metadata);
   typedef __sanitizer::CompactSizeClassMap SizeClassMap;
   static const uptr kRegionSizeLog = __msan::kRegionSizeLog;
-  using AddressSpaceView = LocalAddressSpaceView;
   using ByteMap = __msan::ByteMap;
   typedef MsanMapUnmapCallback MapUnmapCallback;
   static const uptr kFlags = 0;
@@ -78,7 +77,6 @@
   typedef DefaultSizeClassMap SizeClassMap;
   typedef MsanMapUnmapCallback MapUnmapCallback;
   static const uptr kFlags = 0;
-  using AddressSpaceView = LocalAddressSpaceView;
 };

 typedef SizeClassAllocator64<AP64> PrimaryAllocator;
@@ -93,7 +91,6 @@
   typedef DefaultSizeClassMap SizeClassMap;
   typedef MsanMapUnmapCallback MapUnmapCallback;
   static const uptr kFlags = 0;
-  using AddressSpaceView = LocalAddressSpaceView;
 };

 typedef SizeClassAllocator64<AP64> PrimaryAllocator;
@@ -109,7 +106,6 @@
   static const uptr kMetadataSize = sizeof(Metadata);
   typedef __sanitizer::CompactSizeClassMap SizeClassMap;
   static const uptr kRegionSizeLog = __msan::kRegionSizeLog;
-  using AddressSpaceView = LocalAddressSpaceView;
   using ByteMap = __msan::ByteMap;
   typedef MsanMapUnmapCallback MapUnmapCallback;
   static const uptr kFlags = 0;
Index: compiler-rt/lib/sanitizer_common/sanitizer_allocator_bytemap.h
===================================================================
--- compiler-rt/lib/sanitizer_common/sanitizer_allocator_bytemap.h
+++ compiler-rt/lib/sanitizer_common/sanitizer_allocator_bytemap.h
@@ -14,10 +14,9 @@
 #endif

 // Maps integers in range [0, kSize) to u8 values.
-template <u64 kSize, typename AddressSpaceViewTy = LocalAddressSpaceView>
+template <u64 kSize>
 class FlatByteMap {
  public:
-  using AddressSpaceView = AddressSpaceViewTy;
   void Init() {
     internal_memset(map_, 0, sizeof(map_));
   }
@@ -27,11 +26,12 @@
     CHECK_EQ(0U, map_[idx]);
     map_[idx] = val;
   }
-  u8 operator[] (uptr idx) {
+  const u8 &operator[](uptr idx) const {
     CHECK_LT(idx, kSize);
     // FIXME: CHECK may be too expensive here.
     return map_[idx];
   }
+
  private:
   u8 map_[kSize];
 };
@@ -42,11 +42,9 @@
 // Each value is initially zero and can be set to something else only once.
 // Setting and getting values from multiple threads is safe w/o extra locking.
 template <u64 kSize1, u64 kSize2,
-          typename AddressSpaceViewTy = LocalAddressSpaceView,
           class MapUnmapCallback = NoOpMapUnmapCallback>
 class TwoLevelByteMap {
  public:
-  using AddressSpaceView = AddressSpaceViewTy;
   void Init() {
     internal_memset(map1_, 0, sizeof(map1_));
     mu_.Init();
   }
@@ -72,12 +70,14 @@
     map2[idx % kSize2] = val;
   }

-  u8 operator[] (uptr idx) const {
+  const u8 &operator[](uptr idx) const {
     CHECK_LT(idx, kSize1 * kSize2);
     u8 *map2 = Get(idx / kSize2);
-    if (!map2) return 0;
-    auto value_ptr = AddressSpaceView::Load(&map2[idx % kSize2]);
-    return *value_ptr;
+    if (!map2) {
+      static const u8 zero = 0;
+      return zero;
+    }
+    return map2[idx % kSize2];
   }

  private:
Index: compiler-rt/lib/sanitizer_common/sanitizer_allocator_combined.h
===================================================================
--- compiler-rt/lib/sanitizer_common/sanitizer_allocator_combined.h
+++ compiler-rt/lib/sanitizer_common/sanitizer_allocator_combined.h
@@ -25,12 +25,10 @@
 class CombinedAllocator {
  public:
   using AddressSpaceView = AddressSpaceViewTy;
-  static_assert(is_same<AddressSpaceView,
-                        typename PrimaryAllocator::AddressSpaceView>::value,
-                "PrimaryAllocator is using wrong AddressSpaceView");
-  static_assert(is_same<AddressSpaceView,
-                        typename SecondaryAllocator::AddressSpaceView>::value,
-                "SecondaryAllocator is using wrong AddressSpaceView");
+  using PrimaryAllocatorWithView =
+      typename PrimaryAllocator::template WithView<AddressSpaceView>;
+  using SecondaryAllocatorWithView =
+      typename SecondaryAllocator::template WithView<AddressSpaceView>;

   void InitLinkerInitialized(s32 release_to_os_interval_ms) {
     stats_.InitLinkerInitialized();
@@ -198,7 +196,8 @@
   }

  private:
-  PrimaryAllocator primary_;
-  SecondaryAllocator secondary_;
+  PrimaryAllocatorWithView primary_;
+  SecondaryAllocatorWithView secondary_;
+
   AllocatorGlobalStats stats_;
 };
Index: compiler-rt/lib/sanitizer_common/sanitizer_allocator_internal.h
===================================================================
--- compiler-rt/lib/sanitizer_common/sanitizer_allocator_internal.h
+++ compiler-rt/lib/sanitizer_common/sanitizer_allocator_internal.h
@@ -36,7 +36,6 @@
   static const uptr kMetadataSize = 0;
   typedef InternalSizeClassMap SizeClassMap;
   static const uptr kRegionSizeLog = kInternalAllocatorRegionSizeLog;
-  using AddressSpaceView = LocalAddressSpaceView;
   using ByteMap = __sanitizer::ByteMap;
   typedef NoOpMapUnmapCallback MapUnmapCallback;
   static const uptr kFlags = 0;
Index: compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary32.h
===================================================================
--- compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary32.h
+++ compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary32.h
@@ -47,7 +47,6 @@
 template <class Params>
 class SizeClassAllocator32 {
  public:
-  using AddressSpaceView = typename Params::AddressSpaceView;
   static const uptr kSpaceBeg = Params::kSpaceBeg;
   static const u64 kSpaceSize = Params::kSpaceSize;
   static const uptr kMetadataSize = Params::kMetadataSize;
@@ -56,10 +55,6 @@
   typedef typename Params::ByteMap ByteMap;
   typedef typename Params::MapUnmapCallback MapUnmapCallback;

-  static_assert(
-      is_same<typename ByteMap::AddressSpaceView, AddressSpaceView>::value,
-      "AddressSpaceView type mismatch");
-
   static const bool kRandomShuffleChunks = Params::kFlags &
       SizeClassAllocator32FlagMasks::kRandomShuffleChunks;
   static const bool kUseSeparateSizeClassForBatch = Params::kFlags &
@@ -144,17 +139,6 @@
         alignment <= SizeClassMap::kMaxSize;
   }

-  void *GetMetaData(const void *p) {
-    CHECK(PointerIsMine(p));
-    uptr mem = reinterpret_cast<uptr>(p);
-    uptr beg = ComputeRegionBeg(mem);
-    uptr size = ClassIdToSize(GetSizeClass(p));
-    u32 offset = mem - beg;
-    uptr n = offset / (u32)size;  // 32-bit division
-    uptr meta = (beg + kRegionSize) - (n + 1) * kMetadataSize;
-    return reinterpret_cast<void*>(meta);
-  }
-
   NOINLINE TransferBatch *AllocateBatch(AllocatorStats *stat, AllocatorCache *c,
                                         uptr class_id) {
     DCHECK_LT(class_id, kNumClasses);
@@ -179,50 +163,8 @@
     sci->free_list.push_front(b);
   }

-  bool PointerIsMine(const void *p) {
-    uptr mem = reinterpret_cast<uptr>(p);
-    if (mem < kSpaceBeg || mem >= kSpaceBeg + kSpaceSize)
-      return false;
-    return GetSizeClass(p) != 0;
-  }
-
-  uptr GetSizeClass(const void *p) {
-    return possible_regions[ComputeRegionId(reinterpret_cast<uptr>(p))];
-  }
-
-  void *GetBlockBegin(const void *p) {
-    CHECK(PointerIsMine(p));
-    uptr mem = reinterpret_cast<uptr>(p);
-    uptr beg = ComputeRegionBeg(mem);
-    uptr size = ClassIdToSize(GetSizeClass(p));
-    u32 offset = mem - beg;
-    u32 n = offset / (u32)size;  // 32-bit division
-    uptr res = beg + (n * (u32)size);
-    return reinterpret_cast<void*>(res);
-  }
-
-  uptr GetActuallyAllocatedSize(void *p) {
-    CHECK(PointerIsMine(p));
-    return ClassIdToSize(GetSizeClass(p));
-  }
-
   uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }

-  uptr TotalMemoryUsed() {
-    // No need to lock here.
-    uptr res = 0;
-    for (uptr i = 0; i < kNumPossibleRegions; i++)
-      if (possible_regions[i])
-        res += kRegionSize;
-    return res;
-  }
-
-  void TestOnlyUnmap() {
-    for (uptr i = 0; i < kNumPossibleRegions; i++)
-      if (possible_regions[i])
-        UnmapWithCallback((i * kRegionSize), kRegionSize);
-  }
-
   // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
   // introspection API.
   void ForceLock() {
@@ -237,23 +179,6 @@
     }
   }

-  // Iterate over all existing chunks.
-  // The allocator must be locked when calling this function.
-  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
-    for (uptr region = 0; region < kNumPossibleRegions; region++)
-      if (possible_regions[region]) {
-        uptr chunk_size = ClassIdToSize(possible_regions[region]);
-        uptr max_chunks_in_region = kRegionSize / (chunk_size + kMetadataSize);
-        uptr region_beg = region * kRegionSize;
-        for (uptr chunk = region_beg;
-             chunk < region_beg + max_chunks_in_region * chunk_size;
-             chunk += chunk_size) {
-          // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
-          callback(chunk, arg);
-        }
-      }
-  }
-
   void PrintStats() {}

   static uptr AdditionalSize() { return 0; }
@@ -261,6 +186,82 @@
   typedef SizeClassMap SizeClassMapT;
   static const uptr kNumClasses = SizeClassMap::kNumClasses;

+  template <typename AddressSpaceView>
+  class WithView : public SizeClassAllocator32<Params> {
+   public:
+    uptr GetSizeClass(const void *p) {
+      return *AddressSpaceView::Load(
+          &possible_regions[ComputeRegionId(reinterpret_cast<uptr>(p))]);
+    }
+
+    bool PointerIsMine(const void *p) {
+      uptr mem = reinterpret_cast<uptr>(p);
+      if (mem < kSpaceBeg || mem >= kSpaceBeg + kSpaceSize)
+        return false;
+      return (GetSizeClass(p)) != 0;
+    }
+
+    void *GetBlockBegin(const void *p) {
+      CHECK(PointerIsMine(p));
+      uptr mem = reinterpret_cast<uptr>(p);
+      uptr beg = ComputeRegionBeg(mem);
+      uptr size = ClassIdToSize(GetSizeClass(p));
+      u32 offset = mem - beg;
+      u32 n = offset / (u32)size;  // 32-bit division
+      uptr res = beg + (n * (u32)size);
+      return reinterpret_cast<void *>(res);
+    }
+
+    uptr GetActuallyAllocatedSize(void *p) {
+      CHECK(PointerIsMine(p));
+      return ClassIdToSize(GetSizeClass(p));
+    }
+
+    void *GetMetaData(const void *p) {
+      CHECK(PointerIsMine(p));
+      uptr mem = reinterpret_cast<uptr>(p);
+      uptr beg = ComputeRegionBeg(mem);
+      uptr size = ClassIdToSize(GetSizeClass(p));
+      u32 offset = mem - beg;
+      uptr n = offset / (u32)size;  // 32-bit division
+      uptr meta = (beg + kRegionSize) - (n + 1) * kMetadataSize;
+      return reinterpret_cast<void *>(meta);
+    }
+
+    // Iterate over all existing chunks.
+    // The allocator must be locked when calling this function.
+    void ForEachChunk(ForEachChunkCallback callback, void *arg) {
+      for (uptr region = 0; region < kNumPossibleRegions; region++)
+        if (auto id = *AddressSpaceView::Load(&possible_regions[region])) {
+          uptr chunk_size = ClassIdToSize(id);
+          uptr max_chunks_in_region =
+              kRegionSize / (chunk_size + kMetadataSize);
+          uptr region_beg = region * kRegionSize;
+          for (uptr chunk = region_beg;
+               chunk < region_beg + max_chunks_in_region * chunk_size;
+               chunk += chunk_size) {
+            // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
+            callback(chunk, arg);
+          }
+        }
+    }
+
+    void TestOnlyUnmap() {
+      for (uptr i = 0; i < kNumPossibleRegions; i++)
+        if (*AddressSpaceView::Load(&possible_regions[i]))
+          UnmapWithCallback((i * kRegionSize), kRegionSize);
+    }
+
+    uptr TotalMemoryUsed() {
+      // No need to lock here.
+      uptr res = 0;
+      for (uptr i = 0; i < kNumPossibleRegions; i++)
+        if (*AddressSpaceView::Load(&possible_regions[i]))
+          res += kRegionSize;
+      return res;
+    }
+  };
+
  private:
  static const uptr kRegionSize = 1 << kRegionSizeLog;
  static const uptr kNumPossibleRegions = kSpaceSize / kRegionSize;
Index: compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h
===================================================================
--- compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h
+++ compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h
@@ -45,7 +45,6 @@
 template <class Params>
 class SizeClassAllocator64 {
  public:
-  using AddressSpaceView = typename Params::AddressSpaceView;
   static const uptr kSpaceBeg = Params::kSpaceBeg;
   static const uptr kSpaceSize = Params::kSpaceSize;
   static const uptr kMetadataSize = Params::kMetadataSize;
@@ -287,24 +286,6 @@
     }
   }

-  // Iterate over all existing chunks.
-  // The allocator must be locked when calling this function.
-  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
-    for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
-      RegionInfo *region = GetRegionInfo(class_id);
-      uptr chunk_size = ClassIdToSize(class_id);
-      uptr region_beg = SpaceBeg() + class_id * kRegionSize;
-      uptr region_allocated_user_size =
-          AddressSpaceView::Load(region)->allocated_user;
-      for (uptr chunk = region_beg;
-           chunk < region_beg + region_allocated_user_size;
-           chunk += chunk_size) {
-        // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
-        callback(chunk, arg);
-      }
-    }
-  }
-
   static uptr ClassIdToSize(uptr class_id) {
     return SizeClassMap::Size(class_id);
   }
@@ -549,6 +530,28 @@
     range_tracker.Done();
   }

+  template <typename AddressSpaceView>
+  class WithView : public SizeClassAllocator64<Params> {
+   public:
+    // Iterate over all existing chunks.
+    // The allocator must be locked when calling this function.
+    void ForEachChunk(ForEachChunkCallback callback, void *arg) {
+      for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
+        RegionInfo *region = GetRegionInfo(class_id);
+        uptr chunk_size = ClassIdToSize(class_id);
+        uptr region_beg = SpaceBeg() + class_id * kRegionSize;
+        uptr region_allocated_user_size =
+            AddressSpaceView::Load(region)->allocated_user;
+        for (uptr chunk = region_beg;
+             chunk < region_beg + region_allocated_user_size;
+             chunk += chunk_size) {
+          // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
+          callback(chunk, arg);
+        }
+      }
+    }
+  };
+
  private:
  friend class MemoryMapper;
Index: compiler-rt/lib/sanitizer_common/sanitizer_allocator_secondary.h
===================================================================
--- compiler-rt/lib/sanitizer_common/sanitizer_allocator_secondary.h
+++ compiler-rt/lib/sanitizer_common/sanitizer_allocator_secondary.h
@@ -67,11 +67,9 @@
 // The main purpose of this allocator is to cover large and rare allocation
 // sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
 template <class MapUnmapCallback = NoOpMapUnmapCallback,
-          class PtrArrayT = DefaultLargeMmapAllocatorPtrArray,
-          class AddressSpaceViewTy = LocalAddressSpaceView>
+          class PtrArrayT = DefaultLargeMmapAllocatorPtrArray>
 class LargeMmapAllocator {
  public:
-  using AddressSpaceView = AddressSpaceViewTy;
   void InitLinkerInitialized() {
     page_size_ = GetPageSizeCached();
     chunks_ = reinterpret_cast<Header **>(ptr_array_.Init());
@@ -201,52 +199,6 @@
     return GetUser(h);
   }

-  void EnsureSortedChunks() {
-    if (chunks_sorted_) return;
-    Header **chunks = AddressSpaceView::LoadWritable(chunks_, n_chunks_);
-    Sort(reinterpret_cast<uptr *>(chunks), n_chunks_);
-    for (uptr i = 0; i < n_chunks_; i++)
-      AddressSpaceView::LoadWritable(chunks[i])->chunk_idx = i;
-    chunks_sorted_ = true;
-  }
-
-  // This function does the same as GetBlockBegin, but is much faster.
-  // Must be called with the allocator locked.
-  void *GetBlockBeginFastLocked(void *ptr) {
-    mutex_.CheckLocked();
-    uptr p = reinterpret_cast<uptr>(ptr);
-    uptr n = n_chunks_;
-    if (!n) return nullptr;
-    EnsureSortedChunks();
-    auto min_mmap_ = reinterpret_cast<uptr>(chunks_[0]);
-    auto max_mmap_ =
-        reinterpret_cast<uptr>(chunks_[n - 1]) + chunks_[n - 1]->map_size;
-    if (p < min_mmap_ || p >= max_mmap_)
-      return nullptr;
-    uptr beg = 0, end = n - 1;
-    // This loop is a log(n) lower_bound. It does not check for the exact match
-    // to avoid expensive cache-thrashing loads.
-    while (end - beg >= 2) {
-      uptr mid = (beg + end) / 2;  // Invariant: mid >= beg + 1
-      if (p < reinterpret_cast<uptr>(chunks_[mid]))
-        end = mid - 1;  // We are not interested in chunks_[mid].
-      else
-        beg = mid;  // chunks_[mid] may still be what we want.
-    }
-
-    if (beg < end) {
-      CHECK_EQ(beg + 1, end);
-      // There are 2 chunks left, choose one.
-      if (p >= reinterpret_cast<uptr>(chunks_[end]))
-        beg = end;
-    }
-
-    Header *h = chunks_[beg];
-    if (h->map_beg + h->map_size <= p || p < h->map_beg)
-      return nullptr;
-    return GetUser(h);
-  }
-
   void PrintStats() {
     Printf("Stats: LargeMmapAllocator: allocated %zd times, "
            "remains %zd (%zd K) max %zd M; by size logs: ",
@@ -270,19 +222,72 @@
     mutex_.Unlock();
   }

-  // Iterate over all existing chunks.
-  // The allocator must be locked when calling this function.
-  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
-    EnsureSortedChunks();  // Avoid doing the sort while iterating.
-    const Header *const *chunks = AddressSpaceView::Load(chunks_, n_chunks_);
-    for (uptr i = 0; i < n_chunks_; i++) {
-      const Header *t = chunks[i];
-      callback(reinterpret_cast<uptr>(GetUser(t)), arg);
-      // Consistency check: verify that the array did not change.
-      CHECK_EQ(chunks[i], t);
-      CHECK_EQ(AddressSpaceView::Load(chunks[i])->chunk_idx, i);
+  template <typename AddressSpaceView>
+  class WithView : public LargeMmapAllocator<MapUnmapCallback, PtrArrayT> {
+   public:
+    void EnsureSortedChunks() {
+      if (chunks_sorted_)
+        return;
+      Header **chunks = AddressSpaceView::LoadWritable(chunks_, n_chunks_);
+      Sort(reinterpret_cast<uptr *>(chunks), n_chunks_);
+      for (uptr i = 0; i < n_chunks_; i++)
+        AddressSpaceView::LoadWritable(chunks[i])->chunk_idx = i;
+      chunks_sorted_ = true;
     }
-  }
+
+    // This function does the same as GetBlockBegin, but is much faster.
+    // Must be called with the allocator locked.
+    void *GetBlockBeginFastLocked(void *ptr) {
+      mutex_.CheckLocked();
+      uptr p = reinterpret_cast<uptr>(ptr);
+      uptr n = n_chunks_;
+      if (!n)
+        return nullptr;
+      EnsureSortedChunks();
+      auto min_mmap_ = reinterpret_cast<uptr>(chunks_[0]);
+      auto max_mmap_ =
+          reinterpret_cast<uptr>(chunks_[n - 1]) + chunks_[n - 1]->map_size;
+      if (p < min_mmap_ || p >= max_mmap_)
+        return nullptr;
+      uptr beg = 0, end = n - 1;
+      // This loop is a log(n) lower_bound. It does not check for the exact
+      // match to avoid expensive cache-thrashing loads.
+      while (end - beg >= 2) {
+        uptr mid = (beg + end) / 2;  // Invariant: mid >= beg + 1
+        if (p < reinterpret_cast<uptr>(chunks_[mid]))
+          end = mid - 1;  // We are not interested in chunks_[mid].
+        else
+          beg = mid;  // chunks_[mid] may still be what we want.
+      }
+
+      if (beg < end) {
+        CHECK_EQ(beg + 1, end);
+        // There are 2 chunks left, choose one.
+        if (p >= reinterpret_cast<uptr>(chunks_[end]))
+          beg = end;
+      }
+
+      Header *h = chunks_[beg];
+      if (h->map_beg + h->map_size <= p || p < h->map_beg)
+        return nullptr;
+      return GetUser(h);
+    }
+
+    // Iterate over all existing chunks.
+    // The allocator must be locked when calling this function.
+    void ForEachChunk(ForEachChunkCallback callback, void *arg) {
+      EnsureSortedChunks();  // Avoid doing the sort while
+                             // iterating.
+      const Header *const *chunks = AddressSpaceView::Load(chunks_, n_chunks_);
+      for (uptr i = 0; i < n_chunks_; i++) {
+        const Header *t = chunks[i];
+        callback(reinterpret_cast<uptr>(GetUser(t)), arg);
+        // Consistency check: verify that the array did not change.
+        CHECK_EQ(chunks[i], t);
+        CHECK_EQ(AddressSpaceView::Load(chunks[i])->chunk_idx, i);
+      }
+    }
+  };

  private:
  struct Header {
Index: compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
===================================================================
--- compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
+++ compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
@@ -52,7 +52,6 @@
 typedef DefaultSizeClassMap SizeClassMap;
 #endif

-template <typename AddressSpaceViewTy>
 struct AP64 {  // Allocator Params. Short name for shorter demangled names..
   static const uptr kSpaceBeg = kAllocatorSpace;
   static const uptr kSpaceSize = kAllocatorSize;
@@ -60,10 +59,8 @@
   typedef ::SizeClassMap SizeClassMap;
   typedef NoOpMapUnmapCallback MapUnmapCallback;
   static const uptr kFlags = 0;
-  using AddressSpaceView = AddressSpaceViewTy;
 };

-template <typename AddressSpaceViewTy>
 struct AP64Dyn {
   static const uptr kSpaceBeg = ~(uptr)0;
   static const uptr kSpaceSize = kAllocatorSize;
@@ -71,10 +68,8 @@
   typedef ::SizeClassMap SizeClassMap;
   typedef NoOpMapUnmapCallback MapUnmapCallback;
   static const uptr kFlags = 0;
-  using AddressSpaceView = AddressSpaceViewTy;
 };

-template <typename AddressSpaceViewTy>
 struct AP64Compact {
   static const uptr kSpaceBeg = ~(uptr)0;
   static const uptr kSpaceSize = kAllocatorSize;
@@ -82,10 +77,8 @@
   typedef CompactSizeClassMap SizeClassMap;
   typedef NoOpMapUnmapCallback MapUnmapCallback;
   static const uptr kFlags = 0;
-  using AddressSpaceView = AddressSpaceViewTy;
 };

-template <typename AddressSpaceViewTy>
 struct AP64VeryCompact {
   static const uptr kSpaceBeg = ~(uptr)0;
   static const uptr kSpaceSize = 1ULL << 37;
@@ -93,10 +86,8 @@
   typedef VeryCompactSizeClassMap SizeClassMap;
   typedef NoOpMapUnmapCallback MapUnmapCallback;
   static const uptr kFlags = 0;
-  using AddressSpaceView = AddressSpaceViewTy;
 };

-template <typename AddressSpaceViewTy>
 struct AP64Dense {
   static const uptr kSpaceBeg = kAllocatorSpace;
   static const uptr kSpaceSize = kAllocatorSize;
@@ -104,31 +95,13 @@
   typedef DenseSizeClassMap SizeClassMap;
   typedef NoOpMapUnmapCallback MapUnmapCallback;
   static const uptr kFlags = 0;
-  using AddressSpaceView = AddressSpaceViewTy;
 };

-template <typename AddressSpaceView>
-using Allocator64ASVT = SizeClassAllocator64<AP64<AddressSpaceView>>;
-using Allocator64 = Allocator64ASVT<LocalAddressSpaceView>;
-
-template <typename AddressSpaceView>
-using Allocator64DynamicASVT = SizeClassAllocator64<AP64Dyn<AddressSpaceView>>;
-using Allocator64Dynamic = Allocator64DynamicASVT<LocalAddressSpaceView>;
-
-template <typename AddressSpaceView>
-using Allocator64CompactASVT =
-    SizeClassAllocator64<AP64Compact<AddressSpaceView>>;
-using Allocator64Compact = Allocator64CompactASVT<LocalAddressSpaceView>;
-
-template <typename AddressSpaceView>
-using Allocator64VeryCompactASVT =
-    SizeClassAllocator64<AP64VeryCompact<AddressSpaceView>>;
-using Allocator64VeryCompact =
-    Allocator64VeryCompactASVT<LocalAddressSpaceView>;
-
-template <typename AddressSpaceView>
-using Allocator64DenseASVT = SizeClassAllocator64<AP64Dense<AddressSpaceView>>;
-using Allocator64Dense = Allocator64DenseASVT<LocalAddressSpaceView>;
+using Allocator64 = SizeClassAllocator64<AP64>;
+using Allocator64Dynamic = SizeClassAllocator64<AP64Dyn>;
+using Allocator64Compact = SizeClassAllocator64<AP64Compact>;
+using Allocator64VeryCompact = SizeClassAllocator64<AP64VeryCompact>;
+using Allocator64Dense = SizeClassAllocator64<AP64Dense>;

 #elif defined(__mips64)
 static const u64 kAddressSpaceSize = 1ULL << 40;
@@ -145,22 +118,17 @@
 static const uptr kRegionSizeLog = FIRST_32_SECOND_64(20, 24);
 static const uptr kFlatByteMapSize = kAddressSpaceSize >> kRegionSizeLog;

-template <typename AddressSpaceViewTy>
 struct AP32Compact {
   static const uptr kSpaceBeg = 0;
   static const u64 kSpaceSize = kAddressSpaceSize;
   static const uptr kMetadataSize = 16;
   typedef CompactSizeClassMap SizeClassMap;
   static const uptr kRegionSizeLog = ::kRegionSizeLog;
-  using AddressSpaceView = AddressSpaceViewTy;
-  using ByteMap = FlatByteMap<kFlatByteMapSize, AddressSpaceView>;
+  using ByteMap = FlatByteMap<kFlatByteMapSize>;
   typedef NoOpMapUnmapCallback MapUnmapCallback;
   static const uptr kFlags = 0;
 };
-template <typename AddressSpaceView>
-using Allocator32CompactASVT =
-    SizeClassAllocator32<AP32Compact<AddressSpaceView>>;
-using Allocator32Compact = Allocator32CompactASVT<LocalAddressSpaceView>;
+using Allocator32Compact = SizeClassAllocator32<AP32Compact>;

 template <class SizeClassMap>
 void TestSizeClassMap() {
@@ -169,6 +137,10 @@
   SCMap::Validate();
 }

+template <typename Allocator>
+using TestAllocator =
+    typename Allocator::template WithView<LocalAddressSpaceView>;
+
 TEST(SanitizerCommon, DefaultSizeClassMap) {
   TestSizeClassMap<DefaultSizeClassMap>();
 }
@@ -191,7 +163,7 @@

 template <class Allocator>
 void TestSizeClassAllocator() {
-  Allocator *a = new Allocator;
+  auto *a = new TestAllocator<Allocator>;
   a->Init(kReleaseToOSIntervalNever);
   SizeClassAllocatorLocalCache<Allocator> cache;
   memset(&cache, 0, sizeof(cache));
@@ -228,7 +200,7 @@
       CHECK_GE(a->GetActuallyAllocatedSize(x), size);
       uptr class_id = a->GetSizeClass(x);
       CHECK_EQ(class_id, Allocator::SizeClassMapT::ClassID(size));
-      uptr *metadata = reinterpret_cast<uptr*>(a->GetMetaData(x));
+      uptr *metadata = reinterpret_cast<uptr *>(a->GetMetaData(x));
       metadata[0] = reinterpret_cast<uptr>(x) + 1;
       metadata[1] = 0xABCD;
     }
@@ -236,7 +208,7 @@
     // Deallocate all.
     for (uptr i = 0; i < allocated.size(); i++) {
       void *x = allocated[i];
-      uptr *metadata = reinterpret_cast<uptr*>(a->GetMetaData(x));
+      uptr *metadata = reinterpret_cast<uptr *>(a->GetMetaData(x));
       CHECK_EQ(metadata[0], reinterpret_cast<uptr>(x) + 1);
       CHECK_EQ(metadata[1], 0xABCD);
       cache.Deallocate(a, a->GetSizeClass(x), x);
@@ -291,24 +263,18 @@
   TestSizeClassAllocator<Allocator32Compact>();
 }

-template <typename AddressSpaceViewTy>
 struct AP32SeparateBatches {
   static const uptr kSpaceBeg = 0;
   static const u64 kSpaceSize = kAddressSpaceSize;
   static const uptr kMetadataSize = 16;
   typedef DefaultSizeClassMap SizeClassMap;
   static const uptr kRegionSizeLog = ::kRegionSizeLog;
-  using AddressSpaceView = AddressSpaceViewTy;
-  using ByteMap = FlatByteMap<kFlatByteMapSize, AddressSpaceView>;
+  using ByteMap = FlatByteMap<kFlatByteMapSize>;
   typedef NoOpMapUnmapCallback MapUnmapCallback;
   static const uptr kFlags =
       SizeClassAllocator32FlagMasks::kUseSeparateSizeClassForBatch;
 };
-template <typename AddressSpaceView>
-using Allocator32SeparateBatchesASVT =
-    SizeClassAllocator32<AP32SeparateBatches<AddressSpaceView>>;
-using Allocator32SeparateBatches =
-    Allocator32SeparateBatchesASVT<LocalAddressSpaceView>;
+using Allocator32SeparateBatches = SizeClassAllocator32<AP32SeparateBatches>;

 TEST(SanitizerCommon, SizeClassAllocator32SeparateBatches) {
   TestSizeClassAllocator<Allocator32SeparateBatches>();
 }
@@ -316,7 +282,7 @@

 template <class Allocator>
 void SizeClassAllocatorMetadataStress() {
-  Allocator *a = new Allocator;
+  auto *a = new TestAllocator<Allocator>;
   a->Init(kReleaseToOSIntervalNever);
   SizeClassAllocatorLocalCache<Allocator> cache;
   memset(&cache, 0, sizeof(cache));
@@ -370,7 +336,7 @@

 template <class Allocator>
 void SizeClassAllocatorGetBlockBeginStress(u64 TotalSize) {
-  Allocator *a = new Allocator;
+  auto *a = new TestAllocator<Allocator>;
   a->Init(kReleaseToOSIntervalNever);
   SizeClassAllocatorLocalCache<Allocator> cache;
   memset(&cache, 0, sizeof(cache));
@@ -431,7 +397,6 @@
 // to run them all at the same time. FIXME: Make them not flaky and reenable.
 #if !SANITIZER_WINDOWS

-template <typename AddressSpaceViewTy>
 struct AP64WithCallback {
   static const uptr kSpaceBeg = kAllocatorSpace;
   static const uptr kSpaceSize = kAllocatorSize;
@@ -439,14 +404,13 @@
   typedef ::SizeClassMap SizeClassMap;
   typedef TestMapUnmapCallback MapUnmapCallback;
   static const uptr kFlags = 0;
-  using AddressSpaceView = AddressSpaceViewTy;
 };

 TEST(SanitizerCommon, SizeClassAllocator64MapUnmapCallback) {
   TestMapUnmapCallback::map_count = 0;
   TestMapUnmapCallback::unmap_count = 0;
-  typedef SizeClassAllocator64<AP64WithCallback<LocalAddressSpaceView>> Allocator64WithCallBack;
-  Allocator64WithCallBack *a = new Allocator64WithCallBack;
+  typedef SizeClassAllocator64<AP64WithCallback> Allocator64WithCallBack;
+  auto *a = new TestAllocator<Allocator64WithCallBack>;
   a->Init(kReleaseToOSIntervalNever);
   EXPECT_EQ(TestMapUnmapCallback::map_count, 1);  // Allocator state.
   SizeClassAllocatorLocalCache<Allocator64WithCallBack> cache;
@@ -466,15 +430,13 @@
 #endif
 #endif

-template <typename AddressSpaceViewTy>
 struct AP32WithCallback {
   static const uptr kSpaceBeg = 0;
   static const u64 kSpaceSize = kAddressSpaceSize;
   static const uptr kMetadataSize = 16;
   typedef CompactSizeClassMap SizeClassMap;
   static const uptr kRegionSizeLog = ::kRegionSizeLog;
-  using AddressSpaceView = AddressSpaceViewTy;
-  using ByteMap = FlatByteMap<kFlatByteMapSize, AddressSpaceView>;
+  using ByteMap = FlatByteMap<kFlatByteMapSize>;
   typedef TestMapUnmapCallback MapUnmapCallback;
   static const uptr kFlags = 0;
 };
@@ -482,8 +444,8 @@
 TEST(SanitizerCommon, SizeClassAllocator32MapUnmapCallback) {
   TestMapUnmapCallback::map_count = 0;
   TestMapUnmapCallback::unmap_count = 0;
-  typedef SizeClassAllocator32<AP32WithCallback<LocalAddressSpaceView>> Allocator32WithCallBack;
-  Allocator32WithCallBack *a = new Allocator32WithCallBack;
+  typedef SizeClassAllocator32<AP32WithCallback> Allocator32WithCallBack;
+  auto *a = new TestAllocator<Allocator32WithCallBack>;
   a->Init(kReleaseToOSIntervalNever);
   EXPECT_EQ(TestMapUnmapCallback::map_count, 0);
   SizeClassAllocatorLocalCache<Allocator32WithCallBack> cache;
@@ -504,7 +466,7 @@
 TEST(SanitizerCommon, LargeMmapAllocatorMapUnmapCallback) {
   TestMapUnmapCallback::map_count = 0;
   TestMapUnmapCallback::unmap_count = 0;
-  LargeMmapAllocator<TestMapUnmapCallback> a;
+  TestAllocator<LargeMmapAllocator<TestMapUnmapCallback>> a;
   a.Init();
   AllocatorStats stats;
   stats.Init();
@@ -518,7 +480,7 @@
 // machine to OOM.
 #if SANITIZER_CAN_USE_ALLOCATOR64 && !SANITIZER_WINDOWS64 && !SANITIZER_ANDROID
 TEST(SanitizerCommon, SizeClassAllocator64Overflow) {
-  Allocator64 a;
+  TestAllocator<Allocator64> a;
   a.Init(kReleaseToOSIntervalNever);
   SizeClassAllocatorLocalCache<Allocator64> cache;
   memset(&cache, 0, sizeof(cache));
@@ -542,7 +504,7 @@
 #endif

 TEST(SanitizerCommon, LargeMmapAllocator) {
-  LargeMmapAllocator<NoOpMapUnmapCallback> a;
+  TestAllocator<LargeMmapAllocator<NoOpMapUnmapCallback>> a;
   a.Init();
   AllocatorStats stats;
   stats.Init();
@@ -569,7 +531,7 @@
   for (int i = 0; i < kNumAllocs; i++) {
     char *x = (char *)a.Allocate(&stats, size, 1);
     CHECK_GE(a.GetActuallyAllocatedSize(x), size);
-    uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(x));
+    uptr *meta = reinterpret_cast<uptr *>(a.GetMetaData(x));
     *meta = i;
     allocated[i] = x;
   }
@@ -583,7 +545,7 @@
   for (int i = 0; i < kNumAllocs; i++) {
     int idx = kNumAllocs - i - 1;
     char *p = allocated[idx];
-    uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(p));
+    uptr *meta = reinterpret_cast<uptr *>(a.GetMetaData(p));
     CHECK_EQ(*meta, idx);
     CHECK(a.PointerIsMine(p));
     a.Deallocate(&stats, p);
@@ -625,7 +587,7 @@
   typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
                             SecondaryAllocator, LocalAddressSpaceView>
       Allocator;
-  Allocator *a = new Allocator;
+  auto *a = new Allocator;
   a->Init(kReleaseToOSIntervalNever);

   std::mt19937 r;
@@ -726,7 +688,7 @@
 void TestSizeClassAllocatorLocalCache() {
   AllocatorCache cache;
   typedef typename AllocatorCache::Allocator Allocator;
-  Allocator *a = new Allocator();
+  auto *a = new TestAllocator<Allocator>();
   a->Init(kReleaseToOSIntervalNever);
   memset(&cache, 0, sizeof(cache));
@@ -802,7 +764,7 @@

 TEST(SanitizerCommon, AllocatorLeakTest) {
   typedef AllocatorCache::Allocator Allocator;
-  Allocator a;
+  TestAllocator<Allocator> a;
   a.Init(kReleaseToOSIntervalNever);
   uptr total_used_memory = 0;
   for (int i = 0; i < 100; i++) {
@@ -835,7 +797,7 @@

 // The allocator cache is supposed to be POD and zero initialized. We should be
 // able to call Deallocate on a zeroed cache, and it will self-initialize.
 TEST(Allocator, AllocatorCacheDeallocNewThread) {
-  AllocatorCache::Allocator allocator;
+  TestAllocator<AllocatorCache::Allocator> allocator;
   allocator.Init(kReleaseToOSIntervalNever);
   AllocatorCache main_cache;
   AllocatorCache child_cache;
@@ -906,7 +868,7 @@

 template <class Allocator>
 void TestSizeClassAllocatorIteration() {
-  Allocator *a = new Allocator;
+  auto *a = new TestAllocator<Allocator>;
   a->Init(kReleaseToOSIntervalNever);
   SizeClassAllocatorLocalCache<Allocator> cache;
   memset(&cache, 0, sizeof(cache));
@@ -964,7 +926,7 @@
 }

 TEST(SanitizerCommon, LargeMmapAllocatorIteration) {
-  LargeMmapAllocator<NoOpMapUnmapCallback> a;
+  TestAllocator<LargeMmapAllocator<NoOpMapUnmapCallback>> a;
   a.Init();
   AllocatorStats stats;
   stats.Init();
@@ -991,7 +953,7 @@
 }

 TEST(SanitizerCommon, LargeMmapAllocatorBlockBegin) {
-  LargeMmapAllocator<NoOpMapUnmapCallback> a;
+  TestAllocator<LargeMmapAllocator<NoOpMapUnmapCallback>> a;
   a.Init();
   AllocatorStats stats;
   stats.Init();
@@ -1032,7 +994,6 @@
 // machine to OOM.
 #if SANITIZER_CAN_USE_ALLOCATOR64 && !SANITIZER_WINDOWS64 && !SANITIZER_ANDROID
 typedef SizeClassMap<3, 4, 8, 63, 128, 16> SpecialSizeClassMap;
-template <typename AddressSpaceViewTy>
 struct AP64_SpecialSizeClassMap {
   static const uptr kSpaceBeg = kAllocatorSpace;
   static const uptr kSpaceSize = kAllocatorSize;
@@ -1040,16 +1001,15 @@
   typedef SpecialSizeClassMap SizeClassMap;
   typedef NoOpMapUnmapCallback MapUnmapCallback;
   static const uptr kFlags = 0;
-  using AddressSpaceView = AddressSpaceViewTy;
 };

 // Regression test for out-of-memory condition in PopulateFreeList().
 TEST(SanitizerCommon, SizeClassAllocator64PopulateFreeListOOM) {
   // In a world where regions are small and chunks are huge...
-  typedef SizeClassAllocator64<AP64_SpecialSizeClassMap<LocalAddressSpaceView>> SpecialAllocator64;
+  typedef SizeClassAllocator64<AP64_SpecialSizeClassMap> SpecialAllocator64;
   const uptr kRegionSize =
       kAllocatorSize / SpecialSizeClassMap::kNumClassesRounded;
-  SpecialAllocator64 *a = new SpecialAllocator64;
+  TestAllocator<SpecialAllocator64> *a = new TestAllocator<SpecialAllocator64>;
   a->Init(kReleaseToOSIntervalNever);
   SizeClassAllocatorLocalCache<SpecialAllocator64> cache;
   memset(&cache, 0, sizeof(cache));
@@ -1381,10 +1341,7 @@
   m.TestOnlyUnmap();
 }

-template <typename AddressSpaceView>
-using TestByteMapASVT =
-    TwoLevelByteMap<1 << 12, 1 << 13, AddressSpaceView, TestMapUnmapCallback>;
-using TestByteMap = TestByteMapASVT<LocalAddressSpaceView>;
+using TestByteMap = TwoLevelByteMap<1 << 12, 1 << 13, TestMapUnmapCallback>;

 struct TestByteMapParam {
   TestByteMap *m;
Index: compiler-rt/lib/scudo/scudo_allocator.h
===================================================================
--- compiler-rt/lib/scudo/scudo_allocator.h
+++ compiler-rt/lib/scudo/scudo_allocator.h
@@ -80,7 +80,6 @@
   typedef NoOpMapUnmapCallback MapUnmapCallback;
   static const uptr kFlags =
       SizeClassAllocator64FlagMasks::kRandomShuffleChunks;
-  using AddressSpaceView = LocalAddressSpaceView;
 };
 typedef SizeClassAllocator64<AP64> PrimaryT;
 #else
@@ -96,7 +95,6 @@
   static const uptr kMetadataSize = 0;
   typedef __scudo::SizeClassMap SizeClassMap;
   static const uptr kRegionSizeLog = RegionSizeLog;
-  using AddressSpaceView = LocalAddressSpaceView;
   using ByteMap = __scudo::ByteMap;
   typedef NoOpMapUnmapCallback MapUnmapCallback;
   static const uptr kFlags =
Index: compiler-rt/lib/tsan/rtl/tsan_rtl.h
===================================================================
--- compiler-rt/lib/tsan/rtl/tsan_rtl.h
+++ compiler-rt/lib/tsan/rtl/tsan_rtl.h
@@ -58,15 +58,14 @@
 static const uptr kAllocatorRegionSizeLog = 20;
 static const uptr kAllocatorNumRegions =
     SANITIZER_MMAP_RANGE_SIZE >> kAllocatorRegionSizeLog;
-using ByteMap = TwoLevelByteMap<(kAllocatorNumRegions >> 12), 1 << 12,
-                                LocalAddressSpaceView, MapUnmapCallback>;
+using ByteMap =
+    TwoLevelByteMap<(kAllocatorNumRegions >> 12), 1 << 12, MapUnmapCallback>;
 struct AP32 {
   static const uptr kSpaceBeg = 0;
   static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
   static const uptr kMetadataSize = 0;
   typedef __sanitizer::CompactSizeClassMap SizeClassMap;
   static const uptr kRegionSizeLog = kAllocatorRegionSizeLog;
-  using AddressSpaceView = LocalAddressSpaceView;
   using ByteMap = __tsan::ByteMap;
   typedef __tsan::MapUnmapCallback MapUnmapCallback;
   static const uptr kFlags = 0;
@@ -80,7 +79,6 @@
   typedef DefaultSizeClassMap SizeClassMap;
   typedef __tsan::MapUnmapCallback MapUnmapCallback;
   static const uptr kFlags = 0;
-  using AddressSpaceView = LocalAddressSpaceView;
 };
 typedef SizeClassAllocator64<AP64> PrimaryAllocator;
 #endif
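
Taken together, the hunks drop the AddressSpaceView parameter that was threaded through every allocator parameter struct (AP32/AP64) and every *ASVT alias, and instead give each allocator a nested WithView<AddressSpaceView> class carrying only the view-dependent operations (GetSizeClass, GetMetaData, ForEachChunk, and so on); in-process code keeps the plain allocator type, and only view-aware consumers such as CombinedAllocator's PrimaryAllocatorWithView/SecondaryAllocatorWithView members or the tests' TestAllocator alias instantiate the nested variant. A minimal sketch of that usage, assuming the names from the hunks above (AP64, SizeClassAllocator64, LocalAddressSpaceView, ForceLock/ForceUnlock, ForEachChunk); CountChunks and CountAllChunks are hypothetical helpers used only for illustration and are not part of the patch:

    using PrimaryAllocator = SizeClassAllocator64<AP64>;

    // In-process code keeps using PrimaryAllocator directly; a tool that reads
    // allocator metadata through an address-space view instantiates the nested
    // view-aware variant instead.
    using PrimaryAllocatorWithLocalView =
        PrimaryAllocator::WithView<LocalAddressSpaceView>;

    // Hypothetical ForEachChunkCallback: counts the chunks it is handed.
    static void CountChunks(uptr chunk, void *arg) {
      (void)chunk;
      ++*static_cast<uptr *>(arg);
    }

    static uptr CountAllChunks(PrimaryAllocatorWithLocalView *a) {
      uptr n = 0;
      a->ForceLock();  // ForEachChunk must be called with the allocator locked.
      a->ForEachChunk(CountChunks, &n);
      a->ForceUnlock();
      return n;
    }

Because WithView derives from the plain allocator, the sketch's ForceLock/ForceUnlock come from the base class while ForEachChunk is the view-aware override added in the WithView block of sanitizer_allocator_primary64.h.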