Index: lib/sanitizer_common/sanitizer_allocator_primary32.h
===================================================================
--- lib/sanitizer_common/sanitizer_allocator_primary32.h
+++ lib/sanitizer_common/sanitizer_allocator_primary32.h
@@ -24,7 +24,7 @@
 // be returned by MmapOrDie().
 //
 // Region:
-//   a result of an allocation of kRegionSize bytes aligned on kRegionSize.
+//   a result of a single call to MmapAlignedOrDie(kRegionSize, kRegionSize).
 // Since the regions are aligned by kRegionSize, there are exactly
 // kNumPossibleRegions possible regions in the address space and so we keep
 // a ByteMap possible_regions to store the size classes of each Region.
@@ -106,7 +106,6 @@
   void Init(s32 release_to_os_interval_ms) {
     possible_regions.TestOnlyInit();
     internal_memset(size_class_info_array, 0, sizeof(size_class_info_array));
-    num_stashed_regions = 0;
   }
 
   s32 ReleaseToOSIntervalMs() const {
@@ -276,49 +275,15 @@
     return mem & ~(kRegionSize - 1);
   }
 
-  // Allocates a region of kRegionSize bytes, aligned on kRegionSize. If we get
-  // more than one region back (in the event the allocation is aligned on the
-  // first try), attempt to store the second region into a stash. If the stash
-  // is full, just unmap the superfluous memory.
-  uptr AllocateRegionSlow(AllocatorStats *stat) {
-    uptr map_size = kRegionSize;
-    uptr padding_chunk;
-    uptr region = reinterpret_cast<uptr>(
-        MmapAlignedOrDie(kRegionSize, kRegionSize, "SizeClassAllocator32",
-                         &padding_chunk));
-    if (padding_chunk) {
-      // We have an extra region, attempt to stash it.
-      CHECK_EQ(padding_chunk, region + kRegionSize);
-      bool trim_extra = true;
-      {
-        SpinMutexLock l(&regions_stash_mutex);
-        if (num_stashed_regions < kMaxStashedRegions) {
-          regions_stash[num_stashed_regions++] = padding_chunk;
-          map_size = 2 * kRegionSize;
-          trim_extra = false;
-        }
-      }
-      if (trim_extra)
-        UnmapOrDie((void*)padding_chunk, kRegionSize);
-    }
-    MapUnmapCallback().OnMap(region, map_size);
-    stat->Add(AllocatorStatMapped, map_size);
-    return region;
-  }
-
   uptr AllocateRegion(AllocatorStats *stat, uptr class_id) {
     CHECK_LT(class_id, kNumClasses);
-    uptr region = 0;
-    {
-      SpinMutexLock l(&regions_stash_mutex);
-      if (num_stashed_regions > 0)
-        region = regions_stash[--num_stashed_regions];
-    }
-    if (!region)
-      region = AllocateRegionSlow(stat);
-    CHECK(IsAligned(region, kRegionSize));
-    possible_regions.set(ComputeRegionId(region), static_cast<u8>(class_id));
-    return region;
+    uptr res = reinterpret_cast<uptr>(MmapAlignedOrDie(kRegionSize, kRegionSize,
+                                                       "SizeClassAllocator32"));
+    MapUnmapCallback().OnMap(res, kRegionSize);
+    stat->Add(AllocatorStatMapped, kRegionSize);
+    CHECK_EQ(0U, (res & (kRegionSize - 1)));
+    possible_regions.set(ComputeRegionId(res), static_cast<u8>(class_id));
+    return res;
   }
 
   SizeClassInfo *GetSizeClassInfo(uptr class_id) {
@@ -351,13 +316,6 @@
     }
   }
 
-  // Unless several threads request regions simultaneously from different size
-  // classes, the stash rarely contains more than 1 entry.
-  static const uptr kMaxStashedRegions = 8;
-  SpinMutex regions_stash_mutex;
-  uptr num_stashed_regions;
-  uptr regions_stash[kMaxStashedRegions];
-
   ByteMap possible_regions;
   SizeClassInfo size_class_info_array[kNumClasses];
 };
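Note: the stash deleted above is, in essence, a fixed-capacity free list
of pre-mapped regions guarded by a spin lock: a surplus region gets
pushed instead of unmapped, and a later allocation pops it before
falling back to mmap. A minimal standalone sketch of the pattern, with
hypothetical names (RegionStash is not in the tree, and std::mutex
stands in for the runtime's SpinMutex):

    #include <cstdint>
    #include <mutex>

    // Fixed-capacity stash of region start addresses.
    class RegionStash {
     public:
      static const int kMaxStashedRegions = 8;  // mirrors the deleted constant

      // Keeps a surplus region for later reuse; returns false when the stash
      // is full, in which case the caller should unmap the region instead.
      bool Push(std::uintptr_t region) {
        std::lock_guard<std::mutex> l(mutex_);
        if (num_stashed_ == kMaxStashedRegions) return false;
        regions_[num_stashed_++] = region;
        return true;
      }

      // Returns a previously stashed region, or 0 when the stash is empty.
      std::uintptr_t Pop() {
        std::lock_guard<std::mutex> l(mutex_);
        return num_stashed_ > 0 ? regions_[--num_stashed_] : 0;
      }

     private:
      std::mutex mutex_;
      int num_stashed_ = 0;
      std::uintptr_t regions_[kMaxStashedRegions];
    };

The revert trades that reuse away: AllocateRegion now simply maps a
fresh region every time.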
Index: lib/sanitizer_common/sanitizer_common.h
===================================================================
--- lib/sanitizer_common/sanitizer_common.h
+++ lib/sanitizer_common/sanitizer_common.h
@@ -92,15 +92,7 @@
 void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name = nullptr);
 void *MmapNoAccess(uptr size);
 // Map aligned chunk of address space; size and alignment are powers of two.
-// Since the predominant use case of this function is "size == alignment" and
-// the nature of the way the alignment requirement is satisfied (by allocating
-// size+alignment bytes of memory), there's a potential of address space
-// fragmentation. The padding_chunk parameter provides the opportunity to
-// return the contiguous padding of "size" bytes of the allocated chunk if the
-// initial allocation happened to be perfectly aligned and the platform supports
-// partial unmapping of the mapped region.
-void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type,
-                       uptr *padding_chunk);
+void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type);
 // Disallow access to a memory range. Use MmapFixedNoAccess to allocate an
 // unaccessible memory.
 bool MprotectNoAccess(uptr addr, uptr size);
Index: lib/sanitizer_common/sanitizer_posix.cc
===================================================================
--- lib/sanitizer_common/sanitizer_posix.cc
+++ lib/sanitizer_common/sanitizer_posix.cc
@@ -146,29 +146,22 @@
 }
 
 // We want to map a chunk of address space aligned to 'alignment'.
 // We do it by mapping a bit more and then unmapping redundant pieces.
 // We probably can do it with fewer syscalls in some OS-dependent way.
-void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type,
-                       uptr* padding_chunk) {
+void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type) {
   CHECK(IsPowerOfTwo(size));
   CHECK(IsPowerOfTwo(alignment));
   uptr map_size = size + alignment;
   uptr map_res = (uptr)MmapOrDie(map_size, mem_type);
   uptr map_end = map_res + map_size;
-  bool is_aligned = IsAligned(map_res, alignment);
-  if (is_aligned && padding_chunk && size == alignment) {
-    *padding_chunk = map_res + size;
-    return (void *)map_res;
-  }
-  if (padding_chunk)
-    *padding_chunk = 0;
   uptr res = map_res;
-  if (!is_aligned) {
-    res = (map_res + alignment - 1) & ~(alignment - 1);
-    UnmapOrDie((void*)map_res, res - map_res);
-  }
+  if (res & (alignment - 1))  // Not aligned.
+    res = (map_res + alignment) & ~(alignment - 1);
   uptr end = res + size;
-  UnmapOrDie((void*)end, map_end - end);
+  if (res != map_res)
+    UnmapOrDie((void*)map_res, res - map_res);
+  if (end != map_end)
+    UnmapOrDie((void*)end, map_end - end);
   return (void*)res;
 }
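Note: the code restored here is the classic over-map-and-trim trick: map
size + alignment bytes, round the start up to the next alignment
boundary, and return the unused head and tail to the OS. A
self-contained POSIX sketch of the same arithmetic, using raw
mmap/munmap rather than the MmapOrDie/UnmapOrDie wrappers (MapAligned is
a hypothetical name; size and alignment are assumed to be power-of-two
multiples of the page size):

    #include <sys/mman.h>
    #include <cstddef>
    #include <cstdint>

    // Returns a size-byte mapping whose start address is a multiple of
    // alignment, or nullptr on failure.
    void *MapAligned(std::size_t size, std::size_t alignment) {
      std::size_t map_size = size + alignment;
      void *p = mmap(nullptr, map_size, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (p == MAP_FAILED) return nullptr;
      std::uintptr_t map_res = reinterpret_cast<std::uintptr_t>(p);
      std::uintptr_t map_end = map_res + map_size;
      std::uintptr_t res = map_res;
      if (res & (alignment - 1))  // Not aligned: round up.
        res = (map_res + alignment) & ~(alignment - 1);
      std::uintptr_t end = res + size;
      if (res != map_res)  // Trim the unused head.
        munmap(reinterpret_cast<void *>(map_res), res - map_res);
      if (end != map_end)  // Trim the unused tail.
        munmap(reinterpret_cast<void *>(end), map_end - end);
      return reinterpret_cast<void *>(res);
    }

The padding_chunk machinery removed by this patch existed to avoid the
tail munmap in the common size == alignment case by handing the spare
chunk back to the caller; the revert accepts the extra syscall in
exchange for the simpler interface.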
Index: lib/sanitizer_common/sanitizer_win.cc
===================================================================
--- lib/sanitizer_common/sanitizer_win.cc
+++ lib/sanitizer_common/sanitizer_win.cc
@@ -132,14 +132,10 @@
 }
 
 // We want to map a chunk of address space aligned to 'alignment'.
-void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type,
-                       uptr *padding_chunk) {
+void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type) {
   CHECK(IsPowerOfTwo(size));
   CHECK(IsPowerOfTwo(alignment));
-  if (padding_chunk)
-    *padding_chunk = 0;
-
   // Windows will align our allocations to at least 64K.
   alignment = Max(alignment, GetMmapGranularity());
Index: lib/sanitizer_common/tests/sanitizer_common_test.cc
===================================================================
--- lib/sanitizer_common/tests/sanitizer_common_test.cc
+++ lib/sanitizer_common/tests/sanitizer_common_test.cc
@@ -77,8 +77,8 @@
   for (uptr size = 1; size <= 32; size *= 2) {
     for (uptr alignment = 1; alignment <= 32; alignment *= 2) {
       for (int iter = 0; iter < 100; iter++) {
-        uptr res = (uptr)MmapAlignedOrDie(size * PageSize, alignment * PageSize,
-                                          "MmapAlignedOrDieTest", nullptr);
+        uptr res = (uptr)MmapAlignedOrDie(
+            size * PageSize, alignment * PageSize, "MmapAlignedOrDieTest");
         EXPECT_EQ(0U, res % (alignment * PageSize));
         internal_memset((void*)res, 1, size * PageSize);
         UnmapOrDie((void*)res, size * PageSize);
@@ -87,37 +87,6 @@
   }
 }
 
-TEST(SanitizerCommon, MmapAlignedOrDiePaddingChunk) {
-  uptr PageSize = GetPageSizeCached();
-  for (uptr size = 1; size <= 32; size *= 2) {
-    for (uptr alignment = 1; alignment <= 32; alignment *= 2) {
-      for (int iter = 0; iter < 100; iter++) {
-        uptr padding_chunk;
-        uptr res = (uptr)MmapAlignedOrDie(size * PageSize, alignment * PageSize,
-            "MmapAlignedOrDiePaddingChunkTest", &padding_chunk);
-        EXPECT_EQ(0U, res % (alignment * PageSize));
-        internal_memset((void*)res, 1, size * PageSize);
-        UnmapOrDie((void*)res, size * PageSize);
-        if (SANITIZER_WINDOWS || (size != alignment)) {
-          // Not supported on Windows or for different size and alignment.
-          EXPECT_EQ(0U, padding_chunk);
-          continue;
-        }
-        if (size == 1 && alignment == 1) {
-          // mmap returns PageSize aligned chunks, so this is a specific case
-          // where we can check that padding_chunk will never be 0.
-          EXPECT_NE(0U, padding_chunk);
-        }
-        if (padding_chunk) {
-          EXPECT_EQ(res + size * PageSize, padding_chunk);
-          internal_memset((void*)padding_chunk, 1, alignment * PageSize);
-          UnmapOrDie((void*)padding_chunk, alignment * PageSize);
-        }
-      }
-    }
-  }
-}
-
 #if SANITIZER_LINUX
 TEST(SanitizerCommon, SanitizerSetThreadName) {
   const char *names[] = {