diff --git a/compiler-rt/lib/scudo/standalone/CMakeLists.txt b/compiler-rt/lib/scudo/standalone/CMakeLists.txt
--- a/compiler-rt/lib/scudo/standalone/CMakeLists.txt
+++ b/compiler-rt/lib/scudo/standalone/CMakeLists.txt
@@ -77,7 +77,6 @@
   fuchsia.cpp
   linux.cpp
   report.cpp
-  secondary.cpp
   string_utils.cpp
   )

diff --git a/compiler-rt/lib/scudo/standalone/allocator_config.h b/compiler-rt/lib/scudo/standalone/allocator_config.h
--- a/compiler-rt/lib/scudo/standalone/allocator_config.h
+++ b/compiler-rt/lib/scudo/standalone/allocator_config.h
@@ -14,6 +14,7 @@
 #include "flags.h"
 #include "primary32.h"
 #include "primary64.h"
+#include "secondary.h"
 #include "size_class_map.h"
 #include "tsd_exclusive.h"
 #include "tsd_shared.h"
@@ -31,6 +32,7 @@
   // 512KB regions
   typedef SizeClassAllocator32<DefaultSizeClassMap, 19U> Primary;
 #endif
+  typedef MapAllocator<> Secondary;
   template <class A> using TSDRegistryT = TSDRegistryExT<A>; // Exclusive
 };
@@ -43,6 +45,7 @@
   // 512KB regions
   typedef SizeClassAllocator32<AndroidSizeClassMap, 19U> Primary;
 #endif
+  typedef MapAllocator<> Secondary;
   template <class A>
   using TSDRegistryT = TSDRegistrySharedT<A, 2U>; // Shared, max 2 TSDs.
 };
@@ -56,6 +59,7 @@
   // 64KB regions
   typedef SizeClassAllocator32<SvelteSizeClassMap, 16U> Primary;
 #endif
+  typedef MapAllocator<0U> Secondary;
   template <class A>
   using TSDRegistryT = TSDRegistrySharedT<A, 1U>; // Shared, only 1 TSD.
 };
@@ -63,6 +67,7 @@
 struct FuchsiaConfig {
   // 1GB Regions
   typedef SizeClassAllocator64<DefaultSizeClassMap, 30U> Primary;
+  typedef MapAllocator<> Secondary;
   template <class A>
   using TSDRegistryT = TSDRegistrySharedT<A, 8U>; // Shared, max 8 TSDs.
 };
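With the Secondary now a per-configuration typedef, the size of its free list becomes a tuning knob alongside the Primary parameters. As a rough sketch of what an out-of-tree configuration could look like (MyConfig and the 16U cache size are hypothetical, not part of this patch; the Primary and TSD lines mirror DefaultConfig):

struct MyConfig {
#if SCUDO_CAN_USE_PRIMARY64
  // 1GB regions, as in DefaultConfig.
  typedef scudo::SizeClassAllocator64<scudo::DefaultSizeClassMap, 30U> Primary;
#else
  // 512KB regions, as in DefaultConfig.
  typedef scudo::SizeClassAllocator32<scudo::DefaultSizeClassMap, 19U> Primary;
#endif
  // Hypothetical: cache up to 16 freed Secondary blocks instead of the default.
  typedef scudo::MapAllocator<16U> Secondary;
  template <class A> using TSDRegistryT = scudo::TSDRegistryExT<A>;
};

Passing 0U disables the cache entirely, which is what AndroidSvelteConfig does above to keep its memory footprint minimal.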
diff --git a/compiler-rt/lib/scudo/standalone/combined.h b/compiler-rt/lib/scudo/standalone/combined.h
--- a/compiler-rt/lib/scudo/standalone/combined.h
+++ b/compiler-rt/lib/scudo/standalone/combined.h
@@ -449,7 +449,7 @@
   }

 private:
-  typedef MapAllocator SecondaryT;
+  using SecondaryT = typename Params::Secondary;
   typedef typename PrimaryT::SizeClassMap SizeClassMap;
   static const uptr MinAlignmentLog = SCUDO_MIN_ALIGNMENT_LOG;

diff --git a/compiler-rt/lib/scudo/standalone/secondary.h b/compiler-rt/lib/scudo/standalone/secondary.h
--- a/compiler-rt/lib/scudo/standalone/secondary.h
+++ b/compiler-rt/lib/scudo/standalone/secondary.h
@@ -48,7 +48,7 @@

 } // namespace LargeBlock

-class MapAllocator {
+template <uptr MaxFreeListSize = 32U> class MapAllocator {
 public:
   void initLinkerInitialized(GlobalStats *S) {
     Stats.initLinkerInitialized();
@@ -83,9 +83,13 @@
       Callback(reinterpret_cast<uptr>(&H) + LargeBlock::getHeaderSize());
   }

+  static uptr getMaxFreeListSize(void) { return MaxFreeListSize; }
+
 private:
   HybridMutex Mutex;
   DoublyLinkedList<LargeBlock::Header> InUseBlocks;
+  // The free list is sorted based on the committed size of blocks.
+  DoublyLinkedList<LargeBlock::Header> FreeBlocks;
   uptr AllocatedBytes;
   uptr FreedBytes;
   uptr LargestSize;
@@ -94,6 +98,154 @@
   LocalStats Stats;
 };

+// As with the Primary, the size passed to this function includes any desired
+// alignment, so that the frontend can align the user allocation. The hint
+// parameter allows us to unmap spurious memory when dealing with larger
+// (greater than a page) alignments on 32-bit platforms.
+// Due to the sparsity of the address space available on those platforms,
+// requesting an allocation from the Secondary with a large alignment would
+// end up wasting VA space (even though we are not committing the whole
+// thing), hence the need to trim off some of the reserved space.
+// For allocations requested with an alignment greater than or equal to a page,
+// the committed memory will amount to something close to Size - AlignmentHint
+// (pending rounding and headers).
+template <uptr MaxFreeListSize>
+void *MapAllocator<MaxFreeListSize>::allocate(uptr Size, uptr AlignmentHint,
+                                              uptr *BlockEnd) {
+  DCHECK_GT(Size, AlignmentHint);
+  const uptr PageSize = getPageSizeCached();
+  const uptr RoundedSize =
+      roundUpTo(Size + LargeBlock::getHeaderSize(), PageSize);
+
+  if (MaxFreeListSize && AlignmentHint < PageSize) {
+    ScopedLock L(Mutex);
+    for (auto &H : FreeBlocks) {
+      const uptr FreeBlockSize = H.BlockEnd - reinterpret_cast<uptr>(&H);
+      if (FreeBlockSize < RoundedSize)
+        continue;
+      // Candidate free block should only be at most 4 pages larger.
+      if (FreeBlockSize > RoundedSize + 4 * PageSize)
+        break;
+      FreeBlocks.remove(&H);
+      InUseBlocks.push_back(&H);
+      AllocatedBytes += FreeBlockSize;
+      NumberOfAllocs++;
+      Stats.add(StatAllocated, FreeBlockSize);
+      if (BlockEnd)
+        *BlockEnd = H.BlockEnd;
+      return reinterpret_cast<void *>(reinterpret_cast<uptr>(&H) +
+                                      LargeBlock::getHeaderSize());
+    }
+  }
+
+  MapPlatformData Data = {};
+  const uptr MapSize = RoundedSize + 2 * PageSize;
+  uptr MapBase =
+      reinterpret_cast<uptr>(map(nullptr, MapSize, "scudo:secondary",
+                                 MAP_NOACCESS | MAP_ALLOWNOMEM, &Data));
+  if (UNLIKELY(!MapBase))
+    return nullptr;
+  uptr CommitBase = MapBase + PageSize;
+  uptr MapEnd = MapBase + MapSize;
+
+  // In the unlikely event of alignments larger than a page, adjust the amount
+  // of memory we want to commit, and trim the extra memory.
+  if (UNLIKELY(AlignmentHint >= PageSize)) {
+    // For alignments greater than or equal to a page, the user pointer (eg:
+    // the pointer that is returned by the C or C++ allocation APIs) ends up
+    // on a page boundary, and our headers will live in the preceding page.
+    CommitBase = roundUpTo(MapBase + PageSize + 1, AlignmentHint) - PageSize;
+    const uptr NewMapBase = CommitBase - PageSize;
+    DCHECK_GE(NewMapBase, MapBase);
+    // We only trim the extra memory on 32-bit platforms: 64-bit platforms
+    // are less constrained memory wise, and that saves us two syscalls.
+    if (SCUDO_WORDSIZE == 32U && NewMapBase != MapBase) {
+      unmap(reinterpret_cast<void *>(MapBase), NewMapBase - MapBase, 0, &Data);
+      MapBase = NewMapBase;
+    }
+    const uptr NewMapEnd = CommitBase + PageSize +
+                           roundUpTo((Size - AlignmentHint), PageSize) +
+                           PageSize;
+    DCHECK_LE(NewMapEnd, MapEnd);
+    if (SCUDO_WORDSIZE == 32U && NewMapEnd != MapEnd) {
+      unmap(reinterpret_cast<void *>(NewMapEnd), MapEnd - NewMapEnd, 0, &Data);
+      MapEnd = NewMapEnd;
+    }
+  }
+
+  const uptr CommitSize = MapEnd - PageSize - CommitBase;
+  const uptr Ptr =
+      reinterpret_cast<uptr>(map(reinterpret_cast<void *>(CommitBase),
+                                 CommitSize, "scudo:secondary", 0, &Data));
+  LargeBlock::Header *H = reinterpret_cast<LargeBlock::Header *>(Ptr);
+  H->MapBase = MapBase;
+  H->MapSize = MapEnd - MapBase;
+  H->BlockEnd = CommitBase + CommitSize;
+  H->Data = Data;
+  {
+    ScopedLock L(Mutex);
+    InUseBlocks.push_back(H);
+    AllocatedBytes += CommitSize;
+    if (LargestSize < CommitSize)
+      LargestSize = CommitSize;
+    NumberOfAllocs++;
+    Stats.add(StatAllocated, CommitSize);
+    Stats.add(StatMapped, H->MapSize);
+  }
+  if (BlockEnd)
+    *BlockEnd = CommitBase + CommitSize;
+  return reinterpret_cast<void *>(Ptr + LargeBlock::getHeaderSize());
+}
+
+template <uptr MaxFreeListSize>
+void MapAllocator<MaxFreeListSize>::deallocate(void *Ptr) {
+  LargeBlock::Header *H = LargeBlock::getHeader(Ptr);
+  {
+    ScopedLock L(Mutex);
+    InUseBlocks.remove(H);
+    const uptr CommitSize = H->BlockEnd - reinterpret_cast<uptr>(H);
+    FreedBytes += CommitSize;
+    NumberOfFrees++;
+    Stats.sub(StatAllocated, CommitSize);
+    if (MaxFreeListSize && FreeBlocks.size() < MaxFreeListSize) {
+      bool Inserted = false;
+      for (auto &F : FreeBlocks) {
+        const uptr FreeBlockSize = F.BlockEnd - reinterpret_cast<uptr>(&F);
+        if (FreeBlockSize >= CommitSize) {
+          FreeBlocks.insert(H, &F);
+          Inserted = true;
+          break;
+        }
+      }
+      if (!Inserted)
+        FreeBlocks.push_back(H);
+      const uptr From =
+          roundUpTo(reinterpret_cast<uptr>(H) + LargeBlock::getHeaderSize(),
+                    getPageSizeCached());
+      MapPlatformData Data = H->Data;
+      // TODO(kostyak): use release_to_os_interval_ms
+      releasePagesToOS(H->MapBase, From - H->MapBase, H->BlockEnd - From,
+                       &Data);
+      return;
+    }
+    Stats.sub(StatMapped, H->MapSize);
+  }
+  void *Addr = reinterpret_cast<void *>(H->MapBase);
+  const uptr Size = H->MapSize;
+  MapPlatformData Data = H->Data;
+  unmap(Addr, Size, UNMAP_ALL, &Data);
+}
+
+template <uptr MaxFreeListSize>
+void MapAllocator<MaxFreeListSize>::getStats(ScopedString *Str) const {
+  Str->append(
+      "Stats: MapAllocator: allocated %zu times (%zuK), freed %zu times "
+      "(%zuK), remains %zu (%zuK) max %zuM\n",
+      NumberOfAllocs, AllocatedBytes >> 10, NumberOfFrees, FreedBytes >> 10,
+      NumberOfAllocs - NumberOfFrees, (AllocatedBytes - FreedBytes) >> 10,
+      LargestSize >> 20);
+}
+
 } // namespace scudo

 #endif // SCUDO_SECONDARY_H_
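Two things are worth calling out in the new paths. First, retrieval is a bounded best-fit: because the free list is kept sorted by committed size, the first block that is large enough is also the tightest fit, and it is rejected outright if it overshoots the request by more than 4 pages. Second, deallocate keeps the mapping and the header page, releasing the rest of the block's pages to the OS, so a cached block costs address space rather than committed memory. A quick arithmetic sketch of the reuse window, assuming 4KB pages and a 16-byte LargeBlock header (both assumed values, not fixed by this patch):

#include <cassert>
#include <cstdint>

int main() {
  const uint64_t PageSize = 4096; // Assumed page size.
  const uint64_t HeaderSize = 16; // Assumed LargeBlock header size.
  // Stand-in for scudo::roundUpTo.
  auto roundUpTo = [](uint64_t X, uint64_t Boundary) {
    return (X + Boundary - 1) / Boundary * Boundary;
  };
  // A 100000-byte request plus the header rounds up to 25 pages.
  const uint64_t RoundedSize = roundUpTo(100000 + HeaderSize, PageSize);
  assert(RoundedSize == 25 * PageSize);
  // A cached block is reused only if its committed size lies within
  // [RoundedSize, RoundedSize + 4 * PageSize], i.e. 25 to 29 pages here;
  // anything larger would waste committed memory, so the scan breaks early.
  assert(RoundedSize + 4 * PageSize == 29 * PageSize);
  return 0;
}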
diff --git a/compiler-rt/lib/scudo/standalone/secondary.cpp b/compiler-rt/lib/scudo/standalone/secondary.cpp
deleted file mode 100644
--- a/compiler-rt/lib/scudo/standalone/secondary.cpp
+++ /dev/null
@@ -1,116 +0,0 @@
-//===-- secondary.cpp -------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "secondary.h"
-
-#include "string_utils.h"
-
-namespace scudo {
-
-// As with the Primary, the size passed to this function includes any desired
-// alignment, so that the frontend can align the user allocation. The hint
-// parameter allows us to unmap spurious memory when dealing with larger
-// (greater than a page) alignments on 32-bit platforms.
-// Due to the sparsity of address space available on those platforms, requesting
-// an allocation from the Secondary with a large alignment would end up wasting
-// VA space (even though we are not committing the whole thing), hence the need
-// to trim off some of the reserved space.
-// For allocations requested with an alignment greater than or equal to a page,
-// the committed memory will amount to something close to Size - AlignmentHint
-// (pending rounding and headers).
-void *MapAllocator::allocate(uptr Size, uptr AlignmentHint, uptr *BlockEnd) {
-  DCHECK_GT(Size, AlignmentHint);
-  const uptr PageSize = getPageSizeCached();
-  const uptr MapSize =
-      roundUpTo(Size + LargeBlock::getHeaderSize(), PageSize) + 2 * PageSize;
-  MapPlatformData Data = {};
-  uptr MapBase =
-      reinterpret_cast<uptr>(map(nullptr, MapSize, "scudo:secondary",
-                                 MAP_NOACCESS | MAP_ALLOWNOMEM, &Data));
-  if (UNLIKELY(!MapBase))
-    return nullptr;
-  uptr CommitBase = MapBase + PageSize;
-  uptr MapEnd = MapBase + MapSize;
-
-  // In the unlikely event of alignments larger than a page, adjust the amount
-  // of memory we want to commit, and trim the extra memory.
-  if (UNLIKELY(AlignmentHint >= PageSize)) {
-    // For alignments greater than or equal to a page, the user pointer (eg: the
-    // pointer that is returned by the C or C++ allocation APIs) ends up on a
-    // page boundary , and our headers will live in the preceding page.
-    CommitBase = roundUpTo(MapBase + PageSize + 1, AlignmentHint) - PageSize;
-    const uptr NewMapBase = CommitBase - PageSize;
-    DCHECK_GE(NewMapBase, MapBase);
-    // We only trim the extra memory on 32-bit platforms: 64-bit platforms
-    // are less constrained memory wise, and that saves us two syscalls.
-    if (SCUDO_WORDSIZE == 32U && NewMapBase != MapBase) {
-      unmap(reinterpret_cast<void *>(MapBase), NewMapBase - MapBase, 0, &Data);
-      MapBase = NewMapBase;
-    }
-    const uptr NewMapEnd = CommitBase + PageSize +
-                           roundUpTo((Size - AlignmentHint), PageSize) +
-                           PageSize;
-    DCHECK_LE(NewMapEnd, MapEnd);
-    if (SCUDO_WORDSIZE == 32U && NewMapEnd != MapEnd) {
-      unmap(reinterpret_cast<void *>(NewMapEnd), MapEnd - NewMapEnd, 0, &Data);
-      MapEnd = NewMapEnd;
-    }
-  }
-
-  const uptr CommitSize = MapEnd - PageSize - CommitBase;
-  const uptr Ptr =
-      reinterpret_cast<uptr>(map(reinterpret_cast<void *>(CommitBase),
-                                 CommitSize, "scudo:secondary", 0, &Data));
-  LargeBlock::Header *H = reinterpret_cast<LargeBlock::Header *>(Ptr);
-  H->MapBase = MapBase;
-  H->MapSize = MapEnd - MapBase;
-  H->BlockEnd = CommitBase + CommitSize;
-  H->Data = Data;
-  {
-    ScopedLock L(Mutex);
-    InUseBlocks.push_back(H);
-    AllocatedBytes += CommitSize;
-    if (LargestSize < CommitSize)
-      LargestSize = CommitSize;
-    NumberOfAllocs++;
-    Stats.add(StatAllocated, CommitSize);
-    Stats.add(StatMapped, H->MapSize);
-  }
-  if (BlockEnd)
-    *BlockEnd = CommitBase + CommitSize;
-  return reinterpret_cast<void *>(Ptr + LargeBlock::getHeaderSize());
-}
-
-void MapAllocator::deallocate(void *Ptr) {
-  LargeBlock::Header *H = LargeBlock::getHeader(Ptr);
-  {
-    ScopedLock L(Mutex);
-    InUseBlocks.remove(H);
-    const uptr CommitSize = H->BlockEnd - reinterpret_cast<uptr>(H);
-    FreedBytes += CommitSize;
-    NumberOfFrees++;
-    Stats.sub(StatAllocated, CommitSize);
-    Stats.sub(StatMapped, H->MapSize);
-  }
-  void *Addr = reinterpret_cast<void *>(H->MapBase);
-  const uptr Size = H->MapSize;
-  MapPlatformData Data;
-  Data = H->Data;
-  unmap(Addr, Size, UNMAP_ALL, &Data);
-}
-
-void MapAllocator::getStats(ScopedString *Str) const {
-  Str->append(
-      "Stats: MapAllocator: allocated %zu times (%zuK), freed %zu times "
-      "(%zuK), remains %zu (%zuK) max %zuM\n",
-      NumberOfAllocs, AllocatedBytes >> 10, NumberOfFrees, FreedBytes >> 10,
-      NumberOfAllocs - NumberOfFrees, (AllocatedBytes - FreedBytes) >> 10,
-      LargestSize >> 20);
-}
-
-} // namespace scudo
diff --git a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
--- a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
@@ -223,6 +223,7 @@
   // Tiny allocator, its Primary only serves chunks of 1024 bytes.
   using DeathSizeClassMap = scudo::SizeClassMap<1U, 10U, 10U, 10U, 1U, 10U>;
   typedef scudo::SizeClassAllocator32<DeathSizeClassMap, 18U> Primary;
+  typedef scudo::MapAllocator<0U> Secondary;
   template <class A> using TSDRegistryT = scudo::TSDRegistrySharedT<A, 1U>;
 };
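DeathConfig switches its Secondary to scudo::MapAllocator<0U> because the death tests rely on freed memory becoming inaccessible: with a non-empty free list, a freed block may stay mapped, and a use-after-free is no longer guaranteed to fault. A standalone sketch of the behavior those tests depend on (assuming the include paths used by the in-tree unit tests):

#include "secondary.h"

#include <string.h>

int main() {
  // MaxFreeListSize == 0: deallocate() unmaps the block immediately.
  scudo::MapAllocator<0U> *L = new scudo::MapAllocator<0U>;
  L->init(nullptr);
  void *P = L->allocate(1U << 16);
  memset(P, 'A', 1U << 16);
  L->deallocate(P);
  // memset(P, 'B', 1U << 16);  // Would now fault: the mapping is gone.
  return 0;
}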
diff --git a/compiler-rt/lib/scudo/standalone/tests/secondary_test.cpp b/compiler-rt/lib/scudo/standalone/tests/secondary_test.cpp
--- a/compiler-rt/lib/scudo/standalone/tests/secondary_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/secondary_test.cpp
@@ -16,18 +16,20 @@
 #include <mutex>
 #include <thread>

-TEST(ScudoSecondaryTest, SecondaryBasic) {
+template <class SecondaryT> static void testSecondaryBasic(void) {
   scudo::GlobalStats S;
   S.init();
-  scudo::MapAllocator *L = new scudo::MapAllocator;
+  SecondaryT *L = new SecondaryT;
   L->init(&S);
   const scudo::uptr Size = 1U << 16;
   void *P = L->allocate(Size);
   EXPECT_NE(P, nullptr);
   memset(P, 'A', Size);
-  EXPECT_GE(scudo::MapAllocator::getBlockSize(P), Size);
+  EXPECT_GE(SecondaryT::getBlockSize(P), Size);
   L->deallocate(P);
-  EXPECT_DEATH(memset(P, 'A', Size), "");
+  // If we are not using a free list, blocks are unmapped on deallocation.
+  if (SecondaryT::getMaxFreeListSize() == 0U)
+    EXPECT_DEATH(memset(P, 'A', Size), "");

   const scudo::uptr Align = 1U << 16;
   P = L->allocate(Size + Align, Align);
@@ -50,13 +52,21 @@
   Str.output();
 }

+TEST(ScudoSecondaryTest, SecondaryBasic) {
+  testSecondaryBasic<scudo::MapAllocator<>>();
+  testSecondaryBasic<scudo::MapAllocator<0U>>();
+  testSecondaryBasic<scudo::MapAllocator<64U>>();
+}
+
+using LargeAllocator = scudo::MapAllocator<>;
+
 // This exercises a variety of combinations of size and alignment for the
 // MapAllocator. The size computation done here mimic the ones done by the
 // combined allocator.
 TEST(ScudoSecondaryTest, SecondaryCombinations) {
   constexpr scudo::uptr MinAlign = FIRST_32_SECOND_64(8, 16);
   constexpr scudo::uptr HeaderSize = scudo::roundUpTo(8, MinAlign);
-  scudo::MapAllocator *L = new scudo::MapAllocator;
+  LargeAllocator *L = new LargeAllocator;
   L->init(nullptr);
   for (scudo::uptr SizeLog = 0; SizeLog <= 20; SizeLog++) {
     for (scudo::uptr AlignLog = FIRST_32_SECOND_64(3, 4); AlignLog <= 16;
@@ -84,7 +94,7 @@
 }

 TEST(ScudoSecondaryTest, SecondaryIterate) {
-  scudo::MapAllocator *L = new scudo::MapAllocator;
+  LargeAllocator *L = new LargeAllocator;
   L->init(nullptr);
   std::vector<void *> V;
   const scudo::uptr PageSize = scudo::getPageSizeCached();
@@ -110,7 +120,7 @@
 static std::condition_variable Cv;
 static bool Ready = false;

-static void performAllocations(scudo::MapAllocator *L) {
+static void performAllocations(LargeAllocator *L) {
   std::vector<void *> V;
   const scudo::uptr PageSize = scudo::getPageSizeCached();
   {
@@ -127,7 +137,7 @@
 }

 TEST(ScudoSecondaryTest, SecondaryThreadsRace) {
-  scudo::MapAllocator *L = new scudo::MapAllocator;
+  LargeAllocator *L = new LargeAllocator;
   L->init(nullptr);
   std::thread Threads[10];
   for (scudo::uptr I = 0; I < 10U; I++)
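Conversely, with the free list enabled, a freed block of matching committed size should be handed straight back on the next allocation, saving a map/unmap round trip. A minimal sketch of that reuse (same include-path assumption as above; the pointer equality is the expected common case rather than a guarantee of the interface):

#include "secondary.h"

#include <assert.h>

int main() {
  scudo::MapAllocator<> *L = new scudo::MapAllocator<>;
  L->init(nullptr);
  void *P = L->allocate(1U << 16);
  L->deallocate(P);  // Cached on the free list, pages released to the OS.
  void *Q = L->allocate(1U << 16);  // Expected to be served from the cache.
  assert(Q == P);  // Holds here: the lone cached block is an exact fit.
  L->deallocate(Q);
  return 0;
}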