Index: lib/scudo/scudo_allocator.h
===================================================================
--- lib/scudo/scudo_allocator.h
+++ lib/scudo/scudo_allocator.h
@@ -82,7 +82,7 @@
   static const uptr kFlags =
       SizeClassAllocator64FlagMasks::kRandomShuffleChunks;
 };
-typedef SizeClassAllocator64 PrimaryAllocator;
+typedef SizeClassAllocator64 PrimaryT;
 #else
 static const uptr NumRegions = SANITIZER_MMAP_RANGE_SIZE >> RegionSizeLog;
 # if SANITIZER_WORDSIZE == 32
@@ -102,16 +102,15 @@
       SizeClassAllocator32FlagMasks::kRandomShuffleChunks |
       SizeClassAllocator32FlagMasks::kUseSeparateSizeClassForBatch;
 };
-typedef SizeClassAllocator32 PrimaryAllocator;
+typedef SizeClassAllocator32 PrimaryT;
 #endif // SANITIZER_CAN_USE_ALLOCATOR64

 #include "scudo_allocator_secondary.h"
 #include "scudo_allocator_combined.h"

-typedef SizeClassAllocatorLocalCache AllocatorCache;
-typedef ScudoLargeMmapAllocator SecondaryAllocator;
-typedef ScudoCombinedAllocator ScudoBackendAllocator;
+typedef SizeClassAllocatorLocalCache AllocatorCacheT;
+typedef LargeMmapAllocator SecondaryT;
+typedef CombinedAllocator BackendT;

 void initScudo();
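The header hunks establish a consistent `*T` naming scheme for the backend building blocks. The comment-only recap below is illustrative and not part of the patch; it just summarizes the role of each renamed type as used throughout the rest of the diff.

// Illustrative recap of the renamed building blocks (not part of the patch):
//   PrimaryT        - SizeClassAllocator{32,64}: size-classed chunks
//   AllocatorCacheT - SizeClassAllocatorLocalCache: per-thread cache feeding the Primary
//   SecondaryT      - LargeMmapAllocator: mmap-backed chunks too large for the Primary
//   BackendT        - CombinedAllocator: dispatches requests to PrimaryT or SecondaryT
// The old Scudo-prefixed names on the removed lines are retired in favor of these.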
Index: lib/scudo/scudo_allocator.cpp
===================================================================
--- lib/scudo/scudo_allocator.cpp
+++ lib/scudo/scudo_allocator.cpp
@@ -62,7 +62,7 @@
 #endif // defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
 }

-static ScudoBackendAllocator &getBackendAllocator();
+static BackendT &getBackend();

 namespace Chunk {
   static INLINE AtomicPackedHeader *getAtomicHeader(void *Ptr) {
@@ -92,9 +92,9 @@
   static INLINE uptr getUsableSize(const void *Ptr, UnpackedHeader *Header) {
     const uptr ClassId = Header->ClassId;
     if (ClassId)
-      return PrimaryAllocator::ClassIdToSize(ClassId) - getHeaderSize() -
+      return PrimaryT::ClassIdToSize(ClassId) - getHeaderSize() -
          (Header->Offset << MinAlignmentLog);
-    return SecondaryAllocator::GetActuallyAllocatedSize(
+    return SecondaryT::GetActuallyAllocatedSize(
        getBackendPtr(Ptr, Header)) - getHeaderSize();
   }

@@ -103,7 +103,7 @@
     const uptr SizeOrUnusedBytes = Header->SizeOrUnusedBytes;
     if (Header->ClassId)
       return SizeOrUnusedBytes;
-    return SecondaryAllocator::GetActuallyAllocatedSize(
+    return SecondaryT::GetActuallyAllocatedSize(
        getBackendPtr(Ptr, Header)) - getHeaderSize() - SizeOrUnusedBytes;
   }

@@ -175,7 +175,7 @@
 } // namespace Chunk

 struct QuarantineCallback {
-  explicit QuarantineCallback(AllocatorCache *Cache)
+  explicit QuarantineCallback(AllocatorCacheT *Cache)
    : Cache_(Cache) {}

   // Chunk recycling function, returns a quarantined chunk to the backend,
@@ -188,10 +188,9 @@
     Chunk::eraseHeader(Ptr);
     void *BackendPtr = Chunk::getBackendPtr(Ptr, &Header);
     if (Header.ClassId)
-      getBackendAllocator().deallocatePrimary(Cache_, BackendPtr,
-                                              Header.ClassId);
+      getBackend().deallocatePrimary(Cache_, BackendPtr, Header.ClassId);
     else
-      getBackendAllocator().deallocateSecondary(BackendPtr);
+      getBackend().deallocateSecondary(BackendPtr);
   }

   // Internal quarantine allocation and deallocation functions. We first check
@@ -199,34 +198,33 @@
   // TODO(kostyak): figure out the best way to protect the batches.
   void *Allocate(uptr Size) {
     const uptr BatchClassId = SizeClassMap::ClassID(sizeof(QuarantineBatch));
-    return getBackendAllocator().allocatePrimary(Cache_, BatchClassId);
+    return getBackend().allocatePrimary(Cache_, BatchClassId);
   }

   void Deallocate(void *Ptr) {
     const uptr BatchClassId = SizeClassMap::ClassID(sizeof(QuarantineBatch));
-    getBackendAllocator().deallocatePrimary(Cache_, Ptr, BatchClassId);
+    getBackend().deallocatePrimary(Cache_, Ptr, BatchClassId);
   }

-  AllocatorCache *Cache_;
+  AllocatorCacheT *Cache_;

   COMPILER_CHECK(sizeof(QuarantineBatch) < SizeClassMap::kMaxSize);
 };

-typedef Quarantine ScudoQuarantine;
-typedef ScudoQuarantine::Cache ScudoQuarantineCache;
-COMPILER_CHECK(sizeof(ScudoQuarantineCache) <=
+typedef Quarantine QuarantineT;
+typedef QuarantineT::Cache QuarantineCacheT;
+COMPILER_CHECK(sizeof(QuarantineCacheT) <=
               sizeof(ScudoTSD::QuarantineCachePlaceHolder));

-ScudoQuarantineCache *getQuarantineCache(ScudoTSD *TSD) {
-  return reinterpret_cast(
-      TSD->QuarantineCachePlaceHolder);
+QuarantineCacheT *getQuarantineCache(ScudoTSD *TSD) {
+  return reinterpret_cast(TSD->QuarantineCachePlaceHolder);
 }

-struct ScudoAllocator {
+struct Allocator {
   static const uptr MaxAllowedMallocSize =
       FIRST_32_SECOND_64(2UL << 30, 1ULL << 40);

-  ScudoBackendAllocator BackendAllocator;
-  ScudoQuarantine AllocatorQuarantine;
+  BackendT Backend;
+  QuarantineT Quarantine;

   u32 QuarantineChunksUpToSize;

@@ -240,8 +238,8 @@
   atomic_uint8_t RssLimitExceeded;
   atomic_uint64_t RssLastCheckedAtNS;

-  explicit ScudoAllocator(LinkerInitialized)
-    : AllocatorQuarantine(LINKER_INITIALIZED) {}
+  explicit Allocator(LinkerInitialized)
+    : Quarantine(LINKER_INITIALIZED) {}

   NOINLINE void performSanityChecks();

@@ -260,10 +258,10 @@
       atomic_store_relaxed(&HashAlgorithm, CRC32Hardware);

     SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
-    BackendAllocator.init(common_flags()->allocator_release_to_os_interval_ms);
+    Backend.init(common_flags()->allocator_release_to_os_interval_ms);
     HardRssLimitMb = common_flags()->hard_rss_limit_mb;
     SoftRssLimitMb = common_flags()->soft_rss_limit_mb;
-    AllocatorQuarantine.Init(
+    Quarantine.Init(
        static_cast(getFlags()->QuarantineSizeKb) << 10,
        static_cast(getFlags()->ThreadLocalQuarantineSizeKb) << 10);
     QuarantineChunksUpToSize = getFlags()->QuarantineChunksUpToSize;
@@ -329,18 +327,18 @@
     void *BackendPtr;
     uptr BackendSize;
     u8 ClassId;
-    if (PrimaryAllocator::CanAllocate(AlignedSize, MinAlignment)) {
+    if (PrimaryT::CanAllocate(AlignedSize, MinAlignment)) {
       BackendSize = AlignedSize;
       ClassId = SizeClassMap::ClassID(BackendSize);
       bool UnlockRequired;
       ScudoTSD *TSD = getTSDAndLock(&UnlockRequired);
-      BackendPtr = BackendAllocator.allocatePrimary(&TSD->Cache, ClassId);
+      BackendPtr = Backend.allocatePrimary(&TSD->Cache, ClassId);
       if (UnlockRequired)
        TSD->unlock();
     } else {
       BackendSize = NeededSize;
       ClassId = 0;
-      BackendPtr = BackendAllocator.allocateSecondary(BackendSize, Alignment);
+      BackendPtr = Backend.allocateSecondary(BackendSize, Alignment);
     }
     if (UNLIKELY(!BackendPtr)) {
       SetAllocatorOutOfMemory();
@@ -351,7 +349,7 @@

     // If requested, we will zero out the entire contents of the returned chunk.
     if ((ForceZeroContents || ZeroContents) && ClassId)
-      memset(BackendPtr, 0, PrimaryAllocator::ClassIdToSize(ClassId));
+      memset(BackendPtr, 0, PrimaryT::ClassIdToSize(ClassId));

     UnpackedHeader Header = {};
     uptr UserPtr = reinterpret_cast(BackendPtr) + Chunk::getHeaderSize();
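A note on the chunk layout these hunks rely on: the backend hands out `BackendPtr`, Scudo writes its packed header just in front of the user pointer, and the usable size is recovered from either the Primary's size class or the Secondary's bookkeeping. The comment-only sketch below is illustrative and covers the common, unaligned case (the `Offset` field accounts for the aligned case).

// Rough chunk layout assumed above (Offset == 0, i.e. no extra alignment):
//
//   BackendPtr                      UserPtr = BackendPtr + Chunk::getHeaderSize()
//   |                               |
//   [ packed Scudo header ......... ][ user data ........................... ]
//
// Usable size, Primary-backed chunk:
//   PrimaryT::ClassIdToSize(ClassId) - getHeaderSize() - (Offset << MinAlignmentLog)
// Usable size, Secondary-backed chunk:
//   SecondaryT::GetActuallyAllocatedSize(BackendPtr) - getHeaderSize()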
@@ -391,7 +389,7 @@
   // quarantine chunk size threshold.
   void quarantineOrDeallocateChunk(void *Ptr, UnpackedHeader *Header,
                                    uptr Size) {
-    const bool BypassQuarantine = (AllocatorQuarantine.GetCacheSize() == 0) ||
+    const bool BypassQuarantine = (Quarantine.GetCacheSize() == 0) ||
        (Size > QuarantineChunksUpToSize);
     if (BypassQuarantine) {
       Chunk::eraseHeader(Ptr);
@@ -399,12 +397,12 @@
       if (Header->ClassId) {
        bool UnlockRequired;
        ScudoTSD *TSD = getTSDAndLock(&UnlockRequired);
-        getBackendAllocator().deallocatePrimary(&TSD->Cache, BackendPtr,
-                                                Header->ClassId);
+        getBackend().deallocatePrimary(&TSD->Cache, BackendPtr,
+                                       Header->ClassId);
        if (UnlockRequired)
          TSD->unlock();
       } else {
-        getBackendAllocator().deallocateSecondary(BackendPtr);
+        getBackend().deallocateSecondary(BackendPtr);
       }
     } else {
       // If a small memory amount was allocated with a larger alignment, we want
@@ -418,9 +416,8 @@
       Chunk::compareExchangeHeader(Ptr, &NewHeader, Header);
       bool UnlockRequired;
       ScudoTSD *TSD = getTSDAndLock(&UnlockRequired);
-      AllocatorQuarantine.Put(getQuarantineCache(TSD),
-                              QuarantineCallback(&TSD->Cache), Ptr,
-                              EstimatedSize);
+      Quarantine.Put(getQuarantineCache(TSD), QuarantineCallback(&TSD->Cache),
+                     Ptr, EstimatedSize);
       if (UnlockRequired)
        TSD->unlock();
     }
@@ -530,15 +527,14 @@
   }

   void commitBack(ScudoTSD *TSD) {
-    AllocatorQuarantine.Drain(getQuarantineCache(TSD),
-                              QuarantineCallback(&TSD->Cache));
-    BackendAllocator.destroyCache(&TSD->Cache);
+    Quarantine.Drain(getQuarantineCache(TSD), QuarantineCallback(&TSD->Cache));
+    Backend.destroyCache(&TSD->Cache);
   }

   uptr getStats(AllocatorStat StatType) {
     initThreadMaybe();
     uptr stats[AllocatorStatCount];
-    BackendAllocator.getStats(stats);
+    Backend.getStats(stats);
     return stats[StatType];
   }

@@ -557,11 +553,11 @@
   void printStats() {
     initThreadMaybe();
-    BackendAllocator.printStats();
+    Backend.printStats();
   }
 };

-NOINLINE void ScudoAllocator::performSanityChecks() {
+NOINLINE void Allocator::performSanityChecks() {
   // Verify that the header offset field can hold the maximum offset. In the
   // case of the Secondary allocator, it takes care of alignment and the
   // offset will always be 0. In the case of the Primary, the worst case
@@ -596,7 +592,7 @@
 // Opportunistic RSS limit check. This will update the RSS limit status, if
 // it can, every 100ms, otherwise it will just return the current one.
-NOINLINE bool ScudoAllocator::isRssLimitExceeded() {
+NOINLINE bool Allocator::isRssLimitExceeded() {
   u64 LastCheck = atomic_load_relaxed(&RssLastCheckedAtNS);
   const u64 CurrentCheck = MonotonicNanoTime();
   if (LIKELY(CurrentCheck < LastCheck + (100ULL * 1000000ULL)))
     return atomic_load_relaxed(&RssLimitExceeded);
@@ -626,10 +622,10 @@
   return atomic_load_relaxed(&RssLimitExceeded);
 }

-static ScudoAllocator Instance(LINKER_INITIALIZED);
+static Allocator Instance(LINKER_INITIALIZED);

-static ScudoBackendAllocator &getBackendAllocator() {
-  return Instance.BackendAllocator;
+static BackendT &getBackend() {
+  return Instance.Backend;
 }

 void initScudo() {
@@ -637,7 +633,7 @@
 }

 void ScudoTSD::init() {
-  getBackendAllocator().initCache(&Cache);
+  getBackend().initCache(&Cache);
   memset(QuarantineCachePlaceHolder, 0, sizeof(QuarantineCachePlaceHolder));
 }
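Taken together, the deallocation hunks implement one policy: a freed chunk either bypasses the quarantine and is returned to the backend immediately, or is parked in the per-thread quarantine cache and later recycled by the callback shown earlier. The sketch below is illustrative only, assumes it sits inside `struct Allocator` like the real `quarantineOrDeallocateChunk`, and trims the header bookkeeping and the exact locking scope; the hypothetical name `deallocateSketch` is not part of the patch.

// Illustrative only: condensed deallocation policy, not part of the patch.
void deallocateSketch(void *Ptr, UnpackedHeader *Header, uptr Size) {
  bool UnlockRequired;
  ScudoTSD *TSD = getTSDAndLock(&UnlockRequired);
  if (Quarantine.GetCacheSize() == 0 || Size > QuarantineChunksUpToSize) {
    // Quarantine disabled, or the chunk is above the quarantine size
    // threshold: hand it straight back to the Primary or Secondary.
    void *BackendPtr = Chunk::getBackendPtr(Ptr, Header);
    if (Header->ClassId)
      getBackend().deallocatePrimary(&TSD->Cache, BackendPtr, Header->ClassId);
    else
      getBackend().deallocateSecondary(BackendPtr);
  } else {
    // Otherwise batch it in the per-thread quarantine cache; when the
    // quarantine fills up, the recycling callback drains chunks back to the
    // backend the same way.
    Quarantine.Put(getQuarantineCache(TSD), QuarantineCallback(&TSD->Cache),
                   Ptr, Size);
  }
  if (UnlockRequired)
    TSD->unlock();
}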
Index: lib/scudo/scudo_allocator_combined.h
===================================================================
--- lib/scudo/scudo_allocator_combined.h
+++ lib/scudo/scudo_allocator_combined.h
@@ -16,12 +16,12 @@
 #define SCUDO_ALLOCATOR_COMBINED_H_

 #ifndef SCUDO_ALLOCATOR_H_
-#error "This file must be included inside scudo_allocator.h."
+# error "This file must be included inside scudo_allocator.h."
 #endif

 template
-class ScudoCombinedAllocator {
+class CombinedAllocator {
  public:
   void init(s32 ReleaseToOSIntervalMs) {
     Primary.Init(ReleaseToOSIntervalMs);
Index: lib/scudo/scudo_allocator_secondary.h
===================================================================
--- lib/scudo/scudo_allocator_secondary.h
+++ lib/scudo/scudo_allocator_secondary.h
@@ -66,7 +66,7 @@
   }
 } // namespace LargeChunk

-class ScudoLargeMmapAllocator {
+class LargeMmapAllocator {
  public:
   void Init() {
     internal_memset(this, 0, sizeof(*this));
Index: lib/scudo/scudo_tsd.h
===================================================================
--- lib/scudo/scudo_tsd.h
+++ lib/scudo/scudo_tsd.h
@@ -24,7 +24,7 @@
 namespace __scudo {

 struct ALIGNED(SANITIZER_CACHE_LINE_SIZE) ScudoTSD {
-  AllocatorCache Cache;
+  AllocatorCacheT Cache;
   uptr QuarantineCachePlaceHolder[4];

   void init();
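For completeness, the per-thread picture after the rename: each `ScudoTSD` owns an `AllocatorCacheT` for the Primary and a raw buffer that `getQuarantineCache()` reinterprets as a `QuarantineCacheT` (the `COMPILER_CHECK` in scudo_allocator.cpp guarantees it fits). The comment-only summary below is illustrative and not part of the patch.

// Illustrative summary of the per-thread state (not part of the patch):
//
//   ScudoTSD
//     AllocatorCacheT Cache;                 // passed to Backend.allocatePrimary()
//     uptr QuarantineCachePlaceHolder[4];    // storage reused as a QuarantineCacheT
//
//   ScudoTSD::init()  -> getBackend().initCache(&Cache)
//   commitBack(TSD)   -> Quarantine.Drain(getQuarantineCache(TSD), ...)
//                        Backend.destroyCache(&TSD->Cache)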