Index: lib/scudo/scudo_allocator.h
===================================================================
--- lib/scudo/scudo_allocator.h
+++ lib/scudo/scudo_allocator.h
@@ -107,11 +107,12 @@
 #endif  // SANITIZER_CAN_USE_ALLOCATOR64

 #include "scudo_allocator_secondary.h"
+#include "scudo_allocator_combined.h"

 typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
 typedef ScudoLargeMmapAllocator SecondaryAllocator;
-typedef CombinedAllocator<PrimaryAllocator, AllocatorCache, SecondaryAllocator>
-    ScudoBackendAllocator;
+typedef ScudoCombinedAllocator<PrimaryAllocator, AllocatorCache,
+    SecondaryAllocator> ScudoBackendAllocator;

 void initScudo();
Index: lib/scudo/scudo_allocator.cpp
===================================================================
--- lib/scudo/scudo_allocator.cpp
+++ lib/scudo/scudo_allocator.cpp
@@ -73,8 +73,9 @@
   // Returns the usable size for a chunk, meaning the amount of bytes from the
   // beginning of the user data to the end of the backend allocated chunk.
   uptr getUsableSize(UnpackedHeader *Header) {
-    uptr Size = getBackendAllocator().GetActuallyAllocatedSize(
-        getAllocBeg(Header));
+    uptr Size =
+        getBackendAllocator().GetActuallyAllocatedSize(getAllocBeg(Header),
+                                                       Header->FromPrimary);
     if (Size == 0)
       return 0;
     return Size - AlignedChunkHeaderSize - (Header->Offset << MinAlignmentLog);
@@ -221,7 +222,8 @@
   explicit QuarantineCallback(AllocatorCache *Cache)
     : Cache_(Cache) {}

-  // Chunk recycling function, returns a quarantined chunk to the backend.
+  // Chunk recycling function, returns a quarantined chunk to the backend,
+  // first making sure it hasn't been tampered with.
   void Recycle(ScudoChunk *Chunk) {
     UnpackedHeader Header;
     Chunk->loadHeader(&Header);
@@ -231,17 +233,19 @@
     }
     Chunk->eraseHeader();
     void *Ptr = Chunk->getAllocBeg(&Header);
-    getBackendAllocator().Deallocate(Cache_, Ptr);
+    getBackendAllocator().Deallocate(Cache_, Ptr, Header.FromPrimary);
   }

-  // Internal quarantine allocation and deallocation functions.
+  // Internal quarantine allocation and deallocation functions. We first check
+  // that the batches are indeed serviced by the Primary.
+  // TODO(kostyak): figure out the best way to protect the batches.
+  COMPILER_CHECK(sizeof(QuarantineBatch) < SizeClassMap::kMaxSize);
   void *Allocate(uptr Size) {
-    // TODO(kostyak): figure out the best way to protect the batches.
-    return getBackendAllocator().Allocate(Cache_, Size, MinAlignment);
+    return getBackendAllocator().Allocate(Cache_, Size, MinAlignment, true);
   }

   void Deallocate(void *Ptr) {
-    getBackendAllocator().Deallocate(Cache_, Ptr);
+    getBackendAllocator().Deallocate(Cache_, Ptr, true);
   }

   AllocatorCache *Cache_;
@@ -359,54 +363,48 @@
       Size = 1;

     uptr NeededSize = RoundUpTo(Size, MinAlignment) + AlignedChunkHeaderSize;
-    if (Alignment > MinAlignment)
-      NeededSize += Alignment;
-    if (NeededSize >= MaxAllowedMallocSize)
+    uptr AlignedSize = (Alignment > MinAlignment) ?
+        NeededSize + (Alignment - AlignedChunkHeaderSize) : NeededSize;
+    if (AlignedSize >= MaxAllowedMallocSize)
       return BackendAllocator.ReturnNullOrDieOnBadRequest();

-    // Primary backed and Secondary backed allocations have a different
-    // treatment. We deal with alignment requirements of Primary serviced
-    // allocations here, but the Secondary will take care of its own alignment
-    // needs, which means we also have to work around some limitations of the
-    // combined allocator to accommodate the situation.
-    bool FromPrimary = PrimaryAllocator::CanAllocate(NeededSize, MinAlignment);
+    // Primary and Secondary backed allocations have a different treatment. We
+    // deal with alignment requirements of Primary serviced allocations here,
+    // but the Secondary will take care of its own alignment needs.
+    bool FromPrimary = PrimaryAllocator::CanAllocate(AlignedSize, MinAlignment);

     void *Ptr;
     uptr Salt;
+    uptr AllocationSize = FromPrimary ? AlignedSize : NeededSize;
     uptr AllocationAlignment = FromPrimary ? MinAlignment : Alignment;
     ScudoThreadContext *ThreadContext = getThreadContextAndLock();
     if (LIKELY(ThreadContext)) {
       Salt = getPrng(ThreadContext)->getNext();
       Ptr = BackendAllocator.Allocate(getAllocatorCache(ThreadContext),
-                                      NeededSize, AllocationAlignment);
+                                      AllocationSize, AllocationAlignment,
+                                      FromPrimary);
       ThreadContext->unlock();
     } else {
       SpinMutexLock l(&FallbackMutex);
       Salt = FallbackPrng.getNext();
-      Ptr = BackendAllocator.Allocate(&FallbackAllocatorCache, NeededSize,
-                                      AllocationAlignment);
+      Ptr = BackendAllocator.Allocate(&FallbackAllocatorCache, AllocationSize,
+                                      AllocationAlignment, FromPrimary);
     }
     if (!Ptr)
       return BackendAllocator.ReturnNullOrDieOnOOM();

-    uptr AllocBeg = reinterpret_cast<uptr>(Ptr);
-    // If the allocation was serviced by the secondary, the returned pointer
-    // accounts for ChunkHeaderSize to pass the alignment check of the combined
-    // allocator. Adjust it here.
-    if (!FromPrimary) {
-      AllocBeg -= AlignedChunkHeaderSize;
-      if (Alignment > MinAlignment)
-        NeededSize -= Alignment;
-    }
-
     // If requested, we will zero out the entire contents of the returned chunk.
     if ((ForceZeroContents || ZeroContents) && FromPrimary)
-      memset(Ptr, 0, BackendAllocator.GetActuallyAllocatedSize(Ptr));
+      memset(Ptr, 0,
+             BackendAllocator.GetActuallyAllocatedSize(Ptr, FromPrimary));

+    uptr AllocBeg = reinterpret_cast<uptr>(Ptr);
     uptr UserBeg = AllocBeg + AlignedChunkHeaderSize;
-    if (!IsAligned(UserBeg, Alignment))
+    if (!IsAligned(UserBeg, Alignment)) {
+      CHECK(FromPrimary);
       UserBeg = RoundUpTo(UserBeg, Alignment);
-    CHECK_LE(UserBeg + Size, AllocBeg + NeededSize);
+    }
+    CHECK_LE(UserBeg + Size, AllocBeg + AllocationSize);

     UnpackedHeader Header = {};
     Header.State = ChunkAllocated;
     uptr Offset = UserBeg - AlignedChunkHeaderSize - AllocBeg;
@@ -437,17 +435,20 @@
   // with no additional security value.
   void quarantineOrDeallocateChunk(ScudoChunk *Chunk, UnpackedHeader *Header,
                                    uptr Size) {
+    bool FromPrimary = Header->FromPrimary;
    bool BypassQuarantine = (AllocatorQuarantine.GetCacheSize() == 0);
     if (BypassQuarantine) {
       Chunk->eraseHeader();
       void *Ptr = Chunk->getAllocBeg(Header);
       ScudoThreadContext *ThreadContext = getThreadContextAndLock();
       if (LIKELY(ThreadContext)) {
-        getBackendAllocator().Deallocate(getAllocatorCache(ThreadContext), Ptr);
+        getBackendAllocator().Deallocate(getAllocatorCache(ThreadContext), Ptr,
+                                         FromPrimary);
         ThreadContext->unlock();
       } else {
         SpinMutexLock Lock(&FallbackMutex);
-        getBackendAllocator().Deallocate(&FallbackAllocatorCache, Ptr);
+        getBackendAllocator().Deallocate(&FallbackAllocatorCache, Ptr,
+                                         FromPrimary);
       }
     } else {
       UnpackedHeader NewHeader = *Header;
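Note: the following standalone sketch is not part of the patch; it only walks
through the new sizing logic above for an aligned, Primary-backed request. The
constants (MinAlignment, AlignedChunkHeaderSize) and the example values are
assumptions picked for illustration, not the authoritative definitions from
scudo_allocator.h.

// Sketch of the front-end sizing above: the patch reserves
// Alignment - AlignedChunkHeaderSize extra bytes (AlignedSize) so that UserBeg
// can be rounded up in place when the Primary only guarantees MinAlignment.
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdio>

using uptr = uintptr_t;

constexpr uptr MinAlignmentLog = 4;          // assumed: 16-byte minimum alignment
constexpr uptr MinAlignment = 1UL << MinAlignmentLog;
constexpr uptr AlignedChunkHeaderSize = 16;  // assumed header size

constexpr uptr roundUpTo(uptr Size, uptr Boundary) {
  return (Size + Boundary - 1) & ~(Boundary - 1);
}

int main() {
  const uptr Size = 100, Alignment = 64;     // example memalign-style request

  uptr NeededSize = roundUpTo(Size, MinAlignment) + AlignedChunkHeaderSize;
  uptr AlignedSize = (Alignment > MinAlignment)
                         ? NeededSize + (Alignment - AlignedChunkHeaderSize)
                         : NeededSize;

  // Assume the Primary services the request (FromPrimary == true): the backend
  // is asked for AlignedSize bytes at MinAlignment only, and the frontend
  // rounds UserBeg up itself; the reserved padding absorbs the shift.
  const uptr AllocBeg = 0x1010;              // hypothetical, 16-byte aligned
  uptr UserBeg = AllocBeg + AlignedChunkHeaderSize;
  if (UserBeg & (Alignment - 1))
    UserBeg = roundUpTo(UserBeg, Alignment);
  assert(UserBeg + Size <= AllocBeg + AlignedSize);  // mirrors the CHECK_LE

  std::printf("NeededSize=%zu AlignedSize=%zu Offset=%zu\n",
              static_cast<size_t>(NeededSize), static_cast<size_t>(AlignedSize),
              static_cast<size_t>(UserBeg - AlignedChunkHeaderSize - AllocBeg));
  return 0;
}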
Index: lib/scudo/scudo_allocator_combined.h
===================================================================
--- /dev/null
+++ lib/scudo/scudo_allocator_combined.h
@@ -0,0 +1,157 @@
+//===-- scudo_allocator_combined.h ------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// Scudo Combined Allocator, dispatches allocation & deallocation requests to
+/// the Primary or the Secondary backend allocators.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_ALLOCATOR_COMBINED_H_
+#define SCUDO_ALLOCATOR_COMBINED_H_
+
+#ifndef SCUDO_ALLOCATOR_H_
+#error "This file must be included inside scudo_allocator.h."
+#endif
+
+template <class PrimaryAllocator, class AllocatorCache,
+          class SecondaryAllocator>
+class ScudoCombinedAllocator {
+ public:
+  void InitLinkerInitialized(bool may_return_null, s32 ReleaseToOSIntervalMs) {
+    UNIMPLEMENTED();
+  }
+
+  void Init(bool AllocatorMayReturnNull, s32 ReleaseToOSIntervalMs) {
+    Primary.Init(ReleaseToOSIntervalMs);
+    Secondary.Init(AllocatorMayReturnNull);
+    Stats.Init();
+    atomic_store_relaxed(&MayReturnNull, AllocatorMayReturnNull);
+  }
+
+  void *Allocate(AllocatorCache *Cache, uptr Size, uptr Alignment,
+                 bool FromPrimary) {
+    if (FromPrimary)
+      return Cache->Allocate(&Primary, Primary.ClassID(Size));
+    return Secondary.Allocate(&Stats, Size, Alignment);
+  }
+
+  void *ReturnNullOrDieOnBadRequest() {
+    if (atomic_load_relaxed(&MayReturnNull))
+      return nullptr;
+    ReportAllocatorCannotReturnNull(false);
+  }
+
+  void *ReturnNullOrDieOnOOM() {
+    if (atomic_load_relaxed(&MayReturnNull))
+      return nullptr;
+    ReportAllocatorCannotReturnNull(true);
+  }
+
+  void SetMayReturnNull(bool AllocatorMayReturnNull) {
+    UNIMPLEMENTED();
+  }
+
+  s32 ReleaseToOSIntervalMs() const {
+    return Primary.ReleaseToOSIntervalMs();
+  }
+
+  void SetReleaseToOSIntervalMs(s32 ReleaseToOSIntervalMs) {
+    UNIMPLEMENTED();
+  }
+
+  void SetRssLimitIsExceeded(bool rss_limit_is_exceeded) {
+    UNIMPLEMENTED();
+  }
+
+  void Deallocate(AllocatorCache *Cache, void *Ptr, bool FromPrimary) {
+    if (FromPrimary)
+      Cache->Deallocate(&Primary, Primary.GetSizeClass(Ptr), Ptr);
+    else
+      Secondary.Deallocate(&Stats, Ptr);
+  }
+
+  void *Reallocate(AllocatorCache *Cache, void *Ptr, uptr NewSize,
+                   uptr Alignment) {
+    UNIMPLEMENTED();
+  }
+
+  bool PointerIsMine(void *Ptr) {
+    UNIMPLEMENTED();
+  }
+
+  bool FromPrimary(void *Ptr) {
+    return Primary.PointerIsMine(Ptr);
+  }
+
+  void *GetMetaData(const void *Ptr) {
+    UNIMPLEMENTED();
+  }
+
+  void *GetBlockBegin(const void *Ptr) {
+    UNIMPLEMENTED();
+  }
+
+  void *GetBlockBeginFastLocked(void *Ptr) {
+    UNIMPLEMENTED();
+  }
+
+  uptr GetActuallyAllocatedSize(void *Ptr, bool FromPrimary) {
+    if (FromPrimary)
+      return Primary.GetActuallyAllocatedSize(Ptr);
+    return Secondary.GetActuallyAllocatedSize(Ptr);
+  }
+
+  uptr TotalMemoryUsed() {
+    UNIMPLEMENTED();
+  }
+
+  void TestOnlyUnmap() {
+    UNIMPLEMENTED();
+  }
+
+  void InitCache(AllocatorCache *Cache) {
+    Cache->Init(&Stats);
+  }
+
+  void DestroyCache(AllocatorCache *Cache) {
+    Cache->Destroy(&Primary, &Stats);
+  }
+
+  void SwallowCache(AllocatorCache *Cache) {
+    Cache->Drain(&Primary);
+  }
+
+  void GetStats(AllocatorStatCounters StatType) const {
+    Stats.Get(StatType);
+  }
+
+  void PrintStats() {
+    UNIMPLEMENTED();
+  }
+
+  void ForceLock() {
+    UNIMPLEMENTED();
+  }
+
+  void ForceUnlock() {
+    UNIMPLEMENTED();
+  }
+
+  void ForEachChunk(ForEachChunkCallback Callback, void *Arg) {
+    UNIMPLEMENTED();
+  }
+
+ private:
+  PrimaryAllocator Primary;
+  SecondaryAllocator Secondary;
+  AllocatorGlobalStats Stats;
+  atomic_uint8_t MayReturnNull;
+};
+
+#endif  // SCUDO_ALLOCATOR_COMBINED_H_
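Note: the classes below are a deliberately simplified, malloc-backed mock
(MockPrimary, MockSecondary and MockCombinedAllocator are hypothetical and do
not exist in compiler-rt). They only illustrate the contract the new
ScudoCombinedAllocator relies on: the frontend computes FromPrimary once (from
CanAllocate, then keeps it in the chunk header) and passes that bit down to
Allocate/Deallocate, so the combined layer never needs PointerIsMine() to route
a request.

#include <cstddef>
#include <cstdio>
#include <cstdlib>

struct MockPrimary {    // stand-in for the size-class Primary allocator
  void *Allocate(size_t Size) { return std::malloc(Size); }
  void Deallocate(void *Ptr) { std::free(Ptr); }
};

struct MockSecondary {  // stand-in for ScudoLargeMmapAllocator
  void *Allocate(size_t Size) { return std::malloc(Size); }
  void Deallocate(void *Ptr) { std::free(Ptr); }
};

template <class Primary, class Secondary>
class MockCombinedAllocator {
 public:
  // The FromPrimary bit is provided by the caller, not rediscovered here.
  void *Allocate(size_t Size, bool FromPrimary) {
    return FromPrimary ? Primary_.Allocate(Size) : Secondary_.Allocate(Size);
  }
  void Deallocate(void *Ptr, bool FromPrimary) {
    if (FromPrimary)
      Primary_.Deallocate(Ptr);
    else
      Secondary_.Deallocate(Ptr);
  }

 private:
  Primary Primary_;
  Secondary Secondary_;
};

int main() {
  MockCombinedAllocator<MockPrimary, MockSecondary> Backend;
  void *Small = Backend.Allocate(128, /*FromPrimary=*/true);
  void *Large = Backend.Allocate(1 << 20, /*FromPrimary=*/false);
  Backend.Deallocate(Small, /*FromPrimary=*/true);
  Backend.Deallocate(Large, /*FromPrimary=*/false);
  std::printf("routing by the FromPrimary flag: OK\n");
  return 0;
}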
Index: lib/scudo/scudo_allocator_secondary.h
===================================================================
--- lib/scudo/scudo_allocator_secondary.h
+++ lib/scudo/scudo_allocator_secondary.h
@@ -26,20 +26,18 @@
   void Init(bool AllocatorMayReturnNull) {
     PageSize = GetPageSizeCached();
-    atomic_store(&MayReturnNull, AllocatorMayReturnNull, memory_order_relaxed);
+    atomic_store_relaxed(&MayReturnNull, AllocatorMayReturnNull);
   }

   void *Allocate(AllocatorStats *Stats, uptr Size, uptr Alignment) {
     // The Scudo frontend prevents us from allocating more than
     // MaxAllowedMallocSize, so integer overflow checks would be superfluous.
     uptr MapSize = Size + SecondaryHeaderSize;
+    if (Alignment > MinAlignment)
+      MapSize += Alignment;
     MapSize = RoundUpTo(MapSize, PageSize);
     // Account for 2 guard pages, one before and one after the chunk.
     MapSize += 2 * PageSize;
-    // The size passed to the Secondary comprises the alignment, if large
-    // enough. Subtract it here to get the requested size, including header.
-    if (Alignment > MinAlignment)
-      Size -= Alignment;

     uptr MapBeg = reinterpret_cast<uptr>(MmapNoAccess(MapSize));
     if (MapBeg == ~static_cast<uptr>(0))
@@ -56,24 +54,25 @@
     // area better and unmap extraneous memory. This will also ensure that the
     // offset and unused bytes field of the header stay small.
     if (Alignment > MinAlignment) {
-      if (UserBeg & (Alignment - 1))
-        UserBeg += Alignment - (UserBeg & (Alignment - 1));
-      CHECK_GE(UserBeg, MapBeg);
-      uptr NewMapBeg = RoundDownTo(UserBeg - HeadersSize, PageSize) - PageSize;
-      CHECK_GE(NewMapBeg, MapBeg);
+      if (!IsAligned(UserBeg, Alignment)) {
+        UserBeg = (UserBeg + Alignment) & ~(Alignment - 1);
+        CHECK_GE(UserBeg, MapBeg);
+        uptr NewMapBeg = RoundDownTo(UserBeg - HeadersSize, PageSize) -
+            PageSize;
+        CHECK_GE(NewMapBeg, MapBeg);
+        if (NewMapBeg != MapBeg) {
+          UnmapOrDie(reinterpret_cast<void *>(MapBeg), NewMapBeg - MapBeg);
+          MapBeg = NewMapBeg;
+        }
+      }
       uptr NewMapEnd = RoundUpTo(UserBeg + (Size - AlignedChunkHeaderSize),
                                  PageSize) + PageSize;
       CHECK_LE(NewMapEnd, MapEnd);
-      // Unmap the extra memory if it's large enough, on both sides.
-      uptr Diff = NewMapBeg - MapBeg;
-      if (Diff > PageSize)
-        UnmapOrDie(reinterpret_cast<void *>(MapBeg), Diff);
-      Diff = MapEnd - NewMapEnd;
-      if (Diff > PageSize)
-        UnmapOrDie(reinterpret_cast<void *>(NewMapEnd), Diff);
-      MapBeg = NewMapBeg;
-      MapEnd = NewMapEnd;
-      MapSize = NewMapEnd - NewMapBeg;
+      if (NewMapEnd != MapEnd) {
+        UnmapOrDie(reinterpret_cast<void *>(NewMapEnd), MapEnd - NewMapEnd);
+        MapEnd = NewMapEnd;
+      }
+      MapSize = MapEnd - MapBeg;
     }

     uptr UserEnd = UserBeg + (Size - AlignedChunkHeaderSize);
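Note: the hunks above make the Secondary over-allocate by Alignment up front
and then trim the mapping back to page boundaries around the user chunk. The
arithmetic-only sketch below (no real mmap; page size, header sizes and the
initial UserBeg placement are assumed values) replays that computation and
checks the invariants the patch enforces with CHECK_GE/CHECK_LE.

#include <cassert>
#include <cstdint>

using uptr = uintptr_t;

constexpr uptr PageSize = 4096;              // assumed
constexpr uptr MinAlignment = 16;            // assumed
constexpr uptr AlignedChunkHeaderSize = 16;  // assumed
constexpr uptr SecondaryHeaderSize = 16;     // assumed
constexpr uptr HeadersSize = SecondaryHeaderSize + AlignedChunkHeaderSize;

constexpr uptr roundUpTo(uptr X, uptr B) { return (X + B - 1) & ~(B - 1); }
constexpr uptr roundDownTo(uptr X, uptr B) { return X & ~(B - 1); }

int main() {
  const uptr Size = 100000;                  // includes the chunk header
  const uptr Alignment = 1UL << 16;          // 64K-aligned large allocation

  uptr MapSize = Size + SecondaryHeaderSize;
  if (Alignment > MinAlignment)
    MapSize += Alignment;                    // room to slide UserBeg forward
  MapSize = roundUpTo(MapSize, PageSize);
  MapSize += 2 * PageSize;                   // front and back guard pages

  uptr MapBeg = 0x10000000;                  // hypothetical mmap result
  uptr MapEnd = MapBeg + MapSize;
  // Simplified initial placement: after the front guard page and the headers.
  uptr UserBeg = MapBeg + PageSize + HeadersSize;

  if (Alignment > MinAlignment) {
    if (UserBeg & (Alignment - 1)) {
      UserBeg = (UserBeg + Alignment) & ~(Alignment - 1);
      assert(UserBeg >= MapBeg);
      // Give back the now-unused leading pages, keeping one guard page; the
      // patch calls UnmapOrDie() here when NewMapBeg != MapBeg.
      uptr NewMapBeg = roundDownTo(UserBeg - HeadersSize, PageSize) - PageSize;
      assert(NewMapBeg >= MapBeg);
      MapBeg = NewMapBeg;
    }
    // Likewise trim everything past the user data, keeping one guard page.
    uptr NewMapEnd =
        roundUpTo(UserBeg + (Size - AlignedChunkHeaderSize), PageSize) +
        PageSize;
    assert(NewMapEnd <= MapEnd);
    MapEnd = NewMapEnd;
    MapSize = MapEnd - MapBeg;
  }

  assert((UserBeg & (Alignment - 1)) == 0);
  assert(UserBeg + (Size - AlignedChunkHeaderSize) <= MapEnd - PageSize);
  assert(MapSize == MapEnd - MapBeg && MapSize % PageSize == 0);
  return 0;
}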
@@ -94,23 +93,23 @@
       Stats->Add(AllocatorStatMapped, MapSize - 2 * PageSize);
     }

-    return reinterpret_cast<void *>(UserBeg);
+    return reinterpret_cast<void *>(Ptr);
   }

   void *ReturnNullOrDieOnBadRequest() {
-    if (atomic_load(&MayReturnNull, memory_order_acquire))
+    if (atomic_load_relaxed(&MayReturnNull))
       return nullptr;
     ReportAllocatorCannotReturnNull(false);
   }

   void *ReturnNullOrDieOnOOM() {
-    if (atomic_load(&MayReturnNull, memory_order_acquire))
+    if (atomic_load_relaxed(&MayReturnNull))
       return nullptr;
     ReportAllocatorCannotReturnNull(true);
   }

   void SetMayReturnNull(bool AllocatorMayReturnNull) {
-    atomic_store(&MayReturnNull, AllocatorMayReturnNull, memory_order_release);
+    UNIMPLEMENTED();
   }

   void Deallocate(AllocatorStats *Stats, void *Ptr) {
@@ -168,9 +167,7 @@

  private:
   // A Secondary allocated chunk header contains the base of the mapping and
-  // its size. Currently, the base is always a page before the header, but
-  // we might want to extend that number in the future based on the size of
-  // the allocation.
+  // its size, which comprises the guard pages.
   struct SecondaryHeader {
     uptr MapBeg;
     uptr MapSize;
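Note: a sketch of the mapping layout implied by the updated comment above; the
field names mirror the patch, but the offsets and the deallocation behavior are
inferred from the Allocate() hunks, not quoted from the file.

// Approximate layout of a Secondary-backed allocation:
//
//   MapBeg                                                    MapBeg + MapSize
//   | guard page | (slack) | SecondaryHeader | chunk header | user data | (slack) | guard page |
//                                                            ^ UserBeg (Alignment-aligned)
//
// Keeping the bounds of the whole mapping (guard pages included) in the header
// is what lets Deallocate release the region with a single unmap.
#include <cstdint>

struct SecondaryHeaderSketch {
  uintptr_t MapBeg;   // first byte of the mapping (start of the front guard page)
  uintptr_t MapSize;  // size of the whole mapping, both guard pages included
};

static_assert(sizeof(SecondaryHeaderSketch) == 2 * sizeof(uintptr_t),
              "a two-word header, as in the patch");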