Index: lib/scudo/scudo_allocator.cpp
===================================================================
--- lib/scudo/scudo_allocator.cpp
+++ lib/scudo/scudo_allocator.cpp
@@ -277,6 +277,9 @@
   ScudoBackendAllocator BackendAllocator;
   ScudoQuarantine AllocatorQuarantine;
 
+  StaticSpinMutex GlobalPrngMutex;
+  ScudoPrng GlobalPrng;
+
   // The fallback caches are used when the thread local caches have been
   // 'detroyed' on thread tear-down. They are protected by a Mutex as they can
   // be accessed by different threads.
@@ -303,10 +306,10 @@
     // result, the maximum offset will be at most the maximum alignment for the
     // last size class minus the header size, in multiples of MinAlignment.
     UnpackedHeader Header = {};
-    uptr MaxPrimaryAlignment = 1 << MostSignificantSetBitIndex(
-        SizeClassMap::kMaxSize - MinAlignment);
-    uptr MaxOffset = (MaxPrimaryAlignment - AlignedChunkHeaderSize) >>
-        MinAlignmentLog;
+    uptr MaxPrimaryAlignment =
+        1 << MostSignificantSetBitIndex(SizeClassMap::kMaxSize - MinAlignment);
+    uptr MaxOffset =
+        (MaxPrimaryAlignment - AlignedChunkHeaderSize) >> MinAlignmentLog;
     Header.Offset = MaxOffset;
     if (Header.Offset != MaxOffset) {
       dieWithMessage("ERROR: the maximum possible offset doesn't fit in the "
@@ -332,9 +335,10 @@
     AllocatorQuarantine.Init(
         static_cast<uptr>(Options.QuarantineSizeMb) << 20,
         static_cast<uptr>(Options.ThreadLocalQuarantineSizeKb) << 10);
+    GlobalPrng.init();
+    Cookie = GlobalPrng.getU64();
     BackendAllocator.InitCache(&FallbackAllocatorCache);
     FallbackPrng.init();
-    Cookie = FallbackPrng.getU64();
   }
 
   // Helper function that checks for a valid Scudo chunk. nullptr isn't.
@@ -374,28 +378,32 @@
 
     void *Ptr;
     u8 Salt;
-    uptr AllocationSize = FromPrimary ? AlignedSize : NeededSize;
-    uptr AllocationAlignment = FromPrimary ? MinAlignment : Alignment;
-    ScudoThreadContext *ThreadContext = getThreadContextAndLock();
-    if (LIKELY(ThreadContext)) {
-      Salt = getPrng(ThreadContext)->getU8();
-      Ptr = BackendAllocator.Allocate(getAllocatorCache(ThreadContext),
-                                      AllocationSize, AllocationAlignment,
-                                      FromPrimary);
-      ThreadContext->unlock();
+    if (FromPrimary) {
+      ScudoThreadContext *ThreadContext = getThreadContextAndLock();
+      if (LIKELY(ThreadContext)) {
+        Salt = getPrng(ThreadContext)->getU8();
+        Ptr = BackendAllocator.Allocate(
+            getAllocatorCache(ThreadContext), AlignedSize, MinAlignment, true);
+        ThreadContext->unlock();
+      } else {
+        SpinMutexLock l(&FallbackMutex);
+        Salt = FallbackPrng.getU8();
+        Ptr = BackendAllocator.Allocate(
+            &FallbackAllocatorCache, AlignedSize, MinAlignment, true);
+      }
     } else {
-      SpinMutexLock l(&FallbackMutex);
-      Salt = FallbackPrng.getU8();
-      Ptr = BackendAllocator.Allocate(&FallbackAllocatorCache, AllocationSize,
-                                      AllocationAlignment, FromPrimary);
+      {
+        SpinMutexLock l(&GlobalPrngMutex);
+        Salt = GlobalPrng.getU8();
+      }
+      Ptr = BackendAllocator.Allocate(nullptr, NeededSize, Alignment, false);
     }
     if (UNLIKELY(!Ptr))
      return FailureHandler::OnOOM();
 
     // If requested, we will zero out the entire contents of the returned chunk.
     if ((ForceZeroContents || ZeroContents) && FromPrimary)
-      memset(Ptr, 0,
-             BackendAllocator.GetActuallyAllocatedSize(Ptr, FromPrimary));
+      memset(Ptr, 0, BackendAllocator.GetActuallyAllocatedSize(Ptr, true));
 
     UnpackedHeader Header = {};
     uptr AllocBeg = reinterpret_cast<uptr>(Ptr);
@@ -409,11 +417,12 @@
       uptr Offset = UserBeg - AlignedChunkHeaderSize - AllocBeg;
       Header.Offset = Offset >> MinAlignmentLog;
     }
-    CHECK_LE(UserBeg + Size, AllocBeg + AllocationSize);
+    CHECK_LE(UserBeg + Size,
+             AllocBeg + (FromPrimary ? AlignedSize : NeededSize));
     Header.State = ChunkAllocated;
     Header.AllocType = Type;
     if (FromPrimary) {
-      Header.FromPrimary = FromPrimary;
+      Header.FromPrimary = 1;
       Header.SizeOrUnusedBytes = Size;
     } else {
       // The secondary fits the allocations to a page, so the amount of unused
@@ -424,7 +433,7 @@
       if (TrailingBytes)
         Header.SizeOrUnusedBytes = PageSize - TrailingBytes;
     }
-    Header.Salt = static_cast<u8>(Salt);
+    Header.Salt = Salt;
     getScudoChunk(UserBeg)->storeHeader(&Header);
     void *UserPtr = reinterpret_cast<void *>(UserBeg);
     // if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(UserPtr, Size);
@@ -442,15 +451,18 @@
     if (BypassQuarantine) {
       Chunk->eraseHeader();
       void *Ptr = Chunk->getAllocBeg(Header);
-      ScudoThreadContext *ThreadContext = getThreadContextAndLock();
-      if (LIKELY(ThreadContext)) {
-        getBackendAllocator().Deallocate(getAllocatorCache(ThreadContext), Ptr,
-                                         FromPrimary);
-        ThreadContext->unlock();
+      if (FromPrimary) {
+        ScudoThreadContext *ThreadContext = getThreadContextAndLock();
+        if (LIKELY(ThreadContext)) {
+          getBackendAllocator().Deallocate(
+              getAllocatorCache(ThreadContext), Ptr, true);
+          ThreadContext->unlock();
+        } else {
+          SpinMutexLock Lock(&FallbackMutex);
+          getBackendAllocator().Deallocate(&FallbackAllocatorCache, Ptr, true);
+        }
       } else {
-        SpinMutexLock Lock(&FallbackMutex);
-        getBackendAllocator().Deallocate(&FallbackAllocatorCache, Ptr,
-                                         FromPrimary);
+        getBackendAllocator().Deallocate(nullptr, Ptr, false);
       }
     } else {
       UnpackedHeader NewHeader = *Header;
@@ -580,7 +592,7 @@
 
   void *calloc(uptr NMemB, uptr Size) {
     initThreadMaybe();
-    if (CheckForCallocOverflow(NMemB, Size))
+    if (UNLIKELY(CheckForCallocOverflow(NMemB, Size)))
       return FailureHandler::OnBadRequest();
     return allocate(NMemB * Size, MinAlignment, FromMalloc, true);
   }
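
A minimal standalone sketch of the salt-drawing pattern the hunks above introduce for Secondary-backed allocations: a process-global PRNG behind a mutex, with the lock scoped to the byte draw alone so the backend allocation itself runs unlocked. std::mutex and the xorshift64* generator below are stand-ins for the sanitizer-internal StaticSpinMutex and ScudoPrng, and getSecondarySalt is a hypothetical helper, not part of the patch.

#include <cstdint>
#include <cstdio>
#include <mutex>

namespace {

// Stand-in PRNG (xorshift64*); the patch itself uses ScudoPrng.
struct TinyPrng {
  uint64_t State = 1;
  void init(uint64_t Seed) { State = Seed ? Seed : 1; }  // seed must be nonzero
  uint64_t getU64() {
    State ^= State >> 12;
    State ^= State << 25;
    State ^= State >> 27;
    return State * 0x2545F4914F6CDD1Dull;
  }
  uint8_t getU8() { return static_cast<uint8_t>(getU64()); }
};

// Globals mirroring GlobalPrngMutex/GlobalPrng from the patch.
std::mutex GlobalPrngMutex;
TinyPrng GlobalPrng;

// Hypothetical helper: draw a header salt for a Secondary-backed allocation.
uint8_t getSecondarySalt() {
  // Keep the critical section to the draw itself, as the scoped
  // SpinMutexLock block in allocate() does; callers allocate unlocked.
  std::lock_guard<std::mutex> Lock(GlobalPrngMutex);
  return GlobalPrng.getU8();
}

}  // namespace

int main() {
  GlobalPrng.init(0x9E3779B97F4A7C15ull);
  std::printf("salt: %u\n", static_cast<unsigned>(getSecondarySalt()));
  return 0;
}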