Index: lib/scudo/scudo_allocator.cpp
===================================================================
--- lib/scudo/scudo_allocator.cpp
+++ lib/scudo/scudo_allocator.cpp
@@ -269,14 +269,6 @@
   StaticSpinMutex GlobalPrngMutex;
   ScudoPrng GlobalPrng;
 
-  // The fallback caches are used when the thread local caches have been
-  // 'detroyed' on thread tear-down. They are protected by a Mutex as they can
-  // be accessed by different threads.
-  StaticSpinMutex FallbackMutex;
-  AllocatorCache FallbackAllocatorCache;
-  ScudoQuarantineCache FallbackQuarantineCache;
-  ScudoPrng FallbackPrng;
-
   u32 QuarantineChunksUpToSize;
 
   bool DeallocationTypeMismatch;
@@ -284,8 +276,7 @@
   bool DeleteSizeMismatch;
 
   explicit ScudoAllocator(LinkerInitialized)
-      : AllocatorQuarantine(LINKER_INITIALIZED),
-        FallbackQuarantineCache(LINKER_INITIALIZED) {}
+      : AllocatorQuarantine(LINKER_INITIALIZED) {}
 
   void init(const AllocatorOptions &Options) {
     // Verify that the header offset field can hold the maximum offset. In the
@@ -329,8 +320,6 @@
     QuarantineChunksUpToSize = Options.QuarantineChunksUpToSize;
     GlobalPrng.init();
     Cookie = GlobalPrng.getU64();
-    BackendAllocator.initCache(&FallbackAllocatorCache);
-    FallbackPrng.init();
   }
 
   // Helper function that checks for a valid Scudo chunk. nullptr isn't.
@@ -374,16 +363,9 @@
     if (FromPrimary) {
       AllocSize = AlignedSize;
       ScudoTSD *TSD = getTSDAndLock();
-      if (LIKELY(TSD)) {
-        Salt = TSD->Prng.getU8();
-        Ptr = BackendAllocator.allocatePrimary(&TSD->Cache, AllocSize);
-        TSD->unlock();
-      } else {
-        SpinMutexLock l(&FallbackMutex);
-        Salt = FallbackPrng.getU8();
-        Ptr = BackendAllocator.allocatePrimary(&FallbackAllocatorCache,
-                                               AllocSize);
-      }
+      Salt = TSD->Prng.getU8();
+      Ptr = BackendAllocator.allocatePrimary(&TSD->Cache, AllocSize);
+      TSD->unlock();
     } else {
       {
         SpinMutexLock l(&GlobalPrngMutex);
@@ -446,13 +428,8 @@
       void *Ptr = Chunk->getAllocBeg(Header);
       if (Header->FromPrimary) {
         ScudoTSD *TSD = getTSDAndLock();
-        if (LIKELY(TSD)) {
-          getBackendAllocator().deallocatePrimary(&TSD->Cache, Ptr);
-          TSD->unlock();
-        } else {
-          SpinMutexLock Lock(&FallbackMutex);
-          getBackendAllocator().deallocatePrimary(&FallbackAllocatorCache, Ptr);
-        }
+        getBackendAllocator().deallocatePrimary(&TSD->Cache, Ptr);
+        TSD->unlock();
       } else {
         getBackendAllocator().deallocateSecondary(Ptr);
       }
@@ -467,17 +444,10 @@
       NewHeader.State = ChunkQuarantine;
       Chunk->compareExchangeHeader(&NewHeader, Header);
       ScudoTSD *TSD = getTSDAndLock();
-      if (LIKELY(TSD)) {
-        AllocatorQuarantine.Put(getQuarantineCache(TSD),
-                                QuarantineCallback(&TSD->Cache),
-                                Chunk, EstimatedSize);
-        TSD->unlock();
-      } else {
-        SpinMutexLock l(&FallbackMutex);
-        AllocatorQuarantine.Put(&FallbackQuarantineCache,
-                                QuarantineCallback(&FallbackAllocatorCache),
-                                Chunk, EstimatedSize);
-      }
+      AllocatorQuarantine.Put(getQuarantineCache(TSD),
+                              QuarantineCallback(&TSD->Cache),
+                              Chunk, EstimatedSize);
+      TSD->unlock();
     }
   }
 
@@ -625,7 +595,8 @@
   Instance.init(Options);
 }
 
-void ScudoTSD::init() {
+void ScudoTSD::init(bool Shared) {
+  UnlockRequired = Shared;
   getBackendAllocator().initCache(&Cache);
   Prng.init();
   memset(QuarantineCachePlaceHolder, 0, sizeof(QuarantineCachePlaceHolder));
Index: lib/scudo/scudo_tls.h
===================================================================
--- lib/scudo/scudo_tls.h
+++ lib/scudo/scudo_tls.h
@@ -24,16 +24,43 @@
 
 namespace __scudo {
 
-// Platform specific base thread context definitions.
-#include "scudo_tls_context_android.inc"
-#include "scudo_tls_context_linux.inc"
-
-struct ALIGNED(64) ScudoTSD : public ScudoTSDPlatform {
+struct ALIGNED(64) ScudoTSD {
   AllocatorCache Cache;
   ScudoPrng Prng;
   uptr QuarantineCachePlaceHolder[4];
-  void init();
+
+  void init(bool Shared);
   void commitBack();
+
+  INLINE bool tryLock() {
+    if (Mutex.TryLock()) {
+      atomic_store_relaxed(&Precedence, 0);
+      return true;
+    }
+    if (atomic_load_relaxed(&Precedence) == 0)
+      atomic_store_relaxed(&Precedence, NanoTime());
+    return false;
+  }
+
+  INLINE void lock() {
+    Mutex.Lock();
+    atomic_store_relaxed(&Precedence, 0);
+  }
+
+  INLINE void unlock() {
+    if (!UnlockRequired)
+      return;
+    Mutex.Unlock();
+  }
+
+  INLINE u64 getPrecedence() {
+    return atomic_load_relaxed(&Precedence);
+  }
+
+ private:
+  bool UnlockRequired;
+  StaticSpinMutex Mutex;
+  atomic_uint64_t Precedence;
 };
 
 void initThread(bool MinimalInit);
Index: lib/scudo/scudo_tls_android.cpp
===================================================================
--- lib/scudo/scudo_tls_android.cpp
+++ lib/scudo/scudo_tls_android.cpp
@@ -50,7 +50,7 @@
   TSDs = reinterpret_cast<ScudoTSD *>(
       MmapOrDie(sizeof(ScudoTSD) * NumberOfTSDs, "ScudoTSDs"));
   for (u32 i = 0; i < NumberOfTSDs; i++)
-    TSDs[i].init();
+    TSDs[i].init(/*Shared=*/true);
 }
 
 void initThread(bool MinimalInit) {
Index: lib/scudo/scudo_tls_android.inc
===================================================================
--- lib/scudo/scudo_tls_android.inc
+++ lib/scudo/scudo_tls_android.inc
@@ -11,9 +11,6 @@
 ///
 //===----------------------------------------------------------------------===//
 
-#ifndef SCUDO_TLS_ANDROID_H_
-#define SCUDO_TLS_ANDROID_H_
-
 #ifndef SCUDO_TLS_H_
 # error "This file must be included inside scudo_tls.h."
 #endif  // SCUDO_TLS_H_
@@ -30,7 +27,7 @@
 
 ALWAYS_INLINE ScudoTSD *getTSDAndLock() {
   ScudoTSD *TSD = reinterpret_cast<ScudoTSD *>(*get_android_tls_ptr());
-  CHECK(TSD);
+  CHECK(TSD && "No TSD associated with the current thread!");
   // Try to lock the currently associated context.
   if (TSD->tryLock())
     return TSD;
@@ -39,5 +36,3 @@
 }
 
 #endif  // SANITIZER_LINUX && SANITIZER_ANDROID
-
-#endif  // SCUDO_TLS_ANDROID_H_
Index: lib/scudo/scudo_tls_context_android.inc
===================================================================
--- lib/scudo/scudo_tls_context_android.inc
+++ /dev/null
@@ -1,54 +0,0 @@
-//===-- scudo_tls_context_android.inc ---------------------------*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-///
-/// Android specific base thread context definition.
-///
-//===----------------------------------------------------------------------===//
-
-#ifndef SCUDO_TLS_CONTEXT_ANDROID_INC_
-#define SCUDO_TLS_CONTEXT_ANDROID_INC_
-
-#ifndef SCUDO_TLS_H_
-# error "This file must be included inside scudo_tls.h."
-#endif  // SCUDO_TLS_H_
-
-#if SANITIZER_LINUX && SANITIZER_ANDROID
-
-struct ScudoTSDPlatform {
-  INLINE bool tryLock() {
-    if (Mutex.TryLock()) {
-      atomic_store_relaxed(&Precedence, 0);
-      return true;
-    }
-    if (atomic_load_relaxed(&Precedence) == 0)
-      atomic_store_relaxed(&Precedence, NanoTime());
-    return false;
-  }
-
-  INLINE void lock() {
-    Mutex.Lock();
-    atomic_store_relaxed(&Precedence, 0);
-  }
-
-  INLINE void unlock() {
-    Mutex.Unlock();
-  }
-
-  INLINE u64 getPrecedence() {
-    return atomic_load_relaxed(&Precedence);
-  }
-
- private:
-  StaticSpinMutex Mutex;
-  atomic_uint64_t Precedence;
-};
-
-#endif  // SANITIZER_LINUX && SANITIZER_ANDROID
-
-#endif  // SCUDO_TLS_CONTEXT_ANDROID_INC_
Index: lib/scudo/scudo_tls_context_linux.inc
===================================================================
--- lib/scudo/scudo_tls_context_linux.inc
+++ /dev/null
@@ -1,29 +0,0 @@
-//===-- scudo_tls_context_linux.inc -----------------------------*- C++ -*-===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-///
-/// Linux specific base thread context definition.
-///
-//===----------------------------------------------------------------------===//
-
-#ifndef SCUDO_TLS_CONTEXT_LINUX_INC_
-#define SCUDO_TLS_CONTEXT_LINUX_INC_
-
-#ifndef SCUDO_TLS_H_
-# error "This file must be included inside scudo_tls.h."
-#endif  // SCUDO_TLS_H_
-
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
-
-struct ScudoTSDPlatform {
-  ALWAYS_INLINE void unlock() {}
-};
-
-#endif  // SANITIZER_LINUX && !SANITIZER_ANDROID
-
-#endif  // SCUDO_TLS_CONTEXT_LINUX_INC_
Index: lib/scudo/scudo_tls_linux.cpp
===================================================================
--- lib/scudo/scudo_tls_linux.cpp
+++ lib/scudo/scudo_tls_linux.cpp
@@ -30,6 +30,10 @@
 __attribute__((tls_model("initial-exec")))
 THREADLOCAL ScudoTSD TSD;
 
+// Fallback TSD for when the thread isn't initialized yet or is torn down. It
+// can be shared between multiple threads and as such must be locked.
+ScudoTSD FallbackTSD;
+
 static void teardownThread(void *Ptr) {
   uptr I = reinterpret_cast<uptr>(Ptr);
   // The glibc POSIX thread-local-storage deallocation routine calls user
@@ -51,6 +55,7 @@
 static void initOnce() {
   CHECK_EQ(pthread_key_create(&PThreadKey, teardownThread), 0);
   initScudo();
+  FallbackTSD.init(/*Shared=*/true);
 }
 
 void initThread(bool MinimalInit) {
@@ -59,7 +64,7 @@
     return;
   CHECK_EQ(pthread_setspecific(PThreadKey, reinterpret_cast<void *>(
       GetPthreadDestructorIterations())), 0);
-  TSD.init();
+  TSD.init(/*Shared=*/false);
   ScudoThreadState = ThreadInitialized;
 }
 
Index: lib/scudo/scudo_tls_linux.inc
===================================================================
--- lib/scudo/scudo_tls_linux.inc
+++ lib/scudo/scudo_tls_linux.inc
@@ -12,9 +12,6 @@
 ///
 //===----------------------------------------------------------------------===//
 
-#ifndef SCUDO_TLS_LINUX_H_
-#define SCUDO_TLS_LINUX_H_
-
 #ifndef SCUDO_TLS_H_
 # error "This file must be included inside scudo_tls.h."
 #endif  // SCUDO_TLS_H_
@@ -31,6 +28,8 @@
 __attribute__((tls_model("initial-exec")))
 extern THREADLOCAL ScudoTSD TSD;
 
+extern ScudoTSD FallbackTSD;
+
 ALWAYS_INLINE void initThreadMaybe(bool MinimalInit = false) {
   if (LIKELY(ScudoThreadState != ThreadNotInitialized))
     return;
@@ -38,11 +37,11 @@
 }
 
 ALWAYS_INLINE ScudoTSD *getTSDAndLock() {
-  if (UNLIKELY(ScudoThreadState != ThreadInitialized))
-    return nullptr;
+  if (UNLIKELY(ScudoThreadState != ThreadInitialized)) {
+    FallbackTSD.lock();
+    return &FallbackTSD;
+  }
   return &TSD;
 }
 
 #endif  // SANITIZER_LINUX && !SANITIZER_ANDROID
-
-#endif  // SCUDO_TLS_LINUX_H_
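
The sketch below is not part of the patch. It is a minimal, self-contained illustration, using hypothetical names (TSDSketch, ThreadTSD, getTSDAndLockSketch) and std::mutex as a stand-in for StaticSpinMutex, of the locking model the diff converges on: getTSDAndLock() always hands back a usable TSD, a TSD initialized as exclusive makes unlock() a no-op, and a shared TSD (the Linux FallbackTSD or an Android pool entry) takes a real lock.

#include <mutex>

// Hypothetical stand-in for ScudoTSD; it only models the Shared/UnlockRequired
// scheme and is not the actual Scudo implementation.
struct TSDSketch {
  void init(bool Shared) { UnlockRequired = Shared; }
  bool tryLock() { return Mutex.try_lock(); }  // Android: probe a pool entry.
  void lock() { Mutex.lock(); }                // Linux: lock the shared fallback.
  void unlock() {
    if (UnlockRequired)  // Exclusive per-thread TSDs skip the unlock entirely.
      Mutex.unlock();
  }
 private:
  bool UnlockRequired = false;
  std::mutex Mutex;  // Stands in for StaticSpinMutex.
};

TSDSketch ThreadTSD;             // Would be THREADLOCAL in Scudo.
TSDSketch FallbackTSD;           // Shared between threads, so it must be locked.
bool ThreadInitialized = false;  // Stands in for ScudoThreadState.

// Mirrors the shape of the new Linux getTSDAndLock(): the caller always gets a
// valid, appropriately locked TSD and can call unlock() unconditionally.
TSDSketch *getTSDAndLockSketch() {
  if (!ThreadInitialized) {
    FallbackTSD.lock();
    return &FallbackTSD;
  }
  return &ThreadTSD;
}

int main() {
  ThreadTSD.init(/*Shared=*/false);   // Exclusive: unlock() becomes a no-op.
  FallbackTSD.init(/*Shared=*/true);  // Shared: lock() and unlock() are real.

  TSDSketch *TSD = getTSDAndLockSketch();
  // ... allocate or deallocate through the TSD-local cache here ...
  TSD->unlock();
  return 0;
}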