Index: compiler-rt/trunk/lib/scudo/standalone/atomic_helpers.h
===================================================================
--- compiler-rt/trunk/lib/scudo/standalone/atomic_helpers.h
+++ compiler-rt/trunk/lib/scudo/standalone/atomic_helpers.h
@@ -126,6 +126,14 @@
   atomic_store(A, V, memory_order_relaxed);
 }
 
+template <typename T>
+INLINE typename T::Type atomic_compare_exchange(volatile T *A,
+                                                typename T::Type Cmp,
+                                                typename T::Type Xchg) {
+  atomic_compare_exchange_strong(A, &Cmp, Xchg, memory_order_acquire);
+  return Cmp;
+}
+
 } // namespace scudo
 
 #endif // SCUDO_ATOMIC_H_
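Note (editorial, not part of the patch): the new atomic_compare_exchange() helper returns the value that was actually observed in the atomic, so callers detect success by comparing the result against the expected value. A minimal usage sketch against the scudo atomic helpers; the exampleTryAcquire() function is hypothetical:

// Editorial sketch, not part of the patch.
#include "atomic_helpers.h"

namespace scudo {
// Returns true if we transitioned Flag from 0 to 1, i.e. we now own it.
bool exampleTryAcquire(atomic_u32 *Flag) {
  // The wrapper returns the value seen in *Flag; seeing the expected value
  // (0) means the exchange to 1 succeeded.
  return atomic_compare_exchange(Flag, 0U, 1U) == 0U;
}
} // namespace scudo
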
Index: compiler-rt/trunk/lib/scudo/standalone/bytemap.h
===================================================================
--- compiler-rt/trunk/lib/scudo/standalone/bytemap.h
+++ compiler-rt/trunk/lib/scudo/standalone/bytemap.h
@@ -45,8 +45,8 @@
         map(nullptr, sizeof(atomic_uptr) * Level1Size, "scudo:bytemap"));
   }
   void init() {
-    initLinkerInitialized();
     Mutex.init();
+    initLinkerInitialized();
   }
 
   void reset() {
@@ -92,7 +92,7 @@
   u8 *getOrCreate(uptr Index) {
     u8 *Res = get(Index);
     if (!Res) {
-      SpinMutexLock L(&Mutex);
+      ScopedLock L(Mutex);
       if (!(Res = get(Index))) {
         Res = reinterpret_cast<u8 *>(map(nullptr, Level2Size, "scudo:bytemap"));
         atomic_store(&Level1Map[Index], reinterpret_cast<uptr>(Res),
@@ -103,7 +103,7 @@
   }
 
   atomic_uptr *Level1Map;
-  StaticSpinMutex Mutex;
+  HybridMutex Mutex;
 };
 
 } // namespace scudo
Index: compiler-rt/trunk/lib/scudo/standalone/common.h
===================================================================
--- compiler-rt/trunk/lib/scudo/standalone/common.h
+++ compiler-rt/trunk/lib/scudo/standalone/common.h
@@ -115,11 +115,12 @@
 
 // Platform specific functions.
 
-void yieldPlatform();
-
 extern uptr PageSizeCached;
 uptr getPageSizeSlow();
 INLINE uptr getPageSizeCached() {
+  // Bionic uses a hardcoded value.
+  if (SCUDO_ANDROID)
+    return 4096U;
   if (LIKELY(PageSizeCached))
     return PageSizeCached;
   return getPageSizeSlow();
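Note (editorial, not part of the patch): getOrCreate() above is a double-checked creation path: an unlocked fast-path read, then a re-check under the new HybridMutex before the level-2 array is actually mapped. A generalized sketch of that pattern with hypothetical names; the new T() placeholder stands in for the map() call in the patch:

// Editorial sketch, not part of the patch.
#include "atomic_helpers.h"
#include "mutex.h"

namespace scudo {
template <typename T> T *getOrCreateSlot(atomic_uptr *Slot, HybridMutex &M) {
  // Fast path: already created, no locking needed.
  T *Ptr = reinterpret_cast<T *>(atomic_load(Slot, memory_order_acquire));
  if (Ptr)
    return Ptr;
  ScopedLock L(M);
  // Re-check under the lock: another thread may have won the race.
  Ptr = reinterpret_cast<T *>(atomic_load(Slot, memory_order_acquire));
  if (!Ptr) {
    Ptr = new T(); // Placeholder for the actual map() call.
    atomic_store(Slot, reinterpret_cast<uptr>(Ptr), memory_order_release);
  }
  return Ptr;
}
} // namespace scudo
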
Index: compiler-rt/trunk/lib/scudo/standalone/fuchsia.cc
===================================================================
--- compiler-rt/trunk/lib/scudo/standalone/fuchsia.cc
+++ compiler-rt/trunk/lib/scudo/standalone/fuchsia.cc
@@ -23,11 +23,6 @@
 
 namespace scudo {
 
-void yieldPlatform() {
-  const zx_status_t Status = _zx_nanosleep(0);
-  CHECK_EQ(Status, ZX_OK);
-}
-
 uptr getPageSize() { return PAGE_SIZE; }
 
 void NORETURN die() { __builtin_trap(); }
@@ -155,18 +150,20 @@
 // Note: we need to flag these methods with __TA_NO_THREAD_SAFETY_ANALYSIS
 // because the Fuchsia implementation of sync_mutex_t has clang thread safety
 // annotations. Were we to apply proper capability annotations to the top level
-// BlockingMutex class itself, they would not be needed. As it stands, the
+// HybridMutex class itself, they would not be needed. As it stands, the
 // thread analysis thinks that we are locking the mutex and accidentally leaving
 // it locked on the way out.
-void BlockingMutex::lock() __TA_NO_THREAD_SAFETY_ANALYSIS {
+bool HybridMutex::tryLock() __TA_NO_THREAD_SAFETY_ANALYSIS {
   // Size and alignment must be compatible between both types.
-  COMPILER_CHECK(sizeof(sync_mutex_t) <= sizeof(OpaqueStorage));
-  COMPILER_CHECK(!(alignof(decltype(OpaqueStorage)) % alignof(sync_mutex_t)));
-  sync_mutex_lock(reinterpret_cast<sync_mutex_t *>(OpaqueStorage));
+  return sync_mutex_trylock(&M) == ZX_OK;
+}
+
+void HybridMutex::lockSlow() __TA_NO_THREAD_SAFETY_ANALYSIS {
+  sync_mutex_lock(&M);
 }
 
-void BlockingMutex::unlock() __TA_NO_THREAD_SAFETY_ANALYSIS {
-  sync_mutex_unlock(reinterpret_cast<sync_mutex_t *>(OpaqueStorage));
+void HybridMutex::unlock() __TA_NO_THREAD_SAFETY_ANALYSIS {
+  sync_mutex_unlock(&M);
 }
 
 u64 getMonotonicTime() { return _zx_clock_get_monotonic(); }
Index: compiler-rt/trunk/lib/scudo/standalone/linux.cc
===================================================================
--- compiler-rt/trunk/lib/scudo/standalone/linux.cc
+++ compiler-rt/trunk/lib/scudo/standalone/linux.cc
@@ -37,8 +37,6 @@
 
 namespace scudo {
 
-void yieldPlatform() { sched_yield(); }
-
 uptr getPageSize() { return static_cast<uptr>(sysconf(_SC_PAGESIZE)); }
 
 void NORETURN die() { abort(); }
@@ -46,15 +44,18 @@
 void *map(void *Addr, uptr Size, UNUSED const char *Name, uptr Flags,
           UNUSED MapPlatformData *Data) {
   int MmapFlags = MAP_PRIVATE | MAP_ANON;
-  if (Flags & MAP_NOACCESS)
+  int MmapProt;
+  if (Flags & MAP_NOACCESS) {
     MmapFlags |= MAP_NORESERVE;
+    MmapProt = PROT_NONE;
+  } else {
+    MmapProt = PROT_READ | PROT_WRITE;
+  }
   if (Addr) {
     // Currently no scenario for a noaccess mapping with a fixed address.
     DCHECK_EQ(Flags & MAP_NOACCESS, 0);
     MmapFlags |= MAP_FIXED;
   }
-  const int MmapProt =
-      (Flags & MAP_NOACCESS) ? PROT_NONE : PROT_READ | PROT_WRITE;
   void *P = mmap(Addr, Size, MmapProt, MmapFlags, -1, 0);
   if (P == MAP_FAILED) {
     if (!(Flags & MAP_ALLOWNOMEM) || errno != ENOMEM)
@@ -84,22 +85,34 @@
 // Calling getenv should be fine (c)(tm) at any time.
 const char *getEnv(const char *Name) { return getenv(Name); }
 
-void BlockingMutex::lock() {
-  atomic_u32 *M = reinterpret_cast<atomic_u32 *>(&OpaqueStorage);
-  if (atomic_exchange(M, MtxLocked, memory_order_acquire) == MtxUnlocked)
+namespace {
+enum State : u32 { Unlocked = 0, Locked = 1, Sleeping = 2 };
+}
+
+bool HybridMutex::tryLock() {
+  return atomic_compare_exchange(&M, Unlocked, Locked) == Unlocked;
+}
+
+// The following is based on https://akkadia.org/drepper/futex.pdf.
+void HybridMutex::lockSlow() {
+  u32 V = atomic_compare_exchange(&M, Unlocked, Locked);
+  if (V == Unlocked)
     return;
-  while (atomic_exchange(M, MtxSleeping, memory_order_acquire) != MtxUnlocked)
-    syscall(SYS_futex, reinterpret_cast<uptr>(OpaqueStorage),
-            FUTEX_WAIT_PRIVATE, MtxSleeping, nullptr, nullptr, 0);
+  if (V != Sleeping)
+    V = atomic_exchange(&M, Sleeping, memory_order_acquire);
+  while (V != Unlocked) {
+    syscall(SYS_futex, reinterpret_cast<uptr>(&M), FUTEX_WAIT_PRIVATE, Sleeping,
+            nullptr, nullptr, 0);
+    V = atomic_exchange(&M, Sleeping, memory_order_acquire);
+  }
 }
 
-void BlockingMutex::unlock() {
-  atomic_u32 *M = reinterpret_cast<atomic_u32 *>(&OpaqueStorage);
-  const u32 V = atomic_exchange(M, MtxUnlocked, memory_order_release);
-  DCHECK_NE(V, MtxUnlocked);
-  if (V == MtxSleeping)
-    syscall(SYS_futex, reinterpret_cast<uptr>(OpaqueStorage),
-            FUTEX_WAKE_PRIVATE, 1, nullptr, nullptr, 0);
+void HybridMutex::unlock() {
+  if (atomic_fetch_sub(&M, 1U, memory_order_release) != Locked) {
+    atomic_store(&M, Unlocked, memory_order_release);
+    syscall(SYS_futex, reinterpret_cast<uptr>(&M), FUTEX_WAKE_PRIVATE, 1,
+            nullptr, nullptr, 0);
+  }
 }
 
 u64 getMonotonicTime() {
@@ -141,8 +154,8 @@
 }
 
 void outputRaw(const char *Buffer) {
-  static StaticSpinMutex Mutex;
-  SpinMutexLock L(&Mutex);
+  static HybridMutex Mutex;
+  ScopedLock L(Mutex);
   write(2, Buffer, strlen(Buffer));
 }
 
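Note (editorial, not part of the patch): the Linux HybridMutex follows the three-state futex protocol from the Drepper paper referenced above (Unlocked/Locked/Sleeping). A simplified, self-contained sketch of the same protocol using std::atomic, which omits the initial plain CAS-to-Locked optimization the patch keeps:

// Editorial sketch, not part of the patch.
#include <atomic>
#include <cstdint>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

enum : uint32_t { Unlocked = 0, Locked = 1, Sleeping = 2 };

void lockSlowSketch(std::atomic<uint32_t> &M) {
  // Advertise contention by exchanging in Sleeping; if the displaced value was
  // Unlocked we own the mutex, otherwise wait until the holder wakes us.
  while (M.exchange(Sleeping, std::memory_order_acquire) != Unlocked)
    syscall(SYS_futex, reinterpret_cast<uint32_t *>(&M), FUTEX_WAIT_PRIVATE,
            Sleeping, nullptr, nullptr, 0);
}

void unlockSketch(std::atomic<uint32_t> &M) {
  // fetch_sub(1) turns Locked into Unlocked for the uncontended case; if the
  // old value was Sleeping there may be waiters, so reset and wake one.
  if (M.fetch_sub(1, std::memory_order_release) != Locked) {
    M.store(Unlocked, std::memory_order_release);
    syscall(SYS_futex, reinterpret_cast<uint32_t *>(&M), FUTEX_WAKE_PRIVATE, 1,
            nullptr, nullptr, 0);
  }
}
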
Index: compiler-rt/trunk/lib/scudo/standalone/mutex.h
===================================================================
--- compiler-rt/trunk/lib/scudo/standalone/mutex.h
+++ compiler-rt/trunk/lib/scudo/standalone/mutex.h
@@ -12,82 +12,62 @@
 #include "atomic_helpers.h"
 #include "common.h"
 
+#include <string.h>
+
+#if SCUDO_FUCHSIA
+#include <lib/sync/mutex.h> // for sync_mutex_t
+#endif
+
 namespace scudo {
 
-class StaticSpinMutex {
+class HybridMutex {
 public:
-  void init() { atomic_store_relaxed(&State, 0); }
-
-  void lock() {
+  void init() { memset(this, 0, sizeof(*this)); }
+  bool tryLock();
+  NOINLINE void lock() {
     if (tryLock())
       return;
-    lockSlow();
-  }
-
-  bool tryLock() {
-    return atomic_exchange(&State, 1, memory_order_acquire) == 0;
-  }
-
-  void unlock() { atomic_store(&State, 0, memory_order_release); }
-
-  void checkLocked() { CHECK_EQ(atomic_load_relaxed(&State), 1); }
-
-private:
-  atomic_u8 State;
-
-  void NOINLINE lockSlow() {
-    for (u32 I = 0;; I++) {
-      if (I < 10)
-        yieldProcessor(10);
-      else
-        yieldPlatform();
-      if (atomic_load_relaxed(&State) == 0 &&
-          atomic_exchange(&State, 1, memory_order_acquire) == 0)
+    // The compiler may try to fully unroll the loop, ending up in a
+    // NumberOfTries*NumberOfYields block of pauses mixed with tryLocks. This
+    // is large, ugly and unneeded, a compact loop is better for our purpose
+    // here. Use a pragma to tell the compiler not to unroll the loop.
+#ifdef __clang__
+#pragma nounroll
+#endif
+    for (u8 I = 0U; I < NumberOfTries; I++) {
+      yieldProcessor(NumberOfYields);
+      if (tryLock())
        return;
     }
+    lockSlow();
   }
-};
-
-class SpinMutex : public StaticSpinMutex {
-public:
-  SpinMutex() { init(); }
+  void unlock();
 
 private:
-  SpinMutex(const SpinMutex &) = delete;
-  void operator=(const SpinMutex &) = delete;
-};
+  static constexpr u8 NumberOfTries = 10U;
+  static constexpr u8 NumberOfYields = 10U;
 
-class BlockingMutex {
-public:
-  explicit constexpr BlockingMutex(LinkerInitialized) : OpaqueStorage{} {}
-  BlockingMutex() { memset(this, 0, sizeof(*this)); }
-  void lock();
-  void unlock();
-  void checkLocked() {
-    atomic_u32 *M = reinterpret_cast<atomic_u32 *>(&OpaqueStorage);
-    CHECK_NE(MtxUnlocked, atomic_load_relaxed(M));
-  }
+#if SCUDO_LINUX
+  atomic_u32 M;
+#elif SCUDO_FUCHSIA
+  sync_mutex_t M;
+#endif
 
-private:
-  enum MutexState { MtxUnlocked = 0, MtxLocked = 1, MtxSleeping = 2 };
-  uptr OpaqueStorage[1];
+  void lockSlow();
 };
 
-template <typename MutexType> class GenericScopedLock {
+class ScopedLock {
 public:
-  explicit GenericScopedLock(MutexType *M) : Mutex(M) { Mutex->lock(); }
-  ~GenericScopedLock() { Mutex->unlock(); }
+  explicit ScopedLock(HybridMutex &M) : Mutex(M) { Mutex.lock(); }
+  ~ScopedLock() { Mutex.unlock(); }
 
 private:
-  MutexType *Mutex;
+  HybridMutex &Mutex;
 
-  GenericScopedLock(const GenericScopedLock &) = delete;
-  void operator=(const GenericScopedLock &) = delete;
+  ScopedLock(const ScopedLock &) = delete;
+  void operator=(const ScopedLock &) = delete;
 };
 
-typedef GenericScopedLock<StaticSpinMutex> SpinMutexLock;
-typedef GenericScopedLock<BlockingMutex> BlockingMutexLock;
-
 } // namespace scudo
 
 #endif // SCUDO_MUTEX_H_
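Note (editorial, not part of the patch): with this header, callers spin a bounded NumberOfTries x NumberOfYields before falling back to the blocking lockSlow(), and ScopedLock becomes the single RAII guard replacing SpinMutexLock/BlockingMutexLock. A usage sketch; the ExampleCounter struct is hypothetical:

// Editorial sketch, not part of the patch.
#include "mutex.h"

namespace scudo {
struct ExampleCounter {
  ExampleCounter() { Mutex.init(); } // Mutex must be zeroed before first use.
  void increment() {
    ScopedLock L(Mutex); // Released automatically at end of scope.
    Value++;
  }
  HybridMutex Mutex;
  u64 Value = 0;
};
} // namespace scudo
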
Index: compiler-rt/trunk/lib/scudo/standalone/primary32.h
===================================================================
--- compiler-rt/trunk/lib/scudo/standalone/primary32.h
+++ compiler-rt/trunk/lib/scudo/standalone/primary32.h
@@ -97,7 +97,7 @@
   TransferBatch *popBatch(CacheT *C, uptr ClassId) {
     DCHECK_LT(ClassId, NumClasses);
     SizeClassInfo *Sci = getSizeClassInfo(ClassId);
-    BlockingMutexLock L(&Sci->Mutex);
+    ScopedLock L(Sci->Mutex);
     TransferBatch *B = Sci->FreeList.front();
     if (B)
       Sci->FreeList.pop_front();
@@ -115,7 +115,7 @@
     DCHECK_LT(ClassId, NumClasses);
     DCHECK_GT(B->getCount(), 0);
     SizeClassInfo *Sci = getSizeClassInfo(ClassId);
-    BlockingMutexLock L(&Sci->Mutex);
+    ScopedLock L(Sci->Mutex);
     Sci->FreeList.push_front(B);
     Sci->Stats.PushedBlocks += B->getCount();
     if (Sci->CanRelease)
@@ -164,7 +164,7 @@
   void releaseToOS() {
     for (uptr I = 1; I < NumClasses; I++) {
       SizeClassInfo *Sci = getSizeClassInfo(I);
-      BlockingMutexLock L(&Sci->Mutex);
+      ScopedLock L(Sci->Mutex);
       releaseToOSMaybe(Sci, I, /*Force=*/true);
     }
   }
@@ -192,7 +192,7 @@
   };
 
   struct ALIGNED(SCUDO_CACHE_LINE_SIZE) SizeClassInfo {
-    BlockingMutex Mutex;
+    HybridMutex Mutex;
     IntrusiveList<TransferBatch> FreeList;
     SizeClassStats Stats;
     bool CanRelease;
@@ -217,7 +217,7 @@
     const uptr MapEnd = MapBase + MapSize;
     uptr Region = MapBase;
     if (isAligned(Region, RegionSize)) {
-      SpinMutexLock L(&RegionsStashMutex);
+      ScopedLock L(RegionsStashMutex);
       if (NumberOfStashedRegions < MaxStashedRegions)
         RegionsStash[NumberOfStashedRegions++] = MapBase + RegionSize;
       else
@@ -237,7 +237,7 @@
     DCHECK_LT(ClassId, NumClasses);
     uptr Region = 0;
     {
-      SpinMutexLock L(&RegionsStashMutex);
+      ScopedLock L(RegionsStashMutex);
       if (NumberOfStashedRegions > 0)
         Region = RegionsStash[--NumberOfStashedRegions];
     }
@@ -389,7 +389,7 @@
   // Unless several threads request regions simultaneously from different size
   // classes, the stash rarely contains more than 1 entry.
   static constexpr uptr MaxStashedRegions = 4;
-  StaticSpinMutex RegionsStashMutex;
+  HybridMutex RegionsStashMutex;
   uptr NumberOfStashedRegions;
   uptr RegionsStash[MaxStashedRegions];
 };
Index: compiler-rt/trunk/lib/scudo/standalone/primary64.h
===================================================================
--- compiler-rt/trunk/lib/scudo/standalone/primary64.h
+++ compiler-rt/trunk/lib/scudo/standalone/primary64.h
@@ -100,7 +100,7 @@
   TransferBatch *popBatch(CacheT *C, uptr ClassId) {
     DCHECK_LT(ClassId, NumClasses);
     RegionInfo *Region = getRegionInfo(ClassId);
-    BlockingMutexLock L(&Region->Mutex);
+    ScopedLock L(Region->Mutex);
     TransferBatch *B = Region->FreeList.front();
     if (B)
       Region->FreeList.pop_front();
@@ -117,7 +117,7 @@
   void pushBatch(uptr ClassId, TransferBatch *B) {
     DCHECK_GT(B->getCount(), 0);
     RegionInfo *Region = getRegionInfo(ClassId);
-    BlockingMutexLock L(&Region->Mutex);
+    ScopedLock L(Region->Mutex);
     Region->FreeList.push_front(B);
     Region->Stats.PushedBlocks += B->getCount();
     if (Region->CanRelease)
@@ -168,7 +168,7 @@
   void releaseToOS() {
     for (uptr I = 1; I < NumClasses; I++) {
       RegionInfo *Region = getRegionInfo(I);
-      BlockingMutexLock L(&Region->Mutex);
+      ScopedLock L(Region->Mutex);
       releaseToOSMaybe(Region, I, /*Force=*/true);
     }
   }
@@ -194,7 +194,7 @@
   };
 
   struct ALIGNED(SCUDO_CACHE_LINE_SIZE) RegionInfo {
-    BlockingMutex Mutex;
+    HybridMutex Mutex;
     IntrusiveList<TransferBatch> FreeList;
     RegionStats Stats;
     bool CanRelease;
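Note (editorial, not part of the patch): the region stash above is a small fixed-size array, so the HybridMutex only ever guards a handful of pointer moves. A condensed sketch of that push/pop pattern with hypothetical names:

// Editorial sketch, not part of the patch.
#include "mutex.h"

namespace scudo {
struct ExampleRegionStash {
  bool push(uptr Region) {
    ScopedLock L(Mutex);
    if (Count == Max)
      return false; // Caller unmaps the spare region instead.
    Stash[Count++] = Region;
    return true;
  }
  uptr pop() {
    ScopedLock L(Mutex);
    return Count ? Stash[--Count] : 0;
  }
  static constexpr uptr Max = 4;
  HybridMutex Mutex;
  uptr Count = 0;
  uptr Stash[Max];
};
} // namespace scudo
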
Index: compiler-rt/trunk/lib/scudo/standalone/quarantine.h
===================================================================
--- compiler-rt/trunk/lib/scudo/standalone/quarantine.h
+++ compiler-rt/trunk/lib/scudo/standalone/quarantine.h
@@ -202,7 +202,7 @@
 
   void NOINLINE drain(CacheT *C, Callback Cb) {
     {
-      SpinMutexLock L(&CacheMutex);
+      ScopedLock L(CacheMutex);
       Cache.transfer(C);
     }
     if (Cache.getSize() > getMaxSize() && RecyleMutex.tryLock())
@@ -211,7 +211,7 @@
 
   void NOINLINE drainAndRecycle(CacheT *C, Callback Cb) {
     {
-      SpinMutexLock L(&CacheMutex);
+      ScopedLock L(CacheMutex);
       Cache.transfer(C);
     }
     RecyleMutex.lock();
@@ -227,9 +227,9 @@
 
 private:
   // Read-only data.
-  alignas(SCUDO_CACHE_LINE_SIZE) StaticSpinMutex CacheMutex;
+  alignas(SCUDO_CACHE_LINE_SIZE) HybridMutex CacheMutex;
   CacheT Cache;
-  alignas(SCUDO_CACHE_LINE_SIZE) StaticSpinMutex RecyleMutex;
+  alignas(SCUDO_CACHE_LINE_SIZE) HybridMutex RecyleMutex;
   atomic_uptr MinSize;
   atomic_uptr MaxSize;
   alignas(SCUDO_CACHE_LINE_SIZE) atomic_uptr MaxCacheSize;
@@ -238,7 +238,7 @@
     CacheT Tmp;
     Tmp.init();
     {
-      SpinMutexLock L(&CacheMutex);
+      ScopedLock L(CacheMutex);
       // Go over the batches and merge partially filled ones to
       // save some memory, otherwise batches themselves (since the memory used
       // by them is counted against quarantine limit) can overcome the actual
Index: compiler-rt/trunk/lib/scudo/standalone/secondary.h
===================================================================
--- compiler-rt/trunk/lib/scudo/standalone/secondary.h
+++ compiler-rt/trunk/lib/scudo/standalone/secondary.h
@@ -82,7 +82,7 @@
   }
 
 private:
-  StaticSpinMutex Mutex;
+  HybridMutex Mutex;
   LargeBlock::Header *Tail;
   uptr AllocatedBytes;
   uptr FreedBytes;
Index: compiler-rt/trunk/lib/scudo/standalone/secondary.cc
===================================================================
--- compiler-rt/trunk/lib/scudo/standalone/secondary.cc
+++ compiler-rt/trunk/lib/scudo/standalone/secondary.cc
@@ -72,7 +72,7 @@
   H->BlockEnd = CommitBase + CommitSize;
   H->Data = Data;
   {
-    SpinMutexLock L(&Mutex);
+    ScopedLock L(Mutex);
     if (!Tail) {
       Tail = H;
     } else {
@@ -95,7 +95,7 @@
 void MapAllocator::deallocate(void *Ptr) {
   LargeBlock::Header *H = LargeBlock::getHeader(Ptr);
   {
-    SpinMutexLock L(&Mutex);
+    ScopedLock L(Mutex);
     LargeBlock::Header *Prev = H->Prev;
     LargeBlock::Header *Next = H->Next;
     if (Prev) {
Index: compiler-rt/trunk/lib/scudo/standalone/stats.h
===================================================================
--- compiler-rt/trunk/lib/scudo/standalone/stats.h
+++ compiler-rt/trunk/lib/scudo/standalone/stats.h
@@ -65,7 +65,7 @@
   }
 
   void link(LocalStats *S) {
-    SpinMutexLock L(&Mutex);
+    ScopedLock L(Mutex);
     S->Next = Next;
     S->Prev = this;
     Next->Prev = S;
@@ -73,7 +73,7 @@
   }
 
   void unlink(LocalStats *S) {
-    SpinMutexLock L(&Mutex);
+    ScopedLock L(Mutex);
     S->Prev->Next = S->Next;
     S->Next->Prev = S->Prev;
     for (uptr I = 0; I < StatCount; I++)
@@ -82,7 +82,7 @@
 
   void get(uptr *S) const {
     memset(S, 0, StatCount * sizeof(uptr));
-    SpinMutexLock L(&Mutex);
+    ScopedLock L(Mutex);
     const LocalStats *Stats = this;
     for (;;) {
       for (uptr I = 0; I < StatCount; I++)
@@ -97,7 +97,7 @@
   }
 
 private:
-  mutable StaticSpinMutex Mutex;
+  mutable HybridMutex Mutex;
 };
 
 } // namespace scudo
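Note (editorial, not part of the patch): drain() above pairs a short ScopedLock critical section for the cache transfer with a tryLock() on the recycle mutex, so at most one thread pays for the expensive recycle step while the others return immediately. A condensed sketch of that shape with hypothetical names:

// Editorial sketch, not part of the patch.
#include "mutex.h"

namespace scudo {
template <typename CacheT> struct ExampleQuarantine {
  void drain(CacheT *C) {
    {
      ScopedLock L(CacheMutex); // Keep the critical section minimal.
      Cache.transfer(C);
    }
    // Non-blocking: if someone else is already recycling, just return.
    if (Cache.getSize() > MaxSize && RecycleMutex.tryLock())
      recycle();
  }
  void recycle() {
    // The real implementation extracts and deallocates batches here.
    RecycleMutex.unlock();
  }
  HybridMutex CacheMutex;
  HybridMutex RecycleMutex;
  CacheT Cache;
  uptr MaxSize = 0;
};
} // namespace scudo
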
Index: compiler-rt/trunk/lib/scudo/standalone/tests/map_test.cc
===================================================================
--- compiler-rt/trunk/lib/scudo/standalone/tests/map_test.cc
+++ compiler-rt/trunk/lib/scudo/standalone/tests/map_test.cc
@@ -11,9 +11,15 @@
 #include "gtest/gtest.h"
 
 #include <string.h>
+#include <unistd.h>
 
 static const char *MappingName = "scudo:test";
 
+TEST(ScudoMapTest, PageSize) {
+  EXPECT_EQ(scudo::getPageSizeCached(),
+            static_cast<scudo::uptr>(getpagesize()));
+}
+
 TEST(ScudoMapTest, MapNoAccessUnmap) {
   const scudo::uptr Size = 4 * scudo::getPageSizeCached();
   scudo::MapPlatformData Data = {};
Index: compiler-rt/trunk/lib/scudo/standalone/tests/mutex_test.cc
===================================================================
--- compiler-rt/trunk/lib/scudo/standalone/tests/mutex_test.cc
+++ compiler-rt/trunk/lib/scudo/standalone/tests/mutex_test.cc
@@ -12,15 +12,15 @@
 
 #include <pthread.h>
 
-template <typename MutexType> class TestData {
+class TestData {
 public:
-  explicit TestData(MutexType *M) : Mutex(M) {
+  explicit TestData(scudo::HybridMutex &M) : Mutex(M) {
     for (scudo::u32 I = 0; I < Size; I++)
       Data[I] = 0;
   }
 
   void write() {
-    Lock L(Mutex);
+    scudo::ScopedLock L(Mutex);
     T V0 = Data[0];
     for (scudo::u32 I = 0; I < Size; I++) {
       EXPECT_EQ(Data[I], V0);
@@ -29,14 +29,14 @@
   }
 
   void tryWrite() {
-    if (!Mutex->tryLock())
+    if (!Mutex.tryLock())
       return;
     T V0 = Data[0];
     for (scudo::u32 I = 0; I < Size; I++) {
       EXPECT_EQ(Data[I], V0);
       Data[I]++;
     }
-    Mutex->unlock();
+    Mutex.unlock();
   }
 
   void backoff() {
@@ -48,10 +48,9 @@
   }
 
 private:
-  typedef scudo::GenericScopedLock<MutexType> Lock;
   static const scudo::u32 Size = 64U;
   typedef scudo::u64 T;
-  MutexType *Mutex;
+  scudo::HybridMutex &Mutex;
   ALIGNED(SCUDO_CACHE_LINE_SIZE) T Data[Size];
 };
 
@@ -62,8 +61,8 @@
 const scudo::u32 NumberOfIterations = 16 * 1024;
 #endif
 
-template <typename MutexType> static void *lockThread(void *Param) {
-  TestData<MutexType> *Data = reinterpret_cast<TestData<MutexType> *>(Param);
+static void *lockThread(void *Param) {
+  TestData *Data = reinterpret_cast<TestData *>(Param);
   for (scudo::u32 I = 0; I < NumberOfIterations; I++) {
     Data->write();
     Data->backoff();
@@ -71,8 +70,8 @@
   return 0;
 }
 
-template <typename MutexType> static void *tryThread(void *Param) {
-  TestData<MutexType> *Data = reinterpret_cast<TestData<MutexType> *>(Param);
+static void *tryThread(void *Param) {
+  TestData *Data = reinterpret_cast<TestData *>(Param);
   for (scudo::u32 I = 0; I < NumberOfIterations; I++) {
     Data->tryWrite();
     Data->backoff();
@@ -80,42 +79,24 @@
   return 0;
 }
 
-template <typename MutexType> static void checkLocked(MutexType *M) {
-  scudo::GenericScopedLock<MutexType> L(M);
-  M->checkLocked();
-}
-
-TEST(ScudoMutexTest, SpinMutex) {
-  scudo::SpinMutex M;
+TEST(ScudoMutexTest, Mutex) {
+  scudo::HybridMutex M;
   M.init();
-  TestData<scudo::SpinMutex> Data(&M);
+  TestData Data(M);
   pthread_t Threads[NumberOfThreads];
   for (scudo::u32 I = 0; I < NumberOfThreads; I++)
-    pthread_create(&Threads[I], 0, lockThread<scudo::SpinMutex>, &Data);
+    pthread_create(&Threads[I], 0, lockThread, &Data);
   for (scudo::u32 I = 0; I < NumberOfThreads; I++)
     pthread_join(Threads[I], 0);
 }
 
-TEST(ScudoMutexTest, SpinMutexTry) {
-  scudo::SpinMutex M;
+TEST(ScudoMutexTest, MutexTry) {
+  scudo::HybridMutex M;
   M.init();
-  TestData<scudo::SpinMutex> Data(&M);
-  pthread_t Threads[NumberOfThreads];
-  for (scudo::u32 I = 0; I < NumberOfThreads; I++)
-    pthread_create(&Threads[I], 0, tryThread<scudo::SpinMutex>, &Data);
-  for (scudo::u32 I = 0; I < NumberOfThreads; I++)
-    pthread_join(Threads[I], 0);
-}
-
-TEST(ScudoMutexTest, BlockingMutex) {
-  scudo::u64 MutexMemory[1024] = {};
-  scudo::BlockingMutex *M =
-      new (MutexMemory) scudo::BlockingMutex(scudo::LINKER_INITIALIZED);
-  TestData<scudo::BlockingMutex> Data(M);
+  TestData Data(M);
   pthread_t Threads[NumberOfThreads];
   for (scudo::u32 I = 0; I < NumberOfThreads; I++)
-    pthread_create(&Threads[I], 0, lockThread<scudo::BlockingMutex>, &Data);
+    pthread_create(&Threads[I], 0, tryThread, &Data);
   for (scudo::u32 I = 0; I < NumberOfThreads; I++)
     pthread_join(Threads[I], 0);
-  checkLocked(M);
 }
Index: compiler-rt/trunk/lib/scudo/standalone/tsd.h
===================================================================
--- compiler-rt/trunk/lib/scudo/standalone/tsd.h
+++ compiler-rt/trunk/lib/scudo/standalone/tsd.h
@@ -57,7 +57,7 @@
   INLINE uptr getPrecedence() { return atomic_load_relaxed(&Precedence); }
 
 private:
-  StaticSpinMutex Mutex;
+  HybridMutex Mutex;
   atomic_uptr Precedence;
 };
 
Index: compiler-rt/trunk/lib/scudo/standalone/tsd_exclusive.h
===================================================================
--- compiler-rt/trunk/lib/scudo/standalone/tsd_exclusive.h
+++ compiler-rt/trunk/lib/scudo/standalone/tsd_exclusive.h
@@ -60,7 +60,7 @@
 
 private:
   void initOnceMaybe(Allocator *Instance) {
-    SpinMutexLock L(&Mutex);
+    ScopedLock L(Mutex);
     if (Initialized)
       return;
     initLinkerInitialized(Instance); // Sets Initialized.
@@ -82,7 +82,7 @@
   pthread_key_t PThreadKey;
   bool Initialized;
   TSD<Allocator> *FallbackTSD;
-  StaticSpinMutex Mutex;
+  HybridMutex Mutex;
   static THREADLOCAL ThreadState State;
   static THREADLOCAL TSD<Allocator> ThreadTSD;
 
Index: compiler-rt/trunk/lib/scudo/standalone/tsd_shared.h
===================================================================
--- compiler-rt/trunk/lib/scudo/standalone/tsd_shared.h
+++ compiler-rt/trunk/lib/scudo/standalone/tsd_shared.h
@@ -94,7 +94,7 @@
   }
 
   void initOnceMaybe(Allocator *Instance) {
-    SpinMutexLock L(&Mutex);
+    ScopedLock L(Mutex);
     if (Initialized)
       return;
     initLinkerInitialized(Instance); // Sets Initialized.
@@ -152,7 +152,7 @@
   u32 NumberOfCoPrimes;
   u32 CoPrimes[MaxTSDCount];
   bool Initialized;
-  StaticSpinMutex Mutex;
+  HybridMutex Mutex;
 #if SCUDO_LINUX && !SCUDO_ANDROID
   static THREADLOCAL TSD<Allocator> *ThreadTSD;
 #endif