diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cpp
--- a/compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cpp
@@ -112,47 +112,6 @@
   CHECK_EQ(status, ZX_OK);
 }
 
-enum MutexState : int { MtxUnlocked = 0, MtxLocked = 1, MtxSleeping = 2 };
-
-BlockingMutex::BlockingMutex() {
-  // NOTE! It's important that this use internal_memset, because plain
-  // memset might be intercepted (e.g., actually be __asan_memset).
-  // Defining this so the compiler initializes each field, e.g.:
-  //   BlockingMutex::BlockingMutex() : BlockingMutex(LINKER_INITIALIZED) {}
-  // might result in the compiler generating a call to memset, which would
-  // have the same problem.
-  internal_memset(this, 0, sizeof(*this));
-}
-
-void BlockingMutex::Lock() {
-  CHECK_EQ(owner_, 0);
-  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
-  if (atomic_exchange(m, MtxLocked, memory_order_acquire) == MtxUnlocked)
-    return;
-  while (atomic_exchange(m, MtxSleeping, memory_order_acquire) != MtxUnlocked) {
-    zx_status_t status =
-        _zx_futex_wait(reinterpret_cast<zx_futex_t *>(m), MtxSleeping,
-                       ZX_HANDLE_INVALID, ZX_TIME_INFINITE);
-    if (status != ZX_ERR_BAD_STATE)  // Normal race.
-      CHECK_EQ(status, ZX_OK);
-  }
-}
-
-void BlockingMutex::Unlock() {
-  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
-  u32 v = atomic_exchange(m, MtxUnlocked, memory_order_release);
-  CHECK_NE(v, MtxUnlocked);
-  if (v == MtxSleeping) {
-    zx_status_t status = _zx_futex_wake(reinterpret_cast<zx_futex_t *>(m), 1);
-    CHECK_EQ(status, ZX_OK);
-  }
-}
-
-void BlockingMutex::CheckLocked() const {
-  auto m = reinterpret_cast<atomic_uint32_t const *>(&opaque_storage_);
-  CHECK_NE(MtxUnlocked, atomic_load(m, memory_order_relaxed));
-}
-
 uptr GetPageSize() { return _zx_system_get_page_size(); }
 
 uptr GetMmapGranularity() { return _zx_system_get_page_size(); }
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp
--- a/compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp
@@ -659,48 +659,6 @@
 # endif
 }
 
-enum { MtxUnlocked = 0, MtxLocked = 1, MtxSleeping = 2 };
-
-BlockingMutex::BlockingMutex() {
-  internal_memset(this, 0, sizeof(*this));
-}
-
-void BlockingMutex::Lock() {
-  CHECK_EQ(owner_, 0);
-  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
-  if (atomic_exchange(m, MtxLocked, memory_order_acquire) == MtxUnlocked)
-    return;
-  while (atomic_exchange(m, MtxSleeping, memory_order_acquire) != MtxUnlocked) {
-#if SANITIZER_FREEBSD
-    _umtx_op(m, UMTX_OP_WAIT_UINT, MtxSleeping, 0, 0);
-#elif SANITIZER_NETBSD
-    sched_yield(); /* No userspace futex-like synchronization */
-#else
-    internal_syscall(SYSCALL(futex), (uptr)m, FUTEX_WAIT_PRIVATE, MtxSleeping,
-                     0, 0, 0);
-#endif
-  }
-}
-
-void BlockingMutex::Unlock() {
-  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
-  u32 v = atomic_exchange(m, MtxUnlocked, memory_order_release);
-  CHECK_NE(v, MtxUnlocked);
-  if (v == MtxSleeping) {
-#if SANITIZER_FREEBSD
-    _umtx_op(m, UMTX_OP_WAKE, 1, 0, 0);
-#elif SANITIZER_NETBSD
-    /* No userspace futex-like synchronization */
-#else
-    internal_syscall(SYSCALL(futex), (uptr)m, FUTEX_WAKE_PRIVATE, 1, 0, 0, 0);
-#endif
-  }
-}
-
-void BlockingMutex::CheckLocked() const {
-  auto m = reinterpret_cast<atomic_uint32_t const *>(&opaque_storage_);
-  CHECK_NE(MtxUnlocked, atomic_load(m, memory_order_relaxed));
-}
 # endif  // !SANITIZER_SOLARIS
 
 // ----------------- sanitizer_linux.h
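Both hunks above remove the same three-state futex protocol (MtxUnlocked, MtxLocked, MtxSleeping); after this change that logic lives once in the shared Mutex. The sketch below is illustrative only and is not the sanitizer code: it re-creates the protocol in portable C++20, with std::atomic wait/notify standing in for futex(2) and _zx_futex_wait/_zx_futex_wake.

// Portable sketch of the retired three-state lock protocol (illustration only).
#include <atomic>
#include <cstdint>

class ThreeStateMutex {
  enum : uint32_t { kUnlocked = 0, kLocked = 1, kSleeping = 2 };

 public:
  void Lock() {
    // Fast path: an uncontended acquisition flips kUnlocked -> kLocked.
    if (state_.exchange(kLocked, std::memory_order_acquire) == kUnlocked)
      return;
    // Slow path: mark the lock as contended (kSleeping) and block until the
    // previous holder hands it over, i.e. the exchange observes kUnlocked.
    while (state_.exchange(kSleeping, std::memory_order_acquire) != kUnlocked)
      state_.wait(kSleeping);  // analogous to FUTEX_WAIT / _zx_futex_wait
  }

  void Unlock() {
    uint32_t prev = state_.exchange(kUnlocked, std::memory_order_release);
    if (prev == kSleeping)
      state_.notify_one();  // analogous to FUTEX_WAKE / _zx_futex_wake
  }

 private:
  std::atomic<uint32_t> state_{kUnlocked};
};

The conservative hand-off in Unlock (waking whenever the state was kSleeping, even if no waiter remains) matches the removed code: it trades an occasional spurious wake for a simpler state machine.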
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp
--- a/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp
@@ -516,25 +516,6 @@
 
 void FutexWake(atomic_uint32_t *p, u32 count) {}
 
-BlockingMutex::BlockingMutex() {
-  internal_memset(this, 0, sizeof(*this));
-}
-
-void BlockingMutex::Lock() {
-  CHECK(sizeof(OSSpinLock) <= sizeof(opaque_storage_));
-  CHECK_EQ(OS_SPINLOCK_INIT, 0);
-  CHECK_EQ(owner_, 0);
-  OSSpinLockLock((OSSpinLock*)&opaque_storage_);
-}
-
-void BlockingMutex::Unlock() {
-  OSSpinLockUnlock((OSSpinLock*)&opaque_storage_);
-}
-
-void BlockingMutex::CheckLocked() const {
-  CHECK_NE(*(const OSSpinLock*)&opaque_storage_, 0);
-}
-
 u64 NanoTime() {
   timeval tv;
   internal_memset(&tv, 0, sizeof(tv));
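The removed Darwin implementation was a spin lock over the deprecated OSSpinLock API, while the futex hooks (FutexWake above) stay in place for the unified Mutex. The header diff that follows also drops a declaration annotated with the MUTEX, ACQUIRE(), RELEASE() and CHECK_LOCKED() macros. As a reference only, and not part of this patch, here is a minimal lock written against the underlying Clang thread-safety attributes; compiling it with -Wthread-safety lets the compiler flag unbalanced locking and unguarded accesses.

// Minimal example of the Clang attributes behind the sanitizer's annotation
// macros (illustration only; std::mutex stands in for the real lock).
// Build with: clang++ -std=c++17 -Wthread-safety -c annotated_mutex.cpp
#include <mutex>

class __attribute__((capability("mutex"))) AnnotatedMutex {
 public:
  void Lock() __attribute__((acquire_capability()));
  void Unlock() __attribute__((release_capability()));
  // Mirrors CheckLocked(): asserts to the analysis that the lock is held.
  void CheckLocked() const __attribute__((assert_capability(this)));

 private:
  std::mutex mu_;
};

void AnnotatedMutex::Lock() { mu_.lock(); }
void AnnotatedMutex::Unlock() { mu_.unlock(); }
void AnnotatedMutex::CheckLocked() const {}

AnnotatedMutex g_mu;
int g_counter __attribute__((guarded_by(g_mu)));

void Increment() {
  g_mu.Lock();
  ++g_counter;  // OK: the analysis knows g_mu is held here
  g_mu.Unlock();
  // ++g_counter;  // would trigger a -Wthread-safety warning
}

The assert_capability form mirrors CheckLocked(): it tells the analysis the lock is held without the analysis verifying how, which is exactly the loose ownership check the removed comment describes.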
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_mutex.h b/compiler-rt/lib/sanitizer_common/sanitizer_mutex.h
--- a/compiler-rt/lib/sanitizer_common/sanitizer_mutex.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_mutex.h
@@ -338,111 +338,6 @@
 void FutexWait(atomic_uint32_t *p, u32 cmp);
 void FutexWake(atomic_uint32_t *p, u32 count);
 
-class MUTEX BlockingMutex {
- public:
-  explicit constexpr BlockingMutex(LinkerInitialized)
-      : opaque_storage_ {0, }, owner_ {0} {}
-  BlockingMutex();
-  void Lock() ACQUIRE();
-  void Unlock() RELEASE();
-
-  // This function does not guarantee an explicit check that the calling thread
-  // is the thread which owns the mutex. This behavior, while more strictly
-  // correct, causes problems in cases like StopTheWorld, where a parent thread
-  // owns the mutex but a child checks that it is locked. Rather than
-  // maintaining complex state to work around those situations, the check only
-  // checks that the mutex is owned, and assumes callers to be generally
-  // well-behaved.
-  void CheckLocked() const CHECK_LOCKED();
-
- private:
-  // Solaris mutex_t has a member that requires 64-bit alignment.
-  ALIGNED(8) uptr opaque_storage_[10];
-  uptr owner_;  // for debugging
-};
-
-// Reader-writer spin mutex.
-class MUTEX RWMutex {
- public:
-  RWMutex() {
-    atomic_store(&state_, kUnlocked, memory_order_relaxed);
-  }
-
-  ~RWMutex() {
-    CHECK_EQ(atomic_load(&state_, memory_order_relaxed), kUnlocked);
-  }
-
-  void Lock() ACQUIRE() {
-    u32 cmp = kUnlocked;
-    if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock,
-                                       memory_order_acquire))
-      return;
-    LockSlow();
-  }
-
-  void Unlock() RELEASE() {
-    u32 prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release);
-    DCHECK_NE(prev & kWriteLock, 0);
-    (void)prev;
-  }
-
-  void ReadLock() ACQUIRE_SHARED() {
-    u32 prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire);
-    if ((prev & kWriteLock) == 0)
-      return;
-    ReadLockSlow();
-  }
-
-  void ReadUnlock() RELEASE_SHARED() {
-    u32 prev = atomic_fetch_sub(&state_, kReadLock, memory_order_release);
-    DCHECK_EQ(prev & kWriteLock, 0);
-    DCHECK_GT(prev & ~kWriteLock, 0);
-    (void)prev;
-  }
-
-  void CheckLocked() const CHECK_LOCKED() {
-    CHECK_NE(atomic_load(&state_, memory_order_relaxed), kUnlocked);
-  }
-
- private:
-  atomic_uint32_t state_;
-
-  enum {
-    kUnlocked = 0,
-    kWriteLock = 1,
-    kReadLock = 2
-  };
-
-  void NOINLINE LockSlow() {
-    for (int i = 0;; i++) {
-      if (i < 10)
-        proc_yield(10);
-      else
-        internal_sched_yield();
-      u32 cmp = atomic_load(&state_, memory_order_relaxed);
-      if (cmp == kUnlocked &&
-          atomic_compare_exchange_weak(&state_, &cmp, kWriteLock,
-                                       memory_order_acquire))
-        return;
-    }
-  }
-
-  void NOINLINE ReadLockSlow() {
-    for (int i = 0;; i++) {
-      if (i < 10)
-        proc_yield(10);
-      else
-        internal_sched_yield();
-      u32 prev = atomic_load(&state_, memory_order_acquire);
-      if ((prev & kWriteLock) == 0)
-        return;
-    }
-  }
-
-  RWMutex(const RWMutex &) = delete;
-  void operator=(const RWMutex &) = delete;
-};
-
 template <typename MutexType>
 class SCOPED_LOCK GenericScopedLock {
  public:
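The RWMutex removed above keeps all of its state in a single 32-bit word: bit 0 (kWriteLock) is the writer lock and every reader adds kReadLock = 2, so writers wait for the word to drop to zero while readers only wait for the writer bit to clear. The sketch below re-creates that encoding with std::atomic purely as an illustration; it is not the replacement Mutex, which presumably blocks through the FutexWait/FutexWake hooks kept above instead of spinning.

// Illustrative re-creation of the removed reader-writer spin lock encoding.
#include <atomic>
#include <cstdint>
#include <thread>

class SpinRWMutex {
  static constexpr uint32_t kUnlocked = 0;
  static constexpr uint32_t kWriteLock = 1;  // bit 0: exclusive writer
  static constexpr uint32_t kReadLock = 2;   // each reader adds 2

 public:
  void Lock() {
    uint32_t cmp = kUnlocked;
    // A writer can only enter when no writer bit and no reader counts remain.
    while (!state_.compare_exchange_weak(cmp, kWriteLock,
                                         std::memory_order_acquire)) {
      cmp = kUnlocked;            // CAS rewrites cmp on failure; reset it
      std::this_thread::yield();  // stand-in for proc_yield/internal_sched_yield
    }
  }
  void Unlock() { state_.fetch_sub(kWriteLock, std::memory_order_release); }

  void ReadLock() {
    // Optimistically register as a reader, then spin only while a writer
    // still holds the lock.
    if (state_.fetch_add(kReadLock, std::memory_order_acquire) & kWriteLock)
      while (state_.load(std::memory_order_acquire) & kWriteLock)
        std::this_thread::yield();
  }
  void ReadUnlock() { state_.fetch_sub(kReadLock, std::memory_order_release); }

 private:
  std::atomic<uint32_t> state_{kUnlocked};
};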
@@ -475,6 +370,11 @@
   void operator=(const GenericScopedReadLock &) = delete;
 };
 
+// Temporal measure for incremental migration.
+// These typedef's should be removed and all uses renamed to Mutex.
+typedef Mutex BlockingMutex;
+typedef Mutex RWMutex;
+
 typedef GenericScopedLock<StaticSpinMutex> SpinMutexLock;
 typedef GenericScopedLock<BlockingMutex> BlockingMutexLock;
 typedef GenericScopedLock<RWMutex> RWMutexLock;
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_solaris.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_solaris.cpp
--- a/compiler-rt/lib/sanitizer_common/sanitizer_solaris.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_solaris.cpp
@@ -225,28 +225,6 @@
 
 void FutexWake(atomic_uint32_t *p, u32 count) {}
 
-BlockingMutex::BlockingMutex() {
-  CHECK(sizeof(mutex_t) <= sizeof(opaque_storage_));
-  internal_memset(this, 0, sizeof(*this));
-  CHECK_EQ(mutex_init((mutex_t *)&opaque_storage_, USYNC_THREAD, NULL), 0);
-}
-
-void BlockingMutex::Lock() {
-  CHECK(sizeof(mutex_t) <= sizeof(opaque_storage_));
-  CHECK_NE(owner_, (uptr)thr_self());
-  CHECK_EQ(mutex_lock((mutex_t *)&opaque_storage_), 0);
-  CHECK(!owner_);
-  owner_ = (uptr)thr_self();
-}
-
-void BlockingMutex::Unlock() {
-  CHECK(owner_ == (uptr)thr_self());
-  owner_ = 0;
-  CHECK_EQ(mutex_unlock((mutex_t *)&opaque_storage_), 0);
-}
-
-void BlockingMutex::CheckLocked() const { CHECK_EQ((uptr)thr_self(), owner_); }
-
 }  // namespace __sanitizer
 
 #endif  // SANITIZER_SOLARIS
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_win.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_win.cpp
--- a/compiler-rt/lib/sanitizer_common/sanitizer_win.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_win.cpp
@@ -827,27 +827,6 @@
   WakeByAddressAll(p);
 }
 
-// ---------------------- BlockingMutex ---------------- {{{1
-
-BlockingMutex::BlockingMutex() {
-  CHECK(sizeof(SRWLOCK) <= sizeof(opaque_storage_));
-  internal_memset(this, 0, sizeof(*this));
-}
-
-void BlockingMutex::Lock() {
-  AcquireSRWLockExclusive((PSRWLOCK)opaque_storage_);
-  CHECK_EQ(owner_, 0);
-  owner_ = GetThreadSelf();
-}
-
-void BlockingMutex::Unlock() {
-  CheckLocked();
-  owner_ = 0;
-  ReleaseSRWLockExclusive((PSRWLOCK)opaque_storage_);
-}
-
-void BlockingMutex::CheckLocked() const { CHECK_EQ(owner_, GetThreadSelf()); }
-
 uptr GetTlsSize() { return 0; }
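With every per-platform BlockingMutex implementation gone (Solaris mutex_t and Windows SRWLOCK above included), the old names survive only as aliases of Mutex so that existing call sites keep compiling until they are renamed. The sketch below shows why the aliases are enough; std::mutex stands in for the real __sanitizer::Mutex and the helper names are illustrative, not taken from the patch.

// Sketch of the migration aliasing (illustration only).
#include <mutex>

class Mutex {  // stand-in; the real class lives in sanitizer_mutex.h
 public:
  void Lock() { mu_.lock(); }
  void Unlock() { mu_.unlock(); }
  void ReadLock() { mu_.lock(); }     // the real Mutex is a reader-writer lock
  void ReadUnlock() { mu_.unlock(); }

 private:
  std::mutex mu_;
};

template <typename MutexType>
class GenericScopedLock {
 public:
  explicit GenericScopedLock(MutexType *mu) : mu_(mu) { mu_->Lock(); }
  ~GenericScopedLock() { mu_->Unlock(); }

 private:
  MutexType *mu_;
};

// The migration aliases: old names, one implementation.
typedef Mutex BlockingMutex;
typedef Mutex RWMutex;
typedef GenericScopedLock<BlockingMutex> BlockingMutexLock;
typedef GenericScopedLock<RWMutex> RWMutexLock;

BlockingMutex g_mu;  // pre-existing call sites compile unchanged:
void Work() { BlockingMutexLock l(&g_mu); /* critical section */ }

Because GenericScopedLock only requires Lock() and Unlock(), instantiating it with an alias of Mutex is indistinguishable from instantiating it with Mutex itself, which is what makes the incremental rename safe.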