Index: lib/sanitizer_common/sanitizer_common.cc =================================================================== --- lib/sanitizer_common/sanitizer_common.cc +++ lib/sanitizer_common/sanitizer_common.cc @@ -30,6 +30,79 @@ StaticSpinMutex report_file_mu; ReportFile report_file = {&report_file_mu, kStderrFd, "", "", 0}; +RWMutex::RWMutex() { + owner_ = 0; + atomic_store(&state_, kUnlocked, memory_order_relaxed); +} + +RWMutex::~RWMutex() { + CHECK_EQ(atomic_load(&state_, memory_order_relaxed), kUnlocked); +} + +void RWMutex::Lock() { + CHECK_NE(owner_, GetTid()); + u32 cmp = kUnlocked; + if (!atomic_compare_exchange_strong(&state_, &cmp, kWriteLock, + memory_order_acquire)) { + LockSlow(); + } + CHECK_EQ(owner_, 0); + owner_ = GetTid(); +} + +void RWMutex::Unlock() { + CHECK_EQ(owner_, GetTid()); + owner_ = 0; + u32 prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release); + DCHECK_NE(prev & kWriteLock, 0); + (void)prev; +} + +void RWMutex::ReadLock() { + u32 prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire); + if ((prev & kWriteLock) != 0) { + ReadLockSlow(); + } +} + +void RWMutex::ReadUnlock() { + u32 prev = atomic_fetch_sub(&state_, kReadLock, memory_order_release); + DCHECK_EQ(prev & kWriteLock, 0); + DCHECK_GT(prev & ~kWriteLock, 0); + (void)prev; +} + +void RWMutex::CheckLocked() { + u32 state = atomic_load(&state_, memory_order_relaxed); + CHECK((owner_ == GetTid()) ? 
state != kUnlocked : state == kReadLock); +} + +void NOINLINE RWMutex::LockSlow() { + for (int i = 0;; i++) { + if (i < 10) + proc_yield(10); + else + internal_sched_yield(); + u32 cmp = atomic_load(&state_, memory_order_relaxed); + if (cmp == kUnlocked && + atomic_compare_exchange_weak(&state_, &cmp, kWriteLock, + memory_order_acquire)) + return; + } +} + +void NOINLINE RWMutex::ReadLockSlow() { + for (int i = 0;; i++) { + if (i < 10) + proc_yield(10); + else + internal_sched_yield(); + u32 prev = atomic_load(&state_, memory_order_acquire); + if ((prev & kWriteLock) == 0) + return; + } +} + void RawWrite(const char *buffer) { report_file.Write(buffer, internal_strlen(buffer)); } Index: lib/sanitizer_common/sanitizer_linux.cc =================================================================== --- lib/sanitizer_common/sanitizer_linux.cc +++ lib/sanitizer_common/sanitizer_linux.cc @@ -520,20 +520,26 @@ } void BlockingMutex::Lock() { - CHECK_EQ(owner_, 0); + CHECK_NE(owner_, GetTid()); atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_); - if (atomic_exchange(m, MtxLocked, memory_order_acquire) == MtxUnlocked) - return; - while (atomic_exchange(m, MtxSleeping, memory_order_acquire) != MtxUnlocked) { + if (atomic_exchange(m, MtxLocked, memory_order_acquire) != MtxUnlocked) { + while (atomic_exchange(m, MtxSleeping, memory_order_acquire) + != MtxUnlocked) { #if SANITIZER_FREEBSD - _umtx_op(m, UMTX_OP_WAIT_UINT, MtxSleeping, 0, 0); + _umtx_op(m, UMTX_OP_WAIT_UINT, MtxSleeping, 0, 0); #else - internal_syscall(SYSCALL(futex), (uptr)m, FUTEX_WAIT, MtxSleeping, 0, 0, 0); + internal_syscall(SYSCALL(futex), (uptr)m, FUTEX_WAIT, MtxSleeping, + 0, 0, 0); #endif + } } + CHECK_EQ(owner_, 0); + owner_ = GetTid(); } void BlockingMutex::Unlock() { + CHECK_EQ(owner_, GetTid()); + owner_ = 0; atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_); u32 v = atomic_exchange(m, MtxUnlocked, memory_order_relaxed); CHECK_NE(v, MtxUnlocked); @@ -547,6 +553,7 @@ } void 
BlockingMutex::CheckLocked() { + CHECK_EQ(owner_, GetTid()); atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_); CHECK_NE(MtxUnlocked, atomic_load(m, memory_order_relaxed)); } Index: lib/sanitizer_common/sanitizer_mutex.h =================================================================== --- lib/sanitizer_common/sanitizer_mutex.h +++ lib/sanitizer_common/sanitizer_mutex.h @@ -92,48 +92,16 @@ // Reader-writer spin mutex. class RWMutex { public: - RWMutex() { - atomic_store(&state_, kUnlocked, memory_order_relaxed); - } - - ~RWMutex() { - CHECK_EQ(atomic_load(&state_, memory_order_relaxed), kUnlocked); - } - - void Lock() { - u32 cmp = kUnlocked; - if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock, - memory_order_acquire)) - return; - LockSlow(); - } - - void Unlock() { - u32 prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release); - DCHECK_NE(prev & kWriteLock, 0); - (void)prev; - } - - void ReadLock() { - u32 prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire); - if ((prev & kWriteLock) == 0) - return; - ReadLockSlow(); - } - - void ReadUnlock() { - u32 prev = atomic_fetch_sub(&state_, kReadLock, memory_order_release); - DCHECK_EQ(prev & kWriteLock, 0); - DCHECK_GT(prev & ~kWriteLock, 0); - (void)prev; - } - - void CheckLocked() { - CHECK_NE(atomic_load(&state_, memory_order_relaxed), kUnlocked); - } - + RWMutex(); + ~RWMutex(); + void Lock(); + void Unlock(); + void ReadLock(); + void ReadUnlock(); + void CheckLocked(); private: atomic_uint32_t state_; + uptr owner_; // for debugging, unset for read-only locks enum { kUnlocked = 0, @@ -141,31 +109,8 @@ kReadLock = 2 }; - void NOINLINE LockSlow() { - for (int i = 0;; 
i++) { - if (i < 10) - proc_yield(10); - else - internal_sched_yield(); - u32 prev = atomic_load(&state_, memory_order_acquire); - if ((prev & kWriteLock) == 0) - return; - } - } + void NOINLINE LockSlow(); + void NOINLINE ReadLockSlow(); RWMutex(const RWMutex&); void operator = (const RWMutex&);