diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_mutex.h b/compiler-rt/lib/sanitizer_common/sanitizer_mutex.h
--- a/compiler-rt/lib/sanitizer_common/sanitizer_mutex.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_mutex.h
@@ -85,6 +85,186 @@
   atomic_uint32_t state_ = {0};
 };
 
+// Reader-writer mutex.
+class MUTEX Mutex2 {
+ public:
+  constexpr Mutex2() {}
+
+  void Lock() ACQUIRE() {
+    u64 reset_mask = ~0ull;
+    u64 state = atomic_load_relaxed(&state_);
+    const uptr kMaxSpinIters = 1500;
+    for (uptr spin_iters = 0;; spin_iters++) {
+      u64 new_state;
+      bool locked = (state & (kWriterLock | kReaderLockMask)) != 0;
+      if (LIKELY(!locked)) {
+        // The mutex is not read-/write-locked, try to lock.
+        new_state = (state | kWriterLock) & reset_mask;
+      } else if (spin_iters > kMaxSpinIters) {
+        // We've spun enough, increment the waiting-writers count and block.
+        // The counter will be decremented by whoever wakes us.
+        new_state = (state + kWaitingWriterInc) & reset_mask;
+      } else if ((state & kWriterSpinWait) == 0) {
+        // Active spinning, but denote our presence so that the unlocking
+        // thread does not wake up other threads.
+        new_state = state | kWriterSpinWait;
+      } else {
+        // Active spinning.
+        state = atomic_load(&state_, memory_order_relaxed);
+        continue;
+      }
+      if (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
+                                                 memory_order_acquire)))
+        continue;
+      if (LIKELY(!locked))
+        return;  // We've locked the mutex.
+      if (spin_iters > kMaxSpinIters) {
+        // We've incremented waiting writers, so now block.
+        writers_.Wait();
+        spin_iters = 0;
+        state = atomic_load(&state_, memory_order_relaxed);
+        DCHECK_NE(state & kWriterSpinWait, 0);
+      } else {
+        // We've set kWriterSpinWait, but we are still actively spinning.
+      }
+      // We either blocked and were unblocked,
+      // or we just spun but set kWriterSpinWait.
+      // Either way we need to reset kWriterSpinWait
+      // next time we take the lock or block again.
+      reset_mask = ~kWriterSpinWait;
+    }
+  }
+
+  void Unlock() RELEASE() {
+    bool wake_writer;
+    u64 wake_readers;
+    u64 new_state;
+    u64 state = atomic_load_relaxed(&state_);
+    do {
+      DCHECK_NE(state & kWriterLock, 0);
+      DCHECK_EQ(state & kReaderLockMask, 0);
+      new_state = state & ~kWriterLock;
+      wake_writer =
+          (state & kWriterSpinWait) == 0 && (state & kWaitingWriterMask) != 0;
+      if (wake_writer)
+        new_state = (new_state - kWaitingWriterInc) | kWriterSpinWait;
+      wake_readers =
+          (state & (kWriterSpinWait | kWaitingWriterMask)) != 0
+              ? 0
+              : ((state & kWaitingReaderMask) >> kWaitingReaderShift);
+      if (wake_readers)
+        new_state = (new_state & ~kWaitingReaderMask) +
+                    (wake_readers << kReaderLockShift);
+    } while (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
+                                                    memory_order_release)));
+    if (UNLIKELY(wake_writer))
+      writers_.Post();
+    else if (UNLIKELY(wake_readers))
+      readers_.Post(wake_readers);
+  }
+
+  void ReadLock() ACQUIRE_SHARED() {
+    bool locked;
+    u64 new_state;
+    u64 state = atomic_load_relaxed(&state_);
+    do {
+      locked =
+          (state & kReaderLockMask) == 0 &&
+          (state & (kWriterLock | kWriterSpinWait | kWaitingWriterMask)) != 0;
+      if (LIKELY(!locked))
+        new_state = state + kReaderLockInc;
+      else
+        new_state = state + kWaitingReaderInc;
+    } while (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
+                                                    memory_order_acquire)));
+    if (UNLIKELY(locked))
+      readers_.Wait();
+    DCHECK_EQ(atomic_load_relaxed(&state_) & kWriterLock, 0);
+    DCHECK_NE(atomic_load_relaxed(&state_) & kReaderLockMask, 0);
+  }
+
+  void ReadUnlock() RELEASE_SHARED() {
+    bool wake;
+    u64 new_state;
+    u64 state = atomic_load_relaxed(&state_);
+    do {
+      DCHECK_NE(state & kReaderLockMask, 0);
+      DCHECK_EQ(state & (kWaitingReaderMask | kWriterLock), 0);
+      new_state = state - kReaderLockInc;
+      wake = (new_state & (kReaderLockMask | kWriterSpinWait)) == 0 &&
+             (new_state & kWaitingWriterMask) != 0;
+      if (wake)
+        new_state = (new_state - kWaitingWriterInc) | kWriterSpinWait;
+    } while (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
+                                                    memory_order_release)));
+    if (UNLIKELY(wake))
+      writers_.Post();
+  }
+
+  // This function does not check that the calling thread is the thread
+  // that owns the mutex. Such a check, while more strictly correct,
+  // causes problems in cases like StopTheWorld, where a parent thread
+  // owns the mutex but a child thread checks that it is locked. Rather
+  // than maintaining complex state to work around those situations, we
+  // only check that the mutex is locked at all.
+  void CheckWriteLocked() const CHECK_LOCKED() {
+    CHECK(atomic_load(&state_, memory_order_relaxed) & kWriterLock);
+  }
+
+  void CheckLocked() const CHECK_LOCKED() { CheckWriteLocked(); }
+
+  void CheckReadLocked() const CHECK_LOCKED() {
+    CHECK(atomic_load(&state_, memory_order_relaxed) & kReaderLockMask);
+  }
+
+ private:
+  atomic_uint64_t state_ = {0};
+  Semaphore writers_;
+  Semaphore readers_;
+
+  // The state consists of 3 counters:
+  //  - the number of readers holding the lock;
+  //    if non-zero, the mutex is read-locked
+  //  - the number of waiting readers;
+  //    if non-zero, the mutex is write-locked
+  //  - the number of waiting writers;
+  //    if non-zero, the mutex is read- or write-locked
+  // and 2 flags:
+  //  - the writer-lock flag;
+  //    if set, the mutex is write-locked
+  //  - the "a writer is awake and spin-waiting" flag;
+  //    it is used to prevent the thundering-herd problem
+  //    (new writers are not woken while it is set)
+  //
+  // Writers support active spinning, readers do not.
+  // Readers are more aggressive, though: a reader always takes
+  // the mutex if any other readers already hold it.
+  // Writers hand off the mutex to readers: woken readers already
+  // assume ownership of the mutex (they don't need to update the
+  // state). The mutex is not handed off to writers: after wakeup,
+  // writers compete to lock the mutex again. This is needed to
+  // allow repeated write locks even in the presence of other
+  // blocked writers.
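
To make the packed state encoding easier to follow, here is a small self-contained sketch (plain C++, independent of sanitizer_common; the main() driver and its scenario are illustrative only, not part of the patch) that reproduces the bit layout and walks one contended handoff through the same transitions that Lock(), ReadUnlock(), and the reset_mask step perform:

#include <cassert>
#include <cstdint>
#include <cstdio>

// Same layout as Mutex2::state_: three 20-bit counters plus two flags
// packed into a single 64-bit word.
constexpr uint64_t kCounterWidth = 20;
constexpr uint64_t kReaderLockShift = 0;
constexpr uint64_t kReaderLockInc = 1ull << kReaderLockShift;
constexpr uint64_t kReaderLockMask = ((1ull << kCounterWidth) - 1)
                                     << kReaderLockShift;
constexpr uint64_t kWaitingWriterShift = 2 * kCounterWidth;
constexpr uint64_t kWaitingWriterInc = 1ull << kWaitingWriterShift;
constexpr uint64_t kWaitingWriterMask = ((1ull << kCounterWidth) - 1)
                                        << kWaitingWriterShift;
constexpr uint64_t kWriterLock = 1ull << (3 * kCounterWidth);
constexpr uint64_t kWriterSpinWait = 1ull << (3 * kCounterWidth + 1);

int main() {
  uint64_t state = 0;  // unlocked

  // Two readers take the lock: only the reader-count field changes.
  state += 2 * kReaderLockInc;
  assert(((state & kReaderLockMask) >> kReaderLockShift) == 2);

  // A writer arrives, spins out, and registers itself as a waiter
  // (the kWaitingWriterInc branch in Lock()).
  state += kWaitingWriterInc;

  // Both readers unlock. The last ReadUnlock sees no remaining readers,
  // no spinning writer, and a non-zero waiter count, so it wakes one
  // writer: decrement the waiter count and set kWriterSpinWait.
  state -= 2 * kReaderLockInc;
  bool wake = (state & (kReaderLockMask | kWriterSpinWait)) == 0 &&
              (state & kWaitingWriterMask) != 0;
  assert(wake);
  state = (state - kWaitingWriterInc) | kWriterSpinWait;

  // The woken writer re-runs the CAS loop in Lock(): it sets kWriterLock
  // and clears its own kWriterSpinWait flag (the reset_mask step).
  state = (state | kWriterLock) & ~kWriterSpinWait;
  assert(state == kWriterLock);
  printf("state: writer-locked, no waiters, no spinners\n");
  return 0;
}

With kCounterWidth = 20 each counter saturates around one million, which bounds the number of threads the mutex can track; the two flags occupy bits 60 and 61, leaving the top two bits of the word unused.
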
+  static constexpr u64 kCounterWidth = 20;
+  static constexpr u64 kReaderLockShift = 0;
+  static constexpr u64 kReaderLockInc = 1ull << kReaderLockShift;
+  static constexpr u64 kReaderLockMask = ((1ull << kCounterWidth) - 1)
+                                         << kReaderLockShift;
+  static constexpr u64 kWaitingReaderShift = kCounterWidth;
+  static constexpr u64 kWaitingReaderInc = 1ull << kWaitingReaderShift;
+  static constexpr u64 kWaitingReaderMask = ((1ull << kCounterWidth) - 1)
+                                            << kWaitingReaderShift;
+  static constexpr u64 kWaitingWriterShift = 2 * kCounterWidth;
+  static constexpr u64 kWaitingWriterInc = 1ull << kWaitingWriterShift;
+  static constexpr u64 kWaitingWriterMask = ((1ull << kCounterWidth) - 1)
+                                            << kWaitingWriterShift;
+  static constexpr u64 kWriterLock = 1ull << (3 * kCounterWidth);
+  static constexpr u64 kWriterSpinWait = 1ull << (3 * kCounterWidth + 1);
+
+  Mutex2(const Mutex2 &) = delete;
+  void operator=(const Mutex2 &) = delete;
+};
+
 void FutexWait(atomic_uint32_t *p, u32 cmp);
 void FutexWake(atomic_uint32_t *p, u32 count);
diff --git a/compiler-rt/lib/sanitizer_common/tests/sanitizer_mutex_test.cpp b/compiler-rt/lib/sanitizer_common/tests/sanitizer_mutex_test.cpp
--- a/compiler-rt/lib/sanitizer_common/tests/sanitizer_mutex_test.cpp
+++ b/compiler-rt/lib/sanitizer_common/tests/sanitizer_mutex_test.cpp
@@ -33,6 +33,7 @@
     Lock l(mtx_);
     T v0 = data_[0];
     for (int i = 0; i < kSize; i++) {
+      mtx_->CheckLocked();
       CHECK_EQ(data_[i], v0);
       data_[i]++;
     }
@@ -43,12 +44,22 @@
       return;
     T v0 = data_[0];
     for (int i = 0; i < kSize; i++) {
+      mtx_->CheckLocked();
       CHECK_EQ(data_[i], v0);
       data_[i]++;
     }
     mtx_->Unlock();
   }
 
+  void Read() {
+    ReadLock l(mtx_);
+    T v0 = data_[0];
+    for (int i = 0; i < kSize; i++) {
+      mtx_->CheckReadLocked();
+      CHECK_EQ(data_[i], v0);
+    }
+  }
+
   void Backoff() {
     volatile T data[kSize] = {};
     for (int i = 0; i < kSize; i++) {
@@ -59,6 +70,7 @@
 
  private:
   typedef GenericScopedLock<MutexType> Lock;
+  typedef GenericScopedReadLock<MutexType> ReadLock;
   static const int kSize = 64;
   typedef u64 T;
   MutexType *mtx_;
@@ -93,6 +105,19 @@
   return 0;
 }
 
+template <typename MutexType>
+static void *read_write_thread(void *param) {
+  TestData<MutexType> *data = (TestData<MutexType> *)param;
+  for (int i = 0; i < kIters; i++) {
+    if ((i % 10) == 0)
+      data->Write();
+    else
+      data->Read();
+    data->Backoff();
+  }
+  return 0;
+}
+
 template <typename MutexType>
 static void check_locked(MutexType *mtx) {
   GenericScopedLock<MutexType> l(mtx);
@@ -133,6 +158,15 @@
   check_locked(mtx);
 }
 
+TEST(SanitizerCommon, Mutex2) {
+  Mutex2 mtx;
+  TestData<Mutex2> data(&mtx);
+  pthread_t threads[kThreads];
+  for (int i = 0; i < kThreads; i++)
+    PTHREAD_CREATE(&threads[i], 0, read_write_thread<Mutex2>, &data);
+  for (int i = 0; i < kThreads; i++) PTHREAD_JOIN(threads[i], 0);
+}
+
 struct SemaphoreData {
   Semaphore *sem;
   bool done;
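
For experimentation outside the sanitizer tree, the same stress pattern is easy to reproduce against std::shared_mutex, which has the same reader-writer semantics as Mutex2. A minimal sketch, assuming C++17; the constants and names below are illustrative and not part of the patch (the gtest above defines its own kThreads and kIters):

#include <cassert>
#include <cstdint>
#include <mutex>
#include <shared_mutex>
#include <thread>
#include <vector>

// Mirrors read_write_thread above: every 10th iteration takes the
// exclusive lock and increments all slots; the rest take the shared
// lock and verify that every slot holds the same value, i.e. that no
// writer ran concurrently with any reader.
constexpr int kThreads = 8;
constexpr int kIters = 16 * 1024;
constexpr int kSize = 64;

static std::shared_mutex mtx;
static uint64_t data_[kSize];

static void read_write_thread() {
  for (int i = 0; i < kIters; i++) {
    if (i % 10 == 0) {
      std::unique_lock<std::shared_mutex> l(mtx);  // like TestData::Write
      uint64_t v0 = data_[0];
      for (int j = 0; j < kSize; j++) {
        assert(data_[j] == v0);
        data_[j]++;
      }
    } else {
      std::shared_lock<std::shared_mutex> l(mtx);  // like TestData::Read
      uint64_t v0 = data_[0];
      for (int j = 0; j < kSize; j++) assert(data_[j] == v0);
    }
  }
}

int main() {
  std::vector<std::thread> threads;
  for (int i = 0; i < kThreads; i++) threads.emplace_back(read_write_thread);
  for (auto &t : threads) t.join();
  return 0;
}

Build with -std=c++17 -pthread; running it under -fsanitize=thread is a quick way to confirm the invariant the test relies on, namely that writers mutate the array only while holding the lock exclusively.
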