diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_mutex.h b/compiler-rt/lib/sanitizer_common/sanitizer_mutex.h
--- a/compiler-rt/lib/sanitizer_common/sanitizer_mutex.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_mutex.h
@@ -85,12 +85,87 @@
   atomic_uint32_t state_ = {0};
 };

+typedef int MutexType;
+
+enum {
+  // Used as sentinel and to catch unassigned types
+  // (should not be used as real Mutex type).
+  MutexInvalid = 0,
+  MutexThreadRegistry,
+  // Each tool's own mutexes must start at this number.
+  MutexLastCommon,
+  // Type for legacy mutexes that are not checked for deadlocks.
+  MutexUnchecked = -1,
+  // Special marks that can be used in MutexMeta::can_lock table.
+  // The leaf mutexes can be locked under any other non-leaf mutex,
+  // but no other mutex can be locked while under a leaf mutex.
+  MutexLeaf = -1,
+  // Multiple mutexes of this type can be locked at the same time.
+  MutexMulti = -3,
+};
+
+// Go linker does not support THREADLOCAL variables,
+// so we can't use per-thread state.
+#define SANITIZER_CHECK_DEADLOCKS (SANITIZER_DEBUG && !SANITIZER_GO)
+
+#if SANITIZER_CHECK_DEADLOCKS
+struct MutexMeta {
+  MutexType type;
+  const char *name;
+  // The table fixes what mutexes can be locked under what mutexes.
+  // If the entry for MutexTypeFoo contains MutexTypeBar,
+  // then Bar mutex can be locked while under Foo mutex.
+  // Can also contain the special MutexLeaf/MutexMulti marks.
+  MutexType can_lock[10];
+};
+#endif
+
+class CheckedMutex {
+ public:
+  constexpr CheckedMutex(MutexType type)
+#if SANITIZER_CHECK_DEADLOCKS
+      : type_(type)
+#endif
+  {
+  }
+
+  ALWAYS_INLINE void Lock() {
+#if SANITIZER_CHECK_DEADLOCKS
+    LockImpl(GET_CALLER_PC());
+#endif
+  }
+
+  ALWAYS_INLINE void Unlock() {
+#if SANITIZER_CHECK_DEADLOCKS
+    UnlockImpl();
+#endif
+  }
+
+  // Checks that the current thread does not hold any mutexes
+  // (e.g. when returning from a runtime function to user code).
+  static void CheckNoLocks() {
+#if SANITIZER_CHECK_DEADLOCKS
+    CheckNoLocksImpl();
+#endif
+  }
+
+ private:
+#if SANITIZER_CHECK_DEADLOCKS
+  const MutexType type_;
+
+  void LockImpl(uptr pc);
+  void UnlockImpl();
+  static void CheckNoLocksImpl();
+#endif
+};
+
 // Reader-writer mutex.
-class MUTEX Mutex2 {
+class MUTEX Mutex : CheckedMutex {
  public:
-  constexpr Mutex2() {}
+  constexpr Mutex(MutexType type = MutexUnchecked) : CheckedMutex(type) {}

   void Lock() ACQUIRE() {
+    CheckedMutex::Lock();
     u64 reset_mask = ~0ull;
     u64 state = atomic_load_relaxed(&state_);
     const uptr kMaxSpinIters = 1500;
@@ -136,6 +211,7 @@
   }

   void Unlock() RELEASE() {
+    CheckedMutex::Unlock();
     bool wake_writer;
     u64 wake_readers;
     u64 new_state;
@@ -164,6 +240,7 @@
   }

   void ReadLock() ACQUIRE_SHARED() {
+    CheckedMutex::Lock();
     bool locked;
     u64 new_state;
     u64 state = atomic_load_relaxed(&state_);
@@ -184,6 +261,7 @@
   }

   void ReadUnlock() RELEASE_SHARED() {
+    CheckedMutex::Unlock();
     bool wake;
     u64 new_state;
     u64 state = atomic_load_relaxed(&state_);
@@ -261,8 +339,8 @@
   static constexpr u64 kWriterLock = 1ull << (3 * kCounterWidth);
   static constexpr u64 kWriterSpinWait = 1ull << (3 * kCounterWidth + 1);

-  Mutex2(const Mutex2 &) = delete;
-  void operator=(const Mutex2 &) = delete;
+  Mutex(const Mutex &) = delete;
+  void operator=(const Mutex &) = delete;
 };

 void FutexWait(atomic_uint32_t *p, u32 cmp);
@@ -409,6 +487,8 @@
 typedef GenericScopedLock<BlockingMutex> BlockingMutexLock;
 typedef GenericScopedLock<RWMutex> RWMutexLock;
 typedef GenericScopedReadLock<RWMutex> RWMutexReadLock;
+typedef GenericScopedLock<Mutex> Lock;
+typedef GenericScopedReadLock<Mutex> ReadLock;

 } // namespace __sanitizer

diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_mutex.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_mutex.cpp
--- a/compiler-rt/lib/sanitizer_common/sanitizer_mutex.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_mutex.cpp
@@ -36,4 +36,170 @@
   FutexWake(&state_, count);
 }

+#if SANITIZER_CHECK_DEADLOCKS
+// An empty mutex meta table; it effectively disables deadlock detection.
+// Each tool can override the table to define its own mutex hierarchy and
+// enable deadlock detection.
+// The table defines a static mutex hierarchy (what mutexes can be locked
+// under what mutexes). This table is checked to be acyclic, and then actual
+// mutex lock/unlock operations are checked to adhere to this hierarchy.
+const int kMutexTypeMax = 20;
+SANITIZER_WEAK_ATTRIBUTE MutexMeta mutex_meta[kMutexTypeMax] = {};
+SANITIZER_WEAK_ATTRIBUTE void PrintMutexPC(uptr pc) {}
+StaticSpinMutex mutex_meta_mtx;
+int mutex_count = -1;
+// Adjacency matrix of what mutexes can be locked under what mutexes.
+bool mutex_can_lock[kMutexTypeMax][kMutexTypeMax];
+// Mutex types with MutexMulti mark.
+bool mutex_multi[kMutexTypeMax];
+
+void DebugMutexInit() {
+  // Build adjacency matrix.
+  bool leaf[kMutexTypeMax] = {};
+  int cnt[kMutexTypeMax] = {};
+  for (int t = 0;; t++) {
+    mutex_count = t;
+    CHECK_LE(mutex_count, kMutexTypeMax);
+    if (!mutex_meta[t].name)
+      break;
+    CHECK_EQ(t, mutex_meta[t].type);
+    for (int j = 0; j < (int)ARRAY_SIZE(mutex_meta[t].can_lock); j++) {
+      MutexType z = mutex_meta[t].can_lock[j];
+      if (z == MutexInvalid)
+        break;
+      if (z == MutexLeaf) {
+        CHECK(!leaf[t]);
+        leaf[t] = true;
+        continue;
+      }
+      if (z == MutexMulti) {
+        mutex_multi[t] = true;
+        continue;
+      }
+      CHECK_LT(z, kMutexTypeMax);
+      CHECK(!mutex_can_lock[t][z]);
+      mutex_can_lock[t][z] = true;
+      cnt[t]++;
+    }
+  }
+  // Add leaf mutexes.
+  for (int t = 0; t < mutex_count; t++) {
+    if (!leaf[t])
+      continue;
+    CHECK_EQ(cnt[t], 0);
+    for (int z = 0; z < mutex_count; z++) {
+      if (z == MutexInvalid || t == z || leaf[z])
+        continue;
+      CHECK(!mutex_can_lock[z][t]);
+      mutex_can_lock[z][t] = true;
+    }
+  }
+  // Build the transitive closure and check that the graph is acyclic.
+  // The table is too large for stack allocation.
+  static bool trans[kMutexTypeMax][kMutexTypeMax];
+  for (int i = 0; i < mutex_count; i++) {
+    for (int j = 0; j < mutex_count; j++) trans[i][j] = mutex_can_lock[i][j];
+  }
+  for (int k = 0; k < mutex_count; k++) {
+    for (int i = 0; i < mutex_count; i++) {
+      for (int j = 0; j < mutex_count; j++) {
+        if (trans[i][k] && trans[k][j])
+          trans[i][j] = true;
+      }
+    }
+  }
+  for (int i = 0; i < mutex_count; i++) {
+    if (trans[i][i]) {
+      Printf("Mutex %s participates in a cycle\n", mutex_meta[i].name);
+      Die();
+    }
+  }
+}
+
+struct InternalDeadlockDetector {
+  struct LockDesc {
+    u64 seq;
+    uptr pc;
+    int recursion;
+  };
+  int initialized;
+  u64 sequence;
+  LockDesc locked[kMutexTypeMax];
+
+  void Lock(MutexType type, uptr pc) {
+    if (!Initialize(type))
+      return;
+    CHECK_LT(type, mutex_count);
+    // Find the last locked mutex type.
+    // This is the type we will use for hierarchy checks.
+    u64 max_seq = 0;
+    MutexType max_idx = MutexInvalid;
+    for (int i = 0; i != mutex_count; i++) {
+      if (locked[i].seq == 0)
+        continue;
+      CHECK_NE(locked[i].seq, max_seq);
+      if (max_seq < locked[i].seq) {
+        max_seq = locked[i].seq;
+        max_idx = (MutexType)i;
+      }
+    }
+    if (max_idx == type && mutex_multi[type]) {
+      // Recursive lock of the same type.
+      CHECK_EQ(locked[type].seq, max_seq);
+      CHECK(locked[type].pc);
+      locked[type].recursion++;
+      return;
+    }
+    if (max_idx != MutexInvalid && !mutex_can_lock[max_idx][type]) {
+      Printf("%s: internal deadlock: can't lock %s under %s mutex\n",
+             SanitizerToolName, mutex_meta[type].name,
+             mutex_meta[max_idx].name);
+      PrintMutexPC(pc);
+      CHECK(0);
+    }
+    locked[type].seq = ++sequence;
+    locked[type].pc = pc;
+    locked[type].recursion = 1;
+  }
+
+  void Unlock(MutexType type) {
+    if (!Initialize(type))
+      return;
+    CHECK_LT(type, mutex_count);
+    CHECK(locked[type].seq);
+    CHECK_GT(locked[type].recursion, 0);
+    if (--locked[type].recursion)
+      return;
+    locked[type].seq = 0;
+    locked[type].pc = 0;
+  }
+
+  void CheckNoLocks() {
+    for (int i = 0; i < mutex_count; i++) CHECK_EQ(locked[i].recursion, 0);
+  }
+
+  bool Initialize(MutexType type) {
+    if (type == MutexUnchecked || type == MutexInvalid)
+      return false;
+    CHECK_GT(type, MutexInvalid);
+    if (initialized != 0)
+      return initialized > 0;
+    initialized = -1;
+    SpinMutexLock lock(&mutex_meta_mtx);
+    if (mutex_count < 0)
+      DebugMutexInit();
+    initialized = mutex_count ? 1 : -1;
+    return initialized > 0;
+  }
+};
+
+THREADLOCAL InternalDeadlockDetector deadlock_detector;
+
+void CheckedMutex::LockImpl(uptr pc) { deadlock_detector.Lock(type_, pc); }
+
+void CheckedMutex::UnlockImpl() { deadlock_detector.Unlock(type_); }
+
+void CheckedMutex::CheckNoLocksImpl() { deadlock_detector.CheckNoLocks(); }
+#endif
+
 } // namespace __sanitizer
diff --git a/compiler-rt/lib/sanitizer_common/tests/sanitizer_mutex_test.cpp b/compiler-rt/lib/sanitizer_common/tests/sanitizer_mutex_test.cpp
--- a/compiler-rt/lib/sanitizer_common/tests/sanitizer_mutex_test.cpp
+++ b/compiler-rt/lib/sanitizer_common/tests/sanitizer_mutex_test.cpp
@@ -158,12 +158,12 @@
   check_locked(mtx);
 }

-TEST(SanitizerCommon, Mutex2) {
-  Mutex2 mtx;
-  TestData<Mutex2> data(&mtx);
+TEST(SanitizerCommon, Mutex) {
+  Mutex mtx;
+  TestData<Mutex> data(&mtx);
   pthread_t threads[kThreads];
   for (int i = 0; i < kThreads; i++)
-    PTHREAD_CREATE(&threads[i], 0, read_write_thread<Mutex2>, &data);
+    PTHREAD_CREATE(&threads[i], 0, read_write_thread<Mutex>, &data);
   for (int i = 0; i < kThreads; i++)
     PTHREAD_JOIN(threads[i], 0);
 }
diff --git a/compiler-rt/lib/tsan/CMakeLists.txt b/compiler-rt/lib/tsan/CMakeLists.txt
--- a/compiler-rt/lib/tsan/CMakeLists.txt
+++ b/compiler-rt/lib/tsan/CMakeLists.txt
@@ -39,7 +39,6 @@
   rtl/tsan_malloc_mac.cpp
   rtl/tsan_md5.cpp
   rtl/tsan_mman.cpp
-  rtl/tsan_mutex.cpp
   rtl/tsan_mutexset.cpp
   rtl/tsan_preinit.cpp
   rtl/tsan_report.cpp
@@ -94,7 +93,6 @@
   rtl/tsan_interface_inl.h
   rtl/tsan_interface_java.h
   rtl/tsan_mman.h
-  rtl/tsan_mutex.h
   rtl/tsan_mutexset.h
   rtl/tsan_platform.h
   rtl/tsan_ppc_regs.h
diff --git a/compiler-rt/lib/tsan/go/build.bat b/compiler-rt/lib/tsan/go/build.bat
--- a/compiler-rt/lib/tsan/go/build.bat
+++ b/compiler-rt/lib/tsan/go/build.bat
@@ -4,7 +4,6 @@
   ..\rtl\tsan_clock.cpp ^
   ..\rtl\tsan_flags.cpp ^
   ..\rtl\tsan_md5.cpp ^
-  ..\rtl\tsan_mutex.cpp ^
   ..\rtl\tsan_report.cpp ^
   ..\rtl\tsan_rtl.cpp ^
   ..\rtl\tsan_rtl_mutex.cpp ^
diff --git a/compiler-rt/lib/tsan/go/buildgo.sh b/compiler-rt/lib/tsan/go/buildgo.sh
--- a/compiler-rt/lib/tsan/go/buildgo.sh
+++ b/compiler-rt/lib/tsan/go/buildgo.sh
@@ -9,7 +9,6 @@
   ../rtl/tsan_flags.cpp
   ../rtl/tsan_interface_atomic.cpp
   ../rtl/tsan_md5.cpp
-  ../rtl/tsan_mutex.cpp
   ../rtl/tsan_report.cpp
   ../rtl/tsan_rtl.cpp
   ../rtl/tsan_rtl_mutex.cpp
diff --git a/compiler-rt/lib/tsan/rtl/tsan_defs.h b/compiler-rt/lib/tsan/rtl/tsan_defs.h
--- a/compiler-rt/lib/tsan/rtl/tsan_defs.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_defs.h
@@ -15,6 +15,7 @@
 #include "sanitizer_common/sanitizer_internal_defs.h"
 #include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_mutex.h"
 #include "ubsan/ubsan_platform.h"

 // Setup defaults for compile definitions.
@@ -172,6 +173,17 @@
 // as 16-bit values, see tsan_defs.h.
}; +enum MutexType { + MutexTypeTrace = MutexLastCommon, + MutexTypeReport, + MutexTypeSyncVar, + MutexTypeAnnotations, + MutexTypeAtExit, + MutexTypeFired, + MutexTypeRacy, + MutexTypeGlobalProc, +}; + } // namespace __tsan #endif // TSAN_DEFS_H diff --git a/compiler-rt/lib/tsan/rtl/tsan_dense_alloc.h b/compiler-rt/lib/tsan/rtl/tsan_dense_alloc.h --- a/compiler-rt/lib/tsan/rtl/tsan_dense_alloc.h +++ b/compiler-rt/lib/tsan/rtl/tsan_dense_alloc.h @@ -20,7 +20,6 @@ #include "sanitizer_common/sanitizer_common.h" #include "tsan_defs.h" -#include "tsan_mutex.h" namespace __tsan { diff --git a/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp b/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp --- a/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp +++ b/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp @@ -196,12 +196,13 @@ unsigned finalize_key; #endif - BlockingMutex atexit_mu; + Mutex atexit_mu; Vector AtExitStack; InterceptorContext() - : libignore(LINKER_INITIALIZED), AtExitStack() { - } + : libignore(LINKER_INITIALIZED), + atexit_mu(MutexTypeAtExit), + AtExitStack() {} }; static ALIGNED(64) char interceptor_placeholder[sizeof(InterceptorContext)]; @@ -267,7 +268,7 @@ if (!thr_->ignore_interceptors) { ProcessPendingSignals(thr_); FuncExit(thr_); - CheckNoLocks(thr_); + CheckedMutex::CheckNoLocks(); } } @@ -377,7 +378,7 @@ AtExitCtx *ctx; { // Ensure thread-safety. - BlockingMutexLock l(&interceptor_ctx()->atexit_mu); + Lock l(&interceptor_ctx()->atexit_mu); // Pop AtExitCtx from the top of the stack of callback functions uptr element = interceptor_ctx()->AtExitStack.Size() - 1; @@ -433,7 +434,10 @@ // Store ctx in a local stack-like structure // Ensure thread-safety. - BlockingMutexLock l(&interceptor_ctx()->atexit_mu); + Lock l(&interceptor_ctx()->atexit_mu); + // __cxa_atexit calls calloc, without this we fail with + // atexit_mu held on exit from the calloc interceptor. + ScopedIgnoreInterceptors ignore; res = REAL(__cxa_atexit)((void (*)(void *a))at_exit_wrapper, 0, 0); // Push AtExitCtx on the top of the stack of callback functions diff --git a/compiler-rt/lib/tsan/rtl/tsan_interface_ann.cpp b/compiler-rt/lib/tsan/rtl/tsan_interface_ann.cpp --- a/compiler-rt/lib/tsan/rtl/tsan_interface_ann.cpp +++ b/compiler-rt/lib/tsan/rtl/tsan_interface_ann.cpp @@ -15,7 +15,6 @@ #include "sanitizer_common/sanitizer_stacktrace.h" #include "sanitizer_common/sanitizer_vector.h" #include "tsan_interface_ann.h" -#include "tsan_mutex.h" #include "tsan_report.h" #include "tsan_rtl.h" #include "tsan_mman.h" @@ -38,7 +37,7 @@ ~ScopedAnnotation() { FuncExit(thr_); - CheckNoLocks(thr_); + CheckedMutex::CheckNoLocks(); } private: ThreadState *const thr_; diff --git a/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp b/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp --- a/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp +++ b/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp @@ -218,8 +218,9 @@ } #endif -template -static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a, morder mo) { +template +static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a, + morder mo) NO_THREAD_SAFETY_ANALYSIS { CHECK(IsLoadOrder(mo)); // This fast-path is critical for performance. // Assume the access is atomic. 
@@ -254,9 +255,9 @@ } #endif -template +template static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v, - morder mo) { + morder mo) NO_THREAD_SAFETY_ANALYSIS { CHECK(IsStoreOrder(mo)); MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog()); // This fast-path is critical for performance. @@ -277,8 +278,9 @@ s->mtx.Unlock(); } -template -static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) { +template +static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, + morder mo) NO_THREAD_SAFETY_ANALYSIS { MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog()); SyncVar *s = 0; if (mo != mo_relaxed) { @@ -399,9 +401,9 @@ return c; } -template -static bool AtomicCAS(ThreadState *thr, uptr pc, - volatile T *a, T *c, T v, morder mo, morder fmo) { +template +static bool AtomicCAS(ThreadState *thr, uptr pc, volatile T *a, T *c, T v, + morder mo, morder fmo) NO_THREAD_SAFETY_ANALYSIS { // 31.7.2.18: "The failure argument shall not be memory_order_release // nor memory_order_acq_rel". LLVM (2021-05) fallbacks to Monotonic // (mo_relaxed) when those are used. diff --git a/compiler-rt/lib/tsan/rtl/tsan_interface_java.cpp b/compiler-rt/lib/tsan/rtl/tsan_interface_java.cpp --- a/compiler-rt/lib/tsan/rtl/tsan_interface_java.cpp +++ b/compiler-rt/lib/tsan/rtl/tsan_interface_java.cpp @@ -12,7 +12,6 @@ #include "tsan_interface_java.h" #include "tsan_rtl.h" -#include "tsan_mutex.h" #include "sanitizer_common/sanitizer_internal_defs.h" #include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_placement_new.h" diff --git a/compiler-rt/lib/tsan/rtl/tsan_mutex.h b/compiler-rt/lib/tsan/rtl/tsan_mutex.h deleted file mode 100644 --- a/compiler-rt/lib/tsan/rtl/tsan_mutex.h +++ /dev/null @@ -1,87 +0,0 @@ -//===-- tsan_mutex.h --------------------------------------------*- C++ -*-===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file is a part of ThreadSanitizer (TSan), a race detector. -// -//===----------------------------------------------------------------------===// -#ifndef TSAN_MUTEX_H -#define TSAN_MUTEX_H - -#include "sanitizer_common/sanitizer_atomic.h" -#include "sanitizer_common/sanitizer_mutex.h" -#include "tsan_defs.h" - -namespace __tsan { - -enum MutexType { - MutexTypeInvalid, - MutexTypeTrace, - MutexTypeThreads, - MutexTypeReport, - MutexTypeSyncVar, - MutexTypeSyncTab, - MutexTypeSlab, - MutexTypeAnnotations, - MutexTypeAtExit, - MutexTypeMBlock, - MutexTypeJavaMBlock, - MutexTypeDDetector, - MutexTypeFired, - MutexTypeRacy, - MutexTypeGlobalProc, - - // This must be the last. 
- MutexTypeCount -}; - -class Mutex { - public: - explicit Mutex(MutexType type); - ~Mutex(); - - void Lock(); - void Unlock(); - - void ReadLock(); - void ReadUnlock(); - - void CheckLocked(); - - private: - atomic_uintptr_t state_; -#if SANITIZER_DEBUG - MutexType type_; -#endif - - Mutex(const Mutex&); - void operator = (const Mutex&); -}; - -typedef GenericScopedLock Lock; -typedef GenericScopedReadLock ReadLock; - -class InternalDeadlockDetector { - public: - InternalDeadlockDetector(); - void Lock(MutexType t); - void Unlock(MutexType t); - void CheckNoLocks(); - private: - u64 seq_; - u64 locked_[MutexTypeCount]; -}; - -void InitializeMutex(); - -// Checks that the current thread does not hold any runtime locks -// (e.g. when returning from an interceptor). -void CheckNoLocks(ThreadState *thr); - -} // namespace __tsan - -#endif // TSAN_MUTEX_H diff --git a/compiler-rt/lib/tsan/rtl/tsan_mutex.cpp b/compiler-rt/lib/tsan/rtl/tsan_mutex.cpp deleted file mode 100644 --- a/compiler-rt/lib/tsan/rtl/tsan_mutex.cpp +++ /dev/null @@ -1,280 +0,0 @@ -//===-- tsan_mutex.cpp ----------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file is a part of ThreadSanitizer (TSan), a race detector. -// -//===----------------------------------------------------------------------===// -#include "sanitizer_common/sanitizer_libc.h" -#include "tsan_mutex.h" -#include "tsan_platform.h" -#include "tsan_rtl.h" - -namespace __tsan { - -// Simple reader-writer spin-mutex. Optimized for not-so-contended case. -// Readers have preference, can possibly starvate writers. - -// The table fixes what mutexes can be locked under what mutexes. -// E.g. if the row for MutexTypeThreads contains MutexTypeReport, -// then Report mutex can be locked while under Threads mutex. -// The leaf mutexes can be locked under any other mutexes. -// Recursive locking is not supported. -#if SANITIZER_DEBUG && !SANITIZER_GO -const MutexType MutexTypeLeaf = (MutexType)-1; -static MutexType CanLockTab[MutexTypeCount][MutexTypeCount] = { - /*0 MutexTypeInvalid*/ {}, - /*1 MutexTypeTrace*/ {MutexTypeLeaf}, - /*2 MutexTypeThreads*/ {MutexTypeReport}, - /*3 MutexTypeReport*/ {MutexTypeSyncVar, - MutexTypeMBlock, MutexTypeJavaMBlock}, - /*4 MutexTypeSyncVar*/ {MutexTypeDDetector}, - /*5 MutexTypeSyncTab*/ {}, // unused - /*6 MutexTypeSlab*/ {MutexTypeLeaf}, - /*7 MutexTypeAnnotations*/ {}, - /*8 MutexTypeAtExit*/ {MutexTypeSyncVar}, - /*9 MutexTypeMBlock*/ {MutexTypeSyncVar}, - /*10 MutexTypeJavaMBlock*/ {MutexTypeSyncVar}, - /*11 MutexTypeDDetector*/ {}, - /*12 MutexTypeFired*/ {MutexTypeLeaf}, - /*13 MutexTypeRacy*/ {MutexTypeLeaf}, - /*14 MutexTypeGlobalProc*/ {}, -}; - -static bool CanLockAdj[MutexTypeCount][MutexTypeCount]; -#endif - -void InitializeMutex() { -#if SANITIZER_DEBUG && !SANITIZER_GO - // Build the "can lock" adjacency matrix. - // If [i][j]==true, then one can lock mutex j while under mutex i. 
- const int N = MutexTypeCount; - int cnt[N] = {}; - bool leaf[N] = {}; - for (int i = 1; i < N; i++) { - for (int j = 0; j < N; j++) { - MutexType z = CanLockTab[i][j]; - if (z == MutexTypeInvalid) - continue; - if (z == MutexTypeLeaf) { - CHECK(!leaf[i]); - leaf[i] = true; - continue; - } - CHECK(!CanLockAdj[i][(int)z]); - CanLockAdj[i][(int)z] = true; - cnt[i]++; - } - } - for (int i = 0; i < N; i++) { - CHECK(!leaf[i] || cnt[i] == 0); - } - // Add leaf mutexes. - for (int i = 0; i < N; i++) { - if (!leaf[i]) - continue; - for (int j = 0; j < N; j++) { - if (i == j || leaf[j] || j == MutexTypeInvalid) - continue; - CHECK(!CanLockAdj[j][i]); - CanLockAdj[j][i] = true; - } - } - // Build the transitive closure. - bool CanLockAdj2[MutexTypeCount][MutexTypeCount]; - for (int i = 0; i < N; i++) { - for (int j = 0; j < N; j++) { - CanLockAdj2[i][j] = CanLockAdj[i][j]; - } - } - for (int k = 0; k < N; k++) { - for (int i = 0; i < N; i++) { - for (int j = 0; j < N; j++) { - if (CanLockAdj2[i][k] && CanLockAdj2[k][j]) { - CanLockAdj2[i][j] = true; - } - } - } - } -#if 0 - Printf("Can lock graph:\n"); - for (int i = 0; i < N; i++) { - for (int j = 0; j < N; j++) { - Printf("%d ", CanLockAdj[i][j]); - } - Printf("\n"); - } - Printf("Can lock graph closure:\n"); - for (int i = 0; i < N; i++) { - for (int j = 0; j < N; j++) { - Printf("%d ", CanLockAdj2[i][j]); - } - Printf("\n"); - } -#endif - // Verify that the graph is acyclic. - for (int i = 0; i < N; i++) { - if (CanLockAdj2[i][i]) { - Printf("Mutex %d participates in a cycle\n", i); - Die(); - } - } -#endif -} - -InternalDeadlockDetector::InternalDeadlockDetector() { - // Rely on zero initialization because some mutexes can be locked before ctor. -} - -#if SANITIZER_DEBUG && !SANITIZER_GO -void InternalDeadlockDetector::Lock(MutexType t) { - // Printf("LOCK %d @%zu\n", t, seq_ + 1); - CHECK_GT(t, MutexTypeInvalid); - CHECK_LT(t, MutexTypeCount); - u64 max_seq = 0; - u64 max_idx = MutexTypeInvalid; - for (int i = 0; i != MutexTypeCount; i++) { - if (locked_[i] == 0) - continue; - CHECK_NE(locked_[i], max_seq); - if (max_seq < locked_[i]) { - max_seq = locked_[i]; - max_idx = i; - } - } - locked_[t] = ++seq_; - if (max_idx == MutexTypeInvalid) - return; - // Printf(" last %d @%zu\n", max_idx, max_seq); - if (!CanLockAdj[max_idx][t]) { - Printf("ThreadSanitizer: internal deadlock detected\n"); - Printf("ThreadSanitizer: can't lock %d while under %zu\n", - t, (uptr)max_idx); - CHECK(0); - } -} - -void InternalDeadlockDetector::Unlock(MutexType t) { - // Printf("UNLO %d @%zu #%zu\n", t, seq_, locked_[t]); - CHECK(locked_[t]); - locked_[t] = 0; -} - -void InternalDeadlockDetector::CheckNoLocks() { - for (int i = 0; i != MutexTypeCount; i++) { - CHECK_EQ(locked_[i], 0); - } -} -#endif - -void CheckNoLocks(ThreadState *thr) { -#if SANITIZER_DEBUG && !SANITIZER_GO - thr->internal_deadlock_detector.CheckNoLocks(); -#endif -} - -const uptr kUnlocked = 0; -const uptr kWriteLock = 1; -const uptr kReadLock = 2; - -class Backoff { - public: - Backoff() - : iter_() { - } - - bool Do() { - if (iter_++ < kActiveSpinIters) - proc_yield(kActiveSpinCnt); - else - internal_sched_yield(); - return true; - } - - u64 Contention() const { - u64 active = iter_ % kActiveSpinIters; - u64 passive = iter_ - active; - return active + 10 * passive; - } - - private: - int iter_; - static const int kActiveSpinIters = 10; - static const int kActiveSpinCnt = 20; -}; - -Mutex::Mutex(MutexType type) { - CHECK_GT(type, MutexTypeInvalid); - CHECK_LT(type, MutexTypeCount); -#if 
SANITIZER_DEBUG - type_ = type; -#endif - atomic_store(&state_, kUnlocked, memory_order_relaxed); -} - -Mutex::~Mutex() { - CHECK_EQ(atomic_load(&state_, memory_order_relaxed), kUnlocked); -} - -void Mutex::Lock() { -#if SANITIZER_DEBUG && !SANITIZER_GO - cur_thread()->internal_deadlock_detector.Lock(type_); -#endif - uptr cmp = kUnlocked; - if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock, - memory_order_acquire)) - return; - for (Backoff backoff; backoff.Do();) { - if (atomic_load(&state_, memory_order_relaxed) == kUnlocked) { - cmp = kUnlocked; - if (atomic_compare_exchange_weak(&state_, &cmp, kWriteLock, - memory_order_acquire)) { - return; - } - } - } -} - -void Mutex::Unlock() { - uptr prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release); - (void)prev; - DCHECK_NE(prev & kWriteLock, 0); -#if SANITIZER_DEBUG && !SANITIZER_GO - cur_thread()->internal_deadlock_detector.Unlock(type_); -#endif -} - -void Mutex::ReadLock() { -#if SANITIZER_DEBUG && !SANITIZER_GO - cur_thread()->internal_deadlock_detector.Lock(type_); -#endif - uptr prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire); - if ((prev & kWriteLock) == 0) - return; - for (Backoff backoff; backoff.Do();) { - prev = atomic_load(&state_, memory_order_acquire); - if ((prev & kWriteLock) == 0) { - return; - } - } -} - -void Mutex::ReadUnlock() { - uptr prev = atomic_fetch_sub(&state_, kReadLock, memory_order_release); - (void)prev; - DCHECK_EQ(prev & kWriteLock, 0); - DCHECK_GT(prev & ~kWriteLock, 0); -#if SANITIZER_DEBUG && !SANITIZER_GO - cur_thread()->internal_deadlock_detector.Unlock(type_); -#endif -} - -void Mutex::CheckLocked() { - CHECK_NE(atomic_load(&state_, memory_order_relaxed), 0); -} - -} // namespace __tsan diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp --- a/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp +++ b/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp @@ -422,7 +422,6 @@ InitializeInterceptors(); CheckShadowMapping(); InitializePlatform(); - InitializeMutex(); InitializeDynamicAnnotations(); #if !SANITIZER_GO InitializeShadowMemory(); @@ -1133,7 +1132,28 @@ } // namespace __tsan +#if SANITIZER_CHECK_DEADLOCKS +namespace __sanitizer { +using namespace __tsan; +MutexMeta mutex_meta[] = { + {MutexInvalid, "Invalid", {}}, + {MutexThreadRegistry, "ThreadRegistry", {}}, + {MutexTypeTrace, "Trace", {MutexLeaf}}, + {MutexTypeReport, "Report", {MutexTypeSyncVar}}, + {MutexTypeSyncVar, "SyncVar", {}}, + {MutexTypeAnnotations, "Annotations", {}}, + {MutexTypeAtExit, "AtExit", {MutexTypeSyncVar}}, + {MutexTypeFired, "Fired", {MutexLeaf}}, + {MutexTypeRacy, "Racy", {MutexLeaf}}, + {MutexTypeGlobalProc, "GlobalProc", {}}, + {}, +}; + +void PrintMutexPC(uptr pc) { StackTrace(&pc, 1).Print(); } +} // namespace __sanitizer +#endif + #if !SANITIZER_GO // Must be included in this file to make sure everything is inlined. 
-#include "tsan_interface_inl.h" +# include "tsan_interface_inl.h" #endif diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp --- a/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp +++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp @@ -63,7 +63,8 @@ OutputReport(thr, rep); } -void MutexCreate(ThreadState *thr, uptr pc, uptr addr, u32 flagz) { +void MutexCreate(ThreadState *thr, uptr pc, uptr addr, + u32 flagz) NO_THREAD_SAFETY_ANALYSIS { DPrintf("#%d: MutexCreate %zx flagz=0x%x\n", thr->tid, addr, flagz); if (!(flagz & MutexFlagLinkerInit) && IsAppMem(addr)) { CHECK(!thr->is_freeing); @@ -78,7 +79,8 @@ s->mtx.Unlock(); } -void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, u32 flagz) { +void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, + u32 flagz) NO_THREAD_SAFETY_ANALYSIS { DPrintf("#%d: MutexDestroy %zx\n", thr->tid, addr); SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true); if (s == 0) @@ -138,7 +140,8 @@ // s will be destroyed and freed in MetaMap::FreeBlock. } -void MutexPreLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) { +void MutexPreLock(ThreadState *thr, uptr pc, uptr addr, + u32 flagz) NO_THREAD_SAFETY_ANALYSIS { DPrintf("#%d: MutexPreLock %zx flagz=0x%x\n", thr->tid, addr, flagz); if (!(flagz & MutexFlagTryLock) && common_flags()->detect_deadlocks) { SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false); @@ -154,7 +157,8 @@ } } -void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz, int rec) { +void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz, + int rec) NO_THREAD_SAFETY_ANALYSIS { DPrintf("#%d: MutexPostLock %zx flag=0x%x rec=%d\n", thr->tid, addr, flagz, rec); if (flagz & MutexFlagRecursiveLock) @@ -207,7 +211,8 @@ } } -int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) { +int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, + u32 flagz) NO_THREAD_SAFETY_ANALYSIS { DPrintf("#%d: MutexUnlock %zx flagz=0x%x\n", thr->tid, addr, flagz); if (IsAppMem(addr)) MemoryReadAtomic(thr, pc, addr, kSizeLog1); @@ -248,7 +253,8 @@ return rec; } -void MutexPreReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) { +void MutexPreReadLock(ThreadState *thr, uptr pc, uptr addr, + u32 flagz) NO_THREAD_SAFETY_ANALYSIS { DPrintf("#%d: MutexPreReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz); if (!(flagz & MutexFlagTryLock) && common_flags()->detect_deadlocks) { SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false); @@ -260,7 +266,8 @@ } } -void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) { +void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, + u32 flagz) NO_THREAD_SAFETY_ANALYSIS { DPrintf("#%d: MutexPostReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz); if (IsAppMem(addr)) MemoryReadAtomic(thr, pc, addr, kSizeLog1); @@ -299,7 +306,8 @@ } } -void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) { +void MutexReadUnlock(ThreadState *thr, uptr pc, + uptr addr) NO_THREAD_SAFETY_ANALYSIS { DPrintf("#%d: MutexReadUnlock %zx\n", thr->tid, addr); if (IsAppMem(addr)) MemoryReadAtomic(thr, pc, addr, kSizeLog1); @@ -330,7 +338,8 @@ } } -void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) { +void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, + uptr addr) NO_THREAD_SAFETY_ANALYSIS { DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr->tid, addr); if (IsAppMem(addr)) MemoryReadAtomic(thr, pc, addr, kSizeLog1); @@ -374,7 +383,8 @@ } } -void MutexRepair(ThreadState *thr, uptr pc, uptr addr) { +void 
MutexRepair(ThreadState *thr, uptr pc, + uptr addr) NO_THREAD_SAFETY_ANALYSIS { DPrintf("#%d: MutexRepair %zx\n", thr->tid, addr); SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true); s->owner_tid = kInvalidTid; @@ -382,7 +392,8 @@ s->mtx.Unlock(); } -void MutexInvalidAccess(ThreadState *thr, uptr pc, uptr addr) { +void MutexInvalidAccess(ThreadState *thr, uptr pc, + uptr addr) NO_THREAD_SAFETY_ANALYSIS { DPrintf("#%d: MutexInvalidAccess %zx\n", thr->tid, addr); SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true); u64 mid = s->GetId(); @@ -390,7 +401,7 @@ ReportMutexMisuse(thr, pc, ReportTypeMutexInvalidAccess, addr, mid); } -void Acquire(ThreadState *thr, uptr pc, uptr addr) { +void Acquire(ThreadState *thr, uptr pc, uptr addr) NO_THREAD_SAFETY_ANALYSIS { DPrintf("#%d: Acquire %zx\n", thr->tid, addr); if (thr->ignore_sync) return; @@ -421,7 +432,8 @@ UpdateClockCallback, thr); } -void ReleaseStoreAcquire(ThreadState *thr, uptr pc, uptr addr) { +void ReleaseStoreAcquire(ThreadState *thr, uptr pc, + uptr addr) NO_THREAD_SAFETY_ANALYSIS { DPrintf("#%d: ReleaseStoreAcquire %zx\n", thr->tid, addr); if (thr->ignore_sync) return; @@ -433,7 +445,7 @@ s->mtx.Unlock(); } -void Release(ThreadState *thr, uptr pc, uptr addr) { +void Release(ThreadState *thr, uptr pc, uptr addr) NO_THREAD_SAFETY_ANALYSIS { DPrintf("#%d: Release %zx\n", thr->tid, addr); if (thr->ignore_sync) return; @@ -445,7 +457,8 @@ s->mtx.Unlock(); } -void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) { +void ReleaseStore(ThreadState *thr, uptr pc, + uptr addr) NO_THREAD_SAFETY_ANALYSIS { DPrintf("#%d: ReleaseStore %zx\n", thr->tid, addr); if (thr->ignore_sync) return; diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp --- a/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp +++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp @@ -129,7 +129,7 @@ // We set thr->suppress_reports in the fork context. // Taking any locking in the fork context can lead to deadlocks. // If any locks are already taken, it's too late to do this check. - CheckNoLocks(thr); + CheckedMutex::CheckNoLocks(); // For the same reason check we didn't lock thread_registry yet. if (SANITIZER_DEBUG) ThreadRegistryLock l(ctx->thread_registry); @@ -285,7 +285,7 @@ rm->stack = SymbolizeStackId(s->creation_stack_id); } -u64 ScopedReportBase::AddMutex(u64 id) { +u64 ScopedReportBase::AddMutex(u64 id) NO_THREAD_SAFETY_ANALYSIS { u64 uid = 0; u64 mid = id; uptr addr = SyncVar::SplitId(id, &uid); @@ -596,7 +596,7 @@ } void ReportRace(ThreadState *thr) { - CheckNoLocks(thr); + CheckedMutex::CheckNoLocks(); // Symbolizer makes lots of intercepted calls. If we try to process them, // at best it will cause deadlocks on internal mutexes. 
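
Note on the NO_THREAD_SAFETY_ANALYSIS annotations added above: Clang's -Wthread-safety analysis is purely static, so it cannot follow a SyncVar mutex that is acquired in one function and released in another, or a lock that is only taken on some paths; such functions have to opt out of the analysis. A minimal sketch of the pattern (hypothetical names, not part of this patch), assuming the Mutex and NO_THREAD_SAFETY_ANALYSIS definitions from sanitizer_common above:

struct Cell {
  Mutex mu;
  int value;
};

// Whether mu is held depends on a runtime flag, which the static analysis
// cannot model, so the function disables checking for its own body.
int ReadCell(Cell *c, bool already_locked) NO_THREAD_SAFETY_ANALYSIS {
  if (!already_locked)
    c->mu.Lock();
  int v = c->value;
  if (!already_locked)
    c->mu.Unlock();
  return v;
}
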
diff --git a/compiler-rt/lib/tsan/rtl/tsan_sync.h b/compiler-rt/lib/tsan/rtl/tsan_sync.h --- a/compiler-rt/lib/tsan/rtl/tsan_sync.h +++ b/compiler-rt/lib/tsan/rtl/tsan_sync.h @@ -17,7 +17,6 @@ #include "sanitizer_common/sanitizer_deadlock_detector_interface.h" #include "tsan_defs.h" #include "tsan_clock.h" -#include "tsan_mutex.h" #include "tsan_dense_alloc.h" namespace __tsan { diff --git a/compiler-rt/lib/tsan/rtl/tsan_sync.cpp b/compiler-rt/lib/tsan/rtl/tsan_sync.cpp --- a/compiler-rt/lib/tsan/rtl/tsan_sync.cpp +++ b/compiler-rt/lib/tsan/rtl/tsan_sync.cpp @@ -199,8 +199,9 @@ return GetAndLock(0, 0, addr, write_lock, false); } -SyncVar* MetaMap::GetAndLock(ThreadState *thr, uptr pc, - uptr addr, bool write_lock, bool create) { +SyncVar *MetaMap::GetAndLock(ThreadState *thr, uptr pc, uptr addr, + bool write_lock, + bool create) NO_THREAD_SAFETY_ANALYSIS { u32 *meta = MemToMeta(addr); u32 idx0 = *meta; u32 myidx = 0; diff --git a/compiler-rt/lib/tsan/rtl/tsan_trace.h b/compiler-rt/lib/tsan/rtl/tsan_trace.h --- a/compiler-rt/lib/tsan/rtl/tsan_trace.h +++ b/compiler-rt/lib/tsan/rtl/tsan_trace.h @@ -13,7 +13,6 @@ #define TSAN_TRACE_H #include "tsan_defs.h" -#include "tsan_mutex.h" #include "tsan_stack_trace.h" #include "tsan_mutexset.h" diff --git a/compiler-rt/lib/tsan/tests/rtl/tsan_bench.cpp b/compiler-rt/lib/tsan/tests/rtl/tsan_bench.cpp --- a/compiler-rt/lib/tsan/tests/rtl/tsan_bench.cpp +++ b/compiler-rt/lib/tsan/tests/rtl/tsan_bench.cpp @@ -89,7 +89,7 @@ } TEST(DISABLED_BENCH, MutexLocal) { - Mutex m; + UserMutex m; ScopedThread().Create(m); for (int i = 0; i < 50; i++) { ScopedThread t; diff --git a/compiler-rt/lib/tsan/tests/rtl/tsan_mop.cpp b/compiler-rt/lib/tsan/tests/rtl/tsan_mop.cpp --- a/compiler-rt/lib/tsan/tests/rtl/tsan_mop.cpp +++ b/compiler-rt/lib/tsan/tests/rtl/tsan_mop.cpp @@ -65,7 +65,7 @@ } TEST(ThreadSanitizer, WriteThenLockedRead) { - Mutex m(Mutex::RW); + UserMutex m(UserMutex::RW); MainThread t0; t0.Create(m); MemLoc l; @@ -84,7 +84,7 @@ } TEST(ThreadSanitizer, LockedWriteThenRead) { - Mutex m(Mutex::RW); + UserMutex m(UserMutex::RW); MainThread t0; t0.Create(m); MemLoc l; diff --git a/compiler-rt/lib/tsan/tests/rtl/tsan_mutex.cpp b/compiler-rt/lib/tsan/tests/rtl/tsan_mutex.cpp --- a/compiler-rt/lib/tsan/tests/rtl/tsan_mutex.cpp +++ b/compiler-rt/lib/tsan/tests/rtl/tsan_mutex.cpp @@ -20,7 +20,7 @@ TEST(ThreadSanitizer, BasicMutex) { ScopedThread t; - Mutex m; + UserMutex m; t.Create(m); t.Lock(m); @@ -38,7 +38,7 @@ TEST(ThreadSanitizer, BasicSpinMutex) { ScopedThread t; - Mutex m(Mutex::Spin); + UserMutex m(UserMutex::Spin); t.Create(m); t.Lock(m); @@ -56,7 +56,7 @@ TEST(ThreadSanitizer, BasicRwMutex) { ScopedThread t; - Mutex m(Mutex::RW); + UserMutex m(UserMutex::RW); t.Create(m); t.Lock(m); @@ -92,7 +92,7 @@ } TEST(ThreadSanitizer, Mutex) { - Mutex m; + UserMutex m; MainThread t0; t0.Create(m); @@ -108,7 +108,7 @@ } TEST(ThreadSanitizer, SpinMutex) { - Mutex m(Mutex::Spin); + UserMutex m(UserMutex::Spin); MainThread t0; t0.Create(m); @@ -124,7 +124,7 @@ } TEST(ThreadSanitizer, RwMutex) { - Mutex m(Mutex::RW); + UserMutex m(UserMutex::RW); MainThread t0; t0.Create(m); @@ -150,7 +150,7 @@ TEST(ThreadSanitizer, StaticMutex) { // Emulates statically initialized mutex. 
- Mutex m; + UserMutex m; m.StaticInit(); { ScopedThread t1, t2; diff --git a/compiler-rt/lib/tsan/tests/rtl/tsan_test_util.h b/compiler-rt/lib/tsan/tests/rtl/tsan_test_util.h --- a/compiler-rt/lib/tsan/tests/rtl/tsan_test_util.h +++ b/compiler-rt/lib/tsan/tests/rtl/tsan_test_util.h @@ -28,7 +28,7 @@ void operator = (const MemLoc&); }; -class Mutex { +class UserMutex { public: enum Type { Normal, @@ -40,8 +40,8 @@ #endif }; - explicit Mutex(Type type = Normal); - ~Mutex(); + explicit UserMutex(Type type = Normal); + ~UserMutex(); void Init(); void StaticInit(); // Emulates static initialization (tsan invisible). @@ -59,8 +59,8 @@ bool alive_; const Type type_; - Mutex(const Mutex&); - void operator = (const Mutex&); + UserMutex(const UserMutex&); + void operator = (const UserMutex&); }; // A thread is started in CTOR and joined in DTOR. @@ -100,14 +100,14 @@ void Call(void(*pc)()); void Return(); - void Create(const Mutex &m); - void Destroy(const Mutex &m); - void Lock(const Mutex &m); - bool TryLock(const Mutex &m); - void Unlock(const Mutex &m); - void ReadLock(const Mutex &m); - bool TryReadLock(const Mutex &m); - void ReadUnlock(const Mutex &m); + void Create(const UserMutex &m); + void Destroy(const UserMutex &m); + void Lock(const UserMutex &m); + bool TryLock(const UserMutex &m); + void Unlock(const UserMutex &m); + void ReadLock(const UserMutex &m); + bool TryReadLock(const UserMutex &m); + void ReadUnlock(const UserMutex &m); void Memcpy(void *dst, const void *src, int size, bool expect_race = false); void Memset(void *dst, int val, int size, bool expect_race = false); diff --git a/compiler-rt/lib/tsan/tests/rtl/tsan_test_util_posix.cpp b/compiler-rt/lib/tsan/tests/rtl/tsan_test_util_posix.cpp --- a/compiler-rt/lib/tsan/tests/rtl/tsan_test_util_posix.cpp +++ b/compiler-rt/lib/tsan/tests/rtl/tsan_test_util_posix.cpp @@ -90,16 +90,16 @@ MemLoc::~MemLoc() { } -Mutex::Mutex(Type type) +UserMutex::UserMutex(Type type) : alive_() , type_(type) { } -Mutex::~Mutex() { +UserMutex::~UserMutex() { CHECK(!alive_); } -void Mutex::Init() { +void UserMutex::Init() { CHECK(!alive_); alive_ = true; if (type_ == Normal) @@ -114,7 +114,7 @@ CHECK(0); } -void Mutex::StaticInit() { +void UserMutex::StaticInit() { CHECK(!alive_); CHECK(type_ == Normal); alive_ = true; @@ -122,7 +122,7 @@ memcpy(mtx_, &tmp, sizeof(tmp)); } -void Mutex::Destroy() { +void UserMutex::Destroy() { CHECK(alive_); alive_ = false; if (type_ == Normal) @@ -135,7 +135,7 @@ CHECK_EQ(__interceptor_pthread_rwlock_destroy((pthread_rwlock_t*)mtx_), 0); } -void Mutex::Lock() { +void UserMutex::Lock() { CHECK(alive_); if (type_ == Normal) CHECK_EQ(__interceptor_pthread_mutex_lock((pthread_mutex_t*)mtx_), 0); @@ -147,7 +147,7 @@ CHECK_EQ(__interceptor_pthread_rwlock_wrlock((pthread_rwlock_t*)mtx_), 0); } -bool Mutex::TryLock() { +bool UserMutex::TryLock() { CHECK(alive_); if (type_ == Normal) return __interceptor_pthread_mutex_trylock((pthread_mutex_t*)mtx_) == 0; @@ -160,7 +160,7 @@ return false; } -void Mutex::Unlock() { +void UserMutex::Unlock() { CHECK(alive_); if (type_ == Normal) CHECK_EQ(__interceptor_pthread_mutex_unlock((pthread_mutex_t*)mtx_), 0); @@ -172,19 +172,19 @@ CHECK_EQ(__interceptor_pthread_rwlock_unlock((pthread_rwlock_t*)mtx_), 0); } -void Mutex::ReadLock() { +void UserMutex::ReadLock() { CHECK(alive_); CHECK(type_ == RW); CHECK_EQ(__interceptor_pthread_rwlock_rdlock((pthread_rwlock_t*)mtx_), 0); } -bool Mutex::TryReadLock() { +bool UserMutex::TryReadLock() { CHECK(alive_); CHECK(type_ == RW); return 
__interceptor_pthread_rwlock_tryrdlock((pthread_rwlock_t*)mtx_) == 0; } -void Mutex::ReadUnlock() { +void UserMutex::ReadUnlock() { CHECK(alive_); CHECK(type_ == RW); CHECK_EQ(__interceptor_pthread_rwlock_unlock((pthread_rwlock_t*)mtx_), 0); @@ -310,28 +310,28 @@ __tsan_func_exit(); break; case Event::MUTEX_CREATE: - static_cast(ev->ptr)->Init(); + static_cast(ev->ptr)->Init(); break; case Event::MUTEX_DESTROY: - static_cast(ev->ptr)->Destroy(); + static_cast(ev->ptr)->Destroy(); break; case Event::MUTEX_LOCK: - static_cast(ev->ptr)->Lock(); + static_cast(ev->ptr)->Lock(); break; case Event::MUTEX_TRYLOCK: - ev->res = static_cast(ev->ptr)->TryLock(); + ev->res = static_cast(ev->ptr)->TryLock(); break; case Event::MUTEX_UNLOCK: - static_cast(ev->ptr)->Unlock(); + static_cast(ev->ptr)->Unlock(); break; case Event::MUTEX_READLOCK: - static_cast(ev->ptr)->ReadLock(); + static_cast(ev->ptr)->ReadLock(); break; case Event::MUTEX_TRYREADLOCK: - ev->res = static_cast(ev->ptr)->TryReadLock(); + ev->res = static_cast(ev->ptr)->TryReadLock(); break; case Event::MUTEX_READUNLOCK: - static_cast(ev->ptr)->ReadUnlock(); + static_cast(ev->ptr)->ReadUnlock(); break; case Event::MEMCPY: __interceptor_memcpy(ev->ptr, (void*)ev->arg, ev->arg2); @@ -440,44 +440,44 @@ impl_->send(&event); } -void ScopedThread::Create(const Mutex &m) { +void ScopedThread::Create(const UserMutex &m) { Event event(Event::MUTEX_CREATE, &m); impl_->send(&event); } -void ScopedThread::Destroy(const Mutex &m) { +void ScopedThread::Destroy(const UserMutex &m) { Event event(Event::MUTEX_DESTROY, &m); impl_->send(&event); } -void ScopedThread::Lock(const Mutex &m) { +void ScopedThread::Lock(const UserMutex &m) { Event event(Event::MUTEX_LOCK, &m); impl_->send(&event); } -bool ScopedThread::TryLock(const Mutex &m) { +bool ScopedThread::TryLock(const UserMutex &m) { Event event(Event::MUTEX_TRYLOCK, &m); impl_->send(&event); return event.res; } -void ScopedThread::Unlock(const Mutex &m) { +void ScopedThread::Unlock(const UserMutex &m) { Event event(Event::MUTEX_UNLOCK, &m); impl_->send(&event); } -void ScopedThread::ReadLock(const Mutex &m) { +void ScopedThread::ReadLock(const UserMutex &m) { Event event(Event::MUTEX_READLOCK, &m); impl_->send(&event); } -bool ScopedThread::TryReadLock(const Mutex &m) { +bool ScopedThread::TryReadLock(const UserMutex &m) { Event event(Event::MUTEX_TRYREADLOCK, &m); impl_->send(&event); return event.res; } -void ScopedThread::ReadUnlock(const Mutex &m) { +void ScopedThread::ReadUnlock(const UserMutex &m) { Event event(Event::MUTEX_READUNLOCK, &m); impl_->send(&event); } diff --git a/compiler-rt/lib/tsan/tests/unit/CMakeLists.txt b/compiler-rt/lib/tsan/tests/unit/CMakeLists.txt --- a/compiler-rt/lib/tsan/tests/unit/CMakeLists.txt +++ b/compiler-rt/lib/tsan/tests/unit/CMakeLists.txt @@ -3,7 +3,6 @@ tsan_dense_alloc_test.cpp tsan_flags_test.cpp tsan_mman_test.cpp - tsan_mutex_test.cpp tsan_shadow_test.cpp tsan_stack_test.cpp tsan_sync_test.cpp diff --git a/compiler-rt/lib/tsan/tests/unit/tsan_mutex_test.cpp b/compiler-rt/lib/tsan/tests/unit/tsan_mutex_test.cpp deleted file mode 100644 --- a/compiler-rt/lib/tsan/tests/unit/tsan_mutex_test.cpp +++ /dev/null @@ -1,125 +0,0 @@ -//===-- tsan_mutex_test.cpp -----------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file is a part of ThreadSanitizer (TSan), a race detector. -// -//===----------------------------------------------------------------------===// -#include "sanitizer_common/sanitizer_internal_defs.h" -#include "sanitizer_common/sanitizer_atomic.h" -#include "sanitizer_common/sanitizer_common.h" -#include "sanitizer_common/sanitizer_mutex.h" -#include "tsan_mutex.h" -#include "gtest/gtest.h" - -namespace __tsan { - -template -class TestData { - public: - explicit TestData(MutexType *mtx) - : mtx_(mtx) { - for (int i = 0; i < kSize; i++) - data_[i] = 0; - } - - void Write() { - Lock l(mtx_); - T v0 = data_[0]; - for (int i = 0; i < kSize; i++) { - CHECK_EQ(data_[i], v0); - data_[i]++; - } - } - - void Read() { - ReadLock l(mtx_); - T v0 = data_[0]; - for (int i = 0; i < kSize; i++) { - CHECK_EQ(data_[i], v0); - } - } - - void Backoff() { - volatile T data[kSize] = {}; - for (int i = 0; i < kSize; i++) { - data[i]++; - CHECK_EQ(data[i], 1); - } - } - - private: - typedef GenericScopedLock Lock; - static const int kSize = 64; - typedef u64 T; - MutexType *mtx_; - char pad_[kCacheLineSize]; - T data_[kSize]; -}; - -const int kThreads = 8; -const int kWriteRate = 1024; -#if SANITIZER_DEBUG -const int kIters = 16*1024; -#else -const int kIters = 64*1024; -#endif - -template -static void *write_mutex_thread(void *param) { - TestData *data = (TestData*)param; - for (int i = 0; i < kIters; i++) { - data->Write(); - data->Backoff(); - } - return 0; -} - -template -static void *read_mutex_thread(void *param) { - TestData *data = (TestData*)param; - for (int i = 0; i < kIters; i++) { - if ((i % kWriteRate) == 0) - data->Write(); - else - data->Read(); - data->Backoff(); - } - return 0; -} - -TEST(Mutex, Write) { - Mutex mtx(MutexTypeAnnotations); - TestData data(&mtx); - pthread_t threads[kThreads]; - for (int i = 0; i < kThreads; i++) - pthread_create(&threads[i], 0, write_mutex_thread, &data); - for (int i = 0; i < kThreads; i++) - pthread_join(threads[i], 0); -} - -TEST(Mutex, ReadWrite) { - Mutex mtx(MutexTypeAnnotations); - TestData data(&mtx); - pthread_t threads[kThreads]; - for (int i = 0; i < kThreads; i++) - pthread_create(&threads[i], 0, read_mutex_thread, &data); - for (int i = 0; i < kThreads; i++) - pthread_join(threads[i], 0); -} - -TEST(Mutex, SpinWrite) { - SpinMutex mtx; - TestData data(&mtx); - pthread_t threads[kThreads]; - for (int i = 0; i < kThreads; i++) - pthread_create(&threads[i], 0, write_mutex_thread, &data); - for (int i = 0; i < kThreads; i++) - pthread_join(threads[i], 0); -} - -} // namespace __tsan diff --git a/compiler-rt/lib/tsan/tests/unit/tsan_sync_test.cpp b/compiler-rt/lib/tsan/tests/unit/tsan_sync_test.cpp --- a/compiler-rt/lib/tsan/tests/unit/tsan_sync_test.cpp +++ b/compiler-rt/lib/tsan/tests/unit/tsan_sync_test.cpp @@ -47,7 +47,10 @@ EXPECT_EQ(mb2, (MBlock*)0); } -TEST(MetaMap, Sync) { +TEST(MetaMap, Sync) NO_THREAD_SAFETY_ANALYSIS { + // EXPECT can call memset/etc. Disable interceptors to prevent + // them from detecting that we exit runtime with mutexes held. 
+ ScopedIgnoreInterceptors ignore; ThreadState *thr = cur_thread(); MetaMap *m = &ctx->metamap; u64 block[4] = {}; // fake malloc block @@ -70,7 +73,8 @@ m->OnProcIdle(thr->proc()); } -TEST(MetaMap, MoveMemory) { +TEST(MetaMap, MoveMemory) NO_THREAD_SAFETY_ANALYSIS { + ScopedIgnoreInterceptors ignore; ThreadState *thr = cur_thread(); MetaMap *m = &ctx->metamap; u64 block1[4] = {}; // fake malloc block @@ -107,7 +111,8 @@ m->FreeRange(thr->proc(), (uptr)&block2[0], 4 * sizeof(u64)); } -TEST(MetaMap, ResetSync) { +TEST(MetaMap, ResetSync) NO_THREAD_SAFETY_ANALYSIS { + ScopedIgnoreInterceptors ignore; ThreadState *thr = cur_thread(); MetaMap *m = &ctx->metamap; u64 block[1] = {}; // fake malloc block
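
For reference, a sketch of how a tool plugs into the new checker (the MutexTypeA/MutexTypeB names and function bodies below are illustrative assumptions, not part of this patch): the tool overrides the weak mutex_meta table in namespace __sanitizer, exactly as the tsan_rtl.cpp hunk above does, and constructs each Mutex with its type; in a SANITIZER_CHECK_DEADLOCKS build, acquiring mutexes against the declared hierarchy dies with an "internal deadlock" message.

// Tool-side hierarchy: B may be locked while A is held, but not vice versa.
enum { MutexTypeA = MutexLastCommon, MutexTypeB };

namespace __sanitizer {
MutexMeta mutex_meta[] = {
    {MutexInvalid, "Invalid", {}},
    {MutexThreadRegistry, "ThreadRegistry", {}},
    {MutexTypeA, "A", {MutexTypeB}},
    {MutexTypeB, "B", {}},
    {},
};
void PrintMutexPC(uptr pc) {}
}  // namespace __sanitizer

static Mutex mu_a(MutexTypeA);
static Mutex mu_b(MutexTypeB);

void LocksInDeclaredOrder() {
  Lock la(&mu_a);
  Lock lb(&mu_b);  // fine: B is declared lockable under A
}

void LocksAgainstHierarchy() {
  Lock lb(&mu_b);
  Lock la(&mu_a);  // debug build dies: "can't lock A under B mutex"
}
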