diff --git a/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp b/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp
--- a/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp
@@ -218,8 +218,9 @@
 }
 #endif

-template<typename T>
-static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a, morder mo) {
+template <typename T>
+static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a,
+                    morder mo) NO_THREAD_SAFETY_ANALYSIS {
   CHECK(IsLoadOrder(mo));
   // This fast-path is critical for performance.
   // Assume the access is atomic.
@@ -254,9 +255,9 @@
 }
 #endif

-template<typename T>
+template <typename T>
 static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
-                        morder mo) {
+                        morder mo) NO_THREAD_SAFETY_ANALYSIS {
   CHECK(IsStoreOrder(mo));
   MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
   // This fast-path is critical for performance.
@@ -277,8 +278,9 @@
   s->mtx.Unlock();
 }

-template<typename T, T (*F)(volatile T *v, T op)>
-static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
+template <typename T, T (*F)(volatile T *v, T op)>
+static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v,
+                   morder mo) NO_THREAD_SAFETY_ANALYSIS {
   MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
   SyncVar *s = 0;
   if (mo != mo_relaxed) {
@@ -399,9 +401,9 @@
   return c;
 }

-template<typename T>
-static bool AtomicCAS(ThreadState *thr, uptr pc,
-    volatile T *a, T *c, T v, morder mo, morder fmo) {
+template <typename T>
+static bool AtomicCAS(ThreadState *thr, uptr pc, volatile T *a, T *c, T v, morder mo,
+                      morder fmo) NO_THREAD_SAFETY_ANALYSIS {
   // 31.7.2.18: "The failure argument shall not be memory_order_release
   // nor memory_order_acq_rel". LLVM (2021-05) fallbacks to Monotonic
   // (mo_relaxed) when those are used.
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp
@@ -63,7 +63,7 @@
   OutputReport(thr, rep);
 }

-void MutexCreate(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
+void MutexCreate(ThreadState *thr, uptr pc, uptr addr, u32 flagz) NO_THREAD_SAFETY_ANALYSIS {
   DPrintf("#%d: MutexCreate %zx flagz=0x%x\n", thr->tid, addr, flagz);
   if (!(flagz & MutexFlagLinkerInit) && IsAppMem(addr)) {
     CHECK(!thr->is_freeing);
@@ -78,7 +78,7 @@
   s->mtx.Unlock();
 }

-void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
+void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, u32 flagz) NO_THREAD_SAFETY_ANALYSIS {
   DPrintf("#%d: MutexDestroy %zx\n", thr->tid, addr);
   SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true);
   if (s == 0)
@@ -138,7 +138,7 @@
   // s will be destroyed and freed in MetaMap::FreeBlock.
 }

-void MutexPreLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
+void MutexPreLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) NO_THREAD_SAFETY_ANALYSIS {
   DPrintf("#%d: MutexPreLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
   if (!(flagz & MutexFlagTryLock) && common_flags()->detect_deadlocks) {
     SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
@@ -154,7 +154,8 @@
   }
 }

-void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz, int rec) {
+void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz,
+                   int rec) NO_THREAD_SAFETY_ANALYSIS {
   DPrintf("#%d: MutexPostLock %zx flag=0x%x rec=%d\n",
       thr->tid, addr, flagz, rec);
   if (flagz & MutexFlagRecursiveLock)
@@ -207,7 +208,7 @@
   }
 }

-int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
+int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) NO_THREAD_SAFETY_ANALYSIS {
   DPrintf("#%d: MutexUnlock %zx flagz=0x%x\n", thr->tid, addr, flagz);
   if (IsAppMem(addr))
     MemoryReadAtomic(thr, pc, addr, kSizeLog1);
@@ -248,7 +249,7 @@
   return rec;
 }

-void MutexPreReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
+void MutexPreReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) NO_THREAD_SAFETY_ANALYSIS {
   DPrintf("#%d: MutexPreReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
   if (!(flagz & MutexFlagTryLock) && common_flags()->detect_deadlocks) {
     SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
@@ -260,7 +261,7 @@
   }
 }

-void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
+void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) NO_THREAD_SAFETY_ANALYSIS {
   DPrintf("#%d: MutexPostReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
   if (IsAppMem(addr))
     MemoryReadAtomic(thr, pc, addr, kSizeLog1);
@@ -299,7 +300,7 @@
   }
 }

-void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
+void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) NO_THREAD_SAFETY_ANALYSIS {
   DPrintf("#%d: MutexReadUnlock %zx\n", thr->tid, addr);
   if (IsAppMem(addr))
     MemoryReadAtomic(thr, pc, addr, kSizeLog1);
@@ -330,7 +331,7 @@
   }
 }

-void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
+void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) NO_THREAD_SAFETY_ANALYSIS {
   DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr->tid, addr);
   if (IsAppMem(addr))
     MemoryReadAtomic(thr, pc, addr, kSizeLog1);
@@ -374,7 +375,7 @@
   }
 }

-void MutexRepair(ThreadState *thr, uptr pc, uptr addr) {
+void MutexRepair(ThreadState *thr, uptr pc, uptr addr) NO_THREAD_SAFETY_ANALYSIS {
   DPrintf("#%d: MutexRepair %zx\n", thr->tid, addr);
   SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
   s->owner_tid = kInvalidTid;
@@ -382,7 +383,7 @@
   s->mtx.Unlock();
 }

-void MutexInvalidAccess(ThreadState *thr, uptr pc, uptr addr) {
+void MutexInvalidAccess(ThreadState *thr, uptr pc, uptr addr) NO_THREAD_SAFETY_ANALYSIS {
   DPrintf("#%d: MutexInvalidAccess %zx\n", thr->tid, addr);
   SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
   u64 mid = s->GetId();
@@ -390,7 +391,7 @@
   ReportMutexMisuse(thr, pc, ReportTypeMutexInvalidAccess, addr, mid);
 }

-void Acquire(ThreadState *thr, uptr pc, uptr addr) {
+void Acquire(ThreadState *thr, uptr pc, uptr addr) NO_THREAD_SAFETY_ANALYSIS {
   DPrintf("#%d: Acquire %zx\n", thr->tid, addr);
   if (thr->ignore_sync)
     return;
@@ -421,7 +422,7 @@
       UpdateClockCallback, thr);
 }

-void ReleaseStoreAcquire(ThreadState *thr, uptr pc, uptr addr) {
+void ReleaseStoreAcquire(ThreadState *thr, uptr pc, uptr addr) NO_THREAD_SAFETY_ANALYSIS {
{ DPrintf("#%d: ReleaseStoreAcquire %zx\n", thr->tid, addr); if (thr->ignore_sync) return; @@ -433,7 +434,7 @@ s->mtx.Unlock(); } -void Release(ThreadState *thr, uptr pc, uptr addr) { +void Release(ThreadState *thr, uptr pc, uptr addr) NO_THREAD_SAFETY_ANALYSIS { DPrintf("#%d: Release %zx\n", thr->tid, addr); if (thr->ignore_sync) return; @@ -445,7 +446,7 @@ s->mtx.Unlock(); } -void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) { +void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) NO_THREAD_SAFETY_ANALYSIS { DPrintf("#%d: ReleaseStore %zx\n", thr->tid, addr); if (thr->ignore_sync) return; diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp --- a/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp +++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp @@ -285,7 +285,7 @@ rm->stack = SymbolizeStackId(s->creation_stack_id); } -u64 ScopedReportBase::AddMutex(u64 id) { +u64 ScopedReportBase::AddMutex(u64 id) NO_THREAD_SAFETY_ANALYSIS { u64 uid = 0; u64 mid = id; uptr addr = SyncVar::SplitId(id, &uid); diff --git a/compiler-rt/lib/tsan/rtl/tsan_sync.cpp b/compiler-rt/lib/tsan/rtl/tsan_sync.cpp --- a/compiler-rt/lib/tsan/rtl/tsan_sync.cpp +++ b/compiler-rt/lib/tsan/rtl/tsan_sync.cpp @@ -199,8 +199,8 @@ return GetAndLock(0, 0, addr, write_lock, false); } -SyncVar* MetaMap::GetAndLock(ThreadState *thr, uptr pc, - uptr addr, bool write_lock, bool create) { +SyncVar *MetaMap::GetAndLock(ThreadState *thr, uptr pc, uptr addr, bool write_lock, + bool create) NO_THREAD_SAFETY_ANALYSIS { u32 *meta = MemToMeta(addr); u32 idx0 = *meta; u32 myidx = 0; diff --git a/compiler-rt/lib/tsan/tests/unit/tsan_sync_test.cpp b/compiler-rt/lib/tsan/tests/unit/tsan_sync_test.cpp --- a/compiler-rt/lib/tsan/tests/unit/tsan_sync_test.cpp +++ b/compiler-rt/lib/tsan/tests/unit/tsan_sync_test.cpp @@ -47,7 +47,10 @@ EXPECT_EQ(mb2, (MBlock*)0); } -TEST(MetaMap, Sync) { +TEST(MetaMap, Sync) NO_THREAD_SAFETY_ANALYSIS { + // EXPECT can call memset/etc. Disable interceptors to prevent + // them from detecting that we exit runtime with mutexes held. + ScopedIgnoreInterceptors ignore; ThreadState *thr = cur_thread(); MetaMap *m = &ctx->metamap; u64 block[4] = {}; // fake malloc block @@ -70,7 +73,8 @@ m->OnProcIdle(thr->proc()); } -TEST(MetaMap, MoveMemory) { +TEST(MetaMap, MoveMemory) NO_THREAD_SAFETY_ANALYSIS { + ScopedIgnoreInterceptors ignore; ThreadState *thr = cur_thread(); MetaMap *m = &ctx->metamap; u64 block1[4] = {}; // fake malloc block @@ -107,7 +111,8 @@ m->FreeRange(thr->proc(), (uptr)&block2[0], 4 * sizeof(u64)); } -TEST(MetaMap, ResetSync) { +TEST(MetaMap, ResetSync) NO_THREAD_SAFETY_ANALYSIS { + ScopedIgnoreInterceptors ignore; ThreadState *thr = cur_thread(); MetaMap *m = &ctx->metamap; u64 block[1] = {}; // fake malloc block
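
Note on NO_THREAD_SAFETY_ANALYSIS: Clang's -Wthread-safety analysis assumes every mutex acquired in a function body is released in that same body, but these runtime paths acquire s->mtx inside MetaMap::GetOrCreateAndLock / GetAndLock and release it on a different path, or hand the locked SyncVar back to the caller, so each such function opts out of the analysis wholesale. Below is a minimal sketch of the mechanism, assuming Clang's documented thread-safety attributes; MyMutex, MySyncVar, and GetAndLockSketch are hypothetical stand-ins, not the sanitizer_common definitions.

// Sketch only: a typical definition of the opt-out macro, plus the
// "lock escapes the function" pattern it suppresses. All names below
// are illustrative, not the TSan runtime API.
#if defined(__clang__)
#define NO_THREAD_SAFETY_ANALYSIS __attribute__((no_thread_safety_analysis))
#else
#define NO_THREAD_SAFETY_ANALYSIS
#endif

// -Wthread-safety only tracks mutex types annotated as capabilities.
class __attribute__((capability("mutex"))) MyMutex {
 public:
  void Lock() __attribute__((acquire_capability()));
  void Unlock() __attribute__((release_capability()));
};

struct MySyncVar {
  MyMutex mtx;  // hypothetical stand-in for SyncVar::mtx
};

// Returns with s->mtx held; the caller unlocks later. Without the
// annotation, Clang diagnoses the unbalanced acquire ("mutex 'mtx' is
// still held at the end of function"), even though the code is correct.
MySyncVar *GetAndLockSketch(MySyncVar *s) NO_THREAD_SAFETY_ANALYSIS {
  s->mtx.Lock();
  return s;
}

Clang does offer function-level acquire_capability()/release_capability() annotations for lock-returning APIs, which would keep callers checkable, but threading those through every SyncVar path is a much larger change than this blanket opt-out; the same applies to the unit tests, which intentionally hold mutexes across EXPECT calls.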