diff --git a/compiler-rt/CMakeLists.txt b/compiler-rt/CMakeLists.txt
--- a/compiler-rt/CMakeLists.txt
+++ b/compiler-rt/CMakeLists.txt
@@ -359,6 +359,14 @@
 append_list_if(COMPILER_RT_DEBUG -DSANITIZER_DEBUG=1 SANITIZER_COMMON_CFLAGS)
 
+if(CMAKE_CXX_COMPILER_ID MATCHES Clang)
+  list(APPEND SANITIZER_COMMON_CFLAGS
+    "-Werror=thread-safety"
+    "-Werror=thread-safety-reference"
+    "-Werror=thread-safety-beta"
+  )
+endif()
+
 # If we're using MSVC,
 # always respect the optimization flags set by CMAKE_BUILD_TYPE instead.
 if (NOT MSVC)
diff --git a/compiler-rt/lib/asan/asan_allocator.cpp b/compiler-rt/lib/asan/asan_allocator.cpp
--- a/compiler-rt/lib/asan/asan_allocator.cpp
+++ b/compiler-rt/lib/asan/asan_allocator.cpp
@@ -852,12 +852,12 @@
     quarantine.PrintStats();
   }
 
-  void ForceLock() {
+  void ForceLock() ACQUIRE(fallback_mutex) {
     allocator.ForceLock();
     fallback_mutex.Lock();
   }
 
-  void ForceUnlock() {
+  void ForceUnlock() RELEASE(fallback_mutex) {
     fallback_mutex.Unlock();
     allocator.ForceUnlock();
   }
@@ -1081,11 +1081,9 @@
   return instance.AllocationSize(reinterpret_cast<uptr>(ptr));
 }
 
-void asan_mz_force_lock() {
-  instance.ForceLock();
-}
+void asan_mz_force_lock() NO_THREAD_SAFETY_ANALYSIS { instance.ForceLock(); }
 
-void asan_mz_force_unlock() {
+void asan_mz_force_unlock() NO_THREAD_SAFETY_ANALYSIS {
   instance.ForceUnlock();
 }
diff --git a/compiler-rt/lib/cfi/cfi.cpp b/compiler-rt/lib/cfi/cfi.cpp
--- a/compiler-rt/lib/cfi/cfi.cpp
+++ b/compiler-rt/lib/cfi/cfi.cpp
@@ -322,14 +322,14 @@
 THREADLOCAL int in_loader;
 BlockingMutex shadow_update_lock(LINKER_INITIALIZED);
 
-void EnterLoader() {
+void EnterLoader() NO_THREAD_SAFETY_ANALYSIS {
   if (in_loader == 0) {
     shadow_update_lock.Lock();
   }
   ++in_loader;
 }
 
-void ExitLoader() {
+void ExitLoader() NO_THREAD_SAFETY_ANALYSIS {
   CHECK(in_loader > 0);
   --in_loader;
   UpdateShadow();
diff --git a/compiler-rt/lib/lsan/lsan_common.h b/compiler-rt/lib/lsan/lsan_common.h
--- a/compiler-rt/lib/lsan/lsan_common.h
+++ b/compiler-rt/lib/lsan/lsan_common.h
@@ -221,8 +221,8 @@
 // Returns true if [addr, addr + sizeof(void *)) is poisoned.
 bool WordIsPoisoned(uptr addr);
 // Wrappers for ThreadRegistry access.
-void LockThreadRegistry();
-void UnlockThreadRegistry();
+void LockThreadRegistry() NO_THREAD_SAFETY_ANALYSIS;
+void UnlockThreadRegistry() NO_THREAD_SAFETY_ANALYSIS;
 ThreadRegistry *GetThreadRegistryLocked();
 bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
                            uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
diff --git a/compiler-rt/lib/memprof/memprof_allocator.cpp b/compiler-rt/lib/memprof/memprof_allocator.cpp
--- a/compiler-rt/lib/memprof/memprof_allocator.cpp
+++ b/compiler-rt/lib/memprof/memprof_allocator.cpp
@@ -736,12 +736,12 @@
   void PrintStats() { allocator.PrintStats(); }
 
-  void ForceLock() {
+  void ForceLock() NO_THREAD_SAFETY_ANALYSIS {
    allocator.ForceLock();
     fallback_mutex.Lock();
   }
 
-  void ForceUnlock() {
+  void ForceUnlock() NO_THREAD_SAFETY_ANALYSIS {
     fallback_mutex.Unlock();
     allocator.ForceUnlock();
   }
diff --git a/compiler-rt/lib/sanitizer_common/CMakeLists.txt b/compiler-rt/lib/sanitizer_common/CMakeLists.txt
--- a/compiler-rt/lib/sanitizer_common/CMakeLists.txt
+++ b/compiler-rt/lib/sanitizer_common/CMakeLists.txt
@@ -185,6 +185,7 @@
   sanitizer_syscall_linux_riscv64.inc
   sanitizer_syscalls_netbsd.inc
   sanitizer_thread_registry.h
+  sanitizer_thread_safety.h
   sanitizer_tls_get_addr.h
   sanitizer_vector.h
   sanitizer_win.h
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_addrhashmap.h b/compiler-rt/lib/sanitizer_common/sanitizer_addrhashmap.h
--- a/compiler-rt/lib/sanitizer_common/sanitizer_addrhashmap.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_addrhashmap.h
@@ -162,8 +162,8 @@
   table_ = (Bucket*)MmapOrDie(kSize * sizeof(table_[0]), "AddrHashMap");
 }
 
-template <typename T, uptr kSize>
-void AddrHashMap<T, kSize>::acquire(Handle *h) {
+template <typename T, uptr kSize>
+void AddrHashMap<T, kSize>::acquire(Handle *h) NO_THREAD_SAFETY_ANALYSIS {
   uptr addr = h->addr_;
   uptr hash = calcHash(addr);
   Bucket *b = &table_[hash];
@@ -289,57 +289,57 @@
   CHECK_EQ(atomic_load(&c->addr, memory_order_relaxed), 0);
   h->addidx_ = i;
   h->cell_ = c;
 }
 
 template <typename T, uptr kSize>
-void AddrHashMap<T, kSize>::release(Handle *h) {
+void AddrHashMap<T, kSize>::release(Handle *h) NO_THREAD_SAFETY_ANALYSIS {
   if (!h->cell_)
     return;
   Bucket *b = h->bucket_;
   Cell *c = h->cell_;
   uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
   if (h->created_) {
     // Denote completion of insertion.
     CHECK_EQ(addr1, 0);
     // After the following store, the element becomes available
     // for lock-free reads.
     atomic_store(&c->addr, h->addr_, memory_order_release);
     b->mtx.Unlock();
   } else if (h->remove_) {
     // Denote that the cell is empty now.
     CHECK_EQ(addr1, h->addr_);
     atomic_store(&c->addr, 0, memory_order_release);
     // See if we need to compact the bucket.
     AddBucket *add = (AddBucket*)atomic_load(&b->add, memory_order_relaxed);
     if (h->addidx_ == -1U) {
       // Removed from embed array, move an add element into the freed cell.
       if (add && add->size != 0) {
         uptr last = --add->size;
         Cell *c1 = &add->cells[last];
         c->val = c1->val;
         uptr addr1 = atomic_load(&c1->addr, memory_order_relaxed);
         atomic_store(&c->addr, addr1, memory_order_release);
         atomic_store(&c1->addr, 0, memory_order_release);
       }
     } else {
       // Removed from add array, compact it.
       uptr last = --add->size;
       Cell *c1 = &add->cells[last];
       if (c != c1) {
         *c = *c1;
         atomic_store(&c1->addr, 0, memory_order_relaxed);
       }
     }
     if (add && add->size == 0) {
       // FIXME(dvyukov): free add?
     }
     b->mtx.Unlock();
   } else {
     CHECK_EQ(addr1, h->addr_);
     if (h->addidx_ != -1U)
       b->mtx.ReadUnlock();
   }
 }
 
 template <typename T, uptr kSize>
 uptr AddrHashMap<T, kSize>::calcHash(uptr addr) {
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_combined.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_combined.h
--- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_combined.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_combined.h
@@ -177,12 +177,12 @@
   // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
   // introspection API.
-  void ForceLock() {
+  void ForceLock() NO_THREAD_SAFETY_ANALYSIS {
     primary_.ForceLock();
     secondary_.ForceLock();
   }
 
-  void ForceUnlock() {
+  void ForceUnlock() NO_THREAD_SAFETY_ANALYSIS {
     secondary_.ForceUnlock();
     primary_.ForceUnlock();
   }
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h
--- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h
@@ -312,13 +312,13 @@
   // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
   // introspection API.
-  void ForceLock() {
+  void ForceLock() NO_THREAD_SAFETY_ANALYSIS {
     for (uptr i = 0; i < kNumClasses; i++) {
       GetRegionInfo(i)->mutex.Lock();
     }
   }
 
-  void ForceUnlock() {
+  void ForceUnlock() NO_THREAD_SAFETY_ANALYSIS {
     for (int i = (int)kNumClasses - 1; i >= 0; i--) {
       GetRegionInfo(i)->mutex.Unlock();
     }
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_secondary.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_secondary.h
--- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_secondary.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_secondary.h
@@ -267,13 +267,9 @@
   // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
   // introspection API.
-  void ForceLock() {
-    mutex_.Lock();
-  }
+  void ForceLock() ACQUIRE(mutex_) { mutex_.Lock(); }
 
-  void ForceUnlock() {
-    mutex_.Unlock();
-  }
+  void ForceUnlock() RELEASE(mutex_) { mutex_.Unlock(); }
 
   // Iterate over all existing chunks.
   // The allocator must be locked when calling this function.
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cpp
--- a/compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cpp
@@ -136,8 +136,8 @@
   }
 }
 
-void BlockingMutex::CheckLocked() {
-  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
+void BlockingMutex::CheckLocked() const {
+  auto m = reinterpret_cast<atomic_uint32_t const *>(&opaque_storage_);
   CHECK_NE(MtxUnlocked, atomic_load(m, memory_order_relaxed));
 }
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp
--- a/compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp
@@ -677,11 +677,11 @@
   }
 }
 
-void BlockingMutex::CheckLocked() {
-  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
+void BlockingMutex::CheckLocked() const {
+  auto m = reinterpret_cast<atomic_uint32_t const *>(&opaque_storage_);
   CHECK_NE(MtxUnlocked, atomic_load(m, memory_order_relaxed));
 }
-#endif  // !SANITIZER_SOLARIS
+# endif  // !SANITIZER_SOLARIS
 
 // ----------------- sanitizer_linux.h
 // The actual size of this structure is specified by d_reclen.
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp
--- a/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp
@@ -524,7 +524,7 @@
   OSSpinLockUnlock((OSSpinLock*)&opaque_storage_);
 }
 
-void BlockingMutex::CheckLocked() {
+void BlockingMutex::CheckLocked() const {
   CHECK_NE(*(OSSpinLock*)&opaque_storage_, 0);
 }
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_mutex.h b/compiler-rt/lib/sanitizer_common/sanitizer_mutex.h
--- a/compiler-rt/lib/sanitizer_common/sanitizer_mutex.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_mutex.h
@@ -16,30 +16,29 @@
 #include "sanitizer_atomic.h"
 #include "sanitizer_internal_defs.h"
 #include "sanitizer_libc.h"
+#include "sanitizer_thread_safety.h"
 
 namespace __sanitizer {
 
-class StaticSpinMutex {
+class MUTEX StaticSpinMutex {
  public:
   void Init() {
     atomic_store(&state_, 0, memory_order_relaxed);
   }
 
-  void Lock() {
+  void Lock() ACQUIRE() {
     if (TryLock())
       return;
     LockSlow();
   }
 
-  bool TryLock() {
+  bool TryLock() TRY_ACQUIRE(true) {
     return atomic_exchange(&state_, 1, memory_order_acquire) == 0;
   }
 
-  void Unlock() {
-    atomic_store(&state_, 0, memory_order_release);
-  }
+  void Unlock() RELEASE() { atomic_store(&state_, 0, memory_order_release); }
 
-  void CheckLocked() {
+  void CheckLocked() const CHECK_LOCKED {
     CHECK_EQ(atomic_load(&state_, memory_order_relaxed), 1);
   }
 
@@ -59,7 +58,7 @@
   }
 };
 
-class SpinMutex : public StaticSpinMutex {
+class MUTEX SpinMutex : public StaticSpinMutex {
  public:
   SpinMutex() {
     Init();
@@ -70,13 +69,13 @@
   void operator=(const SpinMutex &) = delete;
 };
 
-class BlockingMutex {
+class MUTEX BlockingMutex {
  public:
   explicit constexpr BlockingMutex(LinkerInitialized)
       : opaque_storage_ {0, }, owner_ {0} {}
   BlockingMutex();
-  void Lock();
-  void Unlock();
+  void Lock() ACQUIRE();
+  void Unlock() RELEASE();
   // This function does not guarantee an explicit check that the calling thread
   // is the thread which owns the mutex. This behavior, while more strictly
@@ -85,7 +84,7 @@
   // maintaining complex state to work around those situations, the check only
   // checks that the mutex is owned, and assumes callers to be generally
   // well-behaved.
-  void CheckLocked();
+  void CheckLocked() const CHECK_LOCKED;
 
  private:
   // Solaris mutex_t has a member that requires 64-bit alignment.
@@ -94,7 +93,7 @@
 };
 
 // Reader-writer spin mutex.
-class RWMutex {
+class MUTEX RWMutex {
  public:
   RWMutex() {
     atomic_store(&state_, kUnlocked, memory_order_relaxed);
@@ -104,7 +103,7 @@
     CHECK_EQ(atomic_load(&state_, memory_order_relaxed), kUnlocked);
   }
 
-  void Lock() {
+  void Lock() ACQUIRE() {
     u32 cmp = kUnlocked;
     if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock,
                                        memory_order_acquire))
@@ -112,27 +111,27 @@
     LockSlow();
   }
 
-  void Unlock() {
+  void Unlock() RELEASE() {
     u32 prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release);
     DCHECK_NE(prev & kWriteLock, 0);
     (void)prev;
   }
 
-  void ReadLock() {
+  void ReadLock() ACQUIRE_SHARED() {
     u32 prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire);
     if ((prev & kWriteLock) == 0)
       return;
     ReadLockSlow();
   }
 
-  void ReadUnlock() {
+  void ReadUnlock() RELEASE_SHARED() {
     u32 prev = atomic_fetch_sub(&state_, kReadLock, memory_order_release);
     DCHECK_EQ(prev & kWriteLock, 0);
     DCHECK_GT(prev & ~kWriteLock, 0);
     (void)prev;
   }
 
-  void CheckLocked() {
+  void CheckLocked() const CHECK_LOCKED {
     CHECK_NE(atomic_load(&state_, memory_order_relaxed), kUnlocked);
   }
 
@@ -175,17 +174,14 @@
   void operator=(const RWMutex &) = delete;
 };
 
-template <typename MutexType>
-class GenericScopedLock {
+template <typename MutexType>
+class SCOPED_LOCK GenericScopedLock {
  public:
-  explicit GenericScopedLock(MutexType *mu)
-      : mu_(mu) {
+  explicit GenericScopedLock(MutexType *mu) ACQUIRE(mu) : mu_(mu) {
     mu_->Lock();
   }
 
-  ~GenericScopedLock() {
-    mu_->Unlock();
-  }
+  ~GenericScopedLock() RELEASE() { mu_->Unlock(); }
 
  private:
   MutexType *mu_;
@@ -194,17 +190,14 @@
   void operator=(const GenericScopedLock &) = delete;
 };
 
-template <typename MutexType>
-class GenericScopedReadLock {
+template <typename MutexType>
+class SCOPED_LOCK GenericScopedReadLock {
 public:
-  explicit GenericScopedReadLock(MutexType *mu)
-      : mu_(mu) {
+  explicit GenericScopedReadLock(MutexType *mu) ACQUIRE(mu) : mu_(mu) {
     mu_->ReadLock();
   }
 
-  ~GenericScopedReadLock() {
-    mu_->ReadUnlock();
-  }
+  ~GenericScopedReadLock() RELEASE() { mu_->ReadUnlock(); }
 
  private:
   MutexType *mu_;
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_quarantine.h b/compiler-rt/lib/sanitizer_common/sanitizer_quarantine.h
--- a/compiler-rt/lib/sanitizer_common/sanitizer_quarantine.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_quarantine.h
@@ -149,7 +149,8 @@
   Cache cache_;
   char pad2_[kCacheLineSize];
 
-  void NOINLINE Recycle(uptr min_size, Callback cb) {
+  void NOINLINE Recycle(uptr min_size, Callback cb) REQUIRES(recycle_mutex_)
+      RELEASE(recycle_mutex_) {
     Cache tmp;
     {
       SpinMutexLock l(&cache_mutex_);
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_solaris.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_solaris.cpp
--- a/compiler-rt/lib/sanitizer_common/sanitizer_solaris.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_solaris.cpp
@@ -231,9 +231,7 @@
   CHECK_EQ(mutex_unlock((mutex_t *)&opaque_storage_), 0);
 }
 
-void BlockingMutex::CheckLocked() {
-  CHECK_EQ((uptr)thr_self(), owner_);
-}
+void BlockingMutex::CheckLocked() const { CHECK_EQ((uptr)thr_self(), owner_); }
 
 }  // namespace __sanitizer
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_thread_registry.h b/compiler-rt/lib/sanitizer_common/sanitizer_thread_registry.h
--- a/compiler-rt/lib/sanitizer_common/sanitizer_thread_registry.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_thread_registry.h
@@ -85,7 +85,7 @@
 
 typedef ThreadContextBase* (*ThreadContextFactory)(u32 tid);
 
-class ThreadRegistry {
+class MUTEX ThreadRegistry {
  public:
   ThreadRegistry(ThreadContextFactory factory);
   ThreadRegistry(ThreadContextFactory factory, u32 max_threads,
@@ -94,9 +94,9 @@
                  uptr *alive = nullptr);
   uptr GetMaxAliveThreads();
 
-  void Lock() { mtx_.Lock(); }
-  void CheckLocked() { mtx_.CheckLocked(); }
-  void Unlock() { mtx_.Unlock(); }
+  void Lock() ACQUIRE() { mtx_.Lock(); }
+  void CheckLocked() const CHECK_LOCKED { mtx_.CheckLocked(); }
+  void Unlock() RELEASE() { mtx_.Unlock(); }
 
   // Should be guarded by ThreadRegistryLock.
   ThreadContextBase *GetThreadLocked(u32 tid) {
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_thread_safety.h b/compiler-rt/lib/sanitizer_common/sanitizer_thread_safety.h
new file mode 100644
--- /dev/null
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_thread_safety.h
@@ -0,0 +1,42 @@
+//===-- sanitizer_thread_safety.h -------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between sanitizer tools.
+//
+// Wrappers around thread safety annotations.
+// https://clang.llvm.org/docs/ThreadSafetyAnalysis.html
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_THREAD_SAFETY_H
+#define SANITIZER_THREAD_SAFETY_H
+
+#if defined(__clang__)
+# define THREAD_ANNOTATION(x) __attribute__((x))
+#else
+# define THREAD_ANNOTATION(x)
+#endif
+
+#define MUTEX THREAD_ANNOTATION(capability("mutex"))
+#define SCOPED_LOCK THREAD_ANNOTATION(scoped_lockable)
+#define GUARDED_BY(x) THREAD_ANNOTATION(guarded_by(x))
+#define PT_GUARDED_BY(x) THREAD_ANNOTATION(pt_guarded_by(x))
+#define REQUIRES(...) THREAD_ANNOTATION(requires_capability(__VA_ARGS__))
+#define REQUIRES_SHARED(...) \
+  THREAD_ANNOTATION(requires_shared_capability(__VA_ARGS__))
+#define ACQUIRE(...) THREAD_ANNOTATION(acquire_capability(__VA_ARGS__))
+#define ACQUIRE_SHARED(...) \
+  THREAD_ANNOTATION(acquire_shared_capability(__VA_ARGS__))
+#define TRY_ACQUIRE(...) THREAD_ANNOTATION(try_acquire_capability(__VA_ARGS__))
+#define RELEASE(...) THREAD_ANNOTATION(release_capability(__VA_ARGS__))
+#define RELEASE_SHARED(...) \
+  THREAD_ANNOTATION(release_shared_capability(__VA_ARGS__))
+#define EXCLUDES(...) THREAD_ANNOTATION(locks_excluded(__VA_ARGS__))
+#define CHECK_LOCKED THREAD_ANNOTATION(assert_capability(this))
+#define NO_THREAD_SAFETY_ANALYSIS THREAD_ANNOTATION(no_thread_safety_analysis)
+
+#endif
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_win.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_win.cpp
--- a/compiler-rt/lib/sanitizer_common/sanitizer_win.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_win.cpp
@@ -832,9 +832,7 @@
   ReleaseSRWLockExclusive((PSRWLOCK)opaque_storage_);
 }
 
-void BlockingMutex::CheckLocked() {
-  CHECK_EQ(owner_, GetThreadSelf());
-}
+void BlockingMutex::CheckLocked() const { CHECK_EQ(owner_, GetThreadSelf()); }
 
 uptr GetTlsSize() {
   return 0;
diff --git a/compiler-rt/lib/scudo/scudo_allocator.cpp b/compiler-rt/lib/scudo/scudo_allocator.cpp
--- a/compiler-rt/lib/scudo/scudo_allocator.cpp
+++ b/compiler-rt/lib/scudo/scudo_allocator.cpp
@@ -300,7 +300,7 @@
 
   // Allocates a chunk.
   void *allocate(uptr Size, uptr Alignment, AllocType Type,
-                 bool ForceZeroContents = false) {
+                 bool ForceZeroContents = false) NO_THREAD_SAFETY_ANALYSIS {
     initThreadMaybe();
 
     if (UNLIKELY(Alignment > MaxAlignment)) {
@@ -405,7 +405,7 @@
   // a zero-sized quarantine, or if the size of the chunk is greater than the
   // quarantine chunk size threshold.
   void quarantineOrDeallocateChunk(void *Ptr, UnpackedHeader *Header,
-                                   uptr Size) {
+                                   uptr Size) NO_THREAD_SAFETY_ANALYSIS {
     const bool BypassQuarantine = !Size || (Size > QuarantineChunksUpToSize);
     if (BypassQuarantine) {
       UnpackedHeader NewHeader = *Header;
diff --git a/compiler-rt/lib/scudo/scudo_tsd.h b/compiler-rt/lib/scudo/scudo_tsd.h
--- a/compiler-rt/lib/scudo/scudo_tsd.h
+++ b/compiler-rt/lib/scudo/scudo_tsd.h
@@ -29,7 +29,7 @@
   void init();
   void commitBack();
 
-  inline bool tryLock() {
+  inline bool tryLock() TRY_ACQUIRE(true, Mutex) {
     if (Mutex.TryLock()) {
       atomic_store_relaxed(&Precedence, 0);
       return true;
@@ -40,12 +40,12 @@
     return false;
   }
 
-  inline void lock() {
+  inline void lock() ACQUIRE(Mutex) {
     atomic_store_relaxed(&Precedence, 0);
     Mutex.Lock();
   }
 
-  inline void unlock() { Mutex.Unlock(); }
+  inline void unlock() RELEASE(Mutex) { Mutex.Unlock(); }
 
   inline uptr getPrecedence() { return atomic_load_relaxed(&Precedence); }
diff --git a/compiler-rt/lib/scudo/scudo_tsd_exclusive.inc b/compiler-rt/lib/scudo/scudo_tsd_exclusive.inc
--- a/compiler-rt/lib/scudo/scudo_tsd_exclusive.inc
+++ b/compiler-rt/lib/scudo/scudo_tsd_exclusive.inc
@@ -34,7 +34,7 @@
   initThread(MinimalInit);
 }
 
-ALWAYS_INLINE ScudoTSD *getTSDAndLock(bool *UnlockRequired) {
+ALWAYS_INLINE ScudoTSD *getTSDAndLock(bool *UnlockRequired) NO_THREAD_SAFETY_ANALYSIS {
   if (UNLIKELY(ScudoThreadState != ThreadInitialized)) {
     FallbackTSD.lock();
     *UnlockRequired = true;
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
@@ -531,7 +531,7 @@
 }
 
 #if !SANITIZER_GO
-void ForkBefore(ThreadState *thr, uptr pc) {
+void ForkBefore(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
   ctx->thread_registry->Lock();
   ctx->report_mtx.Lock();
   // Suppress all reports in the pthread_atfork callbacks.
@@ -545,14 +545,14 @@
   thr->ignore_interceptors++;
 }
 
-void ForkParentAfter(ThreadState *thr, uptr pc) {
+void ForkParentAfter(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
   thr->suppress_reports--;  // Enabled in ForkBefore.
   thr->ignore_interceptors--;
   ctx->report_mtx.Unlock();
   ctx->thread_registry->Unlock();
 }
 
-void ForkChildAfter(ThreadState *thr, uptr pc) {
+void ForkChildAfter(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
   thr->suppress_reports--;  // Enabled in ForkBefore.
   thr->ignore_interceptors--;
   ctx->report_mtx.Unlock();
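
Usage note (illustrative, not part of the patch): with sanitizer_thread_safety.h in place, new code can tell Clang which mutex guards which data and have the locking discipline checked at compile time. The SignalCounter class below is a hypothetical, minimal sketch of how a MUTEX-annotated mutex, GUARDED_BY, and REQUIRES compose; only SpinMutex, SpinMutexLock, and the macros come from this patch.

#include "sanitizer_common/sanitizer_mutex.h"

namespace __sanitizer {

class SignalCounter {
 public:
  void Inc() {
    // SpinMutexLock is GenericScopedLock<StaticSpinMutex>: its constructor is
    // annotated ACQUIRE(mu) and its destructor RELEASE(), so the analysis
    // knows mu_ is held for the rest of this scope.
    SpinMutexLock l(&mu_);
    count_++;
  }

  // Callers must already hold mu_; Clang warns at any call site that
  // cannot prove the lock is held.
  uptr GetLocked() const REQUIRES(mu_) { return count_; }

 private:
  mutable SpinMutex mu_;
  uptr count_ GUARDED_BY(mu_) = 0;  // Every access is checked against mu_.
};

}  // namespace __sanitizer

With -Werror=thread-safety enabled by the CMake change above, forgetting the SpinMutexLock in Inc() or calling GetLocked() without the mutex becomes a build error rather than a latent race. The NO_THREAD_SAFETY_ANALYSIS annotations sprinkled through this patch mark the deliberate exceptions, such as the fork handlers and the Darwin ForceLock()/ForceUnlock() pairs, where a lock is intentionally held across function boundaries.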