diff --git a/compiler-rt/lib/asan/asan_allocator.cpp b/compiler-rt/lib/asan/asan_allocator.cpp
--- a/compiler-rt/lib/asan/asan_allocator.cpp
+++ b/compiler-rt/lib/asan/asan_allocator.cpp
@@ -840,12 +840,12 @@
     quarantine.PrintStats();
   }
 
-  void ForceLock() ACQUIRE(fallback_mutex) {
+  void ForceLock() SANITIZER_ACQUIRE(fallback_mutex) {
     allocator.ForceLock();
     fallback_mutex.Lock();
   }
 
-  void ForceUnlock() RELEASE(fallback_mutex) {
+  void ForceUnlock() SANITIZER_RELEASE(fallback_mutex) {
     fallback_mutex.Unlock();
     allocator.ForceUnlock();
   }
@@ -1054,9 +1054,11 @@
   return instance.AllocationSize(reinterpret_cast<uptr>(ptr));
 }
 
-void asan_mz_force_lock() NO_THREAD_SAFETY_ANALYSIS { instance.ForceLock(); }
+void asan_mz_force_lock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
+  instance.ForceLock();
+}
 
-void asan_mz_force_unlock() NO_THREAD_SAFETY_ANALYSIS {
+void asan_mz_force_unlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
   instance.ForceUnlock();
 }
diff --git a/compiler-rt/lib/cfi/cfi.cpp b/compiler-rt/lib/cfi/cfi.cpp
--- a/compiler-rt/lib/cfi/cfi.cpp
+++ b/compiler-rt/lib/cfi/cfi.cpp
@@ -322,14 +322,14 @@
 THREADLOCAL int in_loader;
 Mutex shadow_update_lock;
 
-void EnterLoader() NO_THREAD_SAFETY_ANALYSIS {
+void EnterLoader() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
   if (in_loader == 0) {
     shadow_update_lock.Lock();
   }
   ++in_loader;
 }
 
-void ExitLoader() NO_THREAD_SAFETY_ANALYSIS {
+void ExitLoader() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
   CHECK(in_loader > 0);
   --in_loader;
   UpdateShadow();
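The cfi.cpp functions above implement a reentrancy-counted lock: `shadow_update_lock` is taken only on the outermost `EnterLoader()` and released by the matching `ExitLoader()`. Clang's `-Wthread-safety` reasons one function at a time and cannot model a conditional acquire paired with a release elsewhere, which is why the opt-out annotation is needed at all. A minimal standalone sketch of the pattern (simplified; `Mu` is a toy stand-in, not the sanitizer mutex):

```cpp
#include <mutex>

// Toy annotated mutex so -Wthread-safety has something to track.
class __attribute__((capability("mutex"))) Mu {
 public:
  void Lock() __attribute__((acquire_capability())) { m_.lock(); }
  void Unlock() __attribute__((release_capability())) { m_.unlock(); }

 private:
  std::mutex m_;
};

Mu shadow_update_lock;
thread_local int in_loader;

void EnterLoader() __attribute__((no_thread_safety_analysis)) {
  if (in_loader == 0)
    shadow_update_lock.Lock();  // acquired only on the outermost entry
  ++in_loader;
}

void ExitLoader() __attribute__((no_thread_safety_analysis)) {
  --in_loader;
  if (in_loader == 0)
    shadow_update_lock.Unlock();  // released only on the outermost exit
}
```

Without `no_thread_safety_analysis`, both functions warn, since the analysis sees a lock that may or may not be held at function exit.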
diff --git a/compiler-rt/lib/lsan/lsan_common.h b/compiler-rt/lib/lsan/lsan_common.h
--- a/compiler-rt/lib/lsan/lsan_common.h
+++ b/compiler-rt/lib/lsan/lsan_common.h
@@ -230,8 +230,8 @@
 // Returns true if [addr, addr + sizeof(void *)) is poisoned.
 bool WordIsPoisoned(uptr addr);
 // Wrappers for ThreadRegistry access.
-void LockThreadRegistry() NO_THREAD_SAFETY_ANALYSIS;
-void UnlockThreadRegistry() NO_THREAD_SAFETY_ANALYSIS;
+void LockThreadRegistry() SANITIZER_NO_THREAD_SAFETY_ANALYSIS;
+void UnlockThreadRegistry() SANITIZER_NO_THREAD_SAFETY_ANALYSIS;
 
 struct ScopedStopTheWorldLock {
   ScopedStopTheWorldLock() {
diff --git a/compiler-rt/lib/memprof/memprof_allocator.cpp b/compiler-rt/lib/memprof/memprof_allocator.cpp
--- a/compiler-rt/lib/memprof/memprof_allocator.cpp
+++ b/compiler-rt/lib/memprof/memprof_allocator.cpp
@@ -524,12 +524,12 @@
   void PrintStats() { allocator.PrintStats(); }
 
-  void ForceLock() NO_THREAD_SAFETY_ANALYSIS {
+  void ForceLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
     allocator.ForceLock();
     fallback_mutex.Lock();
   }
 
-  void ForceUnlock() NO_THREAD_SAFETY_ANALYSIS {
+  void ForceUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
     fallback_mutex.Unlock();
     allocator.ForceUnlock();
   }
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_addrhashmap.h b/compiler-rt/lib/sanitizer_common/sanitizer_addrhashmap.h
--- a/compiler-rt/lib/sanitizer_common/sanitizer_addrhashmap.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_addrhashmap.h
@@ -201,7 +201,8 @@
 }
 
 template <typename T, uptr kSize>
-void AddrHashMap<T, kSize>::acquire(Handle *h) NO_THREAD_SAFETY_ANALYSIS {
+void AddrHashMap<T, kSize>::acquire(Handle *h)
+    SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
   uptr addr = h->addr_;
   uptr hash = calcHash(addr);
   Bucket *b = &table_[hash];
@@ -330,7 +331,8 @@
 }
 
 template <typename T, uptr kSize>
-void AddrHashMap<T, kSize>::release(Handle *h) NO_THREAD_SAFETY_ANALYSIS {
+void AddrHashMap<T, kSize>::release(Handle *h)
+    SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
   if (!h->cell_)
     return;
   Bucket *b = h->bucket_;
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_allocator.cpp
--- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator.cpp
@@ -126,12 +126,12 @@
   RawInternalFree(addr, cache);
 }
 
-void InternalAllocatorLock() NO_THREAD_SAFETY_ANALYSIS {
+void InternalAllocatorLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
   internal_allocator_cache_mu.Lock();
   internal_allocator()->ForceLock();
 }
 
-void InternalAllocatorUnlock() NO_THREAD_SAFETY_ANALYSIS {
+void InternalAllocatorUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
   internal_allocator()->ForceUnlock();
   internal_allocator_cache_mu.Unlock();
 }
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_combined.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_combined.h
--- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_combined.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_combined.h
@@ -175,12 +175,12 @@
   // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
   // introspection API.
-  void ForceLock() NO_THREAD_SAFETY_ANALYSIS {
+  void ForceLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
     primary_.ForceLock();
     secondary_.ForceLock();
   }
 
-  void ForceUnlock() NO_THREAD_SAFETY_ANALYSIS {
+  void ForceUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
     secondary_.ForceUnlock();
     primary_.ForceUnlock();
   }
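Every `ForceLock()`/`ForceUnlock()` pair in these allocators acquires in one function and releases in another, bracketing arbitrary caller code (the Darwin malloc zone introspection named in the comments). That is inherently outside what a per-function analysis can verify, so the blanket opt-out is the only option where the mutexes cannot be named in an attribute. A standalone sketch of the shape (hypothetical names, not from the patch):

```cpp
#include <mutex>

class __attribute__((capability("mutex"))) Mu {
 public:
  void Lock() __attribute__((acquire_capability())) { m_.lock(); }
  void Unlock() __attribute__((release_capability())) { m_.unlock(); }

 private:
  std::mutex m_;
};

Mu primary_mu, secondary_mu;

// Without the opt-out: "mutex 'primary_mu' is still held at the end of function".
void ForceLock() __attribute__((no_thread_safety_analysis)) {
  primary_mu.Lock();
  secondary_mu.Lock();
}

// Without the opt-out: "releasing mutex 'secondary_mu' that was not held".
// Release order is deliberately the reverse of the acquire order, the usual
// deadlock-avoidance convention.
void ForceUnlock() __attribute__((no_thread_safety_analysis)) {
  secondary_mu.Unlock();
  primary_mu.Unlock();
}
```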
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary32.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary32.h
--- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary32.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary32.h
@@ -238,13 +238,13 @@
   // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
   // introspection API.
-  void ForceLock() NO_THREAD_SAFETY_ANALYSIS {
+  void ForceLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
     for (uptr i = 0; i < kNumClasses; i++) {
       GetSizeClassInfo(i)->mutex.Lock();
     }
   }
 
-  void ForceUnlock() NO_THREAD_SAFETY_ANALYSIS {
+  void ForceUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
     for (int i = kNumClasses - 1; i >= 0; i--) {
       GetSizeClassInfo(i)->mutex.Unlock();
     }
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h
--- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h
@@ -354,13 +354,13 @@
   // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
   // introspection API.
-  void ForceLock() NO_THREAD_SAFETY_ANALYSIS {
+  void ForceLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
     for (uptr i = 0; i < kNumClasses; i++) {
       GetRegionInfo(i)->mutex.Lock();
     }
   }
 
-  void ForceUnlock() NO_THREAD_SAFETY_ANALYSIS {
+  void ForceUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
     for (int i = (int)kNumClasses - 1; i >= 0; i--) {
       GetRegionInfo(i)->mutex.Unlock();
     }
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_secondary.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_secondary.h
--- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_secondary.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_secondary.h
@@ -267,9 +267,9 @@
   // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
   // introspection API.
-  void ForceLock() ACQUIRE(mutex_) { mutex_.Lock(); }
+  void ForceLock() SANITIZER_ACQUIRE(mutex_) { mutex_.Lock(); }
 
-  void ForceUnlock() RELEASE(mutex_) { mutex_.Unlock(); }
+  void ForceUnlock() SANITIZER_RELEASE(mutex_) { mutex_.Unlock(); }
 
   // Iterate over all existing chunks.
   // The allocator must be locked when calling this function.
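By contrast, when the lock is a single nameable member, as in the secondary allocator above (and asan's `ForceLock()` earlier), the precise `SANITIZER_ACQUIRE(mutex_)`/`SANITIZER_RELEASE(mutex_)` forms beat the opt-out: the analysis then knows the capability is held between the two calls and can check callers. A standalone sketch of what that buys (hypothetical names):

```cpp
#include <mutex>

class __attribute__((capability("mutex"))) Mu {
 public:
  void Lock() __attribute__((acquire_capability())) { m_.lock(); }
  void Unlock() __attribute__((release_capability())) { m_.unlock(); }

 private:
  std::mutex m_;
};

class SecondaryAllocator {  // hypothetical, mirrors the shape above
 public:
  void ForceLock() __attribute__((acquire_capability(mutex_))) { mutex_.Lock(); }
  void ForceUnlock() __attribute__((release_capability(mutex_))) {
    mutex_.Unlock();
  }
  // "The allocator must be locked when calling this function."
  void IterateChunks() __attribute__((requires_capability(mutex_))) {}

 private:
  Mu mutex_;
};

void Introspect(SecondaryAllocator &a) {
  a.ForceLock();
  a.IterateChunks();  // OK: the analysis knows mutex_ is held here
  a.ForceUnlock();
  // a.IterateChunks();  // would warn: requires holding mutex 'a.mutex_'
}
```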
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_common.h b/compiler-rt/lib/sanitizer_common/sanitizer_common.h
--- a/compiler-rt/lib/sanitizer_common/sanitizer_common.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_common.h
@@ -238,12 +238,12 @@
 // Lock sanitizer error reporting and protects against nested errors.
 class ScopedErrorReportLock {
  public:
-  ScopedErrorReportLock() ACQUIRE(mutex_) { Lock(); }
-  ~ScopedErrorReportLock() RELEASE(mutex_) { Unlock(); }
+  ScopedErrorReportLock() SANITIZER_ACQUIRE(mutex_) { Lock(); }
+  ~ScopedErrorReportLock() SANITIZER_RELEASE(mutex_) { Unlock(); }
 
-  static void Lock() ACQUIRE(mutex_);
-  static void Unlock() RELEASE(mutex_);
-  static void CheckLocked() CHECK_LOCKED(mutex_);
+  static void Lock() SANITIZER_ACQUIRE(mutex_);
+  static void Unlock() SANITIZER_RELEASE(mutex_);
+  static void CheckLocked() SANITIZER_CHECK_LOCKED(mutex_);
 
  private:
   static atomic_uintptr_t reporting_thread_;
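`ScopedErrorReportLock` above, like the `GenericScopedLock` family in the next file, is an RAII guard: acquire in the constructor, release in the destructor, with `scoped_lockable` on the class so the analysis tracks the guard object itself. A standalone sketch of the pattern (assumes nothing from the patch):

```cpp
#include <mutex>

class __attribute__((capability("mutex"))) Mu {
 public:
  void Lock() __attribute__((acquire_capability())) { m_.lock(); }
  void Unlock() __attribute__((release_capability())) { m_.unlock(); }

 private:
  std::mutex m_;
};

template <typename MutexType>
class __attribute__((scoped_lockable)) ScopedLock {
 public:
  explicit ScopedLock(MutexType *mu) __attribute__((acquire_capability(mu)))
      : mu_(mu) {
    mu_->Lock();
  }
  ~ScopedLock() __attribute__((release_capability())) { mu_->Unlock(); }

 private:
  MutexType *mu_;
};

Mu mu;
int counter __attribute__((guarded_by(mu)));

void Increment() {
  ScopedLock<Mu> lock(&mu);
  ++counter;  // OK: the analysis knows mu is held for the scope of `lock`
}
```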
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_mutex.h b/compiler-rt/lib/sanitizer_common/sanitizer_mutex.h
--- a/compiler-rt/lib/sanitizer_common/sanitizer_mutex.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_mutex.h
@@ -20,25 +20,27 @@
 namespace __sanitizer {
 
-class MUTEX StaticSpinMutex {
+class SANITIZER_MUTEX StaticSpinMutex {
  public:
   void Init() { atomic_store(&state_, 0, memory_order_relaxed); }
 
-  void Lock() ACQUIRE() {
+  void Lock() SANITIZER_ACQUIRE() {
     if (LIKELY(TryLock()))
       return;
     LockSlow();
   }
 
-  bool TryLock() TRY_ACQUIRE(true) {
+  bool TryLock() SANITIZER_TRY_ACQUIRE(true) {
     return atomic_exchange(&state_, 1, memory_order_acquire) == 0;
   }
 
-  void Unlock() RELEASE() { atomic_store(&state_, 0, memory_order_release); }
+  void Unlock() SANITIZER_RELEASE() {
+    atomic_store(&state_, 0, memory_order_release);
+  }
 
-  void CheckLocked() const CHECK_LOCKED() {
+  void CheckLocked() const SANITIZER_CHECK_LOCKED() {
     CHECK_EQ(atomic_load(&state_, memory_order_relaxed), 1);
   }
 
@@ -48,7 +50,7 @@
   void LockSlow();
 };
 
-class MUTEX SpinMutex : public StaticSpinMutex {
+class SANITIZER_MUTEX SpinMutex : public StaticSpinMutex {
  public:
   SpinMutex() {
     Init();
@@ -156,12 +158,12 @@
 // Derive from CheckedMutex for the purposes of EBO.
 // We could make it a field marked with [[no_unique_address]],
 // but this attribute is not supported by some older compilers.
-class MUTEX Mutex : CheckedMutex {
+class SANITIZER_MUTEX Mutex : CheckedMutex {
  public:
   explicit constexpr Mutex(MutexType type = MutexUnchecked)
       : CheckedMutex(type) {}
 
-  void Lock() ACQUIRE() {
+  void Lock() SANITIZER_ACQUIRE() {
     CheckedMutex::Lock();
     u64 reset_mask = ~0ull;
     u64 state = atomic_load_relaxed(&state_);
@@ -206,7 +208,7 @@
     }
   }
 
-  void Unlock() RELEASE() {
+  void Unlock() SANITIZER_RELEASE() {
     CheckedMutex::Unlock();
     bool wake_writer;
     u64 wake_readers;
@@ -234,7 +236,7 @@
       readers_.Post(wake_readers);
   }
 
-  void ReadLock() ACQUIRE_SHARED() {
+  void ReadLock() SANITIZER_ACQUIRE_SHARED() {
     CheckedMutex::Lock();
     u64 reset_mask = ~0ull;
     u64 state = atomic_load_relaxed(&state_);
@@ -271,7 +273,7 @@
     }
   }
 
-  void ReadUnlock() RELEASE_SHARED() {
+  void ReadUnlock() SANITIZER_RELEASE_SHARED() {
     CheckedMutex::Unlock();
     bool wake;
     u64 new_state;
@@ -297,13 +299,13 @@
   // owns the mutex but a child checks that it is locked. Rather than
   // maintaining complex state to work around those situations, the check only
   // checks that the mutex is owned.
-  void CheckWriteLocked() const CHECK_LOCKED() {
+  void CheckWriteLocked() const SANITIZER_CHECK_LOCKED() {
     CHECK(atomic_load(&state_, memory_order_relaxed) & kWriterLock);
   }
 
-  void CheckLocked() const CHECK_LOCKED() { CheckWriteLocked(); }
+  void CheckLocked() const SANITIZER_CHECK_LOCKED() { CheckWriteLocked(); }
 
-  void CheckReadLocked() const CHECK_LOCKED() {
+  void CheckReadLocked() const SANITIZER_CHECK_LOCKED() {
     CHECK(atomic_load(&state_, memory_order_relaxed) & kReaderLockMask);
   }
 
@@ -361,13 +363,13 @@
 void FutexWake(atomic_uint32_t *p, u32 count);
 
 template <typename MutexType>
-class SCOPED_LOCK GenericScopedLock {
+class SANITIZER_SCOPED_LOCK GenericScopedLock {
  public:
-  explicit GenericScopedLock(MutexType *mu) ACQUIRE(mu) : mu_(mu) {
+  explicit GenericScopedLock(MutexType *mu) SANITIZER_ACQUIRE(mu) : mu_(mu) {
     mu_->Lock();
   }
 
-  ~GenericScopedLock() RELEASE() { mu_->Unlock(); }
+  ~GenericScopedLock() SANITIZER_RELEASE() { mu_->Unlock(); }
 
  private:
   MutexType *mu_;
@@ -377,13 +379,14 @@
 };
 
 template <typename MutexType>
-class SCOPED_LOCK GenericScopedReadLock {
+class SANITIZER_SCOPED_LOCK GenericScopedReadLock {
  public:
-  explicit GenericScopedReadLock(MutexType *mu) ACQUIRE(mu) : mu_(mu) {
+  explicit GenericScopedReadLock(MutexType *mu) SANITIZER_ACQUIRE(mu)
+      : mu_(mu) {
     mu_->ReadLock();
   }
 
-  ~GenericScopedReadLock() RELEASE() { mu_->ReadUnlock(); }
+  ~GenericScopedReadLock() SANITIZER_RELEASE() { mu_->ReadUnlock(); }
 
  private:
   MutexType *mu_;
@@ -393,10 +396,10 @@
 };
 
 template <typename MutexType>
-class SCOPED_LOCK GenericScopedRWLock {
+class SANITIZER_SCOPED_LOCK GenericScopedRWLock {
  public:
   ALWAYS_INLINE explicit GenericScopedRWLock(MutexType *mu, bool write)
-      ACQUIRE(mu)
+      SANITIZER_ACQUIRE(mu)
       : mu_(mu), write_(write) {
     if (write_)
       mu_->Lock();
@@ -404,7 +407,7 @@
       mu_->ReadLock();
   }
 
-  ALWAYS_INLINE ~GenericScopedRWLock() RELEASE() {
+  ALWAYS_INLINE ~GenericScopedRWLock() SANITIZER_RELEASE() {
     if (write_)
       mu_->Unlock();
     else
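The next hunk is the payoff of the rename: sanitizer_platform_limits_posix.cpp no longer needs to `#undef RELEASE` before its includes, because the annotation macros no longer collide with system headers. Linux's `<scsi/scsi.h>` defines `RELEASE` as a SCSI command opcode, which is exactly the clash the removed comment describes. Illustration only (not from the patch):

```cpp
// With the old unprefixed name, defining the annotation macro first...
#define RELEASE(...) __attribute__((release_capability(__VA_ARGS__)))

// ...collides with the system header's own definition of the same name:
#include <scsi/scsi.h>  // warning: 'RELEASE' macro redefined

// SANITIZER_RELEASE leaves no short name to collide on, so the
// "#undef RELEASE" workaround removed below becomes unnecessary.
```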
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.cpp
--- a/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.cpp
@@ -170,13 +170,9 @@
 #endif
 
 // Include these after system headers to avoid name clashes and ambiguities.
-#include "sanitizer_internal_defs.h"
-#include "sanitizer_platform_limits_posix.h"
-
-// To prevent macro redefinition warning between our sanitizer_thread_safety.h
-// and system's scsi.h.
-# undef RELEASE
 # include "sanitizer_common.h"
+# include "sanitizer_internal_defs.h"
+# include "sanitizer_platform_limits_posix.h"
 
 namespace __sanitizer {
 
 unsigned struct_utsname_sz = sizeof(struct utsname);
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_quarantine.h b/compiler-rt/lib/sanitizer_common/sanitizer_quarantine.h
--- a/compiler-rt/lib/sanitizer_common/sanitizer_quarantine.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_quarantine.h
@@ -149,8 +149,8 @@
   Cache cache_;
   char pad2_[kCacheLineSize];
 
-  void NOINLINE Recycle(uptr min_size, Callback cb) REQUIRES(recycle_mutex_)
-      RELEASE(recycle_mutex_) {
+  void NOINLINE Recycle(uptr min_size, Callback cb)
+      SANITIZER_REQUIRES(recycle_mutex_) SANITIZER_RELEASE(recycle_mutex_) {
     Cache tmp;
     {
       SpinMutexLock l(&cache_mutex_);
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.h b/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.h
--- a/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.h
@@ -97,7 +97,7 @@
       Packed,
       Unpacked,
     };
-    State state GUARDED_BY(mtx_);
+    State state SANITIZER_GUARDED_BY(mtx_);
 
     uptr *Create(StackStore *store);
 
@@ -109,8 +109,8 @@
     void TestOnlyUnmap(StackStore *store);
     bool Stored(uptr n);
     bool IsPacked() const;
-    void Lock() NO_THREAD_SAFETY_ANALYSIS { mtx_.Lock(); }
-    void Unlock() NO_THREAD_SAFETY_ANALYSIS { mtx_.Unlock(); }
+    void Lock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS { mtx_.Lock(); }
+    void Unlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS { mtx_.Unlock(); }
   };
 
   BlockInfo blocks_[kBlockCount] = {};
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp
--- a/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp
@@ -94,8 +94,8 @@
   constexpr CompressThread() = default;
   void NewWorkNotify();
   void Stop();
-  void LockAndStop() NO_THREAD_SAFETY_ANALYSIS;
-  void Unlock() NO_THREAD_SAFETY_ANALYSIS;
+  void LockAndStop() SANITIZER_NO_THREAD_SAFETY_ANALYSIS;
+  void Unlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS;
 
  private:
   enum class State {
@@ -114,8 +114,8 @@
   Semaphore semaphore_ = {};
   StaticSpinMutex mutex_ = {};
-  State state_ GUARDED_BY(mutex_) = State::NotStarted;
-  void *thread_ GUARDED_BY(mutex_) = nullptr;
+  State state_ SANITIZER_GUARDED_BY(mutex_) = State::NotStarted;
+  void *thread_ SANITIZER_GUARDED_BY(mutex_) = nullptr;
   atomic_uint8_t run_ = {};
 };
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_thread_registry.h b/compiler-rt/lib/sanitizer_common/sanitizer_thread_registry.h
--- a/compiler-rt/lib/sanitizer_common/sanitizer_thread_registry.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_thread_registry.h
@@ -86,7 +86,7 @@
 
 typedef ThreadContextBase* (*ThreadContextFactory)(u32 tid);
 
-class MUTEX ThreadRegistry {
+class SANITIZER_MUTEX ThreadRegistry {
  public:
   ThreadRegistry(ThreadContextFactory factory);
   ThreadRegistry(ThreadContextFactory factory, u32 max_threads,
@@ -95,9 +95,9 @@
                  uptr *alive = nullptr);
   uptr GetMaxAliveThreads();
 
-  void Lock() ACQUIRE() { mtx_.Lock(); }
-  void CheckLocked() const CHECK_LOCKED() { mtx_.CheckLocked(); }
-  void Unlock() RELEASE() { mtx_.Unlock(); }
+  void Lock() SANITIZER_ACQUIRE() { mtx_.Lock(); }
+  void CheckLocked() const SANITIZER_CHECK_LOCKED() { mtx_.CheckLocked(); }
+  void Unlock() SANITIZER_RELEASE() { mtx_.Unlock(); }
 
   // Should be guarded by ThreadRegistryLock.
   ThreadContextBase *GetThreadLocked(u32 tid) {
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_thread_safety.h b/compiler-rt/lib/sanitizer_common/sanitizer_thread_safety.h
--- a/compiler-rt/lib/sanitizer_common/sanitizer_thread_safety.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_thread_safety.h
@@ -16,27 +16,34 @@
 #define SANITIZER_THREAD_SAFETY_H
 
 #if defined(__clang__)
-# define THREAD_ANNOTATION(x) __attribute__((x))
+# define SANITIZER_THREAD_ANNOTATION(x) __attribute__((x))
 #else
-# define THREAD_ANNOTATION(x)
+# define SANITIZER_THREAD_ANNOTATION(x)
 #endif
 
-#define MUTEX THREAD_ANNOTATION(capability("mutex"))
-#define SCOPED_LOCK THREAD_ANNOTATION(scoped_lockable)
-#define GUARDED_BY(x) THREAD_ANNOTATION(guarded_by(x))
-#define PT_GUARDED_BY(x) THREAD_ANNOTATION(pt_guarded_by(x))
-#define REQUIRES(...) THREAD_ANNOTATION(requires_capability(__VA_ARGS__))
-#define REQUIRES_SHARED(...) \
-  THREAD_ANNOTATION(requires_shared_capability(__VA_ARGS__))
-#define ACQUIRE(...) THREAD_ANNOTATION(acquire_capability(__VA_ARGS__))
-#define ACQUIRE_SHARED(...) \
-  THREAD_ANNOTATION(acquire_shared_capability(__VA_ARGS__))
-#define TRY_ACQUIRE(...) THREAD_ANNOTATION(try_acquire_capability(__VA_ARGS__))
-#define RELEASE(...) THREAD_ANNOTATION(release_capability(__VA_ARGS__))
-#define RELEASE_SHARED(...) \
-  THREAD_ANNOTATION(release_shared_capability(__VA_ARGS__))
-#define EXCLUDES(...) THREAD_ANNOTATION(locks_excluded(__VA_ARGS__))
-#define CHECK_LOCKED(...) THREAD_ANNOTATION(assert_capability(__VA_ARGS__))
-#define NO_THREAD_SAFETY_ANALYSIS THREAD_ANNOTATION(no_thread_safety_analysis)
+#define SANITIZER_MUTEX SANITIZER_THREAD_ANNOTATION(capability("mutex"))
+#define SANITIZER_SCOPED_LOCK SANITIZER_THREAD_ANNOTATION(scoped_lockable)
+#define SANITIZER_GUARDED_BY(x) SANITIZER_THREAD_ANNOTATION(guarded_by(x))
+#define SANITIZER_PT_GUARDED_BY(x) SANITIZER_THREAD_ANNOTATION(pt_guarded_by(x))
+#define SANITIZER_REQUIRES(...) \
+  SANITIZER_THREAD_ANNOTATION(requires_capability(__VA_ARGS__))
+#define SANITIZER_REQUIRES_SHARED(...) \
+  SANITIZER_THREAD_ANNOTATION(requires_shared_capability(__VA_ARGS__))
+#define SANITIZER_ACQUIRE(...) \
+  SANITIZER_THREAD_ANNOTATION(acquire_capability(__VA_ARGS__))
+#define SANITIZER_ACQUIRE_SHARED(...) \
+  SANITIZER_THREAD_ANNOTATION(acquire_shared_capability(__VA_ARGS__))
+#define SANITIZER_TRY_ACQUIRE(...) \
+  SANITIZER_THREAD_ANNOTATION(try_acquire_capability(__VA_ARGS__))
+#define SANITIZER_RELEASE(...) \
+  SANITIZER_THREAD_ANNOTATION(release_capability(__VA_ARGS__))
+#define SANITIZER_RELEASE_SHARED(...) \
+  SANITIZER_THREAD_ANNOTATION(release_shared_capability(__VA_ARGS__))
+#define SANITIZER_EXCLUDES(...) \
+  SANITIZER_THREAD_ANNOTATION(locks_excluded(__VA_ARGS__))
+#define SANITIZER_CHECK_LOCKED(...) \
+  SANITIZER_THREAD_ANNOTATION(assert_capability(__VA_ARGS__))
+#define SANITIZER_NO_THREAD_SAFETY_ANALYSIS \
+  SANITIZER_THREAD_ANNOTATION(no_thread_safety_analysis)
 
 #endif
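The header above is the core of the patch: a mechanical rename of every annotation macro behind the `SANITIZER_` prefix, with `SANITIZER_THREAD_ANNOTATION` still expanding to nothing on non-Clang compilers. Call sites read as before, just with longer names. A minimal usage sketch (assumes a compiler-rt include path; not itself part of the patch):

```cpp
#include "sanitizer_common/sanitizer_thread_safety.h"

namespace __sanitizer {

class SANITIZER_MUTEX Mu {
 public:
  void Lock() SANITIZER_ACQUIRE();
  void Unlock() SANITIZER_RELEASE();
};

Mu mu;
int data SANITIZER_GUARDED_BY(mu);

// Callers must already hold mu; enforced by -Wthread-safety under Clang,
// a no-op elsewhere.
void Touch() SANITIZER_REQUIRES(mu) { ++data; }

}  // namespace __sanitizer
```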
diff --git a/compiler-rt/lib/scudo/scudo_allocator.cpp b/compiler-rt/lib/scudo/scudo_allocator.cpp
--- a/compiler-rt/lib/scudo/scudo_allocator.cpp
+++ b/compiler-rt/lib/scudo/scudo_allocator.cpp
@@ -299,8 +299,9 @@
   NOINLINE bool isRssLimitExceeded();
 
   // Allocates a chunk.
-  void *allocate(uptr Size, uptr Alignment, AllocType Type,
-                 bool ForceZeroContents = false) NO_THREAD_SAFETY_ANALYSIS {
+  void *
+  allocate(uptr Size, uptr Alignment, AllocType Type,
+           bool ForceZeroContents = false) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
     initThreadMaybe();
 
     if (UNLIKELY(Alignment > MaxAlignment)) {
@@ -404,8 +405,8 @@
   // Place a chunk in the quarantine or directly deallocate it in the event of
   // a zero-sized quarantine, or if the size of the chunk is greater than the
   // quarantine chunk size threshold.
-  void quarantineOrDeallocateChunk(void *Ptr, UnpackedHeader *Header,
-                                   uptr Size) NO_THREAD_SAFETY_ANALYSIS {
+  void quarantineOrDeallocateChunk(void *Ptr, UnpackedHeader *Header, uptr Size)
+      SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
     const bool BypassQuarantine = !Size || (Size > QuarantineChunksUpToSize);
     if (BypassQuarantine) {
       UnpackedHeader NewHeader = *Header;
diff --git a/compiler-rt/lib/scudo/scudo_tsd.h b/compiler-rt/lib/scudo/scudo_tsd.h
--- a/compiler-rt/lib/scudo/scudo_tsd.h
+++ b/compiler-rt/lib/scudo/scudo_tsd.h
@@ -29,7 +29,7 @@
   void init();
   void commitBack();
 
-  inline bool tryLock() TRY_ACQUIRE(true, Mutex) {
+  inline bool tryLock() SANITIZER_TRY_ACQUIRE(true, Mutex) {
     if (Mutex.TryLock()) {
       atomic_store_relaxed(&Precedence, 0);
       return true;
@@ -40,12 +40,12 @@
     return false;
   }
 
-  inline void lock() ACQUIRE(Mutex) {
+  inline void lock() SANITIZER_ACQUIRE(Mutex) {
     atomic_store_relaxed(&Precedence, 0);
     Mutex.Lock();
   }
 
-  inline void unlock() RELEASE(Mutex) { Mutex.Unlock(); }
+  inline void unlock() SANITIZER_RELEASE(Mutex) { Mutex.Unlock(); }
 
   inline uptr getPrecedence() { return atomic_load_relaxed(&Precedence); }
diff --git a/compiler-rt/lib/scudo/scudo_tsd_exclusive.inc b/compiler-rt/lib/scudo/scudo_tsd_exclusive.inc
--- a/compiler-rt/lib/scudo/scudo_tsd_exclusive.inc
+++ b/compiler-rt/lib/scudo/scudo_tsd_exclusive.inc
@@ -34,7 +34,7 @@
 }
 
 ALWAYS_INLINE ScudoTSD *
-getTSDAndLock(bool *UnlockRequired) NO_THREAD_SAFETY_ANALYSIS {
+getTSDAndLock(bool *UnlockRequired) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
   if (UNLIKELY(ScudoThreadState != ThreadInitialized)) {
     FallbackTSD.lock();
     *UnlockRequired = true;
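`SANITIZER_TRY_ACQUIRE(true, Mutex)` on scudo's `tryLock()` above means "the capability `Mutex` is held if and only if the function returns `true`", so the analysis follows the branch structure of callers. Standalone sketch:

```cpp
#include <mutex>

class __attribute__((capability("mutex"))) Mu {
 public:
  bool TryLock() __attribute__((try_acquire_capability(true))) {
    return m_.try_lock();
  }
  void Unlock() __attribute__((release_capability())) { m_.unlock(); }

 private:
  std::mutex m_;
};

Mu mu;
int data __attribute__((guarded_by(mu)));

void MaybeWork() {
  if (mu.TryLock()) {
    ++data;      // OK: on this branch the analysis knows mu is held
    mu.Unlock();
  }
  // No warning after the if: on the false branch mu was never acquired.
}
```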
diff --git a/compiler-rt/lib/scudo/scudo_tsd_shared.cpp b/compiler-rt/lib/scudo/scudo_tsd_shared.cpp
--- a/compiler-rt/lib/scudo/scudo_tsd_shared.cpp
+++ b/compiler-rt/lib/scudo/scudo_tsd_shared.cpp
@@ -64,7 +64,7 @@
   setCurrentTSD(&TSDs[Index % NumberOfTSDs]);
 }
 
-ScudoTSD *getTSDAndLockSlow(ScudoTSD *TSD) NO_THREAD_SAFETY_ANALYSIS {
+ScudoTSD *getTSDAndLockSlow(ScudoTSD *TSD) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
   if (NumberOfTSDs > 1) {
     // Use the Precedence of the current TSD as our random seed. Since we are in
     // the slow path, it means that tryLock failed, and as a result it's very
diff --git a/compiler-rt/lib/tsan/rtl-old/tsan_mman.cpp b/compiler-rt/lib/tsan/rtl-old/tsan_mman.cpp
--- a/compiler-rt/lib/tsan/rtl-old/tsan_mman.cpp
+++ b/compiler-rt/lib/tsan/rtl-old/tsan_mman.cpp
@@ -124,13 +124,13 @@
   gp->mtx.Unlock();
 }
 
-void AllocatorLock() NO_THREAD_SAFETY_ANALYSIS {
+void AllocatorLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
   global_proc()->mtx.Lock();
   global_proc()->internal_alloc_mtx.Lock();
   InternalAllocatorLock();
 }
 
-void AllocatorUnlock() NO_THREAD_SAFETY_ANALYSIS {
+void AllocatorUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
   InternalAllocatorUnlock();
   global_proc()->internal_alloc_mtx.Unlock();
   global_proc()->mtx.Unlock();
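The tsan fork hooks that follow are the extreme case of the split-acquire pattern: `ForkBefore()` takes the runtime's mutexes, `fork()` happens, and `ForkParentAfter()`/`ForkChildAfter()` release them, in two different processes. No per-function analysis can model that, hence the opt-outs. A rough standalone sketch of the shape (the child-side unlock is the standard pthread_atfork idiom; it is well-defined for the sanitizer's own mutexes, while for arbitrary mutex types it depends on the implementation):

```cpp
#include <pthread.h>

#include <mutex>

std::mutex report_mtx;  // hypothetical stand-in for ctx->report_mtx etc.

void ForkBefore() { report_mtx.lock(); }         // held across fork()
void ForkParentAfter() { report_mtx.unlock(); }  // parent releases its copy
void ForkChildAfter() { report_mtx.unlock(); }   // child releases its copy

int main() {
  pthread_atfork(ForkBefore, ForkParentAfter, ForkChildAfter);
  return 0;
}
```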
diff --git a/compiler-rt/lib/tsan/rtl-old/tsan_rtl.cpp b/compiler-rt/lib/tsan/rtl-old/tsan_rtl.cpp
--- a/compiler-rt/lib/tsan/rtl-old/tsan_rtl.cpp
+++ b/compiler-rt/lib/tsan/rtl-old/tsan_rtl.cpp
@@ -521,7 +521,7 @@
 }
 
 #if !SANITIZER_GO
-void ForkBefore(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
+void ForkBefore(ThreadState *thr, uptr pc) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
   ctx->thread_registry.Lock();
   ctx->report_mtx.Lock();
   ScopedErrorReportLock::Lock();
@@ -543,7 +543,8 @@
   __tsan_test_only_on_fork();
 }
 
-void ForkParentAfter(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
+void ForkParentAfter(ThreadState *thr,
+                     uptr pc) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
   thr->suppress_reports--;  // Enabled in ForkBefore.
   thr->ignore_interceptors--;
   thr->ignore_reads_and_writes--;
@@ -554,7 +555,7 @@
 }
 
 void ForkChildAfter(ThreadState *thr, uptr pc,
-                    bool start_thread) NO_THREAD_SAFETY_ANALYSIS {
+                    bool start_thread) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
   thr->suppress_reports--;  // Enabled in ForkBefore.
   thr->ignore_interceptors--;
   thr->ignore_reads_and_writes--;
diff --git a/compiler-rt/lib/tsan/rtl/tsan_mman.cpp b/compiler-rt/lib/tsan/rtl/tsan_mman.cpp
--- a/compiler-rt/lib/tsan/rtl/tsan_mman.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_mman.cpp
@@ -124,21 +124,21 @@
   gp->mtx.Unlock();
 }
 
-void AllocatorLock() NO_THREAD_SAFETY_ANALYSIS {
+void AllocatorLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
   global_proc()->internal_alloc_mtx.Lock();
   InternalAllocatorLock();
 }
 
-void AllocatorUnlock() NO_THREAD_SAFETY_ANALYSIS {
+void AllocatorUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
   InternalAllocatorUnlock();
   global_proc()->internal_alloc_mtx.Unlock();
 }
 
-void GlobalProcessorLock() NO_THREAD_SAFETY_ANALYSIS {
+void GlobalProcessorLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
   global_proc()->mtx.Lock();
 }
 
-void GlobalProcessorUnlock() NO_THREAD_SAFETY_ANALYSIS {
+void GlobalProcessorUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
   global_proc()->mtx.Unlock();
 }
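The tsan_rtl.h/tsan_rtl.cpp hunks below also show `SANITIZER_REQUIRES` in action: `TracePartFree()` demands that `ctx->slot_mtx` is already held, so every call site is checked rather than the function taking the lock itself. Standalone sketch (hypothetical names):

```cpp
#include <mutex>

class __attribute__((capability("mutex"))) Mu {
 public:
  void Lock() __attribute__((acquire_capability())) { m_.lock(); }
  void Unlock() __attribute__((release_capability())) { m_.unlock(); }

 private:
  std::mutex m_;
};

Mu slot_mtx;

// A precondition, not an action: the caller must already hold slot_mtx.
void TracePartFree() __attribute__((requires_capability(slot_mtx))) {}

void Caller() {
  // TracePartFree();  // would warn: requires holding mutex 'slot_mtx'
  slot_mtx.Lock();
  TracePartFree();     // OK
  slot_mtx.Unlock();
}
```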
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl.h b/compiler-rt/lib/tsan/rtl/tsan_rtl.h
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl.h
@@ -332,12 +332,12 @@
   Mutex slot_mtx;
   uptr global_epoch;  // guarded by slot_mtx and by all slot mutexes
   bool resetting;     // global reset is in progress
-  IList<TidSlot, &TidSlot::node> slot_queue GUARDED_BY(slot_mtx);
+  IList<TidSlot, &TidSlot::node> slot_queue SANITIZER_GUARDED_BY(slot_mtx);
   IList<TraceHeader, &TraceHeader::global, TracePart> trace_part_recycle
-      GUARDED_BY(slot_mtx);
-  uptr trace_part_total_allocated GUARDED_BY(slot_mtx);
-  uptr trace_part_recycle_finished GUARDED_BY(slot_mtx);
-  uptr trace_part_finished_excess GUARDED_BY(slot_mtx);
+      SANITIZER_GUARDED_BY(slot_mtx);
+  uptr trace_part_total_allocated SANITIZER_GUARDED_BY(slot_mtx);
+  uptr trace_part_recycle_finished SANITIZER_GUARDED_BY(slot_mtx);
+  uptr trace_part_finished_excess SANITIZER_GUARDED_BY(slot_mtx);
 };
 
 extern Context *ctx;  // The one and the only global runtime context.
@@ -566,10 +566,10 @@
 }
 #endif
 
-void SlotAttachAndLock(ThreadState *thr) ACQUIRE(thr->slot->mtx);
+void SlotAttachAndLock(ThreadState *thr) SANITIZER_ACQUIRE(thr->slot->mtx);
 void SlotDetach(ThreadState *thr);
-void SlotLock(ThreadState *thr) ACQUIRE(thr->slot->mtx);
-void SlotUnlock(ThreadState *thr) RELEASE(thr->slot->mtx);
+void SlotLock(ThreadState *thr) SANITIZER_ACQUIRE(thr->slot->mtx);
+void SlotUnlock(ThreadState *thr) SANITIZER_RELEASE(thr->slot->mtx);
 void DoReset(ThreadState *thr, uptr epoch);
 void FlushShadowMemory();
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
@@ -113,7 +113,7 @@
   return part;
 }
 
-static void TracePartFree(TracePart* part) REQUIRES(ctx->slot_mtx) {
+static void TracePartFree(TracePart* part) SANITIZER_REQUIRES(ctx->slot_mtx) {
   DCHECK(part->trace);
   part->trace = nullptr;
   ctx->trace_part_recycle.PushFront(part);
@@ -208,7 +208,7 @@
 
 // Clang does not understand locking all slots in the loop:
 // error: expecting mutex 'slot.mtx' to be held at start of each loop
-void DoReset(ThreadState* thr, uptr epoch) NO_THREAD_SAFETY_ANALYSIS {
+void DoReset(ThreadState* thr, uptr epoch) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
   {
     for (auto& slot : ctx->slots) {
       slot.mtx.Lock();
@@ -230,7 +230,7 @@
 void FlushShadowMemory() { DoReset(nullptr, 0); }
 
 static TidSlot* FindSlotAndLock(ThreadState* thr)
-    ACQUIRE(thr->slot->mtx) NO_THREAD_SAFETY_ANALYSIS {
+    SANITIZER_ACQUIRE(thr->slot->mtx) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
   CHECK(!thr->slot);
   TidSlot* slot = nullptr;
   for (;;) {
@@ -334,7 +334,7 @@
   SlotDetachImpl(thr, true);
 }
 
-void SlotLock(ThreadState* thr) NO_THREAD_SAFETY_ANALYSIS {
+void SlotLock(ThreadState* thr) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
   DCHECK(!thr->slot_locked);
 #if SANITIZER_DEBUG
   // Check these mutexes are not locked.
@@ -756,7 +756,7 @@
 }
 
 #if !SANITIZER_GO
-void ForkBefore(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
+void ForkBefore(ThreadState* thr, uptr pc) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
   GlobalProcessorLock();
   // Detaching from the slot makes OnUserFree skip writing to the shadow.
   // The slot will be locked so any attempts to use it will deadlock anyway.
@@ -783,7 +783,7 @@
   __tsan_test_only_on_fork();
 }
 
-static void ForkAfter(ThreadState* thr) NO_THREAD_SAFETY_ANALYSIS {
+static void ForkAfter(ThreadState* thr) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
   thr->suppress_reports--;  // Enabled in ForkBefore.
   thr->ignore_interceptors--;
   thr->ignore_reads_and_writes--;
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_access.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl_access.cpp
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl_access.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_access.cpp
@@ -156,7 +156,7 @@
 
 NOINLINE void DoReportRace(ThreadState* thr, RawShadow* shadow_mem, Shadow cur,
                            Shadow old,
-                           AccessType typ) NO_THREAD_SAFETY_ANALYSIS {
+                           AccessType typ) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
   // For the free shadow markers the first element (that contains kFreeSid)
   // triggers the race, but the second element contains info about the freeing
  // thread, take it.
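Finally, `DoReset()` above documents the analysis limitation in its own comment: "Clang does not understand locking all slots in the loop". The checker tracks a fixed, statically known set of capabilities per program point, so acquiring a runtime-determined number of locks in a loop cannot be expressed, and the function opts out. Standalone sketch of the rejected shape:

```cpp
#include <mutex>

struct TidSlot {  // hypothetical, mirrors ctx->slots above
  std::mutex mtx;
};

TidSlot slots[4];

// With annotated mutexes this loop draws
// "expecting mutex 'slot.mtx' to be held at start of each loop",
// which is why the real DoReset() disables the analysis instead.
void DoReset() {
  for (auto &slot : slots) slot.mtx.lock();
  // ... reset global state while every slot is quiesced ...
  for (auto &slot : slots) slot.mtx.unlock();
}
```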