diff --git a/compiler-rt/lib/tsan/go/tsan_go.cpp b/compiler-rt/lib/tsan/go/tsan_go.cpp
--- a/compiler-rt/lib/tsan/go/tsan_go.cpp
+++ b/compiler-rt/lib/tsan/go/tsan_go.cpp
@@ -167,25 +167,25 @@
 }

 void __tsan_read(ThreadState *thr, void *addr, void *pc) {
-  MemoryRead(thr, (uptr)pc, (uptr)addr, kSizeLog1);
+  MemoryAccess(thr, (uptr)pc, (uptr)addr, 1, kAccessRead);
 }

 void __tsan_read_pc(ThreadState *thr, void *addr, uptr callpc, uptr pc) {
   if (callpc != 0)
     FuncEntry(thr, callpc);
-  MemoryRead(thr, (uptr)pc, (uptr)addr, kSizeLog1);
+  MemoryAccess(thr, (uptr)pc, (uptr)addr, 1, kAccessRead);
   if (callpc != 0)
     FuncExit(thr);
 }

 void __tsan_write(ThreadState *thr, void *addr, void *pc) {
-  MemoryWrite(thr, (uptr)pc, (uptr)addr, kSizeLog1);
+  MemoryAccess(thr, (uptr)pc, (uptr)addr, 1, kAccessWrite);
 }

 void __tsan_write_pc(ThreadState *thr, void *addr, uptr callpc, uptr pc) {
   if (callpc != 0)
     FuncEntry(thr, callpc);
-  MemoryWrite(thr, (uptr)pc, (uptr)addr, kSizeLog1);
+  MemoryAccess(thr, (uptr)pc, (uptr)addr, 1, kAccessWrite);
   if (callpc != 0)
     FuncExit(thr);
 }
diff --git a/compiler-rt/lib/tsan/rtl/tsan_external.cpp b/compiler-rt/lib/tsan/rtl/tsan_external.cpp
--- a/compiler-rt/lib/tsan/rtl/tsan_external.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_external.cpp
@@ -57,16 +57,14 @@
 #if !SANITIZER_GO
-typedef void(*AccessFunc)(ThreadState *, uptr, uptr, int);
-void ExternalAccess(void *addr, uptr caller_pc, void *tag, AccessFunc access) {
+void ExternalAccess(void *addr, uptr caller_pc, void *tag, AccessType typ) {
   CHECK_LT(tag, atomic_load(&used_tags, memory_order_relaxed));
   ThreadState *thr = cur_thread();
   if (caller_pc) FuncEntry(thr, caller_pc);
   InsertShadowStackFrameForTag(thr, (uptr)tag);
   bool in_ignored_lib;
-  if (!caller_pc || !libignore()->IsIgnored(caller_pc, &in_ignored_lib)) {
-    access(thr, CALLERPC, (uptr)addr, kSizeLog1);
-  }
+  if (!caller_pc || !libignore()->IsIgnored(caller_pc, &in_ignored_lib))
+    MemoryAccess(thr, CALLERPC, (uptr)addr, 1, typ);
   FuncExit(thr);
   if (caller_pc) FuncExit(thr);
 }
@@ -111,12 +109,12 @@
 SANITIZER_INTERFACE_ATTRIBUTE
 void __tsan_external_read(void *addr, void *caller_pc, void *tag) {
-  ExternalAccess(addr, STRIP_PAC_PC(caller_pc), tag, MemoryRead);
+  ExternalAccess(addr, STRIP_PAC_PC(caller_pc), tag, kAccessRead);
 }

 SANITIZER_INTERFACE_ATTRIBUTE
 void __tsan_external_write(void *addr, void *caller_pc, void *tag) {
-  ExternalAccess(addr, STRIP_PAC_PC(caller_pc), tag, MemoryWrite);
+  ExternalAccess(addr, STRIP_PAC_PC(caller_pc), tag, kAccessWrite);
 }
 }  // extern "C"
diff --git a/compiler-rt/lib/tsan/rtl/tsan_fd.cpp b/compiler-rt/lib/tsan/rtl/tsan_fd.cpp
--- a/compiler-rt/lib/tsan/rtl/tsan_fd.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_fd.cpp
@@ -115,7 +115,7 @@
     MemoryRangeImitateWrite(thr, pc, (uptr)d, 8);
   } else {
     // See the dup-related comment in FdClose.
-    MemoryRead(thr, pc, (uptr)d, kSizeLog8);
+    MemoryAccess(thr, pc, (uptr)d, 8, kAccessRead);
   }
 }

@@ -163,7 +163,7 @@
   FdDesc *d = fddesc(thr, pc, fd);
   FdSync *s = d->sync;
   DPrintf("#%d: FdAcquire(%d) -> %p\n", thr->tid, fd, s);
-  MemoryRead(thr, pc, (uptr)d, kSizeLog8);
+  MemoryAccess(thr, pc, (uptr)d, 8, kAccessRead);
   if (s)
     Acquire(thr, pc, (uptr)s);
 }
@@ -174,7 +174,7 @@
   FdDesc *d = fddesc(thr, pc, fd);
   FdSync *s = d->sync;
   DPrintf("#%d: FdRelease(%d) -> %p\n", thr->tid, fd, s);
-  MemoryRead(thr, pc, (uptr)d, kSizeLog8);
+  MemoryAccess(thr, pc, (uptr)d, 8, kAccessRead);
   if (s)
     Release(thr, pc, (uptr)s);
 }
@@ -184,7 +184,7 @@
   if (bogusfd(fd))
     return;
   FdDesc *d = fddesc(thr, pc, fd);
-  MemoryRead(thr, pc, (uptr)d, kSizeLog8);
+  MemoryAccess(thr, pc, (uptr)d, 8, kAccessRead);
 }

 void FdClose(ThreadState *thr, uptr pc, int fd, bool write) {
@@ -194,7 +194,7 @@
   FdDesc *d = fddesc(thr, pc, fd);
   if (write) {
     // To catch races between fd usage and close.
-    MemoryWrite(thr, pc, (uptr)d, kSizeLog8);
+    MemoryAccess(thr, pc, (uptr)d, 8, kAccessWrite);
   } else {
     // This path is used only by dup2/dup3 calls.
     // We do read instead of write because there is a number of legitimate
@@ -204,7 +204,7 @@
     // 2. Some daemons dup /dev/null in place of stdin/stdout.
     // On the other hand we have not seen cases when write here catches real
    // bugs.
-    MemoryRead(thr, pc, (uptr)d, kSizeLog8);
+    MemoryAccess(thr, pc, (uptr)d, 8, kAccessRead);
   }
   // We need to clear it, because if we do not intercept any call out there
   // that creates fd, we will hit false postives.
@@ -228,7 +228,7 @@
     return;
   // Ignore the case when user dups not yet connected socket.
   FdDesc *od = fddesc(thr, pc, oldfd);
-  MemoryRead(thr, pc, (uptr)od, kSizeLog8);
+  MemoryAccess(thr, pc, (uptr)od, 8, kAccessRead);
   FdClose(thr, pc, newfd, write);
   init(thr, pc, newfd, ref(od->sync), write);
 }
diff --git a/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp b/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp
--- a/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp
@@ -1445,14 +1445,14 @@
 #if !SANITIZER_MAC
 TSAN_INTERCEPTOR(int, pthread_barrier_init, void *b, void *a, unsigned count) {
   SCOPED_TSAN_INTERCEPTOR(pthread_barrier_init, b, a, count);
-  MemoryWrite(thr, pc, (uptr)b, kSizeLog1);
+  MemoryAccess(thr, pc, (uptr)b, 1, kAccessWrite);
   int res = REAL(pthread_barrier_init)(b, a, count);
   return res;
 }

 TSAN_INTERCEPTOR(int, pthread_barrier_destroy, void *b) {
   SCOPED_TSAN_INTERCEPTOR(pthread_barrier_destroy, b);
-  MemoryWrite(thr, pc, (uptr)b, kSizeLog1);
+  MemoryAccess(thr, pc, (uptr)b, 1, kAccessWrite);
   int res = REAL(pthread_barrier_destroy)(b);
   return res;
 }
@@ -1460,9 +1460,9 @@
 TSAN_INTERCEPTOR(int, pthread_barrier_wait, void *b) {
   SCOPED_TSAN_INTERCEPTOR(pthread_barrier_wait, b);
   Release(thr, pc, (uptr)b);
-  MemoryRead(thr, pc, (uptr)b, kSizeLog1);
+  MemoryAccess(thr, pc, (uptr)b, 1, kAccessRead);
   int res = REAL(pthread_barrier_wait)(b);
-  MemoryRead(thr, pc, (uptr)b, kSizeLog1);
+  MemoryAccess(thr, pc, (uptr)b, 1, kAccessRead);
   if (res == 0 || res == PTHREAD_BARRIER_SERIAL_THREAD) {
     Acquire(thr, pc, (uptr)b);
   }
diff --git a/compiler-rt/lib/tsan/rtl/tsan_interface.cpp b/compiler-rt/lib/tsan/rtl/tsan_interface.cpp
--- a/compiler-rt/lib/tsan/rtl/tsan_interface.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_interface.cpp
@@ -30,57 +30,65 @@
 }

 void __tsan_read16(void *addr) {
-  MemoryRead(cur_thread(), CALLERPC, (uptr)addr, kSizeLog8);
-  MemoryRead(cur_thread(), CALLERPC, (uptr)addr + 8, kSizeLog8);
+  uptr pc = CALLERPC;
+  ThreadState *thr = cur_thread();
+  MemoryAccess(thr, pc, (uptr)addr, 8, kAccessRead);
+  MemoryAccess(thr, pc, (uptr)addr + 8, 8, kAccessRead);
 }

 void __tsan_write16(void *addr) {
-  MemoryWrite(cur_thread(), CALLERPC, (uptr)addr, kSizeLog8);
-  MemoryWrite(cur_thread(), CALLERPC, (uptr)addr + 8, kSizeLog8);
+  uptr pc = CALLERPC;
+  ThreadState *thr = cur_thread();
+  MemoryAccess(thr, pc, (uptr)addr, 8, kAccessWrite);
+  MemoryAccess(thr, pc, (uptr)addr + 8, 8, kAccessWrite);
 }

 void __tsan_read16_pc(void *addr, void *pc) {
-  MemoryRead(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, kSizeLog8);
-  MemoryRead(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr + 8, kSizeLog8);
+  uptr pc_no_pac = STRIP_PAC_PC(pc);
+  ThreadState *thr = cur_thread();
+  MemoryAccess(thr, pc_no_pac, (uptr)addr, 8, kAccessRead);
+  MemoryAccess(thr, pc_no_pac, (uptr)addr + 8, 8, kAccessRead);
 }

 void __tsan_write16_pc(void *addr, void *pc) {
-  MemoryWrite(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, kSizeLog8);
-  MemoryWrite(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr + 8, kSizeLog8);
+  uptr pc_no_pac = STRIP_PAC_PC(pc);
+  ThreadState *thr = cur_thread();
+  MemoryAccess(thr, pc_no_pac, (uptr)addr, 8, kAccessWrite);
+  MemoryAccess(thr, pc_no_pac, (uptr)addr + 8, 8, kAccessWrite);
 }

 // __tsan_unaligned_read/write calls are emitted by compiler.

 void __tsan_unaligned_read2(const void *addr) {
-  UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 2, false, false);
+  UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 2, kAccessRead);
 }

 void __tsan_unaligned_read4(const void *addr) {
-  UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 4, false, false);
+  UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 4, kAccessRead);
 }

 void __tsan_unaligned_read8(const void *addr) {
-  UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 8, false, false);
+  UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 8, kAccessRead);
 }

 void __tsan_unaligned_read16(const void *addr) {
-  UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 16, false, false);
+  UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 16, kAccessRead);
 }

 void __tsan_unaligned_write2(void *addr) {
-  UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 2, true, false);
+  UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 2, kAccessWrite);
 }

 void __tsan_unaligned_write4(void *addr) {
-  UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 4, true, false);
+  UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 4, kAccessWrite);
 }

 void __tsan_unaligned_write8(void *addr) {
-  UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 8, true, false);
+  UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 8, kAccessWrite);
 }

 void __tsan_unaligned_write16(void *addr) {
-  UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 16, true, false);
+  UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 16, kAccessWrite);
 }

 // __sanitizer_unaligned_load/store are for user instrumentation.
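For reference: every conversion above routes into the flag-based MemoryAccess()/UnalignedMemoryAccess() entry points declared in the tsan_rtl.h hunk below. The following is a minimal, standalone sketch of how the AccessType bits compose and decompose; the typedef and enum values are copied from that hunk (with uptr replaced by uintptr_t so it compiles outside the runtime), while the main() driver is purely illustrative and not part of the patch.

#include <cstdint>
#include <cstdio>

// Copied from the tsan_rtl.h hunk below: a plain write is the default
// (no bits set); reads and atomic accesses are opt-in flags that can be OR-ed.
typedef uintptr_t AccessType;
enum : AccessType {
  kAccessWrite = 0,
  kAccessRead = 1 << 0,
  kAccessAtomic = 1 << 1,
};

int main() {
  AccessType typ = kAccessRead | kAccessAtomic;  // e.g. an atomic load
  // The same decomposition the new inline MemoryAccess() wrapper performs:
  bool is_write = !(typ & kAccessRead);
  bool is_atomic = typ & kAccessAtomic;
  std::printf("is_write=%d is_atomic=%d\n", (int)is_write, (int)is_atomic);  // prints 0 1
  return 0;
}

In the runtime itself, the inline wrapper forwards the decomposed values to the existing size-log based MemoryAccess() implementation, so the old and new call forms behave identically.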
diff --git a/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp b/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp
--- a/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp
@@ -161,16 +161,16 @@
 }
 #endif

-template <typename T>
-static int SizeLog() {
+template <typename T>
+static int AccessSize() {
   if (sizeof(T) <= 1)
-    return kSizeLog1;
+    return 1;
   else if (sizeof(T) <= 2)
-    return kSizeLog2;
+    return 2;
   else if (sizeof(T) <= 4)
-    return kSizeLog4;
+    return 4;
   else
-    return kSizeLog8;
+    return 8;
   // For 16-byte atomics we also use 8-byte memory access,
   // this leads to false negatives only in very obscure cases.
 }
@@ -224,7 +224,8 @@
   // This fast-path is critical for performance.
   // Assume the access is atomic.
   if (!IsAcquireOrder(mo)) {
-    MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
+    MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(),
+                 kAccessRead | kAccessAtomic);
     return NoTsanAtomicLoad(a, mo);
   }
   // Don't create sync object if it does not exist yet. For example, an atomic
@@ -238,7 +239,7 @@
     // of the value and the clock we acquire.
     v = NoTsanAtomicLoad(a, mo);
   }
-  MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
+  MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(), kAccessRead | kAccessAtomic);
   return v;
 }

@@ -258,7 +259,7 @@
 static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
                         morder mo) {
   CHECK(IsStoreOrder(mo));
-  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
+  MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(), kAccessWrite | kAccessAtomic);
   // This fast-path is critical for performance.
   // Assume the access is atomic.
   // Strictly saying even relaxed store cuts off release sequence,
@@ -279,7 +280,7 @@

 template <typename T, T (*F)(volatile T *v, T op)>
 static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
-  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
+  MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(), kAccessWrite | kAccessAtomic);
   if (LIKELY(mo == mo_relaxed))
     return F(a, v);
   SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false);
@@ -404,7 +405,7 @@
   // (mo_relaxed) when those are used.
   CHECK(IsLoadOrder(fmo));

-  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
+  MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(), kAccessWrite | kAccessAtomic);
   if (LIKELY(mo == mo_relaxed && fmo == mo_relaxed)) {
     T cc = *c;
     T pr = func_cas(a, cc, v);
diff --git a/compiler-rt/lib/tsan/rtl/tsan_interface_inl.h b/compiler-rt/lib/tsan/rtl/tsan_interface_inl.h
--- a/compiler-rt/lib/tsan/rtl/tsan_interface_inl.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_interface_inl.h
@@ -19,67 +19,67 @@
 using namespace __tsan;

 void __tsan_read1(void *addr) {
-  MemoryRead(cur_thread(), CALLERPC, (uptr)addr, kSizeLog1);
+  MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 1, kAccessRead);
 }

 void __tsan_read2(void *addr) {
-  MemoryRead(cur_thread(), CALLERPC, (uptr)addr, kSizeLog2);
+  MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 2, kAccessRead);
 }

 void __tsan_read4(void *addr) {
-  MemoryRead(cur_thread(), CALLERPC, (uptr)addr, kSizeLog4);
+  MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 4, kAccessRead);
 }

 void __tsan_read8(void *addr) {
-  MemoryRead(cur_thread(), CALLERPC, (uptr)addr, kSizeLog8);
+  MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 8, kAccessRead);
 }

 void __tsan_write1(void *addr) {
-  MemoryWrite(cur_thread(), CALLERPC, (uptr)addr, kSizeLog1);
+  MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 1, kAccessWrite);
 }

 void __tsan_write2(void *addr) {
-  MemoryWrite(cur_thread(), CALLERPC, (uptr)addr, kSizeLog2);
+  MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 2, kAccessWrite);
 }

 void __tsan_write4(void *addr) {
-  MemoryWrite(cur_thread(), CALLERPC, (uptr)addr, kSizeLog4);
+  MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 4, kAccessWrite);
 }

 void __tsan_write8(void *addr) {
-  MemoryWrite(cur_thread(), CALLERPC, (uptr)addr, kSizeLog8);
+  MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 8, kAccessWrite);
 }

 void __tsan_read1_pc(void *addr, void *pc) {
-  MemoryRead(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, kSizeLog1);
+  MemoryAccess(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, 1, kAccessRead);
 }

 void __tsan_read2_pc(void *addr, void *pc) {
-  MemoryRead(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, kSizeLog2);
+  MemoryAccess(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, 2, kAccessRead);
 }

 void __tsan_read4_pc(void *addr, void *pc) {
-  MemoryRead(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, kSizeLog4);
+  MemoryAccess(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, 4, kAccessRead);
 }

 void __tsan_read8_pc(void *addr, void *pc) {
-  MemoryRead(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, kSizeLog8);
+  MemoryAccess(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, 8, kAccessRead);
 }

 void __tsan_write1_pc(void *addr, void *pc) {
-  MemoryWrite(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, kSizeLog1);
+  MemoryAccess(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, 1, kAccessWrite);
 }

 void __tsan_write2_pc(void *addr, void *pc) {
-  MemoryWrite(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, kSizeLog2);
+  MemoryAccess(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, 2, kAccessWrite);
 }

 void __tsan_write4_pc(void *addr, void *pc) {
-  MemoryWrite(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, kSizeLog4);
+  MemoryAccess(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, 4, kAccessWrite);
 }

 void __tsan_write8_pc(void *addr, void *pc) {
-  MemoryWrite(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, kSizeLog8);
+  MemoryAccess(cur_thread(), STRIP_PAC_PC(pc), (uptr)addr, 8, kAccessWrite);
 }

 void __tsan_vptr_update(void **vptr_p, void *new_val) {
@@ -87,7 +87,7 @@
   if (*vptr_p != new_val) {
     ThreadState *thr = cur_thread();
     thr->is_vptr_access = true;
-    MemoryWrite(thr, CALLERPC, (uptr)vptr_p, kSizeLog8);
+    MemoryAccess(thr, CALLERPC, (uptr)vptr_p, 8, kAccessWrite);
     thr->is_vptr_access = false;
   }
 }
@@ -96,7 +96,7 @@
   CHECK_EQ(sizeof(vptr_p), 8);
   ThreadState *thr = cur_thread();
   thr->is_vptr_access = true;
-  MemoryRead(thr, CALLERPC, (uptr)vptr_p, kSizeLog8);
+  MemoryAccess(thr, CALLERPC, (uptr)vptr_p, 8, kAccessRead);
   thr->is_vptr_access = false;
 }
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl.h b/compiler-rt/lib/tsan/rtl/tsan_rtl.h
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl.h
@@ -692,6 +692,14 @@
 void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write);
 void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write);

+typedef uptr AccessType;
+
+enum : AccessType {
+  kAccessWrite = 0,
+  kAccessRead = 1 << 0,
+  kAccessAtomic = 1 << 1,
+};
+
 void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
                   int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic);
 void MemoryAccessImpl(ThreadState *thr, uptr addr,
@@ -701,32 +709,36 @@
                        uptr size, bool is_write);
 void MemoryAccessRangeStep(ThreadState *thr, uptr pc, uptr addr,
                            uptr size, uptr step, bool is_write);
-void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr,
-                           int size, bool kAccessIsWrite, bool kIsAtomic);
+void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr, uptr size,
+                           AccessType typ);

 const int kSizeLog1 = 0;
 const int kSizeLog2 = 1;
 const int kSizeLog4 = 2;
 const int kSizeLog8 = 3;

-void ALWAYS_INLINE MemoryRead(ThreadState *thr, uptr pc,
-                              uptr addr, int kAccessSizeLog) {
-  MemoryAccess(thr, pc, addr, kAccessSizeLog, false, false);
-}
-
-void ALWAYS_INLINE MemoryWrite(ThreadState *thr, uptr pc,
-                               uptr addr, int kAccessSizeLog) {
-  MemoryAccess(thr, pc, addr, kAccessSizeLog, true, false);
-}
-
-void ALWAYS_INLINE MemoryReadAtomic(ThreadState *thr, uptr pc,
-                                    uptr addr, int kAccessSizeLog) {
-  MemoryAccess(thr, pc, addr, kAccessSizeLog, false, true);
-}
-
-void ALWAYS_INLINE MemoryWriteAtomic(ThreadState *thr, uptr pc,
-                                     uptr addr, int kAccessSizeLog) {
-  MemoryAccess(thr, pc, addr, kAccessSizeLog, true, true);
+ALWAYS_INLINE
+void MemoryAccess(ThreadState *thr, uptr pc, uptr addr, uptr size,
+                  AccessType typ) {
+  int size_log;
+  switch (size) {
+    case 1:
+      size_log = kSizeLog1;
+      break;
+    case 2:
+      size_log = kSizeLog2;
+      break;
+    case 4:
+      size_log = kSizeLog4;
+      break;
+    default:
+      DCHECK_EQ(size, 8);
+      size_log = kSizeLog8;
+      break;
+  }
+  bool is_write = !(typ & kAccessRead);
+  bool is_atomic = typ & kAccessAtomic;
+  MemoryAccess(thr, pc, addr, size_log, is_write, is_atomic);
 }

 void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size);
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
@@ -733,8 +733,11 @@
   return;
 }

-void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr,
-                           int size, bool kAccessIsWrite, bool kIsAtomic) {
+void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr, uptr size,
+                           AccessType typ) {
+  DCHECK(!(typ & kAccessAtomic));
+  const bool kAccessIsWrite = !(typ & kAccessRead);
+  const bool kIsAtomic = false;
   while (size) {
     int size1 = 1;
     int kAccessSizeLog = kSizeLog1;
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp
@@ -68,7 +68,7 @@
   if (!(flagz & MutexFlagLinkerInit) && IsAppMem(addr)) {
     CHECK(!thr->is_freeing);
     thr->is_freeing = true;
-    MemoryWrite(thr, pc, addr, kSizeLog1);
+    MemoryAccess(thr, pc, addr, 1, kAccessWrite);
     thr->is_freeing = false;
   }
   SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
@@ -135,7 +135,7 @@
   if (IsAppMem(addr)) {
     CHECK(!thr->is_freeing);
     thr->is_freeing = true;
-    MemoryWrite(thr, pc, addr, kSizeLog1);
+    MemoryAccess(thr, pc, addr, 1, kAccessWrite);
     thr->is_freeing = false;
   }
   // s will be destroyed and freed in MetaMap::FreeBlock.
@@ -166,7 +166,7 @@
   else
     rec = 1;
   if (IsAppMem(addr))
-    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
+    MemoryAccess(thr, pc, addr, 1, kAccessRead | kAccessAtomic);
   u64 mid = 0;
   bool pre_lock = false;
   bool first = false;
@@ -216,7 +216,7 @@
 int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
   DPrintf("#%d: MutexUnlock %zx flagz=0x%x\n", thr->tid, addr, flagz);
   if (IsAppMem(addr))
-    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
+    MemoryAccess(thr, pc, addr, 1, kAccessRead | kAccessAtomic);
   u64 mid = 0;
   bool report_bad_unlock = false;
   int rec = 0;
@@ -274,7 +274,7 @@
 void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
   DPrintf("#%d: MutexPostReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
   if (IsAppMem(addr))
-    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
+    MemoryAccess(thr, pc, addr, 1, kAccessRead | kAccessAtomic);
   u64 mid = 0;
   bool report_bad_lock = false;
   bool pre_lock = false;
@@ -314,7 +314,7 @@
 void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
   DPrintf("#%d: MutexReadUnlock %zx\n", thr->tid, addr);
   if (IsAppMem(addr))
-    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
+    MemoryAccess(thr, pc, addr, 1, kAccessRead | kAccessAtomic);
   u64 mid = 0;
   bool report_bad_unlock = false;
   {
@@ -347,7 +347,7 @@
 void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
   DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr->tid, addr);
   if (IsAppMem(addr))
-    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
+    MemoryAccess(thr, pc, addr, 1, kAccessRead | kAccessAtomic);
   u64 mid = 0;
   bool report_bad_unlock = false;
   {