diff --git a/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp b/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp --- a/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp +++ b/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp @@ -268,7 +268,7 @@ return; } __sync_synchronize(); - SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a); + SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false); Lock l(&s->mtx); thr->fast_state.IncrementEpoch(); // Can't increment epoch w/o writing to the trace as well. @@ -282,7 +282,7 @@ MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog()); if (LIKELY(mo == mo_relaxed)) return F(a, v); - SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a); + SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false); Lock l(&s->mtx); thr->fast_state.IncrementEpoch(); // Can't increment epoch w/o writing to the trace as well. @@ -415,7 +415,7 @@ } bool release = IsReleaseOrder(mo); - SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a); + SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false); RWLock l(&s->mtx, release); T cc = *c; T pr = func_cas(a, cc, v); diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp --- a/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp +++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp @@ -71,9 +71,10 @@ MemoryWrite(thr, pc, addr, kSizeLog1); thr->is_freeing = false; } - SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr); + SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true); Lock l(&s->mtx); s->SetFlags(flagz & MutexCreationFlagMask); + // Save the stack in case the sync object was created earlier as an atomic.
if (!SANITIZER_GO && s->creation_stack_id == 0) s->creation_stack_id = CurrentStackId(thr, pc); } @@ -143,7 +144,7 @@ void MutexPreLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) { DPrintf("#%d: MutexPreLock %zx flagz=0x%x\n", thr->tid, addr, flagz); if (!(flagz & MutexFlagTryLock) && common_flags()->detect_deadlocks) { - SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr); + SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true); { ReadLock l(&s->mtx); s->UpdateFlags(flagz); @@ -171,7 +172,7 @@ bool first = false; bool report_double_lock = false; { - SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr); + SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true); Lock l(&s->mtx); s->UpdateFlags(flagz); thr->fast_state.IncrementEpoch(); @@ -221,7 +222,7 @@ bool report_bad_unlock = false; int rec = 0; { - SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr); + SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true); Lock l(&s->mtx); thr->fast_state.IncrementEpoch(); TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId()); @@ -260,7 +261,7 @@ DPrintf("#%d: MutexPreReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz); if (!(flagz & MutexFlagTryLock) && common_flags()->detect_deadlocks) { { - SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr); + SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true); ReadLock l(&s->mtx); s->UpdateFlags(flagz); Callback cb(thr, pc); @@ -279,7 +280,7 @@ bool report_bad_lock = false; bool pre_lock = false; { - SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr); + SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true); ReadLock l(&s->mtx); s->UpdateFlags(flagz); thr->fast_state.IncrementEpoch(); @@ -318,7 +319,7 @@ u64 mid = 0; bool report_bad_unlock = false; { - SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr); + SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true); Lock l(&s->mtx); thr->fast_state.IncrementEpoch(); TraceAddEvent(thr, 
thr->fast_state, EventTypeRUnlock, s->GetId()); @@ -351,7 +352,7 @@ u64 mid = 0; bool report_bad_unlock = false; { - SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr); + SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true); Lock l(&s->mtx); bool write = true; if (s->owner_tid == kInvalidTid) { @@ -392,7 +393,7 @@ void MutexRepair(ThreadState *thr, uptr pc, uptr addr) { DPrintf("#%d: MutexRepair %zx\n", thr->tid, addr); - SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr); + SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true); Lock l(&s->mtx); s->owner_tid = kInvalidTid; s->recursion = 0; @@ -400,7 +401,7 @@ void MutexInvalidAccess(ThreadState *thr, uptr pc, uptr addr) { DPrintf("#%d: MutexInvalidAccess %zx\n", thr->tid, addr); - SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr); + SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true); ReportMutexMisuse(thr, pc, ReportTypeMutexInvalidAccess, addr, s->GetId()); } @@ -438,7 +439,7 @@ DPrintf("#%d: ReleaseStoreAcquire %zx\n", thr->tid, addr); if (thr->ignore_sync) return; - SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr); + SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, false); Lock l(&s->mtx); thr->fast_state.IncrementEpoch(); // Can't increment epoch w/o writing to the trace as well. @@ -450,7 +451,7 @@ DPrintf("#%d: Release %zx\n", thr->tid, addr); if (thr->ignore_sync) return; - SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr); + SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, false); Lock l(&s->mtx); thr->fast_state.IncrementEpoch(); // Can't increment epoch w/o writing to the trace as well. @@ -462,7 +463,7 @@ DPrintf("#%d: ReleaseStore %zx\n", thr->tid, addr); if (thr->ignore_sync) return; - SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr); + SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, false); Lock l(&s->mtx); thr->fast_state.IncrementEpoch(); // Can't increment epoch w/o writing to the trace as well. 
diff --git a/compiler-rt/lib/tsan/rtl/tsan_sync.h b/compiler-rt/lib/tsan/rtl/tsan_sync.h --- a/compiler-rt/lib/tsan/rtl/tsan_sync.h +++ b/compiler-rt/lib/tsan/rtl/tsan_sync.h @@ -64,7 +64,7 @@ // with the mtx. This reduces contention for hot sync objects. SyncClock clock; - void Init(ThreadState *thr, uptr pc, uptr addr, u64 uid); + void Init(ThreadState *thr, uptr pc, uptr addr, u64 uid, bool save_stack); void Reset(Processor *proc); u64 GetId() const { @@ -115,11 +115,12 @@ void ResetRange(Processor *proc, uptr p, uptr sz); MBlock* GetBlock(uptr p); - SyncVar *GetSyncOrCreate(ThreadState *thr, uptr pc, uptr addr) { - return GetSync(thr, pc, addr, true); + SyncVar *GetSyncOrCreate(ThreadState *thr, uptr pc, uptr addr, + bool save_stack) { + return GetSync(thr, pc, addr, true, save_stack); } SyncVar *GetSyncIfExists(uptr addr) { - return GetSync(nullptr, 0, addr, false); + return GetSync(nullptr, 0, addr, false, false); } void MoveMemory(uptr src, uptr dst, uptr sz); @@ -136,7 +137,8 @@ SyncAlloc sync_alloc_; atomic_uint64_t uid_gen_; - SyncVar *GetSync(ThreadState *thr, uptr pc, uptr addr, bool create); + SyncVar *GetSync(ThreadState *thr, uptr pc, uptr addr, bool create, + bool save_stack); }; } // namespace __tsan diff --git a/compiler-rt/lib/tsan/rtl/tsan_sync.cpp b/compiler-rt/lib/tsan/rtl/tsan_sync.cpp --- a/compiler-rt/lib/tsan/rtl/tsan_sync.cpp +++ b/compiler-rt/lib/tsan/rtl/tsan_sync.cpp @@ -20,13 +20,14 @@ SyncVar::SyncVar() : mtx(MutexTypeSyncVar) { Reset(0); } -void SyncVar::Init(ThreadState *thr, uptr pc, uptr addr, u64 uid) { +void SyncVar::Init(ThreadState *thr, uptr pc, uptr addr, u64 uid, + bool save_stack) { this->addr = addr; this->uid = uid; this->next = 0; creation_stack_id = 0; - if (!SANITIZER_GO) // Go does not use them + if (save_stack && !SANITIZER_GO) // Go does not use them creation_stack_id = CurrentStackId(thr, pc); if (common_flags()->detect_deadlocks) DDMutexInit(thr, pc, this); @@ -190,7 +191,8 @@ } } -SyncVar 
*MetaMap::GetSync(ThreadState *thr, uptr pc, uptr addr, bool create) { +SyncVar *MetaMap::GetSync(ThreadState *thr, uptr pc, uptr addr, bool create, + bool save_stack) { u32 *meta = MemToMeta(addr); u32 idx0 = *meta; u32 myidx = 0; @@ -219,7 +221,7 @@ const u64 uid = atomic_fetch_add(&uid_gen_, 1, memory_order_relaxed); myidx = sync_alloc_.Alloc(&thr->proc()->sync_cache); mys = sync_alloc_.Map(myidx); - mys->Init(thr, pc, addr, uid); + mys->Init(thr, pc, addr, uid, save_stack); } mys->next = idx0; if (atomic_compare_exchange_strong((atomic_uint32_t*)meta, &idx0, diff --git a/compiler-rt/lib/tsan/tests/unit/tsan_sync_test.cpp b/compiler-rt/lib/tsan/tests/unit/tsan_sync_test.cpp --- a/compiler-rt/lib/tsan/tests/unit/tsan_sync_test.cpp +++ b/compiler-rt/lib/tsan/tests/unit/tsan_sync_test.cpp @@ -57,10 +57,10 @@ m->AllocBlock(thr, 0, (uptr)&block[0], 4 * sizeof(u64)); SyncVar *s1 = m->GetSyncIfExists((uptr)&block[0]); EXPECT_EQ(s1, (SyncVar*)0); - s1 = m->GetSyncOrCreate(thr, 0, (uptr)&block[0]); + s1 = m->GetSyncOrCreate(thr, 0, (uptr)&block[0], false); EXPECT_NE(s1, (SyncVar*)0); EXPECT_EQ(s1->addr, (uptr)&block[0]); - SyncVar *s2 = m->GetSyncOrCreate(thr, 0, (uptr)&block[1]); + SyncVar *s2 = m->GetSyncOrCreate(thr, 0, (uptr)&block[1], false); EXPECT_NE(s2, (SyncVar*)0); EXPECT_EQ(s2->addr, (uptr)&block[1]); m->FreeBlock(thr->proc(), (uptr)&block[0]); @@ -79,8 +79,8 @@ u64 block2[4] = {}; // fake malloc block m->AllocBlock(thr, 0, (uptr)&block1[0], 3 * sizeof(u64)); m->AllocBlock(thr, 0, (uptr)&block1[3], 1 * sizeof(u64)); - SyncVar *s1 = m->GetSyncOrCreate(thr, 0, (uptr)&block1[0]); - SyncVar *s2 = m->GetSyncOrCreate(thr, 0, (uptr)&block1[1]); + SyncVar *s1 = m->GetSyncOrCreate(thr, 0, (uptr)&block1[0], false); + SyncVar *s2 = m->GetSyncOrCreate(thr, 0, (uptr)&block1[1], false); m->MoveMemory((uptr)&block1[0], (uptr)&block2[0], 4 * sizeof(u64)); MBlock *mb1 = m->GetBlock((uptr)&block1[0]); EXPECT_EQ(mb1, (MBlock*)0); @@ -111,7 +111,7 @@ MetaMap *m = &ctx->metamap; 
u64 block[1] = {}; // fake malloc block m->AllocBlock(thr, 0, (uptr)&block[0], 1 * sizeof(u64)); - SyncVar *s = m->GetSyncOrCreate(thr, 0, (uptr)&block[0]); + SyncVar *s = m->GetSyncOrCreate(thr, 0, (uptr)&block[0], false); s->Reset(thr->proc()); uptr sz = m->FreeBlock(thr->proc(), (uptr)&block[0]); EXPECT_EQ(sz, 1 * sizeof(u64));