Index: lib/lsan/lsan_common.cc
===================================================================
--- lib/lsan/lsan_common.cc
+++ lib/lsan/lsan_common.cc
@@ -375,8 +375,9 @@
   Printf("Suppressions used:\n");
   Printf("  count      bytes template\n");
   for (uptr i = 0; i < matched.size(); i++)
-    Printf("%7zu %10zu %s\n", static_cast<uptr>(matched[i]->hit_count),
-           matched[i]->weight, matched[i]->templ);
+    Printf("%7zu %10zu %s\n", static_cast<uptr>(atomic_load(
+        &matched[i]->hit_count, memory_order_relaxed)),
+        matched[i]->weight, matched[i]->templ);
   Printf("%s\n\n", line);
 }
 
@@ -598,7 +599,8 @@
     Suppression *s = GetSuppressionForStack(leaks_[i].stack_trace_id);
     if (s) {
       s->weight += leaks_[i].total_size;
-      s->hit_count += leaks_[i].hit_count;
+      atomic_store(&s->hit_count, atomic_load(&s->hit_count,
+          memory_order_relaxed) + leaks_[i].hit_count, memory_order_relaxed);
       leaks_[i].is_suppressed = true;
     }
   }
Index: lib/sanitizer_common/sanitizer_suppressions.h
===================================================================
--- lib/sanitizer_common/sanitizer_suppressions.h
+++ lib/sanitizer_common/sanitizer_suppressions.h
@@ -14,6 +14,7 @@
 #define SANITIZER_SUPPRESSIONS_H
 
 #include "sanitizer_common.h"
+#include "sanitizer_atomic.h"
 #include "sanitizer_internal_defs.h"
 
 namespace __sanitizer {
@@ -21,7 +22,7 @@
 struct Suppression {
   const char *type;
   char *templ;
-  unsigned hit_count;
+  atomic_uintptr_t hit_count;
   uptr weight;
 };
 
Index: lib/sanitizer_common/sanitizer_suppressions.cc
===================================================================
--- lib/sanitizer_common/sanitizer_suppressions.cc
+++ lib/sanitizer_common/sanitizer_suppressions.cc
@@ -127,13 +127,11 @@
         Printf("%s: failed to parse suppressions\n", SanitizerToolName);
         Die();
       }
-      Suppression s;
+      Suppression s = {};
       s.type = suppression_types_[type];
       s.templ = (char*)InternalAlloc(end2 - line + 1);
       internal_memcpy(s.templ, line, end2 - line);
       s.templ[end2 - line] = 0;
-      s.hit_count = 0;
-      s.weight = 0;
       suppressions_.push_back(s);
       has_suppression_type_[type] = true;
     }
@@ -163,7 +161,7 @@
 
 void SuppressionContext::GetMatched(
     InternalMmapVector<Suppression *> *matched) {
   for (uptr i = 0; i < suppressions_.size(); i++)
-    if (suppressions_[i].hit_count)
+    if (atomic_load(&suppressions_[i].hit_count, memory_order_relaxed))
       matched->push_back(&suppressions_[i]);
 }
Index: lib/tsan/rtl/tsan_interceptors.cc
===================================================================
--- lib/tsan/rtl/tsan_interceptors.cc
+++ lib/tsan/rtl/tsan_interceptors.cc
@@ -1840,7 +1840,7 @@
     ObtainCurrentStack(thr, StackTrace::GetNextInstructionPc(pc), &stack);
     ThreadRegistryLock l(ctx->thread_registry);
     ScopedReport rep(ReportTypeErrnoInSignal);
-    if (!IsFiredSuppression(ctx, rep, stack)) {
+    if (!IsFiredSuppression(ctx, ReportTypeErrnoInSignal, stack)) {
       rep.AddStack(stack, true);
       OutputReport(thr, rep);
     }
Index: lib/tsan/rtl/tsan_interface_ann.cc
===================================================================
--- lib/tsan/rtl/tsan_interface_ann.cc
+++ lib/tsan/rtl/tsan_interface_ann.cc
@@ -63,8 +63,8 @@
 struct ExpectRace {
   ExpectRace *next;
   ExpectRace *prev;
-  int hitcount;
-  int addcount;
+  atomic_uintptr_t hitcount;
+  atomic_uintptr_t addcount;
   uptr addr;
   uptr size;
   char *file;
@@ -90,7 +90,8 @@
   ExpectRace *race = list->next;
   for (; race != list; race = race->next) {
     if (race->addr == addr && race->size == size) {
-      race->addcount++;
+      atomic_store(&race->addcount, atomic_load(&race->addcount,
+          memory_order_relaxed) + 1, memory_order_relaxed);
       return;
     }
   }
@@ -100,8 +101,8 @@
   race->file = f;
   race->line = l;
   race->desc[0] = 0;
-  race->hitcount = 0;
-  race->addcount = 1;
+  atomic_store(&race->hitcount, 0, memory_order_relaxed);
+  atomic_store(&race->addcount, 1, memory_order_relaxed);
   if (desc) {
     int i = 0;
     for (; i < kMaxDescLen - 1 && desc[i]; i++)
@@ -130,7 +131,7 @@
     return false;
   DPrintf("Hit expected/benign race: %s addr=%zx:%d %s:%d\n",
       race->desc, race->addr, (int)race->size, race->file, race->line);
-  race->hitcount++;
+  atomic_fetch_add(&race->hitcount, 1, memory_order_relaxed);
   return true;
 }
 
@@ -146,7 +147,7 @@
 }
 
 bool IsExpectedReport(uptr addr, uptr size) {
-  Lock lock(&dyn_ann_ctx->mtx);
+  ReadLock lock(&dyn_ann_ctx->mtx);
   if (CheckContains(&dyn_ann_ctx->expect, addr, size))
     return true;
   if (CheckContains(&dyn_ann_ctx->benign, addr, size))
@@ -155,20 +156,21 @@
 static void CollectMatchedBenignRaces(Vector<ExpectRace> *matched,
-    int *unique_count, int *hit_count, int ExpectRace::*counter) {
+    int *unique_count, int *hit_count, atomic_uintptr_t ExpectRace::*counter) {
   ExpectRace *list = &dyn_ann_ctx->benign;
   for (ExpectRace *race = list->next; race != list; race = race->next) {
     (*unique_count)++;
-    if (race->*counter == 0)
+    if (atomic_load(&(race->*counter), memory_order_relaxed) == 0)
       continue;
-    (*hit_count) += race->*counter;
+    (*hit_count) += atomic_load(&(race->*counter), memory_order_relaxed);
     uptr i = 0;
     for (; i < matched->Size(); i++) {
       ExpectRace *race0 = &(*matched)[i];
       if (race->line == race0->line
           && internal_strcmp(race->file, race0->file) == 0
           && internal_strcmp(race->desc, race0->desc) == 0) {
-        race0->*counter += race->*counter;
+        atomic_fetch_add(&(race0->*counter), atomic_load(&(race->*counter),
+            memory_order_relaxed), memory_order_relaxed);
         break;
       }
     }
@@ -193,8 +195,8 @@
         hit_count, (int)internal_getpid());
     for (uptr i = 0; i < hit_matched.Size(); i++) {
       Printf("%d %s:%d %s\n",
-          hit_matched[i].hitcount, hit_matched[i].file,
-          hit_matched[i].line, hit_matched[i].desc);
+          atomic_load(&hit_matched[i].hitcount, memory_order_relaxed),
+          hit_matched[i].file, hit_matched[i].line, hit_matched[i].desc);
     }
   }
   if (hit_matched.Size()) {
@@ -203,8 +205,8 @@
         add_count, unique_count, (int)internal_getpid());
     for (uptr i = 0; i < add_matched.Size(); i++) {
       Printf("%d %s:%d %s\n",
-          add_matched[i].addcount, add_matched[i].file,
-          add_matched[i].line, add_matched[i].desc);
+          atomic_load(&add_matched[i].addcount, memory_order_relaxed),
+          add_matched[i].file, add_matched[i].line, add_matched[i].desc);
     }
   }
 }
@@ -303,7 +305,7 @@
   Lock lock(&dyn_ann_ctx->mtx);
   while (dyn_ann_ctx->expect.next != &dyn_ann_ctx->expect) {
     ExpectRace *race = dyn_ann_ctx->expect.next;
-    if (race->hitcount == 0) {
+    if (atomic_load(&race->hitcount, memory_order_relaxed) == 0) {
       ctx->nmissed_expected++;
       ReportMissedExpectedRace(race);
     }
Index: lib/tsan/rtl/tsan_mman.cc
===================================================================
--- lib/tsan/rtl/tsan_mman.cc
+++ lib/tsan/rtl/tsan_mman.cc
@@ -85,12 +85,12 @@
     return;
   VarSizeStackTrace stack;
   ObtainCurrentStack(thr, pc, &stack);
+  if (IsFiredSuppression(ctx, ReportTypeSignalUnsafe, stack))
+    return;
   ThreadRegistryLock l(ctx->thread_registry);
   ScopedReport rep(ReportTypeSignalUnsafe);
-  if (!IsFiredSuppression(ctx, rep, stack)) {
-    rep.AddStack(stack, true);
-    OutputReport(thr, rep);
-  }
+  rep.AddStack(stack, true);
+  OutputReport(thr, rep);
 }
 
 void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align, bool signal) {
Index: lib/tsan/rtl/tsan_mutex.h
===================================================================
--- lib/tsan/rtl/tsan_mutex.h
+++ lib/tsan/rtl/tsan_mutex.h
@@ -32,6 +32,8 @@
   MutexTypeMBlock,
   MutexTypeJavaMBlock,
   MutexTypeDDetector,
+  MutexTypeFired,
+  MutexTypeRacy,
 
   // This must be the last.
   MutexTypeCount
Index: lib/tsan/rtl/tsan_mutex.cc
===================================================================
--- lib/tsan/rtl/tsan_mutex.cc
+++ lib/tsan/rtl/tsan_mutex.cc
@@ -41,6 +41,8 @@
   /*9  MutexTypeMBlock*/     {MutexTypeSyncVar},
   /*10 MutexTypeJavaMBlock*/ {MutexTypeSyncVar},
   /*11 MutexTypeDDetector*/  {},
+  /*12 MutexTypeFired*/      {MutexTypeLeaf},
+  /*13 MutexTypeRacy*/       {MutexTypeLeaf},
 };
 
 static bool CanLockAdj[MutexTypeCount][MutexTypeCount];
Index: lib/tsan/rtl/tsan_rtl.h
===================================================================
--- lib/tsan/rtl/tsan_rtl.h
+++ lib/tsan/rtl/tsan_rtl.h
@@ -458,7 +458,7 @@
 
 struct FiredSuppression {
   ReportType type;
-  uptr pc;
+  uptr pc_or_addr;
   Suppression *supp;
 };
 
@@ -480,9 +480,11 @@
 
   ThreadRegistry *thread_registry;
 
+  Mutex racy_mtx;
   Vector<RacyStacks> racy_stacks;
   Vector<RacyAddress> racy_addresses;
   // Number of fired suppressions may be large enough.
+  Mutex fired_suppressions_mtx;
   InternalMmapVector<FiredSuppression> fired_suppressions;
   DDetector *dd;
 
@@ -587,8 +589,7 @@
 void ReportRace(ThreadState *thr);
 bool OutputReport(ThreadState *thr, const ScopedReport &srep);
-bool IsFiredSuppression(Context *ctx, const ScopedReport &srep,
-                        StackTrace trace);
+bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace);
 bool IsExpectedReport(uptr addr, uptr size);
 void PrintMatchedBenignRaces();
Index: lib/tsan/rtl/tsan_rtl.cc
===================================================================
--- lib/tsan/rtl/tsan_rtl.cc
+++ lib/tsan/rtl/tsan_rtl.cc
@@ -99,8 +99,10 @@
   , nmissed_expected()
   , thread_registry(new(thread_registry_placeholder) ThreadRegistry(
       CreateThreadContext, kMaxTid, kThreadQuarantineSize, kMaxTidReuse))
+  , racy_mtx(MutexTypeRacy, StatMtxRacy)
   , racy_stacks(MBlockRacyStacks)
   , racy_addresses(MBlockRacyAddresses)
+  , fired_suppressions_mtx(MutexTypeFired, StatMtxFired)
   , fired_suppressions(8) {
 }
 
Index: lib/tsan/rtl/tsan_rtl_report.cc
===================================================================
--- lib/tsan/rtl/tsan_rtl_report.cc
+++ lib/tsan/rtl/tsan_rtl_report.cc
@@ -369,27 +369,20 @@
   // This function restores stack trace and mutex set for the thread/epoch.
   // It does so by getting stack trace and mutex set at the beginning of
   // trace part, and then replaying the trace till the given epoch.
-  ctx->thread_registry->CheckLocked();
-  ThreadContext *tctx = static_cast<ThreadContext*>(
-      ctx->thread_registry->GetThreadLocked(tid));
-  if (tctx == 0)
-    return;
-  if (tctx->status != ThreadStatusRunning
-      && tctx->status != ThreadStatusFinished
-      && tctx->status != ThreadStatusDead)
-    return;
-  Trace* trace = ThreadTrace(tctx->tid);
-  Lock l(&trace->mtx);
+  Trace* trace = ThreadTrace(tid);
+  ReadLock l(&trace->mtx);
   const int partidx = (epoch / kTracePartSize) % TraceParts();
   TraceHeader* hdr = &trace->headers[partidx];
-  if (epoch < hdr->epoch0)
+  if (epoch < hdr->epoch0 || epoch >= hdr->epoch0 + kTracePartSize)
     return;
+  CHECK_EQ(RoundDown(epoch, kTracePartSize), hdr->epoch0);
   const u64 epoch0 = RoundDown(epoch, TraceSize());
   const u64 eend = epoch % TraceSize();
   const u64 ebegin = RoundDown(eend, kTracePartSize);
   DPrintf("#%d: RestoreStack epoch=%zu ebegin=%zu eend=%zu partidx=%d\n",
           tid, (uptr)epoch, (uptr)ebegin, (uptr)eend, partidx);
-  InternalScopedBuffer<uptr> stack(kShadowStackSize);
+  Vector<uptr> stack(MBlockReportStack);
+  stack.Resize(hdr->stack0.size + 64);
   for (uptr i = 0; i < hdr->stack0.size; i++) {
     stack[i] = hdr->stack0.trace[i];
     DPrintf2("  #%02zu: pc=%zx\n", i, stack[i]);
@@ -406,6 +399,8 @@
     if (typ == EventTypeMop) {
       stack[pos] = pc;
     } else if (typ == EventTypeFuncEnter) {
+      if (stack.Size() < pos + 2)
+        stack.Resize(pos + 2);
       stack[pos++] = pc;
     } else if (typ == EventTypeFuncExit) {
       if (pos > 0)
@@ -428,50 +423,58 @@
   if (pos == 0 && stack[0] == 0)
     return;
   pos++;
-  stk->Init(stack.data(), pos);
+  stk->Init(&stack[0], pos);
 }
 
 static bool HandleRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2],
                              uptr addr_min, uptr addr_max) {
   bool equal_stack = false;
   RacyStacks hash;
-  if (flags()->suppress_equal_stacks) {
-    hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr));
-    hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr));
-    for (uptr i = 0; i < ctx->racy_stacks.Size(); i++) {
-      if (hash == ctx->racy_stacks[i]) {
-        DPrintf("ThreadSanitizer: suppressing report as doubled (stack)\n");
-        equal_stack = true;
-        break;
-      }
-    }
-  }
   bool equal_address = false;
   RacyAddress ra0 = {addr_min, addr_max};
-  if (flags()->suppress_equal_addresses) {
-    for (uptr i = 0; i < ctx->racy_addresses.Size(); i++) {
-      RacyAddress ra2 = ctx->racy_addresses[i];
-      uptr maxbeg = max(ra0.addr_min, ra2.addr_min);
-      uptr minend = min(ra0.addr_max, ra2.addr_max);
-      if (maxbeg < minend) {
-        DPrintf("ThreadSanitizer: suppressing report as doubled (addr)\n");
-        equal_address = true;
-        break;
+  {
+    ReadLock lock(&ctx->racy_mtx);
+    if (flags()->suppress_equal_stacks) {
+      hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr));
+      hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr));
+      for (uptr i = 0; i < ctx->racy_stacks.Size(); i++) {
+        if (hash == ctx->racy_stacks[i]) {
+          VPrintf(2,
+              "ThreadSanitizer: suppressing report as doubled (stack)\n");
+          equal_stack = true;
+          break;
+        }
+      }
+    }
+    if (flags()->suppress_equal_addresses) {
+      for (uptr i = 0; i < ctx->racy_addresses.Size(); i++) {
+        RacyAddress ra2 = ctx->racy_addresses[i];
+        uptr maxbeg = max(ra0.addr_min, ra2.addr_min);
+        uptr minend = min(ra0.addr_max, ra2.addr_max);
+        if (maxbeg < minend) {
+          VPrintf(2, "ThreadSanitizer: suppressing report as doubled (addr)\n");
+          equal_address = true;
+          break;
+        }
      }
    }
  }
-  if (equal_stack || equal_address) {
-    if (!equal_stack)
-      ctx->racy_stacks.PushBack(hash);
-    if (!equal_address)
-      ctx->racy_addresses.PushBack(ra0);
-    return true;
+  if (!equal_stack && !equal_address)
+    return false;
+  if (!equal_stack) {
+    Lock lock(&ctx->racy_mtx);
+    ctx->racy_stacks.PushBack(hash);
   }
-  return false;
+  if (!equal_address) {
+    Lock lock(&ctx->racy_mtx);
+    ctx->racy_addresses.PushBack(ra0);
+  }
+  return true;
 }
 
 static void AddRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2],
                           uptr addr_min, uptr addr_max) {
+  Lock lock(&ctx->racy_mtx);
   if (flags()->suppress_equal_stacks) {
     RacyStacks hash;
     hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr));
@@ -490,23 +493,24 @@
   atomic_store(&ctx->last_symbolize_time_ns, NanoTime(), memory_order_relaxed);
   const ReportDesc *rep = srep.GetReport();
   Suppression *supp = 0;
-  uptr suppress_pc = 0;
-  for (uptr i = 0; suppress_pc == 0 && i < rep->mops.Size(); i++)
-    suppress_pc = IsSuppressed(rep->typ, rep->mops[i]->stack, &supp);
-  for (uptr i = 0; suppress_pc == 0 && i < rep->stacks.Size(); i++)
-    suppress_pc = IsSuppressed(rep->typ, rep->stacks[i], &supp);
-  for (uptr i = 0; suppress_pc == 0 && i < rep->threads.Size(); i++)
-    suppress_pc = IsSuppressed(rep->typ, rep->threads[i]->stack, &supp);
-  for (uptr i = 0; suppress_pc == 0 && i < rep->locs.Size(); i++)
-    suppress_pc = IsSuppressed(rep->typ, rep->locs[i], &supp);
-  if (suppress_pc != 0) {
-    FiredSuppression s = {srep.GetReport()->typ, suppress_pc, supp};
+  uptr pc_or_addr = 0;
+  for (uptr i = 0; pc_or_addr == 0 && i < rep->mops.Size(); i++)
+    pc_or_addr = IsSuppressed(rep->typ, rep->mops[i]->stack, &supp);
+  for (uptr i = 0; pc_or_addr == 0 && i < rep->stacks.Size(); i++)
+    pc_or_addr = IsSuppressed(rep->typ, rep->stacks[i], &supp);
+  for (uptr i = 0; pc_or_addr == 0 && i < rep->threads.Size(); i++)
+    pc_or_addr = IsSuppressed(rep->typ, rep->threads[i]->stack, &supp);
+  for (uptr i = 0; pc_or_addr == 0 && i < rep->locs.Size(); i++)
+    pc_or_addr = IsSuppressed(rep->typ, rep->locs[i], &supp);
+  if (pc_or_addr != 0) {
+    Lock lock(&ctx->fired_suppressions_mtx);
+    FiredSuppression s = {srep.GetReport()->typ, pc_or_addr, supp};
     ctx->fired_suppressions.push_back(s);
   }
   {
     bool old_is_freeing = thr->is_freeing;
     thr->is_freeing = false;
-    bool suppressed = OnReport(rep, suppress_pc != 0);
+    bool suppressed = OnReport(rep, pc_or_addr != 0);
     thr->is_freeing = old_is_freeing;
     if (suppressed)
       return false;
@@ -518,16 +522,16 @@
   return true;
 }
 
-bool IsFiredSuppression(Context *ctx, const ScopedReport &srep,
-                        StackTrace trace) {
+bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace) {
+  ReadLock lock(&ctx->fired_suppressions_mtx);
   for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
-    if (ctx->fired_suppressions[k].type != srep.GetReport()->typ)
+    if (ctx->fired_suppressions[k].type != type)
       continue;
     for (uptr j = 0; j < trace.size; j++) {
       FiredSuppression *s = &ctx->fired_suppressions[k];
-      if (trace.trace[j] == s->pc) {
+      if (trace.trace[j] == s->pc_or_addr) {
         if (s->supp)
-          s->supp->hit_count++;
+          atomic_fetch_add(&s->supp->hit_count, 1, memory_order_relaxed);
         return true;
       }
     }
@@ -535,16 +539,15 @@
   return false;
 }
 
-static bool IsFiredSuppression(Context *ctx,
-                               const ScopedReport &srep,
-                               uptr addr) {
+static bool IsFiredSuppression(Context *ctx, ReportType type, uptr addr) {
+  ReadLock lock(&ctx->fired_suppressions_mtx);
   for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
-    if (ctx->fired_suppressions[k].type != srep.GetReport()->typ)
+    if (ctx->fired_suppressions[k].type != type)
      continue;
    FiredSuppression *s = &ctx->fired_suppressions[k];
-    if (addr == s->pc) {
+    if (addr == s->pc_or_addr) {
      if (s->supp)
-        s->supp->hit_count++;
+        atomic_fetch_add(&s->supp->hit_count, 1, memory_order_relaxed);
      return true;
    }
  }
@@ -597,8 +600,6 @@
     return;
   }
 
-  ThreadRegistryLock l0(ctx->thread_registry);
-
   ReportType typ = ReportTypeRace;
   if (thr->is_vptr_access && freed)
     typ = ReportTypeVptrUseAfterFree;
@@ -606,29 +607,35 @@
     typ = ReportTypeVptrRace;
   else if (freed)
     typ = ReportTypeUseAfterFree;
-  ScopedReport rep(typ);
-  if (IsFiredSuppression(ctx, rep, addr))
+
+  if (IsFiredSuppression(ctx, typ, addr))
     return;
+
   const uptr kMop = 2;
   VarSizeStackTrace traces[kMop];
   const uptr toppc = TraceTopPC(thr);
   ObtainCurrentStack(thr, toppc, &traces[0]);
-  if (IsFiredSuppression(ctx, rep, traces[0]))
+  if (IsFiredSuppression(ctx, typ, traces[0]))
     return;
-  InternalScopedBuffer<MutexSet> mset2(1);
-  new(mset2.data()) MutexSet();
+
+  // MutexSet is too large to live on stack.
+  Vector<u64> mset_buffer(MBlockScopedBuf);
+  mset_buffer.Resize(sizeof(MutexSet) / sizeof(u64) + 1);
+  MutexSet *mset2 = new(&mset_buffer[0]) MutexSet();
+
   Shadow s2(thr->racy_state[1]);
-  RestoreStack(s2.tid(), s2.epoch(), &traces[1], mset2.data());
-  if (IsFiredSuppression(ctx, rep, traces[1]))
+  RestoreStack(s2.tid(), s2.epoch(), &traces[1], mset2);
+  if (IsFiredSuppression(ctx, typ, traces[1]))
     return;
   if (HandleRacyStacks(thr, traces, addr_min, addr_max))
     return;
+  ThreadRegistryLock l0(ctx->thread_registry);
+  ScopedReport rep(typ);
   for (uptr i = 0; i < kMop; i++) {
     Shadow s(thr->racy_state[i]);
-    rep.AddMemoryAccess(addr, s, traces[i],
-                        i == 0 ? &thr->mset : mset2.data());
+    rep.AddMemoryAccess(addr, s, traces[i], i == 0 ? &thr->mset : mset2);
   }
   for (uptr i = 0; i < kMop; i++) {
Index: lib/tsan/rtl/tsan_stat.h
===================================================================
--- lib/tsan/rtl/tsan_stat.h
+++ lib/tsan/rtl/tsan_stat.h
@@ -169,8 +169,9 @@
   StatMtxAnnotations,
   StatMtxAtExit,
   StatMtxMBlock,
-  StatMtxJavaMBlock,
   StatMtxDeadlockDetector,
+  StatMtxFired,
+  StatMtxRacy,
   StatMtxFD,
 
   // This must be the last.
Index: lib/tsan/rtl/tsan_stat.cc
===================================================================
--- lib/tsan/rtl/tsan_stat.cc
+++ lib/tsan/rtl/tsan_stat.cc
@@ -164,8 +164,9 @@
   name[StatMtxAtExit]                    = "  Atexit                          ";
   name[StatMtxAnnotations]               = "  Annotations                     ";
   name[StatMtxMBlock]                    = "  MBlock                          ";
-  name[StatMtxJavaMBlock]                = "  JavaMBlock                      ";
   name[StatMtxDeadlockDetector]          = "  DeadlockDetector                ";
+  name[StatMtxFired]                     = "  FiredSuppressions               ";
+  name[StatMtxRacy]                      = "  RacyStacks                      ";
   name[StatMtxFD]                        = "  FD                              ";
 
   Printf("Statistics:\n");
Index: lib/tsan/rtl/tsan_suppressions.cc
===================================================================
--- lib/tsan/rtl/tsan_suppressions.cc
+++ lib/tsan/rtl/tsan_suppressions.cc
@@ -100,8 +100,8 @@
     if (suppression_ctx->Match(info.function, stype, sp) ||
         suppression_ctx->Match(info.file, stype, sp) ||
         suppression_ctx->Match(info.module, stype, sp)) {
-      DPrintf("ThreadSanitizer: matched suppression '%s'\n", (*sp)->templ);
-      (*sp)->hit_count++;
+      VPrintf(2, "ThreadSanitizer: matched suppression '%s'\n", (*sp)->templ);
+      atomic_fetch_add(&(*sp)->hit_count, 1, memory_order_relaxed);
       return info.address;
     }
     return 0;
@@ -138,8 +138,8 @@
     const DataInfo &global = loc->global;
     if (suppression_ctx->Match(global.name, stype, &s) ||
         suppression_ctx->Match(global.module, stype, &s)) {
-      DPrintf("ThreadSanitizer: matched suppression '%s'\n", s->templ);
-      s->hit_count++;
+      VPrintf(2, "ThreadSanitizer: matched suppression '%s'\n", s->templ);
+      atomic_fetch_add(&s->hit_count, 1, memory_order_relaxed);
       *sp = s;
       return global.start;
     }
@@ -154,7 +154,7 @@
     return;
   int hit_count = 0;
   for (uptr i = 0; i < matched.size(); i++)
-    hit_count += matched[i]->hit_count;
+    hit_count += atomic_load(&matched[i]->hit_count, memory_order_relaxed);
   Printf("ThreadSanitizer: Matched %d suppressions (pid=%d):\n",
          hit_count, (int)internal_getpid());
   for (uptr i = 0; i < matched.size(); i++) {
Index: test/tsan/race_stress.cc
===================================================================
--- test/tsan/race_stress.cc
+++ test/tsan/race_stress.cc
@@ -0,0 +1,24 @@
+// RUN: %clangxx_tsan -O1 %s -o %t && %run %t 2>&1 | FileCheck %s
+#include "test.h"
+
+const int kThreads = 16;
+const int kIters = 1000;
+
+volatile int X = 0;
+
+void *thr(void *arg) {
+  for (int i = 0; i < kIters; i++)
+    X++;
+  return 0;
+}
+
+int main() {
+  pthread_t th[kThreads];
+  for (int i = 0; i < kThreads; i++)
+    pthread_create(&th[i], 0, thr, 0);
+  for (int i = 0; i < kThreads; i++)
+    pthread_join(th[i], 0);
+  fprintf(stderr, "DONE\n");
+}
+
+// CHECK: DONE
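
The counter side of the patch is mechanical: every Suppression/ExpectRace counter that used to be a plain integer becomes an atomic bumped with a relaxed fetch-add, so report paths no longer need a heavyweight lock just to update statistics. Below is a minimal standalone sketch of that pattern, using std::atomic and std::thread in place of the sanitizer's atomic_uintptr_t, atomic_fetch_add and internal threading; it is an illustration, not code from this patch.

// Sketch only: the relaxed-counter pattern the patch adopts, with std::atomic
// standing in for the sanitizer's atomic_uintptr_t helpers.
#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

struct Suppression {
  const char *templ = "";
  std::atomic<unsigned long> hit_count{0};  // plain "unsigned" before the patch
};

// Many reporting threads may match the same suppression concurrently.
// A relaxed fetch_add keeps the count exact without imposing any ordering,
// which is fine because nothing else synchronizes through this counter.
void OnSuppressionMatched(Suppression *s) {
  s->hit_count.fetch_add(1, std::memory_order_relaxed);
}

int main() {
  Suppression s;
  s.templ = "race:ExampleFunction";
  std::vector<std::thread> threads;
  for (int i = 0; i < 4; i++)
    threads.emplace_back([&s] {
      for (int j = 0; j < 1000; j++)
        OnSuppressionMatched(&s);
    });
  for (auto &t : threads)
    t.join();
  // A relaxed load is likewise enough when the count is only printed.
  std::printf("%s hit %lu times\n", s.templ,
              s.hit_count.load(std::memory_order_relaxed));
  return 0;
}

Relaxed ordering suffices here because the counter is only read back for statistics output; no other data is published through it.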
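
The new racy_mtx / fired_suppressions_mtx usage follows the usual read-mostly split: HandleRacyStacks and IsFiredSuppression take a read lock for the duplicate lookup, and an exclusive lock is taken only when a new entry is appended. A rough equivalent of that discipline, with std::shared_mutex and std::vector standing in for the runtime's Mutex, ReadLock/Lock and Vector types (again an illustration, not the runtime's actual API):

// Sketch only: shared lock on the lookup fast path, exclusive lock for inserts.
#include <algorithm>
#include <mutex>
#include <shared_mutex>
#include <vector>

struct RacyStacks {
  unsigned long long hash[2];
  bool operator==(const RacyStacks &other) const {
    return hash[0] == other.hash[0] && hash[1] == other.hash[1];
  }
};

class RacyStackRegistry {
 public:
  // Duplicate lookup takes only a shared lock, so concurrent reports
  // do not serialize on the common "no match" path.
  bool IsKnown(const RacyStacks &h) const {
    std::shared_lock<std::shared_mutex> lock(mtx_);
    return std::find(stacks_.begin(), stacks_.end(), h) != stacks_.end();
  }
  // Only the rare insertion of a new entry takes the exclusive lock,
  // mirroring what AddRacyStacks does after a report is printed.
  void Add(const RacyStacks &h) {
    std::unique_lock<std::shared_mutex> lock(mtx_);
    stacks_.push_back(h);
  }

 private:
  mutable std::shared_mutex mtx_;
  std::vector<RacyStacks> stacks_;
};

int main() {
  RacyStackRegistry registry;
  RacyStacks h = {{0x1234, 0x5678}};
  if (!registry.IsKnown(h))
    registry.Add(h);                    // first report for this stack pair goes out
  return registry.IsKnown(h) ? 0 : 1;   // later identical reports are suppressed
}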