Index: lib/tsan/dd/dd_interceptors.cc
===================================================================
--- lib/tsan/dd/dd_interceptors.cc
+++ lib/tsan/dd/dd_interceptors.cc
@@ -29,7 +29,7 @@
 static bool InitThread() {
   if (initing)
     return false;
-  if (thr != 0)
+  if (thr)
     return true;
   initing = true;
   if (!inited) {
@@ -277,7 +277,7 @@
   char name[128];
   bool prev_is_data = false;
   while (proc_maps.Next(&start, &end, &offset, name, ARRAY_SIZE(name),
-                        /*protection*/ 0)) {
+                        /*protection*/ nullptr)) {
     bool is_data = offset != 0 && name[0] != 0;
     // BSS may get merged with [heap] in /proc/self/maps. This is not very
     // reliable.
@@ -330,4 +330,4 @@
   InitDataSeg();
 }
 
-}  // namespace __dsan
+} // namespace __dsan
Index: lib/tsan/dd/dd_rtl.cc
===================================================================
--- lib/tsan/dd/dd_rtl.cc
+++ lib/tsan/dd/dd_rtl.cc
@@ -22,7 +22,7 @@
 static u32 CurrentStackTrace(Thread *thr, uptr skip) {
   BufferedStackTrace stack;
   thr->ignore_interceptors = true;
-  stack.Unwind(1000, 0, 0, 0, 0, 0, false);
+  stack.Unwind(1000, 0, 0, nullptr, 0, 0, false);
   thr->ignore_interceptors = false;
   if (stack.size <= skip)
     return 0;
@@ -37,7 +37,7 @@
 }
 
 static void ReportDeadlock(Thread *thr, DDReport *rep) {
-  if (rep == 0)
+  if (!rep)
     return;
   BlockingMutexLock lock(&ctx->report_mutex);
   Printf("==============================\n");
@@ -156,4 +156,4 @@
   ctx->dd->MutexDestroy(&cb, &h->dd);
 }
 
-}  // namespace __dsan
+} // namespace __dsan
Index: lib/tsan/rtl/tsan_clock.cc
===================================================================
--- lib/tsan/rtl/tsan_clock.cc
+++ lib/tsan/rtl/tsan_clock.cc
@@ -10,6 +10,7 @@
 // This file is a part of ThreadSanitizer (TSan), a race detector.
 //
 //===----------------------------------------------------------------------===//
+
 #include "tsan_clock.h"
 #include "tsan_rtl.h"
 #include "sanitizer_common/sanitizer_placement_new.h"
@@ -397,7 +398,7 @@
       ctx->clock_alloc.Free(c, tab_->table[i / ClockBlock::kClockCount]);
     ctx->clock_alloc.Free(c, tab_idx_);
   }
-  tab_ = 0;
+  tab_ = nullptr;
   tab_idx_ = 0;
   size_ = 0;
   release_store_tid_ = kInvalidTid;
@@ -426,4 +427,4 @@
       release_store_tid_, release_store_reused_,
       dirty_tids_[0], dirty_tids_[1]);
 }
-}  // namespace __tsan
+} // namespace __tsan
Index: lib/tsan/rtl/tsan_dense_alloc.h
===================================================================
--- lib/tsan/rtl/tsan_dense_alloc.h
+++ lib/tsan/rtl/tsan_dense_alloc.h
@@ -16,6 +16,7 @@
 // the index onto the real pointer. The index is u32, that is, 2 times smaller
 // than uptr (hense the Dense prefix).
 //===----------------------------------------------------------------------===//
+
 #ifndef TSAN_DENSE_ALLOC_H
 #define TSAN_DENSE_ALLOC_H
 
@@ -53,7 +54,7 @@
 
   ~DenseSlabAlloc() {
     for (uptr i = 0; i < kL1Size; i++) {
-      if (map_[i] != 0)
+      if (map_[i])
         UnmapOrDie(map_[i], kL2Size * sizeof(T));
     }
   }
@@ -132,6 +133,6 @@
   }
 };
 
-}  // namespace __tsan
+} // namespace __tsan
 
-#endif  // TSAN_DENSE_ALLOC_H
+#endif // TSAN_DENSE_ALLOC_H
Index: lib/tsan/rtl/tsan_fd.cc
===================================================================
--- lib/tsan/rtl/tsan_fd.cc
+++ lib/tsan/rtl/tsan_fd.cc
@@ -98,7 +98,7 @@
   // See e.g. libc __res_iclose().
   if (d->sync) {
     unref(thr, pc, d->sync);
-    d->sync = 0;
+    d->sync = nullptr;
   }
   if (flags()->io_sync == 0) {
     unref(thr, pc, s);
@@ -131,7 +131,7 @@
   // and the close.
   for (int l1 = 0; l1 < kTableSizeL1; l1++) {
     FdDesc *tab = (FdDesc*)atomic_load(&fdctx.tab[l1], memory_order_relaxed);
-    if (tab == 0)
+    if (!tab)
       break;
     for (int l2 = 0; l2 < kTableSizeL2; l2++) {
       FdDesc *d = &tab[l2];
@@ -143,7 +143,7 @@
 bool FdLocation(uptr addr, int *fd, int *tid, u32 *stack) {
   for (int l1 = 0; l1 < kTableSizeL1; l1++) {
     FdDesc *tab = (FdDesc*)atomic_load(&fdctx.tab[l1], memory_order_relaxed);
-    if (tab == 0)
+    if (!tab)
       break;
     if (addr >= (uptr)tab && addr < (uptr)(tab + kTableSizeL2)) {
       int l2 = (addr - (uptr)tab) / sizeof(FdDesc);
@@ -210,7 +210,7 @@
   // that creates fd, we will hit false postives.
   MemoryResetRange(thr, pc, (uptr)d, 8);
   unref(thr, pc, d->sync);
-  d->sync = 0;
+  d->sync = nullptr;
   d->creation_tid = 0;
   d->creation_stack = 0;
 }
@@ -252,14 +252,14 @@
   DPrintf("#%d: FdSignalCreate(%d)\n", thr->tid, fd);
   if (bogusfd(fd))
     return;
-  init(thr, pc, fd, 0);
+  init(thr, pc, fd, nullptr);
 }
 
 void FdInotifyCreate(ThreadState *thr, uptr pc, int fd) {
   DPrintf("#%d: FdInotifyCreate(%d)\n", thr->tid, fd);
   if (bogusfd(fd))
     return;
-  init(thr, pc, fd, 0);
+  init(thr, pc, fd, nullptr);
 }
 
 void FdPollCreate(ThreadState *thr, uptr pc, int fd) {
@@ -313,4 +313,4 @@
   return (uptr)&addr;
 }
 
-}  // namespace __tsan
+} // namespace __tsan
Index: lib/tsan/rtl/tsan_interceptors.cc
===================================================================
--- lib/tsan/rtl/tsan_interceptors.cc
+++ lib/tsan/rtl/tsan_interceptors.cc
@@ -28,7 +28,7 @@
 #include "tsan_mman.h"
 #include "tsan_fd.h"
 
-using namespace __tsan;  // NOLINT
+using namespace __tsan; // NOLINT
 
 #if SANITIZER_FREEBSD
 #define __errno_location __error
@@ -44,7 +44,6 @@
 #define PTHREAD_CREATE_DETACHED 2
 #endif
 
-
 #ifdef __mips__
 const int kSigCount = 129;
 #else
@@ -152,7 +151,7 @@
 #endif
 };
 
-const sighandler_t SIG_DFL = (sighandler_t)0;
+const sighandler_t SIG_DFL = (sighandler_t)nullptr;
 const sighandler_t SIG_IGN = (sighandler_t)1;
 const sighandler_t SIG_ERR = (sighandler_t)-1;
 #if SANITIZER_FREEBSD
@@ -201,14 +200,14 @@
     if (0 == internal_strcmp(s->type, kSuppressionLib))
       libignore()->AddIgnoredLibrary(s->templ);
   }
-  libignore()->OnLibraryLoaded(0);
+  libignore()->OnLibraryLoaded(nullptr);
 }
 
-}  // namespace __tsan
+} // namespace __tsan
 
 static ThreadSignalContext *SigCtx(ThreadState *thr) {
   ThreadSignalContext *ctx = (ThreadSignalContext*)thr->signal_ctx;
-  if (ctx == 0 && !thr->is_dead) {
+  if ((!ctx) && !thr->is_dead) {
     ctx = (ThreadSignalContext*)MmapOrDie(sizeof(*ctx), "ThreadSignalContext");
     MemoryResetRange(thr, (uptr)&SigCtx, (uptr)ctx, sizeof(*ctx));
     thr->signal_ctx = ctx;
@@ -348,7 +347,7 @@
   // We want to setup the atexit callback even if we are in ignored lib
   // or after fork.
   SCOPED_INTERCEPTOR_RAW(atexit, f);
-  return setup_at_exit_wrapper(thr, pc, (void(*)())f, 0, 0);
+  return setup_at_exit_wrapper(thr, pc, (void(*)())f, nullptr, nullptr);
 }
 
 TSAN_INTERCEPTOR(int, __cxa_atexit, void (*f)(void *a), void *arg, void *dso) {
@@ -434,7 +433,7 @@
   uptr mangled_sp = env[2];
 #else
   uptr mangled_sp = env[6];
-#endif  // SANITIZER_FREEBSD
+#endif // SANITIZER_FREEBSD
   // Find the saved buf by mangled_sp.
   for (uptr i = 0; i < thr->jmp_bufs.Size(); i++) {
     JmpBuf *buf = &thr->jmp_bufs[i];
@@ -522,7 +521,7 @@
 TSAN_INTERCEPTOR(void*, malloc, uptr size) {
   if (cur_thread()->in_symbolizer)
     return __libc_malloc(size);
-  void *p = 0;
+  void *p = nullptr;
   {
     SCOPED_INTERCEPTOR_RAW(malloc, size);
     p = user_alloc(thr, pc, size);
@@ -539,7 +538,7 @@
 TSAN_INTERCEPTOR(void*, calloc, uptr size, uptr n) {
   if (cur_thread()->in_symbolizer)
     return __libc_calloc(size, n);
-  void *p = 0;
+  void *p = nullptr;
   {
     SCOPED_INTERCEPTOR_RAW(calloc, size, n);
     p = user_calloc(thr, pc, size, n);
@@ -562,7 +561,7 @@
 }
 
 TSAN_INTERCEPTOR(void, free, void *p) {
-  if (p == 0)
+  if (!p)
     return;
   if (cur_thread()->in_symbolizer)
     return __libc_free(p);
@@ -572,7 +571,7 @@
 }
 
 TSAN_INTERCEPTOR(void, cfree, void *p) {
-  if (p == 0)
+  if (!p)
     return;
   if (cur_thread()->in_symbolizer)
     return __libc_free(p);
@@ -671,7 +670,7 @@
         errno = EINVAL;
         return false;
       } else {
-        *addr = 0;
+        *addr = nullptr;
       }
     }
   }
@@ -801,13 +800,12 @@
     ThreadFinish(thr);
     ThreadSignalContext *sctx = thr->signal_ctx;
     if (sctx) {
-      thr->signal_ctx = 0;
+      thr->signal_ctx = nullptr;
       UnmapOrDie(sctx, sizeof(*sctx));
     }
   }
 }
 
-
 struct ThreadParam {
   void* (*callback)(void *arg);
   void *param;
@@ -858,7 +856,7 @@
     }
   }
   __sanitizer_pthread_attr_t myattr;
-  if (attr == 0) {
+  if (!attr) {
     pthread_attr_init(&myattr);
     attr = &myattr;
   }
@@ -1003,7 +1001,7 @@
     BlockingCall bc(thr);
     res = call_pthread_cancel_with_cleanup(
         (int(*)(void *c, void *m, void *abstime))REAL(pthread_cond_wait),
-        cond, m, 0, (void(*)(void *arg))cond_mutex_unlock, &arg);
+        cond, m, nullptr, (void(*)(void *arg))cond_mutex_unlock, &arg);
   }
   if (res == errno_EOWNERDEAD)
     MutexRepair(thr, pc, (uptr)m);
@@ -1253,7 +1251,7 @@
 
 TSAN_INTERCEPTOR(int, pthread_once, void *o, void (*f)()) {
   SCOPED_INTERCEPTOR_RAW(pthread_once, o, f);
-  if (o == 0 || f == 0)
+  if ((!o) || (!f))
     return EINVAL;
   atomic_uint32_t *a = static_cast<atomic_uint32_t*>(o);
   u32 v = atomic_load(a, memory_order_acquire);
@@ -1850,7 +1848,7 @@
 
 void ProcessPendingSignals(ThreadState *thr) {
   ThreadSignalContext *sctx = SigCtx(thr);
-  if (sctx == 0 ||
+  if ((!sctx) ||
       atomic_load(&sctx->have_pending_signals, memory_order_relaxed) == 0)
     return;
   atomic_store(&sctx->have_pending_signals, 0, memory_order_relaxed);
@@ -1867,11 +1865,11 @@
             &signal->siginfo, &signal->ctx);
     }
   }
-  CHECK_EQ(0, pthread_sigmask(SIG_SETMASK, &oldset, 0));
+  CHECK_EQ(0, pthread_sigmask(SIG_SETMASK, &oldset, nullptr));
   atomic_fetch_add(&thr->in_signal_handler, -1, memory_order_relaxed);
 }
 
-}  // namespace __tsan
+} // namespace __tsan
 
 static bool is_sync_signal(ThreadSignalContext *sctx, int sig) {
   return sig == SIGSEGV || sig == SIGBUS || sig == SIGILL ||
@@ -1918,7 +1916,7 @@
     return;
   }
 
-  if (sctx == 0)
+  if (!sctx)
     return;
   SignalDesc *signal = &sctx->pending_signals[sig];
   if (signal->armed == false) {
@@ -1933,7 +1931,7 @@
 }
 
 static void rtl_sighandler(int sig) {
-  rtl_generic_sighandler(false, sig, 0, 0);
+  rtl_generic_sighandler(false, sig, nullptr, nullptr);
 }
 
 static void rtl_sigaction(int sig, my_siginfo_t *info, void *ctx) {
@@ -1944,7 +1942,7 @@
   SCOPED_TSAN_INTERCEPTOR(sigaction, sig, act, old);
   if (old)
     internal_memcpy(old, &sigactions[sig], sizeof(*old));
-  if (act == 0)
+  if (!act)
     return 0;
   // Copy act into sigactions[sig].
   // Can't use struct copy, because compiler can emit call to memcpy.
@@ -1969,7 +1967,7 @@
       newact.sa_handler = rtl_sighandler;
   }
   ReleaseStore(thr, pc, (uptr)&sigactions[sig]);
-  int res = REAL(sigaction)(sig, &newact, 0);
+  int res = REAL(sigaction)(sig, &newact, nullptr);
   return res;
 }
 
@@ -2570,7 +2568,7 @@
   // Need to setup it, because interceptors check that the function is resolved.
   // But atexit is emitted directly into the module, so can't be resolved.
   REAL(atexit) = (int(*)(void(*)()))unreachable;
-  if (REAL(__cxa_atexit)(&finalize, 0, 0)) {
+  if (REAL(__cxa_atexit)(&finalize, nullptr, nullptr)) {
     Printf("ThreadSanitizer: failed to setup atexit callback\n");
     Die();
   }
@@ -2583,4 +2581,4 @@
   FdInit();
 }
 
-}  // namespace __tsan
+} // namespace __tsan
Index: lib/tsan/rtl/tsan_interface_ann.cc
===================================================================
--- lib/tsan/rtl/tsan_interface_ann.cc
+++ lib/tsan/rtl/tsan_interface_ann.cc
@@ -10,6 +10,7 @@
 // This file is a part of ThreadSanitizer (TSan), a race detector.
 //
 //===----------------------------------------------------------------------===//
+
 #include "sanitizer_common/sanitizer_libc.h"
 #include "sanitizer_common/sanitizer_internal_defs.h"
 #include "sanitizer_common/sanitizer_placement_new.h"
@@ -122,12 +123,12 @@
     if (maxbegin < minend)
       return race;
   }
-  return 0;
+  return nullptr;
 }
 
 static bool CheckContains(ExpectRace *list, uptr addr, uptr size) {
   ExpectRace *race = FindRace(list, addr, size);
-  if (race == 0)
+  if (!race)
     return false;
   DPrintf("Hit expected/benign race: %s addr=%zx:%d %s:%d\n",
       race->desc, race->addr, (int)race->size, race->file, race->line);
@@ -218,7 +219,7 @@
       race->desc, race->addr, race->file, race->line);
   Printf("==================\n");
 }
-}  // namespace __tsan
+} // namespace __tsan
 
 using namespace __tsan;  // NOLINT
 
@@ -458,4 +459,4 @@
 AnnotateMemoryIsInitialized(char *f, int l, uptr mem, uptr sz) {}
 void INTERFACE_ATTRIBUTE
 AnnotateMemoryIsUninitialized(char *f, int l, uptr mem, uptr sz) {}
-}  // extern "C"
+} // extern "C"
Index: lib/tsan/rtl/tsan_interface_atomic.cc
===================================================================
--- lib/tsan/rtl/tsan_interface_atomic.cc
+++ lib/tsan/rtl/tsan_interface_atomic.cc
@@ -298,7 +298,7 @@
 template<typename T, T (*F)(volatile T *v, T op)>
 static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
   MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
-  SyncVar *s = 0;
+  SyncVar *s = nullptr;
   if (mo != mo_relaxed) {
     s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, true);
     thr->fast_state.IncrementEpoch();
@@ -422,7 +422,7 @@
     volatile T *a, T *c, T v, morder mo, morder fmo) {
   (void)fmo;  // Unused because llvm does not pass it yet.
   MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
-  SyncVar *s = 0;
+  SyncVar *s = nullptr;
   bool write_lock = mo != mo_acquire && mo != mo_consume;
   if (mo != mo_relaxed) {
     s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, write_lock);
@@ -859,16 +859,16 @@
 
 SANITIZER_INTERFACE_ATTRIBUTE
 void __tsan_atomic_thread_fence(morder mo) {
-  char* a = 0;
+  char* a = nullptr;
   SCOPED_ATOMIC(Fence, mo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 void __tsan_atomic_signal_fence(morder mo) {
 }
-}  // extern "C"
+} // extern "C"
 
-#else  // #ifndef SANITIZER_GO
+#else // SANITIZER_GO
 
 // Go
 
@@ -950,5 +950,5 @@
   ATOMIC_RET(CAS, cur, *(a64**)a, cmp, *(a64*)(a+16), mo_acq_rel, mo_acquire);
   *(bool*)(a+24) = (cur == cmp);
 }
-}  // extern "C"
-#endif  // #ifndef SANITIZER_GO
+} // extern "C"
+#endif // SANITIZER_GO
Index: lib/tsan/rtl/tsan_mman.h
===================================================================
--- lib/tsan/rtl/tsan_mman.h
+++ lib/tsan/rtl/tsan_mman.h
@@ -10,6 +10,7 @@
 // This file is a part of ThreadSanitizer (TSan), a race detector.
 //
 //===----------------------------------------------------------------------===//
+
 #ifndef TSAN_MMAN_H
 #define TSAN_MMAN_H
 
@@ -74,8 +75,9 @@
 void DestroyAndFree(T *&p) {
   p->~T();
   internal_free(p);
-  p = 0;
+  p = nullptr;
 }
 
-}  // namespace __tsan
-#endif  // TSAN_MMAN_H
+} // namespace __tsan
+
+#endif // TSAN_MMAN_H
Index: lib/tsan/rtl/tsan_mman.cc
===================================================================
--- lib/tsan/rtl/tsan_mman.cc
+++ lib/tsan/rtl/tsan_mman.cc
@@ -10,6 +10,7 @@
 // This file is a part of ThreadSanitizer (TSan), a race detector.
 //
 //===----------------------------------------------------------------------===//
+
 #include "sanitizer_common/sanitizer_allocator_interface.h"
 #include "sanitizer_common/sanitizer_common.h"
 #include "sanitizer_common/sanitizer_placement_new.h"
@@ -97,8 +98,8 @@
   if ((sz >= (1ull << 40)) || (align >= (1ull << 40)))
     return allocator()->ReturnNullOrDie();
   void *p = allocator()->Allocate(&thr->alloc_cache, sz, align);
-  if (p == 0)
-    return 0;
+  if (!p)
+    return nullptr;
   if (ctx && ctx->initialized)
     OnUserAlloc(thr, pc, (uptr)p, sz, true);
   if (signal)
@@ -133,7 +134,7 @@
 }
 
 void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write) {
-  CHECK_NE(p, (void*)0);
+  CHECK_NE(p, nullptr);
   uptr sz = ctx->metamap.FreeBlock(thr, pc, p);
   DPrintf("#%d: free(%p, %zu)\n", thr->tid, p, sz);
   if (write && thr->ignore_reads_and_writes == 0)
@@ -141,13 +142,13 @@
 }
 
 void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
-  void *p2 = 0;
+  void *p2 = nullptr;
   // FIXME: Handle "shrinking" more efficiently,
   // it seems that some software actually does this.
   if (sz) {
     p2 = user_alloc(thr, pc, sz);
-    if (p2 == 0)
-      return 0;
+    if (!p2)
+      return nullptr;
     if (p) {
       uptr oldsz = user_alloc_usable_size(p);
       internal_memcpy(p2, p, min(oldsz, sz));
@@ -159,7 +160,7 @@
 }
 
 uptr user_alloc_usable_size(const void *p) {
-  if (p == 0)
+  if (!p)
     return 0;
   MBlock *b = ctx->metamap.GetBlock((uptr)p);
   return b ? b->siz : 0;
@@ -167,14 +168,14 @@
 
 void invoke_malloc_hook(void *ptr, uptr size) {
   ThreadState *thr = cur_thread();
-  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
+  if ((!ctx) || !ctx->initialized || thr->ignore_interceptors)
     return;
   __sanitizer_malloc_hook(ptr, size);
 }
 
 void invoke_free_hook(void *ptr) {
   ThreadState *thr = cur_thread();
-  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
+  if ((!ctx) || !ctx->initialized || thr->ignore_interceptors)
     return;
   __sanitizer_free_hook(ptr);
 }
@@ -197,7 +198,7 @@
   InternalFree(p, &thr->internal_alloc_cache);
 }
 
-}  // namespace __tsan
+} // namespace __tsan
 
 using namespace __tsan;
 
@@ -227,7 +228,7 @@
 }
 
 int __sanitizer_get_ownership(const void *p) {
-  return allocator()->GetBlockBegin(p) != 0;
+  return allocator()->GetBlockBegin(p) != nullptr;
 }
 
 uptr __sanitizer_get_allocated_size(const void *p) {
@@ -240,4 +241,4 @@
   internal_allocator()->SwallowCache(&thr->internal_alloc_cache);
   ctx->metamap.OnThreadIdle(thr);
 }
-}  // extern "C"
+} // extern "C"
Index: lib/tsan/rtl/tsan_platform_linux.cc
===================================================================
--- lib/tsan/rtl/tsan_platform_linux.cc
+++ lib/tsan/rtl/tsan_platform_linux.cc
@@ -12,8 +12,8 @@
 // Linux- and FreeBSD-specific code.
 //===----------------------------------------------------------------------===//
 
+#include "sanitizer_common/sanitizer_platform.h"
 
-#include "sanitizer_common/sanitizer_platform.h"
 #if SANITIZER_LINUX || SANITIZER_FREEBSD
 
 #include "sanitizer_common/sanitizer_common.h"
@@ -127,7 +127,7 @@
 
 void FlushShadowMemory() {
 #if SANITIZER_LINUX
-  StopTheWorld(FlushShadowMemoryCallback, 0);
+  StopTheWorld(FlushShadowMemoryCallback, nullptr);
 #endif
 }
 
@@ -148,13 +148,13 @@
 static void MapRodata() {
   // First create temp file.
   const char *tmpdir = GetEnv("TMPDIR");
-  if (tmpdir == 0)
+  if (!tmpdir)
     tmpdir = GetEnv("TEST_TMPDIR");
 #ifdef P_tmpdir
-  if (tmpdir == 0)
+  if (!tmpdir)
     tmpdir = P_tmpdir;
 #endif
-  if (tmpdir == 0)
+  if (!tmpdir)
     return;
   char name[256];
   internal_snprintf(name, sizeof(name), "%s/tsan.rodata.%d",
@@ -172,7 +172,7 @@
     *p = kShadowRodata;
   internal_write(fd, marker.data(), marker.size());
   // Map the file into memory.
-  uptr page = internal_mmap(0, GetPageSizeCached(), PROT_READ | PROT_WRITE,
+  uptr page = internal_mmap(nullptr, GetPageSizeCached(), PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, fd, 0);
   if (internal_iserror(page)) {
     internal_close(fd);
@@ -275,7 +275,7 @@
 #else
   bool prev_is_data = false;
   while (proc_maps.Next(&start, &end, &offset, name, ARRAY_SIZE(name),
-                        /*protection*/ 0)) {
+                        /*protection*/ nullptr)) {
     DPrintf("%p-%p %p %s\n", start, end, offset, name);
     bool is_data = offset != 0 && name[0] != 0;
     // BSS may get merged with [heap] in /proc/self/maps. This is not very
@@ -299,7 +299,7 @@
   // Ensure that the binary is indeed compiled with -pie.
   MemoryMappingLayout proc_maps(true);
   uptr p, end;
-  while (proc_maps.Next(&p, &end, 0, 0, 0, 0)) {
+  while (proc_maps.Next(&p, &end, nullptr, nullptr, 0, nullptr)) {
     if (IsAppMem(p))
       continue;
     if (p >= kHeapMemEnd &&
@@ -320,7 +320,7 @@
   ProtectRange(kTraceMemEnd, kHeapMemBeg);
   ProtectRange(HeapEnd(), kHiAppMemBeg);
 }
-#endif  // #ifndef SANITIZER_GO
+#endif // SANITIZER_GO
 
 void InitializePlatform() {
   DisableCoreDumperIfNecessary();
@@ -418,6 +418,6 @@
 }
 #endif
 
-}  // namespace __tsan
+} // namespace __tsan
 
-#endif  // SANITIZER_LINUX || SANITIZER_FREEBSD
+#endif // SANITIZER_LINUX || SANITIZER_FREEBSD
Index: lib/tsan/rtl/tsan_report.cc
===================================================================
--- lib/tsan/rtl/tsan_report.cc
+++ lib/tsan/rtl/tsan_report.cc
@@ -10,6 +10,7 @@
 // This file is a part of ThreadSanitizer (TSan), a race detector.
 //
 //===----------------------------------------------------------------------===//
+
 #include "tsan_report.h"
 #include "tsan_platform.h"
 #include "tsan_rtl.h"
@@ -112,7 +113,7 @@
 }
 
 void PrintStack(const ReportStack *ent) {
-  if (ent == 0 || ent->frames == 0) {
+  if ((!ent) || (!ent->frames)) {
     Printf("    [failed to restore the stack]\n\n");
     return;
   }
@@ -249,14 +250,14 @@
     return rep->mutexes[0]->stack;
   if (rep->threads.Size())
     return rep->threads[0]->stack;
-  return 0;
+  return nullptr;
 }
 
 static bool FrameIsInternal(const SymbolizedStack *frame) {
-  if (frame == 0)
+  if (!frame)
     return false;
   const char *file = frame->info.file;
-  return file != 0 &&
+  return file &&
          (internal_strstr(file, "tsan_interceptors.cc") ||
           internal_strstr(file, "sanitizer_common_interceptors.inc") ||
           internal_strstr(file, "tsan_interface_"));
@@ -343,7 +344,7 @@
   Printf("==================\n");
 }
 
-#else  // #ifndef SANITIZER_GO
+#else // SANITIZER_GO
 
 const int kMainThreadId = 1;
 
@@ -407,6 +408,6 @@
   Printf("==================\n");
 }
 
-#endif
+#endif // SANITIZER_GO
 
-}  // namespace __tsan
+} // namespace __tsan
Index: lib/tsan/rtl/tsan_rtl.cc
===================================================================
--- lib/tsan/rtl/tsan_rtl.cc
+++ lib/tsan/rtl/tsan_rtl.cc
@@ -220,14 +220,14 @@
 }
 
 static void StartBackgroundThread() {
-  ctx->background_thread = internal_start_thread(&BackgroundThread, 0);
+  ctx->background_thread = internal_start_thread(&BackgroundThread, nullptr);
 }
 
 #ifndef __mips__
 static void StopBackgroundThread() {
   atomic_store(&ctx->stop_background_thread, 1, memory_order_relaxed);
   internal_join_thread(ctx->background_thread);
-  ctx->background_thread = 0;
+  ctx->background_thread = nullptr;
 }
 #endif
 #endif
@@ -439,7 +439,8 @@
   ctx->thread_registry->Unlock();
 
   uptr nthread = 0;
-  ctx->thread_registry->GetNumberOfThreads(0, 0, &nthread /* alive threads */);
+  ctx->thread_registry->GetNumberOfThreads(nullptr, nullptr,
+                                           &nthread /* alive threads */);
   VPrintf(1, "ThreadSanitizer: forked new process with pid %d,"
       " parent had %d threads\n", (int)internal_getpid(), (int)nthread);
   if (nthread == 1) {
@@ -619,7 +620,6 @@
   return;
  RACE:
   HandleRace(thr, shadow_mem, cur, old);
-  return;
 }
 
 void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr,
@@ -1009,7 +1009,7 @@
 void build_consistency_nostats() {}
 #endif
 
-}  // namespace __tsan
+} // namespace __tsan
 
 #ifndef SANITIZER_GO
 // Must be included in this file to make sure everything is inlined.
Index: lib/tsan/rtl/tsan_rtl_mutex.cc
===================================================================
--- lib/tsan/rtl/tsan_rtl_mutex.cc
+++ lib/tsan/rtl/tsan_rtl_mutex.cc
@@ -11,8 +11,8 @@
 //
 //===----------------------------------------------------------------------===//
 
-#include <sanitizer_common/sanitizer_deadlock_detector_interface.h>
-#include <sanitizer_common/sanitizer_stackdepot.h>
+#include "sanitizer_common/sanitizer_deadlock_detector_interface.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
 
 #include "tsan_rtl.h"
 #include "tsan_flags.h"
@@ -97,7 +97,7 @@
     thr->is_freeing = false;
   }
   SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr);
-  if (s == 0)
+  if (!s)
     return;
   if (common_flags()->detect_deadlocks) {
     Callback cb(thr, pc);
@@ -124,14 +124,14 @@
     ObtainCurrentStack(thr, pc, &trace);
     rep.AddStack(trace);
     FastState last(last_lock);
-    RestoreStack(last.tid(), last.epoch(), &trace, 0);
+    RestoreStack(last.tid(), last.epoch(), &trace, nullptr);
    rep.AddStack(trace, true);
    rep.AddLocation(addr, 1);
    OutputReport(thr, rep);
  }
  if (unlock_locked) {
    SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr);
-    if (s != 0) {
+    if (s) {
      s->Reset(thr);
      s->mtx.Unlock();
    }
@@ -459,7 +459,7 @@
 }
 
 void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) {
-  if (r == 0)
+  if (!r)
     return;
   ThreadRegistryLock l(ctx->thread_registry);
   ScopedReport rep(ReportTypeDeadlock);
@@ -484,4 +484,4 @@
   OutputReport(thr, rep);
 }
 
-}  // namespace __tsan
+} // namespace __tsan
Index: lib/tsan/rtl/tsan_rtl_report.cc
===================================================================
--- lib/tsan/rtl/tsan_rtl_report.cc
+++ lib/tsan/rtl/tsan_rtl_report.cc
@@ -64,7 +64,7 @@
     last_frame = cur;
   }
 
-  if (last_frame2 == 0)
+  if (!last_frame2)
     return;
 #ifndef SANITIZER_GO
   const char *last = last_frame->info.function;
@@ -98,7 +98,7 @@
 
 ReportStack *SymbolizeStackId(u32 stack_id) {
   if (stack_id == 0)
-    return 0;
+    return nullptr;
   StackTrace stack = StackDepotGet(stack_id);
   if (stack.trace == nullptr)
     return nullptr;
@@ -107,7 +107,7 @@
 
 static ReportStack *SymbolizeStack(StackTrace trace) {
   if (trace.size == 0)
-    return 0;
+    return nullptr;
   SymbolizedStack *top = nullptr;
   for (uptr si = 0; si < trace.size; si++) {
     const uptr pc = trace.trace[si];
@@ -193,7 +193,7 @@
   rt->running = (tctx->status == ThreadStatusRunning);
   rt->name = internal_strdup(tctx->name);
   rt->parent_tid = tctx->parent_tid;
-  rt->stack = 0;
+  rt->stack = nullptr;
   rt->stack = SymbolizeStackId(tctx->creation_stack_id);
   if (rt->stack)
     rt->stack->suppressable = suppressable;
@@ -209,7 +209,7 @@
       return tctx;
     }
   }
-  return 0;
+  return nullptr;
 }
 
 static ThreadContext *FindThreadByTidLocked(int tid) {
@@ -235,7 +235,7 @@
       ctx->thread_registry->FindThreadContextLocked(IsInStackOrTls,
                                                     (void*)addr));
   if (!tctx)
-    return 0;
+    return nullptr;
   ThreadState *thr = tctx->thr;
   CHECK(thr);
   *is_stack = (addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size);
@@ -294,7 +294,7 @@
   rm->id = id;
   rm->addr = 0;
   rm->destroyed = true;
-  rm->stack = 0;
+  rm->stack = nullptr;
 }
 
 void ScopedReport::AddLocation(uptr addr, uptr size) {
@@ -315,14 +315,14 @@
       AddThread(tctx);
     return;
   }
-  MBlock *b = 0;
+  MBlock *b = nullptr;
   Allocator *a = allocator();
   if (a->PointerIsMine((void*)addr)) {
     void *block_begin = a->GetBlockBegin((void*)addr);
     if (block_begin)
       b = ctx->metamap.GetBlock((uptr)block_begin);
   }
-  if (b != 0) {
+  if (b) {
     ThreadContext *tctx = FindThreadByTidLocked(b->tid);
     ReportLocation *loc = ReportLocation::New(ReportLocationHeap);
     loc->heap_chunk_start = (uptr)allocator()->GetBlockBegin((void *)addr);
@@ -492,7 +492,7 @@
     return false;
   atomic_store_relaxed(&ctx->last_symbolize_time_ns, NanoTime());
   const ReportDesc *rep = srep.GetReport();
-  Suppression *supp = 0;
+  Suppression *supp = nullptr;
   uptr pc_or_addr = 0;
   for (uptr i = 0; pc_or_addr == 0 && i < rep->mops.Size(); i++)
     pc_or_addr = IsSuppressed(rep->typ, rep->mops[i]->stack, &supp);
@@ -674,7 +674,7 @@
   BufferedStackTrace *ptrace =
       new(internal_alloc(MBlockStackTrace, sizeof(BufferedStackTrace)))
           BufferedStackTrace();
-  ptrace->Unwind(kStackTraceMax, pc, 0, 0, 0, 0, false);
+  ptrace->Unwind(kStackTraceMax, pc, 0, nullptr, 0, 0, false);
   for (uptr i = 0; i < ptrace->size / 2; i++) {
     uptr tmp = ptrace->trace_buffer[i];
     ptrace->trace_buffer[i] = ptrace->trace_buffer[ptrace->size - i - 1];
@@ -684,7 +684,7 @@
 #endif
 }
 
-}  // namespace __tsan
+} // namespace __tsan
 
 using namespace __tsan;
 
@@ -693,4 +693,4 @@
 void __sanitizer_print_stack_trace() {
   PrintCurrentStackSlow(StackTrace::GetCurrentPc());
 }
-}  // extern "C"
+} // extern "C"
Index: lib/tsan/rtl/tsan_rtl_thread.cc
===================================================================
--- lib/tsan/rtl/tsan_rtl_thread.cc
+++ lib/tsan/rtl/tsan_rtl_thread.cc
@@ -51,7 +51,7 @@
 };
 
 void ThreadContext::OnCreated(void *arg) {
-  thr = 0;
+  thr = nullptr;
   if (tid == 0)
     return;
   OnCreatedArgs *args = static_cast<OnCreatedArgs*>(arg);
@@ -149,7 +149,7 @@
 #if TSAN_COLLECT_STATS
   StatAggregate(ctx->stat, thr->stat);
 #endif
-  thr = 0;
+  thr = nullptr;
 }
 
 #ifndef SANITIZER_GO
@@ -224,7 +224,7 @@
 
 int ThreadCount(ThreadState *thr) {
   uptr result;
-  ctx->thread_registry->GetNumberOfThreads(0, 0, &result);
+  ctx->thread_registry->GetNumberOfThreads(nullptr, nullptr, &result);
   return (int)result;
 }
 
@@ -406,4 +406,4 @@
   }
 }
 
-}  // namespace __tsan
+} // namespace __tsan
Index: lib/tsan/rtl/tsan_suppressions.cc
===================================================================
--- lib/tsan/rtl/tsan_suppressions.cc
+++ lib/tsan/rtl/tsan_suppressions.cc
@@ -35,7 +35,7 @@
 
 // Can be overriden in frontend.
extern "C" const char *WEAK __tsan_default_suppressions() { - return 0; + return nullptr; } #endif @@ -109,7 +109,7 @@ uptr IsSuppressed(ReportType typ, const ReportStack *stack, Suppression **sp) { CHECK(suppression_ctx); - if (!suppression_ctx->SuppressionCount() || stack == 0 || + if (!suppression_ctx->SuppressionCount() || (!stack) || !stack->suppressable) return 0; const char *stype = conv(typ); @@ -128,7 +128,7 @@ uptr IsSuppressed(ReportType typ, const ReportLocation *loc, Suppression **sp) { CHECK(suppression_ctx); - if (!suppression_ctx->SuppressionCount() || loc == 0 || + if (!suppression_ctx->SuppressionCount() || (!loc) || loc->type != ReportLocationGlobal || !loc->suppressable) return 0; const char *stype = conv(typ); @@ -162,4 +162,4 @@ matched[i]->templ); } } -} // namespace __tsan +} // namespace __tsan Index: lib/tsan/rtl/tsan_symbolize.cc =================================================================== --- lib/tsan/rtl/tsan_symbolize.cc +++ lib/tsan/rtl/tsan_symbolize.cc @@ -69,7 +69,7 @@ ReportLocation *SymbolizeData(uptr addr) { DataInfo info; if (!Symbolizer::GetOrInit()->SymbolizeData(addr, &info)) - return 0; + return nullptr; ReportLocation *ent = ReportLocation::New(ReportLocationGlobal); ent->global = info; return ent; @@ -79,4 +79,4 @@ Symbolizer::GetOrInit()->Flush(); } -} // namespace __tsan +} // namespace __tsan Index: lib/tsan/rtl/tsan_sync.cc =================================================================== --- lib/tsan/rtl/tsan_sync.cc +++ lib/tsan/rtl/tsan_sync.cc @@ -10,6 +10,7 @@ // This file is a part of ThreadSanitizer (TSan), a race detector. // //===----------------------------------------------------------------------===// + #include "sanitizer_common/sanitizer_placement_new.h" #include "tsan_sync.h" #include "tsan_rtl.h" @@ -21,7 +22,7 @@ SyncVar::SyncVar() : mtx(MutexTypeSyncVar, StatMtxSyncVar) { - Reset(0); + Reset(nullptr); } void SyncVar::Init(ThreadState *thr, uptr pc, uptr addr, u64 uid) { @@ -47,7 +48,7 @@ is_broken = 0; is_linker_init = 0; - if (thr == 0) { + if (!thr) { CHECK_EQ(clock.size(), 0); CHECK_EQ(read_clock.size(), 0); } else { @@ -73,7 +74,7 @@ uptr MetaMap::FreeBlock(ThreadState *thr, uptr pc, uptr p) { MBlock* b = GetBlock(p); - if (b == 0) + if (!b) return 0; uptr sz = RoundUpTo(b->siz, kMetaShadowCell); FreeRange(thr, pc, p, sz); @@ -175,7 +176,7 @@ u32 idx = *meta; for (;;) { if (idx == 0) - return 0; + return nullptr; if (idx & kFlagBlock) return block_alloc_.Map(idx & ~kFlagMask); DCHECK(idx & kFlagSync); @@ -190,7 +191,7 @@ } SyncVar* MetaMap::GetIfExistsAndLock(uptr addr) { - return GetAndLock(0, 0, addr, true, false); + return GetAndLock(nullptr, 0, addr, true, false); } SyncVar* MetaMap::GetAndLock(ThreadState *thr, uptr pc, @@ -198,7 +199,7 @@ u32 *meta = MemToMeta(addr); u32 idx0 = *meta; u32 myidx = 0; - SyncVar *mys = 0; + SyncVar *mys = nullptr; for (;;) { u32 idx = idx0; for (;;) { @@ -222,7 +223,7 @@ idx = s->next; } if (!create) - return 0; + return nullptr; if (*meta != idx0) { idx0 = *meta; continue; @@ -284,4 +285,4 @@ sync_alloc_.FlushCache(&thr->sync_cache); } -} // namespace __tsan +} // namespace __tsan