Index: lib/sanitizer_common/sanitizer_stacktrace.h
===================================================================
--- lib/sanitizer_common/sanitizer_stacktrace.h
+++ lib/sanitizer_common/sanitizer_stacktrace.h
@@ -64,6 +64,7 @@
   BufferedStackTrace() : StackTrace(trace_buffer, 0), top_frame_bp(0) {}
 
+  void Init(const uptr *pcs, uptr cnt, uptr extra_top_pc = 0);
   void Unwind(uptr max_depth, uptr pc, uptr bp, void *context, uptr stack_top,
               uptr stack_bottom, bool request_fast_unwind);
 
@@ -75,6 +76,9 @@
                        uptr max_depth);
   void PopStackFrames(uptr count);
   uptr LocatePcInTrace(uptr pc);
+
+  BufferedStackTrace(const BufferedStackTrace &);
+  void operator=(const BufferedStackTrace &);
 };
 
 }  // namespace __sanitizer
Index: lib/sanitizer_common/sanitizer_stacktrace.cc
===================================================================
--- lib/sanitizer_common/sanitizer_stacktrace.cc
+++ lib/sanitizer_common/sanitizer_stacktrace.cc
@@ -36,6 +36,15 @@
   return GET_CALLER_PC();
 }
 
+void BufferedStackTrace::Init(const uptr *pcs, uptr cnt, uptr extra_top_pc) {
+  size = cnt + !!extra_top_pc;
+  CHECK_LE(size, kStackTraceMax);
+  internal_memcpy(trace_buffer, pcs, cnt * sizeof(trace_buffer[0]));
+  if (extra_top_pc)
+    trace_buffer[cnt] = extra_top_pc;
+  top_frame_bp = 0;
+}
+
 // Check if given pointer points into allocated stack area.
 static inline bool IsValidFrame(uptr frame, uptr stack_top, uptr stack_bottom) {
   return frame > stack_bottom && frame < stack_top - 2 * sizeof (uhwptr);
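Note: the BufferedStackTrace::Init added above copies a ready-made pc array
into the fixed on-stack buffer and optionally appends one extra top pc; the
!!extra_top_pc trick reserves the extra slot in size. A minimal standalone
sketch of that contract (uptr, kStackTraceMax and FixedTrace below are
simplified local stand-ins, not the sanitizer definitions; assert/memcpy
replace CHECK_LE/internal_memcpy):

    #include <assert.h>
    #include <string.h>

    typedef unsigned long uptr;
    static const uptr kStackTraceMax = 256;

    struct FixedTrace {
      uptr trace_buffer[kStackTraceMax];
      uptr size;

      void Init(const uptr *pcs, uptr cnt, uptr extra_top_pc = 0) {
        // !!extra_top_pc is 1 when an extra pc is supplied, 0 otherwise,
        // so size accounts for the appended slot up front.
        size = cnt + !!extra_top_pc;
        assert(size <= kStackTraceMax);
        memcpy(trace_buffer, pcs, cnt * sizeof(trace_buffer[0]));
        if (extra_top_pc)
          trace_buffer[cnt] = extra_top_pc;
      }
    };

    int main() {
      uptr pcs[] = {0x100, 0x101};
      FixedTrace t;
      t.Init(pcs, 2, 0x42);  // two frames plus an explicit top pc
      assert(t.size == 3 && t.trace_buffer[2] == 0x42);
      t.Init(pcs, 2);        // no extra pc: size is just cnt
      assert(t.size == 2);
      return 0;
    }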
Index: lib/tsan/rtl/tsan_defs.h
===================================================================
--- lib/tsan/rtl/tsan_defs.h
+++ lib/tsan/rtl/tsan_defs.h
@@ -43,7 +43,6 @@
 const int kClkBits = 42;
 const unsigned kMaxTidReuse = (1 << (64 - kClkBits)) - 1;
 const uptr kShadowStackSize = 64 * 1024;
-const uptr kTraceStackSize = 256;
 
 #ifdef TSAN_SHADOW_COUNT
 # if TSAN_SHADOW_COUNT == 2 \
@@ -174,7 +173,6 @@
 struct ReportStack;
 class ReportDesc;
 class RegionAlloc;
-class StackTrace;
 
 // Descriptor of user's memory block.
 struct MBlock {
Index: lib/tsan/rtl/tsan_interceptors.cc
===================================================================
--- lib/tsan/rtl/tsan_interceptors.cc
+++ lib/tsan/rtl/tsan_interceptors.cc
@@ -216,7 +216,7 @@
     ThreadState *thr = cur_thread(); \
     const uptr caller_pc = GET_CALLER_PC(); \
     ScopedInterceptor si(thr, #func, caller_pc); \
-    const uptr pc = __sanitizer::StackTrace::GetCurrentPc(); \
+    const uptr pc = StackTrace::GetCurrentPc(); \
    (void)pc; \
 /**/
@@ -1884,12 +1884,12 @@
   // from rtl_generic_sighandler) we have not yet received the reraised
   // signal; and it looks too fragile to intercept all ways to reraise a signal.
   if (flags()->report_bugs && !sync && sig != SIGTERM && errno != 99) {
-    __tsan::StackTrace stack;
-    stack.ObtainCurrent(thr, pc);
+    VarSizeStackTrace stack;
+    ObtainCurrentStack(thr, pc, &stack);
     ThreadRegistryLock l(ctx->thread_registry);
     ScopedReport rep(ReportTypeErrnoInSignal);
     if (!IsFiredSuppression(ctx, rep, stack)) {
-      rep.AddStack(&stack, true);
+      rep.AddStack(stack, true);
       OutputReport(thr, rep);
     }
   }
Index: lib/tsan/rtl/tsan_interface_ann.cc
===================================================================
--- lib/tsan/rtl/tsan_interface_ann.cc
+++ lib/tsan/rtl/tsan_interface_ann.cc
@@ -54,7 +54,7 @@
     StatInc(thr, StatAnnotation); \
     StatInc(thr, Stat##typ); \
     ScopedAnnotation sa(thr, __func__, f, l, caller_pc); \
-    const uptr pc = __sanitizer::StackTrace::GetCurrentPc(); \
+    const uptr pc = StackTrace::GetCurrentPc(); \
    (void)pc; \
 /**/
Index: lib/tsan/rtl/tsan_interface_atomic.cc
===================================================================
--- lib/tsan/rtl/tsan_interface_atomic.cc
+++ lib/tsan/rtl/tsan_interface_atomic.cc
@@ -474,7 +474,7 @@
 #define SCOPED_ATOMIC(func, ...) \
     const uptr callpc = (uptr)__builtin_return_address(0); \
-    uptr pc = __sanitizer::StackTrace::GetCurrentPc(); \
+    uptr pc = StackTrace::GetCurrentPc(); \
     mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo; \
     ThreadState *const thr = cur_thread(); \
     if (thr->ignore_interceptors) \
Index: lib/tsan/rtl/tsan_interface_java.cc
===================================================================
--- lib/tsan/rtl/tsan_interface_java.cc
+++ lib/tsan/rtl/tsan_interface_java.cc
@@ -61,7 +61,7 @@
 #define SCOPED_JAVA_FUNC(func) \
   ThreadState *thr = cur_thread(); \
   const uptr caller_pc = GET_CALLER_PC(); \
-  const uptr pc = __sanitizer::StackTrace::GetCurrentPc(); \
+  const uptr pc = StackTrace::GetCurrentPc(); \
   (void)pc; \
   ScopedJavaFunc scoped(thr, caller_pc); \
 /**/
Index: lib/tsan/rtl/tsan_mman.cc
===================================================================
--- lib/tsan/rtl/tsan_mman.cc
+++ lib/tsan/rtl/tsan_mman.cc
@@ -66,12 +66,12 @@
   if (atomic_load(&thr->in_signal_handler, memory_order_relaxed) == 0 ||
       !flags()->report_signal_unsafe)
     return;
-  StackTrace stack;
-  stack.ObtainCurrent(thr, pc);
+  VarSizeStackTrace stack;
+  ObtainCurrentStack(thr, pc, &stack);
   ThreadRegistryLock l(ctx->thread_registry);
   ScopedReport rep(ReportTypeSignalUnsafe);
   if (!IsFiredSuppression(ctx, rep, stack)) {
-    rep.AddStack(&stack, true);
+    rep.AddStack(stack, true);
     OutputReport(thr, rep);
   }
 }
Index: lib/tsan/rtl/tsan_rtl.h
===================================================================
--- lib/tsan/rtl/tsan_rtl.h
+++ lib/tsan/rtl/tsan_rtl.h
@@ -498,9 +498,9 @@
   explicit ScopedReport(ReportType typ);
   ~ScopedReport();
 
-  void AddMemoryAccess(uptr addr, Shadow s, const StackTrace *stack,
+  void AddMemoryAccess(uptr addr, Shadow s, StackTrace stack,
                        const MutexSet *mset);
-  void AddStack(const StackTrace *stack, bool suppressable = false);
+  void AddStack(StackTrace stack, bool suppressable = false);
   void AddThread(const ThreadContext *tctx, bool suppressable = false);
   void AddThread(int unique_tid, bool suppressable = false);
   void AddUniqueTid(int unique_tid);
@@ -524,7 +524,20 @@
   void operator = (const ScopedReport&);
 };
 
-void RestoreStack(int tid, const u64 epoch, StackTrace *stk, MutexSet *mset);
+void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk,
+                  MutexSet *mset);
+
+template <typename StackTraceTy>
+void ObtainCurrentStack(ThreadState *thr, uptr toppc, StackTraceTy *stack) {
+  uptr size = thr->shadow_stack_pos - thr->shadow_stack;
+  uptr start = 0;
+  if (size + !!toppc > kStackTraceMax) {
+    start = size + !!toppc - kStackTraceMax;
+    size = kStackTraceMax - !!toppc;
+  }
+  stack->Init(&thr->shadow_stack[start], size, toppc);
+}
+
 void StatAggregate(u64 *dst, u64 *src);
 void StatOutput(u64 *stat);
@@ -551,9 +564,8 @@
 void ReportRace(ThreadState *thr);
 bool OutputReport(ThreadState *thr, const ScopedReport &srep);
-bool IsFiredSuppression(Context *ctx,
-                        const ScopedReport &srep,
-                        const StackTrace &trace);
+bool IsFiredSuppression(Context *ctx, const ScopedReport &srep,
+                        StackTrace trace);
 bool IsExpectedReport(uptr addr, uptr size);
 void PrintMatchedBenignRaces();
 bool FrameIsInternal(const ReportStack *frame);
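Note: the ObtainCurrentStack template introduced in tsan_rtl.h above replaces
StackTrace::ObtainCurrent. When the shadow stack is deeper than kStackTraceMax,
it drops the oldest frames so the newest ones, plus the optional top pc, still
fit. A standalone sketch of just that trimming arithmetic, assuming a
256-entry cap (Trim is an invented name; only the math is from the patch):

    #include <assert.h>

    typedef unsigned long uptr;
    static const uptr kStackTraceMax = 256;

    // If the shadow stack plus the optional top pc would overflow the
    // destination, raise start so only the newest frames are copied.
    static void Trim(uptr depth, uptr toppc, uptr *start, uptr *size) {
      *start = 0;
      *size = depth;
      if (*size + !!toppc > kStackTraceMax) {
        *start = *size + !!toppc - kStackTraceMax;
        *size = kStackTraceMax - !!toppc;
      }
    }

    int main() {
      uptr start, size;
      // 512 frames + 1 top pc do not fit in 256 slots: keep the newest
      // 255 frames (starting at index 257) and one slot for the top pc.
      Trim(512, 0x42, &start, &size);
      assert(start == 257 && size == 255);
      Trim(10, 0, &start, &size);  // shallow stacks are copied verbatim
      assert(start == 0 && size == 10);
      return 0;
    }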
Index: lib/tsan/rtl/tsan_rtl.cc
===================================================================
--- lib/tsan/rtl/tsan_rtl.cc
+++ lib/tsan/rtl/tsan_rtl.cc
@@ -462,8 +462,8 @@
     thr->shadow_stack_pos[0] = pc;
     thr->shadow_stack_pos++;
   }
-  u32 id = StackDepotPut(__sanitizer::StackTrace(
-      thr->shadow_stack, thr->shadow_stack_pos - thr->shadow_stack));
+  u32 id = StackDepotPut(
+      StackTrace(thr->shadow_stack, thr->shadow_stack_pos - thr->shadow_stack));
   if (pc != 0)
     thr->shadow_stack_pos--;
   return id;
@@ -476,7 +476,7 @@
   unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % TraceParts();
   TraceHeader *hdr = &thr_trace->headers[trace];
   hdr->epoch0 = thr->fast_state.epoch();
-  hdr->stack0.ObtainCurrent(thr, 0);
+  ObtainCurrentStack(thr, 0, &hdr->stack0);
   hdr->mset0 = thr->mset;
   thr->nomalloc--;
 }
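Note: CurrentStackId in the tsan_rtl.cc hunk above temporarily pushes the
current pc onto the shadow stack so the depot snapshot includes it, then pops
it back off. A toy sketch of the idiom under simplified assumptions (DepotPut
and the vector-based stack are stand-ins for StackDepotPut, which deduplicates
and returns a u32 id, and for the runtime's preallocated shadow stack):

    #include <assert.h>
    #include <vector>

    typedef unsigned long uptr;

    static std::vector<uptr> g_depot_last;  // records the last snapshot
    static unsigned DepotPut(const uptr *trace, uptr size) {
      g_depot_last.assign(trace, trace + size);
      return 1;  // fake id
    }

    static unsigned CurrentStackId(std::vector<uptr> &shadow_stack, uptr pc) {
      if (pc != 0)
        shadow_stack.push_back(pc);  // borrow one slot for the current pc
      unsigned id = DepotPut(shadow_stack.data(), shadow_stack.size());
      if (pc != 0)
        shadow_stack.pop_back();     // restore the previous depth
      return id;
    }

    int main() {
      std::vector<uptr> stack = {0x100, 0x101};
      CurrentStackId(stack, 0x42);
      assert(g_depot_last.size() == 3 && g_depot_last[2] == 0x42);
      assert(stack.size() == 2);  // the borrowed slot was popped again
      return 0;
    }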
Index: lib/tsan/rtl/tsan_rtl_mutex.cc
===================================================================
--- lib/tsan/rtl/tsan_rtl_mutex.cc
+++ lib/tsan/rtl/tsan_rtl_mutex.cc
@@ -59,9 +59,9 @@
   ThreadRegistryLock l(ctx->thread_registry);
   ScopedReport rep(typ);
   rep.AddMutex(mid);
-  StackTrace trace;
-  trace.ObtainCurrent(thr, pc);
-  rep.AddStack(&trace, true);
+  VarSizeStackTrace trace;
+  ObtainCurrentStack(thr, pc, &trace);
+  rep.AddStack(trace, true);
   rep.AddLocation(addr, 1);
   OutputReport(thr, rep);
 }
@@ -124,12 +124,12 @@
     ThreadRegistryLock l(ctx->thread_registry);
     ScopedReport rep(ReportTypeMutexDestroyLocked);
     rep.AddMutex(mid);
-    StackTrace trace;
-    trace.ObtainCurrent(thr, pc);
-    rep.AddStack(&trace);
+    VarSizeStackTrace trace;
+    ObtainCurrentStack(thr, pc, &trace);
+    rep.AddStack(trace);
     FastState last(last_lock);
     RestoreStack(last.tid(), last.epoch(), &trace, 0);
-    rep.AddStack(&trace, true);
+    rep.AddStack(trace, true);
     rep.AddLocation(addr, 1);
     OutputReport(thr, rep);
   }
@@ -472,20 +472,17 @@
     rep.AddUniqueTid((int)r->loop[i].thr_ctx);
     rep.AddThread((int)r->loop[i].thr_ctx);
   }
-  InternalScopedBuffer<__tsan::StackTrace> stacks(2 * DDReport::kMaxLoopSize);
   uptr dummy_pc = 0x42;
   for (int i = 0; i < r->n; i++) {
     for (int j = 0; j < (flags()->second_deadlock_stack ? 2 : 1); j++) {
       u32 stk = r->loop[i].stk[j];
       if (stk) {
-        __sanitizer::StackTrace stack = StackDepotGet(stk);
-        stacks[i].Init(const_cast<uptr *>(stack.trace), stack.size);
+        rep.AddStack(StackDepotGet(stk), true);
       } else {
         // Sometimes we fail to extract the stack trace (FIXME: investigate),
         // but we should still produce some stack trace in the report.
-        stacks[i].Init(&dummy_pc, 1);
+        rep.AddStack(StackTrace(&dummy_pc, 1), true);
       }
-      rep.AddStack(&stacks[i], true);
     }
   }
   OutputReport(thr, rep);
Index: lib/tsan/rtl/tsan_rtl_report.cc
===================================================================
--- lib/tsan/rtl/tsan_rtl_report.cc
+++ lib/tsan/rtl/tsan_rtl_report.cc
@@ -30,7 +30,7 @@
 
 using namespace __sanitizer;  // NOLINT
 
-static ReportStack *SymbolizeStack(const StackTrace& trace);
+static ReportStack *SymbolizeStack(StackTrace trace);
 
 void TsanCheckFailed(const char *file, int line, const char *cond,
                      u64 v1, u64 v2) {
@@ -107,28 +107,26 @@
 ReportStack *SymbolizeStackId(u32 stack_id) {
   if (stack_id == 0)
     return 0;
-  __sanitizer::StackTrace stack = StackDepotGet(stack_id);
+  StackTrace stack = StackDepotGet(stack_id);
   if (stack.trace == nullptr)
-    return 0;
-  StackTrace trace;
-  trace.Init(stack.trace, stack.size);
-  return SymbolizeStack(trace);
+    return nullptr;
+  return SymbolizeStack(stack);
 }
 
-static ReportStack *SymbolizeStack(const StackTrace& trace) {
-  if (trace.IsEmpty())
+static ReportStack *SymbolizeStack(StackTrace trace) {
+  if (trace.size == 0)
     return 0;
   ReportStack *stack = 0;
-  for (uptr si = 0; si < trace.Size(); si++) {
-    const uptr pc = trace.Get(si);
+  for (uptr si = 0; si < trace.size; si++) {
+    const uptr pc = trace.trace[si];
 #ifndef TSAN_GO
     // We obtain the return address, that is, address of the next instruction,
     // so offset it by 1 byte.
-    const uptr pc1 = __sanitizer::StackTrace::GetPreviousInstructionPc(pc);
+    const uptr pc1 = StackTrace::GetPreviousInstructionPc(pc);
 #else
     // FIXME(dvyukov): Go sometimes uses address of a function as top pc.
     uptr pc1 = pc;
-    if (si != trace.Size() - 1)
+    if (si != trace.size - 1)
       pc1 -= 1;
 #endif
     ReportStack *ent = SymbolizeCode(pc1);
@@ -161,14 +159,14 @@
   DestroyAndFree(rep_);
 }
 
-void ScopedReport::AddStack(const StackTrace *stack, bool suppressable) {
+void ScopedReport::AddStack(StackTrace stack, bool suppressable) {
   ReportStack **rs = rep_->stacks.PushBack();
-  *rs = SymbolizeStack(*stack);
+  *rs = SymbolizeStack(stack);
   (*rs)->suppressable = suppressable;
 }
 
-void ScopedReport::AddMemoryAccess(uptr addr, Shadow s,
-                                   const StackTrace *stack, const MutexSet *mset) {
+void ScopedReport::AddMemoryAccess(uptr addr, Shadow s, StackTrace stack,
+                                   const MutexSet *mset) {
   void *mem = internal_alloc(MBlockReportMop, sizeof(ReportMop));
   ReportMop *mop = new(mem) ReportMop;
   rep_->mops.PushBack(mop);
@@ -177,7 +175,7 @@
   mop->size = s.size();
   mop->write = s.IsWrite();
   mop->atomic = s.IsAtomic();
-  mop->stack = SymbolizeStack(*stack);
+  mop->stack = SymbolizeStack(stack);
   if (mop->stack)
     mop->stack->suppressable = true;
   for (uptr i = 0; i < mset->Size(); i++) {
@@ -385,7 +383,8 @@
   return rep_;
 }
 
-void RestoreStack(int tid, const u64 epoch, StackTrace *stk, MutexSet *mset) {
+void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk,
+                  MutexSet *mset) {
   // This function restores stack trace and mutex set for the thread/epoch.
   // It does so by getting stack trace and mutex set at the beginning of
   // trace part, and then replaying the trace till the given epoch.
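Note: the comment above summarizes the restore algorithm: seed from the stack
captured at the start of the trace part, then replay function entry/exit
events up to the target epoch. A simplified sketch of that replay loop (the
Event layout and all names here are invented for illustration; the real trace
packs the event type and pc into a u64 and also carries memory-access and
mutex events that are skipped during stack restoration):

    #include <assert.h>
    #include <vector>

    typedef unsigned long uptr;

    struct Event { bool enter; uptr pc; };  // enter = FuncEnter, else FuncExit

    static std::vector<uptr> Restore(const std::vector<uptr> &stack0,
                                     const std::vector<Event> &events,
                                     size_t until) {
      // Seed with the stack captured at the start of the trace part, then
      // replay events up to the target position (the epoch of interest).
      std::vector<uptr> stack = stack0;
      for (size_t i = 0; i < until; i++) {
        if (events[i].enter)
          stack.push_back(events[i].pc);  // FuncEnter: push the callee pc
        else if (!stack.empty())
          stack.pop_back();               // FuncExit: unwind one frame
      }
      return stack;
    }

    int main() {
      std::vector<uptr> stack0 = {0x100};  // stack at epoch0
      std::vector<Event> ev = {{true, 0x200}, {true, 0x300}, {false, 0}};
      assert(Restore(stack0, ev, 2) ==
             (std::vector<uptr>{0x100, 0x200, 0x300}));
      assert(Restore(stack0, ev, 3) == (std::vector<uptr>{0x100, 0x200}));
      return 0;
    }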
@@ -410,13 +409,13 @@
   DPrintf("#%d: RestoreStack epoch=%zu ebegin=%zu eend=%zu partidx=%d\n",
           tid, (uptr)epoch, (uptr)ebegin, (uptr)eend, partidx);
   InternalScopedBuffer<uptr> stack(kShadowStackSize);
-  for (uptr i = 0; i < hdr->stack0.Size(); i++) {
-    stack[i] = hdr->stack0.Get(i);
+  for (uptr i = 0; i < hdr->stack0.size; i++) {
+    stack[i] = hdr->stack0.trace[i];
     DPrintf2("  #%02lu: pc=%zx\n", i, stack[i]);
   }
   if (mset)
     *mset = hdr->mset0;
-  uptr pos = hdr->stack0.Size();
+  uptr pos = hdr->stack0.size;
   Event *events = (Event*)GetThreadTrace(tid);
   for (uptr i = ebegin; i <= eend; i++) {
     Event ev = events[i];
@@ -451,13 +450,13 @@
   stk->Init(stack.data(), pos);
 }
 
-static bool HandleRacyStacks(ThreadState *thr, const StackTrace (&traces)[2],
-                             uptr addr_min, uptr addr_max) {
+static bool HandleRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2],
+                             uptr addr_min, uptr addr_max) {
   bool equal_stack = false;
   RacyStacks hash;
   if (flags()->suppress_equal_stacks) {
-    hash.hash[0] = md5_hash(traces[0].Begin(), traces[0].Size() * sizeof(uptr));
-    hash.hash[1] = md5_hash(traces[1].Begin(), traces[1].Size() * sizeof(uptr));
+    hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr));
+    hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr));
     for (uptr i = 0; i < ctx->racy_stacks.Size(); i++) {
       if (hash == ctx->racy_stacks[i]) {
         DPrintf("ThreadSanitizer: suppressing report as doubled (stack)\n");
@@ -490,12 +489,12 @@
   return false;
 }
 
-static void AddRacyStacks(ThreadState *thr, const StackTrace (&traces)[2],
-                          uptr addr_min, uptr addr_max) {
+static void AddRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2],
+                          uptr addr_min, uptr addr_max) {
   if (flags()->suppress_equal_stacks) {
     RacyStacks hash;
-    hash.hash[0] = md5_hash(traces[0].Begin(), traces[0].Size() * sizeof(uptr));
-    hash.hash[1] = md5_hash(traces[1].Begin(), traces[1].Size() * sizeof(uptr));
+    hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr));
+    hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr));
     ctx->racy_stacks.PushBack(hash);
   }
   if (flags()->suppress_equal_addresses) {
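Note: HandleRacyStacks/AddRacyStacks above deduplicate race reports by hashing
the raw pc arrays of both stacks and remembering pairs that were already
reported. A standalone sketch of the suppression logic under simplifying
assumptions (FNV-1a replaces md5_hash, and checking and recording are merged
into one function, whereas the runtime records the pair only after the report
is actually printed):

    #include <assert.h>
    #include <stddef.h>
    #include <vector>

    typedef unsigned long uptr;

    static uptr HashPcs(const uptr *pcs, uptr n) {
      uptr h = 1469598103934665603ull;  // FNV-1a over the raw pc bytes
      const unsigned char *p = (const unsigned char *)pcs;
      for (uptr i = 0; i < n * sizeof(uptr); i++)
        h = (h ^ p[i]) * 1099511628211ull;
      return h;
    }

    struct RacyStacks { uptr hash[2]; };
    static bool operator==(const RacyStacks &a, const RacyStacks &b) {
      return a.hash[0] == b.hash[0] && a.hash[1] == b.hash[1];
    }

    static std::vector<RacyStacks> g_racy_stacks;

    static bool IsDoubledReport(const std::vector<uptr> (&traces)[2]) {
      RacyStacks hash;
      hash.hash[0] = HashPcs(traces[0].data(), traces[0].size());
      hash.hash[1] = HashPcs(traces[1].data(), traces[1].size());
      for (size_t i = 0; i < g_racy_stacks.size(); i++)
        if (g_racy_stacks[i] == hash)
          return true;  // same pair of stacks already reported: suppress
      g_racy_stacks.push_back(hash);
      return false;
    }

    int main() {
      std::vector<uptr> traces[2] = {{0x1, 0x2}, {0x3}};
      assert(!IsDoubledReport(traces));  // first report goes through
      assert(IsDoubledReport(traces));   // duplicate is suppressed
      return 0;
    }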
@@ -536,15 +535,14 @@
   return true;
 }
 
-bool IsFiredSuppression(Context *ctx,
-                        const ScopedReport &srep,
-                        const StackTrace &trace) {
+bool IsFiredSuppression(Context *ctx, const ScopedReport &srep,
+                        StackTrace trace) {
   for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
     if (ctx->fired_suppressions[k].type != srep.GetReport()->typ)
       continue;
-    for (uptr j = 0; j < trace.Size(); j++) {
+    for (uptr j = 0; j < trace.size; j++) {
       FiredSuppression *s = &ctx->fired_suppressions[k];
-      if (trace.Get(j) == s->pc) {
+      if (trace.trace[j] == s->pc) {
         if (s->supp)
           s->supp->hit_count++;
         return true;
@@ -636,9 +634,9 @@
   if (IsFiredSuppression(ctx, rep, addr))
     return;
   const uptr kMop = 2;
-  StackTrace traces[kMop];
+  VarSizeStackTrace traces[kMop];
   const uptr toppc = TraceTopPC(thr);
-  traces[0].ObtainCurrent(thr, toppc);
+  ObtainCurrentStack(thr, toppc, &traces[0]);
   if (IsFiredSuppression(ctx, rep, traces[0]))
     return;
   InternalScopedBuffer<MutexSet> mset2(1);
@@ -653,7 +651,7 @@
 
   for (uptr i = 0; i < kMop; i++) {
     Shadow s(thr->racy_state[i]);
-    rep.AddMemoryAccess(addr, s, &traces[i],
+    rep.AddMemoryAccess(addr, s, traces[i],
                         i == 0 ? &thr->mset : mset2.data());
   }
@@ -683,26 +681,23 @@
 }
 
 void PrintCurrentStack(ThreadState *thr, uptr pc) {
-  StackTrace trace;
-  trace.ObtainCurrent(thr, pc);
+  VarSizeStackTrace trace;
+  ObtainCurrentStack(thr, pc, &trace);
   PrintStack(SymbolizeStack(trace));
 }
 
 void PrintCurrentStackSlow() {
 #ifndef TSAN_GO
-  __sanitizer::BufferedStackTrace *ptrace = new(
-      internal_alloc(MBlockStackTrace, sizeof(__sanitizer::BufferedStackTrace)))
-          __sanitizer::BufferedStackTrace();
-  ptrace->Unwind(kStackTraceMax, __sanitizer::StackTrace::GetCurrentPc(), 0, 0,
-                 0, 0, false);
+  BufferedStackTrace *ptrace =
+      new(internal_alloc(MBlockStackTrace, sizeof(BufferedStackTrace)))
+          BufferedStackTrace();
+  ptrace->Unwind(kStackTraceMax, StackTrace::GetCurrentPc(), 0, 0, 0, 0, false);
   for (uptr i = 0; i < ptrace->size / 2; i++) {
     uptr tmp = ptrace->trace_buffer[i];
     ptrace->trace_buffer[i] = ptrace->trace_buffer[ptrace->size - i - 1];
     ptrace->trace_buffer[ptrace->size - i - 1] = tmp;
   }
-  StackTrace trace;
-  trace.Init(ptrace->trace, ptrace->size);
-  PrintStack(SymbolizeStack(trace));
+  PrintStack(SymbolizeStack(*ptrace));
 #endif
 }
Index: lib/tsan/rtl/tsan_stack_trace.h
===================================================================
--- lib/tsan/rtl/tsan_stack_trace.h
+++ lib/tsan/rtl/tsan_stack_trace.h
@@ -13,34 +13,25 @@
 #ifndef TSAN_STACK_TRACE_H
 #define TSAN_STACK_TRACE_H
 
+#include "sanitizer_common/sanitizer_stacktrace.h"
 #include "tsan_defs.h"
 
 namespace __tsan {
 
-// FIXME: Delete this class in favor of __sanitizer::StackTrace.
-class StackTrace {
- public:
-  StackTrace();
-  // Initialized the object in "static mode",
-  // in this mode it never calls malloc/free but uses the provided buffer.
-  StackTrace(uptr *buf, uptr cnt);
-  ~StackTrace();
-  void Reset();
-
-  void Init(const uptr *pcs, uptr cnt);
-  void ObtainCurrent(ThreadState *thr, uptr toppc);
-  bool IsEmpty() const;
-  uptr Size() const;
-  uptr Get(uptr i) const;
-  const uptr *Begin() const;
+// StackTrace which calls malloc/free to allocate the buffer for
+// addresses in stack traces.
+struct VarSizeStackTrace : public StackTrace {
+  uptr *trace_buffer;  // Owned.
+
+  VarSizeStackTrace();
+  ~VarSizeStackTrace();
+  void Init(const uptr *pcs, uptr cnt, uptr extra_top_pc = 0);
 
  private:
-  uptr n_;
-  uptr *s_;
-  const uptr c_;
+  void ResizeBuffer(uptr new_size);
 
-  StackTrace(const StackTrace&);
-  void operator = (const StackTrace&);
+  VarSizeStackTrace(const VarSizeStackTrace &);
+  void operator=(const VarSizeStackTrace &);
 };
 
 }  // namespace __tsan
Index: lib/tsan/rtl/tsan_stack_trace.cc
===================================================================
--- lib/tsan/rtl/tsan_stack_trace.cc
+++ lib/tsan/rtl/tsan_stack_trace.cc
@@ -16,91 +16,31 @@
 
 namespace __tsan {
 
-StackTrace::StackTrace()
-    : n_()
-    , s_()
-    , c_() {
-}
-
-StackTrace::StackTrace(uptr *buf, uptr cnt)
-    : n_()
-    , s_(buf)
-    , c_(cnt) {
-  CHECK_NE(buf, 0);
-  CHECK_NE(cnt, 0);
-}
-
-StackTrace::~StackTrace() {
-  Reset();
-}
-
-void StackTrace::Reset() {
-  if (s_ && !c_) {
-    CHECK_NE(n_, 0);
-    internal_free(s_);
-    s_ = 0;
-  }
-  n_ = 0;
-}
+VarSizeStackTrace::VarSizeStackTrace()
+    : StackTrace(nullptr, 0), trace_buffer(nullptr) {}
 
-void StackTrace::Init(const uptr *pcs, uptr cnt) {
-  Reset();
-  if (cnt == 0)
-    return;
-  if (c_) {
-    CHECK_NE(s_, 0);
-    CHECK_LE(cnt, c_);
-  } else {
-    s_ = (uptr*)internal_alloc(MBlockStackTrace, cnt * sizeof(s_[0]));
-  }
-  n_ = cnt;
-  internal_memcpy(s_, pcs, cnt * sizeof(s_[0]));
+VarSizeStackTrace::~VarSizeStackTrace() {
+  ResizeBuffer(0);
 }
 
-void StackTrace::ObtainCurrent(ThreadState *thr, uptr toppc) {
-  Reset();
-  n_ = thr->shadow_stack_pos - thr->shadow_stack;
-  if (n_ + !!toppc == 0)
-    return;
-  uptr start = 0;
-  if (c_) {
-    CHECK_NE(s_, 0);
-    if (n_ + !!toppc > c_) {
-      start = n_ - c_ + !!toppc;
-      n_ = c_ - !!toppc;
-    }
-  } else {
-    // Cap potentially huge stacks.
-    if (n_ + !!toppc > kTraceStackSize) {
-      start = n_ - kTraceStackSize + !!toppc;
-      n_ = kTraceStackSize - !!toppc;
-    }
-    s_ = (uptr*)internal_alloc(MBlockStackTrace,
-                               (n_ + !!toppc) * sizeof(s_[0]));
-  }
-  for (uptr i = 0; i < n_; i++)
-    s_[i] = thr->shadow_stack[start + i];
-  if (toppc) {
-    s_[n_] = toppc;
-    n_++;
+void VarSizeStackTrace::ResizeBuffer(uptr new_size) {
+  if (trace_buffer) {
+    internal_free(trace_buffer);
   }
+  trace_buffer =
+      (new_size > 0)
+          ? (uptr *)internal_alloc(MBlockStackTrace,
+                                   new_size * sizeof(trace_buffer[0]))
+          : nullptr;
+  trace = trace_buffer;
+  size = new_size;
 }
 
-bool StackTrace::IsEmpty() const {
-  return n_ == 0;
-}
-
-uptr StackTrace::Size() const {
-  return n_;
-}
-
-uptr StackTrace::Get(uptr i) const {
-  CHECK_LT(i, n_);
-  return s_[i];
-}
-
-const uptr *StackTrace::Begin() const {
-  return s_;
+void VarSizeStackTrace::Init(const uptr *pcs, uptr cnt, uptr extra_top_pc) {
+  ResizeBuffer(cnt + !!extra_top_pc);
+  internal_memcpy(trace_buffer, pcs, cnt * sizeof(trace_buffer[0]));
+  if (extra_top_pc)
+    trace_buffer[cnt] = extra_top_pc;
 }
 
 }  // namespace __tsan
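Note: VarSizeStackTrace above owns its heap buffer while the trace/size view
inherited from __sanitizer::StackTrace always aliases it; funneling every
allocation through ResizeBuffer keeps the two in sync, and copying is
disabled because a copy would double-free. A simplified mirror of that scheme
(malloc/free replace internal_alloc/internal_free, and the inherited fields
are inlined as plain members here):

    #include <assert.h>
    #include <stdlib.h>
    #include <string.h>

    typedef unsigned long uptr;

    struct VarTrace {
      const uptr *trace = nullptr;   // read-only view used by consumers
      uptr size = 0;
      uptr *trace_buffer = nullptr;  // owned heap storage

      VarTrace() = default;
      ~VarTrace() { ResizeBuffer(0); }

      // Every (re)allocation goes through one function, which keeps the
      // public view and the owned buffer in sync.
      void ResizeBuffer(uptr new_size) {
        free(trace_buffer);
        trace_buffer =
            new_size > 0 ? (uptr *)malloc(new_size * sizeof(trace_buffer[0]))
                         : nullptr;
        trace = trace_buffer;
        size = new_size;
      }

      void Init(const uptr *pcs, uptr cnt, uptr extra_top_pc = 0) {
        ResizeBuffer(cnt + !!extra_top_pc);
        memcpy(trace_buffer, pcs, cnt * sizeof(trace_buffer[0]));
        if (extra_top_pc)
          trace_buffer[cnt] = extra_top_pc;
      }

      // Non-copyable, like the real struct: two copies would double-free.
      VarTrace(const VarTrace &) = delete;
      void operator=(const VarTrace &) = delete;
    };

    int main() {
      uptr pcs[] = {0x100, 0x101};
      VarTrace t;
      t.Init(pcs, 2, 0x42);
      assert(t.size == 3 && t.trace[2] == 0x42);
      t.Init(pcs, 1);  // re-Init frees the old buffer and reallocates
      assert(t.size == 1 && t.trace[0] == 0x100);
      return 0;
    }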
Index: lib/tsan/rtl/tsan_trace.h
===================================================================
--- lib/tsan/rtl/tsan_trace.h
+++ lib/tsan/rtl/tsan_trace.h
@@ -42,21 +42,15 @@
 typedef u64 Event;
 
 struct TraceHeader {
-  StackTrace stack0;   // Start stack for the trace.
-  u64        epoch0;   // Start epoch for the trace.
-  MutexSet   mset0;
-#ifndef TSAN_GO
-  uptr stack0buf[kTraceStackSize];
-#endif
-
-  TraceHeader()
 #ifndef TSAN_GO
-      : stack0(stack0buf, kTraceStackSize)
+  BufferedStackTrace stack0;  // Start stack for the trace.
 #else
-      : stack0()
+  VarSizeStackTrace stack0;
 #endif
-      , epoch0() {
-  }
+  u64 epoch0;  // Start epoch for the trace.
+  MutexSet mset0;
+
+  TraceHeader() : stack0(), epoch0() {}
 };
 
 struct Trace {
Index: lib/tsan/tests/unit/tsan_stack_test.cc
===================================================================
--- lib/tsan/tests/unit/tsan_stack_test.cc
+++ lib/tsan/tests/unit/tsan_stack_test.cc
@@ -17,70 +17,79 @@
 
 namespace __tsan {
 
-static void TestStackTrace(StackTrace *trace) {
+template <typename StackTraceTy>
+static void TestStackTrace(StackTraceTy *trace) {
   ThreadState thr(0, 0, 0, 0, 0, 0, 0, 0, 0);
   uptr stack[128];
   thr.shadow_stack = &stack[0];
   thr.shadow_stack_pos = &stack[0];
   thr.shadow_stack_end = &stack[128];
 
-  trace->ObtainCurrent(&thr, 0);
-  EXPECT_EQ(trace->Size(), (uptr)0);
+  ObtainCurrentStack(&thr, 0, trace);
+  EXPECT_EQ(0U, trace->size);
 
-  trace->ObtainCurrent(&thr, 42);
-  EXPECT_EQ(trace->Size(), (uptr)1);
-  EXPECT_EQ(trace->Get(0), (uptr)42);
+  ObtainCurrentStack(&thr, 42, trace);
+  EXPECT_EQ(1U, trace->size);
+  EXPECT_EQ(42U, trace->trace[0]);
 
   *thr.shadow_stack_pos++ = 100;
   *thr.shadow_stack_pos++ = 101;
-  trace->ObtainCurrent(&thr, 0);
-  EXPECT_EQ(trace->Size(), (uptr)2);
-  EXPECT_EQ(trace->Get(0), (uptr)100);
-  EXPECT_EQ(trace->Get(1), (uptr)101);
-
-  trace->ObtainCurrent(&thr, 42);
-  EXPECT_EQ(trace->Size(), (uptr)3);
-  EXPECT_EQ(trace->Get(0), (uptr)100);
-  EXPECT_EQ(trace->Get(1), (uptr)101);
-  EXPECT_EQ(trace->Get(2), (uptr)42);
+  ObtainCurrentStack(&thr, 0, trace);
+  EXPECT_EQ(2U, trace->size);
+  EXPECT_EQ(100U, trace->trace[0]);
+  EXPECT_EQ(101U, trace->trace[1]);
+
+  ObtainCurrentStack(&thr, 42, trace);
+  EXPECT_EQ(3U, trace->size);
+  EXPECT_EQ(100U, trace->trace[0]);
+  EXPECT_EQ(101U, trace->trace[1]);
+  EXPECT_EQ(42U, trace->trace[2]);
 }
 
-TEST(StackTrace, Basic) {
-  StackTrace trace;
-  TestStackTrace(&trace);
-}
+template <typename StackTraceTy>
+static void TestTrim(StackTraceTy *trace) {
+  ThreadState thr(0, 0, 0, 0, 0, 0, 0, 0, 0);
+  const uptr kShadowStackSize = 2 * kStackTraceMax;
+  uptr stack[kShadowStackSize];
+  thr.shadow_stack = &stack[0];
+  thr.shadow_stack_pos = &stack[0];
+  thr.shadow_stack_end = &stack[kShadowStackSize];
 
-TEST(StackTrace, StaticBasic) {
-  uptr buf[10];
-  StackTrace trace1(buf, 10);
-  TestStackTrace(&trace1);
-  StackTrace trace2(buf, 3);
-  TestStackTrace(&trace2);
+  for (uptr i = 0; i < kShadowStackSize; ++i)
+    *thr.shadow_stack_pos++ = 100 + i;
+
+  ObtainCurrentStack(&thr, 0, trace);
+  EXPECT_EQ(kStackTraceMax, trace->size);
+  for (uptr i = 0; i < kStackTraceMax; i++) {
+    EXPECT_EQ(100 + kStackTraceMax + i, trace->trace[i]);
+  }
+
+  ObtainCurrentStack(&thr, 42, trace);
+  EXPECT_EQ(kStackTraceMax, trace->size);
+  for (uptr i = 0; i < kStackTraceMax - 1; i++) {
+    EXPECT_EQ(101 + kStackTraceMax + i, trace->trace[i]);
+  }
+  EXPECT_EQ(42U, trace->trace[kStackTraceMax - 1]);
 }
 
-TEST(StackTrace, StaticTrim) {
-  uptr buf[2];
-  StackTrace trace(buf, 2);
+TEST(StackTrace, BasicVarSize) {
+  VarSizeStackTrace trace;
+  TestStackTrace(&trace);
+}
 
-  ThreadState thr(0, 0, 0, 0, 0, 0, 0, 0, 0);
-  uptr stack[128];
-  thr.shadow_stack = &stack[0];
-  thr.shadow_stack_pos = &stack[0];
-  thr.shadow_stack_end = &stack[128];
+TEST(StackTrace, BasicBuffered) {
+  BufferedStackTrace trace;
+  TestStackTrace(&trace);
+}
 
-  *thr.shadow_stack_pos++ = 100;
-  *thr.shadow_stack_pos++ = 101;
-  *thr.shadow_stack_pos++ = 102;
-  trace.ObtainCurrent(&thr, 0);
-  EXPECT_EQ(trace.Size(), (uptr)2);
-  EXPECT_EQ(trace.Get(0), (uptr)101);
-  EXPECT_EQ(trace.Get(1), (uptr)102);
-
-  trace.ObtainCurrent(&thr, 42);
-  EXPECT_EQ(trace.Size(), (uptr)2);
-  EXPECT_EQ(trace.Get(0), (uptr)102);
-  EXPECT_EQ(trace.Get(1), (uptr)42);
+TEST(StackTrace, TrimVarSize) {
+  VarSizeStackTrace trace;
+  TestTrim(&trace);
 }
 
+TEST(StackTrace, TrimBuffered) {
+  BufferedStackTrace trace;
+  TestTrim(&trace);
+}
+
 }  // namespace __tsan