Index: lib/asan/asan_allocator.h =================================================================== --- lib/asan/asan_allocator.h +++ lib/asan/asan_allocator.h @@ -45,8 +45,8 @@ uptr AllocTid(); uptr FreeTid(); bool Eq(const AsanChunkView &c) const { return chunk_ == c.chunk_; } - void GetAllocStack(StackTrace *stack); - void GetFreeStack(StackTrace *stack); + StackTrace GetAllocStack(); + StackTrace GetFreeStack(); bool AddrIsInside(uptr addr, uptr access_size, sptr *offset) { if (addr >= Beg() && (addr + access_size) <= End()) { *offset = addr - Beg(); @@ -139,20 +139,20 @@ AsanThreadLocalMallocStorage() {} }; -void *asan_memalign(uptr alignment, uptr size, StackTrace *stack, +void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack, AllocType alloc_type); -void asan_free(void *ptr, StackTrace *stack, AllocType alloc_type); -void asan_sized_free(void *ptr, uptr size, StackTrace *stack, +void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type); +void asan_sized_free(void *ptr, uptr size, BufferedStackTrace *stack, AllocType alloc_type); -void *asan_malloc(uptr size, StackTrace *stack); -void *asan_calloc(uptr nmemb, uptr size, StackTrace *stack); -void *asan_realloc(void *p, uptr size, StackTrace *stack); -void *asan_valloc(uptr size, StackTrace *stack); -void *asan_pvalloc(uptr size, StackTrace *stack); +void *asan_malloc(uptr size, BufferedStackTrace *stack); +void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack); +void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack); +void *asan_valloc(uptr size, BufferedStackTrace *stack); +void *asan_pvalloc(uptr size, BufferedStackTrace *stack); int asan_posix_memalign(void **memptr, uptr alignment, uptr size, - StackTrace *stack); + BufferedStackTrace *stack); uptr asan_malloc_usable_size(void *ptr, uptr pc, uptr bp); uptr asan_mz_size(const void *ptr); Index: lib/asan/asan_allocator2.cc =================================================================== --- 
lib/asan/asan_allocator2.cc +++ lib/asan/asan_allocator2.cc @@ -182,20 +182,19 @@ uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; } uptr AsanChunkView::FreeTid() { return chunk_->free_tid; } -static void GetStackTraceFromId(u32 id, StackTrace *stack) { +static StackTrace GetStackTraceFromId(u32 id) { CHECK(id); - uptr size = 0; - const uptr *trace = StackDepotGet(id, &size); - CHECK(trace); - stack->CopyFrom(trace, size); + StackTrace res = StackDepotGet(id); + CHECK(res.trace); + return res; } -void AsanChunkView::GetAllocStack(StackTrace *stack) { - GetStackTraceFromId(chunk_->alloc_context_id, stack); +StackTrace AsanChunkView::GetAllocStack() { + return GetStackTraceFromId(chunk_->alloc_context_id); } -void AsanChunkView::GetFreeStack(StackTrace *stack) { - GetStackTraceFromId(chunk_->free_context_id, stack); +StackTrace AsanChunkView::GetFreeStack() { + return GetStackTraceFromId(chunk_->free_context_id); } struct QuarantineCallback; @@ -263,7 +262,7 @@ quarantine.Init((uptr)flags()->quarantine_size, kMaxThreadLocalQuarantine); } -static void *Allocate(uptr size, uptr alignment, StackTrace *stack, +static void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack, AllocType alloc_type, bool can_fill) { if (UNLIKELY(!asan_inited)) AsanInitFromRtl(); @@ -391,15 +390,16 @@ return res; } -static void ReportInvalidFree(void *ptr, u8 chunk_state, StackTrace *stack) { +static void ReportInvalidFree(void *ptr, u8 chunk_state, + BufferedStackTrace *stack) { if (chunk_state == CHUNK_QUARANTINE) ReportDoubleFree((uptr)ptr, stack); else ReportFreeNotMalloced((uptr)ptr, stack); } -static void AtomicallySetQuarantineFlag(AsanChunk *m, - void *ptr, StackTrace *stack) { +static void AtomicallySetQuarantineFlag(AsanChunk *m, void *ptr, + BufferedStackTrace *stack) { u8 old_chunk_state = CHUNK_ALLOCATED; // Flip the chunk_state atomically to avoid race on double-free. 
if (!atomic_compare_exchange_strong((atomic_uint8_t*)m, &old_chunk_state, @@ -410,8 +410,8 @@ // Expects the chunk to already be marked as quarantined by using // AtomicallySetQuarantineFlag. -static void QuarantineChunk(AsanChunk *m, void *ptr, - StackTrace *stack, AllocType alloc_type) { +static void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack, + AllocType alloc_type) { CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE); if (m->alloc_type != alloc_type && flags()->alloc_dealloc_mismatch) @@ -447,7 +447,7 @@ } } -static void Deallocate(void *ptr, uptr delete_size, StackTrace *stack, +static void Deallocate(void *ptr, uptr delete_size, BufferedStackTrace *stack, AllocType alloc_type) { uptr p = reinterpret_cast(ptr); if (p == 0) return; @@ -464,7 +464,8 @@ QuarantineChunk(m, ptr, stack, alloc_type); } -static void *Reallocate(void *old_ptr, uptr new_size, StackTrace *stack) { +static void *Reallocate(void *old_ptr, uptr new_size, + BufferedStackTrace *stack) { CHECK(old_ptr && new_size); uptr p = reinterpret_cast(old_ptr); uptr chunk_beg = p - kChunkHeaderSize; @@ -577,25 +578,25 @@ allocator.PrintStats(); } -void *asan_memalign(uptr alignment, uptr size, StackTrace *stack, +void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack, AllocType alloc_type) { return Allocate(size, alignment, stack, alloc_type, true); } -void asan_free(void *ptr, StackTrace *stack, AllocType alloc_type) { +void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) { Deallocate(ptr, 0, stack, alloc_type); } -void asan_sized_free(void *ptr, uptr size, StackTrace *stack, +void asan_sized_free(void *ptr, uptr size, BufferedStackTrace *stack, AllocType alloc_type) { Deallocate(ptr, size, stack, alloc_type); } -void *asan_malloc(uptr size, StackTrace *stack) { +void *asan_malloc(uptr size, BufferedStackTrace *stack) { return Allocate(size, 8, stack, FROM_MALLOC, true); } -void *asan_calloc(uptr nmemb, uptr size, StackTrace *stack) { +void 
*asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) { if (CallocShouldReturnNullDueToOverflow(size, nmemb)) return AllocatorReturnNull(); void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false); @@ -606,7 +607,7 @@ return ptr; } -void *asan_realloc(void *p, uptr size, StackTrace *stack) { +void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack) { if (p == 0) return Allocate(size, 8, stack, FROM_MALLOC, true); if (size == 0) { @@ -616,11 +617,11 @@ return Reallocate(p, size, stack); } -void *asan_valloc(uptr size, StackTrace *stack) { +void *asan_valloc(uptr size, BufferedStackTrace *stack) { return Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true); } -void *asan_pvalloc(uptr size, StackTrace *stack) { +void *asan_pvalloc(uptr size, BufferedStackTrace *stack) { uptr PageSize = GetPageSizeCached(); size = RoundUpTo(size, PageSize); if (size == 0) { @@ -631,7 +632,7 @@ } int asan_posix_memalign(void **memptr, uptr alignment, uptr size, - StackTrace *stack) { + BufferedStackTrace *stack) { void *ptr = Allocate(size, alignment, stack, FROM_MALLOC, true); CHECK(IsAligned((uptr)ptr, alignment)); *memptr = ptr; Index: lib/asan/asan_debugging.cc =================================================================== --- lib/asan/asan_debugging.cc +++ lib/asan/asan_debugging.cc @@ -86,22 +86,19 @@ AsanChunkView chunk = FindHeapChunkByAddress(addr); if (!chunk.IsValid()) return 0; - StackTrace stack; + StackTrace stack(nullptr, 0); if (alloc_stack) { if (chunk.AllocTid() == kInvalidTid) return 0; - chunk.GetAllocStack(&stack); + stack = chunk.GetAllocStack(); if (thread_id) *thread_id = chunk.AllocTid(); } else { if (chunk.FreeTid() == kInvalidTid) return 0; - chunk.GetFreeStack(&stack); + stack = chunk.GetFreeStack(); if (thread_id) *thread_id = chunk.FreeTid(); } if (trace && size) { - if (size > kStackTraceMax) - size = kStackTraceMax; - if (size > stack.size) - size = stack.size; + size = Min(size, Min(stack.size, kStackTraceMax)); 
for (uptr i = 0; i < size; i++) trace[i] = StackTrace::GetPreviousInstructionPc(stack.trace[i]); Index: lib/asan/asan_report.h =================================================================== --- lib/asan/asan_report.h +++ lib/asan/asan_report.h @@ -57,35 +57,41 @@ void NORETURN ReportSIGSEGV(const char *description, uptr pc, uptr sp, uptr bp, void *context, uptr addr); void NORETURN ReportNewDeleteSizeMismatch(uptr addr, uptr delete_size, - StackTrace *free_stack); -void NORETURN ReportDoubleFree(uptr addr, StackTrace *free_stack); -void NORETURN ReportFreeNotMalloced(uptr addr, StackTrace *free_stack); -void NORETURN ReportAllocTypeMismatch(uptr addr, StackTrace *free_stack, + BufferedStackTrace *free_stack); +void NORETURN ReportDoubleFree(uptr addr, BufferedStackTrace *free_stack); +void NORETURN ReportFreeNotMalloced(uptr addr, BufferedStackTrace *free_stack); +void NORETURN ReportAllocTypeMismatch(uptr addr, BufferedStackTrace *free_stack, AllocType alloc_type, AllocType dealloc_type); -void NORETURN ReportMallocUsableSizeNotOwned(uptr addr, - StackTrace *stack); void NORETURN -ReportSanitizerGetAllocatedSizeNotOwned(uptr addr, StackTrace *stack); -void NORETURN ReportStringFunctionMemoryRangesOverlap( - const char *function, const char *offset1, uptr length1, - const char *offset2, uptr length2, StackTrace *stack); + ReportMallocUsableSizeNotOwned(uptr addr, BufferedStackTrace *stack); void NORETURN -ReportStringFunctionSizeOverflow(uptr offset, uptr size, StackTrace *stack); + ReportSanitizerGetAllocatedSizeNotOwned(uptr addr, + BufferedStackTrace *stack); void NORETURN -ReportBadParamsToAnnotateContiguousContainer(uptr beg, uptr end, uptr old_mid, - uptr new_mid, StackTrace *stack); + ReportStringFunctionMemoryRangesOverlap(const char *function, + const char *offset1, uptr length1, + const char *offset2, uptr length2, + BufferedStackTrace *stack); +void NORETURN ReportStringFunctionSizeOverflow(uptr offset, uptr size, + BufferedStackTrace *stack); +void 
NORETURN + ReportBadParamsToAnnotateContiguousContainer(uptr beg, uptr end, + uptr old_mid, uptr new_mid, + BufferedStackTrace *stack); void NORETURN ReportODRViolation(const __asan_global *g1, u32 stack_id1, const __asan_global *g2, u32 stack_id2); // Mac-specific errors and warnings. -void WarnMacFreeUnallocated( - uptr addr, uptr zone_ptr, const char *zone_name, StackTrace *stack); -void NORETURN ReportMacMzReallocUnknown( - uptr addr, uptr zone_ptr, const char *zone_name, StackTrace *stack); -void NORETURN ReportMacCfReallocUnknown( - uptr addr, uptr zone_ptr, const char *zone_name, StackTrace *stack); +void WarnMacFreeUnallocated(uptr addr, uptr zone_ptr, const char *zone_name, + BufferedStackTrace *stack); +void NORETURN ReportMacMzReallocUnknown(uptr addr, uptr zone_ptr, + const char *zone_name, + BufferedStackTrace *stack); +void NORETURN ReportMacCfReallocUnknown(uptr addr, uptr zone_ptr, + const char *zone_name, + BufferedStackTrace *stack); } // namespace __asan Index: lib/asan/asan_report.cc =================================================================== --- lib/asan/asan_report.cc +++ lib/asan/asan_report.cc @@ -440,16 +440,15 @@ // previously. That's unfortunate, but I have no better solution, // especially given that the alloca may be from entirely different place // (e.g. use-after-scope, or different thread's stack). - StackTrace alloca_stack; #if defined(__powerpc64__) && defined(__BIG_ENDIAN__) // On PowerPC64 ELFv1, the address of a function actually points to a // three-doubleword data structure with the first field containing // the address of the function's code. 
access.frame_pc = *reinterpret_cast(access.frame_pc); #endif - alloca_stack.trace[0] = access.frame_pc + 16; - alloca_stack.size = 1; + access.frame_pc += 16; Printf("%s", d.EndLocation()); + StackTrace alloca_stack(&access.frame_pc, 1); alloca_stack.Print(); InternalMmapVector vars(16); @@ -519,8 +518,7 @@ asanThreadRegistry().CheckLocked(); AsanThreadContext *alloc_thread = GetThreadContextByTidLocked(chunk.AllocTid()); - StackTrace alloc_stack; - chunk.GetAllocStack(&alloc_stack); + StackTrace alloc_stack = chunk.GetAllocStack(); char tname[128]; Decorator d; AsanThreadContext *free_thread = 0; @@ -530,8 +528,7 @@ free_thread->tid, ThreadNameWithParenthesis(free_thread, tname, sizeof(tname)), d.EndAllocation()); - StackTrace free_stack; - chunk.GetFreeStack(&free_stack); + StackTrace free_stack = chunk.GetFreeStack(); free_stack.Print(); Printf("%spreviously allocated by thread T%d%s here:%s\n", d.Allocation(), alloc_thread->tid, @@ -581,9 +578,7 @@ " created by T%d%s here:\n", context->parent_tid, ThreadNameWithParenthesis(context->parent_tid, tname, sizeof(tname))); Printf("%s", str.data()); - uptr stack_size; - const uptr *stack_trace = StackDepotGet(context->stack_id, &stack_size); - StackTrace::PrintStack(stack_trace, stack_size); + StackDepotGet(context->stack_id).Print(); // Recursively described parent thread if needed. 
if (flags()->print_full_thread_history) { AsanThreadContext *parent_context = @@ -684,7 +679,7 @@ ReportErrorSummary("SEGV", &stack); } -void ReportDoubleFree(uptr addr, StackTrace *free_stack) { +void ReportDoubleFree(uptr addr, BufferedStackTrace *free_stack) { ScopedInErrorReport in_report; Decorator d; Printf("%s", d.Warning()); @@ -703,7 +698,7 @@ } void ReportNewDeleteSizeMismatch(uptr addr, uptr delete_size, - StackTrace *free_stack) { + BufferedStackTrace *free_stack) { ScopedInErrorReport in_report; Decorator d; Printf("%s", d.Warning()); @@ -726,7 +721,7 @@ "ASAN_OPTIONS=new_delete_type_mismatch=0\n"); } -void ReportFreeNotMalloced(uptr addr, StackTrace *free_stack) { +void ReportFreeNotMalloced(uptr addr, BufferedStackTrace *free_stack) { ScopedInErrorReport in_report; Decorator d; Printf("%s", d.Warning()); @@ -743,7 +738,7 @@ ReportErrorSummary("bad-free", &stack); } -void ReportAllocTypeMismatch(uptr addr, StackTrace *free_stack, +void ReportAllocTypeMismatch(uptr addr, BufferedStackTrace *free_stack, AllocType alloc_type, AllocType dealloc_type) { static const char *alloc_names[] = @@ -766,7 +761,7 @@ "ASAN_OPTIONS=alloc_dealloc_mismatch=0\n"); } -void ReportMallocUsableSizeNotOwned(uptr addr, StackTrace *stack) { +void ReportMallocUsableSizeNotOwned(uptr addr, BufferedStackTrace *stack) { ScopedInErrorReport in_report; Decorator d; Printf("%s", d.Warning()); @@ -779,7 +774,8 @@ ReportErrorSummary("bad-malloc_usable_size", stack); } -void ReportSanitizerGetAllocatedSizeNotOwned(uptr addr, StackTrace *stack) { +void ReportSanitizerGetAllocatedSizeNotOwned(uptr addr, + BufferedStackTrace *stack) { ScopedInErrorReport in_report; Decorator d; Printf("%s", d.Warning()); @@ -792,9 +788,10 @@ ReportErrorSummary("bad-__sanitizer_get_allocated_size", stack); } -void ReportStringFunctionMemoryRangesOverlap( - const char *function, const char *offset1, uptr length1, - const char *offset2, uptr length2, StackTrace *stack) { +void 
ReportStringFunctionMemoryRangesOverlap(const char *function, + const char *offset1, uptr length1, + const char *offset2, uptr length2, + BufferedStackTrace *stack) { ScopedInErrorReport in_report; Decorator d; char bug_type[100]; @@ -811,7 +808,7 @@ } void ReportStringFunctionSizeOverflow(uptr offset, uptr size, - StackTrace *stack) { + BufferedStackTrace *stack) { ScopedInErrorReport in_report; Decorator d; const char *bug_type = "negative-size-param"; @@ -825,7 +822,7 @@ void ReportBadParamsToAnnotateContiguousContainer(uptr beg, uptr end, uptr old_mid, uptr new_mid, - StackTrace *stack) { + BufferedStackTrace *stack) { ScopedInErrorReport in_report; Report("ERROR: AddressSanitizer: bad parameters to " "__sanitizer_annotate_contiguous_container:\n" @@ -855,12 +852,9 @@ if (stack_id1 && stack_id2) { Printf("These globals were registered at these points:\n"); Printf(" [1]:\n"); - uptr stack_size; - const uptr *stack_trace = StackDepotGet(stack_id1, &stack_size); - StackTrace::PrintStack(stack_trace, stack_size); + StackDepotGet(stack_id1).Print(); Printf(" [2]:\n"); - stack_trace = StackDepotGet(stack_id2, &stack_size); - StackTrace::PrintStack(stack_trace, stack_size); + StackDepotGet(stack_id2).Print(); } Report("HINT: if you don't care about these warnings you may set " "ASAN_OPTIONS=detect_odr_violation=0\n"); @@ -900,8 +894,8 @@ } // ----------------------- Mac-specific reports ----------------- {{{1 -void WarnMacFreeUnallocated( - uptr addr, uptr zone_ptr, const char *zone_name, StackTrace *stack) { +void WarnMacFreeUnallocated(uptr addr, uptr zone_ptr, const char *zone_name, + BufferedStackTrace *stack) { // Just print a warning here. 
Printf("free_common(%p) -- attempting to free unallocated memory.\n" "AddressSanitizer is ignoring this error on Mac OS now.\n", @@ -911,8 +905,8 @@ DescribeHeapAddress(addr, 1); } -void ReportMacMzReallocUnknown( - uptr addr, uptr zone_ptr, const char *zone_name, StackTrace *stack) { +void ReportMacMzReallocUnknown(uptr addr, uptr zone_ptr, const char *zone_name, + BufferedStackTrace *stack) { ScopedInErrorReport in_report; Printf("mz_realloc(%p) -- attempting to realloc unallocated memory.\n" "This is an unrecoverable problem, exiting now.\n", @@ -922,8 +916,8 @@ DescribeHeapAddress(addr, 1); } -void ReportMacCfReallocUnknown( - uptr addr, uptr zone_ptr, const char *zone_name, StackTrace *stack) { +void ReportMacCfReallocUnknown(uptr addr, uptr zone_ptr, const char *zone_name, + BufferedStackTrace *stack) { ScopedInErrorReport in_report; Printf("cf_realloc(%p) -- attempting to realloc unallocated memory.\n" "This is an unrecoverable problem, exiting now.\n", Index: lib/asan/asan_stack.h =================================================================== --- lib/asan/asan_stack.h +++ lib/asan/asan_stack.h @@ -25,8 +25,9 @@ // The pc will be in the position 0 of the resulting stack trace. // The bp may refer to the current frame or to the caller's frame. ALWAYS_INLINE -void GetStackTraceWithPcBpAndContext(StackTrace *stack, uptr max_depth, uptr pc, - uptr bp, void *context, bool fast) { +void GetStackTraceWithPcBpAndContext(BufferedStackTrace *stack, uptr max_depth, + uptr pc, uptr bp, void *context, + bool fast) { #if SANITIZER_WINDOWS stack->Unwind(max_depth, pc, bp, context, 0, 0, fast); #else @@ -53,14 +54,14 @@ // don't want stack trace to contain functions from ASan internals. 
#define GET_STACK_TRACE(max_size, fast) \ - StackTrace stack; \ + BufferedStackTrace stack; \ if (max_size <= 2) { \ stack.size = max_size; \ if (max_size > 0) { \ stack.top_frame_bp = GET_CURRENT_FRAME(); \ - stack.trace[0] = StackTrace::GetCurrentPc(); \ + stack.trace_buffer[0] = StackTrace::GetCurrentPc(); \ if (max_size > 1) \ - stack.trace[1] = GET_CALLER_PC(); \ + stack.trace_buffer[1] = GET_CALLER_PC(); \ } \ } else { \ GetStackTraceWithPcBpAndContext(&stack, max_size, \ @@ -69,12 +70,12 @@ } #define GET_STACK_TRACE_FATAL(pc, bp) \ - StackTrace stack; \ + BufferedStackTrace stack; \ GetStackTraceWithPcBpAndContext(&stack, kStackTraceMax, pc, bp, 0, \ common_flags()->fast_unwind_on_fatal) #define GET_STACK_TRACE_SIGNAL(pc, bp, context) \ - StackTrace stack; \ + BufferedStackTrace stack; \ GetStackTraceWithPcBpAndContext(&stack, kStackTraceMax, pc, bp, context, \ common_flags()->fast_unwind_on_fatal) Index: lib/asan/tests/asan_noinst_test.cc =================================================================== --- lib/asan/tests/asan_noinst_test.cc +++ lib/asan/tests/asan_noinst_test.cc @@ -52,19 +52,19 @@ static void MallocStress(size_t n) { u32 seed = my_rand(); - StackTrace stack1; - stack1.trace[0] = 0xa123; - stack1.trace[1] = 0xa456; + BufferedStackTrace stack1; + stack1.trace_buffer[0] = 0xa123; + stack1.trace_buffer[1] = 0xa456; stack1.size = 2; - StackTrace stack2; - stack2.trace[0] = 0xb123; - stack2.trace[1] = 0xb456; + BufferedStackTrace stack2; + stack2.trace_buffer[0] = 0xb123; + stack2.trace_buffer[1] = 0xb456; stack2.size = 2; - StackTrace stack3; - stack3.trace[0] = 0xc123; - stack3.trace[1] = 0xc456; + BufferedStackTrace stack3; + stack3.trace_buffer[0] = 0xc123; + stack3.trace_buffer[1] = 0xc456; stack3.size = 2; std::vector vec; @@ -140,8 +140,8 @@ } TEST(AddressSanitizer, QuarantineTest) { - StackTrace stack; - stack.trace[0] = 0x890; + BufferedStackTrace stack; + stack.trace_buffer[0] = 0x890; stack.size = 1; const int size = 1024; @@ 
-161,8 +161,8 @@ void *ThreadedQuarantineTestWorker(void *unused) { (void)unused; u32 seed = my_rand(); - StackTrace stack; - stack.trace[0] = 0x890; + BufferedStackTrace stack; + stack.trace_buffer[0] = 0x890; stack.size = 1; for (size_t i = 0; i < 1000; i++) { @@ -188,8 +188,8 @@ void *ThreadedOneSizeMallocStress(void *unused) { (void)unused; - StackTrace stack; - stack.trace[0] = 0x890; + BufferedStackTrace stack; + stack.trace_buffer[0] = 0x890; stack.size = 1; const size_t kNumMallocs = 1000; for (int iter = 0; iter < 1000; iter++) { @@ -241,8 +241,8 @@ uptr buggy_ptr; __asan_test_only_reported_buggy_pointer = &buggy_ptr; - StackTrace stack; - stack.trace[0] = 0x890; + BufferedStackTrace stack; + stack.trace_buffer[0] = 0x890; stack.size = 1; for (uptr len = 16; len <= 32; len++) { Index: lib/lsan/lsan.h =================================================================== --- lib/lsan/lsan.h +++ lib/lsan/lsan.h @@ -15,17 +15,17 @@ #include "sanitizer_common/sanitizer_flags.h" #include "sanitizer_common/sanitizer_stacktrace.h" -#define GET_STACK_TRACE(max_size, fast) \ - StackTrace stack; \ - { \ - uptr stack_top = 0, stack_bottom = 0; \ - ThreadContext *t; \ - if (fast && (t = CurrentThreadContext())) { \ - stack_top = t->stack_end(); \ - stack_bottom = t->stack_begin(); \ - } \ - stack.Unwind(max_size, StackTrace::GetCurrentPc(), GET_CURRENT_FRAME(), \ - /* context */ 0, stack_top, stack_bottom, fast); \ +#define GET_STACK_TRACE(max_size, fast) \ + BufferedStackTrace stack; \ + { \ + uptr stack_top = 0, stack_bottom = 0; \ + ThreadContext *t; \ + if (fast && (t = CurrentThreadContext())) { \ + stack_top = t->stack_end(); \ + stack_bottom = t->stack_begin(); \ + } \ + stack.Unwind(max_size, StackTrace::GetCurrentPc(), GET_CURRENT_FRAME(), \ + /* context */ 0, stack_top, stack_bottom, fast); \ } #define GET_STACK_TRACE_FATAL \ Index: lib/lsan/lsan_common.cc =================================================================== --- lib/lsan/lsan_common.cc +++ 
lib/lsan/lsan_common.cc @@ -355,9 +355,7 @@ static void PrintStackTraceById(u32 stack_trace_id) { CHECK(stack_trace_id); - uptr size = 0; - const uptr *trace = StackDepotGet(stack_trace_id, &size); - StackTrace::PrintStack(trace, size); + StackDepotGet(stack_trace_id).Print(); } // ForEachChunk callback. Aggregates information about unreachable chunks into @@ -372,10 +370,9 @@ uptr resolution = flags()->resolution; u32 stack_trace_id = 0; if (resolution > 0) { - uptr size = 0; - const uptr *trace = StackDepotGet(m.stack_trace_id(), &size); - size = Min(size, resolution); - stack_trace_id = StackDepotPut(trace, size); + StackTrace stack = StackDepotGet(m.stack_trace_id()); + uptr size = Min(stack.size, resolution); + stack_trace_id = StackDepotPut(stack.trace, size); } else { stack_trace_id = m.stack_trace_id(); } @@ -487,11 +484,10 @@ } static Suppression *GetSuppressionForStack(u32 stack_trace_id) { - uptr size = 0; - const uptr *trace = StackDepotGet(stack_trace_id, &size); - for (uptr i = 0; i < size; i++) { - Suppression *s = - GetSuppressionForAddr(StackTrace::GetPreviousInstructionPc(trace[i])); + StackTrace stack = StackDepotGet(stack_trace_id); + for (uptr i = 0; i < stack.size; i++) { + Suppression *s = GetSuppressionForAddr( + StackTrace::GetPreviousInstructionPc(stack.trace[i])); if (s) return s; } return 0; Index: lib/msan/msan.h =================================================================== --- lib/msan/msan.h +++ lib/msan/msan.h @@ -72,7 +72,7 @@ void PrintWarning(uptr pc, uptr bp); void PrintWarningWithOrigin(uptr pc, uptr bp, u32 origin); -void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp, +void GetStackTrace(BufferedStackTrace *stack, uptr max_s, uptr pc, uptr bp, bool request_fast_unwind); void ReportUMR(StackTrace *stack, u32 origin); @@ -97,27 +97,24 @@ // the previous origin id. 
u32 ChainOrigin(u32 id, StackTrace *stack); -#define GET_MALLOC_STACK_TRACE \ - StackTrace stack; \ - stack.size = 0; \ - if (__msan_get_track_origins() && msan_inited) \ - GetStackTrace(&stack, common_flags()->malloc_context_size, \ - StackTrace::GetCurrentPc(), GET_CURRENT_FRAME(), \ - common_flags()->fast_unwind_on_malloc) - -#define GET_STORE_STACK_TRACE_PC_BP(pc, bp) \ - StackTrace stack; \ - stack.size = 0; \ - if (__msan_get_track_origins() > 1 && msan_inited) \ - GetStackTrace(&stack, flags()->store_context_size, pc, bp, \ +#define GET_MALLOC_STACK_TRACE \ + BufferedStackTrace stack; \ + if (__msan_get_track_origins() && msan_inited) \ + GetStackTrace(&stack, common_flags()->malloc_context_size, \ + StackTrace::GetCurrentPc(), GET_CURRENT_FRAME(), \ common_flags()->fast_unwind_on_malloc) -#define GET_FATAL_STACK_TRACE_PC_BP(pc, bp) \ - StackTrace stack; \ - stack.size = 0; \ - if (msan_inited) \ - GetStackTrace(&stack, kStackTraceMax, pc, bp, \ - common_flags()->fast_unwind_on_fatal) +#define GET_STORE_STACK_TRACE_PC_BP(pc, bp) \ + BufferedStackTrace stack; \ + if (__msan_get_track_origins() > 1 && msan_inited) \ + GetStackTrace(&stack, flags()->store_context_size, pc, bp, \ + common_flags()->fast_unwind_on_malloc) + +#define GET_FATAL_STACK_TRACE_PC_BP(pc, bp) \ + BufferedStackTrace stack; \ + if (msan_inited) \ + GetStackTrace(&stack, kStackTraceMax, pc, bp, \ + common_flags()->fast_unwind_on_fatal) #define GET_STORE_STACK_TRACE \ GET_STORE_STACK_TRACE_PC_BP(StackTrace::GetCurrentPc(), GET_CURRENT_FRAME()) Index: lib/msan/msan.cc =================================================================== --- lib/msan/msan.cc +++ lib/msan/msan.cc @@ -187,7 +187,7 @@ ParseFlagsFromString(f, options); } -void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp, +void GetStackTrace(BufferedStackTrace *stack, uptr max_s, uptr pc, uptr bp, bool request_fast_unwind) { MsanThread *t = GetCurrentThread(); if (!t || 
!StackTrace::WillUseFastUnwind(request_fast_unwind)) { Index: lib/msan/msan_report.cc =================================================================== --- lib/msan/msan_report.cc +++ lib/msan/msan_report.cc @@ -54,7 +54,7 @@ // For some reason function address in LLVM IR is 1 less then the address // of the first instruction. pc += 1; - StackTrace::PrintStack(&pc, 1); + StackTrace(&pc, 1).Print(); } } @@ -77,20 +77,16 @@ DescribeStackOrigin(so, pc); break; } else if (prev_o.isHeapRoot()) { - uptr size = 0; - const uptr *trace = StackDepotGet(stack_id, &size); Printf(" %sUninitialized value was created by a heap allocation%s\n", d.Origin(), d.End()); - StackTrace::PrintStack(trace, size); + StackDepotGet(stack_id).Print(); break; } else { // chained origin - uptr size = 0; - const uptr *trace = StackDepotGet(stack_id, &size); // FIXME: copied? modified? passed through? observed? Printf(" %sUninitialized value was stored to memory at%s\n", d.Origin(), d.End()); - StackTrace::PrintStack(trace, size); + StackDepotGet(stack_id).Print(); id = prev_id; } } Index: lib/sanitizer_common/sanitizer_stackdepot.h =================================================================== --- lib/sanitizer_common/sanitizer_stackdepot.h +++ lib/sanitizer_common/sanitizer_stackdepot.h @@ -15,6 +15,7 @@ #include "sanitizer_common.h" #include "sanitizer_internal_defs.h" +#include "sanitizer_stacktrace.h" namespace __sanitizer { @@ -35,10 +36,11 @@ const int kStackDepotMaxUseCount = 1U << 20; StackDepotStats *StackDepotGetStats(); +// FIXME: Pass StackTrace as an input argument here. u32 StackDepotPut(const uptr *stack, uptr size); StackDepotHandle StackDepotPut_WithHandle(const uptr *stack, uptr size); // Retrieves a stored stack trace by the id. 
-const uptr *StackDepotGet(u32 id, uptr *size); +StackTrace StackDepotGet(u32 id); void StackDepotLockAll(); void StackDepotUnlockAll(); Index: lib/sanitizer_common/sanitizer_stackdepot.cc =================================================================== --- lib/sanitizer_common/sanitizer_stackdepot.cc +++ lib/sanitizer_common/sanitizer_stackdepot.cc @@ -18,6 +18,7 @@ namespace __sanitizer { +// FIXME: Get rid of this class in favor of StackTrace. struct StackDepotDesc { const uptr *stack; uptr size; @@ -122,10 +123,9 @@ return theDepot.Put(desc); } -const uptr *StackDepotGet(u32 id, uptr *size) { +StackTrace StackDepotGet(u32 id) { StackDepotDesc desc = theDepot.Get(id); - *size = desc.size; - return desc.stack; + return StackTrace(desc.stack, desc.size); } void StackDepotLockAll() { Index: lib/sanitizer_common/sanitizer_stacktrace.h =================================================================== --- lib/sanitizer_common/sanitizer_stacktrace.h +++ lib/sanitizer_common/sanitizer_stacktrace.h @@ -30,25 +30,13 @@ #endif struct StackTrace { - typedef bool (*SymbolizeCallback)(const void *pc, char *out_buffer, - int out_size); - uptr top_frame_bp; + const uptr *trace; uptr size; - uptr trace[kStackTraceMax]; - // Prints a symbolized stacktrace, followed by an empty line. - static void PrintStack(const uptr *addr, uptr size); - void Print() const { - PrintStack(trace, size); - } + StackTrace(const uptr *trace, uptr size) : trace(trace), size(size) {} - void CopyFrom(const uptr *src, uptr src_size) { - top_frame_bp = 0; - size = src_size; - if (size > kStackTraceMax) size = kStackTraceMax; - for (uptr i = 0; i < size; i++) - trace[i] = src[i]; - } + // Prints a symbolized stacktrace, followed by an empty line. + void Print() const; static bool WillUseFastUnwind(bool request_fast_unwind) { // Check if fast unwind is available. Fast unwind is the only option on Mac. 
@@ -62,11 +50,21 @@ return request_fast_unwind; } - void Unwind(uptr max_depth, uptr pc, uptr bp, void *context, uptr stack_top, - uptr stack_bottom, bool request_fast_unwind); - static uptr GetCurrentPc(); static uptr GetPreviousInstructionPc(uptr pc); + typedef bool (*SymbolizeCallback)(const void *pc, char *out_buffer, + int out_size); +}; + +// StackTrace that owns the buffer used to store the addresses. +struct BufferedStackTrace : public StackTrace { + uptr trace_buffer[kStackTraceMax]; + uptr top_frame_bp; // Optional bp of a top frame. + + BufferedStackTrace() : StackTrace(trace_buffer, 0), top_frame_bp(0) {} + + void Unwind(uptr max_depth, uptr pc, uptr bp, void *context, uptr stack_top, + uptr stack_bottom, bool request_fast_unwind); private: void FastUnwindStack(uptr pc, uptr bp, uptr stack_top, uptr stack_bottom, Index: lib/sanitizer_common/sanitizer_stacktrace.cc =================================================================== --- lib/sanitizer_common/sanitizer_stacktrace.cc +++ lib/sanitizer_common/sanitizer_stacktrace.cc @@ -65,11 +65,10 @@ #endif } -void StackTrace::FastUnwindStack(uptr pc, uptr bp, - uptr stack_top, uptr stack_bottom, - uptr max_depth) { +void BufferedStackTrace::FastUnwindStack(uptr pc, uptr bp, uptr stack_top, + uptr stack_bottom, uptr max_depth) { CHECK_GE(max_depth, 2); - trace[0] = pc; + trace_buffer[0] = pc; size = 1; if (stack_top < 4096) return; // Sanity check for stack top. 
uhwptr *frame = GetCanonicFrame(bp, stack_top, stack_bottom); @@ -82,7 +81,7 @@ size < max_depth) { uhwptr pc1 = frame[1]; if (pc1 != pc) { - trace[size++] = (uptr) pc1; + trace_buffer[size++] = (uptr) pc1; } bottom = (uptr)frame; frame = GetCanonicFrame((uptr)frame[0], stack_top, bottom); @@ -93,15 +92,15 @@ return cur_pc - trace_pc <= threshold || trace_pc - cur_pc <= threshold; } -void StackTrace::PopStackFrames(uptr count) { +void BufferedStackTrace::PopStackFrames(uptr count) { CHECK_LT(count, size); size -= count; for (uptr i = 0; i < size; ++i) { - trace[i] = trace[i + count]; + trace_buffer[i] = trace_buffer[i + count]; } } -uptr StackTrace::LocatePcInTrace(uptr pc) { +uptr BufferedStackTrace::LocatePcInTrace(uptr pc) { // Use threshold to find PC in stack trace, as PC we want to unwind from may // slightly differ from return address in the actual unwinded stack trace. const int kPcThreshold = 288; Index: lib/sanitizer_common/sanitizer_stacktrace_libcdep.cc =================================================================== --- lib/sanitizer_common/sanitizer_stacktrace_libcdep.cc +++ lib/sanitizer_common/sanitizer_stacktrace_libcdep.cc @@ -22,18 +22,18 @@ buffer->append(" #%zu 0x%zx", frame_num, pc); } -void StackTrace::PrintStack(const uptr *addr, uptr size) { - if (addr == 0 || size == 0) { +void StackTrace::Print() const { + if (trace == nullptr || size == 0) { Printf("<empty stack>\n\n"); return; } InternalScopedBuffer<AddressInfo> addr_frames(64); InternalScopedString frame_desc(GetPageSizeCached() * 2); uptr frame_num = 0; - for (uptr i = 0; i < size && addr[i]; i++) { + for (uptr i = 0; i < size && trace[i]; i++) { // PCs in stack traces are actually the return addresses, that is, // addresses of the next instructions after the call.
- uptr pc = GetPreviousInstructionPc(addr[i]); + uptr pc = GetPreviousInstructionPc(trace[i]); uptr addr_frames_num = Symbolizer::GetOrInit()->SymbolizePC( pc, addr_frames.data(), addr_frames.size()); if (addr_frames_num == 0) { @@ -68,9 +68,9 @@ Printf("\n"); } -void StackTrace::Unwind(uptr max_depth, uptr pc, uptr bp, void *context, - uptr stack_top, uptr stack_bottom, - bool request_fast_unwind) { +void BufferedStackTrace::Unwind(uptr max_depth, uptr pc, uptr bp, void *context, + uptr stack_top, uptr stack_bottom, + bool request_fast_unwind) { top_frame_bp = (max_depth > 0) ? bp : 0; // Avoid doing any work for small max_depth. if (max_depth == 0) { @@ -79,7 +79,7 @@ } if (max_depth == 1) { size = 1; - trace[0] = pc; + trace_buffer[0] = pc; return; } if (!WillUseFastUnwind(request_fast_unwind)) { Index: lib/sanitizer_common/sanitizer_unwind_posix_libcdep.cc =================================================================== --- lib/sanitizer_common/sanitizer_unwind_posix_libcdep.cc +++ lib/sanitizer_common/sanitizer_unwind_posix_libcdep.cc @@ -95,7 +95,7 @@ } struct UnwindTraceArg { - StackTrace *stack; + BufferedStackTrace *stack; uptr max_depth; }; @@ -103,27 +103,27 @@ UnwindTraceArg *arg = (UnwindTraceArg*)param; CHECK_LT(arg->stack->size, arg->max_depth); uptr pc = Unwind_GetIP(ctx); - arg->stack->trace[arg->stack->size++] = pc; + arg->stack->trace_buffer[arg->stack->size++] = pc; if (arg->stack->size == arg->max_depth) return UNWIND_STOP; return UNWIND_CONTINUE; } -void StackTrace::SlowUnwindStack(uptr pc, uptr max_depth) { +void BufferedStackTrace::SlowUnwindStack(uptr pc, uptr max_depth) { CHECK_GE(max_depth, 2); size = 0; UnwindTraceArg arg = {this, Min(max_depth + 1, kStackTraceMax)}; _Unwind_Backtrace(Unwind_Trace, &arg); // We need to pop a few frames so that pc is on top. uptr to_pop = LocatePcInTrace(pc); - // trace[0] belongs to the current function so we always pop it. + // trace_buffer[0] belongs to the current function so we always pop it. 
if (to_pop == 0) to_pop = 1; PopStackFrames(to_pop); - trace[0] = pc; + trace_buffer[0] = pc; } -void StackTrace::SlowUnwindStackWithContext(uptr pc, void *context, - uptr max_depth) { +void BufferedStackTrace::SlowUnwindStackWithContext(uptr pc, void *context, + uptr max_depth) { CHECK_GE(max_depth, 2); if (!unwind_backtrace_signal_arch) { SlowUnwindStack(pc, max_depth); @@ -145,7 +145,7 @@ // +2 compensate for libcorkscrew unwinder returning addresses of call // instructions instead of raw return addresses. for (sptr i = 0; i < res; ++i) - trace[size++] = frames[i].absolute_pc + 2; + trace_buffer[size++] = frames[i].absolute_pc + 2; } } // namespace __sanitizer Index: lib/sanitizer_common/sanitizer_win.cc =================================================================== --- lib/sanitizer_common/sanitizer_win.cc +++ lib/sanitizer_common/sanitizer_win.cc @@ -444,7 +444,7 @@ } #if !SANITIZER_GO -void StackTrace::SlowUnwindStack(uptr pc, uptr max_depth) { +void BufferedStackTrace::SlowUnwindStack(uptr pc, uptr max_depth) { CHECK_GE(max_depth, 2); // FIXME: CaptureStackBackTrace might be too slow for us. // FIXME: Compare with StackWalk64. 
@@ -459,8 +459,8 @@ PopStackFrames(pc_location); } -void StackTrace::SlowUnwindStackWithContext(uptr pc, void *context, - uptr max_depth) { +void BufferedStackTrace::SlowUnwindStackWithContext(uptr pc, void *context, + uptr max_depth) { CONTEXT ctx = *(CONTEXT *)context; STACKFRAME64 stack_frame; memset(&stack_frame, 0, sizeof(stack_frame)); Index: lib/sanitizer_common/tests/sanitizer_stackdepot_test.cc =================================================================== --- lib/sanitizer_common/tests/sanitizer_stackdepot_test.cc +++ lib/sanitizer_common/tests/sanitizer_stackdepot_test.cc @@ -20,30 +20,26 @@ TEST(SanitizerCommon, StackDepotBasic) { uptr s1[] = {1, 2, 3, 4, 5}; u32 i1 = StackDepotPut(s1, ARRAY_SIZE(s1)); - uptr sz1 = 0; - const uptr *sp1 = StackDepotGet(i1, &sz1); - EXPECT_NE(sp1, (uptr*)0); - EXPECT_EQ(sz1, ARRAY_SIZE(s1)); - EXPECT_EQ(internal_memcmp(sp1, s1, sizeof(s1)), 0); + StackTrace stack = StackDepotGet(i1); + EXPECT_NE(stack.trace, (uptr*)0); + EXPECT_EQ(ARRAY_SIZE(s1), stack.size); + EXPECT_EQ(0, internal_memcmp(stack.trace, s1, sizeof(s1))); } TEST(SanitizerCommon, StackDepotAbsent) { - uptr sz1 = 0; - const uptr *sp1 = StackDepotGet((1 << 30) - 1, &sz1); - EXPECT_EQ(sp1, (uptr*)0); + StackTrace stack = StackDepotGet((1 << 30) - 1); + EXPECT_EQ((uptr*)0, stack.trace); } TEST(SanitizerCommon, StackDepotEmptyStack) { u32 i1 = StackDepotPut(0, 0); - uptr sz1 = 0; - const uptr *sp1 = StackDepotGet(i1, &sz1); - EXPECT_EQ(sp1, (uptr*)0); + StackTrace stack = StackDepotGet(i1); + EXPECT_EQ((uptr*)0, stack.trace); } TEST(SanitizerCommon, StackDepotZeroId) { - uptr sz1 = 0; - const uptr *sp1 = StackDepotGet(0, &sz1); - EXPECT_EQ(sp1, (uptr*)0); + StackTrace stack = StackDepotGet(0); + EXPECT_EQ((uptr*)0, stack.trace); } TEST(SanitizerCommon, StackDepotSame) { @@ -51,11 +47,10 @@ u32 i1 = StackDepotPut(s1, ARRAY_SIZE(s1)); u32 i2 = StackDepotPut(s1, ARRAY_SIZE(s1)); EXPECT_EQ(i1, i2); - uptr sz1 = 0; - const uptr *sp1 = StackDepotGet(i1, &sz1); - 
EXPECT_NE(sp1, (uptr*)0); - EXPECT_EQ(sz1, ARRAY_SIZE(s1)); - EXPECT_EQ(internal_memcmp(sp1, s1, sizeof(s1)), 0); + StackTrace stack = StackDepotGet(i1); + EXPECT_NE(stack.trace, (uptr*)0); + EXPECT_EQ(ARRAY_SIZE(s1), stack.size); + EXPECT_EQ(0, internal_memcmp(stack.trace, s1, sizeof(s1))); } TEST(SanitizerCommon, StackDepotSeveral) { @@ -80,12 +75,12 @@ StackDepotReverseMap map; for (uptr i = 0; i < 4; i++) { - uptr sz_depot, sz_map; - const uptr *sp_depot, *sp_map; - sp_depot = StackDepotGet(ids[i], &sz_depot); + uptr sz_map; + const uptr *sp_map; + StackTrace stack = StackDepotGet(ids[i]); sp_map = map.Get(ids[i], &sz_map); - EXPECT_EQ(sz_depot, sz_map); - EXPECT_EQ(sp_depot, sp_map); + EXPECT_EQ(stack.size, sz_map); + EXPECT_EQ(stack.trace, sp_map); } } Index: lib/sanitizer_common/tests/sanitizer_stacktrace_test.cc =================================================================== --- lib/sanitizer_common/tests/sanitizer_stacktrace_test.cc +++ lib/sanitizer_common/tests/sanitizer_stacktrace_test.cc @@ -35,7 +35,7 @@ uptr start_pc; uptr fake_top; uptr fake_bottom; - StackTrace trace; + BufferedStackTrace trace; }; static uptr PC(uptr idx) { @@ -139,7 +139,7 @@ TEST(SlowUnwindTest, ShortStackTrace) { if (StackTrace::WillUseFastUnwind(false)) return; - StackTrace stack; + BufferedStackTrace stack; uptr pc = StackTrace::GetCurrentPc(); uptr bp = GET_CURRENT_FRAME(); stack.Unwind(0, pc, bp, 0, 0, 0, false); Index: lib/tsan/rtl/tsan_rtl_mutex.cc =================================================================== --- lib/tsan/rtl/tsan_rtl_mutex.cc +++ lib/tsan/rtl/tsan_rtl_mutex.cc @@ -475,12 +475,11 @@ InternalScopedBuffer<StackTrace> stacks(2 * DDReport::kMaxLoopSize); uptr dummy_pc = 0x42; for (int i = 0; i < r->n; i++) { - uptr size; for (int j = 0; j < (flags()->second_deadlock_stack ?
2 : 1); j++) { u32 stk = r->loop[i].stk[j]; if (stk) { - const uptr *trace = StackDepotGet(stk, &size); - stacks[i].Init(const_cast<uptr*>(trace), size); + __sanitizer::StackTrace stack = StackDepotGet(stk); + stacks[i].Init(const_cast<uptr*>(stack.trace), stack.size); } else { // Sometimes we fail to extract the stack trace (FIXME: investigate), // but we should still produce some stack trace in the report. Index: lib/tsan/rtl/tsan_rtl_report.cc =================================================================== --- lib/tsan/rtl/tsan_rtl_report.cc +++ lib/tsan/rtl/tsan_rtl_report.cc @@ -107,12 +107,11 @@ ReportStack *SymbolizeStackId(u32 stack_id) { if (stack_id == 0) return 0; - uptr ssz = 0; - const uptr *stack = StackDepotGet(stack_id, &ssz); - if (stack == 0) + __sanitizer::StackTrace stack = StackDepotGet(stack_id); + if (stack.trace == nullptr) return 0; StackTrace trace; - trace.Init(stack, ssz); + trace.Init(stack.trace, stack.size); return SymbolizeStack(trace); } @@ -691,14 +690,15 @@ void PrintCurrentStackSlow() { #ifndef TSAN_GO - __sanitizer::StackTrace *ptrace = new(internal_alloc(MBlockStackTrace, - sizeof(__sanitizer::StackTrace))) __sanitizer::StackTrace; + __sanitizer::BufferedStackTrace *ptrace = new( + internal_alloc(MBlockStackTrace, sizeof(__sanitizer::BufferedStackTrace))) + __sanitizer::BufferedStackTrace(); ptrace->Unwind(kStackTraceMax, __sanitizer::StackTrace::GetCurrentPc(), 0, 0, 0, 0, false); for (uptr i = 0; i < ptrace->size / 2; i++) { - uptr tmp = ptrace->trace[i]; - ptrace->trace[i] = ptrace->trace[ptrace->size - i - 1]; - ptrace->trace[ptrace->size - i - 1] = tmp; + uptr tmp = ptrace->trace_buffer[i]; + ptrace->trace_buffer[i] = ptrace->trace_buffer[ptrace->size - i - 1]; + ptrace->trace_buffer[ptrace->size - i - 1] = tmp; } StackTrace trace; trace.Init(ptrace->trace, ptrace->size); Index: lib/tsan/rtl/tsan_stack_trace.h =================================================================== --- lib/tsan/rtl/tsan_stack_trace.h +++
lib/tsan/rtl/tsan_stack_trace.h @@ -13,16 +13,11 @@ #ifndef TSAN_STACK_TRACE_H #define TSAN_STACK_TRACE_H -//#include "sanitizer_common/sanitizer_atomic.h" -//#include "sanitizer_common/sanitizer_common.h" -//#include "sanitizer_common/sanitizer_deadlock_detector_interface.h" #include "tsan_defs.h" -//#include "tsan_clock.h" -//#include "tsan_mutex.h" -//#include "tsan_dense_alloc.h" namespace __tsan { +// FIXME: Delete this class in favor of __sanitizer::StackTrace. class StackTrace { public: StackTrace(); @@ -38,7 +33,6 @@ uptr Size() const; uptr Get(uptr i) const; const uptr *Begin() const; - void CopyFrom(const StackTrace& other); private: uptr n_; Index: lib/tsan/rtl/tsan_stack_trace.cc =================================================================== --- lib/tsan/rtl/tsan_stack_trace.cc +++ lib/tsan/rtl/tsan_stack_trace.cc @@ -10,7 +10,6 @@ // This file is a part of ThreadSanitizer (TSan), a race detector. // //===----------------------------------------------------------------------===// -//#include "sanitizer_common/sanitizer_placement_new.h" #include "tsan_stack_trace.h" #include "tsan_rtl.h" #include "tsan_mman.h" @@ -87,11 +86,6 @@ } } -void StackTrace::CopyFrom(const StackTrace& other) { - Reset(); - Init(other.Begin(), other.Size()); -} - bool StackTrace::IsEmpty() const { return n_ == 0; } Index: lib/ubsan/ubsan_diag.cc =================================================================== --- lib/ubsan/ubsan_diag.cc +++ lib/ubsan/ubsan_diag.cc @@ -33,7 +33,7 @@ // under ASan). if (StackTrace::WillUseFastUnwind(false)) return; - StackTrace stack; + BufferedStackTrace stack; stack.Unwind(kStackTraceMax, pc, bp, 0, 0, 0, false); stack.Print(); }