diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.h b/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.h
--- a/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.h
@@ -25,7 +25,9 @@
  public:
   constexpr StackStore() = default;
 
-  using Id = uptr;
+  using Id = u32;  // Enough for 2^32 * sizeof(uptr) bytes of traces.
+  static_assert(u64(kBlockCount) * kBlockSizeFrames == 1ull << (sizeof(Id) * 8),
+                "");
 
   Id Store(const StackTrace &trace);
   StackTrace Load(Id id) const;
@@ -42,7 +44,19 @@
     return frame_idx % kBlockSizeFrames;
   }
 
-  uptr *Alloc(uptr count);
+  static constexpr uptr IdToOffset(Id id) {
+    CHECK_NE(id, 0);
+    return id - 1;  // Avoid zero as id.
+  }
+
+  static constexpr uptr OffsetToId(Id id) {
+    // This maps UINT32_MAX to 0, and it will be retrieved as an empty stack.
+    // But this is not a problem, as we will not be able to store anything
+    // after that anyway.
+    return id + 1;  // Avoid zero as id.
+  }
+
+  uptr *Alloc(uptr count, uptr *idx);
 
   // Total number of allocated frames.
   atomic_uintptr_t total_frames_ = {};
@@ -53,9 +67,9 @@
     StaticSpinMutex mtx_;  // Protects alloc of new blocks.
 
     uptr *Create();
-    uptr *Get() const;
 
    public:
+    uptr *Get() const;
     uptr *GetOrCreate();
     void TestOnlyUnmap();
   };
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.cpp
--- a/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.cpp
@@ -37,16 +37,23 @@
   if (!trace.size && !trace.tag)
     return 0;
   StackTraceHeader h(trace);
-  uptr *stack_trace = Alloc(h.size + 1);
+  uptr idx;
+  uptr *stack_trace = Alloc(h.size + 1, &idx);
   *stack_trace = h.ToUptr();
   internal_memcpy(stack_trace + 1, trace.trace, h.size * sizeof(uptr));
-  return reinterpret_cast<Id>(stack_trace);
+  return OffsetToId(idx);
 }
 
 StackTrace StackStore::Load(Id id) const {
   if (!id)
     return {};
-  const uptr *stack_trace = reinterpret_cast<const uptr *>(id);
+  uptr idx = IdToOffset(id);
+  uptr block_idx = GetBlockIdx(idx);
+  CHECK_LT(block_idx, ARRAY_SIZE(blocks_));
+  uptr *stack_trace = blocks_[block_idx].Get();
+  if (!stack_trace)
+    return {};
+  stack_trace += GetInBlockIdx(idx);
   StackTraceHeader h(*stack_trace);
   return StackTrace(stack_trace + 1, h.size, h.tag);
 }
@@ -57,7 +64,7 @@
          sizeof(*this);
 }
 
-uptr *StackStore::Alloc(uptr count) {
+uptr *StackStore::Alloc(uptr count, uptr *idx) {
   for (;;) {
     // Optimistic lock-free allocation, essentially try to bump the
     // total_frames_.
@@ -66,6 +73,7 @@
     if (LIKELY(block_idx == GetBlockIdx(start + count - 1))) {
       // Fits into a single block.
       CHECK_LT(block_idx, ARRAY_SIZE(blocks_));
+      *idx = start;
       return blocks_[block_idx].GetOrCreate() + GetInBlockIdx(start);
     }