diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_chained_origin_depot.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_chained_origin_depot.cpp
--- a/compiler-rt/lib/sanitizer_common/sanitizer_chained_origin_depot.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_chained_origin_depot.cpp
@@ -37,7 +37,9 @@
 
   static bool is_valid(const args_type &args);
 
-  void store(u32 id, const args_type &args, hash_type other_hash);
+  bool store(u32 id, const args_type &args, hash_type other_hash);
+
+  void post_process() {}
 
   args_type load(u32 id) const;
 
@@ -105,10 +107,11 @@
 
 bool ChainedOriginDepotNode::is_valid(const args_type &args) { return true; }
 
-void ChainedOriginDepotNode::store(u32 id, const args_type &args,
+bool ChainedOriginDepotNode::store(u32 id, const args_type &args,
                                    hash_type other_hash) {
   here_id = args.here_id;
   prev_id = args.prev_id;
+  return false;
 }
 
 ChainedOriginDepotNode::args_type ChainedOriginDepotNode::load(u32 id) const {
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.h b/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.h
--- a/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.h
@@ -29,13 +29,17 @@
   static_assert(u64(kBlockCount) * kBlockSizeFrames == 1ull << (sizeof(Id) * 8),
                 "");
 
-  Id Store(const StackTrace &trace);
+  Id Store(const StackTrace &trace,
+           uptr *pack /* number of blocks completed by this call */);
   StackTrace Load(Id id) const;
   uptr Allocated() const;
 
+  void Pack();
+
   void TestOnlyUnmap();
 
  private:
+  friend class StackStoreTest;
   static constexpr uptr GetBlockIdx(uptr frame_idx) {
     return frame_idx / kBlockSizeFrames;
   }
@@ -56,7 +60,7 @@
     return id + 1;  // Avoid zero as id.
   }
 
-  uptr *Alloc(uptr count, uptr *idx);
+  uptr *Alloc(uptr count, uptr *idx, uptr *pack);
 
   // Total number of allocated frames.
   atomic_uintptr_t total_frames_ = {};
@@ -64,7 +68,10 @@
   // Each block holds pointers to exactly kBlockSizeFrames frames.
   class BlockInfo {
     atomic_uintptr_t data_;
-    StaticSpinMutex mtx_;  // Protects alloc of new blocks.
+    // Counter to track store progress to know when we can Pack() the block.
+    atomic_uint32_t stored_;
+    // Protects alloc of new blocks.
+    StaticSpinMutex mtx_;
 
     uptr *Create();
 
@@ -72,6 +79,7 @@
     uptr *Get() const;
     uptr *GetOrCreate();
     void TestOnlyUnmap();
+    bool Stored(uptr n);
   };
 
   BlockInfo blocks_[kBlockCount] = {};
 };
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.cpp
--- a/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.cpp
@@ -33,14 +33,16 @@
 };
 }  // namespace
 
-StackStore::Id StackStore::Store(const StackTrace &trace) {
+StackStore::Id StackStore::Store(const StackTrace &trace, uptr *pack) {
   if (!trace.size && !trace.tag)
     return 0;
   StackTraceHeader h(trace);
-  uptr idx;
-  uptr *stack_trace = Alloc(h.size + 1, &idx);
+  uptr idx = 0;
+  *pack = 0;
+  uptr *stack_trace = Alloc(h.size + 1, &idx, pack);
   *stack_trace = h.ToUptr();
   internal_memcpy(stack_trace + 1, trace.trace, h.size * sizeof(uptr));
+  *pack += blocks_[GetBlockIdx(idx)].Stored(h.size + 1);
   return OffsetToId(idx);
 }
 
@@ -64,13 +66,14 @@
          sizeof(*this);
 }
 
-uptr *StackStore::Alloc(uptr count, uptr *idx) {
+uptr *StackStore::Alloc(uptr count, uptr *idx, uptr *pack) {
   for (;;) {
     // Optimistic lock-free allocation: essentially try to bump the
     // total_frames_.
     uptr start = atomic_fetch_add(&total_frames_, count, memory_order_relaxed);
     uptr block_idx = GetBlockIdx(start);
-    if (LIKELY(block_idx == GetBlockIdx(start + count - 1))) {
+    uptr last_idx = GetBlockIdx(start + count - 1);
+    if (LIKELY(block_idx == last_idx)) {
       // Fits into a single block.
       CHECK_LT(block_idx, ARRAY_SIZE(blocks_));
       *idx = start;
@@ -78,9 +81,19 @@
     }
     // Retry. We can't use a range allocated in two different blocks.
+    CHECK_LE(count, kBlockSizeFrames);
+    uptr in_first = kBlockSizeFrames - GetInBlockIdx(start);
+    // Mark the tail/head of these blocks as "stored" to avoid waiting before
+    // we can Pack().
+    *pack += blocks_[block_idx].Stored(in_first);
+    *pack += blocks_[last_idx].Stored(count - in_first);
   }
 }
 
+void StackStore::Pack() {
+  // TODO
+}
+
 void StackStore::TestOnlyUnmap() {
   for (BlockInfo &b : blocks_) b.TestOnlyUnmap();
   internal_memset(this, 0, sizeof(*this));
@@ -113,4 +126,9 @@
   UnmapOrDie(ptr, StackStore::kBlockSizeBytes);
 }
 
+bool StackStore::BlockInfo::Stored(uptr n) {
+  return n + atomic_fetch_add(&stored_, n, memory_order_release) ==
+         kBlockSizeFrames;
+}
+
 }  // namespace __sanitizer
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp
--- a/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp
@@ -41,7 +41,9 @@
   static bool is_valid(const args_type &args) {
     return args.size > 0 && args.trace;
   }
-  void store(u32 id, const args_type &args, hash_type hash);
+  // Returns true if a post_process() call is needed.
+  bool store(u32 id, const args_type &args, hash_type hash);
+  void post_process();
   args_type load(u32 id) const;
   static StackDepotHandle get_handle(u32 id);
 
@@ -72,11 +74,15 @@
   return stackStore.Allocated() + useCounts.MemoryUsage();
 }
 
-void StackDepotNode::store(u32 id, const args_type &args, hash_type hash) {
+bool StackDepotNode::store(u32 id, const args_type &args, hash_type hash) {
   stack_hash = hash;
-  store_id = stackStore.Store(args);
+  uptr pack = 0;
+  store_id = stackStore.Store(args, &pack);
+  return pack;
 }
 
+void StackDepotNode::post_process() { stackStore.Pack(); }
+
 StackDepotNode::args_type StackDepotNode::load(u32 id) const {
   if (!store_id)
     return {};
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_stackdepotbase.h b/compiler-rt/lib/sanitizer_common/sanitizer_stackdepotbase.h
--- a/compiler-rt/lib/sanitizer_common/sanitizer_stackdepotbase.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_stackdepotbase.h
@@ -140,9 +140,13 @@
   CHECK_EQ(s & kUnlockMask, s);
   CHECK_EQ(s & (((u32)-1) >> kReservedBits), s);
   Node &new_node = nodes[s];
-  new_node.store(s, args, h);
+  bool post_process = new_node.store(s, args, h);
   new_node.link = s2;
   unlock(p, s);
+  if (UNLIKELY(post_process)) {
+    // Heavy work can be done here after the bucket unlock.
+    new_node.post_process();
+  }
   if (inserted) *inserted = true;
   return s;
 }
diff --git a/compiler-rt/lib/sanitizer_common/tests/sanitizer_stack_store_test.cpp b/compiler-rt/lib/sanitizer_common/tests/sanitizer_stack_store_test.cpp
--- a/compiler-rt/lib/sanitizer_common/tests/sanitizer_stack_store_test.cpp
+++ b/compiler-rt/lib/sanitizer_common/tests/sanitizer_stack_store_test.cpp
@@ -12,6 +12,7 @@
 #include <vector>
 
 #include "gtest/gtest.h"
+#include "sanitizer_atomic.h"
 #include "sanitizer_hash.h"
 #include "sanitizer_stacktrace.h"
 
@@ -40,19 +41,39 @@
     };
   }
 
+  using BlockInfo = StackStore::BlockInfo;
+
+  uptr GetTotalFramesCount() const {
+    return atomic_load_relaxed(&store_.total_frames_);
+  }
+
+  uptr CountReadyToPackBlocks() {
+    uptr res = 0;
+    for (BlockInfo& b : store_.blocks_) res += b.Stored(0);
+    return res;
+  }
+
+  uptr IdToOffset(StackStore::Id id) const { return store_.IdToOffset(id); }
+
+  static constexpr uptr kBlockSizeFrames = StackStore::kBlockSizeFrames;
+
   StackStore store_ = {};
 };
 
 TEST_F(StackStoreTest, Empty) {
   uptr before = store_.Allocated();
-  EXPECT_EQ(0u, store_.Store({}));
+  uptr pack = 0;
+  EXPECT_EQ(0u, store_.Store({}, &pack));
   uptr after = store_.Allocated();
   EXPECT_EQ(before, after);
 }
 
 TEST_F(StackStoreTest, Basic) {
   std::vector<StackStore::Id> ids;
-  ForEachTrace([&](const StackTrace& s) { ids.push_back(store_.Store(s)); });
+  ForEachTrace([&](const StackTrace& s) {
+    uptr pack = 0;
+    ids.push_back(store_.Store(s, &pack));
+  });
 
   auto id = ids.begin();
   ForEachTrace([&](const StackTrace& s) {
@@ -67,11 +88,35 @@
 TEST_F(StackStoreTest, Allocated) {
   EXPECT_LE(store_.Allocated(), 0x100000u);
   std::vector<StackStore::Id> ids;
-  ForEachTrace([&](const StackTrace& s) { ids.push_back(store_.Store(s)); });
+  ForEachTrace([&](const StackTrace& s) {
+    uptr pack = 0;
+    ids.push_back(store_.Store(s, &pack));
+  });
   EXPECT_NEAR(store_.Allocated(), FIRST_32_SECOND_64(500000000u, 1000000000u),
               FIRST_32_SECOND_64(50000000u, 100000000u));
   store_.TestOnlyUnmap();
   EXPECT_LE(store_.Allocated(), 0x100000u);
 }
 
+TEST_F(StackStoreTest, ReadyToPack) {
+  uptr next_pack = kBlockSizeFrames;
+  uptr total_ready = 0;
+  ForEachTrace(
+      [&](const StackTrace& s) {
+        uptr pack = 0;
+        StackStore::Id id = store_.Store(s, &pack);
+        uptr end_idx = IdToOffset(id) + 1 + s.size;
+        if (end_idx >= next_pack) {
+          EXPECT_EQ(1u, pack);
+          next_pack += kBlockSizeFrames;
+        } else {
+          EXPECT_EQ(0u, pack);
+        }
+        total_ready += pack;
+        EXPECT_EQ(CountReadyToPackBlocks(), total_ready);
+      },
+      100000);
+  EXPECT_EQ(GetTotalFramesCount() / kBlockSizeFrames, total_ready);
+}
+
 }  // namespace __sanitizer
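
Editor's note: the sketch below is not part of the patch. It is a minimal, standalone illustration of the completion-counting technique that BlockInfo::Stored() introduces above: every writer bumps a per-block atomic counter by the number of frames it just stored, and the single call whose addition brings the counter to exactly kBlockSizeFrames learns that the block is complete and ready to Pack(). All names here (sketch::Block, kFramesPerBlock) are invented for the example; only the arithmetic mirrors the patch.

// sketch_stored_counter.cpp -- illustrative only, assuming std::atomic
// fetch_add semantics equivalent to sanitizer_common's atomic_fetch_add.
#include <atomic>
#include <cassert>
#include <cstdint>

namespace sketch {

constexpr uint32_t kFramesPerBlock = 8;  // Tiny block size for demonstration.

struct Block {
  std::atomic<uint32_t> stored{0};

  // Returns true for exactly one caller: the one whose addition makes the
  // count reach kFramesPerBlock. fetch_add returns the previous value, so
  // the comparison holds only for the store that completes the block.
  // memory_order_release mirrors the patch and publishes the frame writes
  // performed before this call.
  bool Stored(uint32_t n) {
    return n + stored.fetch_add(n, std::memory_order_release) ==
           kFramesPerBlock;
  }
};

}  // namespace sketch

int main() {
  sketch::Block b;
  // Stores of 3, 2 and 3 frames fill the 8-frame block; only the last call
  // observes completion, so packing would be triggered exactly once even
  // with several concurrent writers.
  assert(!b.Stored(3));
  assert(!b.Stored(2));
  assert(b.Stored(3));
  // A zero-sized call leaves the counter unchanged but still reports whether
  // the block is full; CountReadyToPackBlocks() in the test uses Stored(0)
  // exactly this way.
  assert(b.Stored(0));
  return 0;
}

This is also why Alloc() marks the unusable tail and head with Stored() when an allocation would straddle a block boundary: without crediting those skipped frames, a block containing a hole would never reach kBlockSizeFrames and would never be reported as ready to pack.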