diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_chained_origin_depot.h b/compiler-rt/lib/sanitizer_common/sanitizer_chained_origin_depot.h
--- a/compiler-rt/lib/sanitizer_common/sanitizer_chained_origin_depot.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_chained_origin_depot.h
@@ -53,7 +53,9 @@

   bool eq(hash_type hash, const args_type &args) const;

-  static uptr storage_size(const args_type &args);
+  static uptr allocated();
+
+  static ChainedOriginDepotNode *allocate(const args_type &args);

   static hash_type hash(const args_type &args);

diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_chained_origin_depot.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_chained_origin_depot.cpp
--- a/compiler-rt/lib/sanitizer_common/sanitizer_chained_origin_depot.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_chained_origin_depot.cpp
@@ -13,14 +13,21 @@

 namespace __sanitizer {

+static PersistentAllocator allocator;
+
 bool ChainedOriginDepot::ChainedOriginDepotNode::eq(
     hash_type hash, const args_type &args) const {
   return here_id == args.here_id && prev_id == args.prev_id;
 }

-uptr ChainedOriginDepot::ChainedOriginDepotNode::storage_size(
-    const args_type &args) {
-  return sizeof(ChainedOriginDepotNode);
+uptr ChainedOriginDepot::ChainedOriginDepotNode::allocated() {
+  return allocator.allocated();
+}
+
+ChainedOriginDepot::ChainedOriginDepotNode *
+ChainedOriginDepot::ChainedOriginDepotNode::allocate(const args_type &args) {
+  return static_cast<ChainedOriginDepotNode *>(
+      allocator.alloc(sizeof(ChainedOriginDepotNode)));
 }

 /* This is murmur2 hash for the 64->32 bit case.
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_persistent_allocator.h b/compiler-rt/lib/sanitizer_common/sanitizer_persistent_allocator.h
--- a/compiler-rt/lib/sanitizer_common/sanitizer_persistent_allocator.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_persistent_allocator.h
@@ -23,13 +23,19 @@
 class PersistentAllocator {
  public:
   void *alloc(uptr size);
+  uptr allocated() const {
+    SpinMutexLock l(&mtx);
+    return atomic_load_relaxed(&mapped_size) +
+           atomic_load_relaxed(&region_pos) - atomic_load_relaxed(&region_end);
+  }

 private:
   void *tryAlloc(uptr size);
   void *refillAndAlloc(uptr size);
-  StaticSpinMutex mtx;  // Protects alloc of new blocks for region allocator.
+  mutable StaticSpinMutex mtx;  // Protects alloc of new blocks.
   atomic_uintptr_t region_pos;  // Region allocator for Node's.
   atomic_uintptr_t region_end;
+  atomic_uintptr_t mapped_size;
 };

 inline void *PersistentAllocator::tryAlloc(uptr size) {
@@ -51,11 +57,6 @@
   return refillAndAlloc(size);
 }

-extern PersistentAllocator thePersistentAllocator;
-inline void *PersistentAlloc(uptr sz) {
-  return thePersistentAllocator.alloc(sz);
-}
-
 }  // namespace __sanitizer

 #endif  // SANITIZER_PERSISTENT_ALLOCATOR_H
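Note on the new allocated() accounting: mapped_size counts every byte the allocator has ever mapped, and region_end - region_pos is the free tail of the current region, so their difference is the number of bytes no longer available for future allocations. Below is a minimal single-threaded model of that invariant; RegionAllocatorModel and its members are illustrative names, not the sanitizer_common API, and the real allocated() must hold the spin mutex because refillAndAlloc swaps region_pos and region_end together.

#include <cassert>
#include <cstdint>
#include <cstdlib>

class RegionAllocatorModel {
 public:
  void *alloc(size_t size) {
    if (pos_ + size > end_) refill(size);
    void *p = reinterpret_cast<void *>(pos_);
    pos_ += size;
    return p;
  }
  // Mirrors allocated(): mapped_size + region_pos - region_end, i.e. all
  // mapped bytes minus the free tail of the current region. Abandoned
  // tails of old regions keep counting, so this is an upper bound on the
  // bytes actually handed out.
  size_t allocated() const { return mapped_ + pos_ - end_; }

 private:
  void refill(size_t size) {
    size_t region_size = size > kRegionSize ? size : kRegionSize;
    // malloc stands in for MmapOrDie in this sketch.
    char *mem = static_cast<char *>(malloc(region_size));
    mapped_ += region_size;
    pos_ = reinterpret_cast<uintptr_t>(mem);
    end_ = pos_ + region_size;
  }
  static constexpr size_t kRegionSize = 64 * 1024;  // illustrative region size
  uintptr_t pos_ = 0;
  uintptr_t end_ = 0;
  size_t mapped_ = 0;
};

int main() {
  RegionAllocatorModel a;
  a.alloc(100);
  a.alloc(200);
  assert(a.allocated() == 300);  // both fit in the first region
  return 0;
}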
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_persistent_allocator.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_persistent_allocator.cpp
--- a/compiler-rt/lib/sanitizer_common/sanitizer_persistent_allocator.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_persistent_allocator.cpp
@@ -13,8 +13,6 @@

 namespace __sanitizer {

-PersistentAllocator thePersistentAllocator;
-
 void *PersistentAllocator::refillAndAlloc(uptr size) {
   // If failed, lock, retry and alloc new superblock.
   SpinMutexLock l(&mtx);
@@ -27,6 +25,7 @@
     if (allocsz < size)
       allocsz = size;
     uptr mem = (uptr)MmapOrDie(allocsz, "stack depot");
+    atomic_fetch_add(&mapped_size, allocsz, memory_order_relaxed);
     atomic_store(&region_end, mem + allocsz, memory_order_release);
     atomic_store(&region_pos, mem, memory_order_release);
   }
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp
--- a/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp
@@ -14,10 +14,13 @@

 #include "sanitizer_common.h"
 #include "sanitizer_hash.h"
+#include "sanitizer_persistent_allocator.h"
 #include "sanitizer_stackdepotbase.h"

 namespace __sanitizer {

+static PersistentAllocator allocator;
+
 struct StackDepotNode {
   using hash_type = u64;
   hash_type stack_hash;
@@ -36,8 +39,10 @@
   bool eq(hash_type hash, const args_type &args) const {
     return hash == stack_hash;
   }
-  static uptr storage_size(const args_type &args) {
-    return sizeof(StackDepotNode) + (args.size - 1) * sizeof(uptr);
+  static uptr allocated() { return allocator.allocated(); }
+  static StackDepotNode *allocate(const args_type &args) {
+    uptr alloc_size = sizeof(StackDepotNode) + (args.size - 1) * sizeof(uptr);
+    return (StackDepotNode *)allocator.alloc(alloc_size);
   }
   static hash_type hash(const args_type &args) {
     MurMur2Hash64Builder H(args.size * sizeof(uptr));
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_stackdepotbase.h b/compiler-rt/lib/sanitizer_common/sanitizer_stackdepotbase.h
--- a/compiler-rt/lib/sanitizer_common/sanitizer_stackdepotbase.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_stackdepotbase.h
@@ -33,7 +33,7 @@
   // Retrieves a stored stack trace by the id.
   args_type Get(u32 id);

-  StackDepotStats GetStats() const { return stats; }
+  StackDepotStats GetStats() const { return {n_uniq_ids, Node::allocated()}; }

   void LockAll();
   void UnlockAll();
@@ -55,7 +55,7 @@
   atomic_uintptr_t tab[kTabSize];   // Hash table of Node's.
   atomic_uint32_t seq[kPartCount];  // Unique id generators.

-  StackDepotStats stats;
+  uptr n_uniq_ids;

   friend class StackDepotReverseMap;
 };
@@ -120,14 +120,12 @@
   }
   uptr part = (h % kTabSize) / kPartSize;
   u32 id = atomic_fetch_add(&seq[part], 1, memory_order_relaxed) + 1;
-  stats.n_uniq_ids++;
+  n_uniq_ids++;
   CHECK_LT(id, kMaxId);
   id |= part << kPartShift;
   CHECK_NE(id, 0);
   CHECK_EQ(id & (((u32)-1) >> kReservedBits), id);
-  uptr memsz = Node::storage_size(args);
-  s = (Node *)PersistentAlloc(memsz);
-  stats.allocated += memsz;
+  s = Node::allocate(args);
   s->id = id;
   s->store(args, h);
   s->link = s2;
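Note on StackDepotNode::allocate(): the (args.size - 1) * sizeof(uptr) term oversizes the allocation so the node's one-element trace array can hold the whole stack trace. A hypothetical sketch of that trailing-array layout; NodeSketch, AllocateNode, and the fields are illustrative stand-ins, not the real StackDepotNode.

#include <cstdint>
#include <cstdlib>
#include <cstring>

struct NodeSketch {
  uint64_t stack_hash;
  uint32_t size;
  uintptr_t trace[1];  // really holds `size` entries; extra bytes follow
};

NodeSketch *AllocateNode(const uintptr_t *frames, uint32_t n) {
  // Oversize by (n - 1) slots so trace[] can hold all n frames.
  size_t bytes = sizeof(NodeSketch) + (n - 1) * sizeof(uintptr_t);
  NodeSketch *s = static_cast<NodeSketch *>(malloc(bytes));
  s->stack_hash = 0;  // a real depot would store the computed hash here
  s->size = n;
  memcpy(s->trace, frames, n * sizeof(uintptr_t));
  return s;
}

int main() {
  const uintptr_t pcs[] = {0x401000, 0x401040, 0x401080};
  NodeSketch *s = AllocateNode(pcs, 3);
  return s->trace[2] == 0x401080 ? 0 : 1;
}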