Index: asan/asan_stats.cc =================================================================== --- asan/asan_stats.cc +++ asan/asan_stats.cc @@ -129,8 +129,8 @@ BlockingMutexLock lock(&print_lock); stats.Print(); StackDepotStats *stack_depot_stats = StackDepotGetStats(); - Printf("Stats: StackDepot: %zd ids; %zdM mapped\n", - stack_depot_stats->n_uniq_ids, stack_depot_stats->mapped >> 20); + Printf("Stats: StackDepot: %zd ids; %zdM allocated\n", + stack_depot_stats->n_uniq_ids, stack_depot_stats->allocated >> 20); PrintInternalAllocatorStats(); } Index: msan/msan_report.cc =================================================================== --- msan/msan_report.cc +++ msan/msan_report.cc @@ -121,7 +121,7 @@ // FIXME: we want this at normal exit, too! // FIXME: but only with verbosity=1 or something Printf("Unique heap origins: %zu\n", stack_depot_stats->n_uniq_ids); - Printf("Stack depot mapped bytes: %zu\n", stack_depot_stats->mapped); + Printf("Stack depot allocated bytes: %zu\n", stack_depot_stats->allocated); } class OriginSet { Index: sanitizer_common/CMakeLists.txt =================================================================== --- sanitizer_common/CMakeLists.txt +++ sanitizer_common/CMakeLists.txt @@ -3,6 +3,7 @@ set(SANITIZER_SOURCES sanitizer_allocator.cc + sanitizer_chainedorigindepot.cc sanitizer_common.cc sanitizer_coverage.cc sanitizer_deadlock_detector1.cc @@ -18,6 +19,7 @@ sanitizer_printf.cc sanitizer_procmaps_linux.cc sanitizer_procmaps_mac.cc + sanitizer_region_allocator.cc sanitizer_stackdepot.cc sanitizer_stacktrace.cc sanitizer_suppressions.cc @@ -49,6 +51,7 @@ sanitizer_atomic_msvc.h sanitizer_bitvector.h sanitizer_bvgraph.h + sanitizer_chainedorigindepot.h sanitizer_common.h sanitizer_common_interceptors.inc sanitizer_common_interceptors_ioctl.inc @@ -71,8 +74,10 @@ sanitizer_platform_limits_posix.h sanitizer_procmaps.h sanitizer_quarantine.h + sanitizer_region_allocator.h sanitizer_report_decorator.h sanitizer_stackdepot.h + sanitizer_stackdepotbase.h sanitizer_stacktrace.h sanitizer_stoptheworld.h sanitizer_suppressions.h Index: sanitizer_common/sanitizer_chainedorigindepot.h =================================================================== --- sanitizer_common/sanitizer_chainedorigindepot.h +++ sanitizer_common/sanitizer_chainedorigindepot.h @@ -0,0 +1,29 @@ +//===-- sanitizer_chainedorigindepot.h --------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is shared between AddressSanitizer and ThreadSanitizer +// run-time libraries. +//===----------------------------------------------------------------------===// +#ifndef SANITIZER_CHAINEDORIGINDEPOT_H +#define SANITIZER_CHAINEDORIGINDEPOT_H + +#include "sanitizer_common.h" +#include "sanitizer_internal_defs.h" +#include "sanitizer_stackdepotbase.h" + +namespace __sanitizer { + +StackDepotStats *ChainedOriginDepotGetStats(); +u32 ChainedOriginDepotPut(u32 here_id, u32 prev_id); +// Retrieves a stored stack trace by the id. 
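+// For the chained origin depot the stored value is one link of an origin
+// chain: the return value is here_id and *other receives prev_id.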
+u32 ChainedOriginDepotGet(u32 id, u32 *other); + +} // namespace __sanitizer + +#endif // SANITIZER_CHAINEDORIGINDEPOT_H Index: sanitizer_common/sanitizer_chainedorigindepot.cc =================================================================== --- sanitizer_common/sanitizer_chainedorigindepot.cc +++ sanitizer_common/sanitizer_chainedorigindepot.cc @@ -0,0 +1,77 @@ +//===-- sanitizer_chainedorigindepot.cc -----------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is shared between AddressSanitizer and ThreadSanitizer +// run-time libraries. +//===----------------------------------------------------------------------===// + +#include "sanitizer_chainedorigindepot.h" +#include "sanitizer_common.h" +#include "sanitizer_internal_defs.h" + +struct ChainedOriginDepotDesc { + u32 here_id; + u32 prev_id; + u32 hash() const { return 0; } + bool is_valid() { return true; } +}; + +struct ChainedOriginDepotNode { + ChainedOriginDepotNode *link; + u32 id; + u32 here_id; + u32 prev_id; + + typedef ChainedOriginDepotDesc args_type; + bool eq(u32 hash, const args_type &args) const { + return here_id == args.here_id && prev_id == args.prev_id; + } + static uptr storage_size(const args_type &args) { + return sizeof(ChainedOriginDepotNode); + } + void store(const args_type &args, u32 other_hash) { + here_id = args.here_id; + prev_id = args.prev_id; + } + args_type load() const { + args_type ret = {here_id, prev_id}; + return ret; + } + struct Handle { + ChainedOriginDepotNode *node_; + Handle() : node_(0) {} + explicit Handle(ChainedOriginDepotNode *node) : node_(node) {} + bool valid() { return node_; } + u32 id() { return node_->id; } + int here_id() { return node_->here_id; } + int prev_id() { return node_->prev_id; } + }; + Handle get_handle() { return Handle(this); } + + typedef Handle handle_type; +}; + +static StackDepotBase chainedOriginDepot; + +StackDepotStats *ChainedOriginDepotGetStats() { + return chainedOriginDepot.GetStats(); +} + +u32 ChainedOriginDepotPut(u32 here_id, u32 prev_id) { + ChainedOriginDepotDesc desc = {here_id, prev_id}; + ChainedOriginDepotNode::Handle h = chainedOriginDepot.Put(desc); + return h.valid() ? h.id() : 0; +} + +// Retrieves a stored stack trace by the id. +u32 ChainedOriginDepotGet(u32 id, u32 *other) { + ChainedOriginDepotDesc desc = chainedOriginDepot.Get(id); + *other = desc.prev_id; + return desc.here_id; +} Index: sanitizer_common/sanitizer_region_allocator.h =================================================================== --- sanitizer_common/sanitizer_region_allocator.h +++ sanitizer_common/sanitizer_region_allocator.h @@ -0,0 +1,69 @@ +//===-- sanitizer_region_allocator.h ----------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is shared between AddressSanitizer and ThreadSanitizer +// run-time libraries. 
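+//
+// RegionAllocator is a lock-free bump allocator: memory is carved out of
+// 64 KB superblocks (or larger, if a single request needs it) obtained with
+// MmapOrDie, and individual allocations are never freed or unmapped.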
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_REGION_ALLOCATOR_H
+#define SANITIZER_REGION_ALLOCATOR_H
+
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_mutex.h"
+#include "sanitizer_atomic.h"
+#include "sanitizer_common.h"
+
+namespace __sanitizer {
+
+class RegionAllocator {
+ public:
+  void *alloc(uptr size);
+
+ private:
+  void *tryAlloc(uptr size);
+  StaticSpinMutex mtx;  // Protects alloc of new blocks for region allocator.
+  atomic_uintptr_t region_pos;  // Region allocator for Node's.
+  atomic_uintptr_t region_end;
+};
+
+inline void *RegionAllocator::tryAlloc(uptr size) {
+  // Optimistic lock-free allocation, essentially try to bump the region ptr.
+  for (;;) {
+    uptr cmp = atomic_load(&region_pos, memory_order_acquire);
+    uptr end = atomic_load(&region_end, memory_order_acquire);
+    if (cmp == 0 || cmp + size > end) return 0;
+    if (atomic_compare_exchange_weak(&region_pos, &cmp, cmp + size,
+                                     memory_order_acquire))
+      return (void *)cmp;
+  }
+}
+
+inline void *RegionAllocator::alloc(uptr size) {
+  // First, try to allocate optimistically.
+  void *s = tryAlloc(size);
+  if (s) return s;
+  // If failed, lock, retry and alloc new superblock.
+  SpinMutexLock l(&mtx);
+  for (;;) {
+    s = tryAlloc(size);
+    if (s) return s;
+    atomic_store(&region_pos, 0, memory_order_relaxed);
+    uptr allocsz = 64 * 1024;
+    if (allocsz < size) allocsz = size;
+    uptr mem = (uptr)MmapOrDie(allocsz, "stack depot");
+    atomic_store(&region_end, mem + allocsz, memory_order_release);
+    atomic_store(&region_pos, mem, memory_order_release);
+  }
+}
+
+extern RegionAllocator theRegionAllocator;
+inline void *RegionAlloc(uptr sz) { return theRegionAllocator.alloc(sz); }
+
+}  // namespace __sanitizer
+
+#endif  // SANITIZER_REGION_ALLOCATOR_H
Index: sanitizer_common/sanitizer_region_allocator.cc
===================================================================
--- sanitizer_common/sanitizer_region_allocator.cc
+++ sanitizer_common/sanitizer_region_allocator.cc
@@ -0,0 +1,19 @@
+//===-- sanitizer_region_allocator.cc ---------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries.
+//===----------------------------------------------------------------------===//
+#include "sanitizer_region_allocator.h"
+
+namespace __sanitizer {
+
+RegionAllocator theRegionAllocator;
+
+}  // namespace __sanitizer
Index: sanitizer_common/sanitizer_stackdepot.h
===================================================================
--- sanitizer_common/sanitizer_stackdepot.h
+++ sanitizer_common/sanitizer_stackdepot.h
@@ -15,24 +15,30 @@
 #include "sanitizer_common.h"
 #include "sanitizer_internal_defs.h"
+#include "sanitizer_stackdepotbase.h"
 
 namespace __sanitizer {
 
 // StackDepot efficiently stores huge amounts of stack traces.
 
-// Maps stack trace to an unique id.
-u32 StackDepotPut(const uptr *stack, uptr size);
-// Retrieves a stored stack trace by the id.
-const uptr *StackDepotGet(u32 id, uptr *size); +struct StackDepotNode; -struct StackDepotStats { - uptr n_uniq_ids; - uptr mapped; +struct StackDepotHandle { + StackDepotNode *node_; + StackDepotHandle() : node_(0) {} + explicit StackDepotHandle(StackDepotNode *node) : node_(node) {} + bool valid() { return node_; } + u32 id(); + int inc_use_count(); + uptr size(); + uptr *stack(); }; StackDepotStats *StackDepotGetStats(); - -struct StackDesc; +u32 StackDepotPut(const uptr *stack, uptr size); +StackDepotHandle StackDepotPut_WithHandle(const uptr *stack, uptr size); +// Retrieves a stored stack trace by the id. +const uptr *StackDepotGet(u32 id, uptr *size); // Instantiating this class creates a snapshot of StackDepot which can be // efficiently queried with StackDepotGet(). You can use it concurrently with @@ -46,7 +52,7 @@ private: struct IdDescPair { u32 id; - StackDesc *desc; + StackDepotNode *desc; static bool IdComparator(const IdDescPair &a, const IdDescPair &b); }; @@ -57,6 +63,8 @@ StackDepotReverseMap(const StackDepotReverseMap&); void operator=(const StackDepotReverseMap&); }; + + } // namespace __sanitizer #endif // SANITIZER_STACKDEPOT_H Index: sanitizer_common/sanitizer_stackdepot.cc =================================================================== --- sanitizer_common/sanitizer_stackdepot.cc +++ sanitizer_common/sanitizer_stackdepot.cc @@ -13,192 +13,101 @@ #include "sanitizer_stackdepot.h" #include "sanitizer_common.h" -#include "sanitizer_internal_defs.h" -#include "sanitizer_mutex.h" -#include "sanitizer_atomic.h" namespace __sanitizer { -const int kTabSize = 1024 * 1024; // Hash table size. -const int kPartBits = 8; -const int kPartShift = sizeof(u32) * 8 - kPartBits - 1; -const int kPartCount = 1 << kPartBits; // Number of subparts in the table. -const int kPartSize = kTabSize / kPartCount; -const int kMaxId = 1 << kPartShift; +struct StackDepotDesc { + const uptr *stack; + uptr size; + u32 hash() const { + // murmur2 + const u32 m = 0x5bd1e995; + const u32 seed = 0x9747b28c; + const u32 r = 24; + u32 h = seed ^ (size * sizeof(uptr)); + for (uptr i = 0; i < size; i++) { + u32 k = stack[i]; + k *= m; + k ^= k >> r; + k *= m; + h *= m; + h ^= k; + } + h ^= h >> 13; + h *= m; + h ^= h >> 15; + return h; + } + bool is_valid() { return size > 0 && stack; } +}; -struct StackDesc { - StackDesc *link; +struct StackDepotNode { + StackDepotNode *link; u32 id; - u32 hash; + u32 hash_bits : 12; + u32 use_count : 20; + static const u32 MAX_USE_COUNT = 1 << 18; uptr size; uptr stack[1]; // [size] -}; - -static struct { - StaticSpinMutex mtx; // Protects alloc of new blocks for region allocator. - atomic_uintptr_t region_pos; // Region allocator for StackDesc's. - atomic_uintptr_t region_end; - atomic_uintptr_t tab[kTabSize]; // Hash table of StackDesc's. - atomic_uint32_t seq[kPartCount]; // Unique id generators. 
-} depot; -static StackDepotStats stats; - -StackDepotStats *StackDepotGetStats() { - return &stats; -} - -static u32 hash(const uptr *stack, uptr size) { - // murmur2 - const u32 m = 0x5bd1e995; - const u32 seed = 0x9747b28c; - const u32 r = 24; - u32 h = seed ^ (size * sizeof(uptr)); - for (uptr i = 0; i < size; i++) { - u32 k = stack[i]; - k *= m; - k ^= k >> r; - k *= m; - h *= m; - h ^= k; + typedef StackDepotDesc args_type; + bool eq(u32 hash, const args_type &args) const { + if ((hash >> 20) != hash_bits || args.size != size) return false; + uptr i = 0; + for (; i < size; i++) { + if (stack[i] != args.stack[i]) return false; + } + return true; } - h ^= h >> 13; - h *= m; - h ^= h >> 15; - return h; -} - -static StackDesc *tryallocDesc(uptr memsz) { - // Optimisic lock-free allocation, essentially try to bump the region ptr. - for (;;) { - uptr cmp = atomic_load(&depot.region_pos, memory_order_acquire); - uptr end = atomic_load(&depot.region_end, memory_order_acquire); - if (cmp == 0 || cmp + memsz > end) - return 0; - if (atomic_compare_exchange_weak( - &depot.region_pos, &cmp, cmp + memsz, - memory_order_acquire)) - return (StackDesc*)cmp; + static uptr storage_size(const args_type &args) { + return sizeof(StackDepotNode) + (args.size - 1) * sizeof(uptr); } -} - -static StackDesc *allocDesc(uptr size) { - // First, try to allocate optimisitically. - uptr memsz = sizeof(StackDesc) + (size - 1) * sizeof(uptr); - StackDesc *s = tryallocDesc(memsz); - if (s) - return s; - // If failed, lock, retry and alloc new superblock. - SpinMutexLock l(&depot.mtx); - for (;;) { - s = tryallocDesc(memsz); - if (s) - return s; - atomic_store(&depot.region_pos, 0, memory_order_relaxed); - uptr allocsz = 64 * 1024; - if (allocsz < memsz) - allocsz = memsz; - uptr mem = (uptr)MmapOrDie(allocsz, "stack depot"); - stats.mapped += allocsz; - atomic_store(&depot.region_end, mem + allocsz, memory_order_release); - atomic_store(&depot.region_pos, mem, memory_order_release); + void store(const args_type &args, u32 hash) { + hash_bits = hash >> 20; + size = args.size; + internal_memcpy(stack, args.stack, size * sizeof(uptr)); } -} - -static u32 find(StackDesc *s, const uptr *stack, uptr size, u32 hash) { - // Searches linked list s for the stack, returns its id. - for (; s; s = s->link) { - if (s->hash == hash && s->size == size) { - uptr i = 0; - for (; i < size; i++) { - if (stack[i] != s->stack[i]) - break; - } - if (i == size) - return s->id; - } + args_type load() const { + args_type ret = {&stack[0], size}; + return ret; } - return 0; -} + StackDepotHandle get_handle() { return StackDepotHandle(this); } -static StackDesc *lock(atomic_uintptr_t *p) { - // Uses the pointer lsb as mutex. - for (int i = 0;; i++) { - uptr cmp = atomic_load(p, memory_order_relaxed); - if ((cmp & 1) == 0 - && atomic_compare_exchange_weak(p, &cmp, cmp | 1, - memory_order_acquire)) - return (StackDesc*)cmp; - if (i < 10) - proc_yield(10); - else - internal_sched_yield(); - } -} + typedef StackDepotHandle handle_type; +}; -static void unlock(atomic_uintptr_t *p, StackDesc *s) { - DCHECK_EQ((uptr)s & 1, 0); - atomic_store(p, (uptr)s, memory_order_release); +u32 StackDepotHandle::id() { return node_->id; } +int StackDepotHandle::inc_use_count() { + if (node_->use_count >= StackDepotNode::MAX_USE_COUNT) node_->use_count++; + return node_->use_count; +} +uptr StackDepotHandle::size() { return node_->size; } +uptr *StackDepotHandle::stack() { return &node_->stack[0]; } + +// FIXME: ASan and TSan can work with 0 reserved bits. 
+// MSan requires 2 reserved bits with chained origins, 1 with normal origins. +typedef StackDepotBase StackDepot; +static StackDepot theDepot; + +StackDepotStats *StackDepotGetStats() { + return theDepot.GetStats(); } u32 StackDepotPut(const uptr *stack, uptr size) { - if (stack == 0 || size == 0) - return 0; - uptr h = hash(stack, size); - atomic_uintptr_t *p = &depot.tab[h % kTabSize]; - uptr v = atomic_load(p, memory_order_consume); - StackDesc *s = (StackDesc*)(v & ~1); - // First, try to find the existing stack. - u32 id = find(s, stack, size, h); - if (id) - return id; - // If failed, lock, retry and insert new. - StackDesc *s2 = lock(p); - if (s2 != s) { - id = find(s2, stack, size, h); - if (id) { - unlock(p, s2); - return id; - } - } - uptr part = (h % kTabSize) / kPartSize; - id = atomic_fetch_add(&depot.seq[part], 1, memory_order_relaxed) + 1; - stats.n_uniq_ids++; - CHECK_LT(id, kMaxId); - id |= part << kPartShift; - CHECK_NE(id, 0); - CHECK_EQ(id & (1u << 31), 0); - s = allocDesc(size); - s->id = id; - s->hash = h; - s->size = size; - internal_memcpy(s->stack, stack, size * sizeof(uptr)); - s->link = s2; - unlock(p, s); - return id; + StackDepotDesc desc = {stack, size}; + StackDepotHandle h = theDepot.Put(desc); + return h.valid() ? h.id() : 0; +} + +StackDepotHandle StackDepotPut_WithHandle(const uptr *stack, uptr size) { + StackDepotDesc desc = {stack, size}; + return theDepot.Put(desc); } const uptr *StackDepotGet(u32 id, uptr *size) { - if (id == 0) - return 0; - CHECK_EQ(id & (1u << 31), 0); - // High kPartBits contain part id, so we need to scan at most kPartSize lists. - uptr part = id >> kPartShift; - for (int i = 0; i != kPartSize; i++) { - uptr idx = part * kPartSize + i; - CHECK_LT(idx, kTabSize); - atomic_uintptr_t *p = &depot.tab[idx]; - uptr v = atomic_load(p, memory_order_consume); - StackDesc *s = (StackDesc*)(v & ~1); - for (; s; s = s->link) { - if (s->id == id) { - *size = s->size; - return s->stack; - } - } - } - *size = 0; - return 0; + StackDepotDesc desc = theDepot.Get(id); + *size = desc.size; + return desc.stack; } bool StackDepotReverseMap::IdDescPair::IdComparator( @@ -209,10 +118,10 @@ StackDepotReverseMap::StackDepotReverseMap() : map_(StackDepotGetStats()->n_uniq_ids + 100) { - for (int idx = 0; idx < kTabSize; idx++) { - atomic_uintptr_t *p = &depot.tab[idx]; + for (int idx = 0; idx < StackDepot::kTabSize; idx++) { + atomic_uintptr_t *p = &theDepot.tab[idx]; uptr v = atomic_load(p, memory_order_consume); - StackDesc *s = (StackDesc*)(v & ~1); + StackDepotNode *s = (StackDepotNode*)(v & ~1); for (; s; s = s->link) { IdDescPair pair = {s->id, s}; map_.push_back(pair); @@ -230,7 +139,7 @@ *size = 0; return 0; } - StackDesc *desc = map_[idx].desc; + StackDepotNode *desc = map_[idx].desc; *size = desc->size; return desc->stack; } Index: sanitizer_common/sanitizer_stackdepotbase.h =================================================================== --- sanitizer_common/sanitizer_stackdepotbase.h +++ sanitizer_common/sanitizer_stackdepotbase.h @@ -0,0 +1,155 @@ +//===-- sanitizer_stackdepotbase.h ------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// Implementation of a value-indexed hash map. 
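+//
+// StackDepotBase<Node, kReservedBits> maps a Node::args_type value to a
+// unique u32 id, keeping the top kReservedBits of the id free for the caller.
+// The Node type defines the stored representation and must provide args_type,
+// handle_type, eq(), store(), load(), storage_size() and get_handle();
+// StackDepotNode and ChainedOriginDepotNode are the two instantiations
+// in this patch.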
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_STACKDEPOTBASE_H
+#define SANITIZER_STACKDEPOTBASE_H
+
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_mutex.h"
+#include "sanitizer_atomic.h"
+#include "sanitizer_region_allocator.h"
+
+namespace __sanitizer {
+
+struct StackDepotStats {
+  uptr n_uniq_ids;
+  uptr allocated;
+};
+
+template <class Node, int kReservedBits>
+class StackDepotBase {
+ public:
+  typedef typename Node::args_type args_type;
+  typedef typename Node::handle_type handle_type;
+  // Maps stack trace to a unique id.
+  handle_type Put(args_type args);
+  // Retrieves a stored stack trace by the id.
+  args_type Get(u32 id);
+
+  StackDepotStats *GetStats() { return &stats; }
+
+ private:
+  static Node *find(Node *s, args_type args, u32 hash);
+  static Node *lock(atomic_uintptr_t *p);
+  static void unlock(atomic_uintptr_t *p, Node *s);
+
+  static const int kTabSize = 1024 * 1024;  // Hash table size.
+  static const int kPartBits = 8;
+  static const int kPartShift = sizeof(u32) * 8 - kPartBits - kReservedBits;
+  static const int kPartCount =
+      1 << kPartBits;  // Number of subparts in the table.
+  static const int kPartSize = kTabSize / kPartCount;
+  static const int kMaxId = 1 << kPartShift;
+
+  atomic_uintptr_t tab[kTabSize];   // Hash table of Node's.
+  atomic_uint32_t seq[kPartCount];  // Unique id generators.
+
+  StackDepotStats stats;
+
+  friend class StackDepotReverseMap;
+};
+
+template <class Node, int kReservedBits>
+Node *StackDepotBase<Node, kReservedBits>::find(Node *s, args_type args,
+                                                u32 hash) {
+  // Searches linked list s for the stack, returns its id.
+  for (; s; s = s->link) {
+    if (s->eq(hash, args)) {
+      return s;
+    }
+  }
+  return 0;
+}
+
+template <class Node, int kReservedBits>
+Node *StackDepotBase<Node, kReservedBits>::lock(atomic_uintptr_t *p) {
+  // Uses the pointer lsb as mutex.
+  for (int i = 0;; i++) {
+    uptr cmp = atomic_load(p, memory_order_relaxed);
+    if ((cmp & 1) == 0 &&
+        atomic_compare_exchange_weak(p, &cmp, cmp | 1, memory_order_acquire))
+      return (Node *)cmp;
+    if (i < 10)
+      proc_yield(10);
+    else
+      internal_sched_yield();
+  }
+}
+
+template <class Node, int kReservedBits>
+void StackDepotBase<Node, kReservedBits>::unlock(atomic_uintptr_t *p,
+                                                 Node *s) {
+  DCHECK_EQ((uptr)s & 1, 0);
+  atomic_store(p, (uptr)s, memory_order_release);
+}
+
+template <class Node, int kReservedBits>
+typename StackDepotBase<Node, kReservedBits>::handle_type
+StackDepotBase<Node, kReservedBits>::Put(args_type args) {
+  if (!args.is_valid()) return handle_type();
+  uptr h = args.hash();
+  atomic_uintptr_t *p = &tab[h % kTabSize];
+  uptr v = atomic_load(p, memory_order_consume);
+  Node *s = (Node *)(v & ~1);
+  // First, try to find the existing stack.
+  Node *node = find(s, args, h);
+  if (node) return node->get_handle();
+  // If failed, lock, retry and insert new.
+  Node *s2 = lock(p);
+  if (s2 != s) {
+    node = find(s2, args, h);
+    if (node) {
+      unlock(p, s2);
+      return node->get_handle();
+    }
+  }
+  uptr part = (h % kTabSize) / kPartSize;
+  u32 id = atomic_fetch_add(&seq[part], 1, memory_order_relaxed) + 1;
+  stats.n_uniq_ids++;
+  CHECK_LT(id, kMaxId);
+  id |= part << kPartShift;
+  CHECK_NE(id, 0);
+  CHECK_EQ(id & (1u << 31), 0);
+  uptr memsz = Node::storage_size(args);
+  s = (Node *)RegionAlloc(memsz);
+  stats.allocated += memsz;
+  s->id = id;
+  s->store(args, h);
+  s->link = s2;
+  unlock(p, s);
+  return s->get_handle();
+}
+
+template <class Node, int kReservedBits>
+typename StackDepotBase<Node, kReservedBits>::args_type
+StackDepotBase<Node, kReservedBits>::Get(u32 id) {
+  if (id == 0) {
+    return args_type();
+  }
+  CHECK_EQ(id & (1u << 31), 0);
+  // High kPartBits contain part id, so we need to scan at most kPartSize lists.
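+  // Id layout: the top kReservedBits are always zero, the next kPartBits
+  // select the part, and the low kPartShift bits are the per-part sequence
+  // number assigned in Put().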
+ uptr part = id >> kPartShift; + for (int i = 0; i != kPartSize; i++) { + uptr idx = part * kPartSize + i; + CHECK_LT(idx, kTabSize); + atomic_uintptr_t *p = &tab[idx]; + uptr v = atomic_load(p, memory_order_consume); + Node *s = (Node *)(v & ~1); + for (; s; s = s->link) { + if (s->id == id) { + return s->load(); + } + } + } + return args_type(); +} + +} // namespace __sanitizer +#endif // SANITIZER_STACKDEPOTBASE_H
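Usage sketch (illustrative only, not part of the patch): with the headers above in place, equal stack traces collapse to a single id and chained origins are stored as (here_id, prev_id) links. DepotUsageSketch is a made-up helper used purely for demonstration.

#include "sanitizer_chainedorigindepot.h"
#include "sanitizer_stackdepot.h"

namespace __sanitizer {

// Hypothetical helper, not part of the patch.
void DepotUsageSketch() {
  uptr pcs[3] = {0x400123, 0x400456, 0x400789};
  // Storing the same trace twice yields the same id.
  u32 stack_id = StackDepotPut(pcs, 3);
  CHECK_EQ(stack_id, StackDepotPut(pcs, 3));
  // Retrieval returns the stored PCs and writes the trace length.
  uptr size = 0;
  const uptr *trace = StackDepotGet(stack_id, &size);
  CHECK_EQ(size, 3);
  (void)trace;
  // Chain an origin on top of an existing one (0 = no previous link);
  // Get() returns here_id and writes prev_id to the out-parameter.
  u32 link = ChainedOriginDepotPut(/*here_id=*/stack_id, /*prev_id=*/0);
  u32 prev = 0;
  u32 here = ChainedOriginDepotGet(link, &prev);
  CHECK_EQ(here, stack_id);
  CHECK_EQ(prev, 0);
}

}  // namespace __sanitizer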