diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.h b/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.h
--- a/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.h
@@ -10,6 +10,7 @@
 #define SANITIZER_STACK_STORE_H
 
 #include "sanitizer_atomic.h"
+#include "sanitizer_common.h"
 #include "sanitizer_internal_defs.h"
 #include "sanitizer_mutex.h"
 #include "sanitizer_stacktrace.h"
@@ -17,6 +18,10 @@
 namespace __sanitizer {
 
 class StackStore {
+  static constexpr uptr kBlockSizeFrames = 0x100000;
+  static constexpr uptr kBlockCount = 0x1000;
+  static constexpr uptr kBlockSizeBytes = kBlockSizeFrames * sizeof(uptr);
+
  public:
   constexpr StackStore() = default;
 
@@ -29,20 +34,32 @@
   void TestOnlyUnmap();
 
  private:
-  uptr *Alloc(uptr count = 1);
-  uptr *TryAlloc(uptr count);
-  uptr *RefillAndAlloc(uptr count);
-  mutable StaticSpinMutex mtx_ = {};  // Protects alloc of new blocks.
-  atomic_uintptr_t region_pos_ = {};  // Region allocator for Node's.
-  atomic_uintptr_t region_end_ = {};
-  atomic_uintptr_t mapped_size_ = {};
-
-  struct BlockInfo {
-    const BlockInfo *next;
-    uptr ptr;
-    uptr size;
+  static constexpr uptr GetBlockIdx(uptr frame_idx) {
+    return frame_idx / kBlockSizeFrames;
+  }
+
+  static constexpr uptr GetInBlockIdx(uptr frame_idx) {
+    return frame_idx % kBlockSizeFrames;
+  }
+
+  uptr *Alloc(uptr count);
+
+  // Total number of allocated frames.
+  atomic_uintptr_t total_frames_ = {};
+
+  // Each block holds a pointer to exactly kBlockSizeFrames frames.
+  class BlockInfo {
+    atomic_uintptr_t data_;
+    StaticSpinMutex mtx_;  // Protects alloc of new blocks.
+
+    uptr *Create();
+    uptr *Get() const;
+
+   public:
+    uptr *GetOrCreate();
+    void TestOnlyUnmap();
   };
-  const BlockInfo *curr_ = nullptr;
+  BlockInfo blocks_[kBlockCount] = {};
 };
 
 }  // namespace __sanitizer
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.cpp
--- a/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.cpp
@@ -52,64 +52,60 @@
 }
 
 uptr StackStore::Allocated() const {
-  return atomic_load_relaxed(&mapped_size_);
+  return RoundUpTo(atomic_load_relaxed(&total_frames_) * sizeof(uptr),
+                   GetPageSizeCached()) +
+         sizeof(*this);
 }
 
-uptr *StackStore::TryAlloc(uptr count) {
-  // Optimisic lock-free allocation, essentially try to bump the region ptr.
+uptr *StackStore::Alloc(uptr count) {
   for (;;) {
-    uptr cmp = atomic_load(&region_pos_, memory_order_acquire);
-    uptr end = atomic_load(&region_end_, memory_order_acquire);
-    uptr size = count * sizeof(uptr);
-    if (cmp == 0 || cmp + size > end)
-      return nullptr;
-    if (atomic_compare_exchange_weak(&region_pos_, &cmp, cmp + size,
-                                     memory_order_acquire))
-      return reinterpret_cast<uptr *>(cmp);
+    // Optimistic lock-free allocation, essentially try to bump the
+    // total_frames_.
+    uptr start = atomic_fetch_add(&total_frames_, count, memory_order_relaxed);
+    uptr block_idx = GetBlockIdx(start);
+    if (LIKELY(block_idx == GetBlockIdx(start + count - 1))) {
+      // Fits into a single block.
+      CHECK_LT(block_idx, ARRAY_SIZE(blocks_));
+      return blocks_[block_idx].GetOrCreate() + GetInBlockIdx(start);
+    }
+
+    // Retry. We can't use a range allocated in two different blocks.
   }
 }
 
-uptr *StackStore::Alloc(uptr count) {
-  // First, try to allocate optimisitically.
-  uptr *s = TryAlloc(count);
-  if (LIKELY(s))
-    return s;
-  return RefillAndAlloc(count);
+void StackStore::TestOnlyUnmap() {
+  for (BlockInfo &b : blocks_) b.TestOnlyUnmap();
+  internal_memset(this, 0, sizeof(*this));
+}
+
+uptr *StackStore::BlockInfo::Get() const {
+  // Idiomatic double-checked locking uses memory_order_acquire here. But
+  // relaxed is fine for us; the justification is similar to
+  // TwoLevelMap::GetOrCreate.
+  return reinterpret_cast<uptr *>(atomic_load_relaxed(&data_));
 }
 
-uptr *StackStore::RefillAndAlloc(uptr count) {
-  // If failed, lock, retry and alloc new superblock.
+uptr *StackStore::BlockInfo::Create() {
   SpinMutexLock l(&mtx_);
-  for (;;) {
-    uptr *s = TryAlloc(count);
-    if (s)
-      return s;
-    atomic_store(&region_pos_, 0, memory_order_relaxed);
-    uptr size = count * sizeof(uptr) + sizeof(BlockInfo);
-    uptr allocsz = RoundUpTo(Max(size, 64u * 1024u), GetPageSizeCached());
-    uptr mem = (uptr)MmapOrDie(allocsz, "stack depot");
-    BlockInfo *new_block = (BlockInfo *)(mem + allocsz) - 1;
-    new_block->next = curr_;
-    new_block->ptr = mem;
-    new_block->size = allocsz;
-    curr_ = new_block;
-
-    atomic_fetch_add(&mapped_size_, allocsz, memory_order_relaxed);
-
-    allocsz -= sizeof(BlockInfo);
-    atomic_store(&region_end_, mem + allocsz, memory_order_release);
-    atomic_store(&region_pos_, mem, memory_order_release);
+  uptr *ptr = Get();
+  if (!ptr) {
+    ptr = reinterpret_cast<uptr *>(
+        MmapNoReserveOrDie(kBlockSizeBytes, "StackStore"));
+    atomic_store(&data_, reinterpret_cast<uptr>(ptr), memory_order_release);
   }
+  return ptr;
 }
 
-void StackStore::TestOnlyUnmap() {
-  while (curr_) {
-    uptr mem = curr_->ptr;
-    uptr allocsz = curr_->size;
-    curr_ = curr_->next;
-    UnmapOrDie((void *)mem, allocsz);
-  }
-  internal_memset(this, 0, sizeof(*this));
+uptr *StackStore::BlockInfo::GetOrCreate() {
+  uptr *ptr = Get();
+  if (LIKELY(ptr))
+    return ptr;
+  return Create();
+}
+
+void StackStore::BlockInfo::TestOnlyUnmap() {
+  if (uptr *ptr = Get())
+    UnmapOrDie(ptr, StackStore::kBlockSizeBytes);
 }
 
 }  // namespace __sanitizer
diff --git a/compiler-rt/test/hwasan/TestCases/Linux/decorate-proc-maps.c b/compiler-rt/test/hwasan/TestCases/Linux/decorate-proc-maps.c
--- a/compiler-rt/test/hwasan/TestCases/Linux/decorate-proc-maps.c
+++ b/compiler-rt/test/hwasan/TestCases/Linux/decorate-proc-maps.c
@@ -10,7 +10,7 @@
 // B-DAG: rw-p {{.*}}SizeClassAllocator: region info]
 // B-DAG: rw-p {{.*}}LargeMmapAllocator]
-// B-DAG: rw-p {{.*}}stack depot]
+// B-DAG: rw-p {{.*}}StackStore]
 
 #include 
 #include 
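
Note (not part of the patch): the sketch below illustrates the allocation scheme the new StackStore uses, assuming I read the diff correctly. A single atomic frame counter is bumped to reserve a range, the range is mapped to a fixed block index, and blocks are created lazily under double-checked locking. It is a standalone approximation: std::atomic, std::mutex, and operator new[] stand in for the sanitizer primitives (atomic_uintptr_t, StaticSpinMutex, MmapNoReserveOrDie), MiniStackStore is a made-up name, and the constants are shrunk for illustration.

// Standalone sketch of the StackStore allocation scheme (hypothetical
// MiniStackStore; std::atomic/std::mutex/new[] replace the sanitizer
// primitives used in the real code).
#include <algorithm>
#include <atomic>
#include <cassert>
#include <cstdint>
#include <mutex>

class MiniStackStore {
  static constexpr uintptr_t kBlockSizeFrames = 1024;  // 0x100000 in the patch
  static constexpr uintptr_t kBlockCount = 16;         // 0x1000 in the patch

  static constexpr uintptr_t GetBlockIdx(uintptr_t frame_idx) {
    return frame_idx / kBlockSizeFrames;
  }
  static constexpr uintptr_t GetInBlockIdx(uintptr_t frame_idx) {
    return frame_idx % kBlockSizeFrames;
  }

  struct BlockInfo {
    std::atomic<uintptr_t *> data{nullptr};
    std::mutex mtx;

    // Double-checked locking: the fast path is a single atomic load; the
    // block is allocated at most once. Blocks are intentionally never freed,
    // mirroring the real store.
    uintptr_t *GetOrCreate() {
      uintptr_t *ptr = data.load(std::memory_order_acquire);
      if (ptr)
        return ptr;
      std::lock_guard<std::mutex> lock(mtx);
      ptr = data.load(std::memory_order_relaxed);
      if (!ptr) {
        ptr = new uintptr_t[kBlockSizeFrames]();  // stands in for mmap
        data.store(ptr, std::memory_order_release);
      }
      return ptr;
    }
  };

  std::atomic<uintptr_t> total_frames_{0};
  BlockInfo blocks_[kBlockCount];

 public:
  // Reserve `count` consecutive frame slots and return a pointer to them.
  uintptr_t *Alloc(uintptr_t count) {
    assert(count > 0 && count <= kBlockSizeFrames);
    for (;;) {
      // Optimistically bump the global counter to claim a range.
      uintptr_t start =
          total_frames_.fetch_add(count, std::memory_order_relaxed);
      uintptr_t block_idx = GetBlockIdx(start);
      if (block_idx != GetBlockIdx(start + count - 1))
        continue;  // Range straddles two blocks: abandon it and retry.
      assert(block_idx < kBlockCount);
      return blocks_[block_idx].GetOrCreate() + GetInBlockIdx(start);
    }
  }
};

int main() {
  MiniStackStore store;
  uintptr_t frames[3] = {0x101, 0x202, 0x303};
  uintptr_t *slot = store.Alloc(3);
  std::copy(frames, frames + 3, slot);
  return slot[1] == 0x202 ? 0 : 1;
}

When a reserved range would straddle a block boundary, the whole range is simply abandoned and the counter bumped again; since a stack trace is far smaller than a 1M-frame block, the occasional wasted slots are negligible, which is presumably why the patch retries rather than ever splitting a trace across blocks.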