diff --git a/compiler-rt/lib/hwasan/hwasan_allocator.h b/compiler-rt/lib/hwasan/hwasan_allocator.h
--- a/compiler-rt/lib/hwasan/hwasan_allocator.h
+++ b/compiler-rt/lib/hwasan/hwasan_allocator.h
@@ -17,6 +17,7 @@
 #include "hwasan_interface_internal.h"
 #include "hwasan_mapping.h"
 #include "hwasan_poisoning.h"
+#include "lsan/lsan_common.h"
 #include "sanitizer_common/sanitizer_allocator.h"
 #include "sanitizer_common/sanitizer_allocator_checks.h"
 #include "sanitizer_common/sanitizer_allocator_interface.h"
@@ -34,7 +35,12 @@
   u32 requested_size_low;
   u32 requested_size_high : 31;
   u32 right_aligned : 1;
-  u32 alloc_context_id;
+  atomic_uint64_t alloc_context_id;
+  u8 lsan_tag : 2;
+
+  void SetAllocContext(u32 stack);
+  void GetAllocContext(u32 &tid, u32 &stack) const;
+
   u64 get_requested_size() {
     return (static_cast<u64>(requested_size_high) << 32) + requested_size_low;
   }
@@ -89,6 +95,7 @@
   u32 GetAllocStackId() const;
   bool FromSmallHeap() const;
  private:
+  friend class __lsan::LsanMetadata;
   uptr block_;
   Metadata *const metadata_;
 };
@@ -109,6 +116,8 @@
 typedef RingBuffer<HeapAllocationRecord> HeapAllocationsRingBuffer;

 void GetAllocatorStats(AllocatorStatCounters s);
+void AllocatorThreadFinish();
+void InitializeAllocator();

 inline bool InTaggableRegion(uptr addr) {
 #if defined(HWASAN_ALIASING_MODE)
diff --git a/compiler-rt/lib/hwasan/hwasan_allocator.cpp b/compiler-rt/lib/hwasan/hwasan_allocator.cpp
--- a/compiler-rt/lib/hwasan/hwasan_allocator.cpp
+++ b/compiler-rt/lib/hwasan/hwasan_allocator.cpp
@@ -21,6 +21,7 @@
 #include "hwasan_malloc_bisect.h"
 #include "hwasan_thread.h"
 #include "hwasan_report.h"
+#include "lsan/lsan_common.h"

 namespace __hwasan {

@@ -42,7 +43,9 @@
 static ALIGNED(16) u8 tail_magic[kShadowAlignment - 1];

 bool HwasanChunkView::IsAllocated() const {
-  return metadata_ && metadata_->alloc_context_id &&
+  return metadata_ &&
+         __sanitizer::atomic_load(&metadata_->alloc_context_id,
+                                  memory_order_relaxed) &&
          metadata_->get_requested_size();
 }

@@ -65,7 +68,8 @@
   return metadata_->get_requested_size();
 }

 u32 HwasanChunkView::GetAllocStackId() const {
-  return metadata_->alloc_context_id;
+  return __sanitizer::atomic_load(&metadata_->alloc_context_id,
+                                  memory_order_relaxed);
 }

@@ -76,6 +80,24 @@
   return allocator.FromPrimary(reinterpret_cast<void *>(block_));
 }

+void Metadata::SetAllocContext(u32 stack) {
+  Thread *t = GetCurrentThread();
+  u64 context = t ? t->unique_id() : kMainTid;
+  context <<= 32;
+  context += stack;
+  __sanitizer::atomic_store(&alloc_context_id, context, memory_order_relaxed);
+}
+
+void Metadata::GetAllocContext(u32 &tid, u32 &stack) const {
+  u64 context =
+      __sanitizer::atomic_load(&alloc_context_id, memory_order_relaxed);
+  stack = context;
+  context >>= 32;
+  tid = context;
+}
+
+static const uptr kChunkHeaderSize = sizeof(HwasanChunkView);
+
 void GetAllocatorStats(AllocatorStatCounters s) {
   allocator.GetStats(s);
 }
@@ -158,7 +180,7 @@
   Metadata *meta =
       reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
   meta->set_requested_size(orig_size);
-  meta->alloc_context_id = StackDepotPut(*stack);
+  meta->SetAllocContext(StackDepotPut(*stack));
   meta->right_aligned = false;
   if (zeroise) {
     internal_memset(allocated, 0, size);
@@ -246,7 +268,9 @@
   }
   uptr orig_size = meta->get_requested_size();
   u32 free_context_id = StackDepotPut(*stack);
-  u32 alloc_context_id = meta->alloc_context_id;
+  u32 tid;
+  u32 stack_id;
+  meta->GetAllocContext(tid, stack_id);

   // Check tail magic.
   uptr tagged_size = TaggedSize(orig_size);
@@ -266,7 +290,7 @@
   }

   meta->set_requested_size(0);
-  meta->alloc_context_id = 0;
+  meta->SetAllocContext(0);
   // This memory will not be reused by anyone else, so we are free to keep it
   // poisoned.
   Thread *t = GetCurrentThread();
@@ -298,7 +322,7 @@
   if (t) {
     allocator.Deallocate(t->allocator_cache(), aligned_ptr);
     if (auto *ha = t->heap_allocations())
-      ha->push({reinterpret_cast<uptr>(tagged_ptr), alloc_context_id,
+      ha->push({reinterpret_cast<uptr>(tagged_ptr), stack_id,
                 free_context_id, static_cast<u32>(orig_size)});
   } else {
     SpinMutexLock l(&fallback_mutex);
@@ -451,6 +475,51 @@

 }  // namespace __hwasan

+// --- Implementation of LSan-specific functions --- {{{1
+namespace __lsan {
+
+LsanMetadata::LsanMetadata(uptr chunk) {
+  metadata_ = chunk ? reinterpret_cast<void *>(chunk - __hwasan::kChunkHeaderSize)
+                    : nullptr;
+}
+
+bool LsanMetadata::allocated() const {
+  if (!metadata_)
+    return false;
+  __hwasan::HwasanChunkView *m = reinterpret_cast<__hwasan::HwasanChunkView *>(metadata_);
+  // TODO(kstoimenov): is this thread safe?
+  return m->IsAllocated();
+}
+
+ChunkTag LsanMetadata::tag() const {
+  __hwasan::HwasanChunkView *m = reinterpret_cast<__hwasan::HwasanChunkView *>(metadata_);
+  return static_cast<ChunkTag>(m->metadata_->lsan_tag);
+}
+
+void LsanMetadata::set_tag(ChunkTag value) {
+  __hwasan::HwasanChunkView *m = reinterpret_cast<__hwasan::HwasanChunkView *>(metadata_);
+  m->metadata_->lsan_tag = value;
+}
+
+uptr LsanMetadata::requested_size() const {
+  __hwasan::HwasanChunkView *m = reinterpret_cast<__hwasan::HwasanChunkView *>(metadata_);
+  return m->UsedSize();
+}
+
+u32 LsanMetadata::stack_trace_id() const {
+  __hwasan::HwasanChunkView *m = reinterpret_cast<__hwasan::HwasanChunkView *>(metadata_);
+  u32 tid = 0;
+  u32 stack = 0;
+  m->metadata_->GetAllocContext(tid, stack);
+  return stack;
+}
+
+void ForEachChunk(ForEachChunkCallback callback, void *arg) {
+  __hwasan::allocator.ForEachChunk(callback, arg);
+}
+
+}  // namespace __lsan
+
 using namespace __hwasan;

 void __hwasan_enable_allocator_tagging() {
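
The central change in this patch is replacing the plain `u32 alloc_context_id` with a single 64-bit atomic that packs the allocating thread's id into the high 32 bits and the stack depot id into the low 32 bits, so a concurrent reader such as the leak checker can never observe a half-updated tid/stack pair. Below is a minimal, self-contained sketch of that packing scheme; it uses `std::atomic` in place of the sanitizer-internal `atomic_uint64_t`, and the `AllocContext` name is ours, not the patch's. The relaxed ordering mirrors the `memory_order_relaxed` used in `SetAllocContext`/`GetAllocContext`.

```cpp
#include <atomic>
#include <cassert>
#include <cstdint>

// Hypothetical stand-in for the patch's Metadata::{Set,Get}AllocContext:
// tid and stack depot id are packed into one 64-bit word so that both
// halves are always published and read together.
struct AllocContext {
  std::atomic<uint64_t> packed{0};

  void Set(uint32_t tid, uint32_t stack) {
    uint64_t context = (static_cast<uint64_t>(tid) << 32) | stack;
    packed.store(context, std::memory_order_relaxed);
  }

  void Get(uint32_t &tid, uint32_t &stack) const {
    uint64_t context = packed.load(std::memory_order_relaxed);
    stack = static_cast<uint32_t>(context);      // low 32 bits
    tid = static_cast<uint32_t>(context >> 32);  // high 32 bits
  }
};

int main() {
  AllocContext ctx;
  ctx.Set(/*tid=*/7, /*stack=*/0xdeadbeef);
  uint32_t tid = 0, stack = 0;
  ctx.Get(tid, stack);
  assert(tid == 7 && stack == 0xdeadbeef);
}
```

Note that a single atomic store is what lets `IsAllocated()` and `GetAllocStackId()` read the field without taking the allocator lock.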
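
For context on how the new `__lsan` adapter gets exercised: LSan walks the heap via `ForEachChunk` (forwarded here to `__hwasan::allocator.ForEachChunk`) and inspects each chunk through `LsanMetadata`. The callback below is a hypothetical consumer written against the `lsan_common.h` interface, not code from this patch; `CountUnreachableCb`/`CountUnreachableChunks` are illustrative names, and real LSan callbacks additionally normalize the pointer with `GetUserBegin(chunk)` before constructing `LsanMetadata`, which is elided here.

```cpp
#include "lsan/lsan_common.h"

namespace __lsan {

// Hypothetical example: count chunks that are live but not yet proven
// reachable. `arg` carries a pointer to the running counter, matching the
// ForEachChunkCallback signature void(uptr chunk, void *arg).
static void CountUnreachableCb(uptr chunk, void *arg) {
  LsanMetadata m(chunk);  // adapter implemented by this patch
  if (m.allocated() && m.tag() != kReachable)
    ++*reinterpret_cast<uptr *>(arg);
}

static uptr CountUnreachableChunks() {
  uptr count = 0;
  ForEachChunk(CountUnreachableCb, &count);
  return count;
}

}  // namespace __lsan
```

This is also why `allocated()`, `tag()`, and `stack_trace_id()` above must be safe to call on arbitrary chunks mid-scan, and why `alloc_context_id` needed to become atomic in the first place.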