diff --git a/compiler-rt/lib/hwasan/hwasan_allocator.h b/compiler-rt/lib/hwasan/hwasan_allocator.h
--- a/compiler-rt/lib/hwasan/hwasan_allocator.h
+++ b/compiler-rt/lib/hwasan/hwasan_allocator.h
@@ -17,6 +17,7 @@
 #include "hwasan_interface_internal.h"
 #include "hwasan_mapping.h"
 #include "hwasan_poisoning.h"
+#include "lsan/lsan_common.h"
 #include "sanitizer_common/sanitizer_allocator.h"
 #include "sanitizer_common/sanitizer_allocator_checks.h"
 #include "sanitizer_common/sanitizer_allocator_interface.h"
@@ -31,14 +32,26 @@
 namespace __hwasan {
 
 struct Metadata {
+ private:
+  atomic_uint64_t alloc_context_id;
   u32 requested_size_low;
-  u32 requested_size_high : 31;
-  u32 right_aligned : 1;
-  u32 alloc_context_id;
-  u64 get_requested_size() {
+  u16 requested_size_high;
+
+ public:
+  u8 right_aligned;
+  u8 lsan_tag;
+
+  void SetAllocContext(u32 stack);
+  void GetAllocContext(u32 &tid, u32 &stack) const;
+
+  bool IsAllocated() const;
+  u32 GetAllocStackId() const;
+
+  u64 get_requested_size() const {
     return (static_cast<u64>(requested_size_high) << 32) + requested_size_low;
   }
   void set_requested_size(u64 size) {
+    static_assert(sizeof(Metadata) == 16);
     requested_size_low = size & ((1ul << 32) - 1);
     requested_size_high = size >> 32;
   }
@@ -89,6 +102,7 @@
   u32 GetAllocStackId() const;
   bool FromSmallHeap() const;
  private:
+  friend class __lsan::LsanMetadata;
   uptr block_;
   Metadata *const metadata_;
 };
diff --git a/compiler-rt/lib/hwasan/hwasan_allocator.cpp b/compiler-rt/lib/hwasan/hwasan_allocator.cpp
--- a/compiler-rt/lib/hwasan/hwasan_allocator.cpp
+++ b/compiler-rt/lib/hwasan/hwasan_allocator.cpp
@@ -21,6 +21,7 @@
 #include "hwasan_malloc_bisect.h"
 #include "hwasan_thread.h"
 #include "hwasan_report.h"
+#include "lsan/lsan_common.h"
 
 namespace __hwasan {
 
@@ -42,8 +43,7 @@
 static ALIGNED(16) u8 tail_magic[kShadowAlignment - 1];
 
 bool HwasanChunkView::IsAllocated() const {
-  return metadata_ && metadata_->alloc_context_id &&
-         metadata_->get_requested_size();
+  return metadata_ && metadata_->IsAllocated();
 }
 
 // Aligns the 'addr' right to the granule boundary.
@@ -65,7 +65,7 @@
   return metadata_->get_requested_size();
 }
 u32 HwasanChunkView::GetAllocStackId() const {
-  return metadata_->alloc_context_id;
+  return metadata_->GetAllocStackId();
 }
 
 uptr HwasanChunkView::ActualSize() const {
@@ -76,6 +76,33 @@
   return allocator.FromPrimary(reinterpret_cast<void *>(block_));
 }
 
+void Metadata::SetAllocContext(u32 stack) {
+  Thread *t = GetCurrentThread();
+  u64 context = t ? t->unique_id() : kMainTid;
+  context <<= 32;
+  context += stack;
+  __sanitizer::atomic_store(&alloc_context_id, context, memory_order_relaxed);
+}
+
+void Metadata::GetAllocContext(u32 &tid, u32 &stack) const {
+  u64 context =
+      __sanitizer::atomic_load(&alloc_context_id, memory_order_relaxed);
+  stack = context;
+  context >>= 32;
+  tid = context;
+}
+
+bool Metadata::IsAllocated() const {
+  return __sanitizer::atomic_load(&alloc_context_id, memory_order_relaxed) &&
+         get_requested_size();
+}
+
+u32 Metadata::GetAllocStackId() const {
+  return __sanitizer::atomic_load(&alloc_context_id, memory_order_relaxed);
+}
+
+static const uptr kChunkHeaderSize = sizeof(HwasanChunkView);
+
 void GetAllocatorStats(AllocatorStatCounters s) {
   allocator.GetStats(s);
 }
@@ -158,7 +185,7 @@
   Metadata *meta =
       reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
   meta->set_requested_size(orig_size);
-  meta->alloc_context_id = StackDepotPut(*stack);
+  meta->SetAllocContext(StackDepotPut(*stack));
   meta->right_aligned = false;
   if (zeroise) {
     internal_memset(allocated, 0, size);
@@ -246,7 +273,9 @@
   }
   uptr orig_size = meta->get_requested_size();
   u32 free_context_id = StackDepotPut(*stack);
-  u32 alloc_context_id = meta->alloc_context_id;
+  u32 tid;
+  u32 stack_id;
+  meta->GetAllocContext(tid, stack_id);
 
   // Check tail magic.
   uptr tagged_size = TaggedSize(orig_size);
@@ -266,7 +295,7 @@
   }
 
   meta->set_requested_size(0);
-  meta->alloc_context_id = 0;
+  meta->SetAllocContext(0);
   // This memory will not be reused by anyone else, so we are free to keep it
   // poisoned.
   Thread *t = GetCurrentThread();
@@ -298,7 +327,7 @@
   if (t) {
     allocator.Deallocate(t->allocator_cache(), aligned_ptr);
     if (auto *ha = t->heap_allocations())
-      ha->push({reinterpret_cast<uptr>(tagged_ptr), alloc_context_id,
+      ha->push({reinterpret_cast<uptr>(tagged_ptr), stack_id,
                 free_context_id, static_cast<uptr>(orig_size)});
   } else {
     SpinMutexLock l(&fallback_mutex);
@@ -451,6 +480,51 @@
 }  // namespace __hwasan
 
+// --- Implementation of LSan-specific functions --- {{{1
+namespace __lsan {
+
+LsanMetadata::LsanMetadata(uptr chunk) {
+  metadata_ = chunk ? reinterpret_cast<void *>(chunk - __hwasan::kChunkHeaderSize)
+                    : nullptr;
+}
+
+bool LsanMetadata::allocated() const {
+  if (!metadata_)
+    return false;
+  __hwasan::HwasanChunkView *m = reinterpret_cast<__hwasan::HwasanChunkView *>(metadata_);
+  // TODO(kstoimenov): is this thread safe?
+  return m->IsAllocated();
+}
+
+ChunkTag LsanMetadata::tag() const {
+  __hwasan::HwasanChunkView *m = reinterpret_cast<__hwasan::HwasanChunkView *>(metadata_);
+  return static_cast<ChunkTag>(m->metadata_->lsan_tag);
+}
+
+void LsanMetadata::set_tag(ChunkTag value) {
+  __hwasan::HwasanChunkView *m = reinterpret_cast<__hwasan::HwasanChunkView *>(metadata_);
+  m->metadata_->lsan_tag = value;
+}
+
+uptr LsanMetadata::requested_size() const {
+  __hwasan::HwasanChunkView *m = reinterpret_cast<__hwasan::HwasanChunkView *>(metadata_);
+  return m->UsedSize();
+}
+
+u32 LsanMetadata::stack_trace_id() const {
+  __hwasan::HwasanChunkView *m = reinterpret_cast<__hwasan::HwasanChunkView *>(metadata_);
+  u32 tid = 0;
+  u32 stack = 0;
+  m->metadata_->GetAllocContext(tid, stack);
+  return stack;
+}
+
+void ForEachChunk(ForEachChunkCallback callback, void *arg) {
+  __hwasan::allocator.ForEachChunk(callback, arg);
+}
+
+}  // namespace __lsan
+
 using namespace __hwasan;
 
 void __hwasan_enable_allocator_tagging() {
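Note (not part of the patch): a minimal standalone sketch of the 64-bit packing used by Metadata::SetAllocContext/GetAllocContext above, which keeps the allocating thread id in the upper 32 bits and the stack depot id in the lower 32 bits of a single atomic word. It uses standard C++ atomics instead of the __sanitizer wrappers, and the AllocContext type and its names are invented purely for illustration.

    #include <atomic>
    #include <cassert>
    #include <cstdint>

    // Illustration only: pack (tid, stack_id) into one atomic 64-bit word,
    // mirroring the tid<<32 | stack layout of Metadata::alloc_context_id.
    struct AllocContext {
      std::atomic<uint64_t> packed{0};

      void Set(uint32_t tid, uint32_t stack_id) {
        uint64_t context = tid;
        context <<= 32;
        context += stack_id;
        packed.store(context, std::memory_order_relaxed);
      }

      void Get(uint32_t &tid, uint32_t &stack_id) const {
        uint64_t context = packed.load(std::memory_order_relaxed);
        stack_id = static_cast<uint32_t>(context);        // low 32 bits
        tid = static_cast<uint32_t>(context >> 32);       // high 32 bits
      }
    };

    int main() {
      AllocContext ctx;
      ctx.Set(/*tid=*/7, /*stack_id=*/0xdeadbeef);
      uint32_t tid = 0, stack_id = 0;
      ctx.Get(tid, stack_id);
      assert(tid == 7 && stack_id == 0xdeadbeef);
      return 0;
    }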