diff --git a/compiler-rt/lib/hwasan/hwasan_allocator.h b/compiler-rt/lib/hwasan/hwasan_allocator.h
--- a/compiler-rt/lib/hwasan/hwasan_allocator.h
+++ b/compiler-rt/lib/hwasan/hwasan_allocator.h
@@ -17,6 +17,7 @@
 #include "hwasan_interface_internal.h"
 #include "hwasan_mapping.h"
 #include "hwasan_poisoning.h"
+#include "lsan/lsan_common.h"
 #include "sanitizer_common/sanitizer_allocator.h"
 #include "sanitizer_common/sanitizer_allocator_checks.h"
 #include "sanitizer_common/sanitizer_allocator_interface.h"
@@ -31,17 +32,27 @@
 namespace __hwasan {
 
 struct Metadata {
+ private:
+  atomic_uint64_t alloc_context_id;
   u32 requested_size_low;
-  u32 requested_size_high;
-  u32 alloc_context_id;
-  u64 GetRequestedSize() {
-    return (static_cast<u64>(requested_size_high) << 32) + requested_size_low;
-  }
-  void SetRequestedSize(u64 size) {
-    requested_size_low = size & ((1ul << 32) - 1);
-    requested_size_high = size >> 32;
-  }
+  u16 requested_size_high;
+  atomic_uint8_t chunk_state;
+  u8 lsan_tag;
+
+ public:
+  inline void SetAllocContext(u32 stack);
+  inline void GetAllocContext(u32 &tid, u32 &stack) const;
+
+  inline void SetLsanTag(__lsan::ChunkTag tag);
+  inline __lsan::ChunkTag GetLsanTag() const;
+
+  inline bool IsAllocated() const;
+  inline u32 GetAllocStackId() const;
+
+  inline u64 GetRequestedSize() const;
+  inline void SetRequestedSize(u64 size);
 };
+static_assert(sizeof(Metadata) == 16);
 
 struct HwasanMapUnmapCallback {
   void OnMap(uptr p, uptr size) const { UpdateMemoryUsage(); }
@@ -88,6 +99,7 @@
   u32 GetAllocStackId() const;
   bool FromSmallHeap() const;
  private:
+  friend class __lsan::LsanMetadata;
   uptr block_;
   Metadata *const metadata_;
 };
diff --git a/compiler-rt/lib/hwasan/hwasan_allocator.cpp b/compiler-rt/lib/hwasan/hwasan_allocator.cpp
--- a/compiler-rt/lib/hwasan/hwasan_allocator.cpp
+++ b/compiler-rt/lib/hwasan/hwasan_allocator.cpp
@@ -21,6 +21,7 @@
 #include "hwasan_malloc_bisect.h"
 #include "hwasan_thread.h"
 #include "hwasan_report.h"
+#include "lsan/lsan_common.h"
 
 namespace __hwasan {
 
@@ -32,18 +33,21 @@
 static constexpr tag_t kFallbackAllocTag = 0xBB & kTagMask;
 static constexpr tag_t kFallbackFreeTag = 0xBC;
 
-enum RightAlignMode {
-  kRightAlignNever,
-  kRightAlignSometimes,
-  kRightAlignAlways
+enum {
+  // Either just allocated by the underlying allocator, but the Metadata is
+  // not yet ready, or almost returned to the underlying allocator and the
+  // Metadata is already meaningless.
+  CHUNK_INVALID = 0,
+  // The chunk is allocated and not yet freed.
+  CHUNK_ALLOCATED = 1,
 };
 
+// Initialized in HwasanAllocatorInit, and never changed.
 static ALIGNED(16) u8 tail_magic[kShadowAlignment - 1];
 
 bool HwasanChunkView::IsAllocated() const {
-  return metadata_ && metadata_->alloc_context_id &&
-         metadata_->GetRequestedSize();
+  return metadata_ && metadata_->IsAllocated();
 }
 
 uptr HwasanChunkView::Beg() const {
@@ -56,7 +60,7 @@
   return metadata_->GetRequestedSize();
 }
 u32 HwasanChunkView::GetAllocStackId() const {
-  return metadata_->alloc_context_id;
+  return metadata_->GetAllocStackId();
 }
 
 uptr HwasanChunkView::ActualSize() const {
@@ -67,6 +71,52 @@
   return allocator.FromPrimary(reinterpret_cast<void *>(block_));
 }
 
+inline void Metadata::SetAllocContext(u32 stack) {
+  Thread *t = GetCurrentThread();
+  u64 context = t ? t->unique_id() : kMainTid;
+  context <<= 32;
+  context += stack;
+  atomic_store(&chunk_state, stack == 0 ? CHUNK_INVALID : CHUNK_ALLOCATED,
+               memory_order_release);
+  __sanitizer::atomic_store(&alloc_context_id, context, memory_order_relaxed);
+}
+
+inline void Metadata::GetAllocContext(u32 &tid, u32 &stack) const {
+  u64 context =
+      __sanitizer::atomic_load(&alloc_context_id, memory_order_relaxed);
+  stack = context;
+  context >>= 32;
+  tid = context;
+}
+
+inline void Metadata::SetLsanTag(__lsan::ChunkTag tag) {
+  lsan_tag = tag;
+}
+
+inline __lsan::ChunkTag Metadata::GetLsanTag() const {
+  return static_cast<__lsan::ChunkTag>(lsan_tag);
+}
+
+inline bool Metadata::IsAllocated() const {
+  return atomic_load(&chunk_state, memory_order_relaxed) == CHUNK_ALLOCATED &&
+         GetRequestedSize();
+}
+
+inline u32 Metadata::GetAllocStackId() const {
+  return __sanitizer::atomic_load(&alloc_context_id, memory_order_relaxed);
+}
+
+inline u64 Metadata::GetRequestedSize() const {
+  return (static_cast<u64>(requested_size_high) << 32) + requested_size_low;
+}
+
+inline void Metadata::SetRequestedSize(u64 size) {
+  requested_size_low = size & ((1ul << 32) - 1);
+  requested_size_high = size >> 32;
+}
+
+static const uptr kChunkHeaderSize = sizeof(HwasanChunkView);
+
 void GetAllocatorStats(AllocatorStatCounters s) {
   allocator.GetStats(s);
 }
@@ -149,7 +199,7 @@
   Metadata *meta =
       reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
   meta->SetRequestedSize(orig_size);
-  meta->alloc_context_id = StackDepotPut(*stack);
+  meta->SetAllocContext(StackDepotPut(*stack));
   if (zeroise) {
     internal_memset(allocated, 0, size);
   } else if (flags()->max_malloc_fill_size > 0) {
@@ -236,7 +286,9 @@
   }
   uptr orig_size = meta->GetRequestedSize();
   u32 free_context_id = StackDepotPut(*stack);
-  u32 alloc_context_id = meta->alloc_context_id;
+  u32 tid;
+  u32 stack_id;
+  meta->GetAllocContext(tid, stack_id);
 
   // Check tail magic.
   uptr tagged_size = TaggedSize(orig_size);
@@ -256,7 +308,7 @@
   }
 
   meta->SetRequestedSize(0);
-  meta->alloc_context_id = 0;
+  meta->SetAllocContext(0);
   // This memory will not be reused by anyone else, so we are free to keep it
   // poisoned.
   Thread *t = GetCurrentThread();
@@ -288,7 +340,7 @@
   if (t) {
     allocator.Deallocate(t->allocator_cache(), aligned_ptr);
     if (auto *ha = t->heap_allocations())
-      ha->push({reinterpret_cast<uptr>(tagged_ptr), alloc_context_id,
+      ha->push({reinterpret_cast<uptr>(tagged_ptr), stack_id,
                 free_context_id, static_cast<u32>(orig_size)});
   } else {
     SpinMutexLock l(&fallback_mutex);
@@ -435,6 +487,47 @@
 
 }  // namespace __hwasan
 
+// --- Implementation of LSan-specific functions --- {{{1
+namespace __lsan {
+
+LsanMetadata::LsanMetadata(uptr chunk) {
+  metadata_ = chunk ? reinterpret_cast<__hwasan::Metadata *>(
+                          chunk - __hwasan::kChunkHeaderSize)
+                    : nullptr;
+}
+
+bool LsanMetadata::allocated() const {
+  if (!metadata_)
+    return false;
+  __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
+  return m->IsAllocated();
+}
+
+ChunkTag LsanMetadata::tag() const {
+  __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
+  return m->GetLsanTag();
+}
+
+void LsanMetadata::set_tag(ChunkTag value) {
+  __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
+  m->SetLsanTag(value);
+}
+
+uptr LsanMetadata::requested_size() const {
+  __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
+  return m->GetRequestedSize();
+}
+
+u32 LsanMetadata::stack_trace_id() const {
+  __hwasan::Metadata *m = reinterpret_cast<__hwasan::Metadata *>(metadata_);
+  u32 tid;
+  u32 stack;
+  m->GetAllocContext(tid, stack);
+  return stack;
+}
+
+}  // namespace __lsan
+
 using namespace __hwasan;
 
 void __hwasan_enable_allocator_tagging() {
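Side note (not part of the patch): a minimal standalone sketch of how the new 64-bit alloc_context_id packs the allocating thread id into the upper 32 bits and the stack depot id into the lower 32 bits, mirroring Metadata::SetAllocContext/GetAllocContext above. std::atomic stands in for the sanitizer atomic types, and all names here are illustrative only.

    #include <atomic>
    #include <cassert>
    #include <cstdint>

    // Illustrative stand-in for Metadata::alloc_context_id: upper 32 bits
    // hold the allocating thread id, lower 32 bits hold the stack depot id.
    static std::atomic<uint64_t> alloc_context_id{0};

    static void SetAllocContext(uint32_t tid, uint32_t stack) {
      uint64_t context = (static_cast<uint64_t>(tid) << 32) | stack;
      alloc_context_id.store(context, std::memory_order_relaxed);
    }

    static void GetAllocContext(uint32_t &tid, uint32_t &stack) {
      uint64_t context = alloc_context_id.load(std::memory_order_relaxed);
      stack = static_cast<uint32_t>(context);      // low 32 bits
      tid = static_cast<uint32_t>(context >> 32);  // high 32 bits
    }

    int main() {
      SetAllocContext(7, 0xABCD1234);
      uint32_t tid = 0, stack = 0;
      GetAllocContext(tid, stack);
      assert(tid == 7 && stack == 0xABCD1234);
      return 0;
    }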