Index: lib/hwasan/hwasan_allocator.h =================================================================== --- lib/hwasan/hwasan_allocator.h +++ lib/hwasan/hwasan_allocator.h @@ -15,6 +15,7 @@ #define HWASAN_ALLOCATOR_H #include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_ring_buffer.h" namespace __hwasan { @@ -49,6 +50,17 @@ HwasanChunkView FindHeapChunkByAddress(uptr address); +// Information about one (de)allocation that happened in the past. +// These are recorded in a thread-local ring buffer. +struct HeapAllocationRecord { + uptr tagged_addr; + u32 free_context_id; + u32 requested_size; +}; + +typedef RingBuffer<HeapAllocationRecord> HeapAllocationsRingBuffer; + + } // namespace __hwasan #endif // HWASAN_ALLOCATOR_H Index: lib/hwasan/hwasan_allocator.cc =================================================================== --- lib/hwasan/hwasan_allocator.cc +++ lib/hwasan/hwasan_allocator.cc @@ -36,7 +36,7 @@ struct Metadata { u64 state : 2; - u64 requested_size : 62; + u32 requested_size; // Current use cases of hwasan do not expect sizes > 4G. u32 alloc_context_id; u32 free_context_id; }; @@ -155,7 +155,7 @@ Metadata *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated)); meta->state = CHUNK_ALLOCATED; - meta->requested_size = size; + meta->requested_size = static_cast<u32>(size); meta->alloc_context_id = StackDepotPut(*stack); if (zeroise) { internal_memset(allocated, 0, size); @@ -194,7 +194,8 @@ uptr size = meta->requested_size; meta->state = CHUNK_FREE; meta->requested_size = 0; - meta->free_context_id = StackDepotPut(*stack); + u32 free_context_id = StackDepotPut(*stack); + meta->free_context_id = free_context_id; // This memory will not be reused by anyone else, so we are free to keep it // poisoned. 
HwasanThread *t = GetCurrentThread(); @@ -209,6 +210,9 @@ if (t) { AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage()); allocator.Deallocate(cache, p); + if (auto *ha = t->heap_allocations()) + ha->push({reinterpret_cast<uptr>(user_ptr), free_context_id, + static_cast<u32>(size)}); } else { SpinMutexLock l(&fallback_mutex); AllocatorCache *cache = &fallback_allocator_cache; @@ -252,6 +256,7 @@ internal_memcpy(new_p, old_p, memcpy_size); HwasanDeallocate(stack, old_p); } + // FIXME: update t->heap_allocations or simplify HwasanReallocate. return new_p; } Index: lib/hwasan/hwasan_flags.inc =================================================================== --- lib/hwasan/hwasan_flags.inc +++ lib/hwasan/hwasan_flags.inc @@ -44,3 +44,7 @@ "Value used to fill the newly allocated memory.") HWASAN_FLAG(int, free_fill_byte, 0x55, "Value used to fill deallocated memory.") +HWASAN_FLAG(int, heap_history_size, 1023, + "The number of heap (de)allocations remembered per thread. " + "Affects the quality of heap-related reports, but not the ability " + "to find bugs.") Index: lib/hwasan/hwasan_report.cc =================================================================== --- lib/hwasan/hwasan_report.cc +++ lib/hwasan/hwasan_report.cc @@ -15,6 +15,7 @@ #include "hwasan.h" #include "hwasan_allocator.h" #include "hwasan_mapping.h" +#include "hwasan_thread.h" #include "sanitizer_common/sanitizer_allocator_internal.h" #include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_flags.h" @@ -61,6 +62,21 @@ } }; +bool FindHeapAllocation(HeapAllocationsRingBuffer *rb, + uptr tagged_addr, + HeapAllocationRecord *har) { + if (!rb) return false; + for (uptr i = 0, size = rb->size(); i < size; i++) { + auto h = (*rb)[i]; + if (h.tagged_addr <= tagged_addr && + h.tagged_addr + h.requested_size > tagged_addr) { + *har = h; + return true; + } + } + return false; +} + bool GetHeapAddressInformation(uptr addr, uptr access_size, HeapAddressDescription *description) { 
HwasanChunkView chunk = FindHeapChunkByAddress(addr); @@ -182,6 +198,18 @@ PrintAddressDescription(address, access_size); + // Temporary functionality; to be folded into PrintAddressDescription. + // TODOs: + // * implement ThreadRegistry + // * check all threads, not just the current one. + // * remove redundant fields from the allocator metadata + // * use the allocations found in the ring buffer for the main report. + HeapAllocationRecord har; + HwasanThread *t = GetCurrentThread(); + if (t && FindHeapAllocation(t->heap_allocations(), addr, &har)) + Printf("Address found in the ring buffer: %p %u %u\n", har.tagged_addr, + har.free_context_id, har.requested_size); + PrintTagsAroundAddr(tag_ptr); ReportErrorSummary(bug_type, stack); Index: lib/hwasan/hwasan_thread.h =================================================================== --- lib/hwasan/hwasan_thread.h +++ lib/hwasan/hwasan_thread.h @@ -50,6 +50,9 @@ void LeaveInterceptorScope() { in_interceptor_scope_--; } HwasanThreadLocalMallocStorage &malloc_storage() { return malloc_storage_; } + HeapAllocationsRingBuffer *heap_allocations() { + return heap_allocations_; + } tag_t GenerateRandomTag(); @@ -75,6 +78,7 @@ u32 random_buffer_; HwasanThreadLocalMallocStorage malloc_storage_; + HeapAllocationsRingBuffer *heap_allocations_; }; HwasanThread *GetCurrentThread(); Index: lib/hwasan/hwasan_thread.cc =================================================================== --- lib/hwasan/hwasan_thread.cc +++ lib/hwasan/hwasan_thread.cc @@ -32,7 +32,8 @@ thread->arg_ = arg; thread->destructor_iterations_ = GetPthreadDestructorIterations(); thread->random_state_ = flags()->random_tags ? 
RandomSeed() : 0; - + if (auto sz = flags()->heap_history_size) + thread->heap_allocations_ = RingBuffer<HeapAllocationRecord>::New(sz); return thread; } @@ -76,6 +77,8 @@ malloc_storage().CommitBack(); ClearShadowForThreadStackAndTLS(); uptr size = RoundUpTo(sizeof(HwasanThread), GetPageSizeCached()); + if (heap_allocations_) + heap_allocations_->Delete(); UnmapOrDie(this, size); DTLS_Destroy(); }