diff --git a/compiler-rt/lib/asan/asan_allocator.h b/compiler-rt/lib/asan/asan_allocator.h
--- a/compiler-rt/lib/asan/asan_allocator.h
+++ b/compiler-rt/lib/asan/asan_allocator.h
@@ -28,7 +28,7 @@
   FROM_NEW_BR = 3  // Memory block came from operator new [ ]
 };
 
-struct AsanChunk;
+class AsanChunk;
 
 struct AllocatorOptions {
   u32 quarantine_size_mb;
diff --git a/compiler-rt/lib/asan/asan_allocator.cpp b/compiler-rt/lib/asan/asan_allocator.cpp
--- a/compiler-rt/lib/asan/asan_allocator.cpp
+++ b/compiler-rt/lib/asan/asan_allocator.cpp
@@ -68,43 +68,57 @@
 }
 
 // The memory chunk allocated from the underlying allocator looks like this:
-// L L L L L L H H U U U U U U R R
+// M L L L L L H H U U U U U U R R
+//   M -- Magic ChunkHeader which points to the real one.
 // L -- left redzone words (0 or more bytes)
-// H -- ChunkHeader (16 bytes), which is also a part of the left redzone.
+// H -- ChunkHeader, which is also a part of the left redzone.
 // U -- user memory.
 // R -- right redzone (0 or more bytes)
 // ChunkBase consists of ChunkHeader and other bytes that overlap with user
 // memory.
 
-// If the left redzone is greater than the ChunkHeader size we store a magic
-// value in the first uptr word of the memory block and store the address of
-// ChunkBase in the next uptr.
-// M B L L L L L L L L L  H H U U U U U U
-//   |                    ^
-//   ---------------------|
-// M -- magic value kAllocBegMagic
-// B -- address of ChunkHeader pointing to the first 'H'
-static const uptr kAllocBegMagic = 0xCC6E96B9;
-
-struct ChunkHeader {
+static const u64 kAllocBegMagic = 0xCC6E96B9CC6E96B9ULL;
+
+class ChunkHeader {
+ private:
+  // This is an xor-ed pointer to itself or to another header.
+  // The Magic ChunkHeader points to the real one, and the real one points
+  // back to the Magic one. The pointer is xor-ed so that random unrelated
+  // garbage is not interpreted as a header. LSan uses this value to identify
+  // fully initialized headers.
+  atomic_uint64_t magic_ptr;
+
+ public:
   atomic_uint8_t chunk_state;
-  u8 from_memalign : 1;
   u8 alloc_type : 2;
-  u8 rz_log : 3;
   u8 lsan_tag : 2;
-
-  // This field is used for small sizes. For large sizes it is equal to
-  // SizeClassMap::kMaxSize and the actual size is stored in the
-  // SecondaryAllocator's metadata.
-  u32 user_requested_size : 29;
   // align < 8 -> 0
   // else -> log2(min(align, 512)) - 2
-  u32 user_requested_alignment_log : 3;
+  u8 user_requested_alignment_log : 3;
 
  private:
+  // Should be enough according to kMaxAllowedMallocSize.
+  u16 user_requested_size_hi;
+  u32 user_requested_size_lo;
   atomic_uint64_t alloc_context_id;
 
  public:
+  uptr UsedSize() const {
+    uptr R = user_requested_size_lo;
+    if (sizeof(uptr) > sizeof(user_requested_size_lo))
+      R += (uptr)user_requested_size_hi << (8 * sizeof(user_requested_size_lo));
+    return R;
+  }
+
+  void SetUsedSize(uptr size) {
+    user_requested_size_lo = size;
+    if (sizeof(uptr) > sizeof(user_requested_size_lo)) {
+      size >>= (8 * sizeof(user_requested_size_lo));
+      user_requested_size_hi = size;
+      CHECK_EQ(user_requested_size_hi, size);
+    }
+  }
+
   void SetAllocContext(u32 tid, u32 stack) {
     AtomicContextStore(&alloc_context_id, tid, stack);
   }
@@ -112,9 +126,27 @@
   void GetAllocContext(u32 &tid, u32 &stack) const {
     AtomicContextLoad(&alloc_context_id, tid, stack);
   }
+
+  uptr Beg() { return reinterpret_cast<uptr>(this) + sizeof(ChunkHeader); }
+
+  void *AllocBeg() { return GetChunkPtr(); }
+
+  bool AddrIsInside(uptr addr) {
+    return (addr >= Beg()) && (addr < Beg() + UsedSize());
+  }
+
+  void SetChunkPtr(AsanChunk *p) {
+    atomic_store(&magic_ptr, kAllocBegMagic ^ reinterpret_cast<uptr>(p),
+                 memory_order_release);
+  }
+
+  AsanChunk *GetChunkPtr() const {
+    return reinterpret_cast<AsanChunk *>(
+        kAllocBegMagic ^ atomic_load(&magic_ptr, memory_order_acquire));
+  }
 };
 
-class ChunkBase : public ChunkHeader {
+class AsanChunk : public ChunkHeader {
   atomic_uint64_t free_context_id;
 
  public:
@@ -128,8 +160,8 @@
 };
 
 static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
-static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize;
-COMPILER_CHECK(kChunkHeaderSize == 16);
+static const uptr kChunkHeader2Size = sizeof(AsanChunk) - kChunkHeaderSize;
+COMPILER_CHECK(kChunkHeaderSize == 24);
 COMPILER_CHECK(kChunkHeader2Size <= 16);
 
 enum {
@@ -143,28 +175,6 @@
   CHUNK_QUARANTINE = 3,
 };
 
-struct AsanChunk: ChunkBase {
-  uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
-  uptr UsedSize(bool locked_version = false) {
-    if (user_requested_size != SizeClassMap::kMaxSize)
-      return user_requested_size;
-    return *reinterpret_cast<uptr *>(
-               get_allocator().GetMetaData(AllocBeg(locked_version)));
-  }
-  void *AllocBeg(bool locked_version = false) {
-    if (from_memalign) {
-      if (locked_version)
-        return get_allocator().GetBlockBeginFastLocked(
-            reinterpret_cast<void *>(this));
-      return get_allocator().GetBlockBegin(reinterpret_cast<void *>(this));
-    }
-    return reinterpret_cast<void *>(Beg() - RZLog2Size(rz_log));
-  }
-  bool AddrIsInside(uptr addr, bool locked_version = false) {
-    return (addr >= Beg()) && (addr < Beg() + UsedSize(locked_version));
-  }
-};
-
 struct QuarantineCallback {
   QuarantineCallback(AllocatorCache *cache, BufferedStackTrace *stack)
       : cache_(cache),
@@ -172,6 +182,10 @@
   }
 
   void Recycle(AsanChunk *m) {
+    AsanChunk *p = reinterpret_cast<AsanChunk *>(m->AllocBeg());
+    p->SetChunkPtr(nullptr);
+    if (p != m)
+      m->SetChunkPtr(nullptr);
     u8 old_chunk_state = CHUNK_QUARANTINE;
     if (!atomic_compare_exchange_strong(&m->chunk_state, &old_chunk_state,
                                         CHUNK_INVALID, memory_order_acquire)) {
@@ -181,16 +195,6 @@
     PoisonShadow(m->Beg(),
                 RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
                 kAsanHeapLeftRedzoneMagic);
-    void *p = reinterpret_cast<void *>(m->AllocBeg());
-    if (p != m) {
-      uptr *alloc_magic = reinterpret_cast<uptr *>(p);
-      CHECK_EQ(alloc_magic[0], kAllocBegMagic);
-      // Clear the magic value, as allocator internals may overwrite the
-      // contents of deallocated chunk, confusing GetAsanChunk lookup.
-      alloc_magic[0] = 0;
-      CHECK_EQ(alloc_magic[1], reinterpret_cast<uptr>(m));
-    }
-
     // Statistics.
     AsanStats &thread_stats = GetCurrentThreadStats();
     thread_stats.real_frees++;
@@ -338,7 +342,7 @@
     if (ac && atomic_load(&ac->chunk_state, memory_order_acquire) ==
                   CHUNK_ALLOCATED) {
       uptr beg = ac->Beg();
-      uptr end = ac->Beg() + ac->UsedSize(true);
+      uptr end = ac->Beg() + ac->UsedSize();
       uptr chunk_end = chunk + allocated_size;
       if (chunk < beg && beg < end && end <= chunk_end) {
         // Looks like a valid AsanChunk in use, poison redzones only.
@@ -489,13 +493,10 @@
     uptr needed_size = rounded_size + rz_size;
     if (alignment > min_alignment)
       needed_size += alignment;
-    bool using_primary_allocator = true;
     // If we are allocating from the secondary allocator, there will be no
     // automatic right redzone, so add the right redzone manually.
-    if (!PrimaryAllocator::CanAllocate(needed_size, alignment)) {
+    if (!PrimaryAllocator::CanAllocate(needed_size, alignment))
       needed_size += rz_size;
-      using_primary_allocator = false;
-    }
     CHECK(IsAligned(needed_size, min_alignment));
     if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize ||
         size > max_user_defined_malloc_size) {
@@ -546,24 +547,8 @@
     uptr chunk_beg = user_beg - kChunkHeaderSize;
     AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
     m->alloc_type = alloc_type;
-    m->rz_log = rz_log;
-    m->from_memalign = user_beg != beg_plus_redzone;
-    if (alloc_beg != chunk_beg) {
-      CHECK_LE(alloc_beg + 2 * sizeof(uptr), chunk_beg);
-      reinterpret_cast<uptr *>(alloc_beg)[0] = kAllocBegMagic;
-      reinterpret_cast<uptr *>(alloc_beg)[1] = chunk_beg;
-    }
-    if (using_primary_allocator) {
-      CHECK(size);
-      m->user_requested_size = size;
-      CHECK(allocator.FromPrimary(allocated));
-    } else {
-      CHECK(!allocator.FromPrimary(allocated));
-      m->user_requested_size = SizeClassMap::kMaxSize;
-      uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(allocated));
-      meta[0] = size;
-      meta[1] = chunk_beg;
-    }
+    CHECK(size);
+    m->SetUsedSize(size);
     m->user_requested_alignment_log = user_requested_alignment_log;
     m->SetAllocContext(t ? t->tid() : 0, StackDepotPut(*stack));
 
@@ -598,8 +583,16 @@
     m->lsan_tag = __lsan::DisabledInThisThread() ? __lsan::kIgnored
                                                  : __lsan::kDirectlyLeaked;
 #endif
-    // Must be the last mutation of metadata in this function.
     atomic_store(&m->chunk_state, CHUNK_ALLOCATED, memory_order_release);
+    // Must be the last mutation of metadata in this function.
+    AsanChunk *magic = reinterpret_cast<AsanChunk *>(alloc_beg);
+    m->SetChunkPtr(magic);
+    if (m != magic) {
+      // Magic header stores only a u64; min_alignment guarantees it fits here.
+      CHECK_GE(chunk_beg - alloc_beg, 8);
+      magic->SetChunkPtr(m);
+    }
+
     ASAN_MALLOC_HOOK(res, size);
     return res;
   }
@@ -767,25 +760,23 @@
   AsanChunk *GetAsanChunk(void *alloc_beg) {
     if (!alloc_beg)
       return nullptr;
-    AsanChunk *p = nullptr;
-    if (!allocator.FromPrimary(alloc_beg)) {
-      uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(alloc_beg));
-      p = reinterpret_cast<AsanChunk *>(meta[1]);
-    } else {
-      uptr *alloc_magic = reinterpret_cast<uptr *>(alloc_beg);
-      if (alloc_magic[0] == kAllocBegMagic)
-        p = reinterpret_cast<AsanChunk *>(alloc_magic[1]);
-      else
-        p = reinterpret_cast<AsanChunk *>(alloc_beg);
+    AsanChunk *m = reinterpret_cast<AsanChunk *>(alloc_beg);
+    AsanChunk *m2 = m->GetChunkPtr();
+    if (m != m2) {
+      // Check that the Magic header points into the allocated block.
+      if (m2 < m ||
+          (reinterpret_cast<uptr>(m2) - reinterpret_cast<uptr>(m) +
+           sizeof(AsanChunk)) >= allocator.GetActuallyAllocatedSize(alloc_beg))
+        return nullptr;
+      // Check that the real header points back to the Magic header.
+      if (m != m2->GetChunkPtr())
+        return nullptr;
+      // Now we are sure that this is the real one.
+      m = m2;
     }
-    if (!p)
-      return nullptr;
-    u8 state = atomic_load(&p->chunk_state, memory_order_relaxed);
-    // It does not guaranty that Chunk is initialized, but it's
-    // definitely not for any other value.
-    if (state == CHUNK_ALLOCATED || state == CHUNK_QUARANTINE)
-      return p;
-    return nullptr;
+    u8 state = atomic_load(&m->chunk_state, memory_order_relaxed);
+    CHECK(state == CHUNK_ALLOCATED || state == CHUNK_QUARANTINE);
+    return m;
   }
 
   AsanChunk *GetAsanChunkByAddr(uptr p) {
@@ -1116,10 +1107,9 @@
                 __asan::CHUNK_ALLOCATED)
     return 0;
   uptr chunk = m->Beg();
-  if (m->AddrIsInside(addr, /*locked_version=*/true))
+  if (m->AddrIsInside(addr))
     return chunk;
-  if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(/*locked_version*/ true),
-                                  addr))
+  if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(), addr))
     return chunk;
   return 0;
 }
@@ -1154,7 +1144,7 @@
 
 uptr LsanMetadata::requested_size() const {
   __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
-  return m->UsedSize(/*locked_version=*/true);
+  return m->UsedSize();
 }
 
 u32 LsanMetadata::stack_trace_id() const {
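
Note (illustrative sketch, not part of the patch): the snippet below mimics the xor-ed magic_ptr scheme that SetChunkPtr/GetChunkPtr implement, using std::atomic instead of the sanitizer_common atomics. EncodePtr, DecodePtr, and the dummy header are made-up names for the example, not ASan APIs.

// Standalone sketch of the xor-ed magic_ptr scheme, assuming a 64-bit target.
#include <atomic>
#include <cassert>
#include <cstdint>
#include <cstdio>

constexpr uint64_t kAllocBegMagic = 0xCC6E96B9CC6E96B9ULL;

// Store p xor-ed with the magic constant, as ChunkHeader::SetChunkPtr does.
void EncodePtr(std::atomic<uint64_t> &slot, const void *p) {
  slot.store(kAllocBegMagic ^ reinterpret_cast<uint64_t>(p),
             std::memory_order_release);
}

// Recover the pointer, as ChunkHeader::GetChunkPtr does.
void *DecodePtr(const std::atomic<uint64_t> &slot) {
  return reinterpret_cast<void *>(kAllocBegMagic ^
                                  slot.load(std::memory_order_acquire));
}

int main() {
  std::atomic<uint64_t> magic_ptr{0};
  int dummy_header;  // stands in for the real AsanChunk

  // The Magic header points at the real header and round-trips correctly.
  EncodePtr(magic_ptr, &dummy_header);
  assert(DecodePtr(magic_ptr) == &dummy_header);

  // An uninitialized/garbage slot does not decode to the expected address,
  // so a consistency check like the one in GetAsanChunk can reject it.
  std::atomic<uint64_t> garbage{0};
  assert(DecodePtr(garbage) != &dummy_header);

  std::printf("raw slot value: 0x%llx\n",
              static_cast<unsigned long long>(magic_ptr.load()));
  return 0;
}

The xor matters because a zeroed or stale word left behind in freed memory decodes to an implausible address, so blocks whose headers are not fully initialized fail the mutual-pointer check instead of being misidentified as chunks.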
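
Note (illustrative sketch, not part of the patch): the requested size now always lives in the header, split into a 16-bit high part and a 32-bit low part (48 bits, enough for kMaxAllowedMallocSize), instead of a 29-bit bitfield plus secondary-allocator metadata. The sketch below reproduces the SetUsedSize/UsedSize arithmetic with plain <cstdint> types; SizeFields is a made-up name and uint64_t stands in for uptr on a 64-bit target.

#include <cassert>
#include <cstdint>

struct SizeFields {
  uint16_t hi = 0;  // mirrors user_requested_size_hi
  uint32_t lo = 0;  // mirrors user_requested_size_lo

  void Set(uint64_t size) {
    lo = static_cast<uint32_t>(size);
    // The real code guards on sizeof(uptr), so 32-bit targets skip the
    // high half entirely; here the guard is always true.
    if (sizeof(uint64_t) > sizeof(lo)) {
      size >>= 8 * sizeof(lo);
      hi = static_cast<uint16_t>(size);
      assert(hi == size);  // the patch uses CHECK_EQ: size must fit in 48 bits
    }
  }

  uint64_t Get() const {
    uint64_t r = lo;
    if (sizeof(uint64_t) > sizeof(lo))
      r += static_cast<uint64_t>(hi) << (8 * sizeof(lo));
    return r;
  }
};

int main() {
  SizeFields f;
  f.Set(0x12345678ABCULL);  // a 44-bit size splits and round-trips
  assert(f.Get() == 0x12345678ABCULL);
  return 0;
}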
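
Note (illustrative sketch, not part of the patch): the snippet below replays the consistency check the rewritten GetAsanChunk performs when the header found at alloc_beg turns out to be a Magic header: the decoded pointer must land inside the allocated block, and the real header must point back at the Magic one. Header, ResolveHeader, and the fake block layout are simplified stand-ins; the real code additionally checks chunk_state and uses allocator.GetActuallyAllocatedSize().

#include <cassert>
#include <cstdint>
#include <new>

constexpr uint64_t kMagic = 0xCC6E96B9CC6E96B9ULL;

struct Header {
  uint64_t magic_ptr = 0;
  void Point(const Header *other) {
    magic_ptr = kMagic ^ reinterpret_cast<uint64_t>(other);
  }
  Header *Target() const {
    return reinterpret_cast<Header *>(kMagic ^ magic_ptr);
  }
};

// Returns the real header, or nullptr if the pair of headers is inconsistent
// (e.g. the block is not fully initialized or holds allocator-internal data).
Header *ResolveHeader(Header *at_alloc_beg, uint64_t allocated_size) {
  Header *real = at_alloc_beg->Target();
  if (real == at_alloc_beg)
    return real;  // the header sits right at alloc_beg, nothing to verify
  // The real header must lie inside the allocated block...
  uint64_t offset = reinterpret_cast<uint64_t>(real) -
                    reinterpret_cast<uint64_t>(at_alloc_beg);
  if (real < at_alloc_beg || offset + sizeof(Header) >= allocated_size)
    return nullptr;
  // ...and must point back at the Magic header.
  if (real->Target() != at_alloc_beg)
    return nullptr;
  return real;
}

int main() {
  // Fake "allocation": Magic header at the start, real header further in,
  // mimicking the left-redzone layout "M L L ... H H ...".
  alignas(8) unsigned char block[256] = {};
  Header *magic = new (block) Header;
  Header *real = new (block + 64) Header;
  magic->Point(real);
  real->Point(magic);
  assert(ResolveHeader(magic, sizeof(block)) == real);

  // Wipe the real header's back-pointer: the lookup now rejects the block.
  real->magic_ptr = 0;
  assert(ResolveHeader(magic, sizeof(block)) == nullptr);
  return 0;
}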