diff --git a/compiler-rt/lib/asan/asan_allocator.cpp b/compiler-rt/lib/asan/asan_allocator.cpp
--- a/compiler-rt/lib/asan/asan_allocator.cpp
+++ b/compiler-rt/lib/asan/asan_allocator.cpp
@@ -76,15 +76,14 @@
 // ChunkBase consists of ChunkHeader and other bytes that overlap with user
 // memory.
 
-// If the left redzone is greater than the ChunkHeader size we store a magic
-// value in the first uptr word of the memory block and store the address of
-// ChunkBase in the next uptr.
+// If the left redzone is greater than sizeof(ChunkHeader) + 2 * sizeof(uptr),
+// we store a magic value in the first byte of the memory block and store the
+// address of ChunkBase in the second uptr word.
 // M B L L L L L L L L L  H H U U U U U U
 //   |                    ^
 //   ---------------------|
-//   M -- magic value kAllocBegMagic
+//   M -- ChunkHeader with chunk_state == CHUNK_PTR
 //   B -- address of ChunkHeader pointing to the first 'H'
-static const uptr kAllocBegMagic = 0xCC6E96B9;
 
 struct ChunkHeader {
   atomic_uint8_t chunk_state;
@@ -155,6 +154,8 @@
   CHUNK_ALLOCATED = 2,
   // The chunk was freed and put into quarantine zone.
   CHUNK_QUARANTINE = 3,
+  // This is not a chunk, just a placeholder for the chunk pointer.
+  CHUNK_PTR = 255,
 };
 
 struct AsanChunk: ChunkBase {
@@ -178,6 +179,17 @@
   }
 
   void Recycle(AsanChunk *m) {
+    AsanChunk *p = reinterpret_cast<AsanChunk *>(m->AllocBeg());
+    if (p != m) {
+      u8 old_chunk_state = CHUNK_PTR;
+      // Clear the magic value, as allocator internals may overwrite the
+      // contents of the deallocated chunk, confusing GetAsanChunk lookup.
+      if (!atomic_compare_exchange_strong(&p->chunk_state, &old_chunk_state,
+                                          CHUNK_INVALID,
+                                          memory_order_acquire)) {
+        CHECK_EQ(old_chunk_state, CHUNK_PTR);
+      }
+    }
     u8 old_chunk_state = CHUNK_QUARANTINE;
     if (!atomic_compare_exchange_strong(&m->chunk_state, &old_chunk_state,
                                         CHUNK_INVALID, memory_order_acquire)) {
@@ -187,15 +199,6 @@
 
     PoisonShadow(m->Beg(), RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
                  kAsanHeapLeftRedzoneMagic);
-    void *p = reinterpret_cast<void *>(m->AllocBeg());
-    if (p != m) {
-      uptr *alloc_magic = reinterpret_cast<uptr *>(p);
-      CHECK_EQ(alloc_magic[0], kAllocBegMagic);
-      // Clear the magic value, as allocator internals may overwrite the
-      // contents of deallocated chunk, confusing GetAsanChunk lookup.
-      alloc_magic[0] = 0;
-      CHECK_EQ(alloc_magic[1], reinterpret_cast<uptr>(m));
-    }
 
     // Statistics.
     AsanStats &thread_stats = GetCurrentThreadStats();
@@ -494,13 +497,10 @@
     uptr needed_size = rounded_size + rz_size;
     if (alignment > min_alignment)
       needed_size += alignment;
-    bool using_primary_allocator = true;
     // If we are allocating from the secondary allocator, there will be no
     // automatic right redzone, so add the right redzone manually.
-    if (!PrimaryAllocator::CanAllocate(needed_size, alignment)) {
+    if (!PrimaryAllocator::CanAllocate(needed_size, alignment))
       needed_size += rz_size;
-      using_primary_allocator = false;
-    }
     CHECK(IsAligned(needed_size, min_alignment));
     if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize ||
         size > max_user_defined_malloc_size) {
@@ -553,20 +553,8 @@
     m->alloc_type = alloc_type;
     m->rz_log = rz_log;
     m->from_memalign = user_beg != beg_plus_redzone;
-    if (alloc_beg != chunk_beg) {
-      CHECK_LE(alloc_beg + 2 * sizeof(uptr), chunk_beg);
-      reinterpret_cast<uptr *>(alloc_beg)[0] = kAllocBegMagic;
-      reinterpret_cast<uptr *>(alloc_beg)[1] = chunk_beg;
-    }
     CHECK(size);
     m->SetUsedSize(size);
-    if (using_primary_allocator) {
-      CHECK(allocator.FromPrimary(allocated));
-    } else {
-      CHECK(!allocator.FromPrimary(allocated));
-      uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(allocated));
-      meta[1] = chunk_beg;
-    }
     m->user_requested_alignment_log = user_requested_alignment_log;
 
     m->SetAllocContext(t ? t->tid() : 0, StackDepotPut(*stack));
@@ -603,6 +591,15 @@
 #endif
     // Must be the last mutation of metadata in this function.
     atomic_store(&m->chunk_state, CHUNK_ALLOCATED, memory_order_release);
+    if (alloc_beg != chunk_beg) {
+      reinterpret_cast<uptr *>(alloc_beg)[1] = chunk_beg;
+      // Up to this point only LSan can inspect this chunk, and the only
+      // information LSan needs is where AsanChunk ends. Without CHUNK_PTR,
+      // LSan assumes chunk_beg == alloc_beg, which does not matter for leak
+      // checking of a just-allocated block.
+      atomic_store(&reinterpret_cast<AsanChunk *>(alloc_beg)->chunk_state,
+                   CHUNK_PTR, memory_order_release);
+    }
     ASAN_MALLOC_HOOK(res, size);
     return res;
   }
@@ -770,17 +767,9 @@
   AsanChunk *GetAsanChunk(void *alloc_beg) {
     if (!alloc_beg)
       return nullptr;
-    AsanChunk *p = nullptr;
-    if (!allocator.FromPrimary(alloc_beg)) {
-      uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(alloc_beg));
-      p = reinterpret_cast<AsanChunk *>(meta[1]);
-    } else {
-      uptr *alloc_magic = reinterpret_cast<uptr *>(alloc_beg);
-      if (alloc_magic[0] == kAllocBegMagic)
-        p = reinterpret_cast<AsanChunk *>(alloc_magic[1]);
-      else
-        p = reinterpret_cast<AsanChunk *>(alloc_beg);
-    }
+    AsanChunk *p = reinterpret_cast<AsanChunk *>(alloc_beg);
+    if (atomic_load(&p->chunk_state, memory_order_acquire) == CHUNK_PTR)
+      p = reinterpret_cast<AsanChunk *>(reinterpret_cast<uptr *>(alloc_beg)[1]);
     if (!p)
       return nullptr;
     if (atomic_load(&p->chunk_state, memory_order_relaxed) == CHUNK_INVALID)
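Reviewer note (not part of the patch): below is a minimal, self-contained sketch of the redirection scheme this change relies on. Every name in it (FakeHeader, kChunkPtr, LookupChunk) is invented for illustration; only the idea mirrors the patch. When the real chunk header does not sit at alloc_beg, the block now starts with a placeholder header whose state byte holds the CHUNK_PTR marker, and the second uptr word of the block stores the address of the real header; GetAsanChunk follows that pointer, and Recycle later resets the placeholder to CHUNK_INVALID.

#include <atomic>
#include <cassert>
#include <cstdint>
#include <new>

using uptr = std::uintptr_t;

// Invented stand-ins for the chunk states used by the patch.
enum : std::uint8_t { kChunkInvalid = 0, kChunkAllocated = 2, kChunkPtr = 255 };

// Invented stand-in for ChunkHeader; only the state byte matters here.
struct FakeHeader {
  std::atomic<std::uint8_t> chunk_state{kChunkInvalid};
};

// Mirrors the reworked GetAsanChunk: interpret alloc_beg as a header; if it
// carries the CHUNK_PTR marker, the address of the real header is stored in
// the second uptr word of the block.
FakeHeader *LookupChunk(void *alloc_beg) {
  auto *p = reinterpret_cast<FakeHeader *>(alloc_beg);
  if (p->chunk_state.load(std::memory_order_acquire) == kChunkPtr)
    p = reinterpret_cast<FakeHeader *>(reinterpret_cast<uptr *>(alloc_beg)[1]);
  return p;
}

int main() {
  // Simulate a block whose real header lives past a large left redzone.
  alignas(16) unsigned char block[64] = {};
  auto *placeholder = new (block) FakeHeader;
  auto *real = new (block + 32) FakeHeader;
  real->chunk_state.store(kChunkAllocated, std::memory_order_release);

  // Publish the redirection: pointer word first, marker last (release),
  // matching the order used right after CHUNK_ALLOCATED is set in the patch.
  reinterpret_cast<uptr *>(block)[1] = reinterpret_cast<uptr>(real);
  placeholder->chunk_state.store(kChunkPtr, std::memory_order_release);

  assert(LookupChunk(block) == real);
  return 0;
}

As in the patch, the marker is published with a release store only after the real header is fully set up, and the lookup uses an acquire load, so a concurrent reader either observes the marker together with a valid chunk pointer or does not observe the marker at all.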