diff --git a/compiler-rt/lib/asan/asan_allocator.cpp b/compiler-rt/lib/asan/asan_allocator.cpp
--- a/compiler-rt/lib/asan/asan_allocator.cpp
+++ b/compiler-rt/lib/asan/asan_allocator.cpp
@@ -52,7 +52,7 @@ static AsanAllocator &get_allocator();
 
 // The memory chunk allocated from the underlying allocator looks like this:
-// L L L L L L H H U U U U U U R R
+// L H H U U U U U U R R
 // L -- left redzone words (0 or more bytes)
 // H -- ChunkHeader (16 bytes), which is also a part of the left redzone.
 // U -- user memory.
 
@@ -60,16 +60,14 @@
 // ChunkBase consists of ChunkHeader and other bytes that overlap with user
 // memory.
 
-// If the left redzone is greater than the ChunkHeader size we store a magic
-// value in the first uptr word of the memory block and store the address of
-// ChunkBase in the next uptr.
-// M B L L L L L L L L S H H U U U U U U
-//   |                 ^
-//   ---------------------|
-// M -- magic value kAllocBegMagic
-// B -- address of ChunkHeader pointing to the first 'H'
-// S -- Maybe user_requested_size
-static const uptr kAllocBegMagic = 0xCC6E96B9;
+// If the left redzone is greater than the sizeof(ChunkHeader) + 2 *
+// sizeof(uptr) then we store a magic value in the first byte of the memory
+// block and store the address of ChunkBase in the second uptr.
+// A L L L L L L L L S H H U U U U U U
+// |                 ^
+// --------------------|
+// A -- AllocationHeader with address of ChunkHeader pointing to the first 'H'
+// S -- Optional user_requested_size
 
 struct ChunkHeader {
   // 1-st 8 bytes.
@@ -81,6 +79,7 @@
   u32 alloc_type : 2;
   u32 rz_log : 3;
   u32 lsan_tag : 2;
+
   // 2-nd 8 bytes
   // This field is used for small sizes. For large sizes it is equal to
   // SizeClassMap::kMaxSize and the actual size is stored left of the header.
@@ -113,6 +112,8 @@
   CHUNK_ALLOCATED = 2,
   // The chunk was freed and put into quarantine zone.
   CHUNK_QUARANTINE = 3,
+  // This is not the chunk and just a placeholder for the chunk pointer.
+  CHUNK_PTR = 255,
 };
 
 struct AsanChunk: ChunkBase {
@@ -152,6 +153,17 @@
   }
 
   void Recycle(AsanChunk *m) {
+    AsanChunk *p = reinterpret_cast<AsanChunk *>(m->AllocBeg());
+    if (p != m) {
+      u8 old_chunk_state = CHUNK_PTR;
+      // Clear the magic value, as allocator internals may overwrite the
+      // contents of deallocated chunk, confusing GetAsanChunk lookup.
+      if (!atomic_compare_exchange_strong(&p->chunk_state, &old_chunk_state,
+                                          CHUNK_INVALID,
+                                          memory_order_acquire)) {
+        CHECK_EQ(old_chunk_state, CHUNK_PTR);
+      }
+    }
     u8 old_chunk_state = CHUNK_QUARANTINE;
     if (!atomic_compare_exchange_strong(&m->chunk_state, &old_chunk_state,
                                         CHUNK_INVALID, memory_order_acquire)) {
@@ -163,15 +175,6 @@
     PoisonShadow(m->Beg(),
                  RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
                  kAsanHeapLeftRedzoneMagic);
-    void *p = reinterpret_cast<void *>(m->AllocBeg());
-    if (p != m) {
-      uptr *alloc_magic = reinterpret_cast<uptr *>(p);
-      CHECK_EQ(alloc_magic[0], kAllocBegMagic);
-      // Clear the magic value, as allocator internals may overwrite the
-      // contents of deallocated chunk, confusing GetAsanChunk lookup.
-      alloc_magic[0] = 0;
-      CHECK_EQ(alloc_magic[1], reinterpret_cast<uptr>(m));
-    }
 
     // Statistics.
     AsanStats &thread_stats = GetCurrentThreadStats();
@@ -533,11 +536,6 @@
     CHECK_EQ(alloc_tid, m->alloc_tid);  // Does alloc_tid fit into the bitfield?
     m->free_tid = kInvalidTid;
     m->from_memalign = user_beg != beg_plus_redzone;
-    if (alloc_beg != chunk_beg) {
-      CHECK_LE(alloc_beg + 2 * sizeof(uptr), chunk_beg);
-      reinterpret_cast<uptr *>(alloc_beg)[0] = kAllocBegMagic;
-      reinterpret_cast<uptr *>(alloc_beg)[1] = chunk_beg;
-    }
     CHECK(size);
     if (using_primary_allocator) {
       CHECK(allocator.FromPrimary(allocated));
@@ -546,8 +544,6 @@
       CHECK(!allocator.FromPrimary(allocated));
       m->SetUsedSize(
           size, reinterpret_cast<uptr *>(alloc_beg) + 2 /* magic, chunk_beg */);
-      uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(allocated));
-      meta[1] = chunk_beg;
     }
 
     m->user_requested_alignment_log = user_requested_alignment_log;
@@ -586,6 +582,15 @@
 #endif
     // Must be the last mutation of metadata in this function.
     atomic_store(&m->chunk_state, CHUNK_ALLOCATED, memory_order_release);
+    if (alloc_beg != chunk_beg) {
+      reinterpret_cast<uptr *>(alloc_beg)[1] = chunk_beg;
+      // Up to this point only LSan can inspect this chunk, and the only
+      // information LSan needs is where AsanChunk ends. Without CHUNK_PTR LSan
+      // assumes chunk_beg == alloc_beg but it was not important for leak
+      // checking on just allocated block.
+      atomic_store(&reinterpret_cast<AsanChunk *>(alloc_beg)->chunk_state,
+                   CHUNK_PTR, memory_order_release);
+    }
     ASAN_MALLOC_HOOK(res, size);
     return res;
   }
@@ -755,17 +760,9 @@
   AsanChunk *GetAsanChunk(void *alloc_beg) {
     if (!alloc_beg)
       return nullptr;
-    AsanChunk *p = nullptr;
-    if (!allocator.FromPrimary(alloc_beg)) {
-      uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(alloc_beg));
-      p = reinterpret_cast<AsanChunk *>(meta[1]);
-    } else {
-      uptr *alloc_magic = reinterpret_cast<uptr *>(alloc_beg);
-      if (alloc_magic[0] == kAllocBegMagic)
-        p = reinterpret_cast<AsanChunk *>(alloc_magic[1]);
-      else
-        p = reinterpret_cast<AsanChunk *>(alloc_beg);
-    }
+    AsanChunk *p = reinterpret_cast<AsanChunk *>(alloc_beg);
+    if (atomic_load(&p->chunk_state, memory_order_acquire) == CHUNK_PTR)
+      p = reinterpret_cast<AsanChunk *>(reinterpret_cast<uptr *>(alloc_beg)[1]);
     if (!p)
       return nullptr;
     if (atomic_load(&p->chunk_state, memory_order_relaxed) == CHUNK_INVALID)