diff --git a/compiler-rt/lib/asan/asan_allocator.cpp b/compiler-rt/lib/asan/asan_allocator.cpp --- a/compiler-rt/lib/asan/asan_allocator.cpp +++ b/compiler-rt/lib/asan/asan_allocator.cpp @@ -52,7 +52,7 @@ static AsanAllocator &get_allocator(); // The memory chunk allocated from the underlying allocator looks like this: -// L L L L L L H H U U U U U U R R +// L H H U U U U U U R R // L -- left redzone words (0 or more bytes) // H -- ChunkHeader (16 bytes), which is also a part of the left redzone. // U -- user memory. @@ -60,27 +60,27 @@ // ChunkBase consists of ChunkHeader and other bytes that overlap with user // memory. -// If the left redzone is greater than the ChunkHeader size we store a magic -// value in the first uptr word of the memory block and store the address of -// ChunkBase in the next uptr. -// M B L L L L L L L L S H H U U U U U U -// | ^ -// ---------------------| -// M -- magic value kAllocBegMagic -// B -- address of ChunkHeader pointing to the first 'H' -// S -- Maybe user_requested_size -static const uptr kAllocBegMagic = 0xCC6E96B9; +// If the left redzone is greater than the sizeof(ChunkHeader) + 2 * +// sizeof(uptr) then we store a magic value in the first byte of the memory +// block and store the address of ChunkBase in the second uptr. +// A L L L L L L L L S H H U U U U U U +// | ^ +// --------------------| +// A -- AllocationHeader with address of ChunkHeader pointing to the first 'H' +// S -- Optional user_requested_size struct ChunkHeader { // 1-st 8 bytes. atomic_uint8_t chunk_state; - u32 alloc_tid : 24; - u32 free_tid : 24; - u32 from_memalign : 1; - u32 alloc_type : 2; - u32 rz_log : 3; - u32 lsan_tag : 2; + u32 alloc_tid : 24; + + u32 free_tid : 24; + u32 from_memalign : 1; + u32 alloc_type : 2; + u32 rz_log : 3; + u32 lsan_tag : 2; + // 2-nd 8 bytes // This field is used for small sizes. For large sizes it is equal to // SizeClassMap::kMaxSize and the actual size is stored left to the header. 
@@ -105,10 +105,12 @@ // CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated. // CHUNK_ALLOCATED: the chunk is allocated and not yet freed. // CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone. +// CHUNK_PTR: not a real chunk state; the second uptr of the block holds the +// address of the actual ChunkHeader (see GetAsanChunk). enum { - CHUNK_AVAILABLE = 0, // 0 is the default value even if we didn't set it. - CHUNK_ALLOCATED = 2, - CHUNK_QUARANTINE = 3 + CHUNK_AVAILABLE = 0, // 0 is the default value even if we didn't set it. + CHUNK_ALLOCATED = 2, + CHUNK_QUARANTINE = 3, + CHUNK_PTR = 0xff, }; struct AsanChunk: ChunkBase { @@ -157,14 +159,15 @@ PoisonShadow(m->Beg(), RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY), kAsanHeapLeftRedzoneMagic); - void *p = reinterpret_cast(m->AllocBeg()); + AsanChunk *p = reinterpret_cast(m->AllocBeg()); if (p != m) { - uptr *alloc_magic = reinterpret_cast(p); - CHECK_EQ(alloc_magic[0], kAllocBegMagic); + old_chunk_state = CHUNK_AVAILABLE; // Clear the magic value, as allocator internals may overwrite the // contents of deallocated chunk, confusing GetAsanChunk lookup. - alloc_magic[0] = 0; - CHECK_EQ(alloc_magic[1], reinterpret_cast(m)); + if (!atomic_compare_exchange_strong(&p->chunk_state, &old_chunk_state, + CHUNK_PTR, memory_order_release)) { + CHECK_EQ(old_chunk_state, CHUNK_PTR); + } } // Statistics. @@ -523,11 +526,6 @@ CHECK_EQ(alloc_tid, m->alloc_tid); // Does alloc_tid fit into the bitfield?
m->free_tid = kInvalidTid; m->from_memalign = user_beg != beg_plus_redzone; - if (alloc_beg != chunk_beg) { - CHECK_LE(alloc_beg+ 2 * sizeof(uptr), chunk_beg); - reinterpret_cast(alloc_beg)[0] = kAllocBegMagic; - reinterpret_cast(alloc_beg)[1] = chunk_beg; - } CHECK(size); if (using_primary_allocator) { CHECK(allocator.FromPrimary(allocated)); @@ -536,8 +534,6 @@ CHECK(!allocator.FromPrimary(allocated)); m->SetUsedSize( size, reinterpret_cast(alloc_beg) + 2 /* magic, chunk_beg */); - uptr *meta = reinterpret_cast(allocator.GetMetaData(allocated)); - meta[1] = chunk_beg; } m->user_requested_alignment_log = user_requested_alignment_log; @@ -576,6 +572,15 @@ #endif // Must be the last mutation of metadata in this function. atomic_store(&m->chunk_state, CHUNK_ALLOCATED, memory_order_release); + if (alloc_beg != chunk_beg) { + reinterpret_cast(alloc_beg)[1] = chunk_beg; + // Up to this point only LSan can inspect this chunk, and the only + // information LSan needs is where AsanChunk ends. Without CHUNK_PTR LSan + // assumes chunk_beg == alloc_beg, which is harmless for leak checking + // on a just-allocated block. + atomic_store(&reinterpret_cast(alloc_beg)->chunk_state, + CHUNK_PTR, memory_order_release); + } ASAN_MALLOC_HOOK(res, size); return res; } @@ -740,37 +745,24 @@ // Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg). AsanChunk *GetAsanChunk(void *alloc_beg) { - if (!alloc_beg) return nullptr; - if (!allocator.FromPrimary(alloc_beg)) { - uptr *meta = reinterpret_cast(allocator.GetMetaData(alloc_beg)); - AsanChunk *m = reinterpret_cast(meta[1]); - return m ?
m : reinterpret_cast(alloc_beg); - } - uptr *alloc_magic = reinterpret_cast(alloc_beg); - if (alloc_magic[0] == kAllocBegMagic) - return reinterpret_cast(alloc_magic[1]); - return reinterpret_cast(alloc_beg); + if (!alloc_beg) + return nullptr; + AsanChunk *p = reinterpret_cast(alloc_beg); + if (atomic_load(&p->chunk_state, memory_order_acquire) == CHUNK_PTR) + p = reinterpret_cast(reinterpret_cast(alloc_beg)[1]); + return p; } AsanChunk *GetAsanChunkDebug(void *alloc_beg) { - if (!alloc_beg) return nullptr; - if (!allocator.FromPrimary(alloc_beg)) { - uptr *meta = reinterpret_cast(allocator.GetMetaData(alloc_beg)); - AsanChunk *m = reinterpret_cast(meta[1]); - Printf("GetAsanChunkDebug1 alloc_beg %p meta %p m %p\n", alloc_beg, meta, m); - return m ? m : reinterpret_cast(alloc_beg); - } - uptr *alloc_magic = reinterpret_cast(alloc_beg); - Printf( - "GetAsanChunkDebug2 alloc_beg %p alloc_magic %p alloc_magic[0] %p " - "alloc_magic[1] %p\n", - alloc_beg, alloc_magic, alloc_magic[0], alloc_magic[1]); - if (alloc_magic[0] == kAllocBegMagic) - return reinterpret_cast(alloc_magic[1]); - return reinterpret_cast(alloc_beg); + if (!alloc_beg) + return nullptr; + AsanChunk *p = reinterpret_cast(alloc_beg); + if (atomic_load(&p->chunk_state, memory_order_acquire) == CHUNK_PTR) + p = reinterpret_cast(reinterpret_cast(alloc_beg)[1]); + Printf("GetAsanChunkDebug2 alloc_beg %p AsanChunk %p\n", alloc_beg, p); + return p; } - AsanChunk *GetAsanChunkByAddr(uptr p) { void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast(p)); return GetAsanChunk(alloc_beg); @@ -1057,7 +1049,7 @@ instance.SetRssLimitExceeded(limit_exceeded); } -} // namespace __asan +} // namespace __asan // --- Implementation of LSan-specific functions --- {{{1 namespace __lsan {