diff --git a/compiler-rt/lib/asan/asan_allocator.cpp b/compiler-rt/lib/asan/asan_allocator.cpp
--- a/compiler-rt/lib/asan/asan_allocator.cpp
+++ b/compiler-rt/lib/asan/asan_allocator.cpp
@@ -62,11 +62,12 @@
 // If the left redzone is greater than the ChunkHeader size we store a magic
 // value in the first uptr word of the memory block and store the address of
 // ChunkBase in the next uptr.
-// M B L L L L L L L L L H H U U U U U U
+// M B L L L L L L L L S H H U U U U U U
 //   |                   ^
 //   --------------------|
 // M -- magic value kAllocBegMagic
 // B -- address of ChunkHeader pointing to the first 'H'
+// S -- user_requested_size, stored here only for large allocations
 static const uptr kAllocBegMagic = 0xCC6E96B9;

 struct ChunkHeader {
@@ -81,8 +82,7 @@
   u32 lsan_tag : 2;
   // 2-nd 8 bytes
   // This field is used for small sizes. For large sizes it is equal to
-  // SizeClassMap::kMaxSize and the actual size is stored in the
-  // SecondaryAllocator's metadata.
+  // SizeClassMap::kMaxSize and the actual size is stored to the left of the header.
   u32 user_requested_size : 29;
   // align < 8 -> 0
   // else -> log2(min(align, 512)) - 2
@@ -112,23 +112,28 @@
 struct AsanChunk: ChunkBase {
   uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
-  uptr UsedSize(bool locked_version = false) {
+  uptr UsedSize() {
     if (user_requested_size != SizeClassMap::kMaxSize)
       return user_requested_size;
-    return *reinterpret_cast<uptr *>(
-        get_allocator().GetMetaData(AllocBeg(locked_version)));
+    return *(reinterpret_cast<uptr *>(this) - 1);
   }
-  void *AllocBeg(bool locked_version = false) {
-    if (from_memalign) {
-      if (locked_version)
-        return get_allocator().GetBlockBeginFastLocked(
-            reinterpret_cast<void *>(this));
-      return get_allocator().GetBlockBegin(reinterpret_cast<void *>(this));
+  void SetUsedSize(uptr size, void *unused_beg) {
+    if (size < SizeClassMap::kMaxSize) {
+      user_requested_size = size;
+      return;
     }
+    CHECK_GE(reinterpret_cast<uptr *>(this),
+             reinterpret_cast<uptr *>(unused_beg) + 1);
+    *(reinterpret_cast<uptr *>(this) - 1) = size;
+    user_requested_size = SizeClassMap::kMaxSize;
+  }
+  void *AllocBeg() {
+    if (from_memalign)
+      return get_allocator().GetBlockBegin(reinterpret_cast<void *>(this));
     return reinterpret_cast<void *>(Beg() - RZLog2Size(rz_log));
   }
-  bool AddrIsInside(uptr addr, bool locked_version = false) {
-    return (addr >= Beg()) && (addr < Beg() + UsedSize(locked_version));
+  bool AddrIsInside(uptr addr) {
+    return (addr >= Beg()) && (addr < Beg() + UsedSize());
   }
 };
@@ -308,7 +313,7 @@
   if (atomic_load(&ac->chunk_state, memory_order_acquire) ==
       CHUNK_ALLOCATED) {
     uptr beg = ac->Beg();
-    uptr end = ac->Beg() + ac->UsedSize(true);
+    uptr end = ac->Beg() + ac->UsedSize();
     uptr chunk_end = chunk + allocated_size;
     if (chunk < beg && beg < end && end <= chunk_end) {
       // Looks like a valid AsanChunk in use, poison redzones only.
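Note on the hunks above: the exact size of a large allocation no longer lives in the SecondaryAllocator's metadata (which required the locked_version plumbing to reach safely), but in the uptr word immediately to the left of the ChunkHeader, the 'S' slot in the layout diagram. A minimal standalone sketch of that addressing trick, using illustrative stand-ins (Header, kSentinel, chunk_mem) rather than the real ASan types:

    #include <cassert>
    #include <cstdint>

    using uptr = std::uintptr_t;

    // Stand-in for SizeClassMap::kMaxSize; the value is illustrative.
    constexpr uptr kSentinel = (uptr(1) << 29) - 1;

    struct Header {
      uptr size_or_sentinel;  // stands in for the 29-bit user_requested_size field

      uptr UsedSize() const {
        if (size_or_sentinel != kSentinel)
          return size_or_sentinel;
        // Large size: read the uptr word immediately left of the header.
        return *(reinterpret_cast<const uptr *>(this) - 1);
      }

      void SetUsedSize(uptr size) {
        if (size < kSentinel) {
          size_or_sentinel = size;  // small: fits in the header field
          return;
        }
        // Large: spill the exact size into the word left of the header and
        // leave the sentinel in the field itself.
        *(reinterpret_cast<uptr *>(this) - 1) = size;
        size_or_sentinel = kSentinel;
      }
    };

    int main() {
      // Fake chunk layout: [ redzone words ... | S slot | Header ].
      alignas(uptr) unsigned char chunk_mem[4 * sizeof(uptr) + sizeof(Header)] = {};
      Header *h = reinterpret_cast<Header *>(chunk_mem + 4 * sizeof(uptr));

      h->SetUsedSize(42);
      assert(h->UsedSize() == 42);

      h->SetUsedSize(kSentinel + 100);  // too big for the field: uses the S slot
      assert(h->UsedSize() == kSentinel + 100);
      return 0;
    }

This is what lets UsedSize() drop its locked_version parameter: reading the word at (uptr *)this - 1 needs no allocator lock, unlike the old GetMetaData(AllocBeg(...)) path.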
@@ -522,15 +527,15 @@
     reinterpret_cast<uptr *>(alloc_beg)[0] = kAllocBegMagic;
     reinterpret_cast<uptr *>(alloc_beg)[1] = chunk_beg;
   }
+  CHECK(size);
   if (using_primary_allocator) {
-    CHECK(size);
-    m->user_requested_size = size;
     CHECK(allocator.FromPrimary(allocated));
+    m->user_requested_size = size;
   } else {
     CHECK(!allocator.FromPrimary(allocated));
-    m->user_requested_size = SizeClassMap::kMaxSize;
+    m->SetUsedSize(
+        size, reinterpret_cast<uptr *>(alloc_beg) + 2 /* magic, chunk_beg */);
     uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(allocated));
-    meta[0] = size;
     meta[1] = chunk_beg;
   }
   m->user_requested_alignment_log = user_requested_alignment_log;
@@ -1076,10 +1081,9 @@
       __asan::CHUNK_ALLOCATED)
     return 0;
   uptr chunk = m->Beg();
-  if (m->AddrIsInside(addr, /*locked_version=*/true))
+  if (m->AddrIsInside(addr))
     return chunk;
-  if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(/*locked_version*/ true),
-                                  addr))
+  if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(), addr))
     return chunk;
   return 0;
 }
@@ -1132,7 +1136,7 @@

 uptr LsanMetadata::requested_size() const {
   __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
-  return m->UsedSize(/*locked_version=*/true);
+  return m->UsedSize();
 }

 u32 LsanMetadata::stack_trace_id() const {
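One subtlety in the allocation hunk: SetUsedSize is handed reinterpret_cast<uptr *>(alloc_beg) + 2 because the first two uptr words of the block are already occupied by kAllocBegMagic and the ChunkHeader address (the 'M' and 'B' words in the layout diagram). Its CHECK_GE then rules out the size slot at (uptr *)this - 1 overlapping those words. A small sketch of that bound, with hypothetical values (block and SizeSlotFitsLeftOfHeader are illustrative names, not patch code):

    #include <cassert>
    #include <cstdint>

    using uptr = std::uintptr_t;

    // Recreates the CHECK_GE bound from SetUsedSize with illustrative names.
    // `alloc_beg` is the block start; `header` is where the ChunkHeader sits.
    bool SizeSlotFitsLeftOfHeader(uptr *alloc_beg, uptr *header) {
      uptr *unused_beg = alloc_beg + 2;  // skip the magic and chunk_beg words
      // The size slot is header - 1, so it stays clear of the magic words iff
      // header - 1 >= unused_beg, i.e. header >= unused_beg + 1.
      return header >= unused_beg + 1;
    }

    int main() {
      uptr block[8] = {};
      assert(SizeSlotFitsLeftOfHeader(block, block + 3));   // S slot lands in block[2]
      assert(!SizeSlotFitsLeftOfHeader(block, block + 2));  // S slot would hit chunk_beg
      return 0;
    }

Note that meta[0] = size is gone but meta[1] = chunk_beg remains: the secondary allocator's metadata no longer carries the size, only the back-pointer to the chunk.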