diff --git a/compiler-rt/lib/asan/asan_allocator.cpp b/compiler-rt/lib/asan/asan_allocator.cpp
--- a/compiler-rt/lib/asan/asan_allocator.cpp
+++ b/compiler-rt/lib/asan/asan_allocator.cpp
@@ -93,18 +93,32 @@
   u8 rz_log : 3;
   u8 lsan_tag : 2;
 
-  // This field is used for small sizes. For large sizes it is equal to
-  // SizeClassMap::kMaxSize and the actual size is stored in the
-  // SecondaryAllocator's metadata.
-  u32 user_requested_size : 29;
   // align < 8 -> 0
   // else -> log2(min(align, 512)) - 2
-  u32 user_requested_alignment_log : 3;
+  u16 user_requested_alignment_log : 3;
 
  private:
+  u16 user_requested_size_hi : 13;
+  u32 user_requested_size_lo;
   atomic_uint64_t alloc_context_id;
 
  public:
+  uptr UsedSize() const {
+    uptr R = user_requested_size_lo;
+    if (sizeof(uptr) > sizeof(user_requested_size_lo))
+      R += (uptr)user_requested_size_hi << (8 * sizeof(user_requested_size_lo));
+    return R;
+  }
+
+  void SetUsedSize(uptr size) {
+    user_requested_size_lo = size;
+    if (sizeof(uptr) > sizeof(user_requested_size_lo)) {
+      size >>= (8 * sizeof(user_requested_size_lo));
+      user_requested_size_hi = size;
+      CHECK_EQ(user_requested_size_hi, size);
+    }
+  }
+
   void SetAllocContext(u32 tid, u32 stack) {
     AtomicContextStore(&alloc_context_id, tid, stack);
   }
@@ -145,23 +159,15 @@
 
 struct AsanChunk: ChunkBase {
   uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
-  uptr UsedSize(bool locked_version = false) {
-    if (user_requested_size != SizeClassMap::kMaxSize)
-      return user_requested_size;
-    return *reinterpret_cast<uptr *>(
-        get_allocator().GetMetaData(AllocBeg(locked_version)));
-  }
-  void *AllocBeg(bool locked_version = false) {
-    if (from_memalign) {
-      if (locked_version)
-        return get_allocator().GetBlockBeginFastLocked(
-            reinterpret_cast<void *>(this));
+
+  void *AllocBeg() {
+    if (from_memalign)
       return get_allocator().GetBlockBegin(reinterpret_cast<void *>(this));
-    }
     return reinterpret_cast<void *>(Beg() - RZLog2Size(rz_log));
   }
-  bool AddrIsInside(uptr addr, bool locked_version = false) {
-    return (addr >= Beg()) && (addr < Beg() + UsedSize(locked_version));
+
+  bool AddrIsInside(uptr addr) {
+    return (addr >= Beg()) && (addr < Beg() + UsedSize());
   }
 };
 
@@ -338,7 +344,7 @@
     if (ac && atomic_load(&ac->chunk_state, memory_order_acquire) ==
                   CHUNK_ALLOCATED) {
       uptr beg = ac->Beg();
-      uptr end = ac->Beg() + ac->UsedSize(true);
+      uptr end = ac->Beg() + ac->UsedSize();
       uptr chunk_end = chunk + allocated_size;
       if (chunk < beg && beg < end && end <= chunk_end) {
         // Looks like a valid AsanChunk in use, poison redzones only.
@@ -552,15 +558,13 @@
     reinterpret_cast<uptr *>(alloc_beg)[0] = kAllocBegMagic;
     reinterpret_cast<uptr *>(alloc_beg)[1] = chunk_beg;
   }
+  CHECK(size);
+  m->SetUsedSize(size);
   if (using_primary_allocator) {
-    CHECK(size);
-    m->user_requested_size = size;
     CHECK(allocator.FromPrimary(allocated));
   } else {
     CHECK(!allocator.FromPrimary(allocated));
-    m->user_requested_size = SizeClassMap::kMaxSize;
     uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(allocated));
-    meta[0] = size;
     meta[1] = chunk_beg;
   }
   m->user_requested_alignment_log = user_requested_alignment_log;
@@ -1112,10 +1116,9 @@
                 __asan::CHUNK_ALLOCATED)
     return 0;
   uptr chunk = m->Beg();
-  if (m->AddrIsInside(addr, /*locked_version=*/true))
+  if (m->AddrIsInside(addr))
     return chunk;
-  if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(/*locked_version*/ true),
-                                  addr))
+  if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(), addr))
     return chunk;
   return 0;
 }
@@ -1150,7 +1153,7 @@
 
 uptr LsanMetadata::requested_size() const {
   __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
-  return m->UsedSize(/*locked_version=*/true);
+  return m->UsedSize();
 }
 
 u32 LsanMetadata::stack_trace_id() const {
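Note: the sketch below is illustrative and not part of the patch. It mirrors the hi/lo bit packing that the new SetUsedSize()/UsedSize() accessors perform in ChunkHeader, with the sanitizer types and macros (uptr, CHECK_EQ) replaced by uintptr_t and assert, and a hypothetical ChunkHeaderSketch type so it compiles outside the runtime.

// Standalone sketch of the 32+13 bit size packing introduced by this patch.
#include <cassert>
#include <cstdint>
#include <cstdio>

struct ChunkHeaderSketch {
  // High 13 bits of the user-requested size (only used on 64-bit targets).
  uint16_t user_requested_size_hi : 13;
  // Low 32 bits of the user-requested size.
  uint32_t user_requested_size_lo;

  uintptr_t UsedSize() const {
    uintptr_t r = user_requested_size_lo;
    if (sizeof(uintptr_t) > sizeof(user_requested_size_lo))
      r += (uintptr_t)user_requested_size_hi
           << (8 * sizeof(user_requested_size_lo));
    return r;
  }

  void SetUsedSize(uintptr_t size) {
    user_requested_size_lo = size;  // implicit truncation keeps the low 32 bits
    if (sizeof(uintptr_t) > sizeof(user_requested_size_lo)) {
      size >>= 8 * sizeof(user_requested_size_lo);  // remaining high bits
      user_requested_size_hi = size;
      assert(user_requested_size_hi == size);  // size must fit in 32 + 13 bits
    }
  }
};

int main() {
  ChunkHeaderSketch h{};
  // A request above 4 GiB exercises both fields on a 64-bit host.
  uintptr_t size = (uintptr_t)5 << 30;
  h.SetUsedSize(size);
  assert(h.UsedSize() == size);
  printf("round-tripped size: %zu\n", (size_t)h.UsedSize());
  return 0;
}

On 32-bit targets sizeof(uintptr_t) equals sizeof(user_requested_size_lo), so the sizeof guard keeps the high field untouched, which matches the guard in the patched SetUsedSize()/UsedSize().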