Index: lib/asan/asan_allocator.cc =================================================================== --- lib/asan/asan_allocator.cc +++ lib/asan/asan_allocator.cc @@ -111,24 +111,79 @@ }; struct AsanChunk: ChunkBase { - uptr Beg() { return reinterpret_cast(this) + kChunkHeaderSize; } + uptr Beg() { return BegInternal(reinterpret_cast(this)); } uptr UsedSize(bool locked_version = false) { - if (user_requested_size != SizeClassMap::kMaxSize) - return user_requested_size; - return *reinterpret_cast( - get_allocator().GetMetaData(AllocBeg(locked_version))); + return UsedSizeInternal( + reinterpret_cast(this), locked_version, + reinterpret_cast(&get_allocator()), nullptr); } void *AllocBeg(bool locked_version = false) { + return AllocBegInternal( + reinterpret_cast(this), locked_version, + reinterpret_cast(&get_allocator())); + } + bool AddrIsInside(uptr addr, bool locked_version = false) { + return (addr >= Beg()) && (addr < Beg() + UsedSize(locked_version)); + } + + private: + // Common code for in-process and out-of-process chunk examination. + // + // * For the in-process case `chunk_storage_addr` is the same as the `this` + // pointer. For the out-of-process case it is a pointer to the + // `AsanChunk` in the target process. + // * `allocator` is a pointer to the allocator in the local + // address space, instantiated with the appropriate `AddressSpaceView` + // template parameter. For the `out-of-process` case this means the client + // is responsible for copying the allocator from the target process into + // the local process. 
+ + uptr BegInternal(uptr chunk_storage_addr) { + return chunk_storage_addr + kChunkHeaderSize; + } + + template + void *AllocBegInternal(uptr chunk_storage_addr, bool locked_version, + uptr allocator) { if (from_memalign) { + auto allocator_ptr = + reinterpret_cast *>(allocator); if (locked_version) - return get_allocator().GetBlockBeginFastLocked( - reinterpret_cast(this)); - return get_allocator().GetBlockBegin(reinterpret_cast(this)); + return allocator_ptr->GetBlockBeginFastLocked( + reinterpret_cast(chunk_storage_addr)); + return allocator_ptr->GetBlockBegin( + reinterpret_cast(chunk_storage_addr)); } - return reinterpret_cast(Beg() - RZLog2Size(rz_log)); + return reinterpret_cast(BegInternal(chunk_storage_addr) - + RZLog2Size(rz_log)); } - bool AddrIsInside(uptr addr, bool locked_version = false) { - return (addr >= Beg()) && (addr < Beg() + UsedSize(locked_version)); + + // `known_alloc_beg` - Address of the beginning of the underlying + // allocator block in the target process. + // If not set to zero then the following must be true `known_alloc_beg == + // AllocBegInternal(chunk_storage_addr, locked_version, allocator)`. If set to + // `0` then `AllocBegInternal()` will be called to determine + // the address of the allocator block. + // + // This is an optimization to avoid calling `AllocBegInternal()` when + // the result of `AllocBegInternal(...)` is already known by the + // caller. + template + uptr UsedSizeInternal(uptr chunk_storage_addr, bool locked_version, + uptr allocator, void *known_alloc_beg) { + if (user_requested_size != SizeClassMap::kMaxSize) + return user_requested_size; + if (!known_alloc_beg) { + known_alloc_beg = AllocBegInternal( + chunk_storage_addr, locked_version, allocator); + } else { + DCHECK_EQ(known_alloc_beg, + AllocBegInternal(chunk_storage_addr, + locked_version, allocator)); + } + return *AddressSpaceView::Load(reinterpret_cast( + reinterpret_cast *>(allocator) + ->GetMetaData(known_alloc_beg))); + } };