Index: lib/asan/asan_malloc_mac.cc
===================================================================
--- lib/asan/asan_malloc_mac.cc
+++ lib/asan/asan_malloc_mac.cc
@@ -19,6 +19,7 @@
 #include "asan_stack.h"
 #include "asan_stats.h"
 #include "lsan/lsan_common.h"
+#include "sanitizer_common/sanitizer_remote_address_space_view.h"

 using namespace __asan;
 #define COMMON_MALLOC_ZONE_NAME "asan"
@@ -58,7 +59,7 @@
   GET_STACK_TRACE_FREE; \
   ReportMacMzReallocUnknown((uptr)ptr, (uptr)zone_ptr, zone_name, &stack);
 #define COMMON_MALLOC_NAMESPACE __asan
-#define COMMON_MALLOC_HAS_ZONE_ENUMERATOR 0
+#define COMMON_MALLOC_HAS_ZONE_ENUMERATOR 1
 #define COMMON_MALLOC_HAS_EXTRA_INTROSPECTION_INIT 1

 #include "sanitizer_common/sanitizer_malloc_mac.inc"
@@ -83,6 +84,7 @@
   InitMallocZoneFields();
   return true;
 }
+
 } // namespace COMMON_MALLOC_NAMESPACE

 namespace {
@@ -97,6 +99,165 @@
   mi->allocator_size = last_byte_plus_one - (mi->allocator_ptr);
   CHECK_GT(mi->allocator_size, 0);
 }
+
+struct CallBackContext {
+  // TODO(dliew): Evaluate the performance of this buffer size.
+  static const uptr kAllocationsBufferSize = 256;
+  void *context;
+  vm_range_recorder_t *recorder;
+  bool failure;
+  const sanitizer_malloc_introspection_t *target_mi;
+  u32 num_buffered_allocations;
+  uptr allocator_cpy;
+  vm_range_t allocations_buffer[kAllocationsBufferSize];
+};
+
+void FlushAllocations(CallBackContext *cbc) {
+  if (cbc->num_buffered_allocations == 0) {
+    // Nothing to flush.
+    return;
+  }
+  // Record all the allocations as a batch.
+  CHECK_LE(cbc->num_buffered_allocations,
+           CallBackContext::kAllocationsBufferSize);
+  VReport(2, "Flushing %d enumerated allocations (%zu max buffer size)\n",
+          cbc->num_buffered_allocations,
+          CallBackContext::kAllocationsBufferSize);
+  cbc->recorder(RemoteAddressSpaceView::get_context()->GetTargetProcess(),
+                cbc->context,
+                /*type=*/MALLOC_PTR_IN_USE_RANGE_TYPE, cbc->allocations_buffer,
+                /*count=*/cbc->num_buffered_allocations);
+  cbc->num_buffered_allocations = 0;
+}
+
+void ChunkCallBack(uptr chunk, void *arg) {
+  CallBackContext *cbc = reinterpret_cast<CallBackContext *>(arg);
+
+  __lsan::LsanMetadataRemoteView meta_data(chunk, cbc->allocator_cpy);
+
+  bool is_allocated = meta_data.allocated();
+  if (!is_allocated) {
+    return;
+  }
+  uptr requested_size = meta_data.requested_size();
+
+  // Report the allocation.
+  vm_range_t range = {.address = meta_data.user_begin(),
+                      .size = requested_size};
+  VReport(2, "Enumerated allocation %p has size %zu bytes\n", range.address,
+          range.size);
+
+  // Store the allocations in a small buffer and flush them when necessary.
+  uptr buffer_index = cbc->num_buffered_allocations;
+  CHECK_LT(buffer_index, CallBackContext::kAllocationsBufferSize);
+  cbc->allocations_buffer[buffer_index] = range;
+  ++(cbc->num_buffered_allocations);
+  if (cbc->num_buffered_allocations ==
+      CallBackContext::kAllocationsBufferSize) {
+    FlushAllocations(cbc);
+  }
+}
+
+using RAV = RemoteAddressSpaceView;
+
+kern_return_t mi_enumerator(task_t task, void *context, unsigned type_mask,
+                            vm_address_t zone_address, memory_reader_t reader,
+                            vm_range_recorder_t recorder) {
+  if (!(type_mask & MALLOC_PTR_IN_USE_RANGE_TYPE)) {
+    CHECK(type_mask &
+          (MALLOC_ADMIN_REGION_RANGE_TYPE | MALLOC_PTR_REGION_RANGE_TYPE));
+    // We don't support iterating VM regions (ranges of pages)
+    //
+    // MALLOC_PTR_REGION_RANGE_TYPE - VM regions used to contain memory
+    // allocated by the allocator.
+    // MALLOC_ADMIN_REGION_RANGE_TYPE - VM regions used for allocator
+    // administration.
+    //
+    // so just pretend there aren't any.
+    //
+    // Not supporting enumeration of these VM regions should be okay because
+    // Symbolication won't treat VM regions tagged with VM_MEMORY_SANITIZER
+    // as root regions for the purposes of leak detection.
+    return KERN_SUCCESS;
+  }
+
+  RemoteAddressSpaceView::ScopedContext scoped_rav((void *)reader, task);
+
+  // Copy the `malloc_zone_t` struct into this process.
+  const malloc_zone_t *target_zone =
+      RAV::Load(reinterpret_cast<const malloc_zone_t *>(zone_address));
+
+  // Copy the introspection struct into this process.
+  const sanitizer_malloc_introspection_t *target_mi =
+      RAV::Load(reinterpret_cast<const sanitizer_malloc_introspection_t *>(
+          target_zone->introspect));
+
+  // Do the allocator enumeration version check.
+  if (target_mi->allocator_enumeration_version !=
+      GetMallocZoneAllocatorEnumerationVersion()) {
+    Report(
+        "Malloc zone allocator enumeration version in target process (%d) "
+        "and analysis process (%d) do not match. Refusing to enumerate.\n",
+        target_mi->allocator_enumeration_version,
+        GetMallocZoneAllocatorEnumerationVersion());
+    Die();
+    return KERN_FAILURE;
+  }
+
+  const bool should_lock_allocator =
+      RemoteAddressSpaceView::get_context()->IsLocal();
+
+  if (should_lock_allocator) {
+    // FIXME(dliew): Fix racy allocator access in the remote case.
+    // We should really lock the allocator in the remote case too, but at
+    // this point it's too late to do so because the target process is
+    // already frozen. So really the client of this method needs to do it on
+    // our behalf. Our main client (Symbolication) doesn't do this right now,
+    // so we need to figure out a way for Symbolication to tell a target
+    // sanitizer process to lock/unlock its allocator remotely.
+    mi_force_lock(&sanitizer_zone);
+  }
+
+  // Copy the allocator into this process.
+  using AsanAllocatorOOP = __asan::AsanAllocatorASVT<RemoteAddressSpaceView>;
+  static_assert(sizeof(AsanAllocatorOOP) == sizeof(__asan::AsanAllocator),
+                "wrong size");
+  if (target_mi->allocator_size != sizeof(__asan::AsanAllocator)) {
+    Report(
+        "Size of allocator in target process (%zd) doesn't match expected "
+        "size (%zd). Refusing to enumerate.\n",
+        target_mi->allocator_size, sizeof(__asan::AsanAllocator));
+    Die();
+    return KERN_FAILURE;
+  }
+  auto *target_allocator = RAV::LoadWritable(
+      reinterpret_cast<AsanAllocatorOOP *>(target_mi->allocator_ptr));
+
+  CallBackContext arg = {
+      .context = context,
+      .recorder = recorder,
+      .failure = false,
+      .target_mi = target_mi,
+      .num_buffered_allocations = 0,
+      .allocator_cpy = reinterpret_cast<uptr>(target_allocator),
+  };
+
+  target_allocator->ForEachChunk(ChunkCallBack, &arg);
+
+  if (should_lock_allocator) {
+    mi_force_unlock(&sanitizer_zone);
+  }
+
+  if (arg.failure) {
+    return KERN_FAILURE;
+  }
+
+  // Flush any remaining allocations.
+  FlushAllocations(&arg);
+
+  return KERN_SUCCESS;
+}
 } // namespace

 #endif
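
For reference, below is a minimal sketch (not part of this patch, and not how Symbolication drives enumeration) of a client that exercises a zone enumerator like the one added above, in the local in-process case only, using just the public <malloc/malloc.h> introspection API. The file name, LocalReader, CountingRecorder, and Totals are illustrative names invented for this sketch; the recorder deliberately only counts ranges rather than printing, since the zone's allocator may be force-locked while the enumerator runs.

// enumerate_live.cc (illustrative; build e.g. clang++ -fsanitize=address enumerate_live.cc)
#include <malloc/malloc.h>
#include <mach/mach.h>

#include <cstdio>
#include <cstdlib>

// Trivial memory_reader_t: in the local case the "remote" address is already
// valid in this address space, so hand it straight back.
static kern_return_t LocalReader(task_t, vm_address_t remote_address,
                                 vm_size_t, void **local_memory) {
  *local_memory = reinterpret_cast<void *>(remote_address);
  return KERN_SUCCESS;
}

struct Totals {
  unsigned chunks;
  size_t bytes;
};

// vm_range_recorder_t: the enumerator reports live allocations in batches of
// MALLOC_PTR_IN_USE_RANGE_TYPE ranges. Avoid allocating in here.
static void CountingRecorder(task_t, void *context, unsigned /*type*/,
                             vm_range_t *ranges, unsigned count) {
  Totals *t = static_cast<Totals *>(context);
  for (unsigned i = 0; i < count; ++i) {
    t->chunks += 1;
    t->bytes += ranges[i].size;
  }
}

int main() {
  void *live = malloc(128);  // Something for the enumerator to find.

  vm_address_t *zones = nullptr;
  unsigned zone_count = 0;
  if (malloc_get_all_zones(mach_task_self(), LocalReader, &zones,
                           &zone_count) != KERN_SUCCESS)
    return 1;

  Totals totals = {};
  for (unsigned i = 0; i < zone_count; ++i) {
    malloc_zone_t *zone = reinterpret_cast<malloc_zone_t *>(zones[i]);
    if (!zone->introspect || !zone->introspect->enumerator)
      continue;
    // Ask the zone for its live allocations only (no VM region iteration).
    zone->introspect->enumerator(mach_task_self(), &totals,
                                 MALLOC_PTR_IN_USE_RANGE_TYPE, zones[i],
                                 LocalReader, CountingRecorder);
  }
  printf("%u live allocations, %zu bytes total\n", totals.chunks, totals.bytes);

  free(live);
  return 0;
}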