Index: lib/sanitizer_common/CMakeLists.txt
===================================================================
--- lib/sanitizer_common/CMakeLists.txt
+++ lib/sanitizer_common/CMakeLists.txt
@@ -37,6 +37,9 @@
   sanitizer_suppressions.cc
   sanitizer_tls_get_addr.cc
   sanitizer_thread_registry.cc
+  sanitizer_vm_read_context_common.cc
+  sanitizer_vm_read_context_mac.cc
+  sanitizer_vm_read_context_stubs.cc
   sanitizer_win.cc)

 if(UNIX AND NOT APPLE AND NOT OS_NAME MATCHES "SunOS")
@@ -144,6 +147,7 @@
   sanitizer_libignore.h
   sanitizer_linux.h
   sanitizer_list.h
+  sanitizer_local_object_view.h
   sanitizer_mac.h
   sanitizer_malloc_mac.inc
   sanitizer_mutex.h
@@ -158,6 +162,7 @@
   sanitizer_posix.h
   sanitizer_procmaps.h
   sanitizer_quarantine.h
+  sanitizer_remote_object_view.h
   sanitizer_report_decorator.h
   sanitizer_ring_buffer.h
   sanitizer_rtems.h
@@ -182,6 +187,7 @@
   sanitizer_thread_registry.h
   sanitizer_tls_get_addr.h
   sanitizer_vector.h
+  sanitizer_vm_read_context.h
   sanitizer_win.h
   sanitizer_win_defs.h
   sanitizer_win_dll_thunk.h
Index: lib/sanitizer_common/sanitizer_allocator.h
===================================================================
--- lib/sanitizer_common/sanitizer_allocator.h
+++ lib/sanitizer_common/sanitizer_allocator.h
@@ -14,12 +14,13 @@
 #ifndef SANITIZER_ALLOCATOR_H
 #define SANITIZER_ALLOCATOR_H

-#include "sanitizer_internal_defs.h"
 #include "sanitizer_common.h"
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_lfstack.h"
 #include "sanitizer_libc.h"
 #include "sanitizer_list.h"
+#include "sanitizer_local_object_view.h"
 #include "sanitizer_mutex.h"
-#include "sanitizer_lfstack.h"
 #include "sanitizer_procmaps.h"

 namespace __sanitizer {
Index: lib/sanitizer_common/sanitizer_allocator_bytemap.h
===================================================================
--- lib/sanitizer_common/sanitizer_allocator_bytemap.h
+++ lib/sanitizer_common/sanitizer_allocator_bytemap.h
@@ -18,6 +18,7 @@
 template class FlatByteMap {
  public:
+  typedef FlatByteMap ThisT;
   void Init() {
     internal_memset(map_, 0, sizeof(map_));
   }
@@ -27,11 +28,23 @@
     CHECK_EQ(0U, map_[idx]);
     map_[idx] = val;
   }
-  u8 operator[] (uptr idx) {
+
+  u8 operator[](uptr idx) const {
+    LocalObjectView local_view(this);
+    return GetOOP(idx, &local_view);
+  }
+
+  template
+  ALWAYS_INLINE static u8 GetOOP(uptr idx, ObjectView *view) {
+    static_assert(
+        std::is_same::value ||
+            std::is_same::value,
+        "ObjectView has wrong type");
     CHECK_LT(idx, kSize);
-    // FIXME: CHECK may be too expensive here.
-    return map_[idx];
+    CHECK(view->IsValid());
+    return view->GetLocalAddress()->map_[idx];
   }
+
  private:
   u8 map_[kSize];
 };
@@ -44,6 +57,7 @@
 template class TwoLevelByteMap {
  public:
+  typedef TwoLevelByteMap ThisT;
   void Init() {
     internal_memset(map1_, 0, sizeof(map1_));
     mu_.Init();
   }
@@ -70,10 +84,29 @@
   }

   u8 operator[] (uptr idx) const {
+    LocalObjectView local_view(this);
+    return GetOOP(idx, &local_view);
+  }
+
+  template
+  ALWAYS_INLINE static u8 GetOOP(uptr idx, ObjectView *view) {
+    static_assert(
+        std::is_same::value ||
+            std::is_same::value,
+        "ObjectView has wrong type");
     CHECK_LT(idx, kSize1 * kSize2);
-    u8 *map2 = Get(idx / kSize2);
-    if (!map2) return 0;
-    return map2[idx % kSize2];
+    CHECK(view->IsValid());
+    // FIXME(dliew): This method is currently safe to call directly. Should
+    // we make an ObjectView version?
+ uptr map2 = + reinterpret_cast(view->GetLocalAddress()->Get(idx / kSize2)); + if (!map2) + return 0; + uptr byte_addr = map2 + (idx % kSize2); + typename ObjectView::template SmallAllocTy byte_view( + byte_addr, view->GetReadContext()); + CHECK(byte_view.IsValid()); + return *(byte_view.GetLocalAddress()); } private: Index: lib/sanitizer_common/sanitizer_allocator_combined.h =================================================================== --- lib/sanitizer_common/sanitizer_allocator_combined.h +++ lib/sanitizer_common/sanitizer_allocator_combined.h @@ -24,6 +24,9 @@ class SecondaryAllocator> // NOLINT class CombinedAllocator { public: + typedef CombinedAllocator + ThisT; void InitLinkerInitialized(s32 release_to_os_interval_ms) { primary_.Init(release_to_os_interval_ms); secondary_.InitLinkerInitialized(); @@ -185,8 +188,20 @@ // Iterate over all existing chunks. // The allocator must be locked when calling this function. void ForEachChunk(ForEachChunkCallback callback, void *arg) { - primary_.ForEachChunk(callback, arg); - secondary_.ForEachChunk(callback, arg); + LocalObjectView local_view(this); + ThisT::ForEachChunkOOP(callback, arg, &local_view); + } + + template + ALWAYS_INLINE static void ForEachChunkOOP(ForEachChunkCallback callback, + void *arg, ObjectView *view) { + static_assert(std::is_same::value, + "ObjectView has wrong type"); + CHECK(view->IsValid()); + auto primary_slice = view->MakeSliceView(&ThisT::primary_); + PrimaryAllocator::ForEachChunkOOP(callback, arg, &primary_slice); + auto secondary_slice = view->MakeSliceView(&ThisT::secondary_); + SecondaryAllocator::ForEachChunkOOP(callback, arg, &secondary_slice); } private: Index: lib/sanitizer_common/sanitizer_allocator_primary32.h =================================================================== --- lib/sanitizer_common/sanitizer_allocator_primary32.h +++ lib/sanitizer_common/sanitizer_allocator_primary32.h @@ -236,18 +236,32 @@ // Iterate over all existing chunks. // The allocator must be locked when calling this function. 
void ForEachChunk(ForEachChunkCallback callback, void *arg) { - for (uptr region = 0; region < kNumPossibleRegions; region++) - if (possible_regions[region]) { - uptr chunk_size = ClassIdToSize(possible_regions[region]); - uptr max_chunks_in_region = kRegionSize / (chunk_size + kMetadataSize); - uptr region_beg = region * kRegionSize; - for (uptr chunk = region_beg; - chunk < region_beg + max_chunks_in_region * chunk_size; - chunk += chunk_size) { - // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk)); - callback(chunk, arg); - } + LocalObjectView local_view(this); + ForEachChunkOOP(callback, arg, &local_view); + } + + template + ALWAYS_INLINE static void ForEachChunkOOP(ForEachChunkCallback callback, + void *arg, ObjectView *view) { + static_assert(std::is_same::value, + "ObjectView has wrong type"); + CHECK(view->IsValid()); + auto possible_regions_view = view->MakeSliceView(&ThisT::possible_regions); + CHECK(possible_regions_view.IsValid()); + for (uptr region_id = 0; region_id < kNumPossibleRegions; ++region_id) { + auto class_id = ByteMap::GetOOP(region_id, &possible_regions_view); + if (!class_id) + continue; + + uptr region_beg = region_id * kRegionSize; + uptr chunk_size = ClassIdToSize(class_id); + uptr max_chunks_in_region = kRegionSize / (chunk_size + kMetadataSize); + for (uptr chunk = region_beg; + chunk < region_beg + max_chunks_in_region * chunk_size; + chunk += chunk_size) { + callback(chunk, arg); } + } } void PrintStats() {} @@ -268,7 +282,7 @@ }; COMPILER_CHECK(sizeof(SizeClassInfo) % kCacheLineSize == 0); - uptr ComputeRegionId(uptr mem) { + uptr ComputeRegionId(uptr mem) const { const uptr res = mem >> kRegionSizeLog; CHECK_LT(res, kNumPossibleRegions); return res; Index: lib/sanitizer_common/sanitizer_allocator_primary64.h =================================================================== --- lib/sanitizer_common/sanitizer_allocator_primary64.h +++ lib/sanitizer_common/sanitizer_allocator_primary64.h @@ -290,12 +290,50 @@ // Iterate over all existing chunks. // The allocator must be locked when calling this function. void ForEachChunk(ForEachChunkCallback callback, void *arg) { - for (uptr class_id = 1; class_id < kNumClasses; class_id++) { - RegionInfo *region = GetRegionInfo(class_id); + LocalObjectView local_view(this); + ThisT::ForEachChunkOOP(callback, arg, &local_view); + } + + template + ALWAYS_INLINE static void ForEachChunkOOP(ForEachChunkCallback callback, + void *arg, ObjectView *view) { + static_assert(std::is_same::value, + "ObjectView has wrong type"); + CHECK(view->IsValid()); + CHECK_NE(callback, nullptr); + + if (!view->IsLocal()) { + // Out-of-process + // Sanity checks + // FIXME(dliew): These checks don't really do what we want. They use + // global constants in this process rather than the target process. + CHECK_EQ(view->GetLocalAddress()->address_range.base(), + view->GetLocalAddress()->SpaceBeg()); + CHECK_EQ(view->GetLocalAddress()->address_range.size(), + kSpaceSize + ThisT::AdditionalSize()); + + // FIXME(dliew): Check that `kNumClasses` corresponds between this + // process and the target. + } + + // Skip class_id 0 because it is used for zero sized allocations. + for (uptr class_id = 1; class_id < kNumClasses; ++class_id) { + // Compute address to RegionInfo in target process. 
+ uptr region_ptr = + reinterpret_cast(GetRegionInfoOOP(class_id, view)); + + // Copy RegionInfo from target process + typename ObjectView::template SmallAllocTy region_info_view( + region_ptr, view->GetReadContext()); + CHECK(region_info_view.IsValid()); + uptr chunk_size = ClassIdToSize(class_id); - uptr region_beg = SpaceBeg() + class_id * kRegionSize; - for (uptr chunk = region_beg; - chunk < region_beg + region->allocated_user; + uptr region_beg = SpaceBegOOP(view) + (class_id * kRegionSize); + uptr region_allocated_user = + region_info_view.GetLocalAddress()->allocated_user; + + // Iterate over the chunks in the region + for (uptr chunk = region_beg; chunk < region_beg + region_allocated_user; chunk += chunk_size) { // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk)); callback(chunk, arg); @@ -562,9 +600,35 @@ static const bool kUsingConstantSpaceBeg = kSpaceBeg != ~(uptr)0; uptr NonConstSpaceBeg; uptr SpaceBeg() const { - return kUsingConstantSpaceBeg ? kSpaceBeg : NonConstSpaceBeg; + LocalObjectView local_view(this); + return SpaceBegOOP(&local_view); + } + uptr SpaceEnd() const { + LocalObjectView local_view(this); + return SpaceEndOOP(&local_view); + } + + template + ALWAYS_INLINE static uptr SpaceBegOOP(ObjectView *view) { + static_assert( + std::is_same::value || + std::is_same::value, + "ObjectView has wrong type"); + CHECK(view->IsValid()); + return view->GetLocalAddress()->kUsingConstantSpaceBeg + ? kSpaceBeg + : view->GetLocalAddress()->NonConstSpaceBeg; + } + template + ALWAYS_INLINE static uptr SpaceEndOOP(ObjectView *view) { + static_assert( + std::is_same::value || + std::is_same::value, + "ObjectView has wrong type"); + CHECK(view->IsValid()); + return SpaceBegOOP(view) + kSpaceSize; } - uptr SpaceEnd() const { return SpaceBeg() + kSpaceSize; } + // kRegionSize must be >= 2^32. COMPILER_CHECK((kRegionSize) >= (1ULL << (SANITIZER_WORDSIZE / 2))); // kRegionSize must be <= 2^36, see CompactPtrT. @@ -606,8 +670,20 @@ COMPILER_CHECK(sizeof(RegionInfo) % kCacheLineSize == 0); RegionInfo *GetRegionInfo(uptr class_id) const { + LocalObjectView local_view(this); + return GetRegionInfoOOP(class_id, &local_view); + } + + template + ALWAYS_INLINE static RegionInfo *GetRegionInfoOOP(uptr class_id, + ObjectView *view) { + static_assert( + std::is_same::value || + std::is_same::value, + "ObjectView has wrong type"); DCHECK_LT(class_id, kNumClasses); - RegionInfo *regions = reinterpret_cast(SpaceEnd()); + CHECK(view->IsValid()); + RegionInfo *regions = reinterpret_cast(SpaceEndOOP(view)); return ®ions[class_id]; } Index: lib/sanitizer_common/sanitizer_allocator_secondary.h =================================================================== --- lib/sanitizer_common/sanitizer_allocator_secondary.h +++ lib/sanitizer_common/sanitizer_allocator_secondary.h @@ -71,6 +71,8 @@ class PtrArrayT = DefaultLargeMmapAllocatorPtrArray> class LargeMmapAllocator { public: + typedef LargeMmapAllocator ThisT; + void InitLinkerInitialized() { page_size_ = GetPageSizeCached(); chunks_ = reinterpret_cast(ptr_array_.Init()); @@ -271,13 +273,55 @@ // Iterate over all existing chunks. // The allocator must be locked when calling this function. void ForEachChunk(ForEachChunkCallback callback, void *arg) { - EnsureSortedChunks(); // Avoid doing the sort while iterating. - for (uptr i = 0; i < n_chunks_; i++) { - auto t = chunks_[i]; - callback(reinterpret_cast(GetUser(t)), arg); - // Consistency check: verify that the array did not change. 
-      CHECK_EQ(chunks_[i], t);
-      CHECK_EQ(chunks_[i]->chunk_idx, i);
+    LocalObjectView local_view(this);
+    ThisT::ForEachChunkOOP(callback, arg, &local_view);
+  }
+
+  template
+  ALWAYS_INLINE static void ForEachChunkOOP(ForEachChunkCallback callback,
+                                            void *arg, ObjectView *view) {
+    static_assert(std::is_same::value,
+                  "ObjectView has wrong type");
+    CHECK_NE(callback, nullptr);
+    CHECK(view->IsValid());
+
+    if (view->IsLocal()) {
+      // In-process
+      // We can only modify memory when it's local, so only sort the chunks in
+      // this scenario.
+      view->GetLocalAddress()
+          ->EnsureSortedChunks();  // Avoid doing the sort while iterating.
+    } else {
+      // Out-of-process
+      // Sanity check
+      CHECK_EQ(view->GetLocalAddress()->page_size_, GetPageSize());
+    }
+    auto num_chunks = view->GetLocalAddress()->n_chunks_;
+    Header **chunks_begin = view->GetLocalAddress()->chunks_;
+    for (uptr i = 0; i < num_chunks; ++i) {
+      // Compute address of pointer to Header pointer in target process.
+      uptr oop_ptr_to_header_ptr =
+          reinterpret_cast(chunks_begin) + (sizeof(Header *) * i);
+
+      // Copy the Header pointer into the local process.
+      typename ObjectView::template SmallAllocTy
oop_header_ptr_view( + oop_ptr_to_header_ptr, view->GetReadContext()); + CHECK(oop_header_ptr_view.IsValid()); + Header *header_ptr = *(oop_header_ptr_view.GetLocalAddress()); + + // Compute chunk address + uptr chunk_addr = + reinterpret_cast(ThisT::GetUserOOP(header_ptr, view)); + callback(chunk_addr, arg); + + if (view->IsLocal()) { + // Consistency check: verify that the array did not change. + // NOTE: We are accessing memory in a way that is only safe locally + // here so that the check is easier to write. These need rewriting if + // we want to perform these checks in the out-of-process case. + CHECK_EQ(view->GetLocalAddress()->chunks_[i], header_ptr); + CHECK_EQ(view->GetLocalAddress()->chunks_[i]->chunk_idx, i); + } } } @@ -298,8 +342,17 @@ } void *GetUser(Header *h) { - CHECK(IsAligned((uptr)h, page_size_)); - return reinterpret_cast(reinterpret_cast(h) + page_size_); + LocalObjectView local_view(this); + return ThisT::GetUserOOP(h, &local_view); + } + + template + ALWAYS_INLINE static void *GetUserOOP(Header *h, ObjectView *view) { + static_assert(std::is_same::value, + "ObjectView has wrong type"); + auto page_size = view->GetLocalAddress()->page_size_; + CHECK(IsAligned((uptr)h, page_size)); + return reinterpret_cast(reinterpret_cast(h) + page_size); } uptr RoundUpMapSize(uptr size) { Index: lib/sanitizer_common/sanitizer_internal_defs.h =================================================================== --- lib/sanitizer_common/sanitizer_internal_defs.h +++ lib/sanitizer_common/sanitizer_internal_defs.h @@ -170,6 +170,16 @@ typedef int pid_t; #endif +// process_vm_read_handle_t is a platform specific type used by a platform to +// read memory from another process. +#if SANITIZER_MAC +typedef unsigned int task_t; +typedef task_t process_vm_read_handle_t; +#else +// Stub value for other platforms. +typedef int process_vm_read_handle_t; +#endif + #if SANITIZER_FREEBSD || SANITIZER_NETBSD || \ SANITIZER_OPENBSD || SANITIZER_MAC || \ (SANITIZER_LINUX && defined(__x86_64__)) Index: lib/sanitizer_common/sanitizer_local_object_view.h =================================================================== --- /dev/null +++ lib/sanitizer_common/sanitizer_local_object_view.h @@ -0,0 +1,107 @@ +//===-- sanitizer_local_object_view.h ---------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// The "local" (i.e. in-process) implementation of the `ObjectView` abstraction. +// +//===----------------------------------------------------------------------===// +#ifndef SANITIZER_LOCAL_OBJECT_VIEW_H +#define SANITIZER_LOCAL_OBJECT_VIEW_H +#include +#include "sanitizer_internal_defs.h" + +namespace __sanitizer { + +// Forward declare. +class VMReadContext; + +template +struct ObjectViewBase { + uptr GetSize() const { return sizeof(T); } +}; + +// Provides the implementation of the `ObjectView` abstraction for efficient +// views on in-process objects of type `ObjectTy`. This implementation does not +// make copies of objects and thus does not allocate any storage for these +// copies. +// +// Generic in/out-of-process code should only use the subset of the interface +// shared between `LocalObjectView` and `RemoteObjectView`. 
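+//
+// A minimal usage sketch (illustrative only; `Foo` and `foo_object` are
+// hypothetical names, not part of this patch):
+//
+// ```
+// struct Foo { int a_field; };
+// Foo foo_object;
+//
+// // View an in-process object. No copy of `foo_object` is made.
+// LocalObjectView<Foo> foo_view(&foo_object);
+// CHECK(foo_view.IsLocal());
+// CHECK(foo_view.IsValid());
+//
+// // The "local address" is just the object itself.
+// int value = foo_view.GetLocalAddress()->a_field;
+// ```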
+template +struct LocalObjectView : ObjectViewBase { + private: + uptr target_address; + + public: + using ObjectType = ObjectTy; + ALWAYS_INLINE LocalObjectView(uptr target_address, VMReadContext* ctx) + : target_address(target_address) {} + // Convenience constructor. + ALWAYS_INLINE LocalObjectView(ObjectTy* target_address) + : LocalObjectView(reinterpret_cast(target_address), nullptr) {} + ~LocalObjectView() {} + + // Forbid copying in most cases. + LocalObjectView(const LocalObjectView&) = delete; + // We implement the move constructor so that `MakeSliceView(...)` works. + LocalObjectView(const LocalObjectView&& other) + : target_address(other.target_address) {} + LocalObjectView& operator=(const LocalObjectView&) = delete; + LocalObjectView& operator=(const LocalObjectView&&) = delete; + + ALWAYS_INLINE bool IsLocal() const { return true; } + ALWAYS_INLINE bool IsValid() const { return true; } + ALWAYS_INLINE VMReadContext* GetReadContext() { return nullptr; } + + // Can't return `const ObjectTy*` because on some paths + // we may need to modify the object. + ALWAYS_INLINE ObjectTy* GetLocalAddress() const { + return reinterpret_cast(target_address); + } + ALWAYS_INLINE uptr GetTargetAddress() { return target_address; } + + // `LargeAllocTy` and `SmallAllocTy` are intended for use by generic + // in/out-of-process code to make decisions on what type of storage is + // required for an object view. + // + // For views on local objects no copy is necessary so we just provide + // `LocalObjectView` so that no storage allocations are performed. + template + using LargeAllocTy = LocalObjectView; + template + using SmallAllocTy = LocalObjectView; + + // Provides a type-safe and convenient way of creating a `LocalObjectView` of + // a field of a struct or class from an existing `LocalObjectView`. + // + // Example: + // ``` + // class Foo { + // public: + // int a_field; + // }; + // + // Foo foo_object; + // LocalObjectView foo_view(&foo_object); + // + // // Create view into the `a_field` member variable + // // using a member pointer to `a_field`. + // LocalObjectView a_field_view = foo_view.MakeSliceView(&Foo::a_field); + // ``` + template < + typename OtherObjectTy, typename Q = ObjectTy, + typename std::enable_if::value>::type* dummy = nullptr> + ALWAYS_INLINE LocalObjectView MakeSliceView( + OtherObjectTy Q::*member_ptr) const { + const OtherObjectTy* ptr = &(GetLocalAddress()->*member_ptr); + return LocalObjectView(reinterpret_cast(ptr), nullptr); + } +}; + +}; // namespace __sanitizer +#endif Index: lib/sanitizer_common/sanitizer_remote_object_view.h =================================================================== --- /dev/null +++ lib/sanitizer_common/sanitizer_remote_object_view.h @@ -0,0 +1,256 @@ +//===-- sanitizer_remote_object_view.h --------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// The "remote" (i.e. out-of-process) implementation of the `ObjectView` +// abstraction. 
+// +//===----------------------------------------------------------------------===// +#ifndef SANITIZER_REMOTE_OBJECT_VIEW_H +#define SANITIZER_REMOTE_OBJECT_VIEW_H +#include +#include // For std::move +#include "sanitizer_internal_defs.h" +#include "sanitizer_local_object_view.h" +#include "sanitizer_vm_read_context.h" + +namespace __sanitizer { + +// Implements stack-based storage for an object of type `ObjectTy`. +template +struct StackStorage { + private: + u8 storage[sizeof(ObjectTy)]; + uptr target_address = 0; + + public: + StackStorage() {} + ~StackStorage() {} + + // Prevent copying. + StackStorage(const StackStorage&) = delete; + StackStorage(const StackStorage&&) = delete; + StackStorage& operator=(const StackStorage&) = delete; + StackStorage& operator=(const StackStorage&&) = delete; + + bool DoRead(uptr target_address, VMReadContext* read_context) { + static_assert(sizeof(storage) == sizeof(ObjectTy), "Size mismatch"); + bool success = read_context->Read(target_address, sizeof(ObjectTy)); + if (!success) + return false; + this->target_address = target_address; + // Copy into our own storage. + internal_memcpy(storage, read_context->GetLocalAddress(), sizeof(storage)); + return true; + } + ObjectTy* GetLocalAddress() { return reinterpret_cast(storage); } + uptr GetTargetAddress() { return target_address; } +}; + +// Implements mmap-based storage for an object of type `ObjectTy`. +template +struct MmapStorage { + private: + void* storage_ptr = 0; + uptr target_address = 0; + uptr storage_size = 0; + + public: + MmapStorage() { + // Should we zero our storage? Mmap might guarantee this? + storage_size = RoundUpTo(sizeof(ObjectTy), GetPageSizeCached()); + CHECK_GE(storage_size, sizeof(ObjectTy)); + storage_ptr = MmapOrDie(storage_size, "MmapStorage", /*raw_report=*/false); + } + ~MmapStorage() { UnmapOrDie(storage_ptr, storage_size); } + + // Prevent copying. + MmapStorage(const MmapStorage&) = delete; + MmapStorage(const MmapStorage&&) = delete; + MmapStorage& operator=(const MmapStorage&) = delete; + MmapStorage& operator=(const MmapStorage&&) = delete; + + bool DoRead(uptr target_address, VMReadContext* read_context) { + if (!storage_ptr) + return false; + bool success = read_context->Read(target_address, sizeof(ObjectTy)); + if (!success) + return false; + this->target_address = target_address; + // Copy into our own storage. + internal_memcpy(storage_ptr, read_context->GetLocalAddress(), + sizeof(ObjectTy)); + return true; + } + ObjectTy* GetLocalAddress() { + return reinterpret_cast(storage_ptr); + } + uptr GetTargetAddress() { return target_address; } +}; + +// Forward decl +template +struct SliceStorage; + +// Provides the implementation of the `ObjectView` abstraction +// for view on out-of-process objects of type `ObjectTy`. +// +// This implementation takes a `StorageTy` template parameter +// which specifies what type of storage should be used to hold +// the copy of an object from another process. This should +// be `StackStorage`, `MmapStorage`, or `SliceStorage`. +// +// Despite being called `RemoteObjectView` it is possible to use +// this abstraction to provide a view into in-process objects. +// However this might perform allocation and copy operations. +// To avoid this it is advised to use `LocalObjectView` when +// doing in-process operations. +// +// Generic in/out-of-process code should only use the subset of the interface +// shared between `LocalObjectView` and `RemoteObjectView`. 
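+//
+// A rough usage sketch (illustrative only; `Foo`, `native_reader`,
+// `target_task`, and `remote_foo_addr` are hypothetical names, not part of
+// this patch):
+//
+// ```
+// struct Foo { int a_field; };
+//
+// // Set up a read context for the target process.
+// VMReadContext ctx(reinterpret_cast<void*>(native_reader), target_task);
+//
+// // Copy the remote `Foo` into local stack storage and inspect it.
+// RemoteObjectView<Foo, StackStorage> foo_view(remote_foo_addr, &ctx);
+// CHECK(foo_view.IsValid());
+// int value = foo_view.GetLocalAddress()->a_field;
+// ```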
+template class StorageTy>
+struct RemoteObjectView : ObjectViewBase {
+  template class>
+  friend struct RemoteObjectView;
+
+ private:
+  StorageTy storage;
+  VMReadContext* read_context;
+  bool is_valid = false;
+
+ public:
+  using ObjectType = ObjectTy;
+  RemoteObjectView(uptr target_address, VMReadContext* ctx)
+      : read_context(ctx) {
+    // Note: The storage has to perform the read because `SliceStorage` is
+    // fake storage.
+    is_valid = storage.DoRead(target_address, ctx);
+  }
+  ~RemoteObjectView() {}
+
+  // Forbid copying in most cases.
+  RemoteObjectView(const RemoteObjectView&) = delete;
+  // We implement the move constructor so that `MakeSliceView(...)` works.
+  // This deliberately doesn't work when `StorageTy` is `StackStorage` or
+  // `MmapStorage` because those classes have deleted move constructors.
+  RemoteObjectView(const RemoteObjectView&& other)
+      : storage(std::move(other.storage)),
+        read_context(other.read_context),
+        is_valid(other.is_valid) {}
+  RemoteObjectView& operator=(const RemoteObjectView&) = delete;
+  RemoteObjectView& operator=(const RemoteObjectView&&) = delete;
+
+  bool IsLocal() const { return read_context->IsLocal(); }
+  bool IsValid() const { return is_valid; }
+  VMReadContext* GetReadContext() { return read_context; }
+
+  // Can't be `const ObjectTy*` because on some paths
+  // we may need to modify the object.
+  ObjectTy* GetLocalAddress() { return storage.GetLocalAddress(); }
+  uptr GetTargetAddress() { return storage.GetTargetAddress(); }
+
+  // These types are intended for use by generic in/out-of-process
+  // code to make decisions on the type of allocation required.
+
+  // `SmallAllocTy` is intended for use on small objects that can
+  // be allocated on the stack.
+  //
+  template
+  using SmallAllocTy = RemoteObjectView;
+  // `LargeAllocTy` is intended for large objects that cannot be
+  // allocated on the stack.
+  template
+  using LargeAllocTy = RemoteObjectView;
+
+  // Provides a type-safe and convenient way of creating a `RemoteObjectView`
+  // of a field of a struct or class from an existing `RemoteObjectView`.
+  //
+  // Example:
+  // ```
+  // class Foo {
+  //  public:
+  //   int a_field;
+  // };
+  //
+  // uptr remote_foo_ptr = ... ; // get from somewhere
+  // VMReadContext read_ctx(/* config */, /* process */);
+  // RemoteObjectView foo_view(remote_foo_ptr, &read_ctx);
+  //
+  // // Create view into the `a_field` member variable
+  // // using a member pointer to `a_field`.
+  // RemoteObjectView a_field_view =
+  //     foo_view.MakeSliceView(&Foo::a_field);
+  // ```
+  template <
+      typename OtherObjectTy, typename Q = ObjectTy,
+      typename std::enable_if::value>::type* dummy = nullptr>
+  RemoteObjectView MakeSliceView(
+      OtherObjectTy Q::*member_ptr) {
+    const OtherObjectTy* ptr = &(GetLocalAddress()->*member_ptr);
+    uptr slice_target_address =
+        GetTargetAddress() + (reinterpret_cast(ptr) -
+                              reinterpret_cast(GetLocalAddress()));
+    CHECK_GE(slice_target_address, GetTargetAddress());
+    RemoteObjectView sv(slice_target_address,
+                        read_context);
+    // Now update with a pointer into existing storage.
+    sv.storage.ptr_into_slice = reinterpret_cast(ptr);
+    return sv;
+  }
+};
+
+// Implements fake storage that provides a slice view into
+// existing storage.
+//
+// Users of this storage type need to be careful that the lifetime of the
+// underlying storage that the `SliceStorage` uses is a superset of the
+// `SliceStorage` lifetime. This is because `SliceStorage` does not own its
+// own storage and no compile-time or run-time checks are implemented to
+// ensure this.
+//
+// Clients should not use this type directly; use the
+// `RemoteObjectView::MakeSliceView(...)` method instead.
+template
+struct SliceStorage {
+  // SliceStorage doesn't really own its own storage.
+  // This means clients need to make sure the true storage outlives the
+  // `SliceStorage`.
+  template class>
+  friend struct RemoteObjectView;
+
+ private:
+  uptr ptr_into_slice = 0;  // Only RemoteObjectView should modify this.
+  uptr target_address = 0;
+
+ public:
+  SliceStorage() {}
+  ~SliceStorage() {}
+  // Forbid copying in most cases.
+  SliceStorage(const SliceStorage&) = delete;
+  // We implement the move constructor so that
+  // `RemoteObjectView::MakeSliceView()` works.
+  SliceStorage(const SliceStorage&& other)
+      : ptr_into_slice(other.ptr_into_slice),
+        target_address(other.target_address) {}
+  SliceStorage& operator=(const SliceStorage&) = delete;
+  SliceStorage& operator=(const SliceStorage&&) = delete;
+
+  bool DoRead(uptr target_address, VMReadContext* read_context) {
+    // Don't do anything. Instead let RemoteObjectView initialise
+    // us.
+    this->target_address = target_address;
+    return true;
+  }
+  ObjectTy* GetLocalAddress() {
+    return reinterpret_cast(ptr_into_slice);
+  }
+  uptr GetTargetAddress() { return target_address; }
+};
+} // namespace __sanitizer
+
+#endif
Index: lib/sanitizer_common/sanitizer_vm_read_context.h
===================================================================
--- /dev/null
+++ lib/sanitizer_common/sanitizer_vm_read_context.h
@@ -0,0 +1,85 @@
+//===-- sanitizer_vm_read_context.h -----------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// `VMReadContext` is a low-level abstraction that allows clients to read the
+// memory of a target process.
+//
+// `RemoteObjectView` should be used in preference to `VMReadContext`.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_VM_READ_CONTEXT_H
+#define SANITIZER_VM_READ_CONTEXT_H
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_platform.h"
+
+namespace __sanitizer {
+
+class VMReadContext {
+ public:
+  typedef __sanitizer::process_vm_read_handle_t ProcessHandle;
+
+ private:
+  uptr local_address;
+  uptr target_address;
+  u64 size;
+  void* config;
+  ProcessHandle target_process;
+  bool is_local;
+
+ public:
+  // Out-of-process mode constructor
+  VMReadContext(void* config, ProcessHandle target_process);
+
+  // In-process mode constructor
+  VMReadContext();
+
+  VMReadContext(const VMReadContext&) = delete;
+
+  // Return the address of memory in the current process that
+  // is a copy of memory from the target process.
+  //
+  // Returns `nullptr` if no successful read has been performed.
+  void* GetLocalAddress() const { return (void*)local_address; }
+
+  // Return the address provided to the last successful
+  // `Read()` call.
+  //
+  // Returns `0` if no successful read has been performed.
+  uptr GetTargetAddress() const { return target_address; }
+
+  // Return the size of the memory chunk requested in the last
+  // successful `Read()` call.
+  //
+  // Returns `0` if no successful read has been performed.
+  u64 GetSize() { return size; }
+
+  // Reset the state of `VMReadContext` to the state it was in
+  // after calling the `VMReadContext` constructor.
+  //
+  // The state of the memory returned by previous calls
+  // to `GetLocalAddress()` should be assumed to be invalid.
+  void Reset();
+
+  // Returns true iff `VMReadContext` is set up to read memory from the
+  // process in which it is being called.
+  bool IsLocal() const { return is_local; }
+  ProcessHandle GetTargetProcess() { return target_process; }
+
+  // Read memory from a remote process. The memory will be accessible at the
+  // address pointed to by `GetLocalAddress()`.
+  //
+  // The memory allocated by `Read()` is only valid until the
+  // next call to `Read()` or `Reset()`.
+  //
+  // Returns `true` on a successful read and `false` otherwise.
+  bool Read(uptr target_address, u64 size);
+};
+} // namespace __sanitizer
+#endif
Index: lib/sanitizer_common/sanitizer_vm_read_context_common.cc
===================================================================
--- /dev/null
+++ lib/sanitizer_common/sanitizer_vm_read_context_common.cc
@@ -0,0 +1,25 @@
+//===-- sanitizer_vm_read_context_common.cc ---------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Implementation of `VMReadContext` methods common to all
+// platforms.
+//
+//===----------------------------------------------------------------------===//
+#include "sanitizer_common.h"
+#include "sanitizer_platform.h"
+#include "sanitizer_vm_read_context.h"
+
+namespace __sanitizer {
+
+void VMReadContext::Reset() {
+  local_address = 0;
+  target_address = 0;
+  size = 0;
+}
+} // namespace __sanitizer
Index: lib/sanitizer_common/sanitizer_vm_read_context_mac.cc
===================================================================
--- /dev/null
+++ lib/sanitizer_common/sanitizer_vm_read_context_mac.cc
@@ -0,0 +1,68 @@
+//===-- sanitizer_vm_read_context_mac.cc ------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Implementation of `VMReadContext` for Apple platforms.
+//
+//===----------------------------------------------------------------------===//
+#include "sanitizer_platform.h"
+#if SANITIZER_MAC
+#include
+#include "sanitizer_common.h"
+#include "sanitizer_libc.h"
+#include "sanitizer_vm_read_context.h"
+
+// Taken from
+typedef kern_return_t memory_reader_t(task_t remote_task,
+                                      vm_address_t remote_address,
+                                      vm_size_t size, void** local_memory);
+
+namespace __sanitizer {
+
+VMReadContext::VMReadContext(void* config, ProcessHandle target_process)
+    : local_address(0),
+      target_address(0),
+      size(0),
+      config(config),
+      target_process(target_process),
+      is_local(false) {
+  if (config == nullptr || mach_task_self() == target_process) {
+    is_local = true;
+  }
+}
+
+VMReadContext::VMReadContext() : VMReadContext(nullptr, mach_task_self()) {}
+
+bool VMReadContext::Read(uptr target_address, u64 size) {
+  if (is_local) {
+    // The remote is the current process, so no copy is necessary.
+    local_address = target_address;
+    this->size = size;
+    return true;
+  }
+  CHECK_NE(config, nullptr);
+  CHECK_GT(size, 0);
+
+  memory_reader_t* native_reader = (memory_reader_t*)config;
+  kern_return_t result = native_reader(target_process, target_address, size,
+                                       (void**)(&(local_address)));
+  if (result == KERN_SUCCESS) {
+    this->target_address = target_address;
+    this->size = size;
+    return true;
+  }
+  Report("Failed to read %p of size %llu from remote process. Error code: %d\n",
+         (void *)target_address, size, result);
+  // Reset the fields.
+  Reset();
+  return false;
+}
+
+} // namespace __sanitizer
+
+#endif
Index: lib/sanitizer_common/sanitizer_vm_read_context_stubs.cc
===================================================================
--- /dev/null
+++ lib/sanitizer_common/sanitizer_vm_read_context_stubs.cc
@@ -0,0 +1,49 @@
+//===-- sanitizer_vm_read_context_stubs.cc ----------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Stub implementation of `VMReadContext` for non-Apple
+// platforms.
+//
+//===----------------------------------------------------------------------===//
+#include "sanitizer_platform.h"
+#if !SANITIZER_MAC
+#include "sanitizer_common.h"
+#include "sanitizer_vm_read_context.h"
+
+namespace __sanitizer {
+
+VMReadContext::VMReadContext(void* config, ProcessHandle target_process)
+    : local_address(0),
+      target_address(0),
+      size(0),
+      config(nullptr),
+      target_process(0),
+      is_local(true) {
+  // Only in-process reading is allowed in
+  // the stub implementation.
+  CHECK_EQ(config, nullptr);
+  CHECK_EQ(target_process, 0);
+}
+
+VMReadContext::VMReadContext() : VMReadContext(nullptr, 0) {}
+
+bool VMReadContext::Read(uptr target_address, u64 size) {
+  // The stub implementation should only ever
+  // do in-process reads.
+  CHECK_EQ(config, nullptr);
+  CHECK_EQ(target_process, 0);
+  this->local_address = target_address;
+  this->target_address = target_address;
+  this->size = size;
+  return true;
+}
+
+} // namespace __sanitizer
+
+#endif
Index: lib/sanitizer_common/tests/sanitizer_allocator_test.cc
===================================================================
--- lib/sanitizer_common/tests/sanitizer_allocator_test.cc
+++ lib/sanitizer_common/tests/sanitizer_allocator_test.cc
@@ -14,6 +14,7 @@
 #include "sanitizer_common/sanitizer_allocator.h"
 #include "sanitizer_common/sanitizer_allocator_internal.h"
 #include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_remote_object_view.h"
 #include "sanitizer_test_utils.h"
 #include "sanitizer_pthread_wrappers.h"
@@ -29,8 +30,6 @@
 using namespace __sanitizer;

-// Too slow for debug build
-#if !SANITIZER_DEBUG
 #if SANITIZER_CAN_USE_ALLOCATOR64

 #if SANITIZER_WINDOWS
@@ -874,14 +873,28 @@
   }

   std::set reported_chunks;
+  std::set reported_chunks_oop;
+  VMReadContext read_context;  // In-process read interface
+  RemoteObjectView remote_view(
+      reinterpret_cast(a), &read_context);
+  ASSERT_TRUE(remote_view.IsValid());
   a->ForceLock();
+
   a->ForEachChunk(IterationTestCallback, &reported_chunks);
+  // Test RemoteObjectView interface.
+  Allocator::ForEachChunkOOP(IterationTestCallback, &reported_chunks_oop,
+                             &remote_view);
+
   a->ForceUnlock();
+  ASSERT_EQ(reported_chunks, reported_chunks_oop);
+
   for (uptr i = 0; i < allocated.size(); i++) {
     // Don't use EXPECT_NE. Reporting the first mismatch is enough.
    ASSERT_NE(reported_chunks.find(reinterpret_cast(allocated[i])),
              reported_chunks.end());
+    ASSERT_NE(reported_chunks_oop.find(reinterpret_cast(allocated[i])),
+              reported_chunks_oop.end());
   }

   a->TestOnlyUnmap();
@@ -919,14 +932,28 @@
     allocated[i] = (char *)a.Allocate(&stats, size, 1);

   std::set reported_chunks;
+  std::set reported_chunks_oop;
+
+  VMReadContext read_context;  // In-process read interface
+  RemoteObjectView remote_view(
+      reinterpret_cast(&a), &read_context);
+
   a.ForceLock();
   a.ForEachChunk(IterationTestCallback, &reported_chunks);
+  decltype(a)::ForEachChunkOOP(IterationTestCallback, &reported_chunks_oop,
+                               &remote_view);
   a.ForceUnlock();
+  ASSERT_EQ(reported_chunks, reported_chunks_oop);
+
+  ASSERT_EQ(reported_chunks_oop.size(), reported_chunks.size());
+  ASSERT_EQ(reported_chunks.size(), kNumAllocs);
   for (uptr i = 0; i < kNumAllocs; i++) {
     // Don't use EXPECT_NE. Reporting the first mismatch is enough.
     ASSERT_NE(reported_chunks.find(reinterpret_cast(allocated[i])),
               reported_chunks.end());
+    ASSERT_NE(reported_chunks_oop.find(reinterpret_cast(allocated[i])),
+              reported_chunks_oop.end());
   }
   for (uptr i = 0; i < kNumAllocs; i++) a.Deallocate(&stats, allocated[i]);
@@ -1364,4 +1391,3 @@
   EXPECT_EQ((uptr)TestMapUnmapCallback::unmap_count, m.size1());
 }

-#endif // #if !SANITIZER_DEBUG
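For reference, the out-of-process entry points added above are intended to be driven roughly as follows. This is a sketch only: `Allocator`, `my_memory_reader`, `target_task`, `remote_allocator_addr`, `MyChunkCallback`, `my_state`, and the choice of `MmapStorage` are assumptions, not part of this patch.

// Build a read context for the (suspended) target process and a view of its
// allocator object, then walk its chunks without mapping the target's heap.
VMReadContext ctx(reinterpret_cast<void *>(my_memory_reader), target_task);
RemoteObjectView<Allocator, MmapStorage> allocator_view(remote_allocator_addr,
                                                        &ctx);
CHECK(allocator_view.IsValid());
// The target allocator must not mutate while it is being walked (e.g. the
// target process is suspended or its allocator is locked).
Allocator::ForEachChunkOOP(MyChunkCallback, &my_state, &allocator_view);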