Index: compiler-rt/lib/lsan/CMakeLists.txt
===================================================================
--- compiler-rt/lib/lsan/CMakeLists.txt
+++ compiler-rt/lib/lsan/CMakeLists.txt
@@ -11,6 +11,7 @@
 set(LSAN_SOURCES
   lsan.cc
   lsan_allocator.cc
+  lsan_aarch64.cc
   lsan_linux.cc
   lsan_interceptors.cc
   lsan_mac.cc
Index: compiler-rt/lib/lsan/lsan_aarch64.cc
===================================================================
--- /dev/null
+++ compiler-rt/lib/lsan/lsan_aarch64.cc
@@ -0,0 +1,410 @@
+//=-- lsan_aarch64.cc ---------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer.
+// See lsan_allocator.h for details.
+//
+//===----------------------------------------------------------------------===//
+
+#if defined(__aarch64__)
+#include "lsan_allocator.h"
+
+#include "sanitizer_common/sanitizer_allocator.h"
+#include "sanitizer_common/sanitizer_allocator_checks.h"
+#include "sanitizer_common/sanitizer_allocator_interface.h"
+#include "sanitizer_common/sanitizer_allocator_report.h"
+#include "sanitizer_common/sanitizer_errno.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "lsan_common.h"
+
+extern "C" void *memset(void *ptr, int value, uptr num);
+
+namespace __lsan {
+static const uptr kMaxAllowedMallocSize = 4UL << 30;
+
+// The AArch64 VMA size (commonly 39, 42 or 48 bits) is a kernel configuration
+// choice, so the primary allocator is picked at run time in
+// InitializeAllocator().
+static bool useAllocator64 = true;
+static Allocator64 allocator64;
+static Allocator allocator;
+
+void InitializeAllocator() {
+  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
+  // Check whether the address space is large enough (a 48-bit VMA) for the
+  // 64-bit allocator; otherwise fall back to the 32-bit-style allocator.
+  if (GetMaxVirtualAddress() < (((uptr) 1 << 48) - 1))
+    useAllocator64 = false;
+  else
+    useAllocator64 = true;
+
+  if (useAllocator64)
+    allocator64.InitLinkerInitialized(
+        common_flags()->allocator_release_to_os_interval_ms);
+  else
+    allocator.InitLinkerInitialized(
+        common_flags()->allocator_release_to_os_interval_ms);
+}
+
+void AllocatorThreadFinish() {
+  if (useAllocator64)
+    allocator64.SwallowCache(GetAllocatorCache64());
+  else
+    allocator.SwallowCache(GetAllocatorCache());
+}
+
+static ChunkMetadata *Metadata(const void *p) {
+  if (useAllocator64)
+    return reinterpret_cast<ChunkMetadata *>(allocator64.GetMetaData(p));
+  return reinterpret_cast<ChunkMetadata *>(allocator.GetMetaData(p));
+}
+
+static void RegisterAllocation(const StackTrace &stack, void *p, uptr size) {
+  if (!p) return;
+  ChunkMetadata *m = Metadata(p);
+  CHECK(m);
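+  // Allocations made inside a __lsan_disable()/__lsan_enable() region are
+  // tagged kIgnored and will not be reported as leaks.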
+  m->tag = DisabledInThisThread() ? kIgnored : kDirectlyLeaked;
+  m->stack_trace_id = StackDepotPut(stack);
+  m->requested_size = size;
+  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 1, memory_order_relaxed);
+}
+
+static void RegisterDeallocation(void *p) {
+  if (!p) return;
+  ChunkMetadata *m = Metadata(p);
+  CHECK(m);
+  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 0, memory_order_relaxed);
+}
+
+static void *ReportAllocationSizeTooBig(uptr size, const StackTrace &stack) {
+  if (AllocatorMayReturnNull()) {
+    Report("WARNING: LeakSanitizer failed to allocate 0x%zx bytes\n", size);
+    return nullptr;
+  }
+  ReportAllocationSizeTooBig(size, kMaxAllowedMallocSize, &stack);
+}
+
+void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
+               bool cleared) {
+  if (size == 0)
+    size = 1;
+  if (size > kMaxAllowedMallocSize)
+    return ReportAllocationSizeTooBig(size, stack);
+  void *p;
+  if (useAllocator64)
+    p = allocator64.Allocate(GetAllocatorCache64(), size, alignment);
+  else
+    p = allocator.Allocate(GetAllocatorCache(), size, alignment);
+
+  if (UNLIKELY(!p)) {
+    SetAllocatorOutOfMemory();
+    if (AllocatorMayReturnNull())
+      return nullptr;
+    ReportOutOfMemory(size, &stack);
+  }
+  // Do not rely on the allocator to clear the memory (it's slow).
+  if (cleared) {
+    bool fromPrimary;
+    if (useAllocator64)
+      fromPrimary = allocator64.FromPrimary(p);
+    else
+      fromPrimary = allocator.FromPrimary(p);
+
+    if (fromPrimary)
+      memset(p, 0, size);
+  }
+  RegisterAllocation(stack, p, size);
+  if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(p, size);
+  RunMallocHooks(p, size);
+  return p;
+}
+
+static void *Calloc(uptr nmemb, uptr size, const StackTrace &stack) {
+  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
+    if (AllocatorMayReturnNull())
+      return nullptr;
+    ReportCallocOverflow(nmemb, size, &stack);
+  }
+  size *= nmemb;
+  return Allocate(stack, size, 1, true);
+}
+
+void Deallocate(void *p) {
+  if (&__sanitizer_free_hook) __sanitizer_free_hook(p);
+  RunFreeHooks(p);
+  RegisterDeallocation(p);
+  if (useAllocator64)
+    allocator64.Deallocate(GetAllocatorCache64(), p);
+  else
+    allocator.Deallocate(GetAllocatorCache(), p);
+}
+
+void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
+                 uptr alignment) {
+  RegisterDeallocation(p);
+  if (new_size > kMaxAllowedMallocSize) {
+    if (useAllocator64)
+      allocator64.Deallocate(GetAllocatorCache64(), p);
+    else
+      allocator.Deallocate(GetAllocatorCache(), p);
+    return ReportAllocationSizeTooBig(new_size, stack);
+  }
+  if (useAllocator64)
+    p = allocator64.Reallocate(GetAllocatorCache64(), p, new_size, alignment);
+  else
+    p = allocator.Reallocate(GetAllocatorCache(), p, new_size, alignment);
+  RegisterAllocation(stack, p, new_size);
+  return p;
+}
+
+void GetAllocatorCacheRange(uptr *begin, uptr *end) {
+  if (useAllocator64) {
+    *begin = (uptr)GetAllocatorCache64();
+    *end = *begin + sizeof(AllocatorCache64);
+  } else {
+    *begin = (uptr)GetAllocatorCache();
+    *end = *begin + sizeof(AllocatorCache);
+  }
+}
+
+uptr GetMallocUsableSize(const void *p) {
+  ChunkMetadata *m = Metadata(p);
+  if (!m) return 0;
+  return m->requested_size;
+}
+
+int lsan_posix_memalign(void **memptr, uptr alignment, uptr size,
+                        const StackTrace &stack) {
+  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
+    if (AllocatorMayReturnNull())
+      return errno_EINVAL;
+    ReportInvalidPosixMemalignAlignment(alignment, &stack);
+  }
+  void *ptr = Allocate(stack, size, alignment, kAlwaysClearMemory);
+  if (UNLIKELY(!ptr))
+    // OOM error is already taken care of by Allocate.
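+    // posix_memalign reports failure through its return value rather than
+    // errno, hence errno_ENOMEM here instead of SetErrnoOnNull().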
+    return errno_ENOMEM;
+  CHECK(IsAligned((uptr)ptr, alignment));
+  *memptr = ptr;
+  return 0;
+}
+
+void *lsan_aligned_alloc(uptr alignment, uptr size, const StackTrace &stack) {
+  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
+    errno = errno_EINVAL;
+    if (AllocatorMayReturnNull())
+      return nullptr;
+    ReportInvalidAlignedAllocAlignment(size, alignment, &stack);
+  }
+  return SetErrnoOnNull(Allocate(stack, size, alignment, kAlwaysClearMemory));
+}
+
+void *lsan_memalign(uptr alignment, uptr size, const StackTrace &stack) {
+  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
+    errno = errno_EINVAL;
+    if (AllocatorMayReturnNull())
+      return nullptr;
+    ReportInvalidAllocationAlignment(alignment, &stack);
+  }
+  return SetErrnoOnNull(Allocate(stack, size, alignment, kAlwaysClearMemory));
+}
+
+void *lsan_malloc(uptr size, const StackTrace &stack) {
+  return SetErrnoOnNull(Allocate(stack, size, 1, kAlwaysClearMemory));
+}
+
+void lsan_free(void *p) {
+  Deallocate(p);
+}
+
+void *lsan_realloc(void *p, uptr size, const StackTrace &stack) {
+  return SetErrnoOnNull(Reallocate(stack, p, size, 1));
+}
+
+void *lsan_calloc(uptr nmemb, uptr size, const StackTrace &stack) {
+  return SetErrnoOnNull(Calloc(nmemb, size, stack));
+}
+
+void *lsan_valloc(uptr size, const StackTrace &stack) {
+  return SetErrnoOnNull(
+      Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory));
+}
+
+void *lsan_pvalloc(uptr size, const StackTrace &stack) {
+  uptr PageSize = GetPageSizeCached();
+  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
+    errno = errno_ENOMEM;
+    if (AllocatorMayReturnNull())
+      return nullptr;
+    ReportPvallocOverflow(size, &stack);
+  }
+  // pvalloc(0) should allocate one page.
+  size = size ? RoundUpTo(size, PageSize) : PageSize;
+  return SetErrnoOnNull(Allocate(stack, size, PageSize, kAlwaysClearMemory));
+}
+
+uptr lsan_mz_size(const void *p) {
+  return GetMallocUsableSize(p);
+}
+
+///// Interface to the common LSan module. /////
+
+void LockAllocator() {
+  if (useAllocator64)
+    allocator64.ForceLock();
+  else
+    allocator.ForceLock();
+}
+
+void UnlockAllocator() {
+  if (useAllocator64)
+    allocator64.ForceUnlock();
+  else
+    allocator.ForceUnlock();
+}
+
+void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
+  // Report the range of whichever allocator is actually in use so that its
+  // internal structures are excluded from the global pointer scan.
+  if (useAllocator64) {
+    *begin = (uptr)&allocator64;
+    *end = *begin + sizeof(allocator64);
+  } else {
+    *begin = (uptr)&allocator;
+    *end = *begin + sizeof(allocator);
+  }
+}
+
+uptr PointsIntoChunk(void* p) {
+  uptr addr = reinterpret_cast<uptr>(p);
+  uptr chunk;
+  if (useAllocator64)
+    chunk = reinterpret_cast<uptr>(allocator64.GetBlockBeginFastLocked(p));
+  else
+    chunk = reinterpret_cast<uptr>(allocator.GetBlockBeginFastLocked(p));
+
+  if (!chunk) return 0;
+  // LargeMmapAllocator considers pointers to the meta-region of a chunk to be
+  // valid, but we don't want that.
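+  // A pointer into the secondary's header lies below the user chunk returned
+  // by GetBlockBeginFastLocked(), so it is rejected by the check below.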
+  if (addr < chunk) return 0;
+  ChunkMetadata *m = Metadata(reinterpret_cast<void *>(chunk));
+  CHECK(m);
+  if (!m->allocated)
+    return 0;
+  if (addr < chunk + m->requested_size)
+    return chunk;
+  if (IsSpecialCaseOfOperatorNew0(chunk, m->requested_size, addr))
+    return chunk;
+  return 0;
+}
+
+uptr GetUserBegin(uptr chunk) {
+  return chunk;
+}
+
+LsanMetadata::LsanMetadata(uptr chunk) {
+  metadata_ = Metadata(reinterpret_cast<void *>(chunk));
+  CHECK(metadata_);
+}
+
+bool LsanMetadata::allocated() const {
+  return reinterpret_cast<ChunkMetadata *>(metadata_)->allocated;
+}
+
+ChunkTag LsanMetadata::tag() const {
+  return reinterpret_cast<ChunkMetadata *>(metadata_)->tag;
+}
+
+void LsanMetadata::set_tag(ChunkTag value) {
+  reinterpret_cast<ChunkMetadata *>(metadata_)->tag = value;
+}
+
+uptr LsanMetadata::requested_size() const {
+  return reinterpret_cast<ChunkMetadata *>(metadata_)->requested_size;
+}
+
+u32 LsanMetadata::stack_trace_id() const {
+  return reinterpret_cast<ChunkMetadata *>(metadata_)->stack_trace_id;
+}
+
+void ForEachChunk(ForEachChunkCallback callback, void *arg) {
+  if (useAllocator64)
+    allocator64.ForEachChunk(callback, arg);
+  else
+    allocator.ForEachChunk(callback, arg);
+}
+
+IgnoreObjectResult IgnoreObjectLocked(const void *p) {
+  void *chunk;
+  if (useAllocator64)
+    chunk = allocator64.GetBlockBegin(p);
+  else
+    chunk = allocator.GetBlockBegin(p);
+
+  if (!chunk || p < chunk) return kIgnoreObjectInvalid;
+  ChunkMetadata *m = Metadata(chunk);
+  CHECK(m);
+  if (m->allocated && (uptr)p < (uptr)chunk + m->requested_size) {
+    if (m->tag == kIgnored)
+      return kIgnoreObjectAlreadyIgnored;
+    m->tag = kIgnored;
+    return kIgnoreObjectSuccess;
+  } else {
+    return kIgnoreObjectInvalid;
+  }
+}
+} // namespace __lsan
+
+using namespace __lsan;
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __sanitizer_get_current_allocated_bytes() {
+  uptr stats[AllocatorStatCount];
+  if (useAllocator64)
+    allocator64.GetStats(stats);
+  else
+    allocator.GetStats(stats);
+  return stats[AllocatorStatAllocated];
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __sanitizer_get_heap_size() {
+  uptr stats[AllocatorStatCount];
+  if (useAllocator64)
+    allocator64.GetStats(stats);
+  else
+    allocator.GetStats(stats);
+  return stats[AllocatorStatMapped];
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __sanitizer_get_free_bytes() { return 0; }
+
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __sanitizer_get_unmapped_bytes() { return 0; }
+
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __sanitizer_get_ownership(const void *p) { return Metadata(p) != nullptr; }
+
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __sanitizer_get_allocated_size(const void *p) {
+  return GetMallocUsableSize(p);
+}
+
+#if !SANITIZER_SUPPORTS_WEAK_HOOKS
+// Provide default (no-op) implementation of malloc hooks.
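+// When weak hooks are not supported, these definitions guarantee that
+// __sanitizer_malloc_hook/__sanitizer_free_hook always resolve, so the
+// address checks in Allocate()/Deallocate() above stay well defined.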
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void __sanitizer_malloc_hook(void *ptr, uptr size) {
+  (void)ptr;
+  (void)size;
+}
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void __sanitizer_free_hook(void *ptr) {
+  (void)ptr;
+}
+#endif
+} // extern "C"
+
+#endif // defined(__aarch64__)
Index: compiler-rt/lib/lsan/lsan_allocator.h
===================================================================
--- compiler-rt/lib/lsan/lsan_allocator.h
+++ compiler-rt/lib/lsan/lsan_allocator.h
@@ -49,8 +49,58 @@
   u32 stack_trace_id;
 };
 
-#if defined(__mips64) || defined(__aarch64__) || defined(__i386__) || \
-    defined(__arm__)
+#if defined(__aarch64__)
+static const uptr kRegionSizeLog = 20;
+static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
+template <typename AddressSpaceView>
+using ByteMapASVT =
+    TwoLevelByteMap<(kNumRegions >> 12), 1 << 12, AddressSpaceView>;
+
+template <typename AddressSpaceViewTy>
+struct AP32 {
+  static const uptr kSpaceBeg = 0;
+  static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
+  static const uptr kMetadataSize = sizeof(ChunkMetadata);
+  typedef __sanitizer::CompactSizeClassMap SizeClassMap;
+  static const uptr kRegionSizeLog = __lsan::kRegionSizeLog;
+  using AddressSpaceView = AddressSpaceViewTy;
+  using ByteMap = __lsan::ByteMapASVT<AddressSpaceView>;
+  typedef NoOpMapUnmapCallback MapUnmapCallback;
+  static const uptr kFlags = 0;
+};
+
+const uptr kAllocatorSpace = 0x600000000000ULL;
+const uptr kAllocatorSize = 0x40000000000ULL;  // 4T.
+
+template <typename AddressSpaceViewTy>
+struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
+  static const uptr kSpaceBeg = kAllocatorSpace;
+  static const uptr kSpaceSize = kAllocatorSize;
+  static const uptr kMetadataSize = sizeof(ChunkMetadata);
+  typedef DefaultSizeClassMap SizeClassMap;
+  typedef NoOpMapUnmapCallback MapUnmapCallback;
+  static const uptr kFlags = 0;
+  using AddressSpaceView = AddressSpaceViewTy;
+};
+
+using PrimeAlloc = SizeClassAllocator32<AP32<LocalAddressSpaceView>>;
+using PrimeAlloc64 = SizeClassAllocator64<AP64<LocalAddressSpaceView>>;
+
+using AllocatorCache = SizeClassAllocatorLocalCache<PrimeAlloc>;
+using AllocatorCache64 = SizeClassAllocatorLocalCache<PrimeAlloc64>;
+
+using SecondAlloc = LargeMmapAllocator<NoOpMapUnmapCallback>;
+
+using Allocator = CombinedAllocator<
+    PrimeAlloc, AllocatorCache, SecondAlloc, LocalAddressSpaceView>;
+using Allocator64 = CombinedAllocator<
+    PrimeAlloc64, AllocatorCache64, SecondAlloc, LocalAddressSpaceView>;
+
+AllocatorCache *GetAllocatorCache();
+AllocatorCache64 *GetAllocatorCache64();
+
+#elif defined(__mips64) || defined(__i386__) || defined(__arm__)
 static const uptr kRegionSizeLog = 20;
 static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
 template <typename AddressSpaceView>
@@ -96,6 +146,7 @@
 using PrimaryAllocator = PrimaryAllocatorASVT<LocalAddressSpaceView>;
 #endif
 
+#if !defined(__aarch64__)
 template <typename AddressSpaceView>
 using AllocatorCacheASVT =
     SizeClassAllocatorLocalCache<PrimaryAllocatorASVT<AddressSpaceView>>;
@@ -115,6 +166,7 @@
 using Allocator = AllocatorASVT<LocalAddressSpaceView>;
 
 AllocatorCache *GetAllocatorCache();
+#endif
 
 int lsan_posix_memalign(void **memptr, uptr alignment, uptr size,
                         const StackTrace &stack);
Index: compiler-rt/lib/lsan/lsan_allocator.cc
===================================================================
--- compiler-rt/lib/lsan/lsan_allocator.cc
+++ compiler-rt/lib/lsan/lsan_allocator.cc
@@ -11,6 +11,9 @@
 //
 //===----------------------------------------------------------------------===//
 
+// AArch64 defines its allocators in lsan_aarch64.cc.
+#if !defined(__aarch64__)
+
 #include "lsan_allocator.h"
 
 #include "sanitizer_common/sanitizer_allocator.h"
@@ -340,3 +343,5 @@
 }
 #endif
 } // extern "C"
+
+#endif // !defined(__aarch64__)
Index: compiler-rt/lib/lsan/lsan_linux.cc
===================================================================
--- compiler-rt/lib/lsan/lsan_linux.cc
+++ compiler-rt/lib/lsan/lsan_linux.cc
@@ -22,6 +22,11 @@
 u32 GetCurrentThread() { return current_thread_tid; }
 void SetCurrentThread(u32 tid) { current_thread_tid = tid; }
 
+#if defined(__aarch64__)
+static THREADLOCAL AllocatorCache64 allocator_cache64;
+AllocatorCache64 *GetAllocatorCache64() { return &allocator_cache64; }
+#endif
+
 static THREADLOCAL AllocatorCache allocator_cache;
 AllocatorCache *GetAllocatorCache() { return &allocator_cache; }
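
For reference, the 48-bit check in InitializeAllocator() relies on sanitizer_common's GetMaxVirtualAddress(), which on Linux/AArch64 derives the address-space ceiling from the position of a current stack address. The sketch below is illustrative only (ApproxMaxUserAddress is a hypothetical stand-in, not part of the patch or of sanitizer_common) and shows which allocator the run-time probe would pick on a given machine:

  // Standalone sketch, assuming a Linux/AArch64-style layout where the stack
  // sits near the top of the user address space.
  #include <cstdint>
  #include <cstdio>

  static uintptr_t ApproxMaxUserAddress() {  // hypothetical helper
    int frame;  // address of a local approximates the top of user space
    uintptr_t addr = reinterpret_cast<uintptr_t>(&frame);
    int msb = 62;
    while (msb > 0 && !(addr & (uintptr_t(1) << msb))) --msb;
    return (uintptr_t(1) << (msb + 1)) - 1;  // e.g. 2^48 - 1 on a 48-bit VMA
  }

  int main() {
    uintptr_t max = ApproxMaxUserAddress();
    bool use64 = max >= ((uintptr_t(1) << 48) - 1);
    std::printf("max user address ~0x%lx -> %s allocator\n",
                (unsigned long)max, use64 ? "64-bit" : "32-bit-style");
    return 0;
  }

On a 39- or 42-bit VMA kernel the probe stays below 2^48 - 1 and the 32-bit-style SizeClassAllocator32 path is taken, matching the fallback in InitializeAllocator().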