Index: compiler-rt/lib/lsan/lsan_allocator.h
===================================================================
--- compiler-rt/lib/lsan/lsan_allocator.h
+++ compiler-rt/lib/lsan/lsan_allocator.h
@@ -49,8 +49,58 @@
   u32 stack_trace_id;
 };
 
-#if defined(__mips64) || defined(__aarch64__) || defined(__i386__) || \
-    defined(__arm__)
+#if defined(__aarch64__)
+static const uptr kRegionSizeLog = 20;
+static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
+template <typename AddressSpaceView>
+using ByteMapASVT =
+    TwoLevelByteMap<(kNumRegions >> 12), 1 << 12, AddressSpaceView>;
+
+template <typename AddressSpaceViewTy>
+struct AP32 {
+  static const uptr kSpaceBeg = 0;
+  static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
+  static const uptr kMetadataSize = sizeof(ChunkMetadata);
+  typedef __sanitizer::CompactSizeClassMap SizeClassMap;
+  static const uptr kRegionSizeLog = __lsan::kRegionSizeLog;
+  using AddressSpaceView = AddressSpaceViewTy;
+  using ByteMap = __lsan::ByteMapASVT<AddressSpaceView>;
+  typedef NoOpMapUnmapCallback MapUnmapCallback;
+  static const uptr kFlags = 0;
+};
+
+const uptr kAllocatorSpace = 0x600000000000ULL;
+const uptr kAllocatorSize = 0x40000000000ULL;  // 4T.
+
+template <class AddressSpaceViewTy>
+struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
+  static const uptr kSpaceBeg = kAllocatorSpace;
+  static const uptr kSpaceSize = kAllocatorSize;
+  static const uptr kMetadataSize = sizeof(ChunkMetadata);
+  typedef DefaultSizeClassMap SizeClassMap;
+  typedef NoOpMapUnmapCallback MapUnmapCallback;
+  static const uptr kFlags = 0;
+  using AddressSpaceView = AddressSpaceViewTy;
+};
+
+using PrimeAlloc = SizeClassAllocator32<AP32<LocalAddressSpaceView>>;
+using PrimeAlloc64 = SizeClassAllocator64<AP64<LocalAddressSpaceView>>;
+
+using AllocatorCache = SizeClassAllocatorLocalCache<PrimeAlloc>;
+using AllocatorCache64 = SizeClassAllocatorLocalCache<PrimeAlloc64>;
+
+using SecondAlloc = LargeMmapAllocator<NoOpMapUnmapCallback,
+                                       DefaultLargeMmapAllocatorPtrArray>;
+
+using Allocator = CombinedAllocator<
+    PrimeAlloc, AllocatorCache, SecondAlloc, LocalAddressSpaceView>;
+using Allocator64 = CombinedAllocator<
+    PrimeAlloc64, AllocatorCache64, SecondAlloc, LocalAddressSpaceView>;
+
+AllocatorCache *GetAllocatorCache();
+AllocatorCache64 *GetAllocatorCache64();
+
+#elif defined(__mips64) || defined(__i386__) || defined(__arm__)
 static const uptr kRegionSizeLog = 20;
 static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
 template <typename AddressSpaceView>
@@ -96,6 +146,7 @@
 using PrimaryAllocator = PrimaryAllocatorASVT<LocalAddressSpaceView>;
 #endif
 
+#if !defined(__aarch64__)
 template <typename AddressSpaceView>
 using AllocatorCacheASVT =
     SizeClassAllocatorLocalCache<PrimaryAllocatorASVT<AddressSpaceView>>;
@@ -115,6 +166,7 @@
 using Allocator = AllocatorASVT<LocalAddressSpaceView>;
 
 AllocatorCache *GetAllocatorCache();
+#endif
 
 int lsan_posix_memalign(void **memptr, uptr alignment, uptr size,
                         const StackTrace &stack);
Index: compiler-rt/lib/lsan/lsan_allocator.cc
===================================================================
--- compiler-rt/lib/lsan/lsan_allocator.cc
+++ compiler-rt/lib/lsan/lsan_allocator.cc
@@ -34,19 +34,46 @@
 static const uptr kMaxAllowedMallocSize = 8UL << 30;
 #endif
 
+#if defined(__aarch64__)
+static bool useAllocator64 = true;
+static Allocator64 allocator64;
+#endif
+
 static Allocator allocator;
 
 void InitializeAllocator() {
   SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
+#if defined(__aarch64__)
+  // Check whether the address space is large enough for the 64-bit
+  // allocator, i.e. whether the full 48-bit VA layout is available.
+  if (GetMaxVirtualAddress() < (((uptr)1 << 48) - 1))
+    useAllocator64 = false;
+  else
+    useAllocator64 = true;
+
+  if (useAllocator64)
+    allocator64.InitLinkerInitialized(
+        common_flags()->allocator_release_to_os_interval_ms);
+  else
+#endif
   allocator.InitLinkerInitialized(
       common_flags()->allocator_release_to_os_interval_ms);
 }
 
 void AllocatorThreadFinish() {
+#if defined(__aarch64__)
+  if (useAllocator64)
+    allocator64.SwallowCache(GetAllocatorCache64());
+  else
+#endif
   allocator.SwallowCache(GetAllocatorCache());
 }
 
 static ChunkMetadata *Metadata(const void *p) {
+#if defined(__aarch64__)
+  if (useAllocator64)
+    return reinterpret_cast<ChunkMetadata *>(allocator64.GetMetaData(p));
+#endif
   return reinterpret_cast<ChunkMetadata *>(allocator.GetMetaData(p));
 }
 
@@ -81,7 +108,14 @@
     size = 1;
   if (size > kMaxAllowedMallocSize)
     return ReportAllocationSizeTooBig(size, stack);
-  void *p = allocator.Allocate(GetAllocatorCache(), size, alignment);
+  void *p;
+#if defined(__aarch64__)
+  if (useAllocator64)
+    p = allocator64.Allocate(GetAllocatorCache64(), size, alignment);
+  else
+#endif
+  p = allocator.Allocate(GetAllocatorCache(), size, alignment);
+
   if (UNLIKELY(!p)) {
     SetAllocatorOutOfMemory();
     if (AllocatorMayReturnNull())
@@ -89,8 +123,18 @@
       ReportOutOfMemory(size, &stack);
   }
   // Do not rely on the allocator to clear the memory (it's slow).
-  if (cleared && allocator.FromPrimary(p))
-    memset(p, 0, size);
+  if (cleared) {
+    bool fromPrimary;
+#if defined(__aarch64__)
+    if (useAllocator64)
+      fromPrimary = allocator64.FromPrimary(p);
+    else
+#endif
+    fromPrimary = allocator.FromPrimary(p);
+
+    if (fromPrimary)
+      memset(p, 0, size);
+  }
   RegisterAllocation(stack, p, size);
   if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(p, size);
   RunMallocHooks(p, size);
@@ -111,6 +155,11 @@
   if (&__sanitizer_free_hook) __sanitizer_free_hook(p);
   RunFreeHooks(p);
   RegisterDeallocation(p);
+#if defined(__aarch64__)
+  if (useAllocator64)
+    allocator64.Deallocate(GetAllocatorCache64(), p);
+  else
+#endif
   allocator.Deallocate(GetAllocatorCache(), p);
 }
 
@@ -118,17 +167,32 @@
                         uptr alignment) {
   RegisterDeallocation(p);
   if (new_size > kMaxAllowedMallocSize) {
+#if defined(__aarch64__)
+    if (useAllocator64)
+      allocator64.Deallocate(GetAllocatorCache64(), p);
+    else
+#endif
     allocator.Deallocate(GetAllocatorCache(), p);
     return ReportAllocationSizeTooBig(new_size, stack);
   }
+#if defined(__aarch64__)
+  if (useAllocator64)
+    p = allocator64.Reallocate(GetAllocatorCache64(), p, new_size, alignment);
+  else
+#endif
   p = allocator.Reallocate(GetAllocatorCache(), p, new_size, alignment);
   RegisterAllocation(stack, p, new_size);
   return p;
 }
 
 void GetAllocatorCacheRange(uptr *begin, uptr *end) {
+#if defined(__aarch64__)
+  *begin = (uptr)GetAllocatorCache64();
+  *end = *begin + sizeof(AllocatorCache64);
+#else
   *begin = (uptr)GetAllocatorCache();
   *end = *begin + sizeof(AllocatorCache);
+#endif
 }
 
 uptr GetMallocUsableSize(const void *p) {
@@ -214,10 +278,20 @@
 ///// Interface to the common LSan module. /////
 
 void LockAllocator() {
+#if defined(__aarch64__)
+  if (useAllocator64)
+    allocator64.ForceLock();
+  else
+#endif
   allocator.ForceLock();
 }
 
 void UnlockAllocator() {
+#if defined(__aarch64__)
+  if (useAllocator64)
+    allocator64.ForceUnlock();
+  else
+#endif
   allocator.ForceUnlock();
 }
 
@@ -228,7 +302,14 @@
 
 uptr PointsIntoChunk(void* p) {
   uptr addr = reinterpret_cast<uptr>(p);
-  uptr chunk = reinterpret_cast<uptr>(allocator.GetBlockBeginFastLocked(p));
+  uptr chunk;
+#if defined(__aarch64__)
+  if (useAllocator64)
+    chunk = reinterpret_cast<uptr>(allocator64.GetBlockBeginFastLocked(p));
+  else
+#endif
+  chunk = reinterpret_cast<uptr>(allocator.GetBlockBeginFastLocked(p));
+
   if (!chunk) return 0;
   // LargeMmapAllocator considers pointers to the meta-region of a chunk to be
   // valid, but we don't want that.
@@ -274,11 +355,23 @@
 }
 
 void ForEachChunk(ForEachChunkCallback callback, void *arg) {
+#if defined(__aarch64__)
+  if (useAllocator64)
+    allocator64.ForEachChunk(callback, arg);
+  else
+#endif
   allocator.ForEachChunk(callback, arg);
 }
 
 IgnoreObjectResult IgnoreObjectLocked(const void *p) {
-  void *chunk = allocator.GetBlockBegin(p);
+  void *chunk;
+#if defined(__aarch64__)
+  if (useAllocator64)
+    chunk = allocator64.GetBlockBegin(p);
+  else
+#endif
+  chunk = allocator.GetBlockBegin(p);
+
   if (!chunk || p < chunk) return kIgnoreObjectInvalid;
   ChunkMetadata *m = Metadata(chunk);
   CHECK(m);
@@ -299,6 +392,11 @@
 SANITIZER_INTERFACE_ATTRIBUTE
 uptr __sanitizer_get_current_allocated_bytes() {
   uptr stats[AllocatorStatCount];
+#if defined(__aarch64__)
+  if (useAllocator64)
+    allocator64.GetStats(stats);
+  else
+#endif
   allocator.GetStats(stats);
   return stats[AllocatorStatAllocated];
 }
 
@@ -306,6 +404,11 @@
 SANITIZER_INTERFACE_ATTRIBUTE
 uptr __sanitizer_get_heap_size() {
   uptr stats[AllocatorStatCount];
+#if defined(__aarch64__)
+  if (useAllocator64)
+    allocator64.GetStats(stats);
+  else
+#endif
   allocator.GetStats(stats);
   return stats[AllocatorStatMapped];
 }
Index: compiler-rt/lib/lsan/lsan_linux.cc
===================================================================
--- compiler-rt/lib/lsan/lsan_linux.cc
+++ compiler-rt/lib/lsan/lsan_linux.cc
@@ -22,6 +22,11 @@
 u32 GetCurrentThread() { return current_thread_tid; }
 void SetCurrentThread(u32 tid) { current_thread_tid = tid; }
 
+#if defined(__aarch64__)
+static THREADLOCAL AllocatorCache64 allocator_cache64;
+AllocatorCache64 *GetAllocatorCache64() { return &allocator_cache64; }
+#endif
+
 static THREADLOCAL AllocatorCache allocator_cache;
 AllocatorCache *GetAllocatorCache() { return &allocator_cache; }
 
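
Note (not part of the patch): a minimal standalone sketch of the runtime check the patch adds to InitializeAllocator(), for trying the predicate outside the sanitizer tree. MaxVirtualAddressStandIn() is a hard-coded placeholder for GetMaxVirtualAddress(), and the printed allocator names are labels only.

// Standalone sketch with assumed names; not sanitizer code.
#include <cstdint>
#include <cstdio>

// Placeholder for GetMaxVirtualAddress(): on aarch64 Linux the real value
// reflects the configured VA size, e.g. (1ULL << 39) - 1 or (1ULL << 48) - 1.
static uint64_t MaxVirtualAddressStandIn() { return (1ULL << 39) - 1; }

int main() {
  // Same predicate as the patch: enable the 64-bit primary (region at
  // 0x600000000000, 4T) only when the full 48-bit VA layout is reported.
  bool useAllocator64 = MaxVirtualAddressStandIn() >= ((1ULL << 48) - 1);
  std::printf("primary allocator: %s\n",
              useAllocator64 ? "SizeClassAllocator64" : "SizeClassAllocator32");
  return 0;
}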