Index: compiler-rt/lib/asan/asan_allocator.h
===================================================================
--- compiler-rt/lib/asan/asan_allocator.h
+++ compiler-rt/lib/asan/asan_allocator.h
@@ -118,27 +118,92 @@
   void OnUnmap(uptr p, uptr size) const;
 };
 
-#if SANITIZER_CAN_USE_ALLOCATOR64
-# if SANITIZER_FUCHSIA
-const uptr kAllocatorSpace = ~(uptr)0;
-const uptr kAllocatorSize = 0x40000000000ULL;  // 4T.
-typedef DefaultSizeClassMap SizeClassMap;
-# elif defined(__powerpc64__)
-const uptr kAllocatorSpace = ~(uptr)0;
-const uptr kAllocatorSize = 0x20000000000ULL;  // 2T.
-typedef DefaultSizeClassMap SizeClassMap;
-# elif defined(__aarch64__) && SANITIZER_ANDROID
+#if defined(__aarch64__)
+# if SANITIZER_ANDROID
 // Android needs to support 39, 42 and 48 bit VMA.
 const uptr kAllocatorSpace = ~(uptr)0;
 const uptr kAllocatorSize = 0x2000000000ULL;  // 128G.
 typedef VeryCompactSizeClassMap SizeClassMap;
-# elif defined(__aarch64__)
+# else
 // AArch64/SANITIZER_CAN_USE_ALLOCATOR64 is only for 42-bit VMA
 // so no need to different values for different VMA.
 const uptr kAllocatorSpace = 0x10000000000ULL;
 const uptr kAllocatorSize = 0x10000000000ULL;  // 3T.
 typedef DefaultSizeClassMap SizeClassMap;
-#elif defined(__sparc__)
+# endif
+template <typename AddressSpaceViewTy>
+struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
+  static const uptr kSpaceBeg = kAllocatorSpace;
+  static const uptr kSpaceSize = kAllocatorSize;
+  static const uptr kMetadataSize = 0;
+  typedef __asan::SizeClassMap SizeClassMap;
+  typedef AsanMapUnmapCallback MapUnmapCallback;
+  static const uptr kFlags = 0;
+  using AddressSpaceView = AddressSpaceViewTy;
+};
+
+static const uptr kRegionSizeLog = 20;
+static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
+# if SANITIZER_WORDSIZE == 32
+template <typename AddressSpaceView>
+using ByteMapASVT = FlatByteMap<kNumRegions, AddressSpaceView>;
+# elif SANITIZER_WORDSIZE == 64
+template <typename AddressSpaceView>
+using ByteMapASVT =
+    TwoLevelByteMap<(kNumRegions >> 12), 1 << 12, AddressSpaceView>;
+# endif
+typedef CompactSizeClassMap SizeClassMap32;
+template <typename AddressSpaceViewTy>
+struct AP32 {
+  static const uptr kSpaceBeg = 0;
+  static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
+  static const uptr kMetadataSize = 16;
+  typedef __asan::SizeClassMap SizeClassMap;
+  static const uptr kRegionSizeLog = __asan::kRegionSizeLog;
+  using AddressSpaceView = AddressSpaceViewTy;
+  using ByteMap = __asan::ByteMapASVT<AddressSpaceView>;
+  typedef AsanMapUnmapCallback MapUnmapCallback;
+  static const uptr kFlags = 0;
+};
+
+using PrimaryAllocator = SizeClassAllocator32<AP32<LocalAddressSpaceView>>;
+using PrimaryAllocator64 = SizeClassAllocator64<AP64<LocalAddressSpaceView>>;
+
+static const uptr kNumberOfSizeClasses = SizeClassMap32::kNumClasses;
+using AllocatorCache = SizeClassAllocatorLocalCache<PrimaryAllocator>;
+using AllocatorCache64 = SizeClassAllocatorLocalCache<PrimaryAllocator64>;
+
+using SecondaryAllocator =
+    LargeMmapAllocator<AsanMapUnmapCallback, DefaultLargeMmapAllocatorPtrArray, LocalAddressSpaceView>;
+using AsanAllocator = CombinedAllocator<PrimaryAllocator, AllocatorCache, SecondaryAllocator, LocalAddressSpaceView>;
+using AsanAllocator64 = CombinedAllocator<PrimaryAllocator64, AllocatorCache64, SecondaryAllocator, LocalAddressSpaceView>;
+struct AsanThreadLocalMallocStorage {
+  uptr quarantine_cache[16];
+  AllocatorCache allocator_cache;
+  AllocatorCache64 allocator_cache64;
+  void CommitBack();
+ private:
+  // These objects are allocated via mmap() and are zero-initialized.
+  AsanThreadLocalMallocStorage() {}
+};
+
+#elif SANITIZER_CAN_USE_ALLOCATOR64
+# if SANITIZER_FUCHSIA
+const uptr kAllocatorSpace = ~(uptr)0;
+const uptr kAllocatorSize = 0x40000000000ULL;  // 4T.
+typedef DefaultSizeClassMap SizeClassMap;
+# elif defined(__powerpc64__)
+const uptr kAllocatorSpace = ~(uptr)0;
+const uptr kAllocatorSize = 0x20000000000ULL;  // 2T.
+typedef DefaultSizeClassMap SizeClassMap;
+# elif defined(__sparc__)
 const uptr kAllocatorSpace = ~(uptr)0;
 const uptr kAllocatorSize = 0x20000000000ULL;  // 2T.
 typedef DefaultSizeClassMap SizeClassMap;
@@ -194,6 +259,7 @@
 using PrimaryAllocator = PrimaryAllocatorASVT<LocalAddressSpaceView>;
 #endif  // SANITIZER_CAN_USE_ALLOCATOR64
 
+#if !defined(__aarch64__)
 static const uptr kNumberOfSizeClasses = SizeClassMap::kNumClasses;
 template <typename AddressSpaceView>
 using AllocatorCacheASVT =
@@ -220,6 +286,7 @@
   // These objects are allocated via mmap() and are zero-initialized.
   AsanThreadLocalMallocStorage() {}
 };
+#endif
 
 void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
                     AllocType alloc_type);
Index: compiler-rt/lib/asan/asan_allocator.cc
===================================================================
--- compiler-rt/lib/asan/asan_allocator.cc
+++ compiler-rt/lib/asan/asan_allocator.cc
@@ -30,6 +30,10 @@
 #include "sanitizer_common/sanitizer_quarantine.h"
 #include "lsan/lsan_common.h"
 
+#if defined(__aarch64__)
+static bool useAllocator64 = true;
+#endif
+
 namespace __asan {
 
 // Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits.
@@ -48,6 +52,9 @@
   return res;
 }
 
+#if defined(__aarch64__)
+static AsanAllocator64 &get_allocator64();
+#endif
 static AsanAllocator &get_allocator();
 
 // The memory chunk allocated from the underlying allocator looks like this:
@@ -112,7 +119,33 @@
 struct AsanChunk: ChunkBase {
   uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
+#if defined(__aarch64__)
   uptr UsedSize(bool locked_version = false) {
+    if (user_requested_size != SizeClassMap::kMaxSize)
+      return user_requested_size;
+    if (useAllocator64)
+      return *reinterpret_cast<uptr *>(
+                 get_allocator64().GetMetaData(AllocBeg(locked_version)));
+    return *reinterpret_cast<uptr *>(
+               get_allocator().GetMetaData(AllocBeg(locked_version)));
+  }
+  void *AllocBeg(bool locked_version = false) {
+    if (from_memalign) {
+      if (locked_version) {
+        if (useAllocator64)
+          return get_allocator64().GetBlockBeginFastLocked(
+              reinterpret_cast<void *>(this));
+        return get_allocator().GetBlockBeginFastLocked(
+            reinterpret_cast<void *>(this));
+      }
+      if (useAllocator64)
+        return get_allocator64().GetBlockBegin(reinterpret_cast<void *>(this));
+      return get_allocator().GetBlockBegin(reinterpret_cast<void *>(this));
+    }
+    return reinterpret_cast<void *>(Beg() - RZLog2Size(rz_log));
+  }
+#else
+  uptr UsedSize(bool locked_version = false) {
     if (user_requested_size != SizeClassMap::kMaxSize)
       return user_requested_size;
     return *reinterpret_cast<uptr *>(
@@ -127,11 +160,66 @@
     }
     return reinterpret_cast<void*>(Beg() - RZLog2Size(rz_log));
   }
+#endif
   bool AddrIsInside(uptr addr, bool locked_version = false) {
     return (addr >= Beg()) && (addr < Beg() + UsedSize(locked_version));
   }
 };
 
+#if defined(__aarch64__)
+struct QuarantineCallback64 {
+  QuarantineCallback64(AllocatorCache64 *cache, BufferedStackTrace *stack)
+      : cache_(cache),
+        stack_(stack) {
+  }
+
+  void Recycle(AsanChunk *m) {
+    CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
+    atomic_store((atomic_uint8_t*)m, CHUNK_AVAILABLE, memory_order_relaxed);
+    CHECK_NE(m->alloc_tid, kInvalidTid);
+    CHECK_NE(m->free_tid, kInvalidTid);
+    PoisonShadow(m->Beg(),
+                 RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
+                 kAsanHeapLeftRedzoneMagic);
+    void *p = reinterpret_cast<void *>(m->AllocBeg());
+    if (p != m) {
+      uptr *alloc_magic = reinterpret_cast<uptr *>(p);
+      CHECK_EQ(alloc_magic[0], kAllocBegMagic);
+      // Clear the magic value, as allocator internals may overwrite the
+      // contents of deallocated chunk, confusing GetAsanChunk lookup.
+      alloc_magic[0] = 0;
+      CHECK_EQ(alloc_magic[1], reinterpret_cast<uptr>(m));
+    }
+
+    // Statistics.
+    AsanStats &thread_stats = GetCurrentThreadStats();
+    thread_stats.real_frees++;
+    thread_stats.really_freed += m->UsedSize();
+
+    get_allocator64().Deallocate(cache_, p);
+  }
+
+  void *Allocate(uptr size) {
+    void *res = get_allocator64().Allocate(cache_, size, 1);
+    // TODO(alekseys): Consider making quarantine OOM-friendly.
+    if (UNLIKELY(!res))
+      ReportOutOfMemory(size, stack_);
+    return res;
+  }
+
+  void Deallocate(void *p) {
+    get_allocator64().Deallocate(cache_, p);
+  }
+
+ private:
+  AllocatorCache64* const cache_;
+  BufferedStackTrace* const stack_;
+};
+
+typedef Quarantine<QuarantineCallback64, AsanChunk> AsanQuarantine64;
+typedef AsanQuarantine64::Cache QuarantineCache64;
+#endif
+
 struct QuarantineCallback {
   QuarantineCallback(AllocatorCache *cache, BufferedStackTrace *stack)
       : cache_(cache),
        stack_(stack) {
   }
@@ -205,6 +293,18 @@
 // We can not use THREADLOCAL because it is not supported on some of the
 // platforms we care about (OSX 10.6, Android).
 // static THREADLOCAL AllocatorCache cache;
+#if defined(__aarch64__)
+AllocatorCache64 *GetAllocatorCache64(AsanThreadLocalMallocStorage *ms) {
+  CHECK(ms);
+  return &ms->allocator_cache64;
+}
+
+QuarantineCache64 *GetQuarantineCache64(AsanThreadLocalMallocStorage *ms) {
+  CHECK(ms);
+  CHECK_LE(sizeof(QuarantineCache64), sizeof(ms->quarantine_cache));
+  return reinterpret_cast<QuarantineCache64 *>(ms->quarantine_cache);
+}
+#endif
 AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
   CHECK(ms);
   return &ms->allocator_cache;
@@ -241,6 +341,13 @@
       FIRST_32_SECOND_64(3UL << 30, 1ULL << 40);
 
   AsanAllocator allocator;
+#if defined(__aarch64__)
+  AsanAllocator64 allocator64;
+  AsanQuarantine64 quarantine64;
+  AllocatorCache64 fallback_allocator_cache64;
+  QuarantineCache64 fallback_quarantine_cache64;
+#endif
+
   AsanQuarantine quarantine;
   StaticSpinMutex fallback_mutex;
   AllocatorCache fallback_allocator_cache;
@@ -253,10 +360,199 @@
   atomic_uint16_t max_redzone;
   atomic_uint8_t alloc_dealloc_mismatch;
 
+#if defined(__aarch64__)
   // ------------------- Initialization ------------------------
+  explicit Allocator(LinkerInitialized)
+      : quarantine64(LINKER_INITIALIZED),
+        fallback_quarantine_cache64(LINKER_INITIALIZED),
+        quarantine(LINKER_INITIALIZED),
+        fallback_quarantine_cache(LINKER_INITIALIZED) {
+    // Check whether the address space is at least 48-bit, which is large
+    // enough for the 64-bit allocator.
+    if (GetMaxVirtualAddress() < (((uptr)1 << 48) - 1))
+      useAllocator64 = false;
+    else
+      useAllocator64 = true;
+  }
+
+  void InitLinkerInitialized(s32 ms) {
+    if (useAllocator64)
+      allocator64.InitLinkerInitialized(ms);
+    else
+      allocator.InitLinkerInitialized(ms);
+  }
+
+  uptr GetActuallyAllocatedSize(void *ac) {
+    if (useAllocator64)
+      return allocator64.GetActuallyAllocatedSize(ac);
+    return allocator.GetActuallyAllocatedSize(ac);
+  }
+
+  void SetReleaseToOSIntervalMs(s32 ms) {
+    if (useAllocator64)
+      allocator64.SetReleaseToOSIntervalMs(ms);
+    else
+      allocator.SetReleaseToOSIntervalMs(ms);
+  }
+
+  s32 ReleaseToOSIntervalMs() const {
+    if (useAllocator64)
+      return allocator64.ReleaseToOSIntervalMs();
+    return allocator.ReleaseToOSIntervalMs();
+  }
+
+  void ForceLock() {
+    if (useAllocator64)
+      allocator64.ForceLock();
+    else
+      allocator.ForceLock();
+    fallback_mutex.Lock();
+  }
+
+  void ForceUnlock() {
+    fallback_mutex.Unlock();
+    if (useAllocator64)
+      allocator64.ForceUnlock();
+    else
+      allocator.ForceUnlock();
+  }
+
+  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
+    if (useAllocator64)
+      allocator64.ForEachChunk(callback, arg);
+    else
+      allocator.ForEachChunk(callback, arg);
+  }
+
+  void *Allocate(AsanThread *t, uptr needed_size) {
+    if (useAllocator64) {
+      AllocatorCache64 *cache = GetAllocatorCache64(&t->malloc_storage());
+      return allocator64.Allocate(cache, needed_size, 8);
+    }
+    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
+    return allocator.Allocate(cache, needed_size, 8);
+  }
+
+  void *Allocate(uptr needed_size) {
+    if (useAllocator64) {
+      AllocatorCache64 *cache = &fallback_allocator_cache64;
+      return allocator64.Allocate(cache, needed_size, 8);
+    }
+    AllocatorCache *cache = &fallback_allocator_cache;
+    return allocator.Allocate(cache, needed_size, 8);
+  }
+
+  bool FromPrimary(void *p) {
+    if (useAllocator64)
+      return allocator64.FromPrimary(p);
+    return allocator.FromPrimary(p);
+  }
+
+  uptr *GetMetaData(void *p) {
+    if (useAllocator64)
+      return reinterpret_cast<uptr *>(allocator64.GetMetaData(p));
+    return reinterpret_cast<uptr *>(allocator.GetMetaData(p));
+  }
+
+  void CommitBack(AsanThreadLocalMallocStorage *ms, BufferedStackTrace *stack) {
+    if (useAllocator64) {
+      AllocatorCache64 *ac = GetAllocatorCache64(ms);
+      quarantine64.Drain(GetQuarantineCache64(ms),
+                         QuarantineCallback64(ac, stack));
+      allocator64.SwallowCache(ac);
+    } else {
+      AllocatorCache *ac = GetAllocatorCache(ms);
+      quarantine.Drain(GetQuarantineCache(ms), QuarantineCallback(ac, stack));
+      allocator.SwallowCache(ac);
+    }
+  }
+
+  void *GetBlockBegin(uptr p) {
+    if (useAllocator64)
+      return allocator64.GetBlockBegin(reinterpret_cast<void *>(p));
+    return allocator.GetBlockBegin(reinterpret_cast<void *>(p));
+  }
+
+  void *GetBlockBeginFastLocked(uptr p) {
+    if (useAllocator64)
+      return allocator64.GetBlockBeginFastLocked(reinterpret_cast<void *>(p));
+    return allocator.GetBlockBeginFastLocked(reinterpret_cast<void *>(p));
+  }
+
+  void ForceReleaseToOS() {
+    if (useAllocator64)
+      allocator64.ForceReleaseToOS();
+    else
+      allocator.ForceReleaseToOS();
+  }
+
+  void PrintStats() {
+    if (useAllocator64) {
+      allocator64.PrintStats();
+      quarantine64.PrintStats();
+    } else {
+      allocator.PrintStats();
+      quarantine.PrintStats();
+    }
+  }
+#else
   explicit Allocator(LinkerInitialized)
       : quarantine(LINKER_INITIALIZED),
        fallback_quarantine_cache(LINKER_INITIALIZED) {}
+  void InitLinkerInitialized(s32 ms) {
+    allocator.InitLinkerInitialized(ms);
+  }
+  uptr GetActuallyAllocatedSize(void *ac) {
+    return allocator.GetActuallyAllocatedSize(ac);
+  }
+  void SetReleaseToOSIntervalMs(s32 ms) {
+    allocator.SetReleaseToOSIntervalMs(ms);
+  }
+  s32 ReleaseToOSIntervalMs() const {
+    return allocator.ReleaseToOSIntervalMs();
+  }
+  void ForceLock() {
+    allocator.ForceLock();
+    fallback_mutex.Lock();
+  }
+  void ForceUnlock() {
+    fallback_mutex.Unlock();
+    allocator.ForceUnlock();
+  }
+  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
+    allocator.ForEachChunk(callback, arg);
+  }
+  void *Allocate(AsanThread *t, uptr needed_size) {
+    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
+    return allocator.Allocate(cache, needed_size, 8);
+  }
+  void *Allocate(uptr needed_size) {
+    AllocatorCache *cache = &fallback_allocator_cache;
+    return allocator.Allocate(cache, needed_size, 8);
+  }
+  bool FromPrimary(void *p) {
+    return allocator.FromPrimary(p);
+  }
+  uptr *GetMetaData(void *p) {
+    return reinterpret_cast<uptr *>(allocator.GetMetaData(p));
+  }
+  void CommitBack(AsanThreadLocalMallocStorage *ms, BufferedStackTrace *stack) {
+    AllocatorCache *ac = GetAllocatorCache(ms);
+    quarantine.Drain(GetQuarantineCache(ms), QuarantineCallback(ac, stack));
+    allocator.SwallowCache(ac);
+  }
+  void *GetBlockBegin(uptr p) {
+    return allocator.GetBlockBegin(reinterpret_cast<void *>(p));
+  }
+  void *GetBlockBeginFastLocked(uptr p) {
+    return allocator.GetBlockBeginFastLocked(reinterpret_cast<void *>(p));
+  }
+  void ForceReleaseToOS() {
+    allocator.ForceReleaseToOS();
+  }
+  void PrintStats() {
+    allocator.PrintStats();
+    quarantine.PrintStats();
+  }
+
+#endif
 
   void CheckOptions(const AllocatorOptions &options) const {
     CHECK_GE(options.min_redzone, 16);
@@ -278,7 +574,7 @@
 
   void InitLinkerInitialized(const AllocatorOptions &options) {
     SetAllocatorMayReturnNull(options.may_return_null);
-    allocator.InitLinkerInitialized(options.release_to_os_interval_ms);
+    InitLinkerInitialized(options.release_to_os_interval_ms);
     SharedInitCode(options);
   }
 
@@ -294,7 +590,7 @@
     // This could be a user-facing chunk (with redzones), or some internal
     // housekeeping chunk, like TransferBatch. Start by assuming the former.
     AsanChunk *ac = GetAsanChunk((void *)chunk);
-    uptr allocated_size = allocator.GetActuallyAllocatedSize((void *)ac);
+    uptr allocated_size = GetActuallyAllocatedSize((void *)ac);
     uptr beg = ac->Beg();
     uptr end = ac->Beg() + ac->UsedSize(true);
     uptr chunk_end = chunk + allocated_size;
@@ -315,18 +611,18 @@
 
   void ReInitialize(const AllocatorOptions &options) {
     SetAllocatorMayReturnNull(options.may_return_null);
-    allocator.SetReleaseToOSIntervalMs(options.release_to_os_interval_ms);
+    SetReleaseToOSIntervalMs(options.release_to_os_interval_ms);
     SharedInitCode(options);
 
     // Poison all existing allocation's redzones.
     if (CanPoisonMemory()) {
-      allocator.ForceLock();
-      allocator.ForEachChunk(
+      ForceLock();
+      ForEachChunk(
          [](uptr chunk, void *alloc) {
            ((Allocator *)alloc)->RePoisonChunk(chunk);
          },
          this);
-      allocator.ForceUnlock();
+      ForceUnlock();
     }
   }
 
@@ -338,7 +634,7 @@
     options->may_return_null = AllocatorMayReturnNull();
     options->alloc_dealloc_mismatch =
        atomic_load(&alloc_dealloc_mismatch, memory_order_acquire);
-    options->release_to_os_interval_ms = allocator.ReleaseToOSIntervalMs();
+    options->release_to_os_interval_ms = ReleaseToOSIntervalMs();
   }
 
   // -------------------- Helper methods. -------------------------
@@ -448,12 +744,10 @@
   AsanThread *t = GetCurrentThread();
   void *allocated;
   if (t) {
-    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
-    allocated = allocator.Allocate(cache, needed_size, 8);
+    allocated = Allocate(t, needed_size);
   } else {
     SpinMutexLock l(&fallback_mutex);
-    AllocatorCache *cache = &fallback_allocator_cache;
-    allocated = allocator.Allocate(cache, needed_size, 8);
+    allocated = Allocate(needed_size);
   }
   if (UNLIKELY(!allocated)) {
     SetAllocatorOutOfMemory();
@@ -467,7 +761,7 @@
     // chunk. This is possible if CanPoisonMemory() was false for some
     // time, for example, due to flags()->start_disabled.
     // Anyway, poison the block before using it for anything else.
-    uptr allocated_size = allocator.GetActuallyAllocatedSize(allocated);
+    uptr allocated_size = GetActuallyAllocatedSize(allocated);
     PoisonShadow((uptr)allocated, allocated_size, kAsanHeapLeftRedzoneMagic);
   }
 
@@ -496,11 +790,11 @@
   if (using_primary_allocator) {
     CHECK(size);
     m->user_requested_size = size;
-    CHECK(allocator.FromPrimary(allocated));
+    CHECK(FromPrimary(allocated));
   } else {
-    CHECK(!allocator.FromPrimary(allocated));
+    CHECK(!FromPrimary(allocated));
     m->user_requested_size = SizeClassMap::kMaxSize;
-    uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(allocated));
+    uptr *meta = GetMetaData(allocated);
     meta[0] = size;
     meta[1] = chunk_beg;
   }
@@ -681,7 +975,7 @@
     void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
     // If the memory comes from the secondary allocator no need to clear it
     // as it comes directly from mmap.
-    if (ptr && allocator.FromPrimary(ptr))
+    if (ptr && FromPrimary(ptr))
       REAL(memset)(ptr, 0, nmemb * size);
     return ptr;
   }
@@ -693,19 +987,13 @@
       ReportFreeNotMalloced((uptr)ptr, stack);
   }
 
-  void CommitBack(AsanThreadLocalMallocStorage *ms, BufferedStackTrace *stack) {
-    AllocatorCache *ac = GetAllocatorCache(ms);
-    quarantine.Drain(GetQuarantineCache(ms), QuarantineCallback(ac, stack));
-    allocator.SwallowCache(ac);
-  }
-
   // -------------------------- Chunk lookup ----------------------
 
   // Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg).
   AsanChunk *GetAsanChunk(void *alloc_beg) {
     if (!alloc_beg) return nullptr;
-    if (!allocator.FromPrimary(alloc_beg)) {
-      uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(alloc_beg));
+    if (!FromPrimary(alloc_beg)) {
+      uptr *meta = GetMetaData(alloc_beg);
       AsanChunk *m = reinterpret_cast<AsanChunk *>(meta[1]);
       return m;
     }
@@ -716,14 +1004,13 @@
   }
 
   AsanChunk *GetAsanChunkByAddr(uptr p) {
-    void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p));
+    void *alloc_beg = GetBlockBegin(p);
     return GetAsanChunk(alloc_beg);
   }
 
   // Allocator must be locked when this function is called.
   AsanChunk *GetAsanChunkByAddrFastLocked(uptr p) {
-    void *alloc_beg =
-        allocator.GetBlockBeginFastLocked(reinterpret_cast<void *>(p));
+    void *alloc_beg = GetBlockBeginFastLocked(p);
     return GetAsanChunk(alloc_beg);
   }
 
@@ -770,22 +1057,7 @@
                                                     stack));
     }
 
-    allocator.ForceReleaseToOS();
-  }
-
-  void PrintStats() {
-    allocator.PrintStats();
-    quarantine.PrintStats();
-  }
-
-  void ForceLock() {
-    allocator.ForceLock();
-    fallback_mutex.Lock();
-  }
-
-  void ForceUnlock() {
-    fallback_mutex.Unlock();
-    allocator.ForceUnlock();
+    ForceReleaseToOS();
   }
 };
 
@@ -794,6 +1066,11 @@
 static AsanAllocator &get_allocator() {
   return instance.allocator;
 }
+#if defined(__aarch64__)
+static AsanAllocator64 &get_allocator64() {
+  return instance.allocator64;
+}
+#endif
 
 bool AsanChunkView::IsValid() const {
   return chunk_ && chunk_->chunk_state != CHUNK_AVAILABLE;
@@ -981,6 +1258,38 @@
 // --- Implementation of LSan-specific functions --- {{{1
 namespace __lsan {
 
+#if defined(__aarch64__)
+void LockAllocator() {
+  if (useAllocator64)
+    __asan::get_allocator64().ForceLock();
+  else
+    __asan::get_allocator().ForceLock();
+}
+
+void UnlockAllocator() {
+  if (useAllocator64)
+    __asan::get_allocator64().ForceUnlock();
+  else
+    __asan::get_allocator().ForceUnlock();
+}
+
+void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
+  if (useAllocator64) {
+    *begin = (uptr)&__asan::get_allocator64();
+    *end = *begin + sizeof(__asan::get_allocator64());
+  } else {
+    *begin = (uptr)&__asan::get_allocator();
+    *end = *begin + sizeof(__asan::get_allocator());
+  }
+}
+
+void ForEachChunk(ForEachChunkCallback callback, void *arg) {
+  if (useAllocator64)
+    __asan::get_allocator64().ForEachChunk(callback, arg);
+  else
+    __asan::get_allocator().ForEachChunk(callback, arg);
+}
+#else
 void LockAllocator() {
   __asan::get_allocator().ForceLock();
 }
@@ -994,6 +1303,11 @@
   *end = *begin + sizeof(__asan::get_allocator());
 }
 
+void ForEachChunk(ForEachChunkCallback callback, void *arg) {
+  __asan::get_allocator().ForEachChunk(callback, arg);
+}
+#endif
+
 uptr PointsIntoChunk(void* p) {
   uptr addr = reinterpret_cast<uptr>(p);
   __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(addr);
@@ -1044,10 +1358,6 @@
   return m->alloc_context_id;
 }
 
-void ForEachChunk(ForEachChunkCallback callback, void *arg) {
-  __asan::get_allocator().ForEachChunk(callback, arg);
-}
-
 IgnoreObjectResult IgnoreObjectLocked(const void *p) {
   uptr addr = reinterpret_cast<uptr>(p);
   __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddr(addr);
Index: compiler-rt/lib/lsan/lsan_allocator.h
===================================================================
--- compiler-rt/lib/lsan/lsan_allocator.h
+++ compiler-rt/lib/lsan/lsan_allocator.h
@@ -49,8 +49,58 @@
   u32 stack_trace_id;
 };
 
-#if defined(__mips64) || defined(__aarch64__) || defined(__i386__) || \
-    defined(__arm__)
+#if defined(__aarch64__)
+static const uptr kRegionSizeLog = 20;
+static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
+template <typename AddressSpaceView>
+using ByteMapASVT =
+    TwoLevelByteMap<(kNumRegions >> 12), 1 << 12, AddressSpaceView>;
+
+template <typename AddressSpaceViewTy>
+struct AP32 {
+  static const uptr kSpaceBeg = 0;
+  static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
+  static const uptr kMetadataSize = sizeof(ChunkMetadata);
+  typedef __sanitizer::CompactSizeClassMap SizeClassMap;
+  static const uptr kRegionSizeLog = __lsan::kRegionSizeLog;
+  using AddressSpaceView = AddressSpaceViewTy;
+  using ByteMap = __lsan::ByteMapASVT<AddressSpaceView>;
+  typedef NoOpMapUnmapCallback MapUnmapCallback;
+  static const uptr kFlags = 0;
+};
+
+const uptr kAllocatorSpace = 0x600000000000ULL;
+const uptr kAllocatorSize = 0x40000000000ULL;  // 4T.
+
+template <typename AddressSpaceViewTy>
+struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
+  static const uptr kSpaceBeg = kAllocatorSpace;
+  static const uptr kSpaceSize = kAllocatorSize;
+  static const uptr kMetadataSize = sizeof(ChunkMetadata);
+  typedef DefaultSizeClassMap SizeClassMap;
+  typedef NoOpMapUnmapCallback MapUnmapCallback;
+  static const uptr kFlags = 0;
+  using AddressSpaceView = AddressSpaceViewTy;
+};
+
+using PrimeAlloc = SizeClassAllocator32<AP32<LocalAddressSpaceView>>;
+using PrimeAlloc64 = SizeClassAllocator64<AP64<LocalAddressSpaceView>>;
+
+using AllocatorCache = SizeClassAllocatorLocalCache<PrimeAlloc>;
+using AllocatorCache64 = SizeClassAllocatorLocalCache<PrimeAlloc64>;
+
+using SecondAlloc = LargeMmapAllocator<NoOpMapUnmapCallback>;
+
+using Allocator = CombinedAllocator<
+    PrimeAlloc, AllocatorCache, SecondAlloc, LocalAddressSpaceView>;
+using Allocator64 = CombinedAllocator<
+    PrimeAlloc64, AllocatorCache64, SecondAlloc, LocalAddressSpaceView>;
+
+AllocatorCache *GetAllocatorCache();
+AllocatorCache64 *GetAllocatorCache64();
+
+#elif defined(__mips64) || defined(__i386__) || defined(__arm__)
 static const uptr kRegionSizeLog = 20;
 static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
 template <typename AddressSpaceView>
@@ -96,6 +146,7 @@
 using PrimaryAllocator = PrimaryAllocatorASVT<LocalAddressSpaceView>;
 #endif
 
+#if !defined(__aarch64__)
 template <typename AddressSpaceView>
 using AllocatorCacheASVT =
     SizeClassAllocatorLocalCache<PrimaryAllocatorASVT<AddressSpaceView>>;
@@ -115,6 +166,7 @@
 using Allocator = AllocatorASVT<LocalAddressSpaceView>;
 
 AllocatorCache *GetAllocatorCache();
+#endif
 
 int lsan_posix_memalign(void **memptr, uptr alignment, uptr size,
                         const StackTrace &stack);
Index: compiler-rt/lib/lsan/lsan_allocator.cc
===================================================================
--- compiler-rt/lib/lsan/lsan_allocator.cc
+++ compiler-rt/lib/lsan/lsan_allocator.cc
@@ -36,18 +36,166 @@
 
 static Allocator allocator;
 
+#if defined(__aarch64__)
+static bool useAllocator64 = true;
+static Allocator64 allocator64;
+
+static inline void InitLinkerInitialized(s32 ms) {
+  // Check whether the address space is at least 48-bit, which is large
+  // enough for the 64-bit allocator.
+  if (GetMaxVirtualAddress() < (((uptr)1 << 48) - 1)) {
+    useAllocator64 = false;
+    allocator.InitLinkerInitialized(ms);
+  } else {
+    useAllocator64 = true;
+    allocator64.InitLinkerInitialized(ms);
+  }
+}
+
+static inline void SwallowCache() {
+  if (useAllocator64)
+    allocator64.SwallowCache(GetAllocatorCache64());
+  else
+    allocator.SwallowCache(GetAllocatorCache());
+}
+
+static inline ChunkMetadata *GetMetaData(const void *p) {
+  if (useAllocator64)
+    return reinterpret_cast<ChunkMetadata *>(allocator64.GetMetaData(p));
+  return reinterpret_cast<ChunkMetadata *>(allocator.GetMetaData(p));
+}
+
+static inline void *Allocate(uptr size, uptr alignment) {
+  if (useAllocator64)
+    return allocator64.Allocate(GetAllocatorCache64(), size, alignment);
+  return allocator.Allocate(GetAllocatorCache(), size, alignment);
+}
+
+static inline bool FromPrimary(void *p) {
+  if (useAllocator64)
+    return allocator64.FromPrimary(p);
+  return allocator.FromPrimary(p);
+}
+
+static inline void deallocate(void *p) {
+  if (useAllocator64)
+    allocator64.Deallocate(GetAllocatorCache64(), p);
+  else
+    allocator.Deallocate(GetAllocatorCache(), p);
+}
+
+static inline void *Reallocate(void *p, uptr new_size, uptr alignment) {
+  if (useAllocator64)
+    return allocator64.Reallocate(GetAllocatorCache64(), p, new_size,
+                                  alignment);
+  return allocator.Reallocate(GetAllocatorCache(), p, new_size, alignment);
+}
+
+void GetAllocatorCacheRange(uptr *begin, uptr *end) {
+  if (useAllocator64) {
+    *begin = (uptr)GetAllocatorCache64();
+    *end = *begin + sizeof(AllocatorCache64);
+  } else {
+    *begin = (uptr)GetAllocatorCache();
+    *end = *begin + sizeof(AllocatorCache);
+  }
+}
+
+static inline void ForceLock() {
+  if (useAllocator64)
+    allocator64.ForceLock();
+  else
+    allocator.ForceLock();
+}
+
+static inline void ForceUnlock() {
+  if (useAllocator64)
+    allocator64.ForceUnlock();
+  else
+    allocator.ForceUnlock();
+}
+
+static inline uptr GetBlockBeginFastLocked(void *p) {
+  if (useAllocator64)
+    return reinterpret_cast<uptr>(allocator64.GetBlockBeginFastLocked(p));
+  return reinterpret_cast<uptr>(allocator.GetBlockBeginFastLocked(p));
+}
+
+void ForEachChunk(ForEachChunkCallback callback, void *arg) {
+  if (useAllocator64)
+    allocator64.ForEachChunk(callback, arg);
+  else
+    allocator.ForEachChunk(callback, arg);
+}
+
+static inline void *GetBlockBegin(const void *p) {
+  if (useAllocator64)
+    return allocator64.GetBlockBegin(p);
+  return allocator.GetBlockBegin(p);
+}
+
+#else  // __aarch64__
+
+static inline void InitLinkerInitialized(s32 ms) {
+  allocator.InitLinkerInitialized(ms);
+}
+
+static inline void SwallowCache() {
+  allocator.SwallowCache(GetAllocatorCache());
+}
+
+static inline ChunkMetadata *GetMetaData(const void *p) {
+  return reinterpret_cast<ChunkMetadata *>(allocator.GetMetaData(p));
+}
+
+static inline void *Allocate(uptr size, uptr alignment) {
+  return allocator.Allocate(GetAllocatorCache(), size, alignment);
+}
+
+static inline bool FromPrimary(void *p) { return allocator.FromPrimary(p); }
+
+static inline void deallocate(void *p) {
+  allocator.Deallocate(GetAllocatorCache(), p);
+}
+
+static inline void *Reallocate(void *p, uptr new_size, uptr alignment) {
+  return allocator.Reallocate(GetAllocatorCache(), p, new_size, alignment);
+}
+
+void GetAllocatorCacheRange(uptr *begin, uptr *end) {
+  *begin = (uptr)GetAllocatorCache();
+  *end = *begin + sizeof(AllocatorCache);
+}
+
+static inline void ForceLock() { allocator.ForceLock(); }
+
+static inline void ForceUnlock() { allocator.ForceUnlock(); }
+
+static inline uptr GetBlockBeginFastLocked(void *p) {
+  return reinterpret_cast<uptr>(allocator.GetBlockBeginFastLocked(p));
+}
+
+void ForEachChunk(ForEachChunkCallback callback, void *arg) {
+  allocator.ForEachChunk(callback, arg);
+}
+
+static inline void *GetBlockBegin(const void *p) {
+  return allocator.GetBlockBegin(p);
+}
+
+#endif  // __aarch64__
+
 void InitializeAllocator() {
   SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
-  allocator.InitLinkerInitialized(
-      common_flags()->allocator_release_to_os_interval_ms);
+  InitLinkerInitialized(common_flags()->allocator_release_to_os_interval_ms);
 }
 
 void AllocatorThreadFinish() {
-  allocator.SwallowCache(GetAllocatorCache());
+  SwallowCache();
 }
 
 static ChunkMetadata *Metadata(const void *p) {
-  return reinterpret_cast<ChunkMetadata *>(allocator.GetMetaData(p));
+  return GetMetaData(p);
 }
 
 static void RegisterAllocation(const StackTrace &stack, void *p, uptr size) {
@@ -81,7 +229,7 @@
     size = 1;
   if (size > kMaxAllowedMallocSize)
     return ReportAllocationSizeTooBig(size, stack);
-  void *p = allocator.Allocate(GetAllocatorCache(), size, alignment);
+  void *p = Allocate(size, alignment);
   if (UNLIKELY(!p)) {
     SetAllocatorOutOfMemory();
     if (AllocatorMayReturnNull())
@@ -89,7 +237,7 @@
     ReportOutOfMemory(size, &stack);
   }
   // Do not rely on the allocator to clear the memory (it's slow).
-  if (cleared && allocator.FromPrimary(p))
+  if (cleared && FromPrimary(p))
     memset(p, 0, size);
   RegisterAllocation(stack, p, size);
   if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(p, size);
@@ -111,26 +259,21 @@
   if (&__sanitizer_free_hook) __sanitizer_free_hook(p);
   RunFreeHooks(p);
   RegisterDeallocation(p);
-  allocator.Deallocate(GetAllocatorCache(), p);
+  deallocate(p);
 }
 
 void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
                  uptr alignment) {
   RegisterDeallocation(p);
   if (new_size > kMaxAllowedMallocSize) {
-    allocator.Deallocate(GetAllocatorCache(), p);
+    deallocate(p);
     return ReportAllocationSizeTooBig(new_size, stack);
   }
-  p = allocator.Reallocate(GetAllocatorCache(), p, new_size, alignment);
+  p = Reallocate(p, new_size, alignment);
   RegisterAllocation(stack, p, new_size);
   return p;
 }
 
-void GetAllocatorCacheRange(uptr *begin, uptr *end) {
-  *begin = (uptr)GetAllocatorCache();
-  *end = *begin + sizeof(AllocatorCache);
-}
-
 uptr GetMallocUsableSize(const void *p) {
   ChunkMetadata *m = Metadata(p);
   if (!m) return 0;
@@ -214,11 +357,11 @@
 ///// Interface to the common LSan module. /////
 
 void LockAllocator() {
-  allocator.ForceLock();
+  ForceLock();
 }
 
 void UnlockAllocator() {
-  allocator.ForceUnlock();
+  ForceUnlock();
 }
 
 void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
@@ -228,7 +371,7 @@
 
 uptr PointsIntoChunk(void* p) {
   uptr addr = reinterpret_cast<uptr>(p);
-  uptr chunk = reinterpret_cast<uptr>(allocator.GetBlockBeginFastLocked(p));
+  uptr chunk = GetBlockBeginFastLocked(p);
   if (!chunk) return 0;
   // LargeMmapAllocator considers pointers to the meta-region of a chunk to be
   // valid, but we don't want that.
@@ -273,12 +416,8 @@
   return reinterpret_cast<ChunkMetadata *>(metadata_)->stack_trace_id;
 }
 
-void ForEachChunk(ForEachChunkCallback callback, void *arg) {
-  allocator.ForEachChunk(callback, arg);
-}
-
 IgnoreObjectResult IgnoreObjectLocked(const void *p) {
-  void *chunk = allocator.GetBlockBegin(p);
+  void *chunk = GetBlockBegin(p);
   if (!chunk || p < chunk) return kIgnoreObjectInvalid;
   ChunkMetadata *m = Metadata(chunk);
   CHECK(m);
@@ -296,6 +435,27 @@
 using namespace __lsan;
 
 extern "C" {
+#if defined(__aarch64__)
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __sanitizer_get_current_allocated_bytes() {
+  uptr stats[AllocatorStatCount];
+  if (useAllocator64)
+    allocator64.GetStats(stats);
+  else
+    allocator.GetStats(stats);
+  return stats[AllocatorStatAllocated];
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __sanitizer_get_heap_size() {
+  uptr stats[AllocatorStatCount];
+  if (useAllocator64)
+    allocator64.GetStats(stats);
+  else
+    allocator.GetStats(stats);
+  return stats[AllocatorStatMapped];
+}
+#else  // __aarch64__
 SANITIZER_INTERFACE_ATTRIBUTE
 uptr __sanitizer_get_current_allocated_bytes() {
   uptr stats[AllocatorStatCount];
@@ -310,6 +470,7 @@
   return stats[AllocatorStatMapped];
 }
 
+#endif  // __aarch64__
 SANITIZER_INTERFACE_ATTRIBUTE
 uptr __sanitizer_get_free_bytes() { return 0; }
 
Index: compiler-rt/lib/lsan/lsan_linux.cc
===================================================================
--- compiler-rt/lib/lsan/lsan_linux.cc
+++ compiler-rt/lib/lsan/lsan_linux.cc
@@ -22,6 +22,11 @@
 u32 GetCurrentThread() { return current_thread_tid; }
 void SetCurrentThread(u32 tid) { current_thread_tid = tid; }
 
+#if defined(__aarch64__)
+static THREADLOCAL AllocatorCache64 allocator_cache64;
+AllocatorCache64 *GetAllocatorCache64() { return &allocator_cache64; }
+#endif
+
 static THREADLOCAL AllocatorCache allocator_cache;
 AllocatorCache *GetAllocatorCache() { return &allocator_cache; }