Index: compiler-rt/lib/asan/asan_allocator.h
===================================================================
--- compiler-rt/lib/asan/asan_allocator.h
+++ compiler-rt/lib/asan/asan_allocator.h
@@ -118,39 +118,244 @@
   void OnUnmap(uptr p, uptr size) const;
 };
 
-#if SANITIZER_CAN_USE_ALLOCATOR64
+#if defined(__aarch64__)
+// AArch64 supports 39, 42 and 48-bit VMA.
+const uptr kAllocatorSpace = ~(uptr)0;
+# if SANITIZER_ANDROID
+const uptr kAllocatorSize = 0x2000000000ULL; // 128G.
+typedef VeryCompactSizeClassMap SizeClassMap64;
+# else
+const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
+typedef DefaultSizeClassMap SizeClassMap64;
+# endif
+
+template <typename AddressSpaceViewTy>
+struct AP64 { // Allocator64 parameters. Deliberately using a short name.
+  static const uptr kSpaceBeg = kAllocatorSpace;
+  static const uptr kSpaceSize = kAllocatorSize;
+  static const uptr kMetadataSize = 0;
+  typedef __asan::SizeClassMap64 SizeClassMap;
+  typedef AsanMapUnmapCallback MapUnmapCallback;
+  static const uptr kFlags = 0;
+  using AddressSpaceView = AddressSpaceViewTy;
+};
+template <typename AddressSpaceView>
+using Allocator64ASVT = SizeClassAllocator64<AP64<AddressSpaceView>>;
+using Allocator64 = Allocator64ASVT<LocalAddressSpaceView>;
+
+typedef CompactSizeClassMap SizeClassMap32;
+template <typename AddressSpaceViewTy>
+struct AP32 {
+  static const uptr kSpaceBeg = 0;
+  static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
+  static const uptr kMetadataSize = 16;
+  typedef __asan::SizeClassMap32 SizeClassMap;
+  static const uptr kRegionSizeLog = 20;
+  using AddressSpaceView = AddressSpaceViewTy;
+  typedef AsanMapUnmapCallback MapUnmapCallback;
+  static const uptr kFlags = 0;
+};
+template <typename AddressSpaceView>
+using Allocator32ASVT = SizeClassAllocator32<AP32<AddressSpaceView>>;
+using Allocator32 = Allocator32ASVT<LocalAddressSpaceView>;
+
+extern bool UseAllocator32;
+
+static inline uptr getKNumClasses() {
+  if (UseAllocator32)
+    return SizeClassMap32::kNumClasses;
+  return SizeClassMap64::kNumClasses;
+}
+
+static inline uptr getKMaxSize() {
+  if (UseAllocator32)
+    return SizeClassMap32::kMaxSize;
+  return SizeClassMap64::kMaxSize;
+}
+
+static inline uptr getClassID(uptr size) {
+  if (UseAllocator32)
+    return SizeClassMap32::ClassID(size);
+  return SizeClassMap64::ClassID(size);
+}
+
+static const uptr kMaxNumberOfSizeClasses =
+    SizeClassMap32::kNumClasses < SizeClassMap64::kNumClasses
+        ? SizeClassMap64::kNumClasses
+        : SizeClassMap32::kNumClasses;
+
+
+template <class A32, class A64>
+class DoubleAllocator {
+  A32 a32;
+  A64 a64;
+
+ public:
+  class DoubleAllocatorCache {
+    typename A32::AllocatorCache a32;
+    typename A64::AllocatorCache a64;
+
+   public:
+    void Init(AllocatorGlobalStats *s) {
+      if (UseAllocator32)
+        a32.Init(s);
+      else
+        a64.Init(s);
+    }
+    void *Allocate(DoubleAllocator *allocator, uptr class_id) {
+      if (UseAllocator32)
+        return a32.Allocate(&allocator->a32, class_id);
+      return a64.Allocate(&allocator->a64, class_id);
+    }
+
+    void Deallocate(DoubleAllocator *allocator, uptr class_id, void *p) {
+      if (UseAllocator32)
+        a32.Deallocate(&allocator->a32, class_id, p);
+      else
+        a64.Deallocate(&allocator->a64, class_id, p);
+    }
+
+    void Drain(DoubleAllocator *allocator) {
+      if (UseAllocator32)
+        a32.Drain(&allocator->a32);
+      else
+        a64.Drain(&allocator->a64);
+    }
+
+    void Destroy(DoubleAllocator *allocator, AllocatorGlobalStats *s) {
+      if (UseAllocator32)
+        a32.Destroy(&allocator->a32, s);
+      else
+        a64.Destroy(&allocator->a64, s);
+    }
+  };
+
+  using MapUnmapCallback = Allocator32::MapUnmapCallback;
+  using AddressSpaceView = Allocator32::AddressSpaceView;
+  using AllocatorCache = DoubleAllocatorCache;
+
+  void Init(s32 release_to_os_interval_ms) {
+    if (UseAllocator32)
+      a32.Init(release_to_os_interval_ms);
+    else
+      a64.Init(release_to_os_interval_ms);
+  }
+
+  static bool CanAllocate(uptr size, uptr alignment) {
+    if (UseAllocator32)
+      return A32::CanAllocate(size, alignment);
+    return A64::CanAllocate(size, alignment);
+  }
+
+  static uptr ClassID(uptr size) {
+    if (UseAllocator32)
+      return A32::ClassID(size);
+    return A64::ClassID(size);
+  }
+
+  bool PointerIsMine(const void *p) {
+    if (UseAllocator32)
+      return a32.PointerIsMine(p);
+    return a64.PointerIsMine(p);
+  }
+
+  void *GetMetaData(const void *p) {
+    if (UseAllocator32)
+      return a32.GetMetaData(p);
+    return a64.GetMetaData(p);
+  }
+
+  uptr GetSizeClass(const void *p) {
+    if (UseAllocator32)
+      return a32.GetSizeClass(p);
+    return a64.GetSizeClass(p);
+  }
+
+  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
+    if (UseAllocator32)
+      a32.ForEachChunk(callback, arg);
+    else
+      a64.ForEachChunk(callback, arg);
+  }
+
+  void TestOnlyUnmap() {
+    if (UseAllocator32)
+      a32.TestOnlyUnmap();
+    else
+      a64.TestOnlyUnmap();
+  }
+  void ForceLock() {
+    if (UseAllocator32)
+      a32.ForceLock();
+    else
+      a64.ForceLock();
+  }
+  void ForceUnlock() {
+    if (UseAllocator32)
+      a32.ForceUnlock();
+    else
+      a64.ForceUnlock();
+  }
+  void *GetBlockBegin(const void *p) {
+    if (UseAllocator32)
+      return a32.GetBlockBegin(p);
+    return a64.GetBlockBegin(p);
+  }
+  uptr GetActuallyAllocatedSize(void *p) {
+    if (UseAllocator32)
+      return a32.GetActuallyAllocatedSize(p);
+    return a64.GetActuallyAllocatedSize(p);
+  }
+  void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
+    if (UseAllocator32)
+      a32.SetReleaseToOSIntervalMs(release_to_os_interval_ms);
+    else
+      a64.SetReleaseToOSIntervalMs(release_to_os_interval_ms);
+  }
+  s32 ReleaseToOSIntervalMs() const {
+    if (UseAllocator32)
+      return a32.ReleaseToOSIntervalMs();
+    return a64.ReleaseToOSIntervalMs();
+  }
+  void ForceReleaseToOS() {
+    if (UseAllocator32)
+      a32.ForceReleaseToOS();
+    else
+      a64.ForceReleaseToOS();
+  }
+  void PrintStats() {
+    if (UseAllocator32)
+      a32.PrintStats();
+    else
+      a64.PrintStats();
+  }
+};
+
+template <typename AddressSpaceView>
+using PrimaryAllocatorASVT = DoubleAllocator<Allocator32ASVT<AddressSpaceView>,
+                                             Allocator64ASVT<AddressSpaceView>>;
+#elif SANITIZER_CAN_USE_ALLOCATOR64
 # if SANITIZER_FUCHSIA
 const uptr kAllocatorSpace = ~(uptr)0;
 const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
-typedef DefaultSizeClassMap SizeClassMap;
 # elif defined(__powerpc64__)
 const uptr kAllocatorSpace = ~(uptr)0;
 const uptr kAllocatorSize = 0x20000000000ULL; // 2T.
-typedef DefaultSizeClassMap SizeClassMap;
-# elif defined(__aarch64__) && SANITIZER_ANDROID
-// Android needs to support 39, 42 and 48 bit VMA.
-const uptr kAllocatorSpace = ~(uptr)0;
-const uptr kAllocatorSize = 0x2000000000ULL; // 128G.
-typedef VeryCompactSizeClassMap SizeClassMap;
-# elif defined(__aarch64__)
-// AArch64/SANITIZER_CAN_USE_ALLOCATOR64 is only for 42-bit VMA
-// so no need to different values for different VMA.
-const uptr kAllocatorSpace = 0x10000000000ULL;
-const uptr kAllocatorSize = 0x10000000000ULL; // 3T.
-typedef DefaultSizeClassMap SizeClassMap;
-#elif defined(__sparc__)
+# elif defined(__sparc__)
 const uptr kAllocatorSpace = ~(uptr)0;
 const uptr kAllocatorSize = 0x20000000000ULL; // 2T.
-typedef DefaultSizeClassMap SizeClassMap;
 # elif SANITIZER_WINDOWS
 const uptr kAllocatorSpace = ~(uptr)0;
 const uptr kAllocatorSize = 0x8000000000ULL; // 500G
-typedef DefaultSizeClassMap SizeClassMap;
 # else
 const uptr kAllocatorSpace = 0x600000000000ULL;
 const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
-typedef DefaultSizeClassMap SizeClassMap;
 # endif
+typedef DefaultSizeClassMap SizeClassMap;
+static inline uptr getKNumClasses() { return SizeClassMap::kNumClasses; }
+static inline uptr getKMaxSize() { return SizeClassMap::kMaxSize; }
+static inline uptr getClassID(uptr size) { return SizeClassMap::ClassID(size); }
+static const uptr kMaxNumberOfSizeClasses = SizeClassMap::kNumClasses;
 template <typename AddressSpaceViewTy>
 struct AP64 { // Allocator64 parameters. Deliberately using a short name.
   static const uptr kSpaceBeg = kAllocatorSpace;
@@ -164,9 +369,12 @@
 template <typename AddressSpaceView>
 using PrimaryAllocatorASVT = SizeClassAllocator64<AP64<AddressSpaceView>>;
-using PrimaryAllocator = PrimaryAllocatorASVT<LocalAddressSpaceView>;
 #else // Fallback to SizeClassAllocator32.
 typedef CompactSizeClassMap SizeClassMap;
+static inline uptr getKNumClasses() { return SizeClassMap::kNumClasses; }
+static inline uptr getKMaxSize() { return SizeClassMap::kMaxSize; }
+static inline uptr getClassID(uptr size) { return SizeClassMap::ClassID(size); }
+static const uptr kMaxNumberOfSizeClasses = SizeClassMap::kNumClasses;
 template <typename AddressSpaceViewTy>
 struct AP32 {
   static const uptr kSpaceBeg = 0;
@@ -180,16 +388,14 @@
 };
 template <typename AddressSpaceView>
 using PrimaryAllocatorASVT = SizeClassAllocator32<AP32<AddressSpaceView> >;
-using PrimaryAllocator = PrimaryAllocatorASVT<LocalAddressSpaceView>;
 #endif // SANITIZER_CAN_USE_ALLOCATOR64
 
-static const uptr kNumberOfSizeClasses = SizeClassMap::kNumClasses;
-
 template <typename AddressSpaceView>
 using AsanAllocatorASVT =
     CombinedAllocator<PrimaryAllocatorASVT<AddressSpaceView>>;
 using AsanAllocator = AsanAllocatorASVT<LocalAddressSpaceView>;
 using AllocatorCache = AsanAllocator::AllocatorCache;
+using PrimaryAllocator = PrimaryAllocatorASVT<LocalAddressSpaceView>;
 
 struct AsanThreadLocalMallocStorage {
   uptr quarantine_cache[16];
Index: compiler-rt/lib/asan/asan_allocator.cc
===================================================================
--- compiler-rt/lib/asan/asan_allocator.cc
+++ compiler-rt/lib/asan/asan_allocator.cc
@@ -31,6 +31,7 @@
 #include "lsan/lsan_common.h"
 
 namespace __asan {
+bool UseAllocator32 = false;
 
 // Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits.
 // We use adaptive redzones: for larger allocation larger redzones are used.
@@ -113,7 +114,7 @@
 struct AsanChunk: ChunkBase {
   uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
   uptr UsedSize(bool locked_version = false) {
-    if (user_requested_size != SizeClassMap::kMaxSize)
+    if (user_requested_size != getKMaxSize())
       return user_requested_size;
     return *reinterpret_cast<uptr *>(
                get_allocator().GetMetaData(AllocBeg(locked_version)));
@@ -277,6 +278,12 @@
   }
 
   void InitLinkerInitialized(const AllocatorOptions &options) {
+    // Check whether the address space is large enough (48-bit VMA) for the
+    // 64-bit allocator; otherwise fall back to the 32-bit one.
+    if (GetMaxVirtualAddress() < (((uptr)1ULL << 48) - 1))
+      UseAllocator32 = true;
+    else
+      UseAllocator32 = false;
     SetAllocatorMayReturnNull(options.may_return_null);
     allocator.InitLinkerInitialized(options.release_to_os_interval_ms);
     SharedInitCode(options);
@@ -499,7 +506,7 @@
       CHECK(allocator.FromPrimary(allocated));
     } else {
       CHECK(!allocator.FromPrimary(allocated));
-      m->user_requested_size = SizeClassMap::kMaxSize;
+      m->user_requested_size = getKMaxSize();
       uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(allocated));
       meta[0] = size;
       meta[1] = chunk_beg;
@@ -524,10 +531,10 @@
     thread_stats.mallocs++;
     thread_stats.malloced += size;
     thread_stats.malloced_redzones += needed_size - size;
-    if (needed_size > SizeClassMap::kMaxSize)
+    if (needed_size > getKMaxSize())
       thread_stats.malloc_large++;
     else
-      thread_stats.malloced_by_size[SizeClassMap::ClassID(needed_size)]++;
+      thread_stats.malloced_by_size[getClassID(needed_size)]++;
 
     void *res = reinterpret_cast<void *>(user_beg);
     if (can_fill && fl.max_malloc_fill_size) {
Index: compiler-rt/lib/asan/asan_stats.h
===================================================================
--- compiler-rt/lib/asan/asan_stats.h
+++ compiler-rt/lib/asan/asan_stats.h
@@ -38,7 +38,7 @@
   uptr munmaps;
   uptr munmaped;
   uptr malloc_large;
-  uptr malloced_by_size[kNumberOfSizeClasses];
+  uptr malloced_by_size[kMaxNumberOfSizeClasses];
 
   // Ctor for global AsanStats (accumulated stats for dead threads).
   explicit AsanStats(LinkerInitialized) { }
Index: compiler-rt/lib/asan/asan_stats.cc
===================================================================
--- compiler-rt/lib/asan/asan_stats.cc
+++ compiler-rt/lib/asan/asan_stats.cc
@@ -30,9 +30,9 @@
 }
 
 static void PrintMallocStatsArray(const char *prefix,
-                                  uptr (&array)[kNumberOfSizeClasses]) {
+                                  uptr *array) {
   Printf("%s", prefix);
-  for (uptr i = 0; i < kNumberOfSizeClasses; i++) {
+  for (uptr i = 0; i < getKNumClasses(); i++) {
     if (!array[i]) continue;
     Printf("%zu:%zu; ", i, array[i]);
   }
Index: compiler-rt/lib/lsan/lsan_allocator.h
===================================================================
--- compiler-rt/lib/lsan/lsan_allocator.h
+++ compiler-rt/lib/lsan/lsan_allocator.h
@@ -49,8 +49,171 @@
   u32 stack_trace_id;
 };
 
-#if defined(__mips64) || defined(__aarch64__) || defined(__i386__) || \
-    defined(__arm__)
+#if defined(__aarch64__)
+
+template <typename AddressSpaceViewTy>
+struct AP32 {
+  static const uptr kSpaceBeg = 0;
+  static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
+  static const uptr kMetadataSize = sizeof(ChunkMetadata);
+  typedef __sanitizer::CompactSizeClassMap SizeClassMap;
+  static const uptr kRegionSizeLog = 20;
+  using AddressSpaceView = AddressSpaceViewTy;
+  typedef NoOpMapUnmapCallback MapUnmapCallback;
+  static const uptr kFlags = 0;
+};
+
+const uptr kAllocatorSpace = 0x600000000000ULL;
+const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
+
+template <typename AddressSpaceViewTy>
+struct AP64 { // Allocator64 parameters. Deliberately using a short name.
+  static const uptr kSpaceBeg = kAllocatorSpace;
+  static const uptr kSpaceSize = kAllocatorSize;
+  static const uptr kMetadataSize = sizeof(ChunkMetadata);
+  typedef DefaultSizeClassMap SizeClassMap;
+  typedef NoOpMapUnmapCallback MapUnmapCallback;
+  static const uptr kFlags = 0;
+  using AddressSpaceView = AddressSpaceViewTy;
+};
+
+template <typename AddressSpaceView>
+using Allocator32ASVT = SizeClassAllocator32<AP32<AddressSpaceView>>;
+template <typename AddressSpaceView>
+using Allocator64ASVT = SizeClassAllocator64<AP64<AddressSpaceView>>;
+
+using Allocator32 = Allocator32ASVT<LocalAddressSpaceView>;
+using Allocator64 = Allocator64ASVT<LocalAddressSpaceView>;
+
+extern bool UseAllocator32;
+
+template <class A32, class A64>
+class DoubleAllocator {
+  A32 a32;
+  A64 a64;
+
+ public:
+  class DoubleAllocatorCache {
+    typename A32::AllocatorCache a32;
+    typename A64::AllocatorCache a64;
+
+   public:
+    void Init(AllocatorGlobalStats *s) {
+      if (UseAllocator32)
+        a32.Init(s);
+      else
+        a64.Init(s);
+    }
+    void *Allocate(DoubleAllocator *allocator, uptr class_id) {
+      if (UseAllocator32)
+        return a32.Allocate(&allocator->a32, class_id);
+      return a64.Allocate(&allocator->a64, class_id);
+    }
+
+    void Deallocate(DoubleAllocator *allocator, uptr class_id, void *p) {
+      if (UseAllocator32)
+        a32.Deallocate(&allocator->a32, class_id, p);
+      else
+        a64.Deallocate(&allocator->a64, class_id, p);
+    }
+
+    void Drain(DoubleAllocator *allocator) {
+      if (UseAllocator32)
+        a32.Drain(&allocator->a32);
+      else
+        a64.Drain(&allocator->a64);
+    }
+
+    void Destroy(DoubleAllocator *allocator, AllocatorGlobalStats *s) {
+      if (UseAllocator32)
+        a32.Destroy(&allocator->a32, s);
+      else
+        a64.Destroy(&allocator->a64, s);
+    }
+  };
+
+  using MapUnmapCallback = Allocator32::MapUnmapCallback;
+  using AddressSpaceView = Allocator32::AddressSpaceView;
+  using AllocatorCache = DoubleAllocatorCache;
+
+  void Init(s32 release_to_os_interval_ms) {
+    if (UseAllocator32)
+      a32.Init(release_to_os_interval_ms);
+    else
+      a64.Init(release_to_os_interval_ms);
+  }
+
+  static bool CanAllocate(uptr size, uptr alignment) {
+    if (UseAllocator32)
+      return A32::CanAllocate(size, alignment);
+    return A64::CanAllocate(size, alignment);
+  }
+
+  static uptr ClassID(uptr size) {
+    if (UseAllocator32)
+      return A32::ClassID(size);
+    return A64::ClassID(size);
+  }
+
+  bool PointerIsMine(const void *p) {
+    if (UseAllocator32)
+      return a32.PointerIsMine(p);
+    return a64.PointerIsMine(p);
+  }
+
+  void *GetMetaData(const void *p) {
+    if (UseAllocator32)
+      return a32.GetMetaData(p);
+    return a64.GetMetaData(p);
+  }
+
+  uptr GetSizeClass(const void *p) {
+    if (UseAllocator32)
+      return a32.GetSizeClass(p);
+    return a64.GetSizeClass(p);
+  }
+
+  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
+    if (UseAllocator32)
+      a32.ForEachChunk(callback, arg);
+    else
+      a64.ForEachChunk(callback, arg);
+  }
+
+  void TestOnlyUnmap() {
+    if (UseAllocator32)
+      a32.TestOnlyUnmap();
+    else
+      a64.TestOnlyUnmap();
+  }
+  void ForceLock() {
+    if (UseAllocator32)
+      a32.ForceLock();
+    else
+      a64.ForceLock();
+  }
+  void ForceUnlock() {
+    if (UseAllocator32)
+      a32.ForceUnlock();
+    else
+      a64.ForceUnlock();
+  }
+  void *GetBlockBegin(const void *p) {
+    if (UseAllocator32)
+      return a32.GetBlockBegin(p);
+    return a64.GetBlockBegin(p);
+  }
+  uptr GetActuallyAllocatedSize(void *p) {
+    if (UseAllocator32)
+      return a32.GetActuallyAllocatedSize(p);
+    return a64.GetActuallyAllocatedSize(p);
+  }
+};
+
+template <typename AddressSpaceView>
+using PrimaryAllocatorASVT = DoubleAllocator<Allocator32ASVT<AddressSpaceView>,
+                                             Allocator64ASVT<AddressSpaceView>>;
+#elif defined(__mips64) || defined(__i386__) || defined(__arm__)
 template <typename AddressSpaceViewTy>
 struct AP32 {
   static const uptr kSpaceBeg = 0;
@@ -64,7 +227,6 @@
 };
 template <typename AddressSpaceView>
 using PrimaryAllocatorASVT = SizeClassAllocator32<AP32<AddressSpaceView>>;
-using PrimaryAllocator = PrimaryAllocatorASVT<LocalAddressSpaceView>;
 #elif defined(__x86_64__) || defined(__powerpc64__)
 # if defined(__powerpc64__)
 const uptr kAllocatorSpace = 0xa0000000000ULL;
@@ -86,13 +248,13 @@
 template <typename AddressSpaceView>
 using PrimaryAllocatorASVT = SizeClassAllocator64<AP64<AddressSpaceView>>;
-using PrimaryAllocator = PrimaryAllocatorASVT<LocalAddressSpaceView>;
 #endif
 
 template <typename AddressSpaceView>
 using AllocatorASVT = CombinedAllocator<PrimaryAllocatorASVT<AddressSpaceView>>;
 using Allocator = AllocatorASVT<LocalAddressSpaceView>;
 using AllocatorCache = Allocator::AllocatorCache;
+using PrimaryAllocator = PrimaryAllocatorASVT<LocalAddressSpaceView>;
 
 Allocator::AllocatorCache *GetAllocatorCache();
Index: compiler-rt/lib/lsan/lsan_allocator.cc
===================================================================
--- compiler-rt/lib/lsan/lsan_allocator.cc
+++ compiler-rt/lib/lsan/lsan_allocator.cc
@@ -35,8 +35,16 @@
 #endif
 
 static Allocator allocator;
+bool UseAllocator32 = false;
 
 void InitializeAllocator() {
+  // Check whether the address space is large enough (48-bit VMA) for the
+  // 64-bit allocator; otherwise fall back to the 32-bit one.
+  if (GetMaxVirtualAddress() < (((uptr)1 << 48) - 1))
+    UseAllocator32 = true;
+  else
+    UseAllocator32 = false;
+
   SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
   allocator.InitLinkerInitialized(
       common_flags()->allocator_release_to_os_interval_ms);
Index: compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
===================================================================
--- compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
+++ compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
@@ -690,6 +690,123 @@
   TestCombinedAllocator<Allocator64Dynamic>();
 }
 
+static bool use1 = false;
+
+template <class A1, class A2>
+class DoubleAllocator {
+  A1 a1;
+  A2 a2;
+
+ public:
+  class DoubleAllocatorCache {
+    typename A1::AllocatorCache a1;
+    typename A2::AllocatorCache a2;
+
+   public:
+    void Init(AllocatorGlobalStats *s) {
+      if (use1)
+        return a1.Init(s);
+      else
+        return a2.Init(s);
+    }
+    void *Allocate(DoubleAllocator *allocator, uptr class_id) {
+      if (use1)
+        return a1.Allocate(&allocator->a1, class_id);
+      else
+        return a2.Allocate(&allocator->a2, class_id);
+    }
+
+    void Deallocate(DoubleAllocator *allocator, uptr class_id, void *p) {
+      if (use1)
+        return a1.Deallocate(&allocator->a1, class_id, p);
+      else
+        return a2.Deallocate(&allocator->a2, class_id, p);
+    }
+
+    void Drain(DoubleAllocator *allocator) {
+      if (use1)
+        return a1.Drain(&allocator->a1);
+      else
+        return a2.Drain(&allocator->a2);
+    }
+
+    void Destroy(DoubleAllocator *allocator, AllocatorGlobalStats *s) {
+      if (use1)
+        return a1.Destroy(&allocator->a1, s);
+      else
+        return a2.Destroy(&allocator->a2, s);
+    }
+  };
+
+  using MapUnmapCallback = Allocator32Compact::MapUnmapCallback;
+  using AddressSpaceView = Allocator32Compact::AddressSpaceView;
+  using AllocatorCache = DoubleAllocatorCache;
+
+  void Init(s32 release_to_os_interval_ms) {
+    if (use1)
+      return a1.Init(release_to_os_interval_ms);
+    else
+      return a2.Init(release_to_os_interval_ms);
+  }
+
+  static bool CanAllocate(uptr size, uptr alignment) {
+    if (use1)
+      return A1::CanAllocate(size, alignment);
+    else
+      return A2::CanAllocate(size, alignment);
+  }
+
+  static uptr ClassID(uptr size) {
+    if (use1)
+      return A1::ClassID(size);
+    else
+      return A2::ClassID(size);
+  }
+
+  bool PointerIsMine(const void *p) {
+    if (use1)
+      return a1.PointerIsMine(p);
+    else
+      return a2.PointerIsMine(p);
+  }
+
+  void *GetMetaData(const void *p) {
+    if (use1)
+      return a1.GetMetaData(p);
+    else
+      return a2.GetMetaData(p);
+  }
+
+  uptr GetSizeClass(const void *p) {
+    if (use1)
+      return a1.GetSizeClass(p);
+    else
+      return a2.GetSizeClass(p);
+  }
+
+  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
+    if (use1)
+      return a1.ForEachChunk(callback, arg);
+    else
+      return a2.ForEachChunk(callback, arg);
+  }
+
+  void TestOnlyUnmap() {
+    if (use1)
+      return a1.TestOnlyUnmap();
+    else
+      return a2.TestOnlyUnmap();
+  }
+};
+
+TEST(SanitizerCommon, CombinedDoubleAllocator) {
+  TestCombinedAllocator<
+      DoubleAllocator<Allocator64, Allocator32Compact>>();
+  use1 = true;
+  TestCombinedAllocator<
+      DoubleAllocator<Allocator64, Allocator32Compact>>();
+}
+
 #if !SANITIZER_ANDROID
 TEST(SanitizerCommon, CombinedAllocator64Compact) {
   TestCombinedAllocator<Allocator64Compact>();
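
For reviewers unfamiliar with the pattern, here is a stripped-down, standalone analogue of the DoubleAllocator introduced by this patch: two backend allocators live behind one interface, and a global flag, set once at startup from the detected address-space size, decides which one every call forwards to. This is only an illustrative sketch, not part of the patch; SmallVAAllocator, LargeVAAllocator and the faked VMA probe are hypothetical stand-ins, whereas the real code dispatches on UseAllocator32 derived from GetMaxVirtualAddress() and forwards to SizeClassAllocator32/SizeClassAllocator64.

// Standalone sketch of the runtime-dispatch idea (hypothetical types).
#include <cstddef>
#include <cstdint>
#include <cstdio>

struct SmallVAAllocator {  // stand-in for the 32-bit-style primary allocator
  void Init() { std::puts("init 32-bit-style allocator"); }
  void *Allocate(std::size_t size) { return ::operator new(size); }
  void Deallocate(void *p) { ::operator delete(p); }
};

struct LargeVAAllocator {  // stand-in for the 64-bit-style primary allocator
  void Init() { std::puts("init 64-bit-style allocator"); }
  void *Allocate(std::size_t size) { return ::operator new(size); }
  void Deallocate(void *p) { ::operator delete(p); }
};

// Set once at startup, before any allocation, like UseAllocator32 above.
static bool UseSmallVA = false;

// Both backends are members; every method checks the flag and forwards.
template <class A32, class A64>
class DoubleAllocator {
  A32 a32;
  A64 a64;

 public:
  void Init() {
    if (UseSmallVA)
      a32.Init();
    else
      a64.Init();
  }
  void *Allocate(std::size_t size) {
    if (UseSmallVA)
      return a32.Allocate(size);
    return a64.Allocate(size);
  }
  void Deallocate(void *p) {
    if (UseSmallVA)
      a32.Deallocate(p);
    else
      a64.Deallocate(p);
  }
};

int main() {
  // The patch derives the flag from GetMaxVirtualAddress(); here we fake a
  // 39-bit address space, too small for the 64-bit-style backend.
  const std::uint64_t max_va = (1ULL << 39) - 1;
  UseSmallVA = max_va < ((1ULL << 48) - 1);

  DoubleAllocator<SmallVAAllocator, LargeVAAllocator> allocator;
  allocator.Init();
  void *p = allocator.Allocate(64);
  allocator.Deallocate(p);
  return 0;
}

The key design constraint the sketch mirrors is that the flag must be fixed before the first allocation: a chunk handed out by one backend cannot be freed or described by the other, which is why the patch sets UseAllocator32 in InitLinkerInitialized()/InitializeAllocator() and never changes it afterwards.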