diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_combined.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_combined.h
--- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_combined.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_combined.h
@@ -35,9 +35,9 @@
     secondary_.InitLinkerInitialized();
   }
 
-  void Init(s32 release_to_os_interval_ms) {
+  void Init(s32 release_to_os_interval_ms, uptr heap_start = 0) {
     stats_.Init();
-    primary_.Init(release_to_os_interval_ms);
+    primary_.Init(release_to_os_interval_ms, heap_start);
     secondary_.Init();
   }
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary32.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary32.h
--- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary32.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary32.h
@@ -119,7 +119,8 @@
   typedef SizeClassAllocator32<Params> ThisT;
   typedef SizeClassAllocator32LocalCache<ThisT> AllocatorCache;
 
-  void Init(s32 release_to_os_interval_ms) {
+  void Init(s32 release_to_os_interval_ms, uptr heap_start = 0) {
+    CHECK(!heap_start);
     possible_regions.Init();
     internal_memset(size_class_info_array, 0, sizeof(size_class_info_array));
   }
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h
--- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h
@@ -69,25 +69,45 @@
     return base + (static_cast<uptr>(ptr32) << kCompactPtrScale);
   }
 
-  void Init(s32 release_to_os_interval_ms) {
+  // If heap_start is nonzero, assumes kSpaceSize bytes are already mapped R/W
+  // at heap_start and places the heap there. This mode requires kSpaceBeg ==
+  // ~(uptr)0.
+  void Init(s32 release_to_os_interval_ms, uptr heap_start = 0) {
     uptr TotalSpaceSize = kSpaceSize + AdditionalSize();
-    if (kUsingConstantSpaceBeg) {
-      CHECK(IsAligned(kSpaceBeg, SizeClassMap::kMaxSize));
-      CHECK_EQ(kSpaceBeg, address_range.Init(TotalSpaceSize,
-                                             PrimaryAllocatorName, kSpaceBeg));
+    PremappedHeap = heap_start != 0;
+    if (PremappedHeap) {
+      CHECK(!kUsingConstantSpaceBeg);
+      NonConstSpaceBeg = heap_start;
+      uptr RegionInfoSize = AdditionalSize();
+      RegionInfoSpace =
+          address_range.Init(RegionInfoSize, PrimaryAllocatorName);
+      CHECK_NE(RegionInfoSpace, ~(uptr)0);
+      CHECK_EQ(RegionInfoSpace,
+               address_range.MapOrDie(RegionInfoSpace, RegionInfoSize,
+                                      "SizeClassAllocator: region info"));
+      MapUnmapCallback().OnMap(RegionInfoSpace, RegionInfoSize);
     } else {
-      // Combined allocator expects that an 2^N allocation is always aligned to
-      // 2^N. For this to work, the start of the space needs to be aligned as
-      // high as the largest size class (which also needs to be a power of 2).
-      NonConstSpaceBeg = address_range.InitAligned(
-          TotalSpaceSize, SizeClassMap::kMaxSize, PrimaryAllocatorName);
-      CHECK_NE(NonConstSpaceBeg, ~(uptr)0);
+      if (kUsingConstantSpaceBeg) {
+        CHECK(IsAligned(kSpaceBeg, SizeClassMap::kMaxSize));
+        CHECK_EQ(kSpaceBeg,
+                 address_range.Init(TotalSpaceSize, PrimaryAllocatorName,
+                                    kSpaceBeg));
+      } else {
+        // Combined allocator expects that a 2^N allocation is always aligned
+        // to 2^N. For this to work, the start of the space needs to be aligned
+        // as high as the largest size class (which also needs to be a power of
+        // 2).
+        NonConstSpaceBeg = address_range.InitAligned(
+            TotalSpaceSize, SizeClassMap::kMaxSize, PrimaryAllocatorName);
+        CHECK_NE(NonConstSpaceBeg, ~(uptr)0);
+      }
+      RegionInfoSpace = SpaceEnd();
+      MapWithCallbackOrDie(RegionInfoSpace, AdditionalSize(),
+                           "SizeClassAllocator: region info");
     }
     SetReleaseToOSIntervalMs(release_to_os_interval_ms);
-    MapWithCallbackOrDie(SpaceEnd(), AdditionalSize(),
-                         "SizeClassAllocator: region info");
     // Check that the RegionInfo array is aligned on the CacheLine size.
-    DCHECK_EQ(SpaceEnd() % kCacheLineSize, 0);
+    DCHECK_EQ(RegionInfoSpace % kCacheLineSize, 0);
   }
 
   s32 ReleaseToOSIntervalMs() const {
@@ -596,6 +616,11 @@
 
   atomic_sint32_t release_to_os_interval_ms_;
 
+  uptr RegionInfoSpace;
+
+  // True if the user has already mapped the entire heap R/W.
+  bool PremappedHeap;
+
   struct Stats {
     uptr n_allocated;
     uptr n_freed;
@@ -625,7 +650,7 @@
   RegionInfo *GetRegionInfo(uptr class_id) const {
     DCHECK_LT(class_id, kNumClasses);
-    RegionInfo *regions = reinterpret_cast<RegionInfo *>(SpaceEnd());
+    RegionInfo *regions = reinterpret_cast<RegionInfo *>(RegionInfoSpace);
     return &regions[class_id];
   }
@@ -650,6 +675,9 @@
   }
 
   bool MapWithCallback(uptr beg, uptr size, const char *name) {
+    if (PremappedHeap)
+      return beg >= NonConstSpaceBeg &&
+             beg + size <= NonConstSpaceBeg + kSpaceSize;
     uptr mapped = address_range.Map(beg, size, name);
     if (UNLIKELY(!mapped))
       return false;
@@ -659,11 +687,18 @@
   }
 
   void MapWithCallbackOrDie(uptr beg, uptr size, const char *name) {
+    if (PremappedHeap) {
+      CHECK_GE(beg, NonConstSpaceBeg);
+      CHECK_LE(beg + size, NonConstSpaceBeg + kSpaceSize);
+      return;
+    }
     CHECK_EQ(beg, address_range.MapOrDie(beg, size, name));
     MapUnmapCallback().OnMap(beg, size);
   }
 
   void UnmapWithCallbackOrDie(uptr beg, uptr size) {
+    if (PremappedHeap)
+      return;
     MapUnmapCallback().OnUnmap(beg, size);
     address_range.Unmap(beg, size);
   }
@@ -832,6 +867,9 @@
 
   // Attempts to release RAM occupied by freed chunks back to OS. The region is
   // expected to be locked.
+  //
+  // TODO(morehouse): Support a callback on memory release so HWASan can release
+  // aliases as well.
   void MaybeReleaseToOS(uptr class_id, bool force) {
     RegionInfo *region = GetRegionInfo(class_id);
     const uptr chunk_size = ClassIdToSize(class_id);
diff --git a/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator_test.cpp b/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator_test.cpp
--- a/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator_test.cpp
+++ b/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator_test.cpp
@@ -196,9 +196,9 @@
 }
 
 template <class Allocator>
-void TestSizeClassAllocator() {
+void TestSizeClassAllocator(uptr premapped_heap = 0) {
   Allocator *a = new Allocator;
-  a->Init(kReleaseToOSIntervalNever);
+  a->Init(kReleaseToOSIntervalNever, premapped_heap);
   typename Allocator::AllocatorCache cache;
   memset(&cache, 0, sizeof(cache));
   cache.Init(0);
@@ -265,6 +265,25 @@
 }
 
 #if SANITIZER_CAN_USE_ALLOCATOR64
+
+// Allocates kAllocatorSize-aligned memory on construction and frees it on
+// destruction.
+class ScopedPremappedHeap {
+ public:
+  ScopedPremappedHeap() {
+    BasePtr = MmapNoReserveOrDie(2 * kAllocatorSize, "preallocated heap");
+    AlignedAddr = RoundUpTo(reinterpret_cast<uptr>(BasePtr), kAllocatorSize);
+  }
+
+  ~ScopedPremappedHeap() { UnmapOrDie(BasePtr, 2 * kAllocatorSize); }
+
+  uptr Addr() { return AlignedAddr; }
+
+ private:
+  void *BasePtr;
+  uptr AlignedAddr;
+};
+
 // These tests can fail on Windows if memory is somewhat full and lit happens
 // to run them all at the same time. FIXME: Make them not flaky and reenable.
 #if !SANITIZER_WINDOWS
@@ -277,6 +296,13 @@
 }
 
 #if !SANITIZER_ANDROID
+// Android only has a 39-bit address space, so mapping 2 * kAllocatorSize
+// sometimes fails.
+TEST(SanitizerCommon, SizeClassAllocator64DynamicPremapped) {
+  ScopedPremappedHeap h;
+  TestSizeClassAllocator<Allocator64Dynamic>(h.Addr());
+}
+
 //FIXME(kostyak): find values so that those work on Android as well.
 TEST(SanitizerCommon, SizeClassAllocator64Compact) {
   TestSizeClassAllocator<Allocator64Compact>();
 }
@@ -320,9 +346,9 @@
 }
 
 template <class Allocator>
-void SizeClassAllocatorMetadataStress() {
+void SizeClassAllocatorMetadataStress(uptr premapped_heap = 0) {
   Allocator *a = new Allocator;
-  a->Init(kReleaseToOSIntervalNever);
+  a->Init(kReleaseToOSIntervalNever, premapped_heap);
   typename Allocator::AllocatorCache cache;
   memset(&cache, 0, sizeof(cache));
   cache.Init(0);
@@ -362,6 +388,11 @@
 }
 
 #if !SANITIZER_ANDROID
+TEST(SanitizerCommon, SizeClassAllocator64DynamicPremappedMetadataStress) {
+  ScopedPremappedHeap h;
+  SizeClassAllocatorMetadataStress<Allocator64Dynamic>(h.Addr());
+}
+
 TEST(SanitizerCommon, SizeClassAllocator64CompactMetadataStress) {
   SizeClassAllocatorMetadataStress<Allocator64Compact>();
 }
@@ -374,9 +405,10 @@
 }
 
 template <class Allocator>
-void SizeClassAllocatorGetBlockBeginStress(u64 TotalSize) {
+void SizeClassAllocatorGetBlockBeginStress(u64 TotalSize,
+                                           uptr premapped_heap = 0) {
   Allocator *a = new Allocator;
-  a->Init(kReleaseToOSIntervalNever);
+  a->Init(kReleaseToOSIntervalNever, premapped_heap);
   typename Allocator::AllocatorCache cache;
   memset(&cache, 0, sizeof(cache));
   cache.Init(0);
@@ -409,6 +441,11 @@
       1ULL << (SANITIZER_ANDROID ? 31 : 33));
 }
 #if !SANITIZER_ANDROID
+TEST(SanitizerCommon, SizeClassAllocator64DynamicPremappedGetBlockBegin) {
+  ScopedPremappedHeap h;
+  SizeClassAllocatorGetBlockBeginStress<Allocator64Dynamic>(
+      1ULL << (SANITIZER_ANDROID ? 31 : 33), h.Addr());
+}
 TEST(SanitizerCommon, SizeClassAllocator64CompactGetBlockBegin) {
   SizeClassAllocatorGetBlockBeginStress<Allocator64Compact>(1ULL << 33);
 }
@@ -624,10 +661,10 @@
 }
 
 template <class PrimaryAllocator>
-void TestCombinedAllocator() {
+void TestCombinedAllocator(uptr premapped_heap = 0) {
   typedef CombinedAllocator<PrimaryAllocator> Allocator;
   Allocator *a = new Allocator;
-  a->Init(kReleaseToOSIntervalNever);
+  a->Init(kReleaseToOSIntervalNever, premapped_heap);
   std::mt19937 r;
 
   typename Allocator::AllocatorCache cache;
@@ -699,6 +736,14 @@
 }
 
 #if !SANITIZER_ANDROID
+#if !SANITIZER_WINDOWS
+// Windows fails to map 1TB, so disable this test.
+TEST(SanitizerCommon, CombinedAllocator64DynamicPremapped) {
+  ScopedPremappedHeap h;
+  TestCombinedAllocator<Allocator64Dynamic>(h.Addr());
+}
+#endif
+
 TEST(SanitizerCommon, CombinedAllocator64Compact) {
   TestCombinedAllocator<Allocator64Compact>();
 }
@@ -714,12 +759,12 @@
 }
 
 template <class Allocator>
-void TestSizeClassAllocatorLocalCache() {
+void TestSizeClassAllocatorLocalCache(uptr premapped_heap = 0) {
   using AllocatorCache = typename Allocator::AllocatorCache;
   AllocatorCache cache;
   Allocator *a = new Allocator();
 
-  a->Init(kReleaseToOSIntervalNever);
+  a->Init(kReleaseToOSIntervalNever, premapped_heap);
   memset(&cache, 0, sizeof(cache));
   cache.Init(0);
@@ -760,6 +805,11 @@
 }
 
 #if !SANITIZER_ANDROID
+TEST(SanitizerCommon, SizeClassAllocator64DynamicPremappedLocalCache) {
+  ScopedPremappedHeap h;
+  TestSizeClassAllocatorLocalCache<Allocator64Dynamic>(h.Addr());
+}
+
 TEST(SanitizerCommon, SizeClassAllocator64CompactLocalCache) {
   TestSizeClassAllocatorLocalCache<Allocator64Compact>();
 }
@@ -891,9 +941,9 @@
 }
 
 template <class Allocator>
-void TestSizeClassAllocatorIteration() {
+void TestSizeClassAllocatorIteration(uptr premapped_heap = 0) {
   Allocator *a = new Allocator;
-  a->Init(kReleaseToOSIntervalNever);
+  a->Init(kReleaseToOSIntervalNever, premapped_heap);
   typename Allocator::AllocatorCache cache;
   memset(&cache, 0, sizeof(cache));
   cache.Init(0);
@@ -942,6 +992,12 @@
 TEST(SanitizerCommon, SizeClassAllocator64DynamicIteration) {
   TestSizeClassAllocatorIteration<Allocator64Dynamic>();
 }
+#if !SANITIZER_ANDROID
+TEST(SanitizerCommon, SizeClassAllocator64DynamicPremappedIteration) {
+  ScopedPremappedHeap h;
+  TestSizeClassAllocatorIteration<Allocator64Dynamic>(h.Addr());
+}
+#endif
 #endif
 #endif
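
Usage sketch (not part of the patch): the new optional heap_start argument lets a client that has already reserved the heap region, e.g. HWASan per the TODO above, hand that region to the primary allocator instead of letting it map its own space. The sketch below mirrors the ScopedPremappedHeap test fixture; MyAP64, kMyHeapSize, and InitWithPremappedHeap are illustrative assumptions rather than names introduced by this change, and the only requirement the patch itself imposes is a dynamic space base (kSpaceBeg == ~(uptr)0).

// Illustrative caller of the premapped-heap mode; names noted above are
// assumptions, not APIs added by this patch.
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_common.h"

using namespace __sanitizer;

// Assumption: a 1 TB primary space, as in the 64-bit test configs.
static const uptr kMyHeapSize = 0x10000000000ULL;

// Assumption: allocator params modeled on the AP64 structs in the tests.
struct MyAP64 {
  static const uptr kSpaceBeg = ~(uptr)0;  // dynamic base, required here
  static const uptr kSpaceSize = kMyHeapSize;
  static const uptr kMetadataSize = 16;
  typedef DefaultSizeClassMap SizeClassMap;
  typedef NoOpMapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};
typedef SizeClassAllocator64<MyAP64> MyPrimary;

static MyPrimary my_primary;

void InitWithPremappedHeap() {
  // Reserve twice the space size so a kSpaceSize-aligned start can be carved
  // out, exactly as ScopedPremappedHeap does in the tests above.
  void *base = MmapNoReserveOrDie(2 * kMyHeapSize, "premapped heap");
  uptr heap_start = RoundUpTo(reinterpret_cast<uptr>(base), kMyHeapSize);

  // With heap_start != 0 the allocator places the heap at heap_start and only
  // maps the RegionInfo array itself.
  my_primary.Init(kReleaseToOSIntervalNever, heap_start);
}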