Index: compiler-rt/trunk/lib/asan/asan_allocator.cc
===================================================================
--- compiler-rt/trunk/lib/asan/asan_allocator.cc
+++ compiler-rt/trunk/lib/asan/asan_allocator.cc
@@ -398,7 +398,7 @@
   if (UNLIKELY(!asan_inited))
     AsanInitFromRtl();
   if (RssLimitExceeded())
-    return AsanAllocator::FailureHandler::OnOOM();
+    return ReturnNullOrDieOnFailure::OnOOM();
   Flags &fl = *flags();
   CHECK(stack);
   const uptr min_alignment = SHADOW_GRANULARITY;
@@ -433,7 +433,7 @@
   if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
     Report("WARNING: AddressSanitizer failed to allocate 0x%zx bytes\n",
            (void*)size);
-    return AsanAllocator::FailureHandler::OnBadRequest();
+    return ReturnNullOrDieOnFailure::OnBadRequest();
   }
 
   AsanThread *t = GetCurrentThread();
@@ -446,8 +446,8 @@
     AllocatorCache *cache = &fallback_allocator_cache;
     allocated = allocator.Allocate(cache, needed_size, 8);
   }
-  if (!allocated)
-    return nullptr;
+  if (UNLIKELY(!allocated))
+    return ReturnNullOrDieOnFailure::OnOOM();
 
   if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && CanPoisonMemory()) {
     // Heap poisoning is enabled, but the allocator provides an unpoisoned
@@ -660,8 +660,8 @@
   }
 
   void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
-    if (CheckForCallocOverflow(size, nmemb))
-      return AsanAllocator::FailureHandler::OnBadRequest();
+    if (UNLIKELY(CheckForCallocOverflow(size, nmemb)))
+      return ReturnNullOrDieOnFailure::OnBadRequest();
     void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
     // If the memory comes from the secondary allocator no need to clear it
     // as it comes directly from mmap.
@@ -883,7 +883,7 @@
   uptr PageSize = GetPageSizeCached();
   if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
     errno = errno_ENOMEM;
-    return AsanAllocator::FailureHandler::OnBadRequest();
+    return ReturnNullOrDieOnFailure::OnBadRequest();
   }
   // pvalloc(0) should allocate one page.
   size = size ? RoundUpTo(size, PageSize) : PageSize;
@@ -895,7 +895,7 @@
                     AllocType alloc_type) {
   if (UNLIKELY(!IsPowerOfTwo(alignment))) {
     errno = errno_EINVAL;
-    return AsanAllocator::FailureHandler::OnBadRequest();
+    return ReturnNullOrDieOnFailure::OnBadRequest();
   }
   return SetErrnoOnNull(
       instance.Allocate(size, alignment, stack, alloc_type, true));
 }
@@ -904,7 +904,7 @@
 int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
                         BufferedStackTrace *stack) {
   if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
-    AsanAllocator::FailureHandler::OnBadRequest();
+    ReturnNullOrDieOnFailure::OnBadRequest();
     return errno_EINVAL;
   }
   void *ptr = instance.Allocate(size, alignment, stack, FROM_MALLOC, true);
Index: compiler-rt/trunk/lib/hwasan/hwasan_allocator.cc
===================================================================
--- compiler-rt/trunk/lib/hwasan/hwasan_allocator.cc
+++ compiler-rt/trunk/lib/hwasan/hwasan_allocator.cc
@@ -128,7 +128,7 @@
   if (size > kMaxAllowedMallocSize) {
     Report("WARNING: HWAddressSanitizer failed to allocate %p bytes\n",
            (void *)size);
-    return Allocator::FailureHandler::OnBadRequest();
+    return ReturnNullOrDieOnFailure::OnBadRequest();
   }
   HwasanThread *t = GetCurrentThread();
   void *allocated;
@@ -140,6 +140,8 @@
     AllocatorCache *cache = &fallback_allocator_cache;
     allocated = allocator.Allocate(cache, size, alignment);
   }
+  if (UNLIKELY(!allocated))
+    return ReturnNullOrDieOnFailure::OnOOM();
   Metadata *meta =
       reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
   meta->state = CHUNK_ALLOCATED;
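Note: the two policy classes this patch standardizes on, ReturnNullOrDieOnFailure and DieOnFailure, live in sanitizer_common. A minimal standalone sketch of their contract, with a local bool standing in for the real allocator_may_return_null runtime flag and fprintf/abort standing in for the actual reporting machinery:

    #include <cstdio>
    #include <cstdlib>

    // Stand-in for the allocator_may_return_null common flag.
    static bool allocator_may_return_null = false;

    struct ReturnNullOrDieOnFailure {
      // Bad request (e.g. calloc overflow): return null if the user opted
      // in, otherwise report and die.
      static void *OnBadRequest() {
        if (allocator_may_return_null) return nullptr;
        fprintf(stderr, "allocator is terminating the process\n");
        abort();
      }
      // Genuine out-of-memory follows the same flag.
      static void *OnOOM() {
        if (allocator_may_return_null) return nullptr;
        fprintf(stderr, "allocator is terminating the process\n");
        abort();
      }
    };

    // The runtime-private internal allocator can never tolerate failure.
    struct DieOnFailure {
      static void *OnBadRequest() { abort(); }
      static void *OnOOM() { abort(); }
    };

The shape of the patch follows from this: the failure policy is invoked at the tool-specific call sites above instead of being threaded through the allocator templates as a FailureHandler parameter.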
Index: compiler-rt/trunk/lib/lsan/lsan_allocator.cc
===================================================================
--- compiler-rt/trunk/lib/lsan/lsan_allocator.cc
+++ compiler-rt/trunk/lib/lsan/lsan_allocator.cc
@@ -76,9 +76,11 @@
     size = 1;
   if (size > kMaxAllowedMallocSize) {
     Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", size);
-    return Allocator::FailureHandler::OnBadRequest();
+    return ReturnNullOrDieOnFailure::OnBadRequest();
   }
   void *p = allocator.Allocate(GetAllocatorCache(), size, alignment);
+  if (UNLIKELY(!p))
+    return ReturnNullOrDieOnFailure::OnOOM();
   // Do not rely on the allocator to clear the memory (it's slow).
   if (cleared && allocator.FromPrimary(p))
     memset(p, 0, size);
@@ -90,7 +92,7 @@
 
 static void *Calloc(uptr nmemb, uptr size, const StackTrace &stack) {
   if (UNLIKELY(CheckForCallocOverflow(size, nmemb)))
-    return Allocator::FailureHandler::OnBadRequest();
+    return ReturnNullOrDieOnFailure::OnBadRequest();
   size *= nmemb;
   return Allocate(stack, size, 1, true);
 }
@@ -108,7 +110,7 @@
   if (new_size > kMaxAllowedMallocSize) {
     Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", new_size);
     allocator.Deallocate(GetAllocatorCache(), p);
-    return Allocator::FailureHandler::OnBadRequest();
+    return ReturnNullOrDieOnFailure::OnBadRequest();
   }
   p = allocator.Reallocate(GetAllocatorCache(), p, new_size, alignment);
   RegisterAllocation(stack, p, new_size);
@@ -129,7 +131,7 @@
 void *lsan_memalign(uptr alignment, uptr size, const StackTrace &stack) {
   if (UNLIKELY(!IsPowerOfTwo(alignment))) {
     errno = errno_EINVAL;
-    return Allocator::FailureHandler::OnBadRequest();
+    return ReturnNullOrDieOnFailure::OnBadRequest();
   }
   return SetErrnoOnNull(Allocate(stack, size, alignment, kAlwaysClearMemory));
 }
Index: compiler-rt/trunk/lib/msan/msan_allocator.cc
===================================================================
--- compiler-rt/trunk/lib/msan/msan_allocator.cc
+++ compiler-rt/trunk/lib/msan/msan_allocator.cc
@@ -141,7 +141,7 @@
   if (size > kMaxAllowedMallocSize) {
     Report("WARNING: MemorySanitizer failed to allocate %p bytes\n",
            (void *)size);
-    return Allocator::FailureHandler::OnBadRequest();
+    return ReturnNullOrDieOnFailure::OnBadRequest();
   }
   MsanThread *t = GetCurrentThread();
   void *allocated;
@@ -153,6 +153,8 @@
     AllocatorCache *cache = &fallback_allocator_cache;
     allocated = allocator.Allocate(cache, size, alignment);
   }
+  if (UNLIKELY(!allocated))
+    return ReturnNullOrDieOnFailure::OnOOM();
   Metadata *meta =
       reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
   meta->requested_size = size;
@@ -236,7 +238,7 @@
 
 void *msan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
   if (UNLIKELY(CheckForCallocOverflow(size, nmemb)))
-    return SetErrnoOnNull(Allocator::FailureHandler::OnBadRequest());
+    return SetErrnoOnNull(ReturnNullOrDieOnFailure::OnBadRequest());
   return SetErrnoOnNull(MsanAllocate(stack, nmemb * size, sizeof(u64), true));
 }
 
@@ -258,7 +260,7 @@
   uptr PageSize = GetPageSizeCached();
   if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
     errno = errno_ENOMEM;
-    return Allocator::FailureHandler::OnBadRequest();
+    return ReturnNullOrDieOnFailure::OnBadRequest();
   }
   // pvalloc(0) should allocate one page.
   size = size ? RoundUpTo(size, PageSize) : PageSize;
@@ -268,7 +270,7 @@
 void *msan_aligned_alloc(uptr alignment, uptr size, StackTrace *stack) {
   if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
     errno = errno_EINVAL;
-    return Allocator::FailureHandler::OnBadRequest();
+    return ReturnNullOrDieOnFailure::OnBadRequest();
   }
   return SetErrnoOnNull(MsanAllocate(stack, size, alignment, false));
 }
@@ -276,7 +278,7 @@
 void *msan_memalign(uptr alignment, uptr size, StackTrace *stack) {
   if (UNLIKELY(!IsPowerOfTwo(alignment))) {
     errno = errno_EINVAL;
-    return Allocator::FailureHandler::OnBadRequest();
+    return ReturnNullOrDieOnFailure::OnBadRequest();
   }
   return SetErrnoOnNull(MsanAllocate(stack, size, alignment, false));
 }
@@ -284,7 +286,7 @@
 int msan_posix_memalign(void **memptr, uptr alignment, uptr size,
                         StackTrace *stack) {
   if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
-    Allocator::FailureHandler::OnBadRequest();
+    ReturnNullOrDieOnFailure::OnBadRequest();
     return errno_EINVAL;
   }
   void *ptr = MsanAllocate(stack, size, alignment, false);
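Note: the Calloc paths above test CheckForCallocOverflow(size, nmemb) before ever computing nmemb * size. A sketch of what such a guard has to do (illustrative; the real helper is shared across the tools in sanitizer_common):

    #include <cstdint>

    using uptr = uintptr_t;

    // True if size * n would wrap a uptr. Dividing sidesteps the very
    // overflow being detected; size == 0 can never overflow.
    inline bool CheckForCallocOverflow(uptr size, uptr n) {
      if (!size) return false;
      return n > (uptr)-1 / size;
    }

Callers multiply only after the check passes, which is why msan_calloc and friends can safely pass nmemb * size through to the underlying Allocate.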
Index: compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator.cc
===================================================================
--- compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator.cc
+++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator.cc
@@ -140,8 +140,8 @@
   if (size + sizeof(u64) < size)
     return nullptr;
   void *p = RawInternalAlloc(size + sizeof(u64), cache, alignment);
-  if (!p)
-    return nullptr;
+  if (UNLIKELY(!p))
+    return DieOnFailure::OnOOM();
   ((u64*)p)[0] = kBlockMagic;
   return (char*)p + sizeof(u64);
 }
@@ -155,16 +155,17 @@
   size = size + sizeof(u64);
   CHECK_EQ(kBlockMagic, ((u64*)addr)[0]);
   void *p = RawInternalRealloc(addr, size, cache);
-  if (!p)
-    return nullptr;
+  if (UNLIKELY(!p))
+    return DieOnFailure::OnOOM();
   return (char*)p + sizeof(u64);
 }
 
 void *InternalCalloc(uptr count, uptr size, InternalAllocatorCache *cache) {
   if (UNLIKELY(CheckForCallocOverflow(count, size)))
-    return InternalAllocator::FailureHandler::OnBadRequest();
+    return DieOnFailure::OnBadRequest();
   void *p = InternalAlloc(count * size, cache);
-  if (p) internal_memset(p, 0, count * size);
+  if (LIKELY(p))
+    internal_memset(p, 0, count * size);
   return p;
 }
Index: compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_combined.h
===================================================================
--- compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_combined.h
+++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_combined.h
@@ -24,8 +24,6 @@
           class SecondaryAllocator>  // NOLINT
 class CombinedAllocator {
  public:
-  typedef typename SecondaryAllocator::FailureHandler FailureHandler;
-
   void InitLinkerInitialized(s32 release_to_os_interval_ms) {
     primary_.Init(release_to_os_interval_ms);
     secondary_.InitLinkerInitialized();
@@ -42,8 +40,12 @@
     // Returning 0 on malloc(0) may break a lot of code.
     if (size == 0)
       size = 1;
-    if (size + alignment < size)
-      return FailureHandler::OnBadRequest();
+    if (size + alignment < size) {
+      Report("WARNING: %s: CombinedAllocator allocation overflow: "
+             "0x%zx bytes with 0x%zx alignment requested\n",
+             SanitizerToolName, size, alignment);
+      return nullptr;
+    }
     uptr original_size = size;
     // If alignment requirements are to be fulfilled by the frontend allocator
     // rather than by the primary or secondary, passing an alignment lower than
@@ -62,8 +64,6 @@
       res = cache->Allocate(&primary_, primary_.ClassID(size));
     else
       res = secondary_.Allocate(&stats_, original_size, alignment);
-    if (!res)
-      return FailureHandler::OnOOM();
     if (alignment > 8)
       CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
     return res;
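Note: in CombinedAllocator::Allocate the request may later be rounded up by alignment, so the cheap unsigned-wrap test size + alignment < size is what rejects requests whose aligned size cannot be represented. A self-contained illustration of the idiom:

    #include <cassert>
    #include <cstdint>

    using uptr = uintptr_t;

    // Unsigned arithmetic wraps modulo 2^N, so a sum coming out smaller
    // than one of its operands is a reliable overflow signal.
    bool AlignedSizeOverflows(uptr size, uptr alignment) {
      return size + alignment < size;
    }

    int main() {
      assert(!AlignedSizeOverflows(4096, 8));
      assert(AlignedSizeOverflows((uptr)-1 - 4, 1024));  // would wrap
    }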
Index: compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_internal.h
===================================================================
--- compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_internal.h
+++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_internal.h
@@ -47,7 +47,7 @@
     InternalAllocatorCache;
 
 typedef CombinedAllocator<PrimaryInternalAllocator, InternalAllocatorCache,
-                          LargeMmapAllocator<NoOpMapUnmapCallback, DieOnFailure>
+                          LargeMmapAllocator<NoOpMapUnmapCallback>
                           > InternalAllocator;
 
 void *InternalAlloc(uptr size, InternalAllocatorCache *cache = nullptr,
Index: compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_secondary.h
===================================================================
--- compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_secondary.h
+++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_secondary.h
@@ -17,12 +17,9 @@
 // This class can (de)allocate only large chunks of memory using mmap/unmap.
 // The main purpose of this allocator is to cover large and rare allocation
 // sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
-template <class MapUnmapCallback = NoOpMapUnmapCallback,
-          class FailureHandlerT = ReturnNullOrDieOnFailure>
+template <class MapUnmapCallback = NoOpMapUnmapCallback>
 class LargeMmapAllocator {
  public:
-  typedef FailureHandlerT FailureHandler;
-
   void InitLinkerInitialized() {
     page_size_ = GetPageSizeCached();
   }
@@ -38,12 +35,16 @@
     if (alignment > page_size_)
       map_size += alignment;
     // Overflow.
-    if (map_size < size)
-      return FailureHandler::OnBadRequest();
+    if (map_size < size) {
+      Report("WARNING: %s: LargeMmapAllocator allocation overflow: "
+             "0x%zx bytes with 0x%zx alignment requested\n",
+             SanitizerToolName, map_size, alignment);
+      return nullptr;
+    }
     uptr map_beg = reinterpret_cast<uptr>(
         MmapOrDieOnFatalError(map_size, "LargeMmapAllocator"));
     if (!map_beg)
-      return FailureHandler::OnOOM();
+      return nullptr;
     CHECK(IsAligned(map_beg, page_size_));
     MapUnmapCallback().OnMap(map_beg, map_size);
     uptr map_end = map_beg + map_size;
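Note: map_size above is the page-rounded request plus up to one extra alignment of slack, so map_size < size again signals unsigned wrap-around. The rounding helper is the usual power-of-two RoundUpTo; a sketch under that power-of-two assumption:

    #include <cstdint>

    using uptr = uintptr_t;

    // Round x up to a multiple of boundary (boundary must be a power of
    // two, as the cached page size is).
    constexpr uptr RoundUpTo(uptr x, uptr boundary) {
      return (x + boundary - 1) & ~(boundary - 1);
    }

    static_assert(RoundUpTo(1, 4096) == 4096, "rounds up");
    static_assert(RoundUpTo(8192, 4096) == 8192, "already aligned");

For x within boundary - 1 of the top of the address space the addition wraps and the result comes out smaller than x, which is exactly the condition these overflow checks catch.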
Index: compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
===================================================================
--- compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
+++ compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
@@ -444,7 +444,7 @@
 TEST(SanitizerCommon, LargeMmapAllocatorMapUnmapCallback) {
   TestMapUnmapCallback::map_count = 0;
   TestMapUnmapCallback::unmap_count = 0;
-  LargeMmapAllocator<TestMapUnmapCallback, DieOnFailure> a;
+  LargeMmapAllocator<TestMapUnmapCallback> a;
   a.Init();
   AllocatorStats stats;
   stats.Init();
@@ -482,7 +482,7 @@
 #endif
 
 TEST(SanitizerCommon, LargeMmapAllocator) {
-  LargeMmapAllocator<NoOpMapUnmapCallback, DieOnFailure> a;
+  LargeMmapAllocator<NoOpMapUnmapCallback> a;
   a.Init();
   AllocatorStats stats;
   stats.Init();
@@ -565,7 +565,6 @@
   typedef
       CombinedAllocator<PrimaryAllocator, AllocatorCache, SecondaryAllocator>
       Allocator;
-  SetAllocatorMayReturnNull(true);
   Allocator *a = new Allocator;
   a->Init(kReleaseToOSIntervalNever);
   std::mt19937 r;
@@ -579,11 +578,7 @@
   EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1), (void*)0);
   EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1024), (void*)0);
   EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1023, 1024), (void*)0);
-
-  // Set to false
-  SetAllocatorMayReturnNull(false);
-  EXPECT_DEATH(a->Allocate(&cache, -1, 1),
-               "allocator is terminating the process");
+  EXPECT_EQ(a->Allocate(&cache, -1, 1), (void*)0);
 
   const uptr kNumAllocs = 100000;
   const uptr kNumIter = 10;
@@ -893,7 +888,7 @@
 }
 
 TEST(SanitizerCommon, LargeMmapAllocatorIteration) {
-  LargeMmapAllocator<NoOpMapUnmapCallback, DieOnFailure> a;
+  LargeMmapAllocator<NoOpMapUnmapCallback> a;
   a.Init();
   AllocatorStats stats;
   stats.Init();
@@ -920,7 +915,7 @@
 }
 
 TEST(SanitizerCommon, LargeMmapAllocatorBlockBegin) {
-  LargeMmapAllocator<NoOpMapUnmapCallback, DieOnFailure> a;
+  LargeMmapAllocator<NoOpMapUnmapCallback> a;
   a.Init();
   AllocatorStats stats;
   stats.Init();
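Note: the deleted EXPECT_DEATH makes the behavioral change explicit: CombinedAllocator itself now reports failure as null unconditionally, and dying when allocator_may_return_null is unset becomes the caller's job. A standalone sketch of that division of labor (RawAllocate and ToolAllocate are hypothetical stand-ins, not compiler-rt functions):

    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>

    using uptr = uintptr_t;

    static bool allocator_may_return_null = true;

    // Like the post-patch CombinedAllocator: failure is just a nullptr.
    void *RawAllocate(uptr size) {
      return size > (1ull << 40) ? nullptr : malloc(size);
    }

    // Like the per-tool allocators: applies the failure policy itself.
    void *ToolAllocate(uptr size) {
      void *p = RawAllocate(size);
      if (!p && !allocator_may_return_null) {
        fprintf(stderr, "allocator is terminating the process\n");
        abort();
      }
      return p;
    }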
Index: compiler-rt/trunk/lib/tsan/rtl/tsan_mman.cc
===================================================================
--- compiler-rt/trunk/lib/tsan/rtl/tsan_mman.cc
+++ compiler-rt/trunk/lib/tsan/rtl/tsan_mman.cc
@@ -153,10 +153,10 @@
 void *user_alloc_internal(ThreadState *thr, uptr pc, uptr sz, uptr align,
                           bool signal) {
   if ((sz >= (1ull << 40)) || (align >= (1ull << 40)))
-    return Allocator::FailureHandler::OnBadRequest();
+    return ReturnNullOrDieOnFailure::OnBadRequest();
   void *p = allocator()->Allocate(&thr->proc()->alloc_cache, sz, align);
   if (UNLIKELY(p == 0))
-    return 0;
+    return ReturnNullOrDieOnFailure::OnOOM();
   if (ctx && ctx->initialized)
     OnUserAlloc(thr, pc, (uptr)p, sz, true);
   if (signal)
@@ -179,7 +179,7 @@
 
 void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
   if (UNLIKELY(CheckForCallocOverflow(size, n)))
-    return SetErrnoOnNull(Allocator::FailureHandler::OnBadRequest());
+    return SetErrnoOnNull(ReturnNullOrDieOnFailure::OnBadRequest());
   void *p = user_alloc_internal(thr, pc, n * size);
   if (p)
     internal_memset(p, 0, n * size);
@@ -224,7 +224,7 @@
 void *user_memalign(ThreadState *thr, uptr pc, uptr align, uptr sz) {
   if (UNLIKELY(!IsPowerOfTwo(align))) {
     errno = errno_EINVAL;
-    return Allocator::FailureHandler::OnBadRequest();
+    return ReturnNullOrDieOnFailure::OnBadRequest();
   }
   return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
 }
@@ -232,7 +232,7 @@
 int user_posix_memalign(ThreadState *thr, uptr pc, void **memptr, uptr align,
                         uptr sz) {
   if (UNLIKELY(!CheckPosixMemalignAlignment(align))) {
-    Allocator::FailureHandler::OnBadRequest();
+    ReturnNullOrDieOnFailure::OnBadRequest();
     return errno_EINVAL;
   }
   void *ptr = user_alloc_internal(thr, pc, sz, align);
@@ -246,7 +246,7 @@
 void *user_aligned_alloc(ThreadState *thr, uptr pc, uptr align, uptr sz) {
   if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(align, sz))) {
     errno = errno_EINVAL;
-    return Allocator::FailureHandler::OnBadRequest();
+    return ReturnNullOrDieOnFailure::OnBadRequest();
   }
   return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
 }
@@ -259,7 +259,7 @@
   uptr PageSize = GetPageSizeCached();
   if (UNLIKELY(CheckForPvallocOverflow(sz, PageSize))) {
     errno = errno_ENOMEM;
-    return Allocator::FailureHandler::OnBadRequest();
+    return ReturnNullOrDieOnFailure::OnBadRequest();
   }
   // pvalloc(0) should allocate one page.
   sz = sz ? RoundUpTo(sz, PageSize) : PageSize;
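Note: CheckForPvallocOverflow, used by the asan, msan and tsan pvalloc paths above, only needs to notice that rounding up to a whole page would wrap. A sketch consistent with the RoundUpTo sketch earlier (page_size is assumed to be a power of two):

    #include <cstdint>

    using uptr = uintptr_t;

    // pvalloc rounds the request up to a page multiple; if size sits within
    // page_size - 1 of the top of the address space, that rounding wraps
    // past zero and the request must fail with ENOMEM.
    inline bool CheckForPvallocOverflow(uptr size, uptr page_size) {
      return size + page_size - 1 < size;
    }

On this failure path the callers set errno to ENOMEM themselves, matching the historical pvalloc contract, before invoking the OnBadRequest policy.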