Index: compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_local_cache.h
===================================================================
--- compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_local_cache.h
+++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_local_cache.h
@@ -144,8 +144,10 @@
     CHECK_NE(class_id, 0UL);
     CHECK_LT(class_id, kNumClasses);
     PerClass *c = &per_class_[class_id];
-    if (UNLIKELY(c->count == 0))
-      Refill(allocator, class_id);
+    if (UNLIKELY(c->count == 0)) {
+      if (UNLIKELY(!Refill(allocator, class_id)))
+        return nullptr;
+    }
     stats_.Add(AllocatorStatAllocated, c->class_size);
     void *res = c->batch[--c->count];
     PREFETCH(c->batch[c->count - 1]);
@@ -227,14 +229,17 @@
     Deallocate(allocator, batch_class_id, b);
   }
 
-  NOINLINE void Refill(SizeClassAllocator *allocator, uptr class_id) {
+  NOINLINE bool Refill(SizeClassAllocator *allocator, uptr class_id) {
     InitCache();
     PerClass *c = &per_class_[class_id];
     TransferBatch *b = allocator->AllocateBatch(&stats_, this, class_id);
+    if (UNLIKELY(!b))
+      return false;
     CHECK_GT(b->Count(), 0);
     b->CopyToArray(c->batch);
     c->count = b->Count();
     DestroyBatch(class_id, allocator, b);
+    return true;
   }
 
   NOINLINE void Drain(SizeClassAllocator *allocator, uptr class_id) {
@@ -244,6 +249,10 @@
     uptr first_idx_to_drain = c->count - cnt;
     TransferBatch *b = CreateBatch(
         class_id, allocator, (TransferBatch *)c->batch[first_idx_to_drain]);
+    // Failure to allocate a batch while releasing memory is non-recoverable.
+    // TODO(alekseys): Figure out how to do it without allocating a new batch.
+    if (UNLIKELY(!b))
+      DieOnFailure::OnOOM();
     b->SetFromArray(allocator->GetRegionBeginBySizeClass(class_id),
                     &c->batch[first_idx_to_drain], cnt);
     c->count -= cnt;
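The local cache hunks above change the contract of the fast path: Refill() now reports failure instead of aborting the process, and Allocate() surfaces that failure to the caller as nullptr. Below is a minimal standalone sketch of that contract, not the compiler-rt code itself; FixedPool and Cache are hypothetical names standing in for the size-class allocator and its per-thread cache.

#include <cstddef>
#include <cstdio>

// Stand-in for the backing allocator, which can run out of chunks the way
// SizeClassAllocator::AllocateBatch() can under OOM.
struct FixedPool {
  static const size_t kChunks = 4;
  char storage[kChunks][64];
  size_t used = 0;
  void *TakeChunk() { return used < kChunks ? storage[used++] : nullptr; }
};

struct Cache {
  void *batch[FixedPool::kChunks];
  size_t count = 0;

  // Mirrors the patched Refill(): return false on OOM instead of dying.
  bool Refill(FixedPool *pool) {
    void *chunk = pool->TakeChunk();
    if (!chunk)
      return false;
    batch[count++] = chunk;
    return true;
  }

  // Mirrors the patched Allocate(): nullptr now means out of memory.
  void *Allocate(FixedPool *pool) {
    if (count == 0 && !Refill(pool))
      return nullptr;
    return batch[--count];
  }
};

int main() {
  FixedPool pool;
  Cache cache;
  for (int i = 0; i < 6; i++)  // Two more requests than the pool can serve.
    printf("allocation %d: %s\n", i,
           cache.Allocate(&pool) ? "ok" : "nullptr (OOM)");
}

Note that Drain() keeps its die-on-OOM behavior: per the TODO in the hunk, failing to allocate a batch while releasing memory back is treated as non-recoverable.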
Index: compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_primary32.h
===================================================================
--- compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_primary32.h
+++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_primary32.h
@@ -24,7 +24,8 @@
 // be returned by MmapOrDie().
 //
 // Region:
-// a result of a single call to MmapAlignedOrDie(kRegionSize, kRegionSize).
+// a result of a single call to MmapAlignedOrDieOnFatalError(kRegionSize,
+// kRegionSize).
 // Since the regions are aligned by kRegionSize, there are exactly
 // kNumPossibleRegions possible regions in the address space and so we keep
 // a ByteMap possible_regions to store the size classes of each Region.
@@ -149,8 +150,9 @@
     CHECK_LT(class_id, kNumClasses);
     SizeClassInfo *sci = GetSizeClassInfo(class_id);
     SpinMutexLock l(&sci->mutex);
-    if (sci->free_list.empty())
-      PopulateFreeList(stat, c, sci, class_id);
+    if (sci->free_list.empty() &&
+        UNLIKELY(!PopulateFreeList(stat, c, sci, class_id)))
+      return nullptr;
     CHECK(!sci->free_list.empty());
     TransferBatch *b = sci->free_list.front();
     sci->free_list.pop_front();
@@ -277,8 +279,10 @@
 
   uptr AllocateRegion(AllocatorStats *stat, uptr class_id) {
     CHECK_LT(class_id, kNumClasses);
-    uptr res = reinterpret_cast<uptr>(MmapAlignedOrDie(kRegionSize, kRegionSize,
-                                                       "SizeClassAllocator32"));
+    uptr res = reinterpret_cast<uptr>(MmapAlignedOrDieOnFatalError(
+        kRegionSize, kRegionSize, "SizeClassAllocator32"));
+    if (UNLIKELY(!res))
+      return 0;
     MapUnmapCallback().OnMap(res, kRegionSize);
     stat->Add(AllocatorStatMapped, kRegionSize);
     CHECK_EQ(0U, (res & (kRegionSize - 1)));
@@ -291,16 +295,20 @@
     return &size_class_info_array[class_id];
   }
 
-  void PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
+  bool PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
                         SizeClassInfo *sci, uptr class_id) {
     uptr size = ClassIdToSize(class_id);
     uptr reg = AllocateRegion(stat, class_id);
+    if (UNLIKELY(!reg))
+      return false;
     uptr n_chunks = kRegionSize / (size + kMetadataSize);
     uptr max_count = TransferBatch::MaxCached(class_id);
     TransferBatch *b = nullptr;
     for (uptr i = reg; i < reg + n_chunks * size; i += size) {
       if (!b) {
         b = c->CreateBatch(class_id, this, (TransferBatch*)i);
+        if (!b)
+          return false;
         b->Clear();
       }
       b->Add((void*)i);
@@ -314,6 +322,7 @@
       CHECK_GT(b->Count(), 0);
       sci->free_list.push_back(b);
     }
+    return true;
   }
 
   ByteMap possible_regions;
Index: compiler-rt/trunk/lib/sanitizer_common/sanitizer_common.h
===================================================================
--- compiler-rt/trunk/lib/sanitizer_common/sanitizer_common.h
+++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_common.h
@@ -95,7 +95,9 @@
 void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name = nullptr);
 void *MmapNoAccess(uptr size);
 // Map aligned chunk of address space; size and alignment are powers of two.
-void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type);
+// Dies on all but out-of-memory errors; in the latter case returns nullptr.
+void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
+                                   const char *mem_type);
 // Disallow access to a memory range.  Use MmapFixedNoAccess to allocate
 // inaccessible memory.
 bool MprotectNoAccess(uptr addr, uptr size);
Index: compiler-rt/trunk/lib/sanitizer_common/sanitizer_posix.cc
===================================================================
--- compiler-rt/trunk/lib/sanitizer_common/sanitizer_posix.cc
+++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_posix.cc
@@ -164,11 +164,14 @@
 // We want to map a chunk of address space aligned to 'alignment'.
 // We do it by mapping a bit more and then unmapping redundant pieces.
 // We probably can do it with fewer syscalls in some OS-dependent way.
-void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type) {
+void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
+                                   const char *mem_type) {
   CHECK(IsPowerOfTwo(size));
   CHECK(IsPowerOfTwo(alignment));
   uptr map_size = size + alignment;
-  uptr map_res = (uptr)MmapOrDie(map_size, mem_type);
+  uptr map_res = (uptr)MmapOrDieOnFatalError(map_size, mem_type);
+  if (!map_res)
+    return nullptr;
   uptr map_end = map_res + map_size;
   uptr res = map_res;
   if (res & (alignment - 1))  // Not aligned.
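The POSIX implementation keeps the existing over-map-and-trim approach described in the comment above it and only changes how failure propagates. For readers unfamiliar with the trick, here is a minimal self-contained sketch of it using raw mmap()/munmap(); MapAligned is a hypothetical name, and MAP_ANONYMOUS assumes Linux (macOS spells it MAP_ANON).

#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>

// Maps size bytes aligned to alignment (both powers of two), or returns
// nullptr if the kernel is out of memory, mirroring the patched logic.
static void *MapAligned(uintptr_t size, uintptr_t alignment) {
  uintptr_t map_size = size + alignment;  // Over-map by the alignment.
  void *raw = mmap(nullptr, map_size, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (raw == MAP_FAILED)
    return nullptr;  // The new return-nullptr-on-OOM path.
  uintptr_t map_res = (uintptr_t)raw;
  uintptr_t map_end = map_res + map_size;
  uintptr_t res = map_res;
  if (res & (alignment - 1))            // Not aligned:
    res = (res | (alignment - 1)) + 1;  // round up to the next boundary.
  if (res != map_res)                   // Unmap the unaligned head...
    munmap((void *)map_res, res - map_res);
  uintptr_t end = res + size;
  if (end != map_end)                   // ...and the leftover tail.
    munmap((void *)end, map_end - end);
  return (void *)res;
}

int main() {
  const uintptr_t kAlign = 1 << 16;
  void *p = MapAligned(1 << 12, kAlign);
  printf("mapped %p (aligned: %d)\n", p,
         p && ((uintptr_t)p & (kAlign - 1)) == 0);
}

The trick costs one oversized mapping plus at most two munmap() calls, and it always finds an aligned address on the first try, unlike the retry loop the Windows port needs below.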
Index: compiler-rt/trunk/lib/sanitizer_common/sanitizer_win.cc
===================================================================
--- compiler-rt/trunk/lib/sanitizer_common/sanitizer_win.cc
+++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_win.cc
@@ -131,18 +131,24 @@
   }
 }
 
+static void *ReturnNullptrOnOOMOrDie(uptr size, const char *mem_type,
+                                     const char *mmap_type) {
+  error_t last_error = GetLastError();
+  if (last_error == ERROR_NOT_ENOUGH_MEMORY)
+    return nullptr;
+  ReportMmapFailureAndDie(size, mem_type, mmap_type, last_error);
+}
+
 void *MmapOrDieOnFatalError(uptr size, const char *mem_type) {
   void *rv = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
-  if (rv == 0) {
-    error_t last_error = GetLastError();
-    if (last_error != ERROR_NOT_ENOUGH_MEMORY)
-      ReportMmapFailureAndDie(size, mem_type, "allocate", last_error);
-  }
+  if (rv == 0)
+    return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate");
   return rv;
 }
 
 // We want to map a chunk of address space aligned to 'alignment'.
-void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type) {
+void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
+                                   const char *mem_type) {
   CHECK(IsPowerOfTwo(size));
   CHECK(IsPowerOfTwo(alignment));
 
@@ -152,7 +158,7 @@
   uptr mapped_addr =
       (uptr)VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
   if (!mapped_addr)
-    ReportMmapFailureAndDie(size, mem_type, "allocate aligned", GetLastError());
+    return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate aligned");
 
   // If we got it right on the first try, return. Otherwise, unmap it and go to
   // the slow path.
@@ -172,8 +178,7 @@
     mapped_addr =
         (uptr)VirtualAlloc(0, size + alignment, MEM_RESERVE, PAGE_NOACCESS);
     if (!mapped_addr)
-      ReportMmapFailureAndDie(size, mem_type, "allocate aligned",
-                              GetLastError());
+      return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate aligned");
 
     // Find the aligned address.
     uptr aligned_addr = RoundUpTo(mapped_addr, alignment);
@@ -191,7 +196,7 @@
 
   // Fail if we can't make this work quickly.
   if (retries == kMaxRetries && mapped_addr == 0)
-    ReportMmapFailureAndDie(size, mem_type, "allocate aligned", GetLastError());
+    return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate aligned");
 
   return (void *)mapped_addr;
 }
Index: compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_common_test.cc
===================================================================
--- compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_common_test.cc
+++ compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_common_test.cc
@@ -72,12 +72,12 @@
   EXPECT_TRUE(IsSorted(array, 2));
 }
 
-TEST(SanitizerCommon, MmapAlignedOrDie) {
+TEST(SanitizerCommon, MmapAlignedOrDieOnFatalError) {
   uptr PageSize = GetPageSizeCached();
   for (uptr size = 1; size <= 32; size *= 2) {
     for (uptr alignment = 1; alignment <= 32; alignment *= 2) {
       for (int iter = 0; iter < 100; iter++) {
-        uptr res = (uptr)MmapAlignedOrDie(
+        uptr res = (uptr)MmapAlignedOrDieOnFatalError(
             size * PageSize, alignment * PageSize, "MmapAlignedOrDieTest");
         EXPECT_EQ(0U, res % (alignment * PageSize));
         internal_memset((void*)res, 1, size * PageSize);
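On Windows, VirtualFree with MEM_RELEASE can only release an entire reservation from its base address, so the function above cannot trim an oversized mapping the way the POSIX version does. Instead it releases the whole reservation and re-reserves at the aligned address inside it, retrying because another thread can grab that address in between. Below is a condensed sketch of that loop under those assumptions; MapAlignedWin is a hypothetical name, with error handling reduced to returning nullptr.

#include <windows.h>
#include <stdint.h>
#include <stdio.h>

static void *MapAlignedWin(uintptr_t size, uintptr_t alignment) {
  const int kMaxRetries = 10;
  for (int retries = 0; retries < kMaxRetries; retries++) {
    // Reserve more than requested so an aligned address must exist inside.
    uintptr_t mapped = (uintptr_t)VirtualAlloc(
        0, size + alignment, MEM_RESERVE, PAGE_NOACCESS);
    if (!mapped)
      return nullptr;  // The patch dies here unless GetLastError() says OOM.
    uintptr_t aligned = (mapped + alignment - 1) & ~(alignment - 1);
    // Release the whole reservation (partial release is not possible), then
    // try to take the aligned address directly.
    VirtualFree((void *)mapped, 0, MEM_RELEASE);
    void *res = VirtualAlloc((void *)aligned, size, MEM_RESERVE | MEM_COMMIT,
                             PAGE_READWRITE);
    if (res)
      return res;  // Otherwise another thread raced us; retry.
  }
  return nullptr;
}

int main() {
  void *p = MapAlignedWin(1 << 12, 1 << 16);
  printf("mapped %p\n", p);
}

The renamed test at the end continues to exercise only the success path (alignment and writability of the result); it does not attempt to trigger the new nullptr-on-OOM path.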