Index: compiler-rt/trunk/lib/asan/asan_allocator.cc
===================================================================
--- compiler-rt/trunk/lib/asan/asan_allocator.cc
+++ compiler-rt/trunk/lib/asan/asan_allocator.cc
@@ -266,7 +266,8 @@
   }
 
   void Initialize(const AllocatorOptions &options) {
-    allocator.Init(options.may_return_null, options.release_to_os_interval_ms);
+    SetAllocatorMayReturnNull(options.may_return_null);
+    allocator.Init(options.release_to_os_interval_ms);
     SharedInitCode(options);
   }
 
@@ -302,7 +303,7 @@
   }
 
   void ReInitialize(const AllocatorOptions &options) {
-    allocator.SetMayReturnNull(options.may_return_null);
+    SetAllocatorMayReturnNull(options.may_return_null);
     allocator.SetReleaseToOSIntervalMs(options.release_to_os_interval_ms);
     SharedInitCode(options);
 
@@ -323,7 +324,7 @@
     options->thread_local_quarantine_size_kb = quarantine.GetCacheSize() >> 10;
     options->min_redzone = atomic_load(&min_redzone, memory_order_acquire);
     options->max_redzone = atomic_load(&max_redzone, memory_order_acquire);
-    options->may_return_null = allocator.MayReturnNull();
+    options->may_return_null = AllocatorMayReturnNull();
     options->alloc_dealloc_mismatch =
         atomic_load(&alloc_dealloc_mismatch, memory_order_acquire);
     options->release_to_os_interval_ms = allocator.ReleaseToOSIntervalMs();
@@ -374,7 +375,7 @@
     if (UNLIKELY(!asan_inited))
       AsanInitFromRtl();
     if (RssLimitExceeded())
-      return allocator.ReturnNullOrDieOnOOM();
+      return AsanAllocator::FailureHandler::OnOOM();
     Flags &fl = *flags();
     CHECK(stack);
     const uptr min_alignment = SHADOW_GRANULARITY;
@@ -407,7 +408,7 @@
     if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
       Report("WARNING: AddressSanitizer failed to allocate 0x%zx bytes\n",
              (void*)size);
-      return allocator.ReturnNullOrDieOnBadRequest();
+      return AsanAllocator::FailureHandler::OnBadRequest();
     }
 
     AsanThread *t = GetCurrentThread();
@@ -420,8 +421,8 @@
       AllocatorCache *cache = &fallback_allocator_cache;
       allocated = allocator.Allocate(cache, needed_size, 8);
     }
-
-    if (!allocated) return allocator.ReturnNullOrDieOnOOM();
+    if (!allocated)
+      return nullptr;
 
     if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && CanPoisonMemory()) {
       // Heap poisoning is enabled, but the allocator provides an unpoisoned
@@ -632,7 +633,7 @@
 
   void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
     if (CallocShouldReturnNullDueToOverflow(size, nmemb))
-      return allocator.ReturnNullOrDieOnBadRequest();
+      return AsanAllocator::FailureHandler::OnBadRequest();
     void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
     // If the memory comes from the secondary allocator no need to clear it
    // as it comes directly from mmap.
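Taken together, the ASan hunks above adopt the new scheme end to end: the may_return_null value is cached process-wide at Initialize()/ReInitialize() time, bad requests and RSS-limit failures go through the static AsanAllocator::FailureHandler policy, and a null result from allocator.Allocate() can simply be propagated because the combined allocator (see sanitizer_allocator_combined.h below) has already run FailureHandler::OnOOM(). The following is a toy, self-contained model of that calling convention; ToyAllocator and ReturnNullPolicy are illustrative names, not part of this patch.

// Toy model only; it mirrors the policy shape and the
// "if (!allocated) return nullptr;" pattern, but is not compiler-rt code.
#include <cstddef>
#include <cstdio>
#include <cstdlib>

struct ReturnNullPolicy {
  static void *OnBadRequest() { return nullptr; }
  static void *OnOOM() { return nullptr; }
};

template <class FailureHandler>
struct ToyAllocator {
  void *Allocate(std::size_t size) {
    if (size == 0)
      return FailureHandler::OnBadRequest();
    void *p = std::malloc(size);
    // The allocator invokes the policy itself, so callers only need to
    // propagate a null result.
    return p ? p : FailureHandler::OnOOM();
  }
};

int main() {
  ToyAllocator<ReturnNullPolicy> toy;
  void *p = toy.Allocate(1u << 20);
  std::printf("allocation %s\n", p ? "succeeded" : "failed");
  std::free(p);
  return 0;
}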
Index: compiler-rt/trunk/lib/lsan/lsan_allocator.cc
===================================================================
--- compiler-rt/trunk/lib/lsan/lsan_allocator.cc
+++ compiler-rt/trunk/lib/lsan/lsan_allocator.cc
@@ -38,8 +38,8 @@
 static Allocator allocator;
 
 void InitializeAllocator() {
+  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
   allocator.InitLinkerInitialized(
-      common_flags()->allocator_may_return_null,
       common_flags()->allocator_release_to_os_interval_ms);
 }
 
Index: compiler-rt/trunk/lib/msan/msan_allocator.cc
===================================================================
--- compiler-rt/trunk/lib/msan/msan_allocator.cc
+++ compiler-rt/trunk/lib/msan/msan_allocator.cc
@@ -119,9 +119,8 @@
 static SpinMutex fallback_mutex;
 
 void MsanAllocatorInit() {
-  allocator.Init(
-      common_flags()->allocator_may_return_null,
-      common_flags()->allocator_release_to_os_interval_ms);
+  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
+  allocator.Init(common_flags()->allocator_release_to_os_interval_ms);
 }
 
 AllocatorCache *GetAllocatorCache(MsanThreadLocalMallocStorage *ms) {
@@ -139,7 +138,7 @@
   if (size > kMaxAllowedMallocSize) {
     Report("WARNING: MemorySanitizer failed to allocate %p bytes\n",
            (void *)size);
-    return allocator.ReturnNullOrDieOnBadRequest();
+    return Allocator::FailureHandler::OnBadRequest();
   }
   MsanThread *t = GetCurrentThread();
   void *allocated;
@@ -197,7 +196,7 @@
 
 void *MsanCalloc(StackTrace *stack, uptr nmemb, uptr size) {
   if (CallocShouldReturnNullDueToOverflow(size, nmemb))
-    return allocator.ReturnNullOrDieOnBadRequest();
+    return Allocator::FailureHandler::OnBadRequest();
   return MsanReallocate(stack, nullptr, nmemb * size, sizeof(u64), true);
 }
 
Index: compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator.h
===================================================================
--- compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator.h
+++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator.h
@@ -24,12 +24,28 @@
 
 namespace __sanitizer {
 
-// Returns true if ReportAllocatorCannotReturnNull(true) was called.
-// Can be use to avoid memory hungry operations.
-bool IsReportingOOM();
+// Since flags are immutable and allocator behavior can be changed at runtime
+// (unit tests or ASan on Android are some examples), allocator_may_return_null
+// flag value is cached here and can be altered later.
+bool AllocatorMayReturnNull();
+void SetAllocatorMayReturnNull(bool may_return_null);
+
+// Allocator failure handling policies:
+// Implements AllocatorMayReturnNull policy, returns null when the flag is set,
+// dies otherwise.
+struct ReturnNullOrDieOnFailure {
+  static void *OnBadRequest();
+  static void *OnOOM();
+};
+// Always dies on the failure.
+struct DieOnFailure {
+  static void *OnBadRequest();
+  static void *OnOOM();
+};
 
-// Prints error message and kills the program.
-void NORETURN ReportAllocatorCannotReturnNull(bool out_of_memory);
+// Returns true if allocator detected OOM condition. Can be used to avoid memory
+// hungry operations. Set when AllocatorReturnNullOrDieOnOOM() is called.
+bool IsAllocatorOutOfMemory();
 
 // Allocators call these callbacks on mmap/munmap.
 struct NoOpMapUnmapCallback {
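The declarations above are the whole external surface of the new scheme: one process-wide, runtime-mutable flag plus two policy structs that allocators invoke on failure. Condensed from the sanitizer_allocator_test.cc changes later in this patch, the expected usage looks roughly like the sketch below (gtest macros; `a` is a CombinedAllocator instance and `cache` its AllocatorCache):

  // Usage sketch condensed from the unit-test changes further down.
  SetAllocatorMayReturnNull(true);    // cached flag, no longer per-allocator
  EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1023, 1024), (void*)0);

  SetAllocatorMayReturnNull(false);   // can be flipped at runtime
  EXPECT_DEATH(a->Allocate(&cache, -1, 1),
               "allocator is terminating the process");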
Index: compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator.cc
===================================================================
--- compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator.cc
+++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator.cc
@@ -94,8 +94,7 @@
   SpinMutexLock l(&internal_alloc_init_mu);
   if (atomic_load(&internal_allocator_initialized, memory_order_relaxed) ==
       0) {
-    internal_allocator_instance->Init(
-        /* may_return_null */ false, kReleaseToOSIntervalNever);
+    internal_allocator_instance->Init(kReleaseToOSIntervalNever);
     atomic_store(&internal_allocator_initialized, 1, memory_order_release);
   }
 }
@@ -162,7 +161,7 @@
 
 void *InternalCalloc(uptr count, uptr size, InternalAllocatorCache *cache) {
   if (CallocShouldReturnNullDueToOverflow(count, size))
-    return internal_allocator()->ReturnNullOrDieOnBadRequest();
+    return InternalAllocator::FailureHandler::OnBadRequest();
   void *p = InternalAlloc(count * size, cache);
   if (p) internal_memset(p, 0, count * size);
   return p;
@@ -209,12 +208,15 @@
   return (max / size) < n;
 }
 
-static atomic_uint8_t reporting_out_of_memory = {0};
+static atomic_uint8_t allocator_out_of_memory = {0};
+static atomic_uint8_t allocator_may_return_null = {0};
 
-bool IsReportingOOM() { return atomic_load_relaxed(&reporting_out_of_memory); }
+bool IsAllocatorOutOfMemory() {
+  return atomic_load_relaxed(&allocator_out_of_memory);
+}
 
-void NORETURN ReportAllocatorCannotReturnNull(bool out_of_memory) {
-  if (out_of_memory) atomic_store_relaxed(&reporting_out_of_memory, 1);
+// Prints error message and kills the program.
+void NORETURN ReportAllocatorCannotReturnNull() {
   Report("%s's allocator is terminating the process instead of returning 0\n",
          SanitizerToolName);
   Report("If you don't like this behavior set allocator_may_return_null=1\n");
@@ -222,4 +224,35 @@
   Die();
 }
 
+bool AllocatorMayReturnNull() {
+  return atomic_load(&allocator_may_return_null, memory_order_relaxed);
+}
+
+void SetAllocatorMayReturnNull(bool may_return_null) {
+  atomic_store(&allocator_may_return_null, may_return_null,
+               memory_order_relaxed);
+}
+
+void *ReturnNullOrDieOnFailure::OnBadRequest() {
+  if (AllocatorMayReturnNull())
+    return nullptr;
+  ReportAllocatorCannotReturnNull();
+}
+
+void *ReturnNullOrDieOnFailure::OnOOM() {
+  atomic_store_relaxed(&allocator_out_of_memory, 1);
+  if (AllocatorMayReturnNull())
+    return nullptr;
+  ReportAllocatorCannotReturnNull();
+}
+
+void *DieOnFailure::OnBadRequest() {
+  ReportAllocatorCannotReturnNull();
+}
+
+void *DieOnFailure::OnOOM() {
+  atomic_store_relaxed(&allocator_out_of_memory, 1);
+  ReportAllocatorCannotReturnNull();
+}
+
 } // namespace __sanitizer
Index: compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_combined.h
===================================================================
--- compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_combined.h
+++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_combined.h
@@ -24,22 +24,18 @@
           class SecondaryAllocator>  // NOLINT
 class CombinedAllocator {
  public:
-  void InitCommon(bool may_return_null, s32 release_to_os_interval_ms) {
-    primary_.Init(release_to_os_interval_ms);
-    atomic_store(&may_return_null_, may_return_null, memory_order_relaxed);
-  }
+  typedef typename SecondaryAllocator::FailureHandler FailureHandler;
 
-  void InitLinkerInitialized(
-      bool may_return_null, s32 release_to_os_interval_ms) {
-    secondary_.InitLinkerInitialized(may_return_null);
+  void InitLinkerInitialized(s32 release_to_os_interval_ms) {
+    primary_.Init(release_to_os_interval_ms);
+    secondary_.InitLinkerInitialized();
     stats_.InitLinkerInitialized();
-    InitCommon(may_return_null, release_to_os_interval_ms);
   }
 
-  void Init(bool may_return_null, s32 release_to_os_interval_ms) {
-    secondary_.Init(may_return_null);
+  void Init(s32 release_to_os_interval_ms) {
+    primary_.Init(release_to_os_interval_ms);
+    secondary_.Init();
     stats_.Init();
-    InitCommon(may_return_null, release_to_os_interval_ms);
   }
 
   void *Allocate(AllocatorCache *cache, uptr size, uptr alignment) {
@@ -47,7 +43,7 @@
     if (size == 0)
       size = 1;
     if (size + alignment < size)
-      return ReturnNullOrDieOnBadRequest();
+      return FailureHandler::OnBadRequest();
     uptr original_size = size;
     // If alignment requirements are to be fulfilled by the frontend allocator
     // rather than by the primary or secondary, passing an alignment lower than
@@ -55,44 +51,24 @@
     // alignment check.
     if (alignment > 8)
       size = RoundUpTo(size, alignment);
-    void *res;
-    bool from_primary = primary_.CanAllocate(size, alignment);
     // The primary allocator should return a 2^x aligned allocation when
     // requested 2^x bytes, hence using the rounded up 'size' when being
     // serviced by the primary (this is no longer true when the primary is
     // using a non-fixed base address). The secondary takes care of the
     // alignment without such requirement, and allocating 'size' would use
     // extraneous memory, so we employ 'original_size'.
-    if (from_primary)
+    void *res;
+    if (primary_.CanAllocate(size, alignment))
       res = cache->Allocate(&primary_, primary_.ClassID(size));
     else
       res = secondary_.Allocate(&stats_, original_size, alignment);
+    if (!res)
+      return FailureHandler::OnOOM();
     if (alignment > 8)
       CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
     return res;
   }
 
-  bool MayReturnNull() const {
-    return atomic_load(&may_return_null_, memory_order_acquire);
-  }
-
-  void *ReturnNullOrDieOnBadRequest() {
-    if (MayReturnNull())
-      return nullptr;
-    ReportAllocatorCannotReturnNull(false);
-  }
-
-  void *ReturnNullOrDieOnOOM() {
-    if (MayReturnNull())
-      return nullptr;
-    ReportAllocatorCannotReturnNull(true);
-  }
-
-  void SetMayReturnNull(bool may_return_null) {
-    secondary_.SetMayReturnNull(may_return_null);
-    atomic_store(&may_return_null_, may_return_null, memory_order_release);
-  }
-
   s32 ReleaseToOSIntervalMs() const {
     return primary_.ReleaseToOSIntervalMs();
   }
@@ -213,6 +189,5 @@
   PrimaryAllocator primary_;
   SecondaryAllocator secondary_;
   AllocatorGlobalStats stats_;
-  atomic_uint8_t may_return_null_;
 };
 
Index: compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_internal.h
===================================================================
--- compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_internal.h
+++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_internal.h
@@ -47,7 +47,8 @@
                     InternalAllocatorCache;
 
 typedef CombinedAllocator<PrimaryInternalAllocator, InternalAllocatorCache,
-                          LargeMmapAllocator<> > InternalAllocator;
+                          LargeMmapAllocator<NoOpMapUnmapCallback, DieOnFailure>
+                         > InternalAllocator;
 
 void *InternalAlloc(uptr size, InternalAllocatorCache *cache = nullptr,
                     uptr alignment = 0);
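The typedef above is where the internal allocator opts out of the null-returning behavior: its secondary carries the DieOnFailure policy, and CombinedAllocator re-exports that choice as FailureHandler, which is what lets InternalCalloc() write InternalAllocator::FailureHandler::OnBadRequest(). A reduced sketch of this type plumbing follows; MySecondary, MyCombined and MyInternalAllocator are illustrative stand-ins, not names from the patch.

// Reduced sketch of the FailureHandler plumbing; not compiler-rt code.
template <class FailureHandlerT = ReturnNullOrDieOnFailure>
struct MySecondary {
  typedef FailureHandlerT FailureHandler;          // secondary owns the policy
};

template <class Secondary>
struct MyCombined {
  // The combined allocator re-exports the secondary's choice under one name.
  typedef typename Secondary::FailureHandler FailureHandler;
};

// Rough equivalent of the InternalAllocator typedef: failures always die,
// matching the old Init(/* may_return_null */ false, ...) behavior.
typedef MyCombined<MySecondary<DieOnFailure> > MyInternalAllocator;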
Index: compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_secondary.h
===================================================================
--- compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_secondary.h
+++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_secondary.h
@@ -17,17 +17,19 @@
 // This class can (de)allocate only large chunks of memory using mmap/unmap.
 // The main purpose of this allocator is to cover large and rare allocation
 // sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
-template <class MapUnmapCallback = NoOpMapUnmapCallback>
+template <class MapUnmapCallback = NoOpMapUnmapCallback,
+          class FailureHandlerT = ReturnNullOrDieOnFailure>
 class LargeMmapAllocator {
  public:
-  void InitLinkerInitialized(bool may_return_null) {
+  typedef FailureHandlerT FailureHandler;
+
+  void InitLinkerInitialized() {
     page_size_ = GetPageSizeCached();
-    atomic_store(&may_return_null_, may_return_null, memory_order_relaxed);
   }
 
-  void Init(bool may_return_null) {
+  void Init() {
     internal_memset(this, 0, sizeof(*this));
-    InitLinkerInitialized(may_return_null);
+    InitLinkerInitialized();
   }
 
   void *Allocate(AllocatorStats *stat, uptr size, uptr alignment) {
@@ -37,11 +39,11 @@
       map_size += alignment;
     // Overflow.
     if (map_size < size)
-      return ReturnNullOrDieOnBadRequest();
+      return FailureHandler::OnBadRequest();
     uptr map_beg = reinterpret_cast<uptr>(
         MmapOrDieOnFatalError(map_size, "LargeMmapAllocator"));
     if (!map_beg)
-      return ReturnNullOrDieOnOOM();
+      return FailureHandler::OnOOM();
     CHECK(IsAligned(map_beg, page_size_));
     MapUnmapCallback().OnMap(map_beg, map_size);
     uptr map_end = map_beg + map_size;
@@ -75,24 +77,6 @@
     return reinterpret_cast<void*>(res);
   }
 
-  bool MayReturnNull() const {
-    return atomic_load(&may_return_null_, memory_order_acquire);
-  }
-
-  void *ReturnNullOrDieOnBadRequest() {
-    if (MayReturnNull()) return nullptr;
-    ReportAllocatorCannotReturnNull(false);
-  }
-
-  void *ReturnNullOrDieOnOOM() {
-    if (MayReturnNull()) return nullptr;
-    ReportAllocatorCannotReturnNull(true);
-  }
-
-  void SetMayReturnNull(bool may_return_null) {
-    atomic_store(&may_return_null_, may_return_null, memory_order_release);
-  }
-
   void Deallocate(AllocatorStats *stat, void *p) {
     Header *h = GetHeader(p);
     {
@@ -278,7 +262,6 @@
   struct Stats {
     uptr n_allocs, n_frees, currently_allocated, max_allocated, by_size_log[64];
   } stats;
-  atomic_uint8_t may_return_null_;
   SpinMutex mutex_;
 };
 
Index: compiler-rt/trunk/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cc
===================================================================
--- compiler-rt/trunk/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cc
+++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cc
@@ -495,7 +495,7 @@
     VReport(2, "Symbolizer is disabled.\n");
     return;
   }
-  if (IsReportingOOM()) {
+  if (IsAllocatorOutOfMemory()) {
     VReport(2, "Cannot use internal symbolizer: out of memory\n");
   } else if (SymbolizerTool *tool = InternalSymbolizer::get(allocator)) {
     VReport(2, "Using internal symbolizer.\n");
Index: compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
===================================================================
--- compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
+++ compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
@@ -426,8 +426,8 @@
 TEST(SanitizerCommon, LargeMmapAllocatorMapUnmapCallback) {
   TestMapUnmapCallback::map_count = 0;
   TestMapUnmapCallback::unmap_count = 0;
-  LargeMmapAllocator<TestMapUnmapCallback> a;
-  a.Init(/* may_return_null */ false);
+  LargeMmapAllocator<TestMapUnmapCallback, DieOnFailure> a;
+  a.Init();
   AllocatorStats stats;
   stats.Init();
   void *x = a.Allocate(&stats, 1 << 20, 1);
@@ -463,8 +463,8 @@
 #endif
 
 TEST(SanitizerCommon, LargeMmapAllocator) {
-  LargeMmapAllocator<> a;
-  a.Init(/* may_return_null */ false);
+  LargeMmapAllocator<NoOpMapUnmapCallback, DieOnFailure> a;
+  a.Init();
   AllocatorStats stats;
   stats.Init();
 
@@ -546,8 +546,9 @@
   typedef
       CombinedAllocator<PrimaryAllocator, AllocatorCache, SecondaryAllocator>
       Allocator;
+  SetAllocatorMayReturnNull(true);
   Allocator *a = new Allocator;
-  a->Init(/* may_return_null */ true, kReleaseToOSIntervalNever);
+  a->Init(kReleaseToOSIntervalNever);
   std::mt19937 r;
 
   AllocatorCache cache;
@@ -561,7 +562,7 @@
   EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1023, 1024), (void*)0);
 
   // Set to false
-  a->SetMayReturnNull(false);
+  SetAllocatorMayReturnNull(false);
   EXPECT_DEATH(a->Allocate(&cache, -1, 1),
                "allocator is terminating the process");
 
@@ -873,8 +874,8 @@
 }
 
 TEST(SanitizerCommon, LargeMmapAllocatorIteration) {
-  LargeMmapAllocator<> a;
-  a.Init(/* may_return_null */ false);
+  LargeMmapAllocator<NoOpMapUnmapCallback, DieOnFailure> a;
+  a.Init();
   AllocatorStats stats;
   stats.Init();
 
@@ -900,8 +901,8 @@
 }
 
 TEST(SanitizerCommon, LargeMmapAllocatorBlockBegin) {
-  LargeMmapAllocator<> a;
-  a.Init(/* may_return_null */ false);
+  LargeMmapAllocator<NoOpMapUnmapCallback, DieOnFailure> a;
+  a.Init();
   AllocatorStats stats;
   stats.Init();
 
Index: compiler-rt/trunk/lib/scudo/scudo_allocator.cpp
===================================================================
--- compiler-rt/trunk/lib/scudo/scudo_allocator.cpp
+++ compiler-rt/trunk/lib/scudo/scudo_allocator.cpp
@@ -273,6 +273,8 @@
   static const uptr MaxAllowedMallocSize =
       FIRST_32_SECOND_64(2UL << 30, 1ULL << 40);
 
+  typedef ReturnNullOrDieOnFailure FailureHandler;
+
   ScudoBackendAllocator BackendAllocator;
   ScudoQuarantine AllocatorQuarantine;
 
@@ -326,7 +328,8 @@
     DeallocationTypeMismatch = Options.DeallocationTypeMismatch;
     DeleteSizeMismatch = Options.DeleteSizeMismatch;
     ZeroContents = Options.ZeroContents;
-    BackendAllocator.Init(Options.MayReturnNull, Options.ReleaseToOSIntervalMs);
+    SetAllocatorMayReturnNull(Options.MayReturnNull);
+    BackendAllocator.Init(Options.ReleaseToOSIntervalMs);
     AllocatorQuarantine.Init(
         static_cast<uptr>(Options.QuarantineSizeMb) << 20,
         static_cast<uptr>(Options.ThreadLocalQuarantineSizeKb) << 10);
@@ -354,11 +357,11 @@
       dieWithMessage("ERROR: alignment is not a power of 2\n");
     }
     if (Alignment > MaxAlignment)
-      return BackendAllocator.ReturnNullOrDieOnBadRequest();
+      return FailureHandler::OnBadRequest();
    if (Alignment < MinAlignment)
      Alignment = MinAlignment;
    if (Size >= MaxAllowedMallocSize)
-      return BackendAllocator.ReturnNullOrDieOnBadRequest();
+      return FailureHandler::OnBadRequest();
    if (Size == 0)
      Size = 1;
 
@@ -366,7 +369,7 @@
     uptr AlignedSize = (Alignment > MinAlignment) ?
         NeededSize + (Alignment - AlignedChunkHeaderSize) : NeededSize;
     if (AlignedSize >= MaxAllowedMallocSize)
-      return BackendAllocator.ReturnNullOrDieOnBadRequest();
+      return FailureHandler::OnBadRequest();
 
     // Primary and Secondary backed allocations have a different treatment. We
     // deal with alignment requirements of Primary serviced allocations here,
@@ -391,7 +394,7 @@
                                         AllocationAlignment, FromPrimary);
     }
     if (!Ptr)
-      return BackendAllocator.ReturnNullOrDieOnOOM();
+      return FailureHandler::OnOOM();
 
     // If requested, we will zero out the entire contents of the returned chunk.
     if ((ForceZeroContents || ZeroContents) && FromPrimary)
@@ -583,7 +586,7 @@
     initThreadMaybe();
     uptr Total = NMemB * Size;
     if (Size != 0 && Total / Size != NMemB)  // Overflow check
-      return BackendAllocator.ReturnNullOrDieOnBadRequest();
+      return FailureHandler::OnBadRequest();
     return allocate(Total, MinAlignment, FromMalloc, true);
   }
 
Index: compiler-rt/trunk/lib/scudo/scudo_allocator_combined.h
===================================================================
--- compiler-rt/trunk/lib/scudo/scudo_allocator_combined.h
+++ compiler-rt/trunk/lib/scudo/scudo_allocator_combined.h
@@ -23,11 +23,10 @@
           class SecondaryAllocator>
 class ScudoCombinedAllocator {
  public:
-  void Init(bool AllocatorMayReturnNull, s32 ReleaseToOSIntervalMs) {
+  void Init(s32 ReleaseToOSIntervalMs) {
     Primary.Init(ReleaseToOSIntervalMs);
-    Secondary.Init(AllocatorMayReturnNull);
+    Secondary.Init();
     Stats.Init();
-    atomic_store_relaxed(&MayReturnNull, AllocatorMayReturnNull);
   }
 
   void *Allocate(AllocatorCache *Cache, uptr Size, uptr Alignment,
@@ -37,18 +36,6 @@
     return Secondary.Allocate(&Stats, Size, Alignment);
   }
 
-  void *ReturnNullOrDieOnBadRequest() {
-    if (atomic_load_relaxed(&MayReturnNull))
-      return nullptr;
-    ReportAllocatorCannotReturnNull(false);
-  }
-
-  void *ReturnNullOrDieOnOOM() {
-    if (atomic_load_relaxed(&MayReturnNull))
-      return nullptr;
-    ReportAllocatorCannotReturnNull(true);
-  }
-
   void Deallocate(AllocatorCache *Cache, void *Ptr, bool FromPrimary) {
     if (FromPrimary)
       Cache->Deallocate(&Primary, Primary.GetSizeClass(Ptr), Ptr);
@@ -78,7 +65,6 @@
   PrimaryAllocator Primary;
   SecondaryAllocator Secondary;
   AllocatorGlobalStats Stats;
-  atomic_uint8_t MayReturnNull;
 };
 
 #endif  // SCUDO_ALLOCATOR_COMBINED_H_
Index: compiler-rt/trunk/lib/scudo/scudo_allocator_secondary.h
===================================================================
--- compiler-rt/trunk/lib/scudo/scudo_allocator_secondary.h
+++ compiler-rt/trunk/lib/scudo/scudo_allocator_secondary.h
@@ -24,9 +24,8 @@
 
 class ScudoLargeMmapAllocator {
  public:
-  void Init(bool AllocatorMayReturnNull) {
+  void Init() {
     PageSize = GetPageSizeCached();
-    atomic_store_relaxed(&MayReturnNull, AllocatorMayReturnNull);
   }
 
   void *Allocate(AllocatorStats *Stats, uptr Size, uptr Alignment) {
@@ -42,7 +41,7 @@
 
     uptr MapBeg = reinterpret_cast<uptr>(MmapNoAccess(MapSize));
     if (MapBeg == ~static_cast<uptr>(0))
-      return ReturnNullOrDieOnOOM();
+      return ReturnNullOrDieOnFailure::OnOOM();
     // A page-aligned pointer is assumed after that, so check it now.
     CHECK(IsAligned(MapBeg, PageSize));
     uptr MapEnd = MapBeg + MapSize;
@@ -96,12 +95,6 @@
     return reinterpret_cast<void *>(Ptr);
   }
 
-  void *ReturnNullOrDieOnOOM() {
-    if (atomic_load_relaxed(&MayReturnNull))
-      return nullptr;
-    ReportAllocatorCannotReturnNull(true);
-  }
-
   void Deallocate(AllocatorStats *Stats, void *Ptr) {
     SecondaryHeader *Header = getHeader(Ptr);
     {
@@ -140,7 +133,6 @@
   const uptr HeadersSize = SecondaryHeaderSize + AlignedChunkHeaderSize;
   uptr PageSize;
   SpinMutex StatsMutex;
-  atomic_uint8_t MayReturnNull;
 };
 
 #endif  // SCUDO_ALLOCATOR_SECONDARY_H_
Index: compiler-rt/trunk/lib/tsan/rtl/tsan_mman.cc
===================================================================
--- compiler-rt/trunk/lib/tsan/rtl/tsan_mman.cc
+++ compiler-rt/trunk/lib/tsan/rtl/tsan_mman.cc
@@ -112,9 +112,8 @@
 }
 
 void InitializeAllocator() {
-  allocator()->Init(
-      common_flags()->allocator_may_return_null,
-      common_flags()->allocator_release_to_os_interval_ms);
+  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
+  allocator()->Init(common_flags()->allocator_release_to_os_interval_ms);
 }
 
 void InitializeAllocatorLate() {
@@ -151,7 +150,7 @@
 
 void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align, bool signal) {
   if ((sz >= (1ull << 40)) || (align >= (1ull << 40)))
-    return allocator()->ReturnNullOrDieOnBadRequest();
+    return Allocator::FailureHandler::OnBadRequest();
   void *p = allocator()->Allocate(&thr->proc()->alloc_cache, sz, align);
   if (p == 0)
     return 0;
@@ -164,7 +163,7 @@
 
 void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
   if (CallocShouldReturnNullDueToOverflow(size, n))
-    return allocator()->ReturnNullOrDieOnBadRequest();
+    return Allocator::FailureHandler::OnBadRequest();
   void *p = user_alloc(thr, pc, n * size);
   if (p)
     internal_memset(p, 0, n * size);
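Across all of the tools touched above, the state that used to live in each allocator instance is reduced to a process-wide cached flag plus an out-of-memory latch, both consulted by the policy structs. The following self-contained approximation of those semantics uses std::atomic in place of __sanitizer's atomics and illustrative names; it is a model of the behavior, not the actual implementation in sanitizer_allocator.cc.

#include <atomic>
#include <cstdio>
#include <cstdlib>

// Cached copy of allocator_may_return_null; may be flipped at runtime.
static std::atomic<bool> may_return_null{false};
// Latched by OnOOM() so memory-hungry consumers (e.g. the symbolizer change
// above) can check it later, analogous to IsAllocatorOutOfMemory().
static std::atomic<bool> out_of_memory{false};

static void SetMayReturnNull(bool v) {
  may_return_null.store(v, std::memory_order_relaxed);
}

[[noreturn]] static void ReportAndDie() {
  std::fprintf(stderr, "allocator is terminating the process\n");
  std::abort();
}

struct ReturnNullOrDie {
  static void *OnBadRequest() {
    if (may_return_null.load(std::memory_order_relaxed))
      return nullptr;
    ReportAndDie();
  }
  static void *OnOOM() {
    out_of_memory.store(true, std::memory_order_relaxed);
    return OnBadRequest();
  }
};

int main() {
  SetMayReturnNull(true);
  void *p = ReturnNullOrDie::OnOOM();  // returns nullptr and latches the flag
  std::printf("p=%p oom=%d\n", p, out_of_memory.load() ? 1 : 0);
  return 0;
}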