diff --git a/compiler-rt/lib/msan/msan.h b/compiler-rt/lib/msan/msan.h
--- a/compiler-rt/lib/msan/msan.h
+++ b/compiler-rt/lib/msan/msan.h
@@ -308,6 +308,7 @@
 void *msan_memalign(uptr alignment, uptr size, StackTrace *stack);
 int msan_posix_memalign(void **memptr, uptr alignment, uptr size,
                         StackTrace *stack);
+void msan_dealloc(void *ptr, StackTrace *stack);
 
 void InstallTrapHandler();
 void InstallAtExitHandler();
diff --git a/compiler-rt/lib/msan/msan_allocator.cpp b/compiler-rt/lib/msan/msan_allocator.cpp
--- a/compiler-rt/lib/msan/msan_allocator.cpp
+++ b/compiler-rt/lib/msan/msan_allocator.cpp
@@ -11,16 +11,15 @@
 // MemorySanitizer allocator.
 //===----------------------------------------------------------------------===//
 
-#include "sanitizer_common/sanitizer_allocator.h"
-#include "sanitizer_common/sanitizer_allocator_checks.h"
-#include "sanitizer_common/sanitizer_allocator_interface.h"
-#include "sanitizer_common/sanitizer_allocator_report.h"
-#include "sanitizer_common/sanitizer_errno.h"
-#include "msan.h"
 #include "msan_allocator.h"
+
+#include "msan.h"
 #include "msan_origin.h"
-#include "msan_thread.h"
 #include "msan_poisoning.h"
+#include "msan_thread.h"
+#include "sanitizer_common/sanitizer_allocator.h"
+#include "sanitizer_common/sanitizer_allocator_api.h"
+#include "sanitizer_common/sanitizer_allocator_interface.h"
 
 namespace __msan {
@@ -122,225 +121,104 @@
 };
 typedef SizeClassAllocator32<AP32> PrimaryAllocator;
 #endif
-typedef CombinedAllocator<PrimaryAllocator> Allocator;
-typedef Allocator::AllocatorCache AllocatorCache;
-
-static Allocator allocator;
-static AllocatorCache fallback_allocator_cache;
-static StaticSpinMutex fallback_mutex;
-
-static uptr max_malloc_size;
-
-void MsanAllocatorInit() {
-  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
-  allocator.Init(common_flags()->allocator_release_to_os_interval_ms);
-  if (common_flags()->max_allocation_size_mb)
-    max_malloc_size = Min(common_flags()->max_allocation_size_mb << 20,
-                          kMaxAllowedMallocSize);
-  else
-    max_malloc_size = kMaxAllowedMallocSize;
-}
-
-AllocatorCache *GetAllocatorCache(MsanThreadLocalMallocStorage *ms) {
-  CHECK(ms);
-  CHECK_LE(sizeof(AllocatorCache), sizeof(ms->allocator_cache));
-  return reinterpret_cast<AllocatorCache *>(ms->allocator_cache);
-}
-
-void MsanThreadLocalMallocStorage::CommitBack() {
-  allocator.SwallowCache(GetAllocatorCache(this));
-}
-
-static void *MsanAllocate(StackTrace *stack, uptr size, uptr alignment,
-                          bool zeroise) {
-  if (size > max_malloc_size) {
-    if (AllocatorMayReturnNull()) {
-      Report("WARNING: MemorySanitizer failed to allocate 0x%zx bytes\n",
-             size);
-      return nullptr;
-    }
-    ReportAllocationSizeTooBig(size, max_malloc_size, stack);
-  }
-  MsanThread *t = GetCurrentThread();
-  void *allocated;
-  if (t) {
-    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
-    allocated = allocator.Allocate(cache, size, alignment);
-  } else {
-    SpinMutexLock l(&fallback_mutex);
-    AllocatorCache *cache = &fallback_allocator_cache;
-    allocated = allocator.Allocate(cache, size, alignment);
-  }
-  if (UNLIKELY(!allocated)) {
-    SetAllocatorOutOfMemory();
-    if (AllocatorMayReturnNull())
-      return nullptr;
-    ReportOutOfMemory(size, stack);
-  }
-  Metadata *meta =
-      reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
-  meta->requested_size = size;
-  if (zeroise) {
-    __msan_clear_and_unpoison(allocated, size);
-  } else if (flags()->poison_in_malloc) {
-    __msan_poison(allocated, size);
-    if (__msan_get_track_origins()) {
-      stack->tag = StackTrace::TAG_ALLOC;
-      Origin o = Origin::CreateHeapOrigin(stack);
-      __msan_set_origin(allocated, size, o.raw_id());
+class MSanAllocatorAPI
+    : public AllocatorAPI<MsanThread, MsanThreadLocalMallocStorage, Metadata,
+                          PrimaryAllocator> {
+ public:
+  MSanAllocatorAPI(const char *sanitizer_name_, uptr max_allowed_malloc_size_)
+      : AllocatorAPI(sanitizer_name_, max_allowed_malloc_size_) {}
+
+ private:
+  MsanThread *GetCurrentThread() override { return __msan::GetCurrentThread(); }
+  void AllocateHook(StackTrace *stack, void *allocated, uptr size,
+                    bool zeroise) override {
+    if (zeroise) {
+      __msan_clear_and_unpoison(allocated, size);
+    } else if (flags()->poison_in_malloc) {
+      __msan_poison(allocated, size);
+      if (__msan_get_track_origins()) {
+        stack->tag = StackTrace::TAG_ALLOC;
+        Origin o = Origin::CreateHeapOrigin(stack);
+        __msan_set_origin(allocated, size, o.raw_id());
+      }
     }
-  }
-  MSAN_MALLOC_HOOK(allocated, size);
-  return allocated;
-}
-
-void MsanDeallocate(StackTrace *stack, void *p) {
-  CHECK(p);
-  MSAN_FREE_HOOK(p);
-  Metadata *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(p));
-  uptr size = meta->requested_size;
-  meta->requested_size = 0;
-  // This memory will not be reused by anyone else, so we are free to keep it
-  // poisoned.
-  if (flags()->poison_in_free) {
-    __msan_poison(p, size);
-    if (__msan_get_track_origins()) {
-      stack->tag = StackTrace::TAG_DEALLOC;
-      Origin o = Origin::CreateHeapOrigin(stack);
-      __msan_set_origin(p, size, o.raw_id());
+    MSAN_MALLOC_HOOK(allocated, size);
+  }
+  void DeallocateHook(StackTrace *stack, void *p, uptr size) override {
+    MSAN_FREE_HOOK(p);
+    // This memory will not be reused by anyone else, so we are free to keep it
+    // poisoned.
+    if (flags()->poison_in_free) {
+      __msan_poison(p, size);
+      if (__msan_get_track_origins()) {
+        stack->tag = StackTrace::TAG_DEALLOC;
+        Origin o = Origin::CreateHeapOrigin(stack);
+        __msan_set_origin(p, size, o.raw_id());
+      }
     }
   }
-  MsanThread *t = GetCurrentThread();
-  if (t) {
-    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
-    allocator.Deallocate(cache, p);
-  } else {
-    SpinMutexLock l(&fallback_mutex);
-    AllocatorCache *cache = &fallback_allocator_cache;
-    allocator.Deallocate(cache, p);
-  }
-}
-
-static void *MsanReallocate(StackTrace *stack, void *old_p, uptr new_size,
-                            uptr alignment) {
-  Metadata *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(old_p));
-  uptr old_size = meta->requested_size;
-  uptr actually_allocated_size = allocator.GetActuallyAllocatedSize(old_p);
-  if (new_size <= actually_allocated_size) {
-    // We are not reallocating here.
-    meta->requested_size = new_size;
-    if (new_size > old_size) {
-      if (flags()->poison_in_malloc) {
-        stack->tag = StackTrace::TAG_ALLOC;
-        PoisonMemory((char *)old_p + old_size, new_size - old_size, stack);
-      }
+  void ReallocateReuseHook(StackTrace *stack, void *old_p, uptr old_size,
+                           uptr new_size) override {
+    if (flags()->poison_in_malloc) {
+      stack->tag = StackTrace::TAG_ALLOC;
+      PoisonMemory((char *)old_p + old_size, new_size - old_size, stack);
     }
-    return old_p;
   }
-  uptr memcpy_size = Min(new_size, old_size);
-  void *new_p = MsanAllocate(stack, new_size, alignment, false /*zeroise*/);
-  if (new_p) {
+  void ReallocateNewHook(StackTrace *stack, void *old_p, void *new_p,
+                         uptr memcpy_size) override {
     CopyMemory(new_p, old_p, memcpy_size, stack);
-    MsanDeallocate(stack, old_p);
   }
-  return new_p;
-}
+};
 
-static void *MsanCalloc(StackTrace *stack, uptr nmemb, uptr size) {
-  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
-    if (AllocatorMayReturnNull())
-      return nullptr;
-    ReportCallocOverflow(nmemb, size, stack);
-  }
-  return MsanAllocate(stack, nmemb * size, sizeof(u64), true);
-}
+static MSanAllocatorAPI allocator_api("MemorySanitizer", kMaxAllowedMallocSize);
+
+void MsanAllocatorInit() { allocator_api.Init(); }
 
-static uptr AllocationSize(const void *p) {
-  if (!p) return 0;
-  const void *beg = allocator.GetBlockBegin(p);
-  if (beg != p) return 0;
-  Metadata *b = (Metadata *)allocator.GetMetaData(p);
-  return b->requested_size;
+void MsanThreadLocalMallocStorage::CommitBack() {
+  allocator_api.CommitBack(this);
 }
 
 void *msan_malloc(uptr size, StackTrace *stack) {
-  return SetErrnoOnNull(MsanAllocate(stack, size, sizeof(u64), false));
+  return allocator_api.Malloc(size, stack);
 }
 
 void *msan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
-  return SetErrnoOnNull(MsanCalloc(stack, nmemb, size));
+  return allocator_api.Calloc(nmemb, size, stack);
 }
 
 void *msan_realloc(void *ptr, uptr size, StackTrace *stack) {
-  if (!ptr)
-    return SetErrnoOnNull(MsanAllocate(stack, size, sizeof(u64), false));
-  if (size == 0) {
-    MsanDeallocate(stack, ptr);
-    return nullptr;
-  }
-  return SetErrnoOnNull(MsanReallocate(stack, ptr, size, sizeof(u64)));
+  return allocator_api.Realloc(ptr, size, stack);
 }
 
 void *msan_reallocarray(void *ptr, uptr nmemb, uptr size, StackTrace *stack) {
-  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
-    errno = errno_ENOMEM;
-    if (AllocatorMayReturnNull())
-      return nullptr;
-    ReportReallocArrayOverflow(nmemb, size, stack);
-  }
-  return msan_realloc(ptr, nmemb * size, stack);
+  return allocator_api.Reallocarray(ptr, nmemb, size, stack);
 }
 
 void *msan_valloc(uptr size, StackTrace *stack) {
-  return SetErrnoOnNull(MsanAllocate(stack, size, GetPageSizeCached(), false));
+  return allocator_api.Valloc(size, stack);
 }
 
 void *msan_pvalloc(uptr size, StackTrace *stack) {
-  uptr PageSize = GetPageSizeCached();
-  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
-    errno = errno_ENOMEM;
-    if (AllocatorMayReturnNull())
-      return nullptr;
-    ReportPvallocOverflow(size, stack);
-  }
-  // pvalloc(0) should allocate one page.
-  size = size ? RoundUpTo(size, PageSize) : PageSize;
-  return SetErrnoOnNull(MsanAllocate(stack, size, PageSize, false));
+  return allocator_api.Pvalloc(size, stack);
 }
 
 void *msan_aligned_alloc(uptr alignment, uptr size, StackTrace *stack) {
-  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
-    errno = errno_EINVAL;
-    if (AllocatorMayReturnNull())
-      return nullptr;
-    ReportInvalidAlignedAllocAlignment(size, alignment, stack);
-  }
-  return SetErrnoOnNull(MsanAllocate(stack, size, alignment, false));
+  return allocator_api.AlignedAlloc(alignment, size, stack);
 }
 
 void *msan_memalign(uptr alignment, uptr size, StackTrace *stack) {
-  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
-    errno = errno_EINVAL;
-    if (AllocatorMayReturnNull())
-      return nullptr;
-    ReportInvalidAllocationAlignment(alignment, stack);
-  }
-  return SetErrnoOnNull(MsanAllocate(stack, size, alignment, false));
+  return allocator_api.Memalign(alignment, size, stack);
 }
 
 int msan_posix_memalign(void **memptr, uptr alignment, uptr size,
                         StackTrace *stack) {
-  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
-    if (AllocatorMayReturnNull())
-      return errno_EINVAL;
-    ReportInvalidPosixMemalignAlignment(alignment, stack);
-  }
-  void *ptr = MsanAllocate(stack, size, alignment, false);
-  if (UNLIKELY(!ptr))
-    // OOM error is already taken care of by MsanAllocate.
-    return errno_ENOMEM;
-  CHECK(IsAligned((uptr)ptr, alignment));
-  *memptr = ptr;
-  return 0;
+  return allocator_api.PosixMemalign(memptr, alignment, size, stack);
+}
+
+void msan_dealloc(void *p, StackTrace *stack) {
+  return allocator_api.Deallocate(p, stack);
 }
 
 } // namespace __msan
@@ -348,16 +226,10 @@
 using namespace __msan;
 
 uptr __sanitizer_get_current_allocated_bytes() {
-  uptr stats[AllocatorStatCount];
-  allocator.GetStats(stats);
-  return stats[AllocatorStatAllocated];
+  return allocator_api.GetCurrentAllocatedBytes();
 }
 
-uptr __sanitizer_get_heap_size() {
-  uptr stats[AllocatorStatCount];
-  allocator.GetStats(stats);
-  return stats[AllocatorStatMapped];
-}
+uptr __sanitizer_get_heap_size() { return allocator_api.GetHeapSize(); }
 
 uptr __sanitizer_get_free_bytes() { return 1; }
 
@@ -365,6 +237,10 @@
 uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }
 
-int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }
+int __sanitizer_get_ownership(const void *p) {
+  return allocator_api.AllocationSize(p) != 0;
+}
 
-uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); }
+uptr __sanitizer_get_allocated_size(const void *p) {
+  return allocator_api.AllocationSize(p);
+}
diff --git a/compiler-rt/lib/msan/msan_interceptors.cpp b/compiler-rt/lib/msan/msan_interceptors.cpp
--- a/compiler-rt/lib/msan/msan_interceptors.cpp
+++ b/compiler-rt/lib/msan/msan_interceptors.cpp
@@ -221,14 +221,14 @@
 INTERCEPTOR(void, free, void *ptr) {
   GET_MALLOC_STACK_TRACE;
   if (!ptr || UNLIKELY(IsInDlsymAllocPool(ptr))) return;
-  MsanDeallocate(&stack, ptr);
+  msan_dealloc(ptr, &stack);
 }
 
 #if !SANITIZER_FREEBSD && !SANITIZER_NETBSD
 INTERCEPTOR(void, cfree, void *ptr) {
   GET_MALLOC_STACK_TRACE;
   if (!ptr || UNLIKELY(IsInDlsymAllocPool(ptr))) return;
-  MsanDeallocate(&stack, ptr);
+  msan_dealloc(ptr, &stack);
 }
 #define MSAN_MAYBE_INTERCEPT_CFREE INTERCEPT_FUNCTION(cfree)
 #else
diff --git a/compiler-rt/lib/msan/msan_new_delete.cpp b/compiler-rt/lib/msan/msan_new_delete.cpp
--- a/compiler-rt/lib/msan/msan_new_delete.cpp
+++ b/compiler-rt/lib/msan/msan_new_delete.cpp
@@ -67,8 +67,9 @@
 { OPERATOR_NEW_BODY_ALIGN(true /*nothrow*/); }
 
 #define OPERATOR_DELETE_BODY \
-  GET_MALLOC_STACK_TRACE; \
-  if (ptr) MsanDeallocate(&stack, ptr)
+  GET_MALLOC_STACK_TRACE;    \
+  if (ptr)                   \
+    msan_dealloc(ptr, &stack)
 
 INTERCEPTOR_ATTRIBUTE
 void operator delete(void *ptr) NOEXCEPT { OPERATOR_DELETE_BODY; }
diff --git a/compiler-rt/lib/sanitizer_common/CMakeLists.txt b/compiler-rt/lib/sanitizer_common/CMakeLists.txt
--- a/compiler-rt/lib/sanitizer_common/CMakeLists.txt
+++ b/compiler-rt/lib/sanitizer_common/CMakeLists.txt
@@ -99,6 +99,7 @@
   sancov_flags.inc
   sanitizer_addrhashmap.h
   sanitizer_allocator.h
+  sanitizer_allocator_api.h
   sanitizer_allocator_bytemap.h
   sanitizer_allocator_checks.h
   sanitizer_allocator_combined.h
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_api.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_api.h
new file mode 100644
--- /dev/null
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_api.h
@@ -0,0 +1,349 @@
+//===-- sanitizer_allocator_api.h -------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+/// Shared allocator API definitions for MemorySanitizer and DataFlowSanitizer.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_ALLOCATOR_API_H
+#define SANITIZER_ALLOCATOR_API_H
+
+#include "sanitizer_common/sanitizer_allocator.h"
+#include "sanitizer_common/sanitizer_allocator_checks.h"
+#include "sanitizer_common/sanitizer_allocator_report.h"
+#include "sanitizer_common/sanitizer_errno.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
+
+namespace __sanitizer {
+
+template <class Thread, class ThreadLocalMallocStorage, class Metadata,
+          class PrimaryAllocator>
+class AllocatorAPI {
+ public:
+  AllocatorAPI(const char *sanitizer_name_, uptr max_allowed_malloc_size_)
+      : sanitizer_name(sanitizer_name_),
+        kMaxAllowedMallocSize(max_allowed_malloc_size_) {}
+  void Init();
+
+  void *Malloc(uptr size, StackTrace *stack = nullptr);
+  void Deallocate(void *p, StackTrace *stack = nullptr);
+  void *Calloc(uptr nmemb, uptr size, StackTrace *stack = nullptr);
+  void *Realloc(void *ptr, uptr size, StackTrace *stack = nullptr);
+  void *Reallocarray(void *ptr, uptr nmemb, uptr size,
+                     StackTrace *stack = nullptr);
+  void *Valloc(uptr size, StackTrace *stack = nullptr);
+  void *Pvalloc(uptr size, StackTrace *stack = nullptr);
+  void *AlignedAlloc(uptr alignment, uptr size, StackTrace *stack = nullptr);
+  void *Memalign(uptr alignment, uptr size, StackTrace *stack = nullptr);
+  int PosixMemalign(void **memptr, uptr alignment, uptr size,
+                    StackTrace *stack = nullptr);
+
+  void CommitBack(ThreadLocalMallocStorage *ms);
+  uptr AllocationSize(const void *p);
+  uptr GetCurrentAllocatedBytes();
+  uptr GetHeapSize();
+
+ private:
+  typedef CombinedAllocator<PrimaryAllocator> Allocator;
+  typedef typename Allocator::AllocatorCache AllocatorCache;
+
+  AllocatorCache *GetAllocatorCache(ThreadLocalMallocStorage *ms);
+
+  void *Allocate(StackTrace *stack, uptr size, uptr alignment, bool zeroise);
+  void *InternalCalloc(StackTrace *stack, uptr nmemb, uptr size);
+  void *InternalReallocate(StackTrace *stack, void *old_p, uptr new_size,
+                           uptr alignment);
+
+  virtual Thread *GetCurrentThread() = 0;
+  virtual void AllocateHook(StackTrace *stack, void *allocated, uptr size,
+                            bool zeroise) = 0;
+  virtual void DeallocateHook(StackTrace *stack, void *p, uptr size) = 0;
+  virtual void ReallocateReuseHook(StackTrace *stack, void *old_p,
+                                   uptr old_size, uptr new_size) = 0;
+  virtual void ReallocateNewHook(StackTrace *stack, void *old_p, void *new_p,
+                                 uptr memcpy_size) = 0;
+
+  Allocator allocator;
+  AllocatorCache fallback_allocator_cache;
+  StaticSpinMutex fallback_mutex;
+  uptr max_malloc_size;
+
+  const char *sanitizer_name;
+  const uptr kMaxAllowedMallocSize;
+};
+
+template <class Thread, class ThreadLocalMallocStorage, class Metadata,
+          class PrimaryAllocator>
+void AllocatorAPI<Thread, ThreadLocalMallocStorage, Metadata,
+                  PrimaryAllocator>::Init() {
+  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
+  allocator.Init(common_flags()->allocator_release_to_os_interval_ms);
+  if (common_flags()->max_allocation_size_mb)
+    max_malloc_size = Min(common_flags()->max_allocation_size_mb << 20,
+                          kMaxAllowedMallocSize);
+  else
+    max_malloc_size = kMaxAllowedMallocSize;
+}
+
+template <class Thread, class ThreadLocalMallocStorage, class Metadata,
+          class PrimaryAllocator>
+typename AllocatorAPI<Thread, ThreadLocalMallocStorage, Metadata,
+                      PrimaryAllocator>::AllocatorCache *
+AllocatorAPI<Thread, ThreadLocalMallocStorage, Metadata, PrimaryAllocator>::
+    GetAllocatorCache(ThreadLocalMallocStorage *ms) {
+  CHECK(ms);
+  CHECK_LE(sizeof(AllocatorCache), sizeof(ms->allocator_cache));
+  return reinterpret_cast<AllocatorCache *>(ms->allocator_cache);
+}
+
+template <class Thread, class ThreadLocalMallocStorage, class Metadata,
+          class PrimaryAllocator>
+void AllocatorAPI<Thread, ThreadLocalMallocStorage, Metadata,
+                  PrimaryAllocator>::CommitBack(ThreadLocalMallocStorage *ms) {
+  allocator.SwallowCache(GetAllocatorCache(ms));
+}
+
+template <class Thread, class ThreadLocalMallocStorage, class Metadata,
+          class PrimaryAllocator>
+void *AllocatorAPI<Thread, ThreadLocalMallocStorage, Metadata,
+                   PrimaryAllocator>::Allocate(StackTrace *stack, uptr size,
+                                               uptr alignment, bool zeroise) {
+  if (size > max_malloc_size) {
+    if (AllocatorMayReturnNull()) {
+      Report("WARNING: %s failed to allocate 0x%zx bytes\n", sanitizer_name,
+             size);
+      return nullptr;
+    }
+    ReportAllocationSizeTooBig(size, max_malloc_size, stack);
+  }
+  Thread *t = GetCurrentThread();
+  void *allocated;
+  if (t) {
+    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
+    allocated = allocator.Allocate(cache, size, alignment);
+  } else {
+    SpinMutexLock l(&fallback_mutex);
+    AllocatorCache *cache = &fallback_allocator_cache;
+    allocated = allocator.Allocate(cache, size, alignment);
+  }
+  if (UNLIKELY(!allocated)) {
+    SetAllocatorOutOfMemory();
+    if (AllocatorMayReturnNull())
+      return nullptr;
+    ReportOutOfMemory(size, stack);
+  }
+  Metadata *meta =
+      reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
+  meta->requested_size = size;
+  AllocateHook(stack, allocated, size, zeroise);
+  return allocated;
+}
+
+template <class Thread, class ThreadLocalMallocStorage, class Metadata,
+          class PrimaryAllocator>
+void AllocatorAPI<Thread, ThreadLocalMallocStorage, Metadata,
+                  PrimaryAllocator>::Deallocate(void *p, StackTrace *stack) {
+  CHECK(p);
+  Metadata *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(p));
+  uptr size = meta->requested_size;
+  DeallocateHook(stack, p, size);
+  meta->requested_size = 0;
+  Thread *t = GetCurrentThread();
+  if (t) {
+    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
+    allocator.Deallocate(cache, p);
+  } else {
+    SpinMutexLock l(&fallback_mutex);
+    AllocatorCache *cache = &fallback_allocator_cache;
+    allocator.Deallocate(cache, p);
+  }
+}
+
+template <class Thread, class ThreadLocalMallocStorage, class Metadata,
+          class PrimaryAllocator>
+void *AllocatorAPI<Thread, ThreadLocalMallocStorage, Metadata,
+                   PrimaryAllocator>::InternalReallocate(StackTrace *stack,
+                                                         void *old_p,
+                                                         uptr new_size,
+                                                         uptr alignment) {
+  Metadata *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(old_p));
+  uptr old_size = meta->requested_size;
+  uptr actually_allocated_size = allocator.GetActuallyAllocatedSize(old_p);
+  if (new_size <= actually_allocated_size) {
+    // We are not reallocating here.
+    meta->requested_size = new_size;
+    if (new_size > old_size)
+      ReallocateReuseHook(stack, old_p, old_size, new_size);
+    return old_p;
+  }
+  uptr memcpy_size = Min(new_size, old_size);
+  void *new_p = Allocate(stack, new_size, alignment, false /*zeroise*/);
+  if (new_p) {
+    ReallocateNewHook(stack, old_p, new_p, memcpy_size);
+    Deallocate(old_p, stack);
+  }
+  return new_p;
+}
+
+template <class Thread, class ThreadLocalMallocStorage, class Metadata,
+          class PrimaryAllocator>
+void *AllocatorAPI<Thread, ThreadLocalMallocStorage, Metadata,
+                   PrimaryAllocator>::InternalCalloc(StackTrace *stack,
+                                                     uptr nmemb, uptr size) {
+  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
+    if (AllocatorMayReturnNull())
+      return nullptr;
+    ReportCallocOverflow(nmemb, size, stack);
+  }
+  return Allocate(stack, nmemb * size, sizeof(u64), true);
+}
+
+template <class Thread, class ThreadLocalMallocStorage, class Metadata,
+          class PrimaryAllocator>
+uptr AllocatorAPI<Thread, ThreadLocalMallocStorage, Metadata,
+                  PrimaryAllocator>::AllocationSize(const void *p) {
+  if (!p)
+    return 0;
+  const void *beg = allocator.GetBlockBegin(p);
+  if (beg != p)
+    return 0;
+  Metadata *b = (Metadata *)allocator.GetMetaData(p);
+  return b->requested_size;
+}
+
+template <class Thread, class ThreadLocalMallocStorage, class Metadata,
+          class PrimaryAllocator>
+void *AllocatorAPI<Thread, ThreadLocalMallocStorage, Metadata,
+                   PrimaryAllocator>::Malloc(uptr size, StackTrace *stack) {
+  return SetErrnoOnNull(Allocate(stack, size, sizeof(u64), false));
+}
+
+template <class Thread, class ThreadLocalMallocStorage, class Metadata,
+          class PrimaryAllocator>
+void *AllocatorAPI<Thread, ThreadLocalMallocStorage, Metadata,
+                   PrimaryAllocator>::Calloc(uptr nmemb, uptr size,
+                                             StackTrace *stack) {
+  return SetErrnoOnNull(InternalCalloc(stack, nmemb, size));
+}
+
+template <class Thread, class ThreadLocalMallocStorage, class Metadata,
+          class PrimaryAllocator>
+void *AllocatorAPI<Thread, ThreadLocalMallocStorage, Metadata,
+                   PrimaryAllocator>::Realloc(void *ptr, uptr size,
+                                              StackTrace *stack) {
+  if (!ptr)
+    return SetErrnoOnNull(Allocate(stack, size, sizeof(u64), false));
+  if (size == 0) {
+    Deallocate(ptr, stack);
+    return nullptr;
+  }
+  return SetErrnoOnNull(InternalReallocate(stack, ptr, size, sizeof(u64)));
+}
+
+template <class Thread, class ThreadLocalMallocStorage, class Metadata,
+          class PrimaryAllocator>
+void *AllocatorAPI<Thread, ThreadLocalMallocStorage, Metadata,
+                   PrimaryAllocator>::Reallocarray(void *ptr, uptr nmemb,
+                                                   uptr size,
+                                                   StackTrace *stack) {
+  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
+    errno = errno_ENOMEM;
+    if (AllocatorMayReturnNull())
+      return nullptr;
+    ReportReallocArrayOverflow(nmemb, size, stack);
+  }
+  return Realloc(ptr, nmemb * size, stack);
+}
+
+template <class Thread, class ThreadLocalMallocStorage, class Metadata,
+          class PrimaryAllocator>
+void *AllocatorAPI<Thread, ThreadLocalMallocStorage, Metadata,
+                   PrimaryAllocator>::Valloc(uptr size, StackTrace *stack) {
+  return SetErrnoOnNull(Allocate(stack, size, GetPageSizeCached(), false));
+}
+
+template <class Thread, class ThreadLocalMallocStorage, class Metadata,
+          class PrimaryAllocator>
+void *AllocatorAPI<Thread, ThreadLocalMallocStorage, Metadata,
+                   PrimaryAllocator>::Pvalloc(uptr size, StackTrace *stack) {
+  uptr PageSize = GetPageSizeCached();
+  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
+    errno = errno_ENOMEM;
+    if (AllocatorMayReturnNull())
+      return nullptr;
+    ReportPvallocOverflow(size, stack);
+  }
+  // pvalloc(0) should allocate one page.
+  size = size ? RoundUpTo(size, PageSize) : PageSize;
+  return SetErrnoOnNull(Allocate(stack, size, PageSize, false));
+}
+
+template <class Thread, class ThreadLocalMallocStorage, class Metadata,
+          class PrimaryAllocator>
+void *AllocatorAPI<Thread, ThreadLocalMallocStorage, Metadata,
+                   PrimaryAllocator>::AlignedAlloc(uptr alignment, uptr size,
+                                                   StackTrace *stack) {
+  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
+    errno = errno_EINVAL;
+    if (AllocatorMayReturnNull())
+      return nullptr;
+    ReportInvalidAlignedAllocAlignment(size, alignment, stack);
+  }
+  return SetErrnoOnNull(Allocate(stack, size, alignment, false));
+}
+
+template <class Thread, class ThreadLocalMallocStorage, class Metadata,
+          class PrimaryAllocator>
+void *AllocatorAPI<Thread, ThreadLocalMallocStorage, Metadata,
+                   PrimaryAllocator>::Memalign(uptr alignment, uptr size,
+                                               StackTrace *stack) {
+  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
+    errno = errno_EINVAL;
+    if (AllocatorMayReturnNull())
+      return nullptr;
+    ReportInvalidAllocationAlignment(alignment, stack);
+  }
+  return SetErrnoOnNull(Allocate(stack, size, alignment, false));
+}
+
+template <class Thread, class ThreadLocalMallocStorage, class Metadata,
+          class PrimaryAllocator>
+int AllocatorAPI<Thread, ThreadLocalMallocStorage, Metadata,
+                 PrimaryAllocator>::PosixMemalign(void **memptr,
+                                                  uptr alignment, uptr size,
+                                                  StackTrace *stack) {
+  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
+    if (AllocatorMayReturnNull())
+      return errno_EINVAL;
+    ReportInvalidPosixMemalignAlignment(alignment, stack);
+  }
+  void *ptr = Allocate(stack, size, alignment, false);
+  if (UNLIKELY(!ptr))
+    // OOM error is already taken care of by Allocate.
+    return errno_ENOMEM;
+  CHECK(IsAligned((uptr)ptr, alignment));
+  *memptr = ptr;
+  return 0;
+}
+
+template <class Thread, class ThreadLocalMallocStorage, class Metadata,
+          class PrimaryAllocator>
+uptr AllocatorAPI<Thread, ThreadLocalMallocStorage, Metadata,
+                  PrimaryAllocator>::GetCurrentAllocatedBytes() {
+  uptr stats[AllocatorStatCount];
+  allocator.GetStats(stats);
+  return stats[AllocatorStatAllocated];
+}
+
+template <class Thread, class ThreadLocalMallocStorage, class Metadata,
+          class PrimaryAllocator>
+uptr AllocatorAPI<Thread, ThreadLocalMallocStorage, Metadata,
+                  PrimaryAllocator>::GetHeapSize() {
+  uptr stats[AllocatorStatCount];
+  allocator.GetStats(stats);
+  return stats[AllocatorStatMapped];
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_ALLOCATOR_API_H
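
Usage note (illustration only, not part of the patch): with the hook design
above, a second sanitizer only has to provide the five virtual overrides; the
cache, fallback, metadata, and errno handling all stay in the shared base. A
minimal sketch of what a DataFlowSanitizer adapter might look like follows,
mirroring MSanAllocatorAPI. The DFsan* type names, the Metadata and
PrimaryAllocator typedefs, and the hook bodies are assumptions for
illustration, and label transfer on realloc is elided.

// Hypothetical adapter for dfsan_allocator.cpp -- a sketch, not this patch.
// Assumes DFsanThread, DFsanThreadLocalMallocStorage, Metadata, and
// PrimaryAllocator are defined there, following the MSan pattern above.
class DFsanAllocatorAPI
    : public AllocatorAPI<DFsanThread, DFsanThreadLocalMallocStorage, Metadata,
                          PrimaryAllocator> {
 public:
  DFsanAllocatorAPI(const char *sanitizer_name_, uptr max_allowed_malloc_size_)
      : AllocatorAPI(sanitizer_name_, max_allowed_malloc_size_) {}

 private:
  DFsanThread *GetCurrentThread() override {
    return __dfsan::GetCurrentThread();
  }
  // Each hook supplies only the sanitizer-specific shadow bookkeeping.
  void AllocateHook(StackTrace *stack, void *allocated, uptr size,
                    bool zeroise) override {
    dfsan_set_label(0, allocated, size);  // fresh memory carries no labels
  }
  void DeallocateHook(StackTrace *stack, void *p, uptr size) override {
    dfsan_set_label(0, p, size);
  }
  void ReallocateReuseHook(StackTrace *stack, void *old_p, uptr old_size,
                           uptr new_size) override {
    dfsan_set_label(0, (char *)old_p + old_size, new_size - old_size);
  }
  void ReallocateNewHook(StackTrace *stack, void *old_p, void *new_p,
                         uptr memcpy_size) override {
    internal_memcpy(new_p, old_p, memcpy_size);
    dfsan_set_label(0, new_p, memcpy_size);  // a real port would copy labels
  }
};

static DFsanAllocatorAPI allocator_api("DataFlowSanitizer",
                                       kMaxAllowedMallocSize);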