Index: compiler-rt/trunk/lib/tsan/rtl/tsan_fd.cc
===================================================================
--- compiler-rt/trunk/lib/tsan/rtl/tsan_fd.cc
+++ compiler-rt/trunk/lib/tsan/rtl/tsan_fd.cc
@@ -48,8 +48,8 @@
 }
 
 static FdSync *allocsync(ThreadState *thr, uptr pc) {
-  FdSync *s = (FdSync*)user_alloc(thr, pc, sizeof(FdSync), kDefaultAlignment,
-      false);
+  FdSync *s = (FdSync*)user_alloc_internal(thr, pc, sizeof(FdSync),
+      kDefaultAlignment, false);
   atomic_store(&s->rc, 1, memory_order_relaxed);
   return s;
 }
@@ -79,7 +79,7 @@
   if (l1 == 0) {
     uptr size = kTableSizeL2 * sizeof(FdDesc);
     // We need this to reside in user memory to properly catch races on it.
-    void *p = user_alloc(thr, pc, size, kDefaultAlignment, false);
+    void *p = user_alloc_internal(thr, pc, size, kDefaultAlignment, false);
     internal_memset(p, 0, size);
     MemoryResetRange(thr, (uptr)&fddesc, (uptr)p, size);
     if (atomic_compare_exchange_strong(pl1, &l1, (uptr)p, memory_order_acq_rel))
Index: compiler-rt/trunk/lib/tsan/rtl/tsan_interceptors.cc
===================================================================
--- compiler-rt/trunk/lib/tsan/rtl/tsan_interceptors.cc
+++ compiler-rt/trunk/lib/tsan/rtl/tsan_interceptors.cc
@@ -584,7 +584,7 @@
 
 TSAN_INTERCEPTOR(void*, __libc_memalign, uptr align, uptr sz) {
   SCOPED_TSAN_INTERCEPTOR(__libc_memalign, align, sz);
-  return user_alloc(thr, pc, sz, align);
+  return user_memalign(thr, pc, align, sz);
 }
 
 TSAN_INTERCEPTOR(void*, calloc, uptr size, uptr n) {
@@ -730,7 +730,7 @@
 #if SANITIZER_LINUX
 TSAN_INTERCEPTOR(void*, memalign, uptr align, uptr sz) {
   SCOPED_INTERCEPTOR_RAW(memalign, align, sz);
-  return user_alloc(thr, pc, sz, align);
+  return user_memalign(thr, pc, align, sz);
 }
 #define TSAN_MAYBE_INTERCEPT_MEMALIGN TSAN_INTERCEPT(memalign)
 #else
@@ -739,21 +739,20 @@
 
 #if !SANITIZER_MAC
 TSAN_INTERCEPTOR(void*, aligned_alloc, uptr align, uptr sz) {
-  SCOPED_INTERCEPTOR_RAW(memalign, align, sz);
-  return user_alloc(thr, pc, sz, align);
+  SCOPED_INTERCEPTOR_RAW(aligned_alloc, align, sz);
+  return user_aligned_alloc(thr, pc, align, sz);
 }
 
 TSAN_INTERCEPTOR(void*, valloc, uptr sz) {
   SCOPED_INTERCEPTOR_RAW(valloc, sz);
-  return user_alloc(thr, pc, sz, GetPageSizeCached());
+  return user_valloc(thr, pc, sz);
 }
 #endif
 
 #if SANITIZER_LINUX
 TSAN_INTERCEPTOR(void*, pvalloc, uptr sz) {
   SCOPED_INTERCEPTOR_RAW(pvalloc, sz);
-  sz = RoundUp(sz, GetPageSizeCached());
-  return user_alloc(thr, pc, sz, GetPageSizeCached());
+  return user_pvalloc(thr, pc, sz);
 }
 #define TSAN_MAYBE_INTERCEPT_PVALLOC TSAN_INTERCEPT(pvalloc)
 #else
@@ -763,8 +762,7 @@
 
 #if !SANITIZER_MAC
 TSAN_INTERCEPTOR(int, posix_memalign, void **memptr, uptr align, uptr sz) {
   SCOPED_INTERCEPTOR_RAW(posix_memalign, memptr, align, sz);
-  *memptr = user_alloc(thr, pc, sz, align);
-  return 0;
+  return user_posix_memalign(thr, pc, memptr, align, sz);
 }
 #endif
Index: compiler-rt/trunk/lib/tsan/rtl/tsan_libdispatch_mac.cc
===================================================================
--- compiler-rt/trunk/lib/tsan/rtl/tsan_libdispatch_mac.cc
+++ compiler-rt/trunk/lib/tsan/rtl/tsan_libdispatch_mac.cc
@@ -86,7 +86,8 @@
                                           void *orig_context,
                                           dispatch_function_t orig_work) {
   tsan_block_context_t *new_context =
-      (tsan_block_context_t *)user_alloc(thr, pc, sizeof(tsan_block_context_t));
+      (tsan_block_context_t *)user_alloc_internal(thr, pc,
+          sizeof(tsan_block_context_t));
   new_context->queue = queue;
   new_context->orig_context = orig_context;
   new_context->orig_work = orig_work;
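A note on the split the hunks above introduce: runtime-internal call sites (the fd descriptor table, libdispatch block contexts) now go through user_alloc_internal(), which performs the allocation without touching errno, while the intercepted libc entry points get thin wrappers that add the libc error contract on top. A minimal sketch of that layering, assuming simplified signatures (ThreadState, uptr, and the real allocator are omitted; raw_allocate is a hypothetical stand-in for allocator()->Allocate(), and set_errno_on_null mirrors sanitizer_common's SetErrnoOnNull):

#include <cerrno>
#include <cstddef>
#include <stdlib.h>  // posix_memalign (POSIX)

// Hypothetical backend standing in for allocator()->Allocate(); the real
// runtime uses its own allocator, not libc.
static void *raw_allocate(size_t size, size_t align) {
  void *p = nullptr;
  if (align < sizeof(void *)) align = sizeof(void *);
  return posix_memalign(&p, align, size) == 0 ? p : nullptr;
}

// Internal entry point: no errno side effects, so allocations made on
// behalf of the runtime never clobber the instrumented program's errno.
void *user_alloc_internal(size_t sz, size_t align) {
  return raw_allocate(sz, align);
}

// Mirrors sanitizer_common's SetErrnoOnNull(): set ENOMEM only on failure.
static void *set_errno_on_null(void *ptr) {
  if (!ptr) errno = ENOMEM;
  return ptr;
}

// Interceptor-facing wrapper: same allocation plus the libc errno contract.
void *user_alloc(size_t sz) {
  return set_errno_on_null(user_alloc_internal(sz, alignof(std::max_align_t)));
}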
Index: compiler-rt/trunk/lib/tsan/rtl/tsan_malloc_mac.cc
===================================================================
--- compiler-rt/trunk/lib/tsan/rtl/tsan_malloc_mac.cc
+++ compiler-rt/trunk/lib/tsan/rtl/tsan_malloc_mac.cc
@@ -26,7 +26,7 @@
 #define COMMON_MALLOC_FORCE_UNLOCK()
 #define COMMON_MALLOC_MEMALIGN(alignment, size) \
   void *p =                                     \
-      user_alloc(cur_thread(), StackTrace::GetCurrentPc(), size, alignment)
+      user_memalign(cur_thread(), StackTrace::GetCurrentPc(), alignment, size)
 #define COMMON_MALLOC_MALLOC(size)                             \
   if (cur_thread()->in_symbolizer) return InternalAlloc(size); \
   SCOPED_INTERCEPTOR_RAW(malloc, size);                        \
@@ -43,7 +43,7 @@
   if (cur_thread()->in_symbolizer)                            \
     return InternalAlloc(size, nullptr, GetPageSizeCached()); \
   SCOPED_INTERCEPTOR_RAW(valloc, size);                       \
-  void *p = user_alloc(thr, pc, size, GetPageSizeCached())
+  void *p = user_valloc(thr, pc, size)
 #define COMMON_MALLOC_FREE(ptr)                              \
   if (cur_thread()->in_symbolizer) return InternalFree(ptr); \
   SCOPED_INTERCEPTOR_RAW(free, ptr);                         \
Index: compiler-rt/trunk/lib/tsan/rtl/tsan_mman.h
===================================================================
--- compiler-rt/trunk/lib/tsan/rtl/tsan_mman.h
+++ compiler-rt/trunk/lib/tsan/rtl/tsan_mman.h
@@ -27,13 +27,20 @@
 void AllocatorPrintStats();
 
 // For user allocations.
-void *user_alloc(ThreadState *thr, uptr pc, uptr sz,
-                 uptr align = kDefaultAlignment, bool signal = true);
-void *user_calloc(ThreadState *thr, uptr pc, uptr sz, uptr n);
+void *user_alloc_internal(ThreadState *thr, uptr pc, uptr sz,
+                          uptr align = kDefaultAlignment, bool signal = true);
 // Does not accept NULL.
 void user_free(ThreadState *thr, uptr pc, void *p, bool signal = true);
+// Interceptor implementations.
+void *user_alloc(ThreadState *thr, uptr pc, uptr sz);
+void *user_calloc(ThreadState *thr, uptr pc, uptr sz, uptr n);
 void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz);
-void *user_alloc_aligned(ThreadState *thr, uptr pc, uptr sz, uptr align);
+void *user_memalign(ThreadState *thr, uptr pc, uptr align, uptr sz);
+int user_posix_memalign(ThreadState *thr, uptr pc, void **memptr, uptr align,
+                        uptr sz);
+void *user_aligned_alloc(ThreadState *thr, uptr pc, uptr align, uptr sz);
+void *user_valloc(ThreadState *thr, uptr pc, uptr sz);
+void *user_pvalloc(ThreadState *thr, uptr pc, uptr sz);
 uptr user_alloc_usable_size(const void *p);
 
 // Invoking malloc/free hooks that may be installed by the user.
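Each new declaration above mirrors the contract of the libc function it backs, and the implementations in tsan_mman.cc below enforce the corresponding alignment rules. A self-contained sketch of those rules, assuming standalone helpers (the real runtime uses IsPowerOfTwo, CheckPosixMemalignAlignment, and CheckAlignedAllocAlignmentAndSize from sanitizer_common):

#include <cstddef>

// Power-of-two test used by the memalign-style entry points.
static bool is_power_of_two(size_t x) {
  return x && (x & (x - 1)) == 0;
}

// posix_memalign: alignment must be a power of two AND a multiple of
// sizeof(void*); violations are reported via the return value (EINVAL),
// not via errno.
static bool posix_memalign_alignment_ok(size_t align) {
  return is_power_of_two(align) && (align % sizeof(void *)) == 0;
}

// aligned_alloc (C11): alignment must be a power of two and size must be
// an integral multiple of alignment.
static bool aligned_alloc_params_ok(size_t align, size_t size) {
  return is_power_of_two(align) && (size % align) == 0;
}

These are exactly the cases the new unit tests at the bottom of the patch probe: user_posix_memalign with align == 3 * sizeof(void*) (a multiple of sizeof(void*) but not a power of two) or align == 2 (a power of two but not a multiple of sizeof(void*)), and user_aligned_alloc with align == 8, size == 100 (size not a multiple of the alignment).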
Index: compiler-rt/trunk/lib/tsan/rtl/tsan_mman.cc
===================================================================
--- compiler-rt/trunk/lib/tsan/rtl/tsan_mman.cc
+++ compiler-rt/trunk/lib/tsan/rtl/tsan_mman.cc
@@ -149,11 +149,12 @@
   OutputReport(thr, rep);
 }
 
-void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align, bool signal) {
+void *user_alloc_internal(ThreadState *thr, uptr pc, uptr sz, uptr align,
+                          bool signal) {
   if ((sz >= (1ull << 40)) || (align >= (1ull << 40)))
     return Allocator::FailureHandler::OnBadRequest();
   void *p = allocator()->Allocate(&thr->proc()->alloc_cache, sz, align);
-  if (p == 0)
+  if (UNLIKELY(p == 0))
     return 0;
   if (ctx && ctx->initialized)
     OnUserAlloc(thr, pc, (uptr)p, sz, true);
@@ -162,15 +163,6 @@
   return p;
 }
 
-void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
-  if (CheckForCallocOverflow(size, n))
-    return Allocator::FailureHandler::OnBadRequest();
-  void *p = user_alloc(thr, pc, n * size);
-  if (p)
-    internal_memset(p, 0, n * size);
-  return p;
-}
-
 void user_free(ThreadState *thr, uptr pc, void *p, bool signal) {
   ScopedGlobalProcessor sgp;
   if (ctx && ctx->initialized)
@@ -180,6 +172,19 @@
     SignalUnsafeCall(thr, pc);
 }
 
+void *user_alloc(ThreadState *thr, uptr pc, uptr sz) {
+  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, kDefaultAlignment));
+}
+
+void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
+  if (UNLIKELY(CheckForCallocOverflow(size, n)))
+    return SetErrnoOnNull(Allocator::FailureHandler::OnBadRequest());
+  void *p = user_alloc_internal(thr, pc, n * size);
+  if (p)
+    internal_memset(p, 0, n * size);
+  return SetErrnoOnNull(p);
+}
+
 void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write) {
   DPrintf("#%d: alloc(%zu) = %p\n", thr->tid, sz, p);
   ctx->metamap.AllocBlock(thr, pc, p, sz);
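The new user_calloc above guards the n * size multiplication with CheckForCallocOverflow before allocating. A minimal standalone equivalent of that check, assuming the real helper's semantics (it lives in sanitizer_common):

#include <cstddef>
#include <cstdint>

// Returns true when count * size would wrap around size_t, in which case
// calloc must fail (setting errno to ENOMEM under this patch) instead of
// silently under-allocating.
static bool calloc_would_overflow(size_t size, size_t count) {
  if (size == 0 || count == 0)
    return false;  // Zero-sized requests cannot overflow.
  return size > SIZE_MAX / count;
}

This is the path exercised by the CallocOverflow unit test and the calloc-overflow action of the lit test at the end of the patch.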
@@ -200,15 +205,60 @@
 void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
   // FIXME: Handle "shrinking" more efficiently,
   // it seems that some software actually does this.
-  void *p2 = user_alloc(thr, pc, sz);
-  if (p2 == 0)
-    return 0;
-  if (p) {
-    uptr oldsz = user_alloc_usable_size(p);
-    internal_memcpy(p2, p, min(oldsz, sz));
+  if (!p)
+    return SetErrnoOnNull(user_alloc_internal(thr, pc, sz));
+  if (!sz) {
     user_free(thr, pc, p);
+    return nullptr;
   }
-  return p2;
+  void *new_p = user_alloc_internal(thr, pc, sz);
+  if (new_p) {
+    uptr old_sz = user_alloc_usable_size(p);
+    internal_memcpy(new_p, p, min(old_sz, sz));
+    user_free(thr, pc, p);
+  }
+  return SetErrnoOnNull(new_p);
+}
+
+void *user_memalign(ThreadState *thr, uptr pc, uptr align, uptr sz) {
+  if (UNLIKELY(!IsPowerOfTwo(align))) {
+    errno = errno_EINVAL;
+    return Allocator::FailureHandler::OnBadRequest();
+  }
+  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
+}
+
+int user_posix_memalign(ThreadState *thr, uptr pc, void **memptr, uptr align,
+                        uptr sz) {
+  if (UNLIKELY(!CheckPosixMemalignAlignment(align))) {
+    Allocator::FailureHandler::OnBadRequest();
+    return errno_EINVAL;
+  }
+  void *ptr = user_alloc_internal(thr, pc, sz, align);
+  if (UNLIKELY(!ptr))
+    return errno_ENOMEM;
+  CHECK(IsAligned((uptr)ptr, align));
+  *memptr = ptr;
+  return 0;
+}
+
+void *user_aligned_alloc(ThreadState *thr, uptr pc, uptr align, uptr sz) {
+  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(align, sz))) {
+    errno = errno_EINVAL;
+    return Allocator::FailureHandler::OnBadRequest();
+  }
+  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
+}
+
+void *user_valloc(ThreadState *thr, uptr pc, uptr sz) {
+  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, GetPageSizeCached()));
+}
+
+void *user_pvalloc(ThreadState *thr, uptr pc, uptr sz) {
+  uptr PageSize = GetPageSizeCached();
+  // pvalloc(0) should allocate one page.
+  sz = sz ? RoundUpTo(sz, PageSize) : PageSize;
+  return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, PageSize));
 }
 
 uptr user_alloc_usable_size(const void *p) {
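The rewritten user_realloc closes two semantic gaps: realloc(NULL, sz) now behaves like malloc(sz), and realloc(p, 0) frees p and returns NULL instead of handing back a live zero-byte block (the unit-test change below pins this down). From an instrumented program's perspective, the observable contract is roughly this (a user-level sketch, not runtime code):

#include <cassert>
#include <cstdlib>
#include <cstring>

int main() {
  // realloc(NULL, n) is equivalent to malloc(n).
  char *p = static_cast<char *>(realloc(nullptr, 100));
  assert(p != nullptr);
  memset(p, 0xde, 100);

  // Under TSan with this patch, realloc(p, 0) frees p and returns NULL
  // (previously TSan returned a live zero-byte allocation here).
  void *q = realloc(p, 0);
  assert(q == nullptr);

  // On an unsatisfiable request realloc returns NULL, sets errno to
  // ENOMEM, and leaves the old block untouched.
  return 0;
}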
Index: compiler-rt/trunk/lib/tsan/tests/unit/tsan_mman_test.cc
===================================================================
--- compiler-rt/trunk/lib/tsan/tests/unit/tsan_mman_test.cc
+++ compiler-rt/trunk/lib/tsan/tests/unit/tsan_mman_test.cc
@@ -56,6 +56,7 @@
     // Realloc(NULL, N) is equivalent to malloc(N), thus must return
     // non-NULL pointer.
     EXPECT_NE(p, (void*)0);
+    user_free(thr, pc, p);
   }
   {
     void *p = user_realloc(thr, pc, 0, 100);
@@ -67,8 +68,9 @@
     void *p = user_alloc(thr, pc, 100);
    EXPECT_NE(p, (void*)0);
     memset(p, 0xde, 100);
+    // Realloc(P, 0) is equivalent to free(P) and returns NULL.
     void *p2 = user_realloc(thr, pc, p, 0);
-    EXPECT_NE(p2, (void*)0);
+    EXPECT_EQ(p2, (void*)0);
   }
   {
     void *p = user_realloc(thr, pc, 0, 100);
@@ -135,12 +137,28 @@
   EXPECT_EQ(unmapped0, __sanitizer_get_unmapped_bytes());
 }
 
+TEST(Mman, Valloc) {
+  ThreadState *thr = cur_thread();
+
+  void *p = user_valloc(thr, 0, 100);
+  EXPECT_NE(p, (void*)0);
+  user_free(thr, 0, p);
+
+  p = user_pvalloc(thr, 0, 100);
+  EXPECT_NE(p, (void*)0);
+  user_free(thr, 0, p);
+
+  p = user_pvalloc(thr, 0, 0);
+  EXPECT_NE(p, (void*)0);
+  EXPECT_EQ(GetPageSizeCached(), __sanitizer_get_allocated_size(p));
+  user_free(thr, 0, p);
+}
+
+#if !SANITIZER_DEBUG
+// EXPECT_DEATH clones a thread with a 4K stack,
+// which is overflowed by tsan memory access functions in debug mode.
+
 TEST(Mman, CallocOverflow) {
-#if SANITIZER_DEBUG
-  // EXPECT_DEATH clones a thread with 4K stack,
-  // which is overflown by tsan memory accesses functions in debug mode.
-  return;
-#endif
   ThreadState *thr = cur_thread();
   uptr pc = 0;
   size_t kArraySize = 4096;
@@ -152,4 +170,57 @@
   EXPECT_EQ(0L, p);
 }
 
+TEST(Mman, Memalign) {
+  ThreadState *thr = cur_thread();
+
+  void *p = user_memalign(thr, 0, 8, 100);
+  EXPECT_NE(p, (void*)0);
+  user_free(thr, 0, p);
+
+  p = NULL;
+  EXPECT_DEATH(p = user_memalign(thr, 0, 7, 100),
+               "allocator is terminating the process instead of returning 0");
+  EXPECT_EQ(0L, p);
+}
+
+TEST(Mman, PosixMemalign) {
+  ThreadState *thr = cur_thread();
+
+  void *p = NULL;
+  int res = user_posix_memalign(thr, 0, &p, 8, 100);
+  EXPECT_NE(p, (void*)0);
+  EXPECT_EQ(res, 0);
+  user_free(thr, 0, p);
+
+  p = NULL;
+  // Alignment is not a power of two, although it is a multiple of sizeof(void*).
+  EXPECT_DEATH(res = user_posix_memalign(thr, 0, &p, 3 * sizeof(p), 100),
+               "allocator is terminating the process instead of returning 0");
+  EXPECT_EQ(0L, p);
+  // Alignment is not a multiple of sizeof(void*), although it is a power of 2.
+  EXPECT_DEATH(res = user_posix_memalign(thr, 0, &p, 2, 100),
+               "allocator is terminating the process instead of returning 0");
+  EXPECT_EQ(0L, p);
+}
+
+TEST(Mman, AlignedAlloc) {
+  ThreadState *thr = cur_thread();
+
+  void *p = user_aligned_alloc(thr, 0, 8, 64);
+  EXPECT_NE(p, (void*)0);
+  user_free(thr, 0, p);
+
+  p = NULL;
+  // Alignment is not a power of 2.
+  EXPECT_DEATH(p = user_aligned_alloc(thr, 0, 7, 100),
+               "allocator is terminating the process instead of returning 0");
+  EXPECT_EQ(0L, p);
+  // Size is not a multiple of alignment.
+  EXPECT_DEATH(p = user_aligned_alloc(thr, 0, 8, 100),
+               "allocator is terminating the process instead of returning 0");
+  EXPECT_EQ(0L, p);
+}
+
+#endif
+
 }  // namespace __tsan
Index: compiler-rt/trunk/test/tsan/allocator_returns_null.cc
===================================================================
--- compiler-rt/trunk/test/tsan/allocator_returns_null.cc
+++ compiler-rt/trunk/test/tsan/allocator_returns_null.cc
@@ -37,9 +37,10 @@
 // RUN: | FileCheck %s --check-prefix=CHECK-nnNULL
 
 #include <assert.h>
-#include <string.h>
+#include <errno.h>
 #include <stdio.h>
 #include <stdlib.h>
+#include <string.h>
 #include <limits>
 #include <new>
 
@@ -51,6 +52,7 @@
   const char *action = argv[1];
   fprintf(stderr, "%s:\n", action);
 
+  // The limit enforced in user_alloc_internal() in tsan_mman.cc.
   static const size_t kMaxAllowedMallocSizePlusOne = (1ULL << 40) + 1;
 
   void *x = 0;
@@ -78,10 +80,13 @@
     assert(0);
   }
 
+  fprintf(stderr, "errno: %d\n", errno);
+
   // The NULL pointer is printed differently on different systems, while (long)0
   // is always the same.
   fprintf(stderr, "x: %lx\n", (long)x);
   free(x);
+
   return x != 0;
 }
 
@@ -101,14 +106,19 @@
 // CHECK-nnCRASH: ThreadSanitizer's allocator is terminating the process
 
 // CHECK-mNULL: malloc:
+// CHECK-mNULL: errno: 12
 // CHECK-mNULL: x: 0
 // CHECK-cNULL: calloc:
+// CHECK-cNULL: errno: 12
 // CHECK-cNULL: x: 0
 // CHECK-coNULL: calloc-overflow:
+// CHECK-coNULL: errno: 12
 // CHECK-coNULL: x: 0
 // CHECK-rNULL: realloc:
+// CHECK-rNULL: errno: 12
 // CHECK-rNULL: x: 0
 // CHECK-mrNULL: realloc-after-malloc:
+// CHECK-mrNULL: errno: 12
 // CHECK-mrNULL: x: 0
 // CHECK-nnNULL: new-nothrow:
 // CHECK-nnNULL: x: 0
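The new "errno: 12" FileCheck lines assume ENOMEM == 12, which holds on Linux, the platform this lit test targets. The same contract can be observed outside the test harness with a hypothetical standalone repro, built with clang -fsanitize=thread and run under TSAN_OPTIONS=allocator_may_return_null=1:

#include <cerrno>
#include <cstdio>
#include <cstdlib>

int main() {
  // One byte past the 1ULL << 40 cap enforced in user_alloc_internal().
  void *x = malloc((1ULL << 40) + 1);
  // Expect x == NULL and errno == ENOMEM (12 on Linux).
  fprintf(stderr, "x: %p errno: %d\n", x, errno);
  free(x);
  return x != nullptr;
}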