Index: lib/scudo/standalone/CMakeLists.txt
===================================================================
--- lib/scudo/standalone/CMakeLists.txt
+++ lib/scudo/standalone/CMakeLists.txt
@@ -33,29 +33,6 @@
   append_list_if(COMPILER_RT_HAS_Z_GLOBAL -Wl,-z,global SCUDO_LINK_FLAGS)
 endif()

-set(SCUDO_SOURCES
-  checksum.cc
-  crc32_hw.cc
-  common.cc
-  flags.cc
-  flags_parser.cc
-  fuchsia.cc
-  linux.cc
-  report.cc
-  secondary.cc
-  string_utils.cc)
-
-# Enable the SSE 4.2 instruction set for crc32_hw.cc, if available.
-if (COMPILER_RT_HAS_MSSE4_2_FLAG)
-  set_source_files_properties(crc32_hw.cc PROPERTIES COMPILE_FLAGS -msse4.2)
-endif()
-
-# Enable the AArch64 CRC32 feature for crc32_hw.cc, if available.
-# Note that it is enabled by default starting with armv8.1-a.
-if (COMPILER_RT_HAS_MCRC_FLAG)
-  set_source_files_properties(crc32_hw.cc PROPERTIES COMPILE_FLAGS -mcrc)
-endif()
-
 set(SCUDO_HEADERS
   allocator_config.h
   atomic_helpers.h
@@ -85,7 +62,38 @@
   tsd.h
   tsd_exclusive.h
   tsd_shared.h
-  vector.h)
+  vector.h
+  wrappers_c_checks.h
+  wrappers_c.h)
+
+set(SCUDO_SOURCES
+  checksum.cc
+  crc32_hw.cc
+  common.cc
+  flags.cc
+  flags_parser.cc
+  fuchsia.cc
+  linux.cc
+  report.cc
+  secondary.cc
+  string_utils.cc)
+
+# Enable the SSE 4.2 instruction set for crc32_hw.cc, if available.
+if (COMPILER_RT_HAS_MSSE4_2_FLAG)
+  set_source_files_properties(crc32_hw.cc PROPERTIES COMPILE_FLAGS -msse4.2)
+endif()
+
+# Enable the AArch64 CRC32 feature for crc32_hw.cc, if available.
+# Note that it is enabled by default starting with armv8.1-a.
+if (COMPILER_RT_HAS_MCRC_FLAG)
+  set_source_files_properties(crc32_hw.cc PROPERTIES COMPILE_FLAGS -mcrc)
+endif()
+
+set(SCUDO_SOURCES_C_WRAPPERS
+  wrappers_c.cc)
+
+set(SCUDO_SOURCES_CXX_WRAPPERS
+  wrappers_cpp.cc)

 if(COMPILER_RT_HAS_SCUDO_STANDALONE)
   add_compiler_rt_object_libraries(RTScudoStandalone
@@ -93,11 +101,28 @@
     SOURCES ${SCUDO_SOURCES}
     ADDITIONAL_HEADERS ${SCUDO_HEADERS}
     CFLAGS ${SCUDO_CFLAGS})
+  add_compiler_rt_object_libraries(RTScudoStandaloneCWrappers
+    ARCHS ${SCUDO_STANDALONE_SUPPORTED_ARCH}
+    SOURCES ${SCUDO_SOURCES_C_WRAPPERS}
+    ADDITIONAL_HEADERS ${SCUDO_HEADERS}
+    CFLAGS ${SCUDO_CFLAGS})
+  add_compiler_rt_object_libraries(RTScudoStandaloneCxxWrappers
+    ARCHS ${SCUDO_STANDALONE_SUPPORTED_ARCH}
+    SOURCES ${SCUDO_SOURCES_CXX_WRAPPERS}
+    ADDITIONAL_HEADERS ${SCUDO_HEADERS}
+    CFLAGS ${SCUDO_CFLAGS})

   add_compiler_rt_runtime(clang_rt.scudo_standalone
     STATIC
     ARCHS ${SCUDO_STANDALONE_SUPPORTED_ARCH}
-    SOURCES ${SCUDO_SOURCES}
+    SOURCES ${SCUDO_SOURCES} ${SCUDO_SOURCES_C_WRAPPERS}
+    ADDITIONAL_HEADERS ${SCUDO_HEADERS}
+    CFLAGS ${SCUDO_CFLAGS}
+    PARENT_TARGET scudo_standalone)
+  add_compiler_rt_runtime(clang_rt.scudo_standalone_cxx
+    STATIC
+    ARCHS ${SCUDO_STANDALONE_SUPPORTED_ARCH}
+    SOURCES ${SCUDO_SOURCES_CXX_WRAPPERS}
     ADDITIONAL_HEADERS ${SCUDO_HEADERS}
     CFLAGS ${SCUDO_CFLAGS}
     PARENT_TARGET scudo_standalone)
Index: lib/scudo/standalone/tests/CMakeLists.txt
===================================================================
--- lib/scudo/standalone/tests/CMakeLists.txt
+++ lib/scudo/standalone/tests/CMakeLists.txt
@@ -10,7 +10,10 @@
   -I${COMPILER_RT_SOURCE_DIR}/include
   -I${COMPILER_RT_SOURCE_DIR}/lib
   -I${COMPILER_RT_SOURCE_DIR}/lib/scudo/standalone
-  -DGTEST_HAS_RTTI=0)
+  -DGTEST_HAS_RTTI=0
+  # Extra flags for the C++ tests
+  -fsized-deallocation
+  -Wno-mismatched-new-delete)

 set(SCUDO_TEST_ARCH ${SCUDO_STANDALONE_SUPPORTED_ARCH})

@@ -21,27 +24,30 @@
 endforeach()
 list(APPEND LINK_FLAGS -pthread)

-set(TEST_HEADERS)
+set(SCUDO_TEST_HEADERS)
 foreach (header ${SCUDO_HEADERS})
-  list(APPEND TEST_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/../${header})
+  list(APPEND SCUDO_TEST_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/../${header})
 endforeach()

-# add_scudo_unittest(<name>
-#                    SOURCES <source files>
-#                    HEADERS <header files>)
 macro(add_scudo_unittest testname)
-  cmake_parse_arguments(TEST "" "" "SOURCES;HEADERS" ${ARGN})
+  cmake_parse_arguments(TEST "" "" "SOURCES;ADDITIONAL_RTOBJECTS" ${ARGN})
   if(COMPILER_RT_HAS_SCUDO_STANDALONE)
     foreach(arch ${SCUDO_TEST_ARCH})
+      # Additional runtime objects get added alongside RTScudoStandalone
+      set(SCUDO_TEST_RTOBJECTS $<TARGET_OBJECTS:RTScudoStandalone.${arch}>)
+      foreach(rtobject ${TEST_ADDITIONAL_RTOBJECTS})
+        list(APPEND SCUDO_TEST_RTOBJECTS $<TARGET_OBJECTS:${rtobject}.${arch}>)
+      endforeach()
+      # Add the static runtime library made of all the runtime objects
+      set(RUNTIME RT${testname}.${arch})
+      add_library(${RUNTIME} STATIC ${SCUDO_TEST_RTOBJECTS})
       set(ScudoUnitTestsObjects)
-      add_library("RTScudoStandalone.test.${arch}" STATIC
-        $<TARGET_OBJECTS:RTScudoStandalone.${arch}>)
       generate_compiler_rt_tests(ScudoUnitTestsObjects ScudoUnitTests
         "${testname}-${arch}-Test" ${arch}
         SOURCES ${TEST_SOURCES} ${COMPILER_RT_GTEST_SOURCE}
-        COMPILE_DEPS ${TEST_HEADERS}
+        COMPILE_DEPS ${SCUDO_TEST_HEADERS}
         DEPS gtest scudo_standalone
-        RUNTIME RTScudoStandalone.test.${arch}
+        RUNTIME ${RUNTIME}
         CFLAGS ${SCUDO_UNITTEST_CFLAGS}
         LINK_FLAGS ${LINK_FLAGS})
     endforeach()
@@ -72,3 +78,19 @@
 add_scudo_unittest(ScudoUnitTest
   SOURCES ${SCUDO_UNIT_TEST_SOURCES})
+
+set(SCUDO_C_UNIT_TEST_SOURCES
+  wrappers_c_test.cc
+  scudo_unit_test_main.cc)
+
+add_scudo_unittest(ScudoCUnitTest
+  SOURCES ${SCUDO_C_UNIT_TEST_SOURCES}
+  ADDITIONAL_RTOBJECTS RTScudoStandaloneCWrappers)
+
+set(SCUDO_CXX_UNIT_TEST_SOURCES
+  wrappers_cpp_test.cc
+  scudo_unit_test_main.cc)
+
+add_scudo_unittest(ScudoCxxUnitTest
+  SOURCES ${SCUDO_CXX_UNIT_TEST_SOURCES}
+  ADDITIONAL_RTOBJECTS RTScudoStandaloneCWrappers RTScudoStandaloneCxxWrappers)
Index: lib/scudo/standalone/tests/wrappers_c_test.cc
===================================================================
--- /dev/null
+++ lib/scudo/standalone/tests/wrappers_c_test.cc
@@ -0,0 +1,226 @@
+//===-- wrappers_c_test.cc --------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "platform.h"
+
+#include "gtest/gtest.h"
+
+#include <limits.h>
+#include <malloc.h>
+#include <unistd.h>
+
+// Note that every C allocation function in the test binary will be fulfilled
+// by Scudo (this includes the gtest APIs, etc.), which is a test by itself.
+// But this might also lead to unexpected side-effects, since the allocation and
+// deallocation operations in the TEST functions will coexist with others (see
+// the EXPECT_DEATH comment below).
+
+// We have to use a small quarantine to make sure that our double-free tests
+// trigger. Otherwise EXPECT_DEATH ends up reallocating the chunk that was just
+// freed (this depends on the size obviously) and the following free succeeds.
+
+extern "C" __attribute__((visibility("default"))) const char * +__scudo_default_options() { + return "quarantine_size_kb=256:thread_local_quarantine_size_kb=128:" + "quarantine_max_chunk_size=512"; +} + +static const size_t Size = 100U; + +TEST(ScudoWrappersCTest, Malloc) { + void *P = malloc(Size); + EXPECT_NE(P, nullptr); + EXPECT_LE(Size, malloc_usable_size(P)); + EXPECT_EQ(reinterpret_cast(P) % FIRST_32_SECOND_64(8U, 16U), 0U); + EXPECT_DEATH( + free(reinterpret_cast(reinterpret_cast(P) | 1U)), ""); + free(P); + EXPECT_DEATH(free(P), ""); + + P = malloc(0U); + EXPECT_NE(P, nullptr); + free(P); + + errno = 0; + EXPECT_EQ(malloc(SIZE_MAX), nullptr); + EXPECT_EQ(errno, ENOMEM); +} + +TEST(ScudoWrappersCTest, Calloc) { + void *P = calloc(1U, Size); + EXPECT_NE(P, nullptr); + EXPECT_LE(Size, malloc_usable_size(P)); + for (size_t I = 0; I < Size; I++) + EXPECT_EQ((reinterpret_cast(P))[I], 0U); + free(P); + + P = calloc(1U, 0U); + EXPECT_NE(P, nullptr); + free(P); + P = calloc(0U, 1U); + EXPECT_NE(P, nullptr); + free(P); + + errno = 0; + EXPECT_EQ(calloc(SIZE_MAX, 1U), nullptr); + EXPECT_EQ(errno, ENOMEM); + errno = 0; + EXPECT_EQ(calloc(static_cast(LONG_MAX) + 1U, 2U), nullptr); + if (SCUDO_ANDROID) + EXPECT_EQ(errno, ENOMEM); + errno = 0; + EXPECT_EQ(calloc(SIZE_MAX, SIZE_MAX), nullptr); + if (SCUDO_ANDROID) + EXPECT_EQ(errno, ENOMEM); +} + +TEST(ScudoWrappersCTest, Memalign) { + void *P; + for (size_t I = FIRST_32_SECOND_64(2U, 3U); I <= 18U; I++) { + const size_t Alignment = 1U << I; + + P = memalign(Alignment, Size); + EXPECT_NE(P, nullptr); + EXPECT_LE(Size, malloc_usable_size(P)); + EXPECT_EQ(reinterpret_cast(P) % Alignment, 0U); + free(P); + + P = nullptr; + EXPECT_EQ(posix_memalign(&P, Alignment, Size), 0); + EXPECT_NE(P, nullptr); + EXPECT_LE(Size, malloc_usable_size(P)); + EXPECT_EQ(reinterpret_cast(P) % Alignment, 0U); + free(P); + } + + EXPECT_EQ(memalign(4096U, SIZE_MAX), nullptr); + EXPECT_EQ(posix_memalign(&P, 15U, Size), EINVAL); + EXPECT_EQ(posix_memalign(&P, 4096U, SIZE_MAX), ENOMEM); + + // Android's memalign accepts non power-of-2 alignments, and 0. 
+  if (SCUDO_ANDROID) {
+    for (size_t Alignment = 0U; Alignment <= 128U; Alignment++) {
+      P = memalign(Alignment, 1024U);
+      EXPECT_NE(P, nullptr);
+      free(P);
+    }
+  }
+}
+
+TEST(ScudoWrappersCTest, AlignedAlloc) {
+  const size_t Alignment = 4096U;
+  void *P = aligned_alloc(Alignment, Alignment * 4U);
+  EXPECT_NE(P, nullptr);
+  EXPECT_LE(Alignment * 4U, malloc_usable_size(P));
+  EXPECT_EQ(reinterpret_cast<uintptr_t>(P) % Alignment, 0U);
+  free(P);
+
+  errno = 0;
+  P = aligned_alloc(Alignment, Size);
+  EXPECT_EQ(P, nullptr);
+  EXPECT_EQ(errno, EINVAL);
+}
+
+TEST(ScudoWrappersCTest, Realloc) {
+  // realloc(nullptr, N) is malloc(N)
+  void *P = realloc(nullptr, 0U);
+  EXPECT_NE(P, nullptr);
+  free(P);
+
+  P = malloc(Size);
+  EXPECT_NE(P, nullptr);
+  // realloc(P, 0U) is free(P) and returns nullptr
+  EXPECT_EQ(realloc(P, 0U), nullptr);
+
+  P = malloc(Size);
+  EXPECT_NE(P, nullptr);
+  EXPECT_LE(Size, malloc_usable_size(P));
+  memset(P, 0x42, Size);
+
+  P = realloc(P, Size * 2U);
+  EXPECT_NE(P, nullptr);
+  EXPECT_LE(Size * 2U, malloc_usable_size(P));
+  for (size_t I = 0; I < Size; I++)
+    EXPECT_EQ(0x42, (reinterpret_cast<uint8_t *>(P))[I]);
+
+  P = realloc(P, Size / 2U);
+  EXPECT_NE(P, nullptr);
+  EXPECT_LE(Size / 2U, malloc_usable_size(P));
+  for (size_t I = 0; I < Size / 2U; I++)
+    EXPECT_EQ(0x42, (reinterpret_cast<uint8_t *>(P))[I]);
+  free(P);
+
+  EXPECT_DEATH(P = realloc(P, Size), "");
+
+  errno = 0;
+  EXPECT_EQ(realloc(nullptr, SIZE_MAX), nullptr);
+  EXPECT_EQ(errno, ENOMEM);
+  P = malloc(Size);
+  EXPECT_NE(P, nullptr);
+  errno = 0;
+  EXPECT_EQ(realloc(P, SIZE_MAX), nullptr);
+  EXPECT_EQ(errno, ENOMEM);
+  free(P);
+
+  // Android allows realloc of memalign pointers.
+  if (SCUDO_ANDROID) {
+    const size_t Alignment = 1024U;
+    P = memalign(Alignment, Size);
+    EXPECT_NE(P, nullptr);
+    EXPECT_LE(Size, malloc_usable_size(P));
+    EXPECT_EQ(reinterpret_cast<uintptr_t>(P) % Alignment, 0U);
+    memset(P, 0x42, Size);
+
+    P = realloc(P, Size * 2U);
+    EXPECT_NE(P, nullptr);
+    EXPECT_LE(Size * 2U, malloc_usable_size(P));
+    for (size_t I = 0; I < Size; I++)
+      EXPECT_EQ(0x42, (reinterpret_cast<uint8_t *>(P))[I]);
+    free(P);
+  }
+}
+
+#ifndef M_DECAY_TIME
+#define M_DECAY_TIME -100
+#endif
+
+#ifndef M_PURGE
+#define M_PURGE -101
+#endif
+
+TEST(ScudoWrappersCTest, Mallopt) {
+  errno = 0;
+  EXPECT_EQ(mallopt(-1000, 1), 0);
+  // mallopt doesn't set errno.
+  EXPECT_EQ(errno, 0);
+
+  EXPECT_EQ(mallopt(M_PURGE, 0), 1);
+
+  EXPECT_EQ(mallopt(M_DECAY_TIME, 1), 1);
+  EXPECT_EQ(mallopt(M_DECAY_TIME, 0), 1);
+  EXPECT_EQ(mallopt(M_DECAY_TIME, 1), 1);
+  EXPECT_EQ(mallopt(M_DECAY_TIME, 0), 1);
+}
+
+TEST(ScudoWrappersCTest, OtherAlloc) {
+  const size_t PageSize = sysconf(_SC_PAGESIZE);
+
+  void *P = pvalloc(Size);
+  EXPECT_NE(P, nullptr);
+  EXPECT_EQ(reinterpret_cast<uintptr_t>(P) & (PageSize - 1), 0U);
+  EXPECT_LE(PageSize, malloc_usable_size(P));
+  free(P);
+
+  EXPECT_EQ(pvalloc(SIZE_MAX), nullptr);
+
+  P = pvalloc(Size);
+  EXPECT_NE(P, nullptr);
+  EXPECT_EQ(reinterpret_cast<uintptr_t>(P) & (PageSize - 1), 0U);
+  free(P);
+
+  EXPECT_EQ(valloc(SIZE_MAX), nullptr);
+}
Index: lib/scudo/standalone/tests/wrappers_cpp_test.cc
===================================================================
--- /dev/null
+++ lib/scudo/standalone/tests/wrappers_cpp_test.cc
@@ -0,0 +1,108 @@
+//===-- wrappers_cpp_test.cc ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "gtest/gtest.h"
+
+#include <condition_variable>
+#include <mutex>
+#include <thread>
+
+// Note that every Cxx allocation function in the test binary will be fulfilled
+// by Scudo. See the comment in the C counterpart of this file.
+
+extern "C" __attribute__((visibility("default"))) const char *
+__scudo_default_options() {
+  return "quarantine_size_kb=256:thread_local_quarantine_size_kb=128:"
+         "quarantine_max_chunk_size=512:dealloc_type_mismatch=true";
+}
+
+template <typename T> static void testCxxNew() {
+  T *P = new T;
+  EXPECT_NE(P, nullptr);
+  memset(P, 0x42, sizeof(T));
+  EXPECT_DEATH(delete[] P, "");
+  delete P;
+  EXPECT_DEATH(delete P, "");
+
+  P = new T;
+  EXPECT_NE(P, nullptr);
+  memset(P, 0x42, sizeof(T));
+  operator delete(P, sizeof(T));
+
+  P = new (std::nothrow) T;
+  EXPECT_NE(P, nullptr);
+  memset(P, 0x42, sizeof(T));
+  delete P;
+
+  const size_t N = 16U;
+  T *A = new T[N];
+  EXPECT_NE(A, nullptr);
+  memset(A, 0x42, sizeof(T) * N);
+  EXPECT_DEATH(delete A, "");
+  delete[] A;
+  EXPECT_DEATH(delete[] A, "");
+
+  A = new T[N];
+  EXPECT_NE(A, nullptr);
+  memset(A, 0x42, sizeof(T) * N);
+  operator delete[](A, sizeof(T) * N);
+
+  A = new (std::nothrow) T[N];
+  EXPECT_NE(A, nullptr);
+  memset(A, 0x42, sizeof(T) * N);
+  delete[] A;
+}
+
+TEST(ScudoWrappersCppTest, New) {
+  testCxxNew<bool>();
+  testCxxNew<uint8_t>();
+  testCxxNew<uint16_t>();
+  testCxxNew<uint32_t>();
+  testCxxNew<uint64_t>();
+  testCxxNew<float>();
+  testCxxNew<double>();
+  testCxxNew<long double>();
+}
+
+static std::mutex Mutex;
+static std::condition_variable Cv;
+static bool Ready = false;
+
+static void stressNew() {
+  std::vector<uintptr_t *> V;
+  {
+    std::unique_lock<std::mutex> Lock(Mutex);
+    while (!Ready)
+      Cv.wait(Lock);
+  }
+  for (size_t I = 0; I < 256U; I++) {
+    const size_t N = std::rand() % 128U;
+    uintptr_t *P = new uintptr_t[N];
+    if (P) {
+      memset(P, 0x42, sizeof(uintptr_t) * N);
+      V.push_back(P);
+    }
+  }
+  while (!V.empty()) {
+    delete[] V.back();
+    V.pop_back();
+  }
+}
+
+TEST(ScudoWrappersCppTest, ThreadedNew) {
+  std::thread Threads[32];
+  for (size_t I = 0U; I < sizeof(Threads) / sizeof(Threads[0]); I++)
+    Threads[I] = std::thread(stressNew);
+  {
+    std::unique_lock<std::mutex> Lock(Mutex);
+    Ready = true;
+    Cv.notify_all();
+  }
+  for (auto &T : Threads)
+    T.join();
+}
Index: lib/scudo/standalone/tsd.h
===================================================================
--- lib/scudo/standalone/tsd.h
+++ lib/scudo/standalone/tsd.h
@@ -15,6 +15,11 @@
 #include <limits.h> // for PTHREAD_DESTRUCTOR_ITERATIONS

+// With some build setups, this might still not be defined.
+#ifndef PTHREAD_DESTRUCTOR_ITERATIONS
+#define PTHREAD_DESTRUCTOR_ITERATIONS 4
+#endif
+
 namespace scudo {

 template <class Allocator> struct ALIGNED(SCUDO_CACHE_LINE_SIZE) TSD {
Index: lib/scudo/standalone/wrappers.inc
===================================================================
--- /dev/null
+++ lib/scudo/standalone/wrappers.inc
@@ -0,0 +1,175 @@
+//===-- wrappers.inc --------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_PREFIX
+#error "Define SCUDO_PREFIX prior to including this file!"
+#endif
+
+// malloc-type functions have to be aligned to std::max_align_t. This is
+// distinct from (1U << SCUDO_MIN_ALIGNMENT_LOG), since C++ new-type functions
+// do not have to abide by the same requirement.
+#ifndef SCUDO_MALLOC_ALIGNMENT
+#define SCUDO_MALLOC_ALIGNMENT FIRST_32_SECOND_64(8U, 16U)
+#endif
+
+INTERFACE WEAK void *SCUDO_PREFIX(calloc)(size_t nmemb, size_t size) {
+  scudo::uptr Product;
+  if (UNLIKELY(scudo::checkForCallocOverflow(size, nmemb, &Product))) {
+    // There doesn't appear to be a specific errno for this situation in the
+    // standard, but Android uses ENOMEM.
+    if (SCUDO_ANDROID)
+      errno = ENOMEM;
+    if (SCUDO_ALLOCATOR.canReturnNull())
+      return nullptr;
+    scudo::reportCallocOverflow(nmemb, size);
+  }
+  return scudo::setErrnoOnNull(SCUDO_ALLOCATOR.allocate(
+      Product, scudo::Chunk::Origin::Malloc, SCUDO_MALLOC_ALIGNMENT, true));
+}
+
+INTERFACE WEAK void SCUDO_PREFIX(free)(void *ptr) {
+  SCUDO_ALLOCATOR.deallocate(ptr, scudo::Chunk::Origin::Malloc);
+}
+
+INTERFACE WEAK struct SCUDO_MALLINFO SCUDO_PREFIX(mallinfo)(void) {
+  struct SCUDO_MALLINFO Info = {};
+  scudo::StatCounters Stats;
+  SCUDO_ALLOCATOR.getStats(Stats);
+  Info.uordblks =
+      static_cast<__scudo_mallinfo_data_t>(Stats[scudo::StatAllocated]);
+  return Info;
+}
+
+INTERFACE WEAK void *SCUDO_PREFIX(malloc)(size_t size) {
+  return scudo::setErrnoOnNull(SCUDO_ALLOCATOR.allocate(
+      size, scudo::Chunk::Origin::Malloc, SCUDO_MALLOC_ALIGNMENT));
+}
+
+#if SCUDO_ANDROID
+INTERFACE WEAK size_t SCUDO_PREFIX(malloc_usable_size)(const void *ptr) {
+#else
+INTERFACE WEAK size_t SCUDO_PREFIX(malloc_usable_size)(void *ptr) {
+#endif
+  return SCUDO_ALLOCATOR.getUsableSize(ptr);
+}
+
+INTERFACE WEAK void *SCUDO_PREFIX(memalign)(size_t alignment, size_t size) {
+  // Android rounds up the alignment to a power of two if it isn't one.
+  if (SCUDO_ANDROID) {
+    if (UNLIKELY(!alignment)) {
+      alignment = 1U;
+    } else {
+      if (UNLIKELY(!scudo::isPowerOfTwo(alignment)))
+        alignment = scudo::roundUpToPowerOfTwo(alignment);
+    }
+  } else {
+    if (UNLIKELY(!scudo::isPowerOfTwo(alignment))) {
+      errno = EINVAL;
+      if (SCUDO_ALLOCATOR.canReturnNull())
+        return nullptr;
+      scudo::reportAlignmentNotPowerOfTwo(alignment);
+    }
+  }
+  return SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Memalign,
+                                  alignment);
+}
+
+INTERFACE WEAK int SCUDO_PREFIX(posix_memalign)(void **memptr, size_t alignment,
+                                                size_t size) {
+  if (UNLIKELY(scudo::checkPosixMemalignAlignment(alignment))) {
+    if (!SCUDO_ALLOCATOR.canReturnNull())
+      scudo::reportInvalidPosixMemalignAlignment(alignment);
+    return EINVAL;
+  }
+  void *Ptr =
+      SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Memalign, alignment);
+  if (UNLIKELY(!Ptr))
+    return ENOMEM;
+  *memptr = Ptr;
+  return 0;
+}
+
+INTERFACE WEAK void *SCUDO_PREFIX(pvalloc)(size_t size) {
+  const scudo::uptr PageSize = scudo::getPageSizeCached();
+  if (UNLIKELY(scudo::checkForPvallocOverflow(size, PageSize))) {
+    errno = ENOMEM;
+    if (SCUDO_ALLOCATOR.canReturnNull())
+      return nullptr;
+    scudo::reportPvallocOverflow(size);
+  }
+  // pvalloc(0) should allocate one page.
+  return scudo::setErrnoOnNull(SCUDO_ALLOCATOR.allocate(
+      size ? scudo::roundUpTo(size, PageSize) : PageSize,
+      scudo::Chunk::Origin::Memalign, PageSize));
+}
+
+INTERFACE WEAK void *SCUDO_PREFIX(realloc)(void *ptr, size_t size) {
+  if (!ptr)
+    return scudo::setErrnoOnNull(SCUDO_ALLOCATOR.allocate(
+        size, scudo::Chunk::Origin::Malloc, SCUDO_MALLOC_ALIGNMENT));
+  if (size == 0) {
+    SCUDO_ALLOCATOR.deallocate(ptr, scudo::Chunk::Origin::Malloc);
+    return nullptr;
+  }
+  return scudo::setErrnoOnNull(
+      SCUDO_ALLOCATOR.reallocate(ptr, size, SCUDO_MALLOC_ALIGNMENT));
+}
+
+INTERFACE WEAK void *SCUDO_PREFIX(valloc)(size_t size) {
+  return scudo::setErrnoOnNull(SCUDO_ALLOCATOR.allocate(
+      size, scudo::Chunk::Origin::Memalign, scudo::getPageSizeCached()));
+}
+
+// Bionic wants a function named PREFIX_iterate and not PREFIX_malloc_iterate,
+// which is somewhat inconsistent with the rest; work around that.
+#if SCUDO_ANDROID && _BIONIC
+#define SCUDO_ITERATE iterate
+#else
+#define SCUDO_ITERATE malloc_iterate
+#endif
+
+INTERFACE WEAK int SCUDO_PREFIX(SCUDO_ITERATE)(
+    uintptr_t base, size_t size,
+    void (*callback)(uintptr_t base, size_t size, void *arg), void *arg) {
+  SCUDO_ALLOCATOR.iterateOverChunks(base, size, callback, arg);
+  return 0;
+}
+
+INTERFACE WEAK void SCUDO_PREFIX(malloc_disable)() {
+  SCUDO_ALLOCATOR.disable();
+}
+
+INTERFACE WEAK void SCUDO_PREFIX(malloc_enable)() { SCUDO_ALLOCATOR.enable(); }
+
+INTERFACE WEAK int SCUDO_PREFIX(mallopt)(int param, UNUSED int value) {
+  if (param == M_DECAY_TIME) {
+    // TODO(kostyak): set release_to_os_interval_ms accordingly.
+    return 1;
+  } else if (param == M_PURGE) {
+    SCUDO_ALLOCATOR.releaseToOS();
+    return 1;
+  }
+  return 0;
+}
+
+INTERFACE WEAK void *SCUDO_PREFIX(aligned_alloc)(size_t alignment,
+                                                 size_t size) {
+  if (UNLIKELY(scudo::checkAlignedAllocAlignmentAndSize(alignment, size))) {
+    errno = EINVAL;
+    if (SCUDO_ALLOCATOR.canReturnNull())
+      return nullptr;
+    scudo::reportInvalidAlignedAllocAlignment(alignment, size);
+  }
+  return scudo::setErrnoOnNull(
+      SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Malloc, alignment));
+}
+
+INTERFACE WEAK int SCUDO_PREFIX(malloc_info)(int, FILE *) {
+  errno = ENOTSUP;
+  return -1;
+}
Index: lib/scudo/standalone/wrappers_c.h
===================================================================
--- /dev/null
+++ lib/scudo/standalone/wrappers_c.h
@@ -0,0 +1,52 @@
+//===-- wrappers_c.h --------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_WRAPPERS_C_H_
+#define SCUDO_WRAPPERS_C_H_
+
+#include "platform.h"
+#include "stats.h"
+
+// Bionic's struct mallinfo consists of size_t fields (mallinfo(3) uses int).
+#if SCUDO_ANDROID
+typedef size_t __scudo_mallinfo_data_t;
+#else
+typedef int __scudo_mallinfo_data_t;
+#endif
+
+struct __scudo_mallinfo {
+  __scudo_mallinfo_data_t arena;
+  __scudo_mallinfo_data_t ordblks;
+  __scudo_mallinfo_data_t smblks;
+  __scudo_mallinfo_data_t hblks;
+  __scudo_mallinfo_data_t hblkhd;
+  __scudo_mallinfo_data_t usmblks;
+  __scudo_mallinfo_data_t fsmblks;
+  __scudo_mallinfo_data_t uordblks;
+  __scudo_mallinfo_data_t fordblks;
+  __scudo_mallinfo_data_t keepcost;
+};
+
+// Android sometimes includes malloc.h no matter what, which leads to
+// conflicting return types for mallinfo() if we use our own structure. So if
+// struct mallinfo is declared (#define courtesy of malloc.h), use it directly.
+#if STRUCT_MALLINFO_DECLARED
+#define SCUDO_MALLINFO mallinfo
+#else
+#define SCUDO_MALLINFO __scudo_mallinfo
+#endif
+
+#ifndef M_DECAY_TIME
+#define M_DECAY_TIME -100
+#endif
+
+#ifndef M_PURGE
+#define M_PURGE -101
+#endif
+
+#endif // SCUDO_WRAPPERS_C_H_
Index: lib/scudo/standalone/wrappers_c.cc
===================================================================
--- /dev/null
+++ lib/scudo/standalone/wrappers_c.cc
@@ -0,0 +1,39 @@
+//===-- wrappers_c.cc -------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "platform.h"
+
+// Skip this compilation unit if compiled as part of Bionic.
+#if !SCUDO_ANDROID || !_BIONIC
+
+#include "allocator_config.h"
+#include "wrappers_c.h"
+#include "wrappers_c_checks.h"
+
+#include <stdint.h>
+#include <stdio.h>
+
+static scudo::Allocator<scudo::Config> Allocator;
+// Pointer to the static allocator so that the C++ wrappers can access it.
+// Technically we could have a completely separated heap for C & C++, but in
+// reality the amount of cross pollination between the two is staggering.
+scudo::Allocator<scudo::Config> *AllocatorPtr = &Allocator;
+
+extern "C" {
+
+#define SCUDO_PREFIX(name) name
+#define SCUDO_ALLOCATOR Allocator
+#include "wrappers.inc"
+#undef SCUDO_ALLOCATOR
+#undef SCUDO_PREFIX
+
+INTERFACE void __scudo_print_stats(void) { Allocator.printStats(); }
+
+} // extern "C"
+
+#endif // !SCUDO_ANDROID || !_BIONIC
Index: lib/scudo/standalone/wrappers_c_bionic.cc
===================================================================
--- /dev/null
+++ lib/scudo/standalone/wrappers_c_bionic.cc
@@ -0,0 +1,49 @@
+//===-- wrappers_c_bionic.cc ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "platform.h"
+
+// This is only used when compiled as part of Bionic.
+#if SCUDO_ANDROID && _BIONIC
+
+#include "allocator_config.h"
+#include "wrappers_c.h"
+#include "wrappers_c_checks.h"
+
+#include <stdint.h>
+#include <stdio.h>
+
+static scudo::Allocator<scudo::AndroidConfig> Allocator;
+static scudo::Allocator<scudo::AndroidSvelteConfig> SvelteAllocator;
+
+extern "C" {
+
+// Regular MallocDispatch definitions.
+#define SCUDO_PREFIX(name) CONCATENATE(scudo_, name)
+#define SCUDO_ALLOCATOR Allocator
+#include "wrappers.inc"
+#undef SCUDO_ALLOCATOR
+#undef SCUDO_PREFIX
+
+// Svelte MallocDispatch definitions.
+#define SCUDO_PREFIX(name) CONCATENATE(scudo_svelte_, name)
+#define SCUDO_ALLOCATOR SvelteAllocator
+#include "wrappers.inc"
+#undef SCUDO_ALLOCATOR
+#undef SCUDO_PREFIX
+
+// The following is the only function that will end up initializing both
+// allocators, which will result in a slight increase in memory footprint.
+INTERFACE void __scudo_print_stats(void) {
+  Allocator.printStats();
+  SvelteAllocator.printStats();
+}
+
+} // extern "C"
+
+#endif // SCUDO_ANDROID && _BIONIC
Index: lib/scudo/standalone/wrappers_c_checks.h
===================================================================
--- /dev/null
+++ lib/scudo/standalone/wrappers_c_checks.h
@@ -0,0 +1,56 @@
+//===-- wrappers_c_checks.h -------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_CHECKS_H_
+#define SCUDO_CHECKS_H_
+
+#include "common.h"
+
+#include <errno.h>
+
+namespace scudo {
+
+// A common errno setting logic shared by almost all Scudo C wrappers.
+INLINE void *setErrnoOnNull(void *Ptr) {
+  if (UNLIKELY(!Ptr))
+    errno = ENOMEM;
+  return Ptr;
+}
+
+// Checks return true on failure.
+
+// Checks aligned_alloc() parameters: verifies that the alignment is a power of
+// two and that the size is a multiple of the alignment.
+INLINE bool checkAlignedAllocAlignmentAndSize(uptr Alignment, uptr Size) {
+  return Alignment == 0 || !isPowerOfTwo(Alignment) ||
+         !isAligned(Size, Alignment);
+}
+
+// Checks posix_memalign() parameters: verifies that the alignment is a power
+// of two and a multiple of sizeof(void *).
+INLINE bool checkPosixMemalignAlignment(uptr Alignment) {
+  return Alignment == 0 || !isPowerOfTwo(Alignment) ||
+         !isAligned(Alignment, sizeof(void *));
+}
+
+// Returns true if calloc(Size, N) overflows on the Size*N calculation. The
+// typical way would be to check for (UINTPTR_MAX / Size) < N, but the division
+// ends up being very costly, so use a builtin supported by recent clang & GCC.
+INLINE bool checkForCallocOverflow(uptr Size, uptr N, uptr *Product) {
+  return __builtin_umull_overflow(Size, N, Product);
+}
+
+// Returns true if the size passed to pvalloc overflows when rounded up to the
+// next multiple of PageSize.
+INLINE bool checkForPvallocOverflow(uptr Size, uptr PageSize) {
+  return roundUpTo(Size, PageSize) < Size;
+}
+
+} // namespace scudo
+
+#endif // SCUDO_CHECKS_H_
Index: lib/scudo/standalone/wrappers_cpp.cc
===================================================================
--- /dev/null
+++ lib/scudo/standalone/wrappers_cpp.cc
@@ -0,0 +1,107 @@
+//===-- wrappers_cpp.cc -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "platform.h"
+
+// Skip this compilation unit if compiled as part of Bionic.
+#if !SCUDO_ANDROID || !_BIONIC
+
+#include "allocator_config.h"
+
+#include <stdint.h>
+
+extern scudo::Allocator<scudo::Config> *AllocatorPtr;
+
+namespace std {
+struct nothrow_t {};
+enum class align_val_t : size_t {};
+} // namespace std
+
+INTERFACE WEAK void *operator new(size_t size) {
+  return AllocatorPtr->allocate(size, scudo::Chunk::Origin::New);
+}
+INTERFACE WEAK void *operator new[](size_t size) {
+  return AllocatorPtr->allocate(size, scudo::Chunk::Origin::NewArray);
+}
+INTERFACE WEAK void *operator new(size_t size,
+                                  std::nothrow_t const &) NOEXCEPT {
+  return AllocatorPtr->allocate(size, scudo::Chunk::Origin::New);
+}
+INTERFACE WEAK void *operator new[](size_t size,
+                                    std::nothrow_t const &) NOEXCEPT {
+  return AllocatorPtr->allocate(size, scudo::Chunk::Origin::NewArray);
+}
+INTERFACE WEAK void *operator new(size_t size, std::align_val_t align) {
+  return AllocatorPtr->allocate(size, scudo::Chunk::Origin::New,
+                                static_cast<scudo::uptr>(align));
+}
+INTERFACE WEAK void *operator new[](size_t size, std::align_val_t align) {
+  return AllocatorPtr->allocate(size, scudo::Chunk::Origin::NewArray,
+                                static_cast<scudo::uptr>(align));
+}
+INTERFACE WEAK void *operator new(size_t size, std::align_val_t align,
+                                  std::nothrow_t const &) NOEXCEPT {
+  return AllocatorPtr->allocate(size, scudo::Chunk::Origin::New,
+                                static_cast<scudo::uptr>(align));
+}
+INTERFACE WEAK void *operator new[](size_t size, std::align_val_t align,
+                                    std::nothrow_t const &) NOEXCEPT {
+  return AllocatorPtr->allocate(size, scudo::Chunk::Origin::NewArray,
+                                static_cast<scudo::uptr>(align));
+}
+
+INTERFACE WEAK void operator delete(void *ptr)NOEXCEPT {
+  AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::New);
+}
+INTERFACE WEAK void operator delete[](void *ptr) NOEXCEPT {
+  AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::NewArray);
+}
+INTERFACE WEAK void operator delete(void *ptr, std::nothrow_t const &)NOEXCEPT {
+  AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::New);
+}
+INTERFACE WEAK void operator delete[](void *ptr,
+                                      std::nothrow_t const &) NOEXCEPT {
+  AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::NewArray);
+}
+INTERFACE WEAK void operator delete(void *ptr, size_t size)NOEXCEPT {
+  AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::New, size);
+}
+INTERFACE WEAK void operator delete[](void *ptr, size_t size) NOEXCEPT {
+  AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::NewArray, size);
+}
+INTERFACE WEAK void operator delete(void *ptr, std::align_val_t align)NOEXCEPT {
+  AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::New, 0,
+                           static_cast<scudo::uptr>(align));
+}
+INTERFACE WEAK void operator delete[](void *ptr,
+                                      std::align_val_t align) NOEXCEPT {
+  AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::NewArray, 0,
+                           static_cast<scudo::uptr>(align));
+}
+INTERFACE WEAK void operator delete(void *ptr, std::align_val_t align,
+                                    std::nothrow_t const &)NOEXCEPT {
+  AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::New, 0,
+                           static_cast<scudo::uptr>(align));
+}
+INTERFACE WEAK void operator delete[](void *ptr, std::align_val_t align,
+                                      std::nothrow_t const &) NOEXCEPT {
+  AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::NewArray, 0,
+                           static_cast<scudo::uptr>(align));
+}
+INTERFACE WEAK void operator delete(void *ptr, size_t size,
+                                    std::align_val_t align)NOEXCEPT {
+  AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::New, size,
+                           static_cast<scudo::uptr>(align));
+}
+INTERFACE WEAK void operator delete[](void *ptr, size_t size,
+                                      std::align_val_t align) NOEXCEPT {
+  AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::NewArray, size,
+                           static_cast<scudo::uptr>(align));
+}
+
+#endif // !SCUDO_ANDROID || !_BIONIC
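
Reviewer note: the patch builds all C entry points by textually including wrappers.inc with SCUDO_PREFIX and SCUDO_ALLOCATOR redefined before each inclusion, as wrappers_c.cc (unprefixed symbols) and wrappers_c_bionic.cc (scudo_ and scudo_svelte_ prefixes) do. For anyone who wants to see that pattern in isolation, here is a minimal, self-contained sketch; ToyAllocator, DEFINE_WRAPPERS, and the toy_ prefixes are invented for illustration only, and the wrapper bodies are kept in a macro instead of a separate .inc file purely to keep the sketch in one translation unit. This is not part of the patch.

```cpp
// Illustrative sketch only: shows how redefining a prefix before each
// expansion stamps out differently named entry points bound to different
// allocator instances, mirroring wrappers_c.cc / wrappers_c_bionic.cc.
#include <cstddef>
#include <cstdio>
#include <cstdlib>

struct ToyAllocator {
  const char *Name;
  void *allocate(size_t Size) { return ::malloc(Size); }
  void deallocate(void *Ptr) { ::free(Ptr); }
};

static ToyAllocator Regular{"regular"};
static ToyAllocator Svelte{"svelte"};

// Stand-in for `#include "wrappers.inc"`: each expansion defines
// PREFIX##malloc / PREFIX##free forwarding to one allocator instance.
#define DEFINE_WRAPPERS(PREFIX, ALLOCATOR)                                     \
  extern "C" void *PREFIX##malloc(size_t Size) {                              \
    return ALLOCATOR.allocate(Size);                                          \
  }                                                                           \
  extern "C" void PREFIX##free(void *Ptr) { ALLOCATOR.deallocate(Ptr); }

DEFINE_WRAPPERS(toy_, Regular)       // defines toy_malloc / toy_free
DEFINE_WRAPPERS(toy_svelte_, Svelte) // defines toy_svelte_malloc / toy_svelte_free

int main() {
  void *P = toy_malloc(64);
  std::printf("allocated %p from the %s allocator\n", P, Regular.Name);
  toy_free(P);
  void *Q = toy_svelte_malloc(64);
  std::printf("allocated %p from the %s allocator\n", Q, Svelte.Name);
  toy_svelte_free(Q);
  return 0;
}
```

The same mechanism is what lets the Bionic build produce both the regular and the svelte MallocDispatch sets from a single set of wrapper definitions, while the non-Bionic build expands them once with an empty prefix.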