Index: lib/scudo/standalone/CMakeLists.txt
===================================================================
--- lib/scudo/standalone/CMakeLists.txt
+++ lib/scudo/standalone/CMakeLists.txt
@@ -33,18 +33,29 @@
   append_list_if(COMPILER_RT_HAS_Z_GLOBAL -Wl,-z,global SCUDO_LINK_FLAGS)
 endif()
 
-set(SCUDO_SOURCES empty.cc)
+set(SCUDO_SOURCES
+  common.cc
+  fuchsia.cc
+  linux.cc)
 
 set(SCUDO_HEADERS
   platform.h
   internal_defs.h
   atomic_helpers.h
-  list.h)
+  list.h
+  mutex.h
+  linux.h)
 
 if(COMPILER_RT_HAS_SCUDO_STANDALONE)
+  add_compiler_rt_object_libraries(RTScudoStandalone
+    ARCHS ${SCUDO_STANDALONE_SUPPORTED_ARCH}
+    SOURCES ${SCUDO_SOURCES}
+    ADDITIONAL_HEADERS ${SCUDO_HEADERS}
+    CFLAGS ${SCUDO_CFLAGS})
+
   add_compiler_rt_runtime(clang_rt.scudo_standalone
     STATIC
-    ARCHS ${SCUDO_SUPPORTED_ARCH}
+    ARCHS ${SCUDO_STANDALONE_SUPPORTED_ARCH}
     SOURCES ${SCUDO_SOURCES}
     ADDITIONAL_HEADERS ${SCUDO_HEADERS}
     CFLAGS ${SCUDO_CFLAGS}
Index: lib/scudo/standalone/atomic_helpers.h
===================================================================
--- lib/scudo/standalone/atomic_helpers.h
+++ lib/scudo/standalone/atomic_helpers.h
@@ -51,7 +51,7 @@
 struct atomic_u64 {
   typedef u64 Type;
   // On 32-bit platforms u64 is not necessarily aligned on 8 bytes.
-  volatile ALIGNED(8) Type ValDoNotUse;
+  ALIGNED(8) volatile Type ValDoNotUse;
 };
 
 struct atomic_uptr {
Index: lib/scudo/standalone/common.h
===================================================================
--- lib/scudo/standalone/common.h
+++ lib/scudo/standalone/common.h
@@ -0,0 +1,171 @@
+//===-- common.h ------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_COMMON_H_
+#define SCUDO_COMMON_H_
+
+#include "internal_defs.h"
+
+#include <stddef.h>
+#include <string.h>
+
+namespace scudo {
+
+template <class Dest, class Source> INLINE Dest bit_cast(const Source &S) {
+  COMPILER_CHECK(sizeof(Dest) == sizeof(Source));
+  Dest D;
+  memcpy(&D, &S, sizeof(D));
+  return D;
+}
+
+INLINE constexpr uptr roundUpTo(uptr X, uptr Boundary) {
+  return (X + Boundary - 1) & ~(Boundary - 1);
+}
+
+INLINE constexpr uptr roundDownTo(uptr X, uptr Boundary) {
+  return X & ~(Boundary - 1);
+}
+
+INLINE constexpr bool isAligned(uptr X, uptr Alignment) {
+  return (X & (Alignment - 1)) == 0;
+}
+
+template <class T> constexpr T Min(T A, T B) { return A < B ? A : B; }
+
+template <class T> constexpr T Max(T A, T B) { return A > B ? A : B; }
+
+template <class T> void Swap(T &A, T &B) {
+  T Tmp = A;
+  A = B;
+  B = Tmp;
+}
+
+INLINE bool isPowerOfTwo(uptr X) { return (X & (X - 1)) == 0; }
+
+INLINE uptr getMostSignificantSetBitIndex(uptr X) {
+  DCHECK_NE(X, 0U);
+  return SCUDO_WORDSIZE - 1U - static_cast<uptr>(__builtin_clzl(X));
+}
+
+INLINE uptr roundUpToPowerOfTwo(uptr Size) {
+  DCHECK(Size);
+  if (isPowerOfTwo(Size))
+    return Size;
+  const uptr Up = getMostSignificantSetBitIndex(Size);
+  DCHECK_LT(Size, (1UL << (Up + 1)));
+  DCHECK_GT(Size, (1UL << Up));
+  return 1UL << (Up + 1);
+}
+
+INLINE uptr getLeastSignificantSetBitIndex(uptr X) {
+  DCHECK_NE(X, 0U);
+  return static_cast<uptr>(__builtin_ctzll(X));
+}
+
+INLINE uptr getLog2(uptr X) {
+  DCHECK(isPowerOfTwo(X));
+  return getLeastSignificantSetBitIndex(X);
+}
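+// For reference, a few concrete values for the helpers above:
+//   roundUpTo(13, 8) == 16, roundDownTo(13, 8) == 8,
+//   roundUpToPowerOfTwo(33) == 64, getLog2(64) == 6.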
+INLINE u32 getRandomU32(u32 *State) {
+  // ANSI C linear congruential PRNG (16-bit output).
+  // return (*State = *State * 1103515245 + 12345) >> 16;
+  // XorShift (32-bit output).
+  *State ^= *State << 13;
+  *State ^= *State >> 17;
+  *State ^= *State << 5;
+  return *State;
+}
+
+INLINE u32 getRandomModN(u32 *State, u32 N) {
+  return getRandomU32(State) % N; // [0, N)
+}
+
+template <typename T> INLINE void shuffle(T *A, u32 N, u32 *RandState) {
+  if (N <= 1)
+    return;
+  u32 State = *RandState;
+  for (u32 I = N - 1; I > 0; I--)
+    Swap(A[I], A[getRandomModN(&State, I + 1)]);
+  *RandState = State;
+}
+
+// Hardware specific inlinable functions.
+
+INLINE void yieldProcessor(u8 Count) {
+#if defined(__i386__) || defined(__x86_64__)
+  __asm__ __volatile__("" ::: "memory");
+  for (u8 I = 0; I < Count; I++)
+    __asm__ __volatile__("pause");
+#elif defined(__aarch64__) || defined(__arm__)
+  __asm__ __volatile__("" ::: "memory");
+  for (u8 I = 0; I < Count; I++)
+    __asm__ __volatile__("yield");
+#endif
+  __asm__ __volatile__("" ::: "memory");
+}
+
+// Platform specific functions.
+
+void yieldPlatform();
+
+extern uptr PageSizeCached;
+uptr getPageSizeSlow();
+INLINE uptr getPageSizeCached() {
+  if (LIKELY(PageSizeCached))
+    return PageSizeCached;
+  return getPageSizeSlow();
+}
+
+u32 getNumberOfCPUs();
+
+const char *getEnv(const char *Name);
+
+u64 getMonotonicTime();
+
+// Our randomness gathering function is limited to 256 bytes to ensure we get
+// as many bytes as requested, and to avoid interruptions (on Linux).
+constexpr uptr MaxRandomLength = 256U;
+bool getRandom(void *Buffer, uptr Length, bool Blocking = false);
+
+// Platform memory mapping functions.
+
+#define MAP_ALLOWNOMEM (1U << 0)
+#define MAP_NOACCESS (1U << 1)
+#define MAP_RESIZABLE (1U << 2)
+
+// Our platform memory mapping use is restricted to 3 scenarios:
+// - reserve memory at a random address (MAP_NOACCESS);
+// - commit memory in a previously reserved space;
+// - commit memory at a random address.
+// As such, only a subset of parameter combinations is valid, which is checked
+// by the function implementation. The Extra parameter allows passing opaque
+// platform-specific data to the function. On error, returns nullptr if
+// MAP_ALLOWNOMEM is specified, and dies otherwise.
+void *map(void *Addr, uptr Size, const char *Name, uptr Flags = 0,
+          u64 *Extra = nullptr);
+
+// Indicates that we are getting rid of the whole mapping, which might have
+// further consequences on Extra, depending on the platform.
+#define UNMAP_ALL (1U << 0)
+
+void unmap(void *Addr, uptr Size, uptr Flags = 0, u64 *Extra = nullptr);
+
+void releasePagesToOS(uptr BaseAddress, uptr Offset, uptr Size,
+                      u64 *Extra = nullptr);
+
+// Internal map & unmap fatal error. This must not call map().
+void NORETURN dieOnMapUnmapError(bool OutOfMemory = false);
+
+// Logging related functions.
+
+void setAbortMessage(const char *Message);
+
+} // namespace scudo
+
+#endif // SCUDO_COMMON_H_
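For reviewers, the three mapping scenarios above translate into call sequences along these lines. This is a sketch against the declared interface only; the function name and the ReservedSize/PageSize parameters are placeholders, not names from the patch:

void reserveThenCommit(scudo::uptr ReservedSize, scudo::uptr PageSize) {
  scudo::u64 PlatformData = 0;
  // Scenario 1: reserve an inaccessible region. With MAP_ALLOWNOMEM the
  // call returns nullptr on OOM instead of dying.
  void *Reserved = scudo::map(nullptr, ReservedSize, "scudo:reserved",
                              MAP_NOACCESS | MAP_ALLOWNOMEM, &PlatformData);
  if (!Reserved)
    return;
  // Scenario 2: commit one read-write page inside the reservation.
  void *Q = reinterpret_cast<void *>(
      reinterpret_cast<scudo::uptr>(Reserved) + PageSize);
  scudo::map(Q, PageSize, "scudo:committed", 0, &PlatformData);
  // ... use the committed page ...
  scudo::unmap(Reserved, ReservedSize, UNMAP_ALL, &PlatformData);
}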
Index: lib/scudo/standalone/common.cc
===================================================================
--- lib/scudo/standalone/common.cc
+++ lib/scudo/standalone/common.cc
@@ -0,0 +1,32 @@
+//===-- common.cc -----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "common.h"
+#include "atomic_helpers.h"
+
+namespace scudo {
+
+uptr PageSizeCached;
+uptr getPageSize();
+
+uptr getPageSizeSlow() {
+  PageSizeCached = getPageSize();
+  CHECK_NE(PageSizeCached, 0);
+  return PageSizeCached;
+}
+
+// Fatal internal map() or unmap() error (potentially OOM related).
+void NORETURN dieOnMapUnmapError(bool OutOfMemory) {
+  outputRaw("Scudo ERROR: internal map or unmap failure");
+  if (OutOfMemory)
+    outputRaw(" (OOM)");
+  outputRaw("\n");
+  die();
+}
+
+} // namespace scudo
Index: lib/scudo/standalone/empty.cc
===================================================================
--- lib/scudo/standalone/empty.cc
+++ lib/scudo/standalone/empty.cc
@@ -1,10 +0,0 @@
-//===-- empty.cc ------------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "atomic_helpers.h"
-#include "list.h"
Index: lib/scudo/standalone/fuchsia.cc
===================================================================
--- lib/scudo/standalone/fuchsia.cc
+++ lib/scudo/standalone/fuchsia.cc
@@ -0,0 +1,216 @@
+//===-- fuchsia.cc ----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "platform.h"
+
+#if SCUDO_FUCHSIA
+
+#include "common.h"
+#include "mutex.h"
+
+#include <limits.h> // for PAGE_SIZE
+#include <stdlib.h> // for abort()
+#include <zircon/process.h>
+#include <zircon/sanitizer.h>
+#include <zircon/syscalls.h>
+
+namespace scudo {
+
+void yieldPlatform() {
+  const zx_status_t Status = _zx_nanosleep(0);
+  CHECK_EQ(Status, ZX_OK);
+}
+
+uptr getPageSize() { return PAGE_SIZE; }
+
+void NORETURN die() { __builtin_trap(); }
+
+// We zero-initialize the Extra parameter of map(), so make sure this is
+// consistent with ZX_HANDLE_INVALID.
+COMPILER_CHECK(ZX_HANDLE_INVALID == 0);
+
+struct MapInfo {
+  zx_handle_t Vmar;
+  zx_handle_t Vmo;
+};
+COMPILER_CHECK(sizeof(MapInfo) == sizeof(u64));
+
+static void *allocateVmar(uptr Size, MapInfo *Info, bool AllowNoMem) {
+  // Only scenario so far.
+  DCHECK(Info);
+  DCHECK_EQ(Info->Vmar, ZX_HANDLE_INVALID);
+
+  uintptr_t P;
+  const zx_status_t Status = _zx_vmar_allocate(
+      _zx_vmar_root_self(),
+      ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE | ZX_VM_CAN_MAP_SPECIFIC, 0,
+      Size, &Info->Vmar, &P);
+  if (Status != ZX_OK) {
+    if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
+      dieOnMapUnmapError(Status == ZX_ERR_NO_MEMORY);
+    return nullptr;
+  }
+  return reinterpret_cast<void *>(P);
+}
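+
+// Thanks to the two compile time checks above, a zero-initialized u64 passed
+// as the Extra parameter of map() can be safely reinterpreted as an empty
+// {Vmar, Vmo} pair, so callers do not need Fuchsia specific code, e.g.:
+//   u64 PlatformData = 0;
+//   void *P = map(nullptr, Size, "scudo:region", MAP_NOACCESS, &PlatformData);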
+
+// Returns the offset of an address range in a Vmar, while checking that said
+// range fully belongs to the Vmar. An alternative would be to keep track of
+// both the base & length to avoid calling this. The tradeoff is a system
+// call vs. two extra uptr of storage.
+// TODO(kostyak): revisit the implications of both options.
+static uint64_t getOffsetInVmar(zx_handle_t Vmar, void *Addr, uintptr_t Size) {
+  zx_info_vmar_t Info;
+  const zx_status_t Status = _zx_object_get_info(
+      Vmar, ZX_INFO_VMAR, &Info, sizeof(Info), nullptr, nullptr);
+  CHECK_EQ(Status, ZX_OK);
+  const uint64_t Offset = reinterpret_cast<uintptr_t>(Addr) - Info.base;
+  CHECK_LE(Offset, Info.len);
+  CHECK_LE(Offset + Size, Info.len);
+  return Offset;
+}
+
+void *map(void *Addr, uptr Size, const char *Name, uptr Flags, u64 *Extra) {
+  DCHECK_EQ(Size % PAGE_SIZE, 0);
+  const bool AllowNoMem = !!(Flags & MAP_ALLOWNOMEM);
+  MapInfo *Info = reinterpret_cast<MapInfo *>(Extra);
+
+  // For MAP_NOACCESS, just allocate a Vmar and return.
+  if (Flags & MAP_NOACCESS)
+    return allocateVmar(Size, Info, AllowNoMem);
+
+  const zx_handle_t Vmar = Info ? Info->Vmar : _zx_vmar_root_self();
+  CHECK_NE(Vmar, ZX_HANDLE_INVALID);
+
+  zx_status_t Status;
+  zx_handle_t Vmo;
+  uint64_t VmoSize = 0;
+  if (Info && Info->Vmo != ZX_HANDLE_INVALID) {
+    // If a Vmo was specified, it's a resize operation.
+    CHECK(Addr);
+    DCHECK(Flags & MAP_RESIZABLE);
+    Vmo = Info->Vmo;
+    Status = _zx_vmo_get_size(Vmo, &VmoSize);
+    if (Status == ZX_OK)
+      Status = _zx_vmo_set_size(Vmo, VmoSize + Size);
+    if (Status != ZX_OK) {
+      if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
+        dieOnMapUnmapError(Status == ZX_ERR_NO_MEMORY);
+      return nullptr;
+    }
+  } else {
+    // Otherwise, create a Vmo and set its name.
+    Status = _zx_vmo_create(Size, 0, &Vmo);
+    if (Status != ZX_OK) {
+      if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
+        dieOnMapUnmapError(Status == ZX_ERR_NO_MEMORY);
+      return nullptr;
+    }
+    uptr N = 0;
+    while (Name[N])
+      N++;
+    _zx_object_set_property(Vmo, ZX_PROP_NAME, Name, N);
+  }
+
+  uintptr_t P;
+  zx_vm_option_t MapFlags = ZX_VM_PERM_READ | ZX_VM_PERM_WRITE;
+  const uint64_t Offset = Addr ? getOffsetInVmar(Vmar, Addr, Size) : 0;
+  if (Offset)
+    MapFlags |= ZX_VM_SPECIFIC;
+  Status = _zx_vmar_map(Vmar, MapFlags, Offset, Vmo, VmoSize, Size, &P);
+  // Only track the Vmo if we might need to resize it; close it otherwise.
+  if (Flags & MAP_RESIZABLE) {
+    DCHECK(Info);
+    if (Info->Vmo == ZX_HANDLE_INVALID)
+      Info->Vmo = Vmo;
+    else
+      DCHECK_EQ(Info->Vmo, Vmo);
+  } else {
+    CHECK_EQ(_zx_handle_close(Vmo), ZX_OK);
+  }
+  if (Status != ZX_OK) {
+    if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
+      dieOnMapUnmapError(Status == ZX_ERR_NO_MEMORY);
+    return nullptr;
+  }
+
+  return reinterpret_cast<void *>(P);
+}
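+
+// The expected resize sequence is therefore (sketch; Name and the sizes are
+// illustrative):
+//   u64 PlatformData = 0; // Holds the MapInfo, i.e. the {Vmar, Vmo} pair.
+//   void *Base = map(nullptr, 4 * PageSize, Name, MAP_NOACCESS, &PlatformData);
+//   char *P = reinterpret_cast<char *>(Base) + PageSize;
+//   map(P, PageSize, Name, MAP_RESIZABLE, &PlatformData); // Creates the Vmo.
+//   map(P + PageSize, PageSize, Name, MAP_RESIZABLE, &PlatformData); // Grows it.
+//   unmap(Base, 4 * PageSize, UNMAP_ALL, &PlatformData);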
+
+void unmap(void *Addr, uptr Size, uptr Flags, u64 *Extra) {
+  MapInfo *Info = reinterpret_cast<MapInfo *>(Extra);
+  if (Flags & UNMAP_ALL) {
+    DCHECK_NE(Info, nullptr);
+    const zx_handle_t Vmar = Info->Vmar;
+    DCHECK_NE(Vmar, _zx_vmar_root_self());
+    // Destroying the vmar effectively unmaps the whole mapping.
+    CHECK_EQ(_zx_vmar_destroy(Vmar), ZX_OK);
+    CHECK_EQ(_zx_handle_close(Vmar), ZX_OK);
+  } else {
+    const zx_handle_t Vmar = Info ? Info->Vmar : _zx_vmar_root_self();
+    const zx_status_t Status =
+        _zx_vmar_unmap(Vmar, reinterpret_cast<uintptr_t>(Addr), Size);
+    if (Status != ZX_OK)
+      dieOnMapUnmapError();
+  }
+  if (Info) {
+    if (Info->Vmo != ZX_HANDLE_INVALID)
+      CHECK_EQ(_zx_handle_close(Info->Vmo), ZX_OK);
+    Info->Vmo = ZX_HANDLE_INVALID;
+    Info->Vmar = ZX_HANDLE_INVALID;
+  }
+}
+
+void releasePagesToOS(UNUSED uptr BaseAddress, uptr Offset, uptr Size,
+                      u64 *Extra) {
+  MapInfo *Info = reinterpret_cast<MapInfo *>(Extra);
+  DCHECK(Info);
+  DCHECK_NE(Info->Vmar, ZX_HANDLE_INVALID);
+  DCHECK_NE(Info->Vmo, ZX_HANDLE_INVALID);
+  const zx_status_t Status =
+      _zx_vmo_op_range(Info->Vmo, ZX_VMO_OP_DECOMMIT, Offset, Size, NULL, 0);
+  CHECK_EQ(Status, ZX_OK);
+}
+
+const char *getEnv(const char *Name) { return getenv(Name); }
+
+void BlockingMutex::wait() {
+  const zx_status_t Status =
+      _zx_futex_wait(reinterpret_cast<zx_futex_t *>(OpaqueStorage), MtxSleeping,
+                     ZX_HANDLE_INVALID, ZX_TIME_INFINITE);
+  if (Status != ZX_ERR_BAD_STATE)
+    CHECK_EQ(Status, ZX_OK); // Normal race
+}
+
+void BlockingMutex::wake() {
+  const zx_status_t Status =
+      _zx_futex_wake(reinterpret_cast<zx_futex_t *>(OpaqueStorage), 1);
+  CHECK_EQ(Status, ZX_OK);
+}
+
+u64 getMonotonicTime() { return _zx_clock_get(ZX_CLOCK_MONOTONIC); }
+
+u32 getNumberOfCPUs() { return _zx_system_get_num_cpus(); }
+
+bool getRandom(void *Buffer, uptr Length, bool Blocking) {
+  COMPILER_CHECK(MaxRandomLength <= ZX_CPRNG_DRAW_MAX_LEN);
+  if (!Buffer || !Length || Length > MaxRandomLength)
+    return false;
+  _zx_cprng_draw(Buffer, Length);
+  return true;
+}
+
+void outputRaw(const char *Buffer) {
+  uptr N = 0;
+  while (Buffer[N])
+    N++;
+  __sanitizer_log_write(Buffer, N);
+}
+
+void setAbortMessage(const char *Message) {}
+
+} // namespace scudo
+
+#endif // SCUDO_FUCHSIA
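Both platform implementations cap getRandom() at MaxRandomLength and can fail non-fatally, so callers need a fallback. A plausible seeding pattern, as a sketch only (the function name and the golden-ratio constant are arbitrary choices for illustration, not code from the patch):

scudo::u32 initPrngState() {
  scudo::u32 Seed;
  if (UNLIKELY(!scudo::getRandom(&Seed, sizeof(Seed))))
    // Fallback: mix the monotonic clock with an arbitrary odd constant.
    Seed = static_cast<scudo::u32>(scudo::getMonotonicTime() ^ 0x9E3779B9U);
  return Seed;
}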
Index: lib/scudo/standalone/internal_defs.h
===================================================================
--- lib/scudo/standalone/internal_defs.h
+++ lib/scudo/standalone/internal_defs.h
@@ -11,6 +11,8 @@
 
 #include "platform.h"
 
+#include <stdint.h>
+
 #ifndef SCUDO_DEBUG
 #define SCUDO_DEBUG 0
 #endif
@@ -31,6 +33,8 @@
 #define INLINE inline
 #define ALWAYS_INLINE inline __attribute__((always_inline))
 #define ALIAS(x) __attribute__((alias(x)))
+// Please only use the ALIGNED macro before the type. Using ALIGNED after the
+// variable declaration is not portable.
 #define ALIGNED(x) __attribute__((aligned(x)))
 #define FORMAT(f, a) __attribute__((format(printf, f, a)))
 #define NOINLINE __attribute__((noinline))
@@ -61,42 +65,29 @@
 typedef signed int s32;
 typedef signed long long s64;
 
-// Various integer constants.
-
-#undef __INT64_C
-#undef __UINT64_C
-#undef UINTPTR_MAX
-#if SCUDO_WORDSIZE == 64U
-#define __INT64_C(c) c##L
-#define __UINT64_C(c) c##UL
-#define UINTPTR_MAX (18446744073709551615UL)
-#else
-#define __INT64_C(c) c##LL
-#define __UINT64_C(c) c##ULL
-#define UINTPTR_MAX (4294967295U)
-#endif // SCUDO_WORDSIZE == 64U
-#undef INT32_MIN
-#define INT32_MIN (-2147483647 - 1)
-#undef INT32_MAX
-#define INT32_MAX (2147483647)
-#undef UINT32_MAX
-#define UINT32_MAX (4294967295U)
-#undef INT64_MIN
-#define INT64_MIN (-__INT64_C(9223372036854775807) - 1)
-#undef INT64_MAX
-#define INT64_MAX (__INT64_C(9223372036854775807))
-#undef UINT64_MAX
-#define UINT64_MAX (__UINT64_C(18446744073709551615))
+// The following two functions have platform-specific implementations.
+void outputRaw(const char *Buffer);
+void NORETURN die();
 
-enum LinkerInitialized { LINKER_INITIALIZED = 0 };
+#define RAW_CHECK_MSG(Expr, Msg)                                               \
+  do {                                                                         \
+    if (UNLIKELY(!(Expr))) {                                                   \
+      outputRaw(Msg);                                                          \
+      die();                                                                   \
+    }                                                                          \
+  } while (false)
 
-// Various CHECK related macros.
+#define RAW_CHECK(Expr) RAW_CHECK_MSG(Expr, #Expr)
 
-#define COMPILER_CHECK(Pred) static_assert(Pred, "")
-
-// TODO(kostyak): implement at a later check-in.
+// TODO(kostyak): use reportCheckFailed when checked-in.
 #define CHECK_IMPL(c1, op, c2)                                                 \
   do {                                                                         \
+    u64 v1 = (u64)(c1);                                                        \
+    u64 v2 = (u64)(c2);                                                        \
+    if (UNLIKELY(!(v1 op v2))) {                                               \
+      outputRaw("CHECK failed: (" #c1 ") " #op " (" #c2 ")\n");                \
+      die();                                                                   \
+    }                                                                          \
   } while (false)
 
 #define CHECK(a) CHECK_IMPL((a), !=, 0)
@@ -125,10 +116,16 @@
 #define DCHECK_GE(a, b)
 #endif
 
-// TODO(kostyak): implement at a later check-in.
+// The superfluous die() call effectively makes this macro NORETURN.
 #define UNREACHABLE(msg)                                                       \
   do {                                                                         \
-  } while (false)
+    CHECK(0 && msg);                                                           \
+    die();                                                                     \
+  } while (0)
+
+#define COMPILER_CHECK(Pred) static_assert(Pred, "")
+
+enum LinkerInitialized { LINKER_INITIALIZED = 0 };
 
 } // namespace scudo
Index: lib/scudo/standalone/linux.h
===================================================================
--- lib/scudo/standalone/linux.h
+++ lib/scudo/standalone/linux.h
@@ -0,0 +1,67 @@
+//===-- linux.h -------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_LINUX_H_
+#define SCUDO_LINUX_H_
+
+#include "platform.h"
+
+#if SCUDO_LINUX
+
+namespace scudo {
+
+#if SCUDO_ANDROID
+
+#if defined(__aarch64__)
+#define __get_tls()                                                            \
+  ({                                                                           \
+    void **__v;                                                                \
+    __asm__("mrs %0, tpidr_el0" : "=r"(__v));                                  \
+    __v;                                                                       \
+  })
+#elif defined(__arm__)
+#define __get_tls()                                                            \
+  ({                                                                           \
+    void **__v;                                                                \
+    __asm__("mrc p15, 0, %0, c13, c0, 3" : "=r"(__v));                         \
+    __v;                                                                       \
+  })
+#elif defined(__i386__)
+#define __get_tls()                                                            \
+  ({                                                                           \
+    void **__v;                                                                \
+    __asm__("movl %%gs:0, %0" : "=r"(__v));                                    \
+    __v;                                                                       \
+  })
+#elif defined(__x86_64__)
+#define __get_tls()                                                            \
+  ({                                                                           \
+    void **__v;                                                                \
+    __asm__("mov %%fs:0, %0" : "=r"(__v));                                     \
+    __v;                                                                       \
+  })
+#else
+#error "Unsupported architecture."
+#endif
+
+// The Android Bionic team has allocated a TLS slot for sanitizers starting
+// with Q, given that Android currently doesn't support ELF TLS. It is used to
+// store sanitizer thread-specific data.
+static const int TLS_SLOT_SANITIZER = 8; // TODO(kostyak): 6 for Q!!
+
+ALWAYS_INLINE uptr *getAndroidTlsPtr() {
+  return reinterpret_cast<uptr *>(&__get_tls()[TLS_SLOT_SANITIZER]);
+}
+
+#endif // SCUDO_ANDROID
+
+} // namespace scudo
+
+#endif // SCUDO_LINUX
+
+#endif // SCUDO_LINUX_H_
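The sanitizer slot holds a single pointer-sized value per thread. A sketch of how thread-local allocator state could hang off it; ThreadState is a hypothetical type, not something this patch introduces:

#if SCUDO_ANDROID
struct ThreadState; // Hypothetical per-thread allocator state.
ALWAYS_INLINE ThreadState *getThreadState() {
  return reinterpret_cast<ThreadState *>(*scudo::getAndroidTlsPtr());
}
ALWAYS_INLINE void setThreadState(ThreadState *S) {
  *scudo::getAndroidTlsPtr() = reinterpret_cast<scudo::uptr>(S);
}
#endif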
Index: lib/scudo/standalone/linux.cc
===================================================================
--- lib/scudo/standalone/linux.cc
+++ lib/scudo/standalone/linux.cc
@@ -0,0 +1,151 @@
+//===-- linux.cc ------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "platform.h"
+
+#if SCUDO_LINUX
+
+#include "common.h"
+#include "linux.h"
+#include "mutex.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <linux/futex.h>
+#include <sched.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include <time.h>
+#include <unistd.h>
+
+#if SCUDO_ANDROID
+#include <sys/prctl.h>
+// Definitions of prctl arguments to set a vma name in Android kernels.
+#define ANDROID_PR_SET_VMA 0x53564d41
+#define ANDROID_PR_SET_VMA_ANON_NAME 0
+#endif
+
+namespace scudo {
+
+void yieldPlatform() { sched_yield(); }
+
+uptr getPageSize() { return static_cast<uptr>(sysconf(_SC_PAGESIZE)); }
+
+void NORETURN die() { abort(); }
+
+void *map(void *Addr, uptr Size, UNUSED const char *Name, uptr Flags,
+          UNUSED u64 *Extra) {
+  int MmapFlags = MAP_PRIVATE | MAP_ANON;
+  if (Flags & MAP_NOACCESS)
+    MmapFlags |= MAP_NORESERVE;
+  if (Addr) {
+    // Currently no scenario for a noaccess mapping with a fixed address.
+    DCHECK_EQ(Flags & MAP_NOACCESS, 0);
+    MmapFlags |= MAP_FIXED;
+  }
+  const int MmapProt =
+      (Flags & MAP_NOACCESS) ? PROT_NONE : PROT_READ | PROT_WRITE;
+  void *P = mmap(Addr, Size, MmapProt, MmapFlags, -1, 0);
+  if (P == MAP_FAILED) {
+    if (!(Flags & MAP_ALLOWNOMEM) || errno != ENOMEM)
+      dieOnMapUnmapError(errno == ENOMEM);
+    return nullptr;
+  }
+#if SCUDO_ANDROID
+  if (!(Flags & MAP_NOACCESS))
+    prctl(ANDROID_PR_SET_VMA, ANDROID_PR_SET_VMA_ANON_NAME, P, Size, Name);
+#endif
+  return P;
+}
+
+void unmap(void *Addr, uptr Size, UNUSED uptr Flags, UNUSED u64 *Extra) {
+  if (munmap(Addr, Size) != 0)
+    dieOnMapUnmapError();
+}
+
+void releasePagesToOS(uptr BaseAddress, uptr Offset, uptr Size,
+                      UNUSED u64 *Extra) {
+  void *Addr = reinterpret_cast<void *>(BaseAddress + Offset);
+  while (madvise(Addr, Size, MADV_DONTNEED) == -1 && errno == EAGAIN) {
+  }
+}
+
+// Calling getenv should be fine (c)(tm) at any time.
+const char *getEnv(const char *Name) { return getenv(Name); }
+
+void BlockingMutex::wait() {
+  syscall(SYS_futex, reinterpret_cast<uptr>(OpaqueStorage), FUTEX_WAIT_PRIVATE,
+          MtxSleeping, nullptr, nullptr, 0);
+}
+
+void BlockingMutex::wake() {
+  syscall(SYS_futex, reinterpret_cast<uptr>(OpaqueStorage), FUTEX_WAKE_PRIVATE,
+          1, nullptr, nullptr, 0);
+}
+
+u64 getMonotonicTime() {
+  timespec TS;
+  clock_gettime(CLOCK_MONOTONIC, &TS);
+  return static_cast<u64>(TS.tv_sec) * (1000ULL * 1000 * 1000) +
+         static_cast<u64>(TS.tv_nsec);
+}
+
+u32 getNumberOfCPUs() {
+  cpu_set_t CPUs;
+  CHECK_EQ(sched_getaffinity(0, sizeof(cpu_set_t), &CPUs), 0);
+  return static_cast<u32>(CPU_COUNT(&CPUs));
+}
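+
+// A consumer would typically clamp this value when sizing per-thread or
+// per-CPU structures, e.g.: const u32 Count = Min(getNumberOfCPUs(), MaxCount);
+// (MaxCount being whatever upper bound the caller chooses).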
+
+// Blocking is possibly unused if the getrandom block is not compiled in.
+bool getRandom(void *Buffer, uptr Length, UNUSED bool Blocking) {
+  if (!Buffer || !Length || Length > MaxRandomLength)
+    return false;
+  ssize_t ReadBytes;
+#if defined(SYS_getrandom)
+#if !defined(GRND_NONBLOCK)
+#define GRND_NONBLOCK 1
+#endif
+  // Up to 256 bytes, getrandom will not be interrupted.
+  ReadBytes =
+      syscall(SYS_getrandom, Buffer, Length, Blocking ? 0 : GRND_NONBLOCK);
+  if (ReadBytes == static_cast<ssize_t>(Length))
+    return true;
+#endif // defined(SYS_getrandom)
+  // Up to 256 bytes, a read off /dev/urandom will not be interrupted.
+  // Blocking is moot here, O_NONBLOCK has no effect when opening /dev/urandom.
+  const int FileDesc = open("/dev/urandom", O_RDONLY);
+  if (FileDesc == -1)
+    return false;
+  ReadBytes = read(FileDesc, Buffer, Length);
+  close(FileDesc);
+  return (ReadBytes == static_cast<ssize_t>(Length));
+}
+
+void outputRaw(const char *Buffer) {
+  static StaticSpinMutex Mutex;
+  SpinMutexLock L(&Mutex);
+  uptr N = 0;
+  while (Buffer[N])
+    N++;
+  write(2, Buffer, N);
+}
+
+extern "C" WEAK void android_set_abort_message(const char *);
+
+void setAbortMessage(const char *Message) {
+  if (&android_set_abort_message)
+    android_set_abort_message(Message);
+}
+
+} // namespace scudo
+
+#endif // SCUDO_LINUX
Index: lib/scudo/standalone/mutex.h
===================================================================
--- lib/scudo/standalone/mutex.h
+++ lib/scudo/standalone/mutex.h
@@ -0,0 +1,108 @@
+//===-- mutex.h -------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_MUTEX_H_
+#define SCUDO_MUTEX_H_
+
+#include "atomic_helpers.h"
+#include "common.h"
+
+namespace scudo {
+
+class StaticSpinMutex {
+public:
+  void init() { atomic_store_relaxed(&State, 0); }
+
+  void lock() {
+    if (tryLock())
+      return;
+    lockSlow();
+  }
+
+  bool tryLock() {
+    return atomic_exchange(&State, 1, memory_order_acquire) == 0;
+  }
+
+  void unlock() { atomic_store(&State, 0, memory_order_release); }
+
+  void checkLocked() { CHECK_EQ(atomic_load_relaxed(&State), 1); }
+
+private:
+  atomic_u8 State;
+
+  void NOINLINE lockSlow() {
+    for (u32 I = 0;; I++) {
+      if (I < 10)
+        yieldProcessor(10);
+      else
+        yieldPlatform();
+      if (atomic_load_relaxed(&State) == 0 &&
+          atomic_exchange(&State, 1, memory_order_acquire) == 0)
+        return;
+    }
+  }
+};
+
+class SpinMutex : public StaticSpinMutex {
+public:
+  SpinMutex() { init(); }
+
+private:
+  SpinMutex(const SpinMutex &) = delete;
+  void operator=(const SpinMutex &) = delete;
+};
+
+enum MutexState { MtxUnlocked = 0, MtxLocked = 1, MtxSleeping = 2 };
+
+class BlockingMutex {
+public:
+  explicit constexpr BlockingMutex(LinkerInitialized) : OpaqueStorage{0} {}
+  BlockingMutex() { memset(this, 0, sizeof(*this)); }
+  void wait();
+  void wake();
+  void lock() {
+    atomic_u32 *M = reinterpret_cast<atomic_u32 *>(&OpaqueStorage);
+    if (atomic_exchange(M, MtxLocked, memory_order_acquire) == MtxUnlocked)
+      return;
+    while (atomic_exchange(M, MtxSleeping, memory_order_acquire) != MtxUnlocked)
+      wait();
+  }
+  void unlock() {
+    atomic_u32 *M = reinterpret_cast<atomic_u32 *>(&OpaqueStorage);
+    const u32 V = atomic_exchange(M, MtxUnlocked, memory_order_release);
+    DCHECK_NE(V, MtxUnlocked);
+    if (V == MtxSleeping)
+      wake();
+  }
+  void checkLocked() {
+    atomic_u32 *M = reinterpret_cast<atomic_u32 *>(&OpaqueStorage);
+    CHECK_NE(MtxUnlocked, atomic_load_relaxed(M));
+  }
+
+private:
+  uptr OpaqueStorage[1];
+};
+
+template <typename MutexType> class GenericScopedLock {
+public:
+  explicit GenericScopedLock(MutexType *M) : Mutex(M) { Mutex->lock(); }
+  ~GenericScopedLock() { Mutex->unlock(); }
+
+private:
+  MutexType *Mutex;
+
+  GenericScopedLock(const GenericScopedLock &) = delete;
+  void operator=(const GenericScopedLock &) = delete;
+};
+
+typedef GenericScopedLock<StaticSpinMutex> SpinMutexLock;
+typedef GenericScopedLock<BlockingMutex> BlockingMutexLock;
+
+} // namespace scudo
+
+#endif // SCUDO_MUTEX_H_
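Typical use of the scoped wrappers, as a minimal sketch (the counter and function are illustrative, not from the patch):

static scudo::SpinMutex CounterMutex;
static scudo::uptr Counter;

void incrementCounter() {
  scudo::SpinMutexLock L(&CounterMutex); // Unlocks on scope exit.
  Counter++;
}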
Index: lib/scudo/standalone/tests/CMakeLists.txt
===================================================================
--- lib/scudo/standalone/tests/CMakeLists.txt
+++ lib/scudo/standalone/tests/CMakeLists.txt
@@ -34,11 +34,14 @@
 if(COMPILER_RT_HAS_SCUDO_STANDALONE)
   foreach(arch ${SCUDO_TEST_ARCH})
     set(ScudoUnitTestsObjects)
+    add_library("RTScudoStandalone.test.${arch}"
+      $<TARGET_OBJECTS:RTScudoStandalone.${arch}>)
     generate_compiler_rt_tests(ScudoUnitTestsObjects ScudoUnitTests
       "${testname}-${arch}-Test" ${arch}
-      SOURCES ${TEST_SOURCES} ${COMPILER_RT_GTEST_SOURCE}
+      SOURCES ${TEST_SOURCES} ${COMPILER_RT_GTEST_SOURCE}
       COMPILE_DEPS ${TEST_HEADERS}
       DEPS gtest scudo_standalone
+      RUNTIME RTScudoStandalone.test.${arch}
       CFLAGS ${SCUDO_UNITTEST_CFLAGS}
       LINK_FLAGS ${LINK_FLAGS})
   endforeach()
@@ -48,6 +51,8 @@
 set(SCUDO_UNIT_TEST_SOURCES
   atomic_test.cc
   list_test.cc
+  map_test.cc
+  mutex_test.cc
   scudo_unit_test_main.cc)
 
 add_scudo_unittest(ScudoUnitTest
Index: lib/scudo/standalone/tests/atomic_test.cc
===================================================================
--- lib/scudo/standalone/tests/atomic_test.cc
+++ lib/scudo/standalone/tests/atomic_test.cc
@@ -45,7 +45,7 @@
   EXPECT_EQ(Val.Magic1, (Type)-3);
 }
 
-TEST(ScudoStandalone, AtomicStoreLoad) {
+TEST(ScudoAtomicTest, AtomicStoreLoad) {
   checkStoreLoad<scudo::atomic_u8>();
   checkStoreLoad<scudo::atomic_u16>();
   checkStoreLoad<scudo::atomic_u32>();
@@ -101,7 +101,7 @@
   }
 }
 
-TEST(ScudoStandalone, AtomicCompareExchangeTest) {
+TEST(ScudoAtomicTest, AtomicCompareExchangeTest) {
   checkAtomicCompareExchange<scudo::atomic_u8>();
   checkAtomicCompareExchange<scudo::atomic_u16>();
   checkAtomicCompareExchange<scudo::atomic_u32>();
Index: lib/scudo/standalone/tests/list_test.cc
===================================================================
--- lib/scudo/standalone/tests/list_test.cc
+++ lib/scudo/standalone/tests/list_test.cc
@@ -58,7 +58,7 @@
   EXPECT_TRUE(L->empty());
 }
 
-TEST(ScudoSandalone, IntrusiveList) {
+TEST(ScudoListTest, IntrusiveList) {
   ListItem Items[6];
   EXPECT_EQ(StaticList.size(), 0U);
 
@@ -167,7 +167,7 @@
   EXPECT_TRUE(L2.empty());
 }
 
-TEST(ScudoStandalone, IntrusiveListAppendEmpty) {
+TEST(ScudoListTest, IntrusiveListAppendEmpty) {
   ListItem I;
   List L;
   L.clear();
Index: lib/scudo/standalone/tests/map_test.cc
===================================================================
--- lib/scudo/standalone/tests/map_test.cc
+++ lib/scudo/standalone/tests/map_test.cc
@@ -0,0 +1,65 @@
+//===-- map_test.cc ---------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "common.h"
+
+#include "gtest/gtest.h"
+
+#include <string.h>
+
+const char *MappingName = "scudo:test";
+
+TEST(ScudoMapTest, MapNoAccessUnmap) {
+  const scudo::uptr Size = 4 * scudo::getPageSizeCached();
+  scudo::u64 PlatformData = 0;
+  void *P = scudo::map(nullptr, Size, MappingName, MAP_NOACCESS, &PlatformData);
+  EXPECT_NE(P, nullptr);
+  EXPECT_DEATH(memset(P, 0xaa, Size), "");
+  scudo::unmap(P, Size, UNMAP_ALL, &PlatformData);
+}
+
+TEST(ScudoMapTest, MapUnmap) {
+  const scudo::uptr Size = 4 * scudo::getPageSizeCached();
+  scudo::u64 PlatformData = 0;
+  void *P = scudo::map(nullptr, Size, MappingName, 0, &PlatformData);
+  EXPECT_NE(P, nullptr);
+  memset(P, 0xaa, Size);
+  scudo::unmap(P, Size, 0, &PlatformData);
+  EXPECT_DEATH(memset(P, 0xbb, Size), "");
+}
+
+TEST(ScudoMapTest, MapWithGuardUnmap) {
+  const scudo::uptr PageSize = scudo::getPageSizeCached();
+  const scudo::uptr Size = 4 * PageSize;
+  scudo::u64 PlatformData = 0;
+  void *P = scudo::map(nullptr, Size + 2 * PageSize, MappingName, MAP_NOACCESS,
+                       &PlatformData);
+  EXPECT_NE(P, nullptr);
+  void *Q =
+      reinterpret_cast<void *>(reinterpret_cast<scudo::uptr>(P) + PageSize);
+  EXPECT_EQ(scudo::map(Q, Size, MappingName, 0, &PlatformData), Q);
+  memset(Q, 0xaa, Size);
+  EXPECT_DEATH(memset(Q, 0xaa, Size + 1), "");
+  scudo::unmap(P, Size + 2 * PageSize, UNMAP_ALL, &PlatformData);
+}
+
+TEST(ScudoMapTest, MapGrowUnmap) {
+  const scudo::uptr PageSize = scudo::getPageSizeCached();
+  const scudo::uptr Size = 4 * PageSize;
+  scudo::u64 PlatformData = 0;
+  void *P = scudo::map(nullptr, Size, MappingName, MAP_NOACCESS, &PlatformData);
+  EXPECT_NE(P, nullptr);
+  void *Q =
+      reinterpret_cast<void *>(reinterpret_cast<scudo::uptr>(P) + PageSize);
+  EXPECT_EQ(scudo::map(Q, PageSize, MappingName, 0, &PlatformData), Q);
+  memset(Q, 0xaa, PageSize);
+  Q = reinterpret_cast<void *>(reinterpret_cast<scudo::uptr>(Q) + PageSize);
+  EXPECT_EQ(scudo::map(Q, PageSize, MappingName, 0, &PlatformData), Q);
+  memset(Q, 0xbb, PageSize);
+  scudo::unmap(P, Size, UNMAP_ALL, &PlatformData);
+}
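releasePagesToOS has no coverage in these tests. On Linux the expected round trip looks like the sketch below; on Fuchsia the same call additionally requires a live Vmar/Vmo pair in the PlatformData, per the DCHECKs in fuchsia.cc. The function name is illustrative, not from the patch:

void commitReleaseUnmap() {
  const scudo::uptr Size = 4 * scudo::getPageSizeCached();
  scudo::u64 PlatformData = 0;
  void *P = scudo::map(nullptr, Size, "scudo:release", 0, &PlatformData);
  memset(P, 0xaa, Size);
  // The mapping stays valid; on Linux the released pages read back as zeroes.
  scudo::releasePagesToOS(reinterpret_cast<scudo::uptr>(P), 0, Size,
                          &PlatformData);
  scudo::unmap(P, Size, 0, &PlatformData);
}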
Index: lib/scudo/standalone/tests/mutex_test.cc
===================================================================
--- lib/scudo/standalone/tests/mutex_test.cc
+++ lib/scudo/standalone/tests/mutex_test.cc
@@ -0,0 +1,121 @@
+//===-- mutex_test.cc -------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "mutex.h"
+
+#include "gtest/gtest.h"
+
+#include <pthread.h>
+
+template <typename MutexType> class TestData {
+public:
+  explicit TestData(MutexType *M) : Mutex(M) {
+    for (scudo::u32 I = 0; I < Size; I++)
+      Data[I] = 0;
+  }
+
+  void write() {
+    Lock L(Mutex);
+    T V0 = Data[0];
+    for (scudo::u32 I = 0; I < Size; I++) {
+      EXPECT_EQ(Data[I], V0);
+      Data[I]++;
+    }
+  }
+
+  void tryWrite() {
+    if (!Mutex->tryLock())
+      return;
+    T V0 = Data[0];
+    for (scudo::u32 I = 0; I < Size; I++) {
+      EXPECT_EQ(Data[I], V0);
+      Data[I]++;
+    }
+    Mutex->unlock();
+  }
+
+  void backoff() {
+    volatile T LocalData[Size] = {};
+    for (scudo::u32 I = 0; I < Size; I++) {
+      LocalData[I]++;
+      EXPECT_EQ(LocalData[I], 1U);
+    }
+  }
+
+private:
+  typedef scudo::GenericScopedLock<MutexType> Lock;
+  static const scudo::u32 Size = 64U;
+  typedef scudo::u64 T;
+  MutexType *Mutex;
+  ALIGNED(SCUDO_CACHE_LINE_SIZE) T Data[Size];
+};
+
+const scudo::u32 NumberOfThreads = 8;
+#if SCUDO_DEBUG
+const scudo::u32 NumberOfIterations = 4 * 1024;
+#else
+const scudo::u32 NumberOfIterations = 16 * 1024;
+#endif
+
+template <typename MutexType> static void *lockThread(void *Param) {
+  TestData<MutexType> *Data = reinterpret_cast<TestData<MutexType> *>(Param);
+  for (scudo::u32 I = 0; I < NumberOfIterations; I++) {
+    Data->write();
+    Data->backoff();
+  }
+  return 0;
+}
+
+template <typename MutexType> static void *tryThread(void *Param) {
+  TestData<MutexType> *Data = reinterpret_cast<TestData<MutexType> *>(Param);
+  for (scudo::u32 I = 0; I < NumberOfIterations; I++) {
+    Data->tryWrite();
+    Data->backoff();
+  }
+  return 0;
+}
+
+template <typename MutexType> static void checkLocked(MutexType *M) {
+  scudo::GenericScopedLock<MutexType> L(M);
+  M->checkLocked();
+}
+
+TEST(ScudoMutexTest, SpinMutex) {
+  scudo::SpinMutex M;
+  M.init();
+  TestData<scudo::SpinMutex> Data(&M);
+  pthread_t Threads[NumberOfThreads];
+  for (scudo::u32 I = 0; I < NumberOfThreads; I++)
+    pthread_create(&Threads[I], 0, lockThread<scudo::SpinMutex>, &Data);
+  for (scudo::u32 I = 0; I < NumberOfThreads; I++)
+    pthread_join(Threads[I], 0);
+}
+
+TEST(ScudoMutexTest, SpinMutexTry) {
+  scudo::SpinMutex M;
+  M.init();
+  TestData<scudo::SpinMutex> Data(&M);
+  pthread_t Threads[NumberOfThreads];
+  for (scudo::u32 I = 0; I < NumberOfThreads; I++)
+    pthread_create(&Threads[I], 0, tryThread<scudo::SpinMutex>, &Data);
+  for (scudo::u32 I = 0; I < NumberOfThreads; I++)
+    pthread_join(Threads[I], 0);
+}
+
+TEST(ScudoMutexTest, BlockingMutex) {
+  scudo::u64 MutexMemory[1024] = {};
+  scudo::BlockingMutex *M =
+      new (MutexMemory) scudo::BlockingMutex(scudo::LINKER_INITIALIZED);
+  TestData<scudo::BlockingMutex> Data(M);
+  pthread_t Threads[NumberOfThreads];
+  for (scudo::u32 I = 0; I < NumberOfThreads; I++)
+    pthread_create(&Threads[I], 0, lockThread<scudo::BlockingMutex>, &Data);
+  for (scudo::u32 I = 0; I < NumberOfThreads; I++)
+    pthread_join(Threads[I], 0);
+  checkLocked(M);
+}
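Outside of tests, a BlockingMutex is meant to be either constant-initialized as a global (via the LinkerInitialized constructor) or zero-constructed. A usage sketch with the scoped wrapper; the names are illustrative:

static scudo::BlockingMutex Mutex(scudo::LINKER_INITIALIZED);

void criticalSection() {
  scudo::BlockingMutexLock L(&Mutex);
  // Contended threads sleep on the futex rather than spinning.
}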