diff --git a/compiler-rt/lib/scudo/standalone/atomic_helpers.h b/compiler-rt/lib/scudo/standalone/atomic_helpers.h
--- a/compiler-rt/lib/scudo/standalone/atomic_helpers.h
+++ b/compiler-rt/lib/scudo/standalone/atomic_helpers.h
@@ -21,12 +21,12 @@
   memory_order_acq_rel = 4,
   memory_order_seq_cst = 5
 };
-COMPILER_CHECK(memory_order_relaxed == __ATOMIC_RELAXED);
-COMPILER_CHECK(memory_order_consume == __ATOMIC_CONSUME);
-COMPILER_CHECK(memory_order_acquire == __ATOMIC_ACQUIRE);
-COMPILER_CHECK(memory_order_release == __ATOMIC_RELEASE);
-COMPILER_CHECK(memory_order_acq_rel == __ATOMIC_ACQ_REL);
-COMPILER_CHECK(memory_order_seq_cst == __ATOMIC_SEQ_CST);
+static_assert(memory_order_relaxed == __ATOMIC_RELAXED, "");
+static_assert(memory_order_consume == __ATOMIC_CONSUME, "");
+static_assert(memory_order_acquire == __ATOMIC_ACQUIRE, "");
+static_assert(memory_order_release == __ATOMIC_RELEASE, "");
+static_assert(memory_order_acq_rel == __ATOMIC_ACQ_REL, "");
+static_assert(memory_order_seq_cst == __ATOMIC_SEQ_CST, "");
 
 struct atomic_u8 {
   typedef u8 Type;
@@ -60,7 +60,7 @@
 };
 
 template <typename T>
-INLINE typename T::Type atomic_load(const volatile T *A, memory_order MO) {
+inline typename T::Type atomic_load(const volatile T *A, memory_order MO) {
   DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
   typename T::Type V;
   __atomic_load(&A->ValDoNotUse, &V, MO);
@@ -68,29 +68,29 @@
 }
 
 template <typename T>
-INLINE void atomic_store(volatile T *A, typename T::Type V, memory_order MO) {
+inline void atomic_store(volatile T *A, typename T::Type V, memory_order MO) {
   DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
   __atomic_store(&A->ValDoNotUse, &V, MO);
 }
 
-INLINE void atomic_thread_fence(memory_order) { __sync_synchronize(); }
+inline void atomic_thread_fence(memory_order) { __sync_synchronize(); }
 
 template <typename T>
-INLINE typename T::Type atomic_fetch_add(volatile T *A, typename T::Type V,
+inline typename T::Type atomic_fetch_add(volatile T *A, typename T::Type V,
                                          memory_order MO) {
   DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
   return __atomic_fetch_add(&A->ValDoNotUse, V, MO);
 }
 
 template <typename T>
-INLINE typename T::Type atomic_fetch_sub(volatile T *A, typename T::Type V,
+inline typename T::Type atomic_fetch_sub(volatile T *A, typename T::Type V,
                                          memory_order MO) {
   DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
   return __atomic_fetch_sub(&A->ValDoNotUse, V, MO);
 }
 
 template <typename T>
-INLINE typename T::Type atomic_exchange(volatile T *A, typename T::Type V,
+inline typename T::Type atomic_exchange(volatile T *A, typename T::Type V,
                                         memory_order MO) {
   DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
   typename T::Type R;
@@ -99,7 +99,7 @@
 }
 
 template <typename T>
-INLINE bool atomic_compare_exchange_strong(volatile T *A, typename T::Type *Cmp,
+inline bool atomic_compare_exchange_strong(volatile T *A, typename T::Type *Cmp,
                                            typename T::Type Xchg,
                                            memory_order MO) {
   return __atomic_compare_exchange(&A->ValDoNotUse, Cmp, &Xchg, false, MO,
@@ -107,7 +107,7 @@
 }
 
 template <typename T>
-INLINE bool atomic_compare_exchange_weak(volatile T *A, typename T::Type *Cmp,
+inline bool atomic_compare_exchange_weak(volatile T *A, typename T::Type *Cmp,
                                          typename T::Type Xchg,
                                          memory_order MO) {
   return __atomic_compare_exchange(&A->ValDoNotUse, Cmp, &Xchg, true, MO,
@@ -117,17 +117,17 @@
 // Clutter-reducing helpers.
 
 template <typename T>
-INLINE typename T::Type atomic_load_relaxed(const volatile T *A) {
+inline typename T::Type atomic_load_relaxed(const volatile T *A) {
   return atomic_load(A, memory_order_relaxed);
 }
 
 template <typename T>
-INLINE void atomic_store_relaxed(volatile T *A, typename T::Type V) {
+inline void atomic_store_relaxed(volatile T *A, typename T::Type V) {
   atomic_store(A, V, memory_order_relaxed);
 }
 
 template <typename T>
-INLINE typename T::Type atomic_compare_exchange(volatile T *A,
+inline typename T::Type atomic_compare_exchange(volatile T *A,
                                                 typename T::Type Cmp,
                                                 typename T::Type Xchg) {
   atomic_compare_exchange_strong(A, &Cmp, Xchg, memory_order_acquire);
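COMPILER_CHECK was defined in internal_defs.h as static_assert(Pred, ""), so the replacements above are purely mechanical. The empty message string stays because, before C++17, static_assert requires a message operand; a minimal standalone illustration (not scudo code):

    // C++11/14: the message operand is mandatory, even if empty.
    static_assert(sizeof(char) == 1, "");
    // C++17 and later would also accept the one-argument form:
    // static_assert(sizeof(char) == 1);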
diff --git a/compiler-rt/lib/scudo/standalone/checksum.h b/compiler-rt/lib/scudo/standalone/checksum.h
--- a/compiler-rt/lib/scudo/standalone/checksum.h
+++ b/compiler-rt/lib/scudo/standalone/checksum.h
@@ -37,7 +37,7 @@
 // significantly on memory accesses, as well as 1K of CRC32 table, on platforms
 // that do not support hardware CRC32. The checksum itself is 16-bit, which is at
 // odds with CRC32, but enough for our needs.
-INLINE u16 computeBSDChecksum(u16 Sum, uptr Data) {
+inline u16 computeBSDChecksum(u16 Sum, uptr Data) {
   for (u8 I = 0; I < sizeof(Data); I++) {
     Sum = static_cast<u16>((Sum >> 1) | ((Sum & 1) << 15));
     Sum = static_cast<u16>(Sum + (Data & 0xff));
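The BSD checksum that computeBSDChecksum implements is a rotate-and-add over each byte: rotate the 16-bit sum right by one, then add the next byte of Data. A standalone sketch of the whole loop (the byte-advance step falls outside the hunk context above, and std::uint*_t stand in for scudo's u8/u16/uptr):

    #include <cstdint>

    std::uint16_t computeBSDChecksum(std::uint16_t Sum, std::uintptr_t Data) {
      for (unsigned I = 0; I < sizeof(Data); I++) {
        // Rotate the 16-bit sum right by one bit...
        Sum = static_cast<std::uint16_t>((Sum >> 1) | ((Sum & 1) << 15));
        // ...then add the low byte and advance to the next one.
        Sum = static_cast<std::uint16_t>(Sum + (Data & 0xff));
        Data >>= 8;
      }
      return Sum;
    }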
diff --git a/compiler-rt/lib/scudo/standalone/chunk.h b/compiler-rt/lib/scudo/standalone/chunk.h
--- a/compiler-rt/lib/scudo/standalone/chunk.h
+++ b/compiler-rt/lib/scudo/standalone/chunk.h
@@ -20,7 +20,7 @@
 
 extern Checksum HashAlgorithm;
 
-INLINE u16 computeChecksum(u32 Seed, uptr Value, uptr *Array, uptr ArraySize) {
+inline u16 computeChecksum(u32 Seed, uptr Value, uptr *Array, uptr ArraySize) {
   // If the hardware CRC32 feature is defined here, it was enabled everywhere,
   // as opposed to only for crc32_hw.cpp. This means that other hardware
   // specific instructions were likely emitted at other places, and as a result
@@ -71,7 +71,7 @@
   uptr Checksum : 16;
 };
 typedef atomic_u64 AtomicPackedHeader;
-COMPILER_CHECK(sizeof(UnpackedHeader) == sizeof(PackedHeader));
+static_assert(sizeof(UnpackedHeader) == sizeof(PackedHeader), "");
 
 // Those constants are required to silence some -Werror=conversion errors when
 // assigning values to the related bitfield variables.
@@ -86,12 +86,12 @@
   return roundUpTo(sizeof(PackedHeader), 1U << SCUDO_MIN_ALIGNMENT_LOG);
 }
 
-INLINE AtomicPackedHeader *getAtomicHeader(void *Ptr) {
+inline AtomicPackedHeader *getAtomicHeader(void *Ptr) {
   return reinterpret_cast<AtomicPackedHeader *>(reinterpret_cast<uptr>(Ptr) -
                                                 getHeaderSize());
 }
 
-INLINE
+inline
 const AtomicPackedHeader *getConstAtomicHeader(const void *Ptr) {
   return reinterpret_cast<const AtomicPackedHeader *>(
       reinterpret_cast<uptr>(Ptr) - getHeaderSize());
@@ -100,7 +100,7 @@
 // We do not need a cryptographically strong hash for the checksum, but a CRC
 // type function that can alert us in the event a header is invalid or
 // corrupted. Ideally slightly better than a simple xor of all fields.
-static INLINE u16 computeHeaderChecksum(u32 Cookie, const void *Ptr,
+static inline u16 computeHeaderChecksum(u32 Cookie, const void *Ptr,
                                         UnpackedHeader *Header) {
   UnpackedHeader ZeroChecksumHeader = *Header;
   ZeroChecksumHeader.Checksum = 0;
@@ -110,7 +110,7 @@
       ARRAY_SIZE(HeaderHolder));
 }
 
-INLINE void storeHeader(u32 Cookie, void *Ptr,
+inline void storeHeader(u32 Cookie, void *Ptr,
                         UnpackedHeader *NewUnpackedHeader) {
   NewUnpackedHeader->Checksum =
       computeHeaderChecksum(Cookie, Ptr, NewUnpackedHeader);
@@ -118,7 +118,7 @@
   atomic_store_relaxed(getAtomicHeader(Ptr), NewPackedHeader);
 }
 
-INLINE
+inline
 void loadHeader(u32 Cookie, const void *Ptr, UnpackedHeader *NewUnpackedHeader) {
   PackedHeader NewPackedHeader = atomic_load_relaxed(getConstAtomicHeader(Ptr));
@@ -128,7 +128,7 @@
     reportHeaderCorruption(const_cast<void *>(Ptr));
 }
 
-INLINE void compareExchangeHeader(u32 Cookie, void *Ptr,
+inline void compareExchangeHeader(u32 Cookie, void *Ptr,
                                   UnpackedHeader *NewUnpackedHeader,
                                   UnpackedHeader *OldUnpackedHeader) {
   NewUnpackedHeader->Checksum =
@@ -141,7 +141,7 @@
     reportHeaderRace(Ptr);
 }
 
-INLINE
+inline
 bool isValid(u32 Cookie, const void *Ptr, UnpackedHeader *NewUnpackedHeader) {
   PackedHeader NewPackedHeader = atomic_load_relaxed(getConstAtomicHeader(Ptr));
   *NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader);
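The chunk-header functions above depend on UnpackedHeader being a bitfield view of exactly one 64-bit word, which is what the sizeof static_assert guards: the header is stored and loaded atomically as a PackedHeader and converted with a memcpy-based bit_cast. A hypothetical miniature of that round trip (field names and widths are illustrative, not scudo's exact layout):

    #include <cstdint>
    #include <cstring>

    struct Unpacked {            // bitfields summing to 64 bits
      std::uint64_t ClassId : 8;
      std::uint64_t State : 2;
      std::uint64_t SizeOrUnusedBytes : 38;
      std::uint64_t Checksum : 16;
    };
    typedef std::uint64_t Packed;
    static_assert(sizeof(Unpacked) == sizeof(Packed), "");

    template <typename Dest, typename Source> Dest bit_cast(const Source &S) {
      static_assert(sizeof(Dest) == sizeof(Source), "");
      Dest D;
      std::memcpy(&D, &S, sizeof(D)); // well-defined, unlike a union pun
      return D;
    }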
diff --git a/compiler-rt/lib/scudo/standalone/combined.h b/compiler-rt/lib/scudo/standalone/combined.h
--- a/compiler-rt/lib/scudo/standalone/combined.h
+++ b/compiler-rt/lib/scudo/standalone/combined.h
@@ -184,7 +184,7 @@
         ((Alignment > MinAlignment) ? Alignment : Chunk::getHeaderSize());
 
     // Takes care of extravagantly large sizes as well as integer overflows.
-    COMPILER_CHECK(MaxAllowedMallocSize < UINTPTR_MAX - MaxAlignment);
+    static_assert(MaxAllowedMallocSize < UINTPTR_MAX - MaxAlignment, "");
     if (UNLIKELY(Size >= MaxAllowedMallocSize)) {
       if (Options.MayReturnNull)
         return nullptr;
@@ -523,7 +523,7 @@
       reportSanityCheckError("class ID");
   }
 
-  static INLINE void *getBlockBegin(const void *Ptr,
+  static inline void *getBlockBegin(const void *Ptr,
                                     Chunk::UnpackedHeader *Header) {
     return reinterpret_cast<void *>(
         reinterpret_cast<uptr>(Ptr) - Chunk::getHeaderSize() -
@@ -531,7 +531,7 @@
   }
 
   // Return the size of a chunk as requested during its allocation.
-  INLINE uptr getSize(const void *Ptr, Chunk::UnpackedHeader *Header) {
+  inline uptr getSize(const void *Ptr, Chunk::UnpackedHeader *Header) {
     const uptr SizeOrUnusedBytes = Header->SizeOrUnusedBytes;
     if (LIKELY(Header->ClassId))
       return SizeOrUnusedBytes;
diff --git a/compiler-rt/lib/scudo/standalone/common.h b/compiler-rt/lib/scudo/standalone/common.h
--- a/compiler-rt/lib/scudo/standalone/common.h
+++ b/compiler-rt/lib/scudo/standalone/common.h
@@ -19,22 +19,22 @@
 
 namespace scudo {
 
-template <typename Dest, typename Source> INLINE Dest bit_cast(const Source &S) {
-  COMPILER_CHECK(sizeof(Dest) == sizeof(Source));
+template <typename Dest, typename Source> inline Dest bit_cast(const Source &S) {
+  static_assert(sizeof(Dest) == sizeof(Source), "");
   Dest D;
   memcpy(&D, &S, sizeof(D));
   return D;
 }
 
-INLINE constexpr uptr roundUpTo(uptr X, uptr Boundary) {
+inline constexpr uptr roundUpTo(uptr X, uptr Boundary) {
   return (X + Boundary - 1) & ~(Boundary - 1);
 }
 
-INLINE constexpr uptr roundDownTo(uptr X, uptr Boundary) {
+inline constexpr uptr roundDownTo(uptr X, uptr Boundary) {
   return X & ~(Boundary - 1);
 }
 
-INLINE constexpr bool isAligned(uptr X, uptr Alignment) {
+inline constexpr bool isAligned(uptr X, uptr Alignment) {
   return (X & (Alignment - 1)) == 0;
 }
 
@@ -48,14 +48,14 @@
   B = Tmp;
 }
 
-INLINE bool isPowerOfTwo(uptr X) { return (X & (X - 1)) == 0; }
+inline bool isPowerOfTwo(uptr X) { return (X & (X - 1)) == 0; }
 
-INLINE uptr getMostSignificantSetBitIndex(uptr X) {
+inline uptr getMostSignificantSetBitIndex(uptr X) {
   DCHECK_NE(X, 0U);
   return SCUDO_WORDSIZE - 1U - static_cast<uptr>(__builtin_clzl(X));
 }
 
-INLINE uptr roundUpToPowerOfTwo(uptr Size) {
+inline uptr roundUpToPowerOfTwo(uptr Size) {
   DCHECK(Size);
   if (isPowerOfTwo(Size))
     return Size;
@@ -65,17 +65,17 @@
   return 1UL << (Up + 1);
 }
 
-INLINE uptr getLeastSignificantSetBitIndex(uptr X) {
+inline uptr getLeastSignificantSetBitIndex(uptr X) {
   DCHECK_NE(X, 0U);
   return static_cast<uptr>(__builtin_ctzl(X));
 }
 
-INLINE uptr getLog2(uptr X) {
+inline uptr getLog2(uptr X) {
   DCHECK(isPowerOfTwo(X));
   return getLeastSignificantSetBitIndex(X);
 }
 
-INLINE u32 getRandomU32(u32 *State) {
+inline u32 getRandomU32(u32 *State) {
   // ANSI C linear congruential PRNG (16-bit output).
   // return (*State = *State * 1103515245 + 12345) >> 16;
   // XorShift (32-bit output).
@@ -85,11 +85,11 @@
   return *State;
 }
 
-INLINE u32 getRandomModN(u32 *State, u32 N) {
+inline u32 getRandomModN(u32 *State, u32 N) {
   return getRandomU32(State) % N; // [0, N)
 }
 
-template <typename T> INLINE void shuffle(T *A, u32 N, u32 *RandState) {
+template <typename T> inline void shuffle(T *A, u32 N, u32 *RandState) {
   if (N <= 1)
     return;
   u32 State = *RandState;
@@ -100,7 +100,7 @@
 
 // Hardware specific inlinable functions.
 
-INLINE void yieldProcessor(u8 Count) {
+inline void yieldProcessor(u8 Count) {
 #if defined(__i386__) || defined(__x86_64__)
   __asm__ __volatile__("" ::: "memory");
   for (u8 I = 0; I < Count; I++)
@@ -117,7 +117,7 @@
 extern uptr PageSizeCached;
 uptr getPageSizeSlow();
 
-INLINE uptr getPageSizeCached() {
+inline uptr getPageSizeCached() {
   // Bionic uses a hardcoded value.
   if (SCUDO_ANDROID)
     return 4096U;
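The body of getRandomU32 lies between the two hunks above; for context, it is the classic 32-bit XorShift step (shift constants 13/17/5, as in the upstream file):

    #include <cstdint>

    std::uint32_t getRandomU32(std::uint32_t *State) {
      *State ^= *State << 13; // XorShift triple: 13, 17, 5
      *State ^= *State >> 17;
      *State ^= *State << 5;
      return *State;
    }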
diff --git a/compiler-rt/lib/scudo/standalone/flags_parser.cpp b/compiler-rt/lib/scudo/standalone/flags_parser.cpp
--- a/compiler-rt/lib/scudo/standalone/flags_parser.cpp
+++ b/compiler-rt/lib/scudo/standalone/flags_parser.cpp
@@ -108,7 +108,7 @@
     Pos = OldPos;
 }
 
-INLINE bool parseBool(const char *Value, bool *b) {
+inline bool parseBool(const char *Value, bool *b) {
   if (strncmp(Value, "0", 1) == 0 || strncmp(Value, "no", 2) == 0 ||
       strncmp(Value, "false", 5) == 0) {
     *b = false;
diff --git a/compiler-rt/lib/scudo/standalone/fuchsia.cpp b/compiler-rt/lib/scudo/standalone/fuchsia.cpp
--- a/compiler-rt/lib/scudo/standalone/fuchsia.cpp
+++ b/compiler-rt/lib/scudo/standalone/fuchsia.cpp
@@ -29,7 +29,7 @@
 
 // We zero-initialize the Extra parameter of map(), make sure this is consistent
 // with ZX_HANDLE_INVALID.
-COMPILER_CHECK(ZX_HANDLE_INVALID == 0);
+static_assert(ZX_HANDLE_INVALID == 0, "");
 
 static void *allocateVmar(uptr Size, MapPlatformData *Data, bool AllowNoMem) {
   // Only scenario so far.
@@ -171,7 +171,7 @@
 u32 getNumberOfCPUs() { return _zx_system_get_num_cpus(); }
 
 bool getRandom(void *Buffer, uptr Length, UNUSED bool Blocking) {
-  COMPILER_CHECK(MaxRandomLength <= ZX_CPRNG_DRAW_MAX_LEN);
+  static_assert(MaxRandomLength <= ZX_CPRNG_DRAW_MAX_LEN, "");
   if (UNLIKELY(!Buffer || !Length || Length > MaxRandomLength))
     return false;
   _zx_cprng_draw(Buffer, Length);
diff --git a/compiler-rt/lib/scudo/standalone/internal_defs.h b/compiler-rt/lib/scudo/standalone/internal_defs.h
--- a/compiler-rt/lib/scudo/standalone/internal_defs.h
+++ b/compiler-rt/lib/scudo/standalone/internal_defs.h
@@ -30,7 +30,6 @@
 
 #define INTERFACE __attribute__((visibility("default")))
 #define WEAK __attribute__((weak))
-#define INLINE inline
 #define ALWAYS_INLINE inline __attribute__((always_inline))
 #define ALIAS(X) __attribute__((alias(X)))
 // Please only use the ALIGNED macro before the type. Using ALIGNED after the
@@ -126,8 +125,6 @@
     die();                                                                    \
   } while (0)
 
-#define COMPILER_CHECK(Pred) static_assert(Pred, "")
-
 } // namespace scudo
 
 #endif // SCUDO_INTERNAL_DEFS_H_
diff --git a/compiler-rt/lib/scudo/standalone/primary32.h b/compiler-rt/lib/scudo/standalone/primary32.h
--- a/compiler-rt/lib/scudo/standalone/primary32.h
+++ b/compiler-rt/lib/scudo/standalone/primary32.h
@@ -42,7 +42,7 @@
 public:
   typedef SizeClassMapT SizeClassMap;
   // Regions should be large enough to hold the largest Block.
-  COMPILER_CHECK((1UL << RegionSizeLog) >= SizeClassMap::MaxSize);
+  static_assert((1UL << RegionSizeLog) >= SizeClassMap::MaxSize, "");
   typedef SizeClassAllocator32<SizeClassMapT, RegionSizeLog> ThisT;
   typedef SizeClassAllocatorLocalCache<ThisT> CacheT;
   typedef typename CacheT::TransferBatch TransferBatch;
@@ -204,7 +204,7 @@
     uptr AllocatedUser;
     ReleaseToOsInfo ReleaseInfo;
   };
-  COMPILER_CHECK(sizeof(SizeClassInfo) % SCUDO_CACHE_LINE_SIZE == 0);
+  static_assert(sizeof(SizeClassInfo) % SCUDO_CACHE_LINE_SIZE == 0, "");
 
   uptr computeRegionId(uptr Mem) {
     const uptr Id = Mem >> RegionSizeLog;
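With both internal_defs.h definitions gone, every former use site now spells the construct out directly; INLINE and inline are even the same length, so no continuation lines needed re-indenting. Before/after at a representative (hypothetical) use site, with uptr approximated for a standalone build:

    typedef unsigned long uptr; // stand-in for scudo's uptr

    // Before: INLINE bool isPowerOfTwo(uptr X) { return (X & (X - 1)) == 0; }
    //         COMPILER_CHECK(sizeof(uptr) == sizeof(void *));
    inline bool isPowerOfTwo(uptr X) { return (X & (X - 1)) == 0; }
    static_assert(sizeof(uptr) == sizeof(void *), ""); // holds on LP64/ILP32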
diff --git a/compiler-rt/lib/scudo/standalone/primary64.h b/compiler-rt/lib/scudo/standalone/primary64.h
--- a/compiler-rt/lib/scudo/standalone/primary64.h
+++ b/compiler-rt/lib/scudo/standalone/primary64.h
@@ -215,7 +215,7 @@
     MapPlatformData Data;
     ReleaseToOsInfo ReleaseInfo;
   };
-  COMPILER_CHECK(sizeof(RegionInfo) % SCUDO_CACHE_LINE_SIZE == 0);
+  static_assert(sizeof(RegionInfo) % SCUDO_CACHE_LINE_SIZE == 0, "");
 
   uptr PrimaryBase;
   RegionInfo *RegionInfoArray;
diff --git a/compiler-rt/lib/scudo/standalone/quarantine.h b/compiler-rt/lib/scudo/standalone/quarantine.h
--- a/compiler-rt/lib/scudo/standalone/quarantine.h
+++ b/compiler-rt/lib/scudo/standalone/quarantine.h
@@ -59,7 +59,7 @@
   void shuffle(u32 State) { ::scudo::shuffle(Batch, Count, &State); }
 };
 
-COMPILER_CHECK(sizeof(QuarantineBatch) <= (1U << 13)); // 8Kb.
+static_assert(sizeof(QuarantineBatch) <= (1U << 13), ""); // 8Kb.
 
 // Per-thread cache of memory blocks.
 template <typename Callback> class QuarantineCache {
diff --git a/compiler-rt/lib/scudo/standalone/report.cpp b/compiler-rt/lib/scudo/standalone/report.cpp
--- a/compiler-rt/lib/scudo/standalone/report.cpp
+++ b/compiler-rt/lib/scudo/standalone/report.cpp
@@ -34,7 +34,7 @@
   ScopedString Message;
 };
 
-INLINE void NORETURN trap() { __builtin_trap(); }
+inline void NORETURN trap() { __builtin_trap(); }
 
 // This could potentially be called recursively if a CHECK fails in the reports.
 void NORETURN reportCheckFailed(const char *File, int Line,
diff --git a/compiler-rt/lib/scudo/standalone/secondary.h b/compiler-rt/lib/scudo/standalone/secondary.h
--- a/compiler-rt/lib/scudo/standalone/secondary.h
+++ b/compiler-rt/lib/scudo/standalone/secondary.h
@@ -52,7 +52,7 @@
 public:
   // Ensure the freelist is disabled on Fuchsia, since it doesn't support
   // releasing Secondary blocks yet.
-  COMPILER_CHECK(!SCUDO_FUCHSIA || MaxFreeListSize == 0U);
+  static_assert(!SCUDO_FUCHSIA || MaxFreeListSize == 0U, "");
 
   void initLinkerInitialized(GlobalStats *S) {
     Stats.initLinkerInitialized();
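The SCUDO_CACHE_LINE_SIZE assertions (primary32.h and primary64.h above) keep each per-class / per-region bookkeeping struct a whole number of cache lines, so two contended structs never share a line. A sketch of the invariant, assuming a 64-byte line:

    constexpr unsigned CacheLineSize = 64; // stand-in for SCUDO_CACHE_LINE_SIZE

    struct alignas(CacheLineSize) RegionInfoLike {
      unsigned long Mutex, FreeListHead, Stats[4]; // illustrative fields
    };
    // alignas rounds sizeof up to a multiple of the alignment, so this holds:
    static_assert(sizeof(RegionInfoLike) % CacheLineSize == 0, "");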
diff --git a/compiler-rt/lib/scudo/standalone/size_class_map.h b/compiler-rt/lib/scudo/standalone/size_class_map.h
--- a/compiler-rt/lib/scudo/standalone/size_class_map.h
+++ b/compiler-rt/lib/scudo/standalone/size_class_map.h
@@ -49,7 +49,7 @@
   static const uptr MaxSize = 1UL << MaxSizeLog;
   static const uptr NumClasses =
       MidClass + ((MaxSizeLog - MidSizeLog) << S) + 1;
-  COMPILER_CHECK(NumClasses <= 256);
+  static_assert(NumClasses <= 256, "");
   static const uptr LargestClassId = NumClasses - 1;
   static const uptr BatchClassId = 0;
 
diff --git a/compiler-rt/lib/scudo/standalone/tsd.h b/compiler-rt/lib/scudo/standalone/tsd.h
--- a/compiler-rt/lib/scudo/standalone/tsd.h
+++ b/compiler-rt/lib/scudo/standalone/tsd.h
@@ -38,7 +38,7 @@
 
   void commitBack(Allocator *Instance) { Instance->commitBack(this); }
 
-  INLINE bool tryLock() {
+  inline bool tryLock() {
     if (Mutex.tryLock()) {
       atomic_store_relaxed(&Precedence, 0);
       return true;
@@ -49,12 +49,12 @@
           static_cast<uptr>(getMonotonicTime() >> FIRST_32_SECOND_64(16, 0)));
     return false;
   }
-  INLINE void lock() {
+  inline void lock() {
     atomic_store_relaxed(&Precedence, 0);
     Mutex.lock();
   }
-  INLINE void unlock() { Mutex.unlock(); }
-  INLINE uptr getPrecedence() { return atomic_load_relaxed(&Precedence); }
+  inline void unlock() { Mutex.unlock(); }
+  inline uptr getPrecedence() { return atomic_load_relaxed(&Precedence); }
 
 private:
   HybridMutex Mutex;
diff --git a/compiler-rt/lib/scudo/standalone/wrappers_c_checks.h b/compiler-rt/lib/scudo/standalone/wrappers_c_checks.h
--- a/compiler-rt/lib/scudo/standalone/wrappers_c_checks.h
+++ b/compiler-rt/lib/scudo/standalone/wrappers_c_checks.h
@@ -20,7 +20,7 @@
 namespace scudo {
 
 // A common errno setting logic shared by almost all Scudo C wrappers.
-INLINE void *setErrnoOnNull(void *Ptr) {
+inline void *setErrnoOnNull(void *Ptr) {
   if (UNLIKELY(!Ptr))
     errno = ENOMEM;
   return Ptr;
@@ -30,14 +30,14 @@
 
 // Checks aligned_alloc() parameters, verifies that the alignment is a power of
 // two and that the size is a multiple of alignment.
-INLINE bool checkAlignedAllocAlignmentAndSize(uptr Alignment, uptr Size) {
+inline bool checkAlignedAllocAlignmentAndSize(uptr Alignment, uptr Size) {
   return Alignment == 0 || !isPowerOfTwo(Alignment) ||
          !isAligned(Size, Alignment);
 }
 
 // Checks posix_memalign() parameters, verifies that alignment is a power of two
 // and a multiple of sizeof(void *).
-INLINE bool checkPosixMemalignAlignment(uptr Alignment) {
+inline bool checkPosixMemalignAlignment(uptr Alignment) {
   return Alignment == 0 || !isPowerOfTwo(Alignment) ||
          !isAligned(Alignment, sizeof(void *));
 }
@@ -45,7 +45,7 @@
 // Returns true if calloc(Size, N) overflows on Size*N calculation. Use a
 // builtin supported by recent clang & GCC if it exists, otherwise fall back to
 // a costly division.
-INLINE bool checkForCallocOverflow(uptr Size, uptr N, uptr *Product) {
+inline bool checkForCallocOverflow(uptr Size, uptr N, uptr *Product) {
 #if __has_builtin(__builtin_umull_overflow)
   return __builtin_umull_overflow(Size, N, Product);
 #else
@@ -58,7 +58,7 @@
 
 // Returns true if the size passed to pvalloc overflows when rounded to the next
 // multiple of PageSize.
-INLINE bool checkForPvallocOverflow(uptr Size, uptr PageSize) {
+inline bool checkForPvallocOverflow(uptr Size, uptr PageSize) {
   return roundUpTo(Size, PageSize) < Size;
 }
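Usage sketch for the calloc overflow predicate: __builtin_umull_overflow returns true when the unsigned long product wraps, and writes the product otherwise, which is the contract checkForCallocOverflow exposes (assuming uptr is unsigned long, as the choice of builtin implies):

    #include <cstdio>

    int main() {
      unsigned long Product;
      // Wraps: calloc(SIZE_MAX, 2) must fail rather than return a short buffer.
      if (__builtin_umull_overflow(~0UL, 2UL, &Product))
        std::puts("overflow detected; calloc would return nullptr");
      // Fits: the product is written out.
      if (!__builtin_umull_overflow(21UL, 2UL, &Product))
        std::printf("21 * 2 = %lu\n", Product);
      return 0;
    }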