diff --git a/compiler-rt/lib/asan/asan_malloc_linux.cpp b/compiler-rt/lib/asan/asan_malloc_linux.cpp --- a/compiler-rt/lib/asan/asan_malloc_linux.cpp +++ b/compiler-rt/lib/asan/asan_malloc_linux.cpp @@ -34,7 +34,7 @@ static const uptr kDlsymAllocPoolSize = SANITIZER_RTEMS ? 4096 : 1024; static uptr alloc_memory_for_dlsym[kDlsymAllocPoolSize]; -static INLINE bool IsInDlsymAllocPool(const void *ptr) { +static inline bool IsInDlsymAllocPool(const void *ptr) { uptr off = (uptr)ptr - (uptr)alloc_memory_for_dlsym; return off < allocated_for_dlsym * sizeof(alloc_memory_for_dlsym[0]); } @@ -95,12 +95,12 @@ } #endif -static INLINE bool MaybeInDlsym() { +static inline bool MaybeInDlsym() { // Fuchsia doesn't use dlsym-based interceptors. return !SANITIZER_FUCHSIA && asan_init_is_running; } -static INLINE bool UseLocalPool() { +static inline bool UseLocalPool() { return EarlyMalloc() || MaybeInDlsym(); } @@ -304,4 +304,4 @@ #endif // SANITIZER_ANDROID #endif // SANITIZER_FREEBSD || SANITIZER_FUCHSIA || SANITIZER_LINUX || - // SANITIZER_NETBSD || SANITIZER_SOLARIS \ No newline at end of file + // SANITIZER_NETBSD || SANITIZER_SOLARIS diff --git a/compiler-rt/lib/asan/asan_malloc_local.h b/compiler-rt/lib/asan/asan_malloc_local.h --- a/compiler-rt/lib/asan/asan_malloc_local.h +++ b/compiler-rt/lib/asan/asan_malloc_local.h @@ -17,7 +17,7 @@ #include "sanitizer_common/sanitizer_platform.h" #include "asan_internal.h" -static INLINE bool EarlyMalloc() { +static inline bool EarlyMalloc() { return SANITIZER_RTEMS && (!__asan::asan_inited || __asan::asan_init_is_running); } diff --git a/compiler-rt/lib/asan/asan_report.cpp b/compiler-rt/lib/asan/asan_report.cpp --- a/compiler-rt/lib/asan/asan_report.cpp +++ b/compiler-rt/lib/asan/asan_report.cpp @@ -411,7 +411,7 @@ return false; } -static INLINE void CheckForInvalidPointerPair(void *p1, void *p2) { +static inline void CheckForInvalidPointerPair(void *p1, void *p2) { switch (flags()->detect_invalid_pointer_pairs) { case 0: return; diff --git a/compiler-rt/lib/msan/tests/msan_test.cpp b/compiler-rt/lib/msan/tests/msan_test.cpp --- a/compiler-rt/lib/msan/tests/msan_test.cpp +++ b/compiler-rt/lib/msan/tests/msan_test.cpp @@ -139,7 +139,7 @@ typedef signed int S4; typedef signed long long S8; #define NOINLINE __attribute__((noinline)) -#define INLINE __attribute__((always_inline)) +#define ALWAYS_INLINE __attribute__((always_inline)) static bool TrackingOrigins() { S8 x; @@ -4312,7 +4312,7 @@ } // namespace template <class T, class BinaryOp> -INLINE +ALWAYS_INLINE void BinaryOpOriginTest(BinaryOp op) { U4 ox = rand(); //NOLINT U4 oy = rand(); //NOLINT @@ -4345,12 +4345,12 @@ EXPECT_ORIGIN(ox, __msan_get_origin(z)); } -template <class T> INLINE T XOR(const T &a, const T&b) { return a ^ b; } -template <class T> INLINE T ADD(const T &a, const T&b) { return a + b; } -template <class T> INLINE T SUB(const T &a, const T&b) { return a - b; } -template <class T> INLINE T MUL(const T &a, const T&b) { return a * b; } -template <class T> INLINE T AND(const T &a, const T&b) { return a & b; } -template <class T> INLINE T OR (const T &a, const T&b) { return a | b; } +template <class T> ALWAYS_INLINE T XOR(const T &a, const T&b) { return a ^ b; } +template <class T> ALWAYS_INLINE T ADD(const T &a, const T&b) { return a + b; } +template <class T> ALWAYS_INLINE T SUB(const T &a, const T&b) { return a - b; } +template <class T> ALWAYS_INLINE T MUL(const T &a, const T&b) { return a * b; } +template <class T> ALWAYS_INLINE T AND(const T &a, const T&b) { return a & b; } +template <class T> ALWAYS_INLINE T OR (const T &a, const T&b) { return a | b; } TEST(MemorySanitizerOrigins, BinaryOp) { if (!TrackingOrigins()) return; @@
-4704,7 +4704,7 @@ __builtin_ia32_bzhi_di(0xABCDABCDABCDABCD, Poisoned<U8>(1, 0xFFFFFFFF00000000ULL))); } -INLINE U4 bextr_imm(U4 start, U4 len) { +ALWAYS_INLINE U4 bextr_imm(U4 start, U4 len) { start &= 0xFF; len &= 0xFF; return (len << 8) | start; diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator.h --- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator.h +++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator.h @@ -52,14 +52,14 @@ // Callback type for iterating over chunks. typedef void (*ForEachChunkCallback)(uptr chunk, void *arg); -INLINE u32 Rand(u32 *state) { // ANSI C linear congruential PRNG. +inline u32 Rand(u32 *state) { // ANSI C linear congruential PRNG. return (*state = *state * 1103515245 + 12345) >> 16; } -INLINE u32 RandN(u32 *state, u32 n) { return Rand(state) % n; } // [0, n) +inline u32 RandN(u32 *state, u32 n) { return Rand(state) % n; } // [0, n) template <typename T> -INLINE void RandomShuffle(T *a, u32 n, u32 *rand_state) { +inline void RandomShuffle(T *a, u32 n, u32 *rand_state) { if (n <= 1) return; u32 state = *rand_state; for (u32 i = n - 1; i > 0; i--) diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_checks.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_checks.h --- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_checks.h +++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_checks.h @@ -27,7 +27,7 @@ void SetErrnoToENOMEM(); // A common errno setting logic shared by almost all sanitizer allocator APIs. -INLINE void *SetErrnoOnNull(void *ptr) { +inline void *SetErrnoOnNull(void *ptr) { if (UNLIKELY(!ptr)) SetErrnoToENOMEM(); return ptr; @@ -41,7 +41,7 @@ // two and that the size is a multiple of alignment for POSIX implementation, // and a bit relaxed requirement for non-POSIX ones, that the size is a multiple // of alignment. -INLINE bool CheckAlignedAllocAlignmentAndSize(uptr alignment, uptr size) { +inline bool CheckAlignedAllocAlignmentAndSize(uptr alignment, uptr size) { #if SANITIZER_POSIX return alignment != 0 && IsPowerOfTwo(alignment) && (size & (alignment - 1)) == 0; @@ -52,13 +52,13 @@ // Checks posix_memalign() parameters, verifies that alignment is a power of two // and a multiple of sizeof(void *). -INLINE bool CheckPosixMemalignAlignment(uptr alignment) { +inline bool CheckPosixMemalignAlignment(uptr alignment) { return alignment != 0 && IsPowerOfTwo(alignment) && (alignment % sizeof(void *)) == 0; } // Returns true if calloc(size, n) call overflows on size*n calculation. -INLINE bool CheckForCallocOverflow(uptr size, uptr n) { +inline bool CheckForCallocOverflow(uptr size, uptr n) { if (!size) return false; uptr max = (uptr)-1L; @@ -67,7 +67,7 @@ // Returns true if the size passed to pvalloc overflows when rounded to the next // multiple of page_size. -INLINE bool CheckForPvallocOverflow(uptr size, uptr page_size) { +inline bool CheckForPvallocOverflow(uptr size, uptr page_size) { return RoundUpTo(size, page_size) < size; } diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_secondary.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_secondary.h --- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_secondary.h +++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_secondary.h @@ -18,8 +18,8 @@ // (currently, 32 bits and internal allocator).
class LargeMmapAllocatorPtrArrayStatic { public: - INLINE void *Init() { return &p_[0]; } - INLINE void EnsureSpace(uptr n) { CHECK_LT(n, kMaxNumChunks); } + inline void *Init() { return &p_[0]; } + inline void EnsureSpace(uptr n) { CHECK_LT(n, kMaxNumChunks); } private: static const int kMaxNumChunks = 1 << 15; uptr p_[kMaxNumChunks]; @@ -31,14 +31,14 @@ // same functionality in Fuchsia case, which does not support MAP_NORESERVE. class LargeMmapAllocatorPtrArrayDynamic { public: - INLINE void *Init() { + inline void *Init() { uptr p = address_range_.Init(kMaxNumChunks * sizeof(uptr), SecondaryAllocatorName); CHECK(p); return reinterpret_cast<void *>(p); } - INLINE void EnsureSpace(uptr n) { + inline void EnsureSpace(uptr n) { CHECK_LT(n, kMaxNumChunks); DCHECK(n <= n_reserved_); if (UNLIKELY(n == n_reserved_)) { diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_atomic.h b/compiler-rt/lib/sanitizer_common/sanitizer_atomic.h --- a/compiler-rt/lib/sanitizer_common/sanitizer_atomic.h +++ b/compiler-rt/lib/sanitizer_common/sanitizer_atomic.h @@ -72,12 +72,12 @@ // Clutter-reducing helpers. template <typename T> -INLINE typename T::Type atomic_load_relaxed(const volatile T *a) { +inline typename T::Type atomic_load_relaxed(const volatile T *a) { return atomic_load(a, memory_order_relaxed); } template <typename T> -INLINE void atomic_store_relaxed(volatile T *a, typename T::Type v) { +inline void atomic_store_relaxed(volatile T *a, typename T::Type v) { atomic_store(a, v, memory_order_relaxed); } diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang.h b/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang.h --- a/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang.h +++ b/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang.h @@ -34,16 +34,16 @@ // See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html // for mappings of the memory model to different processors.
-INLINE void atomic_signal_fence(memory_order) { +inline void atomic_signal_fence(memory_order) { __asm__ __volatile__("" ::: "memory"); } -INLINE void atomic_thread_fence(memory_order) { +inline void atomic_thread_fence(memory_order) { __sync_synchronize(); } template <typename T> -INLINE typename T::Type atomic_fetch_add(volatile T *a, +inline typename T::Type atomic_fetch_add(volatile T *a, typename T::Type v, memory_order mo) { (void)mo; DCHECK(!((uptr)a % sizeof(*a))); @@ -51,7 +51,7 @@ } template <typename T> -INLINE typename T::Type atomic_fetch_sub(volatile T *a, +inline typename T::Type atomic_fetch_sub(volatile T *a, typename T::Type v, memory_order mo) { (void)mo; DCHECK(!((uptr)a % sizeof(*a))); @@ -59,7 +59,7 @@ } template <typename T> -INLINE typename T::Type atomic_exchange(volatile T *a, +inline typename T::Type atomic_exchange(volatile T *a, typename T::Type v, memory_order mo) { DCHECK(!((uptr)a % sizeof(*a))); if (mo & (memory_order_release | memory_order_acq_rel | memory_order_seq_cst)) @@ -71,7 +71,7 @@ } template <typename T> -INLINE bool atomic_compare_exchange_strong(volatile T *a, typename T::Type *cmp, +inline bool atomic_compare_exchange_strong(volatile T *a, typename T::Type *cmp, typename T::Type xchg, memory_order mo) { typedef typename T::Type Type; @@ -84,7 +84,7 @@ } template <typename T> -INLINE bool atomic_compare_exchange_weak(volatile T *a, +inline bool atomic_compare_exchange_weak(volatile T *a, typename T::Type *cmp, typename T::Type xchg, memory_order mo) { diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang_mips.h b/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang_mips.h --- a/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang_mips.h +++ b/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang_mips.h @@ -37,7 +37,7 @@ } __attribute__((aligned(32))) lock = {0, {0}}; template <> -INLINE atomic_uint64_t::Type atomic_fetch_add(volatile atomic_uint64_t *ptr, +inline atomic_uint64_t::Type atomic_fetch_add(volatile atomic_uint64_t *ptr, atomic_uint64_t::Type val, memory_order mo) { DCHECK(mo & @@ -55,14 +55,14 @@ } template <> -INLINE atomic_uint64_t::Type atomic_fetch_sub(volatile atomic_uint64_t *ptr, +inline atomic_uint64_t::Type atomic_fetch_sub(volatile atomic_uint64_t *ptr, atomic_uint64_t::Type val, memory_order mo) { return atomic_fetch_add(ptr, -val, mo); } template <> -INLINE bool atomic_compare_exchange_strong(volatile atomic_uint64_t *ptr, +inline bool atomic_compare_exchange_strong(volatile atomic_uint64_t *ptr, atomic_uint64_t::Type *cmp, atomic_uint64_t::Type xchg, memory_order mo) { @@ -87,7 +87,7 @@ } template <> -INLINE atomic_uint64_t::Type atomic_load(const volatile atomic_uint64_t *ptr, +inline atomic_uint64_t::Type atomic_load(const volatile atomic_uint64_t *ptr, memory_order mo) { DCHECK(mo & (memory_order_relaxed | memory_order_releasae | memory_order_seq_cst)); @@ -100,7 +100,7 @@ } template <> -INLINE void atomic_store(volatile atomic_uint64_t *ptr, atomic_uint64_t::Type v, +inline void atomic_store(volatile atomic_uint64_t *ptr, atomic_uint64_t::Type v, memory_order mo) { DCHECK(mo & (memory_order_relaxed | memory_order_releasae | memory_order_seq_cst)); diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang_other.h b/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang_other.h --- a/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang_other.h +++ b/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang_other.h @@ -17,12 +17,12 @@ namespace __sanitizer { -INLINE void proc_yield(int cnt) { +inline void proc_yield(int cnt) { __asm__
__volatile__("" ::: "memory"); } template <typename T> -INLINE typename T::Type atomic_load( +inline typename T::Type atomic_load( const volatile T *a, memory_order mo) { DCHECK(mo & (memory_order_relaxed | memory_order_consume | memory_order_acquire | memory_order_seq_cst)); @@ -60,7 +60,7 @@ } template <typename T> -INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) { +inline void atomic_store(volatile T *a, typename T::Type v, memory_order mo) { DCHECK(mo & (memory_order_relaxed | memory_order_release | memory_order_seq_cst)); DCHECK(!((uptr)a % sizeof(*a))); diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang_x86.h b/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang_x86.h --- a/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang_x86.h +++ b/compiler-rt/lib/sanitizer_common/sanitizer_atomic_clang_x86.h @@ -16,7 +16,7 @@ namespace __sanitizer { -INLINE void proc_yield(int cnt) { +inline void proc_yield(int cnt) { __asm__ __volatile__("" ::: "memory"); for (int i = 0; i < cnt; i++) __asm__ __volatile__("pause"); @@ -24,7 +24,7 @@ } template <typename T> -INLINE typename T::Type atomic_load( +inline typename T::Type atomic_load( const volatile T *a, memory_order mo) { DCHECK(mo & (memory_order_relaxed | memory_order_consume | memory_order_acquire | memory_order_seq_cst)); @@ -70,7 +70,7 @@ } template <typename T> -INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) { +inline void atomic_store(volatile T *a, typename T::Type v, memory_order mo) { DCHECK(mo & (memory_order_relaxed | memory_order_release | memory_order_seq_cst)); DCHECK(!((uptr)a % sizeof(*a))); diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_atomic_msvc.h b/compiler-rt/lib/sanitizer_common/sanitizer_atomic_msvc.h --- a/compiler-rt/lib/sanitizer_common/sanitizer_atomic_msvc.h +++ b/compiler-rt/lib/sanitizer_common/sanitizer_atomic_msvc.h @@ -54,21 +54,21 @@ namespace __sanitizer { -INLINE void atomic_signal_fence(memory_order) { +inline void atomic_signal_fence(memory_order) { _ReadWriteBarrier(); } -INLINE void atomic_thread_fence(memory_order) { +inline void atomic_thread_fence(memory_order) { _mm_mfence(); } -INLINE void proc_yield(int cnt) { +inline void proc_yield(int cnt) { for (int i = 0; i < cnt; i++) _mm_pause(); } template <typename T> -INLINE typename T::Type atomic_load( +inline typename T::Type atomic_load( const volatile T *a, memory_order mo) { DCHECK(mo & (memory_order_relaxed | memory_order_consume | memory_order_acquire | memory_order_seq_cst)); @@ -86,7 +86,7 @@ } template <typename T> -INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) { +inline void atomic_store(volatile T *a, typename T::Type v, memory_order mo) { DCHECK(mo & (memory_order_relaxed | memory_order_release | memory_order_seq_cst)); DCHECK(!((uptr)a % sizeof(*a))); @@ -102,7 +102,7 @@ atomic_thread_fence(memory_order_seq_cst); } -INLINE u32 atomic_fetch_add(volatile atomic_uint32_t *a, +inline u32 atomic_fetch_add(volatile atomic_uint32_t *a, u32 v, memory_order mo) { (void)mo; DCHECK(!((uptr)a % sizeof(*a))); @@ -110,7 +110,7 @@ (long)v); } -INLINE uptr atomic_fetch_add(volatile atomic_uintptr_t *a, +inline uptr atomic_fetch_add(volatile atomic_uintptr_t *a, uptr v, memory_order mo) { (void)mo; DCHECK(!((uptr)a % sizeof(*a))); @@ -123,7 +123,7 @@ #endif } -INLINE u32 atomic_fetch_sub(volatile atomic_uint32_t *a, +inline u32 atomic_fetch_sub(volatile atomic_uint32_t *a, u32 v, memory_order mo) { (void)mo; DCHECK(!((uptr)a % sizeof(*a))); @@ -131,7 +131,7 @@ -(long)v); } -INLINE uptr atomic_fetch_sub(volatile
atomic_uintptr_t *a, +inline uptr atomic_fetch_sub(volatile atomic_uintptr_t *a, uptr v, memory_order mo) { (void)mo; DCHECK(!((uptr)a % sizeof(*a))); @@ -144,28 +144,28 @@ #endif } -INLINE u8 atomic_exchange(volatile atomic_uint8_t *a, +inline u8 atomic_exchange(volatile atomic_uint8_t *a, u8 v, memory_order mo) { (void)mo; DCHECK(!((uptr)a % sizeof(*a))); return (u8)_InterlockedExchange8((volatile char*)&a->val_dont_use, v); } -INLINE u16 atomic_exchange(volatile atomic_uint16_t *a, +inline u16 atomic_exchange(volatile atomic_uint16_t *a, u16 v, memory_order mo) { (void)mo; DCHECK(!((uptr)a % sizeof(*a))); return (u16)_InterlockedExchange16((volatile short*)&a->val_dont_use, v); } -INLINE u32 atomic_exchange(volatile atomic_uint32_t *a, +inline u32 atomic_exchange(volatile atomic_uint32_t *a, u32 v, memory_order mo) { (void)mo; DCHECK(!((uptr)a % sizeof(*a))); return (u32)_InterlockedExchange((volatile long*)&a->val_dont_use, v); } -INLINE bool atomic_compare_exchange_strong(volatile atomic_uint8_t *a, +inline bool atomic_compare_exchange_strong(volatile atomic_uint8_t *a, u8 *cmp, u8 xchgv, memory_order mo) { @@ -191,7 +191,7 @@ return false; } -INLINE bool atomic_compare_exchange_strong(volatile atomic_uintptr_t *a, +inline bool atomic_compare_exchange_strong(volatile atomic_uintptr_t *a, uptr *cmp, uptr xchg, memory_order mo) { @@ -204,7 +204,7 @@ return false; } -INLINE bool atomic_compare_exchange_strong(volatile atomic_uint16_t *a, +inline bool atomic_compare_exchange_strong(volatile atomic_uint16_t *a, u16 *cmp, u16 xchg, memory_order mo) { @@ -217,7 +217,7 @@ return false; } -INLINE bool atomic_compare_exchange_strong(volatile atomic_uint32_t *a, +inline bool atomic_compare_exchange_strong(volatile atomic_uint32_t *a, u32 *cmp, u32 xchg, memory_order mo) { @@ -230,7 +230,7 @@ return false; } -INLINE bool atomic_compare_exchange_strong(volatile atomic_uint64_t *a, +inline bool atomic_compare_exchange_strong(volatile atomic_uint64_t *a, u64 *cmp, u64 xchg, memory_order mo) { @@ -244,7 +244,7 @@ } template <typename T> -INLINE bool atomic_compare_exchange_weak(volatile T *a, +inline bool atomic_compare_exchange_weak(volatile T *a, typename T::Type *cmp, typename T::Type xchg, memory_order mo) { diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_common.h b/compiler-rt/lib/sanitizer_common/sanitizer_common.h --- a/compiler-rt/lib/sanitizer_common/sanitizer_common.h +++ b/compiler-rt/lib/sanitizer_common/sanitizer_common.h @@ -53,25 +53,25 @@ extern const char *SanitizerToolName; // Can be changed by the tool. extern atomic_uint32_t current_verbosity; -INLINE void SetVerbosity(int verbosity) { +inline void SetVerbosity(int verbosity) { atomic_store(&current_verbosity, verbosity, memory_order_relaxed); } -INLINE int Verbosity() { +inline int Verbosity() { return atomic_load(&current_verbosity, memory_order_relaxed); } #if SANITIZER_ANDROID -INLINE uptr GetPageSize() { +inline uptr GetPageSize() { // Android post-M sysconf(_SC_PAGESIZE) crashes if called from .preinit_array.
return 4096; } -INLINE uptr GetPageSizeCached() { +inline uptr GetPageSizeCached() { return 4096; } #else uptr GetPageSize(); extern uptr PageSizeCached; -INLINE uptr GetPageSizeCached() { +inline uptr GetPageSizeCached() { if (!PageSizeCached) PageSizeCached = GetPageSize(); return PageSizeCached; @@ -91,7 +91,7 @@ // Memory management void *MmapOrDie(uptr size, const char *mem_type, bool raw_report = false); -INLINE void *MmapOrDieQuietly(uptr size, const char *mem_type) { +inline void *MmapOrDieQuietly(uptr size, const char *mem_type) { return MmapOrDie(size, mem_type, /*raw_report*/ true); } void UnmapOrDie(void *addr, uptr size); @@ -374,7 +374,7 @@ } #endif -INLINE uptr MostSignificantSetBitIndex(uptr x) { +inline uptr MostSignificantSetBitIndex(uptr x) { CHECK_NE(x, 0U); unsigned long up; #if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__) @@ -391,7 +391,7 @@ return up; } -INLINE uptr LeastSignificantSetBitIndex(uptr x) { +inline uptr LeastSignificantSetBitIndex(uptr x) { CHECK_NE(x, 0U); unsigned long up; #if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__) @@ -408,11 +408,11 @@ return up; } -INLINE bool IsPowerOfTwo(uptr x) { +inline bool IsPowerOfTwo(uptr x) { return (x & (x - 1)) == 0; } -INLINE uptr RoundUpToPowerOfTwo(uptr size) { +inline uptr RoundUpToPowerOfTwo(uptr size) { CHECK(size); if (IsPowerOfTwo(size)) return size; @@ -422,20 +422,20 @@ return 1ULL << (up + 1); } -INLINE uptr RoundUpTo(uptr size, uptr boundary) { +inline uptr RoundUpTo(uptr size, uptr boundary) { RAW_CHECK(IsPowerOfTwo(boundary)); return (size + boundary - 1) & ~(boundary - 1); } -INLINE uptr RoundDownTo(uptr x, uptr boundary) { +inline uptr RoundDownTo(uptr x, uptr boundary) { return x & ~(boundary - 1); } -INLINE bool IsAligned(uptr a, uptr alignment) { +inline bool IsAligned(uptr a, uptr alignment) { return (a & (alignment - 1)) == 0; } -INLINE uptr Log2(uptr x) { +inline uptr Log2(uptr x) { CHECK(IsPowerOfTwo(x)); return LeastSignificantSetBitIndex(x); } @@ -451,14 +451,14 @@ } // Char handling -INLINE bool IsSpace(int c) { +inline bool IsSpace(int c) { return (c == ' ') || (c == '\n') || (c == '\t') || (c == '\f') || (c == '\r') || (c == '\v'); } -INLINE bool IsDigit(int c) { +inline bool IsDigit(int c) { return (c >= '0') && (c <= '9'); } -INLINE int ToLower(int c) { +inline int ToLower(int c) { return (c >= 'A' && c <= 'Z') ? (c + 'a' - 'A') : c; } @@ -840,15 +840,15 @@ #if SANITIZER_MAC || SANITIZER_WIN_TRACE void LogFullErrorReport(const char *buffer); #else -INLINE void LogFullErrorReport(const char *buffer) {} +inline void LogFullErrorReport(const char *buffer) {} #endif #if SANITIZER_LINUX || SANITIZER_MAC void WriteOneLineToSyslog(const char *s); void LogMessageOnPrintf(const char *str); #else -INLINE void WriteOneLineToSyslog(const char *s) {} -INLINE void LogMessageOnPrintf(const char *str) {} +inline void WriteOneLineToSyslog(const char *s) {} +inline void LogMessageOnPrintf(const char *str) {} #endif #if SANITIZER_LINUX || SANITIZER_WIN_TRACE @@ -856,21 +856,21 @@ void AndroidLogInit(); void SetAbortMessage(const char *); #else -INLINE void AndroidLogInit() {} +inline void AndroidLogInit() {} // FIXME: MacOS implementation could use CRSetCrashLogMessage. 
-INLINE void SetAbortMessage(const char *) {} +inline void SetAbortMessage(const char *) {} #endif #if SANITIZER_ANDROID void SanitizerInitializeUnwinder(); AndroidApiLevel AndroidGetApiLevel(); #else -INLINE void AndroidLogWrite(const char *buffer_unused) {} -INLINE void SanitizerInitializeUnwinder() {} -INLINE AndroidApiLevel AndroidGetApiLevel() { return ANDROID_NOT_ANDROID; } +inline void AndroidLogWrite(const char *buffer_unused) {} +inline void SanitizerInitializeUnwinder() {} +inline AndroidApiLevel AndroidGetApiLevel() { return ANDROID_NOT_ANDROID; } #endif -INLINE uptr GetPthreadDestructorIterations() { +inline uptr GetPthreadDestructorIterations() { #if SANITIZER_ANDROID return (AndroidGetApiLevel() == ANDROID_LOLLIPOP_MR1) ? 8 : 4; #elif SANITIZER_POSIX @@ -976,7 +976,7 @@ #if SANITIZER_LINUX && SANITIZER_S390_64 void AvoidCVE_2016_2143(); #else -INLINE void AvoidCVE_2016_2143() {} +inline void AvoidCVE_2016_2143() {} #endif struct StackDepotStats { @@ -997,7 +997,7 @@ // Returns the number of logical processors on the system. u32 GetNumberOfCPUs(); extern u32 NumberOfCPUsCached; -INLINE u32 GetNumberOfCPUsCached() { +inline u32 GetNumberOfCPUsCached() { if (!NumberOfCPUsCached) NumberOfCPUsCached = GetNumberOfCPUs(); return NumberOfCPUsCached; diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_internal_defs.h b/compiler-rt/lib/sanitizer_common/sanitizer_internal_defs.h --- a/compiler-rt/lib/sanitizer_common/sanitizer_internal_defs.h +++ b/compiler-rt/lib/sanitizer_common/sanitizer_internal_defs.h @@ -196,9 +196,6 @@ // This header should NOT include any other headers to avoid portability issues. // Common defs. -#ifndef INLINE -#define INLINE inline -#endif #define INTERFACE_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE #define SANITIZER_WEAK_DEFAULT_IMPL \ extern "C" SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE NOINLINE diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_linux.h b/compiler-rt/lib/sanitizer_common/sanitizer_linux.h --- a/compiler-rt/lib/sanitizer_common/sanitizer_linux.h +++ b/compiler-rt/lib/sanitizer_common/sanitizer_linux.h @@ -109,7 +109,7 @@ // Releases memory pages entirely within the [beg, end] address range. // The pages no longer count toward RSS; reads are guaranteed to return 0. // Requires (but does not verify!) that pages are MAP_PRIVATE. -INLINE void ReleaseMemoryPagesToOSAndZeroFill(uptr beg, uptr end) { +inline void ReleaseMemoryPagesToOSAndZeroFill(uptr beg, uptr end) { // man madvise on Linux promises zero-fill for anonymous private pages. // Testing shows the same behaviour for private (but not anonymous) mappings // of shm_open() files, as long as the underlying file is untouched. diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp --- a/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp +++ b/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp @@ -772,7 +772,7 @@ // initialized after the vDSO function pointers, so if it exists, is not null // and is not empty, we can use clock_gettime. extern "C" SANITIZER_WEAK_ATTRIBUTE char *__progname; -INLINE bool CanUseVDSO() { +inline bool CanUseVDSO() { // Bionic is safe, it checks for the vDSO function pointers to be initialized. 
if (SANITIZER_ANDROID) return true; diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_mac.h b/compiler-rt/lib/sanitizer_common/sanitizer_mac.h --- a/compiler-rt/lib/sanitizer_common/sanitizer_mac.h +++ b/compiler-rt/lib/sanitizer_common/sanitizer_mac.h @@ -75,7 +75,7 @@ namespace __sanitizer { static BlockingMutex crashreporter_info_mutex(LINKER_INITIALIZED); -INLINE void CRAppendCrashLogMessage(const char *msg) { +inline void CRAppendCrashLogMessage(const char *msg) { BlockingMutexLock l(&crashreporter_info_mutex); internal_strlcat(__crashreporter_info_buff__, msg, sizeof(__crashreporter_info_buff__)); } diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_freebsd.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_freebsd.cpp --- a/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_freebsd.cpp +++ b/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_freebsd.cpp @@ -81,8 +81,6 @@ #include #undef _KERNEL -#undef INLINE // to avoid clashes with sanitizers' definitions - #undef IOC_DIRMASK // Include these after system headers to avoid name clashes and ambiguities. diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_report.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_report.cpp --- a/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_report.cpp +++ b/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_report.cpp @@ -47,14 +47,14 @@ return SupportsColoredOutput(fd); } -static INLINE bool ReportSupportsColors() { +static inline bool ReportSupportsColors() { return report_file.SupportsColors(); } #else // SANITIZER_FUCHSIA // Fuchsia's logs always go through post-processing that handles colorization. -static INLINE bool ReportSupportsColors() { return true; } +static inline bool ReportSupportsColors() { return true; } #endif // !SANITIZER_FUCHSIA diff --git a/compiler-rt/lib/scudo/scudo_allocator.cpp b/compiler-rt/lib/scudo/scudo_allocator.cpp --- a/compiler-rt/lib/scudo/scudo_allocator.cpp +++ b/compiler-rt/lib/scudo/scudo_allocator.cpp @@ -44,7 +44,7 @@ // at compilation or at runtime. static atomic_uint8_t HashAlgorithm = { CRC32Software }; -INLINE u32 computeCRC32(u32 Crc, uptr Value, uptr *Array, uptr ArraySize) { +inline u32 computeCRC32(u32 Crc, uptr Value, uptr *Array, uptr ArraySize) { // If the hardware CRC32 feature is defined here, it was enabled everywhere, // as opposed to only for scudo_crc32.cpp. This means that other hardware // specific instructions were likely emitted at other places, and as a @@ -71,31 +71,31 @@ static BackendT &getBackend(); namespace Chunk { - static INLINE AtomicPackedHeader *getAtomicHeader(void *Ptr) { + static inline AtomicPackedHeader *getAtomicHeader(void *Ptr) { return reinterpret_cast<AtomicPackedHeader *>(reinterpret_cast<uptr>(Ptr) - getHeaderSize()); } - static INLINE + static inline const AtomicPackedHeader *getConstAtomicHeader(const void *Ptr) { return reinterpret_cast<const AtomicPackedHeader *>( reinterpret_cast<uptr>(Ptr) - getHeaderSize()); } - static INLINE bool isAligned(const void *Ptr) { + static inline bool isAligned(const void *Ptr) { return IsAligned(reinterpret_cast<uptr>(Ptr), MinAlignment); } // We can't use the offset member of the chunk itself, as we would double // fetch it without any warranty that it wouldn't have been tampered. To // prevent this, we work with a local copy of the header.
- static INLINE void *getBackendPtr(const void *Ptr, UnpackedHeader *Header) { + static inline void *getBackendPtr(const void *Ptr, UnpackedHeader *Header) { return reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) - getHeaderSize() - (Header->Offset << MinAlignmentLog)); } // Returns the usable size for a chunk, meaning the amount of bytes from the // beginning of the user data to the end of the backend allocated chunk. - static INLINE uptr getUsableSize(const void *Ptr, UnpackedHeader *Header) { + static inline uptr getUsableSize(const void *Ptr, UnpackedHeader *Header) { const uptr ClassId = Header->ClassId; if (ClassId) return PrimaryT::ClassIdToSize(ClassId) - getHeaderSize() - @@ -105,7 +105,7 @@ } // Returns the size the user requested when allocating the chunk. - static INLINE uptr getSize(const void *Ptr, UnpackedHeader *Header) { + static inline uptr getSize(const void *Ptr, UnpackedHeader *Header) { const uptr SizeOrUnusedBytes = Header->SizeOrUnusedBytes; if (Header->ClassId) return SizeOrUnusedBytes; @@ -114,7 +114,7 @@ } // Compute the checksum of the chunk pointer and its header. - static INLINE u16 computeChecksum(const void *Ptr, UnpackedHeader *Header) { + static inline u16 computeChecksum(const void *Ptr, UnpackedHeader *Header) { UnpackedHeader ZeroChecksumHeader = *Header; ZeroChecksumHeader.Checksum = 0; uptr HeaderHolder[sizeof(UnpackedHeader) / sizeof(uptr)]; @@ -126,7 +126,7 @@ // Checks the validity of a chunk by verifying its checksum. It doesn't // incur termination in the event of an invalid chunk. - static INLINE bool isValid(const void *Ptr) { + static inline bool isValid(const void *Ptr) { PackedHeader NewPackedHeader = atomic_load_relaxed(getConstAtomicHeader(Ptr)); UnpackedHeader NewUnpackedHeader = @@ -140,7 +140,7 @@ COMPILER_CHECK(ChunkAvailable == 0); // Loads and unpacks the header, verifying the checksum in the process. - static INLINE + static inline void loadHeader(const void *Ptr, UnpackedHeader *NewUnpackedHeader) { PackedHeader NewPackedHeader = atomic_load_relaxed(getConstAtomicHeader(Ptr)); @@ -151,7 +151,7 @@ } // Packs and stores the header, computing the checksum in the process. - static INLINE void storeHeader(void *Ptr, UnpackedHeader *NewUnpackedHeader) { + static inline void storeHeader(void *Ptr, UnpackedHeader *NewUnpackedHeader) { NewUnpackedHeader->Checksum = computeChecksum(Ptr, NewUnpackedHeader); PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader); atomic_store_relaxed(getAtomicHeader(Ptr), NewPackedHeader); @@ -160,7 +160,7 @@ // Packs and stores the header, computing the checksum in the process. We // compare the current header with the expected provided one to ensure that // we are not being raced by a corruption occurring in another thread.
- static INLINE void compareExchangeHeader(void *Ptr, + static inline void compareExchangeHeader(void *Ptr, UnpackedHeader *NewUnpackedHeader, UnpackedHeader *OldUnpackedHeader) { NewUnpackedHeader->Checksum = computeChecksum(Ptr, NewUnpackedHeader); diff --git a/compiler-rt/lib/scudo/scudo_crc32.h b/compiler-rt/lib/scudo/scudo_crc32.h --- a/compiler-rt/lib/scudo/scudo_crc32.h +++ b/compiler-rt/lib/scudo/scudo_crc32.h @@ -85,7 +85,7 @@ 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d }; -INLINE u32 computeSoftwareCRC32(u32 Crc, uptr Data) { +inline u32 computeSoftwareCRC32(u32 Crc, uptr Data) { for (uptr i = 0; i < sizeof(Data); i++) { Crc = CRC32Table[(Crc ^ Data) & 0xff] ^ (Crc >> 8); Data >>= 8; diff --git a/compiler-rt/lib/scudo/scudo_tsd.h b/compiler-rt/lib/scudo/scudo_tsd.h --- a/compiler-rt/lib/scudo/scudo_tsd.h +++ b/compiler-rt/lib/scudo/scudo_tsd.h @@ -29,7 +29,7 @@ void init(); void commitBack(); - INLINE bool tryLock() { + inline bool tryLock() { if (Mutex.TryLock()) { atomic_store_relaxed(&Precedence, 0); return true; @@ -40,14 +40,14 @@ return false; } - INLINE void lock() { + inline void lock() { atomic_store_relaxed(&Precedence, 0); Mutex.Lock(); } - INLINE void unlock() { Mutex.Unlock(); } + inline void unlock() { Mutex.Unlock(); } - INLINE uptr getPrecedence() { return atomic_load_relaxed(&Precedence); } + inline uptr getPrecedence() { return atomic_load_relaxed(&Precedence); } private: StaticSpinMutex Mutex; diff --git a/compiler-rt/lib/scudo/scudo_utils.h b/compiler-rt/lib/scudo/scudo_utils.h --- a/compiler-rt/lib/scudo/scudo_utils.h +++ b/compiler-rt/lib/scudo/scudo_utils.h @@ -20,7 +20,7 @@ namespace __scudo { template <typename Dest, typename Source> -INLINE Dest bit_cast(const Source& source) { +inline Dest bit_cast(const Source& source) { static_assert(sizeof(Dest) == sizeof(Source), "Sizes are not equal!"); Dest dest; memcpy(&dest, &source, sizeof(dest)); diff --git a/compiler-rt/lib/scudo/scudo_utils.cpp b/compiler-rt/lib/scudo/scudo_utils.cpp --- a/compiler-rt/lib/scudo/scudo_utils.cpp +++ b/compiler-rt/lib/scudo/scudo_utils.cpp @@ -121,7 +121,7 @@ // initialized after the other globals, so we can check its value to know if // calling getauxval is safe.
extern "C" SANITIZER_WEAK_ATTRIBUTE char *__progname; -INLINE bool areBionicGlobalsInitialized() { +inline bool areBionicGlobalsInitialized() { return !SANITIZER_ANDROID || (&__progname && __progname); } diff --git a/compiler-rt/lib/tsan/rtl/tsan_interceptors.h b/compiler-rt/lib/tsan/rtl/tsan_interceptors.h --- a/compiler-rt/lib/tsan/rtl/tsan_interceptors.h +++ b/compiler-rt/lib/tsan/rtl/tsan_interceptors.h @@ -22,7 +22,7 @@ LibIgnore *libignore(); #if !SANITIZER_GO -INLINE bool in_symbolizer() { +inline bool in_symbolizer() { cur_thread_init(); return UNLIKELY(cur_thread()->in_symbolizer); } diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl.h b/compiler-rt/lib/tsan/rtl/tsan_rtl.h --- a/compiler-rt/lib/tsan/rtl/tsan_rtl.h +++ b/compiler-rt/lib/tsan/rtl/tsan_rtl.h @@ -458,22 +458,22 @@ ThreadState *cur_thread(); void set_cur_thread(ThreadState *thr); void cur_thread_finalize(); -INLINE void cur_thread_init() { } +inline void cur_thread_init() { } #else __attribute__((tls_model("initial-exec"))) extern THREADLOCAL char cur_thread_placeholder[]; -INLINE ThreadState *cur_thread() { +inline ThreadState *cur_thread() { return reinterpret_cast(cur_thread_placeholder)->current; } -INLINE void cur_thread_init() { +inline void cur_thread_init() { ThreadState *thr = reinterpret_cast(cur_thread_placeholder); if (UNLIKELY(!thr->current)) thr->current = thr; } -INLINE void set_cur_thread(ThreadState *thr) { +inline void set_cur_thread(ThreadState *thr) { reinterpret_cast(cur_thread_placeholder)->current = thr; } -INLINE void cur_thread_finalize() { } +inline void cur_thread_finalize() { } #endif // SANITIZER_MAC || SANITIZER_ANDROID #endif // SANITIZER_GO