Index: cmake/config-ix.cmake
===================================================================
--- cmake/config-ix.cmake
+++ cmake/config-ix.cmake
@@ -179,7 +179,7 @@
 set(ALL_SAFESTACK_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM64} ${MIPS32} ${MIPS64})
 set(ALL_CFI_SUPPORTED_ARCH ${X86} ${X86_64} ${MIPS64})
 set(ALL_ESAN_SUPPORTED_ARCH ${X86_64} ${MIPS64})
-set(ALL_SCUDO_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM32} ${ARM64})
+set(ALL_SCUDO_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM32} ${ARM64} ${MIPS32} ${MIPS64})
 set(ALL_XRAY_SUPPORTED_ARCH ${X86_64} ${ARM32} ${ARM64} ${MIPS32} ${MIPS64} powerpc64le)
 
 if(APPLE)
Index: lib/sanitizer_common/sanitizer_atomic_clang.h
===================================================================
--- lib/sanitizer_common/sanitizer_atomic_clang.h
+++ lib/sanitizer_common/sanitizer_atomic_clang.h
@@ -78,7 +78,11 @@
                                            memory_order mo) {
   typedef typename T::Type Type;
   Type cmpv = *cmp;
+#if defined(__mips__)
+  Type prev = __mips_sync_val_compare_and_swap(&a->val_dont_use, cmpv, xchg);
+#else
   Type prev = __sync_val_compare_and_swap(&a->val_dont_use, cmpv, xchg);
+#endif
   if (prev == cmpv) return true;
   *cmp = prev;
Index: lib/sanitizer_common/sanitizer_atomic_clang_other.h
===================================================================
--- lib/sanitizer_common/sanitizer_atomic_clang_other.h
+++ lib/sanitizer_common/sanitizer_atomic_clang_other.h
@@ -17,6 +17,49 @@
 
 namespace __sanitizer {
 
+#if defined(__mips__)
+static void __spin_lock(volatile int *lock) {
+  while (__sync_lock_test_and_set(lock, 1))
+    while (*lock) {
+    }
+}
+
+static void __spin_unlock(volatile int *lock) { __sync_lock_release(lock); }
+
+static struct {
+  int lock;
+  char pad[32 - sizeof(int)];
+} __attribute__((aligned(32))) lock = {0};
+
+template <typename T> T __mips_sync_fetch_and_add(volatile T *ptr, T val) {
+  T ret;
+
+  __spin_lock(&lock.lock);
+
+  ret = *ptr;
+  *ptr = ret + val;
+
+  __spin_unlock(&lock.lock);
+
+  return ret;
+}
+
+template <typename T>
+T __mips_sync_val_compare_and_swap(volatile T *ptr, T oldval, T newval) {
+  T ret;
+
+  __spin_lock(&lock.lock);
+
+  ret = *ptr;
+  if (ret == oldval)
+    *ptr = newval;
+
+  __spin_unlock(&lock.lock);
+
+  return ret;
+}
+#endif
+
 INLINE void proc_yield(int cnt) {
   __asm__ __volatile__("" ::: "memory");
 }
@@ -53,8 +96,14 @@
     // 64-bit load on 32-bit platform.
     // Gross, but simple and reliable.
    // Assume that it is not in read-only memory.
+#if defined(__mips__)
+    v = __mips_sync_fetch_and_add(
+        const_cast<typename T::Type volatile *>(&a->val_dont_use),
+        (typename T::Type)0);
+#else
     v = __sync_fetch_and_add(
         const_cast<typename T::Type volatile *>(&a->val_dont_use), 0);
+#endif
   }
   return v;
 }
@@ -84,7 +133,11 @@
   typename T::Type cmp = a->val_dont_use;
   typename T::Type cur;
   for (;;) {
+#if defined(__mips__)
+    cur = __mips_sync_val_compare_and_swap(&a->val_dont_use, cmp, v);
+#else
     cur = __sync_val_compare_and_swap(&a->val_dont_use, cmp, v);
+#endif
     if (cmp == v)
       break;
     cmp = cur;
Index: test/scudo/random_shuffle.cpp
===================================================================
--- test/scudo/random_shuffle.cpp
+++ test/scudo/random_shuffle.cpp
@@ -7,7 +7,7 @@
 // RUN: %run %t 10000 > %T/random_shuffle_tmp_dir/out2
 // RUN: not diff %T/random_shuffle_tmp_dir/out?
 // RUN: rm -rf %T/random_shuffle_tmp_dir
-// UNSUPPORTED: i386-linux,i686-linux,arm-linux,armhf-linux,aarch64-linux
+// UNSUPPORTED: i386-linux,i686-linux,arm-linux,armhf-linux,aarch64-linux,mips-linux,mipsel-linux,mips64-linux,mips64el-linux
 
 // Tests that the allocator shuffles the chunks before returning to the user.
 