Index: cmake/config-ix.cmake
===================================================================
--- cmake/config-ix.cmake
+++ cmake/config-ix.cmake
@@ -272,7 +272,7 @@
 set(ALL_MSAN_SUPPORTED_ARCH ${X86_64} ${MIPS64} ${ARM64})
 set(ALL_PROFILE_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM32} ${ARM64} ${PPC64}
   ${MIPS32} ${MIPS64})
-set(ALL_TSAN_SUPPORTED_ARCH ${X86_64} ${MIPS64} ${ARM64})
+set(ALL_TSAN_SUPPORTED_ARCH ${X86_64} ${MIPS64} ${ARM64} ${PPC64})
 set(ALL_UBSAN_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM32} ${ARM64} ${MIPS32}
   ${MIPS64} ${PPC64})
 set(ALL_SAFESTACK_SUPPORTED_ARCH ${X86} ${X86_64})
Index: lib/sanitizer_common/sanitizer_linux.h
===================================================================
--- lib/sanitizer_common/sanitizer_linux.h
+++ lib/sanitizer_common/sanitizer_linux.h
@@ -44,7 +44,8 @@
 // internal_sigaction instead.
 int internal_sigaction_norestorer(int signum, const void *act, void *oldact);
 void internal_sigdelset(__sanitizer_sigset_t *set, int signum);
-#if defined(__x86_64__) || defined(__mips__) || defined(__aarch64__)
+#if defined(__x86_64__) || defined(__mips__) || defined(__aarch64__) \
+  || defined(__powerpc64__)
 uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
                     int *parent_tidptr, void *newtls, int *child_tidptr);
 #endif
Index: lib/sanitizer_common/sanitizer_linux.cc
===================================================================
--- lib/sanitizer_common/sanitizer_linux.cc
+++ lib/sanitizer_common/sanitizer_linux.cc
@@ -89,7 +89,8 @@
 // Are we using 32-bit or 64-bit Linux syscalls?
 // x32 (which defines __x86_64__) has SANITIZER_WORDSIZE == 32
 // but it still needs to use 64-bit syscalls.
-#if SANITIZER_LINUX && (defined(__x86_64__) || SANITIZER_WORDSIZE == 64)
+#if SANITIZER_LINUX && (defined(__x86_64__) || defined(__powerpc64__) || \
+    SANITIZER_WORDSIZE == 64)
 # define SANITIZER_LINUX_USES_64BIT_SYSCALLS 1
 #else
 # define SANITIZER_LINUX_USES_64BIT_SYSCALLS 0
@@ -983,6 +984,88 @@
                        : "x30", "memory");
   return res;
 }
+#elif defined(__powerpc64__)
+uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
+                    int *parent_tidptr, void *newtls, int *child_tidptr) {
+  long long res;
+/* Stack frame offsets.  */
+#if _CALL_ELF != 2
+#define FRAME_MIN_SIZE 112
+#define FRAME_TOC_SAVE 40
+#else
+#define FRAME_MIN_SIZE 32
+#define FRAME_TOC_SAVE 24
+#endif
+  if (!fn || !child_stack)
+    return -EINVAL;
+  CHECK_EQ(0, (uptr)child_stack % 16);
+  child_stack = (char *)child_stack - 2 * sizeof(unsigned long long);
+  ((unsigned long long *)child_stack)[0] = (uptr)fn;
+  ((unsigned long long *)child_stack)[1] = (uptr)arg;
+
+  register int (*__fn)(void *) __asm__("r3") = fn;
+  register void *__cstack __asm__("r4") = child_stack;
+  register int __flags __asm__("r5") = flags;
+  register void *__arg __asm__("r6") = arg;
+  register int *__ptidptr __asm__("r7") = parent_tidptr;
+  register void *__newtls __asm__("r8") = newtls;
+  register int *__ctidptr __asm__("r9") = child_tidptr;
+
+  __asm__ __volatile__(
+          /* fn, arg, child_stack are saved across the syscall */
+          "mr 28, %5\n\t"
+          "mr 29, %6\n\t"
+          "mr 27, %8\n\t"
+
+          /* syscall
+             r3 == flags
+             r4 == child_stack
+             r5 == parent_tidptr
+             r6 == newtls
+             r7 == child_tidptr */
+          "mr 3, %7\n\t"
+          "mr 5, %9\n\t"
+          "mr 6, %10\n\t"
+          "mr 7, %11\n\t"
+          "li 0, %3\n\t"
+          "sc\n\t"
+
+          /* Test if syscall was successful */
+          "cmpdi  cr1, 3, 0\n\t"
+          "crandc cr1*4+eq, cr1*4+eq, cr0*4+so\n\t"
+          "bne-   cr1, 1f\n\t"
+
+          /* Do the function call */
+          "std 2, %13(1)\n\t"
+#if _CALL_ELF != 2
+          "ld 0, 0(28)\n\t"
+          "ld 2, 8(28)\n\t"
+          "mtctr 0\n\t"
+#else
+          "mr 12, 28\n\t"
+          "mtctr 12\n\t"
+#endif
+          "mr 3, 27\n\t"
+          "bctrl\n\t"
+          "ld 2, %13(1)\n\t"
+
+          /* Call _exit(r3) */
+          "li 0, %4\n\t"
+          "sc\n\t"
+
+          /* Return to parent */
+          "1:\n\t"
+          "mr %0, 3\n\t"
+          : "=r" (res)
+          : "0" (-1), "i" (EINVAL),
+            "i" (__NR_clone), "i" (__NR_exit),
+            "r" (__fn), "r" (__cstack), "r" (__flags),
+            "r" (__arg), "r" (__ptidptr), "r" (__newtls),
+            "r" (__ctidptr), "i" (FRAME_MIN_SIZE), "i" (FRAME_TOC_SAVE)
+          : "cr0", "cr1", "memory", "ctr",
+            "r0", "r29", "r27", "r28");
+  return res;
+}
 #endif  // defined(__x86_64__) && SANITIZER_LINUX
 
 #if SANITIZER_ANDROID
Index: lib/sanitizer_common/sanitizer_platform.h
===================================================================
--- lib/sanitizer_common/sanitizer_platform.h
+++ lib/sanitizer_common/sanitizer_platform.h
@@ -91,6 +91,16 @@
 # endif
 #endif
 
+// VMA size definition for architectures that support multiple sizes.
+// PPC64 has two VMA sizes: 44 and 46.
+#if !defined(SANITIZER_PPC64_VMA)
+# define SANITIZER_PPC64_VMA 46
+#else
+# if SANITIZER_PPC64_VMA != 44 && SANITIZER_PPC64_VMA != 46
+#  error "invalid SANITIZER_PPC64_VMA size"
+# endif
+#endif
+
 // By default we allow to use SizeClassAllocator64 on 64-bit platform.
 // But in some cases (e.g. AArch64's 39-bit address space) SizeClassAllocator64
 // does not work well and we need to fallback to SizeClassAllocator32.
@@ -109,6 +119,9 @@
 // will still work but will consume more memory for TwoLevelByteMap.
 #if defined(__mips__)
 # define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 40)
+#elif defined(__powerpc64__)
+# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << \
+    SANITIZER_PPC64_VMA)
 #else
 # define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 47)
 #endif
@@ -139,7 +152,8 @@
 
 #if defined(__mips__) || (defined(__aarch64__) && SANITIZER_AARCH64_VMA == 39)
 # define SANITIZER_POINTER_FORMAT_LENGTH FIRST_32_SECOND_64(8, 10)
-#elif defined(__aarch64__) && SANITIZER_AARCH64_VMA == 42
+#elif (defined(__aarch64__) && SANITIZER_AARCH64_VMA == 42) || \
+      (defined(__powerpc64__) && SANITIZER_PPC64_VMA == 44)
 # define SANITIZER_POINTER_FORMAT_LENGTH FIRST_32_SECOND_64(8, 11)
 #else
 # define SANITIZER_POINTER_FORMAT_LENGTH FIRST_32_SECOND_64(8, 12)
Index: lib/sanitizer_common/sanitizer_posix.cc
===================================================================
--- lib/sanitizer_common/sanitizer_posix.cc
+++ lib/sanitizer_common/sanitizer_posix.cc
@@ -326,6 +326,10 @@
 void CheckVMASize() {
 #ifdef __aarch64__
   static const unsigned kBuiltVMA = SANITIZER_AARCH64_VMA;
+#elif defined(__powerpc64__)
+  static const unsigned kBuiltVMA = SANITIZER_PPC64_VMA;
+#endif
+#if defined(__powerpc64__) || defined(__aarch64__)
   unsigned maxRuntimeVMA =
     (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1);
   if (kBuiltVMA != maxRuntimeVMA) {
Index: lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc
===================================================================
--- lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc
+++ lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc
@@ -15,7 +15,7 @@
 #include "sanitizer_platform.h"
 
 #if SANITIZER_LINUX && (defined(__x86_64__) || defined(__mips__) || \
-                        defined(__aarch64__))
+                        defined(__aarch64__) || defined(__powerpc64__))
 
 #include "sanitizer_stoptheworld.h"
 
@@ -511,5 +511,5 @@
 }
 }  // namespace __sanitizer
 
-#endif  // SANITIZER_LINUX && (defined(__x86_64__) || defined(__mips__)
-        // || defined(__aarch64__)
+#endif  // SANITIZER_LINUX && (defined(__x86_64__) || defined(__mips__)
+        // || defined(__aarch64__) || defined(__powerpc64__)
Index: lib/tsan/rtl/tsan_interceptors.cc
===================================================================
--- lib/tsan/rtl/tsan_interceptors.cc
+++ lib/tsan/rtl/tsan_interceptors.cc
@@ -79,9 +79,11 @@
 };
 #endif
 
-#if defined(__x86_64__) || defined(__mips__)
+#if defined(__x86_64__) || defined(__mips__) \
+  || (defined(__powerpc64__) && defined(__BIG_ENDIAN__))
 #define PTHREAD_ABI_BASE "GLIBC_2.3.2"
-#elif defined(__aarch64__)
+#elif defined(__aarch64__) || (defined(__powerpc64__) \
+  && defined(__LITTLE_ENDIAN__))
 #define PTHREAD_ABI_BASE "GLIBC_2.17"
 #endif
Index: lib/tsan/rtl/tsan_platform.h
===================================================================
--- lib/tsan/rtl/tsan_platform.h
+++ lib/tsan/rtl/tsan_platform.h
@@ -161,6 +161,79 @@
 // Indicates the runtime will define the memory regions at runtime.
 #define TSAN_RUNTIME_VMA 1
 
+#elif defined(__powerpc64__)
+// PPC64 supports multiple VMA sizes, which leads to multiple address
+// transformation functions.  To support these multiple VMA transformations
+// and mappings, the TSan runtime for PPC64 reads the VMA size at startup
+// into a global (vmaSize) and uses it to select which mapping to apply.
+// Although slower, this lets the same instrumented binary run on kernels
+// configured with either VMA size.
+
+/*
+C/C++ on linux/powerpc64 (44-bit VMA)
+0000 0000 0100 - 0001 0000 0000: main binary
+0001 0000 0000 - 0001 0000 0000: -
+0001 0000 0000 - 0b00 0000 0000: shadow
+0b00 0000 0000 - 0b00 0000 0000: -
+0b00 0000 0000 - 0d00 0000 0000: metainfo (memory blocks and sync objects)
+0d00 0000 0000 - 0d00 0000 0000: -
+0d00 0000 0000 - 0f00 0000 0000: traces
+0f00 0000 0000 - 0f00 0000 0000: -
+0f00 0000 0000 - 0f50 0000 0000: heap
+0f50 0000 0000 - 0f60 0000 0000: -
+0f60 0000 0000 - 1000 0000 0000: modules and main thread stack
+*/
+struct Mapping44 {
+  static const uptr kMetaShadowBeg = 0x0b0000000000ull;
+  static const uptr kMetaShadowEnd = 0x0d0000000000ull;
+  static const uptr kTraceMemBeg   = 0x0d0000000000ull;
+  static const uptr kTraceMemEnd   = 0x0f0000000000ull;
+  static const uptr kShadowBeg     = 0x000100000000ull;
+  static const uptr kShadowEnd     = 0x0b0000000000ull;
+  static const uptr kLoAppMemBeg   = 0x000000000100ull;
+  static const uptr kLoAppMemEnd   = 0x000100000000ull;
+  static const uptr kHeapMemBeg    = 0x0f0000000000ull;
+  static const uptr kHeapMemEnd    = 0x0f5000000000ull;
+  static const uptr kHiAppMemBeg   = 0x0f6000000000ull;
+  static const uptr kHiAppMemEnd   = 0x100000000000ull; // 44 bits
+  static const uptr kAppMemMsk     = 0x0f0000000000ull;
+  static const uptr kAppMemXor     = 0x002100000000ull;
+  static const uptr kVdsoBeg       = 0x3c0000000000000ull;
+};
+
+/*
+C/C++ on linux/powerpc64 (46-bit VMA)
+0000 0000 1000 - 0100 0000 0000: main binary
+0100 0000 0000 - 0200 0000 0000: -
+0100 0000 0000 - 1000 0000 0000: shadow
+1000 0000 0000 - 1000 0000 0000: -
+1000 0000 0000 - 2000 0000 0000: metainfo (memory blocks and sync objects)
+2000 0000 0000 - 2000 0000 0000: -
+2000 0000 0000 - 2200 0000 0000: traces
+2200 0000 0000 - 3d00 0000 0000: -
+3d00 0000 0000 - 3e00 0000 0000: heap
+3e00 0000 0000 - 3e80 0000 0000: -
+3e80 0000 0000 - 4000 0000 0000: modules and main thread stack
+*/
+struct Mapping46 {
+  static const uptr kMetaShadowBeg = 0x100000000000ull;
+  static const uptr kMetaShadowEnd = 0x200000000000ull;
+  static const uptr kTraceMemBeg   = 0x200000000000ull;
+  static const uptr kTraceMemEnd   = 0x220000000000ull;
+  static const uptr kShadowBeg     = 0x010000000000ull;
+  static const uptr kShadowEnd     = 0x100000000000ull;
+  static const uptr kHeapMemBeg    = 0x3d0000000000ull;
+  static const uptr kHeapMemEnd    = 0x3e0000000000ull;
+  static const uptr kLoAppMemBeg   = 0x000000001000ull;
+  static const uptr kLoAppMemEnd   = 0x010000000000ull;
+  static const uptr kHiAppMemBeg   = 0x3e8000000000ull;
+  static const uptr kHiAppMemEnd   = 0x400000000000ull; // 46 bits
+  static const uptr kAppMemMsk     = 0x3c0000000000ull;
+  static const uptr kAppMemXor     = 0x020000000000ull;
+  static const uptr kVdsoBeg       = 0x7800000000000000ull;
+};
+
+// Indicates the runtime will define the memory regions at runtime.
+#define TSAN_RUNTIME_VMA 1
 #endif
 
 #elif defined(SANITIZER_GO) && !SANITIZER_WINDOWS
@@ -274,6 +347,12 @@
   else
     return MappingImpl<Mapping42, Type>();
   DCHECK(0);
+#elif defined(__powerpc64__)
+  if (vmaSize == 44)
+    return MappingImpl<Mapping44, Type>();
+  else
+    return MappingImpl<Mapping46, Type>();
+  DCHECK(0);
 #else
   return MappingImpl<Mapping, Type>();
 #endif
@@ -399,6 +478,12 @@
   else
     return IsAppMemImpl<Mapping42>(mem);
   DCHECK(0);
+#elif defined(__powerpc64__)
+  if (vmaSize == 44)
+    return IsAppMemImpl<Mapping44>(mem);
+  else
+    return IsAppMemImpl<Mapping46>(mem);
+  DCHECK(0);
 #else
   return IsAppMemImpl<Mapping>(mem);
 #endif
@@ -418,6 +503,12 @@
   else
     return IsShadowMemImpl<Mapping42>(mem);
   DCHECK(0);
+#elif defined(__powerpc64__)
+  if (vmaSize == 44)
+    return IsShadowMemImpl<Mapping44>(mem);
+  else
+    return IsShadowMemImpl<Mapping46>(mem);
+  DCHECK(0);
 #else
   return IsShadowMemImpl<Mapping>(mem);
 #endif
@@ -437,6 +528,12 @@
   else
     return IsMetaMemImpl<Mapping42>(mem);
   DCHECK(0);
+#elif defined(__powerpc64__)
+  if (vmaSize == 44)
+    return IsMetaMemImpl<Mapping44>(mem);
+  else
+    return IsMetaMemImpl<Mapping46>(mem);
+  DCHECK(0);
 #else
   return IsMetaMemImpl<Mapping>(mem);
 #endif
@@ -462,6 +559,12 @@
   else
     return MemToShadowImpl<Mapping42>(x);
   DCHECK(0);
+#elif defined(__powerpc64__)
+  if (vmaSize == 44)
+    return MemToShadowImpl<Mapping44>(x);
+  else
+    return MemToShadowImpl<Mapping46>(x);
+  DCHECK(0);
 #else
   return MemToShadowImpl<Mapping>(x);
 #endif
@@ -489,6 +592,12 @@
   else
     return MemToMetaImpl<Mapping42>(x);
   DCHECK(0);
+#elif defined(__powerpc64__)
+  if (vmaSize == 44)
+    return MemToMetaImpl<Mapping44>(x);
+  else
+    return MemToMetaImpl<Mapping46>(x);
+  DCHECK(0);
 #else
   return MemToMetaImpl<Mapping>(x);
 #endif
@@ -522,6 +631,12 @@
   else
     return ShadowToMemImpl<Mapping42>(s);
   DCHECK(0);
+#elif defined(__powerpc64__)
+  if (vmaSize == 44)
+    return ShadowToMemImpl<Mapping44>(s);
+  else
+    return ShadowToMemImpl<Mapping46>(s);
+  DCHECK(0);
 #else
   return ShadowToMemImpl<Mapping>(s);
 #endif
@@ -549,6 +664,12 @@
   else
     return GetThreadTraceImpl<Mapping42>(tid);
   DCHECK(0);
+#elif defined(__powerpc64__)
+  if (vmaSize == 44)
+    return GetThreadTraceImpl<Mapping44>(tid);
+  else
+    return GetThreadTraceImpl<Mapping46>(tid);
+  DCHECK(0);
 #else
   return GetThreadTraceImpl<Mapping>(tid);
 #endif
@@ -571,6 +692,12 @@
   else
     return GetThreadTraceHeaderImpl<Mapping42>(tid);
   DCHECK(0);
+#elif defined(__powerpc64__)
+  if (vmaSize == 44)
+    return GetThreadTraceHeaderImpl<Mapping44>(tid);
+  else
+    return GetThreadTraceHeaderImpl<Mapping46>(tid);
+  DCHECK(0);
 #else
   return GetThreadTraceHeaderImpl<Mapping>(tid);
 #endif
Index: lib/tsan/rtl/tsan_platform_linux.cc
===================================================================
--- lib/tsan/rtl/tsan_platform_linux.cc
+++ lib/tsan/rtl/tsan_platform_linux.cc
@@ -244,9 +244,9 @@
 #ifdef TSAN_RUNTIME_VMA
   vmaSize =
     (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1);
-  if (vmaSize != 39 && vmaSize != 42) {
+  if (vmaSize != 39 && vmaSize != 42 && vmaSize != 44 && vmaSize != 46) {
     Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
-    Printf("FATAL: Found %d - Supported 39 and 42\n", vmaSize);
+    Printf("FATAL: Found %d - Supported 39, 42, 44, and 46\n", vmaSize);
     Die();
   }
 #endif
Index: lib/tsan/rtl/tsan_platform_posix.cc
===================================================================
--- lib/tsan/rtl/tsan_platform_posix.cc
+++ lib/tsan/rtl/tsan_platform_posix.cc
@@ -58,6 +58,18 @@
   } else {
     DCHECK(0);
   }
+#elif defined(__powerpc64__)
+  uptr kMadviseRangeBeg = 0;
+  uptr kMadviseRangeSize = 0;
+  if (vmaSize == 44) {
+    kMadviseRangeBeg  = 0x0f60000000ull;
+    kMadviseRangeSize = 0x0010000000ull;
+  } else if (vmaSize == 46) {
+    kMadviseRangeBeg  = 0x3f0000000000ull;
+    kMadviseRangeSize = 0x010000000000ull;
+  } else {
+    DCHECK(0);
+  }
 #endif
   NoHugePagesInRegion(MemToShadow(kMadviseRangeBeg),
                       kMadviseRangeSize * kShadowMultiplier);
Index: lib/tsan/rtl/tsan_rtl.h
===================================================================
--- lib/tsan/rtl/tsan_rtl.h
+++ lib/tsan/rtl/tsan_rtl.h
@@ -54,7 +54,7 @@
 #ifndef SANITIZER_GO
 struct MapUnmapCallback;
-#if defined(__mips64) || defined(__aarch64__)
+#if defined(__mips64) || defined(__aarch64__) || defined(__powerpc__)
 static const uptr kAllocatorSpace = 0;
 static const uptr kAllocatorSize = SANITIZER_MMAP_RANGE_SIZE;
 static const uptr kAllocatorRegionSizeLog = 20;
Index: test/tsan/CMakeLists.txt
===================================================================
--- test/tsan/CMakeLists.txt
+++ test/tsan/CMakeLists.txt
@@ -1,5 +1,5 @@
 set(TSAN_TEST_DEPS ${SANITIZER_COMMON_LIT_TEST_DEPS})
-if(NOT ${COMPILER_RT_DEFAULT_TARGET_ARCH} MATCHES "mips" AND NOT APPLE)
+if(${COMPILER_RT_DEFAULT_TARGET_ARCH} MATCHES "x86_64")
   list(APPEND TSAN_TEST_DEPS GotsanRuntimeCheck)
 endif()
 if(NOT COMPILER_RT_STANDALONE_BUILD)
Index: test/tsan/map32bit.cc
===================================================================
--- test/tsan/map32bit.cc
+++ test/tsan/map32bit.cc
@@ -10,6 +10,7 @@
 // MAP_32BIT flag for mmap is supported only for x86_64.
 // XFAIL: mips64
 // XFAIL: aarch64
+// XFAIL: powerpc64
 
 void *Thread(void *ptr) {
   *(int*)ptr = 42;
Index: test/tsan/mmap_large.cc
===================================================================
--- test/tsan/mmap_large.cc
+++ test/tsan/mmap_large.cc
@@ -16,6 +16,8 @@
   const size_t kLog2Size = 39;
 #elif defined(__mips64) || defined(__aarch64__)
   const size_t kLog2Size = 32;
+#elif defined(__powerpc64__)
+  const size_t kLog2Size = 39;
 #endif
   const uintptr_t kLocation = 0x40ULL << kLog2Size;
   void *p = mmap(
Index: test/tsan/test.h
===================================================================
--- test/tsan/test.h
+++ test/tsan/test.h
@@ -62,6 +62,22 @@
   }
 
   fprintf(stderr, format, (unsigned long) address);
+#elif defined(__powerpc64__)
+  // PPC64 currently has two different VMA sizes (44 and 46 bits), which
+  // require different pointer widths to match the diagnostic message.
+  const char *format = 0;
+  unsigned long vma = (unsigned long)__builtin_frame_address(0);
+  vma = 64 - __builtin_clzll(vma);
+  if (vma == 44)
+    format = "0x%011lx";
+  else if (vma == 46)
+    format = "0x%012lx";
+  else {
+    fprintf(stderr, "unsupported vma: %lu\n", vma);
+    exit(1);
+  }
+
+  fprintf(stderr, format, (unsigned long) address);
 #endif
 }
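
Note (not part of the patch): the runtime-VMA scheme that the tsan_platform.h comment describes can be pictured with a small standalone C++ sketch. It borrows only the two kShadowBeg constants from Mapping44/Mapping46 above; the helper names (DetectVmaSize, ShadowBeg) are illustrative and are not APIs of the TSan runtime, which instead uses GET_CURRENT_FRAME()/MostSignificantSetBitIndex and the templated *Impl functions.

// Standalone sketch: probe the VMA width from a stack address, then dispatch
// to the 44-bit or 46-bit mapping, mirroring how vmaSize is used in the patch.
#include <cstdint>
#include <cstdio>

struct SketchMapping44 { static const unsigned long long kShadowBeg = 0x000100000000ull; };
struct SketchMapping46 { static const unsigned long long kShadowBeg = 0x010000000000ull; };

// The number of significant bits in a current stack address approximates the
// kernel's VMA size (same trick as vmaSize = MostSignificantSetBitIndex + 1).
static unsigned DetectVmaSize() {
  unsigned long long frame =
      (unsigned long long)(uintptr_t)__builtin_frame_address(0);
  return 64 - __builtin_clzll(frame);
}

template <typename Mapping>
static unsigned long long ShadowBegImpl() { return Mapping::kShadowBeg; }

static unsigned long long ShadowBeg(unsigned vma) {
  if (vma == 44) return ShadowBegImpl<SketchMapping44>();
  if (vma == 46) return ShadowBegImpl<SketchMapping46>();
  return 0;  // unsupported VMA; the real runtime prints a FATAL error and dies
}

int main() {
  unsigned vma = DetectVmaSize();
  std::printf("vma=%u, shadow begins at 0x%llx\n", vma, ShadowBeg(vma));
  return 0;
}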
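
A second sketch sanity-checks the 46-bit app-to-shadow translation against the Mapping46 constants. It assumes TSan's generic mask/xor/scale form of MemToShadowImpl with kShadowCell = 8 and kShadowCnt = 4 (an assumption about the surrounding runtime, not something this patch adds), so treat it as a worked example rather than the runtime's exact code.

// Worked example: ((x & ~(kAppMemMsk | (kShadowCell - 1))) ^ kAppMemXor) *
// kShadowCnt applied to one address from each Mapping46 application range;
// every result should land inside [kShadowBeg, kShadowEnd).
#include <cstdio>

int main() {
  const unsigned long long kAppMemMsk  = 0x3c0000000000ull;  // Mapping46
  const unsigned long long kAppMemXor  = 0x020000000000ull;  // Mapping46
  const unsigned long long kShadowBeg  = 0x010000000000ull;  // Mapping46
  const unsigned long long kShadowEnd  = 0x100000000000ull;  // Mapping46
  const unsigned long long kShadowCell = 8, kShadowCnt = 4;  // assumed defaults

  const unsigned long long apps[] = {
      0x000000001000ull,  // low app (main binary)
      0x3d0000000000ull,  // heap
      0x3e8000000000ull,  // high app (modules and main thread stack)
  };
  for (unsigned i = 0; i != 3; ++i) {
    unsigned long long x = apps[i];
    unsigned long long s =
        ((x & ~(kAppMemMsk | (kShadowCell - 1))) ^ kAppMemXor) * kShadowCnt;
    std::printf("app 0x%012llx -> shadow 0x%012llx (%s)\n", x, s,
                (s >= kShadowBeg && s < kShadowEnd) ? "in range"
                                                    : "OUT OF RANGE");
  }
  return 0;
}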