diff --git a/compiler-rt/cmake/config-ix.cmake b/compiler-rt/cmake/config-ix.cmake --- a/compiler-rt/cmake/config-ix.cmake +++ b/compiler-rt/cmake/config-ix.cmake @@ -294,7 +294,7 @@ set(ALL_SANITIZER_COMMON_SUPPORTED_ARCH ${X86} ${X86_64} ${PPC64} ${RISCV64} ${ARM32} ${ARM64} ${MIPS32} ${MIPS64} ${S390X} ${SPARC} ${SPARCV9}) -set(ALL_ASAN_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM32} ${ARM64} +set(ALL_ASAN_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM32} ${ARM64} ${RISCV64} ${MIPS32} ${MIPS64} ${PPC64} ${S390X} ${SPARC} ${SPARCV9}) set(ALL_CRT_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM32} ${ARM64} ${RISCV32} ${RISCV64}) set(ALL_DFSAN_SUPPORTED_ARCH ${X86_64} ${MIPS64} ${ARM64}) diff --git a/compiler-rt/lib/asan/asan_allocator.h b/compiler-rt/lib/asan/asan_allocator.h --- a/compiler-rt/lib/asan/asan_allocator.h +++ b/compiler-rt/lib/asan/asan_allocator.h @@ -132,6 +132,10 @@ const uptr kAllocatorSpace = ~(uptr)0; const uptr kAllocatorSize = 0x2000000000ULL; // 128G. typedef VeryCompactSizeClassMap SizeClassMap; +# elif (defined(__riscv) && (__riscv_xlen == 64)) +const uptr kAllocatorSpace = ~(uptr)0; +const uptr kAllocatorSize = 0x2000000000ULL; // 128G. +typedef VeryDenseSizeClassMap SizeClassMap; # elif defined(__aarch64__) // AArch64/SANITIZER_CAN_USE_ALLOCATOR64 is only for 42-bit VMA // so no need to different values for different VMA. 
diff --git a/compiler-rt/lib/asan/asan_interceptors.h b/compiler-rt/lib/asan/asan_interceptors.h --- a/compiler-rt/lib/asan/asan_interceptors.h +++ b/compiler-rt/lib/asan/asan_interceptors.h @@ -112,7 +112,8 @@ #endif #if SANITIZER_LINUX && (defined(__arm__) || defined(__aarch64__) || \ - defined(__i386__) || defined(__x86_64__)) + defined(__i386__) || defined(__x86_64__) || \ + (defined(__riscv) && (__riscv_xlen == 64))) # define ASAN_INTERCEPT_VFORK 1 #else # define ASAN_INTERCEPT_VFORK 0 diff --git a/compiler-rt/lib/asan/asan_interceptors_vfork.S b/compiler-rt/lib/asan/asan_interceptors_vfork.S --- a/compiler-rt/lib/asan/asan_interceptors_vfork.S +++ b/compiler-rt/lib/asan/asan_interceptors_vfork.S @@ -4,6 +4,7 @@ #define COMMON_INTERCEPTOR_SPILL_AREA __asan_extra_spill_area #define COMMON_INTERCEPTOR_HANDLE_VFORK __asan_handle_vfork #include "sanitizer_common/sanitizer_common_interceptors_vfork_aarch64.inc.S" +#include "sanitizer_common/sanitizer_common_interceptors_vfork_riscv64.inc.S" #include "sanitizer_common/sanitizer_common_interceptors_vfork_arm.inc.S" #include "sanitizer_common/sanitizer_common_interceptors_vfork_x86_64.inc.S" #include "sanitizer_common/sanitizer_common_interceptors_vfork_i386.inc.S" diff --git a/compiler-rt/lib/asan/asan_mapping.h b/compiler-rt/lib/asan/asan_mapping.h --- a/compiler-rt/lib/asan/asan_mapping.h +++ b/compiler-rt/lib/asan/asan_mapping.h @@ -79,6 +79,18 @@ // || `[0x1000000000, 0x11ffffffff]` || lowshadow || // || `[0x0000000000, 0x0fffffffff]` || lowmem || // +// Riscv has only 38 bits for task size +// Low mem size is set with kRiscv64_ShadowOffset64 in compiler-rt/lib/asan/asan_allocator.h +// and in llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp with kRiscv64_ShadowOffset64 +// High mem top border is set with GetMaxVirtualAddress() in +// compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp +// Default Linux/RISCV64 Sv39/Sv48 mapping: +// || `[0x000820000000, 0x003fffffffff]` || HighMem || +// || 
`[0x000124000000, 0x00081fffffff]` || HighShadow || +// || `[0x000024000000, 0x000123ffffff]` || ShadowGap || +// || `[0x000020000000, 0x000023ffffff]` || LowShadow || +// || `[0x000000000000, 0x00001fffffff]` || LowMem || +// // Default Linux/AArch64 (42-bit VMA) mapping: // || `[0x10000000000, 0x3ffffffffff]` || highmem || // || `[0x0a000000000, 0x0ffffffffff]` || highshadow || @@ -161,6 +173,7 @@ static const u64 kDefaultShort64bitShadowOffset = 0x7FFFFFFF & (~0xFFFULL << kDefaultShadowScale); // < 2G. static const u64 kAArch64_ShadowOffset64 = 1ULL << 36; +static const u64 kRiscv64_ShadowOffset64 = 0x20000000; static const u64 kMIPS32_ShadowOffset32 = 0x0aaa0000; static const u64 kMIPS64_ShadowOffset64 = 1ULL << 37; static const u64 kPPC64_ShadowOffset64 = 1ULL << 44; @@ -208,6 +221,8 @@ # define SHADOW_OFFSET __asan_shadow_memory_dynamic_address # elif SANITIZER_MAC && defined(__aarch64__) # define SHADOW_OFFSET __asan_shadow_memory_dynamic_address +# elif (defined(__riscv) && (__riscv_xlen == 64)) +# define SHADOW_OFFSET kRiscv64_ShadowOffset64 # elif defined(__aarch64__) # define SHADOW_OFFSET kAArch64_ShadowOffset64 # elif defined(__powerpc64__) diff --git a/compiler-rt/lib/asan/asan_shadow_setup.cpp b/compiler-rt/lib/asan/asan_shadow_setup.cpp --- a/compiler-rt/lib/asan/asan_shadow_setup.cpp +++ b/compiler-rt/lib/asan/asan_shadow_setup.cpp @@ -44,7 +44,8 @@ } static void MaybeReportLinuxPIEBug() { -#if SANITIZER_LINUX && (defined(__x86_64__) || defined(__aarch64__)) +#if SANITIZER_LINUX && (defined(__x86_64__) || defined(__aarch64__) || \ + (defined(__riscv) && (__riscv_xlen == 64))) Report("This might be related to ELF_ET_DYN_BASE change in Linux 4.12.\n"); Report( "See https://github.com/google/sanitizers/issues/856 for possible " diff --git a/compiler-rt/lib/asan/scripts/asan_symbolize.py b/compiler-rt/lib/asan/scripts/asan_symbolize.py --- a/compiler-rt/lib/asan/scripts/asan_symbolize.py +++ b/compiler-rt/lib/asan/scripts/asan_symbolize.py @@ -49,7 
+49,8 @@ def is_valid_arch(s): return s in ["i386", "x86_64", "x86_64h", "arm", "armv6", "armv7", "armv7s", - "armv7k", "arm64", "powerpc64", "powerpc64le", "s390x", "s390"] + "armv7k", "arm64", "powerpc64", "powerpc64le", "s390x", "s390", + "riscv64"] def guess_arch(addr): # Guess which arch we're running. 10 = len('0x') + 8 hex digits. diff --git a/compiler-rt/lib/asan/tests/asan_test.cpp b/compiler-rt/lib/asan/tests/asan_test.cpp --- a/compiler-rt/lib/asan/tests/asan_test.cpp +++ b/compiler-rt/lib/asan/tests/asan_test.cpp @@ -623,7 +623,8 @@ #if !defined(__ANDROID__) && !defined(__arm__) && \ !defined(__aarch64__) && !defined(__mips__) && \ - !defined(__mips64) && !defined(__s390__) + !defined(__mips64) && !defined(__s390__) && \ + !(defined(__riscv) && (__riscv_xlen == 64)) NOINLINE void BuiltinLongJmpFunc1(jmp_buf buf) { // create three red zones for these two stack objects. int a; @@ -648,6 +649,7 @@ #endif // !defined(__ANDROID__) && !defined(__arm__) && // !defined(__aarch64__) && !defined(__mips__) // !defined(__mips64) && !defined(__s390__) + // !defined(__riscv) && (__riscv_xlen == 64) TEST(AddressSanitizer, UnderscopeLongJmpTest) { static jmp_buf buf; diff --git a/compiler-rt/lib/builtins/clear_cache.c b/compiler-rt/lib/builtins/clear_cache.c --- a/compiler-rt/lib/builtins/clear_cache.c +++ b/compiler-rt/lib/builtins/clear_cache.c @@ -46,6 +46,12 @@ #include #endif +#if defined(__linux__) && (defined(__riscv) && (__riscv_xlen == 64)) +#include +#include +#include +#endif + // The compiler generates calls to __clear_cache() when creating // trampoline functions on the stack for use with nested functions. 
// It is expected to invalidate the instruction cache for the @@ -84,6 +90,10 @@ #else compilerrt_abort(); #endif +#elif defined(__linux__) && (defined(__riscv) && (__riscv_xlen == 64)) + // The only available flag is SYS_RISCV_FLUSH_ICACHE_LOCAL + // see linux/arch/riscv/include/asm/cacheflush.h + __riscv_flush_icache(start, end, 0); #elif defined(__linux__) && defined(__mips__) const uintptr_t start_int = (uintptr_t)start; const uintptr_t end_int = (uintptr_t)end; diff --git a/compiler-rt/lib/crt/crtbegin.c b/compiler-rt/lib/crt/crtbegin.c --- a/compiler-rt/lib/crt/crtbegin.c +++ b/compiler-rt/lib/crt/crtbegin.c @@ -52,6 +52,10 @@ __asm__(".pushsection .init,\"ax\",@progbits\n\t" "call " __USER_LABEL_PREFIX__ "__do_init\n\t" ".popsection"); +#elif defined(__riscv) +__asm__(".pushsection .init,\"ax\",%progbits\n\t" + "call " __USER_LABEL_PREFIX__ "__do_init\n\t" + ".popsection"); #elif defined(__arm__) || defined(__aarch64__) __asm__(".pushsection .init,\"ax\",%progbits\n\t" "bl " __USER_LABEL_PREFIX__ "__do_init\n\t" @@ -110,6 +114,11 @@ "bl " __USER_LABEL_PREFIX__ "__do_fini\n\t" "nop\n\t" ".popsection"); +#elif defined(__riscv) +__asm__(".pushsection .fini,\"ax\",@progbits\n\t" + "call " __USER_LABEL_PREFIX__ "__do_fini\n\t" + "nop\n\t" + ".popsection"); #elif defined(__sparc__) __asm__(".pushsection .fini,\"ax\",@progbits\n\t" "call " __USER_LABEL_PREFIX__ "__do_fini\n\t" diff --git a/compiler-rt/lib/hwasan/hwasan_interceptors_vfork.S b/compiler-rt/lib/hwasan/hwasan_interceptors_vfork.S --- a/compiler-rt/lib/hwasan/hwasan_interceptors_vfork.S +++ b/compiler-rt/lib/hwasan/hwasan_interceptors_vfork.S @@ -3,6 +3,7 @@ #if defined(__linux__) && HWASAN_WITH_INTERCEPTORS #define COMMON_INTERCEPTOR_SPILL_AREA __hwasan_extra_spill_area #define COMMON_INTERCEPTOR_HANDLE_VFORK __hwasan_handle_vfork +#include "sanitizer_common/sanitizer_common_interceptors_vfork_riscv64.inc.S" #include "sanitizer_common/sanitizer_common_interceptors_vfork_aarch64.inc.S" #include 
"sanitizer_common/sanitizer_common_interceptors_vfork_x86_64.inc.S" #endif diff --git a/compiler-rt/lib/sanitizer_common/CMakeLists.txt b/compiler-rt/lib/sanitizer_common/CMakeLists.txt --- a/compiler-rt/lib/sanitizer_common/CMakeLists.txt +++ b/compiler-rt/lib/sanitizer_common/CMakeLists.txt @@ -186,6 +186,7 @@ sanitizer_syscall_linux_aarch64.inc sanitizer_syscall_linux_arm.inc sanitizer_syscall_linux_x86_64.inc + sanitizer_syscall_linux_riscv64.inc sanitizer_syscalls_netbsd.inc sanitizer_thread_registry.h sanitizer_tls_get_addr.h diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_common.h b/compiler-rt/lib/sanitizer_common/sanitizer_common.h --- a/compiler-rt/lib/sanitizer_common/sanitizer_common.h +++ b/compiler-rt/lib/sanitizer_common/sanitizer_common.h @@ -674,7 +674,8 @@ kModuleArchARMV7, kModuleArchARMV7S, kModuleArchARMV7K, - kModuleArchARM64 + kModuleArchARM64, + kModuleArchRISCV64 }; // Opens the file 'file_name" and reads up to 'max_len' bytes. @@ -718,6 +719,8 @@ return "armv7k"; case kModuleArchARM64: return "arm64"; + case kModuleArchRISCV64: + return "riscv64"; } CHECK(0 && "Invalid module arch"); return ""; diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_riscv64.inc.S b/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_riscv64.inc.S new file mode 100644 --- /dev/null +++ b/compiler-rt/lib/sanitizer_common/sanitizer_common_interceptors_vfork_riscv64.inc.S @@ -0,0 +1,56 @@ +#if (defined(__riscv) && (__riscv_xlen == 64)) && defined(__linux__) + +#include "sanitizer_common/sanitizer_asm.h" + +ASM_HIDDEN(COMMON_INTERCEPTOR_SPILL_AREA) + +.comm _ZN14__interception10real_vforkE,8,8 +.globl ASM_WRAPPER_NAME(vfork) +ASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(vfork)) +ASM_WRAPPER_NAME(vfork): + // Save ra in the off-stack spill area. 
+ // allocate space on stack + addi sp, sp, -16 + // store ra value + sd ra, 8(sp) + call COMMON_INTERCEPTOR_SPILL_AREA + // restore previous values from stack + ld ra, 8(sp) + // adjust stack + addi sp, sp, 16 + // store ra by x10?? + sd ra, 0(x10) + + // Call real vfork. This may return twice. User code that runs between the first and the second return + // may clobber the stack frame of the interceptor; that's why it does not have a frame. + la x10, _ZN14__interception10real_vforkE + ld x10, 0(x10) + jalr x10 + + // adjust stack + addi sp, sp, -16 + // store x10 by adjusted stack + sd x10, 8(sp) + // jump to exit label if x10 is 0 + beqz x10, .L_exit + + // x0 != 0 => parent process. Clear stack shadow. + // put old sp to x10 + addi x10, sp, 16 + call COMMON_INTERCEPTOR_HANDLE_VFORK + +.L_exit: + // Restore ra + call COMMON_INTERCEPTOR_SPILL_AREA + ld ra, 0(x10) + // load value by stack + ld x10, 8(sp) + // adjust stack + addi sp, sp, 16 + ret +ASM_SIZE(vfork) + +.weak vfork +.set vfork, ASM_WRAPPER_NAME(vfork) + +#endif diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_common_syscalls.inc b/compiler-rt/lib/sanitizer_common/sanitizer_common_syscalls.inc --- a/compiler-rt/lib/sanitizer_common/sanitizer_common_syscalls.inc +++ b/compiler-rt/lib/sanitizer_common/sanitizer_common_syscalls.inc @@ -2296,7 +2296,8 @@ PRE_SYSCALL(ptrace)(long request, long pid, long addr, long data) { #if !SANITIZER_ANDROID && \ (defined(__i386) || defined(__x86_64) || defined(__mips64) || \ - defined(__powerpc64__) || defined(__aarch64__) || defined(__s390__)) + defined(__powerpc64__) || defined(__aarch64__) || defined(__s390__) || \ + (defined(__riscv) && (__riscv_xlen == 64))) if (data) { if (request == ptrace_setregs) { PRE_READ((void *)data, struct_user_regs_struct_sz); @@ -2317,7 +2318,8 @@ POST_SYSCALL(ptrace)(long res, long request, long pid, long addr, long data) { #if !SANITIZER_ANDROID && \ (defined(__i386) || defined(__x86_64) || defined(__mips64) || \ - 
defined(__powerpc64__) || defined(__aarch64__) || defined(__s390__)) + defined(__powerpc64__) || defined(__aarch64__) || defined(__s390__) || \ + (defined(__riscv) && (__riscv_xlen == 64))) if (res >= 0 && data) { // Note that this is different from the interceptor in // sanitizer_common_interceptors.inc. diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_linux.h b/compiler-rt/lib/sanitizer_common/sanitizer_linux.h --- a/compiler-rt/lib/sanitizer_common/sanitizer_linux.h +++ b/compiler-rt/lib/sanitizer_common/sanitizer_linux.h @@ -62,7 +62,7 @@ void internal_sigdelset(__sanitizer_sigset_t *set, int signum); #if defined(__x86_64__) || defined(__mips__) || defined(__aarch64__) \ || defined(__powerpc64__) || defined(__s390__) || defined(__i386__) \ - || defined(__arm__) + || defined(__arm__) || (defined(__riscv) && (__riscv_xlen == 64)) uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg, int *parent_tidptr, void *newtls, int *child_tidptr); #endif diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp --- a/compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp +++ b/compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp @@ -154,6 +154,8 @@ #if SANITIZER_LINUX && defined(__x86_64__) #include "sanitizer_syscall_linux_x86_64.inc" +#elif SANITIZER_LINUX && (defined(__riscv) && (__riscv_xlen == 64)) +#include "sanitizer_syscall_linux_riscv64.inc" #elif SANITIZER_LINUX && defined(__aarch64__) #include "sanitizer_syscall_linux_aarch64.inc" #elif SANITIZER_LINUX && defined(__arm__) @@ -710,7 +712,7 @@ }; #else struct linux_dirent { -#if SANITIZER_X32 || defined(__aarch64__) +#if SANITIZER_X32 || defined(__aarch64__) || (defined(__riscv) && (__riscv_xlen == 64)) u64 d_ino; u64 d_off; #else @@ -718,7 +720,7 @@ unsigned long d_off; #endif unsigned short d_reclen; -#ifdef __aarch64__ +#if defined(__aarch64__) || (defined(__riscv) && (__riscv_xlen == 64)) unsigned char d_type; #endif 
char d_name[256]; @@ -1067,6 +1069,8 @@ // This should (does) work for both PowerPC64 Endian modes. // Similarly, aarch64 has multiple address space layouts: 39, 42 and 47-bit. return (1ULL << (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1)) - 1; +# elif (defined(__riscv) && (__riscv_xlen == 64)) + return (1ULL << 38) - 1; # elif defined(__mips64) return (1ULL << 40) - 1; // 0x000000ffffffffffUL; # elif defined(__s390x__) @@ -1361,6 +1365,57 @@ : "memory", "$29" ); return res; } +#elif (defined(__riscv) && (__riscv_xlen == 64)) +uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg, + int *parent_tidptr, void *newtls, int *child_tidptr) { + long long res; + if (!fn || !child_stack) + return -EINVAL; + CHECK_EQ(0, (uptr)child_stack % 16); + child_stack = (char *)child_stack - 2 * sizeof(unsigned long long); + ((unsigned long long *)child_stack)[0] = (uptr)fn; + ((unsigned long long *)child_stack)[1] = (uptr)arg; + + register int (*__fn)(void *) __asm__("a0") = fn; + register void *__stack __asm__("a1") = child_stack; + register int __flags __asm__("a2") = flags; + register void *__arg __asm__("a3") = arg; + register int *__ptid __asm__("a4") = parent_tidptr; + register void *__tls __asm__("a5") = newtls; + register int *__ctid __asm__("a6") = child_tidptr; + + __asm__ __volatile__( + "mv a0,a2\n" /* flags */ + "mv a2,a4\n" /* ptid */ + "mv a3,a5\n" /* tls */ + "mv a4,a6\n" /* ctid */ + "addi a7, zero, %9\n" /* clone */ + + "ecall\n" + + /* if (%r0 != 0) + * return %r0; + */ + "bnez a0, 1f\n" + + /* In the child, now. Call "fn(arg)". */ + "ld a0, 8(sp)\n" + "ld a1, 16(sp)\n" + "jalr a1\n" + + /* Call _exit(%r0). 
*/ + "addi a7, zero, %10\n" + "ecall\n" + "1:\n" + + : "=r" (res) + : "i"(-EINVAL), + "r"(__fn), "r"(__stack), "r"(__flags), "r"(__arg), + "r"(__ptid), "r"(__tls), "r"(__ctid), + "i"(__NR_clone), "i"(__NR_exit) + : "ra", "memory"); + return res; +} #elif defined(__aarch64__) uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg, int *parent_tidptr, void *newtls, int *child_tidptr) { @@ -1866,6 +1921,69 @@ static const uptr FSR_WRITE = 1U << 11; uptr fsr = ucontext->uc_mcontext.error_code; return fsr & FSR_WRITE ? WRITE : READ; +#elif (defined(__riscv) && (__riscv_xlen == 64)) + const auto *exception_source = + reinterpret_cast<const u8 *>(ucontext->uc_mcontext.__gregs[REG_PC]); + // check if instruction is 2 bytes long + bool isCompressed = ((exception_source[0] & 0x3) != 0x3); + + if (!isCompressed) { + // FIXME: Add support for FPU, Atomic, etc + // opcode is located in lower 7 bits + uint8_t op_code = exception_source[0] & ((1 << 7) - 1); + // bits [12-14] + uint8_t funct3 = (exception_source[1] >> 4u) & 0b111u; + + const uint8_t READ_OPCODE = 0b0000011u; // loads + const uint8_t WRITE_OPCODE = 0b0100011u; // stores + + if (op_code == READ_OPCODE) { + switch (funct3) { + case 0b000: // lb + case 0b001: // lh + case 0b010: // lw + case 0b100: // lbu + case 0b101: // lhu + case 0b110: // lwu + case 0b011: // ld + return READ; + } + } + + if (op_code == WRITE_OPCODE) { + switch (funct3) { + case 0b000: // sb + case 0b001: // sh + case 0b010: // sw + case 0b011: // sd + return WRITE; + } + } + + return UNKNOWN; + } + uint8_t insn_byte_lo = exception_source[0]; + uint8_t insn_byte_hi = exception_source[1]; + // reconstruct the actual short instruction + uint16_t faulty_instruction = (insn_byte_lo << 0) | (insn_byte_hi << 8); + uint8_t op_code = faulty_instruction & 0x3u; + uint8_t funct = faulty_instruction >> 13u; + // RVC quadrant 0 or 2 + if (op_code == 0x0 || op_code == 0x2) { + switch (funct) { + case 0b001: // c.fld / c.fldsp + case 0b010: // c.lw 
/ c.lwsp + case 0b011: // c.ld / c.ldsp + return READ; + case 0b101: // c.fsd / c.fsdsp + case 0b110: // c.sw / c.swsp + case 0b111: // c.sd / c.sdsp + return WRITE; + } + return UNKNOWN; + } + return UNKNOWN; + #elif defined(__aarch64__) static const u64 ESR_ELx_WNR = 1U << 6; u64 esr; diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp --- a/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp +++ b/compiler-rt/lib/sanitizer_common/sanitizer_linux_libcdep.cpp @@ -267,7 +267,7 @@ #if (defined(__x86_64__) || defined(__i386__) || defined(__mips__) || \ defined(__aarch64__) || defined(__powerpc64__) || defined(__s390__) || \ - defined(__arm__)) && \ + defined(__arm__) || (defined(__riscv) && (__riscv_xlen == 64))) && \ SANITIZER_LINUX && !SANITIZER_ANDROID // sizeof(struct pthread) from glibc. static atomic_uintptr_t thread_descriptor_size; @@ -307,6 +307,9 @@ #elif defined(__mips__) // TODO(sagarthakur): add more values as per different glibc versions. val = FIRST_32_SECOND_64(1152, 1776); +#elif (defined(__riscv) && (__riscv_xlen == 64)) + // The sizeof (struct pthread) is the same from GLIBC 2.17 to 2.22. + val = 1776; #elif defined(__aarch64__) // The sizeof (struct pthread) is the same from GLIBC 2.17 to 2.22. val = 1776; @@ -327,7 +330,8 @@ return kThreadSelfOffset; } -#if defined(__mips__) || defined(__powerpc64__) +#if defined(__mips__) || defined(__powerpc64__) || \ + (defined(__riscv) && (__riscv_xlen == 64)) // TlsPreTcbSize includes size of struct pthread_descr and size of tcb // head structure. It lies before the static tls blocks. 
static uptr TlsPreTcbSize() { @@ -335,6 +339,8 @@ const uptr kTcbHead = 16; // sizeof (tcbhead_t) # elif defined(__powerpc64__) const uptr kTcbHead = 88; // sizeof (tcbhead_t) +# elif (defined(__riscv) && (__riscv_xlen == 64)) + const uptr kTcbHead = 16; // sizeof (tcbhead_t) # endif const uptr kTlsAlign = 16; const uptr kTlsPreTcbSize = @@ -364,6 +370,15 @@ # elif defined(__aarch64__) || defined(__arm__) descr_addr = reinterpret_cast(__builtin_thread_pointer()) - ThreadDescriptorSize(); +# elif (defined(__riscv) && (__riscv_xlen == 64)) + + uptr tcb_end; + asm volatile("mv %0, tp;\n" + : "=r" (tcb_end)); + // https://github.com/riscv/riscv-elf-psabi-doc/issues/53 + const uptr kTlsTcbOffset = 0x800; + descr_addr = reinterpret_cast(tcb_end - kTlsTcbOffset - TlsPreTcbSize()); + # elif defined(__s390__) descr_addr = reinterpret_cast(__builtin_thread_pointer()); # elif defined(__powerpc64__) @@ -434,7 +449,8 @@ *addr -= *size; *addr += ThreadDescriptorSize(); # elif defined(__mips__) || defined(__aarch64__) || defined(__powerpc64__) \ - || defined(__arm__) + || defined(__arm__) || \ + (defined(__riscv) && (__riscv_xlen == 64)) *addr = ThreadSelf(); *size = GetTlsSize(); # else @@ -491,7 +507,8 @@ uptr addr, size; GetTls(&addr, &size); return size; -#elif defined(__mips__) || defined(__powerpc64__) +#elif defined(__mips__) || defined(__powerpc64__) || \ + (defined(__riscv) && (__riscv_xlen == 64)) return RoundUpTo(g_tls_size + TlsPreTcbSize(), 16); #else return g_tls_size; diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_platform.h b/compiler-rt/lib/sanitizer_common/sanitizer_platform.h --- a/compiler-rt/lib/sanitizer_common/sanitizer_platform.h +++ b/compiler-rt/lib/sanitizer_common/sanitizer_platform.h @@ -239,6 +239,8 @@ // will still work but will consume more memory for TwoLevelByteMap. 
#if defined(__mips__) # define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 40) +#elif (defined(__riscv) && (__riscv_xlen == 64)) +# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 38) #elif defined(__aarch64__) # if SANITIZER_MAC // Darwin iOS/ARM64 has a 36-bit VMA, 64GiB VM diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_platform_interceptors.h b/compiler-rt/lib/sanitizer_common/sanitizer_platform_interceptors.h --- a/compiler-rt/lib/sanitizer_common/sanitizer_platform_interceptors.h +++ b/compiler-rt/lib/sanitizer_common/sanitizer_platform_interceptors.h @@ -281,7 +281,7 @@ #if SI_LINUX_NOT_ANDROID && \ (defined(__i386) || defined(__x86_64) || defined(__mips64) || \ defined(__powerpc64__) || defined(__aarch64__) || defined(__arm__) || \ - defined(__s390__)) + defined(__s390__) || (defined(__riscv) && (__riscv_xlen == 64))) #define SANITIZER_INTERCEPT_PTRACE 1 #else #define SANITIZER_INTERCEPT_PTRACE 0 diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.h b/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.h --- a/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.h +++ b/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.h @@ -71,6 +71,9 @@ #elif defined(__arm__) const unsigned struct_kernel_stat_sz = 64; const unsigned struct_kernel_stat64_sz = 104; +#elif (defined(__riscv) && (__riscv_xlen == 64)) +const unsigned struct_kernel_stat_sz = 128; +const unsigned struct_kernel_stat64_sz = 104; #elif defined(__aarch64__) const unsigned struct_kernel_stat_sz = 128; const unsigned struct_kernel_stat64_sz = 104; @@ -804,7 +807,7 @@ #if SANITIZER_LINUX && !SANITIZER_ANDROID && \ (defined(__i386) || defined(__x86_64) || defined(__mips64) || \ defined(__powerpc64__) || defined(__aarch64__) || defined(__arm__) || \ - defined(__s390__)) + defined(__s390__) || (defined(__riscv) && (__riscv_xlen == 64))) extern unsigned struct_user_regs_struct_sz; extern 
unsigned struct_user_fpregs_struct_sz; extern unsigned struct_user_fpxregs_struct_sz; diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.cpp --- a/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.cpp +++ b/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.cpp @@ -90,7 +90,8 @@ #if SANITIZER_LINUX # include # include -# if defined(__mips64) || defined(__aarch64__) || defined(__arm__) +# if defined(__mips64) || defined(__aarch64__) || defined(__arm__) || \ + (defined(__riscv) && (__riscv_xlen == 64)) # include # ifdef __arm__ typedef struct user_fpregs elf_fpregset_t; @@ -306,10 +307,13 @@ #if SANITIZER_LINUX && !SANITIZER_ANDROID && \ (defined(__i386) || defined(__x86_64) || defined(__mips64) || \ defined(__powerpc64__) || defined(__aarch64__) || defined(__arm__) || \ - defined(__s390__)) + defined(__s390__) || (defined(__riscv) && (__riscv_xlen == 64))) #if defined(__mips64) || defined(__powerpc64__) || defined(__arm__) unsigned struct_user_regs_struct_sz = sizeof(struct pt_regs); unsigned struct_user_fpregs_struct_sz = sizeof(elf_fpregset_t); +#elif (defined(__riscv) && (__riscv_xlen == 64)) + unsigned struct_user_regs_struct_sz = sizeof(struct user_regs_struct); + unsigned struct_user_fpregs_struct_sz = sizeof(struct __riscv_q_ext_state); #elif defined(__aarch64__) unsigned struct_user_regs_struct_sz = sizeof(struct user_pt_regs); unsigned struct_user_fpregs_struct_sz = sizeof(struct user_fpsimd_state); @@ -321,7 +325,8 @@ unsigned struct_user_fpregs_struct_sz = sizeof(struct user_fpregs_struct); #endif // __mips64 || __powerpc64__ || __aarch64__ #if defined(__x86_64) || defined(__mips64) || defined(__powerpc64__) || \ - defined(__aarch64__) || defined(__arm__) || defined(__s390__) + defined(__aarch64__) || defined(__arm__) || defined(__s390__) || \ + (defined(__riscv) && (__riscv_xlen == 64)) unsigned struct_user_fpxregs_struct_sz = 0; 
#else unsigned struct_user_fpxregs_struct_sz = sizeof(struct user_fpxregs_struct); diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.h b/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.h --- a/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.h +++ b/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.h @@ -85,6 +85,14 @@ return pc - 4; #elif defined(__sparc__) || defined(__mips__) return pc - 8; +#elif (defined(__riscv) && (__riscv_xlen == 64)) + // RV-64 has variable instruction length... + // The C extension gives us 2-byte instructions + // RV-64 has 4-byte instructions + // + RISCV architecture allows instructions up to 8 bytes + // It seems difficult to figure out the exact instruction length - + // pc - 2 seems like a safe option for the purposes of stack tracing + return pc - 2; #else return pc - 1; #endif diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.cpp --- a/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.cpp +++ b/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.cpp @@ -21,6 +21,29 @@ return pc + 8; #elif defined(__powerpc__) || defined(__arm__) || defined(__aarch64__) return pc + 4; +#elif (defined(__riscv) && (__riscv_xlen == 64)) + // We check for the largest possible instruction first and then try shorter + // encodings if it does not match + // Current check order is 8 -> 6 -> 4 -> 2 + u8 InsnByte = *(u8 *)(pc); + if ((InsnByte & 0x7f) == 0x3f) { + // xxxxxxxxx0111111 | 64 bit | + return pc + 8; + } + if ((InsnByte & 0x3f) == 0x1f) { + // xxxxxxxxxx011111 | 48 bit | + return pc + 6; + } + if (((InsnByte & 0x3) == 0x3) && ((InsnByte & 0x1c) != 0x1c)) { + // xxxxxxxxxxxbbb11 | 32 bit | bbb != 111 + return pc + 4; + } + if ((InsnByte & 0x3) != 0x3) { + // xxxxxxxxxxxxxxaa | 16 bit | aa != 11 + return pc + 2; + } + // in case could not determine instruction width, return 0 + return 0; #else return pc + 1; #endif @@ -94,6 +117,8 @@ uhwptr pc1 = 
caller_frame[2]; #elif defined(__s390__) uhwptr pc1 = frame[14]; +#elif defined(__riscv) + uhwptr pc1 = frame[-1]; #else uhwptr pc1 = frame[1]; #endif diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cpp --- a/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cpp +++ b/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cpp @@ -16,7 +16,8 @@ #if SANITIZER_LINUX && (defined(__x86_64__) || defined(__mips__) || \ defined(__aarch64__) || defined(__powerpc64__) || \ defined(__s390__) || defined(__i386__) || \ - defined(__arm__)) + defined(__arm__) || \ + (defined(__riscv) && (__riscv_xlen == 64))) #include "sanitizer_stoptheworld.h" @@ -31,7 +32,7 @@ #include // for pid_t #include // for iovec #include // for NT_PRSTATUS -#if defined(__aarch64__) && !SANITIZER_ANDROID +#if (defined(__aarch64__) || (defined(__riscv) && (__riscv_xlen == 64))) && !SANITIZER_ANDROID // GLIBC 2.20+ sys/user does not include asm/ptrace.h # include #endif @@ -503,6 +504,11 @@ #define REG_SP sp #define ARCH_IOVEC_FOR_GETREGSET +#elif (defined(__riscv) && (__riscv_xlen == 64)) +typedef struct user_regs_struct regs_struct; +#define REG_SP sp +#define ARCH_IOVEC_FOR_GETREGSET + #elif defined(__s390__) typedef _user_regs_struct regs_struct; #define REG_SP gprs[15] diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_libcdep.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_libcdep.cpp --- a/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_libcdep.cpp +++ b/compiler-rt/lib/sanitizer_common/sanitizer_symbolizer_libcdep.cpp @@ -258,6 +258,8 @@ const char* const kSymbolizerArch = "--default-arch=x86_64"; #elif defined(__i386__) const char* const kSymbolizerArch = "--default-arch=i386"; +#elif (defined(__riscv) && (__riscv_xlen == 64)) + const char* const kSymbolizerArch = "--default-arch=riscv64"; #elif defined(__aarch64__) 
const char* const kSymbolizerArch = "--default-arch=arm64"; #elif defined(__arm__) diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_syscall_linux_riscv64.inc b/compiler-rt/lib/sanitizer_common/sanitizer_syscall_linux_riscv64.inc new file mode 100644 --- /dev/null +++ b/compiler-rt/lib/sanitizer_common/sanitizer_syscall_linux_riscv64.inc @@ -0,0 +1,198 @@ +//===-- sanitizer_syscall_linux_riscv64.inc --------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// Implementations of internal_syscall and internal_iserror for Linux/riscv64. +// +//===----------------------------------------------------------------------===// + +// About local register variables: +// https://gcc.gnu.org/onlinedocs/gcc/Local-Register-Variables.html#Local-Register-Variables +// +// Kernel ABI: +// Kernel sources and glibc show that: +// the syscall number is passed in a7 +// (http://man7.org/linux/man-pages/man2/syscall.2.html), +// results are returned in a0 and a1 +// (http://man7.org/linux/man-pages/man2/syscall.2.html), +// and arguments are passed in a0-a7 (see below). +// +// The argument-passing convention is also described in this sw-dev thread: +// https://groups.google.com/a/groups.riscv.org/forum/#!topic/sw-dev/exbrzM3GZDQ +// and was confirmed by inspecting the glibc sources. +//
+#define SYSCALL(name) __NR_ ## name + +#define INTERNAL_SYSCALL_CLOBBERS "memory" + +static uptr __internal_syscall(u64 nr) { + long int ecall_result = 0; + register long int a7 asm ("a7") = nr; + register long int a0 asm ("a0"); + __asm__ volatile ( + "ecall\n\t" + : "=r" (a0) + : "r" (a7) + : INTERNAL_SYSCALL_CLOBBERS); + ecall_result = a0; + return ecall_result; +} +#define __internal_syscall0(n) \ + (__internal_syscall)(n) + +static uptr __internal_syscall(u64 nr, u64 arg1) { + long int ecall_result = 0; + register long int a7 asm ("a7") = nr; + register long int a0 asm ("a0") = arg1; + __asm__ volatile ( + "ecall\n\t" + : "+r" (a0) + : "r" (a7) + : INTERNAL_SYSCALL_CLOBBERS); + ecall_result = a0; + return ecall_result; +} +#define __internal_syscall1(n, a1) \ + (__internal_syscall)(n, (u64)(a1)) + +static uptr __internal_syscall(u64 nr, u64 arg1, long arg2) { + long int ecall_result = 0; + register long int a7 asm ("a7") = nr; + register long int a0 asm ("a0") = arg1; + register long int a1 asm ("a1") = arg2; + __asm__ volatile ( + "ecall\n\t" + : "+r" (a0) + : "r" (a7), "r" (a1) + : INTERNAL_SYSCALL_CLOBBERS); + ecall_result = a0; + return ecall_result; +} +#define __internal_syscall2(n, a1, a2) \ + (__internal_syscall)(n, (u64)(a1), (long)(a2)) + +static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3) { + long int ecall_result = 0; + register long int a7 asm ("a7") = nr; + register long int a0 asm ("a0") = arg1; + register long int a1 asm ("a1") = arg2; + register long int a2 asm ("a2") = arg3; + __asm__ volatile ( + "ecall\n\t" + : "+r" (a0) + : "r" (a7), "r" (a1), "r" (a2) + : INTERNAL_SYSCALL_CLOBBERS); + ecall_result = a0; + return ecall_result; +} +#define __internal_syscall3(n, a1, a2, a3) \ + (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3)) + +static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3, + u64 arg4) { + long int ecall_result = 0; + register long int a7 asm ("a7") = nr; + register long int a0 asm 
("a0") = arg1; + register long int a1 asm ("a1") = arg2; + register long int a2 asm ("a2") = arg3; + register long int a3 asm ("a3") = arg4; + __asm__ volatile ( + "ecall\n\t" + : "+r" (a0) + : "r" (a7), "r" (a1), "r" (a2), "r" (a3) + : INTERNAL_SYSCALL_CLOBBERS); + ecall_result = a0; + return ecall_result; +} +#define __internal_syscall4(n, a1, a2, a3, a4) \ + (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4)) + +static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3, + u64 arg4, long arg5) { + long int ecall_result = 0; + register long int a7 asm ("a7") = nr; + register long int a0 asm ("a0") = arg1; + register long int a1 asm ("a1") = arg2; + register long int a2 asm ("a2") = arg3; + register long int a3 asm ("a3") = arg4; + register long int a4 asm ("a4") = arg5; + __asm__ volatile ( + "ecall\n\t" + : "+r" (a0) + : "r" (a7), "r" (a1), "r" (a2), "r" (a3), "r" (a4) + : INTERNAL_SYSCALL_CLOBBERS); + ecall_result = a0; + return ecall_result; +} +#define __internal_syscall5(n, a1, a2, a3, a4, a5) \ + (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4), \ + (u64)(a5)) + +static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3, + u64 arg4, long arg5, long arg6) { + long int ecall_result = 0; + register long int a7 asm ("a7") = nr; + register long int a0 asm ("a0") = arg1; + register long int a1 asm ("a1") = arg2; + register long int a2 asm ("a2") = arg3; + register long int a3 asm ("a3") = arg4; + register long int a4 asm ("a4") = arg5; + register long int a5 asm ("a5") = arg6; + __asm__ volatile ( + "ecall\n\t" + : "+r" (a0) + : "r" (a7), "r" (a1), "r" (a2), "r" (a3), "r" (a4), "r" (a5) + : INTERNAL_SYSCALL_CLOBBERS); + ecall_result = a0; + return ecall_result; +} +#define __internal_syscall6(n, a1, a2, a3, a4, a5, a6) \ + (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4), \ + (u64)(a5), (long)(a6)) + +static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3, + u64 
arg4, long arg5, long arg6, long arg7) { + long int ecall_result = 0; + register long int a7 asm ("a7") = nr; + register long int a0 asm ("a0") = arg1; + register long int a1 asm ("a1") = arg2; + register long int a2 asm ("a2") = arg3; + register long int a3 asm ("a3") = arg4; + register long int a4 asm ("a4") = arg5; + register long int a5 asm ("a5") = arg6; + register long int a6 asm ("a6") = arg7; + __asm__ volatile ( + "ecall\n\t" + : "+r" (a0) + : "r" (a7), "r" (a1), "r" (a2), "r" (a3), "r" (a4), "r" (a5), "r" (a6) + : INTERNAL_SYSCALL_CLOBBERS); + ecall_result = a0; + return ecall_result; +} +#define __internal_syscall7(n, a1, a2, a3, a4, a5, a6, a7) \ + (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4), \ + (u64)(a5), (long)(a6), (long)(a7)) + +#define __SYSCALL_NARGS_X(a1, a2, a3, a4, a5, a6, a7, a8, n, ...) n +#define __SYSCALL_NARGS(...) \ + __SYSCALL_NARGS_X(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0, ) +#define __SYSCALL_CONCAT_X(a, b) a##b +#define __SYSCALL_CONCAT(a, b) __SYSCALL_CONCAT_X(a, b) +#define __SYSCALL_DISP(b, ...) \ + __SYSCALL_CONCAT(b, __SYSCALL_NARGS(__VA_ARGS__))(__VA_ARGS__) + +#define internal_syscall(...) __SYSCALL_DISP(__internal_syscall, __VA_ARGS__) + +// Helper function used to avoid clobbering errno.
+bool internal_iserror(uptr retval, int *rverrno) { + if (retval >= (uptr)-4095) { + if (rverrno) + *rverrno = -retval; + return true; + } + return false; +} diff --git a/compiler-rt/lib/sanitizer_common/tests/CMakeLists.txt b/compiler-rt/lib/sanitizer_common/tests/CMakeLists.txt --- a/compiler-rt/lib/sanitizer_common/tests/CMakeLists.txt +++ b/compiler-rt/lib/sanitizer_common/tests/CMakeLists.txt @@ -3,7 +3,7 @@ clang_compiler_add_cxx_check() # FIXME: use SANITIZER_COMMON_SUPPORTED_ARCH here -filter_available_targets(SANITIZER_UNITTEST_SUPPORTED_ARCH x86_64 i386 mips64 mips64el) +filter_available_targets(SANITIZER_UNITTEST_SUPPORTED_ARCH x86_64 i386 mips64 mips64el riscv64) if(APPLE) darwin_filter_host_archs(SANITIZER_UNITTEST_SUPPORTED_ARCH SANITIZER_UNITTEST_SUPPORTED_ARCH) endif() diff --git a/compiler-rt/test/asan/CMakeLists.txt b/compiler-rt/test/asan/CMakeLists.txt --- a/compiler-rt/test/asan/CMakeLists.txt +++ b/compiler-rt/test/asan/CMakeLists.txt @@ -14,7 +14,7 @@ endif() macro(get_bits_for_arch arch bits) - if (${arch} MATCHES "x86_64|powerpc64|powerpc64le|aarch64|arm64|mips64|mips64el|s390x|sparcv9") + if (${arch} MATCHES "x86_64|powerpc64|powerpc64le|aarch64|arm64|mips64|mips64el|s390x|sparcv9|riscv64") set(${bits} 64) elseif (${arch} MATCHES "i386|arm|mips|mipsel|sparc") set(${bits} 32) diff --git a/compiler-rt/test/asan/TestCases/Linux/ptrace.cpp b/compiler-rt/test/asan/TestCases/Linux/ptrace.cpp --- a/compiler-rt/test/asan/TestCases/Linux/ptrace.cpp +++ b/compiler-rt/test/asan/TestCases/Linux/ptrace.cpp @@ -66,6 +66,14 @@ #define PRINT_REG_PC(__regs) printf ("%lx\n", (unsigned long) (__regs.psw.addr)) #define PRINT_REG_FP(__fpregs) printf ("%lx\n", (unsigned long) (__fpregs.fpc)) #define ARCH_IOVEC_FOR_GETREGSET + +#elif defined(__riscv) && (__riscv_xlen == 64) +#include +typedef user_regs_struct regs_struct; +typedef __riscv_q_ext_state fpregs_struct; +#define PRINT_REG_PC(__regs) printf("%lx\n", (unsigned long)(__regs.pc)) +#define 
PRINT_REG_FP(__fpregs) printf("%lx\n", (unsigned long)(__fpregs.fcsr)) +#define ARCH_IOVEC_FOR_GETREGSET #endif diff --git a/compiler-rt/test/asan/TestCases/Linux/vfork.cpp b/compiler-rt/test/asan/TestCases/Linux/vfork.cpp --- a/compiler-rt/test/asan/TestCases/Linux/vfork.cpp +++ b/compiler-rt/test/asan/TestCases/Linux/vfork.cpp @@ -1,7 +1,7 @@ // https://github.com/google/sanitizers/issues/925 // RUN: %clang_asan -O0 %s -o %t && %run %t 2>&1 -// REQUIRES: aarch64-target-arch || x86_64-target-arch || i386-target-arch || arm-target-arch +// REQUIRES: aarch64-target-arch || x86_64-target-arch || i386-target-arch || arm-target-arch || riscv64-target-arch #include #include diff --git a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp --- a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp +++ b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp @@ -105,6 +105,7 @@ static const uint64_t kMIPS32_ShadowOffset32 = 0x0aaa0000; static const uint64_t kMIPS64_ShadowOffset64 = 1ULL << 37; static const uint64_t kAArch64_ShadowOffset64 = 1ULL << 36; +static const uint64_t kRiscv64_ShadowOffset64 = 0x20000000; static const uint64_t kFreeBSD_ShadowOffset32 = 1ULL << 30; static const uint64_t kFreeBSD_ShadowOffset64 = 1ULL << 46; static const uint64_t kNetBSD_ShadowOffset32 = 1ULL << 30; @@ -447,6 +448,7 @@ bool IsMIPS64 = TargetTriple.isMIPS64(); bool IsArmOrThumb = TargetTriple.isARM() || TargetTriple.isThumb(); bool IsAArch64 = TargetTriple.getArch() == Triple::aarch64; + bool IsRISCV64 = TargetTriple.getArch() == Triple::riscv64; bool IsWindows = TargetTriple.isOSWindows(); bool IsFuchsia = TargetTriple.isOSFuchsia(); bool IsMyriad = TargetTriple.getVendor() == llvm::Triple::Myriad; @@ -515,6 +517,8 @@ Mapping.Offset = kDynamicShadowSentinel; else if (IsAArch64) Mapping.Offset = kAArch64_ShadowOffset64; + else if (IsRISCV64) + Mapping.Offset = kRiscv64_ShadowOffset64; else Mapping.Offset = 
kDefaultShadowOffset64; } @@ -533,6 +537,7 @@ // we could OR the constant in a single instruction, but it's more // efficient to load it once and use indexed addressing. Mapping.OrShadowOffset = !IsAArch64 && !IsPPC64 && !IsSystemZ && !IsPS4CPU && + !IsRISCV64 && !(Mapping.Offset & (Mapping.Offset - 1)) && Mapping.Offset != kDynamicShadowSentinel; bool IsAndroidWithIfuncSupport =