diff --git a/compiler-rt/cmake/Modules/AllSupportedArchDefs.cmake b/compiler-rt/cmake/Modules/AllSupportedArchDefs.cmake
--- a/compiler-rt/cmake/Modules/AllSupportedArchDefs.cmake
+++ b/compiler-rt/cmake/Modules/AllSupportedArchDefs.cmake
@@ -3,6 +3,7 @@
 set(HEXAGON hexagon)
 set(X86 i386)
 set(X86_64 x86_64)
+set(LOONGARCH64 loongarch64)
 set(MIPS32 mips mipsel)
 set(MIPS64 mips64 mips64el)
 set(PPC32 powerpc powerpcspe)
@@ -24,7 +25,7 @@
 set(ALL_SANITIZER_COMMON_SUPPORTED_ARCH ${X86} ${X86_64} ${PPC64} ${RISCV64}
     ${ARM32} ${ARM64} ${MIPS32} ${MIPS64} ${S390X} ${SPARC} ${SPARCV9}
-    ${HEXAGON})
+    ${HEXAGON} ${LOONGARCH64})
 set(ALL_ASAN_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM32} ${ARM64} ${RISCV64}
     ${MIPS32} ${MIPS64} ${PPC64} ${S390X} ${SPARC} ${SPARCV9} ${HEXAGON})
 set(ALL_DFSAN_SUPPORTED_ARCH ${X86_64} ${MIPS64} ${ARM64})
diff --git a/compiler-rt/lib/sanitizer_common/CMakeLists.txt b/compiler-rt/lib/sanitizer_common/CMakeLists.txt
--- a/compiler-rt/lib/sanitizer_common/CMakeLists.txt
+++ b/compiler-rt/lib/sanitizer_common/CMakeLists.txt
@@ -190,6 +190,7 @@
   sanitizer_syscall_linux_arm.inc
   sanitizer_syscall_linux_x86_64.inc
   sanitizer_syscall_linux_riscv64.inc
+  sanitizer_syscall_linux_loongarch64.inc
   sanitizer_syscalls_netbsd.inc
   sanitizer_thread_registry.h
   sanitizer_thread_safety.h
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp
--- a/compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp
@@ -78,6 +78,10 @@
 #include <sys/personality.h>
 #endif
 
+#if SANITIZER_LINUX && defined(__loongarch__)
+#  include <sys/sysmacros.h>
+#endif
+
 #if SANITIZER_FREEBSD
 #include <sys/exec.h>
 #include <sys/procctl.h>
@@ -188,6 +192,8 @@
 #    include "sanitizer_syscall_linux_arm.inc"
 #  elif SANITIZER_LINUX && defined(__hexagon__)
 #    include "sanitizer_syscall_linux_hexagon.inc"
+#  elif SANITIZER_LINUX && SANITIZER_LOONGARCH64
+#    include "sanitizer_syscall_linux_loongarch64.inc"
 #  else
 #    include "sanitizer_syscall_generic.inc"
 #  endif
@@ -290,7 +296,29 @@
 }
 #endif
 
-#if SANITIZER_MIPS64
+#  if SANITIZER_LINUX && defined(__loongarch__)
+static void statx_to_stat(struct statx *in, struct stat *out) {
+  internal_memset(out, 0, sizeof(*out));
+  out->st_dev = makedev(in->stx_dev_major, in->stx_dev_minor);
+  out->st_ino = in->stx_ino;
+  out->st_mode = in->stx_mode;
+  out->st_nlink = in->stx_nlink;
+  out->st_uid = in->stx_uid;
+  out->st_gid = in->stx_gid;
+  out->st_rdev = makedev(in->stx_rdev_major, in->stx_rdev_minor);
+  out->st_size = in->stx_size;
+  out->st_blksize = in->stx_blksize;
+  out->st_blocks = in->stx_blocks;
+  out->st_atime = in->stx_atime.tv_sec;
+  out->st_atim.tv_nsec = in->stx_atime.tv_nsec;
+  out->st_mtime = in->stx_mtime.tv_sec;
+  out->st_mtim.tv_nsec = in->stx_mtime.tv_nsec;
+  out->st_ctime = in->stx_ctime.tv_sec;
+  out->st_ctim.tv_nsec = in->stx_ctime.tv_nsec;
+}
+#  endif
+
+#  if SANITIZER_MIPS64
 // Undefine compatibility macros from <sys/stat.h>
 // so that they would not clash with the kernel_stat
 // st_[a|m|c]time fields
@@ -344,8 +372,14 @@
 #if SANITIZER_FREEBSD
   return internal_syscall(SYSCALL(fstatat), AT_FDCWD, (uptr)path, (uptr)buf, 0);
 #  elif SANITIZER_LINUX
-#    if (SANITIZER_WORDSIZE == 64 || SANITIZER_X32 || \
-         (defined(__mips__) && _MIPS_SIM == _ABIN32)) && \
+#    if defined(__loongarch__)
+  struct statx bufx;
+  int res = internal_syscall(SYSCALL(statx), AT_FDCWD, (uptr)path,
+                             AT_NO_AUTOMOUNT, STATX_BASIC_STATS, (uptr)&bufx);
+  statx_to_stat(&bufx, (struct stat *)buf);
+  return res;
+#    elif (SANITIZER_WORDSIZE == 64 || SANITIZER_X32 || \
+           (defined(__mips__) && _MIPS_SIM == _ABIN32)) && \
         !SANITIZER_SPARC
   return internal_syscall(SYSCALL(newfstatat), AT_FDCWD, (uptr)path, (uptr)buf,
                           0);
@@ -369,8 +403,15 @@
   return internal_syscall(SYSCALL(fstatat), AT_FDCWD, (uptr)path, (uptr)buf,
                           AT_SYMLINK_NOFOLLOW);
 #  elif SANITIZER_LINUX
-#    if (defined(_LP64) || SANITIZER_X32 || \
-         (defined(__mips__) && _MIPS_SIM == _ABIN32)) && \
+#    if defined(__loongarch__)
+  struct statx bufx;
+  int res = internal_syscall(SYSCALL(statx), AT_FDCWD, (uptr)path,
+                             AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT,
+                             STATX_BASIC_STATS, (uptr)&bufx);
+  statx_to_stat(&bufx, (struct stat *)buf);
+  return res;
+#    elif (defined(_LP64) || SANITIZER_X32 || \
+           (defined(__mips__) && _MIPS_SIM == _ABIN32)) && \
         !SANITIZER_SPARC
   return internal_syscall(SYSCALL(newfstatat), AT_FDCWD, (uptr)path, (uptr)buf,
                           AT_SYMLINK_NOFOLLOW);
@@ -397,9 +438,15 @@
   int res = internal_syscall(SYSCALL(fstat), fd, &kbuf);
   kernel_stat_to_stat(&kbuf, (struct stat *)buf);
   return res;
-# else
+#  elif SANITIZER_LINUX && defined(__loongarch__)
+  struct statx bufx;
+  int res = internal_syscall(SYSCALL(statx), fd, 0, AT_EMPTY_PATH,
+                             STATX_BASIC_STATS, (uptr)&bufx);
+  statx_to_stat(&bufx, (struct stat *)buf);
+  return res;
+#  else
   return internal_syscall(SYSCALL(fstat), fd, (uptr)buf);
-# endif
+#  endif
 #else
   struct stat64 buf64;
   int res = internal_syscall(SYSCALL(fstat64), fd, &buf64);
@@ -445,7 +492,7 @@
 }
 
 uptr internal_rename(const char *oldpath, const char *newpath) {
-#if defined(__riscv) && defined(__linux__)
+#  if (defined(__riscv) || defined(__loongarch__)) && defined(__linux__)
   return internal_syscall(SYSCALL(renameat2), AT_FDCWD, (uptr)oldpath,
                           AT_FDCWD, (uptr)newpath, 0);
 #  elif SANITIZER_LINUX
@@ -2176,6 +2223,11 @@
   *pc = ucontext->uc_mcontext.pc;
   *bp = ucontext->uc_mcontext.r30;
   *sp = ucontext->uc_mcontext.r29;
+#  elif defined(__loongarch__)
+  ucontext_t *ucontext = (ucontext_t *)context;
+  *pc = ucontext->uc_mcontext.__pc;
+  *bp = ucontext->uc_mcontext.__gregs[22];
+  *sp = ucontext->uc_mcontext.__gregs[3];
 #  else
 #    error "Unsupported arch"
 #  endif
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_platform.h b/compiler-rt/lib/sanitizer_common/sanitizer_platform.h
--- a/compiler-rt/lib/sanitizer_common/sanitizer_platform.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_platform.h
@@ -272,6 +272,12 @@
 #  define SANITIZER_RISCV64 0
 #endif
 
+#if defined(__loongarch_lp64)
+#  define SANITIZER_LOONGARCH64 1
+#else
+#  define SANITIZER_LOONGARCH64 0
+#endif
+
 // By default we allow to use SizeClassAllocator64 on 64-bit platform.
 // But in some cases (e.g. AArch64's 39-bit address space) SizeClassAllocator64
 // does not work well and we need to fallback to SizeClassAllocator32.
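
Note (editorial, not part of the patch): the hunks above route internal_stat(), internal_lstat(), and internal_fstat() through statx(2) on LoongArch, presumably because the port's Linux ABI provides statx rather than the legacy newfstatat/fstat entry points, and then translate the result back into struct stat via statx_to_stat(). A minimal standalone sketch of that translation under the same flags, using the glibc statx() wrapper instead of the sanitizer's internal_syscall; the name statx_to_stat_demo and the /tmp path are illustrative only, and a Linux system with glibc >= 2.28 is assumed:

// Editorial sketch of the statx -> struct stat field mapping performed above.
#define _GNU_SOURCE
#include <fcntl.h>          // AT_FDCWD, AT_NO_AUTOMOUNT
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>       // statx(), struct statx, STATX_BASIC_STATS
#include <sys/sysmacros.h>  // makedev()

static void statx_to_stat_demo(const struct statx *in, struct stat *out) {
  memset(out, 0, sizeof(*out));
  out->st_dev = makedev(in->stx_dev_major, in->stx_dev_minor);
  out->st_ino = in->stx_ino;
  out->st_mode = in->stx_mode;
  out->st_nlink = in->stx_nlink;
  out->st_uid = in->stx_uid;
  out->st_gid = in->stx_gid;
  out->st_rdev = makedev(in->stx_rdev_major, in->stx_rdev_minor);
  out->st_size = in->stx_size;
  out->st_blksize = in->stx_blksize;
  out->st_blocks = in->stx_blocks;
  out->st_atim.tv_sec = in->stx_atime.tv_sec;
  out->st_atim.tv_nsec = in->stx_atime.tv_nsec;
  out->st_mtim.tv_sec = in->stx_mtime.tv_sec;
  out->st_mtim.tv_nsec = in->stx_mtime.tv_nsec;
  out->st_ctim.tv_sec = in->stx_ctime.tv_sec;
  out->st_ctim.tv_nsec = in->stx_ctime.tv_nsec;
}

int main(void) {
  struct statx sx;
  struct stat st;
  // Same flags/mask as the patched internal_stat(): follow symlinks,
  // do not trigger automounts, request only the basic stat fields.
  if (statx(AT_FDCWD, "/tmp", AT_NO_AUTOMOUNT, STATX_BASIC_STATS, &sx) != 0) {
    perror("statx");
    return 1;
  }
  statx_to_stat_demo(&sx, &st);
  printf("ino=%llu size=%lld mode=%o\n", (unsigned long long)st.st_ino,
         (long long)st.st_size, (unsigned)(st.st_mode & 07777));
  return 0;
}
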
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_linux.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_linux.cpp
--- a/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_linux.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_linux.cpp
@@ -59,7 +59,8 @@
 #  if !defined(__powerpc64__) && !defined(__x86_64__) &&                   \
       !defined(__aarch64__) && !defined(__mips__) && !defined(__s390__) && \
-      !defined(__sparc__) && !defined(__riscv) && !defined(__hexagon__)
+      !defined(__sparc__) && !defined(__riscv) && !defined(__hexagon__) && \
+      !defined(__loongarch__)
 COMPILER_CHECK(struct___old_kernel_stat_sz == sizeof(struct __old_kernel_stat));
 #endif
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.h b/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.h
--- a/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.h
@@ -122,6 +122,9 @@
 #  elif defined(__hexagon__)
 const unsigned struct_kernel_stat_sz = 128;
 const unsigned struct_kernel_stat64_sz = 0;
+#  elif defined(__loongarch__)
+const unsigned struct_kernel_stat_sz = 128;
+const unsigned struct_kernel_stat64_sz = 0;
 #  endif
 struct __sanitizer_perf_event_attr {
   unsigned type;
@@ -142,7 +145,7 @@
 
 #if SANITIZER_LINUX
 
-#if defined(__powerpc64__) || defined(__s390__)
+#  if defined(__powerpc64__) || defined(__s390__) || defined(__loongarch__)
 const unsigned struct___old_kernel_stat_sz = 0;
 #elif !defined(__sparc__)
 const unsigned struct___old_kernel_stat_sz = 32;
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.cpp
--- a/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.cpp
@@ -271,6 +271,10 @@
       defined(__powerpc__) || defined(__s390__) || defined(__sparc__) || \
       defined(__hexagon__)
 #    define SIZEOF_STRUCT_USTAT 20
+#  elif defined(__loongarch__)
+  // Not used. The minimum glibc version available for LoongArch is 2.36,
+  // so the ustat() wrapper is already gone.
+#    define SIZEOF_STRUCT_USTAT 0
 #  else
 #    error Unknown size of struct ustat
 #  endif
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_syscall_linux_loongarch64.inc b/compiler-rt/lib/sanitizer_common/sanitizer_syscall_linux_loongarch64.inc
new file mode 100644
--- /dev/null
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_syscall_linux_loongarch64.inc
@@ -0,0 +1,167 @@
+//===-- sanitizer_syscall_linux_loongarch64.inc -----------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Implementations of internal_syscall and internal_iserror for
+// Linux/loongarch64.
+//
+//===----------------------------------------------------------------------===//

+// About local register variables:
+// https://gcc.gnu.org/onlinedocs/gcc/Local-Register-Variables.html#Local-Register-Variables
+//
+// Kernel ABI: the syscall number is passed in a7
+// (http://man7.org/linux/man-pages/man2/syscall.2.html), results are returned
+// in a0 and a1, and arguments are passed in a0-a7 (confirmed by inspecting
+// glibc sources).
+
+#define SYSCALL(name) __NR_##name
+
+#define INTERNAL_SYSCALL_CLOBBERS "memory"
+
+static uptr __internal_syscall(u64 nr) {
+  register u64 a7 asm("a7") = nr;
+  register u64 a0 asm("a0");
+  __asm__ volatile("syscall 0\n\t"
+                   : "=r"(a0)
+                   : "r"(a7)
+                   : INTERNAL_SYSCALL_CLOBBERS);
+  return a0;
+}
+#define __internal_syscall0(n) (__internal_syscall)(n)
+
+static uptr __internal_syscall(u64 nr, u64 arg1) {
+  register u64 a7 asm("a7") = nr;
+  register u64 a0 asm("a0") = arg1;
+  __asm__ volatile("syscall 0\n\t"
+                   : "+r"(a0)
+                   : "r"(a7)
+                   : INTERNAL_SYSCALL_CLOBBERS);
+  return a0;
+}
+#define __internal_syscall1(n, a1) (__internal_syscall)(n, (u64)(a1))
+
+static uptr __internal_syscall(u64 nr, u64 arg1, long arg2) {
+  register u64 a7 asm("a7") = nr;
+  register u64 a0 asm("a0") = arg1;
+  register u64 a1 asm("a1") = arg2;
+  __asm__ volatile("syscall 0\n\t"
+                   : "+r"(a0)
+                   : "r"(a7), "r"(a1)
+                   : INTERNAL_SYSCALL_CLOBBERS);
+  return a0;
+}
+#define __internal_syscall2(n, a1, a2) \
+  (__internal_syscall)(n, (u64)(a1), (long)(a2))
+
+static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3) {
+  register u64 a7 asm("a7") = nr;
+  register u64 a0 asm("a0") = arg1;
+  register u64 a1 asm("a1") = arg2;
+  register u64 a2 asm("a2") = arg3;
+  __asm__ volatile("syscall 0\n\t"
+                   : "+r"(a0)
+                   : "r"(a7), "r"(a1), "r"(a2)
+                   : INTERNAL_SYSCALL_CLOBBERS);
+  return a0;
+}
+#define __internal_syscall3(n, a1, a2, a3) \
+  (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3))
+
+static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3,
+                               u64 arg4) {
+  register u64 a7 asm("a7") = nr;
+  register u64 a0 asm("a0") = arg1;
+  register u64 a1 asm("a1") = arg2;
+  register u64 a2 asm("a2") = arg3;
+  register u64 a3 asm("a3") = arg4;
+  __asm__ volatile("syscall 0\n\t"
+                   : "+r"(a0)
+                   : "r"(a7), "r"(a1), "r"(a2), "r"(a3)
+                   : INTERNAL_SYSCALL_CLOBBERS);
+  return a0;
+}
+#define __internal_syscall4(n, a1, a2, a3, a4) \
+  (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4))
+
+static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3, u64 arg4,
+                               long arg5) {
+  register u64 a7 asm("a7") = nr;
+  register u64 a0 asm("a0") = arg1;
+  register u64 a1 asm("a1") = arg2;
+  register u64 a2 asm("a2") = arg3;
+  register u64 a3 asm("a3") = arg4;
+  register u64 a4 asm("a4") = arg5;
+  __asm__ volatile("syscall 0\n\t"
+                   : "+r"(a0)
+                   : "r"(a7), "r"(a1), "r"(a2), "r"(a3), "r"(a4)
+                   : INTERNAL_SYSCALL_CLOBBERS);
+  return a0;
+}
+#define __internal_syscall5(n, a1, a2, a3, a4, a5) \
+  (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4), \
+                       (u64)(a5))
+
+static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3, u64 arg4,
+                               long arg5, long arg6) {
+  register u64 a7 asm("a7") = nr;
+  register u64 a0 asm("a0") = arg1;
+  register u64 a1 asm("a1") = arg2;
+  register u64 a2 asm("a2") = arg3;
+  register u64 a3 asm("a3") = arg4;
+  register u64 a4 asm("a4") = arg5;
+  register u64 a5 asm("a5") = arg6;
+  __asm__ volatile("syscall 0\n\t"
+                   : "+r"(a0)
+                   : "r"(a7), "r"(a1), "r"(a2), "r"(a3), "r"(a4), "r"(a5)
+                   : INTERNAL_SYSCALL_CLOBBERS);
+  return a0;
+}
+#define __internal_syscall6(n, a1, a2, a3, a4, a5, a6) \
+  (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4), \
+                       (u64)(a5), (long)(a6))
+
+static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3, u64 arg4,
+                               long arg5, long arg6, long arg7) {
+  register u64 a7 asm("a7") = nr;
+  register u64 a0 asm("a0") = arg1;
+  register u64 a1 asm("a1") = arg2;
+  register u64 a2 asm("a2") = arg3;
+  register u64 a3 asm("a3") = arg4;
+  register u64 a4 asm("a4") = arg5;
+  register u64 a5 asm("a5") = arg6;
+  register u64 a6 asm("a6") = arg7;
+  __asm__ volatile("syscall 0\n\t"
+                   : "+r"(a0)
+                   : "r"(a7), "r"(a1), "r"(a2), "r"(a3), "r"(a4), "r"(a5),
+                     "r"(a6)
+                   : INTERNAL_SYSCALL_CLOBBERS);
+  return a0;
+}
+#define __internal_syscall7(n, a1, a2, a3, a4, a5, a6, a7) \
+  (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4), \
+                       (u64)(a5), (long)(a6), (long)(a7))
+
+#define __SYSCALL_NARGS_X(a1, a2, a3, a4, a5, a6, a7, a8, n, ...) n
+#define __SYSCALL_NARGS(...) \
+  __SYSCALL_NARGS_X(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0, )
+#define __SYSCALL_CONCAT_X(a, b) a##b
+#define __SYSCALL_CONCAT(a, b) __SYSCALL_CONCAT_X(a, b)
+#define __SYSCALL_DISP(b, ...) \
+  __SYSCALL_CONCAT(b, __SYSCALL_NARGS(__VA_ARGS__))(__VA_ARGS__)
+
+#define internal_syscall(...) __SYSCALL_DISP(__internal_syscall, __VA_ARGS__)
+
+// Helper function used to avoid clobbering of errno.
+bool internal_iserror(uptr retval, int *internal_errno) {
+  if (retval >= (uptr)-4095) {
+    if (internal_errno)
+      *internal_errno = -retval;
+    return true;
+  }
+  return false;
+}
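
Note (editorial, not part of the patch): the __SYSCALL_NARGS/__SYSCALL_DISP macros above use the standard variadic argument-counting trick, so internal_syscall(SYSCALL(close), fd) expands to __internal_syscall1(__NR_close, fd) and selects the matching one-argument overload; internal_iserror() then follows the kernel convention that return values in the top 4095 of the address range encode -errno. A minimal standalone sketch of the same counting technique, reduced to two arguments and with hypothetical demo_call* helpers so it compiles on its own:

// Editorial sketch of the argument-counting dispatch used by internal_syscall.
#include <cstdio>

static long demo_call0(long nr) { return nr; }
static long demo_call1(long nr, long a1) { return nr + a1; }
static long demo_call2(long nr, long a1, long a2) { return nr + a1 + a2; }

// Counts how many arguments follow the call number (0..2 here, 0..7 in the
// .inc file) by shifting a descending list into the 'n' slot.
#define DEMO_NARGS_X(a1, a2, a3, n, ...) n
#define DEMO_NARGS(...) DEMO_NARGS_X(__VA_ARGS__, 2, 1, 0, )
#define DEMO_CONCAT_X(a, b) a##b
#define DEMO_CONCAT(a, b) DEMO_CONCAT_X(a, b)
#define DEMO_DISP(b, ...) DEMO_CONCAT(b, DEMO_NARGS(__VA_ARGS__))(__VA_ARGS__)
#define demo_call(...) DEMO_DISP(demo_call, __VA_ARGS__)

int main() {
  // demo_call(40) -> demo_call0(40); demo_call(40, 1, 2) -> demo_call2(40, 1, 2).
  std::printf("%ld %ld\n", demo_call(40), demo_call(40, 1, 2));
  return 0;
}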