Index: compiler-rt/lib/sanitizer_common/sanitizer_syscall_linux_loongarch64.inc
===================================================================
--- compiler-rt/lib/sanitizer_common/sanitizer_syscall_linux_loongarch64.inc
+++ compiler-rt/lib/sanitizer_common/sanitizer_syscall_linux_loongarch64.inc
@@ -21,15 +21,17 @@
 // are passed in: a0-a7 (confirmed by inspecting glibc sources).
 #define SYSCALL(name) __NR_##name
 
-#define INTERNAL_SYSCALL_CLOBBERS "memory"
+#define INTERNAL_SYSCALL_CLOBBERS \
+  "memory", "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7", "$t8"
 
 static uptr __internal_syscall(u64 nr) {
   register u64 a7 asm("$a7") = nr;
   register u64 a0 asm("$a0");
   __asm__ volatile("syscall 0\n\t"
-                   : "=r"(a0)
-                   : "r"(a7)
-                   : INTERNAL_SYSCALL_CLOBBERS);
+                   : "=r"(a0), "+r"(a7)
+                   :
+                   : INTERNAL_SYSCALL_CLOBBERS, "$a1", "$a2", "$a3", "$a4",
+                     "$a5", "$a6");
   return a0;
 }
 #define __internal_syscall0(n) (__internal_syscall)(n)
@@ -38,9 +40,10 @@
   register u64 a7 asm("$a7") = nr;
   register u64 a0 asm("$a0") = arg1;
   __asm__ volatile("syscall 0\n\t"
-                   : "+r"(a0)
-                   : "r"(a7)
-                   : INTERNAL_SYSCALL_CLOBBERS);
+                   : "+r"(a0), "+r"(a7)
+                   :
+                   : INTERNAL_SYSCALL_CLOBBERS, "$a1", "$a2", "$a3", "$a4",
+                     "$a5", "$a6");
   return a0;
 }
 #define __internal_syscall1(n, a1) (__internal_syscall)(n, (u64)(a1))
@@ -50,9 +53,10 @@
   register u64 a0 asm("$a0") = arg1;
   register u64 a1 asm("$a1") = arg2;
   __asm__ volatile("syscall 0\n\t"
-                   : "+r"(a0)
-                   : "r"(a7), "r"(a1)
-                   : INTERNAL_SYSCALL_CLOBBERS);
+                   : "+r"(a0), "+r"(a7), "+r"(a1)
+                   :
+                   : INTERNAL_SYSCALL_CLOBBERS, "$a2", "$a3", "$a4", "$a5",
+                     "$a6");
   return a0;
 }
 #define __internal_syscall2(n, a1, a2) \
@@ -64,9 +68,9 @@
   register u64 a1 asm("$a1") = arg2;
   register u64 a2 asm("$a2") = arg3;
   __asm__ volatile("syscall 0\n\t"
-                   : "+r"(a0)
-                   : "r"(a7), "r"(a1), "r"(a2)
-                   : INTERNAL_SYSCALL_CLOBBERS);
+                   : "+r"(a0), "+r"(a7), "+r"(a1), "+r"(a2)
+                   :
+                   : INTERNAL_SYSCALL_CLOBBERS, "$a3", "$a4", "$a5", "$a6");
   return a0;
 }
 #define __internal_syscall3(n, a1, a2, a3) \
@@ -80,9 +84,9 @@
   register u64 a2 asm("$a2") = arg3;
   register u64 a3 asm("$a3") = arg4;
   __asm__ volatile("syscall 0\n\t"
-                   : "+r"(a0)
-                   : "r"(a7), "r"(a1), "r"(a2), "r"(a3)
-                   : INTERNAL_SYSCALL_CLOBBERS);
+                   : "+r"(a0), "+r"(a7), "+r"(a1), "+r"(a2), "+r"(a3)
+                   :
+                   : INTERNAL_SYSCALL_CLOBBERS, "$a4", "$a5", "$a6");
   return a0;
 }
 #define __internal_syscall4(n, a1, a2, a3, a4) \
@@ -97,9 +101,9 @@
   register u64 a3 asm("$a3") = arg4;
   register u64 a4 asm("$a4") = arg5;
   __asm__ volatile("syscall 0\n\t"
-                   : "+r"(a0)
-                   : "r"(a7), "r"(a1), "r"(a2), "r"(a3), "r"(a4)
-                   : INTERNAL_SYSCALL_CLOBBERS);
+                   : "+r"(a0), "+r"(a7), "+r"(a1), "+r"(a2), "+r"(a3), "+r"(a4)
+                   :
+                   : INTERNAL_SYSCALL_CLOBBERS, "$a5", "$a6");
   return a0;
 }
 #define __internal_syscall5(n, a1, a2, a3, a4, a5) \
@@ -116,9 +120,10 @@
   register u64 a4 asm("$a4") = arg5;
   register u64 a5 asm("$a5") = arg6;
   __asm__ volatile("syscall 0\n\t"
-                   : "+r"(a0)
-                   : "r"(a7), "r"(a1), "r"(a2), "r"(a3), "r"(a4), "r"(a5)
-                   : INTERNAL_SYSCALL_CLOBBERS);
+                   : "+r"(a0), "+r"(a7), "+r"(a1), "+r"(a2), "+r"(a3), "+r"(a4),
+                     "+r"(a5)
+                   :
+                   : INTERNAL_SYSCALL_CLOBBERS, "$a6");
   return a0;
 }
 #define __internal_syscall6(n, a1, a2, a3, a4, a5, a6) \
@@ -136,9 +141,9 @@
   register u64 a5 asm("$a5") = arg6;
   register u64 a6 asm("$a6") = arg7;
   __asm__ volatile("syscall 0\n\t"
-                   : "+r"(a0)
-                   : "r"(a7), "r"(a1), "r"(a2), "r"(a3), "r"(a4), "r"(a5),
-                     "r"(a6)
+                   : "+r"(a0), "+r"(a7), "+r"(a1), "+r"(a2), "+r"(a3), "+r"(a4),
+                     "+r"(a5), "+r"(a6)
+                   :
                    : INTERNAL_SYSCALL_CLOBBERS);
   return a0;
 }
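
For reference, a minimal standalone sketch of the zero-argument wrapper with the widened clobber list from this patch, assuming a loongarch64-linux target; the raw_syscall0 name and the use of __NR_getpid from <asm/unistd.h> are illustrative only, the real wrappers being the __internal_syscallN functions patched above.

// Standalone sketch for a loongarch64-linux target; mirrors the patched
// zero-argument wrapper. raw_syscall0 and the getpid example are hypothetical.
#include <asm/unistd.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t raw_syscall0(uint64_t nr) {
  register uint64_t a7 __asm__("$a7") = nr;  // syscall number goes in $a7
  register uint64_t a0 __asm__("$a0");       // result comes back in $a0
  __asm__ volatile("syscall 0\n\t"
                   : "=r"(a0), "+r"(a7)
                   :
                   // Per the patch, the temporaries and the unused argument
                   // registers are not assumed to survive the syscall.
                   : "memory", "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6",
                     "$t7", "$t8", "$a1", "$a2", "$a3", "$a4", "$a5", "$a6");
  return a0;
}

int main(void) {
  printf("getpid() via raw syscall: %llu\n",
         (unsigned long long)raw_syscall0(__NR_getpid));
  return 0;
}

The "+r" constraints on the live argument registers and the explicit clobbers on the remaining ones force the compiler to reload any value it had cached in those registers after the syscall instruction, instead of assuming they were preserved.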