diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl.h b/compiler-rt/lib/tsan/rtl/tsan_rtl.h
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl.h
@@ -565,27 +565,6 @@
 void AfterSleep(ThreadState *thr, uptr pc);
 void IncrementEpoch(ThreadState *thr);
 
-// The hacky call uses custom calling convention and an assembly thunk.
-// It is considerably faster that a normal call for the caller
-// if it is not executed (it is intended for slow paths from hot functions).
-// The trick is that the call preserves all registers and the compiler
-// does not treat it as a call.
-// If it does not work for you, use normal call.
-#if !SANITIZER_DEBUG && defined(__x86_64__) && !SANITIZER_MAC
-// The caller may not create the stack frame for itself at all,
-// so we create a reserve stack frame for it (1024b must be enough).
-#define HACKY_CALL(f) \
-  __asm__ __volatile__("sub $1024, %%rsp;" \
-                       CFI_INL_ADJUST_CFA_OFFSET(1024) \
-                       ".hidden " #f "_thunk;" \
-                       "call " #f "_thunk;" \
-                       "add $1024, %%rsp;" \
-                       CFI_INL_ADJUST_CFA_OFFSET(-1024) \
-                       ::: "memory", "cc");
-#else
-#define HACKY_CALL(f) f()
-#endif
-
 #if !SANITIZER_GO
 uptr ALWAYS_INLINE HeapEnd() {
   return HeapMemEnd() + PrimaryAllocator::AdditionalSize();
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
@@ -913,12 +913,6 @@
           atomic_load_relaxed(&thr->trace_pos));
 }
 
-#if !SANITIZER_GO
-extern "C" void __tsan_trace_switch() {}
-
-extern "C" void __tsan_report_race() {}
-#endif
-
 void ThreadIgnoreBegin(ThreadState* thr, uptr pc) {
   DPrintf("#%d: ThreadIgnoreBegin\n", thr->tid);
   thr->ignore_reads_and_writes++;
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_amd64.S b/compiler-rt/lib/tsan/rtl/tsan_rtl_amd64.S
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl_amd64.S
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_amd64.S
@@ -9,242 +9,6 @@
 .section __TEXT,__text
 #endif
 
-ASM_HIDDEN(__tsan_trace_switch)
-.globl ASM_SYMBOL(__tsan_trace_switch_thunk)
-ASM_SYMBOL(__tsan_trace_switch_thunk):
-  CFI_STARTPROC
-  _CET_ENDBR
-  # Save scratch registers.
-  push %rax
-  CFI_ADJUST_CFA_OFFSET(8)
-  CFI_REL_OFFSET(%rax, 0)
-  push %rcx
-  CFI_ADJUST_CFA_OFFSET(8)
-  CFI_REL_OFFSET(%rcx, 0)
-  push %rdx
-  CFI_ADJUST_CFA_OFFSET(8)
-  CFI_REL_OFFSET(%rdx, 0)
-  push %rsi
-  CFI_ADJUST_CFA_OFFSET(8)
-  CFI_REL_OFFSET(%rsi, 0)
-  push %rdi
-  CFI_ADJUST_CFA_OFFSET(8)
-  CFI_REL_OFFSET(%rdi, 0)
-  push %r8
-  CFI_ADJUST_CFA_OFFSET(8)
-  CFI_REL_OFFSET(%r8, 0)
-  push %r9
-  CFI_ADJUST_CFA_OFFSET(8)
-  CFI_REL_OFFSET(%r9, 0)
-  push %r10
-  CFI_ADJUST_CFA_OFFSET(8)
-  CFI_REL_OFFSET(%r10, 0)
-  push %r11
-  CFI_ADJUST_CFA_OFFSET(8)
-  CFI_REL_OFFSET(%r11, 0)
-  # All XMM registers are caller-saved.
-  sub $0x100, %rsp
-  CFI_ADJUST_CFA_OFFSET(0x100)
-  vmovdqu %xmm0, 0x0(%rsp)
-  vmovdqu %xmm1, 0x10(%rsp)
-  vmovdqu %xmm2, 0x20(%rsp)
-  vmovdqu %xmm3, 0x30(%rsp)
-  vmovdqu %xmm4, 0x40(%rsp)
-  vmovdqu %xmm5, 0x50(%rsp)
-  vmovdqu %xmm6, 0x60(%rsp)
-  vmovdqu %xmm7, 0x70(%rsp)
-  vmovdqu %xmm8, 0x80(%rsp)
-  vmovdqu %xmm9, 0x90(%rsp)
-  vmovdqu %xmm10, 0xa0(%rsp)
-  vmovdqu %xmm11, 0xb0(%rsp)
-  vmovdqu %xmm12, 0xc0(%rsp)
-  vmovdqu %xmm13, 0xd0(%rsp)
-  vmovdqu %xmm14, 0xe0(%rsp)
-  vmovdqu %xmm15, 0xf0(%rsp)
-  # Align stack frame.
-  push %rbx  # non-scratch
-  CFI_ADJUST_CFA_OFFSET(8)
-  CFI_REL_OFFSET(%rbx, 0)
-  mov %rsp, %rbx  # save current rsp
-  CFI_DEF_CFA_REGISTER(%rbx)
-  shr $4, %rsp  # clear 4 lsb, align to 16
-  shl $4, %rsp
-
-  call ASM_SYMBOL(__tsan_trace_switch)
-
-  # Unalign stack frame back.
-  mov %rbx, %rsp  # restore the original rsp
-  CFI_DEF_CFA_REGISTER(%rsp)
-  pop %rbx
-  CFI_ADJUST_CFA_OFFSET(-8)
-  # Restore scratch registers.
-  vmovdqu 0x0(%rsp), %xmm0
-  vmovdqu 0x10(%rsp), %xmm1
-  vmovdqu 0x20(%rsp), %xmm2
-  vmovdqu 0x30(%rsp), %xmm3
-  vmovdqu 0x40(%rsp), %xmm4
-  vmovdqu 0x50(%rsp), %xmm5
-  vmovdqu 0x60(%rsp), %xmm6
-  vmovdqu 0x70(%rsp), %xmm7
-  vmovdqu 0x80(%rsp), %xmm8
-  vmovdqu 0x90(%rsp), %xmm9
-  vmovdqu 0xa0(%rsp), %xmm10
-  vmovdqu 0xb0(%rsp), %xmm11
-  vmovdqu 0xc0(%rsp), %xmm12
-  vmovdqu 0xd0(%rsp), %xmm13
-  vmovdqu 0xe0(%rsp), %xmm14
-  vmovdqu 0xf0(%rsp), %xmm15
-  add $0x100, %rsp
-  CFI_ADJUST_CFA_OFFSET(-0x100)
-  pop %r11
-  CFI_ADJUST_CFA_OFFSET(-8)
-  pop %r10
-  CFI_ADJUST_CFA_OFFSET(-8)
-  pop %r9
-  CFI_ADJUST_CFA_OFFSET(-8)
-  pop %r8
-  CFI_ADJUST_CFA_OFFSET(-8)
-  pop %rdi
-  CFI_ADJUST_CFA_OFFSET(-8)
-  pop %rsi
-  CFI_ADJUST_CFA_OFFSET(-8)
-  pop %rdx
-  CFI_ADJUST_CFA_OFFSET(-8)
-  pop %rcx
-  CFI_ADJUST_CFA_OFFSET(-8)
-  pop %rax
-  CFI_ADJUST_CFA_OFFSET(-8)
-  CFI_RESTORE(%rax)
-  CFI_RESTORE(%rbx)
-  CFI_RESTORE(%rcx)
-  CFI_RESTORE(%rdx)
-  CFI_RESTORE(%rsi)
-  CFI_RESTORE(%rdi)
-  CFI_RESTORE(%r8)
-  CFI_RESTORE(%r9)
-  CFI_RESTORE(%r10)
-  CFI_RESTORE(%r11)
-  ret
-  CFI_ENDPROC
-
-ASM_HIDDEN(__tsan_report_race)
-.globl ASM_SYMBOL(__tsan_report_race_thunk)
-ASM_SYMBOL(__tsan_report_race_thunk):
-  CFI_STARTPROC
-  _CET_ENDBR
-  # Save scratch registers.
-  push %rax
-  CFI_ADJUST_CFA_OFFSET(8)
-  CFI_REL_OFFSET(%rax, 0)
-  push %rcx
-  CFI_ADJUST_CFA_OFFSET(8)
-  CFI_REL_OFFSET(%rcx, 0)
-  push %rdx
-  CFI_ADJUST_CFA_OFFSET(8)
-  CFI_REL_OFFSET(%rdx, 0)
-  push %rsi
-  CFI_ADJUST_CFA_OFFSET(8)
-  CFI_REL_OFFSET(%rsi, 0)
-  push %rdi
-  CFI_ADJUST_CFA_OFFSET(8)
-  CFI_REL_OFFSET(%rdi, 0)
-  push %r8
-  CFI_ADJUST_CFA_OFFSET(8)
-  CFI_REL_OFFSET(%r8, 0)
-  push %r9
-  CFI_ADJUST_CFA_OFFSET(8)
-  CFI_REL_OFFSET(%r9, 0)
-  push %r10
-  CFI_ADJUST_CFA_OFFSET(8)
-  CFI_REL_OFFSET(%r10, 0)
-  push %r11
-  CFI_ADJUST_CFA_OFFSET(8)
-  CFI_REL_OFFSET(%r11, 0)
-  # All XMM registers are caller-saved.
-  sub $0x100, %rsp
-  CFI_ADJUST_CFA_OFFSET(0x100)
-  vmovdqu %xmm0, 0x0(%rsp)
-  vmovdqu %xmm1, 0x10(%rsp)
-  vmovdqu %xmm2, 0x20(%rsp)
-  vmovdqu %xmm3, 0x30(%rsp)
-  vmovdqu %xmm4, 0x40(%rsp)
-  vmovdqu %xmm5, 0x50(%rsp)
-  vmovdqu %xmm6, 0x60(%rsp)
-  vmovdqu %xmm7, 0x70(%rsp)
-  vmovdqu %xmm8, 0x80(%rsp)
-  vmovdqu %xmm9, 0x90(%rsp)
-  vmovdqu %xmm10, 0xa0(%rsp)
-  vmovdqu %xmm11, 0xb0(%rsp)
-  vmovdqu %xmm12, 0xc0(%rsp)
-  vmovdqu %xmm13, 0xd0(%rsp)
-  vmovdqu %xmm14, 0xe0(%rsp)
-  vmovdqu %xmm15, 0xf0(%rsp)
-  # Align stack frame.
-  push %rbx  # non-scratch
-  CFI_ADJUST_CFA_OFFSET(8)
-  CFI_REL_OFFSET(%rbx, 0)
-  mov %rsp, %rbx  # save current rsp
-  CFI_DEF_CFA_REGISTER(%rbx)
-  shr $4, %rsp  # clear 4 lsb, align to 16
-  shl $4, %rsp
-
-  call ASM_SYMBOL(__tsan_report_race)
-
-  # Unalign stack frame back.
-  mov %rbx, %rsp  # restore the original rsp
-  CFI_DEF_CFA_REGISTER(%rsp)
-  pop %rbx
-  CFI_ADJUST_CFA_OFFSET(-8)
-  # Restore scratch registers.
-  vmovdqu 0x0(%rsp), %xmm0
-  vmovdqu 0x10(%rsp), %xmm1
-  vmovdqu 0x20(%rsp), %xmm2
-  vmovdqu 0x30(%rsp), %xmm3
-  vmovdqu 0x40(%rsp), %xmm4
-  vmovdqu 0x50(%rsp), %xmm5
-  vmovdqu 0x60(%rsp), %xmm6
-  vmovdqu 0x70(%rsp), %xmm7
-  vmovdqu 0x80(%rsp), %xmm8
-  vmovdqu 0x90(%rsp), %xmm9
-  vmovdqu 0xa0(%rsp), %xmm10
-  vmovdqu 0xb0(%rsp), %xmm11
-  vmovdqu 0xc0(%rsp), %xmm12
-  vmovdqu 0xd0(%rsp), %xmm13
-  vmovdqu 0xe0(%rsp), %xmm14
-  vmovdqu 0xf0(%rsp), %xmm15
-  add $0x100, %rsp
-  CFI_ADJUST_CFA_OFFSET(-0x100)
-  pop %r11
-  CFI_ADJUST_CFA_OFFSET(-8)
-  pop %r10
-  CFI_ADJUST_CFA_OFFSET(-8)
-  pop %r9
-  CFI_ADJUST_CFA_OFFSET(-8)
-  pop %r8
-  CFI_ADJUST_CFA_OFFSET(-8)
-  pop %rdi
-  CFI_ADJUST_CFA_OFFSET(-8)
-  pop %rsi
-  CFI_ADJUST_CFA_OFFSET(-8)
-  pop %rdx
-  CFI_ADJUST_CFA_OFFSET(-8)
-  pop %rcx
-  CFI_ADJUST_CFA_OFFSET(-8)
-  pop %rax
-  CFI_ADJUST_CFA_OFFSET(-8)
-  CFI_RESTORE(%rax)
-  CFI_RESTORE(%rbx)
-  CFI_RESTORE(%rcx)
-  CFI_RESTORE(%rdx)
-  CFI_RESTORE(%rsi)
-  CFI_RESTORE(%rdi)
-  CFI_RESTORE(%r8)
-  CFI_RESTORE(%r9)
-  CFI_RESTORE(%r10)
-  CFI_RESTORE(%r11)
-  ret
-  CFI_ENDPROC
-
 ASM_HIDDEN(__tsan_setjmp)
 #if defined(__NetBSD__)
 .comm _ZN14__interception15real___setjmp14E,8,8
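For context on the mechanism this patch deletes: HACKY_CALL entered a slow path through inline asm that declares only "memory" and "cc" as clobbers, so the compiler did not model the site as a call and could keep live values in scratch registers across it; the hand-written thunk then preserved every register itself before making a normal ABI call. Below is a minimal self-contained sketch of the same trick, assuming x86-64 Linux/ELF with GCC or Clang; the names `my_slow_path` and `my_slow_path_thunk` are hypothetical, and the XMM saves and CFI annotations of the real thunks are omitted for brevity.

```cpp
// Sketch of the register-preserving thunk trick behind the removed
// HACKY_CALL macro. Illustrative names only; not part of TSan.
#include <cstdio>

extern "C" void my_slow_path() { std::puts("slow path taken"); }

// Thunk in the spirit of the removed __tsan_*_thunk routines: it saves
// the integer scratch registers, makes a normal ABI call, and restores
// them. (The real thunks also saved/restored all 16 XMM registers and
// carried CFI unwind annotations.)
asm(R"(.globl my_slow_path_thunk
.hidden my_slow_path_thunk
my_slow_path_thunk:
  push %rax
  push %rcx
  push %rdx
  push %rsi
  push %rdi
  push %r8
  push %r9
  push %r10
  push %r11
  push %rbx            # non-scratch, used to save rsp
  mov %rsp, %rbx       # save current rsp
  and $-16, %rsp       # align to 16 for the ABI call
  call my_slow_path
  mov %rbx, %rsp       # restore the original rsp
  pop %rbx
  pop %r11
  pop %r10
  pop %r9
  pop %r8
  pop %rdi
  pop %rsi
  pop %rdx
  pop %rcx
  pop %rax
  ret)");

// Caller-side macro mirroring the removed HACKY_CALL: reserve stack for
// a possibly frameless caller, then call the thunk. Only "memory" and
// "cc" are declared as clobbers, so the compiler does not spill scratch
// registers around the site.
#define HACKY_CALL(f)                        \
  __asm__ __volatile__("sub $1024, %%rsp;"   \
                       "call " #f "_thunk;"  \
                       "add $1024, %%rsp;"   \
                       ::: "memory", "cc")

int main() {
  HACKY_CALL(my_slow_path);  // prints "slow path taken"
  return 0;
}
```

The hot path stays cheap because the spill/restore cost sits inside the thunk and is paid only when the slow path actually runs. On configurations where the trick was unsupported, HACKY_CALL already degraded to a plain f() call, as the removed #else branch shows.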