Index: lib/sanitizer_common/sanitizer_common_interceptors.inc
===================================================================
--- lib/sanitizer_common/sanitizer_common_interceptors.inc
+++ lib/sanitizer_common/sanitizer_common_interceptors.inc
@@ -46,6 +46,9 @@
 #include "sanitizer_platform_interceptors.h"
 #include "sanitizer_symbolizer.h"
 #include "sanitizer_tls_get_addr.h"
+#if SANITIZER_RELACY_SCHEDULER
+#include <tsan/rtl/relacy/tsan_scheduler_engine.h>
+#endif
 
 #include <stdarg.h>
 
@@ -4026,7 +4029,11 @@
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, pthread_mutex_lock, m);
   COMMON_INTERCEPTOR_MUTEX_PRE_LOCK(ctx, m);
+#if SANITIZER_RELACY_SCHEDULER
+  int res = _scheduler_engine.Lock(m);
+#else
   int res = REAL(pthread_mutex_lock)(m);
+#endif
   if (res == errno_EOWNERDEAD)
     COMMON_INTERCEPTOR_MUTEX_REPAIR(ctx, m);
   if (res == 0 || res == errno_EOWNERDEAD)
@@ -4040,7 +4047,11 @@
   void *ctx;
   COMMON_INTERCEPTOR_ENTER(ctx, pthread_mutex_unlock, m);
   COMMON_INTERCEPTOR_MUTEX_UNLOCK(ctx, m);
-  int res = REAL(pthread_mutex_unlock)(m);
+#if SANITIZER_RELACY_SCHEDULER
+  int res = _scheduler_engine.Unlock(m);
+#else
+  int res = REAL(pthread_mutex_unlock)(m);
+#endif
   if (res == errno_EINVAL)
     COMMON_INTERCEPTOR_MUTEX_INVALID(ctx, m);
   return res;
Index: lib/sanitizer_common/sanitizer_linux_libcdep.cc
===================================================================
--- lib/sanitizer_common/sanitizer_linux_libcdep.cc
+++ lib/sanitizer_common/sanitizer_linux_libcdep.cc
@@ -282,7 +282,12 @@
 # if defined(__i386__)
   asm("mov %%gs:%c1,%0" : "=r"(descr_addr) : "i"(kThreadSelfOffset));
 # elif defined(__x86_64__)
+#if SANITIZER_RELACY_SCHEDULER
+  // kThreadSelfOffset is not valid here: with ELF TLS (variant II), %fs:0 holds the thread self pointer. See the ELF TLS ABI documentation.
+  asm("mov %%fs:%c1,%0" : "=r"(descr_addr) : "i"(0));
+#else
   asm("mov %%fs:%c1,%0" : "=r"(descr_addr) : "i"(kThreadSelfOffset));
+#endif
 # elif defined(__mips__)
   // MIPS uses TLS variant I. The thread pointer (in hardware register $29)
   // points to the end of the TCB + 0x7000. The pthread_descr structure is
Index: lib/tsan/CMakeLists.txt
===================================================================
--- lib/tsan/CMakeLists.txt
+++ lib/tsan/CMakeLists.txt
@@ -3,6 +3,12 @@
 include_directories(..)
 
 set(TSAN_CFLAGS ${SANITIZER_COMMON_CFLAGS})
+
+option(SANITIZER_RELACY_SCHEDULER "Enable manual (relacy-style) scheduling of threads" OFF)
+if (SANITIZER_RELACY_SCHEDULER)
+    add_definitions(-DSANITIZER_RELACY_SCHEDULER)
+endif()
+
 # SANITIZER_COMMON_CFLAGS contains -fPIC, but it's performance-critical for
 # TSan runtime to be built with -fPIE to reduce the number of register spills.
 append_list_if(COMPILER_RT_HAS_FPIE_FLAG -fPIE TSAN_CFLAGS)
@@ -50,7 +56,10 @@
   rtl/tsan_stat.cc
   rtl/tsan_suppressions.cc
   rtl/tsan_symbolize.cc
-  rtl/tsan_sync.cc)
+  rtl/tsan_sync.cc
+  rtl/relacy/schedulers/tsan_generator_paths.cc
+  rtl/relacy/tsan_threads_box.cc
+  rtl/relacy/tsan_thread_context.cc)
 
 set(TSAN_CXX_SOURCES
   rtl/tsan_new_delete.cc)
@@ -65,7 +74,19 @@
   # Assume Linux
   list(APPEND TSAN_SOURCES
     rtl/tsan_platform_linux.cc
-    rtl/tsan_platform_posix.cc)
+    rtl/tsan_platform_posix.cc
+    rtl/relacy/tsan_scheduler_engine.cc
+    rtl/relacy/schedulers/tsan_all_states_scheduler.cc
+    rtl/relacy/schedulers/tsan_fixed_window_scheduler.cc
+    rtl/relacy/schedulers/tsan_full_path_scheduler.cc
+    rtl/relacy/schedulers/tsan_parallel_full_path_scheduler.cc
+    rtl/relacy/schedulers/tsan_random_scheduler.cc
+    rtl/relacy/schedulers/tsan_random_with_different_distributions_scheduler.cc
+    rtl/relacy/platforms/tsan_fiber_tls_copy_platform.cc
+    rtl/relacy/platforms/tsan_fiber_tls_swap_platform.cc
+    rtl/relacy/platforms/tsan_pthread_platform.cc
+    rtl/relacy/schedulers/tsan_random_generator.cc
+    rtl/relacy/tsan_shared_memory.cc)
 endif()
 
 set(TSAN_HEADERS
@@ -93,7 +114,28 @@
   rtl/tsan_symbolize.h
   rtl/tsan_sync.h
   rtl/tsan_trace.h
-  rtl/tsan_update_shadow_word_inl.h)
+  rtl/tsan_update_shadow_word_inl.h
+  rtl/relacy/schedulers/tsan_generator_paths.h
+  rtl/relacy/tsan_threads_box.h
+  rtl/relacy/tsan_thread_context.h
+  rtl/relacy/tsan_type_traits.h
+  rtl/relacy/tsan_scheduler_engine.h
+  rtl/relacy/tsan_platform.h
+  rtl/relacy/tsan_scheduler.h
+  rtl/relacy/schedulers/tsan_all_states_scheduler.h
+  rtl/relacy/schedulers/tsan_fixed_window_scheduler.h
+  rtl/relacy/schedulers/tsan_full_path_scheduler.h
+  rtl/relacy/schedulers/tsan_parallel_full_path_scheduler.h
+  rtl/relacy/schedulers/tsan_random_scheduler.h
+  rtl/relacy/schedulers/tsan_random_with_different_distributions_scheduler.h
+  rtl/relacy/platforms/tsan_fiber_tls_copy_platform.h
+  rtl/relacy/platforms/tsan_fiber_tls_swap_platfrom.h
+  rtl/relacy/platforms/tsan_pthread_platform.h
+  rtl/relacy/platforms/tsan_platform_type.h
+  rtl/relacy/schedulers/tsan_random_generator.h
+  rtl/relacy/tsan_shared_memory.h
+  rtl/relacy/tsan_shared_vector.h
+  rtl/relacy/tsan_shared_value.h)
 
 set(TSAN_RUNTIME_LIBRARIES)
 add_compiler_rt_component(tsan)
Index: lib/tsan/go/buildgo.sh
===================================================================
--- lib/tsan/go/buildgo.sh
+++ lib/tsan/go/buildgo.sh
@@ -136,7 +136,7 @@
 	cat $F >> $DIR/gotsan.cc
 done
 
-FLAGS=" -I../rtl -I../.. -I../../sanitizer_common -I../../../include -std=c++11 -m64 -Wall -fno-exceptions -fno-rtti -DSANITIZER_GO=1 -DSANITIZER_DEADLOCK_DETECTOR_VERSION=2 $OSCFLAGS"
+FLAGS=" -I../rtl -I../.. -I../../sanitizer_common -I../ -I../../../include -std=c++11 -m64 -Wall -fno-exceptions -fno-rtti -DSANITIZER_GO=1 -DSANITIZER_DEADLOCK_DETECTOR_VERSION=2 $OSCFLAGS"
 if [ "$DEBUG" = "" ]; then
 	FLAGS="$FLAGS -DSANITIZER_DEBUG=0 -O3 -fomit-frame-pointer"
 	if [ "$SUFFIX" = "linux_ppc64le" ]; then
Index: lib/tsan/rtl/relacy/platforms/tsan_fiber_tls_copy_platform.h
===================================================================
--- /dev/null
+++ lib/tsan/rtl/relacy/platforms/tsan_fiber_tls_copy_platform.h
@@ -0,0 +1,35 @@
+#ifndef TSAN_FIBER_TLS_COPY_PLATFORM_H
+#define TSAN_FIBER_TLS_COPY_PLATFORM_H
+
+#include "rtl/relacy/tsan_platform.h"
+#include "rtl/relacy/tsan_threads_box.h"
+
+namespace __tsan {
+namespace __relacy {
+
+class FiberTlsCopyPlatform : public Platform {
+  public:
+   explicit FiberTlsCopyPlatform(ThreadsBox& threads_box);
+
+   ThreadContext* Create(void *th, void *attr, void (*callback)(), void *param) override;
+
+   void Initialize() override;
+
+   PlatformType GetType() override;
+
+   void Yield(ThreadContext *context) override;
+
+   void Start() override;
+
+  private:
+   static constexpr uptr FIBER_STACK_SIZE = 64 * 1024;
+   char *tls_base_;
+   uptr tls_size_;
+   uptr tls_addr_;
+   ThreadsBox& threads_box_;
+};
+
+}
+}
+
+#endif //TSAN_FIBER_TLS_COPY_PLATFORM_H
Index: lib/tsan/rtl/relacy/platforms/tsan_fiber_tls_copy_platform.cc
===================================================================
--- /dev/null
+++ lib/tsan/rtl/relacy/platforms/tsan_fiber_tls_copy_platform.cc
@@ -0,0 +1,124 @@
+#include "tsan_fiber_tls_copy_platform.h"
+#include <ucontext.h>
+#include <rtl/tsan_rtl.h>
+#include <interception/interception.h>
+#include "sanitizer_common/sanitizer_placement_new.h"
+
+namespace __tsan {
+namespace __relacy {
+
+class FiberContext : public ThreadContext {
+  public:
+   explicit FiberContext(ucontext_t *fiber_context = nullptr, char *tls = nullptr, FiberContext *parent = nullptr, int tid = 0)
+           : ThreadContext(tid), ctx_(fiber_context), tls_(tls) {
+       SetParent(parent);
+   }
+
+   ucontext_t *GetFiberContext() {
+       return ctx_;
+   }
+
+   void SetFiberContext(ucontext_t *fiber_context) {
+       ctx_ = fiber_context;
+   }
+
+   char *GetTls() {
+       return tls_;
+   }
+
+   void SetTls(char *tls) {
+       tls_ = tls;
+   }
+
+  private:
+   ucontext_t *ctx_;
+   char *tls_;
+};
+
+FiberTlsCopyPlatform::FiberTlsCopyPlatform(ThreadsBox& threads_box)
+        : threads_box_(threads_box) {
+    uptr stk_addr = 0;
+    uptr stk_size = 0;
+    tls_addr_ = 0;
+    InitTlsSize();
+    GetThreadStackAndTls(true, &stk_addr, &stk_size, &tls_addr_, &tls_size_);
+
+    FiberContext *current_thread = static_cast<FiberContext *>(InternalCalloc(1, sizeof(FiberContext)));
+    new (current_thread) FiberContext{static_cast<ucontext_t *>(InternalCalloc(1, sizeof(ucontext_t))),
+                                     reinterpret_cast<char *>(InternalCalloc(tls_size_, 1)),
+                                     current_thread,
+                                     0};
+
+    ucontext_t &context = *current_thread->GetFiberContext();
+    context.uc_stack.ss_flags = 0;
+    context.uc_link = nullptr;
+
+    tls_base_ = static_cast<char *>(InternalCalloc(tls_size_, 1));
+
+    internal_memcpy(tls_base_, reinterpret_cast<const char *>(tls_addr_), tls_size_);
+    uptr offset = (uptr) cur_thread() - tls_addr_;
+    internal_memset(tls_base_ + offset, 0, sizeof(ThreadState));
+
+    threads_box_.AddRunning(current_thread);
+    threads_box_.SetCurrentThread(current_thread);
+}
+
+static void *empty_call(void *) {
+    return nullptr;
+}
+
+ThreadContext* FiberTlsCopyPlatform::Create(void *th, void *attr, void (*callback)(), void *param) {
+    (void) th;
+    (void) attr;
+
+    FiberContext *fiber_context = static_cast<FiberContext *>(InternalCalloc(1, sizeof(FiberContext)));
+    new(fiber_context) FiberContext{static_cast<ucontext_t *>(InternalCalloc(1, sizeof(ucontext_t)))};
+
+    if (getcontext(fiber_context->GetFiberContext()) == -1) {
+        Printf("FATAL: ThreadSanitizer getcontext error in the moment creating fiber\n");
+        Die();
+    }
+
+    ucontext_t &context = *fiber_context->GetFiberContext();
+    context.uc_stack.ss_sp = InternalCalloc(FIBER_STACK_SIZE, sizeof(char));
+    context.uc_stack.ss_size = FIBER_STACK_SIZE;
+    context.uc_stack.ss_flags = 0;
+    context.uc_link = static_cast<FiberContext*>(threads_box_.GetCurrentThread())->GetFiberContext();
+    fiber_context->SetParent(threads_box_.GetCurrentThread());
+    fiber_context->SetTls(static_cast<char *>(InternalCalloc(tls_size_, 1)));
+    internal_memcpy(fiber_context->GetTls(), tls_base_, tls_size_);
+    makecontext(fiber_context->GetFiberContext(), callback, 1, param);
+    REAL(pthread_create)(th, attr, empty_call, param);
+    return fiber_context;
+}
+
+void FiberTlsCopyPlatform::Initialize() {
+
+}
+
+PlatformType FiberTlsCopyPlatform::GetType() {
+    return PlatformType::FIBER_TLS_COPY;
+}
+
+void FiberTlsCopyPlatform::Yield(ThreadContext *context) {
+    FiberContext *old_thread = static_cast<FiberContext *>(threads_box_.GetCurrentThread());
+    FiberContext *new_thread = static_cast<FiberContext *>(context);
+    threads_box_.SetCurrentThread(context);
+
+    internal_memcpy(old_thread->GetTls(), reinterpret_cast<const void *>(tls_addr_), tls_size_);
+    internal_memcpy(reinterpret_cast<void *>(tls_addr_), new_thread->GetTls(), tls_size_);
+
+    int res = swapcontext(old_thread->GetFiberContext(), new_thread->GetFiberContext());
+
+    if (res != 0) {
+        Printf("FATAL: ThreadSanitizer swapcontext error in the moment yield fiber\n");
+        Die();
+    }
+}
+
+void FiberTlsCopyPlatform::Start() {
+
+}
+
+}
+}
Index: lib/tsan/rtl/relacy/platforms/tsan_fiber_tls_swap_platform.cc
===================================================================
--- /dev/null
+++ lib/tsan/rtl/relacy/platforms/tsan_fiber_tls_swap_platform.cc
@@ -0,0 +1,135 @@
+#include "tsan_fiber_tls_swap_platfrom.h"
+#include <ucontext.h>
+#include <rtl/tsan_rtl.h>
+#include <interception/interception.h>
+#include "sanitizer_common/sanitizer_placement_new.h"
+
+namespace __tsan {
+namespace __relacy {
+
+static unsigned long get_tls_addr() {
+    unsigned long addr;
+    asm("mov %%fs:0, %0" : "=r"(addr));
+    return addr;
+}
+
+static void set_tls_addr(unsigned long addr) {
+    asm("mov %0, %%fs:0" : "+r"(addr));
+}
+
+class FiberContext : public ThreadContext {
+  public:
+   explicit FiberContext(ucontext_t *fiber_context = nullptr, char *tls = nullptr, FiberContext *parent = nullptr, int tid = 0)
+           : ThreadContext(tid), ctx_(fiber_context), tls_(tls) {
+       SetParent(parent);
+   }
+
+   ucontext_t *GetFiberContext() {
+       return ctx_;
+   }
+
+   void SetFiberContext(ucontext_t *fiber_context) {
+       ctx_ = fiber_context;
+   }
+
+   char *GetTls() {
+       return tls_;
+   }
+
+   void SetTls(char *tls) {
+       tls_ = tls;
+   }
+
+  private:
+   ucontext_t *ctx_;
+   char *tls_;
+};
+
+FiberTlsSwapPlatform::FiberTlsSwapPlatform(ThreadsBox &threads_box)
+        : threads_box_(threads_box) {
+    uptr stk_addr = 0;
+    uptr stk_size = 0;
+    uptr tls_addr = 0;
+    InitTlsSize();
+    GetThreadStackAndTls(true, &stk_addr, &stk_size, &tls_addr, &tls_size_);
+    tls_addr = get_tls_addr();
+
+    FiberContext *current_thread = static_cast<FiberContext *>(InternalCalloc(1, sizeof(FiberContext)));
+    new(current_thread) FiberContext{static_cast<ucontext_t *>(InternalCalloc(1, sizeof(ucontext_t))),
+                                     reinterpret_cast<char *>(tls_addr) - tls_size_,
+                                     current_thread,
+                                     0};
+
+    ucontext_t &context = *current_thread->GetFiberContext();
+    context.uc_stack.ss_flags = 0;
+    context.uc_link = nullptr;
+
+    tls_base_ = static_cast<char *>(InternalCalloc(tls_size_, 1));
+
+    internal_memcpy(tls_base_, reinterpret_cast<const char *>(tls_addr) - tls_size_, tls_size_);
+    uptr offset = (uptr) cur_thread() - (tls_addr - tls_size_);
+    internal_memset(tls_base_ + offset, 0, sizeof(ThreadState));
+
+    threads_box_.AddRunning(current_thread);
+    threads_box_.SetCurrentThread(current_thread);
+}
+
+static void *empty_call(void *) {
+    return nullptr;
+}
+
+ThreadContext *FiberTlsSwapPlatform::Create(void *th, void *attr, void (*callback)(), void *param) {
+    (void) th;
+    (void) attr;
+
+    FiberContext *fiber_context = static_cast<FiberContext *>(InternalCalloc(1, sizeof(FiberContext)));
+    new(fiber_context) FiberContext{static_cast<ucontext_t *>(InternalCalloc(1, sizeof(ucontext_t)))};
+
+    if (getcontext(fiber_context->GetFiberContext()) == -1) {
+        Printf("FATAL: ThreadSanitizer getcontext error in the moment creating fiber\n");
+        Die();
+    }
+
+    ucontext_t &context = *fiber_context->GetFiberContext();
+    context.uc_stack.ss_sp = InternalCalloc(FIBER_STACK_SIZE, sizeof(char));
+    context.uc_stack.ss_size = FIBER_STACK_SIZE;
+    context.uc_stack.ss_flags = 0;
+    context.uc_link = static_cast<FiberContext*>(threads_box_.GetCurrentThread())->GetFiberContext();
+    fiber_context->SetParent(threads_box_.GetCurrentThread());
+    fiber_context->SetTls(static_cast<char *>(InternalCalloc(tls_size_, 1)));
+    internal_memcpy(fiber_context->GetTls(), tls_base_, tls_size_);
+    makecontext(fiber_context->GetFiberContext(), callback, 1, param);
+    REAL(pthread_create)(th, attr, empty_call, param);
+    return fiber_context;
+}
+
+void FiberTlsSwapPlatform::Initialize() {
+    uptr descr_addr = (uptr) static_cast<FiberContext *>(threads_box_.GetCurrentThread())->GetTls() + tls_size_;
+    set_tls_addr(descr_addr);
+}
+
+PlatformType FiberTlsSwapPlatform::GetType() {
+    return PlatformType::FIBER_TLS_SWAP;
+}
+
+void FiberTlsSwapPlatform::Yield(ThreadContext *context) {
+    FiberContext *old_thread = static_cast<FiberContext *>(threads_box_.GetCurrentThread());
+    FiberContext *new_thread = static_cast<FiberContext *>(context);
+    threads_box_.SetCurrentThread(context);
+
+    int res = swapcontext(old_thread->GetFiberContext(), new_thread->GetFiberContext());
+
+    Initialize();
+
+    if (res != 0) {
+        Printf("FATAL: ThreadSanitizer swapcontext error in the moment yield fiber\n");
+        Die();
+    }
+}
+
+void FiberTlsSwapPlatform::Start() {
+
+}
+
+}
+}
Index: lib/tsan/rtl/relacy/platforms/tsan_fiber_tls_swap_platfrom.h
===================================================================
--- /dev/null
+++ lib/tsan/rtl/relacy/platforms/tsan_fiber_tls_swap_platfrom.h
@@ -0,0 +1,34 @@
+#ifndef TSAN_FIBER_TLS_SWAP_PLATFROM_H
+#define TSAN_FIBER_TLS_SWAP_PLATFROM_H
+
+#include "rtl/relacy/tsan_platform.h"
+#include "rtl/relacy/tsan_threads_box.h"
+
+namespace __tsan {
+namespace __relacy {
+
+class FiberTlsSwapPlatform : public Platform {
+  public:
+   explicit FiberTlsSwapPlatform(ThreadsBox& threads_box);
+
+   ThreadContext* Create(void *th, void *attr, void (*callback)(), void *param) override;
+
+   void Initialize() override;
+
+   PlatformType GetType() override;
+
+   void Yield(ThreadContext *context) override;
+
+   void Start() override;
+
+  private:
+   static constexpr uptr FIBER_STACK_SIZE = 64 * 1024;
+   char *tls_base_;
+   uptr tls_size_;
+   ThreadsBox& threads_box_;
+};
+
+}
+}
+
+#endif //TSAN_FIBER_TLS_SWAP_PLATFROM_H
Index: lib/tsan/rtl/relacy/platforms/tsan_platform_type.h
===================================================================
--- /dev/null
+++ lib/tsan/rtl/relacy/platforms/tsan_platform_type.h
@@ -0,0 +1,18 @@
+#ifndef TSAN_PLATFORM_TYPE_H
+#define TSAN_PLATFORM_TYPE_H
+
+
+namespace __tsan {
+namespace __relacy {
+
+enum class PlatformType {
+   OS,
+   FIBER_TLS_COPY,
+   FIBER_TLS_SWAP,
+   PTHREAD
+};
+
+}
+}
+
+#endif //TSAN_PLATFORM_TYPE_H
Index: lib/tsan/rtl/relacy/platforms/tsan_pthread_platform.h
===================================================================
--- /dev/null
+++ lib/tsan/rtl/relacy/platforms/tsan_pthread_platform.h
@@ -0,0 +1,51 @@
+#ifndef TSAN_PTHREAD_PLATFORM_H
+#define TSAN_PTHREAD_PLATFORM_H
+
+#include "rtl/relacy/tsan_platform.h"
+#include "rtl/relacy/tsan_threads_box.h"
+
+namespace __tsan {
+namespace __relacy {
+
+
+class PthreadContext : public ThreadContext {
+  public:
+   PthreadContext() : m_wait(true) {
+
+   }
+
+   void SetWait(bool wait) {
+       m_wait = wait;
+   }
+
+   bool GetWait() {
+       return m_wait;
+   }
+
+  private:
+   bool m_wait{};
+};
+
+class PthreadPlatform : public Platform {
+  public:
+   PthreadPlatform(ThreadsBox& threads_box);
+
+   ThreadContext* Create(void *th, void *attr, void (*callback)(), void *param) override;
+
+   void Initialize() override;
+
+   PlatformType GetType() override;
+
+   void Yield(ThreadContext *context) override;
+
+   void Start() override;
+
+  private:
+   ThreadsBox& threads_box_;
+   ThreadContext* last_created_;
+};
+
+}
+}
+
+#endif //TSAN_PTHREAD_PLATFORM_H
Index: lib/tsan/rtl/relacy/platforms/tsan_pthread_platform.cc
===================================================================
--- /dev/null
+++ lib/tsan/rtl/relacy/platforms/tsan_pthread_platform.cc
@@ -0,0 +1,106 @@
+#include <interception/interception.h>
+#include <rtl/tsan_rtl.h>
+#include <zconf.h>
+#include <ucontext.h>
+#include "tsan_pthread_platform.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+
+namespace __tsan {
+namespace __relacy {
+
+
+PthreadPlatform::PthreadPlatform(ThreadsBox& threads_box)
+        : threads_box_(threads_box) {
+    PthreadContext *fiber_context = static_cast<PthreadContext *>(InternalCalloc(1, sizeof(PthreadContext)));
+    new(fiber_context) PthreadContext{};
+    fiber_context->SetParent(threads_box_.GetCurrentThread());
+    fiber_context->SetWait(false);
+    threads_box_.AddRunning(fiber_context);
+    threads_box_.SetCurrentThread(fiber_context);
+}
+
+volatile int exclusion_create = 0;
+
+ThreadContext *PthreadPlatform::Create(void *th, void *attr, void (*callback)(), void *param) {
+    while (__sync_lock_test_and_set(&exclusion_create, 1)) {
+        Printf("FATAL: Double threads in critical section create %d \n", threads_box_.GetCurrentThread()->GetTid());
+        threads_box_.PrintDebugInfo();
+        Die();
+        // Do nothing. This GCC builtin instruction
+        // ensures memory barrier.
+    }
+
+    PthreadContext *fiber_context = static_cast<PthreadContext *>(InternalCalloc(1, sizeof(PthreadContext)));
+    new(fiber_context) PthreadContext{};
+    fiber_context->SetParent(threads_box_.GetCurrentThread());
+    last_created_ = fiber_context;
+    REAL(pthread_create)(th, attr, reinterpret_cast<void *(*)(void *)>(callback), param);
+    return fiber_context;
+}
+
+void PthreadPlatform::Initialize() {
+    PthreadContext* thread = static_cast<PthreadContext*>(last_created_);
+    while(thread->GetWait()) {
+        internal_sched_yield();
+    }
+
+    __sync_synchronize(); // Memory barrier.
+    exclusion_create = 0;
+}
+
+PlatformType PthreadPlatform::GetType() {
+    return PlatformType::PTHREAD;
+}
+
+volatile int exclusion = 0;
+
+void PthreadPlatform::Yield(ThreadContext *context) {
+
+
+    while (__sync_lock_test_and_set(&exclusion, 1)) {
+        //Printf("FATAL: Double threads in critical section %d \n", threads_box_.GetCurrentThread()->GetTid());
+        threads_box_.PrintDebugInfo();
+        //Die();
+        // Do nothing. This GCC builtin instruction
+        // ensures memory barrier.
+    }
+
+    if (context == nullptr) {
+        Printf("FATAL: ThreadSanitizer context is nullptr\n");
+        Die();
+    }
+
+    if (threads_box_.GetCurrentThread() == nullptr) {
+        Printf("FATAL: ThreadSanitizer current thread is nullptr\n");
+        Die();
+    }
+    PthreadContext *new_thread = static_cast<PthreadContext *>(context);
+    PthreadContext *old_thread = static_cast<PthreadContext *>(threads_box_.GetCurrentThread());
+    threads_box_.SetCurrentThread(context);
+    if (!threads_box_.ContainsStoppedByTid(old_thread->GetTid())) {
+        old_thread->SetWait(true);
+    } else {
+        if (old_thread->GetTid() == new_thread->GetTid()) {
+            Printf("FATAL: tids are equals\n");
+            threads_box_.PrintDebugInfo();
+            Die();
+        }
+    }
+    // Stopped threads must not be re-marked as waiting; the wait flag is set above only for still-running threads.
+    new_thread->SetWait(false);
+
+    __sync_synchronize(); // Memory barrier.
+    exclusion = 0;
+
+    while(old_thread->GetWait()) {
+        internal_sched_yield();
+    }
+}
+
+
+
+void PthreadPlatform::Start() {
+}
+
+}
+}
Index: lib/tsan/rtl/relacy/schedulers/tsan_all_states_scheduler.h
===================================================================
--- /dev/null
+++ lib/tsan/rtl/relacy/schedulers/tsan_all_states_scheduler.h
@@ -0,0 +1,38 @@
+#ifndef TSAN_ALL_STATES_SCHEDULER_H
+#define TSAN_ALL_STATES_SCHEDULER_H
+
+#include "rtl/relacy/tsan_scheduler.h"
+#include "rtl/relacy/tsan_shared_vector.h"
+#include "rtl/relacy/tsan_threads_box.h"
+
+namespace __tsan {
+namespace __relacy {
+
+class AllStatesScheduler : public Scheduler {
+  public:
+   explicit AllStatesScheduler(ThreadsBox& threads_box);
+
+   ThreadContext* Yield() override;
+
+   void Start() override;
+
+   void Finish() override;
+
+   bool IsEnd() override;
+
+   void Initialize() override;
+
+   SchedulerType GetType() override;
+
+  private:
+   SharedVector<unsigned long> variants_;
+   SharedVector<unsigned long> used_;
+   ThreadsBox& threads_box_;
+   uptr depth_;
+   uptr iteration_;
+};
+
+}
+}
+
+#endif //TSAN_ALL_STATES_SCHEDULER_H
Index: lib/tsan/rtl/relacy/schedulers/tsan_all_states_scheduler.cc
===================================================================
--- /dev/null
+++ lib/tsan/rtl/relacy/schedulers/tsan_all_states_scheduler.cc
@@ -0,0 +1,83 @@
+#include <cstdlib>
+#include "tsan_all_states_scheduler.h"
+#include "tsan_scheduler_type.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+
+namespace __tsan {
+namespace __relacy {
+
+static int bsr(unsigned long number) {
+    if (number == 0) {
+        return -1;
+    }
+    long position = 0;
+    asm ("bsrq %1, %0" : "=r" (position) : "r" (number));
+    return static_cast<int>(position);
+}
+
+AllStatesScheduler::AllStatesScheduler(ThreadsBox& threads_box)
+    : variants_("variants")
+    , used_("used")
+    , threads_box_(threads_box)
+    , depth_(0)
+    , iteration_(0) {
+
+}
+
+ThreadContext* AllStatesScheduler::Yield() {
+    if (threads_box_.GetCountRunning() == 0) {
+        Printf("FATAL: ThreadSanitizer running threads is not exists\n");
+        Die();
+    }
+
+    if (variants_.Size() == depth_) {
+        variants_.PushBack(0);
+        used_.PushBack(0);
+    }
+
+    variants_[depth_] |= threads_box_.GetRunningBitSet();
+    int tid = bsr(~used_[depth_] & variants_[depth_]);
+    used_[depth_] |= 1UL << (unsigned long)tid;
+    ++depth_;
+    if (threads_box_.ContainsRunningByTid(tid)) {
+        return threads_box_.GetRunningByTid(tid);
+    }
+
+    return threads_box_.GetRunningByIndex(rand() % threads_box_.GetCountRunning());
+}
+
+void AllStatesScheduler::Start() {
+    srand(iteration_);
+}
+
+void AllStatesScheduler::Finish() {
+    iteration_++;
+    variants_.Revalidate();
+    used_.Revalidate();
+}
+
+bool AllStatesScheduler::IsEnd() {
+    if (iteration_ == 0) {
+        return false;
+    }
+
+    for (uptr i = 0; i < variants_.Size(); i++) {
+        if ((~used_[i] & variants_[i]) != 0) {
+            return false;
+        }
+    }
+
+    return true;
+}
+
+
+void AllStatesScheduler::Initialize() {
+
+}
+
+SchedulerType AllStatesScheduler::GetType() {
+    return SchedulerType::ALL_STATES;
+}
+
+}
+}
Index: lib/tsan/rtl/relacy/schedulers/tsan_fixed_window_scheduler.h
===================================================================
--- /dev/null
+++ lib/tsan/rtl/relacy/schedulers/tsan_fixed_window_scheduler.h
@@ -0,0 +1,41 @@
+#ifndef TSAN_FIXED_WINDOW_SCHEDULER_H
+#define TSAN_FIXED_WINDOW_SCHEDULER_H
+
+#include "rtl/relacy/tsan_scheduler.h"
+#include "rtl/relacy/tsan_shared_vector.h"
+#include "rtl/relacy/tsan_threads_box.h"
+
+namespace __tsan {
+namespace __relacy {
+
+class FixedWindowScheduler : public Scheduler {
+  public:
+   FixedWindowScheduler(ThreadsBox& threads_box, int window_size);
+
+   ThreadContext* Yield() override;
+
+   void Start() override;
+
+   void Finish() override;
+
+   bool IsEnd() override;
+
+   void Initialize() override;
+
+   SchedulerType GetType() override;
+  private:
+   ThreadsBox& threads_box_;
+   SharedVector<int> window_paths_;
+   SharedVector<int> window_border_;
+   uptr offset_;
+   SharedValue<uptr> depth_;
+   SharedValue<int> invalidate_pos_;
+   int window_size_;
+   bool is_end_;
+   uptr iteration_;
+};
+
+}
+}
+
+#endif //TSAN_FIXED_WINDOW_SCHEDULER_H
Index: lib/tsan/rtl/relacy/schedulers/tsan_fixed_window_scheduler.cc
===================================================================
--- /dev/null
+++ lib/tsan/rtl/relacy/schedulers/tsan_fixed_window_scheduler.cc
@@ -0,0 +1,96 @@
+#include <cstdlib>
+#include "tsan_fixed_window_scheduler.h"
+#include "tsan_scheduler_type.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+
+namespace __tsan {
+namespace __relacy {
+
+FixedWindowScheduler::FixedWindowScheduler(ThreadsBox& threads_box, int window_size)
+        : threads_box_(threads_box)
+        , window_paths_("window_paths")
+        , window_border_("window_border")
+        , window_size_(window_size)
+        , is_end_(false)
+        , iteration_(0) {
+    invalidate_pos_ = -1;
+}
+
+ThreadContext *FixedWindowScheduler::Yield() {
+    if (offset_ <= depth_ && depth_ < offset_ + window_size_) {
+        if (window_border_.Size() == depth_ - offset_) {
+            window_border_.PushBack(static_cast<int>(threads_box_.GetCountRunning()));
+            window_paths_.PushBack(0);
+        }
+
+        int tid = window_paths_[depth_ - offset_];
+        if (!threads_box_.ContainsRunningByTid(tid)) {
+            if (invalidate_pos_ == -1) {
+                invalidate_pos_ = depth_ - offset_;
+            }
+            depth_++;
+            return threads_box_.GetRunningByIndex(rand() % threads_box_.GetCountRunning());
+        }
+
+        depth_++;
+        return threads_box_.GetRunningByTid(tid);
+    }
+    depth_++;
+    return threads_box_.GetRunningByIndex(rand() % threads_box_.GetCountRunning());
+}
+
+void FixedWindowScheduler::Start() {
+    srand(iteration_);
+    depth_ = 0;
+    invalidate_pos_ = -1;
+}
+
+void FixedWindowScheduler::Finish() {
+    window_paths_.Revalidate();
+    window_border_.Revalidate();
+    if (depth_ < offset_ + window_size_) {
+        is_end_ = true;
+    }
+
+    int p = 1;
+    if (window_paths_.Size() > 0) {
+        for (int i = static_cast<int>(invalidate_pos_ == -1 ? window_paths_.Size() - 1 : min(window_paths_.Size() - 1,
+                                                                                      (uptr) invalidate_pos_));
+             p != 0 && i >= 0; i--) {
+            p += window_paths_[i];
+            window_paths_[i] = p % ((unsigned int) window_border_[i] + 1);
+            p = p / ((unsigned int) window_border_[i] + 1);
+        }
+    } else {
+        p = 0;
+    }
+
+    if (invalidate_pos_ != -1) {
+        for (uptr i = invalidate_pos_ + 1; i < window_paths_.Size(); i++) {
+            window_paths_[i] = 0;
+        }
+    }
+
+    if (p != 0) {
+        window_border_.Resize(0);
+        window_paths_.Resize(0);
+        ++offset_;
+    }
+
+    ++iteration_;
+}
+
+bool FixedWindowScheduler::IsEnd() {
+    return is_end_;
+}
+
+void FixedWindowScheduler::Initialize() {
+
+}
+
+SchedulerType FixedWindowScheduler::GetType() {
+    return SchedulerType::FIXED_WINDOW;
+}
+
+}
+}
Index: lib/tsan/rtl/relacy/schedulers/tsan_full_path_scheduler.h
===================================================================
--- /dev/null
+++ lib/tsan/rtl/relacy/schedulers/tsan_full_path_scheduler.h
@@ -0,0 +1,36 @@
+#ifndef TSAN_FULL_PATH_SCHEDULER_H
+#define TSAN_FULL_PATH_SCHEDULER_H
+
+#include "rtl/relacy/tsan_scheduler.h"
+#include "rtl/relacy/tsan_threads_box.h"
+#include "rtl/relacy/schedulers/tsan_generator_paths.h"
+
+namespace __tsan {
+namespace __relacy {
+
+class FullPathScheduler : public Scheduler {
+  public:
+   explicit FullPathScheduler(ThreadsBox& threads_box);
+
+   ThreadContext* Yield() override;
+
+   void Start() override;
+
+   void Finish() override;
+
+   bool IsEnd() override;
+
+   void Initialize() override;
+
+   SchedulerType GetType() override;
+
+  private:
+   ThreadsBox& threads_box_;
+   GeneratorPaths generator_paths_;
+   uptr iteration_;
+};
+
+}
+}
+
+#endif //TSAN_FULL_PATH_SCHEDULER_H
Index: lib/tsan/rtl/relacy/schedulers/tsan_full_path_scheduler.cc
===================================================================
--- /dev/null
+++ lib/tsan/rtl/relacy/schedulers/tsan_full_path_scheduler.cc
@@ -0,0 +1,52 @@
+#include <cstdlib>
+#include "tsan_full_path_scheduler.h"
+#include "tsan_scheduler_type.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+
+namespace __tsan {
+namespace __relacy {
+
+FullPathScheduler::FullPathScheduler(ThreadsBox& threads_box)
+        : threads_box_(threads_box)
+        , iteration_(0) {
+
+}
+
+ThreadContext* FullPathScheduler::Yield() {
+    if (threads_box_.GetCountRunning() == 0) {
+        Printf("FATAL: ThreadSanitizer running threads is not exists\n");
+        Die();
+    }
+
+    int tid = generator_paths_.Yield(static_cast<int>(threads_box_.GetCountRunning()));
+    if (!threads_box_.ContainsRunningByTid(tid)) {
+        generator_paths_.InvalidateThread();
+        return threads_box_.GetRunningByIndex(rand() % threads_box_.GetCountRunning());
+    }
+
+    return threads_box_.GetRunningByTid(tid);
+}
+
+void FullPathScheduler::Start() {
+    srand(iteration_);
+    generator_paths_.Start();
+}
+
+void FullPathScheduler::Finish() {
+    iteration_++;
+    generator_paths_.Finish();
+}
+
+bool FullPathScheduler::IsEnd() {
+    return generator_paths_.IsEnd();
+}
+
+void FullPathScheduler::Initialize() {
+}
+
+SchedulerType FullPathScheduler::GetType() {
+    return SchedulerType::FULL_PATH;
+}
+
+}
+}
Index: lib/tsan/rtl/relacy/schedulers/tsan_generator_paths.h
===================================================================
--- /dev/null
+++ lib/tsan/rtl/relacy/schedulers/tsan_generator_paths.h
@@ -0,0 +1,38 @@
+#ifndef TSAN_GENERATOR_PATHS_H
+#define TSAN_GENERATOR_PATHS_H
+
+#include "sanitizer_common/sanitizer_libc.h"
+#include "tsan/rtl/relacy/tsan_shared_vector.h"
+
+namespace __tsan {
+namespace __relacy {
+// Enumerates scheduling paths as a mixed-radix counter held in shared memory (survives fork()).
+class GeneratorPaths {
+  public:
+   GeneratorPaths();  // binds to the shared "paths"/"border" vectors
+
+   void Start();  // reset per-iteration replay state (depth, invalidation)
+
+   int Yield(int max_tid);  // choice recorded at the current depth; max_tid bounds the digit base
+
+   void InvalidateThread();  // mark the current depth as diverged from the replay
+
+   bool IsEnd();  // true once the counter overflowed (all paths enumerated)
+
+   void Finish();  // trim to the depth actually reached and advance to the next path
+
+  private:
+   void Next();  // increment the path counter by one
+
+  private:
+   SharedValue<int> invalidate_pos_;  // first diverged depth, -1 if none
+   SharedValue<uptr> depth_;  // current replay depth
+   SharedValue<bool> is_end_;  // set when enumeration is complete
+   SharedVector<int> paths_;  // choice made at each depth (counter digits)
+   SharedVector<int> border_;  // widest max_tid seen per depth; digit base is this value + 1
+};
+
+}
+}
+
+#endif //TSAN_GENERATOR_PATHS_H
Index: lib/tsan/rtl/relacy/schedulers/tsan_generator_paths.cc
===================================================================
--- /dev/null
+++ lib/tsan/rtl/relacy/schedulers/tsan_generator_paths.cc
@@ -0,0 +1,76 @@
+#include "tsan_generator_paths.h"
+#include "rtl/tsan_rtl.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+
+namespace __tsan {
+namespace __relacy {
+
+GeneratorPaths::GeneratorPaths() : paths_("paths"), border_("border") {
+
+}
+// Reset per-iteration replay state; the recorded path itself is kept across iterations.
+void GeneratorPaths::Start() {
+    invalidate_pos_ = -1;
+    depth_ = 0;
+}
+// Returns the recorded choice at this depth, growing paths_/border_ on first visit.
+int GeneratorPaths::Yield(int max_tid) {
+    if (border_.Size() == depth_) {
+        border_.PushBack(max_tid);
+    }
+
+    if (paths_.Size() == depth_) {
+        paths_.PushBack(0);
+    }
+
+    border_[depth_] = max(border_[depth_], max_tid);  // widest base seen at this depth
+    return paths_[depth_++];
+}
+// Records the first depth at which replay diverged (chosen tid was not runnable).
+void GeneratorPaths::InvalidateThread() {
+    if (invalidate_pos_ == -1) {
+        invalidate_pos_ = depth_ - 1;
+    }
+}
+
+bool GeneratorPaths::IsEnd() {
+    return is_end_;
+}
+// Trim the recorded path/borders to the depth actually reached, then advance to the next path.
+void GeneratorPaths::Finish() {
+    paths_.Revalidate();
+    border_.Revalidate();
+
+    paths_.Resize(depth_);
+    border_.Resize(depth_);
+
+    Next();
+}
+// Advance paths_ by one as a mixed-radix counter; digit i has base border_[i]+1.
+void GeneratorPaths::Next() {
+    int p = 1;
+    if (paths_.Size() > 0) {
+        for (int i = static_cast<int>(invalidate_pos_ == -1 ? paths_.Size() - 1 : min(paths_.Size() - 1,
+                                                                                      (uptr) invalidate_pos_));
+             p != 0 && i >= 0; i--) {
+            p += paths_[i];  // add the carry to digit i
+            paths_[i] = p % ((unsigned int) border_[i] + 1);
+            p = p / ((unsigned int) border_[i] + 1);  // carry into digit i-1
+        }
+    } else {
+        p = 0;
+    }
+
+    if (invalidate_pos_ != -1) {
+        for (uptr i = invalidate_pos_ + 1; i < paths_.Size(); i++) {
+            paths_[i] = 0;  // choices past the divergence point were never really taken
+        }
+    }
+
+    if (p != 0) {
+        is_end_ = true;  // carry out of the most significant digit: all paths done
+    }
+}
+
+}
+}
\ No newline at end of file
Index: lib/tsan/rtl/relacy/schedulers/tsan_parallel_full_path_scheduler.h
===================================================================
--- /dev/null
+++ lib/tsan/rtl/relacy/schedulers/tsan_parallel_full_path_scheduler.h
@@ -0,0 +1,28 @@
+#ifndef TSAN_PARALLEL_FULL_PATH_SCHEDULER_H
+#define TSAN_PARALLEL_FULL_PATH_SCHEDULER_H
+
+
+#include "rtl/relacy/tsan_scheduler.h"
+
+namespace __tsan {
+namespace __relacy {
+
+class ParallelFullPathScheduler : public Scheduler {
+  public:
+   ThreadContext* Yield() override;
+
+   void Start() override;
+
+   void Finish() override;
+
+   bool IsEnd() override;
+
+   void Initialize() override;
+
+   SchedulerType GetType() override;
+};
+
+}
+}
+
+#endif //TSAN_PARALLEL_FULL_PATH_SCHEDULER_H
Index: lib/tsan/rtl/relacy/schedulers/tsan_parallel_full_path_scheduler.cc
===================================================================
--- /dev/null
+++ lib/tsan/rtl/relacy/schedulers/tsan_parallel_full_path_scheduler.cc
@@ -0,0 +1,32 @@
+#include "tsan_parallel_full_path_scheduler.h"
+#include "tsan_scheduler_type.h"
+
+namespace __tsan {
+namespace __relacy {
+
+ThreadContext* ParallelFullPathScheduler::Yield() {
+    return nullptr;
+}
+
+void ParallelFullPathScheduler::Start() {
+
+}
+
+void ParallelFullPathScheduler::Finish() {
+
+}
+
+bool ParallelFullPathScheduler::IsEnd() {
+    return false;
+}
+
+void ParallelFullPathScheduler::Initialize() {
+
+}
+
+SchedulerType ParallelFullPathScheduler::GetType() {
+    return SchedulerType::PARALLEL_FULL_PATH;
+}
+
+}
+}
\ No newline at end of file
Index: lib/tsan/rtl/relacy/schedulers/tsan_random_generator.h
===================================================================
--- /dev/null
+++ lib/tsan/rtl/relacy/schedulers/tsan_random_generator.h
@@ -0,0 +1,30 @@
+#ifndef TSAN_RANDOM_GENERATOR_H
+#define TSAN_RANDOM_GENERATOR_H
+
+#include <random>
+#include <functional>
+#include <vector>
+#include "rtl/relacy/tsan_shared_value.h"
+
+namespace __tsan {
+namespace __relacy {
+
+class RandomGenerator {
+  public:
+   RandomGenerator();
+
+   int Rand(int b);
+
+   void NextGenerator();
+
+  private:
+   SharedValue<int> count_calls_;
+   int generator_;
+   std::random_device rd;
+   std::vector<std::function<int(int)>> generators_;
+};
+
+}
+}
+
+#endif //TSAN_RANDOM_GENERATOR_H
Index: lib/tsan/rtl/relacy/schedulers/tsan_random_generator.cc
===================================================================
--- /dev/null
+++ lib/tsan/rtl/relacy/schedulers/tsan_random_generator.cc
@@ -0,0 +1,76 @@
+#include "rtl/relacy/schedulers/tsan_random_generator.h"
+
+namespace __tsan {
+namespace __relacy {
+// Pool of differently-distributed integer generators; NextGenerator() rotates through them.
+RandomGenerator::RandomGenerator() {
+    generator_ = 0;
+    count_calls_ = 0;
+    generators_ = {
+            [&](int b) {
+                static std::mt19937 gen(rd());
+                std::uniform_int_distribution<> dis(0, b);
+                return abs(static_cast<int>(dis(gen)) % b);  // NOTE(review): % b reduction is slightly biased — confirm acceptable
+            },
+            [&](int b) {
+                static std::mt19937 gen(rd());
+                std::binomial_distribution<> dis(b, 0.5);
+                return abs(static_cast<int>(dis(gen)) % b);
+            },
+            [&](int b) {
+                static std::mt19937 gen(rd());
+                std::negative_binomial_distribution<> dis(b, 0.75);
+                return abs(static_cast<int>(dis(gen)) % b);
+            },
+            [&](int b) {
+                static std::mt19937 gen(rd());
+                std::geometric_distribution<> dis;
+                return abs(static_cast<int>(dis(gen)) % b);
+            },
+            [&](int b) {
+                return abs(static_cast<int>(rd()) % b);  // raw hardware entropy, no distribution
+            },
+            [&](int b) {
+                static std::mt19937 gen(rd());
+                std::poisson_distribution<> dis(4);
+                return abs(static_cast<int>(dis(gen)) % b);
+            },
+            [&](int b) {
+                static std::mt19937 gen(rd());
+                std::exponential_distribution<> dis(1);
+                return abs(static_cast<int>(dis(gen)) % b);
+            },
+            [&](int b) {
+                static std::mt19937 gen(rd());
+                std::weibull_distribution<> dis;
+                return abs(static_cast<int>(dis(gen)) % b);
+            },
+            [&](int b) {
+                static std::mt19937 gen(rd());
+                std::normal_distribution<> dis(b, 2);
+                return abs(static_cast<int>(dis(gen)) % b);
+            },
+            [&](int b) {
+                static std::mt19937 gen(rd());
+                std::lognormal_distribution<> dis(b, 0.25);
+                return abs(static_cast<int>(dis(gen)) % b);
+            }
+    };
+
+}
+// Draws from the current distribution, reduced to the range [0, b).
+int RandomGenerator::Rand(int b) {
+    ++count_calls_;
+    return generators_[generator_ % generators_.size()](b);
+}
+// Replays this iteration's number of draws, then advances to the next distribution — presumably to keep runs comparable; TODO confirm intent.
+void RandomGenerator::NextGenerator() {
+    for (int i = 0; i < count_calls_; i++) {
+        generators_[generator_ % generators_.size()](1);
+    }
+    count_calls_ = 0;
+    ++generator_;
+}
+
+}
+}
\ No newline at end of file
Index: lib/tsan/rtl/relacy/schedulers/tsan_random_scheduler.h
===================================================================
--- /dev/null
+++ lib/tsan/rtl/relacy/schedulers/tsan_random_scheduler.h
@@ -0,0 +1,34 @@
+#ifndef TSAN_RANDOM_SCHEDULER_H
+#define TSAN_RANDOM_SCHEDULER_H
+
+#include "rtl/relacy/tsan_scheduler.h"
+#include "rtl/relacy/tsan_threads_box.h"
+
+namespace __tsan {
+namespace __relacy {
+
+class RandomScheduler : public Scheduler {
+  public:
+   RandomScheduler(ThreadsBox& threads_box);
+
+   ThreadContext* Yield() override;
+
+   void Start() override;
+
+   void Finish() override;
+
+   bool IsEnd() override;
+
+   void Initialize() override;
+
+   SchedulerType GetType() override;
+
+  private:
+   ThreadsBox& threads_box_;
+   uptr iteration_;
+};
+
+}
+}
+
+#endif //TSAN_RANDOM_SCHEDULER_H
Index: lib/tsan/rtl/relacy/schedulers/tsan_random_scheduler.cc
===================================================================
--- /dev/null
+++ lib/tsan/rtl/relacy/schedulers/tsan_random_scheduler.cc
@@ -0,0 +1,44 @@
+#include <cstdlib>
+#include "tsan_random_scheduler.h"
+#include "tsan_scheduler_type.h"
+
+namespace __tsan {
+namespace __relacy {
+
+RandomScheduler::RandomScheduler(ThreadsBox& threads_box)
+        : threads_box_(threads_box)
+        , iteration_(0) {
+
+}
+// Picks a uniformly random runnable thread; dies if none is runnable (deadlock).
+ThreadContext* RandomScheduler::Yield() {
+    if (threads_box_.GetCountRunning() == 0) {
+        Printf("FATAL: ThreadSanitizer random scheduler running threads is not exists\n");
+        Die();
+    }
+
+    return threads_box_.GetRunningByIndex(rand() % threads_box_.GetCountRunning());
+}
+
+void RandomScheduler::Start() {
+    srand(iteration_);  // reseed per iteration so each run is reproducible
+}
+
+void RandomScheduler::Finish() {
+    iteration_++;
+}
+
+bool RandomScheduler::IsEnd() {
+    return false;  // random exploration never terminates on its own
+}
+
+void RandomScheduler::Initialize() {
+
+}
+
+SchedulerType RandomScheduler::GetType() {
+    return SchedulerType::RANDOM;
+}
+
+}
+}
\ No newline at end of file
Index: lib/tsan/rtl/relacy/schedulers/tsan_random_with_different_distributions_scheduler.h
===================================================================
--- /dev/null
+++ lib/tsan/rtl/relacy/schedulers/tsan_random_with_different_distributions_scheduler.h
@@ -0,0 +1,35 @@
+#ifndef TSAN_RANDOM_SCHEDULER_WITH_DIFFERENT_DISTRIBUTIONS_H
+#define TSAN_RANDOM_SCHEDULER_WITH_DIFFERENT_DISTRIBUTIONS_H
+
+#include "rtl/relacy/tsan_scheduler.h"
+#include "rtl/relacy/tsan_threads_box.h"
+#include "rtl/relacy/schedulers/tsan_random_generator.h"
+
+namespace __tsan {
+namespace __relacy {
+
+class RandomWithDifferentDistributionsScheduler : public Scheduler {
+  public:
+   RandomWithDifferentDistributionsScheduler(ThreadsBox& threads_box);
+
+   ThreadContext* Yield() override;
+
+   void Start() override;
+
+   void Finish() override;
+
+   bool IsEnd() override;
+
+   void Initialize() override;
+
+   SchedulerType GetType() override;
+
+  private:
+   ThreadsBox& threads_box_;
+   RandomGenerator generator_;
+};
+
+}
+}
+
+#endif //TSAN_RANDOM_SCHEDULER_WITH_DIFFERENT_DISTRIBUTIONS_H
Index: lib/tsan/rtl/relacy/schedulers/tsan_random_with_different_distributions_scheduler.cc
===================================================================
--- /dev/null
+++ lib/tsan/rtl/relacy/schedulers/tsan_random_with_different_distributions_scheduler.cc
@@ -0,0 +1,42 @@
+#include "tsan_random_with_different_distributions_scheduler.h"
+#include "tsan_scheduler_type.h"
+
+namespace __tsan {
+namespace __relacy {
+
+RandomWithDifferentDistributionsScheduler::RandomWithDifferentDistributionsScheduler(ThreadsBox& thread_box)
+        : threads_box_(thread_box) {
+
+}
+
+ThreadContext* RandomWithDifferentDistributionsScheduler::Yield() {
+    if (threads_box_.GetCountRunning() == 0) {
+        Printf("FATAL: ThreadSanitizer running threads is not exists\n");
+        Die();
+    }
+
+    return threads_box_.GetRunningByIndex(static_cast<uptr>(generator_.Rand(static_cast<int>(threads_box_.GetCountRunning()))));
+}
+
+void RandomWithDifferentDistributionsScheduler::Start() {
+
+}
+
+void RandomWithDifferentDistributionsScheduler::Finish() {
+    generator_.NextGenerator();
+}
+
+bool RandomWithDifferentDistributionsScheduler::IsEnd() {
+    return false;
+}
+
+void RandomWithDifferentDistributionsScheduler::Initialize() {
+
+}
+
+SchedulerType RandomWithDifferentDistributionsScheduler::GetType() {
+    return SchedulerType::RANDOM_WITH_DIFFERENT_DISTRIBUTIONS;
+}
+
+}
+}
\ No newline at end of file
Index: lib/tsan/rtl/relacy/schedulers/tsan_scheduler_type.h
===================================================================
--- /dev/null
+++ lib/tsan/rtl/relacy/schedulers/tsan_scheduler_type.h
@@ -0,0 +1,21 @@
+#ifndef TSAN_SCHEDULER_TYPE_H
+#define TSAN_SCHEDULER_TYPE_H
+
+
+namespace __tsan {
+namespace __relacy {
+// Identifies the active scheduling strategy (selected via flags()->scheduler_type).
+enum class SchedulerType {
+   OS,  // no strategy configured: native OS scheduling
+   ALL_STATES,
+   FIXED_WINDOW,
+   FULL_PATH,  // exhaustive path enumeration (GeneratorPaths)
+   PARALLEL_FULL_PATH,  // currently a stub implementation
+   RANDOM,  // uniform random choice, reseeded per iteration
+   RANDOM_WITH_DIFFERENT_DISTRIBUTIONS
+};
+
+}
+}
+
+#endif //TSAN_SCHEDULER_TYPE_H
Index: lib/tsan/rtl/relacy/tsan_platform.h
===================================================================
--- /dev/null
+++ lib/tsan/rtl/relacy/tsan_platform.h
@@ -0,0 +1,27 @@
+#ifndef TSAN_RELACY_PLATFORM_H
+#define TSAN_RELACY_PLATFORM_H
+
+#include "tsan_thread_context.h"
+#include "platforms/tsan_platform_type.h"
+
+namespace __tsan {
+namespace __relacy {
+// Execution backend: creates user threads and performs context switches between them.
+class Platform {
+  public:
+   virtual ThreadContext* Create(void *th, void *attr, void (*callback)(), void *param) = 0;  // mirrors pthread_create arguments
+
+   virtual void Initialize() = 0;
+
+   virtual PlatformType GetType() = 0;  // concrete backend identifier
+
+   virtual void Yield(ThreadContext *context) = 0;  // switch execution to the given thread
+
+   virtual void Start() = 0;
+
+   virtual ~Platform() = default;
+};
+
+}
+}
+#endif //TSAN_RELACY_PLATFORM_H
Index: lib/tsan/rtl/relacy/tsan_scheduler.h
===================================================================
--- /dev/null
+++ lib/tsan/rtl/relacy/tsan_scheduler.h
@@ -0,0 +1,29 @@
+#ifndef TSAN_SCHEDULER_H
+#define TSAN_SCHEDULER_H
+
+#include "schedulers/tsan_scheduler_type.h"
+#include "tsan_thread_context.h"
+
+namespace __tsan {
+namespace __relacy {
+// Strategy interface: decides which thread runs next during systematic testing.
+class Scheduler {
+  public:
+   virtual ThreadContext* Yield() = 0;  // choose the next thread to run
+
+   virtual void Start() = 0;  // called at the start of each exploration iteration
+
+   virtual void Finish() = 0;  // called after an iteration's child process exits
+
+   virtual void Initialize() = 0;  // one-time setup before the first iteration
+
+   virtual bool IsEnd() = 0;  // true when the strategy exhausted its search space
+
+   virtual SchedulerType GetType() = 0;  // concrete strategy identifier
+
+   virtual ~Scheduler() = default;
+};
+
+}
+}
+#endif //TSAN_SCHEDULER_H
Index: lib/tsan/rtl/relacy/tsan_scheduler_engine.h
===================================================================
--- /dev/null
+++ lib/tsan/rtl/relacy/tsan_scheduler_engine.h
@@ -0,0 +1,69 @@
+#ifndef TSAN_FIBER_H
+#define TSAN_FIBER_H
+
+/*
+ * Only Linux support
+ */
+
+#include "sanitizer_common/sanitizer_vector.h"
+#include "tsan/rtl/relacy/schedulers/tsan_generator_paths.h"
+#include "tsan_threads_box.h"
+#include "tsan_scheduler.h"
+#include "tsan_platform.h"
+
+namespace __tsan {
+namespace __relacy {
+// Facade used by the interceptors: a Scheduler picks threads, a Platform switches to them.
+class SchedulerEngine {
+  public:
+   SchedulerEngine();  // builds scheduler/platform from TSAN_OPTIONS flags
+
+   ThreadContext *CreateFiber(void *th, void *attr, void (*callback)(), void *param);  // pthread_create hook
+
+   void Yield(ThreadContext *context);  // switch to a specific thread
+
+   void Yield();  // let the scheduler pick the next thread, then switch
+
+   void AddFiberContext(int tid, ThreadContext *context);
+
+   void Join(int wait_tid);  // pthread_join hook
+
+   void StopThread();  // marks the current thread finished and wakes joiners
+
+   int Lock(void *m);  // pthread_mutex_lock hook
+
+   int Unlock(void *m);  // pthread_mutex_unlock hook
+
+   void Initialize();
+
+   ThreadContext* GetParent();
+
+   SchedulerType GetSchedulerType();  // SchedulerType::OS when no scheduler configured
+
+   PlatformType GetPlatformType();  // PlatformType::OS when no platform configured
+
+   int CondWait(void *c, void *m);  // pthread_cond_wait hook
+
+   int Signal(void *c);  // pthread_cond_signal hook
+
+   int Broadcast(void *c);  // pthread_cond_broadcast hook
+
+  private:
+   void Start();  // fork-based iteration loop (parent supervises, child runs the test)
+
+  private:
+   ThreadsBox threads_box_;  // shared bookkeeping of thread states
+   Scheduler *scheduler_;  // null => OS scheduling
+   Platform *platform_;  // null => OS threading
+};
+
+
+}
+}
+
+#if SANITIZER_RELACY_SCHEDULER
+extern ::__tsan::__relacy::SchedulerEngine _scheduler_engine;
+#endif
+
+
+#endif // TSAN_FIBER_H
Index: lib/tsan/rtl/relacy/tsan_scheduler_engine.cc
===================================================================
--- /dev/null
+++ lib/tsan/rtl/relacy/tsan_scheduler_engine.cc
@@ -0,0 +1,374 @@
+#include <interception/interception.h>
+#include <rtl/tsan_interceptors.h>
+#include <ucontext.h>
+#include <cstring>
+#include <cstdlib>
+#include <ctime>
+#include <rtl/relacy/tsan_scheduler_engine.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <sys/wait.h>
+#include <sys/mman.h>
+#include "rtl/relacy/schedulers/tsan_random_generator.h"
+#include "rtl/tsan_rtl.h"
+#include <linux/unistd.h>
+#include <asm/ldt.h>
+#include <sys/syscall.h>
+#include "rtl/relacy/platforms/tsan_pthread_platform.h"
+
+namespace __interception {
+extern int (*real_pthread_mutex_unlock)(void*);
+extern int (*real_pthread_mutex_lock)(void*);
+extern int (*real_pthread_mutex_trylock)(void*);
+}
+
+//schedulers
+#include "rtl/relacy/schedulers/tsan_all_states_scheduler.h"
+#include "rtl/relacy/schedulers/tsan_fixed_window_scheduler.h"
+#include "rtl/relacy/schedulers/tsan_full_path_scheduler.h"
+#include "rtl/relacy/schedulers/tsan_parallel_full_path_scheduler.h"
+#include "rtl/relacy/schedulers/tsan_random_scheduler.h"
+#include "rtl/relacy/schedulers/tsan_random_with_different_distributions_scheduler.h"
+
+//platforms
+#include "rtl/relacy/platforms/tsan_fiber_tls_swap_platfrom.h"
+#include "rtl/relacy/platforms/tsan_fiber_tls_copy_platform.h"
+#include "rtl/relacy/platforms/tsan_pthread_platform.h"
+
+namespace __tsan {
+namespace __relacy {
+
+SchedulerEngine::SchedulerEngine() {
+    if (!strcmp(flags()->scheduler_type, "")) {
+        scheduler_ = nullptr;  // empty flag: no strategy, OS scheduling
+    } else if (!strcmp(flags()->scheduler_type, "random")) {
+        scheduler_ = static_cast<RandomScheduler *>(InternalCalloc(1, sizeof(RandomScheduler)));
+        new(scheduler_) RandomScheduler{threads_box_};  // runtime allocator + placement new
+    } else if (!strcmp(flags()->scheduler_type, "all_states")) {
+        scheduler_ = static_cast<AllStatesScheduler *>(InternalCalloc(1, sizeof(AllStatesScheduler)));
+        new(scheduler_) AllStatesScheduler{threads_box_};
+    } else if (!strcmp(flags()->scheduler_type, "full_path")) {
+        scheduler_ = static_cast<FullPathScheduler *>(InternalCalloc(1, sizeof(FullPathScheduler)));
+        new(scheduler_) FullPathScheduler{threads_box_};
+    } else if (!strcmp(flags()->scheduler_type, "parallel_full_path")) {
+        scheduler_ = static_cast<ParallelFullPathScheduler *>(InternalCalloc(1, sizeof(ParallelFullPathScheduler)));
+        new(scheduler_) ParallelFullPathScheduler{};
+    } else if (!strcmp(flags()->scheduler_type, "random_with_different_distributions")) {
+        scheduler_ = static_cast<RandomWithDifferentDistributionsScheduler *>(InternalCalloc(1,
+                                                                                             sizeof(RandomWithDifferentDistributionsScheduler)));
+        new(scheduler_) RandomWithDifferentDistributionsScheduler{threads_box_};
+    } else if (!strcmp(flags()->scheduler_type, "fixed_window")) {
+        scheduler_ = static_cast<FixedWindowScheduler *>(InternalCalloc(1, sizeof(FixedWindowScheduler)));
+        new(scheduler_) FixedWindowScheduler{threads_box_, 3};  // window size 3 is hard-coded
+    } else {
+        Printf("FATAL: ThreadSanitizer invalid scheduler type. Please check TSAN_OPTIONS!\n");
+        Die();
+    }
+
+    if (!strcmp(flags()->scheduler_platform, "")) {
+        platform_ = nullptr;  // empty flag: no virtual platform, OS threading
+    } else if (!strcmp(flags()->scheduler_platform, "fiber_tls_swap")) {
+        platform_ = static_cast<FiberTlsSwapPlatform *>(InternalCalloc(1, sizeof(FiberTlsSwapPlatform)));
+        new(platform_) FiberTlsSwapPlatform{threads_box_};
+    } else if (!strcmp(flags()->scheduler_platform, "fiber_tls_copy")) {
+        platform_ = static_cast<FiberTlsCopyPlatform *>(InternalCalloc(1, sizeof(FiberTlsCopyPlatform)));
+        new(platform_) FiberTlsCopyPlatform{threads_box_};
+    } else if (!strcmp(flags()->scheduler_platform, "pthread")) {
+        platform_ = static_cast<PthreadPlatform *>(InternalCalloc(1, sizeof(PthreadPlatform)));
+        new(platform_) PthreadPlatform{threads_box_};
+    } else {
+        Printf("FATAL: ThreadSanitizer invalid platform type. Please check TSAN_OPTIONS!\n");
+        Die();
+    }
+
+    if (scheduler_ == nullptr || platform_ == nullptr) {
+        if (scheduler_ != nullptr || platform_ != nullptr) {
+            Printf("FATAL: ThreadSanitizer platform + scheduler invalid combination\n");  // both must be set, or neither
+            Die();
+        }
+
+        return;  // neither set: run under plain OS scheduling
+    }
+
+    Printf("Platform %s Type %s\n", flags()->scheduler_platform, flags()->scheduler_type);
+
+    Start();  // enter the fork-based exploration loop
+}
+
+ThreadContext *SchedulerEngine::CreateFiber(void *th, void *attr, void (*callback)(), void *param) {
+    return GetPlatformType() == PlatformType::OS ? nullptr : platform_->Create(th, attr, callback, param);  // OS mode: let libc pthread_create run unmodified
+}
+
+void SchedulerEngine::Yield(ThreadContext *context) {
+    if (GetPlatformType() == PlatformType::OS) {
+        return;
+    }
+    //Printf("yield %d %d\n", context->GetTid(), threads_box_.GetCountRunning());
+    platform_->Yield(context);  // context switch to the chosen thread
+}
+
+void SchedulerEngine::AddFiberContext(int tid, ThreadContext *context) {
+    if (GetPlatformType() == PlatformType::OS) {
+        return;
+    }
+    context->SetTid(tid);
+    threads_box_.ExtractStoppedByTid(tid);  // tid may be reused from a finished thread
+    threads_box_.AddRunning(context);
+}
+
+void SchedulerEngine::Yield() {
+    if (GetPlatformType() == PlatformType::OS) {
+        return;
+    }
+    Yield(scheduler_->Yield());  // ask the strategy for the next thread, then switch
+}
+
+void SchedulerEngine::Join(int wait_tid) {
+    if (GetPlatformType() == PlatformType::OS) {
+        return;
+    }
+
+    if (threads_box_.ContainsStoppedByTid(wait_tid)) {
+        return;  // target already finished: join is a no-op
+    }
+
+    if (threads_box_.GetCountRunning() == 0) {
+        Printf("FATAL: ThreadSanitizer joining last thread\n");
+        Die();
+    }
+
+    ThreadContext *context = threads_box_.ExtractRunningByTid(threads_box_.GetCurrentThread()->GetTid());
+
+    if (context == nullptr) {
+        Printf("FATAL: ThreadSanitizer is not existing thread\n");
+        Die();
+    }
+
+    ThreadContext *wait_context = threads_box_.GetRunningByTid(wait_tid);
+    // Search every queue the target thread could currently be parked in.
+    if (wait_context == nullptr) {
+        wait_context = threads_box_.GetConditionVariableThreadByTid(wait_tid);
+    }
+    if (wait_context == nullptr) {
+        if (threads_box_.ContainsWaitingByTid(wait_tid)) {
+            wait_context = threads_box_.GetWaitingByTid(wait_tid).GetCurrentThread();
+        }
+    }
+
+    if (wait_context == nullptr) {
+        wait_context = threads_box_.GetSlepingByTid(wait_tid);
+    }
+
+    if (wait_context == nullptr) {
+        wait_context = threads_box_.GetStoppedByTid(wait_tid);
+    }
+
+    if (wait_context == nullptr) {
+        if (threads_box_.ContainsJoiningByTid(wait_tid)) {
+            wait_context = threads_box_.GetJoiningByTid(wait_tid).GetCurrentThread();
+        }
+    }
+
+    threads_box_.AddJoining(JoinContext{context, wait_context});  // park caller until wait_tid stops
+
+
+    if (PthreadContext* ctx = static_cast<PthreadContext *>(threads_box_.GetCurrentThread())) {
+        if (ctx->GetWait()) {
+            Printf("ThreadSanitizer: thread was stopped\n");
+            Die();
+        }
+    }
+}
+
+void SchedulerEngine::StopThread() {
+    if (GetPlatformType() == PlatformType::OS) {
+        return;
+    }
+
+    threads_box_.AddStopped(threads_box_.ExtractRunningByTid(threads_box_.GetCurrentThread()->GetTid()));  // current thread is done
+    threads_box_.WakeupJoiningByWaitTid(threads_box_.GetCurrentThread()->GetTid());  // release any joiners
+
+    if (PthreadContext* ctx = static_cast<PthreadContext *>(threads_box_.GetCurrentThread())) {
+        if (ctx->GetWait()) {
+            Printf("ThreadSanitizer: thread was stopped\n");
+            Die();
+        }
+    }
+}
+
+
+void SchedulerEngine::Start() {
+    if (GetPlatformType() == PlatformType::OS) {
+        return;
+    }
+    scheduler_->Initialize();
+    while (!scheduler_->IsEnd()) {
+        pid_t pid = fork();  // one child process per exploration iteration
+        //pid_t pid = 0;
+        if (pid < 0) {
+            Printf("FATAL: ThreadSanitizer fork error\n");
+            Die();
+        }
+        if (pid != 0) {  // parent: supervise the child, then move to the next path
+            scheduler_->Start();
+            int status;
+            if (waitpid(pid, &status, WUNTRACED | WCONTINUED) == -1) {
+                Printf("FATAL: ThreadSanitizer waitpid error\n");
+                Die();
+            }
+            if (WEXITSTATUS(status) != 0) {  // NOTE(review): no WIFEXITED check before WEXITSTATUS — confirm signal exits are handled as intended
+                Printf("FATAL: ThreadSanitizer invalid status code\n");
+                Die();
+            }
+            scheduler_->Finish();
+        } else {
+            break;  // child: escape the loop and actually run the test body
+        }
+    }
+    platform_->Start();
+    if (scheduler_->IsEnd()) {
+        scheduler_->Start();
+    }
+}
+
+int SchedulerEngine::Lock(void *m) {
+    if (GetPlatformType() == PlatformType::OS) {
+        return REAL(pthread_mutex_lock)(m);  // no virtual platform: defer to libc
+    }
+
+    int current_tid = threads_box_.GetCurrentThread()->GetTid();
+
+    while (threads_box_.ExistsMutex(m)) {  // mutex already held: park this thread and let others run
+        if (!threads_box_.ContainsWaitingByTid(current_tid)) {
+            threads_box_.AddWaiting(MutexContext{threads_box_.ExtractRunningByTid(current_tid), m});
+        }
+        Yield();
+    }
+
+    threads_box_.AddMutex(m);  // take ownership of the mutex
+
+    Yield();
+
+
+    if (PthreadContext* ctx = static_cast<PthreadContext *>(threads_box_.GetCurrentThread())) {
+        if (ctx->GetWait()) {
+            Printf("ThreadSanitizer: thread was stopped\n");
+            Die();
+        }
+    }
+
+    return 0;
+}
+
+int SchedulerEngine::Unlock(void *m) {
+    if (GetPlatformType() == PlatformType::OS) {
+        return REAL(pthread_mutex_unlock)(m);
+    }
+
+    threads_box_.ExtractMutex(m);  // release ownership
+
+    ThreadContext* wakeup = threads_box_.ExtractWaitByMutex(m);  // wake one waiter, if any
+    if (wakeup != nullptr) {
+        threads_box_.AddRunning(wakeup);
+    }
+
+    Yield();
+
+
+    if (PthreadContext* ctx = static_cast<PthreadContext *>(threads_box_.GetCurrentThread())) {
+        if (ctx->GetWait()) {
+            Printf("ThreadSanitizer: thread was stopped\n");
+            Die();
+        }
+    }
+
+    return 0;
+}
+
+int SchedulerEngine::CondWait(void *c, void *m) {
+    // NOTE(review): no PlatformType::OS guard here, unlike Lock/Unlock — confirm this path is never reached in OS mode.
+    threads_box_.ExtractMutex(m);  // release the mutex, as pthread_cond_wait requires
+
+    ThreadContext* wakeup = threads_box_.ExtractWaitByMutex(m);  // hand the mutex to a waiter, if any
+    if (wakeup != nullptr) {
+        threads_box_.AddRunning(wakeup);
+    }
+
+    int current_tid = threads_box_.GetCurrentThread()->GetTid();
+
+    threads_box_.AddConditionVariable(c, threads_box_.ExtractRunningByTid(current_tid));  // sleep on the condvar
+    Yield();
+
+    Lock(m);  // reacquire the mutex once signalled
+
+
+    if (PthreadContext* ctx = static_cast<PthreadContext *>(threads_box_.GetCurrentThread())) {
+        if (ctx->GetWait()) {
+            Printf("ThreadSanitizer: thread was stopped\n");
+            Die();
+        }
+    }
+    return 0;
+}
+
+int SchedulerEngine::Signal(void *c) {
+    if (ThreadContext* ctx = threads_box_.ExtractWaitByConditionVariable(c)) {  // wake one sleeper
+        threads_box_.AddRunning(ctx);
+    }
+    Yield();
+
+
+    if (PthreadContext* ctx = static_cast<PthreadContext *>(threads_box_.GetCurrentThread())) {
+        if (ctx->GetWait()) {
+            Printf("ThreadSanitizer: thread was stopped\n");
+            Die();
+        }
+    }
+    return 0;
+}
+
+int SchedulerEngine::Broadcast(void *c) {
+    while (ThreadContext* ctx = threads_box_.ExtractWaitByConditionVariable(c)) {  // wake every sleeper
+        threads_box_.AddRunning(ctx);
+    }
+    Yield();
+
+
+    if (PthreadContext* ctx = static_cast<PthreadContext *>(threads_box_.GetCurrentThread())) {
+        if (ctx->GetWait()) {
+            Printf("ThreadSanitizer: thread was stopped\n");
+            Die();
+        }
+    }
+    return 0;
+}
+
+ThreadContext *SchedulerEngine::GetParent() {
+    return GetSchedulerType() == SchedulerType::OS ? nullptr : threads_box_.GetCurrentThread()->GetParent();  // parent of the currently running thread
+}
+
+SchedulerType SchedulerEngine::GetSchedulerType() {
+    return scheduler_ ? scheduler_->GetType() : SchedulerType::OS;  // null scheduler => OS mode
+}
+
+PlatformType SchedulerEngine::GetPlatformType() {
+    return platform_ ? platform_->GetType() : PlatformType::OS;  // null platform => OS mode
+}
+
+void SchedulerEngine::Initialize() {
+    if (GetPlatformType() == PlatformType::OS) {
+        return;
+    }
+
+    platform_->Initialize();
+}
+
+
+}
+
+
+}
+
+#if SANITIZER_RELACY_SCHEDULER
+::__tsan::__relacy::SchedulerEngine _scheduler_engine;
+#endif
Index: lib/tsan/rtl/relacy/tsan_shared_memory.h
===================================================================
--- /dev/null
+++ lib/tsan/rtl/relacy/tsan_shared_memory.h
@@ -0,0 +1,23 @@
+#ifndef TSAN_SHARED_MEMORY_H
+#define TSAN_SHARED_MEMORY_H
+
+namespace __tsan {
+namespace __relacy {
+
+void *CreateSharedMemory(unsigned int size, int fd = 0);
+
+void *CreateSharedMemory(unsigned int size, int fd, int visibility);
+
+void FreeSharedMemory(void *value, unsigned int size);
+
+int SharedMemoryOpen(const char* name);
+
+void SharedMemoryClose(int fd, const char* name);
+
+int Truncate(int fd, unsigned int size);
+
+}
+}
+
+
+#endif //TSAN_SHARED_MEMORY_H
Index: lib/tsan/rtl/relacy/tsan_shared_memory.cc
===================================================================
--- /dev/null
+++ lib/tsan/rtl/relacy/tsan_shared_memory.cc
@@ -0,0 +1,39 @@
+#include "rtl/relacy/tsan_shared_memory.h"
+#include <sys/mman.h>
+#include <zconf.h>
+#include <fcntl.h>
+#include "rtl/tsan_rtl.h"
+
+namespace __tsan {
+namespace __relacy {
+// Anonymous mapping shared with child processes created by fork().
+void *CreateSharedMemory(unsigned int size, int fd) {
+    constexpr int protection = PROT_READ | PROT_WRITE;
+    constexpr int visibility = MAP_ANONYMOUS | MAP_SHARED;
+    return mmap(nullptr, size, protection, visibility, fd, 0);
+}
+
+void *CreateSharedMemory(unsigned int size, int fd, int visibility) {
+    constexpr int protection = PROT_READ | PROT_WRITE;
+    return mmap(nullptr, size, protection, MAP_SHARED | visibility, fd, 0);  // caller supplies extra MAP_* flags
+}
+
+void FreeSharedMemory(void *value, unsigned int size) {
+    munmap(value, size);
+}
+
+int SharedMemoryOpen(const char* name) {
+    return shm_open(name, O_CREAT | O_RDWR, S_IRUSR | S_IWUSR);  // POSIX named shared memory object
+}
+
+void SharedMemoryClose(int fd, const char* name) {
+    close(fd);
+    shm_unlink(name);  // remove the name; memory persists until the last mapping is gone
+}
+
+int Truncate(int fd, unsigned int size) {
+    return ftruncate(fd, size);  // size a freshly created shm object
+}
+
+}
+}
\ No newline at end of file
Index: lib/tsan/rtl/relacy/tsan_shared_value.h
===================================================================
--- /dev/null
+++ lib/tsan/rtl/relacy/tsan_shared_value.h
@@ -0,0 +1,92 @@
+#ifndef TSAN_SHARED_VALUE_H
+#define TSAN_SHARED_VALUE_H
+
+#include "tsan_shared_memory.h"
+
+namespace __tsan {
+namespace __relacy {
+
+// Value of type T stored in anonymous shared memory so it survives fork()
+// and is visible to both the supervising and the test process.
+template<typename T>
+class SharedValue {
+  public:
+   SharedValue()
+           : value_(CreateSharedMemory(sizeof(T))) {
+       new (value_) T{};
+   }
+
+   explicit SharedValue(const T& value)
+           : value_(CreateSharedMemory(sizeof(T))) {
+       new (value_) T{};
+       *static_cast<T*>(value_) = value;
+   }
+
+   SharedValue(const SharedValue& other) : value_(CreateSharedMemory(sizeof(T))) {
+       new (value_) T{};
+       *static_cast<T*>(value_) = *static_cast<T*>(other.value_);
+   }
+
+   SharedValue(SharedValue&& other) {
+       value_ = other.value_;
+       other.value_ = nullptr;
+   }
+
+   // Copy-and-swap: the by-value parameter serves both copy- and move-assignment.
+   SharedValue& operator=(SharedValue other) {
+       void* tmp = value_;
+       value_ = other.value_;
+       other.value_ = tmp;
+       return *this;
+   }
+
+   SharedValue& operator=(const T& value) {
+       *static_cast<T*>(value_) = value;
+       return *this;
+   }
+
+   operator T&() {
+       return *static_cast<T*>(value_);
+   }
+
+   operator const T&() const {
+       return *static_cast<T*>(value_);
+   }
+
+   T& operator ++() {
+       ++static_cast<T&>(*this);
+       return static_cast<T&>(*this);
+   }
+
+   T operator ++(int) {
+       T old = static_cast<T&>(*this);
+       ++static_cast<T&>(*this);
+       return old;
+   }
+
+   T& operator --() {
+       --static_cast<T&>(*this);
+       return static_cast<T&>(*this);
+   }
+
+   T operator --(int) {
+       T old = static_cast<T&>(*this);
+       --static_cast<T&>(*this);
+       return old;
+   }
+
+   ~SharedValue() {
+       // The mapping is intentionally not unmapped (it is shared across fork());
+       // just reset the stored value. Guard against moved-from state.
+       if (value_ != nullptr)
+           *static_cast<T*>(value_) = T {};
+   }
+
+  private:
+   void *value_;
+};
+
+}
+}
+
+#endif //TSAN_SHARED_VALUE_H
Index: lib/tsan/rtl/relacy/tsan_shared_vector.h
===================================================================
--- /dev/null
+++ lib/tsan/rtl/relacy/tsan_shared_vector.h
@@ -0,0 +1,139 @@
+#ifndef TSAN_SHARED_VECTOR_H
+#define TSAN_SHARED_VECTOR_H
+
+#include "sanitizer_common/sanitizer_libc.h"
+#include "tsan_shared_value.h"
+#include "tsan_shared_memory.h"
+#include "tsan/rtl/tsan_rtl.h"
+
+namespace __tsan {
+namespace __relacy {
+
+template<typename T>
+class SharedVector {
+  public:
+   explicit SharedVector(const char* name)
+           : begin_()
+           , end_()
+           , last_()
+           , fd_(SharedMemoryOpen(name)) {
+   }
+
+   ~SharedVector() {
+       //if (begin_)
+       //    InternalFree(begin_, (end_ - begin_) * sizeof(T));
+       //SharedMemoryClose(fd_, "Physical");
+   }
+
+   void Reset() {
+       if (begin_)
+           InternalFree(begin_, (end_ - begin_) * sizeof(T));
+       begin_ = 0;
+       end_ = 0;
+       last_ = 0;
+   }
+
+   uptr Size() const {
+       return end_ - begin_;
+   }
+
+   T &operator[](uptr i) {
+       DCHECK_LT(i, end_ - begin_);
+       return begin_[i];
+   }
+
+   const T &operator[](uptr i) const {
+       DCHECK_LT(i, end_ - begin_);
+       return begin_[i];
+   }
+
+   T *PushBack() {
+       EnsureSize(Size() + 1);
+       T *p = &end_[-1];
+       internal_memset(p, 0, sizeof(*p));
+       return p;
+   }
+
+   T *PushBack(const T& v) {
+       EnsureSize(Size() + 1);
+       T *p = &end_[-1];
+       internal_memcpy(p, &v, sizeof(*p));
+       return p;
+   }
+
+   void PopBack() {
+       DCHECK_GT(end_, begin_);
+       end_--;
+   }
+
+   void Resize(uptr size) {
+       if (size == 0) {
+           end_ = begin_;
+           return;
+       }
+       uptr old_size = Size();
+       if (size <= old_size) {
+           end_ = begin_ + size;
+           return;
+       }
+       EnsureSize(size);
+       if (old_size < size) {
+           for (uptr i = old_size; i < size; i++)
+               internal_memset(&begin_[i], 0, sizeof(begin_[i]));
+       }
+   }
+
+   void Revalidate() {  // re-create this process's mapping of the backing fd
+       int size = end_ - begin_;      // element count survives: begin_/end_/last_ live in shared memory
+       int capacity = last_ - begin_;
+       begin_ = (T*)InternalAlloc(capacity * sizeof(T));  // fresh mapping of fd_ at the same capacity
+       end_ = begin_ + size;
+       last_ = begin_ + capacity;
+   }
+
+  private:
+   SharedValue<T*> begin_;  // the pointers themselves are stored in shared memory
+   SharedValue<T*> end_;
+   SharedValue<T*> last_;
+   int fd_;  // backing shared-memory descriptor from SharedMemoryOpen(name)
+
+   void *InternalAlloc(unsigned int size) {  // grow the backing fd, then map it
+       Truncate(fd_, size);
+       return CreateSharedMemory(size, fd_, 0);
+   }
+
+   void InternalFree(void* value, unsigned int size) {  // unmap only; fd_ stays open
+       FreeSharedMemory(value, size);
+   }
+
+   void EnsureSize(uptr size) {  // guarantee capacity >= size; on growth sets end_ = begin_ + size
+       if (size <= Size())
+           return;
+       if (size <= (uptr)(last_ - begin_)) {
+           end_ = begin_ + size;
+           return;
+       }
+       uptr cap0 = last_ - begin_;
+       uptr cap = cap0 * 5 / 4;  // 25% growth
+       if (cap == 0)
+           cap = 16;
+       if (cap < size)
+           cap = size;
+       T *p = (T*)InternalAlloc(cap * sizeof(T));  // NOTE(review): maps the same fd_ that backs begin_, so old and new views alias one file — confirm the copy below is safe
+       if (cap0) {
+           internal_memcpy(p, begin_, cap0 * sizeof(T));
+           InternalFree(begin_, cap0 * sizeof(T));
+       }
+       begin_ = p;
+       end_ = begin_ + size;
+       last_ = begin_ + cap;
+   }
+
+   SharedVector(const SharedVector&);
+   void operator=(const SharedVector&);
+};
+
+}
+}
+
+#endif //TSAN_SHARED_VECTOR_H
Index: lib/tsan/rtl/relacy/tsan_thread_context.h
===================================================================
--- /dev/null
+++ lib/tsan/rtl/relacy/tsan_thread_context.h
@@ -0,0 +1,82 @@
+#ifndef TSAN_THREAD_CONTEXT_H
+#define TSAN_THREAD_CONTEXT_H
+
+#include <sanitizer_common/sanitizer_vector.h>
+
+namespace __tsan {
+namespace __relacy {
+
+class ThreadContext {
+  public:
+   explicit ThreadContext(int tid = 0);
+
+   int GetTid() const;
+
+   void SetTid(int tid);
+
+   ThreadContext* GetParent();
+
+   void SetParent(ThreadContext *parent);
+
+  private:
+   int tid_;
+   ThreadContext* parent_;
+};
+
+class JoinContext {
+  public:
+   JoinContext(ThreadContext *current_thread, ThreadContext *wait_thread);
+
+   int GetTid() const;
+
+   int GetWaitTid() const;
+
+   ThreadContext* GetCurrentThread();
+
+   ThreadContext* GetWaitThread();
+
+  private:
+   ThreadContext *wait_thread_;
+   ThreadContext *current_thread_;
+};
+
+class MutexContext {
+  public:
+   MutexContext(ThreadContext* thread, void* mutex);
+
+   int GetTid() const;
+
+   ThreadContext* GetCurrentThread();
+
+   void* GetMutex();
+
+  private:
+   ThreadContext* thread_;
+   void* mutex_;
+};
+
+class ConditionVariableContext {
+  public:
+   ConditionVariableContext(void* cond_var);
+
+   ThreadContext* ExtractByTid(int tid);
+
+   ThreadContext* ExtractBack();
+
+   ThreadContext* GetByTid(int tid);
+
+   int CountThreads() const;
+
+   void PushBack(ThreadContext* context);
+
+   void* GetConditionVariable();
+
+  private:
+   Vector<ThreadContext*> threads_;
+   void* cond_var_;
+};
+
+}
+}
+
+#endif //TSAN_THREAD_CONTEXT_H
Index: lib/tsan/rtl/relacy/tsan_thread_context.cc
===================================================================
--- /dev/null
+++ lib/tsan/rtl/relacy/tsan_thread_context.cc
@@ -0,0 +1,105 @@
+#include "tsan_thread_context.h"
+
+namespace __tsan {
+namespace __relacy {
+
+ThreadContext::ThreadContext(int tid)
+    : tid_(tid)
+    , parent_(nullptr)
+{}
+
+int ThreadContext::GetTid() const {
+  return tid_;
+}
+
+void ThreadContext::SetTid(int tid) {
+  tid_ = tid;
+}
+
+ThreadContext* ThreadContext::GetParent() {
+  return parent_;
+}
+
+void ThreadContext::SetParent(ThreadContext *parent) {
+  parent_ = parent;
+}
+
+JoinContext::JoinContext(ThreadContext *current_thread, ThreadContext *wait_thread)
+    : wait_thread_(wait_thread), current_thread_(current_thread) {}
+
+int JoinContext::GetTid() const {
+  return current_thread_->GetTid();
+}
+
+int JoinContext::GetWaitTid() const {
+  return wait_thread_->GetTid();
+}
+
+ThreadContext* JoinContext::GetCurrentThread() {
+  return current_thread_;
+}
+
+ThreadContext* JoinContext::GetWaitThread() {
+  return wait_thread_;
+}
+
+MutexContext::MutexContext(ThreadContext* thread, void* mutex) : thread_(thread), mutex_(mutex) {
+}
+
+int MutexContext::GetTid() const {
+  return thread_->GetTid();
+}
+
+ThreadContext* MutexContext::GetCurrentThread() {
+  return thread_;
+}
+
+void* MutexContext::GetMutex() {
+  return mutex_;
+}
+
+ConditionVariableContext::ConditionVariableContext(void* cond_var) : cond_var_(cond_var) {
+
+}
+
+ThreadContext* ConditionVariableContext::ExtractByTid(int tid) {
+  for (uptr i = 0; i < threads_.Size(); i++) {
+    if (threads_[i]->GetTid() == tid) {
+      ThreadContext* context = threads_[i];
+      threads_[i] = threads_[threads_.Size() - 1];
+      threads_.PopBack();
+      return context;
+    }
+  }
+  return nullptr;
+}
+
+ThreadContext* ConditionVariableContext::ExtractBack() {  // pop the most recently queued waiter
+  if (threads_.Size() == 0) return nullptr;  // guard: popping an empty Vector underflows
+  ThreadContext* context = threads_[threads_.Size() - 1];
+  threads_.PopBack(); return context;
+}
+
+ThreadContext* ConditionVariableContext::GetByTid(int tid) {
+    for (uptr i = 0; i < threads_.Size(); i++) {
+      if (threads_[i]->GetTid() == tid) {
+        return threads_[i];
+      }
+    }
+    return nullptr;
+}
+
+int ConditionVariableContext::CountThreads() const {
+  return static_cast<int>(threads_.Size());
+}
+
+void ConditionVariableContext::PushBack(ThreadContext* context) {
+  threads_.PushBack(context);
+}
+
+void* ConditionVariableContext::GetConditionVariable() {
+  return cond_var_;
+}
+
+}
+}
\ No newline at end of file
Index: lib/tsan/rtl/relacy/tsan_threads_box.h
===================================================================
--- /dev/null
+++ lib/tsan/rtl/relacy/tsan_threads_box.h
@@ -0,0 +1,186 @@
+#ifndef TSAN_THREADS_BOX_H
+#define TSAN_THREADS_BOX_H
+
+#include <tsan/rtl/tsan_defs.h>
+#include "sanitizer_common/sanitizer_vector.h"
+#include "tsan_thread_context.h"
+#include "tsan_type_traits.h"
+
+namespace __tsan {
+namespace __relacy {
+
+class ThreadsBox {
+  public:
+   ThreadContext *GetCurrentThread();
+
+   void SetCurrentThread(ThreadContext *context);
+
+#define THREADS_INFO(Type, ReturnType) \
+    bool Contains##Type##ByTid(int tid) const; \
+    int Max##Type##Tid() const; \
+    ReturnType Extract##Type##ByTid(int tid); \
+    ReturnType Get##Type##ByTid(int tid); \
+    ReturnType Extract##Type##ByIndex(uptr idx); \
+    ReturnType Get##Type##ByIndex(uptr idx); \
+    void Add##Type(ReturnType context); \
+    uptr GetCount##Type();
+
+   THREADS_INFO(Running, ThreadContext*)  // runnable threads
+
+   THREADS_INFO(Joining, JoinContext)  // blocked joining another thread (see WakeupJoiningByWaitTid)
+
+   THREADS_INFO(Stopped, ThreadContext*)  // threads that have finished running
+
+   THREADS_INFO(Waiting, MutexContext)  // blocked acquiring a mutex
+
+   THREADS_INFO(Sleping, ThreadContext*)  // FIXME: typo — "Sleeping"; renaming also touches tsan_threads_box.cc
+
+#undef THREADS_INFO
+
+   void WakeupJoiningByWaitTid(int wait_tid);
+
+   unsigned long GetRunningBitSet();
+
+   void AddMutex(void* mutex);
+
+   void ExtractMutex(void* mutex);
+
+   bool ExistsMutex(void* mutex);
+
+   ThreadContext* ExtractWaitByMutex(void* mutex);
+
+   void AddConditionVariable(void *c, ThreadContext* context);
+
+   ThreadContext* ExtractWaitByConditionVariable(void *c);
+
+   bool ExistsConditionVariable(void *c);
+
+   ConditionVariableContext* GetConditionVariable(void *c);
+
+   ThreadContext* GetConditionVariableThreadByTid(int tid);
+
+   void PrintDebugInfo();
+
+  private:
+   template<typename T>
+   void Add(typename enable_if<!is_pointer<T>::value, T>::type context, Vector<T> &threads) {
+       if (!ContainsByTid(context.GetTid(), threads)) {
+           threads.PushBack(context);
+       }
+   }
+
+   template<typename T>
+   void Add(typename enable_if<is_pointer<T>::value, T>::type context, Vector<T> &threads) {
+       if (!ContainsByTid(context->GetTid(), threads)) {
+           threads.PushBack(context);
+       }
+   }
+
+   template<typename T>
+   typename enable_if<!is_pointer<T>::value, int>::type MaxTid(const Vector<T> &threads) const {
+     int m = 0;
+     for (uptr i = 0; i < threads.Size(); i++) {
+       m = max(threads[i].GetTid(), m);
+     }
+     return m;
+   }
+
+   template<typename T>
+   typename enable_if<is_pointer<T>::value, int>::type MaxTid(const Vector<T> &threads) const {
+     int m = 0;
+     for (uptr i = 0; i < threads.Size(); i++) {
+       m = max(threads[i]->GetTid(), m);
+     }
+     return m;
+   }
+
+   template<typename T>
+   typename enable_if<!is_pointer<T>::value, bool>::type ContainsByTid(int tid, const Vector<T> &threads) const {
+     for (uptr i = 0; i < threads.Size(); i++) {
+       if (threads[i].GetTid() == tid) {
+         return true;
+       }
+     }
+     return false;
+   }
+
+   template<typename T>
+   typename enable_if<is_pointer<T>::value, bool>::type ContainsByTid(int tid, const Vector<T> &threads) const {
+     for (uptr i = 0; i < threads.Size(); i++) {
+       if (threads[i]->GetTid() == tid) {
+         return true;
+       }
+     }
+     return false;
+   }
+
+   template<typename T>
+   typename enable_if<!is_pointer<T>::value, T>::type GetByTid(int tid, Vector<T> &threads) {
+     for (uptr i = 0; i < threads.Size(); i++) {
+       if (threads[i].GetTid() == tid) {
+         return threads[i];
+       }
+     }
+     Printf("FATAL: ThreadSanitizer invalid tid for GetByTid\n");
+     Die();
+   }
+
+   template<typename T>
+   typename enable_if<!is_pointer<T>::value, T>::type ExtractByTid(int tid, Vector<T> &threads) {
+     for (uptr i = 0; i < threads.Size(); i++) {
+       if (threads[i].GetTid() == tid) {
+         T context = threads[i];
+         threads[i] = threads[threads.Size() - 1];
+         threads.PopBack();
+         return context;
+       }
+     }
+     Printf("FATAL: ThreadSanitizer invalid tid for ExtractByTid\n");
+     Die();
+   }
+
+   template<typename T>
+   typename enable_if<is_pointer<T>::value, T>::type ExtractByTid(int tid, Vector<T> &threads) {
+     for (uptr i = 0; i < threads.Size(); i++) {
+       if (threads[i]->GetTid() == tid) {
+         T context = threads[i];
+         threads[i] = threads[threads.Size() - 1];
+         threads.PopBack();
+         return context;
+       }
+     }
+     return nullptr;
+   }
+
+   template<typename T>
+   typename enable_if<is_pointer<T>::value, T>::type GetByTid(int tid, Vector<T> &threads) {
+     for (uptr i = 0; i < threads.Size(); i++) {
+       if (threads[i]->GetTid() == tid) {
+         return threads[i];
+       }
+     }
+     return nullptr;
+   }
+
+   template<typename T>
+   T ExtractByIndex(uptr idx, Vector<T> &threads);
+
+   template<typename T>
+   T GetByIndex(uptr idx, Vector<T> &threads);
+
+  private:
+   ThreadContext *current_thread_;
+   Vector<ThreadContext *> running_threads_;
+   Vector<JoinContext> joining_threads_;
+   Vector<ThreadContext *> stopped_threads_;
+   Vector<MutexContext> waiting_threads_;
+   Vector<ThreadContext *> sleping_threads_;
+
+   Vector<void*> locked_mutexes_;
+   Vector<ConditionVariableContext> condition_variables_;
+};
+
+}
+}
+
+#endif // TSAN_THREADS_BOX_H
Index: lib/tsan/rtl/relacy/tsan_threads_box.cc
===================================================================
--- /dev/null
+++ lib/tsan/rtl/relacy/tsan_threads_box.cc
@@ -0,0 +1,193 @@
+#include "tsan_threads_box.h"
+
+namespace __tsan {
+namespace __relacy {
+
+ThreadContext *ThreadsBox::GetCurrentThread() {
+  return current_thread_;
+}
+
+void ThreadsBox::SetCurrentThread(ThreadContext *context) {
+  current_thread_ = context;
+}
+
+#define THREADS_INFO(Type, ReturnType, Threads) \
+bool ThreadsBox::Contains##Type##ByTid(int tid) const { \
+  return ContainsByTid(tid, Threads); \
+} \
+ \
+int ThreadsBox::Max##Type##Tid() const { \
+  return MaxTid(Threads); \
+} \
+ReturnType ThreadsBox::Extract##Type##ByTid(int tid) { \
+  return ExtractByTid(tid, Threads); \
+} \
+ \
+ReturnType ThreadsBox::Get##Type##ByTid(int tid) { \
+  return GetByTid(tid, Threads); \
+} \
+ \
+ReturnType ThreadsBox::Extract##Type##ByIndex(uptr idx) { \
+  return ExtractByIndex(idx, Threads); \
+} \
+ \
+ReturnType ThreadsBox::Get##Type##ByIndex(uptr idx) { \
+  return GetByIndex(idx, Threads); \
+} \
+ \
+void ThreadsBox::Add##Type(ReturnType context) { \
+  Add(context, Threads); \
+} \
+ \
+uptr ThreadsBox::GetCount##Type() { \
+  return Threads.Size(); \
+}
+
+THREADS_INFO(Running, ThreadContext*, running_threads_)
+
+THREADS_INFO(Joining, JoinContext, joining_threads_)
+
+THREADS_INFO(Stopped, ThreadContext*, stopped_threads_)
+
+THREADS_INFO(Waiting, MutexContext, waiting_threads_)
+
+THREADS_INFO(Sleping, ThreadContext*, sleping_threads_)
+
+#undef THREADS_INFO
+
+void ThreadsBox::WakeupJoiningByWaitTid(int wait_tid) {
+  for (int i = 0; i < (int) joining_threads_.Size(); i++) {
+    if (joining_threads_[i].GetWaitTid() == wait_tid) {
+        ThreadContext* ctx = ExtractJoiningByIndex(i).GetCurrentThread();
+        if (!ContainsWaitingByTid(ctx->GetTid())) {
+            AddRunning(ctx);
+        }
+      i--;
+    }
+  }
+}
+
+unsigned long ThreadsBox::GetRunningBitSet() {
+    unsigned long bit_set = 0;
+    for (uptr i = 0; i < running_threads_.Size(); i++) {
+        bit_set |= 1UL << (unsigned long)running_threads_[i]->GetTid();
+    }
+    return bit_set;
+}
+
+template<typename T>
+T ThreadsBox::ExtractByIndex(uptr idx, Vector<T> &threads) {
+  DCHECK_GE(threads.Size(), idx + 1);
+  T context = threads[idx];
+  threads[idx] = threads[threads.Size() - 1];
+  threads.PopBack();
+  return context;
+}
+
+template<typename T>
+T ThreadsBox::GetByIndex(uptr idx, Vector<T> &threads) {
+  DCHECK_GE(threads.Size(), idx + 1);
+  return threads[idx];
+}
+
+void ThreadsBox::AddMutex(void* mutex) {
+    if (ExistsMutex(mutex))
+        return;
+    locked_mutexes_.PushBack(mutex);
+}
+
+void ThreadsBox::ExtractMutex(void* mutex) {
+    for (uptr i = 0; i < locked_mutexes_.Size(); i++) {
+        if (locked_mutexes_[i] == mutex) {
+            locked_mutexes_[i] = locked_mutexes_[locked_mutexes_.Size() - 1];
+            locked_mutexes_.PopBack();
+            return;
+        }
+    }
+}
+
+bool ThreadsBox::ExistsMutex(void* mutex) {
+    for (uptr i = 0; i < locked_mutexes_.Size(); i++) {
+        if (locked_mutexes_[i] == mutex)
+            return true;
+    }
+    return false;
+}
+
+ThreadContext* ThreadsBox::ExtractWaitByMutex(void* mutex) {
+    for (uptr i = 0; i < waiting_threads_.Size(); i++) {
+        if (waiting_threads_[i].GetMutex() == mutex) {
+            ThreadContext *context = waiting_threads_[i].GetCurrentThread();
+            waiting_threads_[i] = waiting_threads_[waiting_threads_.Size() - 1];
+            waiting_threads_.PopBack();
+            return context;
+        }
+    }
+    return nullptr;
+}
+
+void ThreadsBox::AddConditionVariable(void *c, ThreadContext* context) {
+    ConditionVariableContext* ctx = GetConditionVariable(c);
+    if (ctx == nullptr) {
+        condition_variables_.PushBack(ConditionVariableContext { c });
+        ctx = &condition_variables_[condition_variables_.Size() - 1];
+    }
+    ctx->PushBack(context);
+}
+
+ThreadContext* ThreadsBox::ExtractWaitByConditionVariable(void *c) {
+    ConditionVariableContext* context = GetConditionVariable(c);
+    if (context == nullptr || context->CountThreads() == 0) {
+        return nullptr;
+    }
+
+    ThreadContext* thread_context = context->ExtractBack();
+    return thread_context;
+}
+
+bool ThreadsBox::ExistsConditionVariable(void *c) {
+    return GetConditionVariable(c) != nullptr;
+}
+
+ThreadContext* ThreadsBox::GetConditionVariableThreadByTid(int tid) {
+    for (uptr i = 0; i < condition_variables_.Size(); i++) {
+        if (ThreadContext* context = condition_variables_[i].GetByTid(tid)) {
+            return context;
+        }
+    }
+    return nullptr;
+}
+
+ConditionVariableContext* ThreadsBox::GetConditionVariable(void *c) {
+    for (uptr i = 0; i < condition_variables_.Size(); i++) {
+        if (condition_variables_[i].GetConditionVariable() == c && condition_variables_[i].CountThreads() != 0) {
+            return &condition_variables_[i];
+        }
+    }
+    return nullptr;
+}
+
+void ThreadsBox::PrintDebugInfo() {  // dump scheduler thread state (debugging aid)
+    Printf("Current thread: %d\n", current_thread_->GetTid());
+
+    Printf("Running threads [%d]: ", (int)running_threads_.Size());  // cast: Size() is uptr, %d reads int
+    for (uptr i = 0; i < running_threads_.Size(); i++) {
+        Printf("%d ", running_threads_[i]->GetTid());
+    }
+    Printf("\n");
+
+    Printf("Joining threads [%d]: ", (int)joining_threads_.Size());  // cast: uptr vs %d
+    for (uptr i = 0; i < joining_threads_.Size(); i++) {
+        Printf("(%d, %d) ", joining_threads_[i].GetTid(), joining_threads_[i].GetWaitTid());
+    }
+    Printf("\n");
+
+    Printf("Waiting threads [%d]: ", (int)waiting_threads_.Size());  // cast: uptr vs %d
+    for (uptr i = 0; i < waiting_threads_.Size(); i++) {
+        Printf("(%d, %p) ", waiting_threads_[i].GetTid(), waiting_threads_[i].GetMutex());
+    }
+    Printf("\n");
+}
+
+}
+}
\ No newline at end of file
Index: lib/tsan/rtl/relacy/tsan_type_traits.h
===================================================================
--- /dev/null
+++ lib/tsan/rtl/relacy/tsan_type_traits.h
@@ -0,0 +1,44 @@
+#ifndef TSAN_TYPE_TRAITS_H
+#define TSAN_TYPE_TRAITS_H
+
+namespace __tsan {
+namespace __relacy {
+
+template<bool B, class T = void>
+struct enable_if {  // SFINAE helper mirroring std::enable_if: no ::type unless B is true
+};
+
+template<class T>
+struct enable_if<true, T> {
+  typedef T type;
+};
+
+template< class T > struct remove_const          { typedef T type; };  // std::remove_const stand-in
+template< class T > struct remove_const<const T> { typedef T type; };
+
+template< class T > struct remove_volatile             { typedef T type; };  // std::remove_volatile stand-in
+template< class T > struct remove_volatile<volatile T> { typedef T type; };
+
+template<class T>
+struct remove_cv {  // strips both const and volatile qualifiers
+  typedef typename remove_volatile<typename remove_const<T>::type>::type type;
+};
+
+template<class T>
+struct is_pointer_helper {  // primary: non-pointer types
+  constexpr static bool value = false;
+};
+
+template<class T>
+struct is_pointer_helper<T *> {  // partial specialization matches any pointer
+  constexpr static bool value = true;
+};
+
+template<class T>
+struct is_pointer : is_pointer_helper<typename remove_cv<T>::type> {  // cv-qualified pointers count too
+};
+
+}
+}
+
+#endif //TSAN_TYPE_TRAITS_H
Index: lib/tsan/rtl/tsan_flags.cc
===================================================================
--- lib/tsan/rtl/tsan_flags.cc
+++ lib/tsan/rtl/tsan_flags.cc
@@ -76,7 +76,6 @@
   FlagParser parser;
   RegisterTsanFlags(&parser, f);
   RegisterCommonFlags(&parser);
-
 #if TSAN_CONTAINS_UBSAN
   __ubsan::Flags *uf = __ubsan::flags();
   uf->SetDefaults();
Index: lib/tsan/rtl/tsan_flags.inc
===================================================================
--- lib/tsan/rtl/tsan_flags.inc
+++ lib/tsan/rtl/tsan_flags.inc
@@ -84,3 +84,7 @@
           "modules.")
 TSAN_FLAG(bool, shared_ptr_interceptor, true,
           "Track atomic reference counting in libc++ shared_ptr and weak_ptr.")
+TSAN_FLAG(const char *, scheduler_platform, "",
+          "If set, enables the scheduler model for the given platform.")
+TSAN_FLAG(const char *, scheduler_type, "",
+          "Selects the scheduler type to use.")
Index: lib/tsan/rtl/tsan_interceptors.cc
===================================================================
--- lib/tsan/rtl/tsan_interceptors.cc
+++ lib/tsan/rtl/tsan_interceptors.cc
@@ -31,6 +31,9 @@
 #include "tsan_rtl.h"
 #include "tsan_mman.h"
 #include "tsan_fd.h"
+#if SANITIZER_RELACY_SCHEDULER
+#include "relacy/tsan_scheduler_engine.h"
+#endif
 
 
 using namespace __tsan;  // NOLINT
@@ -928,6 +931,10 @@
 };
 
 extern "C" void *__tsan_thread_start_func(void *arg) {
+#if SANITIZER_RELACY_SCHEDULER
+  _scheduler_engine.Initialize();
+{
+#endif
   ThreadParam *p = (ThreadParam*)arg;
   void* (*callback)(void *arg) = p->callback;
   void *param = p->param;
@@ -951,13 +958,43 @@
     ProcWire(proc, thr);
     ThreadStart(thr, tid, GetTid(), /*workerthread*/ false);
     atomic_store(&p->tid, 0, memory_order_release);
+#if SANITIZER_RELACY_SCHEDULER
+    _scheduler_engine.Yield(_scheduler_engine.GetParent());
+#endif
   }
   void *res = callback(param);
   // Prevent the callback from being tail called,
   // it mixes up stack traces.
+#if !SANITIZER_RELACY_SCHEDULER  // was #ifndef: with the macro defined to 0, both this and the #if block below were skipped and the function fell through without returning
   volatile int foo = 42;
   foo++;
   return res;
+#endif
+
+#if SANITIZER_RELACY_SCHEDULER
+  /*_scheduler_engine.Yield();
+  if (_scheduler_engine.GetPlatformType() == __relacy::PlatformType::PTHREAD) {
+    _scheduler_engine.StopThread();
+    _scheduler_engine.Yield();
+    volatile int foo = 42;
+    foo++;
+    return res;
+  } */
+  if (_scheduler_engine.GetPlatformType() == __relacy::PlatformType::OS) {
+    volatile int foo = 42;
+    foo++;
+    return res;
+  }
+}
+  volatile int foo = 42;
+  foo++;
+  _scheduler_engine.StopThread();
+  DestroyThreadState();
+  _scheduler_engine.Yield();
+  Printf("ThreadSanitizer: failed stopped thread was running!");
+  Die();
+  return nullptr;
+#endif
 }
 
 TSAN_INTERCEPTOR(int, pthread_create,
@@ -990,16 +1027,38 @@
   p.callback = callback;
   p.param = param;
   atomic_store(&p.tid, 0, memory_order_relaxed);
+#if SANITIZER_RELACY_SCHEDULER
+  __relacy::ThreadContext* context = nullptr;
+#endif
   int res = -1;
   {
     // Otherwise we see false positives in pthread stack manipulation.
     ScopedIgnoreInterceptors ignore;
     ThreadIgnoreBegin(thr, pc);
+#if SANITIZER_RELACY_SCHEDULER
+    if (_scheduler_engine.GetPlatformType() != __relacy::PlatformType::OS) {
+      context = _scheduler_engine
+          .CreateFiber(th, attr,
+                       reinterpret_cast<void(*)()>(__tsan_thread_start_func),
+                       &p);
+      res = 0;
+    } else {
+      res = REAL(pthread_create)(th, attr, __tsan_thread_start_func, &p);
+    }
+#else
     res = REAL(pthread_create)(th, attr, __tsan_thread_start_func, &p);
+#endif
     ThreadIgnoreEnd(thr, pc);
   }
+#if SANITIZER_RELACY_SCHEDULER
+  int tid = 0;
+#endif
   if (res == 0) {
+#if SANITIZER_RELACY_SCHEDULER
+    tid = ThreadCreate(thr, pc, *(uptr*)th, IsStateDetached(detached));
+#else
     int tid = ThreadCreate(thr, pc, *(uptr*)th, IsStateDetached(detached));
+#endif
     CHECK_NE(tid, 0);
     // Synchronization on p.tid serves two purposes:
     // 1. ThreadCreate must finish before the new thread starts.
@@ -1008,12 +1067,20 @@
     // 2. ThreadStart must finish before this thread continues.
     //    Otherwise, this thread can call pthread_detach and reset thr->sync
     //    before the new thread got a chance to acquire from it in ThreadStart.
+
     atomic_store(&p.tid, tid, memory_order_release);
+#if SANITIZER_RELACY_SCHEDULER
+    _scheduler_engine.Yield(context);
+#endif
     while (atomic_load(&p.tid, memory_order_acquire) != 0)
       internal_sched_yield();
   }
   if (attr == &myattr)
     pthread_attr_destroy(&myattr);
+#if SANITIZER_RELACY_SCHEDULER
+  _scheduler_engine.AddFiberContext(tid, context);
+  _scheduler_engine.Yield();
+#endif
   return res;
 }
 
@@ -1021,7 +1088,17 @@
   SCOPED_INTERCEPTOR_RAW(pthread_join, th, ret);
   int tid = ThreadTid(thr, pc, (uptr)th);
   ThreadIgnoreBegin(thr, pc);
+#if SANITIZER_RELACY_SCHEDULER
+  int res = 0;
+  if (_scheduler_engine.GetPlatformType() != __relacy::PlatformType::OS) {
+    _scheduler_engine.Join(tid);
+    _scheduler_engine.Yield();
+  } else {
+    res = BLOCK_REAL(pthread_join)(th, ret);
+  }
+#else
   int res = BLOCK_REAL(pthread_join)(th, ret);
+#endif
   ThreadIgnoreEnd(thr, pc);
   if (res == 0) {
     ThreadJoin(thr, pc, tid);
@@ -1128,12 +1205,51 @@
   return res;
 }
 
+static int cond_wait_scheduler(ThreadState *thr, uptr pc, ScopedInterceptor *si,
+                     int (*fn)(void *c, void *m, void *abstime), void *c,
+                     void *m, void *t) {
+    (void)fn; (void)t; (void)si;  // unused: kept only for signature parity with cond_wait()
+    MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
+    MutexUnlock(thr, pc, (uptr)m);
+    int res = 0;  // the scheduler engine replaces the real pthread_cond_*wait call
+    // This ensures that we handle mutex lock even in case of pthread_cancel.
+    // See test/tsan/cond_cancel.cc.
+    {
+        // Enable signal delivery while the thread is blocked.
+        BlockingCall bc(thr);
+#if SANITIZER_RELACY_SCHEDULER
+        res = _scheduler_engine.CondWait(c, m);
+#endif
+    }
+    if (res == errno_EOWNERDEAD) MutexRepair(thr, pc, (uptr)m);
+    MutexPostLock(thr, pc, (uptr)m, MutexFlagDoPreLockOnPostLock);
+    return res;
+}
+
+
+
 INTERCEPTOR(int, pthread_cond_wait, void *c, void *m) {
+#ifdef SANITIZER_RELACY_SCHEDULER
+  if (_scheduler_engine.GetPlatformType() == __relacy::PlatformType::OS) {
+    void *cond = init_cond(c);
+    SCOPED_TSAN_INTERCEPTOR(pthread_cond_wait, cond, m);
+    return cond_wait(thr, pc, &si, (int (*)(void *c, void *m, void *abstime))REAL(
+                                     pthread_cond_wait),
+                   cond, m, 0);
+  } else {
+    void *cond = init_cond(c);
+    SCOPED_TSAN_INTERCEPTOR(pthread_cond_wait, cond, m);
+    return cond_wait_scheduler(thr, pc, &si, (int (*)(void *c, void *m, void *abstime))REAL(
+                                     pthread_cond_wait),
+                   cond, m, 0);
+  }
+#else
   void *cond = init_cond(c);
   SCOPED_TSAN_INTERCEPTOR(pthread_cond_wait, cond, m);
   return cond_wait(thr, pc, &si, (int (*)(void *c, void *m, void *abstime))REAL(
                                      pthread_cond_wait),
                    cond, m, 0);
+#endif
 }
 
 INTERCEPTOR(int, pthread_cond_timedwait, void *c, void *m, void *abstime) {
@@ -1154,17 +1270,35 @@
 #endif
 
 INTERCEPTOR(int, pthread_cond_signal, void *c) {
+#ifdef SANITIZER_RELACY_SCHEDULER
+  void *cond = init_cond(c);
+  SCOPED_TSAN_INTERCEPTOR(pthread_cond_signal, cond);
+  MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
+  return _scheduler_engine.GetPlatformType() == __relacy::PlatformType::OS
+            ? REAL(pthread_cond_signal)(cond)
+            : _scheduler_engine.Signal(c);
+#else
   void *cond = init_cond(c);
   SCOPED_TSAN_INTERCEPTOR(pthread_cond_signal, cond);
   MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
   return REAL(pthread_cond_signal)(cond);
+#endif
 }
 
 INTERCEPTOR(int, pthread_cond_broadcast, void *c) {
+#ifdef SANITIZER_RELACY_SCHEDULER
   void *cond = init_cond(c);
   SCOPED_TSAN_INTERCEPTOR(pthread_cond_broadcast, cond);
   MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
-  return REAL(pthread_cond_broadcast)(cond);
+  return _scheduler_engine.GetPlatformType() == __relacy::PlatformType::OS
+            ? REAL(pthread_cond_broadcast)(cond)
+            : _scheduler_engine.Broadcast(c);
+#else
+    void *cond = init_cond(c);
+    SCOPED_TSAN_INTERCEPTOR(pthread_cond_broadcast, cond);
+    MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
+    return REAL(pthread_cond_broadcast)(cond);
+#endif
 }
 
 INTERCEPTOR(int, pthread_cond_destroy, void *c) {
@@ -1207,6 +1341,7 @@
 }
 
 TSAN_INTERCEPTOR(int, pthread_mutex_trylock, void *m) {
+    Printf("trylock\n");  // FIXME(review): leftover debug output, fires on every trylock — remove before landing (same pattern in the other interceptor hunks below)
   SCOPED_TSAN_INTERCEPTOR(pthread_mutex_trylock, m);
   int res = REAL(pthread_mutex_trylock)(m);
   if (res == errno_EOWNERDEAD)
@@ -1218,6 +1353,7 @@
 
 #if !SANITIZER_MAC
 TSAN_INTERCEPTOR(int, pthread_mutex_timedlock, void *m, void *abstime) {
+    Printf("timed\n");
   SCOPED_TSAN_INTERCEPTOR(pthread_mutex_timedlock, m, abstime);
   int res = REAL(pthread_mutex_timedlock)(m, abstime);
   if (res == 0) {
@@ -1247,6 +1383,7 @@
 }
 
 TSAN_INTERCEPTOR(int, pthread_spin_lock, void *m) {
+    Printf("spin lock\n");
   SCOPED_TSAN_INTERCEPTOR(pthread_spin_lock, m);
   MutexPreLock(thr, pc, (uptr)m);
   int res = REAL(pthread_spin_lock)(m);
@@ -1257,6 +1394,7 @@
 }
 
 TSAN_INTERCEPTOR(int, pthread_spin_trylock, void *m) {
+    Printf("spin trylock\n");
   SCOPED_TSAN_INTERCEPTOR(pthread_spin_trylock, m);
   int res = REAL(pthread_spin_trylock)(m);
   if (res == 0) {
@@ -1266,6 +1404,7 @@
 }
 
 TSAN_INTERCEPTOR(int, pthread_spin_unlock, void *m) {
+    Printf("spin unlock\n");
   SCOPED_TSAN_INTERCEPTOR(pthread_spin_unlock, m);
   MutexUnlock(thr, pc, (uptr)m);
   int res = REAL(pthread_spin_unlock)(m);
@@ -1292,6 +1431,7 @@
 }
 
 TSAN_INTERCEPTOR(int, pthread_rwlock_rdlock, void *m) {
+    Printf("rwlock\n");
   SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_rdlock, m);
   MutexPreReadLock(thr, pc, (uptr)m);
   int res = REAL(pthread_rwlock_rdlock)(m);
@@ -1302,6 +1442,7 @@
 }
 
 TSAN_INTERCEPTOR(int, pthread_rwlock_tryrdlock, void *m) {
+    Printf("rdlock\n");
   SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_tryrdlock, m);
   int res = REAL(pthread_rwlock_tryrdlock)(m);
   if (res == 0) {
@@ -1312,6 +1453,7 @@
 
 #if !SANITIZER_MAC
 TSAN_INTERCEPTOR(int, pthread_rwlock_timedrdlock, void *m, void *abstime) {
+    Printf("timed_rdlock\n");
   SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedrdlock, m, abstime);
   int res = REAL(pthread_rwlock_timedrdlock)(m, abstime);
   if (res == 0) {
@@ -1322,6 +1464,7 @@
 #endif
 
 TSAN_INTERCEPTOR(int, pthread_rwlock_wrlock, void *m) {
+    Printf("rwlock\n");
   SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_wrlock, m);
   MutexPreLock(thr, pc, (uptr)m);
   int res = REAL(pthread_rwlock_wrlock)(m);
@@ -1332,6 +1475,7 @@
 }
 
 TSAN_INTERCEPTOR(int, pthread_rwlock_trywrlock, void *m) {
+    Printf("rw trylock\n");
   SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_trywrlock, m);
   int res = REAL(pthread_rwlock_trywrlock)(m);
   if (res == 0) {
@@ -1342,6 +1486,7 @@
 
 #if !SANITIZER_MAC
 TSAN_INTERCEPTOR(int, pthread_rwlock_timedwrlock, void *m, void *abstime) {
+    Printf("rwlock timed\n");
   SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedwrlock, m, abstime);
   int res = REAL(pthread_rwlock_timedwrlock)(m, abstime);
   if (res == 0) {
@@ -1352,6 +1497,7 @@
 #endif
 
 TSAN_INTERCEPTOR(int, pthread_rwlock_unlock, void *m) {
+    Printf("rwlock unlock\n");
   SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_unlock, m);
   MutexReadOrWriteUnlock(thr, pc, (uptr)m);
   int res = REAL(pthread_rwlock_unlock)(m);
@@ -1374,6 +1520,7 @@
 }
 
 TSAN_INTERCEPTOR(int, pthread_barrier_wait, void *b) {
+    Printf("barried wait\n");
   SCOPED_TSAN_INTERCEPTOR(pthread_barrier_wait, b);
   Release(thr, pc, (uptr)b);
   MemoryRead(thr, pc, (uptr)b, kSizeLog1);
@@ -1387,6 +1534,7 @@
 #endif
 
 TSAN_INTERCEPTOR(int, pthread_once, void *o, void (*f)()) {
+    Printf("pthread once\n");
   SCOPED_INTERCEPTOR_RAW(pthread_once, o, f);
   if (o == 0 || f == 0)
     return errno_EINVAL;
Index: lib/tsan/rtl/tsan_interface_atomic.cc
===================================================================
--- lib/tsan/rtl/tsan_interface_atomic.cc
+++ lib/tsan/rtl/tsan_interface_atomic.cc
@@ -25,6 +25,7 @@
 #include "tsan_flags.h"
 #include "tsan_interface.h"
 #include "tsan_rtl.h"
+#include "rtl/relacy/tsan_scheduler_engine.h"
 
 using namespace __tsan;  // NOLINT
 
@@ -522,243 +523,378 @@
 extern "C" {
 SANITIZER_INTERFACE_ATTRIBUTE
 a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) {
+#if SANITIZER_RELACY_SCHEDULER
+  _scheduler_engine.Yield();
+#endif
   SCOPED_ATOMIC(Load, a, mo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a16 __tsan_atomic16_load(const volatile a16 *a, morder mo) {
+#if SANITIZER_RELACY_SCHEDULER
+  _scheduler_engine.Yield();
+#endif
   SCOPED_ATOMIC(Load, a, mo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
+#if SANITIZER_RELACY_SCHEDULER
+  _scheduler_engine.Yield();
+#endif
   SCOPED_ATOMIC(Load, a, mo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) {
+#if SANITIZER_RELACY_SCHEDULER
+  _scheduler_engine.Yield();
+#endif
   SCOPED_ATOMIC(Load, a, mo);
 }
 
 #if __TSAN_HAS_INT128
 SANITIZER_INTERFACE_ATTRIBUTE
 a128 __tsan_atomic128_load(const volatile a128 *a, morder mo) {
+#if SANITIZER_RELACY_SCHEDULER
+  _scheduler_engine.Yield();
+#endif
   SCOPED_ATOMIC(Load, a, mo);
 }
 #endif
 
 SANITIZER_INTERFACE_ATTRIBUTE
 void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
+#if SANITIZER_RELACY_SCHEDULER
+  _scheduler_engine.Yield();
+#endif
   SCOPED_ATOMIC(Store, a, v, mo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) {
+#if SANITIZER_RELACY_SCHEDULER
+  _scheduler_engine.Yield();
+#endif
   SCOPED_ATOMIC(Store, a, v, mo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) {
+#if SANITIZER_RELACY_SCHEDULER
+  _scheduler_engine.Yield();
+#endif
   SCOPED_ATOMIC(Store, a, v, mo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
+#if SANITIZER_RELACY_SCHEDULER
+  _scheduler_engine.Yield();
+#endif
   SCOPED_ATOMIC(Store, a, v, mo);
 }
 
 #if __TSAN_HAS_INT128
 SANITIZER_INTERFACE_ATTRIBUTE
 void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) {
+#if SANITIZER_RELACY_SCHEDULER
+  _scheduler_engine.Yield();
+#endif
   SCOPED_ATOMIC(Store, a, v, mo);
 }
 #endif
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
+#if SANITIZER_RELACY_SCHEDULER
+  _scheduler_engine.Yield();
+#endif
   SCOPED_ATOMIC(Exchange, a, v, mo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) {
+#if SANITIZER_RELACY_SCHEDULER
+  _scheduler_engine.Yield();
+#endif
   SCOPED_ATOMIC(Exchange, a, v, mo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) {
+#if SANITIZER_RELACY_SCHEDULER
+  _scheduler_engine.Yield();
+#endif
   SCOPED_ATOMIC(Exchange, a, v, mo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
+#if SANITIZER_RELACY_SCHEDULER
+  _scheduler_engine.Yield();
+#endif
   SCOPED_ATOMIC(Exchange, a, v, mo);
 }
 
 #if __TSAN_HAS_INT128
 SANITIZER_INTERFACE_ATTRIBUTE
 a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) {
+#if SANITIZER_RELACY_SCHEDULER
+  _scheduler_engine.Yield();
+#endif
   SCOPED_ATOMIC(Exchange, a, v, mo);
 }
 #endif
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
+#if SANITIZER_RELACY_SCHEDULER
+  _scheduler_engine.Yield();
+#endif
   SCOPED_ATOMIC(FetchAdd, a, v, mo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) {
+#if SANITIZER_RELACY_SCHEDULER
+  _scheduler_engine.Yield();
+#endif
   SCOPED_ATOMIC(FetchAdd, a, v, mo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) {
+#if SANITIZER_RELACY_SCHEDULER
+  _scheduler_engine.Yield();
+#endif
   SCOPED_ATOMIC(FetchAdd, a, v, mo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
+#if SANITIZER_RELACY_SCHEDULER
+  _scheduler_engine.Yield();
+#endif
   SCOPED_ATOMIC(FetchAdd, a, v, mo);
 }
 
 #if __TSAN_HAS_INT128
 SANITIZER_INTERFACE_ATTRIBUTE
 a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) {
+#if SANITIZER_RELACY_SCHEDULER
+  _scheduler_engine.Yield();
+#endif
   SCOPED_ATOMIC(FetchAdd, a, v, mo);
 }
 #endif
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) {
+#if SANITIZER_RELACY_SCHEDULER
+  _scheduler_engine.Yield();
+#endif
   SCOPED_ATOMIC(FetchSub, a, v, mo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) {
+#if SANITIZER_RELACY_SCHEDULER
+  _scheduler_engine.Yield();
+#endif
   SCOPED_ATOMIC(FetchSub, a, v, mo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) {
+#if SANITIZER_RELACY_SCHEDULER
+  _scheduler_engine.Yield();
+#endif
   SCOPED_ATOMIC(FetchSub, a, v, mo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) {
+#if SANITIZER_RELACY_SCHEDULER
+  _scheduler_engine.Yield();
+#endif
   SCOPED_ATOMIC(FetchSub, a, v, mo);
 }
 
 #if __TSAN_HAS_INT128
 SANITIZER_INTERFACE_ATTRIBUTE
 a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) {
+#if SANITIZER_RELACY_SCHEDULER
+  _scheduler_engine.Yield();
+#endif
   SCOPED_ATOMIC(FetchSub, a, v, mo);
 }
 #endif
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
+#if SANITIZER_RELACY_SCHEDULER
+  _scheduler_engine.Yield();
+#endif
   SCOPED_ATOMIC(FetchAnd, a, v, mo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) {
+#if SANITIZER_RELACY_SCHEDULER
+  _scheduler_engine.Yield();
+#endif
   SCOPED_ATOMIC(FetchAnd, a, v, mo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) {
+#if SANITIZER_RELACY_SCHEDULER
+  _scheduler_engine.Yield();
+#endif
   SCOPED_ATOMIC(FetchAnd, a, v, mo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
+#if SANITIZER_RELACY_SCHEDULER
+  _scheduler_engine.Yield();
+#endif
   SCOPED_ATOMIC(FetchAnd, a, v, mo);
 }
 
 #if __TSAN_HAS_INT128
 SANITIZER_INTERFACE_ATTRIBUTE
 a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) {
+#if SANITIZER_RELACY_SCHEDULER
+  _scheduler_engine.Yield();
+#endif
   SCOPED_ATOMIC(FetchAnd, a, v, mo);
 }
 #endif
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
+#if SANITIZER_RELACY_SCHEDULER
+  _scheduler_engine.Yield();
+#endif
   SCOPED_ATOMIC(FetchOr, a, v, mo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) {
+#if SANITIZER_RELACY_SCHEDULER
+  _scheduler_engine.Yield();
+#endif
   SCOPED_ATOMIC(FetchOr, a, v, mo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) {
+#if SANITIZER_RELACY_SCHEDULER
+  _scheduler_engine.Yield();
+#endif
   SCOPED_ATOMIC(FetchOr, a, v, mo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
+#if SANITIZER_RELACY_SCHEDULER
+  _scheduler_engine.Yield();
+#endif
   SCOPED_ATOMIC(FetchOr, a, v, mo);
 }
 
 #if __TSAN_HAS_INT128
 SANITIZER_INTERFACE_ATTRIBUTE
 a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) {
+#if SANITIZER_RELACY_SCHEDULER
+  _scheduler_engine.Yield();
+#endif
   SCOPED_ATOMIC(FetchOr, a, v, mo);
 }
 #endif
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
+#if SANITIZER_RELACY_SCHEDULER
+  _scheduler_engine.Yield();
+#endif
   SCOPED_ATOMIC(FetchXor, a, v, mo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) {
+#if SANITIZER_RELACY_SCHEDULER
+  _scheduler_engine.Yield();
+#endif
   SCOPED_ATOMIC(FetchXor, a, v, mo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) {
+#if SANITIZER_RELACY_SCHEDULER
+  _scheduler_engine.Yield();
+#endif
   SCOPED_ATOMIC(FetchXor, a, v, mo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
+#if SANITIZER_RELACY_SCHEDULER
+  _scheduler_engine.Yield();
+#endif
   SCOPED_ATOMIC(FetchXor, a, v, mo);
 }
 
 #if __TSAN_HAS_INT128
 SANITIZER_INTERFACE_ATTRIBUTE
 a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) {
+#if SANITIZER_RELACY_SCHEDULER
+  _scheduler_engine.Yield();
+#endif
   SCOPED_ATOMIC(FetchXor, a, v, mo);
 }
 #endif
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) {
+#if SANITIZER_RELACY_SCHEDULER
+  _scheduler_engine.Yield();
+#endif
   SCOPED_ATOMIC(FetchNand, a, v, mo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) {
+#if SANITIZER_RELACY_SCHEDULER
+  _scheduler_engine.Yield();
+#endif
   SCOPED_ATOMIC(FetchNand, a, v, mo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) {
+#if SANITIZER_RELACY_SCHEDULER
+  _scheduler_engine.Yield();
+#endif
   SCOPED_ATOMIC(FetchNand, a, v, mo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) {
+#if SANITIZER_RELACY_SCHEDULER
+  _scheduler_engine.Yield();
+#endif
   SCOPED_ATOMIC(FetchNand, a, v, mo);
 }
 
 #if __TSAN_HAS_INT128
 SANITIZER_INTERFACE_ATTRIBUTE
 a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) {
+#if SANITIZER_RELACY_SCHEDULER
+  _scheduler_engine.Yield();
+#endif
   SCOPED_ATOMIC(FetchNand, a, v, mo);
 }
 #endif
@@ -766,24 +902,36 @@
 SANITIZER_INTERFACE_ATTRIBUTE
 int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
     morder mo, morder fmo) {
+#if SANITIZER_RELACY_SCHEDULER
+  _scheduler_engine.Yield();
+#endif
   SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
     morder mo, morder fmo) {
+#if SANITIZER_RELACY_SCHEDULER
+  _scheduler_engine.Yield();
+#endif
   SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
     morder mo, morder fmo) {
+#if SANITIZER_RELACY_SCHEDULER
+  _scheduler_engine.Yield();
+#endif
   SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
     morder mo, morder fmo) {
+#if SANITIZER_RELACY_SCHEDULER
+  _scheduler_engine.Yield();
+#endif
   SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
 }
 
@@ -791,6 +939,9 @@
 SANITIZER_INTERFACE_ATTRIBUTE
 int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
     morder mo, morder fmo) {
+#if SANITIZER_RELACY_SCHEDULER
+  _scheduler_engine.Yield();
+#endif
   SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
 }
 #endif
@@ -798,24 +949,36 @@
 SANITIZER_INTERFACE_ATTRIBUTE
 int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v,
     morder mo, morder fmo) {
+#if SANITIZER_RELACY_SCHEDULER
+  _scheduler_engine.Yield();
+#endif
   SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
     morder mo, morder fmo) {
+#if SANITIZER_RELACY_SCHEDULER
+  _scheduler_engine.Yield();
+#endif
   SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
     morder mo, morder fmo) {
+#if SANITIZER_RELACY_SCHEDULER
+  _scheduler_engine.Yield();
+#endif
   SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
     morder mo, morder fmo) {
+#if SANITIZER_RELACY_SCHEDULER
+  _scheduler_engine.Yield();
+#endif
   SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
 }
 
@@ -823,6 +986,9 @@
 SANITIZER_INTERFACE_ATTRIBUTE
 int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
     morder mo, morder fmo) {
+#if SANITIZER_RELACY_SCHEDULER
+  _scheduler_engine.Yield();
+#endif
   SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
 }
 #endif
@@ -830,24 +996,36 @@
 SANITIZER_INTERFACE_ATTRIBUTE
 a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v,
     morder mo, morder fmo) {
+#if SANITIZER_RELACY_SCHEDULER
+  _scheduler_engine.Yield();
+#endif
   SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v,
     morder mo, morder fmo) {
+#if SANITIZER_RELACY_SCHEDULER
+  _scheduler_engine.Yield();
+#endif
   SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v,
     morder mo, morder fmo) {
+#if SANITIZER_RELACY_SCHEDULER
+  _scheduler_engine.Yield();
+#endif
   SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
     morder mo, morder fmo) {
+#if SANITIZER_RELACY_SCHEDULER
+  _scheduler_engine.Yield();
+#endif
   SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
 }
 
@@ -855,18 +1033,27 @@
 SANITIZER_INTERFACE_ATTRIBUTE
 a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
     morder mo, morder fmo) {
+#if SANITIZER_RELACY_SCHEDULER
+  _scheduler_engine.Yield();
+#endif
   SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
 }
 #endif
 
 SANITIZER_INTERFACE_ATTRIBUTE
 void __tsan_atomic_thread_fence(morder mo) {
+#if SANITIZER_RELACY_SCHEDULER
+  _scheduler_engine.Yield();
+#endif
   char* a = 0;
   SCOPED_ATOMIC(Fence, mo);
 }
 
 SANITIZER_INTERFACE_ATTRIBUTE
 void __tsan_atomic_signal_fence(morder mo) {
+#if SANITIZER_RELACY_SCHEDULER
+  _scheduler_engine.Yield();
+#endif
 }
 }  // extern "C"
 
Index: lib/tsan/rtl/tsan_rtl.cc
===================================================================
--- lib/tsan/rtl/tsan_rtl.cc
+++ lib/tsan/rtl/tsan_rtl.cc
@@ -26,6 +26,9 @@
 #include "tsan_suppressions.h"
 #include "tsan_symbolize.h"
 #include "ubsan/ubsan_init.h"
+#if SANITIZER_RELACY_SCHEDULER
+#include "relacy/tsan_scheduler_engine.h"
+#endif
 
 #ifdef __SSE3__
 // <emmintrin.h> transitively includes <stdlib.h>,
Index: lib/tsan/tests/CMakeLists.txt
===================================================================
--- lib/tsan/tests/CMakeLists.txt
+++ lib/tsan/tests/CMakeLists.txt
@@ -24,7 +24,7 @@
     $<TARGET_OBJECTS:RTSanitizerCommonLibc.osx>
     $<TARGET_OBJECTS:RTSanitizerCommonCoverage.osx>
     $<TARGET_OBJECTS:RTSanitizerCommonSymbolizer.osx>
-    $<TARGET_OBJECTS:RTUbsan.osx>)
+    $<TARGET_OBJECTS:RTUbsan.osx> ../rtl/relacy/schedulers/tsan_all_states_scheduler.cc ../rtl/relacy/schedulers/tsan_fixed_window_scheduler.cc ../rtl/relacy/schedulers/tsan_full_path_scheduler.cc ../rtl/relacy/schedulers/tsan_parallel_full_path_scheduler.cc ../rtl/relacy/schedulers/tsan_random_scheduler.cc ../rtl/relacy/schedulers/tsan_random_with_different_distributions_scheduler.cc ../rtl/relacy/platforms/tsan_fiber_tls_copy_platform.cc ../rtl/relacy/platforms/tsan_fiber_tls_swap_platform.cc ../rtl/relacy/platforms/tsan_pthread_platform.cc)
   set(TSAN_TEST_RUNTIME RTTsanTest)
   add_library(${TSAN_TEST_RUNTIME} STATIC ${TSAN_TEST_RUNTIME_OBJECTS})
   set_target_properties(${TSAN_TEST_RUNTIME} PROPERTIES