Index: compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp
===================================================================
--- compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp
+++ compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cpp
@@ -402,33 +402,64 @@
 template <typename T>
 static bool AtomicCAS(ThreadState *thr, uptr pc, volatile T *a, T *c, T v,
                       morder mo, morder fmo) {
-  (void)fmo;  // Unused because llvm does not pass it yet.
+  // 31.7.2.18: "The failure argument shall not be memory_order_release
+  // nor memory_order_acq_rel". LLVM (2021-05) falls back to Monotonic
+  // (relaxed) when those orders are used.
+  CHECK(IsLoadOrder(fmo));
+
   MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
   SyncVar *s = 0;
   bool write_lock = mo != mo_acquire && mo != mo_consume;
-  if (mo != mo_relaxed) {
+  bool should_lock = mo != mo_relaxed || fmo != mo_relaxed;
+
+  if (should_lock)
     s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, write_lock);
-    thr->fast_state.IncrementEpoch();
-    // Can't increment epoch w/o writing to the trace as well.
-    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
-    if (IsAcqRelOrder(mo))
-      AcquireReleaseImpl(thr, pc, &s->clock);
-    else if (IsReleaseOrder(mo))
-      ReleaseImpl(thr, pc, &s->clock);
-    else if (IsAcquireOrder(mo))
-      AcquireImpl(thr, pc, &s->clock);
-  }
+
   T cc = *c;
   T pr = func_cas(a, cc, v);
-  if (s) {
+  if (pr == cc) {
+    if (!should_lock)
+      return true;
+
+    thr->fast_state.IncrementEpoch();
+    // Can't increment epoch w/o writing to the trace as well.
+    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
+    if (IsAcqRelOrder(mo))
+      AcquireReleaseImpl(thr, pc, &s->clock);
+    else if (IsReleaseOrder(mo))
+      ReleaseImpl(thr, pc, &s->clock);
+    else if (IsAcquireOrder(mo))
+      AcquireImpl(thr, pc, &s->clock);
     if (write_lock)
       s->mtx.Unlock();
    else
      s->mtx.ReadUnlock();
-  }
-  if (pr == cc)
     return true;
+  }
+
+  // Honor the failure memory order.
   *c = pr;
+  if (!should_lock || !s)
+    return false;
+
+  // The lock cannot be reused if it was taken as a write lock,
+  // since fmo only applies to the load part of the CAS.
+  if (write_lock) {
+    s->mtx.Unlock();
+    s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, /*write_lock*/ false);
+  }
+
+  if (IsAcquireOrder(fmo)) {
+    thr->fast_state.IncrementEpoch();
+    // Can't increment epoch w/o writing to the trace as well.
+    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
+    AcquireImpl(thr, pc, &s->clock);
+    // No write lock is taken here, since fmo is required to be a
+    // load-style (relaxed/consume/acquire) memory order.
+    s->mtx.ReadUnlock();
+  }
+
   return false;
 }
Index: compiler-rt/test/tsan/compare_exchange.cpp
===================================================================
--- /dev/null
+++ compiler-rt/test/tsan/compare_exchange.cpp
@@ -0,0 +1,141 @@
+// RUN: %clangxx_tsan -O1 %s %link_libcxx_tsan -DRELEASE_RELAXED -o %t && %deflake %run %t 2>&1 | FileCheck --check-prefix=CHECK-REPORT %s
+// RUN: %clangxx_tsan -O1 %s %link_libcxx_tsan -DRELEASE_ACQUIRE -o %t && %run %t 2>&1 | FileCheck %s
+// RUN: %clangxx_tsan -O1 %s %link_libcxx_tsan -DRELEASE_CONSUME -o %t && %run %t 2>&1 | FileCheck %s
+// RUN: %clangxx_tsan -O1 %s %link_libcxx_tsan -DRELEASE_SEQ_CST -o %t && %run %t 2>&1 | FileCheck %s
+
+// RUN: %clangxx_tsan -O1 %s %link_libcxx_tsan -DREL_ACQ_RELAXED -o %t && %deflake %run %t 2>&1 | FileCheck --check-prefix=CHECK-REPORT %s
+// RUN: %clangxx_tsan -O1 %s %link_libcxx_tsan -DREL_ACQ_ACQUIRE -o %t && %run %t 2>&1 | FileCheck %s
+// RUN: %clangxx_tsan -O1 %s %link_libcxx_tsan -DREL_ACQ_CONSUME -o %t && %run %t 2>&1 | FileCheck %s
+// RUN: %clangxx_tsan -O1 %s %link_libcxx_tsan -DREL_ACQ_SEQ_CST -o %t && %run %t 2>&1 | FileCheck %s
+
+// RUN: %clangxx_tsan -O1 %s %link_libcxx_tsan -DSEQ_CST_RELAXED -o %t && %deflake %run %t 2>&1 | FileCheck --check-prefix=CHECK-REPORT %s
+// RUN: %clangxx_tsan -O1 %s %link_libcxx_tsan -DSEQ_CST_ACQUIRE -o %t && %run %t 2>&1 | FileCheck %s
+// RUN: %clangxx_tsan -O1 %s %link_libcxx_tsan -DSEQ_CST_CONSUME -o %t && %run %t 2>&1 | FileCheck %s
+// RUN: %clangxx_tsan -O1 %s %link_libcxx_tsan -DSEQ_CST_SEQ_CST -o %t && %run %t 2>&1 | FileCheck %s
+
+// RUN: %clangxx_tsan -O1 %s %link_libcxx_tsan -DRELAXED_RELAXED -o %t && %deflake %run %t 2>&1 | FileCheck --check-prefix=CHECK-REPORT %s
+// RUN: %clangxx_tsan -O1 %s %link_libcxx_tsan -DRELAXED_ACQUIRE -o %t && %run %t 2>&1 | FileCheck %s
+// RUN: %clangxx_tsan -O1 %s %link_libcxx_tsan -DRELAXED_CONSUME -o %t && %run %t 2>&1 | FileCheck %s
+// RUN: %clangxx_tsan -O1 %s %link_libcxx_tsan -DRELAXED_SEQ_CST -o %t && %run %t 2>&1 | FileCheck %s
+#include <atomic>
+#include <cassert>
+#include <cstdio>
+#include <thread>
+
+#ifdef RELEASE_ACQUIRE
+static constexpr auto SuccessOrder = std::memory_order_release;
+static constexpr auto FailureOrder = std::memory_order_acquire;
+#endif
+
+#ifdef RELEASE_RELAXED
+static constexpr auto SuccessOrder = std::memory_order_release;
+static constexpr auto FailureOrder = std::memory_order_relaxed;
+#endif
+
+#ifdef RELEASE_CONSUME
+static constexpr auto SuccessOrder = std::memory_order_release;
+static constexpr auto FailureOrder = std::memory_order_consume;
+#endif
+
+#ifdef RELEASE_SEQ_CST
+static constexpr auto SuccessOrder = std::memory_order_release;
+static constexpr auto FailureOrder = std::memory_order_seq_cst;
+#endif
+
+#ifdef REL_ACQ_RELAXED
+static constexpr auto SuccessOrder = std::memory_order_acq_rel;
+static constexpr auto FailureOrder = std::memory_order_relaxed;
+#endif
+
+#ifdef REL_ACQ_ACQUIRE
+static constexpr auto SuccessOrder = std::memory_order_acq_rel;
+static constexpr auto FailureOrder = std::memory_order_acquire;
+#endif
+
+#ifdef REL_ACQ_CONSUME
+static constexpr auto SuccessOrder = std::memory_order_acq_rel;
+static constexpr auto FailureOrder = std::memory_order_consume;
+#endif
+
+#ifdef REL_ACQ_SEQ_CST
+static constexpr auto SuccessOrder = std::memory_order_acq_rel;
+static constexpr auto FailureOrder = std::memory_order_seq_cst;
+#endif
+
+#ifdef SEQ_CST_RELAXED
+static constexpr auto SuccessOrder = std::memory_order_seq_cst;
+static constexpr auto FailureOrder = std::memory_order_relaxed;
+#endif
+
+#ifdef SEQ_CST_ACQUIRE
+static constexpr auto SuccessOrder = std::memory_order_seq_cst;
+static constexpr auto FailureOrder = std::memory_order_acquire;
+#endif
+
+#ifdef SEQ_CST_CONSUME
+static constexpr auto SuccessOrder = std::memory_order_seq_cst;
+static constexpr auto FailureOrder = std::memory_order_consume;
+#endif
+
+#ifdef SEQ_CST_SEQ_CST
+static constexpr auto SuccessOrder = std::memory_order_seq_cst;
+static constexpr auto FailureOrder = std::memory_order_seq_cst;
+#endif
+
+#ifdef RELAXED_RELAXED
+static constexpr auto SuccessOrder = std::memory_order_relaxed;
+static constexpr auto FailureOrder = std::memory_order_relaxed;
+#endif
+
+#ifdef RELAXED_ACQUIRE
+static constexpr auto SuccessOrder = std::memory_order_relaxed;
+static constexpr auto FailureOrder = std::memory_order_acquire;
+#endif
+
+#ifdef RELAXED_CONSUME
+static constexpr auto SuccessOrder = std::memory_order_relaxed;
+static constexpr auto FailureOrder = std::memory_order_consume;
+#endif
+
+#ifdef RELAXED_SEQ_CST
+static constexpr auto SuccessOrder = std::memory_order_relaxed;
+static constexpr auto FailureOrder = std::memory_order_seq_cst;
+#endif
+
+struct node {
+  int val;
+};
+std::atomic<node *> _node{nullptr};
+
+void f1() {
+  auto n = new node();
+  n->val = 42;
+  _node.store(n, std::memory_order_release);
+}
+
+void f2() {
+  node *expected = nullptr;
+  while (expected == nullptr) {
+    _node.compare_exchange_weak(expected, nullptr, SuccessOrder,
+                                FailureOrder);
+  };
+
+  ++expected->val;
+  assert(expected->val == 43);
+}
+
+int main() {
+  std::thread t1(f1);
+  std::thread t2(f2);
+
+  t1.join();
+  t2.join();
+
+  fprintf(stderr, "DONE\n");
+  return 0;
+}
+
+// CHECK-NOT: WARNING: ThreadSanitizer: data race
+// CHECK: DONE
+
+// CHECK-REPORT: WARNING: ThreadSanitizer: data race
\ No newline at end of file
Index: llvm/test/Instrumentation/ThreadSanitizer/atomic.ll
===================================================================
--- llvm/test/Instrumentation/ThreadSanitizer/atomic.ll
+++ llvm/test/Instrumentation/ThreadSanitizer/atomic.ll
@@ -349,41 +349,61 @@
 define void @atomic8_cas_monotonic(i8* %a) nounwind uwtable {
 entry:
   cmpxchg i8* %a, i8 0, i8 1 monotonic monotonic, !dbg !7
+  cmpxchg i8* %a, i8 0, i8 1 monotonic acquire, !dbg !7
+  cmpxchg i8* %a, i8 0, i8 1 monotonic seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic8_cas_monotonic
 ; CHECK: call i8 @__tsan_atomic8_compare_exchange_val(i8* %a, i8 0, i8 1, i32 0, i32 0), !dbg
+; CHECK: call i8 @__tsan_atomic8_compare_exchange_val(i8* %a, i8 0, i8 1, i32 0, i32 2), !dbg
+; CHECK: call i8 @__tsan_atomic8_compare_exchange_val(i8* %a, i8 0, i8 1, i32 0, i32 5), !dbg
 
 define void @atomic8_cas_acquire(i8* %a) nounwind uwtable {
 entry:
+  cmpxchg i8* %a, i8 0, i8 1 acquire monotonic, !dbg !7
   cmpxchg i8* %a, i8 0, i8 1 acquire acquire, !dbg !7
+  cmpxchg i8* %a, i8 0, i8 1 acquire seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic8_cas_acquire
+; CHECK: call i8 @__tsan_atomic8_compare_exchange_val(i8* %a, i8 0, i8 1, i32 2, i32 0), !dbg
 ; CHECK: call i8 @__tsan_atomic8_compare_exchange_val(i8* %a, i8 0, i8 1, i32 2, i32 2), !dbg
+; CHECK: call i8 @__tsan_atomic8_compare_exchange_val(i8* %a, i8 0, i8 1, i32 2, i32 5), !dbg
 
 define void @atomic8_cas_release(i8* %a) nounwind uwtable {
 entry:
   cmpxchg i8* %a, i8 0, i8 1 release monotonic, !dbg !7
+  cmpxchg i8* %a, i8 0, i8 1 release acquire, !dbg !7
+  cmpxchg i8* %a, i8 0, i8 1 release seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic8_cas_release
 ; CHECK: call i8 @__tsan_atomic8_compare_exchange_val(i8* %a, i8 0, i8 1, i32 3, i32 0), !dbg
+; CHECK: call i8 @__tsan_atomic8_compare_exchange_val(i8* %a, i8 0, i8 1, i32 3, i32 2), !dbg
+; CHECK: call i8 @__tsan_atomic8_compare_exchange_val(i8* %a, i8 0, i8 1, i32 3, i32 5), !dbg
 
 define void @atomic8_cas_acq_rel(i8* %a) nounwind uwtable {
 entry:
+  cmpxchg i8* %a, i8 0, i8 1 acq_rel monotonic, !dbg !7
   cmpxchg i8* %a, i8 0, i8 1 acq_rel acquire, !dbg !7
+  cmpxchg i8* %a, i8 0, i8 1 acq_rel seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic8_cas_acq_rel
+; CHECK: call i8 @__tsan_atomic8_compare_exchange_val(i8* %a, i8 0, i8 1, i32 4, i32 0), !dbg
 ; CHECK: call i8 @__tsan_atomic8_compare_exchange_val(i8* %a, i8 0, i8 1, i32 4, i32 2), !dbg
+; CHECK: call i8 @__tsan_atomic8_compare_exchange_val(i8* %a, i8 0, i8 1, i32 4, i32 5), !dbg
 
 define void @atomic8_cas_seq_cst(i8* %a) nounwind uwtable {
 entry:
+  cmpxchg i8* %a, i8 0, i8 1 seq_cst monotonic, !dbg !7
+  cmpxchg i8* %a, i8 0, i8 1 seq_cst acquire, !dbg !7
   cmpxchg i8* %a, i8 0, i8 1 seq_cst seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic8_cas_seq_cst
+; CHECK: call i8 @__tsan_atomic8_compare_exchange_val(i8* %a, i8 0, i8 1, i32 5, i32 0), !dbg
+; CHECK: call i8 @__tsan_atomic8_compare_exchange_val(i8* %a, i8 0, i8 1, i32 5, i32 2), !dbg
 ; CHECK: call i8 @__tsan_atomic8_compare_exchange_val(i8* %a, i8 0, i8 1, i32 5, i32 5), !dbg
 
 define i16 @atomic16_load_unordered(i16* %a) nounwind uwtable {
@@ -733,41 +753,61 @@
 define void @atomic16_cas_monotonic(i16* %a) nounwind uwtable {
 entry:
   cmpxchg i16* %a, i16 0, i16 1 monotonic monotonic, !dbg !7
+  cmpxchg i16* %a, i16 0, i16 1 monotonic acquire, !dbg !7
+  cmpxchg i16* %a, i16 0, i16 1 monotonic seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic16_cas_monotonic
 ; CHECK: call i16 @__tsan_atomic16_compare_exchange_val(i16* %a, i16 0, i16 1, i32 0, i32 0), !dbg
+; CHECK: call i16 @__tsan_atomic16_compare_exchange_val(i16* %a, i16 0, i16 1, i32 0, i32 2), !dbg
+; CHECK: call i16 @__tsan_atomic16_compare_exchange_val(i16* %a, i16 0, i16 1, i32 0, i32 5), !dbg
 
 define void @atomic16_cas_acquire(i16* %a) nounwind uwtable {
 entry:
+  cmpxchg i16* %a, i16 0, i16 1 acquire monotonic, !dbg !7
   cmpxchg i16* %a, i16 0, i16 1 acquire acquire, !dbg !7
+  cmpxchg i16* %a, i16 0, i16 1 acquire seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic16_cas_acquire
+; CHECK: call i16 @__tsan_atomic16_compare_exchange_val(i16* %a, i16 0, i16 1, i32 2, i32 0), !dbg
 ; CHECK: call i16 @__tsan_atomic16_compare_exchange_val(i16* %a, i16 0, i16 1, i32 2, i32 2), !dbg
+; CHECK: call i16 @__tsan_atomic16_compare_exchange_val(i16* %a, i16 0, i16 1, i32 2, i32 5), !dbg
 
 define void @atomic16_cas_release(i16* %a) nounwind uwtable {
 entry:
   cmpxchg i16* %a, i16 0, i16 1 release monotonic, !dbg !7
+  cmpxchg i16* %a, i16 0, i16 1 release acquire, !dbg !7
+  cmpxchg i16* %a, i16 0, i16 1 release seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic16_cas_release
 ; CHECK: call i16 @__tsan_atomic16_compare_exchange_val(i16* %a, i16 0, i16 1, i32 3, i32 0), !dbg
+; CHECK: call i16 @__tsan_atomic16_compare_exchange_val(i16* %a, i16 0, i16 1, i32 3, i32 2), !dbg
+; CHECK: call i16 @__tsan_atomic16_compare_exchange_val(i16* %a, i16 0, i16 1, i32 3, i32 5), !dbg
 
 define void @atomic16_cas_acq_rel(i16* %a) nounwind uwtable {
 entry:
+  cmpxchg i16* %a, i16 0, i16 1 acq_rel monotonic, !dbg !7
   cmpxchg i16* %a, i16 0, i16 1 acq_rel acquire, !dbg !7
+  cmpxchg i16* %a, i16 0, i16 1 acq_rel seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic16_cas_acq_rel
+; CHECK: call i16 @__tsan_atomic16_compare_exchange_val(i16* %a, i16 0, i16 1, i32 4, i32 0), !dbg
 ; CHECK: call i16 @__tsan_atomic16_compare_exchange_val(i16* %a, i16 0, i16 1, i32 4, i32 2), !dbg
+; CHECK: call i16 @__tsan_atomic16_compare_exchange_val(i16* %a, i16 0, i16 1, i32 4, i32 5), !dbg
 
 define void @atomic16_cas_seq_cst(i16* %a) nounwind uwtable {
 entry:
+  cmpxchg i16* %a, i16 0, i16 1 seq_cst monotonic, !dbg !7
+  cmpxchg i16* %a, i16 0, i16 1 seq_cst acquire, !dbg !7
   cmpxchg i16* %a, i16 0, i16 1 seq_cst seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic16_cas_seq_cst
+; CHECK: call i16 @__tsan_atomic16_compare_exchange_val(i16* %a, i16 0, i16 1, i32 5, i32 0), !dbg
+; CHECK: call i16 @__tsan_atomic16_compare_exchange_val(i16* %a, i16 0, i16 1, i32 5, i32 2), !dbg
 ; CHECK: call i16 @__tsan_atomic16_compare_exchange_val(i16* %a, i16 0, i16 1, i32 5, i32 5), !dbg
 
 define i32 @atomic32_load_unordered(i32* %a) nounwind uwtable {
@@ -1117,41 +1157,61 @@
 define void @atomic32_cas_monotonic(i32* %a) nounwind uwtable {
 entry:
   cmpxchg i32* %a, i32 0, i32 1 monotonic monotonic, !dbg !7
+  cmpxchg i32* %a, i32 0, i32 1 monotonic acquire, !dbg !7
+  cmpxchg i32* %a, i32 0, i32 1 monotonic seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic32_cas_monotonic
 ; CHECK: call i32 @__tsan_atomic32_compare_exchange_val(i32* %a, i32 0, i32 1, i32 0, i32 0), !dbg
+; CHECK: call i32 @__tsan_atomic32_compare_exchange_val(i32* %a, i32 0, i32 1, i32 0, i32 2), !dbg
+; CHECK: call i32 @__tsan_atomic32_compare_exchange_val(i32* %a, i32 0, i32 1, i32 0, i32 5), !dbg
 
 define void @atomic32_cas_acquire(i32* %a) nounwind uwtable {
 entry:
+  cmpxchg i32* %a, i32 0, i32 1 acquire monotonic, !dbg !7
   cmpxchg i32* %a, i32 0, i32 1 acquire acquire, !dbg !7
+  cmpxchg i32* %a, i32 0, i32 1 acquire seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic32_cas_acquire
+; CHECK: call i32 @__tsan_atomic32_compare_exchange_val(i32* %a, i32 0, i32 1, i32 2, i32 0), !dbg
 ; CHECK: call i32 @__tsan_atomic32_compare_exchange_val(i32* %a, i32 0, i32 1, i32 2, i32 2), !dbg
+; CHECK: call i32 @__tsan_atomic32_compare_exchange_val(i32* %a, i32 0, i32 1, i32 2, i32 5), !dbg
 
 define void @atomic32_cas_release(i32* %a) nounwind uwtable {
 entry:
   cmpxchg i32* %a, i32 0, i32 1 release monotonic, !dbg !7
+  cmpxchg i32* %a, i32 0, i32 1 release acquire, !dbg !7
+  cmpxchg i32* %a, i32 0, i32 1 release seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic32_cas_release
 ; CHECK: call i32 @__tsan_atomic32_compare_exchange_val(i32* %a, i32 0, i32 1, i32 3, i32 0), !dbg
+; CHECK: call i32 @__tsan_atomic32_compare_exchange_val(i32* %a, i32 0, i32 1, i32 3, i32 2), !dbg
+; CHECK: call i32 @__tsan_atomic32_compare_exchange_val(i32* %a, i32 0, i32 1, i32 3, i32 5), !dbg
 
 define void @atomic32_cas_acq_rel(i32* %a) nounwind uwtable {
 entry:
+  cmpxchg i32* %a, i32 0, i32 1 acq_rel monotonic, !dbg !7
   cmpxchg i32* %a, i32 0, i32 1 acq_rel acquire, !dbg !7
+  cmpxchg i32* %a, i32 0, i32 1 acq_rel seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic32_cas_acq_rel
+; CHECK: call i32 @__tsan_atomic32_compare_exchange_val(i32* %a, i32 0, i32 1, i32 4, i32 0), !dbg
 ; CHECK: call i32 @__tsan_atomic32_compare_exchange_val(i32* %a, i32 0, i32 1, i32 4, i32 2), !dbg
+; CHECK: call i32 @__tsan_atomic32_compare_exchange_val(i32* %a, i32 0, i32 1, i32 4, i32 5), !dbg
 
 define void @atomic32_cas_seq_cst(i32* %a) nounwind uwtable {
 entry:
+  cmpxchg i32* %a, i32 0, i32 1 seq_cst monotonic, !dbg !7
+  cmpxchg i32* %a, i32 0, i32 1 seq_cst acquire, !dbg !7
   cmpxchg i32* %a, i32 0, i32 1 seq_cst seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic32_cas_seq_cst
+; CHECK: call i32 @__tsan_atomic32_compare_exchange_val(i32* %a, i32 0, i32 1, i32 5, i32 0), !dbg
+; CHECK: call i32 @__tsan_atomic32_compare_exchange_val(i32* %a, i32 0, i32 1, i32 5, i32 2), !dbg
 ; CHECK: call i32 @__tsan_atomic32_compare_exchange_val(i32* %a, i32 0, i32 1, i32 5, i32 5), !dbg
 
 define i64 @atomic64_load_unordered(i64* %a) nounwind uwtable {
@@ -1521,41 +1581,61 @@
 define void @atomic64_cas_monotonic(i64* %a) nounwind uwtable {
 entry:
   cmpxchg i64* %a, i64 0, i64 1 monotonic monotonic, !dbg !7
+  cmpxchg i64* %a, i64 0, i64 1 monotonic acquire, !dbg !7
+  cmpxchg i64* %a, i64 0, i64 1 monotonic seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic64_cas_monotonic
 ; CHECK: call i64 @__tsan_atomic64_compare_exchange_val(i64* %a, i64 0, i64 1, i32 0, i32 0), !dbg
+; CHECK: call i64 @__tsan_atomic64_compare_exchange_val(i64* %a, i64 0, i64 1, i32 0, i32 2), !dbg
+; CHECK: call i64 @__tsan_atomic64_compare_exchange_val(i64* %a, i64 0, i64 1, i32 0, i32 5), !dbg
 
 define void @atomic64_cas_acquire(i64* %a) nounwind uwtable {
 entry:
+  cmpxchg i64* %a, i64 0, i64 1 acquire monotonic, !dbg !7
   cmpxchg i64* %a, i64 0, i64 1 acquire acquire, !dbg !7
+  cmpxchg i64* %a, i64 0, i64 1 acquire seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic64_cas_acquire
+; CHECK: call i64 @__tsan_atomic64_compare_exchange_val(i64* %a, i64 0, i64 1, i32 2, i32 0), !dbg
 ; CHECK: call i64 @__tsan_atomic64_compare_exchange_val(i64* %a, i64 0, i64 1, i32 2, i32 2), !dbg
+; CHECK: call i64 @__tsan_atomic64_compare_exchange_val(i64* %a, i64 0, i64 1, i32 2, i32 5), !dbg
 
 define void @atomic64_cas_release(i64* %a) nounwind uwtable {
 entry:
   cmpxchg i64* %a, i64 0, i64 1 release monotonic, !dbg !7
+  cmpxchg i64* %a, i64 0, i64 1 release acquire, !dbg !7
+  cmpxchg i64* %a, i64 0, i64 1 release seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic64_cas_release
 ; CHECK: call i64 @__tsan_atomic64_compare_exchange_val(i64* %a, i64 0, i64 1, i32 3, i32 0), !dbg
+; CHECK: call i64 @__tsan_atomic64_compare_exchange_val(i64* %a, i64 0, i64 1, i32 3, i32 2), !dbg
+; CHECK: call i64 @__tsan_atomic64_compare_exchange_val(i64* %a, i64 0, i64 1, i32 3, i32 5), !dbg
 
 define void @atomic64_cas_acq_rel(i64* %a) nounwind uwtable {
 entry:
+  cmpxchg i64* %a, i64 0, i64 1 acq_rel monotonic, !dbg !7
   cmpxchg i64* %a, i64 0, i64 1 acq_rel acquire, !dbg !7
+  cmpxchg i64* %a, i64 0, i64 1 acq_rel seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic64_cas_acq_rel
+; CHECK: call i64 @__tsan_atomic64_compare_exchange_val(i64* %a, i64 0, i64 1, i32 4, i32 0), !dbg
 ; CHECK: call i64 @__tsan_atomic64_compare_exchange_val(i64* %a, i64 0, i64 1, i32 4, i32 2), !dbg
+; CHECK: call i64 @__tsan_atomic64_compare_exchange_val(i64* %a, i64 0, i64 1, i32 4, i32 5), !dbg
 
 define void @atomic64_cas_seq_cst(i64* %a) nounwind uwtable {
 entry:
+  cmpxchg i64* %a, i64 0, i64 1 seq_cst monotonic, !dbg !7
+  cmpxchg i64* %a, i64 0, i64 1 seq_cst acquire, !dbg !7
   cmpxchg i64* %a, i64 0, i64 1 seq_cst seq_cst, !dbg !7
   ret void, !dbg !7
 }
 ; CHECK-LABEL: atomic64_cas_seq_cst
+; CHECK: call i64 @__tsan_atomic64_compare_exchange_val(i64* %a, i64 0, i64 1, i32 5, i32 0), !dbg
+; CHECK: call i64 @__tsan_atomic64_compare_exchange_val(i64* %a, i64 0, i64 1, i32 5, i32 2), !dbg
 ; CHECK: call i64 @__tsan_atomic64_compare_exchange_val(i64* %a, i64 0, i64 1, i32 5, i32 5), !dbg
 
 define void @atomic64_cas_seq_cst_ptr_ty(i8** %a, i8* %v1, i8* %v2) nounwind uwtable {