diff --git a/compiler-rt/lib/tsan/rtl/tsan_clock.h b/compiler-rt/lib/tsan/rtl/tsan_clock.h
--- a/compiler-rt/lib/tsan/rtl/tsan_clock.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_clock.h
@@ -17,7 +17,7 @@
 
 namespace __tsan {
 
-typedef DenseSlabAlloc ClockAlloc;
+typedef DenseSlabAlloc ClockAlloc;
 typedef DenseSlabAllocCache ClockCache;
 
 // The clock that lives in sync variables (mutexes, atomics, etc).
diff --git a/compiler-rt/lib/tsan/rtl/tsan_dense_alloc.h b/compiler-rt/lib/tsan/rtl/tsan_dense_alloc.h
--- a/compiler-rt/lib/tsan/rtl/tsan_dense_alloc.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_dense_alloc.h
@@ -29,28 +29,40 @@
   typedef u32 IndexT;
   uptr pos;
   IndexT cache[kSize];
-  template friend class DenseSlabAlloc;
+  template
+  friend class DenseSlabAlloc;
 };
 
-template
+template
 class DenseSlabAlloc {
  public:
   typedef DenseSlabAllocCache Cache;
   typedef typename Cache::IndexT IndexT;
 
-  explicit DenseSlabAlloc(const char *name) {
-    // Check that kL1Size and kL2Size are sane.
-    CHECK_EQ(kL1Size & (kL1Size - 1), 0);
-    CHECK_EQ(kL2Size & (kL2Size - 1), 0);
-    CHECK_GE(1ull << (sizeof(IndexT) * 8), kL1Size * kL2Size);
-    // Check that it makes sense to use the dense alloc.
-    CHECK_GE(sizeof(T), sizeof(IndexT));
-    internal_memset(map_, 0, sizeof(map_));
+  static_assert((kL1Size & (kL1Size - 1)) == 0,
+                "kL1Size must be a power-of-two");
+  static_assert((kL2Size & (kL2Size - 1)) == 0,
+                "kL2Size must be a power-of-two");
+  static_assert((kL1Size * kL2Size) <= (1ull << (sizeof(IndexT) * 8)),
+                "kL1Size/kL2Size are too large");
+  static_assert(((kL1Size * kL2Size - 1) & kReserved) == 0,
+                "reserved bits don't fit");
+  static_assert(sizeof(T) > sizeof(IndexT),
+                "it doesn't make sense to use dense alloc");
+
+  explicit DenseSlabAlloc(LinkerInitialized, const char *name) {
     freelist_ = 0;
     fillpos_ = 0;
     name_ = name;
   }
 
+  explicit DenseSlabAlloc(const char *name)
+      : DenseSlabAlloc(LINKER_INITIALIZED, name) {
+    // It can be very large.
+    // Don't page it in for linker initialized objects.
+    internal_memset(map_, 0, sizeof(map_));
+  }
+
   ~DenseSlabAlloc() {
     for (uptr i = 0; i < kL1Size; i++) {
       if (map_[i] != 0)
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
@@ -114,17 +114,17 @@
 #endif
 
 Context::Context()
-  : initialized()
-  , report_mtx(MutexTypeReport, StatMtxReport)
-  , nreported()
-  , nmissed_expected()
-  , thread_registry(new(thread_registry_placeholder) ThreadRegistry(
-      CreateThreadContext, kMaxTid, kThreadQuarantineSize, kMaxTidReuse))
-  , racy_mtx(MutexTypeRacy, StatMtxRacy)
-  , racy_stacks()
-  , racy_addresses()
-  , fired_suppressions_mtx(MutexTypeFired, StatMtxFired)
-  , clock_alloc("clock allocator") {
+    : initialized(),
+      report_mtx(MutexTypeReport, StatMtxReport),
+      nreported(),
+      nmissed_expected(),
+      thread_registry(new (thread_registry_placeholder) ThreadRegistry(
+          CreateThreadContext, kMaxTid, kThreadQuarantineSize, kMaxTidReuse)),
+      racy_mtx(MutexTypeRacy, StatMtxRacy),
+      racy_stacks(),
+      racy_addresses(),
+      fired_suppressions_mtx(MutexTypeFired, StatMtxFired),
+      clock_alloc(LINKER_INITIALIZED, "clock allocator") {
   fired_suppressions.reserve(8);
 }
diff --git a/compiler-rt/lib/tsan/rtl/tsan_sync.h b/compiler-rt/lib/tsan/rtl/tsan_sync.h
--- a/compiler-rt/lib/tsan/rtl/tsan_sync.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_sync.h
@@ -130,8 +130,8 @@
   static const u32 kFlagMask = 3u << 30;
   static const u32 kFlagBlock = 1u << 30;
   static const u32 kFlagSync = 2u << 30;
-  typedef DenseSlabAlloc BlockAlloc;
-  typedef DenseSlabAlloc SyncAlloc;
+  typedef DenseSlabAlloc BlockAlloc;
+  typedef DenseSlabAlloc SyncAlloc;
   BlockAlloc block_alloc_;
   SyncAlloc sync_alloc_;
   atomic_uint64_t uid_gen_;
diff --git a/compiler-rt/lib/tsan/rtl/tsan_sync.cpp b/compiler-rt/lib/tsan/rtl/tsan_sync.cpp
--- a/compiler-rt/lib/tsan/rtl/tsan_sync.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_sync.cpp
@@ -53,8 +53,8 @@
 }
 
 MetaMap::MetaMap()
-  : block_alloc_("heap block allocator")
-  , sync_alloc_("sync allocator") {
+    : block_alloc_(LINKER_INITIALIZED, "heap block allocator"),
+      sync_alloc_(LINKER_INITIALIZED, "sync allocator") {
   atomic_store(&uid_gen_, 0, memory_order_relaxed);
 }
diff --git a/compiler-rt/lib/tsan/tests/unit/CMakeLists.txt b/compiler-rt/lib/tsan/tests/unit/CMakeLists.txt
--- a/compiler-rt/lib/tsan/tests/unit/CMakeLists.txt
+++ b/compiler-rt/lib/tsan/tests/unit/CMakeLists.txt
@@ -1,5 +1,6 @@
 set(TSAN_UNIT_TEST_SOURCES
   tsan_clock_test.cpp
+  tsan_dense_alloc_test.cpp
   tsan_flags_test.cpp
   tsan_mman_test.cpp
   tsan_mutex_test.cpp
diff --git a/compiler-rt/lib/tsan/tests/unit/tsan_dense_alloc_test.cpp b/compiler-rt/lib/tsan/tests/unit/tsan_dense_alloc_test.cpp
--- a/compiler-rt/lib/tsan/tests/unit/tsan_dense_alloc_test.cpp
+++ b/compiler-rt/lib/tsan/tests/unit/tsan_dense_alloc_test.cpp
@@ -21,28 +21,29 @@
 namespace __tsan {
 
 TEST(DenseSlabAlloc, Basic) {
-  typedef DenseSlabAlloc Alloc;
+  typedef u64 T;
+  typedef DenseSlabAlloc Alloc;
   typedef Alloc::Cache Cache;
   typedef Alloc::IndexT IndexT;
-  const int N = 1000;
+  const T N = 1000;
 
-  Alloc alloc;
+  Alloc alloc("test");
   Cache cache;
   alloc.InitCache(&cache);
 
   IndexT blocks[N];
   for (int ntry = 0; ntry < 3; ntry++) {
-    for (int i = 0; i < N; i++) {
+    for (T i = 0; i < N; i++) {
       IndexT idx = alloc.Alloc(&cache);
       blocks[i] = idx;
       EXPECT_NE(idx, 0U);
-      int *v = alloc.Map(idx);
+      T *v = alloc.Map(idx);
       *v = i;
     }
 
-    for (int i = 0; i < N; i++) {
+    for (T i = 0; i < N; i++) {
       IndexT idx = blocks[i];
-      int *v = alloc.Map(idx);
+      T *v = alloc.Map(idx);
       EXPECT_EQ(*v, i);
       alloc.Free(&cache, idx);
     }
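Not part of the patch: below is a minimal standalone sketch of the two ideas the tsan_dense_alloc.h hunk introduces, namely moving the parameter sanity checks from runtime CHECKs to static_asserts and adding a LinkerInitialized tag constructor that skips zeroing the potentially huge first-level map (linker-initialized globals already live in zeroed BSS, so touching the array would only page it in), while the ordinary constructor delegates to the tag constructor and then zeroes the storage explicitly. The class name SlabSketch, its member layout, and the sizes used here are hypothetical and only illustrate the pattern; this is not the actual DenseSlabAlloc implementation.

// Hypothetical illustration of the pattern; not the TSan implementation.
#include <cstddef>
#include <cstdint>
#include <cstring>

enum LinkerInitialized { LINKER_INITIALIZED };

template <typename T, size_t kL1Size, size_t kL2Size, uint64_t kReserved = 0>
class SlabSketch {
 public:
  using IndexT = uint32_t;

  // Parameter checks done at compile time, mirroring the patch.
  static_assert((kL1Size & (kL1Size - 1)) == 0, "kL1Size must be a power-of-two");
  static_assert((kL2Size & (kL2Size - 1)) == 0, "kL2Size must be a power-of-two");
  static_assert(kL1Size * kL2Size <= (1ull << (sizeof(IndexT) * 8)),
                "kL1Size/kL2Size are too large");
  static_assert(((kL1Size * kL2Size - 1) & kReserved) == 0,
                "reserved bits don't fit");
  static_assert(sizeof(T) > sizeof(IndexT),
                "it doesn't make sense to use dense alloc");

  // For linker-initialized globals: the loader has already zeroed map_,
  // so skip the memset and avoid paging in the large array at startup.
  SlabSketch(LinkerInitialized, const char *name) : name_(name) {}

  // For everything else: delegate, then zero the first-level map explicitly.
  explicit SlabSketch(const char *name) : SlabSketch(LINKER_INITIALIZED, name) {
    std::memset(map_, 0, sizeof(map_));
  }

 private:
  const char *name_ = nullptr;
  IndexT freelist_ = 0;
  size_t fillpos_ = 0;
  T *map_[kL1Size];  // deliberately left untouched by the tag constructor
};

// A global instance relies on zero-initialized static storage:
static SlabSketch<uint64_t, 1 << 7, 1 << 7> g_sketch(LINKER_INITIALIZED, "global");

int main() {
  // A non-global instance cannot rely on pre-zeroed memory, so it uses the
  // ordinary constructor, which performs the memset after delegating.
  SlabSketch<uint64_t, 1 << 7, 1 << 7> local("local");
  (void)g_sketch;
  (void)local;
}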