diff --git a/compiler-rt/lib/scudo/standalone/combined.h b/compiler-rt/lib/scudo/standalone/combined.h
--- a/compiler-rt/lib/scudo/standalone/combined.h
+++ b/compiler-rt/lib/scudo/standalone/combined.h
@@ -249,9 +249,9 @@
   // - unlinking the local stats from the global ones (destroying the cache does
   //   the last two items).
   void commitBack(TSD<ThisT> *TSD) {
-    Quarantine.drain(&TSD->QuarantineCache,
-                     QuarantineCallback(*this, TSD->Cache));
-    TSD->Cache.destroy(&Stats);
+    Quarantine.drain(&TSD->getQuarantineCache(),
+                     QuarantineCallback(*this, TSD->getCache()));
+    TSD->getCache().destroy(&Stats);
   }
 
   ALWAYS_INLINE void *getHeaderTaggedPointer(void *Ptr) {
@@ -375,14 +375,14 @@
       DCHECK_NE(ClassId, 0U);
       bool UnlockRequired;
       auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
-      Block = TSD->Cache.allocate(ClassId);
+      Block = TSD->getCache().allocate(ClassId);
       // If the allocation failed, the most likely reason with a 32-bit primary
       // is the region being full. In that event, retry in each successively
       // larger class until it fits. If it fails to fit in the largest class,
       // fallback to the Secondary.
       if (UNLIKELY(!Block)) {
         while (ClassId < SizeClassMap::LargestClassId && !Block)
-          Block = TSD->Cache.allocate(++ClassId);
+          Block = TSD->getCache().allocate(++ClassId);
         if (!Block)
           ClassId = 0;
       }
@@ -1168,7 +1168,7 @@
     if (LIKELY(ClassId)) {
       bool UnlockRequired;
       auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
-      TSD->Cache.deallocate(ClassId, BlockBegin);
+      TSD->getCache().deallocate(ClassId, BlockBegin);
       if (UnlockRequired)
         TSD->unlock();
     } else {
@@ -1180,8 +1180,8 @@
     } else {
       bool UnlockRequired;
       auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
-      Quarantine.put(&TSD->QuarantineCache,
-                     QuarantineCallback(*this, TSD->Cache), Ptr, Size);
+      Quarantine.put(&TSD->getQuarantineCache(),
+                     QuarantineCallback(*this, TSD->getCache()), Ptr, Size);
       if (UnlockRequired)
         TSD->unlock();
     }
diff --git a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
--- a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
@@ -447,9 +447,9 @@
 
   bool UnlockRequired;
   auto *TSD = Allocator->getTSDRegistry()->getTSDAndLock(&UnlockRequired);
-  EXPECT_TRUE(!TSD->Cache.isEmpty());
-  TSD->Cache.drain();
-  EXPECT_TRUE(TSD->Cache.isEmpty());
+  EXPECT_TRUE(!TSD->getCache().isEmpty());
+  TSD->getCache().drain();
+  EXPECT_TRUE(TSD->getCache().isEmpty());
   if (UnlockRequired)
     TSD->unlock();
 }
@@ -738,7 +738,7 @@
 
   bool UnlockRequired;
   auto *TSD = Allocator->getTSDRegistry()->getTSDAndLock(&UnlockRequired);
-  TSD->Cache.drain();
+  TSD->getCache().drain();
 
   Allocator->releaseToOS();
 }
diff --git a/compiler-rt/lib/scudo/standalone/tests/tsd_test.cpp b/compiler-rt/lib/scudo/standalone/tests/tsd_test.cpp
--- a/compiler-rt/lib/scudo/standalone/tests/tsd_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/tsd_test.cpp
@@ -102,15 +102,15 @@
   bool UnlockRequired;
   auto TSD = Registry->getTSDAndLock(&UnlockRequired);
   EXPECT_NE(TSD, nullptr);
-  EXPECT_EQ(TSD->Cache.Canary, 0U);
+  EXPECT_EQ(TSD->getCache().Canary, 0U);
   if (UnlockRequired)
     TSD->unlock();
 
   Registry->initThreadMaybe(Allocator.get(), /*MinimalInit=*/false);
   TSD = Registry->getTSDAndLock(&UnlockRequired);
   EXPECT_NE(TSD, nullptr);
-  EXPECT_EQ(TSD->Cache.Canary, 0U);
-  memset(&TSD->Cache, 0x42, sizeof(TSD->Cache));
+  EXPECT_EQ(TSD->getCache().Canary, 0U);
+  memset(&TSD->getCache(), 0x42, sizeof(TSD->getCache()));
   if (UnlockRequired)
     TSD->unlock();
 }
@@ -141,14 +141,14 @@
   // For an exclusive TSD, the cache should be empty. We cannot guarantee the
   // same for a shared TSD.
   if (!UnlockRequired)
-    EXPECT_EQ(TSD->Cache.Canary, 0U);
+    EXPECT_EQ(TSD->getCache().Canary, 0U);
   // Transform the thread id to a uptr to use it as canary.
   const scudo::uptr Canary = static_cast<scudo::uptr>(
       std::hash<std::thread::id>{}(std::this_thread::get_id()));
-  TSD->Cache.Canary = Canary;
+  TSD->getCache().Canary = Canary;
   // Loop a few times to make sure that a concurrent thread isn't modifying it.
   for (scudo::uptr I = 0; I < 4096U; I++)
-    EXPECT_EQ(TSD->Cache.Canary, Canary);
+    EXPECT_EQ(TSD->getCache().Canary, Canary);
   if (UnlockRequired)
     TSD->unlock();
 }
diff --git a/compiler-rt/lib/scudo/standalone/tsd.h b/compiler-rt/lib/scudo/standalone/tsd.h
--- a/compiler-rt/lib/scudo/standalone/tsd.h
+++ b/compiler-rt/lib/scudo/standalone/tsd.h
@@ -25,9 +25,6 @@
 namespace scudo {
 
 template <class Allocator> struct alignas(SCUDO_CACHE_LINE_SIZE) TSD {
-  // TODO: Add thread-safety annotation on `Cache` and `QuarantineCache`.
-  typename Allocator::CacheT Cache;
-  typename Allocator::QuarantineCacheT QuarantineCache;
   using ThisT = TSD<Allocator>;
   u8 DestructorIterations = 0;
 
@@ -60,9 +57,29 @@
     Instance->commitBack(this);
   }
 
+  // Ideally, we may want to assert that all the operations on
+  // Cache/QuarantineCache always have the `Mutex` acquired. However, the
+  // current architecture of accessing TSD is not easy to cooperate with the
+  // thread-safety analysis because of pointer aliasing. So now we just add the
+  // assertion on the getters of Cache/QuarantineCache.
+  //
+  // TODO(chiahungduan): Ideally, we want to do `Mutex.assertHeld` but acquiring
+  // TSD doesn't always require holding the lock. Add this assertion while the
+  // lock is always acquired.
+  typename Allocator::CacheT &getCache() ASSERT_CAPABILITY(Mutex) {
+    return Cache;
+  }
+  typename Allocator::QuarantineCacheT &getQuarantineCache()
+      ASSERT_CAPABILITY(Mutex) {
+    return QuarantineCache;
+  }
+
 private:
   HybridMutex Mutex;
   atomic_uptr Precedence = {};
+
+  typename Allocator::CacheT Cache GUARDED_BY(Mutex);
+  typename Allocator::QuarantineCacheT QuarantineCache GUARDED_BY(Mutex);
 };
 
 } // namespace scudo
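
Note (reviewer aside, not part of the patch): below is a minimal, self-contained sketch of how Clang's -Wthread-safety analysis treats the two annotations this change relies on. GUARDED_BY(Mutex) turns any access to the member into a warning unless the analysis can prove Mutex is held; ASSERT_CAPABILITY(Mutex) on the getters instead tells the analysis to assume Mutex is held from the call onward, which is why the patch can annotate the members without yet requiring the lock at every call site. The macro definitions mirror the Clang attributes behind scudo's thread_annotations.h; FakeMutex, TSDLike, and use() are made-up names for illustration only.

  // Check with: clang++ -std=c++17 -fsyntax-only -Wthread-safety sketch.cpp
  #define CAPABILITY(x) __attribute__((capability(x)))
  #define ACQUIRE(...) __attribute__((acquire_capability(__VA_ARGS__)))
  #define RELEASE(...) __attribute__((release_capability(__VA_ARGS__)))
  #define GUARDED_BY(x) __attribute__((guarded_by(x)))
  #define ASSERT_CAPABILITY(x) __attribute__((assert_capability(x)))

  class CAPABILITY("mutex") FakeMutex {
  public:
    void lock() ACQUIRE() {}
    void unlock() RELEASE() {}
  };

  struct TSDLike {
    // The getter asserts the capability: its own body may touch Cache, and
    // callers are treated as holding M after the call returns. No release is
    // expected for an asserted capability, unlike an acquired one.
    int &getCache() ASSERT_CAPABILITY(M) { return Cache; }
    void lock() ACQUIRE(M) { M.lock(); }
    void unlock() RELEASE(M) { M.unlock(); }

  private:
    FakeMutex M;
    int Cache GUARDED_BY(M); // unannotated access would trigger a warning
  };

  void use(TSDLike &T) {
    T.getCache() = 42; // OK: the assertion satisfies the analysis
    // A direct `T.Cache = 42;` (were Cache public) would warn that writing
    // Cache requires holding mutex M.
  }

As the TODO in the patch notes, once every path through getTSDAndLock actually holds the lock, these assert-style getters could presumably be tightened to a REQUIRES-style annotation, so the analysis would verify the acquisition at each call site instead of trusting the assertion.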