[scudo] Add thread-safety annotations on TSD data members

Ideally, we want to assert that every operation on Cache/QuarantineCache
is performed with the `Mutex` acquired. However, the way the TSD is
currently accessed does not cooperate well with the thread-safety
analysis because of pointer aliasing. As an alternative, add getters for
the TSD members and attach the proper thread-safety annotations to them.

Reviewed By: cferris

Differential Revision: https://reviews.llvm.org/D142151
commit ae1bd3adf0 (parent e7d3f43eaf)
Author: Chia-hung Duan
Date:   2023-02-15 01:30:45 +00:00

 4 files changed, 38 insertions(+), 21 deletions(-)
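The pattern the diffs below adopt is Clang's thread-safety analysis: each TSD data member becomes GUARDED_BY(Mutex) and its getter is annotated with ASSERT_CAPABILITY(Mutex). What follows is a minimal, self-contained sketch of that pattern, not scudo code: ToyMutex, ToyTSD and ToyCache are invented names, the macros simply spell out the underlying Clang attributes, and the sketch assumes a Clang build with -Wthread-safety enabled.

// Stand-ins for scudo-style annotation macros, spelled out here so the
// sketch compiles on its own under clang -Wthread-safety.
#define CAPABILITY(x) __attribute__((capability(x)))
#define ACQUIRE(...) __attribute__((acquire_capability(__VA_ARGS__)))
#define RELEASE(...) __attribute__((release_capability(__VA_ARGS__)))
#define GUARDED_BY(x) __attribute__((guarded_by(x)))
#define ASSERT_CAPABILITY(x) __attribute__((assert_capability(x)))

// Toy mutex carrying the "capability" that the analysis tracks.
class CAPABILITY("mutex") ToyMutex {
public:
  void lock() ACQUIRE() {}
  void unlock() RELEASE() {}
};

struct ToyCache {
  int Count = 0;
};

// The pattern from this patch: the member is guarded by the mutex, and the
// getter asserts the capability, so every access is funneled through one
// annotated point instead of touching the member directly.
struct ToyTSD {
  ToyCache &getCache() ASSERT_CAPABILITY(Mutex) { return Cache; }
  void lock() ACQUIRE(Mutex) { Mutex.lock(); }
  void unlock() RELEASE(Mutex) { Mutex.unlock(); }

private:
  ToyMutex Mutex;
  // A method that read `Cache` directly without holding `Mutex` would be
  // flagged by -Wthread-safety; access through getCache() is accepted.
  ToyCache Cache GUARDED_BY(Mutex);
};

int main() {
  ToyTSD TSD;
  TSD.lock();
  TSD.getCache().Count++; // OK: the getter asserts that Mutex is held.
  TSD.unlock();
  return 0;
}

After a call to a function annotated with ASSERT_CAPABILITY, the analysis treats the named capability as held for the rest of the caller's scope, which is what lets the existing TSD call sites in the diffs keep compiling without per-caller lock annotations.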

compiler-rt/lib/scudo/standalone/combined.h

@@ -249,9 +249,9 @@ public:
   // - unlinking the local stats from the global ones (destroying the cache does
   //   the last two items).
   void commitBack(TSD<ThisT> *TSD) {
-    Quarantine.drain(&TSD->QuarantineCache,
-                     QuarantineCallback(*this, TSD->Cache));
-    TSD->Cache.destroy(&Stats);
+    Quarantine.drain(&TSD->getQuarantineCache(),
+                     QuarantineCallback(*this, TSD->getCache()));
+    TSD->getCache().destroy(&Stats);
   }
 
   ALWAYS_INLINE void *getHeaderTaggedPointer(void *Ptr) {
@@ -375,14 +375,14 @@ public:
       DCHECK_NE(ClassId, 0U);
       bool UnlockRequired;
       auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
-      Block = TSD->Cache.allocate(ClassId);
+      Block = TSD->getCache().allocate(ClassId);
       // If the allocation failed, the most likely reason with a 32-bit primary
       // is the region being full. In that event, retry in each successively
       // larger class until it fits. If it fails to fit in the largest class,
       // fallback to the Secondary.
       if (UNLIKELY(!Block)) {
         while (ClassId < SizeClassMap::LargestClassId && !Block)
-          Block = TSD->Cache.allocate(++ClassId);
+          Block = TSD->getCache().allocate(++ClassId);
         if (!Block)
           ClassId = 0;
       }
@@ -1168,7 +1168,7 @@ private:
       if (LIKELY(ClassId)) {
         bool UnlockRequired;
         auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
-        TSD->Cache.deallocate(ClassId, BlockBegin);
+        TSD->getCache().deallocate(ClassId, BlockBegin);
         if (UnlockRequired)
           TSD->unlock();
       } else {
@@ -1180,8 +1180,8 @@ private:
     } else {
       bool UnlockRequired;
       auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
-      Quarantine.put(&TSD->QuarantineCache,
-                     QuarantineCallback(*this, TSD->Cache), Ptr, Size);
+      Quarantine.put(&TSD->getQuarantineCache(),
+                     QuarantineCallback(*this, TSD->getCache()), Ptr, Size);
       if (UnlockRequired)
         TSD->unlock();
     }

compiler-rt/lib/scudo/standalone/tests/combined_test.cpp

@@ -447,9 +447,9 @@ SCUDO_TYPED_TEST(ScudoCombinedTest, CacheDrain) NO_THREAD_SAFETY_ANALYSIS {
 
   bool UnlockRequired;
   auto *TSD = Allocator->getTSDRegistry()->getTSDAndLock(&UnlockRequired);
-  EXPECT_TRUE(!TSD->Cache.isEmpty());
-  TSD->Cache.drain();
-  EXPECT_TRUE(TSD->Cache.isEmpty());
+  EXPECT_TRUE(!TSD->getCache().isEmpty());
+  TSD->getCache().drain();
+  EXPECT_TRUE(TSD->getCache().isEmpty());
   if (UnlockRequired)
     TSD->unlock();
 }
@@ -738,7 +738,7 @@ TEST(ScudoCombinedTest, BasicTrustyConfig) {
 
   bool UnlockRequired;
   auto *TSD = Allocator->getTSDRegistry()->getTSDAndLock(&UnlockRequired);
-  TSD->Cache.drain();
+  TSD->getCache().drain();
 
   Allocator->releaseToOS();
 }

compiler-rt/lib/scudo/standalone/tests/tsd_test.cpp

@@ -102,15 +102,15 @@ template <class AllocatorT> static void testRegistry() {
   bool UnlockRequired;
   auto TSD = Registry->getTSDAndLock(&UnlockRequired);
   EXPECT_NE(TSD, nullptr);
-  EXPECT_EQ(TSD->Cache.Canary, 0U);
+  EXPECT_EQ(TSD->getCache().Canary, 0U);
   if (UnlockRequired)
     TSD->unlock();
 
   Registry->initThreadMaybe(Allocator.get(), /*MinimalInit=*/false);
   TSD = Registry->getTSDAndLock(&UnlockRequired);
   EXPECT_NE(TSD, nullptr);
-  EXPECT_EQ(TSD->Cache.Canary, 0U);
-  memset(&TSD->Cache, 0x42, sizeof(TSD->Cache));
+  EXPECT_EQ(TSD->getCache().Canary, 0U);
+  memset(&TSD->getCache(), 0x42, sizeof(TSD->getCache()));
   if (UnlockRequired)
     TSD->unlock();
 }
@@ -141,14 +141,14 @@ template <typename AllocatorT> static void stressCache(AllocatorT *Allocator) {
   // For an exclusive TSD, the cache should be empty. We cannot guarantee the
   // same for a shared TSD.
   if (!UnlockRequired)
-    EXPECT_EQ(TSD->Cache.Canary, 0U);
+    EXPECT_EQ(TSD->getCache().Canary, 0U);
   // Transform the thread id to a uptr to use it as canary.
   const scudo::uptr Canary = static_cast<scudo::uptr>(
       std::hash<std::thread::id>{}(std::this_thread::get_id()));
-  TSD->Cache.Canary = Canary;
+  TSD->getCache().Canary = Canary;
   // Loop a few times to make sure that a concurrent thread isn't modifying it.
   for (scudo::uptr I = 0; I < 4096U; I++)
-    EXPECT_EQ(TSD->Cache.Canary, Canary);
+    EXPECT_EQ(TSD->getCache().Canary, Canary);
   if (UnlockRequired)
     TSD->unlock();
 }

compiler-rt/lib/scudo/standalone/tsd.h

@@ -25,9 +25,6 @@
 
 namespace scudo {
 
 template <class Allocator> struct alignas(SCUDO_CACHE_LINE_SIZE) TSD {
-  // TODO: Add thread-safety annotation on `Cache` and `QuarantineCache`.
-  typename Allocator::CacheT Cache;
-  typename Allocator::QuarantineCacheT QuarantineCache;
   using ThisT = TSD<Allocator>;
   u8 DestructorIterations = 0;
@@ -60,9 +57,29 @@ template <class Allocator> struct alignas(SCUDO_CACHE_LINE_SIZE) TSD {
     Instance->commitBack(this);
   }
 
+  // Ideally, we may want to assert that all the operations on
+  // Cache/QuarantineCache always have the `Mutex` acquired. However, the
+  // current architecture of accessing TSD is not easy to cooperate with the
+  // thread-safety analysis because of pointer aliasing. So now we just add the
+  // assertion on the getters of Cache/QuarantineCache.
+  //
+  // TODO(chiahungduan): Ideally, we want to do `Mutex.assertHeld` but acquiring
+  // TSD doesn't always require holding the lock. Add this assertion while the
+  // lock is always acquired.
+  typename Allocator::CacheT &getCache() ASSERT_CAPABILITY(Mutex) {
+    return Cache;
+  }
+  typename Allocator::QuarantineCacheT &getQuarantineCache()
+      ASSERT_CAPABILITY(Mutex) {
+    return QuarantineCache;
+  }
+
 private:
   HybridMutex Mutex;
   atomic_uptr Precedence = {};
+
+  typename Allocator::CacheT Cache GUARDED_BY(Mutex);
+  typename Allocator::QuarantineCacheT QuarantineCache GUARDED_BY(Mutex);
 };
 
 } // namespace scudo
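The comment added to tsd.h above also explains why the getters assert the capability rather than require it: TSDs are handed out through getTSDAndLock, whose locking the analysis cannot follow because of pointer aliasing. Below is a rough sketch of that distinction, again with invented names (SketchMutex, SketchTSD, getSketchTSDAndLock) and assuming clang with -Wthread-safety; it is a simplification, not the scudo implementation.

// Same stand-in macros as in the earlier sketch, repeated so this one is
// also self-contained under clang -Wthread-safety.
#define CAPABILITY(x) __attribute__((capability(x)))
#define ACQUIRE(...) __attribute__((acquire_capability(__VA_ARGS__)))
#define RELEASE(...) __attribute__((release_capability(__VA_ARGS__)))
#define GUARDED_BY(x) __attribute__((guarded_by(x)))
#define REQUIRES(...) __attribute__((requires_capability(__VA_ARGS__)))
#define ASSERT_CAPABILITY(x) __attribute__((assert_capability(x)))
#define NO_THREAD_SAFETY_ANALYSIS __attribute__((no_thread_safety_analysis))

class CAPABILITY("mutex") SketchMutex {
public:
  void lock() ACQUIRE() {}
  void unlock() RELEASE() {}
};

struct SketchTSD {
  // REQUIRES demands that every caller statically proves it holds Mutex;
  // ASSERT_CAPABILITY only records the callee's claim that it is held.
  int &counterRequires() REQUIRES(Mutex) { return Counter; }
  int &counterAsserts() ASSERT_CAPABILITY(Mutex) { return Counter; }
  void lock() ACQUIRE(Mutex) { Mutex.lock(); }
  void unlock() RELEASE(Mutex) { Mutex.unlock(); }

private:
  SketchMutex Mutex;
  int Counter GUARDED_BY(Mutex) = 0;
};

// Stands in for a getTSDAndLock-style accessor: which TSD comes back and
// whether its lock was taken is decided behind this call, so the analysis is
// disabled here and callers cannot prove anything about the returned Mutex.
SketchTSD *getSketchTSDAndLock(SketchTSD *Storage,
                               bool *UnlockRequired) NO_THREAD_SAFETY_ANALYSIS {
  *UnlockRequired = true;
  Storage->lock();
  return Storage;
}

void useSketchTSD(SketchTSD &Storage) {
  bool UnlockRequired;
  SketchTSD *TSD = getSketchTSDAndLock(&Storage, &UnlockRequired);
  // TSD->counterRequires() would be flagged here: the lock was taken behind
  // an opaque call, so the analysis cannot see that TSD->Mutex is held.
  TSD->counterAsserts() += 1; // Accepted: the getter asserts the capability.
  if (UnlockRequired)
    TSD->unlock();
}

With REQUIRES, every caller would have to prove statically that it holds the TSD's Mutex, which the opaque getTSDAndLock path cannot provide; the assertion keeps the invariant documented at the access point, while the TODO in the patch tracks tightening this to a runtime `Mutex.assertHeld` once the lock is always acquired.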