diff --git a/llvm/include/llvm/Support/ThreadSafeAllocator.h b/llvm/include/llvm/Support/ThreadSafeAllocator.h
new file mode 100644
--- /dev/null
+++ b/llvm/include/llvm/Support/ThreadSafeAllocator.h
@@ -0,0 +1,60 @@
+//===- ThreadSafeAllocator.h ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_THREADSAFEALLOCATOR_H
+#define LLVM_SUPPORT_THREADSAFEALLOCATOR_H
+
+#include "llvm/ADT/STLFunctionalExtras.h"
+#include "llvm/Support/Allocator.h"
+#include <atomic>
+
+namespace llvm {
+
+/// Thread-safe allocator adaptor. Uses a spin lock on the assumption that
+/// contention here is extremely rare.
+///
+/// TODO: Using a spin lock on every allocation can be quite expensive when
+/// contention is high. Since this is mainly used for BumpPtrAllocator and
+/// SpecificBumpPtrAllocator, it'd be better to have a specific thread-safe
+/// BumpPtrAllocator implementation that only use a fair lock when allocating a
+/// new slab but otherwise using atomic and be lock-free.
+template <class AllocatorType> class ThreadSafeAllocator {
+  // Busy-wait spin lock; acquired for the duration of each allocation.
+  struct LockGuard {
+    LockGuard(std::atomic_flag &Flag) : Flag(Flag) {
+      // Fast path: uncontended test_and_set; only spin when contended.
+      if (LLVM_UNLIKELY(Flag.test_and_set(std::memory_order_acquire)))
+        while (Flag.test_and_set(std::memory_order_acquire)) {
+        }
+    }
+    ~LockGuard() { Flag.clear(std::memory_order_release); }
+    std::atomic_flag &Flag;
+  };
+
+public:
+  auto Allocate(size_t N) {
+    LockGuard Lock(Flag);
+    return Alloc.Allocate(N);
+  }
+
+  auto Allocate(size_t Size, size_t Align) {
+    LockGuard Lock(Flag);
+    return Alloc.Allocate(Size, Align);
+  }
+
+  void applyLocked(llvm::function_ref<void(AllocatorType &Alloc)> Fn) {
+    LockGuard Lock(Flag);
+    Fn(Alloc);
+  }
+
+private:
+  AllocatorType Alloc;
+  std::atomic_flag Flag = ATOMIC_FLAG_INIT;
+};
+
+} // namespace llvm
+
+#endif // LLVM_SUPPORT_THREADSAFEALLOCATOR_H
diff --git a/llvm/unittests/Support/CMakeLists.txt b/llvm/unittests/Support/CMakeLists.txt
--- a/llvm/unittests/Support/CMakeLists.txt
+++ b/llvm/unittests/Support/CMakeLists.txt
@@ -83,6 +83,7 @@
   TaskQueueTest.cpp
   ThreadLocalTest.cpp
   ThreadPool.cpp
+  ThreadSafeAllocatorTest.cpp
   Threading.cpp
   TimerTest.cpp
   TimeProfilerTest.cpp
diff --git a/llvm/unittests/Support/ThreadSafeAllocatorTest.cpp b/llvm/unittests/Support/ThreadSafeAllocatorTest.cpp
new file mode 100644
--- /dev/null
+++ b/llvm/unittests/Support/ThreadSafeAllocatorTest.cpp
@@ -0,0 +1,73 @@
+//===- llvm/unittest/Support/ThreadSafeAllocatorTest.cpp ------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Support/ThreadSafeAllocator.h"
+#include "llvm/Support/ThreadPool.h"
+#include "gtest/gtest.h"
+#include <atomic>
+#include <unistd.h>
+
+using namespace llvm;
+
+namespace {
+class MockAllocator : public AllocatorBase<MockAllocator> {
+public:
+  MockAllocator() = default;
+
+  void *Allocate(size_t Size, size_t Alignment) {
+    auto NotExclusive = AllocFlag.test_and_set();
+    (void)NotExclusive;
+    assert(!NotExclusive && "Not exclusive");
+    BytesAllocated += Size;
+    usleep(5); // add some delay.
+    AllocFlag.clear();
+    return Reserved;
+  }
+  unsigned getBytesAllocated() const { return BytesAllocated; }
+
+private:
+  unsigned BytesAllocated = 0;
+  int Reserved[100];
+  std::atomic_flag AllocFlag = ATOMIC_FLAG_INIT;
+};
+
+} // namespace
+
+TEST(ThreadSafeAllocatorTest, AllocWithAlign) {
+  ThreadSafeAllocator<MockAllocator> Alloc;
+  ThreadPool Threads;
+
+  for (unsigned Index = 1; Index < 100; ++Index)
+    Threads.async(
+        [&Alloc](unsigned I) {
+          int *P = (int *)Alloc.Allocate(sizeof(int) * I, alignof(int));
+          P[I - 1] = I;
+        },
+        Index);
+
+  Threads.wait();
+
+  Alloc.applyLocked([](MockAllocator &Alloc) {
+    EXPECT_EQ(4950U * sizeof(int), Alloc.getBytesAllocated());
+  });
+}
+
+TEST(ThreadSafeAllocatorTest, SpecificBumpPtrAllocator) {
+  ThreadSafeAllocator<SpecificBumpPtrAllocator<int>> Alloc;
+  ThreadPool Threads;
+
+  for (unsigned Index = 1; Index < 100; ++Index)
+    Threads.async(
+        [&Alloc](unsigned I) {
+          int *P = Alloc.Allocate(I);
+          P[I - 1] = I;
+        },
+        Index);
+
+  Threads.wait();
+}