diff --git a/llvm/include/llvm/Support/ThreadSafeAllocator.h b/llvm/include/llvm/Support/ThreadSafeAllocator.h
new file mode 100644
--- /dev/null
+++ b/llvm/include/llvm/Support/ThreadSafeAllocator.h
@@ -0,0 +1,63 @@
+//===- ThreadSafeAllocator.h ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_THREADSAFEALLOCATOR_H
+#define LLVM_SUPPORT_THREADSAFEALLOCATOR_H
+
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/STLFunctionalExtras.h"
+#include "llvm/Support/Allocator.h"
+#include <atomic>
+
+namespace llvm {
+
+/// Thread-safe allocator adaptor. Uses a spin lock on the assumption that
+/// contention here is extremely rare.
+///
+/// TODO: Using a spin lock on every allocation can be quite expensive when
+/// contention is high. Since this is mainly used for BumpPtrAllocator and
+/// SpecificBumpPtrAllocator, it'd be better to have a specific thread-safe
+/// BumpPtrAllocator implementation that only uses a fair lock when allocating
+/// a new slab and is otherwise lock-free, using atomics.
+template <class AllocatorType> class ThreadSafeAllocator {
+  struct LockGuard {
+    LockGuard(std::atomic_flag &Flag) : Flag(Flag) {
+      if (LLVM_UNLIKELY(Flag.test_and_set(std::memory_order_acquire)))
+        while (Flag.test_and_set(std::memory_order_acquire)) {
+        }
+    }
+    ~LockGuard() { Flag.clear(std::memory_order_release); }
+    std::atomic_flag &Flag;
+  };
+
+public:
+  auto Allocate(size_t N) {
+    return applyLocked([N](AllocatorType &Alloc) { return Alloc.Allocate(N); });
+  }
+
+  auto Allocate(size_t Size, size_t Align) {
+    return applyLocked([Size, Align](AllocatorType &Alloc) {
+      return Alloc.Allocate(Size, Align);
+    });
+  }
+
+  template <typename FnT, typename T = typename llvm::function_traits<
+                              std::decay_t<FnT>>::result_t>
+  T applyLocked(FnT Fn) {
+    LockGuard Lock(Flag);
+    return Fn(Alloc);
+  }
+
+private:
+  AllocatorType Alloc;
+  std::atomic_flag Flag = ATOMIC_FLAG_INIT;
+};
+
+} // namespace llvm
+
+#endif // LLVM_SUPPORT_THREADSAFEALLOCATOR_H
diff --git a/llvm/unittests/Support/CMakeLists.txt b/llvm/unittests/Support/CMakeLists.txt
--- a/llvm/unittests/Support/CMakeLists.txt
+++ b/llvm/unittests/Support/CMakeLists.txt
@@ -80,6 +80,7 @@
   TarWriterTest.cpp
   TaskQueueTest.cpp
   ThreadPool.cpp
+  ThreadSafeAllocatorTest.cpp
   Threading.cpp
   TimerTest.cpp
   TimeProfilerTest.cpp
diff --git a/llvm/unittests/Support/ThreadSafeAllocatorTest.cpp b/llvm/unittests/Support/ThreadSafeAllocatorTest.cpp
new file mode 100644
--- /dev/null
+++ b/llvm/unittests/Support/ThreadSafeAllocatorTest.cpp
@@ -0,0 +1,138 @@
+//===- llvm/unittest/Support/ThreadSafeAllocatorTest.cpp ------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Support/ThreadSafeAllocator.h"
+#include "llvm/Config/llvm-config.h"
+#include "llvm/Support/ThreadPool.h"
+#include "gtest/gtest.h"
+#include <atomic>
+#include <thread>
+
+using namespace llvm;
+
+namespace {
+
+struct AllocCondition {
+  std::mutex BusyLock, EndLock;
+  std::condition_variable Busy, End;
+  bool IsBusy = false, IsEnd = false;
+  std::atomic<unsigned> BytesAllocated = 0;
+
+  void startAllocation() {
+    {
+      std::lock_guard<std::mutex> Lock(BusyLock);
+      IsBusy = true;
+    }
+    Busy.notify_all();
+  }
+  void waitAllocationStarted() {
+    std::unique_lock<std::mutex> LBusy(BusyLock);
+    Busy.wait(LBusy, [&]() { return IsBusy; });
+    IsBusy = false;
+  }
+  void finishAllocation() {
+    {
+      std::lock_guard<std::mutex> Lock(EndLock);
+      IsEnd = true;
+    }
+    End.notify_all();
+  }
+  void waitAllocationFinished() {
+    std::unique_lock<std::mutex> LEnd(EndLock);
+    // Wait for end state.
+    End.wait(LEnd, [&]() { return IsEnd; });
+    IsEnd = false;
+  }
+};
+
+class MockAllocator : public AllocatorBase<MockAllocator> {
+public:
+  MockAllocator() = default;
+
+  void *Allocate(size_t Size, size_t Alignment) {
+    C.startAllocation();
+    C.waitAllocationFinished();
+    C.BytesAllocated += Size;
+    return Reserved;
+  }
+
+  AllocCondition &getAllocCondition() { return C; }
+
+private:
+  char Reserved[16];
+  AllocCondition C;
+};
+
+} // namespace
+
+#if (LLVM_ENABLE_THREADS)
+TEST(ThreadSafeAllocatorTest, AllocWait) {
+  ThreadSafeAllocator<MockAllocator> Alloc;
+  AllocCondition *C;
+  // Get the allocation condition from the allocator; this requires the lock.
+  Alloc.applyLocked(
+      [&](MockAllocator &Alloc) { C = &Alloc.getAllocCondition(); });
+  ThreadPool Threads;
+  // First allocation of 1 byte.
+  Threads.async([&Alloc]() {
+    char *P = (char *)Alloc.Allocate(1, alignof(char));
+    P[0] = 0;
+  });
+  // No allocation yet.
+  EXPECT_EQ(C->BytesAllocated, 0u);
+  C->waitAllocationStarted(); // Wait till the 1st allocation starts.
+  // Second allocation of 2 bytes.
+  Threads.async([&Alloc]() {
+    char *P = (char *)Alloc.Allocate(2, alignof(char));
+    P[1] = 0;
+  });
+  C->finishAllocation(); // Finish the 1st allocation.
+
+  C->waitAllocationStarted(); // Wait till the 2nd allocation starts.
+  // Still only 1 byte allocated since the 2nd allocation is not finished yet.
+  EXPECT_EQ(C->BytesAllocated, 1u);
+  C->finishAllocation(); // Finish the 2nd allocation.
+
+  Threads.wait(); // All allocations done.
+  EXPECT_EQ(C->BytesAllocated, 3u);
+}
+
+TEST(ThreadSafeAllocatorTest, AllocWithAlign) {
+  ThreadSafeAllocator<BumpPtrAllocator> Alloc;
+  ThreadPool Threads;
+
+  for (unsigned Index = 1; Index < 100; ++Index)
+    Threads.async(
+        [&Alloc](unsigned I) {
+          int *P = (int *)Alloc.Allocate(sizeof(int) * I, alignof(int));
+          P[I - 1] = I;
+        },
+        Index);
+
+  Threads.wait();
+
+  Alloc.applyLocked([](BumpPtrAllocator &Alloc) {
+    EXPECT_EQ(4950U * sizeof(int), Alloc.getBytesAllocated());
+  });
+}
+
+TEST(ThreadSafeAllocatorTest, SpecificBumpPtrAllocator) {
+  ThreadSafeAllocator<SpecificBumpPtrAllocator<int>> Alloc;
+  ThreadPool Threads;
+
+  for (unsigned Index = 1; Index < 100; ++Index)
+    Threads.async(
+        [&Alloc](unsigned I) {
+          int *P = Alloc.Allocate(I);
+          P[I - 1] = I;
+        },
+        Index);
+
+  Threads.wait();
+}
+#endif
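
Usage sketch (not part of the patch): the adaptor is meant to wrap an existing allocator so it can be shared across ThreadPool workers, which is what the AllocWithAlign test exercises. A minimal stand-alone example follows; the saveString helper and the main driver are illustrative assumptions, not code from this change, and they assume BumpPtrAllocator as the underlying allocator.

#include "llvm/Support/ThreadPool.h"
#include "llvm/Support/ThreadSafeAllocator.h"
#include <cstring>

using namespace llvm;

// Hypothetical helper: copy a null-terminated string into memory owned by the
// shared thread-safe allocator and return the copy.
static char *saveString(ThreadSafeAllocator<BumpPtrAllocator> &Alloc,
                        const char *Str) {
  size_t Len = std::strlen(Str) + 1;
  char *P = static_cast<char *>(Alloc.Allocate(Len, alignof(char)));
  std::memcpy(P, Str, Len);
  return P;
}

int main() {
  ThreadSafeAllocator<BumpPtrAllocator> Alloc;
  ThreadPool Threads;
  // Many workers allocate concurrently; the spin lock serializes them.
  for (int I = 0; I < 8; ++I)
    Threads.async([&Alloc] { (void)saveString(Alloc, "hello world"); });
  Threads.wait();
  // Direct inspection of the wrapped allocator goes through applyLocked so it
  // runs under the same lock as the allocations.
  Alloc.applyLocked([](BumpPtrAllocator &A) { (void)A.getBytesAllocated(); });
  return 0;
}

The same pattern should apply to SpecificBumpPtrAllocator<T>, where Allocate(N) returns a typed T* instead of a void*.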