Index: include/llvm/Support/ThreadPool.h
===================================================================
--- include/llvm/Support/ThreadPool.h
+++ include/llvm/Support/ThreadPool.h
@@ -16,23 +16,6 @@
 
 #include "llvm/Support/thread.h"
 
-#ifdef _MSC_VER
-// concrt.h depends on eh.h for __uncaught_exception declaration
-// even if we disable exceptions.
-#include <eh.h>
-
-// Disable warnings from ppltasks.h transitively included by <future>.
-#pragma warning(push)
-#pragma warning(disable:4530)
-#pragma warning(disable:4062)
-#endif
-
-#include <future>
-
-#ifdef _MSC_VER
-#pragma warning(pop)
-#endif
-
 #include <condition_variable>
 #include <functional>
 #include <memory>
@@ -50,17 +33,7 @@
 /// for some work to become available.
 class ThreadPool {
 public:
-#ifndef _MSC_VER
-  using VoidTy = void;
   using TaskTy = std::function<void()>;
-  using PackagedTaskTy = std::packaged_task<void()>;
-#else
-  // MSVC 2013 has a bug and can't use std::packaged_task<void()>;
-  // We force it to use bool(bool) instead.
-  using VoidTy = bool;
-  using TaskTy = std::function<bool(bool)>;
-  using PackagedTaskTy = std::packaged_task<bool(bool)>;
-#endif
 
   /// Construct a pool with the number of core available on the system (or
   /// whatever the value returned by std::thread::hardware_concurrency() is).
@@ -72,49 +45,31 @@
   /// Blocking destructor: the pool will wait for all the threads to complete.
   ~ThreadPool();
 
-  /// Asynchronous submission of a task to the pool. The returned future can be
-  /// used to wait for the task to finish and is *non-blocking* on destruction.
+  /// Asynchronous submission of a task to the pool.
   template <typename Function, typename... Args>
-  inline std::shared_future<VoidTy> async(Function &&F, Args &&... ArgList) {
+  inline void async(Function &&F, Args &&... ArgList) {
     auto Task =
         std::bind(std::forward<Function>(F), std::forward<Args>(ArgList)...);
-#ifndef _MSC_VER
-    return asyncImpl(std::move(Task));
-#else
-    // This lambda has to be marked mutable because MSVC 2013's std::bind call
-    // operator isn't const qualified.
-    return asyncImpl([Task](VoidTy) mutable -> VoidTy {
-      Task();
-      return VoidTy();
-    });
-#endif
+    asyncImpl(std::move(Task));
   }
 
-  /// Asynchronous submission of a task to the pool. The returned future can be
-  /// used to wait for the task to finish and is *non-blocking* on destruction.
+  /// Asynchronous submission of a task to the pool.
   template <typename Function>
-  inline std::shared_future<VoidTy> async(Function &&F) {
-#ifndef _MSC_VER
-    return asyncImpl(std::forward<Function>(F));
-#else
-    return asyncImpl([F] (VoidTy) -> VoidTy { F(); return VoidTy(); });
-#endif
-  }
+  inline void async(Function &&F) { asyncImpl([F] () { F(); }); }
 
   /// Blocking wait for all the threads to complete and the queue to be empty.
   /// It is an error to try to add new tasks while blocking on this call.
   void wait();
 
 private:
-  /// Asynchronous submission of a task to the pool. The returned future can be
-  /// used to wait for the task to finish and is *non-blocking* on destruction.
-  std::shared_future<VoidTy> asyncImpl(TaskTy F);
+  /// Asynchronous submission of a task to the pool.
+  void asyncImpl(TaskTy F);
 
   /// Threads in flight
   std::vector<llvm::thread> Threads;
 
   /// Tasks waiting for execution in the pool.
-  std::queue<PackagedTaskTy> Tasks;
+  std::queue<TaskTy> Tasks;
 
   /// Locking and signaling for accessing the Tasks queue.
   std::mutex QueueLock;
Index: lib/Support/ThreadPool.cpp
===================================================================
--- lib/Support/ThreadPool.cpp
+++ lib/Support/ThreadPool.cpp
@@ -31,7 +31,7 @@
   for (unsigned ThreadID = 0; ThreadID < ThreadCount; ++ThreadID) {
     Threads.emplace_back([&] {
       while (true) {
-        PackagedTaskTy Task;
+        TaskTy Task;
         {
           std::unique_lock<std::mutex> LockGuard(QueueLock);
           // Wait for tasks to be pushed in the queue
@@ -53,11 +53,7 @@
           Tasks.pop();
         }
         // Run the task we just grabbed
-#ifndef _MSC_VER
         Task();
-#else
-        Task(/* unused */ false);
-#endif
 
         {
           // Adjust `ActiveThreads`, in case someone waits on ThreadPool::wait()
@@ -82,10 +78,7 @@
                            [&] { return !ActiveThreads && Tasks.empty(); });
 }
 
-std::shared_future<ThreadPool::VoidTy> ThreadPool::asyncImpl(TaskTy Task) {
-  /// Wrap the Task in a packaged_task to return a future object.
-  PackagedTaskTy PackagedTask(std::move(Task));
-  auto Future = PackagedTask.get_future();
+void ThreadPool::asyncImpl(TaskTy Task) {
   {
     // Lock the queue and push the new task
     std::unique_lock<std::mutex> LockGuard(QueueLock);
@@ -93,10 +86,9 @@
 
     // Don't allow enqueueing after disabling the pool
     assert(EnableFlag && "Queuing a thread during ThreadPool destruction");
-    Tasks.push(std::move(PackagedTask));
+    Tasks.push(std::move(Task));
   }
   QueueCondition.notify_one();
-  return Future.share();
 }
 
 // The destructor joins all threads, waiting for completion.
@@ -128,27 +120,12 @@
   while (!Tasks.empty()) {
     auto Task = std::move(Tasks.front());
     Tasks.pop();
-#ifndef _MSC_VER
-    Task();
-#else
-    Task(/* unused */ false);
-#endif
+    Task();
   }
 }
 
-std::shared_future<ThreadPool::VoidTy> ThreadPool::asyncImpl(TaskTy Task) {
-#ifndef _MSC_VER
-  // Get a Future with launch::deferred execution using std::async
-  auto Future = std::async(std::launch::deferred, std::move(Task)).share();
-  // Wrap the future so that both ThreadPool::wait() can operate and the
-  // returned future can be sync'ed on.
-  PackagedTaskTy PackagedTask([Future]() { Future.get(); });
-#else
-  auto Future = std::async(std::launch::deferred, std::move(Task), false).share();
-  PackagedTaskTy PackagedTask([Future](bool) -> bool { Future.get(); return false; });
-#endif
-  Tasks.push(std::move(PackagedTask));
-  return Future;
+void ThreadPool::asyncImpl(TaskTy Task) {
+  Tasks.push(std::move(Task));
 }
 
 ThreadPool::~ThreadPool() {
Index: unittests/Support/ThreadPool.cpp
===================================================================
--- unittests/Support/ThreadPool.cpp
+++ unittests/Support/ThreadPool.cpp
@@ -131,22 +131,6 @@
   ASSERT_EQ(2, i.load());
 }
 
-TEST_F(ThreadPoolTest, GetFuture) {
-  CHECK_UNSUPPORTED();
-  ThreadPool Pool{2};
-  std::atomic_int i{0};
-  Pool.async([this, &i] {
-    waitForMainThread();
-    ++i;
-  });
-  // Force the future using get()
-  Pool.async([&i] { ++i; }).get();
-  ASSERT_NE(2, i.load());
-  setMainThreadReady();
-  Pool.wait();
-  ASSERT_EQ(2, i.load());
-}
-
 TEST_F(ThreadPoolTest, PoolDestruction) {
   CHECK_UNSUPPORTED();
   // Test that we are waiting on destruction
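
Note on caller-side usage: since async() no longer returns a std::shared_future, callers that previously blocked on a single task (as the removed GetFuture test did with .get()) need another way to observe completion. The sketch below is not part of the patch; `doWork` and `example` are hypothetical names used only for illustration. Per-task completion can be signalled through a caller-owned std::promise, while ThreadPool::wait() remains the way to drain the whole pool:

    #include "llvm/Support/ThreadPool.h"
    #include <future>

    static void doWork() { /* hypothetical unit of work */ }

    void example() {
      llvm::ThreadPool Pool(2);

      // Previously: std::shared_future<void> F = Pool.async(doWork); F.get();
      // With this patch async() returns void, so carry a promise/future pair
      // when one specific task must be waited on:
      std::promise<void> Done;
      std::future<void> DoneFuture = Done.get_future();
      Pool.async([&Done] {
        doWork();
        Done.set_value();   // signal completion of this one task
      });
      DoneFuture.get();     // blocks until the task above has run

      // When per-task granularity is not needed, draining the pool suffices:
      Pool.async([] { doWork(); });
      Pool.wait();          // blocks until the queue is empty and threads idle
    }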