Index: llvm/trunk/include/llvm/Support/ThreadPool.h
===================================================================
--- llvm/trunk/include/llvm/Support/ThreadPool.h
+++ llvm/trunk/include/llvm/Support/ThreadPool.h
@@ -35,17 +35,8 @@
 /// for some work to become available.
 class ThreadPool {
 public:
-#ifndef _MSC_VER
-  using VoidTy = void;
   using TaskTy = std::function<void()>;
   using PackagedTaskTy = std::packaged_task<void()>;
-#else
-  // MSVC 2013 has a bug and can't use std::packaged_task<void()>;
-  // We force it to use bool(bool) instead.
-  using VoidTy = bool;
-  using TaskTy = std::function<bool(bool)>;
-  using PackagedTaskTy = std::packaged_task<bool(bool)>;
-#endif
 
   /// Construct a pool with the number of core available on the system (or
   /// whatever the value returned by std::thread::hardware_concurrency() is).
@@ -60,30 +51,17 @@
   /// Asynchronous submission of a task to the pool. The returned future can be
   /// used to wait for the task to finish and is *non-blocking* on destruction.
   template <typename Function, typename... Args>
-  inline std::shared_future<VoidTy> async(Function &&F, Args &&... ArgList) {
+  inline std::shared_future<void> async(Function &&F, Args &&... ArgList) {
     auto Task =
         std::bind(std::forward<Function>(F), std::forward<Args>(ArgList)...);
-#ifndef _MSC_VER
     return asyncImpl(std::move(Task));
-#else
-    // This lambda has to be marked mutable because MSVC 2013's std::bind call
-    // operator isn't const qualified.
-    return asyncImpl([Task](VoidTy) mutable -> VoidTy {
-      Task();
-      return VoidTy();
-    });
-#endif
   }
 
   /// Asynchronous submission of a task to the pool. The returned future can be
   /// used to wait for the task to finish and is *non-blocking* on destruction.
   template <typename Function>
-  inline std::shared_future<VoidTy> async(Function &&F) {
-#ifndef _MSC_VER
+  inline std::shared_future<void> async(Function &&F) {
     return asyncImpl(std::forward<Function>(F));
-#else
-    return asyncImpl([F] (VoidTy) -> VoidTy { F(); return VoidTy(); });
-#endif
   }
 
   /// Blocking wait for all the threads to complete and the queue to be empty.
@@ -93,7 +71,7 @@
 private:
   /// Asynchronous submission of a task to the pool. The returned future can be
   /// used to wait for the task to finish and is *non-blocking* on destruction.
-  std::shared_future<VoidTy> asyncImpl(TaskTy F);
+  std::shared_future<void> asyncImpl(TaskTy F);
 
   /// Threads in flight
   std::vector<std::thread> Threads;
Index: llvm/trunk/lib/Support/ThreadPool.cpp
===================================================================
--- llvm/trunk/lib/Support/ThreadPool.cpp
+++ llvm/trunk/lib/Support/ThreadPool.cpp
@@ -53,11 +53,7 @@
           Tasks.pop();
         }
         // Run the task we just grabbed
-#ifndef _MSC_VER
         Task();
-#else
-        Task(/* unused */ false);
-#endif
 
         {
           // Adjust `ActiveThreads`, in case someone waits on ThreadPool::wait()
@@ -82,7 +78,7 @@
                        [&] { return !ActiveThreads && Tasks.empty(); });
 }
 
-std::shared_future<ThreadPool::VoidTy> ThreadPool::asyncImpl(TaskTy Task) {
+std::shared_future<void> ThreadPool::asyncImpl(TaskTy Task) {
   /// Wrap the Task in a packaged_task to return a future object.
   PackagedTaskTy PackagedTask(std::move(Task));
   auto Future = PackagedTask.get_future();
@@ -128,25 +124,16 @@
   while (!Tasks.empty()) {
     auto Task = std::move(Tasks.front());
     Tasks.pop();
-#ifndef _MSC_VER
-    Task();
-#else
-    Task(/* unused */ false);
-#endif
+    Task();
   }
 }
 
-std::shared_future<ThreadPool::VoidTy> ThreadPool::asyncImpl(TaskTy Task) {
-#ifndef _MSC_VER
+std::shared_future<void> ThreadPool::asyncImpl(TaskTy Task) {
   // Get a Future with launch::deferred execution using std::async
  auto Future = std::async(std::launch::deferred, std::move(Task)).share();
   // Wrap the future so that both ThreadPool::wait() can operate and the
   // returned future can be sync'ed on.
   PackagedTaskTy PackagedTask([Future]() { Future.get(); });
-#else
-  auto Future = std::async(std::launch::deferred, std::move(Task), false).share();
-  PackagedTaskTy PackagedTask([Future](bool) -> bool { Future.get(); return false; });
-#endif
   Tasks.push(std::move(PackagedTask));
   return Future;
 }
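
For reference, below is a minimal usage sketch of the ThreadPool API as it stands after this change; it is not part of the patch. It only relies on what the patch itself shows: the default constructor sized by std::thread::hardware_concurrency(), async() returning std::shared_future<void> on every compiler now that the MSVC 2013 bool(bool) workaround is gone, and the blocking wait() call.

// Usage sketch (not part of the patch), assuming the ThreadPool API at the
// time of this change: async() returns std::shared_future<void> everywhere.
#include "llvm/Support/ThreadPool.h"
#include <atomic>
#include <cassert>
#include <future>

int main() {
  std::atomic<int> Count(0);
  {
    llvm::ThreadPool Pool; // one thread per core, per hardware_concurrency()
    // Wait on a single task through its shared_future<void>...
    std::shared_future<void> F = Pool.async([&Count] { ++Count; });
    Pool.async([&Count] { ++Count; });
    F.wait();
    // ...or block until the whole queue has been drained.
    Pool.wait();
  }
  assert(Count == 2);
  return 0;
}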