Index: lldb/trunk/include/lldb/Host/TaskPool.h
===================================================================
--- lldb/trunk/include/lldb/Host/TaskPool.h
+++ lldb/trunk/include/lldb/Host/TaskPool.h
@@ -18,6 +18,8 @@
 #include <mutex>       // for mutex, unique_lock, condition_variable
 #include <type_traits> // for forward, result_of, move
 
+namespace lldb_private {
+
 // Global TaskPool class for running tasks in parallel on a set of worker thread
 // created the first
 // time the task pool is used. The TaskPool provide no guarantee about the order
@@ -89,4 +91,8 @@
 void TaskMapOverInt(size_t begin, size_t end,
                     const llvm::function_ref<void(size_t)> &func);
 
+unsigned GetHardwareConcurrencyHint();
+
+} // namespace lldb_private
+
 #endif // #ifndef utility_TaskPool_h_
Index: lldb/trunk/source/Host/common/TaskPool.cpp
===================================================================
--- lldb/trunk/source/Host/common/TaskPool.cpp
+++ lldb/trunk/source/Host/common/TaskPool.cpp
@@ -14,6 +14,8 @@
 #include <queue>  // for queue
 #include <thread> // for thread
 
+namespace lldb_private {
+
 namespace {
 class TaskPoolImpl {
 public:
@@ -46,13 +48,20 @@
 
 TaskPoolImpl::TaskPoolImpl() : m_thread_count(0) {}
 
+unsigned GetHardwareConcurrencyHint() {
+  // std::thread::hardware_concurrency may return 0
+  // if the value is not well defined or not computable.
+  static const unsigned g_hardware_concurrency =
+      std::max(1u, std::thread::hardware_concurrency());
+  return g_hardware_concurrency;
+}
+
 void TaskPoolImpl::AddTask(std::function<void()> &&task_fn) {
-  static const uint32_t max_threads = std::thread::hardware_concurrency();
   const size_t min_stack_size = 8 * 1024 * 1024;
 
   std::unique_lock<std::mutex> lock(m_tasks_mutex);
   m_tasks.emplace(std::move(task_fn));
-  if (m_thread_count < max_threads) {
+  if (m_thread_count < GetHardwareConcurrencyHint()) {
     m_thread_count++;
     // Note that this detach call needs to happen with the m_tasks_mutex held.
     // This prevents the thread
@@ -77,7 +86,7 @@
       break;
     }
 
-    std::function<void()> f = pool->m_tasks.front();
+    std::function<void()> f = std::move(pool->m_tasks.front());
     pool->m_tasks.pop();
     lock.unlock();
 
@@ -87,10 +96,9 @@
 
 void TaskMapOverInt(size_t begin, size_t end,
                     const llvm::function_ref<void(size_t)> &func) {
+  const size_t num_workers = std::min<size_t>(end, GetHardwareConcurrencyHint());
   std::atomic<size_t> idx{begin};
-  size_t num_workers =
-      std::min<size_t>(end, std::thread::hardware_concurrency());
-
+
   auto wrapper = [&idx, end, &func]() {
     while (true) {
       size_t i = idx.fetch_add(1);
@@ -107,3 +115,6 @@
   for (size_t i = 0; i < num_workers; i++)
     futures[i].wait();
 }
+
+} // namespace lldb_private
+
Index: lldb/trunk/unittests/Host/TaskPoolTest.cpp
===================================================================
--- lldb/trunk/unittests/Host/TaskPoolTest.cpp
+++ lldb/trunk/unittests/Host/TaskPoolTest.cpp
@@ -2,6 +2,8 @@
 
 #include "lldb/Host/TaskPool.h"
 
+using namespace lldb_private;
+
 TEST(TaskPoolTest, AddTask) {
   auto fn = [](int x) { return x * x + 1; };
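
Note (not part of the patch): the diff replaces direct uses of std::thread::hardware_concurrency() with GetHardwareConcurrencyHint(), which clamps the result to at least 1 because the standard allows hardware_concurrency() to return 0 when the value is not computable. The standalone sketch below illustrates the same clamping idiom with only the C++ standard library; all names in it are hypothetical and not taken from the patch.

// Minimal sketch of the clamping idiom introduced by the patch.
#include <algorithm>
#include <cstdio>
#include <thread>

// std::thread::hardware_concurrency() may return 0 when the value is unknown,
// so treat 0 as "one worker"; the magic static computes the value only once.
static unsigned HardwareConcurrencyHint() {
  static const unsigned hint =
      std::max(1u, std::thread::hardware_concurrency());
  return hint;
}

int main() {
  // Never spawn more workers than there are work items (here, 3 items),
  // mirroring the std::min<size_t>(end, hint) logic in TaskMapOverInt.
  const unsigned num_items = 3;
  const unsigned num_workers = std::min(num_items, HardwareConcurrencyHint());
  std::printf("workers: %u, hint: %u\n", num_workers, HardwareConcurrencyHint());
  return 0;
}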