Index: openmp/runtime/CMakeLists.txt
===================================================================
--- openmp/runtime/CMakeLists.txt
+++ openmp/runtime/CMakeLists.txt
@@ -288,6 +288,9 @@
   set (LIBOMP_USE_VERSION_SYMBOLS FALSE)
 endif()
 
+# Unshackled task support defaults to OFF
+set(LIBOMP_USE_UNSHACKLED_TASK FALSE CACHE BOOL "Enable unshackled task support?")
+
 # OMPT-support defaults to ON for OpenMP 5.0+ and if the requirements in
 # cmake/config-ix.cmake are fulfilled.
 set(OMPT_DEFAULT FALSE)
Index: openmp/runtime/src/kmp.h
===================================================================
--- openmp/runtime/src/kmp.h
+++ openmp/runtime/src/kmp.h
@@ -2239,7 +2239,10 @@
   unsigned priority_specified : 1; /* set if the compiler provides priority
                                       setting for the task */
   unsigned detachable : 1; /* 1 == can detach */
-  unsigned reserved : 9; /* reserved for compiler use */
+#if USE_UNSHACKLED_TASK
+  unsigned unshackled : 1; /* 1 == unshackled task */
+  unsigned reserved : 8; /* reserved for compiler use */
+#else
+  unsigned reserved : 9; /* reserved for compiler use; keeps the flag total at
+                            16 bits when unshackled tasks are disabled */
+#endif
 
   /* Library flags */ /* Total library flags must be 16 bits */
   unsigned tasktype : 1; /* task is either explicit(1) or implicit (0) */
@@ -2324,6 +2327,10 @@
   kmp_task_stack_t td_susp_tied_tasks; // Stack of suspended tied tasks for task
                                        // scheduling constraint
 #endif // BUILD_TIED_TASK_STACK
+#if USE_UNSHACKLED_TASK
+  // Lock serializing per-thread operations, such as memory allocation, that
+  // several threads may perform on this thread's behalf when unshackled tasks
+  // are enabled
+  kmp_bootstrap_lock_t td_thread_lock;
+#endif
 } kmp_base_thread_data_t;
 
 #define TASK_DEQUE_BITS 8 // Used solely to define INITIAL_TASK_DEQUE_SIZE
@@ -2822,6 +2829,7 @@
 extern volatile int __kmp_init_monitor;
 #endif
 extern volatile int __kmp_init_user_locks;
+extern volatile int __kmp_init_unshackled_threads;
 extern int __kmp_init_counter;
 extern int __kmp_root_counter;
 extern int __kmp_version;
@@ -3052,7 +3060,9 @@
 static inline bool KMP_UBER_GTID(int gtid) {
   KMP_DEBUG_ASSERT(gtid >= KMP_GTID_MIN);
-  KMP_DEBUG_ASSERT(gtid < __kmp_threads_capacity);
+  KMP_DEBUG_ASSERT(gtid < (__kmp_init_unshackled_threads
+                               ? 2 * __kmp_threads_capacity
+                               : __kmp_threads_capacity));
   return (gtid >= 0 && __kmp_root[gtid] && __kmp_threads[gtid] &&
           __kmp_threads[gtid] == __kmp_root[gtid]->r.r_uber_thread);
 }
@@ -3910,6 +3920,30 @@
 extern void __kmp_omp_display_env(int verbose);
 
+#if USE_UNSHACKLED_TASK
+// Master thread of the unshackled team
+extern kmp_info_t *__kmp_unshackled_master_thread;
+// Descriptors of the unshackled threads
+extern kmp_info_t **__kmp_unshackled_threads;
+extern int __kmp_unshackled_threads_num;
+
+extern void __kmp_unshackled_threads_initz_routine();
+extern void __kmp_initialize_unshackled_threads();
+extern void __kmp_do_initialize_unshackled_threads();
+extern void __kmp_unshackled_threads_initz_wait();
+extern void __kmp_unshackled_initz_release();
+extern void __kmp_unshackled_master_thread_wait();
+extern void __kmp_unshackled_worker_thread_wait();
+extern void __kmp_unshackled_worker_thread_signal();
+
+// Check whether a given gtid belongs to an unshackled thread
+#define KMP_UNSHACKLED_THREAD(gtid) ((gtid) >= __kmp_threads_capacity)
+// Map a gtid to the index of an unshackled (shadow) thread. Index 0, the
+// master thread of the unshackled team, is skipped: e.g. with
+// __kmp_unshackled_threads_num == 8, regular gtids map onto indices 1..7.
+#define KMP_GTID_TO_SHADOW_GTID(gtid)                                         \
+  ((gtid) % (__kmp_unshackled_threads_num - 1) + 1)
+#endif
+
 #ifdef __cplusplus
 }
 #endif
Index: openmp/runtime/src/kmp_config.h.cmake
===================================================================
--- openmp/runtime/src/kmp_config.h.cmake
+++ openmp/runtime/src/kmp_config.h.cmake
@@ -44,6 +44,8 @@
 #define OMPT_DEBUG LIBOMP_OMPT_DEBUG
 #cmakedefine01 LIBOMP_OMPT_SUPPORT
 #define OMPT_SUPPORT LIBOMP_OMPT_SUPPORT
+#cmakedefine01 LIBOMP_USE_UNSHACKLED_TASK
+#define USE_UNSHACKLED_TASK LIBOMP_USE_UNSHACKLED_TASK
 #cmakedefine01 LIBOMP_OMPT_OPTIONAL
 #define OMPT_OPTIONAL LIBOMP_OMPT_OPTIONAL
 #cmakedefine01 LIBOMP_USE_ADAPTIVE_LOCKS
Index: openmp/runtime/src/kmp_global.cpp
===================================================================
--- openmp/runtime/src/kmp_global.cpp
+++ openmp/runtime/src/kmp_global.cpp
@@ -51,6 +51,9 @@
     0; /* 1 - launched, 2 - actually started (Windows* OS only) */
 #endif
 volatile int __kmp_init_user_locks = FALSE;
+// Defined unconditionally (not under USE_UNSHACKLED_TASK): it is referenced
+// from unguarded code such as KMP_UBER_GTID and __kmp_register_root, and
+// simply stays FALSE when the feature is disabled
+volatile int __kmp_init_unshackled_threads = FALSE;
 
 /* list of address of allocated caches for commons */
 kmp_cached_addr_t *__kmp_threadpriv_cache_list = NULL;
Index: openmp/runtime/src/kmp_runtime.cpp
===================================================================
--- openmp/runtime/src/kmp_runtime.cpp
+++ openmp/runtime/src/kmp_runtime.cpp
@@ -3611,6 +3611,12 @@
      serial initialization may be not a real initial thread). */
   capacity = __kmp_threads_capacity;
+#if USE_UNSHACKLED_TASK
+  if (__kmp_init_unshackled_threads) {
+    // The capacity doubles while the unshackled threads are being registered
+    capacity *= 2;
+  }
+#endif
   if (!initial_thread && TCR_PTR(__kmp_threads[0]) == NULL) {
     --capacity;
   }
@@ -3627,15 +3633,27 @@
     }
   }
 
-  /* find an available thread slot */
-  /* Don't reassign the zero slot since we need that to only be used by initial
-     thread */
-  for (gtid = (initial_thread ? 0 : 1); TCR_PTR(__kmp_threads[gtid]) != NULL;
-       gtid++)
-    ;
-  KA_TRACE(1,
-           ("__kmp_register_root: found slot in threads array: T#%d\n", gtid));
-  KMP_ASSERT(gtid < __kmp_threads_capacity);
+  if (!__kmp_init_unshackled_threads) {
+    /* find an available thread slot */
+    /* Don't reassign the zero slot since we need that to only be used by
+       initial thread */
+    for (gtid = (initial_thread ? 0 : 1); TCR_PTR(__kmp_threads[gtid]) != NULL;
+         gtid++)
+      ;
+    KA_TRACE(
+        1, ("__kmp_register_root: found slot in threads array: T#%d\n", gtid));
+    KMP_ASSERT(gtid < __kmp_threads_capacity);
+  } else {
+    /* find an available thread slot in the second half of the threads array,
+       which is reserved for the unshackled threads */
+    for (gtid = __kmp_threads_capacity; TCR_PTR(__kmp_threads[gtid]) != NULL;
+         gtid++)
+      ;
+    KA_TRACE(
+        1, ("__kmp_register_root: found slot in threads array: T#%d\n", gtid));
+    KMP_ASSERT(gtid < 2 * __kmp_threads_capacity);
+  }
 
   /* update global accounting */
   __kmp_all_nth++;
@@ -4292,9 +4310,23 @@
 #endif
   KMP_MB();
-  for (new_gtid = 1; TCR_PTR(__kmp_threads[new_gtid]) != NULL; ++new_gtid) {
-    KMP_DEBUG_ASSERT(new_gtid < __kmp_threads_capacity);
+
+#if USE_UNSHACKLED_TASK
+  // If we're initializing the unshackled threads, the search starts at the end
+  // of the regular threads array, i.e. at the start of the unshackled threads
+  // array
+  if (__kmp_init_unshackled_threads) {
+    for (new_gtid = __kmp_threads_capacity;
+         TCR_PTR(__kmp_threads[new_gtid]) != NULL; ++new_gtid) {
+      KMP_DEBUG_ASSERT(new_gtid < 2 * __kmp_threads_capacity);
+    }
+  } else {
+#endif
+    for (new_gtid = 1; TCR_PTR(__kmp_threads[new_gtid]) != NULL; ++new_gtid) {
+      KMP_DEBUG_ASSERT(new_gtid < __kmp_threads_capacity);
+    }
+#if USE_UNSHACKLED_TASK
   }
+#endif
 
   /* allocate space for it. */
   new_thr = (kmp_info_t *)__kmp_allocate(sizeof(kmp_info_t));
@@ -6677,9 +6709,16 @@
   size =
       (sizeof(kmp_info_t *) + sizeof(kmp_root_t *)) * __kmp_threads_capacity +
       CACHE_LINE;
+#if USE_UNSHACKLED_TASK
+  // Double the space so that the second halves of __kmp_threads and
+  // __kmp_root can hold the unshackled threads
+  size *= 2;
+#endif
   __kmp_threads = (kmp_info_t **)__kmp_allocate(size);
   __kmp_root = (kmp_root_t **)((char *)__kmp_threads +
                                sizeof(kmp_info_t *) * __kmp_threads_capacity);
+#if USE_UNSHACKLED_TASK
+  // Skip over the unshackled half of the threads array as well
+  __kmp_root = (kmp_root_t **)((char *)__kmp_root +
+                               sizeof(kmp_info_t *) * __kmp_threads_capacity);
+#endif
 
   /* init thread counts */
   KMP_DEBUG_ASSERT(__kmp_all_nth ==
@@ -6951,6 +6990,10 @@
   KA_TRACE(10, ("__kmp_parallel_initialize: exit\n"));
 
   __kmp_release_bootstrap_lock(&__kmp_initz_lock);
+
+#if USE_UNSHACKLED_TASK
+  __kmp_initialize_unshackled_threads();
+#endif
 }
 
 /* ------------------------------------------------------------------------ */
@@ -8297,7 +8340,6 @@
     }
   }
 }
-
 void __kmp_omp_display_env(int verbose) {
   __kmp_acquire_bootstrap_lock(&__kmp_initz_lock);
   if (__kmp_init_serial == 0)
@@ -8305,3 +8347,48 @@
   __kmp_display_env_impl(!verbose, verbose);
   __kmp_release_bootstrap_lock(&__kmp_initz_lock);
 }
+
+#if USE_UNSHACKLED_TASK
+kmp_info_t **__kmp_unshackled_threads;
+kmp_info_t *__kmp_unshackled_master_thread;
+int __kmp_unshackled_threads_num;
+
+namespace {
+void __kmp_unshackled_wrapper_fn(int *gtid, int *, ...) {
+  // The master thread releases the initial thread (which also resets
+  // __kmp_init_unshackled_threads under the initialization lock) and then
+  // blocks until runtime shutdown
+  if (__kmpc_master(nullptr, *gtid)) {
+    __kmp_unshackled_initz_release();
+    __kmp_unshackled_master_thread_wait();
+  }
+}
+} // namespace
+
+void __kmp_unshackled_threads_initz_routine() {
+  // Create a new root for the unshackled team/threads
+  const int gtid = __kmp_register_root(TRUE);
+  kmp_info_t *const master_thread = __kmp_threads[gtid];
+  __kmp_unshackled_master_thread = master_thread;
+  __kmp_unshackled_threads = &__kmp_threads[gtid];
+
+  // TODO: Determine how many unshackled threads
+  __kmp_unshackled_threads_num = 8;
+  master_thread->th.th_set_nproc = __kmp_unshackled_threads_num;
+
+  __kmpc_fork_call(nullptr, 0, __kmp_unshackled_wrapper_fn);
+}
+
+void __kmp_initialize_unshackled_threads() {
+  // Set the global variable indicating that we're initializing the unshackled
+  // team/threads
+  __kmp_init_unshackled_threads = TRUE;
+
+  __kmp_do_initialize_unshackled_threads();
+
+  // Wait here until the unshackled team has finished initialization
+  __kmp_unshackled_threads_initz_wait();
+}
+
+#endif
Index: openmp/runtime/src/kmp_tasking.cpp
===================================================================
--- openmp/runtime/src/kmp_tasking.cpp
+++ openmp/runtime/src/kmp_tasking.cpp
@@ -324,6 +324,16 @@
 static kmp_int32 __kmp_push_task(kmp_int32 gtid, kmp_task_t *task) {
   kmp_info_t *thread = __kmp_threads[gtid];
   kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
+
+#if USE_UNSHACKLED_TASK
+  // An unshackled task is always pushed to the deque of one of the unshackled
+  // threads, chosen by KMP_GTID_TO_SHADOW_GTID; update thread and gtid to
+  // refer to that thread
+  if (taskdata->td_flags.unshackled) {
+    thread = __kmp_unshackled_threads[KMP_GTID_TO_SHADOW_GTID(gtid)];
+    gtid = thread->th.th_info.ds.ds_gtid;
+  }
+#endif
+
   kmp_task_team_t *task_team = thread->th.th_task_team;
   kmp_int32 tid = __kmp_tid_from_gtid(gtid);
   kmp_thread_data_t *thread_data;
@@ -362,7 +372,8 @@
 
   // Find tasking deque specific to encountering thread
   thread_data = &task_team->tt.tt_threads_data[tid];
-  // No lock needed since only owner can allocate
+  // No lock needed even for an unshackled task: the deques of the unshackled
+  // threads are pre-allocated in __kmp_task_team_setup
   if (thread_data->td.td_deque == NULL) {
     __kmp_alloc_task_deque(thread, thread_data);
   }
@@ -424,6 +435,13 @@
 
   __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
 
+#if USE_UNSHACKLED_TASK
+  // Signal one unshackled worker thread to execute the task
+  if (taskdata->td_flags.unshackled) {
+    __kmp_unshackled_worker_thread_signal();
+  }
+#endif
+
   return TASK_SUCCESSFULLY_PUSHED;
 }
@@ -1165,6 +1183,24 @@
   if (!TCR_4(__kmp_init_middle))
     __kmp_middle_initialize();
 
+#if USE_UNSHACKLED_TASK
+  if (flags->unshackled) {
+    // Since the unshackled threads are created via __kmpc_fork_call, parallel
+    // support must be initialized first
+    if (!TCR_4(__kmp_init_parallel))
+      __kmp_parallel_initialize();
+
+    // An unshackled task encountered by a regular thread is redirected to the
+    // shadow thread given by KMP_GTID_TO_SHADOW_GTID(gtid), i.e. the
+    // (gtid % (__kmp_unshackled_threads_num - 1) + 1)-th unshackled thread
+    if (!KMP_UNSHACKLED_THREAD(gtid)) {
+      thread = __kmp_unshackled_threads[KMP_GTID_TO_SHADOW_GTID(gtid)];
+      team = thread->th.th_team;
+      // We don't change the parent-child relation for an unshackled task, as
+      // we need it for per-task-region synchronization
+    }
+  }
+#endif
+
   KA_TRACE(10, ("__kmp_task_alloc(enter): T#%d loc=%p, flags=(0x%x) "
                 "sizeof_task=%ld sizeof_shared=%ld entry=%p\n",
                 gtid, loc_ref, *((kmp_int32 *)flags), sizeof_kmp_task_t,
@@ -1175,6 +1211,13 @@
     }
     flags->final = 1;
   }
+
+#if USE_UNSHACKLED_TASK
+  // An unshackled task is never final
+  if (flags->unshackled)
+    flags->final = 0;
+#endif
+
   if (flags->tiedness == TASK_UNTIED && !team->t.t_serialized) {
     // Untied task encountered causes the TSC algorithm to check entire deque of
     // the victim thread. If no untied task encountered, then checking the head
@@ -1235,6 +1278,14 @@
   KA_TRACE(30, ("__kmp_task_alloc: T#%d Second malloc size: %ld\n", gtid,
                 sizeof_shareds));
 
+#if USE_UNSHACKLED_TASK
+  // Serialize allocations performed on behalf of an unshackled thread:
+  // several regular threads may allocate unshackled tasks on the same
+  // unshackled thread concurrently. unshackled_thread_data is computed only
+  // in the unshackled case, since th_task_team may be nullptr for a regular
+  // task encountered outside of a parallel region.
+  kmp_thread_data_t *unshackled_thread_data = nullptr;
+  if (flags->unshackled) {
+    kmp_task_team_t *task_team = thread->th.th_task_team;
+    unshackled_thread_data =
+        &task_team->tt.tt_threads_data[thread->th.th_info.ds.ds_tid];
+    __kmp_acquire_bootstrap_lock(&unshackled_thread_data->td.td_thread_lock);
+  }
+#endif
   // Avoid double allocation here by combining shareds with taskdata
 #if USE_FAST_MEMORY
   taskdata = (kmp_taskdata_t *)__kmp_fast_allocate(thread, shareds_offset +
@@ -1243,6 +1294,11 @@
   taskdata = (kmp_taskdata_t *)__kmp_thread_malloc(thread, shareds_offset +
                                                        sizeof_shareds);
 #endif /* USE_FAST_MEMORY */
+#if USE_UNSHACKLED_TASK
+  if (flags->unshackled)
+    __kmp_release_bootstrap_lock(&unshackled_thread_data->td.td_thread_lock);
+#endif
   ANNOTATE_HAPPENS_AFTER(taskdata);
   task = KMP_TASKDATA_TO_TASK(taskdata);
@@ -1288,6 +1344,7 @@
   taskdata->td_flags.destructors_thunk = flags->destructors_thunk;
   taskdata->td_flags.proxy = flags->proxy;
   taskdata->td_flags.detachable = flags->detachable;
+#if USE_UNSHACKLED_TASK
+  // The unshackled bit only exists when the feature is compiled in
+  taskdata->td_flags.unshackled = flags->unshackled;
+#endif
   taskdata->td_task_team = thread->th.th_task_team;
   taskdata->td_size_alloc = shareds_offset + sizeof_shareds;
   taskdata->td_flags.tasktype = TASK_EXPLICIT;
@@ -1848,6 +1905,13 @@
   must_wait = must_wait || (thread->th.th_task_team != NULL &&
                             thread->th.th_task_team->tt.tt_found_proxy_tasks);
+
+#if USE_UNSHACKLED_TASK
+  // When unshackled tasks are enabled, we must always wait: there might be
+  // tasks outside of any parallel region
+  must_wait = true;
+#endif
+
   if (must_wait) {
     kmp_flag_32 flag(RCAST(std::atomic<kmp_uint32> *,
                            &(taskdata->td_incomplete_child_tasks)),
@@ -3486,6 +3550,23 @@
              __kmp_gtid_from_thread(this_thr),
              team->t.t_task_team[other_team],
              ((team != NULL) ? team->t.t_id : -1), other_team));
+
+#if USE_UNSHACKLED_TASK
+    // For a regular thread, tasking is enabled and the deque allocated lazily,
+    // when a task is first pushed. The unshackled threads need this done ahead
+    // of time so that later operations can be performed without races.
+    kmp_task_team_t *task_team = team->t.t_task_team[other_team];
+    if (this_thr == __kmp_unshackled_master_thread &&
+        !KMP_TASKING_ENABLED(task_team)) {
+      __kmp_enable_tasking(task_team, this_thr);
+      for (int i = 0; i < task_team->tt.tt_nproc; ++i) {
+        kmp_thread_data_t *thread_data = &task_team->tt.tt_threads_data[i];
+        if (thread_data->td.td_deque == NULL) {
+          __kmp_alloc_task_deque(__kmp_unshackled_threads[i], thread_data);
+        }
+      }
+    }
+#endif
   } else { // Leave the old task team struct in place for the upcoming region;
            // adjust as needed
     kmp_task_team_t *task_team = team->t.t_task_team[other_team];
Index: openmp/runtime/src/kmp_wait_release.h
===================================================================
--- openmp/runtime/src/kmp_wait_release.h
+++ openmp/runtime/src/kmp_wait_release.h
@@ -337,8 +337,29 @@
         flag->execute_tasks(
             this_thr, th_gtid, final_spin,
             &tasks_completed USE_ITT_BUILD_ARG(itt_sync_obj), 0);
-      else
-        this_thr->th.th_reap_state = KMP_SAFE_TO_REAP;
+      else {
+#if USE_UNSHACKLED_TASK
+        // Usually this block is reached only when no task was encountered
+        // during the execution of the team. With unshackled tasks enabled,
+        // however, the master thread must also check for unshackled tasks
+        // that have not yet finished; only the master performs this check.
+        if (!KMP_MASTER_TID(this_thr->th.th_info.ds.ds_tid))
+#endif
+          this_thr->th.th_reap_state = KMP_SAFE_TO_REAP;
+#if USE_UNSHACKLED_TASK
+        else {
+          kmp_team_t *team = this_thr->th.th_team;
+          // Spinning is acceptable here because this only runs on the master
+          // thread
+          for (int tid = 0; tid < team->t.t_nproc; ++tid) {
+            kmp_info_t *thr = team->t.t_threads[tid];
+            kmp_taskdata_t *taskdata = thr->th.th_current_task;
+            while (KMP_ATOMIC_LD_ACQ(&taskdata->td_incomplete_child_tasks))
+              ;
+          }
+        }
+#endif
+      }
     } else {
       KMP_DEBUG_ASSERT(!KMP_MASTER_TID(this_thr->th.th_info.ds.ds_tid));
 #if OMPT_SUPPORT
@@ -381,6 +402,23 @@
       break;
     }
 
+#if USE_UNSHACKLED_TASK
+    // For an unshackled thread, a nullptr task_team means the master has not
+    // yet released the barrier. We must not suspend on the condition variable
+    // in that case: after the master releases all children from the barrier,
+    // the unshackled threads would still be asleep and would miss the
+    // follow-up setup (such as task-team synchronization), leaving this
+    // thread without a task team. That is usually harmless, but if the first
+    // task this thread encounters is untied, the check in __kmp_task_alloc
+    // dereferences the task-team pointer without a nullptr check and crashes.
+    // Therefore, only wait on the condition variable once task_team is set.
+    if (task_team && KMP_UNSHACKLED_THREAD(th_gtid)) {
+      __kmp_unshackled_worker_thread_wait();
+      continue;
+    }
+#endif
+
     // Don't suspend if KMP_BLOCKTIME is set to "infinite"
     if (__kmp_dflt_blocktime == KMP_MAX_BLOCKTIME &&
         __kmp_pause_status != kmp_soft_paused)
Index: openmp/runtime/src/z_Linux_util.cpp
===================================================================
--- openmp/runtime/src/z_Linux_util.cpp
+++ openmp/runtime/src/z_Linux_util.cpp
@@ -2439,7 +2439,7 @@
                            ,
                            void **exit_frame_ptr
 #endif
-                           ) {
+) {
 #if OMPT_SUPPORT
   *exit_frame_ptr = OMPT_GET_FRAME_ADDRESS(0);
 #endif
@@ -2518,4 +2518,92 @@
 
 #endif
 
+#if USE_UNSHACKLED_TASK
+
+namespace {
+pthread_t __kmp_unshackled_master_thread_handle;
+
+// Condition variable and mutex on which idle unshackled workers block
+pthread_cond_t __kmp_unshackled_worker_thread_cond_var;
+pthread_mutex_t __kmp_unshackled_threads_wait_lock;
+
+// Condition variable for initializing the unshackled team
+pthread_cond_t __kmp_unshackled_threads_initz_cond_var;
+pthread_mutex_t __kmp_unshackled_threads_initz_lock;
+
+// Condition variable for the wrapper function of the master thread
+pthread_cond_t __kmp_unshackled_master_thread_cond_var;
+pthread_mutex_t __kmp_unshackled_master_thread_lock;
+} // namespace
+
+void __kmp_unshackled_worker_thread_wait() {
+  if (pthread_mutex_lock(&__kmp_unshackled_threads_wait_lock))
+    __kmp_fatal(KMP_MSG(CantRegisterNewThread));
+  if (pthread_cond_wait(&__kmp_unshackled_worker_thread_cond_var,
+                        &__kmp_unshackled_threads_wait_lock))
+    __kmp_fatal(KMP_MSG(CantRegisterNewThread));
+  if (pthread_mutex_unlock(&__kmp_unshackled_threads_wait_lock))
+    __kmp_fatal(KMP_MSG(CantRegisterNewThread));
+}
+
+void __kmp_do_initialize_unshackled_threads() {
+  // Initialize the mutexes explicitly; relying on zero-initialization of
+  // static storage is not portable
+  if (pthread_mutex_init(&__kmp_unshackled_threads_wait_lock, nullptr) ||
+      pthread_mutex_init(&__kmp_unshackled_threads_initz_lock, nullptr) ||
+      pthread_mutex_init(&__kmp_unshackled_master_thread_lock, nullptr)) {
+    __kmp_fatal(KMP_MSG(CantRegisterNewThread));
+  }
+
+  // Initialize the condition variables
+  if (pthread_cond_init(&__kmp_unshackled_threads_initz_cond_var, nullptr)) {
+    __kmp_fatal(KMP_MSG(CantRegisterNewThread));
+  }
+  if (pthread_cond_init(&__kmp_unshackled_worker_thread_cond_var, nullptr)) {
+    __kmp_fatal(KMP_MSG(CantRegisterNewThread));
+  }
+  if (pthread_cond_init(&__kmp_unshackled_master_thread_cond_var, nullptr)) {
+    __kmp_fatal(KMP_MSG(CantRegisterNewThread));
+  }
+
+  // Create a new thread to finish the initialization
+  if (pthread_create(
+          &__kmp_unshackled_master_thread_handle, nullptr,
+          [](void *) -> void * {
+            __kmp_unshackled_threads_initz_routine();
+            return nullptr;
+          },
+          nullptr)) {
+    __kmp_fatal(KMP_MSG(CantRegisterNewThread));
+  }
+}
+
+void __kmp_unshackled_threads_initz_wait() {
+  // The initial thread waits here for the completion of the initialization.
+  // The condition variable is signaled by the master thread of the unshackled
+  // team in __kmp_unshackled_initz_release. The mutex must be held around the
+  // wait, and the predicate loop guards against spurious wakeups and against
+  // a signal that arrives before the wait starts.
+  if (pthread_mutex_lock(&__kmp_unshackled_threads_initz_lock))
+    __kmp_fatal(KMP_MSG(CantRegisterNewThread));
+  while (TCR_4(__kmp_init_unshackled_threads)) {
+    if (pthread_cond_wait(&__kmp_unshackled_threads_initz_cond_var,
+                          &__kmp_unshackled_threads_initz_lock))
+      __kmp_fatal(KMP_MSG(CantRegisterNewThread));
+  }
+  if (pthread_mutex_unlock(&__kmp_unshackled_threads_initz_lock))
+    __kmp_fatal(KMP_MSG(CantRegisterNewThread));
+}
+
+void __kmp_unshackled_initz_release() {
+  if (pthread_mutex_lock(&__kmp_unshackled_threads_initz_lock))
+    __kmp_fatal(KMP_MSG(CantRegisterNewThread));
+
+  // Initialization is done; reset __kmp_init_unshackled_threads and notify
+  // the initial thread
+  __kmp_init_unshackled_threads = FALSE;
+  if (pthread_cond_signal(&__kmp_unshackled_threads_initz_cond_var))
+    __kmp_fatal(KMP_MSG(CantRegisterNewThread));
+
+  if (pthread_mutex_unlock(&__kmp_unshackled_threads_initz_lock))
+    __kmp_fatal(KMP_MSG(CantRegisterNewThread));
+}
+
+void __kmp_unshackled_master_thread_wait() {
+  // The master thread of the unshackled team blocks here. The condition
+  // variable is only signaled in the destructor of the RTL.
+  if (pthread_mutex_lock(&__kmp_unshackled_master_thread_lock))
+    __kmp_fatal(KMP_MSG(CantRegisterNewThread));
+  if (pthread_cond_wait(&__kmp_unshackled_master_thread_cond_var,
+                        &__kmp_unshackled_master_thread_lock))
+    __kmp_fatal(KMP_MSG(CantRegisterNewThread));
+  if (pthread_mutex_unlock(&__kmp_unshackled_master_thread_lock))
+    __kmp_fatal(KMP_MSG(CantRegisterNewThread));
+}
+
+void __kmp_unshackled_worker_thread_signal() {
+  if (pthread_cond_signal(&__kmp_unshackled_worker_thread_cond_var))
+    __kmp_fatal(KMP_MSG(CantRegisterNewThread));
+}
+
+#endif
+
 // end of file //
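
Note on the initialization handshake in z_Linux_util.cpp: __kmp_unshackled_threads_initz_wait and __kmp_unshackled_initz_release follow the standard POSIX condition-variable pattern (mutex held around the wait, predicate re-checked in a loop). The standalone sketch below illustrates the same handshake with hypothetical names (initz_lock, initz_cond, initializing, master_routine); it is not part of the patch and compiles independently with -lpthread:

#include <cstdio>
#include <pthread.h>

static pthread_mutex_t initz_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t initz_cond = PTHREAD_COND_INITIALIZER;
static bool initializing = true; // plays the role of __kmp_init_unshackled_threads

static void *master_routine(void *) {
  // ... the hidden team would be created here ...
  pthread_mutex_lock(&initz_lock);
  initializing = false; // analogous to __kmp_unshackled_initz_release()
  pthread_cond_signal(&initz_cond);
  pthread_mutex_unlock(&initz_lock);
  return nullptr;
}

int main() {
  pthread_t master;
  pthread_create(&master, nullptr, master_routine, nullptr);
  // Analogous to __kmp_unshackled_threads_initz_wait(): the predicate loop
  // tolerates spurious wakeups and a signal that fires before the wait starts.
  pthread_mutex_lock(&initz_lock);
  while (initializing)
    pthread_cond_wait(&initz_cond, &initz_lock);
  pthread_mutex_unlock(&initz_lock);
  pthread_join(master, nullptr);
  std::puts("unshackled team initialized");
  return 0;
}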