Index: runtime/src/kmp.h
===================================================================
--- runtime/src/kmp.h
+++ runtime/src/kmp.h
@@ -2181,10 +2181,9 @@
 typedef struct kmp_dephash {
   kmp_dephash_entry_t **buckets;
   size_t size;
-#ifdef KMP_DEBUG
+  size_t generation;
   kmp_uint32 nelements;
   kmp_uint32 nconflicts;
-#endif
 } kmp_dephash_t;
 
 typedef struct kmp_task_affinity_info {
Index: runtime/src/kmp_taskdeps.cpp
===================================================================
--- runtime/src/kmp_taskdeps.cpp
+++ runtime/src/kmp_taskdeps.cpp
@@ -54,12 +54,69 @@
 
 enum { KMP_DEPHASH_OTHER_SIZE = 97, KMP_DEPHASH_MASTER_SIZE = 997 };
 
+// Prime table sizes used for successive generations of the dependence
+// hashmap; internal linkage so the symbols do not escape this TU.
+static const size_t sizes[] = {997,   2003,  4001,   8191,  16001,
+                               32003, 64007, 131071, 270029};
+static const size_t MAX_GEN = 8;
+
 static inline kmp_int32 __kmp_dephash_hash(kmp_intptr_t addr, size_t hsize) {
   // TODO alternate to try: set = (((Addr64)(addrUsefulBits * 9.618)) %
   // m_num_sets );
   return ((addr >> 6) ^ (addr >> 2)) % hsize;
 }
 
+// Grow the dependence hashmap to the next generation's size and rehash all
+// existing entries into it.  Returns the (possibly unchanged) hashmap; the
+// old table is freed on success.  No locking: the hashmap is only touched
+// by the thread executing the encountering task.
+static kmp_dephash_t *__kmp_dephash_extend(kmp_info_t *thread,
+                                           kmp_dephash_t *current_dephash) {
+  kmp_dephash_t *h;
+
+  size_t gen = current_dephash->generation + 1;
+  if (gen >= MAX_GEN)
+    return current_dephash; // already at the largest size; keep the table
+  size_t new_size = sizes[gen];
+
+  // size_t, not kmp_int32: the product can exceed 2^31 for large tables.
+  size_t size_to_allocate =
+      new_size * sizeof(kmp_dephash_entry_t *) + sizeof(kmp_dephash_t);
+
+#if USE_FAST_MEMORY
+  h = (kmp_dephash_t *)__kmp_fast_allocate(thread, size_to_allocate);
+#else
+  h = (kmp_dephash_t *)__kmp_thread_malloc(thread, size_to_allocate);
+#endif
+
+  h->size = new_size;
+  h->nelements = current_dephash->nelements;
+  h->buckets = (kmp_dephash_entry **)(h + 1);
+  h->generation = gen;
+  h->nconflicts = 0;
+
+  // The allocator does not zero the memory: the new buckets must be
+  // NULL-initialized before any entry is chained into them.
+  for (size_t i = 0; i < new_size; i++)
+    h->buckets[i] = NULL;
+
+  // Re-insert the existing elements into the new table.
+  for (size_t i = 0; i < current_dephash->size; i++) {
+    kmp_dephash_entry_t *next, *entry;
+    for (entry = current_dephash->buckets[i]; entry; entry = next) {
+      next = entry->next_in_bucket;
+      // Compute the new hash using the new size, and insert the entry in
+      // the new bucket.
+      kmp_int32 new_bucket = __kmp_dephash_hash(entry->addr, h->size);
+      entry->next_in_bucket = h->buckets[new_bucket];
+      // Count a conflict only after relinking, i.e. against the NEW chain.
+      if (entry->next_in_bucket)
+        h->nconflicts++;
+      h->buckets[new_bucket] = entry;
+    }
+  }
+
+  // Free old hash table; the entries themselves were moved, not copied.
+#if USE_FAST_MEMORY
+  __kmp_fast_free(thread, current_dephash);
+#else
+  __kmp_thread_free(thread, current_dephash);
+#endif
+
+  return h;
+}
+
 static kmp_dephash_t *__kmp_dephash_create(kmp_info_t *thread,
                                            kmp_taskdata_t *current_task) {
   kmp_dephash_t *h;
@@ -81,10 +138,9 @@
 #endif
 
   h->size = h_size;
-#ifdef KMP_DEBUG
+  h->generation = 0;
   h->nelements = 0;
   h->nconflicts = 0;
-#endif
   h->buckets = (kmp_dephash_entry **)(h + 1);
 
   for (size_t i = 0; i < h_size; i++)
@@ -97,7 +153,13 @@
 #define ENTRY_LAST_MTXS 1
 
 static kmp_dephash_entry *
-__kmp_dephash_find(kmp_info_t *thread, kmp_dephash_t *h, kmp_intptr_t addr) {
+__kmp_dephash_find(kmp_info_t *thread, kmp_dephash_t **hash, kmp_intptr_t addr) {
+  kmp_dephash_t *h = *hash;
+  // Grow when, on average, every bucket has at least one collision.
+  if (h->nelements != 0 && h->nconflicts / h->size >= 1) {
+    *hash = __kmp_dephash_extend(thread, h);
+    h = *hash;
+  }
   kmp_int32 bucket = __kmp_dephash_hash(addr, h->size);
 
   kmp_dephash_entry_t *entry;
@@ -122,11 +184,9 @@
     entry->mtx_lock = NULL;
     entry->next_in_bucket = h->buckets[bucket];
     h->buckets[bucket] = entry;
-#ifdef KMP_DEBUG
     h->nelements++;
     if (entry->next_in_bucket)
       h->nconflicts++;
-#endif
   }
   return entry;
 }
@@ -232,7 +292,7 @@
 
 template <bool filter>
 static inline kmp_int32
-__kmp_process_deps(kmp_int32 gtid, kmp_depnode_t *node, kmp_dephash_t *hash,
+__kmp_process_deps(kmp_int32 gtid, kmp_depnode_t *node, kmp_dephash_t **hash,
                    bool dep_barrier, kmp_int32 ndeps,
                    kmp_depend_info_t *dep_list, kmp_task_t *task) {
   KA_TRACE(30, ("__kmp_process_deps<%d>: T#%d processing %d dependencies : "
@@ -352,7 +412,7 @@
 
 // returns true if the task has any outstanding dependence
 static bool __kmp_check_deps(kmp_int32 gtid, kmp_depnode_t *node,
-                             kmp_task_t *task, kmp_dephash_t *hash,
+                             kmp_task_t *task, kmp_dephash_t **hash,
                              bool dep_barrier, kmp_int32 ndeps,
                              kmp_depend_info_t *dep_list,
                              kmp_int32 ndeps_noalias,
@@ -552,7 +612,7 @@
     __kmp_init_node(node);
     new_taskdata->td_depnode = node;
 
-    if (__kmp_check_deps(gtid, node, new_task, current_task->td_dephash,
+    if (__kmp_check_deps(gtid, node, new_task, &current_task->td_dephash,
                          NO_DEP_BARRIER, ndeps, dep_list, ndeps_noalias,
                          noalias_dep_list)) {
       KA_TRACE(10, ("__kmpc_omp_task_with_deps(exit): T#%d task had blocking "
@@ -633,7 +693,7 @@
     kmp_depnode_t node = {0};
     __kmp_init_node(&node);
 
-    if (!__kmp_check_deps(gtid, &node, NULL, current_task->td_dephash,
+    if (!__kmp_check_deps(gtid, &node, NULL, &current_task->td_dephash,
                           DEP_BARRIER, ndeps, dep_list, ndeps_noalias,
                           noalias_dep_list)) {
       KA_TRACE(10, ("__kmpc_omp_wait_deps(exit): T#%d has no blocking "
Index: runtime/test/tasking/omp_task_depend_resize_hashmap.c
===================================================================
--- /dev/null
+++ runtime/test/tasking/omp_task_depend_resize_hashmap.c
@@ -0,0 +1,39 @@
+// RUN: %libomp-compile && env KMP_ENABLE_TASK_THROTTLING=0 %libomp-run
+
+#include <omp.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+// The first hashtable static size is 997
+#define NUM_DEPS 4000
+
+
+int main()
+{
+  int *deps = calloc(NUM_DEPS, sizeof(int));
+  int i;
+  int failed = 0;
+
+  #pragma omp parallel
+  #pragma omp master
+  {
+    for (i = 0; i < NUM_DEPS; i++) {
+      #pragma omp task firstprivate(i) depend(inout: deps[i])
+      {
+        deps[i] = 1;
+      }
+      #pragma omp task firstprivate(i) depend(inout: deps[i])
+      {
+        deps[i] = 2;
+      }
+    }
+  }
+
+  for (i = 0; i < NUM_DEPS; i++) {
+    if (deps[i] != 2)
+      failed++;
+  }
+
+  free(deps);
+  return failed;
+}