Index: openmp/trunk/runtime/src/kmp_alloc.cpp
===================================================================
--- openmp/trunk/runtime/src/kmp_alloc.cpp
+++ openmp/trunk/runtime/src/kmp_alloc.cpp
@@ -794,7 +794,7 @@
      the length of this buffer to the previous free buffer. Note that we
      subtract the size in the buffer being released, since it's negative to
      indicate that the buffer is allocated. */
-  register bufsize size = b->bh.bb.bsize;
+  bufsize size = b->bh.bb.bsize;

   /* Make the previous buffer the one we're working on. */
   KMP_DEBUG_ASSERT(BH((char *)b - b->bh.bb.prevfree)->bb.bsize ==
Index: openmp/trunk/runtime/src/kmp_barrier.cpp
===================================================================
--- openmp/trunk/runtime/src/kmp_barrier.cpp
+++ openmp/trunk/runtime/src/kmp_barrier.cpp
@@ -49,9 +49,9 @@
     enum barrier_type bt, kmp_info_t *this_thr, int gtid, int tid,
     void (*reduce)(void *, void *) USE_ITT_BUILD_ARG(void *itt_sync_obj)) {
   KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(KMP_linear_gather);
-  register kmp_team_t *team = this_thr->th.th_team;
-  register kmp_bstate_t *thr_bar = &this_thr->th.th_bar[bt].bb;
-  register kmp_info_t **other_threads = team->t.t_threads;
+  kmp_team_t *team = this_thr->th.th_team;
+  kmp_bstate_t *thr_bar = &this_thr->th.th_bar[bt].bb;
+  kmp_info_t **other_threads = team->t.t_threads;

   KA_TRACE(
       20,
@@ -83,11 +83,11 @@
       kmp_flag_64 flag(&thr_bar->b_arrived, other_threads[0]);
       flag.release();
     } else {
-      register kmp_balign_team_t *team_bar = &team->t.t_bar[bt];
-      register int nproc = this_thr->th.th_team_nproc;
-      register int i;
+      kmp_balign_team_t *team_bar = &team->t.t_bar[bt];
+      int nproc = this_thr->th.th_team_nproc;
+      int i;
       // Don't have to worry about sleep bit here or atomic since team setting
-      register kmp_uint64 new_state =
+      kmp_uint64 new_state =
           team_bar->b_arrived + KMP_BARRIER_STATE_BUMP;

       // Collect all the worker team member threads.
@@ -145,13 +145,13 @@
     enum barrier_type bt, kmp_info_t *this_thr, int gtid, int tid,
     int propagate_icvs USE_ITT_BUILD_ARG(void *itt_sync_obj)) {
   KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(KMP_linear_release);
-  register kmp_bstate_t *thr_bar = &this_thr->th.th_bar[bt].bb;
-  register kmp_team_t *team;
+  kmp_bstate_t *thr_bar = &this_thr->th.th_bar[bt].bb;
+  kmp_team_t *team;

   if (KMP_MASTER_TID(tid)) {
-    register unsigned int i;
-    register kmp_uint32 nproc = this_thr->th.th_team_nproc;
-    register kmp_info_t **other_threads;
+    unsigned int i;
+    kmp_uint32 nproc = this_thr->th.th_team_nproc;
+    kmp_info_t **other_threads;

     team = __kmp_threads[gtid]->th.th_team;
     KMP_DEBUG_ASSERT(team != NULL);
@@ -249,15 +249,15 @@
     int tid, void (*reduce)(void *, void *)
         USE_ITT_BUILD_ARG(void *itt_sync_obj)) {
   KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(KMP_tree_gather);
-  register kmp_team_t *team = this_thr->th.th_team;
-  register kmp_bstate_t *thr_bar = &this_thr->th.th_bar[bt].bb;
-  register kmp_info_t **other_threads = team->t.t_threads;
-  register kmp_uint32 nproc = this_thr->th.th_team_nproc;
-  register kmp_uint32 branch_bits = __kmp_barrier_gather_branch_bits[bt];
-  register kmp_uint32 branch_factor = 1 << branch_bits;
-  register kmp_uint32 child;
-  register kmp_uint32 child_tid;
-  register kmp_uint64 new_state;
+  kmp_team_t *team = this_thr->th.th_team;
+  kmp_bstate_t *thr_bar = &this_thr->th.th_bar[bt].bb;
+  kmp_info_t **other_threads = team->t.t_threads;
+  kmp_uint32 nproc = this_thr->th.th_team_nproc;
+  kmp_uint32 branch_bits = __kmp_barrier_gather_branch_bits[bt];
+  kmp_uint32 branch_factor = 1 << branch_bits;
+  kmp_uint32 child;
+  kmp_uint32 child_tid;
+  kmp_uint64 new_state;

   KA_TRACE(
       20, ("__kmp_tree_barrier_gather: T#%d(%d:%d) enter for barrier type %d\n",
@@ -279,8 +279,8 @@
     new_state = team->t.t_bar[bt].b_arrived + KMP_BARRIER_STATE_BUMP;
     child = 1;
     do {
-      register kmp_info_t *child_thr = other_threads[child_tid];
-      register kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb;
+      kmp_info_t *child_thr = other_threads[child_tid];
+      kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb;
 #if KMP_CACHE_MANAGE
       // Prefetch next thread's arrived count
       if (child + 1 <= branch_factor && child_tid + 1 < nproc)
@@ -321,7 +321,7 @@
   }

   if (!KMP_MASTER_TID(tid)) { // Worker threads
-    register kmp_int32 parent_tid = (tid - 1) >> branch_bits;
+    kmp_int32 parent_tid = (tid - 1) >> branch_bits;

     KA_TRACE(20,
              ("__kmp_tree_barrier_gather: T#%d(%d:%d) releasing T#%d(%d:%d) "
@@ -357,13 +357,13 @@
     enum barrier_type bt, kmp_info_t *this_thr, int gtid, int tid,
     int propagate_icvs USE_ITT_BUILD_ARG(void *itt_sync_obj)) {
   KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(KMP_tree_release);
-  register kmp_team_t *team;
-  register kmp_bstate_t *thr_bar = &this_thr->th.th_bar[bt].bb;
-  register kmp_uint32 nproc;
-  register kmp_uint32 branch_bits = __kmp_barrier_release_branch_bits[bt];
-  register kmp_uint32 branch_factor = 1 << branch_bits;
-  register kmp_uint32 child;
-  register kmp_uint32 child_tid;
+  kmp_team_t *team;
+  kmp_bstate_t *thr_bar = &this_thr->th.th_bar[bt].bb;
+  kmp_uint32 nproc;
+  kmp_uint32 branch_bits = __kmp_barrier_release_branch_bits[bt];
+  kmp_uint32 branch_factor = 1 << branch_bits;
+  kmp_uint32 child;
+  kmp_uint32 child_tid;

   // Perform a tree release for all of the threads that have been gathered
   if (!KMP_MASTER_TID(
@@ -416,12 +416,12 @@
     child_tid = (tid << branch_bits) + 1;

     if (child_tid < nproc) {
-      register kmp_info_t **other_threads = team->t.t_threads;
+      kmp_info_t **other_threads = team->t.t_threads;
       child = 1;
      // Parent threads release all their children
      do {
-        register kmp_info_t *child_thr = other_threads[child_tid];
-        register kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb;
+        kmp_info_t *child_thr = other_threads[child_tid];
+        kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb;
 #if KMP_CACHE_MANAGE
         // Prefetch next thread's go count
         if (child + 1 <= branch_factor && child_tid + 1 < nproc)
@@ -466,15 +466,15 @@
     int tid, void (*reduce)(void *, void *)
         USE_ITT_BUILD_ARG(void *itt_sync_obj)) {
   KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(KMP_hyper_gather);
-  register kmp_team_t *team = this_thr->th.th_team;
-  register kmp_bstate_t *thr_bar = &this_thr->th.th_bar[bt].bb;
-  register kmp_info_t **other_threads = team->t.t_threads;
-  register kmp_uint64 new_state = KMP_BARRIER_UNUSED_STATE;
-  register kmp_uint32 num_threads = this_thr->th.th_team_nproc;
-  register kmp_uint32 branch_bits = __kmp_barrier_gather_branch_bits[bt];
-  register kmp_uint32 branch_factor = 1 << branch_bits;
-  register kmp_uint32 offset;
-  register kmp_uint32 level;
+  kmp_team_t *team = this_thr->th.th_team;
+  kmp_bstate_t *thr_bar = &this_thr->th.th_bar[bt].bb;
+  kmp_info_t **other_threads = team->t.t_threads;
+  kmp_uint64 new_state = KMP_BARRIER_UNUSED_STATE;
+  kmp_uint32 num_threads = this_thr->th.th_team_nproc;
+  kmp_uint32 branch_bits = __kmp_barrier_gather_branch_bits[bt];
+  kmp_uint32 branch_factor = 1 << branch_bits;
+  kmp_uint32 offset;
+  kmp_uint32 level;

   KA_TRACE(
       20,
@@ -494,11 +494,11 @@
   kmp_flag_64 p_flag(&thr_bar->b_arrived);
   for (level = 0, offset = 1; offset < num_threads;
        level += branch_bits, offset <<= branch_bits) {
-    register kmp_uint32 child;
-    register kmp_uint32 child_tid;
+    kmp_uint32 child;
+    kmp_uint32 child_tid;

     if (((tid >> level) & (branch_factor - 1)) != 0) {
-      register kmp_int32 parent_tid = tid & ~((1 << (level + branch_bits)) - 1);
+      kmp_int32 parent_tid = tid & ~((1 << (level + branch_bits)) - 1);

       KA_TRACE(20,
                ("__kmp_hyper_barrier_gather: T#%d(%d:%d) releasing T#%d(%d:%d) "
@@ -523,10 +523,10 @@
     for (child = 1, child_tid = tid + (1 << level);
          child < branch_factor && child_tid < num_threads;
          child++, child_tid += (1 << level)) {
-      register kmp_info_t *child_thr = other_threads[child_tid];
-      register kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb;
+      kmp_info_t *child_thr = other_threads[child_tid];
+      kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb;
 #if KMP_CACHE_MANAGE
-      register kmp_uint32 next_child_tid = child_tid + (1 << level);
+      kmp_uint32 next_child_tid = child_tid + (1 << level);
       // Prefetch next thread's arrived count
       if (child + 1 < branch_factor && next_child_tid < num_threads)
         KMP_CACHE_PREFETCH(
@@ -585,16 +585,16 @@
     enum barrier_type bt, kmp_info_t *this_thr, int gtid, int tid,
     int propagate_icvs USE_ITT_BUILD_ARG(void *itt_sync_obj)) {
   KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(KMP_hyper_release);
-  register kmp_team_t *team;
-  register kmp_bstate_t *thr_bar = &this_thr->th.th_bar[bt].bb;
-  register kmp_info_t **other_threads;
-  register kmp_uint32 num_threads;
-  register kmp_uint32 branch_bits = __kmp_barrier_release_branch_bits[bt];
-  register kmp_uint32 branch_factor = 1 << branch_bits;
-  register kmp_uint32 child;
-  register kmp_uint32 child_tid;
-  register kmp_uint32 offset;
-  register kmp_uint32 level;
+  kmp_team_t *team;
+  kmp_bstate_t *thr_bar = &this_thr->th.th_bar[bt].bb;
+  kmp_info_t **other_threads;
+  kmp_uint32 num_threads;
+  kmp_uint32 branch_bits = __kmp_barrier_release_branch_bits[bt];
+  kmp_uint32 branch_factor = 1 << branch_bits;
+  kmp_uint32 child;
+  kmp_uint32 child_tid;
+  kmp_uint32 offset;
+  kmp_uint32 level;

   /* Perform a hypercube-embedded tree release for all of the threads that
      have been gathered. If KMP_REVERSE_HYPER_BAR is defined (default) the
      threads
@@ -690,10 +690,10 @@
       if (child_tid >= num_threads)
         continue; // Child doesn't exist so keep going
       else {
-        register kmp_info_t *child_thr = other_threads[child_tid];
-        register kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb;
+        kmp_info_t *child_thr = other_threads[child_tid];
+        kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb;
 #if KMP_CACHE_MANAGE
-        register kmp_uint32 next_child_tid = child_tid - (1 << level);
+        kmp_uint32 next_child_tid = child_tid - (1 << level);
         // Prefetch next thread's go count
 #ifdef KMP_REVERSE_HYPER_BAR
         if (child - 1 >= 1 && next_child_tid < num_threads)
@@ -817,11 +817,11 @@
     enum barrier_type bt, kmp_info_t *this_thr, int gtid, int tid,
     void (*reduce)(void *, void *) USE_ITT_BUILD_ARG(void *itt_sync_obj)) {
   KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(KMP_hier_gather);
-  register kmp_team_t *team = this_thr->th.th_team;
-  register kmp_bstate_t *thr_bar = &this_thr->th.th_bar[bt].bb;
-  register kmp_uint32 nproc = this_thr->th.th_team_nproc;
-  register kmp_info_t **other_threads = team->t.t_threads;
-  register kmp_uint64 new_state;
+  kmp_team_t *team = this_thr->th.th_team;
+  kmp_bstate_t *thr_bar = &this_thr->th.th_bar[bt].bb;
+  kmp_uint32 nproc = this_thr->th.th_team_nproc;
+  kmp_info_t **other_threads = team->t.t_threads;
+  kmp_uint64 new_state;

   int level = team->t.t_level;
 #if OMP_40_ENABLED
@@ -851,7 +851,7 @@
            team);

   if (thr_bar->my_level) { // not a leaf (my_level==0 means leaf)
-    register kmp_int32 child_tid;
+    kmp_int32 child_tid;
     new_state = (kmp_uint64)team->t.t_bar[bt].b_arrived + KMP_BARRIER_STATE_BUMP;
     if (__kmp_dflt_blocktime == KMP_MAX_BLOCKTIME &&
@@ -895,8 +895,8 @@
         if (last > nproc)
           last = nproc;
         for (child_tid = tid + skip; child_tid < (int)last; child_tid += skip) {
-          register kmp_info_t *child_thr = other_threads[child_tid];
-          register kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb;
+          kmp_info_t *child_thr = other_threads[child_tid];
+          kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb;
           KA_TRACE(20, ("__kmp_hierarchical_barrier_gather: T#%d(%d:%d) wait "
                         "T#%d(%d:%d) "
                         "arrived(%p) == %llu\n",
@@ -928,8 +928,8 @@
       if (last > nproc)
         last = nproc;
       for (child_tid = tid + skip; child_tid < (int)last; child_tid += skip) {
-        register kmp_info_t *child_thr = other_threads[child_tid];
-        register kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb;
+        kmp_info_t *child_thr = other_threads[child_tid];
+        kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb;
         KA_TRACE(20, ("__kmp_hierarchical_barrier_gather: T#%d(%d:%d) wait "
                       "T#%d(%d:%d) "
                       "arrived(%p) == %llu\n",
@@ -999,9 +999,9 @@
     enum barrier_type bt, kmp_info_t *this_thr, int gtid, int tid,
     int propagate_icvs USE_ITT_BUILD_ARG(void *itt_sync_obj)) {
   KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(KMP_hier_release);
-  register kmp_team_t *team;
-  register kmp_bstate_t *thr_bar = &this_thr->th.th_bar[bt].bb;
-  register kmp_uint32 nproc;
+  kmp_team_t *team;
+  kmp_bstate_t *thr_bar = &this_thr->th.th_bar[bt].bb;
+  kmp_uint32 nproc;
   bool team_change = false; // indicates on-core barrier shouldn't be used

   if (KMP_MASTER_TID(tid)) {
@@ -1111,7 +1111,7 @@

   // Now, release my children
   if (thr_bar->my_level) { // not a leaf
-    register kmp_int32 child_tid;
+    kmp_int32 child_tid;
     kmp_uint32 last;
     if (__kmp_dflt_blocktime == KMP_MAX_BLOCKTIME &&
         thr_bar->use_oncore_barrier) {
@@ -1126,7 +1126,7 @@
       // hierarchy
       for (child_tid = thr_bar->skip_per_level[1]; child_tid < (int)nproc;
            child_tid += thr_bar->skip_per_level[1]) {
-        register kmp_bstate_t *child_bar =
+        kmp_bstate_t *child_bar =
             &team->t.t_threads[child_tid]->th.th_bar[bt].bb;
         KA_TRACE(20, ("__kmp_hierarchical_barrier_release: T#%d(%d:%d) "
                       "releasing T#%d(%d:%d)"
@@ -1157,8 +1157,8 @@
         last = nproc;
       for (child_tid = tid + 1 + old_leaf_kids; child_tid < (int)last;
            ++child_tid) { // skip_per_level[0]=1
-        register kmp_info_t *child_thr = team->t.t_threads[child_tid];
-        register kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb;
+        kmp_info_t *child_thr = team->t.t_threads[child_tid];
+        kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb;
         KA_TRACE(
             20,
             ("__kmp_hierarchical_barrier_release: T#%d(%d:%d) releasing"
@@ -1184,8 +1184,8 @@
         if (last > nproc)
           last = nproc;
         for (child_tid = tid + skip; child_tid < (int)last; child_tid += skip) {
-          register kmp_info_t *child_thr = team->t.t_threads[child_tid];
-          register kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb;
+          kmp_info_t *child_thr = team->t.t_threads[child_tid];
+          kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb;
           KA_TRACE(20, ("__kmp_hierarchical_barrier_release: T#%d(%d:%d) "
                         "releasing T#%d(%d:%d) go(%p): %u => %u\n",
                         gtid, team->t.t_id, tid,
@@ -1223,10 +1223,10 @@
                      void (*reduce)(void *, void *)) {
   KMP_TIME_PARTITIONED_BLOCK(OMP_plain_barrier);
   KMP_SET_THREAD_STATE_BLOCK(PLAIN_BARRIER);
-  register int tid = __kmp_tid_from_gtid(gtid);
-  register kmp_info_t *this_thr = __kmp_threads[gtid];
-  register kmp_team_t *team = this_thr->th.th_team;
-  register int status = 0;
+  int tid = __kmp_tid_from_gtid(gtid);
+  kmp_info_t *this_thr = __kmp_threads[gtid];
+  kmp_team_t *team = this_thr->th.th_team;
+  int status = 0;
   ident_t *loc = __kmp_threads[gtid]->th.th_ident;
 #if OMPT_SUPPORT
   ompt_task_id_t my_task_id;
@@ -1553,9 +1553,9 @@
 void __kmp_join_barrier(int gtid) {
   KMP_TIME_PARTITIONED_BLOCK(OMP_join_barrier);
   KMP_SET_THREAD_STATE_BLOCK(FORK_JOIN_BARRIER);
-  register kmp_info_t *this_thr = __kmp_threads[gtid];
-  register kmp_team_t *team;
-  register kmp_uint nproc;
+  kmp_info_t *this_thr = __kmp_threads[gtid];
+  kmp_team_t *team;
+  kmp_uint nproc;
   kmp_info_t *master_thread;
   int tid;
 #ifdef KMP_DEBUG
@@ -1807,8 +1807,8 @@
 #endif /* USE_ITT_BUILD && USE_ITT_NOTIFY */

 #ifdef KMP_DEBUG
-  register kmp_info_t **other_threads = team->t.t_threads;
-  register int i;
+  kmp_info_t **other_threads = team->t.t_threads;
+  int i;

   // Verify state
   KMP_MB();
Index: openmp/trunk/runtime/src/kmp_dispatch.cpp
===================================================================
--- openmp/trunk/runtime/src/kmp_dispatch.cpp
+++ openmp/trunk/runtime/src/kmp_dispatch.cpp
@@ -256,11 +256,11 @@
     void *obj // Higher-level synchronization object, or NULL.
     ) {
   // note: we may not belong to a team at this point
-  register volatile UT *spin = spinner;
-  register UT check = checker;
-  register kmp_uint32 spins;
-  register kmp_uint32 (*f)(UT, UT) = pred;
-  register UT r;
+  volatile UT *spin = spinner;
+  UT check = checker;
+  kmp_uint32 spins;
+  kmp_uint32 (*f)(UT, UT) = pred;
+  UT r;

   KMP_FSYNC_SPIN_INIT(obj, CCAST(UT *, spin));
   KMP_INIT_YIELD(spins);
@@ -2395,10 +2395,10 @@
                            typename traits_t<T>::signed_t incr) {
   typedef typename traits_t<T>::unsigned_t UT;
   typedef typename traits_t<T>::signed_t ST;
-  register kmp_uint32 team_id;
-  register kmp_uint32 nteams;
-  register UT trip_count;
-  register kmp_team_t *team;
+  kmp_uint32 team_id;
+  kmp_uint32 nteams;
+  UT trip_count;
+  kmp_team_t *team;
   kmp_info_t *th;

   KMP_DEBUG_ASSERT(plastiter && plower && pupper);
@@ -2470,17 +2470,17 @@
       *plastiter = (team_id == trip_count - 1);
   } else {
     if (__kmp_static == kmp_sch_static_balanced) {
-      register UT chunk = trip_count / nteams;
-      register UT extras = trip_count % nteams;
+      UT chunk = trip_count / nteams;
+      UT extras = trip_count % nteams;
       *plower +=
           incr * (team_id * chunk + (team_id < extras ? team_id : extras));
       *pupper = *plower + chunk * incr - (team_id < extras ? 0 : incr);
       if (plastiter != NULL)
         *plastiter = (team_id == nteams - 1);
     } else {
-      register T chunk_inc_count =
+      T chunk_inc_count =
           (trip_count / nteams + ((trip_count % nteams) ? 1 : 0)) * incr;
-      register T upper = *pupper;
+      T upper = *pupper;
       KMP_DEBUG_ASSERT(__kmp_static == kmp_sch_static_greedy);
       // Unknown static scheduling type.
       *plower += team_id * chunk_inc_count;
@@ -2714,11 +2714,11 @@
     void *obj // Higher-level synchronization object, or NULL.
     ) {
   // note: we may not belong to a team at this point
-  register volatile kmp_uint32 *spin = spinner;
-  register kmp_uint32 check = checker;
-  register kmp_uint32 spins;
-  register kmp_uint32 (*f)(kmp_uint32, kmp_uint32) = pred;
-  register kmp_uint32 r;
+  volatile kmp_uint32 *spin = spinner;
+  kmp_uint32 check = checker;
+  kmp_uint32 spins;
+  kmp_uint32 (*f)(kmp_uint32, kmp_uint32) = pred;
+  kmp_uint32 r;

   KMP_FSYNC_SPIN_INIT(obj, CCAST(kmp_uint32 *, spin));
   KMP_INIT_YIELD(spins);
@@ -2744,10 +2744,10 @@
     void *obj // Higher-level synchronization object, or NULL.
     ) {
   // note: we may not belong to a team at this point
-  register void *spin = spinner;
-  register kmp_uint32 check = checker;
-  register kmp_uint32 spins;
-  register kmp_uint32 (*f)(void *, kmp_uint32) = pred;
+  void *spin = spinner;
+  kmp_uint32 check = checker;
+  kmp_uint32 spins;
+  kmp_uint32 (*f)(void *, kmp_uint32) = pred;

   KMP_FSYNC_SPIN_INIT(obj, spin);
   KMP_INIT_YIELD(spins);
Index: openmp/trunk/runtime/src/kmp_lock.cpp
===================================================================
--- openmp/trunk/runtime/src/kmp_lock.cpp
+++ openmp/trunk/runtime/src/kmp_lock.cpp
@@ -1130,7 +1130,7 @@
 __forceinline static int
 __kmp_acquire_queuing_lock_timed_template(kmp_queuing_lock_t *lck,
                                           kmp_int32 gtid) {
-  register kmp_info_t *this_thr = __kmp_thread_from_gtid(gtid);
+  kmp_info_t *this_thr = __kmp_thread_from_gtid(gtid);
   volatile kmp_int32 *head_id_p = &lck->lk.head_id;
   volatile kmp_int32 *tail_id_p = &lck->lk.tail_id;
   volatile kmp_uint32 *spin_here_p;
@@ -1401,7 +1401,7 @@
 }

 int __kmp_release_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) {
-  register kmp_info_t *this_thr;
+  kmp_info_t *this_thr;
   volatile kmp_int32 *head_id_p = &lck->lk.head_id;
   volatile kmp_int32 *tail_id_p = &lck->lk.tail_id;
Index: openmp/trunk/runtime/src/kmp_sched.cpp
===================================================================
--- openmp/trunk/runtime/src/kmp_sched.cpp
+++ openmp/trunk/runtime/src/kmp_sched.cpp
@@ -53,12 +53,12 @@
   typedef typename traits_t<T>::unsigned_t UT;
   typedef typename traits_t<T>::signed_t ST;
   /* this all has to be changed back to TID and such.. */
-  register kmp_int32 gtid = global_tid;
-  register kmp_uint32 tid;
-  register kmp_uint32 nth;
-  register UT trip_count;
-  register kmp_team_t *team;
-  register kmp_info_t *th = __kmp_threads[gtid];
+  kmp_int32 gtid = global_tid;
+  kmp_uint32 tid;
+  kmp_uint32 nth;
+  UT trip_count;
+  kmp_team_t *team;
+  kmp_info_t *th = __kmp_threads[gtid];

 #if OMPT_SUPPORT && OMPT_TRACE
   ompt_team_info_t *team_info = NULL;
@@ -247,16 +247,16 @@
         *plastiter = (tid == trip_count - 1);
     } else {
       if (__kmp_static == kmp_sch_static_balanced) {
-        register UT small_chunk = trip_count / nth;
-        register UT extras = trip_count % nth;
+        UT small_chunk = trip_count / nth;
+        UT extras = trip_count % nth;
         *plower += incr * (tid * small_chunk + (tid < extras ? tid : extras));
         *pupper = *plower + small_chunk * incr - (tid < extras ? 0 : incr);
         if (plastiter != NULL)
           *plastiter = (tid == nth - 1);
       } else {
-        register T big_chunk_inc_count =
+        T big_chunk_inc_count =
             (trip_count / nth + ((trip_count % nth) ? 1 : 0)) * incr;
-        register T old_upper = *pupper;
+        T old_upper = *pupper;
         KMP_DEBUG_ASSERT(__kmp_static == kmp_sch_static_greedy);
         // Unknown static scheduling type.
@@ -284,7 +284,7 @@
       break;
     }
     case kmp_sch_static_chunked: {
-      register ST span;
+      ST span;
       if (chunk < 1) {
         chunk = 1;
       }
@@ -298,9 +298,9 @@
     }
 #if OMP_45_ENABLED
     case kmp_sch_static_balanced_chunked: {
-      register T old_upper = *pupper;
+      T old_upper = *pupper;
       // round up to make sure the chunk is enough to cover all iterations
-      register UT span = (trip_count + nth - 1) / nth;
+      UT span = (trip_count + nth - 1) / nth;

       // perform chunk adjustment
       chunk = (span + chunk - 1) & ~(chunk - 1);
@@ -376,12 +376,12 @@
   KMP_COUNT_BLOCK(OMP_DISTRIBUTE);
   typedef typename traits_t<T>::unsigned_t UT;
   typedef typename traits_t<T>::signed_t ST;
-  register kmp_uint32 tid;
-  register kmp_uint32 nth;
-  register kmp_uint32 team_id;
-  register kmp_uint32 nteams;
-  register UT trip_count;
-  register kmp_team_t *team;
+  kmp_uint32 tid;
+  kmp_uint32 nth;
+  kmp_uint32 team_id;
+  kmp_uint32 nteams;
+  UT trip_count;
+  kmp_team_t *team;
   kmp_info_t *th;

   KMP_DEBUG_ASSERT(plastiter && plower && pupper && pupperDist && pstride);
@@ -462,17 +462,17 @@
     } else {
       // Get the team's chunk first (each team gets at most one chunk)
       if (__kmp_static == kmp_sch_static_balanced) {
-        register UT chunkD = trip_count / nteams;
-        register UT extras = trip_count % nteams;
+        UT chunkD = trip_count / nteams;
+        UT extras = trip_count % nteams;
         *plower +=
             incr * (team_id * chunkD + (team_id < extras ? team_id : extras));
         *pupperDist = *plower + chunkD * incr - (team_id < extras ? 0 : incr);
         if (plastiter != NULL)
           *plastiter = (team_id == nteams - 1);
       } else {
-        register T chunk_inc_count =
+        T chunk_inc_count =
             (trip_count / nteams + ((trip_count % nteams) ? 1 : 0)) * incr;
-        register T upper = *pupper;
+        T upper = *pupper;
         KMP_DEBUG_ASSERT(__kmp_static == kmp_sch_static_greedy);
         // Unknown static scheduling type.
         *plower += team_id * chunk_inc_count;
@@ -531,17 +531,17 @@
           *plastiter = 0;
       } else {
         if (__kmp_static == kmp_sch_static_balanced) {
-          register UT chunkL = trip_count / nth;
-          register UT extras = trip_count % nth;
+          UT chunkL = trip_count / nth;
+          UT extras = trip_count % nth;
           *plower += incr * (tid * chunkL + (tid < extras ? tid : extras));
           *pupper = *plower + chunkL * incr - (tid < extras ? 0 : incr);
           if (plastiter != NULL)
             if (*plastiter != 0 && !(tid == nth - 1))
               *plastiter = 0;
         } else {
-          register T chunk_inc_count =
+          T chunk_inc_count =
              (trip_count / nth + ((trip_count % nth) ? 1 : 0)) * incr;
-          register T upper = *pupperDist;
+          T upper = *pupperDist;
           KMP_DEBUG_ASSERT(__kmp_static == kmp_sch_static_greedy);
           // Unknown static scheduling type.
           *plower += tid * chunk_inc_count;
@@ -570,7 +570,7 @@
       break;
     }
     case kmp_sch_static_chunked: {
-      register ST span;
+      ST span;
       if (chunk < 1)
        chunk = 1;
      span = chunk * incr;
Index: openmp/trunk/runtime/src/thirdparty/ittnotify/ittnotify_static.c
===================================================================
--- openmp/trunk/runtime/src/thirdparty/ittnotify/ittnotify_static.c
+++ openmp/trunk/runtime/src/thirdparty/ittnotify/ittnotify_static.c
@@ -810,7 +810,7 @@

 static __itt_group_id __itt_get_groups(void)
 {
-    register int i;
+    int i;
     __itt_group_id res = __itt_group_none;
     const char* var_name = "INTEL_ITTNOTIFY_GROUPS";
     const char* group_str = __itt_get_env_var(var_name);
@@ -868,7 +868,7 @@
 /* It's not used right now! Comment it out to avoid warnings.
 static void __itt_reinit_all_pointers(void)
 {
-    register int i;
+    int i;
     // Fill all pointers with initial stubs
     for (i = 0; _N_(_ittapi_global).api_list_ptr[i].name != NULL; i++)
         *_N_(_ittapi_global).api_list_ptr[i].func_ptr = _N_(_ittapi_global).api_list_ptr[i].init_func;
@@ -877,7 +877,7 @@

 static void __itt_nullify_all_pointers(void)
 {
-    register int i;
+    int i;
     /* Nulify all pointers except domain_create and string_handle_create */
     for (i = 0; _N_(_ittapi_global).api_list_ptr[i].name != NULL; i++)
         *_N_(_ittapi_global).api_list_ptr[i].func_ptr = _N_(_ittapi_global).api_list_ptr[i].null_func;
@@ -923,7 +923,7 @@

 ITT_EXTERN_C int _N_(init_ittlib)(const char* lib_name, __itt_group_id init_groups)
 {
-    register int i;
+    int i;
     __itt_group_id groups;
 #ifdef ITT_COMPLETE_GROUP
     __itt_group_id zero_group = __itt_group_none;
@@ -1232,7 +1232,7 @@
Index: openmp/trunk/runtime/src/z_Windows_NT_util.cpp
===================================================================
--- openmp/trunk/runtime/src/z_Windows_NT_util.cpp
+++ openmp/trunk/runtime/src/z_Windows_NT_util.cpp
@@ -1232,7 +1232,7 @@
   // TODO: This code is very similar to KMP_WAIT_YIELD. Need to generalize
   // KMP_WAIT_YIELD to cover this usage also.
   void *obj = NULL;
-  register kmp_uint32 spins;
+  kmp_uint32 spins;
 #if USE_ITT_BUILD
   KMP_FSYNC_SPIN_INIT(obj, (void *)&th->th.th_info.ds.ds_alive);
 #endif /* USE_ITT_BUILD */
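
Every hunk above applies the same mechanical rewrite: the `register` storage-class specifier is deleted and the declaration is otherwise left untouched. `register` has long been nothing more than an optimization hint that mainstream compilers ignore (they do their own register allocation); it was deprecated in C++11 and removed outright in C++17, where using it is ill-formed, so stripping it keeps these sources compiling cleanly under newer standards and silences warnings such as Clang's -Wdeprecated-register. The deletion cannot change behavior: in C++ the keyword had no semantic effect at all, and in C (the ittnotify_static.c hunks) its only semantic effect is forbidding taking the variable's address, which the loop counters touched here never do. A minimal, hypothetical sketch of the pattern, not taken from the runtime:

    // Hypothetical standalone example (not from this patch) showing the
    // before/after of removing `register`; generated code is identical.
    #include <cstdio>

    static int sum(const int *a, int n) {
      // Before: register int s = 0;  (removed by C++17)
      // After:  a plain declaration, as in the hunks above.
      int s = 0;
      for (int i = 0; i < n; ++i)
        s += a[i];
      return s;
    }

    int main() {
      const int a[] = {1, 2, 3, 4};
      std::printf("%d\n", sum(a, 4)); // prints 10
      return 0;
    }

Compiling the `register` variant with `clang++ -std=c++17` fails, while the rewritten form builds under every standard, which is the practical motivation for a sweep like this one.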