Index: runtime/cmake/LibompUtils.cmake
===================================================================
--- runtime/cmake/LibompUtils.cmake
+++ runtime/cmake/LibompUtils.cmake
@@ -133,12 +133,12 @@
 endfunction()

 # void libomp_get_build_number(string src_dir, string* return_build_number);
-# - grab the eight digit build number (or 00000000) from kmp_version.c
+# - grab the eight digit build number (or 00000000) from kmp_version.cpp
 function(libomp_get_build_number src_dir return_build_number)
-  # sets file_lines_list to a list of all lines in kmp_version.c
-  file(STRINGS "${src_dir}/src/kmp_version.c" file_lines_list)
+  # sets file_lines_list to a list of all lines in kmp_version.cpp
+  file(STRINGS "${src_dir}/src/kmp_version.cpp" file_lines_list)
-  # runs through each line in kmp_version.c
+  # runs through each line in kmp_version.cpp
   foreach(line IN LISTS file_lines_list)
     # if the line begins with "#define KMP_VERSION_BUILD" then we take note of the build number
     string(REGEX MATCH "^[ \t]*#define[ \t]+KMP_VERSION_BUILD" valid "${line}")
Index: runtime/src/CMakeLists.txt
===================================================================
--- runtime/src/CMakeLists.txt
+++ runtime/src/CMakeLists.txt
@@ -56,43 +56,27 @@
 set(LIBOMP_CXXFILES)
 set(LIBOMP_ASMFILES)
 if(${STUBS_LIBRARY})
-  set(LIBOMP_CFILES kmp_stub.c)
+  set(LIBOMP_CXXFILES kmp_stub.cpp)
 else()
-  # Get C files
-  set(LIBOMP_CFILES
-    kmp_alloc.c
-    kmp_atomic.c
-    kmp_csupport.c
-    kmp_debug.c
-    kmp_itt.c
-    kmp_environment.c
-    kmp_error.c
-    kmp_global.c
-    kmp_i18n.c
-    kmp_io.c
-    kmp_runtime.c
-    kmp_settings.c
-    kmp_str.c
-    kmp_tasking.c
-    kmp_taskq.c
-    kmp_threadprivate.c
-    kmp_utility.c
-  )
-  if(WIN32)
-    # Windows specific files
-    libomp_append(LIBOMP_CFILES z_Windows_NT_util.c)
-    libomp_append(LIBOMP_CFILES z_Windows_NT-586_util.c)
-    libomp_append(LIBOMP_ASMFILES z_Windows_NT-586_asm.asm) # Windows assembly file
-  else()
-    # Unix specific files
-    libomp_append(LIBOMP_CFILES z_Linux_util.c)
-    libomp_append(LIBOMP_CFILES kmp_gsupport.c)
-    libomp_append(LIBOMP_ASMFILES z_Linux_asm.s) # Unix assembly file
-  endif()
-  libomp_append(LIBOMP_CFILES thirdparty/ittnotify/ittnotify_static.c LIBOMP_USE_ITT_NOTIFY)
-  libomp_append(LIBOMP_CFILES kmp_debugger.c LIBOMP_USE_DEBUGGER)
   # Get C++ files
   set(LIBOMP_CXXFILES
+    kmp_alloc.cpp
+    kmp_atomic.cpp
+    kmp_csupport.cpp
+    kmp_debug.cpp
+    kmp_itt.cpp
+    kmp_environment.cpp
+    kmp_error.cpp
+    kmp_global.cpp
+    kmp_i18n.cpp
+    kmp_io.cpp
+    kmp_runtime.cpp
+    kmp_settings.cpp
+    kmp_str.cpp
+    kmp_tasking.cpp
+    kmp_taskq.cpp
+    kmp_threadprivate.cpp
+    kmp_utility.cpp
     kmp_barrier.cpp
     kmp_wait_release.cpp
     kmp_affinity.cpp
@@ -100,6 +84,19 @@
     kmp_lock.cpp
     kmp_sched.cpp
   )
+  if(WIN32)
+    # Windows specific files
+    libomp_append(LIBOMP_CXXFILES z_Windows_NT_util.cpp)
+    libomp_append(LIBOMP_CXXFILES z_Windows_NT-586_util.cpp)
+    libomp_append(LIBOMP_ASMFILES z_Windows_NT-586_asm.asm) # Windows assembly file
+  else()
+    # Unix specific files
+    libomp_append(LIBOMP_CXXFILES z_Linux_util.cpp)
+    libomp_append(LIBOMP_CXXFILES kmp_gsupport.cpp)
+    libomp_append(LIBOMP_ASMFILES z_Linux_asm.s) # Unix assembly file
+  endif()
+  libomp_append(LIBOMP_CFILES thirdparty/ittnotify/ittnotify_static.c LIBOMP_USE_ITT_NOTIFY)
+  libomp_append(LIBOMP_CXXFILES kmp_debugger.cpp LIBOMP_USE_DEBUGGER)
   libomp_append(LIBOMP_CXXFILES kmp_stats.cpp LIBOMP_STATS)
   libomp_append(LIBOMP_CXXFILES kmp_stats_timing.cpp LIBOMP_STATS)
   if(${LIBOMP_OMP_VERSION} GREATER 40 OR ${LIBOMP_OMP_VERSION} EQUAL 40)
@@ -108,12 +105,11 @@
   endif()
 endif()

 # Files common to stubs and normal library
-libomp_append(LIBOMP_CFILES kmp_ftn_cdecl.c)
-libomp_append(LIBOMP_CFILES kmp_ftn_extra.c)
-libomp_append(LIBOMP_CFILES kmp_version.c)
-libomp_append(LIBOMP_CFILES ompt-general.c IF_TRUE LIBOMP_OMPT_SUPPORT)
-
-libomp_append(LIBOMP_CFILES tsan_annotations.c IF_TRUE LIBOMP_TSAN_SUPPORT)
+libomp_append(LIBOMP_CXXFILES kmp_ftn_cdecl.cpp)
+libomp_append(LIBOMP_CXXFILES kmp_ftn_extra.cpp)
+libomp_append(LIBOMP_CXXFILES kmp_version.cpp)
+libomp_append(LIBOMP_CXXFILES ompt-general.cpp IF_TRUE LIBOMP_OMPT_SUPPORT)
+libomp_append(LIBOMP_CXXFILES tsan_annotations.cpp IF_TRUE LIBOMP_TSAN_SUPPORT)
 set(LIBOMP_SOURCE_FILES ${LIBOMP_CFILES} ${LIBOMP_CXXFILES} ${LIBOMP_ASMFILES})

 # For Windows, there is a resource file (.rc -> .res) that is also compiled
@@ -202,7 +198,7 @@
     set_source_files_properties(thirdparty/ittnotify/ittnotify_static.c PROPERTIES COMPILE_DEFINITIONS "UNICODE")

     # Create Windows import library
-    # the import library is "re-linked" to include kmp_import.c which prevents
+    # the import library is "re-linked" to include kmp_import.cpp which prevents
     # linking of both Visual Studio OpenMP and newly built OpenMP
-    set_source_files_properties(kmp_import.c PROPERTIES COMPILE_FLAGS "${LIBOMP_CONFIGURED_CFLAGS}")
+    set_source_files_properties(kmp_import.cpp PROPERTIES COMPILE_FLAGS "${LIBOMP_CONFIGURED_CFLAGS}")
     set(LIBOMP_IMP_LIB_FILE ${LIBOMP_LIB_NAME}${CMAKE_STATIC_LIBRARY_SUFFIX})
@@ -220,8 +216,8 @@
     set(LIBOMP_GENERATED_IMP_LIB ${CMAKE_CURRENT_BINARY_DIR}/${LIBOMP_GENERATED_IMP_LIB_FILENAME})
   endif()
   set_source_files_properties(${LIBOMP_GENERATED_IMP_LIB} PROPERTIES GENERATED TRUE EXTERNAL_OBJECT TRUE)
-  # Create new import library that is just the previously created one + kmp_import.c
-  add_library(ompimp STATIC ${LIBOMP_GENERATED_IMP_LIB} kmp_import.c)
+  # Create new import library that is just the previously created one + kmp_import.cpp
+  add_library(ompimp STATIC ${LIBOMP_GENERATED_IMP_LIB} kmp_import.cpp)
   set_target_properties(ompimp PROPERTIES
     PREFIX "" SUFFIX "" OUTPUT_NAME "${LIBOMP_IMP_LIB_FILE}"
     LINKER_LANGUAGE C
Index: runtime/src/kmp.h
===================================================================
--- runtime/src/kmp.h
+++ runtime/src/kmp.h
@@ -3183,7 +3183,7 @@
 extern void __kmp_aux_set_blocktime (int arg, kmp_info_t *thread, int tid);
 extern void __kmp_aux_set_defaults( char const * str, int len );

-/* Functions below put here to call them from __kmp_aux_env_initialize() in kmp_settings.c */
+/* Functions called from __kmp_aux_env_initialize() in kmp_settings.cpp */
 void kmpc_set_blocktime (int arg);
 void ompc_set_nested( int flag );
 void ompc_set_dynamic( int flag );
Index: runtime/src/kmp_alloc.c
===================================================================
--- runtime/src/kmp_alloc.c
+++ runtime/src/kmp_alloc.c
@@ -1,2078 +0,0 @@
-/*
- * kmp_alloc.c -- private/shared dynamic memory allocation and management
- */
-
-
-//===----------------------------------------------------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is dual licensed under the MIT and the University of Illinois Open
-// Source Licenses. See LICENSE.txt for details.
-// -//===----------------------------------------------------------------------===// - - -#include "kmp.h" -#include "kmp_wrapper_malloc.h" -#include "kmp_io.h" - -// Disable bget when it is not used -#if KMP_USE_BGET - -/* Thread private buffer management code */ - -typedef int (*bget_compact_t)(size_t, int); -typedef void *(*bget_acquire_t)(size_t); -typedef void (*bget_release_t)(void *); - -/* NOTE: bufsize must be a signed datatype */ - -#if KMP_OS_WINDOWS -# if KMP_ARCH_X86 || KMP_ARCH_ARM - typedef kmp_int32 bufsize; -# else - typedef kmp_int64 bufsize; -# endif -#else - typedef ssize_t bufsize; -#endif - -/* The three modes of operation are, fifo search, lifo search, and best-fit */ - -typedef enum bget_mode { - bget_mode_fifo = 0, - bget_mode_lifo = 1, - bget_mode_best = 2 -} bget_mode_t; - - -static void bpool( kmp_info_t *th, void *buffer, bufsize len); -static void *bget( kmp_info_t *th, bufsize size); -static void *bgetz( kmp_info_t *th, bufsize size); -static void *bgetr( kmp_info_t *th, void *buffer, bufsize newsize); -static void brel( kmp_info_t *th, void *buf); -static void bectl( kmp_info_t *th, bget_compact_t compact, bget_acquire_t acquire, bget_release_t release, bufsize pool_incr ); - -#ifdef KMP_DEBUG -static void bstats( kmp_info_t *th, bufsize *curalloc, bufsize *totfree, bufsize *maxfree, long *nget, long *nrel); -static void bstatse( kmp_info_t *th, bufsize *pool_incr, long *npool, long *npget, long *nprel, long *ndget, long *ndrel); -static void bufdump( kmp_info_t *th, void *buf); -static void bpoold( kmp_info_t *th, void *pool, int dumpalloc, int dumpfree); -static int bpoolv( kmp_info_t *th, void *pool); -#endif - -/* BGET CONFIGURATION */ - /* Buffer allocation size quantum: - all buffers allocated are a - multiple of this size. This - MUST be a power of two. */ - - /* On IA-32 architecture with Linux* OS, - malloc() does not - ensure 16 byte alignmnent */ - -#if KMP_ARCH_X86 || !KMP_HAVE_QUAD - -#define SizeQuant 8 -#define AlignType double - -#else - -#define SizeQuant 16 -#define AlignType _Quad - -#endif - -#define BufStats 1 /* Define this symbol to enable the - bstats() function which calculates - the total free space in the buffer - pool, the largest available - buffer, and the total space - currently allocated. */ - -#ifdef KMP_DEBUG - -#define BufDump 1 /* Define this symbol to enable the - bpoold() function which dumps the - buffers in a buffer pool. */ - -#define BufValid 1 /* Define this symbol to enable the - bpoolv() function for validating - a buffer pool. */ - -#define DumpData 1 /* Define this symbol to enable the - bufdump() function which allows - dumping the contents of an allocated - or free buffer. */ -#ifdef NOT_USED_NOW - -#define FreeWipe 1 /* Wipe free buffers to a guaranteed - pattern of garbage to trip up - miscreants who attempt to use - pointers into released buffers. */ - -#define BestFit 1 /* Use a best fit algorithm when - searching for space for an - allocation request. This uses - memory more efficiently, but - allocation will be much slower. 
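Reviewer note: before the bin-size table below, it is worth seeing the rounding rule this quantum implies. bget() later normalizes every request with `size = (size + (SizeQuant - 1)) & (~(SizeQuant - 1))`; a minimal standalone sketch of that idiom (the name `round_up` is illustrative, not part of the runtime):

```cpp
#include <cassert>
#include <cstddef>

// Round n up to the next multiple of Quant (a power of two) -- the same
// idiom bget() applies with SizeQuant before searching the free-list bins.
template <std::size_t Quant>
constexpr std::size_t round_up(std::size_t n) {
    static_assert((Quant & (Quant - 1)) == 0, "Quant must be a power of two");
    return (n + (Quant - 1)) & ~(Quant - 1);
}

int main() {
    assert(round_up<8>(13) == 16);   // SizeQuant == 8 (IA-32, no _Quad)
    assert(round_up<16>(13) == 16);  // SizeQuant == 16 (with _Quad)
    assert(round_up<16>(32) == 32);  // already-aligned sizes pass through
    return 0;
}
```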
*/ -#endif /* NOT_USED_NOW */ -#endif /* KMP_DEBUG */ - - -static bufsize bget_bin_size[ ] = { - 0, -// 1 << 6, /* .5 Cache line */ - 1 << 7, /* 1 Cache line, new */ - 1 << 8, /* 2 Cache lines */ - 1 << 9, /* 4 Cache lines, new */ - 1 << 10, /* 8 Cache lines */ - 1 << 11, /* 16 Cache lines, new */ - 1 << 12, - 1 << 13, /* new */ - 1 << 14, - 1 << 15, /* new */ - 1 << 16, - 1 << 17, - 1 << 18, - 1 << 19, - 1 << 20, /* 1MB */ - 1 << 21, /* 2MB */ - 1 << 22, /* 4MB */ - 1 << 23, /* 8MB */ - 1 << 24, /* 16MB */ - 1 << 25, /* 32MB */ -}; - -#define MAX_BGET_BINS (int)(sizeof(bget_bin_size) / sizeof(bufsize)) - -struct bfhead; - -/* Declare the interface, including the requested buffer size type, - bufsize. */ - -/* Queue links */ - -typedef struct qlinks { - struct bfhead *flink; /* Forward link */ - struct bfhead *blink; /* Backward link */ -} qlinks_t; - -/* Header in allocated and free buffers */ - -typedef struct bhead2 { - kmp_info_t *bthr; /* The thread which owns the buffer pool */ - bufsize prevfree; /* Relative link back to previous - free buffer in memory or 0 if - previous buffer is allocated. */ - bufsize bsize; /* Buffer size: positive if free, - negative if allocated. */ -} bhead2_t; - -/* Make sure the bhead structure is a multiple of SizeQuant in size. */ - -typedef union bhead { - KMP_ALIGN( SizeQuant ) - AlignType b_align; - char b_pad[ sizeof(bhead2_t) + (SizeQuant - (sizeof(bhead2_t) % SizeQuant)) ]; - bhead2_t bb; -} bhead_t; -#define BH(p) ((bhead_t *) (p)) - -/* Header in directly allocated buffers (by acqfcn) */ - -typedef struct bdhead -{ - bufsize tsize; /* Total size, including overhead */ - bhead_t bh; /* Common header */ -} bdhead_t; -#define BDH(p) ((bdhead_t *) (p)) - -/* Header in free buffers */ - -typedef struct bfhead { - bhead_t bh; /* Common allocated/free header */ - qlinks_t ql; /* Links on free list */ -} bfhead_t; -#define BFH(p) ((bfhead_t *) (p)) - -typedef struct thr_data { - bfhead_t freelist[ MAX_BGET_BINS ]; -#if BufStats - size_t totalloc; /* Total space currently allocated */ - long numget, numrel; /* Number of bget() and brel() calls */ - long numpblk; /* Number of pool blocks */ - long numpget, numprel; /* Number of block gets and rels */ - long numdget, numdrel; /* Number of direct gets and rels */ -#endif /* BufStats */ - - /* Automatic expansion block management functions */ - bget_compact_t compfcn; - bget_acquire_t acqfcn; - bget_release_t relfcn; - - bget_mode_t mode; /* what allocation mode to use? */ - - bufsize exp_incr; /* Expansion block size */ - bufsize pool_len; /* 0: no bpool calls have been made - -1: not all pool blocks are - the same size - >0: (common) block size for all - bpool calls made so far - */ - bfhead_t * last_pool; /* Last pool owned by this thread (delay dealocation) */ -} thr_data_t; - -/* Minimum allocation quantum: */ - -#define QLSize (sizeof(qlinks_t)) -#define SizeQ ((SizeQuant > QLSize) ? SizeQuant : QLSize) -#define MaxSize (bufsize)( ~ ( ( (bufsize)( 1 ) << ( sizeof( bufsize ) * CHAR_BIT - 1 ) ) | ( SizeQuant - 1 ) ) ) - // Maximun for the requested size. - -/* End sentinel: value placed in bsize field of dummy block delimiting - end of pool block. The most negative number which will fit in a - bufsize, defined in a way that the compiler will accept. 
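Reviewer note: the ESent definition that follows builds the most negative bufsize out of smaller constants so that no intermediate expression overflows. A compile-time check of that claim, assuming a 64-bit bufsize (the `bufsize` typedef and `kESent` here are stand-ins, not the runtime's):

```cpp
#include <cstdint>
#include <limits>

using bufsize = std::int64_t;  // assumption: 64-bit bufsize build

// Same expression as the ESent macro below: the magnitude is
// ((2^62 - 1) * 2) + 2 == 2^63, assembled without overflowing bufsize.
constexpr bufsize kESent =
    (bufsize)(-(((((bufsize)1) << ((int)sizeof(bufsize) * 8 - 2)) - 1) * 2) - 2);

static_assert(kESent == std::numeric_limits<bufsize>::min(),
              "ESent is the most negative value a bufsize can hold");

int main() { return 0; }
```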
*/ - -#define ESent ((bufsize) (-(((((bufsize)1)<<((int)sizeof(bufsize)*8-2))-1)*2)-2)) - -/* ------------------------------------------------------------------------ */ - -/* Thread Data management routines */ - -static int -bget_get_bin( bufsize size ) -{ - // binary chop bins - int lo = 0, hi = MAX_BGET_BINS - 1; - - KMP_DEBUG_ASSERT( size > 0 ); - - while ( (hi - lo) > 1 ) { - int mid = (lo + hi) >> 1; - if (size < bget_bin_size[ mid ]) - hi = mid - 1; - else - lo = mid; - } - - KMP_DEBUG_ASSERT( (lo >= 0) && (lo < MAX_BGET_BINS) ); - - return lo; -} - -static void -set_thr_data( kmp_info_t *th ) -{ - int i; - thr_data_t *data; - - data = - (thr_data_t *)( - ( ! th->th.th_local.bget_data ) ? __kmp_allocate( sizeof( *data ) ) : th->th.th_local.bget_data - ); - - memset( data, '\0', sizeof( *data ) ); - - for (i = 0; i < MAX_BGET_BINS; ++i) { - data->freelist[ i ].ql.flink = & data->freelist[ i ]; - data->freelist[ i ].ql.blink = & data->freelist[ i ]; - } - - th->th.th_local.bget_data = data; - th->th.th_local.bget_list = 0; -#if ! USE_CMP_XCHG_FOR_BGET -#ifdef USE_QUEUING_LOCK_FOR_BGET - __kmp_init_lock( & th->th.th_local.bget_lock ); -#else - __kmp_init_bootstrap_lock( & th->th.th_local.bget_lock ); -#endif /* USE_LOCK_FOR_BGET */ -#endif /* ! USE_CMP_XCHG_FOR_BGET */ -} - -static thr_data_t * -get_thr_data( kmp_info_t *th ) -{ - thr_data_t *data; - - data = (thr_data_t *) th->th.th_local.bget_data; - - KMP_DEBUG_ASSERT( data != 0 ); - - return data; -} - - -#ifdef KMP_DEBUG - -static void -__kmp_bget_validate_queue( kmp_info_t *th ) -{ - /* NOTE: assume that the global_lock is held */ - - void *p = (void *) th->th.th_local.bget_list; - - while (p != 0) { - bfhead_t *b = BFH(((char *) p) - sizeof(bhead_t)); - - KMP_DEBUG_ASSERT(b->bh.bb.bsize != 0); - p = (void *) b->ql.flink; - } -} - -#endif - -/* Walk the free list and release the enqueued buffers */ - -static void -__kmp_bget_dequeue( kmp_info_t *th ) -{ - void *p = TCR_SYNC_PTR(th->th.th_local.bget_list); - - if (p != 0) { - #if USE_CMP_XCHG_FOR_BGET - { - volatile void *old_value = TCR_SYNC_PTR(th->th.th_local.bget_list); - while ( ! KMP_COMPARE_AND_STORE_PTR( - & th->th.th_local.bget_list, old_value, NULL ) ) - { - KMP_CPU_PAUSE(); - old_value = TCR_SYNC_PTR(th->th.th_local.bget_list); - } - p = (void *) old_value; - } - #else /* ! 
USE_CMP_XCHG_FOR_BGET */ - #ifdef USE_QUEUING_LOCK_FOR_BGET - __kmp_acquire_lock( & th->th.th_local.bget_lock, - __kmp_gtid_from_thread(th) ); - #else - __kmp_acquire_bootstrap_lock( & th->th.th_local.bget_lock ); - #endif /* USE_QUEUING_LOCK_FOR_BGET */ - - p = (void *) th->th.th_local.bget_list; - th->th.th_local.bget_list = 0; - - #ifdef USE_QUEUING_LOCK_FOR_BGET - __kmp_release_lock( & th->th.th_local.bget_lock, - __kmp_gtid_from_thread(th) ); - #else - __kmp_release_bootstrap_lock( & th->th.th_local.bget_lock ); - #endif - #endif /* USE_CMP_XCHG_FOR_BGET */ - - /* Check again to make sure the list is not empty */ - - while (p != 0) { - void *buf = p; - bfhead_t *b = BFH(((char *) p) - sizeof(bhead_t)); - - KMP_DEBUG_ASSERT( b->bh.bb.bsize != 0 ); - KMP_DEBUG_ASSERT( ( (kmp_uintptr_t)TCR_PTR(b->bh.bb.bthr) & ~1 ) == - (kmp_uintptr_t)th ); // clear possible mark - KMP_DEBUG_ASSERT( b->ql.blink == 0 ); - - p = (void *) b->ql.flink; - - brel( th, buf ); - } - } -} - -/* Chain together the free buffers by using the thread owner field */ - -static void -__kmp_bget_enqueue( kmp_info_t *th, void *buf -#ifdef USE_QUEUING_LOCK_FOR_BGET - , kmp_int32 rel_gtid -#endif - ) -{ - bfhead_t *b = BFH(((char *) buf) - sizeof(bhead_t)); - - KMP_DEBUG_ASSERT( b->bh.bb.bsize != 0 ); - KMP_DEBUG_ASSERT( ( (kmp_uintptr_t)TCR_PTR(b->bh.bb.bthr) & ~1 ) == - (kmp_uintptr_t)th ); // clear possible mark - - b->ql.blink = 0; - - KC_TRACE( 10, ( "__kmp_bget_enqueue: moving buffer to T#%d list\n", - __kmp_gtid_from_thread( th ) ) ); - -#if USE_CMP_XCHG_FOR_BGET - { - volatile void *old_value = TCR_PTR(th->th.th_local.bget_list); - /* the next pointer must be set before setting bget_list to buf to avoid - exposing a broken list to other threads, even for an instant. */ - b->ql.flink = BFH( old_value ); - - while ( ! KMP_COMPARE_AND_STORE_PTR( - & th->th.th_local.bget_list, old_value, buf ) ) - { - KMP_CPU_PAUSE(); - old_value = TCR_PTR(th->th.th_local.bget_list); - /* the next pointer must be set before setting bget_list to buf to avoid - exposing a broken list to other threads, even for an instant. */ - b->ql.flink = BFH( old_value ); - } - } -#else /* ! 
USE_CMP_XCHG_FOR_BGET */ -# ifdef USE_QUEUING_LOCK_FOR_BGET - __kmp_acquire_lock( & th->th.th_local.bget_lock, rel_gtid ); -# else - __kmp_acquire_bootstrap_lock( & th->th.th_local.bget_lock ); - # endif - - b->ql.flink = BFH( th->th.th_local.bget_list ); - th->th.th_local.bget_list = (void *) buf; - -# ifdef USE_QUEUING_LOCK_FOR_BGET - __kmp_release_lock( & th->th.th_local.bget_lock, rel_gtid ); -# else - __kmp_release_bootstrap_lock( & th->th.th_local.bget_lock ); -# endif -#endif /* USE_CMP_XCHG_FOR_BGET */ -} - -/* insert buffer back onto a new freelist */ - -static void -__kmp_bget_insert_into_freelist( thr_data_t *thr, bfhead_t *b ) -{ - int bin; - - KMP_DEBUG_ASSERT( ((size_t)b ) % SizeQuant == 0 ); - KMP_DEBUG_ASSERT( b->bh.bb.bsize % SizeQuant == 0 ); - - bin = bget_get_bin( b->bh.bb.bsize ); - - KMP_DEBUG_ASSERT(thr->freelist[ bin ].ql.blink->ql.flink == &thr->freelist[ bin ]); - KMP_DEBUG_ASSERT(thr->freelist[ bin ].ql.flink->ql.blink == &thr->freelist[ bin ]); - - b->ql.flink = &thr->freelist[ bin ]; - b->ql.blink = thr->freelist[ bin ].ql.blink; - - thr->freelist[ bin ].ql.blink = b; - b->ql.blink->ql.flink = b; -} - -/* unlink the buffer from the old freelist */ - -static void -__kmp_bget_remove_from_freelist( bfhead_t *b ) -{ - KMP_DEBUG_ASSERT(b->ql.blink->ql.flink == b); - KMP_DEBUG_ASSERT(b->ql.flink->ql.blink == b); - - b->ql.blink->ql.flink = b->ql.flink; - b->ql.flink->ql.blink = b->ql.blink; -} - -/* ------------------------------------------------------------------------ */ - -/* GET STATS -- check info on free list */ - -static void -bcheck( kmp_info_t *th, bufsize *max_free, bufsize *total_free ) -{ - thr_data_t *thr = get_thr_data( th ); - int bin; - - *total_free = *max_free = 0; - - for (bin = 0; bin < MAX_BGET_BINS; ++bin) { - bfhead_t *b, *best; - - best = &thr->freelist[ bin ]; - b = best->ql.flink; - - while (b != &thr->freelist[ bin ]) { - *total_free += (b->bh.bb.bsize - sizeof( bhead_t )); - if ((best == &thr->freelist[ bin ]) || (b->bh.bb.bsize < best->bh.bb.bsize)) - best = b; - - /* Link to next buffer */ - b = b->ql.flink; - } - - if (*max_free < best->bh.bb.bsize) - *max_free = best->bh.bb.bsize; - } - - if (*max_free > (bufsize)sizeof( bhead_t )) - *max_free -= sizeof( bhead_t ); -} - -/* ------------------------------------------------------------------------ */ - -/* BGET -- Allocate a buffer. */ - -static void * -bget( kmp_info_t *th, bufsize requested_size ) -{ - thr_data_t *thr = get_thr_data( th ); - bufsize size = requested_size; - bfhead_t *b; - void *buf; - int compactseq = 0; - int use_blink = 0; -/* For BestFit */ - bfhead_t *best; - - if ( size < 0 || size + sizeof( bhead_t ) > MaxSize ) { - return NULL; - }; // if - - __kmp_bget_dequeue( th ); /* Release any queued buffers */ - - if (size < (bufsize)SizeQ) { /* Need at least room for the */ - size = SizeQ; /* queue links. */ - } - #if defined( SizeQuant ) && ( SizeQuant > 1 ) - size = (size + (SizeQuant - 1)) & (~(SizeQuant - 1)); - #endif - - size += sizeof(bhead_t); /* Add overhead in allocated buffer - to size required. */ - KMP_DEBUG_ASSERT( size >= 0 ); - KMP_DEBUG_ASSERT( size % SizeQuant == 0 ); - - use_blink = ( thr->mode == bget_mode_lifo ); - - /* If a compact function was provided in the call to bectl(), wrap - a loop around the allocation process to allow compaction to - intervene in case we don't find a suitable buffer in the chain. */ - - for (;;) { - int bin; - - for (bin = bget_get_bin( size ); bin < MAX_BGET_BINS; ++bin) { - /* Link to next buffer */ - b = ( use_blink ? 
thr->freelist[ bin ].ql.blink : thr->freelist[ bin ].ql.flink ); - - if (thr->mode == bget_mode_best) { - best = &thr->freelist[ bin ]; - - /* Scan the free list searching for the first buffer big enough - to hold the requested size buffer. */ - - while (b != &thr->freelist[ bin ]) { - if (b->bh.bb.bsize >= (bufsize) size) { - if ((best == &thr->freelist[ bin ]) || (b->bh.bb.bsize < best->bh.bb.bsize)) { - best = b; - } - } - - /* Link to next buffer */ - b = ( use_blink ? b->ql.blink : b->ql.flink ); - } - b = best; - } - - while (b != &thr->freelist[ bin ]) { - if ((bufsize) b->bh.bb.bsize >= (bufsize) size) { - - /* Buffer is big enough to satisfy the request. Allocate it - to the caller. We must decide whether the buffer is large - enough to split into the part given to the caller and a - free buffer that remains on the free list, or whether the - entire buffer should be removed from the free list and - given to the caller in its entirety. We only split the - buffer if enough room remains for a header plus the minimum - quantum of allocation. */ - - if ((b->bh.bb.bsize - (bufsize) size) > (bufsize)(SizeQ + (sizeof(bhead_t)))) { - bhead_t *ba, *bn; - - ba = BH(((char *) b) + (b->bh.bb.bsize - (bufsize) size)); - bn = BH(((char *) ba) + size); - - KMP_DEBUG_ASSERT(bn->bb.prevfree == b->bh.bb.bsize); - - /* Subtract size from length of free block. */ - b->bh.bb.bsize -= (bufsize) size; - - /* Link allocated buffer to the previous free buffer. */ - ba->bb.prevfree = b->bh.bb.bsize; - - /* Plug negative size into user buffer. */ - ba->bb.bsize = -size; - - /* Mark this buffer as owned by this thread. */ - TCW_PTR(ba->bb.bthr, th); // not an allocated address (do not mark it) - /* Mark buffer after this one not preceded by free block. */ - bn->bb.prevfree = 0; - - /* unlink the buffer from the old freelist, and reinsert it into the new freelist */ - __kmp_bget_remove_from_freelist( b ); - __kmp_bget_insert_into_freelist( thr, b ); -#if BufStats - thr->totalloc += (size_t) size; - thr->numget++; /* Increment number of bget() calls */ -#endif - buf = (void *) ((((char *) ba) + sizeof(bhead_t))); - KMP_DEBUG_ASSERT( ((size_t)buf) % SizeQuant == 0 ); - return buf; - } else { - bhead_t *ba; - - ba = BH(((char *) b) + b->bh.bb.bsize); - - KMP_DEBUG_ASSERT(ba->bb.prevfree == b->bh.bb.bsize); - - /* The buffer isn't big enough to split. Give the whole - shebang to the caller and remove it from the free list. */ - - __kmp_bget_remove_from_freelist( b ); -#if BufStats - thr->totalloc += (size_t) b->bh.bb.bsize; - thr->numget++; /* Increment number of bget() calls */ -#endif - /* Negate size to mark buffer allocated. */ - b->bh.bb.bsize = -(b->bh.bb.bsize); - - /* Mark this buffer as owned by this thread. */ - TCW_PTR(ba->bb.bthr, th); // not an allocated address (do not mark it) - /* Zero the back pointer in the next buffer in memory - to indicate that this buffer is allocated. */ - ba->bb.prevfree = 0; - - /* Give user buffer starting at queue links. */ - buf = (void *) &(b->ql); - KMP_DEBUG_ASSERT( ((size_t)buf) % SizeQuant == 0 ); - return buf; - } - } - - /* Link to next buffer */ - b = ( use_blink ? b->ql.blink : b->ql.flink ); - } - } - - /* We failed to find a buffer. If there's a compact function - defined, notify it of the size requested. If it returns - TRUE, try the allocation again. */ - - if ((thr->compfcn == 0) || (!(*thr->compfcn)(size, ++compactseq))) { - break; - } - } - - /* No buffer available with requested size free. */ - - /* Don't give up yet -- look in the reserve supply. 
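Reviewer note: the split test in the scan above, `(b->bh.bb.bsize - size) > SizeQ + sizeof(bhead_t)`, is the policy knob that decides between carving a buffer and handing it over whole. Isolated, with illustrative stand-in constants (not the configuration-dependent real values):

```cpp
#include <cstdint>
#include <iostream>

using bufsize = std::int64_t;

// Stand-ins for sizeof(bhead_t) and SizeQ; the real values depend on the
// build configuration.
constexpr bufsize kHeader = 16;
constexpr bufsize kMinQuantum = 16;

// A free buffer is split only when the piece left behind can still hold a
// header plus the minimum quantum; otherwise the caller gets the whole thing.
bool should_split(bufsize free_size, bufsize request) {
    return (free_size - request) > (kMinQuantum + kHeader);
}

int main() {
    std::cout << should_split(256, 200) << "\n";  // 56 left over -> 1 (split)
    std::cout << should_split(256, 240) << "\n";  // 16 left over -> 0 (whole)
    return 0;
}
```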
*/ - - if (thr->acqfcn != 0) { - if (size > (bufsize) (thr->exp_incr - sizeof(bhead_t))) { - - /* Request is too large to fit in a single expansion - block. Try to satisy it by a direct buffer acquisition. */ - - bdhead_t *bdh; - - size += sizeof(bdhead_t) - sizeof(bhead_t); - - KE_TRACE( 10, ("%%%%%% MALLOC( %d )\n", (int) size ) ); - - /* richryan */ - bdh = BDH((*thr->acqfcn)((bufsize) size)); - if (bdh != NULL) { - - /* Mark the buffer special by setting the size field - of its header to zero. */ - bdh->bh.bb.bsize = 0; - - /* Mark this buffer as owned by this thread. */ - TCW_PTR(bdh->bh.bb.bthr, th); // don't mark buffer as allocated, - // because direct buffer never goes to free list - bdh->bh.bb.prevfree = 0; - bdh->tsize = size; -#if BufStats - thr->totalloc += (size_t) size; - thr->numget++; /* Increment number of bget() calls */ - thr->numdget++; /* Direct bget() call count */ -#endif - buf = (void *) (bdh + 1); - KMP_DEBUG_ASSERT( ((size_t)buf) % SizeQuant == 0 ); - return buf; - } - - } else { - - /* Try to obtain a new expansion block */ - - void *newpool; - - KE_TRACE( 10, ("%%%%%% MALLOCB( %d )\n", (int) thr->exp_incr ) ); - - /* richryan */ - newpool = (*thr->acqfcn)((bufsize) thr->exp_incr); - KMP_DEBUG_ASSERT( ((size_t)newpool) % SizeQuant == 0 ); - if (newpool != NULL) { - bpool( th, newpool, thr->exp_incr); - buf = bget( th, requested_size); /* This can't, I say, can't get into a loop. */ - return buf; - } - } - } - - /* Still no buffer available */ - - return NULL; -} - -/* BGETZ -- Allocate a buffer and clear its contents to zero. We clear - the entire contents of the buffer to zero, not just the - region requested by the caller. */ - -static void * -bgetz( kmp_info_t *th, bufsize size ) -{ - char *buf = (char *) bget( th, size); - - if (buf != NULL) { - bhead_t *b; - bufsize rsize; - - b = BH(buf - sizeof(bhead_t)); - rsize = -(b->bb.bsize); - if (rsize == 0) { - bdhead_t *bd; - - bd = BDH(buf - sizeof(bdhead_t)); - rsize = bd->tsize - (bufsize) sizeof(bdhead_t); - } else { - rsize -= sizeof(bhead_t); - } - - KMP_DEBUG_ASSERT(rsize >= size); - - (void) memset(buf, 0, (bufsize) rsize); - } - return ((void *) buf); -} - -/* BGETR -- Reallocate a buffer. This is a minimal implementation, - simply in terms of brel() and bget(). It could be - enhanced to allow the buffer to grow into adjacent free - blocks and to avoid moving data unnecessarily. */ - -static void * -bgetr( kmp_info_t *th, void *buf, bufsize size) -{ - void *nbuf; - bufsize osize; /* Old size of buffer */ - bhead_t *b; - - nbuf = bget( th, size ); - if ( nbuf == NULL ) { /* Acquire new buffer */ - return NULL; - } - if ( buf == NULL ) { - return nbuf; - } - b = BH(((char *) buf) - sizeof(bhead_t)); - osize = -b->bb.bsize; - if (osize == 0) { - /* Buffer acquired directly through acqfcn. */ - bdhead_t *bd; - - bd = BDH(((char *) buf) - sizeof(bdhead_t)); - osize = bd->tsize - (bufsize) sizeof(bdhead_t); - } else { - osize -= sizeof(bhead_t); - }; - - KMP_DEBUG_ASSERT(osize > 0); - - (void) KMP_MEMCPY((char *) nbuf, (char *) buf, /* Copy the data */ - (size_t) ((size < osize) ? size : osize)); - brel( th, buf ); - - return nbuf; -} - -/* BREL -- Release a buffer. */ - -static void -brel( kmp_info_t *th, void *buf ) -{ - thr_data_t *thr = get_thr_data( th ); - bfhead_t *b, *bn; - kmp_info_t *bth; - - KMP_DEBUG_ASSERT(buf != NULL); - KMP_DEBUG_ASSERT( ((size_t)buf) % SizeQuant == 0 ); - - b = BFH(((char *) buf) - sizeof(bhead_t)); - - if (b->bh.bb.bsize == 0) { /* Directly-acquired buffer? 
*/ - bdhead_t *bdh; - - bdh = BDH(((char *) buf) - sizeof(bdhead_t)); - KMP_DEBUG_ASSERT(b->bh.bb.prevfree == 0); -#if BufStats - thr->totalloc -= (size_t) bdh->tsize; - thr->numdrel++; /* Number of direct releases */ - thr->numrel++; /* Increment number of brel() calls */ -#endif /* BufStats */ -#ifdef FreeWipe - (void) memset((char *) buf, 0x55, - (size_t) (bdh->tsize - sizeof(bdhead_t))); -#endif /* FreeWipe */ - - KE_TRACE( 10, ("%%%%%% FREE( %p )\n", (void *) bdh ) ); - - KMP_DEBUG_ASSERT( thr->relfcn != 0 ); - (*thr->relfcn)((void *) bdh); /* Release it directly. */ - return; - } - - bth = (kmp_info_t *)( (kmp_uintptr_t)TCR_PTR(b->bh.bb.bthr) & ~1 ); // clear possible mark before comparison - if ( bth != th ) { - /* Add this buffer to be released by the owning thread later */ - __kmp_bget_enqueue( bth, buf -#ifdef USE_QUEUING_LOCK_FOR_BGET - , __kmp_gtid_from_thread( th ) -#endif - ); - return; - } - - /* Buffer size must be negative, indicating that the buffer is - allocated. */ - - if (b->bh.bb.bsize >= 0) { - bn = NULL; - } - KMP_DEBUG_ASSERT(b->bh.bb.bsize < 0); - - /* Back pointer in next buffer must be zero, indicating the - same thing: */ - - KMP_DEBUG_ASSERT(BH((char *) b - b->bh.bb.bsize)->bb.prevfree == 0); - -#if BufStats - thr->numrel++; /* Increment number of brel() calls */ - thr->totalloc += (size_t) b->bh.bb.bsize; -#endif - - /* If the back link is nonzero, the previous buffer is free. */ - - if (b->bh.bb.prevfree != 0) { - /* The previous buffer is free. Consolidate this buffer with it - by adding the length of this buffer to the previous free - buffer. Note that we subtract the size in the buffer being - released, since it's negative to indicate that the buffer is - allocated. */ - - register bufsize size = b->bh.bb.bsize; - - /* Make the previous buffer the one we're working on. */ - KMP_DEBUG_ASSERT(BH((char *) b - b->bh.bb.prevfree)->bb.bsize == b->bh.bb.prevfree); - b = BFH(((char *) b) - b->bh.bb.prevfree); - b->bh.bb.bsize -= size; - - /* unlink the buffer from the old freelist */ - __kmp_bget_remove_from_freelist( b ); - } - else { - /* The previous buffer isn't allocated. Mark this buffer - size as positive (i.e. free) and fall through to place - the buffer on the free list as an isolated free block. */ - - b->bh.bb.bsize = -b->bh.bb.bsize; - } - - /* insert buffer back onto a new freelist */ - __kmp_bget_insert_into_freelist( thr, b ); - - - /* Now we look at the next buffer in memory, located by advancing from - the start of this buffer by its size, to see if that buffer is - free. If it is, we combine this buffer with the next one in - memory, dechaining the second buffer from the free list. */ - - bn = BFH(((char *) b) + b->bh.bb.bsize); - if (bn->bh.bb.bsize > 0) { - - /* The buffer is free. Remove it from the free list and add - its size to that of our buffer. */ - - KMP_DEBUG_ASSERT(BH((char *) bn + bn->bh.bb.bsize)->bb.prevfree == bn->bh.bb.bsize); - - __kmp_bget_remove_from_freelist( bn ); - - b->bh.bb.bsize += bn->bh.bb.bsize; - - /* unlink the buffer from the old freelist, and reinsert it into the new freelist */ - - __kmp_bget_remove_from_freelist( b ); - __kmp_bget_insert_into_freelist( thr, b ); - - /* Finally, advance to the buffer that follows the newly - consolidated free block. We must set its backpointer to the - head of the consolidated free block. We know the next block - must be an allocated block because the process of recombination - guarantees that two free blocks will never be contiguous in - memory. 
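Reviewer note: all of this merging logic hangs off a single encoding: the sign of bsize is the free/allocated flag, and prevfree is nonzero only when the block physically before this one is free. A toy model of just that convention (`Header` is illustrative, not the real bhead2_t):

```cpp
#include <cassert>

// bsize > 0: block is free; bsize < 0: block is allocated (the magnitude is
// the size either way). prevfree mirrors the predecessor's state.
struct Header { long prevfree; long bsize; };  // illustrative layout

void mark_allocated(Header &h) { h.bsize = -h.bsize; }
void mark_free(Header &h)      { h.bsize = -h.bsize; }
bool is_free(const Header &h)  { return h.bsize > 0; }

int main() {
    Header h{0, 128};                      // 128-byte free block
    assert(is_free(h));
    mark_allocated(h);                     // bget() negates on allocation
    assert(!is_free(h) && h.bsize == -128);
    mark_free(h);                          // brel() restores the sign...
    assert(is_free(h) && h.bsize == 128);  // ...before coalescing neighbors
    return 0;
}
```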
*/ - - bn = BFH(((char *) b) + b->bh.bb.bsize); - } -#ifdef FreeWipe - (void) memset(((char *) b) + sizeof(bfhead_t), 0x55, - (size_t) (b->bh.bb.bsize - sizeof(bfhead_t))); -#endif - KMP_DEBUG_ASSERT(bn->bh.bb.bsize < 0); - - /* The next buffer is allocated. Set the backpointer in it to point - to this buffer; the previous free buffer in memory. */ - - bn->bh.bb.prevfree = b->bh.bb.bsize; - - /* If a block-release function is defined, and this free buffer - constitutes the entire block, release it. Note that pool_len - is defined in such a way that the test will fail unless all - pool blocks are the same size. */ - - if (thr->relfcn != 0 && - b->bh.bb.bsize == (bufsize)(thr->pool_len - sizeof(bhead_t))) - { -#if BufStats - if (thr->numpblk != 1) { /* Do not release the last buffer until finalization time */ -#endif - - KMP_DEBUG_ASSERT(b->bh.bb.prevfree == 0); - KMP_DEBUG_ASSERT(BH((char *) b + b->bh.bb.bsize)->bb.bsize == ESent); - KMP_DEBUG_ASSERT(BH((char *) b + b->bh.bb.bsize)->bb.prevfree == b->bh.bb.bsize); - - /* Unlink the buffer from the free list */ - __kmp_bget_remove_from_freelist( b ); - - KE_TRACE( 10, ("%%%%%% FREE( %p )\n", (void *) b ) ); - - (*thr->relfcn)(b); -#if BufStats - thr->numprel++; /* Nr of expansion block releases */ - thr->numpblk--; /* Total number of blocks */ - KMP_DEBUG_ASSERT(thr->numpblk == thr->numpget - thr->numprel); - - /* avoid leaving stale last_pool pointer around if it is being dealloced */ - if (thr->last_pool == b) thr->last_pool = 0; - } - else { - thr->last_pool = b; - } -#endif /* BufStats */ - } -} - -/* BECTL -- Establish automatic pool expansion control */ - -static void -bectl( kmp_info_t *th, bget_compact_t compact, bget_acquire_t acquire, bget_release_t release, bufsize pool_incr) -{ - thr_data_t *thr = get_thr_data( th ); - - thr->compfcn = compact; - thr->acqfcn = acquire; - thr->relfcn = release; - thr->exp_incr = pool_incr; -} - -/* BPOOL -- Add a region of memory to the buffer pool. */ - -static void -bpool( kmp_info_t *th, void *buf, bufsize len) -{ -/* int bin = 0; */ - thr_data_t *thr = get_thr_data( th ); - bfhead_t *b = BFH(buf); - bhead_t *bn; - - __kmp_bget_dequeue( th ); /* Release any queued buffers */ - -#ifdef SizeQuant - len &= ~(SizeQuant - 1); -#endif - if (thr->pool_len == 0) { - thr->pool_len = len; - } else if (len != thr->pool_len) { - thr->pool_len = -1; - } -#if BufStats - thr->numpget++; /* Number of block acquisitions */ - thr->numpblk++; /* Number of blocks total */ - KMP_DEBUG_ASSERT(thr->numpblk == thr->numpget - thr->numprel); -#endif /* BufStats */ - - /* Since the block is initially occupied by a single free buffer, - it had better not be (much) larger than the largest buffer - whose size we can store in bhead.bb.bsize. */ - - KMP_DEBUG_ASSERT(len - sizeof(bhead_t) <= -((bufsize) ESent + 1)); - - /* Clear the backpointer at the start of the block to indicate that - there is no free block prior to this one. That blocks - recombination when the first block in memory is released. */ - - b->bh.bb.prevfree = 0; - - /* Create a dummy allocated buffer at the end of the pool. This dummy - buffer is seen when a buffer at the end of the pool is released and - blocks recombination of the last buffer with the dummy buffer at - the end. The length in the dummy buffer is set to the largest - negative number to denote the end of the pool for diagnostic - routines (this specific value is not counted on by the actual - allocation and release functions). 
*/ - - len -= sizeof(bhead_t); - b->bh.bb.bsize = (bufsize) len; - /* Set the owner of this buffer */ - TCW_PTR( b->bh.bb.bthr, (kmp_info_t*)((kmp_uintptr_t)th | 1) ); // mark the buffer as allocated address - - /* Chain the new block to the free list. */ - __kmp_bget_insert_into_freelist( thr, b ); - -#ifdef FreeWipe - (void) memset(((char *) b) + sizeof(bfhead_t), 0x55, - (size_t) (len - sizeof(bfhead_t))); -#endif - bn = BH(((char *) b) + len); - bn->bb.prevfree = (bufsize) len; - /* Definition of ESent assumes two's complement! */ - KMP_DEBUG_ASSERT( (~0) == -1 && (bn != 0) ); - - bn->bb.bsize = ESent; -} - -/* ------------------------------------------------------------------------ */ - -/* BFREED -- Dump the free lists for this thread. */ - -static void -bfreed( kmp_info_t *th ) -{ - int bin = 0, count = 0; - int gtid = __kmp_gtid_from_thread( th ); - thr_data_t *thr = get_thr_data( th ); - -#if BufStats - __kmp_printf_no_lock("__kmp_printpool: T#%d total=%" KMP_UINT64_SPEC " get=%" KMP_INT64_SPEC " rel=%" \ - KMP_INT64_SPEC " pblk=%" KMP_INT64_SPEC " pget=%" KMP_INT64_SPEC " prel=%" KMP_INT64_SPEC \ - " dget=%" KMP_INT64_SPEC " drel=%" KMP_INT64_SPEC "\n", - gtid, (kmp_uint64) thr->totalloc, - (kmp_int64) thr->numget, (kmp_int64) thr->numrel, - (kmp_int64) thr->numpblk, - (kmp_int64) thr->numpget, (kmp_int64) thr->numprel, - (kmp_int64) thr->numdget, (kmp_int64) thr->numdrel ); -#endif - - for (bin = 0; bin < MAX_BGET_BINS; ++bin) { - bfhead_t *b; - - for (b = thr->freelist[ bin ].ql.flink; b != &thr->freelist[ bin ]; b = b->ql.flink) { - bufsize bs = b->bh.bb.bsize; - - KMP_DEBUG_ASSERT( b->ql.blink->ql.flink == b ); - KMP_DEBUG_ASSERT( b->ql.flink->ql.blink == b ); - KMP_DEBUG_ASSERT( bs > 0 ); - - count += 1; - - __kmp_printf_no_lock("__kmp_printpool: T#%d Free block: 0x%p size %6ld bytes.\n", gtid, b, (long) bs ); -#ifdef FreeWipe - { - char *lerr = ((char *) b) + sizeof(bfhead_t); - if ((bs > sizeof(bfhead_t)) && ((*lerr != 0x55) || (memcmp(lerr, lerr + 1, (size_t) (bs - (sizeof(bfhead_t) + 1))) != 0))) { - __kmp_printf_no_lock( "__kmp_printpool: T#%d (Contents of above free block have been overstored.)\n", gtid ); - } - } -#endif - } - } - - if (count == 0) - __kmp_printf_no_lock("__kmp_printpool: T#%d No free blocks\n", gtid ); -} - -/* ------------------------------------------------------------------------ */ - -#ifdef KMP_DEBUG - -#if BufStats - -/* BSTATS -- Return buffer allocation free space statistics. */ - -static void -bstats( kmp_info_t *th, bufsize *curalloc, bufsize *totfree, bufsize *maxfree, long *nget, long *nrel) -{ - int bin = 0; - thr_data_t *thr = get_thr_data( th ); - - *nget = thr->numget; - *nrel = thr->numrel; - *curalloc = (bufsize) thr->totalloc; - *totfree = 0; - *maxfree = -1; - - for (bin = 0; bin < MAX_BGET_BINS; ++bin) { - bfhead_t *b = thr->freelist[ bin ].ql.flink; - - while (b != &thr->freelist[ bin ]) { - KMP_DEBUG_ASSERT(b->bh.bb.bsize > 0); - *totfree += b->bh.bb.bsize; - if (b->bh.bb.bsize > *maxfree) { - *maxfree = b->bh.bb.bsize; - } - b = b->ql.flink; /* Link to next buffer */ - } - } -} - -/* BSTATSE -- Return extended statistics */ - -static void -bstatse( kmp_info_t *th, bufsize *pool_incr, long *npool, long *npget, long *nprel, long *ndget, long *ndrel) -{ - thr_data_t *thr = get_thr_data( th ); - - *pool_incr = (thr->pool_len < 0) ? 
-thr->exp_incr : thr->exp_incr; - *npool = thr->numpblk; - *npget = thr->numpget; - *nprel = thr->numprel; - *ndget = thr->numdget; - *ndrel = thr->numdrel; -} - -#endif /* BufStats */ - -/* BUFDUMP -- Dump the data in a buffer. This is called with the user - data pointer, and backs up to the buffer header. It will - dump either a free block or an allocated one. */ - -static void -bufdump( kmp_info_t *th, void *buf ) -{ - bfhead_t *b; - unsigned char *bdump; - bufsize bdlen; - - b = BFH(((char *) buf) - sizeof(bhead_t)); - KMP_DEBUG_ASSERT(b->bh.bb.bsize != 0); - if (b->bh.bb.bsize < 0) { - bdump = (unsigned char *) buf; - bdlen = (-b->bh.bb.bsize) - (bufsize) sizeof(bhead_t); - } else { - bdump = (unsigned char *) (((char *) b) + sizeof(bfhead_t)); - bdlen = b->bh.bb.bsize - (bufsize) sizeof(bfhead_t); - } - - while (bdlen > 0) { - int i, dupes = 0; - bufsize l = bdlen; - char bhex[50], bascii[20]; - - if (l > 16) { - l = 16; - } - - for (i = 0; i < l; i++) { - (void) KMP_SNPRINTF(bhex + i * 3, sizeof(bhex) - i * 3, "%02X ", bdump[i]); - if (bdump[i] > 0x20 && bdump[i] < 0x7F) - bascii[ i ] = bdump[ i ]; - else - bascii[ i ] = ' '; - } - bascii[i] = 0; - (void) __kmp_printf_no_lock("%-48s %s\n", bhex, bascii); - bdump += l; - bdlen -= l; - while ((bdlen > 16) && (memcmp((char *) (bdump - 16), - (char *) bdump, 16) == 0)) { - dupes++; - bdump += 16; - bdlen -= 16; - } - if (dupes > 1) { - (void) __kmp_printf_no_lock( - " (%d lines [%d bytes] identical to above line skipped)\n", - dupes, dupes * 16); - } else if (dupes == 1) { - bdump -= 16; - bdlen += 16; - } - } -} - -/* BPOOLD -- Dump a buffer pool. The buffer headers are always listed. - If DUMPALLOC is nonzero, the contents of allocated buffers - are dumped. If DUMPFREE is nonzero, free blocks are - dumped as well. If FreeWipe checking is enabled, free - blocks which have been clobbered will always be dumped. */ - -static void -bpoold( kmp_info_t *th, void *buf, int dumpalloc, int dumpfree) -{ - bfhead_t *b = BFH( (char*)buf - sizeof(bhead_t)); - - while (b->bh.bb.bsize != ESent) { - bufsize bs = b->bh.bb.bsize; - - if (bs < 0) { - bs = -bs; - (void) __kmp_printf_no_lock("Allocated buffer: size %6ld bytes.\n", (long) bs); - if (dumpalloc) { - bufdump( th, (void *) (((char *) b) + sizeof(bhead_t))); - } - } else { - const char *lerr = ""; - - KMP_DEBUG_ASSERT(bs > 0); - if ((b->ql.blink->ql.flink != b) || (b->ql.flink->ql.blink != b)) { - lerr = " (Bad free list links)"; - } - (void) __kmp_printf_no_lock("Free block: size %6ld bytes.%s\n", - (long) bs, lerr); -#ifdef FreeWipe - lerr = ((char *) b) + sizeof(bfhead_t); - if ((bs > sizeof(bfhead_t)) && ((*lerr != 0x55) || - (memcmp(lerr, lerr + 1, - (size_t) (bs - (sizeof(bfhead_t) + 1))) != 0))) { - (void) __kmp_printf_no_lock( - "(Contents of above free block have been overstored.)\n"); - bufdump( th, (void *) (((char *) b) + sizeof(bhead_t))); - } else -#endif - if (dumpfree) { - bufdump( th, (void *) (((char *) b) + sizeof(bhead_t))); - } - } - b = BFH(((char *) b) + bs); - } -} - -/* BPOOLV -- Validate a buffer pool. */ - -static int -bpoolv( kmp_info_t *th, void *buf ) -{ - bfhead_t *b = BFH(buf); - - while (b->bh.bb.bsize != ESent) { - bufsize bs = b->bh.bb.bsize; - - if (bs < 0) { - bs = -bs; - } else { -#ifdef FreeWipe - char *lerr = ""; -#endif - - KMP_DEBUG_ASSERT(bs > 0); - if (bs <= 0) { - return 0; - } - if ((b->ql.blink->ql.flink != b) || (b->ql.flink->ql.blink != b)) { - (void) __kmp_printf_no_lock("Free block: size %6ld bytes. 
(Bad free list links)\n", - (long) bs); - KMP_DEBUG_ASSERT(0); - return 0; - } -#ifdef FreeWipe - lerr = ((char *) b) + sizeof(bfhead_t); - if ((bs > sizeof(bfhead_t)) && ((*lerr != 0x55) || - (memcmp(lerr, lerr + 1, - (size_t) (bs - (sizeof(bfhead_t) + 1))) != 0))) { - (void) __kmp_printf_no_lock( - "(Contents of above free block have been overstored.)\n"); - bufdump( th, (void *) (((char *) b) + sizeof(bhead_t))); - KMP_DEBUG_ASSERT(0); - return 0; - } -#endif /* FreeWipe */ - } - b = BFH(((char *) b) + bs); - } - return 1; -} - -#endif /* KMP_DEBUG */ - -/* ------------------------------------------------------------------------ */ - -void -__kmp_initialize_bget( kmp_info_t *th ) -{ - KMP_DEBUG_ASSERT( SizeQuant >= sizeof( void * ) && (th != 0) ); - - set_thr_data( th ); - - bectl( th, (bget_compact_t) 0, (bget_acquire_t) malloc, (bget_release_t) free, - (bufsize) __kmp_malloc_pool_incr ); -} - -void -__kmp_finalize_bget( kmp_info_t *th ) -{ - thr_data_t *thr; - bfhead_t *b; - - KMP_DEBUG_ASSERT( th != 0 ); - -#if BufStats - thr = (thr_data_t *) th->th.th_local.bget_data; - KMP_DEBUG_ASSERT( thr != NULL ); - b = thr->last_pool; - - /* If a block-release function is defined, and this free buffer - constitutes the entire block, release it. Note that pool_len - is defined in such a way that the test will fail unless all - pool blocks are the same size. */ - - /* Deallocate the last pool if one exists because we no longer do it in brel() */ - if (thr->relfcn != 0 && b != 0 && thr->numpblk != 0 && - b->bh.bb.bsize == (bufsize)(thr->pool_len - sizeof(bhead_t))) - { - KMP_DEBUG_ASSERT(b->bh.bb.prevfree == 0); - KMP_DEBUG_ASSERT(BH((char *) b + b->bh.bb.bsize)->bb.bsize == ESent); - KMP_DEBUG_ASSERT(BH((char *) b + b->bh.bb.bsize)->bb.prevfree == b->bh.bb.bsize); - - /* Unlink the buffer from the free list */ - __kmp_bget_remove_from_freelist( b ); - - KE_TRACE( 10, ("%%%%%% FREE( %p )\n", (void *) b ) ); - - (*thr->relfcn)(b); - thr->numprel++; /* Nr of expansion block releases */ - thr->numpblk--; /* Total number of blocks */ - KMP_DEBUG_ASSERT(thr->numpblk == thr->numpget - thr->numprel); - } -#endif /* BufStats */ - - /* Deallocate bget_data */ - if ( th->th.th_local.bget_data != NULL ) { - __kmp_free( th->th.th_local.bget_data ); - th->th.th_local.bget_data = NULL; - }; // if -} - -void -kmpc_set_poolsize( size_t size ) -{ - bectl( __kmp_get_thread(), (bget_compact_t) 0, (bget_acquire_t) malloc, - (bget_release_t) free, (bufsize) size ); -} - -size_t -kmpc_get_poolsize( void ) -{ - thr_data_t *p; - - p = get_thr_data( __kmp_get_thread() ); - - return p->exp_incr; -} - -void -kmpc_set_poolmode( int mode ) -{ - thr_data_t *p; - - if (mode == bget_mode_fifo || mode == bget_mode_lifo || mode == bget_mode_best) { - p = get_thr_data( __kmp_get_thread() ); - p->mode = (bget_mode_t) mode; - } -} - -int -kmpc_get_poolmode( void ) -{ - thr_data_t *p; - - p = get_thr_data( __kmp_get_thread() ); - - return p->mode; -} - -void -kmpc_get_poolstat( size_t *maxmem, size_t *allmem ) -{ - kmp_info_t *th = __kmp_get_thread(); - bufsize a, b; - - __kmp_bget_dequeue( th ); /* Release any queued buffers */ - - bcheck( th, &a, &b ); - - *maxmem = a; - *allmem = b; -} - -void -kmpc_poolprint( void ) -{ - kmp_info_t *th = __kmp_get_thread(); - - __kmp_bget_dequeue( th ); /* Release any queued buffers */ - - bfreed( th ); -} - -#endif // #if KMP_USE_BGET - -/* ------------------------------------------------------------------------ */ - -void * -kmpc_malloc( size_t size ) -{ - void * ptr; - ptr = bget( 
__kmp_entry_thread(), (bufsize)(size + sizeof(ptr)) ); - if( ptr != NULL ) { - // save allocated pointer just before one returned to user - *(void**)ptr = ptr; - ptr = (void**)ptr + 1; - } - return ptr; -} - -#define IS_POWER_OF_TWO(n) (((n)&((n)-1))==0) - -void * -kmpc_aligned_malloc( size_t size, size_t alignment ) -{ - void * ptr; - void * ptr_allocated; - KMP_DEBUG_ASSERT( alignment < 32 * 1024 ); // Alignment should not be too big - if( !IS_POWER_OF_TWO(alignment) ) { - // AC: do we need to issue a warning here? - errno = EINVAL; - return NULL; - } - size = size + sizeof( void* ) + alignment; - ptr_allocated = bget( __kmp_entry_thread(), (bufsize)size ); - if( ptr_allocated != NULL ) { - // save allocated pointer just before one returned to user - ptr = (void*)(((kmp_uintptr_t)ptr_allocated + sizeof( void* ) + alignment) & ~(alignment - 1)); - *((void**)ptr - 1) = ptr_allocated; - } else { - ptr = NULL; - } - return ptr; -} - -void * -kmpc_calloc( size_t nelem, size_t elsize ) -{ - void * ptr; - ptr = bgetz( __kmp_entry_thread(), (bufsize) (nelem * elsize + sizeof(ptr)) ); - if( ptr != NULL ) { - // save allocated pointer just before one returned to user - *(void**)ptr = ptr; - ptr = (void**)ptr + 1; - } - return ptr; -} - -void * -kmpc_realloc( void * ptr, size_t size ) -{ - void * result = NULL; - if ( ptr == NULL ) { - // If pointer is NULL, realloc behaves like malloc. - result = bget( __kmp_entry_thread(), (bufsize)(size + sizeof(ptr)) ); - // save allocated pointer just before one returned to user - if( result != NULL ) { - *(void**)result = result; - result = (void**)result + 1; - } - } else if ( size == 0 ) { - // If size is 0, realloc behaves like free. - // The thread must be registered by the call to kmpc_malloc() or kmpc_calloc() before. - // So it should be safe to call __kmp_get_thread(), not __kmp_entry_thread(). - KMP_ASSERT(*((void**)ptr - 1)); - brel( __kmp_get_thread(), *((void**)ptr - 1) ); - } else { - result = bgetr( __kmp_entry_thread(), *((void**)ptr - 1), (bufsize)(size + sizeof(ptr)) ); - if( result != NULL ) { - *(void**)result = result; - result = (void**)result + 1; - } - }; // if - return result; -} - -/* NOTE: the library must have already been initialized by a previous allocate */ - -void -kmpc_free( void * ptr ) -{ - if ( ! 
__kmp_init_serial ) { - return; - }; // if - if ( ptr != NULL ) { - kmp_info_t *th = __kmp_get_thread(); - __kmp_bget_dequeue( th ); /* Release any queued buffers */ - // extract allocated pointer and free it - KMP_ASSERT(*((void**)ptr - 1)); - brel( th, *((void**)ptr - 1) ); - }; -} - - -/* ------------------------------------------------------------------------ */ - -void * -___kmp_thread_malloc( kmp_info_t *th, size_t size KMP_SRC_LOC_DECL ) -{ - void * ptr; - KE_TRACE( 30, ( - "-> __kmp_thread_malloc( %p, %d ) called from %s:%d\n", - th, - (int) size - KMP_SRC_LOC_PARM - ) ); - ptr = bget( th, (bufsize) size ); - KE_TRACE( 30, ( "<- __kmp_thread_malloc() returns %p\n", ptr ) ); - return ptr; -} - -void * -___kmp_thread_calloc( kmp_info_t *th, size_t nelem, size_t elsize KMP_SRC_LOC_DECL ) -{ - void * ptr; - KE_TRACE( 30, ( - "-> __kmp_thread_calloc( %p, %d, %d ) called from %s:%d\n", - th, - (int) nelem, - (int) elsize - KMP_SRC_LOC_PARM - ) ); - ptr = bgetz( th, (bufsize) (nelem * elsize) ); - KE_TRACE( 30, ( "<- __kmp_thread_calloc() returns %p\n", ptr ) ); - return ptr; -} - -void * -___kmp_thread_realloc( kmp_info_t *th, void *ptr, size_t size KMP_SRC_LOC_DECL ) -{ - KE_TRACE( 30, ( - "-> __kmp_thread_realloc( %p, %p, %d ) called from %s:%d\n", - th, - ptr, - (int) size - KMP_SRC_LOC_PARM - ) ); - ptr = bgetr( th, ptr, (bufsize) size ); - KE_TRACE( 30, ( "<- __kmp_thread_realloc() returns %p\n", ptr ) ); - return ptr; -} - -void -___kmp_thread_free( kmp_info_t *th, void *ptr KMP_SRC_LOC_DECL ) -{ - KE_TRACE( 30, ( - "-> __kmp_thread_free( %p, %p ) called from %s:%d\n", - th, - ptr - KMP_SRC_LOC_PARM - ) ); - if ( ptr != NULL ) { - __kmp_bget_dequeue( th ); /* Release any queued buffers */ - brel( th, ptr ); - } - KE_TRACE( 30, ( "<- __kmp_thread_free()\n" ) ); -} - -/* ------------------------------------------------------------------------ */ -/* ------------------------------------------------------------------------ */ -/* - If LEAK_MEMORY is defined, __kmp_free() will *not* free memory. It causes memory leaks, but it - may be useful for debugging memory corruptions, used freed pointers, etc. -*/ -/* #define LEAK_MEMORY */ - -struct kmp_mem_descr { // Memory block descriptor. - void * ptr_allocated; // Pointer returned by malloc(), subject for free(). - size_t size_allocated; // Size of allocated memory block. - void * ptr_aligned; // Pointer to aligned memory, to be used by client code. - size_t size_aligned; // Size of aligned memory block. -}; -typedef struct kmp_mem_descr kmp_mem_descr_t; - -/* - Allocate memory on requested boundary, fill allocated memory with 0x00. - NULL is NEVER returned, __kmp_abort() is called in case of memory allocation error. - Must use __kmp_free when freeing memory allocated by this routine! - */ -static -void * -___kmp_allocate_align( size_t size, size_t alignment KMP_SRC_LOC_DECL ) -{ - /* - __kmp_allocate() allocates (by call to malloc()) bigger memory block than requested to - return properly aligned pointer. Original pointer returned by malloc() and size of allocated - block is saved in descriptor just before the aligned pointer. This information used by - __kmp_free() -- it has to pass to free() original pointer, not aligned one. 
- - +---------+------------+-----------------------------------+---------+ - | padding | descriptor | aligned block | padding | - +---------+------------+-----------------------------------+---------+ - ^ ^ - | | - | +- Aligned pointer returned to caller - +- Pointer returned by malloc() - - Aligned block is filled with zeros, paddings are filled with 0xEF. - */ - - kmp_mem_descr_t descr; - kmp_uintptr_t addr_allocated; // Address returned by malloc(). - kmp_uintptr_t addr_aligned; // Aligned address to return to caller. - kmp_uintptr_t addr_descr; // Address of memory block descriptor. - - KE_TRACE( 25, ( - "-> ___kmp_allocate_align( %d, %d ) called from %s:%d\n", - (int) size, - (int) alignment - KMP_SRC_LOC_PARM - ) ); - - KMP_DEBUG_ASSERT( alignment < 32 * 1024 ); // Alignment should not be too - KMP_DEBUG_ASSERT( sizeof( void * ) <= sizeof( kmp_uintptr_t ) ); - // Make sure kmp_uintptr_t is enough to store addresses. - - descr.size_aligned = size; - descr.size_allocated = descr.size_aligned + sizeof( kmp_mem_descr_t ) + alignment; - -#if KMP_DEBUG - descr.ptr_allocated = _malloc_src_loc( descr.size_allocated, _file_, _line_ ); -#else - descr.ptr_allocated = malloc_src_loc( descr.size_allocated KMP_SRC_LOC_PARM ); -#endif - KE_TRACE( 10, ( - " malloc( %d ) returned %p\n", - (int) descr.size_allocated, - descr.ptr_allocated - ) ); - if ( descr.ptr_allocated == NULL ) { - KMP_FATAL( OutOfHeapMemory ); - }; - - addr_allocated = (kmp_uintptr_t) descr.ptr_allocated; - addr_aligned = - ( addr_allocated + sizeof( kmp_mem_descr_t ) + alignment ) - & ~ ( alignment - 1 ); - addr_descr = addr_aligned - sizeof( kmp_mem_descr_t ); - - descr.ptr_aligned = (void *) addr_aligned; - - KE_TRACE( 26, ( - " ___kmp_allocate_align: " - "ptr_allocated=%p, size_allocated=%d, " - "ptr_aligned=%p, size_aligned=%d\n", - descr.ptr_allocated, - (int) descr.size_allocated, - descr.ptr_aligned, - (int) descr.size_aligned - ) ); - - KMP_DEBUG_ASSERT( addr_allocated <= addr_descr ); - KMP_DEBUG_ASSERT( addr_descr + sizeof( kmp_mem_descr_t ) == addr_aligned ); - KMP_DEBUG_ASSERT( addr_aligned + descr.size_aligned <= addr_allocated + descr.size_allocated ); - KMP_DEBUG_ASSERT( addr_aligned % alignment == 0 ); -#ifdef KMP_DEBUG - memset( descr.ptr_allocated, 0xEF, descr.size_allocated ); - // Fill allocated memory block with 0xEF. -#endif - memset( descr.ptr_aligned, 0x00, descr.size_aligned ); - // Fill the aligned memory block (which is intended for using by caller) with 0x00. Do not - // put this filling under KMP_DEBUG condition! Many callers expect zeroed memory. (Padding - // bytes remain filled with 0xEF in debugging library.) - * ( (kmp_mem_descr_t *) addr_descr ) = descr; - - KMP_MB(); - - KE_TRACE( 25, ( "<- ___kmp_allocate_align() returns %p\n", descr.ptr_aligned ) ); - return descr.ptr_aligned; -} // func ___kmp_allocate_align - - -/* - Allocate memory on cache line boundary, fill allocated memory with 0x00. - Do not call this func directly! Use __kmp_allocate macro instead. - NULL is NEVER returned, __kmp_abort() is called in case of memory allocation error. - Must use __kmp_free when freeing memory allocated by this routine! 
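Reviewer note: the diagram above is the whole trick: over-allocate, round up to the requested boundary, and stash bookkeeping just below the pointer handed out. A minimal sketch of the same pattern, storing only the raw malloc() pointer where the real code stores a full kmp_mem_descr_t (both function names are illustrative):

```cpp
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <cstring>

// Over-allocate, align up, and stash the original pointer one slot below the
// aligned address so the matching free can recover it.
void *aligned_alloc_with_header(std::size_t size, std::size_t alignment) {
    assert((alignment & (alignment - 1)) == 0);  // power of two required
    void *raw = std::malloc(size + sizeof(void *) + alignment);
    if (raw == nullptr) return nullptr;          // the real code aborts instead
    std::uintptr_t aligned =
        (reinterpret_cast<std::uintptr_t>(raw) + sizeof(void *) + alignment) &
        ~(static_cast<std::uintptr_t>(alignment) - 1);
    reinterpret_cast<void **>(aligned)[-1] = raw;  // the "descriptor" slot
    return reinterpret_cast<void *>(aligned);
}

void aligned_free_with_header(void *p) {
    if (p != nullptr) std::free(reinterpret_cast<void **>(p)[-1]);
}

int main() {
    void *p = aligned_alloc_with_header(100, 64);
    assert(reinterpret_cast<std::uintptr_t>(p) % 64 == 0);
    std::memset(p, 0, 100);  // the aligned region holds the full `size` bytes
    aligned_free_with_header(p);
    return 0;
}
```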
- */ -void * -___kmp_allocate( size_t size KMP_SRC_LOC_DECL ) -{ - void * ptr; - KE_TRACE( 25, ( "-> __kmp_allocate( %d ) called from %s:%d\n", (int) size KMP_SRC_LOC_PARM ) ); - ptr = ___kmp_allocate_align( size, __kmp_align_alloc KMP_SRC_LOC_PARM ); - KE_TRACE( 25, ( "<- __kmp_allocate() returns %p\n", ptr ) ); - return ptr; -} // func ___kmp_allocate - -#if (BUILD_MEMORY==FIRST_TOUCH) -void * -__kmp_ft_page_allocate(size_t size) -{ - void *adr, *aadr; - - const int page_size = KMP_GET_PAGE_SIZE(); - - adr = (void *) __kmp_thread_malloc( __kmp_get_thread(), - size + page_size + KMP_PTR_SKIP); - if ( adr == 0 ) - KMP_FATAL( OutOfHeapMemory ); - - /* check to see if adr is on a page boundary. */ - if ( ( (kmp_uintptr_t) adr & (page_size - 1)) == 0) - /* nothing to do if adr is already on a page boundary. */ - aadr = adr; - else - /* else set aadr to the first page boundary in the allocated memory. */ - aadr = (void *) ( ( (kmp_uintptr_t) adr + page_size) & ~(page_size - 1) ); - - /* the first touch by the owner thread. */ - *((void**)aadr) = adr; - - /* skip the memory space used for storing adr above. */ - return (void*)((char*)aadr + KMP_PTR_SKIP); -} -#endif - -/* - Allocate memory on page boundary, fill allocated memory with 0x00. - Does not call this func directly! Use __kmp_page_allocate macro instead. - NULL is NEVER returned, __kmp_abort() is called in case of memory allocation error. - Must use __kmp_free when freeing memory allocated by this routine! - */ -void * -___kmp_page_allocate( size_t size KMP_SRC_LOC_DECL ) -{ - int page_size = 8 * 1024; - void * ptr; - - KE_TRACE( 25, ( - "-> __kmp_page_allocate( %d ) called from %s:%d\n", - (int) size - KMP_SRC_LOC_PARM - ) ); - ptr = ___kmp_allocate_align( size, page_size KMP_SRC_LOC_PARM ); - KE_TRACE( 25, ( "<- __kmp_page_allocate( %d ) returns %p\n", (int) size, ptr ) ); - return ptr; -} // ___kmp_page_allocate - -/* - Free memory allocated by __kmp_allocate() and __kmp_page_allocate(). - In debug mode, fill the memory block with 0xEF before call to free(). -*/ -void -___kmp_free( void * ptr KMP_SRC_LOC_DECL ) -{ - kmp_mem_descr_t descr; - kmp_uintptr_t addr_allocated; // Address returned by malloc(). - kmp_uintptr_t addr_aligned; // Aligned address passed by caller. - - KE_TRACE( 25, ( "-> __kmp_free( %p ) called from %s:%d\n", ptr KMP_SRC_LOC_PARM ) ); - KMP_ASSERT( ptr != NULL ); - - descr = * ( kmp_mem_descr_t *) ( (kmp_uintptr_t) ptr - sizeof( kmp_mem_descr_t ) ); - - KE_TRACE( 26, ( " __kmp_free: " - "ptr_allocated=%p, size_allocated=%d, " - "ptr_aligned=%p, size_aligned=%d\n", - descr.ptr_allocated, (int) descr.size_allocated, - descr.ptr_aligned, (int) descr.size_aligned )); - - addr_allocated = (kmp_uintptr_t) descr.ptr_allocated; - addr_aligned = (kmp_uintptr_t) descr.ptr_aligned; - - KMP_DEBUG_ASSERT( addr_aligned % CACHE_LINE == 0 ); - KMP_DEBUG_ASSERT( descr.ptr_aligned == ptr ); - KMP_DEBUG_ASSERT( addr_allocated + sizeof( kmp_mem_descr_t ) <= addr_aligned ); - KMP_DEBUG_ASSERT( descr.size_aligned < descr.size_allocated ); - KMP_DEBUG_ASSERT( addr_aligned + descr.size_aligned <= addr_allocated + descr.size_allocated ); - - #ifdef KMP_DEBUG - memset( descr.ptr_allocated, 0xEF, descr.size_allocated ); - // Fill memory block with 0xEF, it helps catch using freed memory. 
- #endif - - #ifndef LEAK_MEMORY - KE_TRACE( 10, ( " free( %p )\n", descr.ptr_allocated ) ); - # ifdef KMP_DEBUG - _free_src_loc( descr.ptr_allocated, _file_, _line_ ); - # else - free_src_loc( descr.ptr_allocated KMP_SRC_LOC_PARM ); - # endif - #endif - KMP_MB(); - KE_TRACE( 25, ( "<- __kmp_free() returns\n" ) ); -} // func ___kmp_free - -/* ------------------------------------------------------------------------ */ -/* ------------------------------------------------------------------------ */ - -#if USE_FAST_MEMORY == 3 -// Allocate fast memory by first scanning the thread's free lists -// If a chunk the right size exists, grab it off the free list. -// Otherwise allocate normally using kmp_thread_malloc. - -// AC: How to choose the limit? Just get 16 for now... -#define KMP_FREE_LIST_LIMIT 16 - -// Always use 128 bytes for determining buckets for caching memory blocks -#define DCACHE_LINE 128 - -void * -___kmp_fast_allocate( kmp_info_t *this_thr, size_t size KMP_SRC_LOC_DECL ) -{ - void * ptr; - int num_lines; - int idx; - int index; - void * alloc_ptr; - size_t alloc_size; - kmp_mem_descr_t * descr; - - KE_TRACE( 25, ( "-> __kmp_fast_allocate( T#%d, %d ) called from %s:%d\n", - __kmp_gtid_from_thread(this_thr), (int) size KMP_SRC_LOC_PARM ) ); - - num_lines = ( size + DCACHE_LINE - 1 ) / DCACHE_LINE; - idx = num_lines - 1; - KMP_DEBUG_ASSERT( idx >= 0 ); - if ( idx < 2 ) { - index = 0; // idx is [ 0, 1 ], use first free list - num_lines = 2; // 1, 2 cache lines or less than cache line - } else if ( ( idx >>= 2 ) == 0 ) { - index = 1; // idx is [ 2, 3 ], use second free list - num_lines = 4; // 3, 4 cache lines - } else if ( ( idx >>= 2 ) == 0 ) { - index = 2; // idx is [ 4, 15 ], use third free list - num_lines = 16; // 5, 6, ..., 16 cache lines - } else if ( ( idx >>= 2 ) == 0 ) { - index = 3; // idx is [ 16, 63 ], use fourth free list - num_lines = 64; // 17, 18, ..., 64 cache lines - } else { - goto alloc_call; // 65 or more cache lines ( > 8KB ), don't use free lists - } - - ptr = this_thr->th.th_free_lists[index].th_free_list_self; - if ( ptr != NULL ) { - // pop the head of no-sync free list - this_thr->th.th_free_lists[index].th_free_list_self = *((void **)ptr); - KMP_DEBUG_ASSERT( this_thr == - ((kmp_mem_descr_t *)( (kmp_uintptr_t)ptr - sizeof(kmp_mem_descr_t) ))->ptr_aligned ); - goto end; - }; - ptr = TCR_SYNC_PTR( this_thr->th.th_free_lists[index].th_free_list_sync ); - if ( ptr != NULL ) { - // no-sync free list is empty, use sync free list (filled in by other threads only) - // pop the head of the sync free list, push NULL instead - while ( ! 
KMP_COMPARE_AND_STORE_PTR( - &this_thr->th.th_free_lists[index].th_free_list_sync, ptr, NULL ) ) - { - KMP_CPU_PAUSE(); - ptr = TCR_SYNC_PTR( this_thr->th.th_free_lists[index].th_free_list_sync ); - } - // push the rest of chain into no-sync free list (can be NULL if there was the only block) - this_thr->th.th_free_lists[index].th_free_list_self = *((void **)ptr); - KMP_DEBUG_ASSERT( this_thr == - ((kmp_mem_descr_t *)( (kmp_uintptr_t)ptr - sizeof(kmp_mem_descr_t) ))->ptr_aligned ); - goto end; - } - - alloc_call: - // haven't found block in the free lists, thus allocate it - size = num_lines * DCACHE_LINE; - - alloc_size = size + sizeof( kmp_mem_descr_t ) + DCACHE_LINE; - KE_TRACE( 25, ( "__kmp_fast_allocate: T#%d Calling __kmp_thread_malloc with alloc_size %d\n", - __kmp_gtid_from_thread( this_thr ), alloc_size ) ); - alloc_ptr = bget( this_thr, (bufsize) alloc_size ); - - // align ptr to DCACHE_LINE - ptr = (void *)(( ((kmp_uintptr_t)alloc_ptr) + sizeof(kmp_mem_descr_t) + DCACHE_LINE ) & ~( DCACHE_LINE - 1 )); - descr = (kmp_mem_descr_t *)( ((kmp_uintptr_t)ptr) - sizeof(kmp_mem_descr_t) ); - - descr->ptr_allocated = alloc_ptr; // remember allocated pointer - // we don't need size_allocated - descr->ptr_aligned = (void *)this_thr; // remember allocating thread - // (it is already saved in bget buffer, - // but we may want to use another allocator in future) - descr->size_aligned = size; - - end: - KE_TRACE( 25, ( "<- __kmp_fast_allocate( T#%d ) returns %p\n", - __kmp_gtid_from_thread( this_thr ), ptr ) ); - return ptr; -} // func __kmp_fast_allocate - -// Free fast memory and place it on the thread's free list if it is of -// the correct size. -void -___kmp_fast_free( kmp_info_t *this_thr, void * ptr KMP_SRC_LOC_DECL ) -{ - kmp_mem_descr_t * descr; - kmp_info_t * alloc_thr; - size_t size; - size_t idx; - int index; - - KE_TRACE( 25, ( "-> __kmp_fast_free( T#%d, %p ) called from %s:%d\n", - __kmp_gtid_from_thread(this_thr), ptr KMP_SRC_LOC_PARM ) ); - KMP_ASSERT( ptr != NULL ); - - descr = (kmp_mem_descr_t *)( ((kmp_uintptr_t)ptr) - sizeof(kmp_mem_descr_t) ); - - KE_TRACE(26, (" __kmp_fast_free: size_aligned=%d\n", - (int) descr->size_aligned ) ); - - size = descr->size_aligned; // 2, 4, 16, 64, 65, 66, ... 
cache lines
-
-    idx = DCACHE_LINE * 2;    // 2 cache lines is minimal size of block
-    if ( idx == size ) {
-        index = 0;            // 2 cache lines
-    } else if ( ( idx <<= 1 ) == size ) {
-        index = 1;            // 4 cache lines
-    } else if ( ( idx <<= 2 ) == size ) {
-        index = 2;            // 16 cache lines
-    } else if ( ( idx <<= 2 ) == size ) {
-        index = 3;            // 64 cache lines
-    } else {
-        KMP_DEBUG_ASSERT( size > DCACHE_LINE * 64 );
-        goto free_call;       // 65 or more cache lines ( > 8KB )
-    }
-
-    alloc_thr = (kmp_info_t *)descr->ptr_aligned; // get thread owning the block
-    if ( alloc_thr == this_thr ) {
-        // push block to self no-sync free list, linking previous head (LIFO)
-        *((void **)ptr) = this_thr->th.th_free_lists[index].th_free_list_self;
-        this_thr->th.th_free_lists[index].th_free_list_self = ptr;
-    } else {
-        void * head = this_thr->th.th_free_lists[index].th_free_list_other;
-        if ( head == NULL ) {
-            // Create new free list
-            this_thr->th.th_free_lists[index].th_free_list_other = ptr;
-            *((void **)ptr) = NULL;             // mark the tail of the list
-            descr->size_allocated = (size_t)1;  // head of the list keeps its length
-        } else {
-            // need to check the existing "other" list's owner thread and queue size
-            kmp_mem_descr_t * dsc = (kmp_mem_descr_t *)( (char*)head - sizeof(kmp_mem_descr_t) );
-            kmp_info_t * q_th = (kmp_info_t *)(dsc->ptr_aligned); // allocating thread, same for all queue nodes
-            size_t q_sz = dsc->size_allocated + 1; // new size in case we add current task
-            if ( q_th == alloc_thr && q_sz <= KMP_FREE_LIST_LIMIT ) {
-                // we can add current task to "other" list, no sync needed
-                *((void **)ptr) = head;
-                descr->size_allocated = q_sz;
-                this_thr->th.th_free_lists[index].th_free_list_other = ptr;
-            } else {
-                // either the queue's owning thread is changing or the size limit was exceeded;
-                // return the old queue to the allocating thread (q_th) synchronously,
-                // and start a new list for alloc_thr's tasks
-                void * old_ptr;
-                void * tail = head;
-                void * next = *((void **)head);
-                while ( next != NULL ) {
-                    KMP_DEBUG_ASSERT(
-                        // queue size should decrease by 1 each step through the list
-                        ((kmp_mem_descr_t*)((char*)next - sizeof(kmp_mem_descr_t)))->size_allocated + 1 ==
-                        ((kmp_mem_descr_t*)((char*)tail - sizeof(kmp_mem_descr_t)))->size_allocated );
-                    tail = next;    // remember tail node
-                    next = *((void **)next);
-                }
-                KMP_DEBUG_ASSERT( q_th != NULL );
-                // push block to owner's sync free list
-                old_ptr = TCR_PTR( q_th->th.th_free_lists[index].th_free_list_sync );
-                /* the next pointer must be set before setting free_list to ptr to avoid
-                   exposing a broken list to other threads, even for an instant. */
-                *((void **)tail) = old_ptr;
-
-                while ( ! KMP_COMPARE_AND_STORE_PTR(
-                            &q_th->th.th_free_lists[index].th_free_list_sync,
-                            old_ptr,
-                            head ) )
-                {
-                    KMP_CPU_PAUSE();
-                    old_ptr = TCR_PTR( q_th->th.th_free_lists[index].th_free_list_sync );
-                    *((void **)tail) = old_ptr;
-                }
-
-                // start new list of not-self tasks
-                this_thr->th.th_free_lists[index].th_free_list_other = ptr;
-                *((void **)ptr) = NULL;
-                descr->size_allocated = (size_t)1;  // head of queue keeps its length
-            }
-        }
-    }
-    goto end;
-
-    free_call:
-        KE_TRACE(25, ( "__kmp_fast_free: T#%d Calling __kmp_thread_free for size %d\n",
-                       __kmp_gtid_from_thread( this_thr), size ) );
-        __kmp_bget_dequeue( this_thr );    /* Release any queued buffers */
-        brel( this_thr, descr->ptr_allocated );
-
-    end:
-        KE_TRACE( 25, ( "<- __kmp_fast_free() returns\n" ) );
-
-} // func __kmp_fast_free
-
-
-// Initialize the thread free lists related to fast memory
-// Only do this when a thread is initially created.
-void
-__kmp_initialize_fast_memory( kmp_info_t *this_thr )
-{
-    KE_TRACE(10, ( "__kmp_initialize_fast_memory: Called from th %p\n", this_thr ) );
-
-    memset ( this_thr->th.th_free_lists, 0, NUM_LISTS * sizeof( kmp_free_list_t ) );
-}
-
-// Free the memory in the thread free lists related to fast memory
-// Only do this when a thread is being reaped (destroyed).
-void
-__kmp_free_fast_memory( kmp_info_t *th )
-{
-    // Assuming we use the BGET underlying allocator, walk through its structures...
-    int bin;
-    thr_data_t * thr = get_thr_data( th );
-    void ** lst = NULL;
-
-    KE_TRACE(5, ( "__kmp_free_fast_memory: Called T#%d\n",
-                  __kmp_gtid_from_thread( th ) ) );
-
-    __kmp_bget_dequeue( th );    // Release any queued buffers
-
-    // Dig through free lists and extract all allocated blocks
-    for ( bin = 0; bin < MAX_BGET_BINS; ++bin ) {
-        bfhead_t * b = thr->freelist[ bin ].ql.flink;
-        while ( b != &thr->freelist[ bin ] ) {
-            if ( (kmp_uintptr_t)b->bh.bb.bthr & 1 ) {  // if the buffer is an allocated address
-                *((void**)b) = lst;    // link the list (override bthr, but keep flink yet)
-                lst = (void**)b;       // push b into lst
-            }
-            b = b->ql.flink;           // get next buffer
-        }
-    }
-    while ( lst != NULL ) {
-        void * next = *lst;
-        KE_TRACE(10, ( "__kmp_free_fast_memory: freeing %p, next=%p th %p (%d)\n",
-                       lst, next, th, __kmp_gtid_from_thread( th ) ) );
-        (*thr->relfcn)(lst);
-        #if BufStats
-            // count blocks to prevent problems in __kmp_finalize_bget()
-            thr->numprel++;    /* Nr of expansion block releases */
-            thr->numpblk--;    /* Total number of blocks */
-        #endif
-        lst = (void**)next;
-    }
-
-    KE_TRACE(5, ( "__kmp_free_fast_memory: Freed T#%d\n",
-                  __kmp_gtid_from_thread( th ) ) );
-}
-
-#endif // USE_FAST_MEMORY
Index: runtime/src/kmp_alloc.cpp
===================================================================
--- runtime/src/kmp_alloc.cpp
+++ runtime/src/kmp_alloc.cpp
@@ -1,5 +1,5 @@
 /*
- * kmp_alloc.c -- private/shared dynamic memory allocation and management
+ * kmp_alloc.cpp -- private/shared dynamic memory allocation and management
 */
Index: runtime/src/kmp_atomic.c
===================================================================
--- runtime/src/kmp_atomic.c
+++ runtime/src/kmp_atomic.c
@@ -1,3120 +0,0 @@
-/*
- * kmp_atomic.c -- ATOMIC implementation routines
- */
-
-
-//===----------------------------------------------------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is dual licensed under the MIT and the University of Illinois Open
-// Source Licenses. See LICENSE.txt for details.
-//
-//===----------------------------------------------------------------------===//
-
-
-#include "kmp_atomic.h"
-#include "kmp.h"                  // TRUE, asm routines prototypes
-
-typedef unsigned char uchar;
-typedef unsigned short ushort;
-
-/*!
-@defgroup ATOMIC_OPS Atomic Operations
-These functions are used for implementing the many different varieties of atomic operations.
-
-The compiler is at liberty to inline atomic operations that are naturally supported
-by the target architecture. For instance on IA-32 architecture an atomic like this can be inlined
-@code
-static int s = 0;
-#pragma omp atomic
-    s++;
-@endcode
-using the single instruction: `lock; incl s`
-
-However the runtime does provide entrypoints for these operations to support compilers that choose
-not to inline them. (For instance, `__kmpc_atomic_fixed4_add` could be used to perform the
-increment above.)
-
-The names of the functions are encoded by using the data type name and the operation name, as in these tables.
-
-Data Type  | Data type encoding
------------|---------------
-int8_t     | `fixed1`
-uint8_t    | `fixed1u`
-int16_t    | `fixed2`
-uint16_t   | `fixed2u`
-int32_t    | `fixed4`
-uint32_t   | `fixed4u`
-int64_t    | `fixed8`
-uint64_t   | `fixed8u`
-float      | `float4`
-double     | `float8`
-long double (8087 eighty bit float) | `float10`
-complex<float>       | `cmplx4`
-complex<double>      | `cmplx8`
-complex<long double> | `cmplx10`
-
- -Operation | Operation encoding -----------|------------------- -+ | add -- | sub -\* | mul -/ | div -& | andb -<< | shl -\>\> | shr -\| | orb -^ | xor -&& | andl -\|\| | orl -maximum | max -minimum | min -.eqv. | eqv -.neqv. | neqv - -
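For example, combining the two encodings above (an illustrative reading; the compiler may also
inline these operations instead of calling the runtime): an atomic `*=` on a `double` maps to
`__kmpc_atomic_float8_mul`, and an atomic `+=` on an `int32_t` maps to `__kmpc_atomic_fixed4_add`:

@code
double d = 1.0; int i = 0;
#pragma omp atomic
    d *= 2.0;    // may be lowered to __kmpc_atomic_float8_mul( id_ref, gtid, &d, 2.0 )
#pragma omp atomic
    i += 5;      // may be lowered to __kmpc_atomic_fixed4_add( id_ref, gtid, &i, 5 )
@endcode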
-For non-commutative operations, `_rev` can also be added for the reversed operation.
-For the functions that capture the result, the suffix `_cpt` is added.
-
-Update Functions
-================
-The general form of an atomic function that just performs an update (without a `capture`)
-@code
-void __kmpc_atomic_<datatype>_<operation>( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs );
-@endcode
-@param ident_t  a pointer to source location
-@param gtid  the global thread id
-@param lhs   a pointer to the left operand
-@param rhs   the right operand
-
-`capture` functions
-===================
-The capture functions perform an atomic update and return a result, which is either the value
-before the update, or the value after it. They take an additional argument to determine which
-result is returned.
-Their general form is therefore
-@code
-TYPE __kmpc_atomic_<datatype>_<operation>_cpt( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs, int flag );
-@endcode
-@param ident_t  a pointer to source location
-@param gtid  the global thread id
-@param lhs   a pointer to the left operand
-@param rhs   the right operand
-@param flag  one if the result is to be captured *after* the operation, zero if captured *before*.
-
-The one set of exceptions to this is the `complex<float>` type where the value is not returned,
-rather an extra argument pointer is passed.
-
-They look like
-@code
-void __kmpc_atomic_cmplx4_<op>_cpt( ident_t *id_ref, int gtid, kmp_cmplx32 * lhs, kmp_cmplx32 rhs, kmp_cmplx32 * out, int flag );
-@endcode
-
-Read and Write Operations
-=========================
-The OpenMP* standard now supports atomic operations that simply ensure that the
-value is read or written atomically, with no modification
-performed. In many cases on IA-32 architecture these operations can be inlined since
-the architecture guarantees that no tearing occurs on aligned objects
-accessed with a single memory operation of up to 64 bits in size.
-
-The general form of the read operations is
-@code
-TYPE __kmpc_atomic_<type>_rd ( ident_t *id_ref, int gtid, TYPE * loc );
-@endcode
-
-For the write operations the form is
-@code
-void __kmpc_atomic_<type>_wr ( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs );
-@endcode
-
-Full list of functions
-======================
-This leads to the generation of 376 atomic functions, as follows.
-
-Functions for integers
----------------------
-There are versions here for integers of size 1,2,4 and 8 bytes both signed and unsigned (where that matters).
-An illustrative capture call follows; the full list is below.
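A minimal sketch of the capture `flag` semantics described above (illustrative values; `id_ref`
and `gtid` are as in the prototypes above):

@code
kmp_int32 x = 10, v;
v = __kmpc_atomic_fixed4_add_cpt( id_ref, gtid, &x, 5, 0 ); // captured *before*: v == 10, x == 15
v = __kmpc_atomic_fixed4_add_cpt( id_ref, gtid, &x, 5, 1 ); // captured *after*:  v == 20, x == 20
@endcode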
-@code - __kmpc_atomic_fixed1_add - __kmpc_atomic_fixed1_add_cpt - __kmpc_atomic_fixed1_add_fp - __kmpc_atomic_fixed1_andb - __kmpc_atomic_fixed1_andb_cpt - __kmpc_atomic_fixed1_andl - __kmpc_atomic_fixed1_andl_cpt - __kmpc_atomic_fixed1_div - __kmpc_atomic_fixed1_div_cpt - __kmpc_atomic_fixed1_div_cpt_rev - __kmpc_atomic_fixed1_div_float8 - __kmpc_atomic_fixed1_div_fp - __kmpc_atomic_fixed1_div_rev - __kmpc_atomic_fixed1_eqv - __kmpc_atomic_fixed1_eqv_cpt - __kmpc_atomic_fixed1_max - __kmpc_atomic_fixed1_max_cpt - __kmpc_atomic_fixed1_min - __kmpc_atomic_fixed1_min_cpt - __kmpc_atomic_fixed1_mul - __kmpc_atomic_fixed1_mul_cpt - __kmpc_atomic_fixed1_mul_float8 - __kmpc_atomic_fixed1_mul_fp - __kmpc_atomic_fixed1_neqv - __kmpc_atomic_fixed1_neqv_cpt - __kmpc_atomic_fixed1_orb - __kmpc_atomic_fixed1_orb_cpt - __kmpc_atomic_fixed1_orl - __kmpc_atomic_fixed1_orl_cpt - __kmpc_atomic_fixed1_rd - __kmpc_atomic_fixed1_shl - __kmpc_atomic_fixed1_shl_cpt - __kmpc_atomic_fixed1_shl_cpt_rev - __kmpc_atomic_fixed1_shl_rev - __kmpc_atomic_fixed1_shr - __kmpc_atomic_fixed1_shr_cpt - __kmpc_atomic_fixed1_shr_cpt_rev - __kmpc_atomic_fixed1_shr_rev - __kmpc_atomic_fixed1_sub - __kmpc_atomic_fixed1_sub_cpt - __kmpc_atomic_fixed1_sub_cpt_rev - __kmpc_atomic_fixed1_sub_fp - __kmpc_atomic_fixed1_sub_rev - __kmpc_atomic_fixed1_swp - __kmpc_atomic_fixed1_wr - __kmpc_atomic_fixed1_xor - __kmpc_atomic_fixed1_xor_cpt - __kmpc_atomic_fixed1u_add_fp - __kmpc_atomic_fixed1u_sub_fp - __kmpc_atomic_fixed1u_mul_fp - __kmpc_atomic_fixed1u_div - __kmpc_atomic_fixed1u_div_cpt - __kmpc_atomic_fixed1u_div_cpt_rev - __kmpc_atomic_fixed1u_div_fp - __kmpc_atomic_fixed1u_div_rev - __kmpc_atomic_fixed1u_shr - __kmpc_atomic_fixed1u_shr_cpt - __kmpc_atomic_fixed1u_shr_cpt_rev - __kmpc_atomic_fixed1u_shr_rev - __kmpc_atomic_fixed2_add - __kmpc_atomic_fixed2_add_cpt - __kmpc_atomic_fixed2_add_fp - __kmpc_atomic_fixed2_andb - __kmpc_atomic_fixed2_andb_cpt - __kmpc_atomic_fixed2_andl - __kmpc_atomic_fixed2_andl_cpt - __kmpc_atomic_fixed2_div - __kmpc_atomic_fixed2_div_cpt - __kmpc_atomic_fixed2_div_cpt_rev - __kmpc_atomic_fixed2_div_float8 - __kmpc_atomic_fixed2_div_fp - __kmpc_atomic_fixed2_div_rev - __kmpc_atomic_fixed2_eqv - __kmpc_atomic_fixed2_eqv_cpt - __kmpc_atomic_fixed2_max - __kmpc_atomic_fixed2_max_cpt - __kmpc_atomic_fixed2_min - __kmpc_atomic_fixed2_min_cpt - __kmpc_atomic_fixed2_mul - __kmpc_atomic_fixed2_mul_cpt - __kmpc_atomic_fixed2_mul_float8 - __kmpc_atomic_fixed2_mul_fp - __kmpc_atomic_fixed2_neqv - __kmpc_atomic_fixed2_neqv_cpt - __kmpc_atomic_fixed2_orb - __kmpc_atomic_fixed2_orb_cpt - __kmpc_atomic_fixed2_orl - __kmpc_atomic_fixed2_orl_cpt - __kmpc_atomic_fixed2_rd - __kmpc_atomic_fixed2_shl - __kmpc_atomic_fixed2_shl_cpt - __kmpc_atomic_fixed2_shl_cpt_rev - __kmpc_atomic_fixed2_shl_rev - __kmpc_atomic_fixed2_shr - __kmpc_atomic_fixed2_shr_cpt - __kmpc_atomic_fixed2_shr_cpt_rev - __kmpc_atomic_fixed2_shr_rev - __kmpc_atomic_fixed2_sub - __kmpc_atomic_fixed2_sub_cpt - __kmpc_atomic_fixed2_sub_cpt_rev - __kmpc_atomic_fixed2_sub_fp - __kmpc_atomic_fixed2_sub_rev - __kmpc_atomic_fixed2_swp - __kmpc_atomic_fixed2_wr - __kmpc_atomic_fixed2_xor - __kmpc_atomic_fixed2_xor_cpt - __kmpc_atomic_fixed2u_add_fp - __kmpc_atomic_fixed2u_sub_fp - __kmpc_atomic_fixed2u_mul_fp - __kmpc_atomic_fixed2u_div - __kmpc_atomic_fixed2u_div_cpt - __kmpc_atomic_fixed2u_div_cpt_rev - __kmpc_atomic_fixed2u_div_fp - __kmpc_atomic_fixed2u_div_rev - __kmpc_atomic_fixed2u_shr - __kmpc_atomic_fixed2u_shr_cpt - __kmpc_atomic_fixed2u_shr_cpt_rev - 
__kmpc_atomic_fixed2u_shr_rev - __kmpc_atomic_fixed4_add - __kmpc_atomic_fixed4_add_cpt - __kmpc_atomic_fixed4_add_fp - __kmpc_atomic_fixed4_andb - __kmpc_atomic_fixed4_andb_cpt - __kmpc_atomic_fixed4_andl - __kmpc_atomic_fixed4_andl_cpt - __kmpc_atomic_fixed4_div - __kmpc_atomic_fixed4_div_cpt - __kmpc_atomic_fixed4_div_cpt_rev - __kmpc_atomic_fixed4_div_float8 - __kmpc_atomic_fixed4_div_fp - __kmpc_atomic_fixed4_div_rev - __kmpc_atomic_fixed4_eqv - __kmpc_atomic_fixed4_eqv_cpt - __kmpc_atomic_fixed4_max - __kmpc_atomic_fixed4_max_cpt - __kmpc_atomic_fixed4_min - __kmpc_atomic_fixed4_min_cpt - __kmpc_atomic_fixed4_mul - __kmpc_atomic_fixed4_mul_cpt - __kmpc_atomic_fixed4_mul_float8 - __kmpc_atomic_fixed4_mul_fp - __kmpc_atomic_fixed4_neqv - __kmpc_atomic_fixed4_neqv_cpt - __kmpc_atomic_fixed4_orb - __kmpc_atomic_fixed4_orb_cpt - __kmpc_atomic_fixed4_orl - __kmpc_atomic_fixed4_orl_cpt - __kmpc_atomic_fixed4_rd - __kmpc_atomic_fixed4_shl - __kmpc_atomic_fixed4_shl_cpt - __kmpc_atomic_fixed4_shl_cpt_rev - __kmpc_atomic_fixed4_shl_rev - __kmpc_atomic_fixed4_shr - __kmpc_atomic_fixed4_shr_cpt - __kmpc_atomic_fixed4_shr_cpt_rev - __kmpc_atomic_fixed4_shr_rev - __kmpc_atomic_fixed4_sub - __kmpc_atomic_fixed4_sub_cpt - __kmpc_atomic_fixed4_sub_cpt_rev - __kmpc_atomic_fixed4_sub_fp - __kmpc_atomic_fixed4_sub_rev - __kmpc_atomic_fixed4_swp - __kmpc_atomic_fixed4_wr - __kmpc_atomic_fixed4_xor - __kmpc_atomic_fixed4_xor_cpt - __kmpc_atomic_fixed4u_add_fp - __kmpc_atomic_fixed4u_sub_fp - __kmpc_atomic_fixed4u_mul_fp - __kmpc_atomic_fixed4u_div - __kmpc_atomic_fixed4u_div_cpt - __kmpc_atomic_fixed4u_div_cpt_rev - __kmpc_atomic_fixed4u_div_fp - __kmpc_atomic_fixed4u_div_rev - __kmpc_atomic_fixed4u_shr - __kmpc_atomic_fixed4u_shr_cpt - __kmpc_atomic_fixed4u_shr_cpt_rev - __kmpc_atomic_fixed4u_shr_rev - __kmpc_atomic_fixed8_add - __kmpc_atomic_fixed8_add_cpt - __kmpc_atomic_fixed8_add_fp - __kmpc_atomic_fixed8_andb - __kmpc_atomic_fixed8_andb_cpt - __kmpc_atomic_fixed8_andl - __kmpc_atomic_fixed8_andl_cpt - __kmpc_atomic_fixed8_div - __kmpc_atomic_fixed8_div_cpt - __kmpc_atomic_fixed8_div_cpt_rev - __kmpc_atomic_fixed8_div_float8 - __kmpc_atomic_fixed8_div_fp - __kmpc_atomic_fixed8_div_rev - __kmpc_atomic_fixed8_eqv - __kmpc_atomic_fixed8_eqv_cpt - __kmpc_atomic_fixed8_max - __kmpc_atomic_fixed8_max_cpt - __kmpc_atomic_fixed8_min - __kmpc_atomic_fixed8_min_cpt - __kmpc_atomic_fixed8_mul - __kmpc_atomic_fixed8_mul_cpt - __kmpc_atomic_fixed8_mul_float8 - __kmpc_atomic_fixed8_mul_fp - __kmpc_atomic_fixed8_neqv - __kmpc_atomic_fixed8_neqv_cpt - __kmpc_atomic_fixed8_orb - __kmpc_atomic_fixed8_orb_cpt - __kmpc_atomic_fixed8_orl - __kmpc_atomic_fixed8_orl_cpt - __kmpc_atomic_fixed8_rd - __kmpc_atomic_fixed8_shl - __kmpc_atomic_fixed8_shl_cpt - __kmpc_atomic_fixed8_shl_cpt_rev - __kmpc_atomic_fixed8_shl_rev - __kmpc_atomic_fixed8_shr - __kmpc_atomic_fixed8_shr_cpt - __kmpc_atomic_fixed8_shr_cpt_rev - __kmpc_atomic_fixed8_shr_rev - __kmpc_atomic_fixed8_sub - __kmpc_atomic_fixed8_sub_cpt - __kmpc_atomic_fixed8_sub_cpt_rev - __kmpc_atomic_fixed8_sub_fp - __kmpc_atomic_fixed8_sub_rev - __kmpc_atomic_fixed8_swp - __kmpc_atomic_fixed8_wr - __kmpc_atomic_fixed8_xor - __kmpc_atomic_fixed8_xor_cpt - __kmpc_atomic_fixed8u_add_fp - __kmpc_atomic_fixed8u_sub_fp - __kmpc_atomic_fixed8u_mul_fp - __kmpc_atomic_fixed8u_div - __kmpc_atomic_fixed8u_div_cpt - __kmpc_atomic_fixed8u_div_cpt_rev - __kmpc_atomic_fixed8u_div_fp - __kmpc_atomic_fixed8u_div_rev - __kmpc_atomic_fixed8u_shr - __kmpc_atomic_fixed8u_shr_cpt - 
__kmpc_atomic_fixed8u_shr_cpt_rev
-    __kmpc_atomic_fixed8u_shr_rev
-@endcode
-
-Functions for floating point
-----------------------------
-There are versions here for floating point numbers of size 4, 8, 10 and 16 bytes.
-(Ten byte floats are used by X87, but are now rare).
-@code
-    __kmpc_atomic_float4_add
-    __kmpc_atomic_float4_add_cpt
-    __kmpc_atomic_float4_add_float8
-    __kmpc_atomic_float4_add_fp
-    __kmpc_atomic_float4_div
-    __kmpc_atomic_float4_div_cpt
-    __kmpc_atomic_float4_div_cpt_rev
-    __kmpc_atomic_float4_div_float8
-    __kmpc_atomic_float4_div_fp
-    __kmpc_atomic_float4_div_rev
-    __kmpc_atomic_float4_max
-    __kmpc_atomic_float4_max_cpt
-    __kmpc_atomic_float4_min
-    __kmpc_atomic_float4_min_cpt
-    __kmpc_atomic_float4_mul
-    __kmpc_atomic_float4_mul_cpt
-    __kmpc_atomic_float4_mul_float8
-    __kmpc_atomic_float4_mul_fp
-    __kmpc_atomic_float4_rd
-    __kmpc_atomic_float4_sub
-    __kmpc_atomic_float4_sub_cpt
-    __kmpc_atomic_float4_sub_cpt_rev
-    __kmpc_atomic_float4_sub_float8
-    __kmpc_atomic_float4_sub_fp
-    __kmpc_atomic_float4_sub_rev
-    __kmpc_atomic_float4_swp
-    __kmpc_atomic_float4_wr
-    __kmpc_atomic_float8_add
-    __kmpc_atomic_float8_add_cpt
-    __kmpc_atomic_float8_add_fp
-    __kmpc_atomic_float8_div
-    __kmpc_atomic_float8_div_cpt
-    __kmpc_atomic_float8_div_cpt_rev
-    __kmpc_atomic_float8_div_fp
-    __kmpc_atomic_float8_div_rev
-    __kmpc_atomic_float8_max
-    __kmpc_atomic_float8_max_cpt
-    __kmpc_atomic_float8_min
-    __kmpc_atomic_float8_min_cpt
-    __kmpc_atomic_float8_mul
-    __kmpc_atomic_float8_mul_cpt
-    __kmpc_atomic_float8_mul_fp
-    __kmpc_atomic_float8_rd
-    __kmpc_atomic_float8_sub
-    __kmpc_atomic_float8_sub_cpt
-    __kmpc_atomic_float8_sub_cpt_rev
-    __kmpc_atomic_float8_sub_fp
-    __kmpc_atomic_float8_sub_rev
-    __kmpc_atomic_float8_swp
-    __kmpc_atomic_float8_wr
-    __kmpc_atomic_float10_add
-    __kmpc_atomic_float10_add_cpt
-    __kmpc_atomic_float10_add_fp
-    __kmpc_atomic_float10_div
-    __kmpc_atomic_float10_div_cpt
-    __kmpc_atomic_float10_div_cpt_rev
-    __kmpc_atomic_float10_div_fp
-    __kmpc_atomic_float10_div_rev
-    __kmpc_atomic_float10_mul
-    __kmpc_atomic_float10_mul_cpt
-    __kmpc_atomic_float10_mul_fp
-    __kmpc_atomic_float10_rd
-    __kmpc_atomic_float10_sub
-    __kmpc_atomic_float10_sub_cpt
-    __kmpc_atomic_float10_sub_cpt_rev
-    __kmpc_atomic_float10_sub_fp
-    __kmpc_atomic_float10_sub_rev
-    __kmpc_atomic_float10_swp
-    __kmpc_atomic_float10_wr
-    __kmpc_atomic_float16_add
-    __kmpc_atomic_float16_add_cpt
-    __kmpc_atomic_float16_div
-    __kmpc_atomic_float16_div_cpt
-    __kmpc_atomic_float16_div_cpt_rev
-    __kmpc_atomic_float16_div_rev
-    __kmpc_atomic_float16_max
-    __kmpc_atomic_float16_max_cpt
-    __kmpc_atomic_float16_min
-    __kmpc_atomic_float16_min_cpt
-    __kmpc_atomic_float16_mul
-    __kmpc_atomic_float16_mul_cpt
-    __kmpc_atomic_float16_rd
-    __kmpc_atomic_float16_sub
-    __kmpc_atomic_float16_sub_cpt
-    __kmpc_atomic_float16_sub_cpt_rev
-    __kmpc_atomic_float16_sub_rev
-    __kmpc_atomic_float16_swp
-    __kmpc_atomic_float16_wr
-@endcode
-
-Functions for Complex types
----------------------------
-Functions for complex types whose component floating point variables are of size 4,8,10 or 16 bytes.
-The names here are based on the size of the component float, *not* the size of the complex type. So
-`__kmpc_atomic_cmplx8_add` is an operation on a `complex<double>` or `complex(kind=8)`, *not* a
-`complex<float>`. A short usage sketch follows; the full list is below.
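To make the naming concrete (a sketch in terms of the runtime's own typedefs; `rhs32` and `rhs64`
stand for right-hand sides of type `kmp_cmplx32` and `kmp_cmplx64`, and the direct calls are shown
for illustration only):

@code
kmp_cmplx32 cf;  // complex with 4-byte float components  -> 'cmplx4' entry points
kmp_cmplx64 cd;  // complex with 8-byte double components -> 'cmplx8' entry points
__kmpc_atomic_cmplx4_add( id_ref, gtid, &cf, rhs32 );  // updates a complex<float>
__kmpc_atomic_cmplx8_add( id_ref, gtid, &cd, rhs64 );  // updates a complex<double> / complex(kind=8)
@endcode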
- -@code - __kmpc_atomic_cmplx4_add - __kmpc_atomic_cmplx4_add_cmplx8 - __kmpc_atomic_cmplx4_add_cpt - __kmpc_atomic_cmplx4_div - __kmpc_atomic_cmplx4_div_cmplx8 - __kmpc_atomic_cmplx4_div_cpt - __kmpc_atomic_cmplx4_div_cpt_rev - __kmpc_atomic_cmplx4_div_rev - __kmpc_atomic_cmplx4_mul - __kmpc_atomic_cmplx4_mul_cmplx8 - __kmpc_atomic_cmplx4_mul_cpt - __kmpc_atomic_cmplx4_rd - __kmpc_atomic_cmplx4_sub - __kmpc_atomic_cmplx4_sub_cmplx8 - __kmpc_atomic_cmplx4_sub_cpt - __kmpc_atomic_cmplx4_sub_cpt_rev - __kmpc_atomic_cmplx4_sub_rev - __kmpc_atomic_cmplx4_swp - __kmpc_atomic_cmplx4_wr - __kmpc_atomic_cmplx8_add - __kmpc_atomic_cmplx8_add_cpt - __kmpc_atomic_cmplx8_div - __kmpc_atomic_cmplx8_div_cpt - __kmpc_atomic_cmplx8_div_cpt_rev - __kmpc_atomic_cmplx8_div_rev - __kmpc_atomic_cmplx8_mul - __kmpc_atomic_cmplx8_mul_cpt - __kmpc_atomic_cmplx8_rd - __kmpc_atomic_cmplx8_sub - __kmpc_atomic_cmplx8_sub_cpt - __kmpc_atomic_cmplx8_sub_cpt_rev - __kmpc_atomic_cmplx8_sub_rev - __kmpc_atomic_cmplx8_swp - __kmpc_atomic_cmplx8_wr - __kmpc_atomic_cmplx10_add - __kmpc_atomic_cmplx10_add_cpt - __kmpc_atomic_cmplx10_div - __kmpc_atomic_cmplx10_div_cpt - __kmpc_atomic_cmplx10_div_cpt_rev - __kmpc_atomic_cmplx10_div_rev - __kmpc_atomic_cmplx10_mul - __kmpc_atomic_cmplx10_mul_cpt - __kmpc_atomic_cmplx10_rd - __kmpc_atomic_cmplx10_sub - __kmpc_atomic_cmplx10_sub_cpt - __kmpc_atomic_cmplx10_sub_cpt_rev - __kmpc_atomic_cmplx10_sub_rev - __kmpc_atomic_cmplx10_swp - __kmpc_atomic_cmplx10_wr - __kmpc_atomic_cmplx16_add - __kmpc_atomic_cmplx16_add_cpt - __kmpc_atomic_cmplx16_div - __kmpc_atomic_cmplx16_div_cpt - __kmpc_atomic_cmplx16_div_cpt_rev - __kmpc_atomic_cmplx16_div_rev - __kmpc_atomic_cmplx16_mul - __kmpc_atomic_cmplx16_mul_cpt - __kmpc_atomic_cmplx16_rd - __kmpc_atomic_cmplx16_sub - __kmpc_atomic_cmplx16_sub_cpt - __kmpc_atomic_cmplx16_sub_cpt_rev - __kmpc_atomic_cmplx16_swp - __kmpc_atomic_cmplx16_wr -@endcode -*/ - -/*! 
-@ingroup ATOMIC_OPS -@{ -*/ - -/* - * Global vars - */ - -#ifndef KMP_GOMP_COMPAT -int __kmp_atomic_mode = 1; // Intel perf -#else -int __kmp_atomic_mode = 2; // GOMP compatibility -#endif /* KMP_GOMP_COMPAT */ - -KMP_ALIGN(128) - -kmp_atomic_lock_t __kmp_atomic_lock; /* Control access to all user coded atomics in Gnu compat mode */ -kmp_atomic_lock_t __kmp_atomic_lock_1i; /* Control access to all user coded atomics for 1-byte fixed data types */ -kmp_atomic_lock_t __kmp_atomic_lock_2i; /* Control access to all user coded atomics for 2-byte fixed data types */ -kmp_atomic_lock_t __kmp_atomic_lock_4i; /* Control access to all user coded atomics for 4-byte fixed data types */ -kmp_atomic_lock_t __kmp_atomic_lock_4r; /* Control access to all user coded atomics for kmp_real32 data type */ -kmp_atomic_lock_t __kmp_atomic_lock_8i; /* Control access to all user coded atomics for 8-byte fixed data types */ -kmp_atomic_lock_t __kmp_atomic_lock_8r; /* Control access to all user coded atomics for kmp_real64 data type */ -kmp_atomic_lock_t __kmp_atomic_lock_8c; /* Control access to all user coded atomics for complex byte data type */ -kmp_atomic_lock_t __kmp_atomic_lock_10r; /* Control access to all user coded atomics for long double data type */ -kmp_atomic_lock_t __kmp_atomic_lock_16r; /* Control access to all user coded atomics for _Quad data type */ -kmp_atomic_lock_t __kmp_atomic_lock_16c; /* Control access to all user coded atomics for double complex data type*/ -kmp_atomic_lock_t __kmp_atomic_lock_20c; /* Control access to all user coded atomics for long double complex type*/ -kmp_atomic_lock_t __kmp_atomic_lock_32c; /* Control access to all user coded atomics for _Quad complex data type */ - - -/* - 2007-03-02: - Without "volatile" specifier in OP_CMPXCHG and MIN_MAX_CMPXCHG we have a - bug on *_32 and *_32e. This is just a temporary workaround for the problem. - It seems the right solution is writing OP_CMPXCHG and MIN_MAX_CMPXCHG - routines in assembler language. 
-*/
-#define KMP_ATOMIC_VOLATILE volatile
-
-#if ( KMP_ARCH_X86 ) && KMP_HAVE_QUAD
-
-    static inline void operator +=( Quad_a4_t & lhs, Quad_a4_t & rhs ) { lhs.q += rhs.q; };
-    static inline void operator -=( Quad_a4_t & lhs, Quad_a4_t & rhs ) { lhs.q -= rhs.q; };
-    static inline void operator *=( Quad_a4_t & lhs, Quad_a4_t & rhs ) { lhs.q *= rhs.q; };
-    static inline void operator /=( Quad_a4_t & lhs, Quad_a4_t & rhs ) { lhs.q /= rhs.q; };
-    static inline bool operator < ( Quad_a4_t & lhs, Quad_a4_t & rhs ) { return lhs.q < rhs.q; }
-    static inline bool operator > ( Quad_a4_t & lhs, Quad_a4_t & rhs ) { return lhs.q > rhs.q; }
-
-    static inline void operator +=( Quad_a16_t & lhs, Quad_a16_t & rhs ) { lhs.q += rhs.q; };
-    static inline void operator -=( Quad_a16_t & lhs, Quad_a16_t & rhs ) { lhs.q -= rhs.q; };
-    static inline void operator *=( Quad_a16_t & lhs, Quad_a16_t & rhs ) { lhs.q *= rhs.q; };
-    static inline void operator /=( Quad_a16_t & lhs, Quad_a16_t & rhs ) { lhs.q /= rhs.q; };
-    static inline bool operator < ( Quad_a16_t & lhs, Quad_a16_t & rhs ) { return lhs.q < rhs.q; }
-    static inline bool operator > ( Quad_a16_t & lhs, Quad_a16_t & rhs ) { return lhs.q > rhs.q; }
-
-    static inline void operator +=( kmp_cmplx128_a4_t & lhs, kmp_cmplx128_a4_t & rhs ) { lhs.q += rhs.q; };
-    static inline void operator -=( kmp_cmplx128_a4_t & lhs, kmp_cmplx128_a4_t & rhs ) { lhs.q -= rhs.q; };
-    static inline void operator *=( kmp_cmplx128_a4_t & lhs, kmp_cmplx128_a4_t & rhs ) { lhs.q *= rhs.q; };
-    static inline void operator /=( kmp_cmplx128_a4_t & lhs, kmp_cmplx128_a4_t & rhs ) { lhs.q /= rhs.q; };
-
-    static inline void operator +=( kmp_cmplx128_a16_t & lhs, kmp_cmplx128_a16_t & rhs ) { lhs.q += rhs.q; };
-    static inline void operator -=( kmp_cmplx128_a16_t & lhs, kmp_cmplx128_a16_t & rhs ) { lhs.q -= rhs.q; };
-    static inline void operator *=( kmp_cmplx128_a16_t & lhs, kmp_cmplx128_a16_t & rhs ) { lhs.q *= rhs.q; };
-    static inline void operator /=( kmp_cmplx128_a16_t & lhs, kmp_cmplx128_a16_t & rhs ) { lhs.q /= rhs.q; };
-
-#endif
-
-/* ------------------------------------------------------------------------ */
-/* ATOMIC implementation routines                                           */
-/* one routine for each operation and operand type                          */
-/* ------------------------------------------------------------------------ */
-
-// All routine declarations look like
-// void __kmpc_atomic_RTYPE_OP( ident_t*, int, TYPE *lhs, TYPE rhs );
-// ------------------------------------------------------------------------
-
-#define KMP_CHECK_GTID                        \
-    if ( gtid == KMP_GTID_UNKNOWN ) {         \
-        gtid = __kmp_entry_gtid();            \
-    } // check and get gtid when needed
-
-// Beginning of a definition (provides name, parameters, debug trace)
-// TYPE_ID - operands type and size (fixed*, fixed*u for signed, unsigned fixed)
-// OP_ID   - operation identifier (add, sub, mul, ...)
-// TYPE    - operands' type
-#define ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE, RET_TYPE) \
-RET_TYPE __kmpc_atomic_##TYPE_ID##_##OP_ID( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs ) \
-{ \
-    KMP_DEBUG_ASSERT( __kmp_init_serial ); \
-    KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_" #OP_ID ": T#%d\n", gtid ));
-
-// ------------------------------------------------------------------------
-// Lock variables used for critical sections for various size operands
-#define ATOMIC_LOCK0   __kmp_atomic_lock       // all types, for Gnu compat
-#define ATOMIC_LOCK1i  __kmp_atomic_lock_1i    // char
-#define ATOMIC_LOCK2i  __kmp_atomic_lock_2i    // short
-#define ATOMIC_LOCK4i  __kmp_atomic_lock_4i    // long int
-#define ATOMIC_LOCK4r  __kmp_atomic_lock_4r    // float
-#define ATOMIC_LOCK8i  __kmp_atomic_lock_8i    // long long int
-#define ATOMIC_LOCK8r  __kmp_atomic_lock_8r    // double
-#define ATOMIC_LOCK8c  __kmp_atomic_lock_8c    // float complex
-#define ATOMIC_LOCK10r __kmp_atomic_lock_10r   // long double
-#define ATOMIC_LOCK16r __kmp_atomic_lock_16r   // _Quad
-#define ATOMIC_LOCK16c __kmp_atomic_lock_16c   // double complex
-#define ATOMIC_LOCK20c __kmp_atomic_lock_20c   // long double complex
-#define ATOMIC_LOCK32c __kmp_atomic_lock_32c   // _Quad complex
-
-// ------------------------------------------------------------------------
-// Operation on *lhs, rhs bound by critical section
-//     OP     - operator (it's supposed to contain an assignment)
-//     LCK_ID - lock identifier
-// Note: don't check gtid as it should always be valid
-// 1, 2-byte - expect valid parameter, other - check before this macro
-#define OP_CRITICAL(OP,LCK_ID) \
-    __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
-    \
-    (*lhs) OP (rhs); \
-    \
-    __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid );
-
-// ------------------------------------------------------------------------
-// For GNU compatibility, we may need to use a critical section,
-// even though it is not required by the ISA.
-//
-// On IA-32 architecture, all atomic operations except for fixed 4 byte add,
-// sub, and bitwise logical ops, and 1 & 2 byte logical ops use a common
-// critical section. On Intel(R) 64, all atomic operations are done with fetch
-// and add or compare and exchange. Therefore, the FLAG parameter to this
-// macro is either KMP_ARCH_X86 or 0 (or 1, for Intel-specific extensions which
-// require a critical section, where we predict that they will be implemented
-// in the Gnu codegen by calling GOMP_atomic_start() / GOMP_atomic_end()).
-//
-// When the OP_GOMP_CRITICAL macro is used in a *CRITICAL* macro construct,
-// the FLAG parameter should always be 1. If we know that we will be using
-// a critical section, then we want to make certain that we use the generic
-// lock __kmp_atomic_lock to protect the atomic update, and not one of the
-// locks that are specialized based upon the size or type of the data.
-//
-// If FLAG is 0, then we are relying on dead code elimination by the build
-// compiler to get rid of the useless block of code, and save a needless
-// branch at runtime.
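As an illustration, OP_CRITICAL( +=, 4i ) expands to roughly the following (a sketch of the macro
above, not a verbatim preprocessor dump):

@code
__kmp_acquire_atomic_lock( & __kmp_atomic_lock_4i, gtid );
(*lhs) += (rhs);
__kmp_release_atomic_lock( & __kmp_atomic_lock_4i, gtid );
@endcode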
-// - -#ifdef KMP_GOMP_COMPAT -# define OP_GOMP_CRITICAL(OP,FLAG) \ - if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \ - KMP_CHECK_GTID; \ - OP_CRITICAL( OP, 0 ); \ - return; \ - } -# else -# define OP_GOMP_CRITICAL(OP,FLAG) -#endif /* KMP_GOMP_COMPAT */ - -#if KMP_MIC -# define KMP_DO_PAUSE _mm_delay_32( 1 ) -#else -# define KMP_DO_PAUSE KMP_CPU_PAUSE() -#endif /* KMP_MIC */ - -// ------------------------------------------------------------------------ -// Operation on *lhs, rhs using "compare_and_store" routine -// TYPE - operands' type -// BITS - size in bits, used to distinguish low level calls -// OP - operator -#define OP_CMPXCHG(TYPE,BITS,OP) \ - { \ - TYPE old_value, new_value; \ - old_value = *(TYPE volatile *)lhs; \ - new_value = old_value OP rhs; \ - while ( ! KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \ - *VOLATILE_CAST(kmp_int##BITS *) &old_value, \ - *VOLATILE_CAST(kmp_int##BITS *) &new_value ) ) \ - { \ - KMP_DO_PAUSE; \ - \ - old_value = *(TYPE volatile *)lhs; \ - new_value = old_value OP rhs; \ - } \ - } - -#if USE_CMPXCHG_FIX -// 2007-06-25: -// workaround for C78287 (complex(kind=4) data type) -// lin_32, lin_32e, win_32 and win_32e are affected (I verified the asm) -// Compiler ignores the volatile qualifier of the temp_val in the OP_CMPXCHG macro. -// This is a problem of the compiler. -// Related tracker is C76005, targeted to 11.0. -// I verified the asm of the workaround. -#define OP_CMPXCHG_WORKAROUND(TYPE,BITS,OP) \ - { \ - struct _sss { \ - TYPE cmp; \ - kmp_int##BITS *vvv; \ - }; \ - struct _sss old_value, new_value; \ - old_value.vvv = ( kmp_int##BITS * )&old_value.cmp; \ - new_value.vvv = ( kmp_int##BITS * )&new_value.cmp; \ - *old_value.vvv = * ( volatile kmp_int##BITS * ) lhs; \ - new_value.cmp = old_value.cmp OP rhs; \ - while ( ! KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \ - *VOLATILE_CAST(kmp_int##BITS *) old_value.vvv, \ - *VOLATILE_CAST(kmp_int##BITS *) new_value.vvv ) ) \ - { \ - KMP_DO_PAUSE; \ - \ - *old_value.vvv = * ( volatile kmp_int##BITS * ) lhs; \ - new_value.cmp = old_value.cmp OP rhs; \ - } \ - } -// end of the first part of the workaround for C78287 -#endif // USE_CMPXCHG_FIX - -#if KMP_ARCH_X86 || KMP_ARCH_X86_64 - -// ------------------------------------------------------------------------ -// X86 or X86_64: no alignment problems ==================================== -#define ATOMIC_FIXED_ADD(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \ -ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ - OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \ - /* OP used as a sign for subtraction: (lhs-rhs) --> (lhs+-rhs) */ \ - KMP_TEST_THEN_ADD##BITS( lhs, OP rhs ); \ -} -// ------------------------------------------------------------------------- -#define ATOMIC_CMPXCHG(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \ -ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ - OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \ - OP_CMPXCHG(TYPE,BITS,OP) \ -} -#if USE_CMPXCHG_FIX -// ------------------------------------------------------------------------- -// workaround for C78287 (complex(kind=4) data type) -#define ATOMIC_CMPXCHG_WORKAROUND(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \ -ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ - OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \ - OP_CMPXCHG_WORKAROUND(TYPE,BITS,OP) \ -} -// end of the second part of the workaround for C78287 -#endif - -#else -// ------------------------------------------------------------------------- -// Code for other architectures that don't handle unaligned accesses. 
-#define ATOMIC_FIXED_ADD(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \ -ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ - OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \ - if ( ! ( (kmp_uintptr_t) lhs & 0x##MASK) ) { \ - /* OP used as a sign for subtraction: (lhs-rhs) --> (lhs+-rhs) */ \ - KMP_TEST_THEN_ADD##BITS( lhs, OP rhs ); \ - } else { \ - KMP_CHECK_GTID; \ - OP_CRITICAL(OP##=,LCK_ID) /* unaligned address - use critical */ \ - } \ -} -// ------------------------------------------------------------------------- -#define ATOMIC_CMPXCHG(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \ -ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ - OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \ - if ( ! ( (kmp_uintptr_t) lhs & 0x##MASK) ) { \ - OP_CMPXCHG(TYPE,BITS,OP) /* aligned address */ \ - } else { \ - KMP_CHECK_GTID; \ - OP_CRITICAL(OP##=,LCK_ID) /* unaligned address - use critical */ \ - } \ -} -#if USE_CMPXCHG_FIX -// ------------------------------------------------------------------------- -// workaround for C78287 (complex(kind=4) data type) -#define ATOMIC_CMPXCHG_WORKAROUND(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \ -ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ - OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \ - if ( ! ( (kmp_uintptr_t) lhs & 0x##MASK) ) { \ - OP_CMPXCHG(TYPE,BITS,OP) /* aligned address */ \ - } else { \ - KMP_CHECK_GTID; \ - OP_CRITICAL(OP##=,LCK_ID) /* unaligned address - use critical */ \ - } \ -} -// end of the second part of the workaround for C78287 -#endif // USE_CMPXCHG_FIX -#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */ - -// Routines for ATOMIC 4-byte operands addition and subtraction -ATOMIC_FIXED_ADD( fixed4, add, kmp_int32, 32, +, 4i, 3, 0 ) // __kmpc_atomic_fixed4_add -ATOMIC_FIXED_ADD( fixed4, sub, kmp_int32, 32, -, 4i, 3, 0 ) // __kmpc_atomic_fixed4_sub - -ATOMIC_CMPXCHG( float4, add, kmp_real32, 32, +, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_add -ATOMIC_CMPXCHG( float4, sub, kmp_real32, 32, -, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_sub - -// Routines for ATOMIC 8-byte operands addition and subtraction -ATOMIC_FIXED_ADD( fixed8, add, kmp_int64, 64, +, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_add -ATOMIC_FIXED_ADD( fixed8, sub, kmp_int64, 64, -, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_sub - -ATOMIC_CMPXCHG( float8, add, kmp_real64, 64, +, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_add -ATOMIC_CMPXCHG( float8, sub, kmp_real64, 64, -, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_sub - -// ------------------------------------------------------------------------ -// Entries definition for integer operands -// TYPE_ID - operands type and size (fixed4, float4) -// OP_ID - operation identifier (add, sub, mul, ...) 
-// TYPE - operand type -// BITS - size in bits, used to distinguish low level calls -// OP - operator (used in critical section) -// LCK_ID - lock identifier, used to possibly distinguish lock variable -// MASK - used for alignment check - -// TYPE_ID,OP_ID, TYPE, BITS,OP,LCK_ID,MASK,GOMP_FLAG -// ------------------------------------------------------------------------ -// Routines for ATOMIC integer operands, other operators -// ------------------------------------------------------------------------ -// TYPE_ID,OP_ID, TYPE, OP, LCK_ID, GOMP_FLAG -ATOMIC_CMPXCHG( fixed1, add, kmp_int8, 8, +, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_add -ATOMIC_CMPXCHG( fixed1, andb, kmp_int8, 8, &, 1i, 0, 0 ) // __kmpc_atomic_fixed1_andb -ATOMIC_CMPXCHG( fixed1, div, kmp_int8, 8, /, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_div -ATOMIC_CMPXCHG( fixed1u, div, kmp_uint8, 8, /, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_div -ATOMIC_CMPXCHG( fixed1, mul, kmp_int8, 8, *, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_mul -ATOMIC_CMPXCHG( fixed1, orb, kmp_int8, 8, |, 1i, 0, 0 ) // __kmpc_atomic_fixed1_orb -ATOMIC_CMPXCHG( fixed1, shl, kmp_int8, 8, <<, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_shl -ATOMIC_CMPXCHG( fixed1, shr, kmp_int8, 8, >>, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_shr -ATOMIC_CMPXCHG( fixed1u, shr, kmp_uint8, 8, >>, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_shr -ATOMIC_CMPXCHG( fixed1, sub, kmp_int8, 8, -, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_sub -ATOMIC_CMPXCHG( fixed1, xor, kmp_int8, 8, ^, 1i, 0, 0 ) // __kmpc_atomic_fixed1_xor -ATOMIC_CMPXCHG( fixed2, add, kmp_int16, 16, +, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_add -ATOMIC_CMPXCHG( fixed2, andb, kmp_int16, 16, &, 2i, 1, 0 ) // __kmpc_atomic_fixed2_andb -ATOMIC_CMPXCHG( fixed2, div, kmp_int16, 16, /, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_div -ATOMIC_CMPXCHG( fixed2u, div, kmp_uint16, 16, /, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_div -ATOMIC_CMPXCHG( fixed2, mul, kmp_int16, 16, *, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_mul -ATOMIC_CMPXCHG( fixed2, orb, kmp_int16, 16, |, 2i, 1, 0 ) // __kmpc_atomic_fixed2_orb -ATOMIC_CMPXCHG( fixed2, shl, kmp_int16, 16, <<, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_shl -ATOMIC_CMPXCHG( fixed2, shr, kmp_int16, 16, >>, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_shr -ATOMIC_CMPXCHG( fixed2u, shr, kmp_uint16, 16, >>, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_shr -ATOMIC_CMPXCHG( fixed2, sub, kmp_int16, 16, -, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_sub -ATOMIC_CMPXCHG( fixed2, xor, kmp_int16, 16, ^, 2i, 1, 0 ) // __kmpc_atomic_fixed2_xor -ATOMIC_CMPXCHG( fixed4, andb, kmp_int32, 32, &, 4i, 3, 0 ) // __kmpc_atomic_fixed4_andb -ATOMIC_CMPXCHG( fixed4, div, kmp_int32, 32, /, 4i, 3, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_div -ATOMIC_CMPXCHG( fixed4u, div, kmp_uint32, 32, /, 4i, 3, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4u_div -ATOMIC_CMPXCHG( fixed4, mul, kmp_int32, 32, *, 4i, 3, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_mul -ATOMIC_CMPXCHG( fixed4, orb, kmp_int32, 32, |, 4i, 3, 0 ) // __kmpc_atomic_fixed4_orb -ATOMIC_CMPXCHG( fixed4, shl, kmp_int32, 32, <<, 4i, 3, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_shl -ATOMIC_CMPXCHG( fixed4, shr, kmp_int32, 32, >>, 4i, 3, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_shr -ATOMIC_CMPXCHG( fixed4u, shr, kmp_uint32, 32, >>, 4i, 3, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4u_shr -ATOMIC_CMPXCHG( fixed4, xor, kmp_int32, 32, ^, 4i, 3, 0 ) // __kmpc_atomic_fixed4_xor -ATOMIC_CMPXCHG( fixed8, andb, kmp_int64, 64, &, 
8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_andb -ATOMIC_CMPXCHG( fixed8, div, kmp_int64, 64, /, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_div -ATOMIC_CMPXCHG( fixed8u, div, kmp_uint64, 64, /, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_div -ATOMIC_CMPXCHG( fixed8, mul, kmp_int64, 64, *, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_mul -ATOMIC_CMPXCHG( fixed8, orb, kmp_int64, 64, |, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_orb -ATOMIC_CMPXCHG( fixed8, shl, kmp_int64, 64, <<, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_shl -ATOMIC_CMPXCHG( fixed8, shr, kmp_int64, 64, >>, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_shr -ATOMIC_CMPXCHG( fixed8u, shr, kmp_uint64, 64, >>, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_shr -ATOMIC_CMPXCHG( fixed8, xor, kmp_int64, 64, ^, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_xor -ATOMIC_CMPXCHG( float4, div, kmp_real32, 32, /, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_div -ATOMIC_CMPXCHG( float4, mul, kmp_real32, 32, *, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_mul -ATOMIC_CMPXCHG( float8, div, kmp_real64, 64, /, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_div -ATOMIC_CMPXCHG( float8, mul, kmp_real64, 64, *, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_mul -// TYPE_ID,OP_ID, TYPE, OP, LCK_ID, GOMP_FLAG - - -/* ------------------------------------------------------------------------ */ -/* Routines for C/C++ Reduction operators && and || */ -/* ------------------------------------------------------------------------ */ - -// ------------------------------------------------------------------------ -// Need separate macros for &&, || because there is no combined assignment -// TODO: eliminate ATOMIC_CRIT_{L,EQV} macros as not used -#define ATOMIC_CRIT_L(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \ -ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ - OP_GOMP_CRITICAL( = *lhs OP, GOMP_FLAG ) \ - OP_CRITICAL( = *lhs OP, LCK_ID ) \ -} - -#if KMP_ARCH_X86 || KMP_ARCH_X86_64 - -// ------------------------------------------------------------------------ -// X86 or X86_64: no alignment problems =================================== -#define ATOMIC_CMPX_L(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \ -ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ - OP_GOMP_CRITICAL( = *lhs OP, GOMP_FLAG ) \ - OP_CMPXCHG(TYPE,BITS,OP) \ -} - -#else -// ------------------------------------------------------------------------ -// Code for other architectures that don't handle unaligned accesses. -#define ATOMIC_CMPX_L(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \ -ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ - OP_GOMP_CRITICAL(= *lhs OP,GOMP_FLAG) \ - if ( ! 
( (kmp_uintptr_t) lhs & 0x##MASK) ) { \ - OP_CMPXCHG(TYPE,BITS,OP) /* aligned address */ \ - } else { \ - KMP_CHECK_GTID; \ - OP_CRITICAL(= *lhs OP,LCK_ID) /* unaligned - use critical */ \ - } \ -} -#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */ - -ATOMIC_CMPX_L( fixed1, andl, char, 8, &&, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_andl -ATOMIC_CMPX_L( fixed1, orl, char, 8, ||, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_orl -ATOMIC_CMPX_L( fixed2, andl, short, 16, &&, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_andl -ATOMIC_CMPX_L( fixed2, orl, short, 16, ||, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_orl -ATOMIC_CMPX_L( fixed4, andl, kmp_int32, 32, &&, 4i, 3, 0 ) // __kmpc_atomic_fixed4_andl -ATOMIC_CMPX_L( fixed4, orl, kmp_int32, 32, ||, 4i, 3, 0 ) // __kmpc_atomic_fixed4_orl -ATOMIC_CMPX_L( fixed8, andl, kmp_int64, 64, &&, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_andl -ATOMIC_CMPX_L( fixed8, orl, kmp_int64, 64, ||, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_orl - - -/* ------------------------------------------------------------------------- */ -/* Routines for Fortran operators that matched no one in C: */ -/* MAX, MIN, .EQV., .NEQV. */ -/* Operators .AND., .OR. are covered by __kmpc_atomic_*_{andl,orl} */ -/* Intrinsics IAND, IOR, IEOR are covered by __kmpc_atomic_*_{andb,orb,xor} */ -/* ------------------------------------------------------------------------- */ - -// ------------------------------------------------------------------------- -// MIN and MAX need separate macros -// OP - operator to check if we need any actions? -#define MIN_MAX_CRITSECT(OP,LCK_ID) \ - __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ - \ - if ( *lhs OP rhs ) { /* still need actions? */ \ - *lhs = rhs; \ - } \ - __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); - -// ------------------------------------------------------------------------- -#ifdef KMP_GOMP_COMPAT -#define GOMP_MIN_MAX_CRITSECT(OP,FLAG) \ - if (( FLAG ) && ( __kmp_atomic_mode == 2 )) { \ - KMP_CHECK_GTID; \ - MIN_MAX_CRITSECT( OP, 0 ); \ - return; \ - } -#else -#define GOMP_MIN_MAX_CRITSECT(OP,FLAG) -#endif /* KMP_GOMP_COMPAT */ - -// ------------------------------------------------------------------------- -#define MIN_MAX_CMPXCHG(TYPE,BITS,OP) \ - { \ - TYPE KMP_ATOMIC_VOLATILE temp_val; \ - TYPE old_value; \ - temp_val = *lhs; \ - old_value = temp_val; \ - while ( old_value OP rhs && /* still need actions? */ \ - ! KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \ - *VOLATILE_CAST(kmp_int##BITS *) &old_value, \ - *VOLATILE_CAST(kmp_int##BITS *) &rhs ) ) \ - { \ - KMP_CPU_PAUSE(); \ - temp_val = *lhs; \ - old_value = temp_val; \ - } \ - } - -// ------------------------------------------------------------------------- -// 1-byte, 2-byte operands - use critical section -#define MIN_MAX_CRITICAL(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \ -ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ - if ( *lhs OP rhs ) { /* need actions? 
*/ \ - GOMP_MIN_MAX_CRITSECT(OP,GOMP_FLAG) \ - MIN_MAX_CRITSECT(OP,LCK_ID) \ - } \ -} - -#if KMP_ARCH_X86 || KMP_ARCH_X86_64 - -// ------------------------------------------------------------------------- -// X86 or X86_64: no alignment problems ==================================== -#define MIN_MAX_COMPXCHG(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \ -ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ - if ( *lhs OP rhs ) { \ - GOMP_MIN_MAX_CRITSECT(OP,GOMP_FLAG) \ - MIN_MAX_CMPXCHG(TYPE,BITS,OP) \ - } \ -} - -#else -// ------------------------------------------------------------------------- -// Code for other architectures that don't handle unaligned accesses. -#define MIN_MAX_COMPXCHG(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \ -ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ - if ( *lhs OP rhs ) { \ - GOMP_MIN_MAX_CRITSECT(OP,GOMP_FLAG) \ - if ( ! ( (kmp_uintptr_t) lhs & 0x##MASK) ) { \ - MIN_MAX_CMPXCHG(TYPE,BITS,OP) /* aligned address */ \ - } else { \ - KMP_CHECK_GTID; \ - MIN_MAX_CRITSECT(OP,LCK_ID) /* unaligned address */ \ - } \ - } \ -} -#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */ - -MIN_MAX_COMPXCHG( fixed1, max, char, 8, <, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_max -MIN_MAX_COMPXCHG( fixed1, min, char, 8, >, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_min -MIN_MAX_COMPXCHG( fixed2, max, short, 16, <, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_max -MIN_MAX_COMPXCHG( fixed2, min, short, 16, >, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_min -MIN_MAX_COMPXCHG( fixed4, max, kmp_int32, 32, <, 4i, 3, 0 ) // __kmpc_atomic_fixed4_max -MIN_MAX_COMPXCHG( fixed4, min, kmp_int32, 32, >, 4i, 3, 0 ) // __kmpc_atomic_fixed4_min -MIN_MAX_COMPXCHG( fixed8, max, kmp_int64, 64, <, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_max -MIN_MAX_COMPXCHG( fixed8, min, kmp_int64, 64, >, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_min -MIN_MAX_COMPXCHG( float4, max, kmp_real32, 32, <, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_max -MIN_MAX_COMPXCHG( float4, min, kmp_real32, 32, >, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_min -MIN_MAX_COMPXCHG( float8, max, kmp_real64, 64, <, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_max -MIN_MAX_COMPXCHG( float8, min, kmp_real64, 64, >, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_min -#if KMP_HAVE_QUAD -MIN_MAX_CRITICAL( float16, max, QUAD_LEGACY, <, 16r, 1 ) // __kmpc_atomic_float16_max -MIN_MAX_CRITICAL( float16, min, QUAD_LEGACY, >, 16r, 1 ) // __kmpc_atomic_float16_min -#if ( KMP_ARCH_X86 ) - MIN_MAX_CRITICAL( float16, max_a16, Quad_a16_t, <, 16r, 1 ) // __kmpc_atomic_float16_max_a16 - MIN_MAX_CRITICAL( float16, min_a16, Quad_a16_t, >, 16r, 1 ) // __kmpc_atomic_float16_min_a16 -#endif -#endif -// ------------------------------------------------------------------------ -// Need separate macros for .EQV. 
because of the need of complement (~) -// OP ignored for critical sections, ^=~ used instead -#define ATOMIC_CRIT_EQV(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \ -ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ - OP_GOMP_CRITICAL(^=~,GOMP_FLAG) /* send assignment */ \ - OP_CRITICAL(^=~,LCK_ID) /* send assignment and complement */ \ -} - -// ------------------------------------------------------------------------ -#if KMP_ARCH_X86 || KMP_ARCH_X86_64 -// ------------------------------------------------------------------------ -// X86 or X86_64: no alignment problems =================================== -#define ATOMIC_CMPX_EQV(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \ -ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ - OP_GOMP_CRITICAL(^=~,GOMP_FLAG) /* send assignment */ \ - OP_CMPXCHG(TYPE,BITS,OP) \ -} -// ------------------------------------------------------------------------ -#else -// ------------------------------------------------------------------------ -// Code for other architectures that don't handle unaligned accesses. -#define ATOMIC_CMPX_EQV(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \ -ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ - OP_GOMP_CRITICAL(^=~,GOMP_FLAG) \ - if ( ! ( (kmp_uintptr_t) lhs & 0x##MASK) ) { \ - OP_CMPXCHG(TYPE,BITS,OP) /* aligned address */ \ - } else { \ - KMP_CHECK_GTID; \ - OP_CRITICAL(^=~,LCK_ID) /* unaligned address - use critical */ \ - } \ -} -#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */ - -ATOMIC_CMPXCHG( fixed1, neqv, kmp_int8, 8, ^, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_neqv -ATOMIC_CMPXCHG( fixed2, neqv, kmp_int16, 16, ^, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_neqv -ATOMIC_CMPXCHG( fixed4, neqv, kmp_int32, 32, ^, 4i, 3, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_neqv -ATOMIC_CMPXCHG( fixed8, neqv, kmp_int64, 64, ^, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_neqv -ATOMIC_CMPX_EQV( fixed1, eqv, kmp_int8, 8, ^~, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_eqv -ATOMIC_CMPX_EQV( fixed2, eqv, kmp_int16, 16, ^~, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_eqv -ATOMIC_CMPX_EQV( fixed4, eqv, kmp_int32, 32, ^~, 4i, 3, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_eqv -ATOMIC_CMPX_EQV( fixed8, eqv, kmp_int64, 64, ^~, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_eqv - - -// ------------------------------------------------------------------------ -// Routines for Extended types: long double, _Quad, complex flavours (use critical section) -// TYPE_ID, OP_ID, TYPE - detailed above -// OP - operator -// LCK_ID - lock identifier, used to possibly distinguish lock variable -#define ATOMIC_CRITICAL(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \ -ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ - OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) /* send assignment */ \ - OP_CRITICAL(OP##=,LCK_ID) /* send assignment */ \ -} - -/* ------------------------------------------------------------------------- */ -// routines for long double type -ATOMIC_CRITICAL( float10, add, long double, +, 10r, 1 ) // __kmpc_atomic_float10_add -ATOMIC_CRITICAL( float10, sub, long double, -, 10r, 1 ) // __kmpc_atomic_float10_sub -ATOMIC_CRITICAL( float10, mul, long double, *, 10r, 1 ) // __kmpc_atomic_float10_mul -ATOMIC_CRITICAL( float10, div, long double, /, 10r, 1 ) // __kmpc_atomic_float10_div -#if KMP_HAVE_QUAD -// routines for _Quad type -ATOMIC_CRITICAL( float16, add, QUAD_LEGACY, +, 16r, 1 ) // __kmpc_atomic_float16_add -ATOMIC_CRITICAL( float16, sub, QUAD_LEGACY, -, 16r, 1 ) // __kmpc_atomic_float16_sub -ATOMIC_CRITICAL( float16, mul, QUAD_LEGACY, *, 16r, 1 ) // __kmpc_atomic_float16_mul 
-ATOMIC_CRITICAL( float16, div, QUAD_LEGACY, /, 16r, 1 ) // __kmpc_atomic_float16_div -#if ( KMP_ARCH_X86 ) - ATOMIC_CRITICAL( float16, add_a16, Quad_a16_t, +, 16r, 1 ) // __kmpc_atomic_float16_add_a16 - ATOMIC_CRITICAL( float16, sub_a16, Quad_a16_t, -, 16r, 1 ) // __kmpc_atomic_float16_sub_a16 - ATOMIC_CRITICAL( float16, mul_a16, Quad_a16_t, *, 16r, 1 ) // __kmpc_atomic_float16_mul_a16 - ATOMIC_CRITICAL( float16, div_a16, Quad_a16_t, /, 16r, 1 ) // __kmpc_atomic_float16_div_a16 -#endif -#endif -// routines for complex types - -#if USE_CMPXCHG_FIX -// workaround for C78287 (complex(kind=4) data type) -ATOMIC_CMPXCHG_WORKAROUND( cmplx4, add, kmp_cmplx32, 64, +, 8c, 7, 1 ) // __kmpc_atomic_cmplx4_add -ATOMIC_CMPXCHG_WORKAROUND( cmplx4, sub, kmp_cmplx32, 64, -, 8c, 7, 1 ) // __kmpc_atomic_cmplx4_sub -ATOMIC_CMPXCHG_WORKAROUND( cmplx4, mul, kmp_cmplx32, 64, *, 8c, 7, 1 ) // __kmpc_atomic_cmplx4_mul -ATOMIC_CMPXCHG_WORKAROUND( cmplx4, div, kmp_cmplx32, 64, /, 8c, 7, 1 ) // __kmpc_atomic_cmplx4_div -// end of the workaround for C78287 -#else -ATOMIC_CRITICAL( cmplx4, add, kmp_cmplx32, +, 8c, 1 ) // __kmpc_atomic_cmplx4_add -ATOMIC_CRITICAL( cmplx4, sub, kmp_cmplx32, -, 8c, 1 ) // __kmpc_atomic_cmplx4_sub -ATOMIC_CRITICAL( cmplx4, mul, kmp_cmplx32, *, 8c, 1 ) // __kmpc_atomic_cmplx4_mul -ATOMIC_CRITICAL( cmplx4, div, kmp_cmplx32, /, 8c, 1 ) // __kmpc_atomic_cmplx4_div -#endif // USE_CMPXCHG_FIX - -ATOMIC_CRITICAL( cmplx8, add, kmp_cmplx64, +, 16c, 1 ) // __kmpc_atomic_cmplx8_add -ATOMIC_CRITICAL( cmplx8, sub, kmp_cmplx64, -, 16c, 1 ) // __kmpc_atomic_cmplx8_sub -ATOMIC_CRITICAL( cmplx8, mul, kmp_cmplx64, *, 16c, 1 ) // __kmpc_atomic_cmplx8_mul -ATOMIC_CRITICAL( cmplx8, div, kmp_cmplx64, /, 16c, 1 ) // __kmpc_atomic_cmplx8_div -ATOMIC_CRITICAL( cmplx10, add, kmp_cmplx80, +, 20c, 1 ) // __kmpc_atomic_cmplx10_add -ATOMIC_CRITICAL( cmplx10, sub, kmp_cmplx80, -, 20c, 1 ) // __kmpc_atomic_cmplx10_sub -ATOMIC_CRITICAL( cmplx10, mul, kmp_cmplx80, *, 20c, 1 ) // __kmpc_atomic_cmplx10_mul -ATOMIC_CRITICAL( cmplx10, div, kmp_cmplx80, /, 20c, 1 ) // __kmpc_atomic_cmplx10_div -#if KMP_HAVE_QUAD -ATOMIC_CRITICAL( cmplx16, add, CPLX128_LEG, +, 32c, 1 ) // __kmpc_atomic_cmplx16_add -ATOMIC_CRITICAL( cmplx16, sub, CPLX128_LEG, -, 32c, 1 ) // __kmpc_atomic_cmplx16_sub -ATOMIC_CRITICAL( cmplx16, mul, CPLX128_LEG, *, 32c, 1 ) // __kmpc_atomic_cmplx16_mul -ATOMIC_CRITICAL( cmplx16, div, CPLX128_LEG, /, 32c, 1 ) // __kmpc_atomic_cmplx16_div -#if ( KMP_ARCH_X86 ) - ATOMIC_CRITICAL( cmplx16, add_a16, kmp_cmplx128_a16_t, +, 32c, 1 ) // __kmpc_atomic_cmplx16_add_a16 - ATOMIC_CRITICAL( cmplx16, sub_a16, kmp_cmplx128_a16_t, -, 32c, 1 ) // __kmpc_atomic_cmplx16_sub_a16 - ATOMIC_CRITICAL( cmplx16, mul_a16, kmp_cmplx128_a16_t, *, 32c, 1 ) // __kmpc_atomic_cmplx16_mul_a16 - ATOMIC_CRITICAL( cmplx16, div_a16, kmp_cmplx128_a16_t, /, 32c, 1 ) // __kmpc_atomic_cmplx16_div_a16 -#endif -#endif - -#if OMP_40_ENABLED - -// OpenMP 4.0: x = expr binop x for non-commutative operations. 
-// Supported only on IA-32 architecture and Intel(R) 64 -#if KMP_ARCH_X86 || KMP_ARCH_X86_64 - -// ------------------------------------------------------------------------ -// Operation on *lhs, rhs bound by critical section -// OP - operator (it's supposed to contain an assignment) -// LCK_ID - lock identifier -// Note: don't check gtid as it should always be valid -// 1, 2-byte - expect valid parameter, other - check before this macro -#define OP_CRITICAL_REV(OP,LCK_ID) \ - __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ - \ - (*lhs) = (rhs) OP (*lhs); \ - \ - __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); - -#ifdef KMP_GOMP_COMPAT -#define OP_GOMP_CRITICAL_REV(OP,FLAG) \ - if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \ - KMP_CHECK_GTID; \ - OP_CRITICAL_REV( OP, 0 ); \ - return; \ - } -#else -#define OP_GOMP_CRITICAL_REV(OP,FLAG) -#endif /* KMP_GOMP_COMPAT */ - - -// Beginning of a definition (provides name, parameters, debug trace) -// TYPE_ID - operands type and size (fixed*, fixed*u for signed, unsigned fixed) -// OP_ID - operation identifier (add, sub, mul, ...) -// TYPE - operands' type -#define ATOMIC_BEGIN_REV(TYPE_ID,OP_ID,TYPE, RET_TYPE) \ -RET_TYPE __kmpc_atomic_##TYPE_ID##_##OP_ID##_rev( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs ) \ -{ \ - KMP_DEBUG_ASSERT( __kmp_init_serial ); \ - KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_" #OP_ID "_rev: T#%d\n", gtid )); - -// ------------------------------------------------------------------------ -// Operation on *lhs, rhs using "compare_and_store" routine -// TYPE - operands' type -// BITS - size in bits, used to distinguish low level calls -// OP - operator -// Note: temp_val introduced in order to force the compiler to read -// *lhs only once (w/o it the compiler reads *lhs twice) -#define OP_CMPXCHG_REV(TYPE,BITS,OP) \ - { \ - TYPE KMP_ATOMIC_VOLATILE temp_val; \ - TYPE old_value, new_value; \ - temp_val = *lhs; \ - old_value = temp_val; \ - new_value = rhs OP old_value; \ - while ( ! KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \ - *VOLATILE_CAST(kmp_int##BITS *) &old_value, \ - *VOLATILE_CAST(kmp_int##BITS *) &new_value ) ) \ - { \ - KMP_DO_PAUSE; \ - \ - temp_val = *lhs; \ - old_value = temp_val; \ - new_value = rhs OP old_value; \ - } \ - } - -// ------------------------------------------------------------------------- -#define ATOMIC_CMPXCHG_REV(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,GOMP_FLAG) \ -ATOMIC_BEGIN_REV(TYPE_ID,OP_ID,TYPE,void) \ - OP_GOMP_CRITICAL_REV(OP,GOMP_FLAG) \ - OP_CMPXCHG_REV(TYPE,BITS,OP) \ -} - -// ------------------------------------------------------------------------ -// Entries definition for integer operands -// TYPE_ID - operands type and size (fixed4, float4) -// OP_ID - operation identifier (add, sub, mul, ...)
-// TYPE - operand type -// BITS - size in bits, used to distinguish low level calls -// OP - operator (used in critical section) -// LCK_ID - lock identifier, used to possibly distinguish lock variable - -// TYPE_ID,OP_ID, TYPE, BITS,OP,LCK_ID,GOMP_FLAG -// ------------------------------------------------------------------------ -// Routines for ATOMIC integer operands, other operators -// ------------------------------------------------------------------------ -// TYPE_ID,OP_ID, TYPE, BITS, OP, LCK_ID, GOMP_FLAG -ATOMIC_CMPXCHG_REV( fixed1, div, kmp_int8, 8, /, 1i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_div_rev -ATOMIC_CMPXCHG_REV( fixed1u, div, kmp_uint8, 8, /, 1i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_div_rev -ATOMIC_CMPXCHG_REV( fixed1, shl, kmp_int8, 8, <<, 1i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_shl_rev -ATOMIC_CMPXCHG_REV( fixed1, shr, kmp_int8, 8, >>, 1i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_shr_rev -ATOMIC_CMPXCHG_REV( fixed1u, shr, kmp_uint8, 8, >>, 1i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_shr_rev -ATOMIC_CMPXCHG_REV( fixed1, sub, kmp_int8, 8, -, 1i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_sub_rev - -ATOMIC_CMPXCHG_REV( fixed2, div, kmp_int16, 16, /, 2i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_div_rev -ATOMIC_CMPXCHG_REV( fixed2u, div, kmp_uint16, 16, /, 2i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_div_rev -ATOMIC_CMPXCHG_REV( fixed2, shl, kmp_int16, 16, <<, 2i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_shl_rev -ATOMIC_CMPXCHG_REV( fixed2, shr, kmp_int16, 16, >>, 2i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_shr_rev -ATOMIC_CMPXCHG_REV( fixed2u, shr, kmp_uint16, 16, >>, 2i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_shr_rev -ATOMIC_CMPXCHG_REV( fixed2, sub, kmp_int16, 16, -, 2i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_sub_rev - -ATOMIC_CMPXCHG_REV( fixed4, div, kmp_int32, 32, /, 4i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_div_rev -ATOMIC_CMPXCHG_REV( fixed4u, div, kmp_uint32, 32, /, 4i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4u_div_rev -ATOMIC_CMPXCHG_REV( fixed4, shl, kmp_int32, 32, <<, 4i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_shl_rev -ATOMIC_CMPXCHG_REV( fixed4, shr, kmp_int32, 32, >>, 4i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_shr_rev -ATOMIC_CMPXCHG_REV( fixed4u, shr, kmp_uint32, 32, >>, 4i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4u_shr_rev -ATOMIC_CMPXCHG_REV( fixed4, sub, kmp_int32, 32, -, 4i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_sub_rev - -ATOMIC_CMPXCHG_REV( fixed8, div, kmp_int64, 64, /, 8i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_div_rev -ATOMIC_CMPXCHG_REV( fixed8u, div, kmp_uint64, 64, /, 8i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_div_rev -ATOMIC_CMPXCHG_REV( fixed8, shl, kmp_int64, 64, <<, 8i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_shl_rev -ATOMIC_CMPXCHG_REV( fixed8, shr, kmp_int64, 64, >>, 8i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_shr_rev -ATOMIC_CMPXCHG_REV( fixed8u, shr, kmp_uint64, 64, >>, 8i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_shr_rev -ATOMIC_CMPXCHG_REV( fixed8, sub, kmp_int64, 64, -, 8i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_sub_rev - -ATOMIC_CMPXCHG_REV( float4, div, kmp_real32, 32, /, 4r, KMP_ARCH_X86 ) // __kmpc_atomic_float4_div_rev -ATOMIC_CMPXCHG_REV( float4, sub, kmp_real32, 32, -, 4r, KMP_ARCH_X86 ) // __kmpc_atomic_float4_sub_rev - -ATOMIC_CMPXCHG_REV( float8, div, kmp_real64, 64, /, 8r, KMP_ARCH_X86 ) // __kmpc_atomic_float8_div_rev -ATOMIC_CMPXCHG_REV( float8, sub, kmp_real64, 64, -, 8r, KMP_ARCH_X86 ) // __kmpc_atomic_float8_sub_rev -// TYPE_ID,OP_ID, TYPE, BITS,OP,LCK_ID, GOMP_FLAG - -// 
------------------------------------------------------------------------ -// Routines for Extended types: long double, _Quad, complex flavours (use critical section) -// TYPE_ID, OP_ID, TYPE - detailed above -// OP - operator -// LCK_ID - lock identifier, used to possibly distinguish lock variable -#define ATOMIC_CRITICAL_REV(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \ -ATOMIC_BEGIN_REV(TYPE_ID,OP_ID,TYPE,void) \ - OP_GOMP_CRITICAL_REV(OP,GOMP_FLAG) \ - OP_CRITICAL_REV(OP,LCK_ID) \ -} - -/* ------------------------------------------------------------------------- */ -// routines for long double type -ATOMIC_CRITICAL_REV( float10, sub, long double, -, 10r, 1 ) // __kmpc_atomic_float10_sub_rev -ATOMIC_CRITICAL_REV( float10, div, long double, /, 10r, 1 ) // __kmpc_atomic_float10_div_rev -#if KMP_HAVE_QUAD -// routines for _Quad type -ATOMIC_CRITICAL_REV( float16, sub, QUAD_LEGACY, -, 16r, 1 ) // __kmpc_atomic_float16_sub_rev -ATOMIC_CRITICAL_REV( float16, div, QUAD_LEGACY, /, 16r, 1 ) // __kmpc_atomic_float16_div_rev -#if ( KMP_ARCH_X86 ) - ATOMIC_CRITICAL_REV( float16, sub_a16, Quad_a16_t, -, 16r, 1 ) // __kmpc_atomic_float16_sub_a16_rev - ATOMIC_CRITICAL_REV( float16, div_a16, Quad_a16_t, /, 16r, 1 ) // __kmpc_atomic_float16_div_a16_rev -#endif -#endif - -// routines for complex types -ATOMIC_CRITICAL_REV( cmplx4, sub, kmp_cmplx32, -, 8c, 1 ) // __kmpc_atomic_cmplx4_sub_rev -ATOMIC_CRITICAL_REV( cmplx4, div, kmp_cmplx32, /, 8c, 1 ) // __kmpc_atomic_cmplx4_div_rev -ATOMIC_CRITICAL_REV( cmplx8, sub, kmp_cmplx64, -, 16c, 1 ) // __kmpc_atomic_cmplx8_sub_rev -ATOMIC_CRITICAL_REV( cmplx8, div, kmp_cmplx64, /, 16c, 1 ) // __kmpc_atomic_cmplx8_div_rev -ATOMIC_CRITICAL_REV( cmplx10, sub, kmp_cmplx80, -, 20c, 1 ) // __kmpc_atomic_cmplx10_sub_rev -ATOMIC_CRITICAL_REV( cmplx10, div, kmp_cmplx80, /, 20c, 1 ) // __kmpc_atomic_cmplx10_div_rev -#if KMP_HAVE_QUAD -ATOMIC_CRITICAL_REV( cmplx16, sub, CPLX128_LEG, -, 32c, 1 ) // __kmpc_atomic_cmplx16_sub_rev -ATOMIC_CRITICAL_REV( cmplx16, div, CPLX128_LEG, /, 32c, 1 ) // __kmpc_atomic_cmplx16_div_rev -#if ( KMP_ARCH_X86 ) - ATOMIC_CRITICAL_REV( cmplx16, sub_a16, kmp_cmplx128_a16_t, -, 32c, 1 ) // __kmpc_atomic_cmplx16_sub_a16_rev - ATOMIC_CRITICAL_REV( cmplx16, div_a16, kmp_cmplx128_a16_t, /, 32c, 1 ) // __kmpc_atomic_cmplx16_div_a16_rev -#endif -#endif - - -#endif //KMP_ARCH_X86 || KMP_ARCH_X86_64 -// End of OpenMP 4.0: x = expr binop x for non-commutative operations. 
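To make the layering above concrete: expanding one table entry by hand, ATOMIC_CMPXCHG_REV( fixed4, sub, kmp_int32, 32, -, 4i, KMP_ARCH_X86 ), yields approximately the following (a hedged reconstruction from the macros above, with the trace call and the GOMP critical-section fallback elided, not actual preprocessor output):

void __kmpc_atomic_fixed4_sub_rev( ident_t *id_ref, int gtid, kmp_int32 * lhs, kmp_int32 rhs )
{
    KMP_DEBUG_ASSERT( __kmp_init_serial );
    // OP_GOMP_CRITICAL_REV(-, KMP_ARCH_X86) would take the lock-based path
    // here when __kmp_atomic_mode == 2.
    kmp_int32 KMP_ATOMIC_VOLATILE temp_val = *lhs;   // read *lhs exactly once
    kmp_int32 old_value = temp_val;
    kmp_int32 new_value = rhs - old_value;           // reversed operand order
    while ( ! KMP_COMPARE_AND_STORE_ACQ32( (kmp_int32 *) lhs,
                *VOLATILE_CAST(kmp_int32 *) &old_value,
                *VOLATILE_CAST(kmp_int32 *) &new_value ) ) {
        KMP_DO_PAUSE;
        temp_val = *lhs;
        old_value = temp_val;
        new_value = rhs - old_value;
    }
}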
- -#endif //OMP_40_ENABLED - - -/* ------------------------------------------------------------------------ */ -/* Routines for mixed types of LHS and RHS, when RHS is "larger" */ -/* Note: in order to reduce the total number of types combinations */ -/* it is supposed that compiler converts RHS to longest floating type,*/ -/* that is _Quad, before call to any of these routines */ -/* Conversion to _Quad will be done by the compiler during calculation, */ -/* conversion back to TYPE - before the assignment, like: */ -/* *lhs = (TYPE)( (_Quad)(*lhs) OP rhs ) */ -/* Performance penalty expected because of SW emulation use */ -/* ------------------------------------------------------------------------ */ - -#define ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \ -void __kmpc_atomic_##TYPE_ID##_##OP_ID##_##RTYPE_ID( ident_t *id_ref, int gtid, TYPE * lhs, RTYPE rhs ) \ -{ \ - KMP_DEBUG_ASSERT( __kmp_init_serial ); \ - KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_" #OP_ID "_" #RTYPE_ID ": T#%d\n", gtid )); - -// ------------------------------------------------------------------------- -#define ATOMIC_CRITICAL_FP(TYPE_ID,TYPE,OP_ID,OP,RTYPE_ID,RTYPE,LCK_ID,GOMP_FLAG) \ -ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \ - OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) /* send assignment */ \ - OP_CRITICAL(OP##=,LCK_ID) /* send assignment */ \ -} - -// ------------------------------------------------------------------------- -#if KMP_ARCH_X86 || KMP_ARCH_X86_64 -// ------------------------------------------------------------------------- -// X86 or X86_64: no alignment problems ==================================== -#define ATOMIC_CMPXCHG_MIX(TYPE_ID,TYPE,OP_ID,BITS,OP,RTYPE_ID,RTYPE,LCK_ID,MASK,GOMP_FLAG) \ -ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \ - OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \ - OP_CMPXCHG(TYPE,BITS,OP) \ -} -// ------------------------------------------------------------------------- -#else -// ------------------------------------------------------------------------ -// Code for other architectures that don't handle unaligned accesses. -#define ATOMIC_CMPXCHG_MIX(TYPE_ID,TYPE,OP_ID,BITS,OP,RTYPE_ID,RTYPE,LCK_ID,MASK,GOMP_FLAG) \ -ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \ - OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \ - if ( ! 
( (kmp_uintptr_t) lhs & 0x##MASK) ) { \ - OP_CMPXCHG(TYPE,BITS,OP) /* aligned address */ \ - } else { \ - KMP_CHECK_GTID; \ - OP_CRITICAL(OP##=,LCK_ID) /* unaligned address - use critical */ \ - } \ -} -#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */ - -// ------------------------------------------------------------------------- -#if KMP_ARCH_X86 || KMP_ARCH_X86_64 -// ------------------------------------------------------------------------- -#define ATOMIC_CMPXCHG_REV_MIX(TYPE_ID,TYPE,OP_ID,BITS,OP,RTYPE_ID,RTYPE,LCK_ID,MASK,GOMP_FLAG) \ -ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \ - OP_GOMP_CRITICAL_REV(OP,GOMP_FLAG) \ - OP_CMPXCHG_REV(TYPE,BITS,OP) \ -} -#define ATOMIC_CRITICAL_REV_FP(TYPE_ID,TYPE,OP_ID,OP,RTYPE_ID,RTYPE,LCK_ID,GOMP_FLAG) \ -ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \ - OP_GOMP_CRITICAL_REV(OP,GOMP_FLAG) \ - OP_CRITICAL_REV(OP,LCK_ID) \ -} -#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */ - -// RHS=float8 -ATOMIC_CMPXCHG_MIX( fixed1, char, mul, 8, *, float8, kmp_real64, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_mul_float8 -ATOMIC_CMPXCHG_MIX( fixed1, char, div, 8, /, float8, kmp_real64, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_div_float8 -ATOMIC_CMPXCHG_MIX( fixed2, short, mul, 16, *, float8, kmp_real64, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_mul_float8 -ATOMIC_CMPXCHG_MIX( fixed2, short, div, 16, /, float8, kmp_real64, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_div_float8 -ATOMIC_CMPXCHG_MIX( fixed4, kmp_int32, mul, 32, *, float8, kmp_real64, 4i, 3, 0 ) // __kmpc_atomic_fixed4_mul_float8 -ATOMIC_CMPXCHG_MIX( fixed4, kmp_int32, div, 32, /, float8, kmp_real64, 4i, 3, 0 ) // __kmpc_atomic_fixed4_div_float8 -ATOMIC_CMPXCHG_MIX( fixed8, kmp_int64, mul, 64, *, float8, kmp_real64, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_mul_float8 -ATOMIC_CMPXCHG_MIX( fixed8, kmp_int64, div, 64, /, float8, kmp_real64, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_div_float8 -ATOMIC_CMPXCHG_MIX( float4, kmp_real32, add, 32, +, float8, kmp_real64, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_add_float8 -ATOMIC_CMPXCHG_MIX( float4, kmp_real32, sub, 32, -, float8, kmp_real64, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_sub_float8 -ATOMIC_CMPXCHG_MIX( float4, kmp_real32, mul, 32, *, float8, kmp_real64, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_mul_float8 -ATOMIC_CMPXCHG_MIX( float4, kmp_real32, div, 32, /, float8, kmp_real64, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_div_float8 - -// RHS=float16 (deprecated, to be removed when we are sure the compiler does not use them) -#if KMP_HAVE_QUAD -ATOMIC_CMPXCHG_MIX( fixed1, char, add, 8, +, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_add_fp -ATOMIC_CMPXCHG_MIX( fixed1u, uchar, add, 8, +, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_add_fp -ATOMIC_CMPXCHG_MIX( fixed1, char, sub, 8, -, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_sub_fp -ATOMIC_CMPXCHG_MIX( fixed1u, uchar, sub, 8, -, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_sub_fp -ATOMIC_CMPXCHG_MIX( fixed1, char, mul, 8, *, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_mul_fp -ATOMIC_CMPXCHG_MIX( fixed1u, uchar, mul, 8, *, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_mul_fp -ATOMIC_CMPXCHG_MIX( fixed1, char, div, 8, /, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_div_fp -ATOMIC_CMPXCHG_MIX( fixed1u, uchar, div, 8, /, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_div_fp - -ATOMIC_CMPXCHG_MIX( fixed2, short, add, 16, +, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // 
__kmpc_atomic_fixed2_add_fp -ATOMIC_CMPXCHG_MIX( fixed2u, ushort, add, 16, +, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_add_fp -ATOMIC_CMPXCHG_MIX( fixed2, short, sub, 16, -, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_sub_fp -ATOMIC_CMPXCHG_MIX( fixed2u, ushort, sub, 16, -, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_sub_fp -ATOMIC_CMPXCHG_MIX( fixed2, short, mul, 16, *, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_mul_fp -ATOMIC_CMPXCHG_MIX( fixed2u, ushort, mul, 16, *, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_mul_fp -ATOMIC_CMPXCHG_MIX( fixed2, short, div, 16, /, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_div_fp -ATOMIC_CMPXCHG_MIX( fixed2u, ushort, div, 16, /, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_div_fp - -ATOMIC_CMPXCHG_MIX( fixed4, kmp_int32, add, 32, +, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4_add_fp -ATOMIC_CMPXCHG_MIX( fixed4u, kmp_uint32, add, 32, +, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4u_add_fp -ATOMIC_CMPXCHG_MIX( fixed4, kmp_int32, sub, 32, -, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4_sub_fp -ATOMIC_CMPXCHG_MIX( fixed4u, kmp_uint32, sub, 32, -, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4u_sub_fp -ATOMIC_CMPXCHG_MIX( fixed4, kmp_int32, mul, 32, *, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4_mul_fp -ATOMIC_CMPXCHG_MIX( fixed4u, kmp_uint32, mul, 32, *, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4u_mul_fp -ATOMIC_CMPXCHG_MIX( fixed4, kmp_int32, div, 32, /, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4_div_fp -ATOMIC_CMPXCHG_MIX( fixed4u, kmp_uint32, div, 32, /, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4u_div_fp - -ATOMIC_CMPXCHG_MIX( fixed8, kmp_int64, add, 64, +, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_add_fp -ATOMIC_CMPXCHG_MIX( fixed8u, kmp_uint64, add, 64, +, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_add_fp -ATOMIC_CMPXCHG_MIX( fixed8, kmp_int64, sub, 64, -, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_sub_fp -ATOMIC_CMPXCHG_MIX( fixed8u, kmp_uint64, sub, 64, -, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_sub_fp -ATOMIC_CMPXCHG_MIX( fixed8, kmp_int64, mul, 64, *, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_mul_fp -ATOMIC_CMPXCHG_MIX( fixed8u, kmp_uint64, mul, 64, *, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_mul_fp -ATOMIC_CMPXCHG_MIX( fixed8, kmp_int64, div, 64, /, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_div_fp -ATOMIC_CMPXCHG_MIX( fixed8u, kmp_uint64, div, 64, /, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_div_fp - -ATOMIC_CMPXCHG_MIX( float4, kmp_real32, add, 32, +, fp, _Quad, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_add_fp -ATOMIC_CMPXCHG_MIX( float4, kmp_real32, sub, 32, -, fp, _Quad, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_sub_fp -ATOMIC_CMPXCHG_MIX( float4, kmp_real32, mul, 32, *, fp, _Quad, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_mul_fp -ATOMIC_CMPXCHG_MIX( float4, kmp_real32, div, 32, /, fp, _Quad, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_div_fp - -ATOMIC_CMPXCHG_MIX( float8, kmp_real64, add, 64, +, fp, _Quad, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_add_fp -ATOMIC_CMPXCHG_MIX( float8, kmp_real64, sub, 64, -, fp, _Quad, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_sub_fp -ATOMIC_CMPXCHG_MIX( float8, kmp_real64, mul, 64, *, fp, _Quad, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_mul_fp -ATOMIC_CMPXCHG_MIX( float8, kmp_real64, div, 64, /, fp, _Quad, 8r, 7, KMP_ARCH_X86 ) // 
__kmpc_atomic_float8_div_fp - -ATOMIC_CRITICAL_FP( float10, long double, add, +, fp, _Quad, 10r, 1 ) // __kmpc_atomic_float10_add_fp -ATOMIC_CRITICAL_FP( float10, long double, sub, -, fp, _Quad, 10r, 1 ) // __kmpc_atomic_float10_sub_fp -ATOMIC_CRITICAL_FP( float10, long double, mul, *, fp, _Quad, 10r, 1 ) // __kmpc_atomic_float10_mul_fp -ATOMIC_CRITICAL_FP( float10, long double, div, /, fp, _Quad, 10r, 1 ) // __kmpc_atomic_float10_div_fp - -#if KMP_ARCH_X86 || KMP_ARCH_X86_64 -// Reverse operations -ATOMIC_CMPXCHG_REV_MIX( fixed1, char, sub_rev, 8, -, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_sub_rev_fp -ATOMIC_CMPXCHG_REV_MIX( fixed1u, uchar, sub_rev, 8, -, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_sub_rev_fp -ATOMIC_CMPXCHG_REV_MIX( fixed1, char, div_rev, 8, /, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_div_rev_fp -ATOMIC_CMPXCHG_REV_MIX( fixed1u, uchar, div_rev, 8, /, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_div_rev_fp - -ATOMIC_CMPXCHG_REV_MIX( fixed2, short, sub_rev, 16, -, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_sub_rev_fp -ATOMIC_CMPXCHG_REV_MIX( fixed2u, ushort, sub_rev, 16, -, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_sub_rev_fp -ATOMIC_CMPXCHG_REV_MIX( fixed2, short, div_rev, 16, /, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_div_rev_fp -ATOMIC_CMPXCHG_REV_MIX( fixed2u, ushort, div_rev, 16, /, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_div_rev_fp - -ATOMIC_CMPXCHG_REV_MIX( fixed4, kmp_int32, sub_rev, 32, -, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4_sub_rev_fp -ATOMIC_CMPXCHG_REV_MIX( fixed4u, kmp_uint32, sub_rev, 32, -, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4u_sub_rev_fp -ATOMIC_CMPXCHG_REV_MIX( fixed4, kmp_int32, div_rev, 32, /, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4_div_rev_fp -ATOMIC_CMPXCHG_REV_MIX( fixed4u, kmp_uint32, div_rev, 32, /, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4u_div_rev_fp - -ATOMIC_CMPXCHG_REV_MIX( fixed8, kmp_int64, sub_rev, 64, -, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_sub_rev_fp -ATOMIC_CMPXCHG_REV_MIX( fixed8u, kmp_uint64, sub_rev, 64, -, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_sub_rev_fp -ATOMIC_CMPXCHG_REV_MIX( fixed8, kmp_int64, div_rev, 64, /, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_div_rev_fp -ATOMIC_CMPXCHG_REV_MIX( fixed8u, kmp_uint64, div_rev, 64, /, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_div_rev_fp - -ATOMIC_CMPXCHG_REV_MIX( float4, kmp_real32, sub_rev, 32, -, fp, _Quad, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_sub_rev_fp -ATOMIC_CMPXCHG_REV_MIX( float4, kmp_real32, div_rev, 32, /, fp, _Quad, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_div_rev_fp - -ATOMIC_CMPXCHG_REV_MIX( float8, kmp_real64, sub_rev, 64, -, fp, _Quad, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_sub_rev_fp -ATOMIC_CMPXCHG_REV_MIX( float8, kmp_real64, div_rev, 64, /, fp, _Quad, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_div_rev_fp - -ATOMIC_CRITICAL_REV_FP( float10, long double, sub_rev, -, fp, _Quad, 10r, 1 ) // __kmpc_atomic_float10_sub_rev_fp -ATOMIC_CRITICAL_REV_FP( float10, long double, div_rev, /, fp, _Quad, 10r, 1 ) // __kmpc_atomic_float10_div_rev_fp -#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */ - -#endif - -#if KMP_ARCH_X86 || KMP_ARCH_X86_64 -// ------------------------------------------------------------------------ -// X86 or X86_64: no alignment problems ==================================== -#if USE_CMPXCHG_FIX -// workaround for C78287 
(complex(kind=4) data type) -#define ATOMIC_CMPXCHG_CMPLX(TYPE_ID,TYPE,OP_ID,BITS,OP,RTYPE_ID,RTYPE,LCK_ID,MASK,GOMP_FLAG) \ -ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \ - OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \ - OP_CMPXCHG_WORKAROUND(TYPE,BITS,OP) \ -} -// end of the second part of the workaround for C78287 -#else -#define ATOMIC_CMPXCHG_CMPLX(TYPE_ID,TYPE,OP_ID,BITS,OP,RTYPE_ID,RTYPE,LCK_ID,MASK,GOMP_FLAG) \ -ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \ - OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \ - OP_CMPXCHG(TYPE,BITS,OP) \ -} -#endif // USE_CMPXCHG_FIX -#else -// ------------------------------------------------------------------------ -// Code for other architectures that don't handle unaligned accesses. -#define ATOMIC_CMPXCHG_CMPLX(TYPE_ID,TYPE,OP_ID,BITS,OP,RTYPE_ID,RTYPE,LCK_ID,MASK,GOMP_FLAG) \ -ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \ - OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \ - if ( ! ( (kmp_uintptr_t) lhs & 0x##MASK) ) { \ - OP_CMPXCHG(TYPE,BITS,OP) /* aligned address */ \ - } else { \ - KMP_CHECK_GTID; \ - OP_CRITICAL(OP##=,LCK_ID) /* unaligned address - use critical */ \ - } \ -} -#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */ - -ATOMIC_CMPXCHG_CMPLX( cmplx4, kmp_cmplx32, add, 64, +, cmplx8, kmp_cmplx64, 8c, 7, KMP_ARCH_X86 ) // __kmpc_atomic_cmplx4_add_cmplx8 -ATOMIC_CMPXCHG_CMPLX( cmplx4, kmp_cmplx32, sub, 64, -, cmplx8, kmp_cmplx64, 8c, 7, KMP_ARCH_X86 ) // __kmpc_atomic_cmplx4_sub_cmplx8 -ATOMIC_CMPXCHG_CMPLX( cmplx4, kmp_cmplx32, mul, 64, *, cmplx8, kmp_cmplx64, 8c, 7, KMP_ARCH_X86 ) // __kmpc_atomic_cmplx4_mul_cmplx8 -ATOMIC_CMPXCHG_CMPLX( cmplx4, kmp_cmplx32, div, 64, /, cmplx8, kmp_cmplx64, 8c, 7, KMP_ARCH_X86 ) // __kmpc_atomic_cmplx4_div_cmplx8 - -// READ, WRITE, CAPTURE are supported only on IA-32 architecture and Intel(R) 64 -#if KMP_ARCH_X86 || KMP_ARCH_X86_64 - -////////////////////////////////////////////////////////////////////////////////////////////////////// -// ------------------------------------------------------------------------ -// Atomic READ routines -// ------------------------------------------------------------------------ - -// ------------------------------------------------------------------------ -// Beginning of a definition (provides name, parameters, debug trace) -// TYPE_ID - operands type and size (fixed*, fixed*u for signed, unsigned fixed) -// OP_ID - operation identifier (add, sub, mul, ...)
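The read routines defined next use two strategies, restated here in portable C++ terms as a sketch (std::atomic stands in for KMP_TEST_THEN_ADD* and KMP_COMPARE_AND_STORE_RET*; the function names are illustrative). The parameter notes for the macro continue right after the sketch.

#include <atomic>
#include <cstdint>

// ATOMIC_FIXED_READ: a fetch-and-add of zero returns the current value,
// so an atomic add is reused as an atomic load.
std::int64_t atomic_read_fixed8(std::atomic<std::int64_t> &loc) {
    return loc.fetch_add(0);
}

// OP_CMPXCHG_READ: for floating types, compare-and-swap the value with
// itself; whether the CAS hits or misses, `expected` ends up holding a
// value that was read from `loc` atomically.
double atomic_read_float8(std::atomic<double> &loc) {
    double expected = loc.load(std::memory_order_relaxed);
    loc.compare_exchange_strong(expected, expected);
    return expected;
}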
-// TYPE - operands' type -#define ATOMIC_BEGIN_READ(TYPE_ID,OP_ID,TYPE, RET_TYPE) \ -RET_TYPE __kmpc_atomic_##TYPE_ID##_##OP_ID( ident_t *id_ref, int gtid, TYPE * loc ) \ -{ \ - KMP_DEBUG_ASSERT( __kmp_init_serial ); \ - KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_" #OP_ID ": T#%d\n", gtid )); - -// ------------------------------------------------------------------------ -// Operation on *lhs, rhs using "compare_and_store_ret" routine -// TYPE - operands' type -// BITS - size in bits, used to distinguish low level calls -// OP - operator -// Note: temp_val introduced in order to force the compiler to read -// *lhs only once (w/o it the compiler reads *lhs twice) -// TODO: check if it is still necessary -// Return old value regardless of the result of "compare & swap" operation - -#define OP_CMPXCHG_READ(TYPE,BITS,OP) \ - { \ - TYPE KMP_ATOMIC_VOLATILE temp_val; \ - union f_i_union { \ - TYPE f_val; \ - kmp_int##BITS i_val; \ - }; \ - union f_i_union old_value; \ - temp_val = *loc; \ - old_value.f_val = temp_val; \ - old_value.i_val = KMP_COMPARE_AND_STORE_RET##BITS( (kmp_int##BITS *) loc, \ - *VOLATILE_CAST(kmp_int##BITS *) &old_value.i_val, \ - *VOLATILE_CAST(kmp_int##BITS *) &old_value.i_val ); \ - new_value = old_value.f_val; \ - return new_value; \ - } - -// ------------------------------------------------------------------------- -// Operation on *lhs, rhs bound by critical section -// OP - operator (it's supposed to contain an assignment) -// LCK_ID - lock identifier -// Note: don't check gtid as it should always be valid -// 1, 2-byte - expect valid parameter, other - check before this macro -#define OP_CRITICAL_READ(OP,LCK_ID) \ - __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ - \ - new_value = (*loc); \ - \ - __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); - -// ------------------------------------------------------------------------- -#ifdef KMP_GOMP_COMPAT -#define OP_GOMP_CRITICAL_READ(OP,FLAG) \ - if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \ - KMP_CHECK_GTID; \ - OP_CRITICAL_READ( OP, 0 ); \ - return new_value; \ - } -#else -#define OP_GOMP_CRITICAL_READ(OP,FLAG) -#endif /* KMP_GOMP_COMPAT */ - -// ------------------------------------------------------------------------- -#define ATOMIC_FIXED_READ(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \ -ATOMIC_BEGIN_READ(TYPE_ID,OP_ID,TYPE,TYPE) \ - TYPE new_value; \ - OP_GOMP_CRITICAL_READ(OP##=,GOMP_FLAG) \ - new_value = KMP_TEST_THEN_ADD##BITS( loc, OP 0 ); \ - return new_value; \ -} -// ------------------------------------------------------------------------- -#define ATOMIC_CMPXCHG_READ(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \ -ATOMIC_BEGIN_READ(TYPE_ID,OP_ID,TYPE,TYPE) \ - TYPE new_value; \ - OP_GOMP_CRITICAL_READ(OP##=,GOMP_FLAG) \ - OP_CMPXCHG_READ(TYPE,BITS,OP) \ -} -// ------------------------------------------------------------------------ -// Routines for Extended types: long double, _Quad, complex flavours (use critical section) -// TYPE_ID, OP_ID, TYPE - detailed above -// OP - operator -// LCK_ID - lock identifier, used to possibly distinguish lock variable -#define ATOMIC_CRITICAL_READ(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \ -ATOMIC_BEGIN_READ(TYPE_ID,OP_ID,TYPE,TYPE) \ - TYPE new_value; \ - OP_GOMP_CRITICAL_READ(OP##=,GOMP_FLAG) /* send assignment */ \ - OP_CRITICAL_READ(OP,LCK_ID) /* send assignment */ \ - return new_value; \ -} - -// ------------------------------------------------------------------------ -// Fix for cmplx4 read (CQ220361) on Windows* OS.
Regular routine with return value doesn't work. -// Let's return the read value through the additional parameter. - -#if ( KMP_OS_WINDOWS ) - -#define OP_CRITICAL_READ_WRK(OP,LCK_ID) \ - __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ - \ - (*out) = (*loc); \ - \ - __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); -// ------------------------------------------------------------------------ -#ifdef KMP_GOMP_COMPAT -#define OP_GOMP_CRITICAL_READ_WRK(OP,FLAG) \ - if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \ - KMP_CHECK_GTID; \ - OP_CRITICAL_READ_WRK( OP, 0 ); \ - } -#else -#define OP_GOMP_CRITICAL_READ_WRK(OP,FLAG) -#endif /* KMP_GOMP_COMPAT */ -// ------------------------------------------------------------------------ -#define ATOMIC_BEGIN_READ_WRK(TYPE_ID,OP_ID,TYPE) \ -void __kmpc_atomic_##TYPE_ID##_##OP_ID( TYPE * out, ident_t *id_ref, int gtid, TYPE * loc ) \ -{ \ - KMP_DEBUG_ASSERT( __kmp_init_serial ); \ - KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_" #OP_ID ": T#%d\n", gtid )); - -// ------------------------------------------------------------------------ -#define ATOMIC_CRITICAL_READ_WRK(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \ -ATOMIC_BEGIN_READ_WRK(TYPE_ID,OP_ID,TYPE) \ - OP_GOMP_CRITICAL_READ_WRK(OP##=,GOMP_FLAG) /* send assignment */ \ - OP_CRITICAL_READ_WRK(OP,LCK_ID) /* send assignment */ \ -} - -#endif // KMP_OS_WINDOWS - -// ------------------------------------------------------------------------ -// TYPE_ID,OP_ID, TYPE, OP, GOMP_FLAG -ATOMIC_FIXED_READ( fixed4, rd, kmp_int32, 32, +, 0 ) // __kmpc_atomic_fixed4_rd -ATOMIC_FIXED_READ( fixed8, rd, kmp_int64, 64, +, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_rd -ATOMIC_CMPXCHG_READ( float4, rd, kmp_real32, 32, +, KMP_ARCH_X86 ) // __kmpc_atomic_float4_rd -ATOMIC_CMPXCHG_READ( float8, rd, kmp_real64, 64, +, KMP_ARCH_X86 ) // __kmpc_atomic_float8_rd - -// !!! 
TODO: Remove lock operations for "char" since it can't be non-atomic -ATOMIC_CMPXCHG_READ( fixed1, rd, kmp_int8, 8, +, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_rd -ATOMIC_CMPXCHG_READ( fixed2, rd, kmp_int16, 16, +, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_rd - -ATOMIC_CRITICAL_READ( float10, rd, long double, +, 10r, 1 ) // __kmpc_atomic_float10_rd -#if KMP_HAVE_QUAD -ATOMIC_CRITICAL_READ( float16, rd, QUAD_LEGACY, +, 16r, 1 ) // __kmpc_atomic_float16_rd -#endif // KMP_HAVE_QUAD - -// Fix for CQ220361 on Windows* OS -#if ( KMP_OS_WINDOWS ) - ATOMIC_CRITICAL_READ_WRK( cmplx4, rd, kmp_cmplx32, +, 8c, 1 ) // __kmpc_atomic_cmplx4_rd -#else - ATOMIC_CRITICAL_READ( cmplx4, rd, kmp_cmplx32, +, 8c, 1 ) // __kmpc_atomic_cmplx4_rd -#endif -ATOMIC_CRITICAL_READ( cmplx8, rd, kmp_cmplx64, +, 16c, 1 ) // __kmpc_atomic_cmplx8_rd -ATOMIC_CRITICAL_READ( cmplx10, rd, kmp_cmplx80, +, 20c, 1 ) // __kmpc_atomic_cmplx10_rd -#if KMP_HAVE_QUAD -ATOMIC_CRITICAL_READ( cmplx16, rd, CPLX128_LEG, +, 32c, 1 ) // __kmpc_atomic_cmplx16_rd -#if ( KMP_ARCH_X86 ) - ATOMIC_CRITICAL_READ( float16, a16_rd, Quad_a16_t, +, 16r, 1 ) // __kmpc_atomic_float16_a16_rd - ATOMIC_CRITICAL_READ( cmplx16, a16_rd, kmp_cmplx128_a16_t, +, 32c, 1 ) // __kmpc_atomic_cmplx16_a16_rd -#endif -#endif - - -// ------------------------------------------------------------------------ -// Atomic WRITE routines -// ------------------------------------------------------------------------ - -#define ATOMIC_XCHG_WR(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \ -ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ - OP_GOMP_CRITICAL(OP,GOMP_FLAG) \ - KMP_XCHG_FIXED##BITS( lhs, rhs ); \ -} -// ------------------------------------------------------------------------ -#define ATOMIC_XCHG_FLOAT_WR(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \ -ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ - OP_GOMP_CRITICAL(OP,GOMP_FLAG) \ - KMP_XCHG_REAL##BITS( lhs, rhs ); \ -} - - -// ------------------------------------------------------------------------ -// Operation on *lhs, rhs using "compare_and_store" routine -// TYPE - operands' type -// BITS - size in bits, used to distinguish low level calls -// OP - operator -// Note: temp_val introduced in order to force the compiler to read -// *lhs only once (w/o it the compiler reads *lhs twice) -#define OP_CMPXCHG_WR(TYPE,BITS,OP) \ - { \ - TYPE KMP_ATOMIC_VOLATILE temp_val; \ - TYPE old_value, new_value; \ - temp_val = *lhs; \ - old_value = temp_val; \ - new_value = rhs; \ - while ( ! 
KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \ - *VOLATILE_CAST(kmp_int##BITS *) &old_value, \ - *VOLATILE_CAST(kmp_int##BITS *) &new_value ) ) \ - { \ - KMP_CPU_PAUSE(); \ - \ - temp_val = *lhs; \ - old_value = temp_val; \ - new_value = rhs; \ - } \ - } - -// ------------------------------------------------------------------------- -#define ATOMIC_CMPXCHG_WR(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \ -ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ - OP_GOMP_CRITICAL(OP,GOMP_FLAG) \ - OP_CMPXCHG_WR(TYPE,BITS,OP) \ -} - -// ------------------------------------------------------------------------ -// Routines for Extended types: long double, _Quad, complex flavours (use critical section) -// TYPE_ID, OP_ID, TYPE - detailed above -// OP - operator -// LCK_ID - lock identifier, used to possibly distinguish lock variable -#define ATOMIC_CRITICAL_WR(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \ -ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ - OP_GOMP_CRITICAL(OP,GOMP_FLAG) /* send assignment */ \ - OP_CRITICAL(OP,LCK_ID) /* send assignment */ \ -} -// ------------------------------------------------------------------------- - -ATOMIC_XCHG_WR( fixed1, wr, kmp_int8, 8, =, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_wr -ATOMIC_XCHG_WR( fixed2, wr, kmp_int16, 16, =, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_wr -ATOMIC_XCHG_WR( fixed4, wr, kmp_int32, 32, =, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_wr -#if ( KMP_ARCH_X86 ) - ATOMIC_CMPXCHG_WR( fixed8, wr, kmp_int64, 64, =, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_wr -#else - ATOMIC_XCHG_WR( fixed8, wr, kmp_int64, 64, =, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_wr -#endif - -ATOMIC_XCHG_FLOAT_WR( float4, wr, kmp_real32, 32, =, KMP_ARCH_X86 ) // __kmpc_atomic_float4_wr -#if ( KMP_ARCH_X86 ) - ATOMIC_CMPXCHG_WR( float8, wr, kmp_real64, 64, =, KMP_ARCH_X86 ) // __kmpc_atomic_float8_wr -#else - ATOMIC_XCHG_FLOAT_WR( float8, wr, kmp_real64, 64, =, KMP_ARCH_X86 ) // __kmpc_atomic_float8_wr -#endif - -ATOMIC_CRITICAL_WR( float10, wr, long double, =, 10r, 1 ) // __kmpc_atomic_float10_wr -#if KMP_HAVE_QUAD -ATOMIC_CRITICAL_WR( float16, wr, QUAD_LEGACY, =, 16r, 1 ) // __kmpc_atomic_float16_wr -#endif -ATOMIC_CRITICAL_WR( cmplx4, wr, kmp_cmplx32, =, 8c, 1 ) // __kmpc_atomic_cmplx4_wr -ATOMIC_CRITICAL_WR( cmplx8, wr, kmp_cmplx64, =, 16c, 1 ) // __kmpc_atomic_cmplx8_wr -ATOMIC_CRITICAL_WR( cmplx10, wr, kmp_cmplx80, =, 20c, 1 ) // __kmpc_atomic_cmplx10_wr -#if KMP_HAVE_QUAD -ATOMIC_CRITICAL_WR( cmplx16, wr, CPLX128_LEG, =, 32c, 1 ) // __kmpc_atomic_cmplx16_wr -#if ( KMP_ARCH_X86 ) - ATOMIC_CRITICAL_WR( float16, a16_wr, Quad_a16_t, =, 16r, 1 ) // __kmpc_atomic_float16_a16_wr - ATOMIC_CRITICAL_WR( cmplx16, a16_wr, kmp_cmplx128_a16_t, =, 32c, 1 ) // __kmpc_atomic_cmplx16_a16_wr -#endif -#endif - - -// ------------------------------------------------------------------------ -// Atomic CAPTURE routines -// ------------------------------------------------------------------------ - -// Beginning of a definition (provides name, parameters, debug trace) -// TYPE_ID - operands type and size (fixed*, fixed*u for signed, unsigned fixed) -// OP_ID - operation identifier (add, sub, mul, ...)
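Behaviorally, every *_cpt routine performs *lhs = *lhs OP rhs atomically and returns either the resulting value (flag != 0) or the value *lhs held beforehand (flag == 0), matching OP_CRITICAL_CPT defined below. A minimal sketch of that contract for addition, assuming std::atomic rather than the runtime's primitives (illustrative names, not runtime API); the macro's parameter notes continue after it.

#include <atomic>
#include <cstdint>

std::int32_t add_capture(std::atomic<std::int32_t> &x, std::int32_t rhs, int flag) {
    std::int32_t old_value = x.fetch_add(rhs);     // atomic x += rhs, yields old x
    return flag ? old_value + rhs : old_value;     // new value vs. captured old value
}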
-// TYPE - operands' type -#define ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,RET_TYPE) \ -RET_TYPE __kmpc_atomic_##TYPE_ID##_##OP_ID( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs, int flag ) \ -{ \ - KMP_DEBUG_ASSERT( __kmp_init_serial ); \ - KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_" #OP_ID ": T#%d\n", gtid )); - -// ------------------------------------------------------------------------- -// Operation on *lhs, rhs bound by critical section -// OP - operator (it's supposed to contain an assignment) -// LCK_ID - lock identifier -// Note: don't check gtid as it should always be valid -// 1, 2-byte - expect valid parameter, other - check before this macro -#define OP_CRITICAL_CPT(OP,LCK_ID) \ - __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ - \ - if( flag ) { \ - (*lhs) OP rhs; \ - new_value = (*lhs); \ - } else { \ - new_value = (*lhs); \ - (*lhs) OP rhs; \ - } \ - \ - __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ - return new_value; - -// ------------------------------------------------------------------------ -#ifdef KMP_GOMP_COMPAT -#define OP_GOMP_CRITICAL_CPT(OP,FLAG) \ - if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \ - KMP_CHECK_GTID; \ - OP_CRITICAL_CPT( OP##=, 0 ); \ - } -#else -#define OP_GOMP_CRITICAL_CPT(OP,FLAG) -#endif /* KMP_GOMP_COMPAT */ - -// ------------------------------------------------------------------------ -// Operation on *lhs, rhs using "compare_and_store" routine -// TYPE - operands' type -// BITS - size in bits, used to distinguish low level calls -// OP - operator -// Note: temp_val introduced in order to force the compiler to read -// *lhs only once (w/o it the compiler reads *lhs twice) -#define OP_CMPXCHG_CPT(TYPE,BITS,OP) \ - { \ - TYPE KMP_ATOMIC_VOLATILE temp_val; \ - TYPE old_value, new_value; \ - temp_val = *lhs; \ - old_value = temp_val; \ - new_value = old_value OP rhs; \ - while ( ! 
KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \ - *VOLATILE_CAST(kmp_int##BITS *) &old_value, \ - *VOLATILE_CAST(kmp_int##BITS *) &new_value ) ) \ - { \ - KMP_CPU_PAUSE(); \ - \ - temp_val = *lhs; \ - old_value = temp_val; \ - new_value = old_value OP rhs; \ - } \ - if( flag ) { \ - return new_value; \ - } else \ - return old_value; \ - } - -// ------------------------------------------------------------------------- -#define ATOMIC_CMPXCHG_CPT(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \ -ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \ - TYPE new_value; \ - OP_GOMP_CRITICAL_CPT(OP,GOMP_FLAG) \ - OP_CMPXCHG_CPT(TYPE,BITS,OP) \ -} - -// ------------------------------------------------------------------------- -#define ATOMIC_FIXED_ADD_CPT(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \ -ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \ - TYPE old_value, new_value; \ - OP_GOMP_CRITICAL_CPT(OP,GOMP_FLAG) \ - /* OP used as a sign for subtraction: (lhs-rhs) --> (lhs+-rhs) */ \ - old_value = KMP_TEST_THEN_ADD##BITS( lhs, OP rhs ); \ - if( flag ) { \ - return old_value OP rhs; \ - } else \ - return old_value; \ -} -// ------------------------------------------------------------------------- - -ATOMIC_FIXED_ADD_CPT( fixed4, add_cpt, kmp_int32, 32, +, 0 ) // __kmpc_atomic_fixed4_add_cpt -ATOMIC_FIXED_ADD_CPT( fixed4, sub_cpt, kmp_int32, 32, -, 0 ) // __kmpc_atomic_fixed4_sub_cpt -ATOMIC_FIXED_ADD_CPT( fixed8, add_cpt, kmp_int64, 64, +, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_add_cpt -ATOMIC_FIXED_ADD_CPT( fixed8, sub_cpt, kmp_int64, 64, -, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_sub_cpt - -ATOMIC_CMPXCHG_CPT( float4, add_cpt, kmp_real32, 32, +, KMP_ARCH_X86 ) // __kmpc_atomic_float4_add_cpt -ATOMIC_CMPXCHG_CPT( float4, sub_cpt, kmp_real32, 32, -, KMP_ARCH_X86 ) // __kmpc_atomic_float4_sub_cpt -ATOMIC_CMPXCHG_CPT( float8, add_cpt, kmp_real64, 64, +, KMP_ARCH_X86 ) // __kmpc_atomic_float8_add_cpt -ATOMIC_CMPXCHG_CPT( float8, sub_cpt, kmp_real64, 64, -, KMP_ARCH_X86 ) // __kmpc_atomic_float8_sub_cpt - -// ------------------------------------------------------------------------ -// Entries definition for integer operands -// TYPE_ID - operands type and size (fixed4, float4) -// OP_ID - operation identifier (add, sub, mul, ...) 
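For operators with no direct fetch-and-op primitive (div, mul, shifts), OP_CMPXCHG_CPT above falls back to a compare-and-swap retry loop. Restated as a hedged standalone sketch in C++ (std::atomic in place of KMP_COMPARE_AND_STORE_ACQ*; the name is illustrative); the entry tables and their parameter notes continue below.

#include <atomic>
#include <cstdint>

std::int32_t div_capture(std::atomic<std::int32_t> &x, std::int32_t rhs, int flag) {
    std::int32_t old_value = x.load(std::memory_order_relaxed);
    std::int32_t new_value = old_value / rhs;
    while (!x.compare_exchange_weak(old_value, new_value)) {
        new_value = old_value / rhs;   // old_value was refreshed by the failed CAS
    }
    return flag ? new_value : old_value;
}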
-// TYPE - operand type -// BITS - size in bits, used to distinguish low level calls -// OP - operator (used in critical section) -// TYPE_ID,OP_ID, TYPE, BITS,OP,GOMP_FLAG -// ------------------------------------------------------------------------ -// Routines for ATOMIC integer operands, other operators -// ------------------------------------------------------------------------ -// TYPE_ID,OP_ID, TYPE, OP, GOMP_FLAG -ATOMIC_CMPXCHG_CPT( fixed1, add_cpt, kmp_int8, 8, +, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_add_cpt -ATOMIC_CMPXCHG_CPT( fixed1, andb_cpt, kmp_int8, 8, &, 0 ) // __kmpc_atomic_fixed1_andb_cpt -ATOMIC_CMPXCHG_CPT( fixed1, div_cpt, kmp_int8, 8, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_div_cpt -ATOMIC_CMPXCHG_CPT( fixed1u, div_cpt, kmp_uint8, 8, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_div_cpt -ATOMIC_CMPXCHG_CPT( fixed1, mul_cpt, kmp_int8, 8, *, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_mul_cpt -ATOMIC_CMPXCHG_CPT( fixed1, orb_cpt, kmp_int8, 8, |, 0 ) // __kmpc_atomic_fixed1_orb_cpt -ATOMIC_CMPXCHG_CPT( fixed1, shl_cpt, kmp_int8, 8, <<, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_shl_cpt -ATOMIC_CMPXCHG_CPT( fixed1, shr_cpt, kmp_int8, 8, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_shr_cpt -ATOMIC_CMPXCHG_CPT( fixed1u, shr_cpt, kmp_uint8, 8, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_shr_cpt -ATOMIC_CMPXCHG_CPT( fixed1, sub_cpt, kmp_int8, 8, -, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_sub_cpt -ATOMIC_CMPXCHG_CPT( fixed1, xor_cpt, kmp_int8, 8, ^, 0 ) // __kmpc_atomic_fixed1_xor_cpt -ATOMIC_CMPXCHG_CPT( fixed2, add_cpt, kmp_int16, 16, +, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_add_cpt -ATOMIC_CMPXCHG_CPT( fixed2, andb_cpt, kmp_int16, 16, &, 0 ) // __kmpc_atomic_fixed2_andb_cpt -ATOMIC_CMPXCHG_CPT( fixed2, div_cpt, kmp_int16, 16, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_div_cpt -ATOMIC_CMPXCHG_CPT( fixed2u, div_cpt, kmp_uint16, 16, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_div_cpt -ATOMIC_CMPXCHG_CPT( fixed2, mul_cpt, kmp_int16, 16, *, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_mul_cpt -ATOMIC_CMPXCHG_CPT( fixed2, orb_cpt, kmp_int16, 16, |, 0 ) // __kmpc_atomic_fixed2_orb_cpt -ATOMIC_CMPXCHG_CPT( fixed2, shl_cpt, kmp_int16, 16, <<, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_shl_cpt -ATOMIC_CMPXCHG_CPT( fixed2, shr_cpt, kmp_int16, 16, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_shr_cpt -ATOMIC_CMPXCHG_CPT( fixed2u, shr_cpt, kmp_uint16, 16, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_shr_cpt -ATOMIC_CMPXCHG_CPT( fixed2, sub_cpt, kmp_int16, 16, -, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_sub_cpt -ATOMIC_CMPXCHG_CPT( fixed2, xor_cpt, kmp_int16, 16, ^, 0 ) // __kmpc_atomic_fixed2_xor_cpt -ATOMIC_CMPXCHG_CPT( fixed4, andb_cpt, kmp_int32, 32, &, 0 ) // __kmpc_atomic_fixed4_andb_cpt -ATOMIC_CMPXCHG_CPT( fixed4, div_cpt, kmp_int32, 32, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_div_cpt -ATOMIC_CMPXCHG_CPT( fixed4u, div_cpt, kmp_uint32, 32, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4u_div_cpt -ATOMIC_CMPXCHG_CPT( fixed4, mul_cpt, kmp_int32, 32, *, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_mul_cpt -ATOMIC_CMPXCHG_CPT( fixed4, orb_cpt, kmp_int32, 32, |, 0 ) // __kmpc_atomic_fixed4_orb_cpt -ATOMIC_CMPXCHG_CPT( fixed4, shl_cpt, kmp_int32, 32, <<, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_shl_cpt -ATOMIC_CMPXCHG_CPT( fixed4, shr_cpt, kmp_int32, 32, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_shr_cpt -ATOMIC_CMPXCHG_CPT( fixed4u, shr_cpt, kmp_uint32, 32, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4u_shr_cpt -ATOMIC_CMPXCHG_CPT( fixed4, xor_cpt, kmp_int32, 32, ^, 0 ) // __kmpc_atomic_fixed4_xor_cpt -ATOMIC_CMPXCHG_CPT( 
fixed8, andb_cpt, kmp_int64, 64, &, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_andb_cpt -ATOMIC_CMPXCHG_CPT( fixed8, div_cpt, kmp_int64, 64, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_div_cpt -ATOMIC_CMPXCHG_CPT( fixed8u, div_cpt, kmp_uint64, 64, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_div_cpt -ATOMIC_CMPXCHG_CPT( fixed8, mul_cpt, kmp_int64, 64, *, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_mul_cpt -ATOMIC_CMPXCHG_CPT( fixed8, orb_cpt, kmp_int64, 64, |, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_orb_cpt -ATOMIC_CMPXCHG_CPT( fixed8, shl_cpt, kmp_int64, 64, <<, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_shl_cpt -ATOMIC_CMPXCHG_CPT( fixed8, shr_cpt, kmp_int64, 64, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_shr_cpt -ATOMIC_CMPXCHG_CPT( fixed8u, shr_cpt, kmp_uint64, 64, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_shr_cpt -ATOMIC_CMPXCHG_CPT( fixed8, xor_cpt, kmp_int64, 64, ^, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_xor_cpt -ATOMIC_CMPXCHG_CPT( float4, div_cpt, kmp_real32, 32, /, KMP_ARCH_X86 ) // __kmpc_atomic_float4_div_cpt -ATOMIC_CMPXCHG_CPT( float4, mul_cpt, kmp_real32, 32, *, KMP_ARCH_X86 ) // __kmpc_atomic_float4_mul_cpt -ATOMIC_CMPXCHG_CPT( float8, div_cpt, kmp_real64, 64, /, KMP_ARCH_X86 ) // __kmpc_atomic_float8_div_cpt -ATOMIC_CMPXCHG_CPT( float8, mul_cpt, kmp_real64, 64, *, KMP_ARCH_X86 ) // __kmpc_atomic_float8_mul_cpt -// TYPE_ID,OP_ID, TYPE, OP, GOMP_FLAG - -////////////////////////////////// - -// CAPTURE routines for mixed types RHS=float16 -#if KMP_HAVE_QUAD - -// Beginning of a definition (provides name, parameters, debug trace) -// TYPE_ID - operands type and size (fixed*, fixed*u for signed, unsigned fixed) -// OP_ID - operation identifier (add, sub, mul, ...) -// TYPE - operands' type -#define ATOMIC_BEGIN_CPT_MIX(TYPE_ID,OP_ID,TYPE,RTYPE_ID,RTYPE) \ -TYPE __kmpc_atomic_##TYPE_ID##_##OP_ID##_##RTYPE_ID( ident_t *id_ref, int gtid, TYPE * lhs, RTYPE rhs, int flag ) \ -{ \ - KMP_DEBUG_ASSERT( __kmp_init_serial ); \ - KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_" #OP_ID "_" #RTYPE_ID ": T#%d\n", gtid )); - -// ------------------------------------------------------------------------- -#define ATOMIC_CMPXCHG_CPT_MIX(TYPE_ID,TYPE,OP_ID,BITS,OP,RTYPE_ID,RTYPE,LCK_ID,MASK,GOMP_FLAG) \ -ATOMIC_BEGIN_CPT_MIX(TYPE_ID,OP_ID,TYPE,RTYPE_ID,RTYPE) \ - TYPE new_value; \ - OP_GOMP_CRITICAL_CPT(OP,GOMP_FLAG) \ - OP_CMPXCHG_CPT(TYPE,BITS,OP) \ -} - -// ------------------------------------------------------------------------- -#define ATOMIC_CRITICAL_CPT_MIX(TYPE_ID,TYPE,OP_ID,OP,RTYPE_ID,RTYPE,LCK_ID,GOMP_FLAG) \ -ATOMIC_BEGIN_CPT_MIX(TYPE_ID,OP_ID,TYPE,RTYPE_ID,RTYPE) \ - TYPE new_value; \ - OP_GOMP_CRITICAL_CPT(OP,GOMP_FLAG) /* send assignment */ \ - OP_CRITICAL_CPT(OP##=,LCK_ID) /* send assignment */ \ -} - -ATOMIC_CMPXCHG_CPT_MIX( fixed1, char, add_cpt, 8, +, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_add_cpt_fp -ATOMIC_CMPXCHG_CPT_MIX( fixed1u, uchar, add_cpt, 8, +, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_add_cpt_fp -ATOMIC_CMPXCHG_CPT_MIX( fixed1, char, sub_cpt, 8, -, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_sub_cpt_fp -ATOMIC_CMPXCHG_CPT_MIX( fixed1u, uchar, sub_cpt, 8, -, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_sub_cpt_fp -ATOMIC_CMPXCHG_CPT_MIX( fixed1, char, mul_cpt, 8, *, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_mul_cpt_fp -ATOMIC_CMPXCHG_CPT_MIX( fixed1u, uchar, mul_cpt, 8, *, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_mul_cpt_fp -ATOMIC_CMPXCHG_CPT_MIX( fixed1, char, div_cpt, 8, /, fp, _Quad, 1i, 0,
KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_div_cpt_fp -ATOMIC_CMPXCHG_CPT_MIX( fixed1u, uchar, div_cpt, 8, /, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_div_cpt_fp - -ATOMIC_CMPXCHG_CPT_MIX( fixed2, short, add_cpt, 16, +, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_add_cpt_fp -ATOMIC_CMPXCHG_CPT_MIX( fixed2u, ushort, add_cpt, 16, +, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_add_cpt_fp -ATOMIC_CMPXCHG_CPT_MIX( fixed2, short, sub_cpt, 16, -, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_sub_cpt_fp -ATOMIC_CMPXCHG_CPT_MIX( fixed2u, ushort, sub_cpt, 16, -, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_sub_cpt_fp -ATOMIC_CMPXCHG_CPT_MIX( fixed2, short, mul_cpt, 16, *, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_mul_cpt_fp -ATOMIC_CMPXCHG_CPT_MIX( fixed2u, ushort, mul_cpt, 16, *, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_mul_cpt_fp -ATOMIC_CMPXCHG_CPT_MIX( fixed2, short, div_cpt, 16, /, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_div_cpt_fp -ATOMIC_CMPXCHG_CPT_MIX( fixed2u, ushort, div_cpt, 16, /, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_div_cpt_fp - -ATOMIC_CMPXCHG_CPT_MIX( fixed4, kmp_int32, add_cpt, 32, +, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4_add_cpt_fp -ATOMIC_CMPXCHG_CPT_MIX( fixed4u, kmp_uint32, add_cpt, 32, +, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4u_add_cpt_fp -ATOMIC_CMPXCHG_CPT_MIX( fixed4, kmp_int32, sub_cpt, 32, -, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4_sub_cpt_fp -ATOMIC_CMPXCHG_CPT_MIX( fixed4u, kmp_uint32, sub_cpt, 32, -, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4u_sub_cpt_fp -ATOMIC_CMPXCHG_CPT_MIX( fixed4, kmp_int32, mul_cpt, 32, *, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4_mul_cpt_fp -ATOMIC_CMPXCHG_CPT_MIX( fixed4u, kmp_uint32, mul_cpt, 32, *, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4u_mul_cpt_fp -ATOMIC_CMPXCHG_CPT_MIX( fixed4, kmp_int32, div_cpt, 32, /, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4_div_cpt_fp -ATOMIC_CMPXCHG_CPT_MIX( fixed4u, kmp_uint32, div_cpt, 32, /, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4u_div_cpt_fp - -ATOMIC_CMPXCHG_CPT_MIX( fixed8, kmp_int64, add_cpt, 64, +, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_add_cpt_fp -ATOMIC_CMPXCHG_CPT_MIX( fixed8u, kmp_uint64, add_cpt, 64, +, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_add_cpt_fp -ATOMIC_CMPXCHG_CPT_MIX( fixed8, kmp_int64, sub_cpt, 64, -, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_sub_cpt_fp -ATOMIC_CMPXCHG_CPT_MIX( fixed8u, kmp_uint64, sub_cpt, 64, -, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_sub_cpt_fp -ATOMIC_CMPXCHG_CPT_MIX( fixed8, kmp_int64, mul_cpt, 64, *, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_mul_cpt_fp -ATOMIC_CMPXCHG_CPT_MIX( fixed8u, kmp_uint64, mul_cpt, 64, *, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_mul_cpt_fp -ATOMIC_CMPXCHG_CPT_MIX( fixed8, kmp_int64, div_cpt, 64, /, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_div_cpt_fp -ATOMIC_CMPXCHG_CPT_MIX( fixed8u, kmp_uint64, div_cpt, 64, /, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_div_cpt_fp - -ATOMIC_CMPXCHG_CPT_MIX( float4, kmp_real32, add_cpt, 32, +, fp, _Quad, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_add_cpt_fp -ATOMIC_CMPXCHG_CPT_MIX( float4, kmp_real32, sub_cpt, 32, -, fp, _Quad, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_sub_cpt_fp -ATOMIC_CMPXCHG_CPT_MIX( float4, kmp_real32, mul_cpt, 32, *, fp, _Quad, 4r, 3, KMP_ARCH_X86 ) // 
__kmpc_atomic_float4_mul_cpt_fp -ATOMIC_CMPXCHG_CPT_MIX( float4, kmp_real32, div_cpt, 32, /, fp, _Quad, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_div_cpt_fp - -ATOMIC_CMPXCHG_CPT_MIX( float8, kmp_real64, add_cpt, 64, +, fp, _Quad, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_add_cpt_fp -ATOMIC_CMPXCHG_CPT_MIX( float8, kmp_real64, sub_cpt, 64, -, fp, _Quad, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_sub_cpt_fp -ATOMIC_CMPXCHG_CPT_MIX( float8, kmp_real64, mul_cpt, 64, *, fp, _Quad, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_mul_cpt_fp -ATOMIC_CMPXCHG_CPT_MIX( float8, kmp_real64, div_cpt, 64, /, fp, _Quad, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_div_cpt_fp - -ATOMIC_CRITICAL_CPT_MIX( float10, long double, add_cpt, +, fp, _Quad, 10r, 1 ) // __kmpc_atomic_float10_add_cpt_fp -ATOMIC_CRITICAL_CPT_MIX( float10, long double, sub_cpt, -, fp, _Quad, 10r, 1 ) // __kmpc_atomic_float10_sub_cpt_fp -ATOMIC_CRITICAL_CPT_MIX( float10, long double, mul_cpt, *, fp, _Quad, 10r, 1 ) // __kmpc_atomic_float10_mul_cpt_fp -ATOMIC_CRITICAL_CPT_MIX( float10, long double, div_cpt, /, fp, _Quad, 10r, 1 ) // __kmpc_atomic_float10_div_cpt_fp - -#endif //KMP_HAVE_QUAD - -/////////////////////////////////// - -// ------------------------------------------------------------------------ -// Routines for C/C++ Reduction operators && and || -// ------------------------------------------------------------------------ - -// ------------------------------------------------------------------------- -// Operation on *lhs, rhs bound by critical section -// OP - operator (it's supposed to contain an assignment) -// LCK_ID - lock identifier -// Note: don't check gtid as it should always be valid -// 1, 2-byte - expect valid parameter, other - check before this macro -#define OP_CRITICAL_L_CPT(OP,LCK_ID) \ - __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ - \ - if( flag ) { \ - new_value OP rhs; \ - } else \ - new_value = (*lhs); \ - \ - __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); - -// ------------------------------------------------------------------------ -#ifdef KMP_GOMP_COMPAT -#define OP_GOMP_CRITICAL_L_CPT(OP,FLAG) \ - if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \ - KMP_CHECK_GTID; \ - OP_CRITICAL_L_CPT( OP, 0 ); \ - return new_value; \ - } -#else -#define OP_GOMP_CRITICAL_L_CPT(OP,FLAG) -#endif /* KMP_GOMP_COMPAT */ - -// ------------------------------------------------------------------------ -// Need separate macros for &&, || because there is no combined assignment -#define ATOMIC_CMPX_L_CPT(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \ -ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \ - TYPE new_value; \ - OP_GOMP_CRITICAL_L_CPT( = *lhs OP, GOMP_FLAG ) \ - OP_CMPXCHG_CPT(TYPE,BITS,OP) \ -} - -ATOMIC_CMPX_L_CPT( fixed1, andl_cpt, char, 8, &&, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_andl_cpt -ATOMIC_CMPX_L_CPT( fixed1, orl_cpt, char, 8, ||, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_orl_cpt -ATOMIC_CMPX_L_CPT( fixed2, andl_cpt, short, 16, &&, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_andl_cpt -ATOMIC_CMPX_L_CPT( fixed2, orl_cpt, short, 16, ||, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_orl_cpt -ATOMIC_CMPX_L_CPT( fixed4, andl_cpt, kmp_int32, 32, &&, 0 ) // __kmpc_atomic_fixed4_andl_cpt -ATOMIC_CMPX_L_CPT( fixed4, orl_cpt, kmp_int32, 32, ||, 0 ) // __kmpc_atomic_fixed4_orl_cpt -ATOMIC_CMPX_L_CPT( fixed8, andl_cpt, kmp_int64, 64, &&, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_andl_cpt -ATOMIC_CMPX_L_CPT( fixed8, orl_cpt, kmp_int64, 64, ||, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_orl_cpt - - -// 
------------------------------------------------------------------------- -// Routines for Fortran operators that have no C counterpart: -// MAX, MIN, .EQV., .NEQV. -// Operators .AND., .OR. are covered by __kmpc_atomic_*_{andl,orl}_cpt -// Intrinsics IAND, IOR, IEOR are covered by __kmpc_atomic_*_{andb,orb,xor}_cpt -// ------------------------------------------------------------------------- - -// ------------------------------------------------------------------------- -// MIN and MAX need separate macros -// OP - operator used to check whether any action is needed -#define MIN_MAX_CRITSECT_CPT(OP,LCK_ID) \ - __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ - \ - if ( *lhs OP rhs ) { /* still need actions? */ \ - old_value = *lhs; \ - *lhs = rhs; \ - if ( flag ) \ - new_value = rhs; \ - else \ - new_value = old_value; \ - } \ - __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ - return new_value; \ - -// ------------------------------------------------------------------------- -#ifdef KMP_GOMP_COMPAT -#define GOMP_MIN_MAX_CRITSECT_CPT(OP,FLAG) \ - if (( FLAG ) && ( __kmp_atomic_mode == 2 )) { \ - KMP_CHECK_GTID; \ - MIN_MAX_CRITSECT_CPT( OP, 0 ); \ - } -#else -#define GOMP_MIN_MAX_CRITSECT_CPT(OP,FLAG) -#endif /* KMP_GOMP_COMPAT */ - -// ------------------------------------------------------------------------- -#define MIN_MAX_CMPXCHG_CPT(TYPE,BITS,OP) \ - { \ - TYPE KMP_ATOMIC_VOLATILE temp_val; \ - /*TYPE old_value; */ \ - temp_val = *lhs; \ - old_value = temp_val; \ - while ( old_value OP rhs && /* still need actions? */ \ - ! KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \ - *VOLATILE_CAST(kmp_int##BITS *) &old_value, \ - *VOLATILE_CAST(kmp_int##BITS *) &rhs ) ) \ - { \ - KMP_CPU_PAUSE(); \ - temp_val = *lhs; \ - old_value = temp_val; \ - } \ - if( flag ) \ - return rhs; \ - else \ - return old_value; \ - } - -// ------------------------------------------------------------------------- -// 1-byte, 2-byte operands - use critical section -#define MIN_MAX_CRITICAL_CPT(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \ -ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \ - TYPE new_value, old_value; \ - if ( *lhs OP rhs ) { /* need actions? */ \ - GOMP_MIN_MAX_CRITSECT_CPT(OP,GOMP_FLAG) \ - MIN_MAX_CRITSECT_CPT(OP,LCK_ID) \ - } \ - return *lhs; \ -} - -#define MIN_MAX_COMPXCHG_CPT(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \ -ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \ - TYPE new_value, old_value; \ - if ( *lhs OP rhs ) { \ - GOMP_MIN_MAX_CRITSECT_CPT(OP,GOMP_FLAG) \ - MIN_MAX_CMPXCHG_CPT(TYPE,BITS,OP) \ - } \ - return *lhs; \ -} - - -MIN_MAX_COMPXCHG_CPT( fixed1, max_cpt, char, 8, <, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_max_cpt -MIN_MAX_COMPXCHG_CPT( fixed1, min_cpt, char, 8, >, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_min_cpt -MIN_MAX_COMPXCHG_CPT( fixed2, max_cpt, short, 16, <, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_max_cpt -MIN_MAX_COMPXCHG_CPT( fixed2, min_cpt, short, 16, >, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_min_cpt -MIN_MAX_COMPXCHG_CPT( fixed4, max_cpt, kmp_int32, 32, <, 0 ) // __kmpc_atomic_fixed4_max_cpt -MIN_MAX_COMPXCHG_CPT( fixed4, min_cpt, kmp_int32, 32, >, 0 ) // __kmpc_atomic_fixed4_min_cpt -MIN_MAX_COMPXCHG_CPT( fixed8, max_cpt, kmp_int64, 64, <, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_max_cpt -MIN_MAX_COMPXCHG_CPT( fixed8, min_cpt, kmp_int64, 64, >, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_min_cpt -MIN_MAX_COMPXCHG_CPT( float4, max_cpt, kmp_real32, 32, <, KMP_ARCH_X86 ) // __kmpc_atomic_float4_max_cpt -MIN_MAX_COMPXCHG_CPT( float4, min_cpt, kmp_real32, 32, >, KMP_ARCH_X86 ) // __kmpc_atomic_float4_min_cpt -MIN_MAX_COMPXCHG_CPT( float8, max_cpt, kmp_real64, 64, <, KMP_ARCH_X86 ) // __kmpc_atomic_float8_max_cpt -MIN_MAX_COMPXCHG_CPT( float8, min_cpt, kmp_real64, 64, >, KMP_ARCH_X86 ) // __kmpc_atomic_float8_min_cpt -#if KMP_HAVE_QUAD -MIN_MAX_CRITICAL_CPT( float16, max_cpt, QUAD_LEGACY, <, 16r, 1 ) // __kmpc_atomic_float16_max_cpt -MIN_MAX_CRITICAL_CPT( float16, min_cpt, QUAD_LEGACY, >, 16r, 1 ) // __kmpc_atomic_float16_min_cpt -#if ( KMP_ARCH_X86 ) - MIN_MAX_CRITICAL_CPT( float16, max_a16_cpt, Quad_a16_t, <, 16r, 1 ) // __kmpc_atomic_float16_max_a16_cpt - MIN_MAX_CRITICAL_CPT( float16, min_a16_cpt, Quad_a16_t, >, 16r, 1 ) // __kmpc_atomic_float16_min_a16_cpt -#endif -#endif - -// ------------------------------------------------------------------------ -#ifdef KMP_GOMP_COMPAT -#define OP_GOMP_CRITICAL_EQV_CPT(OP,FLAG) \ - if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \ - KMP_CHECK_GTID; \ - OP_CRITICAL_CPT( OP, 0 ); \ - } -#else -#define OP_GOMP_CRITICAL_EQV_CPT(OP,FLAG) -#endif /* KMP_GOMP_COMPAT */ -// ------------------------------------------------------------------------ -#define ATOMIC_CMPX_EQV_CPT(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \ -ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \ - TYPE new_value; \ - OP_GOMP_CRITICAL_EQV_CPT(^=~,GOMP_FLAG) /* send assignment */ \ - OP_CMPXCHG_CPT(TYPE,BITS,OP) \ -} - -// ------------------------------------------------------------------------ - -ATOMIC_CMPXCHG_CPT( fixed1, neqv_cpt, kmp_int8, 8, ^, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_neqv_cpt -ATOMIC_CMPXCHG_CPT( fixed2, neqv_cpt, kmp_int16, 16, ^, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_neqv_cpt -ATOMIC_CMPXCHG_CPT( fixed4, neqv_cpt, kmp_int32, 32, ^, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_neqv_cpt -ATOMIC_CMPXCHG_CPT( fixed8, neqv_cpt, kmp_int64, 64, ^, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_neqv_cpt -ATOMIC_CMPX_EQV_CPT( fixed1, eqv_cpt, kmp_int8, 8, ^~, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_eqv_cpt -ATOMIC_CMPX_EQV_CPT( fixed2, eqv_cpt, kmp_int16, 16, ^~, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_eqv_cpt -ATOMIC_CMPX_EQV_CPT( fixed4, eqv_cpt, kmp_int32, 32, ^~, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_eqv_cpt
-ATOMIC_CMPX_EQV_CPT( fixed8, eqv_cpt, kmp_int64, 64, ^~, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_eqv_cpt - -// ------------------------------------------------------------------------ -// Routines for Extended types: long double, _Quad, complex flavours (use critical section) -// TYPE_ID, OP_ID, TYPE - detailed above -// OP - operator -// LCK_ID - lock identifier, used to possibly distinguish lock variable -#define ATOMIC_CRITICAL_CPT(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \ -ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \ - TYPE new_value; \ - OP_GOMP_CRITICAL_CPT(OP,GOMP_FLAG) /* send assignment */ \ - OP_CRITICAL_CPT(OP##=,LCK_ID) /* send assignment */ \ -} - -// ------------------------------------------------------------------------ - -// Workaround for cmplx4. Regular routines with return value don't work -// on Win_32e. Let's return captured values through the additional parameter. -#define OP_CRITICAL_CPT_WRK(OP,LCK_ID) \ - __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ - \ - if( flag ) { \ - (*lhs) OP rhs; \ - (*out) = (*lhs); \ - } else { \ - (*out) = (*lhs); \ - (*lhs) OP rhs; \ - } \ - \ - __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ - return; -// ------------------------------------------------------------------------ - -#ifdef KMP_GOMP_COMPAT -#define OP_GOMP_CRITICAL_CPT_WRK(OP,FLAG) \ - if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \ - KMP_CHECK_GTID; \ - OP_CRITICAL_CPT_WRK( OP##=, 0 ); \ - } -#else -#define OP_GOMP_CRITICAL_CPT_WRK(OP,FLAG) -#endif /* KMP_GOMP_COMPAT */ -// ------------------------------------------------------------------------ - -#define ATOMIC_BEGIN_WRK(TYPE_ID,OP_ID,TYPE) \ -void __kmpc_atomic_##TYPE_ID##_##OP_ID( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs, TYPE * out, int flag ) \ -{ \ - KMP_DEBUG_ASSERT( __kmp_init_serial ); \ - KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_" #OP_ID ": T#%d\n", gtid )); -// ------------------------------------------------------------------------ - -#define ATOMIC_CRITICAL_CPT_WRK(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \ -ATOMIC_BEGIN_WRK(TYPE_ID,OP_ID,TYPE) \ - OP_GOMP_CRITICAL_CPT_WRK(OP,GOMP_FLAG) \ - OP_CRITICAL_CPT_WRK(OP##=,LCK_ID) \ -} -// The end of workaround for cmplx4 - -/* ------------------------------------------------------------------------- */ -// routines for long double type -ATOMIC_CRITICAL_CPT( float10, add_cpt, long double, +, 10r, 1 ) // __kmpc_atomic_float10_add_cpt -ATOMIC_CRITICAL_CPT( float10, sub_cpt, long double, -, 10r, 1 ) // __kmpc_atomic_float10_sub_cpt -ATOMIC_CRITICAL_CPT( float10, mul_cpt, long double, *, 10r, 1 ) // __kmpc_atomic_float10_mul_cpt -ATOMIC_CRITICAL_CPT( float10, div_cpt, long double, /, 10r, 1 ) // __kmpc_atomic_float10_div_cpt -#if KMP_HAVE_QUAD -// routines for _Quad type -ATOMIC_CRITICAL_CPT( float16, add_cpt, QUAD_LEGACY, +, 16r, 1 ) // __kmpc_atomic_float16_add_cpt -ATOMIC_CRITICAL_CPT( float16, sub_cpt, QUAD_LEGACY, -, 16r, 1 ) // __kmpc_atomic_float16_sub_cpt -ATOMIC_CRITICAL_CPT( float16, mul_cpt, QUAD_LEGACY, *, 16r, 1 ) // __kmpc_atomic_float16_mul_cpt -ATOMIC_CRITICAL_CPT( float16, div_cpt, QUAD_LEGACY, /, 16r, 1 ) // __kmpc_atomic_float16_div_cpt -#if ( KMP_ARCH_X86 ) - ATOMIC_CRITICAL_CPT( float16, add_a16_cpt, Quad_a16_t, +, 16r, 1 ) // __kmpc_atomic_float16_add_a16_cpt - ATOMIC_CRITICAL_CPT( float16, sub_a16_cpt, Quad_a16_t, -, 16r, 1 ) // __kmpc_atomic_float16_sub_a16_cpt - ATOMIC_CRITICAL_CPT( float16, mul_a16_cpt, Quad_a16_t, *, 16r, 1 ) // __kmpc_atomic_float16_mul_a16_cpt - ATOMIC_CRITICAL_CPT( float16, div_a16_cpt, 
Quad_a16_t, /, 16r, 1 ) // __kmpc_atomic_float16_div_a16_cpt -#endif -#endif - -// routines for complex types - -// cmplx4 routines to return void -ATOMIC_CRITICAL_CPT_WRK( cmplx4, add_cpt, kmp_cmplx32, +, 8c, 1 ) // __kmpc_atomic_cmplx4_add_cpt -ATOMIC_CRITICAL_CPT_WRK( cmplx4, sub_cpt, kmp_cmplx32, -, 8c, 1 ) // __kmpc_atomic_cmplx4_sub_cpt -ATOMIC_CRITICAL_CPT_WRK( cmplx4, mul_cpt, kmp_cmplx32, *, 8c, 1 ) // __kmpc_atomic_cmplx4_mul_cpt -ATOMIC_CRITICAL_CPT_WRK( cmplx4, div_cpt, kmp_cmplx32, /, 8c, 1 ) // __kmpc_atomic_cmplx4_div_cpt - -ATOMIC_CRITICAL_CPT( cmplx8, add_cpt, kmp_cmplx64, +, 16c, 1 ) // __kmpc_atomic_cmplx8_add_cpt -ATOMIC_CRITICAL_CPT( cmplx8, sub_cpt, kmp_cmplx64, -, 16c, 1 ) // __kmpc_atomic_cmplx8_sub_cpt -ATOMIC_CRITICAL_CPT( cmplx8, mul_cpt, kmp_cmplx64, *, 16c, 1 ) // __kmpc_atomic_cmplx8_mul_cpt -ATOMIC_CRITICAL_CPT( cmplx8, div_cpt, kmp_cmplx64, /, 16c, 1 ) // __kmpc_atomic_cmplx8_div_cpt -ATOMIC_CRITICAL_CPT( cmplx10, add_cpt, kmp_cmplx80, +, 20c, 1 ) // __kmpc_atomic_cmplx10_add_cpt -ATOMIC_CRITICAL_CPT( cmplx10, sub_cpt, kmp_cmplx80, -, 20c, 1 ) // __kmpc_atomic_cmplx10_sub_cpt -ATOMIC_CRITICAL_CPT( cmplx10, mul_cpt, kmp_cmplx80, *, 20c, 1 ) // __kmpc_atomic_cmplx10_mul_cpt -ATOMIC_CRITICAL_CPT( cmplx10, div_cpt, kmp_cmplx80, /, 20c, 1 ) // __kmpc_atomic_cmplx10_div_cpt -#if KMP_HAVE_QUAD -ATOMIC_CRITICAL_CPT( cmplx16, add_cpt, CPLX128_LEG, +, 32c, 1 ) // __kmpc_atomic_cmplx16_add_cpt -ATOMIC_CRITICAL_CPT( cmplx16, sub_cpt, CPLX128_LEG, -, 32c, 1 ) // __kmpc_atomic_cmplx16_sub_cpt -ATOMIC_CRITICAL_CPT( cmplx16, mul_cpt, CPLX128_LEG, *, 32c, 1 ) // __kmpc_atomic_cmplx16_mul_cpt -ATOMIC_CRITICAL_CPT( cmplx16, div_cpt, CPLX128_LEG, /, 32c, 1 ) // __kmpc_atomic_cmplx16_div_cpt -#if ( KMP_ARCH_X86 ) - ATOMIC_CRITICAL_CPT( cmplx16, add_a16_cpt, kmp_cmplx128_a16_t, +, 32c, 1 ) // __kmpc_atomic_cmplx16_add_a16_cpt - ATOMIC_CRITICAL_CPT( cmplx16, sub_a16_cpt, kmp_cmplx128_a16_t, -, 32c, 1 ) // __kmpc_atomic_cmplx16_sub_a16_cpt - ATOMIC_CRITICAL_CPT( cmplx16, mul_a16_cpt, kmp_cmplx128_a16_t, *, 32c, 1 ) // __kmpc_atomic_cmplx16_mul_a16_cpt - ATOMIC_CRITICAL_CPT( cmplx16, div_a16_cpt, kmp_cmplx128_a16_t, /, 32c, 1 ) // __kmpc_atomic_cmplx16_div_a16_cpt -#endif -#endif - -#if OMP_40_ENABLED - -// OpenMP 4.0: v = x = expr binop x; { v = x; x = expr binop x; } { x = expr binop x; v = x; } for non-commutative operations. 
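// (Illustration added for clarity; not in the original source: with x == 16.0,
// expr == 2.0 and binop == /, the reverse form stores x = 2.0 / 16.0 == 0.125;
// the 'flag' argument of the generated routine then selects whether the
// captured value v is the old x (16.0) or the new x (0.125), matching the two
// orderings shown above.)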
-// Supported only on IA-32 architecture and Intel(R) 64 - -// ------------------------------------------------------------------------- -// Operation on *lhs, rhs bound by critical section -// OP - operator (it's supposed to contain an assignment) -// LCK_ID - lock identifier -// Note: don't check gtid as it should always be valid -// 1, 2-byte - expect valid parameter, other - check before this macro -#define OP_CRITICAL_CPT_REV(OP,LCK_ID) \ - __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ - \ - if( flag ) { \ - /*temp_val = (*lhs);*/\ - (*lhs) = (rhs) OP (*lhs); \ - new_value = (*lhs); \ - } else { \ - new_value = (*lhs);\ - (*lhs) = (rhs) OP (*lhs); \ - } \ - __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ - return new_value; - -// ------------------------------------------------------------------------ -#ifdef KMP_GOMP_COMPAT -#define OP_GOMP_CRITICAL_CPT_REV(OP,FLAG) \ - if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \ - KMP_CHECK_GTID; \ - OP_CRITICAL_CPT_REV( OP, 0 ); \ - } -#else -#define OP_GOMP_CRITICAL_CPT_REV(OP,FLAG) -#endif /* KMP_GOMP_COMPAT */ - -// ------------------------------------------------------------------------ -// Operation on *lhs, rhs using "compare_and_store" routine -// TYPE - operands' type -// BITS - size in bits, used to distinguish low level calls -// OP - operator -// Note: temp_val introduced in order to force the compiler to read -// *lhs only once (w/o it the compiler reads *lhs twice) -#define OP_CMPXCHG_CPT_REV(TYPE,BITS,OP) \ - { \ - TYPE KMP_ATOMIC_VOLATILE temp_val; \ - TYPE old_value, new_value; \ - temp_val = *lhs; \ - old_value = temp_val; \ - new_value = rhs OP old_value; \ - while ( ! KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \ - *VOLATILE_CAST(kmp_int##BITS *) &old_value, \ - *VOLATILE_CAST(kmp_int##BITS *) &new_value ) ) \ - { \ - KMP_CPU_PAUSE(); \ - \ - temp_val = *lhs; \ - old_value = temp_val; \ - new_value = rhs OP old_value; \ - } \ - if( flag ) { \ - return new_value; \ - } else \ - return old_value; \ - } - -// ------------------------------------------------------------------------- -#define ATOMIC_CMPXCHG_CPT_REV(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \ -ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \ - TYPE new_value; \ - TYPE KMP_ATOMIC_VOLATILE temp_val; \ - OP_GOMP_CRITICAL_CPT_REV(OP,GOMP_FLAG) \ - OP_CMPXCHG_CPT_REV(TYPE,BITS,OP) \ -} - - -ATOMIC_CMPXCHG_CPT_REV( fixed1, div_cpt_rev, kmp_int8, 8, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_div_cpt_rev -ATOMIC_CMPXCHG_CPT_REV( fixed1u, div_cpt_rev, kmp_uint8, 8, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_div_cpt_rev -ATOMIC_CMPXCHG_CPT_REV( fixed1, shl_cpt_rev, kmp_int8, 8, <<, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_shl_cpt_rev -ATOMIC_CMPXCHG_CPT_REV( fixed1, shr_cpt_rev, kmp_int8, 8, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_shr_cpt_rev -ATOMIC_CMPXCHG_CPT_REV( fixed1u, shr_cpt_rev, kmp_uint8, 8, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_shr_cpt_rev -ATOMIC_CMPXCHG_CPT_REV( fixed1, sub_cpt_rev, kmp_int8, 8, -, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_sub_cpt_rev -ATOMIC_CMPXCHG_CPT_REV( fixed2, div_cpt_rev, kmp_int16, 16, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_div_cpt_rev -ATOMIC_CMPXCHG_CPT_REV( fixed2u, div_cpt_rev, kmp_uint16, 16, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_div_cpt_rev -ATOMIC_CMPXCHG_CPT_REV( fixed2, shl_cpt_rev, kmp_int16, 16, <<, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_shl_cpt_rev -ATOMIC_CMPXCHG_CPT_REV( fixed2, shr_cpt_rev, kmp_int16, 16, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_shr_cpt_rev -ATOMIC_CMPXCHG_CPT_REV( 
fixed2u, shr_cpt_rev, kmp_uint16, 16, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_shr_cpt_rev -ATOMIC_CMPXCHG_CPT_REV( fixed2, sub_cpt_rev, kmp_int16, 16, -, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_sub_cpt_rev -ATOMIC_CMPXCHG_CPT_REV( fixed4, div_cpt_rev, kmp_int32, 32, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_div_cpt_rev -ATOMIC_CMPXCHG_CPT_REV( fixed4u, div_cpt_rev, kmp_uint32, 32, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4u_div_cpt_rev -ATOMIC_CMPXCHG_CPT_REV( fixed4, shl_cpt_rev, kmp_int32, 32, <<, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_shl_cpt_rev -ATOMIC_CMPXCHG_CPT_REV( fixed4, shr_cpt_rev, kmp_int32, 32, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_shr_cpt_rev -ATOMIC_CMPXCHG_CPT_REV( fixed4u, shr_cpt_rev, kmp_uint32, 32, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4u_shr_cpt_rev -ATOMIC_CMPXCHG_CPT_REV( fixed4, sub_cpt_rev, kmp_int32, 32, -, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_sub_cpt_rev -ATOMIC_CMPXCHG_CPT_REV( fixed8, div_cpt_rev, kmp_int64, 64, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_div_cpt_rev -ATOMIC_CMPXCHG_CPT_REV( fixed8u, div_cpt_rev, kmp_uint64, 64, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_div_cpt_rev -ATOMIC_CMPXCHG_CPT_REV( fixed8, shl_cpt_rev, kmp_int64, 64, <<, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_shl_cpt_rev -ATOMIC_CMPXCHG_CPT_REV( fixed8, shr_cpt_rev, kmp_int64, 64, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_shr_cpt_rev -ATOMIC_CMPXCHG_CPT_REV( fixed8u, shr_cpt_rev, kmp_uint64, 64, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_shr_cpt_rev -ATOMIC_CMPXCHG_CPT_REV( fixed8, sub_cpt_rev, kmp_int64, 64, -, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_sub_cpt_rev -ATOMIC_CMPXCHG_CPT_REV( float4, div_cpt_rev, kmp_real32, 32, /, KMP_ARCH_X86 ) // __kmpc_atomic_float4_div_cpt_rev -ATOMIC_CMPXCHG_CPT_REV( float4, sub_cpt_rev, kmp_real32, 32, -, KMP_ARCH_X86 ) // __kmpc_atomic_float4_sub_cpt_rev -ATOMIC_CMPXCHG_CPT_REV( float8, div_cpt_rev, kmp_real64, 64, /, KMP_ARCH_X86 ) // __kmpc_atomic_float8_div_cpt_rev -ATOMIC_CMPXCHG_CPT_REV( float8, sub_cpt_rev, kmp_real64, 64, -, KMP_ARCH_X86 ) // __kmpc_atomic_float8_sub_cpt_rev -// TYPE_ID,OP_ID, TYPE, OP, GOMP_FLAG - - -// ------------------------------------------------------------------------ -// Routines for Extended types: long double, _Quad, complex flavours (use critical section) -// TYPE_ID, OP_ID, TYPE - detailed above -// OP - operator -// LCK_ID - lock identifier, used to possibly distinguish lock variable -#define ATOMIC_CRITICAL_CPT_REV(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \ -ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \ - TYPE new_value; \ - TYPE KMP_ATOMIC_VOLATILE temp_val; \ - /*printf("__kmp_atomic_mode = %d\n", __kmp_atomic_mode);*/\ - OP_GOMP_CRITICAL_CPT_REV(OP,GOMP_FLAG) \ - OP_CRITICAL_CPT_REV(OP,LCK_ID) \ -} - - -/* ------------------------------------------------------------------------- */ -// routines for long double type -ATOMIC_CRITICAL_CPT_REV( float10, sub_cpt_rev, long double, -, 10r, 1 ) // __kmpc_atomic_float10_sub_cpt_rev -ATOMIC_CRITICAL_CPT_REV( float10, div_cpt_rev, long double, /, 10r, 1 ) // __kmpc_atomic_float10_div_cpt_rev -#if KMP_HAVE_QUAD -// routines for _Quad type -ATOMIC_CRITICAL_CPT_REV( float16, sub_cpt_rev, QUAD_LEGACY, -, 16r, 1 ) // __kmpc_atomic_float16_sub_cpt_rev -ATOMIC_CRITICAL_CPT_REV( float16, div_cpt_rev, QUAD_LEGACY, /, 16r, 1 ) // __kmpc_atomic_float16_div_cpt_rev -#if ( KMP_ARCH_X86 ) - ATOMIC_CRITICAL_CPT_REV( float16, sub_a16_cpt_rev, Quad_a16_t, -, 16r, 1 ) // __kmpc_atomic_float16_sub_a16_cpt_rev - ATOMIC_CRITICAL_CPT_REV( float16, div_a16_cpt_rev, Quad_a16_t, /, 
16r, 1 ) // __kmpc_atomic_float16_div_a16_cpt_rev -#endif -#endif - -// routines for complex types - -// ------------------------------------------------------------------------ - -// Workaround for cmplx4. Regular routines with return value don't work -// on Win_32e. Let's return captured values through the additional parameter. -#define OP_CRITICAL_CPT_REV_WRK(OP,LCK_ID) \ - __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ - \ - if( flag ) { \ - (*lhs) = (rhs) OP (*lhs); \ - (*out) = (*lhs); \ - } else { \ - (*out) = (*lhs); \ - (*lhs) = (rhs) OP (*lhs); \ - } \ - \ - __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ - return; -// ------------------------------------------------------------------------ - -#ifdef KMP_GOMP_COMPAT -#define OP_GOMP_CRITICAL_CPT_REV_WRK(OP,FLAG) \ - if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \ - KMP_CHECK_GTID; \ - OP_CRITICAL_CPT_REV_WRK( OP, 0 ); \ - } -#else -#define OP_GOMP_CRITICAL_CPT_REV_WRK(OP,FLAG) -#endif /* KMP_GOMP_COMPAT */ -// ------------------------------------------------------------------------ - -#define ATOMIC_CRITICAL_CPT_REV_WRK(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \ -ATOMIC_BEGIN_WRK(TYPE_ID,OP_ID,TYPE) \ - OP_GOMP_CRITICAL_CPT_REV_WRK(OP,GOMP_FLAG) \ - OP_CRITICAL_CPT_REV_WRK(OP,LCK_ID) \ -} -// The end of workaround for cmplx4 - - -// !!! TODO: check if we need to return void for cmplx4 routines -// cmplx4 routines to return void -ATOMIC_CRITICAL_CPT_REV_WRK( cmplx4, sub_cpt_rev, kmp_cmplx32, -, 8c, 1 ) // __kmpc_atomic_cmplx4_sub_cpt_rev -ATOMIC_CRITICAL_CPT_REV_WRK( cmplx4, div_cpt_rev, kmp_cmplx32, /, 8c, 1 ) // __kmpc_atomic_cmplx4_div_cpt_rev - -ATOMIC_CRITICAL_CPT_REV( cmplx8, sub_cpt_rev, kmp_cmplx64, -, 16c, 1 ) // __kmpc_atomic_cmplx8_sub_cpt_rev -ATOMIC_CRITICAL_CPT_REV( cmplx8, div_cpt_rev, kmp_cmplx64, /, 16c, 1 ) // __kmpc_atomic_cmplx8_div_cpt_rev -ATOMIC_CRITICAL_CPT_REV( cmplx10, sub_cpt_rev, kmp_cmplx80, -, 20c, 1 ) // __kmpc_atomic_cmplx10_sub_cpt_rev -ATOMIC_CRITICAL_CPT_REV( cmplx10, div_cpt_rev, kmp_cmplx80, /, 20c, 1 ) // __kmpc_atomic_cmplx10_div_cpt_rev -#if KMP_HAVE_QUAD -ATOMIC_CRITICAL_CPT_REV( cmplx16, sub_cpt_rev, CPLX128_LEG, -, 32c, 1 ) // __kmpc_atomic_cmplx16_sub_cpt_rev -ATOMIC_CRITICAL_CPT_REV( cmplx16, div_cpt_rev, CPLX128_LEG, /, 32c, 1 ) // __kmpc_atomic_cmplx16_div_cpt_rev -#if ( KMP_ARCH_X86 ) - ATOMIC_CRITICAL_CPT_REV( cmplx16, sub_a16_cpt_rev, kmp_cmplx128_a16_t, -, 32c, 1 ) // __kmpc_atomic_cmplx16_sub_a16_cpt_rev - ATOMIC_CRITICAL_CPT_REV( cmplx16, div_a16_cpt_rev, kmp_cmplx128_a16_t, /, 32c, 1 ) // __kmpc_atomic_cmplx16_div_a16_cpt_rev -#endif -#endif - -// Capture reverse for mixed type: RHS=float16 -#if KMP_HAVE_QUAD - -// Beginning of a definition (provides name, parameters, debug trace) -// TYPE_ID - operands type and size (fixed*, fixed*u for signed, unsigned fixed) -// OP_ID - operation identifier (add, sub, mul, ...)
-// TYPE - operands' type -// ------------------------------------------------------------------------- -#define ATOMIC_CMPXCHG_CPT_REV_MIX(TYPE_ID,TYPE,OP_ID,BITS,OP,RTYPE_ID,RTYPE,LCK_ID,MASK,GOMP_FLAG) \ -ATOMIC_BEGIN_CPT_MIX(TYPE_ID,OP_ID,TYPE,RTYPE_ID,RTYPE) \ - TYPE new_value; \ - OP_GOMP_CRITICAL_CPT_REV(OP,GOMP_FLAG) \ - OP_CMPXCHG_CPT_REV(TYPE,BITS,OP) \ -} - -// ------------------------------------------------------------------------- -#define ATOMIC_CRITICAL_CPT_REV_MIX(TYPE_ID,TYPE,OP_ID,OP,RTYPE_ID,RTYPE,LCK_ID,GOMP_FLAG) \ -ATOMIC_BEGIN_CPT_MIX(TYPE_ID,OP_ID,TYPE,RTYPE_ID,RTYPE) \ - TYPE new_value; \ - OP_GOMP_CRITICAL_CPT_REV(OP,GOMP_FLAG) /* send assignment */ \ - OP_CRITICAL_CPT_REV(OP,LCK_ID) /* send assignment */ \ -} - -ATOMIC_CMPXCHG_CPT_REV_MIX( fixed1, char, sub_cpt_rev, 8, -, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_sub_cpt_rev_fp -ATOMIC_CMPXCHG_CPT_REV_MIX( fixed1u, uchar, sub_cpt_rev, 8, -, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_sub_cpt_rev_fp -ATOMIC_CMPXCHG_CPT_REV_MIX( fixed1, char, div_cpt_rev, 8, /, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_div_cpt_rev_fp -ATOMIC_CMPXCHG_CPT_REV_MIX( fixed1u, uchar, div_cpt_rev, 8, /, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_div_cpt_rev_fp - -ATOMIC_CMPXCHG_CPT_REV_MIX( fixed2, short, sub_cpt_rev, 16, -, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_sub_cpt_rev_fp -ATOMIC_CMPXCHG_CPT_REV_MIX( fixed2u, ushort, sub_cpt_rev, 16, -, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_sub_cpt_rev_fp -ATOMIC_CMPXCHG_CPT_REV_MIX( fixed2, short, div_cpt_rev, 16, /, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_div_cpt_rev_fp -ATOMIC_CMPXCHG_CPT_REV_MIX( fixed2u, ushort, div_cpt_rev, 16, /, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_div_cpt_rev_fp - -ATOMIC_CMPXCHG_CPT_REV_MIX( fixed4, kmp_int32, sub_cpt_rev, 32, -, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4_sub_cpt_rev_fp -ATOMIC_CMPXCHG_CPT_REV_MIX( fixed4u, kmp_uint32, sub_cpt_rev, 32, -, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4u_sub_cpt_rev_fp -ATOMIC_CMPXCHG_CPT_REV_MIX( fixed4, kmp_int32, div_cpt_rev, 32, /, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4_div_cpt_rev_fp -ATOMIC_CMPXCHG_CPT_REV_MIX( fixed4u, kmp_uint32, div_cpt_rev, 32, /, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4u_div_cpt_rev_fp - -ATOMIC_CMPXCHG_CPT_REV_MIX( fixed8, kmp_int64, sub_cpt_rev, 64, -, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_sub_cpt_rev_fp -ATOMIC_CMPXCHG_CPT_REV_MIX( fixed8u, kmp_uint64, sub_cpt_rev, 64, -, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_sub_cpt_rev_fp -ATOMIC_CMPXCHG_CPT_REV_MIX( fixed8, kmp_int64, div_cpt_rev, 64, /, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_div_cpt_rev_fp -ATOMIC_CMPXCHG_CPT_REV_MIX( fixed8u, kmp_uint64, div_cpt_rev, 64, /, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_div_cpt_rev_fp - -ATOMIC_CMPXCHG_CPT_REV_MIX( float4, kmp_real32, sub_cpt_rev, 32, -, fp, _Quad, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_sub_cpt_rev_fp -ATOMIC_CMPXCHG_CPT_REV_MIX( float4, kmp_real32, div_cpt_rev, 32, /, fp, _Quad, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_div_cpt_rev_fp - -ATOMIC_CMPXCHG_CPT_REV_MIX( float8, kmp_real64, sub_cpt_rev, 64, -, fp, _Quad, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_sub_cpt_rev_fp -ATOMIC_CMPXCHG_CPT_REV_MIX( float8, kmp_real64, div_cpt_rev, 64, /, fp, _Quad, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_div_cpt_rev_fp - -ATOMIC_CRITICAL_CPT_REV_MIX( float10, long 
double, sub_cpt_rev, -, fp, _Quad, 10r, 1 ) // __kmpc_atomic_float10_sub_cpt_rev_fp -ATOMIC_CRITICAL_CPT_REV_MIX( float10, long double, div_cpt_rev, /, fp, _Quad, 10r, 1 ) // __kmpc_atomic_float10_div_cpt_rev_fp - -#endif //KMP_HAVE_QUAD - - -// OpenMP 4.0 Capture-write (swap): {v = x; x = expr;} - -#define ATOMIC_BEGIN_SWP(TYPE_ID,TYPE) \ -TYPE __kmpc_atomic_##TYPE_ID##_swp( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs ) \ -{ \ - KMP_DEBUG_ASSERT( __kmp_init_serial ); \ - KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_swp: T#%d\n", gtid )); - -#define CRITICAL_SWP(LCK_ID) \ - __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ - \ - old_value = (*lhs); \ - (*lhs) = rhs; \ - \ - __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ - return old_value; - -// ------------------------------------------------------------------------ -#ifdef KMP_GOMP_COMPAT -#define GOMP_CRITICAL_SWP(FLAG) \ - if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \ - KMP_CHECK_GTID; \ - CRITICAL_SWP( 0 ); \ - } -#else -#define GOMP_CRITICAL_SWP(FLAG) -#endif /* KMP_GOMP_COMPAT */ - - -#define ATOMIC_XCHG_SWP(TYPE_ID,TYPE,BITS,GOMP_FLAG) \ -ATOMIC_BEGIN_SWP(TYPE_ID,TYPE) \ - TYPE old_value; \ - GOMP_CRITICAL_SWP(GOMP_FLAG) \ - old_value = KMP_XCHG_FIXED##BITS( lhs, rhs ); \ - return old_value; \ -} -// ------------------------------------------------------------------------ -#define ATOMIC_XCHG_FLOAT_SWP(TYPE_ID,TYPE,BITS,GOMP_FLAG) \ -ATOMIC_BEGIN_SWP(TYPE_ID,TYPE) \ - TYPE old_value; \ - GOMP_CRITICAL_SWP(GOMP_FLAG) \ - old_value = KMP_XCHG_REAL##BITS( lhs, rhs ); \ - return old_value; \ -} - -// ------------------------------------------------------------------------ -#define CMPXCHG_SWP(TYPE,BITS) \ - { \ - TYPE KMP_ATOMIC_VOLATILE temp_val; \ - TYPE old_value, new_value; \ - temp_val = *lhs; \ - old_value = temp_val; \ - new_value = rhs; \ - while ( ! 
KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \ - *VOLATILE_CAST(kmp_int##BITS *) &old_value, \ - *VOLATILE_CAST(kmp_int##BITS *) &new_value ) ) \ - { \ - KMP_CPU_PAUSE(); \ - \ - temp_val = *lhs; \ - old_value = temp_val; \ - new_value = rhs; \ - } \ - return old_value; \ - } - -// ------------------------------------------------------------------------- -#define ATOMIC_CMPXCHG_SWP(TYPE_ID,TYPE,BITS,GOMP_FLAG) \ -ATOMIC_BEGIN_SWP(TYPE_ID,TYPE) \ - TYPE old_value; \ - GOMP_CRITICAL_SWP(GOMP_FLAG) \ - CMPXCHG_SWP(TYPE,BITS) \ -} - -ATOMIC_XCHG_SWP( fixed1, kmp_int8, 8, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_swp -ATOMIC_XCHG_SWP( fixed2, kmp_int16, 16, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_swp -ATOMIC_XCHG_SWP( fixed4, kmp_int32, 32, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_swp - -ATOMIC_XCHG_FLOAT_SWP( float4, kmp_real32, 32, KMP_ARCH_X86 ) // __kmpc_atomic_float4_swp - -#if ( KMP_ARCH_X86 ) - ATOMIC_CMPXCHG_SWP( fixed8, kmp_int64, 64, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_swp - ATOMIC_CMPXCHG_SWP( float8, kmp_real64, 64, KMP_ARCH_X86 ) // __kmpc_atomic_float8_swp -#else - ATOMIC_XCHG_SWP( fixed8, kmp_int64, 64, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_swp - ATOMIC_XCHG_FLOAT_SWP( float8, kmp_real64, 64, KMP_ARCH_X86 ) // __kmpc_atomic_float8_swp -#endif - -// ------------------------------------------------------------------------ -// Routines for Extended types: long double, _Quad, complex flavours (use critical section) -#define ATOMIC_CRITICAL_SWP(TYPE_ID,TYPE,LCK_ID,GOMP_FLAG) \ -ATOMIC_BEGIN_SWP(TYPE_ID,TYPE) \ - TYPE old_value; \ - GOMP_CRITICAL_SWP(GOMP_FLAG) \ - CRITICAL_SWP(LCK_ID) \ -} - -// ------------------------------------------------------------------------ - -// !!! TODO: check if we need to return void for cmplx4 routines -// Workaround for cmplx4. Regular routines with return value don't work -// on Win_32e. Let's return captured values through the additional parameter. 
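For reference, a sketch of roughly what the workaround macros below expand to for the cmplx4 case. This is an editor's illustration, not part of the patch: the debug assertion, trace call, and GOMP-compatibility path are omitted, and ATOMIC_LOCK8c stands for the token-pasted lock name that the LCK_ID argument "8c" produces.

void __kmpc_atomic_cmplx4_swp( ident_t *id_ref, int gtid, kmp_cmplx32 * lhs, kmp_cmplx32 rhs, kmp_cmplx32 * out )
{
    kmp_cmplx32 tmp;
    __kmp_acquire_atomic_lock( & ATOMIC_LOCK8c, gtid );
    tmp = (*lhs);    // capture the old value,
    (*lhs) = (rhs);  // store the new one,
    (*out) = tmp;    // and hand the old value back through the extra parameter
    __kmp_release_atomic_lock( & ATOMIC_LOCK8c, gtid );
    return;
}

Returning the captured value through 'out' rather than by value is what sidesteps the Win_32e struct-return problem described in the comment above.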
- -#define ATOMIC_BEGIN_SWP_WRK(TYPE_ID,TYPE) \ -void __kmpc_atomic_##TYPE_ID##_swp( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs, TYPE * out ) \ -{ \ - KMP_DEBUG_ASSERT( __kmp_init_serial ); \ - KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_swp: T#%d\n", gtid )); - - -#define CRITICAL_SWP_WRK(LCK_ID) \ - __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ - \ - tmp = (*lhs); \ - (*lhs) = (rhs); \ - (*out) = tmp; \ - __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ - return; - -// ------------------------------------------------------------------------ - -#ifdef KMP_GOMP_COMPAT -#define GOMP_CRITICAL_SWP_WRK(FLAG) \ - if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \ - KMP_CHECK_GTID; \ - CRITICAL_SWP_WRK( 0 ); \ - } -#else -#define GOMP_CRITICAL_SWP_WRK(FLAG) -#endif /* KMP_GOMP_COMPAT */ -// ------------------------------------------------------------------------ - -#define ATOMIC_CRITICAL_SWP_WRK(TYPE_ID, TYPE,LCK_ID,GOMP_FLAG) \ -ATOMIC_BEGIN_SWP_WRK(TYPE_ID,TYPE) \ - TYPE tmp; \ - GOMP_CRITICAL_SWP_WRK(GOMP_FLAG) \ - CRITICAL_SWP_WRK(LCK_ID) \ -} -// The end of workaround for cmplx4 - - -ATOMIC_CRITICAL_SWP( float10, long double, 10r, 1 ) // __kmpc_atomic_float10_swp -#if KMP_HAVE_QUAD -ATOMIC_CRITICAL_SWP( float16, QUAD_LEGACY, 16r, 1 ) // __kmpc_atomic_float16_swp -#endif -// cmplx4 routine to return void -ATOMIC_CRITICAL_SWP_WRK( cmplx4, kmp_cmplx32, 8c, 1 ) // __kmpc_atomic_cmplx4_swp - -//ATOMIC_CRITICAL_SWP( cmplx4, kmp_cmplx32, 8c, 1 ) // __kmpc_atomic_cmplx4_swp - - -ATOMIC_CRITICAL_SWP( cmplx8, kmp_cmplx64, 16c, 1 ) // __kmpc_atomic_cmplx8_swp -ATOMIC_CRITICAL_SWP( cmplx10, kmp_cmplx80, 20c, 1 ) // __kmpc_atomic_cmplx10_swp -#if KMP_HAVE_QUAD -ATOMIC_CRITICAL_SWP( cmplx16, CPLX128_LEG, 32c, 1 ) // __kmpc_atomic_cmplx16_swp -#if ( KMP_ARCH_X86 ) - ATOMIC_CRITICAL_SWP( float16_a16, Quad_a16_t, 16r, 1 ) // __kmpc_atomic_float16_a16_swp - ATOMIC_CRITICAL_SWP( cmplx16_a16, kmp_cmplx128_a16_t, 32c, 1 ) // __kmpc_atomic_cmplx16_a16_swp -#endif -#endif - - -// End of OpenMP 4.0 Capture - -#endif //OMP_40_ENABLED - -#endif //KMP_ARCH_X86 || KMP_ARCH_X86_64 - - -#undef OP_CRITICAL - -/* ------------------------------------------------------------------------ */ -/* Generic atomic routines */ -/* ------------------------------------------------------------------------ */ - -void -__kmpc_atomic_1( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) ) -{ - KMP_DEBUG_ASSERT( __kmp_init_serial ); - - if ( -#if KMP_ARCH_X86 && defined(KMP_GOMP_COMPAT) - FALSE /* must use lock */ -#else - TRUE -#endif - ) - { - kmp_int8 old_value, new_value; - - old_value = *(kmp_int8 *) lhs; - (*f)( &new_value, &old_value, rhs ); - - /* TODO: Should this be acquire or release? */ - while ( ! KMP_COMPARE_AND_STORE_ACQ8 ( (kmp_int8 *) lhs, - *(kmp_int8 *) &old_value, *(kmp_int8 *) &new_value ) ) - { - KMP_CPU_PAUSE(); - - old_value = *(kmp_int8 *) lhs; - (*f)( &new_value, &old_value, rhs ); - } - - return; - } - else { - // - // All 1-byte data is of integer data type. 
- // - -#ifdef KMP_GOMP_COMPAT - if ( __kmp_atomic_mode == 2 ) { - __kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid ); - } - else -#endif /* KMP_GOMP_COMPAT */ - __kmp_acquire_atomic_lock( & __kmp_atomic_lock_1i, gtid ); - - (*f)( lhs, lhs, rhs ); - -#ifdef KMP_GOMP_COMPAT - if ( __kmp_atomic_mode == 2 ) { - __kmp_release_atomic_lock( & __kmp_atomic_lock, gtid ); - } - else -#endif /* KMP_GOMP_COMPAT */ - __kmp_release_atomic_lock( & __kmp_atomic_lock_1i, gtid ); - } -} - -void -__kmpc_atomic_2( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) ) -{ - if ( -#if KMP_ARCH_X86 && defined(KMP_GOMP_COMPAT) - FALSE /* must use lock */ -#elif KMP_ARCH_X86 || KMP_ARCH_X86_64 - TRUE /* no alignment problems */ -#else - ! ( (kmp_uintptr_t) lhs & 0x1) /* make sure address is 2-byte aligned */ -#endif - ) - { - kmp_int16 old_value, new_value; - - old_value = *(kmp_int16 *) lhs; - (*f)( &new_value, &old_value, rhs ); - - /* TODO: Should this be acquire or release? */ - while ( ! KMP_COMPARE_AND_STORE_ACQ16 ( (kmp_int16 *) lhs, - *(kmp_int16 *) &old_value, *(kmp_int16 *) &new_value ) ) - { - KMP_CPU_PAUSE(); - - old_value = *(kmp_int16 *) lhs; - (*f)( &new_value, &old_value, rhs ); - } - - return; - } - else { - // - // All 2-byte data is of integer data type. - // - -#ifdef KMP_GOMP_COMPAT - if ( __kmp_atomic_mode == 2 ) { - __kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid ); - } - else -#endif /* KMP_GOMP_COMPAT */ - __kmp_acquire_atomic_lock( & __kmp_atomic_lock_2i, gtid ); - - (*f)( lhs, lhs, rhs ); - -#ifdef KMP_GOMP_COMPAT - if ( __kmp_atomic_mode == 2 ) { - __kmp_release_atomic_lock( & __kmp_atomic_lock, gtid ); - } - else -#endif /* KMP_GOMP_COMPAT */ - __kmp_release_atomic_lock( & __kmp_atomic_lock_2i, gtid ); - } -} - -void -__kmpc_atomic_4( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) ) -{ - KMP_DEBUG_ASSERT( __kmp_init_serial ); - - if ( - // - // FIXME: On IA-32 architecture, gcc uses cmpxchg only for 4-byte ints. - // Gomp compatibility is broken if this routine is called for floats. - // -#if KMP_ARCH_X86 || KMP_ARCH_X86_64 - TRUE /* no alignment problems */ -#else - ! ( (kmp_uintptr_t) lhs & 0x3) /* make sure address is 4-byte aligned */ -#endif - ) - { - kmp_int32 old_value, new_value; - - old_value = *(kmp_int32 *) lhs; - (*f)( &new_value, &old_value, rhs ); - - /* TODO: Should this be acquire or release? */ - while ( ! KMP_COMPARE_AND_STORE_ACQ32 ( (kmp_int32 *) lhs, - *(kmp_int32 *) &old_value, *(kmp_int32 *) &new_value ) ) - { - KMP_CPU_PAUSE(); - - old_value = *(kmp_int32 *) lhs; - (*f)( &new_value, &old_value, rhs ); - } - - return; - } - else { - // - // Use __kmp_atomic_lock_4i for all 4-byte data, - // even if it isn't of integer data type. 
- // - -#ifdef KMP_GOMP_COMPAT - if ( __kmp_atomic_mode == 2 ) { - __kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid ); - } - else -#endif /* KMP_GOMP_COMPAT */ - __kmp_acquire_atomic_lock( & __kmp_atomic_lock_4i, gtid ); - - (*f)( lhs, lhs, rhs ); - -#ifdef KMP_GOMP_COMPAT - if ( __kmp_atomic_mode == 2 ) { - __kmp_release_atomic_lock( & __kmp_atomic_lock, gtid ); - } - else -#endif /* KMP_GOMP_COMPAT */ - __kmp_release_atomic_lock( & __kmp_atomic_lock_4i, gtid ); - } -} - -void -__kmpc_atomic_8( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) ) -{ - KMP_DEBUG_ASSERT( __kmp_init_serial ); - if ( - -#if KMP_ARCH_X86 && defined(KMP_GOMP_COMPAT) - FALSE /* must use lock */ -#elif KMP_ARCH_X86 || KMP_ARCH_X86_64 - TRUE /* no alignment problems */ -#else - ! ( (kmp_uintptr_t) lhs & 0x7) /* make sure address is 8-byte aligned */ -#endif - ) - { - kmp_int64 old_value, new_value; - - old_value = *(kmp_int64 *) lhs; - (*f)( &new_value, &old_value, rhs ); - /* TODO: Should this be acquire or release? */ - while ( ! KMP_COMPARE_AND_STORE_ACQ64 ( (kmp_int64 *) lhs, - *(kmp_int64 *) &old_value, - *(kmp_int64 *) &new_value ) ) - { - KMP_CPU_PAUSE(); - - old_value = *(kmp_int64 *) lhs; - (*f)( &new_value, &old_value, rhs ); - } - - return; - } else { - // - // Use __kmp_atomic_lock_8i for all 8-byte data, - // even if it isn't of integer data type. - // - -#ifdef KMP_GOMP_COMPAT - if ( __kmp_atomic_mode == 2 ) { - __kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid ); - } - else -#endif /* KMP_GOMP_COMPAT */ - __kmp_acquire_atomic_lock( & __kmp_atomic_lock_8i, gtid ); - - (*f)( lhs, lhs, rhs ); - -#ifdef KMP_GOMP_COMPAT - if ( __kmp_atomic_mode == 2 ) { - __kmp_release_atomic_lock( & __kmp_atomic_lock, gtid ); - } - else -#endif /* KMP_GOMP_COMPAT */ - __kmp_release_atomic_lock( & __kmp_atomic_lock_8i, gtid ); - } -} - -void -__kmpc_atomic_10( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) ) -{ - KMP_DEBUG_ASSERT( __kmp_init_serial ); - -#ifdef KMP_GOMP_COMPAT - if ( __kmp_atomic_mode == 2 ) { - __kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid ); - } - else -#endif /* KMP_GOMP_COMPAT */ - __kmp_acquire_atomic_lock( & __kmp_atomic_lock_10r, gtid ); - - (*f)( lhs, lhs, rhs ); - -#ifdef KMP_GOMP_COMPAT - if ( __kmp_atomic_mode == 2 ) { - __kmp_release_atomic_lock( & __kmp_atomic_lock, gtid ); - } - else -#endif /* KMP_GOMP_COMPAT */ - __kmp_release_atomic_lock( & __kmp_atomic_lock_10r, gtid ); -} - -void -__kmpc_atomic_16( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) ) -{ - KMP_DEBUG_ASSERT( __kmp_init_serial ); - -#ifdef KMP_GOMP_COMPAT - if ( __kmp_atomic_mode == 2 ) { - __kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid ); - } - else -#endif /* KMP_GOMP_COMPAT */ - __kmp_acquire_atomic_lock( & __kmp_atomic_lock_16c, gtid ); - - (*f)( lhs, lhs, rhs ); - -#ifdef KMP_GOMP_COMPAT - if ( __kmp_atomic_mode == 2 ) { - __kmp_release_atomic_lock( & __kmp_atomic_lock, gtid ); - } - else -#endif /* KMP_GOMP_COMPAT */ - __kmp_release_atomic_lock( & __kmp_atomic_lock_16c, gtid ); -} - -void -__kmpc_atomic_20( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) ) -{ - KMP_DEBUG_ASSERT( __kmp_init_serial ); - -#ifdef KMP_GOMP_COMPAT - if ( __kmp_atomic_mode == 2 ) { - __kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid ); - } - else -#endif /* KMP_GOMP_COMPAT */ - __kmp_acquire_atomic_lock( & __kmp_atomic_lock_20c, gtid ); - - (*f)( lhs, lhs, rhs ); - -#ifdef 
KMP_GOMP_COMPAT - if ( __kmp_atomic_mode == 2 ) { - __kmp_release_atomic_lock( & __kmp_atomic_lock, gtid ); - } - else -#endif /* KMP_GOMP_COMPAT */ - __kmp_release_atomic_lock( & __kmp_atomic_lock_20c, gtid ); -} - -void -__kmpc_atomic_32( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) ) -{ - KMP_DEBUG_ASSERT( __kmp_init_serial ); - -#ifdef KMP_GOMP_COMPAT - if ( __kmp_atomic_mode == 2 ) { - __kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid ); - } - else -#endif /* KMP_GOMP_COMPAT */ - __kmp_acquire_atomic_lock( & __kmp_atomic_lock_32c, gtid ); - - (*f)( lhs, lhs, rhs ); - -#ifdef KMP_GOMP_COMPAT - if ( __kmp_atomic_mode == 2 ) { - __kmp_release_atomic_lock( & __kmp_atomic_lock, gtid ); - } - else -#endif /* KMP_GOMP_COMPAT */ - __kmp_release_atomic_lock( & __kmp_atomic_lock_32c, gtid ); -} - -// AC: same two routines as GOMP_atomic_start/end, but will be called by our compiler -// duplicated in order not to use third-party names in pure Intel code -// TODO: consider adding GTID parameter after consultation with Ernesto/Xinmin. -void -__kmpc_atomic_start(void) -{ - int gtid = __kmp_entry_gtid(); - KA_TRACE(20, ("__kmpc_atomic_start: T#%d\n", gtid)); - __kmp_acquire_atomic_lock(&__kmp_atomic_lock, gtid); -} - - -void -__kmpc_atomic_end(void) -{ - int gtid = __kmp_get_gtid(); - KA_TRACE(20, ("__kmpc_atomic_end: T#%d\n", gtid)); - __kmp_release_atomic_lock(&__kmp_atomic_lock, gtid); -} - -/* ------------------------------------------------------------------------ */ -/* ------------------------------------------------------------------------ */ -/*! -@} -*/ - -// end of file Index: runtime/src/kmp_atomic.cpp =================================================================== --- runtime/src/kmp_atomic.cpp +++ runtime/src/kmp_atomic.cpp @@ -1,5 +1,5 @@ /* - * kmp_atomic.c -- ATOMIC implementation routines + * kmp_atomic.cpp -- ATOMIC implementation routines */ Index: runtime/src/kmp_csupport.c =================================================================== --- runtime/src/kmp_csupport.c +++ runtime/src/kmp_csupport.c @@ -1,3337 +0,0 @@ -/* - * kmp_csupport.c -- kfront linkage support for OpenMP. - */ - - -//===----------------------------------------------------------------------===// -// -// The LLVM Compiler Infrastructure -// -// This file is dual licensed under the MIT and the University of Illinois Open -// Source Licenses. See LICENSE.txt for details. -// -//===----------------------------------------------------------------------===// - - -#include "omp.h" /* extern "C" declarations of user-visible routines */ -#include "kmp.h" -#include "kmp_i18n.h" -#include "kmp_itt.h" -#include "kmp_lock.h" -#include "kmp_error.h" -#include "kmp_stats.h" - -#if OMPT_SUPPORT -#include "ompt-internal.h" -#include "ompt-specific.h" -#endif - -#define MAX_MESSAGE 512 - -/* ------------------------------------------------------------------------ */ -/* ------------------------------------------------------------------------ */ - -/* flags will be used in future, e.g., to implement */ -/* openmp_strict library restrictions */ - -/*! - * @ingroup STARTUP_SHUTDOWN - * @param loc in source location information - * @param flags in for future use (currently ignored) - * - * Initialize the runtime library. This call is optional; if it is not made then - * it will be implicitly called by attempts to use other library functions. - * - */ -void -__kmpc_begin(ident_t *loc, kmp_int32 flags) -{ - // By default __kmp_ignore_mppbeg() returns TRUE. - if (__kmp_ignore_mppbeg() == FALSE) { - __kmp_internal_begin(); - - KC_TRACE( 10, ("__kmpc_begin: called\n" ) ); - } -} - -/*! - * @ingroup STARTUP_SHUTDOWN - * @param loc source location information - * - * Shut down the runtime library. This is also optional, and even if called will not - * do anything unless the `KMP_IGNORE_MPPEND` environment variable is set to zero. - */ -void -__kmpc_end(ident_t *loc) -{ - // By default, __kmp_ignore_mppend() returns TRUE, which makes the __kmpc_end() call a no-op. - // However, this can be overridden with the KMP_IGNORE_MPPEND environment variable. - // If KMP_IGNORE_MPPEND is 0, __kmp_ignore_mppend() returns FALSE and __kmpc_end() - // will unregister this root (it can cause library shutdown). - if (__kmp_ignore_mppend() == FALSE) { - KC_TRACE( 10, ("__kmpc_end: called\n" ) ); - KA_TRACE( 30, ("__kmpc_end\n" )); - - __kmp_internal_end_thread( -1 ); - } -} - -/*! -@ingroup THREAD_STATES -@param loc Source location information. -@return The global thread index of the active thread. - -This function can be called in any context. - -If the runtime has only been entered at the outermost level from a -single (necessarily non-OpenMP*) thread, then the thread number is that -which would be returned by omp_get_thread_num() in the outermost -active parallel construct. (Or zero if there is no active parallel -construct, since the master thread is necessarily thread zero). - -If multiple non-OpenMP threads all enter an OpenMP construct then this -will be a unique thread identifier among all the threads created by -the OpenMP runtime (but the value cannot be defined in terms of -OpenMP thread ids returned by omp_get_thread_num()). - -*/ -kmp_int32 -__kmpc_global_thread_num(ident_t *loc) -{ - kmp_int32 gtid = __kmp_entry_gtid(); - - KC_TRACE( 10, ("__kmpc_global_thread_num: T#%d\n", gtid ) ); - - return gtid; -} - -/*! -@ingroup THREAD_STATES -@param loc Source location information. -@return The number of threads under control of the OpenMP* runtime - -This function can be called in any context. -It returns the total number of threads under the control of the OpenMP runtime. That is -not a number that can be determined by any OpenMP standard calls, since the library may be -called from more than one non-OpenMP thread, and this reflects the total over all such calls. -Similarly, the runtime maintains underlying threads even when they are not active (since the cost -of creating and destroying OS threads is high), and this call counts all such threads even if they are not -waiting for work. -*/ -kmp_int32 -__kmpc_global_num_threads(ident_t *loc) -{ - KC_TRACE( 10, ("__kmpc_global_num_threads: num_threads = %d\n", __kmp_nth ) ); - - return TCR_4(__kmp_nth); -} - -/*! -@ingroup THREAD_STATES -@param loc Source location information. -@return The thread number of the calling thread in the innermost active parallel construct. - -*/ -kmp_int32 -__kmpc_bound_thread_num(ident_t *loc) -{ - KC_TRACE( 10, ("__kmpc_bound_thread_num: called\n" ) ); - return __kmp_tid_from_gtid( __kmp_entry_gtid() ); -} - -/*! -@ingroup THREAD_STATES -@param loc Source location information. -@return The number of threads in the innermost active parallel construct. -*/ -kmp_int32 -__kmpc_bound_num_threads(ident_t *loc) -{ - KC_TRACE( 10, ("__kmpc_bound_num_threads: called\n" ) ); - - return __kmp_entry_thread() -> th.th_team -> t.t_nproc; -} - -/*! - * @ingroup DEPRECATED - * @param loc location description - * - * This function need not be called. It always returns TRUE.
- */ -kmp_int32 -__kmpc_ok_to_fork(ident_t *loc) -{ -#ifndef KMP_DEBUG - - return TRUE; - -#else - - const char *semi2; - const char *semi3; - int line_no; - - if (__kmp_par_range == 0) { - return TRUE; - } - semi2 = loc->psource; - if (semi2 == NULL) { - return TRUE; - } - semi2 = strchr(semi2, ';'); - if (semi2 == NULL) { - return TRUE; - } - semi2 = strchr(semi2 + 1, ';'); - if (semi2 == NULL) { - return TRUE; - } - if (__kmp_par_range_filename[0]) { - const char *name = semi2 - 1; - while ((name > loc->psource) && (*name != '/') && (*name != ';')) { - name--; - } - if ((*name == '/') || (*name == ';')) { - name++; - } - if (strncmp(__kmp_par_range_filename, name, semi2 - name)) { - return __kmp_par_range < 0; - } - } - semi3 = strchr(semi2 + 1, ';'); - if (__kmp_par_range_routine[0]) { - if ((semi3 != NULL) && (semi3 > semi2) - && (strncmp(__kmp_par_range_routine, semi2 + 1, semi3 - semi2 - 1))) { - return __kmp_par_range < 0; - } - } - if (KMP_SSCANF(semi3 + 1, "%d", &line_no) == 1) { - if ((line_no >= __kmp_par_range_lb) && (line_no <= __kmp_par_range_ub)) { - return __kmp_par_range > 0; - } - return __kmp_par_range < 0; - } - return TRUE; - -#endif /* KMP_DEBUG */ - -} - -/*! -@ingroup THREAD_STATES -@param loc Source location information. -@return 1 if this thread is executing inside an active parallel region, zero if not. -*/ -kmp_int32 -__kmpc_in_parallel( ident_t *loc ) -{ - return __kmp_entry_thread() -> th.th_root -> r.r_active; -} - -/*! -@ingroup PARALLEL -@param loc source location information -@param global_tid global thread number -@param num_threads number of threads requested for this parallel construct - -Set the number of threads to be used by the next fork spawned by this thread. -This call is only required if the parallel construct has a `num_threads` clause. -*/ -void -__kmpc_push_num_threads(ident_t *loc, kmp_int32 global_tid, kmp_int32 num_threads ) -{ - KA_TRACE( 20, ("__kmpc_push_num_threads: enter T#%d num_threads=%d\n", - global_tid, num_threads ) ); - - __kmp_push_num_threads( loc, global_tid, num_threads ); -} - -void -__kmpc_pop_num_threads(ident_t *loc, kmp_int32 global_tid ) -{ - KA_TRACE( 20, ("__kmpc_pop_num_threads: enter\n" ) ); - - /* the num_threads are automatically popped */ -} - - -#if OMP_40_ENABLED - -void -__kmpc_push_proc_bind(ident_t *loc, kmp_int32 global_tid, kmp_int32 proc_bind ) -{ - KA_TRACE( 20, ("__kmpc_push_proc_bind: enter T#%d proc_bind=%d\n", - global_tid, proc_bind ) ); - - __kmp_push_proc_bind( loc, global_tid, (kmp_proc_bind_t)proc_bind ); -} - -#endif /* OMP_40_ENABLED */ - - -/*! -@ingroup PARALLEL -@param loc source location information -@param argc total number of arguments in the ellipsis -@param microtask pointer to callback routine consisting of outlined parallel construct -@param ... pointers to shared variables that aren't global - -Do the actual fork and call the microtask in the relevant number of threads. -*/ -void -__kmpc_fork_call(ident_t *loc, kmp_int32 argc, kmpc_micro microtask, ...) 
-{ - int gtid = __kmp_entry_gtid(); - -#if (KMP_STATS_ENABLED) - int inParallel = __kmpc_in_parallel(loc); - if (inParallel) - { - KMP_COUNT_BLOCK(OMP_NESTED_PARALLEL); - } - else - { - KMP_COUNT_BLOCK(OMP_PARALLEL); - } -#endif - - // maybe to save thr_state is enough here - { - va_list ap; - va_start( ap, microtask ); - -#if OMPT_SUPPORT - ompt_frame_t* ompt_frame; - if (ompt_enabled) { - kmp_info_t *master_th = __kmp_threads[ gtid ]; - kmp_team_t *parent_team = master_th->th.th_team; - ompt_lw_taskteam_t *lwt = parent_team->t.ompt_serialized_team_info; - if (lwt) - ompt_frame = &(lwt->ompt_task_info.frame); - else - { - int tid = __kmp_tid_from_gtid( gtid ); - ompt_frame = &(parent_team->t.t_implicit_task_taskdata[tid]. - ompt_task_info.frame); - } - ompt_frame->reenter_runtime_frame = __builtin_frame_address(1); - } -#endif - -#if INCLUDE_SSC_MARKS - SSC_MARK_FORKING(); -#endif - __kmp_fork_call( loc, gtid, fork_context_intel, - argc, -#if OMPT_SUPPORT - VOLATILE_CAST(void *) microtask, // "unwrapped" task -#endif - VOLATILE_CAST(microtask_t) microtask, // "wrapped" task - VOLATILE_CAST(launch_t) __kmp_invoke_task_func, -/* TODO: revert workaround for Intel(R) 64 tracker #96 */ -#if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX - &ap -#else - ap -#endif - ); -#if INCLUDE_SSC_MARKS - SSC_MARK_JOINING(); -#endif - __kmp_join_call( loc, gtid -#if OMPT_SUPPORT - , fork_context_intel -#endif - ); - - va_end( ap ); - - } -} - -#if OMP_40_ENABLED -/*! -@ingroup PARALLEL -@param loc source location information -@param global_tid global thread number -@param num_teams number of teams requested for the teams construct -@param num_threads number of threads per team requested for the teams construct - -Set the number of teams to be used by the teams construct. -This call is only required if the teams construct has a `num_teams` clause -or a `thread_limit` clause (or both). -*/ -void -__kmpc_push_num_teams(ident_t *loc, kmp_int32 global_tid, kmp_int32 num_teams, kmp_int32 num_threads ) -{ - KA_TRACE( 20, ("__kmpc_push_num_teams: enter T#%d num_teams=%d num_threads=%d\n", - global_tid, num_teams, num_threads ) ); - - __kmp_push_num_teams( loc, global_tid, num_teams, num_threads ); -} - -/*! -@ingroup PARALLEL -@param loc source location information -@param argc total number of arguments in the ellipsis -@param microtask pointer to callback routine consisting of outlined teams construct -@param ... pointers to shared variables that aren't global - -Do the actual fork and call the microtask in the relevant number of threads. -*/ -void -__kmpc_fork_teams(ident_t *loc, kmp_int32 argc, kmpc_micro microtask, ...) -{ - int gtid = __kmp_entry_gtid(); - kmp_info_t *this_thr = __kmp_threads[ gtid ]; - va_list ap; - va_start( ap, microtask ); - - KMP_COUNT_BLOCK(OMP_TEAMS); - - // remember teams entry point and nesting level - this_thr->th.th_teams_microtask = microtask; - this_thr->th.th_teams_level = this_thr->th.th_team->t.t_level; // AC: can be >0 on host - -#if OMPT_SUPPORT - kmp_team_t *parent_team = this_thr->th.th_team; - int tid = __kmp_tid_from_gtid( gtid ); - if (ompt_enabled) { - parent_team->t.t_implicit_task_taskdata[tid]. 
- ompt_task_info.frame.reenter_runtime_frame = __builtin_frame_address(1); - } -#endif - - // check if __kmpc_push_num_teams called, set default number of teams otherwise - if ( this_thr->th.th_teams_size.nteams == 0 ) { - __kmp_push_num_teams( loc, gtid, 0, 0 ); - } - KMP_DEBUG_ASSERT(this_thr->th.th_set_nproc >= 1); - KMP_DEBUG_ASSERT(this_thr->th.th_teams_size.nteams >= 1); - KMP_DEBUG_ASSERT(this_thr->th.th_teams_size.nth >= 1); - - __kmp_fork_call( loc, gtid, fork_context_intel, - argc, -#if OMPT_SUPPORT - VOLATILE_CAST(void *) microtask, // "unwrapped" task -#endif - VOLATILE_CAST(microtask_t) __kmp_teams_master, // "wrapped" task - VOLATILE_CAST(launch_t) __kmp_invoke_teams_master, -#if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX - &ap -#else - ap -#endif - ); - __kmp_join_call( loc, gtid -#if OMPT_SUPPORT - , fork_context_intel -#endif - ); - - this_thr->th.th_teams_microtask = NULL; - this_thr->th.th_teams_level = 0; - *(kmp_int64*)(&this_thr->th.th_teams_size) = 0L; - va_end( ap ); -} -#endif /* OMP_40_ENABLED */ - - -// -// I don't think this function should ever have been exported. -// The __kmpc_ prefix was misapplied. I'm fairly certain that no generated -// openmp code ever called it, but it's been exported from the RTL for so -// long that I'm afraid to remove the definition. -// -int -__kmpc_invoke_task_func( int gtid ) -{ - return __kmp_invoke_task_func( gtid ); -} - -/*! -@ingroup PARALLEL -@param loc source location information -@param global_tid global thread number - -Enter a serialized parallel construct. This interface is used to handle a -conditional parallel region, like this, -@code -#pragma omp parallel if (condition) -@endcode -when the condition is false. -*/ -void -__kmpc_serialized_parallel(ident_t *loc, kmp_int32 global_tid) -{ - __kmp_serialized_parallel(loc, global_tid); /* The implementation is now in kmp_runtime.c so that it can share static functions with - * kmp_fork_call since the tasks to be done are similar in each case. - */ -} - -/*! -@ingroup PARALLEL -@param loc source location information -@param global_tid global thread number - -Leave a serialized parallel construct. -*/ -void -__kmpc_end_serialized_parallel(ident_t *loc, kmp_int32 global_tid) -{ - kmp_internal_control_t *top; - kmp_info_t *this_thr; - kmp_team_t *serial_team; - - KC_TRACE( 10, ("__kmpc_end_serialized_parallel: called by T#%d\n", global_tid ) ); - - /* skip all this code for autopar serialized loops since it results in - unacceptable overhead */ - if( loc != NULL && (loc->flags & KMP_IDENT_AUTOPAR ) ) - return; - - // Not autopar code - if( ! TCR_4( __kmp_init_parallel ) ) - __kmp_parallel_initialize(); - - this_thr = __kmp_threads[ global_tid ]; - serial_team = this_thr->th.th_serial_team; - - #if OMP_45_ENABLED - kmp_task_team_t * task_team = this_thr->th.th_task_team; - - // we need to wait for the proxy tasks before finishing the thread - if ( task_team != NULL && task_team->tt.tt_found_proxy_tasks ) - __kmp_task_team_wait(this_thr, serial_team USE_ITT_BUILD_ARG(NULL) ); // is an ITT object needed here? 
- #endif - - KMP_MB(); - KMP_DEBUG_ASSERT( serial_team ); - KMP_ASSERT( serial_team -> t.t_serialized ); - KMP_DEBUG_ASSERT( this_thr -> th.th_team == serial_team ); - KMP_DEBUG_ASSERT( serial_team != this_thr->th.th_root->r.r_root_team ); - KMP_DEBUG_ASSERT( serial_team -> t.t_threads ); - KMP_DEBUG_ASSERT( serial_team -> t.t_threads[0] == this_thr ); - - /* If necessary, pop the internal control stack values and replace the team values */ - top = serial_team -> t.t_control_stack_top; - if ( top && top -> serial_nesting_level == serial_team -> t.t_serialized ) { - copy_icvs( &serial_team -> t.t_threads[0] -> th.th_current_task -> td_icvs, top ); - serial_team -> t.t_control_stack_top = top -> next; - __kmp_free(top); - } - - //if( serial_team -> t.t_serialized > 1 ) - serial_team -> t.t_level--; - - /* pop dispatch buffers stack */ - KMP_DEBUG_ASSERT(serial_team->t.t_dispatch->th_disp_buffer); - { - dispatch_private_info_t * disp_buffer = serial_team->t.t_dispatch->th_disp_buffer; - serial_team->t.t_dispatch->th_disp_buffer = - serial_team->t.t_dispatch->th_disp_buffer->next; - __kmp_free( disp_buffer ); - } - - -- serial_team -> t.t_serialized; - if ( serial_team -> t.t_serialized == 0 ) { - - /* return to the parallel section */ - -#if KMP_ARCH_X86 || KMP_ARCH_X86_64 - if ( __kmp_inherit_fp_control && serial_team->t.t_fp_control_saved ) { - __kmp_clear_x87_fpu_status_word(); - __kmp_load_x87_fpu_control_word( &serial_team->t.t_x87_fpu_control_word ); - __kmp_load_mxcsr( &serial_team->t.t_mxcsr ); - } -#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */ - - this_thr -> th.th_team = serial_team -> t.t_parent; - this_thr -> th.th_info.ds.ds_tid = serial_team -> t.t_master_tid; - - /* restore values cached in the thread */ - this_thr -> th.th_team_nproc = serial_team -> t.t_parent -> t.t_nproc; /* JPH */ - this_thr -> th.th_team_master = serial_team -> t.t_parent -> t.t_threads[0]; /* JPH */ - this_thr -> th.th_team_serialized = this_thr -> th.th_team -> t.t_serialized; - - /* TODO the below shouldn't need to be adjusted for serialized teams */ - this_thr -> th.th_dispatch = & this_thr -> th.th_team -> - t.t_dispatch[ serial_team -> t.t_master_tid ]; - - __kmp_pop_current_task_from_thread( this_thr ); - - KMP_ASSERT( this_thr -> th.th_current_task -> td_flags.executing == 0 ); - this_thr -> th.th_current_task -> td_flags.executing = 1; - - if ( __kmp_tasking_mode != tskm_immediate_exec ) { - // Copy the task team from the new child / old parent team to the thread. - this_thr->th.th_task_team = this_thr->th.th_team->t.t_task_team[this_thr->th.th_task_state]; - KA_TRACE( 20, ( "__kmpc_end_serialized_parallel: T#%d restoring task_team %p / team %p\n", - global_tid, this_thr -> th.th_task_team, this_thr -> th.th_team ) ); - } - } else { - if ( __kmp_tasking_mode != tskm_immediate_exec ) { - KA_TRACE( 20, ( "__kmpc_end_serialized_parallel: T#%d decreasing nesting depth of serial team %p to %d\n", - global_tid, serial_team, serial_team -> t.t_serialized ) ); - } - } - - if ( __kmp_env_consistency_check ) - __kmp_pop_parallel( global_tid, NULL ); -} - -/*! -@ingroup SYNCHRONIZATION -@param loc source location information. - -Execute flush. This is implemented as a full memory fence. (Though -depending on the memory ordering convention obeyed by the compiler -even that may not be necessary). 
-*/ -void -__kmpc_flush(ident_t *loc) -{ - KC_TRACE( 10, ("__kmpc_flush: called\n" ) ); - - /* need explicit __mf() here since use volatile instead in library */ - KMP_MB(); /* Flush all pending memory write invalidates. */ - - #if ( KMP_ARCH_X86 || KMP_ARCH_X86_64 ) - #if KMP_MIC - // fence-style instructions do not exist, but lock; xaddl $0,(%rsp) can be used. - // We shouldn't need it, though, since the ABI rules require that - // * If the compiler generates NGO stores it also generates the fence - // * If users hand-code NGO stores they should insert the fence - // therefore no incomplete unordered stores should be visible. - #else - // C74404 - // This is to address non-temporal store instructions (sfence needed). - // The clflush instruction is addressed either (mfence needed). - // Probably the non-temporal load monvtdqa instruction should also be addressed. - // mfence is a SSE2 instruction. Do not execute it if CPU is not SSE2. - if ( ! __kmp_cpuinfo.initialized ) { - __kmp_query_cpuid( & __kmp_cpuinfo ); - }; // if - if ( ! __kmp_cpuinfo.sse2 ) { - // CPU cannot execute SSE2 instructions. - } else { - #if KMP_COMPILER_ICC - _mm_mfence(); - #elif KMP_COMPILER_MSVC - MemoryBarrier(); - #else - __sync_synchronize(); - #endif // KMP_COMPILER_ICC - }; // if - #endif // KMP_MIC - #elif (KMP_ARCH_ARM || KMP_ARCH_AARCH64 || KMP_ARCH_MIPS || KMP_ARCH_MIPS64) - // Nothing to see here move along - #elif KMP_ARCH_PPC64 - // Nothing needed here (we have a real MB above). - #if KMP_OS_CNK - // The flushing thread needs to yield here; this prevents a - // busy-waiting thread from saturating the pipeline. flush is - // often used in loops like this: - // while (!flag) { - // #pragma omp flush(flag) - // } - // and adding the yield here is good for at least a 10x speedup - // when running >2 threads per core (on the NAS LU benchmark). - __kmp_yield(TRUE); - #endif - #else - #error Unknown or unsupported architecture - #endif - -} - -/* -------------------------------------------------------------------------- */ - -/* -------------------------------------------------------------------------- */ - -/*! -@ingroup SYNCHRONIZATION -@param loc source location information -@param global_tid thread id. - -Execute a barrier. -*/ -void -__kmpc_barrier(ident_t *loc, kmp_int32 global_tid) -{ - KMP_COUNT_BLOCK(OMP_BARRIER); - KC_TRACE( 10, ("__kmpc_barrier: called T#%d\n", global_tid ) ); - - if (! TCR_4(__kmp_init_parallel)) - __kmp_parallel_initialize(); - - if ( __kmp_env_consistency_check ) { - if ( loc == 0 ) { - KMP_WARNING( ConstructIdentInvalid ); // ??? What does it mean for the user? - }; // if - - __kmp_check_barrier( global_tid, ct_barrier, loc ); - } - -#if OMPT_SUPPORT && OMPT_TRACE - ompt_frame_t * ompt_frame; - if (ompt_enabled ) { - ompt_frame = __ompt_get_task_frame_internal(0); - if ( ompt_frame->reenter_runtime_frame == NULL ) - ompt_frame->reenter_runtime_frame = __builtin_frame_address(1); - } -#endif - __kmp_threads[ global_tid ]->th.th_ident = loc; - // TODO: explicit barrier_wait_id: - // this function is called when 'barrier' directive is present or - // implicit barrier at the end of a worksharing construct. 
-    // 1) better to add a per-thread barrier counter to a thread data structure
-    // 2) set to 0 when a new team is created
-    // 3) no sync is required
-
-    __kmp_barrier( bs_plain_barrier, global_tid, FALSE, 0, NULL, NULL );
-#if OMPT_SUPPORT && OMPT_TRACE
-    if (ompt_enabled ) {
-        ompt_frame->reenter_runtime_frame = NULL;
-    }
-#endif
-}
-
-/* The BARRIER for a MASTER section is always explicit   */
-/*!
-@ingroup WORK_SHARING
-@param loc  source location information.
-@param global_tid  global thread number.
-@return 1 if this thread should execute the master block, 0 otherwise.
-*/
-kmp_int32
-__kmpc_master(ident_t *loc, kmp_int32 global_tid)
-{
-    int status = 0;
-
-    KC_TRACE( 10, ("__kmpc_master: called T#%d\n", global_tid ) );
-
-    if( ! TCR_4( __kmp_init_parallel ) )
-        __kmp_parallel_initialize();
-
-    if( KMP_MASTER_GTID( global_tid )) {
-        KMP_COUNT_BLOCK(OMP_MASTER);
-        KMP_PUSH_PARTITIONED_TIMER(OMP_master);
-        status = 1;
-    }
-
-#if OMPT_SUPPORT && OMPT_TRACE
-    if (status) {
-        if (ompt_enabled &&
-            ompt_callbacks.ompt_callback(ompt_event_master_begin)) {
-            kmp_info_t  *this_thr = __kmp_threads[ global_tid ];
-            kmp_team_t  *team = this_thr -> th.th_team;
-
-            int  tid = __kmp_tid_from_gtid( global_tid );
-            ompt_callbacks.ompt_callback(ompt_event_master_begin)(
-                team->t.ompt_team_info.parallel_id,
-                team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_id);
-        }
-    }
-#endif
-
-    if ( __kmp_env_consistency_check ) {
-#if KMP_USE_DYNAMIC_LOCK
-        if (status)
-            __kmp_push_sync( global_tid, ct_master, loc, NULL, 0 );
-        else
-            __kmp_check_sync( global_tid, ct_master, loc, NULL, 0 );
-#else
-        if (status)
-            __kmp_push_sync( global_tid, ct_master, loc, NULL );
-        else
-            __kmp_check_sync( global_tid, ct_master, loc, NULL );
-#endif
-    }
-
-    return status;
-}
-
-/*!
-@ingroup WORK_SHARING
-@param loc  source location information.
-@param global_tid  global thread number.
-
-Mark the end of a master region. This should only be called by the thread
-that executes the master region.
-*/
-void
-__kmpc_end_master(ident_t *loc, kmp_int32 global_tid)
-{
-    KC_TRACE( 10, ("__kmpc_end_master: called T#%d\n", global_tid ) );
-
-    KMP_DEBUG_ASSERT( KMP_MASTER_GTID( global_tid ));
-    KMP_POP_PARTITIONED_TIMER();
-
-#if OMPT_SUPPORT && OMPT_TRACE
-    kmp_info_t  *this_thr = __kmp_threads[ global_tid ];
-    kmp_team_t  *team = this_thr -> th.th_team;
-    if (ompt_enabled &&
-        ompt_callbacks.ompt_callback(ompt_event_master_end)) {
-        int  tid = __kmp_tid_from_gtid( global_tid );
-        ompt_callbacks.ompt_callback(ompt_event_master_end)(
-            team->t.ompt_team_info.parallel_id,
-            team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_id);
-    }
-#endif
-
-    if ( __kmp_env_consistency_check ) {
-        if( global_tid < 0 )
-            KMP_WARNING( ThreadIdentInvalid );
-
-        if( KMP_MASTER_GTID( global_tid ))
-            __kmp_pop_sync( global_tid, ct_master, loc );
-    }
-}
-
-/*!
-@ingroup WORK_SHARING
-@param loc  source location information.
-@param gtid  global thread number.
-
-Start execution of an ordered construct.
-*/
-void
-__kmpc_ordered( ident_t * loc, kmp_int32 gtid )
-{
-    int cid = 0;
-    kmp_info_t *th;
-    KMP_DEBUG_ASSERT( __kmp_init_serial );
-
-    KC_TRACE( 10, ("__kmpc_ordered: called T#%d\n", gtid ));
-
-    if (!
TCR_4(__kmp_init_parallel)) - __kmp_parallel_initialize(); - -#if USE_ITT_BUILD - __kmp_itt_ordered_prep( gtid ); - // TODO: ordered_wait_id -#endif /* USE_ITT_BUILD */ - - th = __kmp_threads[ gtid ]; - -#if OMPT_SUPPORT && OMPT_TRACE - if (ompt_enabled) { - /* OMPT state update */ - th->th.ompt_thread_info.wait_id = (uint64_t) loc; - th->th.ompt_thread_info.state = ompt_state_wait_ordered; - - /* OMPT event callback */ - if (ompt_callbacks.ompt_callback(ompt_event_wait_ordered)) { - ompt_callbacks.ompt_callback(ompt_event_wait_ordered)( - th->th.ompt_thread_info.wait_id); - } - } -#endif - - if ( th -> th.th_dispatch -> th_deo_fcn != 0 ) - (*th->th.th_dispatch->th_deo_fcn)( & gtid, & cid, loc ); - else - __kmp_parallel_deo( & gtid, & cid, loc ); - -#if OMPT_SUPPORT && OMPT_TRACE - if (ompt_enabled) { - /* OMPT state update */ - th->th.ompt_thread_info.state = ompt_state_work_parallel; - th->th.ompt_thread_info.wait_id = 0; - - /* OMPT event callback */ - if (ompt_callbacks.ompt_callback(ompt_event_acquired_ordered)) { - ompt_callbacks.ompt_callback(ompt_event_acquired_ordered)( - th->th.ompt_thread_info.wait_id); - } - } -#endif - -#if USE_ITT_BUILD - __kmp_itt_ordered_start( gtid ); -#endif /* USE_ITT_BUILD */ -} - -/*! -@ingroup WORK_SHARING -@param loc source location information. -@param gtid global thread number. - -End execution of an ordered construct. -*/ -void -__kmpc_end_ordered( ident_t * loc, kmp_int32 gtid ) -{ - int cid = 0; - kmp_info_t *th; - - KC_TRACE( 10, ("__kmpc_end_ordered: called T#%d\n", gtid ) ); - -#if USE_ITT_BUILD - __kmp_itt_ordered_end( gtid ); - // TODO: ordered_wait_id -#endif /* USE_ITT_BUILD */ - - th = __kmp_threads[ gtid ]; - - if ( th -> th.th_dispatch -> th_dxo_fcn != 0 ) - (*th->th.th_dispatch->th_dxo_fcn)( & gtid, & cid, loc ); - else - __kmp_parallel_dxo( & gtid, & cid, loc ); - -#if OMPT_SUPPORT && OMPT_BLAME - if (ompt_enabled && - ompt_callbacks.ompt_callback(ompt_event_release_ordered)) { - ompt_callbacks.ompt_callback(ompt_event_release_ordered)( - th->th.ompt_thread_info.wait_id); - } -#endif -} - -#if KMP_USE_DYNAMIC_LOCK - -static __forceinline void -__kmp_init_indirect_csptr(kmp_critical_name * crit, ident_t const * loc, kmp_int32 gtid, kmp_indirect_locktag_t tag) -{ - // Pointer to the allocated indirect lock is written to crit, while indexing is ignored. - void *idx; - kmp_indirect_lock_t **lck; - lck = (kmp_indirect_lock_t **)crit; - kmp_indirect_lock_t *ilk = __kmp_allocate_indirect_lock(&idx, gtid, tag); - KMP_I_LOCK_FUNC(ilk, init)(ilk->lock); - KMP_SET_I_LOCK_LOCATION(ilk, loc); - KMP_SET_I_LOCK_FLAGS(ilk, kmp_lf_critical_section); - KA_TRACE(20, ("__kmp_init_indirect_csptr: initialized indirect lock #%d\n", tag)); -#if USE_ITT_BUILD - __kmp_itt_critical_creating(ilk->lock, loc); -#endif - int status = KMP_COMPARE_AND_STORE_PTR(lck, 0, ilk); - if (status == 0) { -#if USE_ITT_BUILD - __kmp_itt_critical_destroyed(ilk->lock); -#endif - // We don't really need to destroy the unclaimed lock here since it will be cleaned up at program exit. - //KMP_D_LOCK_FUNC(&idx, destroy)((kmp_dyna_lock_t *)&idx); - } - KMP_DEBUG_ASSERT(*lck != NULL); -} - -// Fast-path acquire tas lock -#define KMP_ACQUIRE_TAS_LOCK(lock, gtid) { \ - kmp_tas_lock_t *l = (kmp_tas_lock_t *)lock; \ - if (l->lk.poll != KMP_LOCK_FREE(tas) || \ - ! KMP_COMPARE_AND_STORE_ACQ32(&(l->lk.poll), KMP_LOCK_FREE(tas), KMP_LOCK_BUSY(gtid+1, tas))) { \ - kmp_uint32 spins; \ - KMP_FSYNC_PREPARE(l); \ - KMP_INIT_YIELD(spins); \ - if (TCR_4(__kmp_nth) > (__kmp_avail_proc ? 
__kmp_avail_proc : __kmp_xproc)) { \
-        KMP_YIELD(TRUE); \
-    } else { \
-        KMP_YIELD_SPIN(spins); \
-    } \
-    kmp_backoff_t backoff = __kmp_spin_backoff_params; \
-    while (l->lk.poll != KMP_LOCK_FREE(tas) || \
-           ! KMP_COMPARE_AND_STORE_ACQ32(&(l->lk.poll), KMP_LOCK_FREE(tas), KMP_LOCK_BUSY(gtid+1, tas))) { \
-        __kmp_spin_backoff(&backoff); \
-        if (TCR_4(__kmp_nth) > (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc)) { \
-            KMP_YIELD(TRUE); \
-        } else { \
-            KMP_YIELD_SPIN(spins); \
-        } \
-    } \
-  } \
-  KMP_FSYNC_ACQUIRED(l); \
-}
-
-// Fast-path test tas lock
-#define KMP_TEST_TAS_LOCK(lock, gtid, rc) { \
-    kmp_tas_lock_t *l = (kmp_tas_lock_t *)lock; \
-    rc = l->lk.poll == KMP_LOCK_FREE(tas) && \
-         KMP_COMPARE_AND_STORE_ACQ32(&(l->lk.poll), KMP_LOCK_FREE(tas), KMP_LOCK_BUSY(gtid+1, tas)); \
-}
-
-// Fast-path release tas lock
-#define KMP_RELEASE_TAS_LOCK(lock, gtid) { \
-    TCW_4(((kmp_tas_lock_t *)lock)->lk.poll, KMP_LOCK_FREE(tas)); \
-    KMP_MB(); \
-}
-
-#if KMP_USE_FUTEX
-
-# include <unistd.h>
-# include <sys/syscall.h>
-# ifndef FUTEX_WAIT
-#  define FUTEX_WAIT 0
-# endif
-# ifndef FUTEX_WAKE
-#  define FUTEX_WAKE 1
-# endif
-
-// Fast-path acquire futex lock
-#define KMP_ACQUIRE_FUTEX_LOCK(lock, gtid) { \
-    kmp_futex_lock_t *ftx = (kmp_futex_lock_t *)lock; \
-    kmp_int32 gtid_code = (gtid+1) << 1; \
-    KMP_MB(); \
-    KMP_FSYNC_PREPARE(ftx); \
-    kmp_int32 poll_val; \
-    while ((poll_val = KMP_COMPARE_AND_STORE_RET32(&(ftx->lk.poll), KMP_LOCK_FREE(futex), \
-            KMP_LOCK_BUSY(gtid_code, futex))) != KMP_LOCK_FREE(futex)) { \
-        kmp_int32 cond = KMP_LOCK_STRIP(poll_val) & 1; \
-        if (!cond) { \
-            if (!KMP_COMPARE_AND_STORE_RET32(&(ftx->lk.poll), poll_val, poll_val | KMP_LOCK_BUSY(1, futex))) { \
-                continue; \
-            } \
-            poll_val |= KMP_LOCK_BUSY(1, futex); \
-        } \
-        kmp_int32 rc; \
-        if ((rc = syscall(__NR_futex, &(ftx->lk.poll), FUTEX_WAIT, poll_val, NULL, NULL, 0)) != 0) { \
-            continue; \
-        } \
-        gtid_code |= 1; \
-    } \
-    KMP_FSYNC_ACQUIRED(ftx); \
-}
-
-// Fast-path test futex lock
-#define KMP_TEST_FUTEX_LOCK(lock, gtid, rc) { \
-    kmp_futex_lock_t *ftx = (kmp_futex_lock_t *)lock; \
-    if (KMP_COMPARE_AND_STORE_ACQ32(&(ftx->lk.poll), KMP_LOCK_FREE(futex), KMP_LOCK_BUSY(gtid+1 << 1, futex))) { \
-        KMP_FSYNC_ACQUIRED(ftx); \
-        rc = TRUE; \
-    } else { \
-        rc = FALSE; \
-    } \
-}
-
-// Fast-path release futex lock
-#define KMP_RELEASE_FUTEX_LOCK(lock, gtid) { \
-    kmp_futex_lock_t *ftx = (kmp_futex_lock_t *)lock; \
-    KMP_MB(); \
-    KMP_FSYNC_RELEASING(ftx); \
-    kmp_int32 poll_val = KMP_XCHG_FIXED32(&(ftx->lk.poll), KMP_LOCK_FREE(futex)); \
-    if (KMP_LOCK_STRIP(poll_val) & 1) { \
-        syscall(__NR_futex, &(ftx->lk.poll), FUTEX_WAKE, KMP_LOCK_BUSY(1, futex), NULL, NULL, 0); \
-    } \
-    KMP_MB(); \
-    KMP_YIELD(TCR_4(__kmp_nth) > (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc)); \
-}
-
-#endif // KMP_USE_FUTEX
-
-#else // KMP_USE_DYNAMIC_LOCK
-
-static kmp_user_lock_p
-__kmp_get_critical_section_ptr( kmp_critical_name * crit, ident_t const * loc, kmp_int32 gtid )
-{
-    kmp_user_lock_p *lck_pp = (kmp_user_lock_p *)crit;
-
-    //
-    // Because of the double-check, the following load
-    // doesn't need to be volatile.
-    //
-    kmp_user_lock_p lck = (kmp_user_lock_p)TCR_PTR( *lck_pp );
-
-    if ( lck == NULL ) {
-        void * idx;
-
-        // Allocate & initialize the lock.
-        // Remember allocated locks in table in order to free them in __kmp_cleanup()
-        lck = __kmp_user_lock_allocate( &idx, gtid, kmp_lf_critical_section );
-        __kmp_init_user_lock_with_checks( lck );
-        __kmp_set_user_lock_location( lck, loc );
-#if USE_ITT_BUILD
-        __kmp_itt_critical_creating( lck );
-            // __kmp_itt_critical_creating() should be called *before* the first usage of the
-            // underlying lock. It is the only place where we can guarantee it. There is a chance
-            // the lock will be destroyed without ever being used, but that is not a problem,
-            // because this is not a real event seen by the user; it merely sets a name for the
-            // object (the lock). See more details in kmp_itt.h.
-#endif /* USE_ITT_BUILD */
-
-        //
-        // Use a cmpxchg instruction to slam the start of the critical
-        // section with the lock pointer.  If another thread beat us
-        // to it, deallocate the lock, and use the lock that the other
-        // thread allocated.
-        //
-        int status = KMP_COMPARE_AND_STORE_PTR( lck_pp, 0, lck );
-
-        if ( status == 0 ) {
-            // Deallocate the lock and reload the value.
-#if USE_ITT_BUILD
-            __kmp_itt_critical_destroyed( lck );
-                // Let ITT know the lock is destroyed and the same memory location may be reused for
-                // another purpose.
-#endif /* USE_ITT_BUILD */
-            __kmp_destroy_user_lock_with_checks( lck );
-            __kmp_user_lock_free( &idx, gtid, lck );
-            lck = (kmp_user_lock_p)TCR_PTR( *lck_pp );
-            KMP_DEBUG_ASSERT( lck != NULL );
-        }
-    }
-    return lck;
-}
-
-#endif // KMP_USE_DYNAMIC_LOCK
-
-/*!
-@ingroup WORK_SHARING
-@param loc  source location information.
-@param global_tid  global thread number.
-@param crit identity of the critical section. This could be a pointer to a lock associated with the critical section, or
-some other suitably unique value.
-
-Enter code protected by a `critical` construct.
-This function blocks until the executing thread can enter the critical section.
-*/
-void
-__kmpc_critical( ident_t * loc, kmp_int32 global_tid, kmp_critical_name * crit )
-{
-#if KMP_USE_DYNAMIC_LOCK
-    __kmpc_critical_with_hint(loc, global_tid, crit, omp_lock_hint_none);
-#else
-    KMP_COUNT_BLOCK(OMP_CRITICAL);
-    KMP_TIME_PARTITIONED_BLOCK(OMP_critical_wait);  /* Time spent waiting to enter the critical section */
-    kmp_user_lock_p lck;
-
-    KC_TRACE( 10, ("__kmpc_critical: called T#%d\n", global_tid ) );
-
-    //TODO: add THR_OVHD_STATE
-
-    KMP_CHECK_USER_LOCK_INIT();
-
-    if ( ( __kmp_user_lock_kind == lk_tas )
-      && ( sizeof( lck->tas.lk.poll ) <= OMP_CRITICAL_SIZE ) ) {
-        lck = (kmp_user_lock_p)crit;
-    }
-#if KMP_USE_FUTEX
-    else if ( ( __kmp_user_lock_kind == lk_futex )
-      && ( sizeof( lck->futex.lk.poll ) <= OMP_CRITICAL_SIZE ) ) {
-        lck = (kmp_user_lock_p)crit;
-    }
-#endif
-    else { // ticket, queuing or drdpa
-        lck = __kmp_get_critical_section_ptr( crit, loc, global_tid );
-    }
-
-    if ( __kmp_env_consistency_check )
-        __kmp_push_sync( global_tid, ct_critical, loc, lck );
-
-    /* since the critical directive binds to all threads, not just
-     * the current team, we have to check this even if we are in a
-     * serialized team */
-    /* also, even if we are the uber thread, we still have to acquire the lock,
-     * as we have to contend with sibling threads */
-
-#if USE_ITT_BUILD
-    __kmp_itt_critical_acquiring( lck );
-#endif /* USE_ITT_BUILD */
-    // Value of 'crit' should be good for using as a critical_id of the critical section directive.
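-    // This is the blocking slow path: the thread waits here until it owns the
-    // section's lock. The ITT hooks before and after bracket the wait so it is
-    // visible to profiling tools.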
- __kmp_acquire_user_lock_with_checks( lck, global_tid ); - -#if USE_ITT_BUILD - __kmp_itt_critical_acquired( lck ); -#endif /* USE_ITT_BUILD */ - - KMP_START_EXPLICIT_TIMER(OMP_critical); - KA_TRACE( 15, ("__kmpc_critical: done T#%d\n", global_tid )); -#endif // KMP_USE_DYNAMIC_LOCK -} - -#if KMP_USE_DYNAMIC_LOCK - -// Converts the given hint to an internal lock implementation -static __forceinline kmp_dyna_lockseq_t -__kmp_map_hint_to_lock(uintptr_t hint) -{ -#if KMP_USE_TSX -# define KMP_TSX_LOCK(seq) lockseq_##seq -#else -# define KMP_TSX_LOCK(seq) __kmp_user_lock_seq -#endif - -#if KMP_ARCH_X86 || KMP_ARCH_X86_64 -# define KMP_CPUINFO_RTM (__kmp_cpuinfo.rtm) -#else -# define KMP_CPUINFO_RTM 0 -#endif - - // Hints that do not require further logic - if (hint & kmp_lock_hint_hle) - return KMP_TSX_LOCK(hle); - if (hint & kmp_lock_hint_rtm) - return KMP_CPUINFO_RTM ? KMP_TSX_LOCK(rtm): __kmp_user_lock_seq; - if (hint & kmp_lock_hint_adaptive) - return KMP_CPUINFO_RTM ? KMP_TSX_LOCK(adaptive): __kmp_user_lock_seq; - - // Rule out conflicting hints first by returning the default lock - if ((hint & omp_lock_hint_contended) && (hint & omp_lock_hint_uncontended)) - return __kmp_user_lock_seq; - if ((hint & omp_lock_hint_speculative) && (hint & omp_lock_hint_nonspeculative)) - return __kmp_user_lock_seq; - - // Do not even consider speculation when it appears to be contended - if (hint & omp_lock_hint_contended) - return lockseq_queuing; - - // Uncontended lock without speculation - if ((hint & omp_lock_hint_uncontended) && !(hint & omp_lock_hint_speculative)) - return lockseq_tas; - - // HLE lock for speculation - if (hint & omp_lock_hint_speculative) - return KMP_TSX_LOCK(hle); - - return __kmp_user_lock_seq; -} - -/*! -@ingroup WORK_SHARING -@param loc source location information. -@param global_tid global thread number. -@param crit identity of the critical section. This could be a pointer to a lock associated with the critical section, -or some other suitably unique value. -@param hint the lock hint. - -Enter code protected by a `critical` construct with a hint. The hint value is used to suggest a lock implementation. -This function blocks until the executing thread can enter the critical section unless the hint suggests use of -speculative execution and the hardware supports it. -*/ -void -__kmpc_critical_with_hint( ident_t * loc, kmp_int32 global_tid, kmp_critical_name * crit, uintptr_t hint ) -{ - KMP_COUNT_BLOCK(OMP_CRITICAL); - kmp_user_lock_p lck; - - KC_TRACE( 10, ("__kmpc_critical: called T#%d\n", global_tid ) ); - - kmp_dyna_lock_t *lk = (kmp_dyna_lock_t *)crit; - // Check if it is initialized. - if (*lk == 0) { - kmp_dyna_lockseq_t lckseq = __kmp_map_hint_to_lock(hint); - if (KMP_IS_D_LOCK(lckseq)) { - KMP_COMPARE_AND_STORE_ACQ32((volatile kmp_int32 *)crit, 0, KMP_GET_D_TAG(lckseq)); - } else { - __kmp_init_indirect_csptr(crit, loc, global_tid, KMP_GET_I_TAG(lckseq)); - } - } - // Branch for accessing the actual lock object and set operation. This branching is inevitable since - // this lock initialization does not follow the normal dispatch path (lock table is not used). 
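-    // A nonzero direct-lock (D) tag in the low bits of the lock word means the
-    // lock is stored inline in 'crit' itself; a zero tag means 'crit' holds a
-    // pointer to a separately allocated indirect (I) lock object.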
- if (KMP_EXTRACT_D_TAG(lk) != 0) { - lck = (kmp_user_lock_p)lk; - if (__kmp_env_consistency_check) { - __kmp_push_sync(global_tid, ct_critical, loc, lck, __kmp_map_hint_to_lock(hint)); - } -# if USE_ITT_BUILD - __kmp_itt_critical_acquiring(lck); -# endif -# if KMP_USE_INLINED_TAS - if (__kmp_user_lock_seq == lockseq_tas && !__kmp_env_consistency_check) { - KMP_ACQUIRE_TAS_LOCK(lck, global_tid); - } else -# elif KMP_USE_INLINED_FUTEX - if (__kmp_user_lock_seq == lockseq_futex && !__kmp_env_consistency_check) { - KMP_ACQUIRE_FUTEX_LOCK(lck, global_tid); - } else -# endif - { - KMP_D_LOCK_FUNC(lk, set)(lk, global_tid); - } - } else { - kmp_indirect_lock_t *ilk = *((kmp_indirect_lock_t **)lk); - lck = ilk->lock; - if (__kmp_env_consistency_check) { - __kmp_push_sync(global_tid, ct_critical, loc, lck, __kmp_map_hint_to_lock(hint)); - } -# if USE_ITT_BUILD - __kmp_itt_critical_acquiring(lck); -# endif - KMP_I_LOCK_FUNC(ilk, set)(lck, global_tid); - } - -#if USE_ITT_BUILD - __kmp_itt_critical_acquired( lck ); -#endif /* USE_ITT_BUILD */ - - KMP_PUSH_PARTITIONED_TIMER(OMP_critical); - KA_TRACE( 15, ("__kmpc_critical: done T#%d\n", global_tid )); -} // __kmpc_critical_with_hint - -#endif // KMP_USE_DYNAMIC_LOCK - -/*! -@ingroup WORK_SHARING -@param loc source location information. -@param global_tid global thread number . -@param crit identity of the critical section. This could be a pointer to a lock associated with the critical section, or -some other suitably unique value. - -Leave a critical section, releasing any lock that was held during its execution. -*/ -void -__kmpc_end_critical(ident_t *loc, kmp_int32 global_tid, kmp_critical_name *crit) -{ - kmp_user_lock_p lck; - - KC_TRACE( 10, ("__kmpc_end_critical: called T#%d\n", global_tid )); - -#if KMP_USE_DYNAMIC_LOCK - if (KMP_IS_D_LOCK(__kmp_user_lock_seq)) { - lck = (kmp_user_lock_p)crit; - KMP_ASSERT(lck != NULL); - if (__kmp_env_consistency_check) { - __kmp_pop_sync(global_tid, ct_critical, loc); - } -# if USE_ITT_BUILD - __kmp_itt_critical_releasing( lck ); -# endif -# if KMP_USE_INLINED_TAS - if (__kmp_user_lock_seq == lockseq_tas && !__kmp_env_consistency_check) { - KMP_RELEASE_TAS_LOCK(lck, global_tid); - } else -# elif KMP_USE_INLINED_FUTEX - if (__kmp_user_lock_seq == lockseq_futex && !__kmp_env_consistency_check) { - KMP_RELEASE_FUTEX_LOCK(lck, global_tid); - } else -# endif - { - KMP_D_LOCK_FUNC(lck, unset)((kmp_dyna_lock_t *)lck, global_tid); - } - } else { - kmp_indirect_lock_t *ilk = (kmp_indirect_lock_t *)TCR_PTR(*((kmp_indirect_lock_t **)crit)); - KMP_ASSERT(ilk != NULL); - lck = ilk->lock; - if (__kmp_env_consistency_check) { - __kmp_pop_sync(global_tid, ct_critical, loc); - } -# if USE_ITT_BUILD - __kmp_itt_critical_releasing( lck ); -# endif - KMP_I_LOCK_FUNC(ilk, unset)(lck, global_tid); - } - -#else // KMP_USE_DYNAMIC_LOCK - - if ( ( __kmp_user_lock_kind == lk_tas ) - && ( sizeof( lck->tas.lk.poll ) <= OMP_CRITICAL_SIZE ) ) { - lck = (kmp_user_lock_p)crit; - } -#if KMP_USE_FUTEX - else if ( ( __kmp_user_lock_kind == lk_futex ) - && ( sizeof( lck->futex.lk.poll ) <= OMP_CRITICAL_SIZE ) ) { - lck = (kmp_user_lock_p)crit; - } -#endif - else { // ticket, queuing or drdpa - lck = (kmp_user_lock_p) TCR_PTR(*((kmp_user_lock_p *)crit)); - } - - KMP_ASSERT(lck != NULL); - - if ( __kmp_env_consistency_check ) - __kmp_pop_sync( global_tid, ct_critical, loc ); - -#if USE_ITT_BUILD - __kmp_itt_critical_releasing( lck ); -#endif /* USE_ITT_BUILD */ - // Value of 'crit' should be good for using as a critical_id of the critical section 
directive.
-    __kmp_release_user_lock_with_checks( lck, global_tid );
-
-#if OMPT_SUPPORT && OMPT_BLAME
-    if (ompt_enabled &&
-        ompt_callbacks.ompt_callback(ompt_event_release_critical)) {
-        ompt_callbacks.ompt_callback(ompt_event_release_critical)(
-            (uint64_t) lck);
-    }
-#endif
-
-#endif // KMP_USE_DYNAMIC_LOCK
-    KMP_POP_PARTITIONED_TIMER();
-    KA_TRACE( 15, ("__kmpc_end_critical: done T#%d\n", global_tid ));
-}
-
-/*!
-@ingroup SYNCHRONIZATION
-@param loc source location information
-@param global_tid thread id.
-@return one if the thread should execute the master block, zero otherwise
-
-Start execution of a combined barrier and master. The barrier is executed inside this function.
-*/
-kmp_int32
-__kmpc_barrier_master(ident_t *loc, kmp_int32 global_tid)
-{
-    int status;
-
-    KC_TRACE( 10, ("__kmpc_barrier_master: called T#%d\n", global_tid ) );
-
-    if (! TCR_4(__kmp_init_parallel))
-        __kmp_parallel_initialize();
-
-    if ( __kmp_env_consistency_check )
-        __kmp_check_barrier( global_tid, ct_barrier, loc );
-
-#if USE_ITT_NOTIFY
-    __kmp_threads[global_tid]->th.th_ident = loc;
-#endif
-    status = __kmp_barrier( bs_plain_barrier, global_tid, TRUE, 0, NULL, NULL );
-
-    return (status != 0) ? 0 : 1;
-}
-
-/*!
-@ingroup SYNCHRONIZATION
-@param loc source location information
-@param global_tid thread id.
-
-Complete the execution of a combined barrier and master. This function should
-only be called at the completion of the master code. Other threads will
-still be waiting at the barrier and this call releases them.
-*/
-void
-__kmpc_end_barrier_master(ident_t *loc, kmp_int32 global_tid)
-{
-    KC_TRACE( 10, ("__kmpc_end_barrier_master: called T#%d\n", global_tid ));
-
-    __kmp_end_split_barrier ( bs_plain_barrier, global_tid );
-}
-
-/*!
-@ingroup SYNCHRONIZATION
-@param loc source location information
-@param global_tid thread id.
-@return one if the thread should execute the master block, zero otherwise
-
-Start execution of a combined barrier and master(nowait) construct.
-The barrier is executed inside this function.
-There is no equivalent "end" function, since the actions that
-__kmpc_end_master would otherwise perform are carried out inside this
-function.
-*/
-kmp_int32
-__kmpc_barrier_master_nowait( ident_t * loc, kmp_int32 global_tid )
-{
-    kmp_int32 ret;
-
-    KC_TRACE( 10, ("__kmpc_barrier_master_nowait: called T#%d\n", global_tid ));
-
-    if (! TCR_4(__kmp_init_parallel))
-        __kmp_parallel_initialize();
-
-    if ( __kmp_env_consistency_check ) {
-        if ( loc == 0 ) {
-            KMP_WARNING( ConstructIdentInvalid ); // ??? What does it mean for the user?
-        }
-        __kmp_check_barrier( global_tid, ct_barrier, loc );
-    }
-
-#if USE_ITT_NOTIFY
-    __kmp_threads[global_tid]->th.th_ident = loc;
-#endif
-    __kmp_barrier( bs_plain_barrier, global_tid, FALSE, 0, NULL, NULL );
-
-    ret = __kmpc_master (loc, global_tid);
-
-    if ( __kmp_env_consistency_check ) {
-        /*  there's no __kmpc_end_master called; so the (stats) */
-        /*  actions of __kmpc_end_master are done here          */
-
-        if ( global_tid < 0 ) {
-            KMP_WARNING( ThreadIdentInvalid );
-        }
-        if (ret) {
-            /* only one thread should do the pop since only */
-            /* one did the push (see __kmpc_master())        */
-
-            __kmp_pop_sync( global_tid, ct_master, loc );
-        }
-    }
-
-    return (ret);
-}
-
-/* The BARRIER for a SINGLE process section is always explicit   */
-/*!
-@ingroup WORK_SHARING
-@param loc source location information
-@param global_tid global thread number
-@return One if this thread should execute the single construct, zero otherwise.
-
-Test whether to execute a single construct.
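-
-A sketch of a plausible lowering (illustrative only, not verbatim code
-generation; 'loc' and 'gtid' are whatever the compiler materializes):
-@code
-if ( __kmpc_single( &loc, gtid ) ) {
-    // body of the single block
-    __kmpc_end_single( &loc, gtid );
-}
-__kmpc_barrier( &loc, gtid ); // emitted only when there is no nowait clause
-@endcode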
-There are no implicit barriers in the two "single" calls, rather the compiler should -introduce an explicit barrier if it is required. -*/ - -kmp_int32 -__kmpc_single(ident_t *loc, kmp_int32 global_tid) -{ - kmp_int32 rc = __kmp_enter_single( global_tid, loc, TRUE ); - - if (rc) { - // We are going to execute the single statement, so we should count it. - KMP_COUNT_BLOCK(OMP_SINGLE); - KMP_PUSH_PARTITIONED_TIMER(OMP_single); - } - -#if OMPT_SUPPORT && OMPT_TRACE - kmp_info_t *this_thr = __kmp_threads[ global_tid ]; - kmp_team_t *team = this_thr -> th.th_team; - int tid = __kmp_tid_from_gtid( global_tid ); - - if (ompt_enabled) { - if (rc) { - if (ompt_callbacks.ompt_callback(ompt_event_single_in_block_begin)) { - ompt_callbacks.ompt_callback(ompt_event_single_in_block_begin)( - team->t.ompt_team_info.parallel_id, - team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_id, - team->t.ompt_team_info.microtask); - } - } else { - if (ompt_callbacks.ompt_callback(ompt_event_single_others_begin)) { - ompt_callbacks.ompt_callback(ompt_event_single_others_begin)( - team->t.ompt_team_info.parallel_id, - team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_id); - } - this_thr->th.ompt_thread_info.state = ompt_state_wait_single; - } - } -#endif - - return rc; -} - -/*! -@ingroup WORK_SHARING -@param loc source location information -@param global_tid global thread number - -Mark the end of a single construct. This function should -only be called by the thread that executed the block of code protected -by the `single` construct. -*/ -void -__kmpc_end_single(ident_t *loc, kmp_int32 global_tid) -{ - __kmp_exit_single( global_tid ); - KMP_POP_PARTITIONED_TIMER(); - -#if OMPT_SUPPORT && OMPT_TRACE - kmp_info_t *this_thr = __kmp_threads[ global_tid ]; - kmp_team_t *team = this_thr -> th.th_team; - int tid = __kmp_tid_from_gtid( global_tid ); - - if (ompt_enabled && - ompt_callbacks.ompt_callback(ompt_event_single_in_block_end)) { - ompt_callbacks.ompt_callback(ompt_event_single_in_block_end)( - team->t.ompt_team_info.parallel_id, - team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_id); - } -#endif -} - -/*! -@ingroup WORK_SHARING -@param loc Source location -@param global_tid Global thread id - -Mark the end of a statically scheduled loop. -*/ -void -__kmpc_for_static_fini( ident_t *loc, kmp_int32 global_tid ) -{ - KE_TRACE( 10, ("__kmpc_for_static_fini called T#%d\n", global_tid)); - -#if OMPT_SUPPORT && OMPT_TRACE - if (ompt_enabled && - ompt_callbacks.ompt_callback(ompt_event_loop_end)) { - ompt_team_info_t *team_info = __ompt_get_teaminfo(0, NULL); - ompt_task_info_t *task_info = __ompt_get_taskinfo(0); - ompt_callbacks.ompt_callback(ompt_event_loop_end)( - team_info->parallel_id, task_info->task_id); - } -#endif - - if ( __kmp_env_consistency_check ) - __kmp_pop_workshare( global_tid, ct_pdo, loc ); -} - -/* - * User routines which take C-style arguments (call by value) - * different from the Fortran equivalent routines - */ - -void -ompc_set_num_threads( int arg ) -{ -// !!!!! TODO: check the per-task binding - __kmp_set_num_threads( arg, __kmp_entry_gtid() ); -} - -void -ompc_set_dynamic( int flag ) -{ - kmp_info_t *thread; - - /* For the thread-private implementation of the internal controls */ - thread = __kmp_entry_thread(); - - __kmp_save_internal_controls( thread ); - - set__dynamic( thread, flag ? 
TRUE : FALSE ); -} - -void -ompc_set_nested( int flag ) -{ - kmp_info_t *thread; - - /* For the thread-private internal controls implementation */ - thread = __kmp_entry_thread(); - - __kmp_save_internal_controls( thread ); - - set__nested( thread, flag ? TRUE : FALSE ); -} - -void -ompc_set_max_active_levels( int max_active_levels ) -{ - /* TO DO */ - /* we want per-task implementation of this internal control */ - - /* For the per-thread internal controls implementation */ - __kmp_set_max_active_levels( __kmp_entry_gtid(), max_active_levels ); -} - -void -ompc_set_schedule( omp_sched_t kind, int modifier ) -{ -// !!!!! TODO: check the per-task binding - __kmp_set_schedule( __kmp_entry_gtid(), ( kmp_sched_t ) kind, modifier ); -} - -int -ompc_get_ancestor_thread_num( int level ) -{ - return __kmp_get_ancestor_thread_num( __kmp_entry_gtid(), level ); -} - -int -ompc_get_team_size( int level ) -{ - return __kmp_get_team_size( __kmp_entry_gtid(), level ); -} - -void -kmpc_set_stacksize( int arg ) -{ - // __kmp_aux_set_stacksize initializes the library if needed - __kmp_aux_set_stacksize( arg ); -} - -void -kmpc_set_stacksize_s( size_t arg ) -{ - // __kmp_aux_set_stacksize initializes the library if needed - __kmp_aux_set_stacksize( arg ); -} - -void -kmpc_set_blocktime( int arg ) -{ - int gtid, tid; - kmp_info_t *thread; - - gtid = __kmp_entry_gtid(); - tid = __kmp_tid_from_gtid(gtid); - thread = __kmp_thread_from_gtid(gtid); - - __kmp_aux_set_blocktime( arg, thread, tid ); -} - -void -kmpc_set_library( int arg ) -{ - // __kmp_user_set_library initializes the library if needed - __kmp_user_set_library( (enum library_type)arg ); -} - -void -kmpc_set_defaults( char const * str ) -{ - // __kmp_aux_set_defaults initializes the library if needed - __kmp_aux_set_defaults( str, KMP_STRLEN( str ) ); -} - -void -kmpc_set_disp_num_buffers( int arg ) -{ - // ignore after initialization because some teams have already - // allocated dispatch buffers - if( __kmp_init_serial == 0 && arg > 0 ) - __kmp_dispatch_num_buffers = arg; -} - -int -kmpc_set_affinity_mask_proc( int proc, void **mask ) -{ -#if defined(KMP_STUB) || !KMP_AFFINITY_SUPPORTED - return -1; -#else - if ( ! TCR_4(__kmp_init_middle) ) { - __kmp_middle_initialize(); - } - return __kmp_aux_set_affinity_mask_proc( proc, mask ); -#endif -} - -int -kmpc_unset_affinity_mask_proc( int proc, void **mask ) -{ -#if defined(KMP_STUB) || !KMP_AFFINITY_SUPPORTED - return -1; -#else - if ( ! TCR_4(__kmp_init_middle) ) { - __kmp_middle_initialize(); - } - return __kmp_aux_unset_affinity_mask_proc( proc, mask ); -#endif -} - -int -kmpc_get_affinity_mask_proc( int proc, void **mask ) -{ -#if defined(KMP_STUB) || !KMP_AFFINITY_SUPPORTED - return -1; -#else - if ( ! TCR_4(__kmp_init_middle) ) { - __kmp_middle_initialize(); - } - return __kmp_aux_get_affinity_mask_proc( proc, mask ); -#endif -} - - -/* -------------------------------------------------------------------------- */ -/*! -@ingroup THREADPRIVATE -@param loc source location information -@param gtid global thread number -@param cpy_size size of the cpy_data buffer -@param cpy_data pointer to data to be copied -@param cpy_func helper function to call for copying data -@param didit flag variable: 1=single thread; 0=not single thread - -__kmpc_copyprivate implements the interface for the private data broadcast needed for -the copyprivate clause associated with a single region in an OpenMP* program (both C and Fortran). -All threads participating in the parallel region call this routine. 
-One of the threads (called the single thread) should have the didit variable set to 1 -and all other threads should have that variable set to 0. -All threads pass a pointer to a data buffer (cpy_data) that they have built. - -The OpenMP specification forbids the use of nowait on the single region when a copyprivate -clause is present. However, @ref __kmpc_copyprivate implements a barrier internally to avoid -race conditions, so the code generation for the single region should avoid generating a barrier -after the call to @ref __kmpc_copyprivate. - -The gtid parameter is the global thread id for the current thread. -The loc parameter is a pointer to source location information. - -Internal implementation: The single thread will first copy its descriptor address (cpy_data) -to a team-private location, then the other threads will each call the function pointed to by -the parameter cpy_func, which carries out the copy by copying the data using the cpy_data buffer. - -The cpy_func routine used for the copy and the contents of the data area defined by cpy_data -and cpy_size may be built in any fashion that will allow the copy to be done. For instance, -the cpy_data buffer can hold the actual data to be copied or it may hold a list of pointers -to the data. The cpy_func routine must interpret the cpy_data buffer appropriately. - -The interface to cpy_func is as follows: -@code -void cpy_func( void *destination, void *source ) -@endcode -where void *destination is the cpy_data pointer for the thread being copied to -and void *source is the cpy_data pointer for the thread being copied from. -*/ -void -__kmpc_copyprivate( ident_t *loc, kmp_int32 gtid, size_t cpy_size, void *cpy_data, void(*cpy_func)(void*,void*), kmp_int32 didit ) -{ - void **data_ptr; - - KC_TRACE( 10, ("__kmpc_copyprivate: called T#%d\n", gtid )); - - KMP_MB(); - - data_ptr = & __kmp_team_from_gtid( gtid )->t.t_copypriv_data; - - if ( __kmp_env_consistency_check ) { - if ( loc == 0 ) { - KMP_WARNING( ConstructIdentInvalid ); - } - } - - /* ToDo: Optimize the following two barriers into some kind of split barrier */ - - if (didit) *data_ptr = cpy_data; - - /* This barrier is not a barrier region boundary */ -#if USE_ITT_NOTIFY - __kmp_threads[gtid]->th.th_ident = loc; -#endif - __kmp_barrier( bs_plain_barrier, gtid, FALSE , 0, NULL, NULL ); - - if (! didit) (*cpy_func)( cpy_data, *data_ptr ); - - /* Consider next barrier the user-visible barrier for barrier region boundaries */ - /* Nesting checks are already handled by the single construct checks */ - -#if USE_ITT_NOTIFY - __kmp_threads[gtid]->th.th_ident = loc; // TODO: check if it is needed (e.g. 
tasks can overwrite the location) -#endif - __kmp_barrier( bs_plain_barrier, gtid, FALSE , 0, NULL, NULL ); -} - -/* -------------------------------------------------------------------------- */ - -#define INIT_LOCK __kmp_init_user_lock_with_checks -#define INIT_NESTED_LOCK __kmp_init_nested_user_lock_with_checks -#define ACQUIRE_LOCK __kmp_acquire_user_lock_with_checks -#define ACQUIRE_LOCK_TIMED __kmp_acquire_user_lock_with_checks_timed -#define ACQUIRE_NESTED_LOCK __kmp_acquire_nested_user_lock_with_checks -#define ACQUIRE_NESTED_LOCK_TIMED __kmp_acquire_nested_user_lock_with_checks_timed -#define RELEASE_LOCK __kmp_release_user_lock_with_checks -#define RELEASE_NESTED_LOCK __kmp_release_nested_user_lock_with_checks -#define TEST_LOCK __kmp_test_user_lock_with_checks -#define TEST_NESTED_LOCK __kmp_test_nested_user_lock_with_checks -#define DESTROY_LOCK __kmp_destroy_user_lock_with_checks -#define DESTROY_NESTED_LOCK __kmp_destroy_nested_user_lock_with_checks - - -/* - * TODO: Make check abort messages use location info & pass it - * into with_checks routines - */ - -#if KMP_USE_DYNAMIC_LOCK - -// internal lock initializer -static __forceinline void -__kmp_init_lock_with_hint(ident_t *loc, void **lock, kmp_dyna_lockseq_t seq) -{ - if (KMP_IS_D_LOCK(seq)) { - KMP_INIT_D_LOCK(lock, seq); -#if USE_ITT_BUILD - __kmp_itt_lock_creating((kmp_user_lock_p)lock, NULL); -#endif - } else { - KMP_INIT_I_LOCK(lock, seq); -#if USE_ITT_BUILD - kmp_indirect_lock_t *ilk = KMP_LOOKUP_I_LOCK(lock); - __kmp_itt_lock_creating(ilk->lock, loc); -#endif - } -} - -// internal nest lock initializer -static __forceinline void -__kmp_init_nest_lock_with_hint(ident_t *loc, void **lock, kmp_dyna_lockseq_t seq) -{ -#if KMP_USE_TSX - // Don't have nested lock implementation for speculative locks - if (seq == lockseq_hle || seq == lockseq_rtm || seq == lockseq_adaptive) - seq = __kmp_user_lock_seq; -#endif - switch (seq) { - case lockseq_tas: - seq = lockseq_nested_tas; - break; -#if KMP_USE_FUTEX - case lockseq_futex: - seq = lockseq_nested_futex; - break; -#endif - case lockseq_ticket: - seq = lockseq_nested_ticket; - break; - case lockseq_queuing: - seq = lockseq_nested_queuing; - break; - case lockseq_drdpa: - seq = lockseq_nested_drdpa; - break; - default: - seq = lockseq_nested_queuing; - } - KMP_INIT_I_LOCK(lock, seq); -#if USE_ITT_BUILD - kmp_indirect_lock_t *ilk = KMP_LOOKUP_I_LOCK(lock); - __kmp_itt_lock_creating(ilk->lock, loc); -#endif -} - -/* initialize the lock with a hint */ -void -__kmpc_init_lock_with_hint(ident_t *loc, kmp_int32 gtid, void **user_lock, uintptr_t hint) -{ - KMP_DEBUG_ASSERT(__kmp_init_serial); - if (__kmp_env_consistency_check && user_lock == NULL) { - KMP_FATAL(LockIsUninitialized, "omp_init_lock_with_hint"); - } - - __kmp_init_lock_with_hint(loc, user_lock, __kmp_map_hint_to_lock(hint)); -} - -/* initialize the lock with a hint */ -void -__kmpc_init_nest_lock_with_hint(ident_t *loc, kmp_int32 gtid, void **user_lock, uintptr_t hint) -{ - KMP_DEBUG_ASSERT(__kmp_init_serial); - if (__kmp_env_consistency_check && user_lock == NULL) { - KMP_FATAL(LockIsUninitialized, "omp_init_nest_lock_with_hint"); - } - - __kmp_init_nest_lock_with_hint(loc, user_lock, __kmp_map_hint_to_lock(hint)); -} - -#endif // KMP_USE_DYNAMIC_LOCK - -/* initialize the lock */ -void -__kmpc_init_lock( ident_t * loc, kmp_int32 gtid, void ** user_lock ) { -#if KMP_USE_DYNAMIC_LOCK - KMP_DEBUG_ASSERT(__kmp_init_serial); - if (__kmp_env_consistency_check && user_lock == NULL) { - KMP_FATAL(LockIsUninitialized, 
"omp_init_lock"); - } - __kmp_init_lock_with_hint(loc, user_lock, __kmp_user_lock_seq); - -#else // KMP_USE_DYNAMIC_LOCK - - static char const * const func = "omp_init_lock"; - kmp_user_lock_p lck; - KMP_DEBUG_ASSERT( __kmp_init_serial ); - - if ( __kmp_env_consistency_check ) { - if ( user_lock == NULL ) { - KMP_FATAL( LockIsUninitialized, func ); - } - } - - KMP_CHECK_USER_LOCK_INIT(); - - if ( ( __kmp_user_lock_kind == lk_tas ) - && ( sizeof( lck->tas.lk.poll ) <= OMP_LOCK_T_SIZE ) ) { - lck = (kmp_user_lock_p)user_lock; - } -#if KMP_USE_FUTEX - else if ( ( __kmp_user_lock_kind == lk_futex ) - && ( sizeof( lck->futex.lk.poll ) <= OMP_LOCK_T_SIZE ) ) { - lck = (kmp_user_lock_p)user_lock; - } -#endif - else { - lck = __kmp_user_lock_allocate( user_lock, gtid, 0 ); - } - INIT_LOCK( lck ); - __kmp_set_user_lock_location( lck, loc ); - -#if OMPT_SUPPORT && OMPT_TRACE - if (ompt_enabled && - ompt_callbacks.ompt_callback(ompt_event_init_lock)) { - ompt_callbacks.ompt_callback(ompt_event_init_lock)((uint64_t) lck); - } -#endif - -#if USE_ITT_BUILD - __kmp_itt_lock_creating( lck ); -#endif /* USE_ITT_BUILD */ - -#endif // KMP_USE_DYNAMIC_LOCK -} // __kmpc_init_lock - -/* initialize the lock */ -void -__kmpc_init_nest_lock( ident_t * loc, kmp_int32 gtid, void ** user_lock ) { -#if KMP_USE_DYNAMIC_LOCK - - KMP_DEBUG_ASSERT(__kmp_init_serial); - if (__kmp_env_consistency_check && user_lock == NULL) { - KMP_FATAL(LockIsUninitialized, "omp_init_nest_lock"); - } - __kmp_init_nest_lock_with_hint(loc, user_lock, __kmp_user_lock_seq); - -#else // KMP_USE_DYNAMIC_LOCK - - static char const * const func = "omp_init_nest_lock"; - kmp_user_lock_p lck; - KMP_DEBUG_ASSERT( __kmp_init_serial ); - - if ( __kmp_env_consistency_check ) { - if ( user_lock == NULL ) { - KMP_FATAL( LockIsUninitialized, func ); - } - } - - KMP_CHECK_USER_LOCK_INIT(); - - if ( ( __kmp_user_lock_kind == lk_tas ) && ( sizeof( lck->tas.lk.poll ) - + sizeof( lck->tas.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) { - lck = (kmp_user_lock_p)user_lock; - } -#if KMP_USE_FUTEX - else if ( ( __kmp_user_lock_kind == lk_futex ) - && ( sizeof( lck->futex.lk.poll ) + sizeof( lck->futex.lk.depth_locked ) - <= OMP_NEST_LOCK_T_SIZE ) ) { - lck = (kmp_user_lock_p)user_lock; - } -#endif - else { - lck = __kmp_user_lock_allocate( user_lock, gtid, 0 ); - } - - INIT_NESTED_LOCK( lck ); - __kmp_set_user_lock_location( lck, loc ); - -#if OMPT_SUPPORT && OMPT_TRACE - if (ompt_enabled && - ompt_callbacks.ompt_callback(ompt_event_init_nest_lock)) { - ompt_callbacks.ompt_callback(ompt_event_init_nest_lock)((uint64_t) lck); - } -#endif - -#if USE_ITT_BUILD - __kmp_itt_lock_creating( lck ); -#endif /* USE_ITT_BUILD */ - -#endif // KMP_USE_DYNAMIC_LOCK -} // __kmpc_init_nest_lock - -void -__kmpc_destroy_lock( ident_t * loc, kmp_int32 gtid, void ** user_lock ) { -#if KMP_USE_DYNAMIC_LOCK - -# if USE_ITT_BUILD - kmp_user_lock_p lck; - if (KMP_EXTRACT_D_TAG(user_lock) == 0) { - lck = ((kmp_indirect_lock_t *)KMP_LOOKUP_I_LOCK(user_lock))->lock; - } else { - lck = (kmp_user_lock_p)user_lock; - } - __kmp_itt_lock_destroyed(lck); -# endif - KMP_D_LOCK_FUNC(user_lock, destroy)((kmp_dyna_lock_t *)user_lock); -#else - kmp_user_lock_p lck; - - if ( ( __kmp_user_lock_kind == lk_tas ) - && ( sizeof( lck->tas.lk.poll ) <= OMP_LOCK_T_SIZE ) ) { - lck = (kmp_user_lock_p)user_lock; - } -#if KMP_USE_FUTEX - else if ( ( __kmp_user_lock_kind == lk_futex ) - && ( sizeof( lck->futex.lk.poll ) <= OMP_LOCK_T_SIZE ) ) { - lck = (kmp_user_lock_p)user_lock; - } -#endif - else { - lck = 
__kmp_lookup_user_lock( user_lock, "omp_destroy_lock" ); - } - -#if OMPT_SUPPORT && OMPT_TRACE - if (ompt_enabled && - ompt_callbacks.ompt_callback(ompt_event_destroy_lock)) { - ompt_callbacks.ompt_callback(ompt_event_destroy_lock)((uint64_t) lck); - } -#endif - -#if USE_ITT_BUILD - __kmp_itt_lock_destroyed( lck ); -#endif /* USE_ITT_BUILD */ - DESTROY_LOCK( lck ); - - if ( ( __kmp_user_lock_kind == lk_tas ) - && ( sizeof( lck->tas.lk.poll ) <= OMP_LOCK_T_SIZE ) ) { - ; - } -#if KMP_USE_FUTEX - else if ( ( __kmp_user_lock_kind == lk_futex ) - && ( sizeof( lck->futex.lk.poll ) <= OMP_LOCK_T_SIZE ) ) { - ; - } -#endif - else { - __kmp_user_lock_free( user_lock, gtid, lck ); - } -#endif // KMP_USE_DYNAMIC_LOCK -} // __kmpc_destroy_lock - -/* destroy the lock */ -void -__kmpc_destroy_nest_lock( ident_t * loc, kmp_int32 gtid, void ** user_lock ) { -#if KMP_USE_DYNAMIC_LOCK - -# if USE_ITT_BUILD - kmp_indirect_lock_t *ilk = KMP_LOOKUP_I_LOCK(user_lock); - __kmp_itt_lock_destroyed(ilk->lock); -# endif - KMP_D_LOCK_FUNC(user_lock, destroy)((kmp_dyna_lock_t *)user_lock); - -#else // KMP_USE_DYNAMIC_LOCK - - kmp_user_lock_p lck; - - if ( ( __kmp_user_lock_kind == lk_tas ) && ( sizeof( lck->tas.lk.poll ) - + sizeof( lck->tas.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) { - lck = (kmp_user_lock_p)user_lock; - } -#if KMP_USE_FUTEX - else if ( ( __kmp_user_lock_kind == lk_futex ) - && ( sizeof( lck->futex.lk.poll ) + sizeof( lck->futex.lk.depth_locked ) - <= OMP_NEST_LOCK_T_SIZE ) ) { - lck = (kmp_user_lock_p)user_lock; - } -#endif - else { - lck = __kmp_lookup_user_lock( user_lock, "omp_destroy_nest_lock" ); - } - -#if OMPT_SUPPORT && OMPT_TRACE - if (ompt_enabled && - ompt_callbacks.ompt_callback(ompt_event_destroy_nest_lock)) { - ompt_callbacks.ompt_callback(ompt_event_destroy_nest_lock)((uint64_t) lck); - } -#endif - -#if USE_ITT_BUILD - __kmp_itt_lock_destroyed( lck ); -#endif /* USE_ITT_BUILD */ - - DESTROY_NESTED_LOCK( lck ); - - if ( ( __kmp_user_lock_kind == lk_tas ) && ( sizeof( lck->tas.lk.poll ) - + sizeof( lck->tas.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) { - ; - } -#if KMP_USE_FUTEX - else if ( ( __kmp_user_lock_kind == lk_futex ) - && ( sizeof( lck->futex.lk.poll ) + sizeof( lck->futex.lk.depth_locked ) - <= OMP_NEST_LOCK_T_SIZE ) ) { - ; - } -#endif - else { - __kmp_user_lock_free( user_lock, gtid, lck ); - } -#endif // KMP_USE_DYNAMIC_LOCK -} // __kmpc_destroy_nest_lock - -void -__kmpc_set_lock( ident_t * loc, kmp_int32 gtid, void ** user_lock ) { - KMP_COUNT_BLOCK(OMP_set_lock); -#if KMP_USE_DYNAMIC_LOCK - int tag = KMP_EXTRACT_D_TAG(user_lock); -# if USE_ITT_BUILD - __kmp_itt_lock_acquiring((kmp_user_lock_p)user_lock); // itt function will get to the right lock object. 
-# endif -# if KMP_USE_INLINED_TAS - if (tag == locktag_tas && !__kmp_env_consistency_check) { - KMP_ACQUIRE_TAS_LOCK(user_lock, gtid); - } else -# elif KMP_USE_INLINED_FUTEX - if (tag == locktag_futex && !__kmp_env_consistency_check) { - KMP_ACQUIRE_FUTEX_LOCK(user_lock, gtid); - } else -# endif - { - __kmp_direct_set[tag]((kmp_dyna_lock_t *)user_lock, gtid); - } -# if USE_ITT_BUILD - __kmp_itt_lock_acquired((kmp_user_lock_p)user_lock); -# endif - -#else // KMP_USE_DYNAMIC_LOCK - - kmp_user_lock_p lck; - - if ( ( __kmp_user_lock_kind == lk_tas ) - && ( sizeof( lck->tas.lk.poll ) <= OMP_LOCK_T_SIZE ) ) { - lck = (kmp_user_lock_p)user_lock; - } -#if KMP_USE_FUTEX - else if ( ( __kmp_user_lock_kind == lk_futex ) - && ( sizeof( lck->futex.lk.poll ) <= OMP_LOCK_T_SIZE ) ) { - lck = (kmp_user_lock_p)user_lock; - } -#endif - else { - lck = __kmp_lookup_user_lock( user_lock, "omp_set_lock" ); - } - -#if USE_ITT_BUILD - __kmp_itt_lock_acquiring( lck ); -#endif /* USE_ITT_BUILD */ - - ACQUIRE_LOCK( lck, gtid ); - -#if USE_ITT_BUILD - __kmp_itt_lock_acquired( lck ); -#endif /* USE_ITT_BUILD */ - -#if OMPT_SUPPORT && OMPT_TRACE - if (ompt_enabled && - ompt_callbacks.ompt_callback(ompt_event_acquired_lock)) { - ompt_callbacks.ompt_callback(ompt_event_acquired_lock)((uint64_t) lck); - } -#endif - -#endif // KMP_USE_DYNAMIC_LOCK -} - -void -__kmpc_set_nest_lock( ident_t * loc, kmp_int32 gtid, void ** user_lock ) { -#if KMP_USE_DYNAMIC_LOCK - -# if USE_ITT_BUILD - __kmp_itt_lock_acquiring((kmp_user_lock_p)user_lock); -# endif - KMP_D_LOCK_FUNC(user_lock, set)((kmp_dyna_lock_t *)user_lock, gtid); -# if USE_ITT_BUILD - __kmp_itt_lock_acquired((kmp_user_lock_p)user_lock); -#endif - -#if OMPT_SUPPORT && OMPT_TRACE - if (ompt_enabled) { - // missing support here: need to know whether acquired first or not - } -#endif - -#else // KMP_USE_DYNAMIC_LOCK - int acquire_status; - kmp_user_lock_p lck; - - if ( ( __kmp_user_lock_kind == lk_tas ) && ( sizeof( lck->tas.lk.poll ) - + sizeof( lck->tas.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) { - lck = (kmp_user_lock_p)user_lock; - } -#if KMP_USE_FUTEX - else if ( ( __kmp_user_lock_kind == lk_futex ) - && ( sizeof( lck->futex.lk.poll ) + sizeof( lck->futex.lk.depth_locked ) - <= OMP_NEST_LOCK_T_SIZE ) ) { - lck = (kmp_user_lock_p)user_lock; - } -#endif - else { - lck = __kmp_lookup_user_lock( user_lock, "omp_set_nest_lock" ); - } - -#if USE_ITT_BUILD - __kmp_itt_lock_acquiring( lck ); -#endif /* USE_ITT_BUILD */ - - ACQUIRE_NESTED_LOCK( lck, gtid, &acquire_status ); - -#if USE_ITT_BUILD - __kmp_itt_lock_acquired( lck ); -#endif /* USE_ITT_BUILD */ - -#if OMPT_SUPPORT && OMPT_TRACE - if (ompt_enabled) { - if (acquire_status == KMP_LOCK_ACQUIRED_FIRST) { - if(ompt_callbacks.ompt_callback(ompt_event_acquired_nest_lock_first)) - ompt_callbacks.ompt_callback(ompt_event_acquired_nest_lock_first)((uint64_t) lck); - } else { - if(ompt_callbacks.ompt_callback(ompt_event_acquired_nest_lock_next)) - ompt_callbacks.ompt_callback(ompt_event_acquired_nest_lock_next)((uint64_t) lck); - } - } -#endif - -#endif // KMP_USE_DYNAMIC_LOCK -} - -void -__kmpc_unset_lock( ident_t *loc, kmp_int32 gtid, void **user_lock ) -{ -#if KMP_USE_DYNAMIC_LOCK - - int tag = KMP_EXTRACT_D_TAG(user_lock); -# if USE_ITT_BUILD - __kmp_itt_lock_releasing((kmp_user_lock_p)user_lock); -# endif -# if KMP_USE_INLINED_TAS - if (tag == locktag_tas && !__kmp_env_consistency_check) { - KMP_RELEASE_TAS_LOCK(user_lock, gtid); - } else -# elif KMP_USE_INLINED_FUTEX - if (tag == locktag_futex && 
!__kmp_env_consistency_check) { - KMP_RELEASE_FUTEX_LOCK(user_lock, gtid); - } else -# endif - { - __kmp_direct_unset[tag]((kmp_dyna_lock_t *)user_lock, gtid); - } - -#else // KMP_USE_DYNAMIC_LOCK - - kmp_user_lock_p lck; - - /* Can't use serial interval since not block structured */ - /* release the lock */ - - if ( ( __kmp_user_lock_kind == lk_tas ) - && ( sizeof( lck->tas.lk.poll ) <= OMP_LOCK_T_SIZE ) ) { -#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) - // "fast" path implemented to fix customer performance issue -#if USE_ITT_BUILD - __kmp_itt_lock_releasing( (kmp_user_lock_p)user_lock ); -#endif /* USE_ITT_BUILD */ - TCW_4(((kmp_user_lock_p)user_lock)->tas.lk.poll, 0); - KMP_MB(); - return; -#else - lck = (kmp_user_lock_p)user_lock; -#endif - } -#if KMP_USE_FUTEX - else if ( ( __kmp_user_lock_kind == lk_futex ) - && ( sizeof( lck->futex.lk.poll ) <= OMP_LOCK_T_SIZE ) ) { - lck = (kmp_user_lock_p)user_lock; - } -#endif - else { - lck = __kmp_lookup_user_lock( user_lock, "omp_unset_lock" ); - } - -#if USE_ITT_BUILD - __kmp_itt_lock_releasing( lck ); -#endif /* USE_ITT_BUILD */ - - RELEASE_LOCK( lck, gtid ); - -#if OMPT_SUPPORT && OMPT_BLAME - if (ompt_enabled && - ompt_callbacks.ompt_callback(ompt_event_release_lock)) { - ompt_callbacks.ompt_callback(ompt_event_release_lock)((uint64_t) lck); - } -#endif - -#endif // KMP_USE_DYNAMIC_LOCK -} - -/* release the lock */ -void -__kmpc_unset_nest_lock( ident_t *loc, kmp_int32 gtid, void **user_lock ) -{ -#if KMP_USE_DYNAMIC_LOCK - -# if USE_ITT_BUILD - __kmp_itt_lock_releasing((kmp_user_lock_p)user_lock); -# endif - KMP_D_LOCK_FUNC(user_lock, unset)((kmp_dyna_lock_t *)user_lock, gtid); - -#else // KMP_USE_DYNAMIC_LOCK - - kmp_user_lock_p lck; - - /* Can't use serial interval since not block structured */ - - if ( ( __kmp_user_lock_kind == lk_tas ) && ( sizeof( lck->tas.lk.poll ) - + sizeof( lck->tas.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) { -#if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) - // "fast" path implemented to fix customer performance issue - kmp_tas_lock_t *tl = (kmp_tas_lock_t*)user_lock; -#if USE_ITT_BUILD - __kmp_itt_lock_releasing( (kmp_user_lock_p)user_lock ); -#endif /* USE_ITT_BUILD */ - if ( --(tl->lk.depth_locked) == 0 ) { - TCW_4(tl->lk.poll, 0); - } - KMP_MB(); - return; -#else - lck = (kmp_user_lock_p)user_lock; -#endif - } -#if KMP_USE_FUTEX - else if ( ( __kmp_user_lock_kind == lk_futex ) - && ( sizeof( lck->futex.lk.poll ) + sizeof( lck->futex.lk.depth_locked ) - <= OMP_NEST_LOCK_T_SIZE ) ) { - lck = (kmp_user_lock_p)user_lock; - } -#endif - else { - lck = __kmp_lookup_user_lock( user_lock, "omp_unset_nest_lock" ); - } - -#if USE_ITT_BUILD - __kmp_itt_lock_releasing( lck ); -#endif /* USE_ITT_BUILD */ - - int release_status; - release_status = RELEASE_NESTED_LOCK( lck, gtid ); -#if OMPT_SUPPORT && OMPT_BLAME - if (ompt_enabled) { - if (release_status == KMP_LOCK_RELEASED) { - if (ompt_callbacks.ompt_callback(ompt_event_release_nest_lock_last)) { - ompt_callbacks.ompt_callback(ompt_event_release_nest_lock_last)( - (uint64_t) lck); - } - } else if (ompt_callbacks.ompt_callback(ompt_event_release_nest_lock_prev)) { - ompt_callbacks.ompt_callback(ompt_event_release_nest_lock_prev)( - (uint64_t) lck); - } - } -#endif - -#endif // KMP_USE_DYNAMIC_LOCK -} - -/* try to acquire the lock */ -int -__kmpc_test_lock( ident_t *loc, kmp_int32 gtid, void **user_lock ) -{ - KMP_COUNT_BLOCK(OMP_test_lock); - -#if KMP_USE_DYNAMIC_LOCK - int rc; - 
int tag = KMP_EXTRACT_D_TAG(user_lock); -# if USE_ITT_BUILD - __kmp_itt_lock_acquiring((kmp_user_lock_p)user_lock); -# endif -# if KMP_USE_INLINED_TAS - if (tag == locktag_tas && !__kmp_env_consistency_check) { - KMP_TEST_TAS_LOCK(user_lock, gtid, rc); - } else -# elif KMP_USE_INLINED_FUTEX - if (tag == locktag_futex && !__kmp_env_consistency_check) { - KMP_TEST_FUTEX_LOCK(user_lock, gtid, rc); - } else -# endif - { - rc = __kmp_direct_test[tag]((kmp_dyna_lock_t *)user_lock, gtid); - } - if (rc) { -# if USE_ITT_BUILD - __kmp_itt_lock_acquired((kmp_user_lock_p)user_lock); -# endif - return FTN_TRUE; - } else { -# if USE_ITT_BUILD - __kmp_itt_lock_cancelled((kmp_user_lock_p)user_lock); -# endif - return FTN_FALSE; - } - -#else // KMP_USE_DYNAMIC_LOCK - - kmp_user_lock_p lck; - int rc; - - if ( ( __kmp_user_lock_kind == lk_tas ) - && ( sizeof( lck->tas.lk.poll ) <= OMP_LOCK_T_SIZE ) ) { - lck = (kmp_user_lock_p)user_lock; - } -#if KMP_USE_FUTEX - else if ( ( __kmp_user_lock_kind == lk_futex ) - && ( sizeof( lck->futex.lk.poll ) <= OMP_LOCK_T_SIZE ) ) { - lck = (kmp_user_lock_p)user_lock; - } -#endif - else { - lck = __kmp_lookup_user_lock( user_lock, "omp_test_lock" ); - } - -#if USE_ITT_BUILD - __kmp_itt_lock_acquiring( lck ); -#endif /* USE_ITT_BUILD */ - - rc = TEST_LOCK( lck, gtid ); -#if USE_ITT_BUILD - if ( rc ) { - __kmp_itt_lock_acquired( lck ); - } else { - __kmp_itt_lock_cancelled( lck ); - } -#endif /* USE_ITT_BUILD */ - return ( rc ? FTN_TRUE : FTN_FALSE ); - - /* Can't use serial interval since not block structured */ - -#endif // KMP_USE_DYNAMIC_LOCK -} - -/* try to acquire the lock */ -int -__kmpc_test_nest_lock( ident_t *loc, kmp_int32 gtid, void **user_lock ) -{ -#if KMP_USE_DYNAMIC_LOCK - int rc; -# if USE_ITT_BUILD - __kmp_itt_lock_acquiring((kmp_user_lock_p)user_lock); -# endif - rc = KMP_D_LOCK_FUNC(user_lock, test)((kmp_dyna_lock_t *)user_lock, gtid); -# if USE_ITT_BUILD - if (rc) { - __kmp_itt_lock_acquired((kmp_user_lock_p)user_lock); - } else { - __kmp_itt_lock_cancelled((kmp_user_lock_p)user_lock); - } -# endif - return rc; - -#else // KMP_USE_DYNAMIC_LOCK - - kmp_user_lock_p lck; - int rc; - - if ( ( __kmp_user_lock_kind == lk_tas ) && ( sizeof( lck->tas.lk.poll ) - + sizeof( lck->tas.lk.depth_locked ) <= OMP_NEST_LOCK_T_SIZE ) ) { - lck = (kmp_user_lock_p)user_lock; - } -#if KMP_USE_FUTEX - else if ( ( __kmp_user_lock_kind == lk_futex ) - && ( sizeof( lck->futex.lk.poll ) + sizeof( lck->futex.lk.depth_locked ) - <= OMP_NEST_LOCK_T_SIZE ) ) { - lck = (kmp_user_lock_p)user_lock; - } -#endif - else { - lck = __kmp_lookup_user_lock( user_lock, "omp_test_nest_lock" ); - } - -#if USE_ITT_BUILD - __kmp_itt_lock_acquiring( lck ); -#endif /* USE_ITT_BUILD */ - - rc = TEST_NESTED_LOCK( lck, gtid ); -#if USE_ITT_BUILD - if ( rc ) { - __kmp_itt_lock_acquired( lck ); - } else { - __kmp_itt_lock_cancelled( lck ); - } -#endif /* USE_ITT_BUILD */ - return rc; - - /* Can't use serial interval since not block structured */ - -#endif // KMP_USE_DYNAMIC_LOCK -} - - -/*--------------------------------------------------------------------------------------------------------------------*/ - -/* - * Interface to fast scalable reduce methods routines - */ - -// keep the selected method in a thread local structure for cross-function usage: will be used in __kmpc_end_reduce* functions; -// another solution: to re-determine the method one more time in __kmpc_end_reduce* functions (new prototype required then) -// AT: which solution is better? 
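-
-// For orientation, a sketch of the call sequence a compiler might emit for a
-// reduction (illustrative only; 'priv', 'reduce_fn' and 'crit' are placeholder
-// names, and real code generation differs in detail):
-//
-//     switch ( __kmpc_reduce_nowait( loc, gtid, 1, sizeof( priv ), &priv, reduce_fn, &crit ) ) {
-//         case 1: /* combine 'priv' into the shared result, then: */
-//                 __kmpc_end_reduce_nowait( loc, gtid, &crit ); break;
-//         case 2: /* combine with an atomic operation; no "end" call is made */ break;
-//         case 0: /* nothing to do */ break;
-//     }
-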
-#define __KMP_SET_REDUCTION_METHOD(gtid,rmethod) \ - ( ( __kmp_threads[ ( gtid ) ] -> th.th_local.packed_reduction_method ) = ( rmethod ) ) - -#define __KMP_GET_REDUCTION_METHOD(gtid) \ - ( __kmp_threads[ ( gtid ) ] -> th.th_local.packed_reduction_method ) - -// description of the packed_reduction_method variable: look at the macros in kmp.h - - -// used in a critical section reduce block -static __forceinline void -__kmp_enter_critical_section_reduce_block( ident_t * loc, kmp_int32 global_tid, kmp_critical_name * crit ) { - - // this lock was visible to a customer and to the threading profile tool as a serial overhead span - // (although it's used for an internal purpose only) - // why was it visible in previous implementation? - // should we keep it visible in new reduce block? - kmp_user_lock_p lck; - -#if KMP_USE_DYNAMIC_LOCK - - kmp_dyna_lock_t *lk = (kmp_dyna_lock_t *)crit; - // Check if it is initialized. - if (*lk == 0) { - if (KMP_IS_D_LOCK(__kmp_user_lock_seq)) { - KMP_COMPARE_AND_STORE_ACQ32((volatile kmp_int32 *)crit, 0, KMP_GET_D_TAG(__kmp_user_lock_seq)); - } else { - __kmp_init_indirect_csptr(crit, loc, global_tid, KMP_GET_I_TAG(__kmp_user_lock_seq)); - } - } - // Branch for accessing the actual lock object and set operation. This branching is inevitable since - // this lock initialization does not follow the normal dispatch path (lock table is not used). - if (KMP_EXTRACT_D_TAG(lk) != 0) { - lck = (kmp_user_lock_p)lk; - KMP_DEBUG_ASSERT(lck != NULL); - if (__kmp_env_consistency_check) { - __kmp_push_sync(global_tid, ct_critical, loc, lck, __kmp_user_lock_seq); - } - KMP_D_LOCK_FUNC(lk, set)(lk, global_tid); - } else { - kmp_indirect_lock_t *ilk = *((kmp_indirect_lock_t **)lk); - lck = ilk->lock; - KMP_DEBUG_ASSERT(lck != NULL); - if (__kmp_env_consistency_check) { - __kmp_push_sync(global_tid, ct_critical, loc, lck, __kmp_user_lock_seq); - } - KMP_I_LOCK_FUNC(ilk, set)(lck, global_tid); - } - -#else // KMP_USE_DYNAMIC_LOCK - - // We know that the fast reduction code is only emitted by Intel compilers - // with 32 byte critical sections. If there isn't enough space, then we - // have to use a pointer. - if ( __kmp_base_user_lock_size <= INTEL_CRITICAL_SIZE ) { - lck = (kmp_user_lock_p)crit; - } - else { - lck = __kmp_get_critical_section_ptr( crit, loc, global_tid ); - } - KMP_DEBUG_ASSERT( lck != NULL ); - - if ( __kmp_env_consistency_check ) - __kmp_push_sync( global_tid, ct_critical, loc, lck ); - - __kmp_acquire_user_lock_with_checks( lck, global_tid ); - -#endif // KMP_USE_DYNAMIC_LOCK -} - -// used in a critical section reduce block -static __forceinline void -__kmp_end_critical_section_reduce_block( ident_t * loc, kmp_int32 global_tid, kmp_critical_name * crit ) { - - kmp_user_lock_p lck; - -#if KMP_USE_DYNAMIC_LOCK - - if (KMP_IS_D_LOCK(__kmp_user_lock_seq)) { - lck = (kmp_user_lock_p)crit; - if (__kmp_env_consistency_check) - __kmp_pop_sync(global_tid, ct_critical, loc); - KMP_D_LOCK_FUNC(lck, unset)((kmp_dyna_lock_t *)lck, global_tid); - } else { - kmp_indirect_lock_t *ilk = (kmp_indirect_lock_t *)TCR_PTR(*((kmp_indirect_lock_t **)crit)); - if (__kmp_env_consistency_check) - __kmp_pop_sync(global_tid, ct_critical, loc); - KMP_I_LOCK_FUNC(ilk, unset)(ilk->lock, global_tid); - } - -#else // KMP_USE_DYNAMIC_LOCK - - // We know that the fast reduction code is only emitted by Intel compilers with 32 byte critical - // sections. If there isn't enough space, then we have to use a pointer. 
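-    // (The literal 32 below is presumably the same limit as INTEL_CRITICAL_SIZE
-    //  used in __kmp_enter_critical_section_reduce_block; the two must agree.)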
- if ( __kmp_base_user_lock_size > 32 ) { - lck = *( (kmp_user_lock_p *) crit ); - KMP_ASSERT( lck != NULL ); - } else { - lck = (kmp_user_lock_p) crit; - } - - if ( __kmp_env_consistency_check ) - __kmp_pop_sync( global_tid, ct_critical, loc ); - - __kmp_release_user_lock_with_checks( lck, global_tid ); - -#endif // KMP_USE_DYNAMIC_LOCK -} // __kmp_end_critical_section_reduce_block - - -/* 2.a.i. Reduce Block without a terminating barrier */ -/*! -@ingroup SYNCHRONIZATION -@param loc source location information -@param global_tid global thread number -@param num_vars number of items (variables) to be reduced -@param reduce_size size of data in bytes to be reduced -@param reduce_data pointer to data to be reduced -@param reduce_func callback function providing reduction operation on two operands and returning result of reduction in lhs_data -@param lck pointer to the unique lock data structure -@result 1 for the master thread, 0 for all other team threads, 2 for all team threads if atomic reduction needed - -The nowait version is used for a reduce clause with the nowait argument. -*/ -kmp_int32 -__kmpc_reduce_nowait( - ident_t *loc, kmp_int32 global_tid, - kmp_int32 num_vars, size_t reduce_size, void *reduce_data, void (*reduce_func)(void *lhs_data, void *rhs_data), - kmp_critical_name *lck ) { - - KMP_COUNT_BLOCK(REDUCE_nowait); - int retval = 0; - PACKED_REDUCTION_METHOD_T packed_reduction_method; -#if OMP_40_ENABLED - kmp_team_t *team; - kmp_info_t *th; - int teams_swapped = 0, task_state; -#endif - KA_TRACE( 10, ( "__kmpc_reduce_nowait() enter: called T#%d\n", global_tid ) ); - - // why do we need this initialization here at all? - // Reduction clause can not be used as a stand-alone directive. - - // do not call __kmp_serial_initialize(), it will be called by __kmp_parallel_initialize() if needed - // possible detection of false-positive race by the threadchecker ??? - if( ! TCR_4( __kmp_init_parallel ) ) - __kmp_parallel_initialize(); - - // check correctness of reduce block nesting -#if KMP_USE_DYNAMIC_LOCK - if ( __kmp_env_consistency_check ) - __kmp_push_sync( global_tid, ct_reduce, loc, NULL, 0 ); -#else - if ( __kmp_env_consistency_check ) - __kmp_push_sync( global_tid, ct_reduce, loc, NULL ); -#endif - -#if OMP_40_ENABLED - th = __kmp_thread_from_gtid(global_tid); - if( th->th.th_teams_microtask ) { // AC: check if we are inside the teams construct? - team = th->th.th_team; - if( team->t.t_level == th->th.th_teams_level ) { - // this is reduction at teams construct - KMP_DEBUG_ASSERT(!th->th.th_info.ds.ds_tid); // AC: check that tid == 0 - // Let's swap teams temporarily for the reduction barrier - teams_swapped = 1; - th->th.th_info.ds.ds_tid = team->t.t_master_tid; - th->th.th_team = team->t.t_parent; - th->th.th_team_nproc = th->th.th_team->t.t_nproc; - th->th.th_task_team = th->th.th_team->t.t_task_team[0]; - task_state = th->th.th_task_state; - th->th.th_task_state = 0; - } - } -#endif // OMP_40_ENABLED - - // packed_reduction_method value will be reused by __kmp_end_reduce* function, the value should be kept in a variable - // the variable should be either a construct-specific or thread-specific property, not a team specific property - // (a thread can reach the next reduce block on the next construct, reduce method may differ on the next construct) - // an ident_t "loc" parameter could be used as a construct-specific property (what if loc == 0?) 
- // (if both construct-specific and team-specific variables were shared, then unness extra syncs should be needed) - // a thread-specific variable is better regarding two issues above (next construct and extra syncs) - // a thread-specific "th_local.reduction_method" variable is used currently - // each thread executes 'determine' and 'set' lines (no need to execute by one thread, to avoid unness extra syncs) - - packed_reduction_method = __kmp_determine_reduction_method( loc, global_tid, num_vars, reduce_size, reduce_data, reduce_func, lck ); - __KMP_SET_REDUCTION_METHOD( global_tid, packed_reduction_method ); - - if( packed_reduction_method == critical_reduce_block ) { - - __kmp_enter_critical_section_reduce_block( loc, global_tid, lck ); - retval = 1; - - } else if( packed_reduction_method == empty_reduce_block ) { - - // usage: if team size == 1, no synchronization is required ( Intel platforms only ) - retval = 1; - - } else if( packed_reduction_method == atomic_reduce_block ) { - - retval = 2; - - // all threads should do this pop here (because __kmpc_end_reduce_nowait() won't be called by the code gen) - // (it's not quite good, because the checking block has been closed by this 'pop', - // but atomic operation has not been executed yet, will be executed slightly later, literally on next instruction) - if ( __kmp_env_consistency_check ) - __kmp_pop_sync( global_tid, ct_reduce, loc ); - - } else if( TEST_REDUCTION_METHOD( packed_reduction_method, tree_reduce_block ) ) { - - //AT: performance issue: a real barrier here - //AT: (if master goes slow, other threads are blocked here waiting for the master to come and release them) - //AT: (it's not what a customer might expect specifying NOWAIT clause) - //AT: (specifying NOWAIT won't result in improvement of performance, it'll be confusing to a customer) - //AT: another implementation of *barrier_gather*nowait() (or some other design) might go faster - // and be more in line with sense of NOWAIT - //AT: TO DO: do epcc test and compare times - - // this barrier should be invisible to a customer and to the threading profile tool - // (it's neither a terminating barrier nor customer's code, it's used for an internal purpose) -#if USE_ITT_NOTIFY - __kmp_threads[global_tid]->th.th_ident = loc; -#endif - retval = __kmp_barrier( UNPACK_REDUCTION_BARRIER( packed_reduction_method ), global_tid, FALSE, reduce_size, reduce_data, reduce_func ); - retval = ( retval != 0 ) ? ( 0 ) : ( 1 ); - - // all other workers except master should do this pop here - // ( none of other workers will get to __kmpc_end_reduce_nowait() ) - if ( __kmp_env_consistency_check ) { - if( retval == 0 ) { - __kmp_pop_sync( global_tid, ct_reduce, loc ); - } - } - - } else { - - // should never reach this block - KMP_ASSERT( 0 ); // "unexpected method" - - } -#if OMP_40_ENABLED - if( teams_swapped ) { - // Restore thread structure - th->th.th_info.ds.ds_tid = 0; - th->th.th_team = team; - th->th.th_team_nproc = team->t.t_nproc; - th->th.th_task_team = team->t.t_task_team[task_state]; - th->th.th_task_state = task_state; - } -#endif - KA_TRACE( 10, ( "__kmpc_reduce_nowait() exit: called T#%d: method %08x, returns %08x\n", global_tid, packed_reduction_method, retval ) ); - - return retval; -} - -/*! -@ingroup SYNCHRONIZATION -@param loc source location information -@param global_tid global thread id. -@param lck pointer to the unique lock data structure - -Finish the execution of a reduce nowait. 
-*/ -void -__kmpc_end_reduce_nowait( ident_t *loc, kmp_int32 global_tid, kmp_critical_name *lck ) { - - PACKED_REDUCTION_METHOD_T packed_reduction_method; - - KA_TRACE( 10, ( "__kmpc_end_reduce_nowait() enter: called T#%d\n", global_tid ) ); - - packed_reduction_method = __KMP_GET_REDUCTION_METHOD( global_tid ); - - if( packed_reduction_method == critical_reduce_block ) { - - __kmp_end_critical_section_reduce_block( loc, global_tid, lck ); - - } else if( packed_reduction_method == empty_reduce_block ) { - - // usage: if team size == 1, no synchronization is required ( on Intel platforms only ) - - } else if( packed_reduction_method == atomic_reduce_block ) { - - // neither master nor other workers should get here - // (code gen does not generate this call in case 2: atomic reduce block) - // actually it's better to remove this elseif at all; - // after removal this value will checked by the 'else' and will assert - - } else if( TEST_REDUCTION_METHOD( packed_reduction_method, tree_reduce_block ) ) { - - // only master gets here - - } else { - - // should never reach this block - KMP_ASSERT( 0 ); // "unexpected method" - - } - - if ( __kmp_env_consistency_check ) - __kmp_pop_sync( global_tid, ct_reduce, loc ); - - KA_TRACE( 10, ( "__kmpc_end_reduce_nowait() exit: called T#%d: method %08x\n", global_tid, packed_reduction_method ) ); - - return; -} - -/* 2.a.ii. Reduce Block with a terminating barrier */ - -/*! -@ingroup SYNCHRONIZATION -@param loc source location information -@param global_tid global thread number -@param num_vars number of items (variables) to be reduced -@param reduce_size size of data in bytes to be reduced -@param reduce_data pointer to data to be reduced -@param reduce_func callback function providing reduction operation on two operands and returning result of reduction in lhs_data -@param lck pointer to the unique lock data structure -@result 1 for the master thread, 0 for all other team threads, 2 for all team threads if atomic reduction needed - -A blocking reduce that includes an implicit barrier. -*/ -kmp_int32 -__kmpc_reduce( - ident_t *loc, kmp_int32 global_tid, - kmp_int32 num_vars, size_t reduce_size, void *reduce_data, - void (*reduce_func)(void *lhs_data, void *rhs_data), - kmp_critical_name *lck ) -{ - KMP_COUNT_BLOCK(REDUCE_wait); - int retval = 0; - PACKED_REDUCTION_METHOD_T packed_reduction_method; - - KA_TRACE( 10, ( "__kmpc_reduce() enter: called T#%d\n", global_tid ) ); - - // why do we need this initialization here at all? - // Reduction clause can not be a stand-alone directive. - - // do not call __kmp_serial_initialize(), it will be called by __kmp_parallel_initialize() if needed - // possible detection of false-positive race by the threadchecker ??? - if( ! 
TCR_4( __kmp_init_parallel ) ) - __kmp_parallel_initialize(); - - // check correctness of reduce block nesting -#if KMP_USE_DYNAMIC_LOCK - if ( __kmp_env_consistency_check ) - __kmp_push_sync( global_tid, ct_reduce, loc, NULL, 0 ); -#else - if ( __kmp_env_consistency_check ) - __kmp_push_sync( global_tid, ct_reduce, loc, NULL ); -#endif - - packed_reduction_method = __kmp_determine_reduction_method( loc, global_tid, num_vars, reduce_size, reduce_data, reduce_func, lck ); - __KMP_SET_REDUCTION_METHOD( global_tid, packed_reduction_method ); - - if( packed_reduction_method == critical_reduce_block ) { - - __kmp_enter_critical_section_reduce_block( loc, global_tid, lck ); - retval = 1; - - } else if( packed_reduction_method == empty_reduce_block ) { - - // usage: if team size == 1, no synchronization is required ( Intel platforms only ) - retval = 1; - - } else if( packed_reduction_method == atomic_reduce_block ) { - - retval = 2; - - } else if( TEST_REDUCTION_METHOD( packed_reduction_method, tree_reduce_block ) ) { - - //case tree_reduce_block: - // this barrier should be visible to a customer and to the threading profile tool - // (it's a terminating barrier on constructs if NOWAIT not specified) -#if USE_ITT_NOTIFY - __kmp_threads[global_tid]->th.th_ident = loc; // needed for correct notification of frames -#endif - retval = __kmp_barrier( UNPACK_REDUCTION_BARRIER( packed_reduction_method ), global_tid, TRUE, reduce_size, reduce_data, reduce_func ); - retval = ( retval != 0 ) ? ( 0 ) : ( 1 ); - - // all other workers except master should do this pop here - // ( none of other workers except master will enter __kmpc_end_reduce() ) - if ( __kmp_env_consistency_check ) { - if( retval == 0 ) { // 0: all other workers; 1: master - __kmp_pop_sync( global_tid, ct_reduce, loc ); - } - } - - } else { - - // should never reach this block - KMP_ASSERT( 0 ); // "unexpected method" - - } - - KA_TRACE( 10, ( "__kmpc_reduce() exit: called T#%d: method %08x, returns %08x\n", global_tid, packed_reduction_method, retval ) ); - - return retval; -} - -/*! -@ingroup SYNCHRONIZATION -@param loc source location information -@param global_tid global thread id. -@param lck pointer to the unique lock data structure - -Finish the execution of a blocking reduce. -The lck pointer must be the same as that used in the corresponding start function. 
-*/ -void -__kmpc_end_reduce( ident_t *loc, kmp_int32 global_tid, kmp_critical_name *lck ) { - - PACKED_REDUCTION_METHOD_T packed_reduction_method; - - KA_TRACE( 10, ( "__kmpc_end_reduce() enter: called T#%d\n", global_tid ) ); - - packed_reduction_method = __KMP_GET_REDUCTION_METHOD( global_tid ); - - // this barrier should be visible to a customer and to the threading profile tool - // (it's a terminating barrier on constructs if NOWAIT not specified) - - if( packed_reduction_method == critical_reduce_block ) { - - __kmp_end_critical_section_reduce_block( loc, global_tid, lck ); - - // TODO: implicit barrier: should be exposed -#if USE_ITT_NOTIFY - __kmp_threads[global_tid]->th.th_ident = loc; -#endif - __kmp_barrier( bs_plain_barrier, global_tid, FALSE, 0, NULL, NULL ); - - } else if( packed_reduction_method == empty_reduce_block ) { - - // usage: if team size == 1, no synchronization is required ( Intel platforms only ) - - // TODO: implicit barrier: should be exposed -#if USE_ITT_NOTIFY - __kmp_threads[global_tid]->th.th_ident = loc; -#endif - __kmp_barrier( bs_plain_barrier, global_tid, FALSE, 0, NULL, NULL ); - - } else if( packed_reduction_method == atomic_reduce_block ) { - - // TODO: implicit barrier: should be exposed -#if USE_ITT_NOTIFY - __kmp_threads[global_tid]->th.th_ident = loc; -#endif - __kmp_barrier( bs_plain_barrier, global_tid, FALSE, 0, NULL, NULL ); - - } else if( TEST_REDUCTION_METHOD( packed_reduction_method, tree_reduce_block ) ) { - - // only master executes here (master releases all other workers) - __kmp_end_split_barrier( UNPACK_REDUCTION_BARRIER( packed_reduction_method ), global_tid ); - - } else { - - // should never reach this block - KMP_ASSERT( 0 ); // "unexpected method" - - } - - if ( __kmp_env_consistency_check ) - __kmp_pop_sync( global_tid, ct_reduce, loc ); - - KA_TRACE( 10, ( "__kmpc_end_reduce() exit: called T#%d: method %08x\n", global_tid, packed_reduction_method ) ); - - return; -} - -#undef __KMP_GET_REDUCTION_METHOD -#undef __KMP_SET_REDUCTION_METHOD - -/*-- end of interface to fast scalable reduce routines ---------------------------------------------------------------*/ - -kmp_uint64 -__kmpc_get_taskid() { - - kmp_int32 gtid; - kmp_info_t * thread; - - gtid = __kmp_get_gtid(); - if ( gtid < 0 ) { - return 0; - }; // if - thread = __kmp_thread_from_gtid( gtid ); - return thread->th.th_current_task->td_task_id; - -} // __kmpc_get_taskid - - -kmp_uint64 -__kmpc_get_parent_taskid() { - - kmp_int32 gtid; - kmp_info_t * thread; - kmp_taskdata_t * parent_task; - - gtid = __kmp_get_gtid(); - if ( gtid < 0 ) { - return 0; - }; // if - thread = __kmp_thread_from_gtid( gtid ); - parent_task = thread->th.th_current_task->td_parent; - return ( parent_task == NULL ? 0 : parent_task->td_task_id ); - -} // __kmpc_get_parent_taskid - -void __kmpc_place_threads(int nS, int sO, int nC, int cO, int nT) -{ - if ( ! __kmp_init_serial ) { - __kmp_serial_initialize(); - } - __kmp_place_num_sockets = nS; - __kmp_place_socket_offset = sO; - __kmp_place_num_cores = nC; - __kmp_place_core_offset = cO; - __kmp_place_num_threads_per_core = nT; -} - -#if OMP_45_ENABLED -/*! -@ingroup WORK_SHARING -@param loc source location information. -@param gtid global thread number. -@param num_dims number of associated doacross loops. -@param dims info on loops bounds. - -Initialize doacross loop information. -Expect compiler send us inclusive bounds, -e.g. for(i=2;i<9;i+=2) lo=2, up=8, st=2. 
-*/ -void -__kmpc_doacross_init(ident_t *loc, int gtid, int num_dims, struct kmp_dim * dims) -{ - int j, idx; - kmp_int64 last, trace_count; - kmp_info_t *th = __kmp_threads[gtid]; - kmp_team_t *team = th->th.th_team; - kmp_uint32 *flags; - kmp_disp_t *pr_buf = th->th.th_dispatch; - dispatch_shared_info_t *sh_buf; - - KA_TRACE(20,("__kmpc_doacross_init() enter: called T#%d, num dims %d, active %d\n", - gtid, num_dims, !team->t.t_serialized)); - KMP_DEBUG_ASSERT(dims != NULL); - KMP_DEBUG_ASSERT(num_dims > 0); - - if( team->t.t_serialized ) { - KA_TRACE(20,("__kmpc_doacross_init() exit: serialized team\n")); - return; // no dependencies if team is serialized - } - KMP_DEBUG_ASSERT(team->t.t_nproc > 1); - idx = pr_buf->th_doacross_buf_idx++; // Increment index of shared buffer for the next loop - sh_buf = &team->t.t_disp_buffer[idx % __kmp_dispatch_num_buffers]; - - // Save bounds info into allocated private buffer - KMP_DEBUG_ASSERT(pr_buf->th_doacross_info == NULL); - pr_buf->th_doacross_info = - (kmp_int64*)__kmp_thread_malloc(th, sizeof(kmp_int64)*(4 * num_dims + 1)); - KMP_DEBUG_ASSERT(pr_buf->th_doacross_info != NULL); - pr_buf->th_doacross_info[0] = (kmp_int64)num_dims; // first element is number of dimensions - // Save also address of num_done in order to access it later without knowing the buffer index - pr_buf->th_doacross_info[1] = (kmp_int64)&sh_buf->doacross_num_done; - pr_buf->th_doacross_info[2] = dims[0].lo; - pr_buf->th_doacross_info[3] = dims[0].up; - pr_buf->th_doacross_info[4] = dims[0].st; - last = 5; - for( j = 1; j < num_dims; ++j ) { - kmp_int64 range_length; // To keep ranges of all dimensions but the first dims[0] - if( dims[j].st == 1 ) { // most common case - // AC: should we care of ranges bigger than LLONG_MAX? (not for now) - range_length = dims[j].up - dims[j].lo + 1; - } else { - if( dims[j].st > 0 ) { - KMP_DEBUG_ASSERT(dims[j].up > dims[j].lo); - range_length = (kmp_uint64)(dims[j].up - dims[j].lo) / dims[j].st + 1; - } else { // negative increment - KMP_DEBUG_ASSERT(dims[j].lo > dims[j].up); - range_length = (kmp_uint64)(dims[j].lo - dims[j].up) / (-dims[j].st) + 1; - } - } - pr_buf->th_doacross_info[last++] = range_length; - pr_buf->th_doacross_info[last++] = dims[j].lo; - pr_buf->th_doacross_info[last++] = dims[j].up; - pr_buf->th_doacross_info[last++] = dims[j].st; - } - - // Compute total trip count. - // Start with range of dims[0] which we don't need to keep in the buffer. - if( dims[0].st == 1 ) { // most common case - trace_count = dims[0].up - dims[0].lo + 1; - } else if( dims[0].st > 0 ) { - KMP_DEBUG_ASSERT(dims[0].up > dims[0].lo); - trace_count = (kmp_uint64)(dims[0].up - dims[0].lo) / dims[0].st + 1; - } else { // negative increment - KMP_DEBUG_ASSERT(dims[0].lo > dims[0].up); - trace_count = (kmp_uint64)(dims[0].lo - dims[0].up) / (-dims[0].st) + 1; - } - for( j = 1; j < num_dims; ++j ) { - trace_count *= pr_buf->th_doacross_info[4 * j + 1]; // use kept ranges - } - KMP_DEBUG_ASSERT(trace_count > 0); - - // Check if shared buffer is not occupied by other loop (idx - __kmp_dispatch_num_buffers) - if( idx != sh_buf->doacross_buf_idx ) { - // Shared buffer is occupied, wait for it to be free - __kmp_wait_yield_4( (kmp_uint32*)&sh_buf->doacross_buf_idx, idx, __kmp_eq_4, NULL ); - } - // Check if we are the first thread. After the CAS the first thread gets 0, - // others get 1 if initialization is in progress, allocated pointer otherwise. 
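// The KMP_COMPARE_AND_STORE_RET64 call below implements a three-state
// publication protocol: 0 = empty, 1 = initialization in progress, any other
// value = the published pointer. A standalone analogue using std::atomic
// (names hypothetical, memory orders left at the seq_cst default for clarity):
#include <atomic>
#include <cstddef>
#include <cstdint>
#include <thread>

static std::atomic<std::uintptr_t> shared_flags{0};

static std::uint32_t *get_flags(std::size_t nwords) {
  std::uintptr_t expected = 0;
  if (shared_flags.compare_exchange_strong(expected, 1)) {
    // This thread won the CAS: allocate, then publish the real pointer.
    std::uint32_t *p = new std::uint32_t[nwords]();  // zero-initialized
    shared_flags.store(reinterpret_cast<std::uintptr_t>(p));
    return p;
  }
  // Lost the race: spin past the in-progress sentinel, then reuse the pointer.
  while (shared_flags.load() == 1)
    std::this_thread::yield();
  return reinterpret_cast<std::uint32_t *>(shared_flags.load());
}

int main() {
  std::thread t([] { (void)get_flags(64); });
  std::uint32_t *flags = get_flags(64);
  t.join();
  delete[] flags;  // both callers observed the same array
  return 0;
}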
- flags = (kmp_uint32*)KMP_COMPARE_AND_STORE_RET64( - (kmp_int64*)&sh_buf->doacross_flags,NULL,(kmp_int64)1); - if( flags == NULL ) { - // we are the first thread, allocate the array of flags - kmp_int64 size = trace_count / 8 + 8; // in bytes, use single bit per iteration - sh_buf->doacross_flags = (kmp_uint32*)__kmp_thread_calloc(th, size, 1); - } else if( (kmp_int64)flags == 1 ) { - // initialization is still in progress, need to wait - while( (volatile kmp_int64)sh_buf->doacross_flags == 1 ) { - KMP_YIELD(TRUE); - } - } - KMP_DEBUG_ASSERT((kmp_int64)sh_buf->doacross_flags > 1); // check value of pointer - pr_buf->th_doacross_flags = sh_buf->doacross_flags; // save private copy in order to not - // touch shared buffer on each iteration - KA_TRACE(20,("__kmpc_doacross_init() exit: T#%d\n", gtid)); -} - -void -__kmpc_doacross_wait(ident_t *loc, int gtid, long long *vec) -{ - kmp_int32 shft, num_dims, i; - kmp_uint32 flag; - kmp_int64 iter_number; // iteration number of "collapsed" loop nest - kmp_info_t *th = __kmp_threads[gtid]; - kmp_team_t *team = th->th.th_team; - kmp_disp_t *pr_buf; - kmp_int64 lo, up, st; - - KA_TRACE(20,("__kmpc_doacross_wait() enter: called T#%d\n", gtid)); - if( team->t.t_serialized ) { - KA_TRACE(20,("__kmpc_doacross_wait() exit: serialized team\n")); - return; // no dependencies if team is serialized - } - - // calculate sequential iteration number and check out-of-bounds condition - pr_buf = th->th.th_dispatch; - KMP_DEBUG_ASSERT(pr_buf->th_doacross_info != NULL); - num_dims = pr_buf->th_doacross_info[0]; - lo = pr_buf->th_doacross_info[2]; - up = pr_buf->th_doacross_info[3]; - st = pr_buf->th_doacross_info[4]; - if( st == 1 ) { // most common case - if( vec[0] < lo || vec[0] > up ) { - KA_TRACE(20,( - "__kmpc_doacross_wait() exit: T#%d iter %lld is out of bounds [%lld,%lld]\n", - gtid, vec[0], lo, up)); - return; - } - iter_number = vec[0] - lo; - } else if( st > 0 ) { - if( vec[0] < lo || vec[0] > up ) { - KA_TRACE(20,( - "__kmpc_doacross_wait() exit: T#%d iter %lld is out of bounds [%lld,%lld]\n", - gtid, vec[0], lo, up)); - return; - } - iter_number = (kmp_uint64)(vec[0] - lo) / st; - } else { // negative increment - if( vec[0] > lo || vec[0] < up ) { - KA_TRACE(20,( - "__kmpc_doacross_wait() exit: T#%d iter %lld is out of bounds [%lld,%lld]\n", - gtid, vec[0], lo, up)); - return; - } - iter_number = (kmp_uint64)(lo - vec[0]) / (-st); - } - for( i = 1; i < num_dims; ++i ) { - kmp_int64 iter, ln; - kmp_int32 j = i * 4; - ln = pr_buf->th_doacross_info[j + 1]; - lo = pr_buf->th_doacross_info[j + 2]; - up = pr_buf->th_doacross_info[j + 3]; - st = pr_buf->th_doacross_info[j + 4]; - if( st == 1 ) { - if( vec[i] < lo || vec[i] > up ) { - KA_TRACE(20,( - "__kmpc_doacross_wait() exit: T#%d iter %lld is out of bounds [%lld,%lld]\n", - gtid, vec[i], lo, up)); - return; - } - iter = vec[i] - lo; - } else if( st > 0 ) { - if( vec[i] < lo || vec[i] > up ) { - KA_TRACE(20,( - "__kmpc_doacross_wait() exit: T#%d iter %lld is out of bounds [%lld,%lld]\n", - gtid, vec[i], lo, up)); - return; - } - iter = (kmp_uint64)(vec[i] - lo) / st; - } else { // st < 0 - if( vec[i] > lo || vec[i] < up ) { - KA_TRACE(20,( - "__kmpc_doacross_wait() exit: T#%d iter %lld is out of bounds [%lld,%lld]\n", - gtid, vec[i], lo, up)); - return; - } - iter = (kmp_uint64)(lo - vec[i]) / (-st); - } - iter_number = iter + ln * iter_number; - } - shft = iter_number % 32; // use 32-bit granularity - iter_number >>= 5; // divided by 32 - flag = 1 << shft; - while( (flag & 
pr_buf->th_doacross_flags[iter_number]) == 0 ) { - KMP_YIELD(TRUE); - } - KA_TRACE(20,("__kmpc_doacross_wait() exit: T#%d wait for iter %lld completed\n", - gtid, (iter_number<<5)+shft)); -} - -void -__kmpc_doacross_post(ident_t *loc, int gtid, long long *vec) -{ - kmp_int32 shft, num_dims, i; - kmp_uint32 flag; - kmp_int64 iter_number; // iteration number of "collapsed" loop nest - kmp_info_t *th = __kmp_threads[gtid]; - kmp_team_t *team = th->th.th_team; - kmp_disp_t *pr_buf; - kmp_int64 lo, st; - - KA_TRACE(20,("__kmpc_doacross_post() enter: called T#%d\n", gtid)); - if( team->t.t_serialized ) { - KA_TRACE(20,("__kmpc_doacross_post() exit: serialized team\n")); - return; // no dependencies if team is serialized - } - - // calculate sequential iteration number (same as in "wait" but no out-of-bounds checks) - pr_buf = th->th.th_dispatch; - KMP_DEBUG_ASSERT(pr_buf->th_doacross_info != NULL); - num_dims = pr_buf->th_doacross_info[0]; - lo = pr_buf->th_doacross_info[2]; - st = pr_buf->th_doacross_info[4]; - if( st == 1 ) { // most common case - iter_number = vec[0] - lo; - } else if( st > 0 ) { - iter_number = (kmp_uint64)(vec[0] - lo) / st; - } else { // negative increment - iter_number = (kmp_uint64)(lo - vec[0]) / (-st); - } - for( i = 1; i < num_dims; ++i ) { - kmp_int64 iter, ln; - kmp_int32 j = i * 4; - ln = pr_buf->th_doacross_info[j + 1]; - lo = pr_buf->th_doacross_info[j + 2]; - st = pr_buf->th_doacross_info[j + 4]; - if( st == 1 ) { - iter = vec[i] - lo; - } else if( st > 0 ) { - iter = (kmp_uint64)(vec[i] - lo) / st; - } else { // st < 0 - iter = (kmp_uint64)(lo - vec[i]) / (-st); - } - iter_number = iter + ln * iter_number; - } - shft = iter_number % 32; // use 32-bit granularity - iter_number >>= 5; // divided by 32 - flag = 1 << shft; - if( (flag & pr_buf->th_doacross_flags[iter_number]) == 0 ) - KMP_TEST_THEN_OR32( (kmp_int32*)&pr_buf->th_doacross_flags[iter_number], (kmp_int32)flag ); - KA_TRACE(20,("__kmpc_doacross_post() exit: T#%d iter %lld posted\n", - gtid, (iter_number<<5)+shft)); -} - -void -__kmpc_doacross_fini(ident_t *loc, int gtid) -{ - kmp_int64 num_done; - kmp_info_t *th = __kmp_threads[gtid]; - kmp_team_t *team = th->th.th_team; - kmp_disp_t *pr_buf = th->th.th_dispatch; - - KA_TRACE(20,("__kmpc_doacross_fini() enter: called T#%d\n", gtid)); - if( team->t.t_serialized ) { - KA_TRACE(20,("__kmpc_doacross_fini() exit: serialized team %p\n", team)); - return; // nothing to do - } - num_done = KMP_TEST_THEN_INC64((kmp_int64*)pr_buf->th_doacross_info[1]) + 1; - if( num_done == th->th.th_team_nproc ) { - // we are the last thread, need to free shared resources - int idx = pr_buf->th_doacross_buf_idx - 1; - dispatch_shared_info_t *sh_buf = &team->t.t_disp_buffer[idx % __kmp_dispatch_num_buffers]; - KMP_DEBUG_ASSERT(pr_buf->th_doacross_info[1] == (kmp_int64)&sh_buf->doacross_num_done); - KMP_DEBUG_ASSERT(num_done == (kmp_int64)sh_buf->doacross_num_done); - KMP_DEBUG_ASSERT(idx == sh_buf->doacross_buf_idx); - __kmp_thread_free(th, (void*)sh_buf->doacross_flags); - sh_buf->doacross_flags = NULL; - sh_buf->doacross_num_done = 0; - sh_buf->doacross_buf_idx += __kmp_dispatch_num_buffers; // free buffer for future re-use - } - // free private resources (need to keep buffer index forever) - __kmp_thread_free(th, (void*)pr_buf->th_doacross_info); - pr_buf->th_doacross_info = NULL; - KA_TRACE(20,("__kmpc_doacross_fini() exit: T#%d\n", gtid)); -} -#endif - -// end of file // - Index: runtime/src/kmp_csupport.cpp =================================================================== 
--- runtime/src/kmp_csupport.cpp +++ runtime/src/kmp_csupport.cpp @@ -1,5 +1,5 @@ /* - * kmp_csupport.c -- kfront linkage support for OpenMP. + * kmp_csupport.cpp -- kfront linkage support for OpenMP. */ @@ -464,9 +464,10 @@ void __kmpc_serialized_parallel(ident_t *loc, kmp_int32 global_tid) { - __kmp_serialized_parallel(loc, global_tid); /* The implementation is now in kmp_runtime.c so that it can share static functions with - * kmp_fork_call since the tasks to be done are similar in each case. - */ + // The implementation is now in kmp_runtime.cpp so that it can share static + // functions with kmp_fork_call since the tasks to be done are similar in + // each case. + __kmp_serialized_parallel(loc, global_tid); } /*! Index: runtime/src/kmp_debug.c =================================================================== --- runtime/src/kmp_debug.c +++ runtime/src/kmp_debug.c @@ -1,142 +0,0 @@ -/* - * kmp_debug.c -- debug utilities for the Guide library - */ - - -//===----------------------------------------------------------------------===// -// -// The LLVM Compiler Infrastructure -// -// This file is dual licensed under the MIT and the University of Illinois Open -// Source Licenses. See LICENSE.txt for details. -// -//===----------------------------------------------------------------------===// - - -#include "kmp.h" -#include "kmp_debug.h" /* really necessary? */ -#include "kmp_i18n.h" -#include "kmp_io.h" - -#ifdef KMP_DEBUG -void -__kmp_debug_printf_stdout( char const * format, ... ) -{ - va_list ap; - va_start( ap, format ); - - __kmp_vprintf( kmp_out, format, ap ); - - va_end(ap); -} -#endif - -void -__kmp_debug_printf( char const * format, ... ) -{ - va_list ap; - va_start( ap, format ); - - __kmp_vprintf( kmp_err, format, ap ); - - va_end( ap ); -} - -#ifdef KMP_USE_ASSERT - int - __kmp_debug_assert( - char const * msg, - char const * file, - int line - ) { - - if ( file == NULL ) { - file = KMP_I18N_STR( UnknownFile ); - } else { - // Remove directories from path, leave only file name. File name is enough, there is no need - // in bothering developers and customers with full paths. 
- char const * slash = strrchr( file, '/' ); - if ( slash != NULL ) { - file = slash + 1; - }; // if - }; // if - - #ifdef KMP_DEBUG - __kmp_acquire_bootstrap_lock( & __kmp_stdio_lock ); - __kmp_debug_printf( "Assertion failure at %s(%d): %s.\n", file, line, msg ); - __kmp_release_bootstrap_lock( & __kmp_stdio_lock ); - #ifdef USE_ASSERT_BREAK - #if KMP_OS_WINDOWS - DebugBreak(); - #endif - #endif // USE_ASSERT_BREAK - #ifdef USE_ASSERT_STALL - /* __kmp_infinite_loop(); */ - for(;;); - #endif // USE_ASSERT_STALL - #ifdef USE_ASSERT_SEG - { - int volatile * ZERO = (int*) 0; - ++ (*ZERO); - } - #endif // USE_ASSERT_SEG - #endif - - __kmp_msg( - kmp_ms_fatal, - KMP_MSG( AssertionFailure, file, line ), - KMP_HNT( SubmitBugReport ), - __kmp_msg_null - ); - - return 0; - - } // __kmp_debug_assert - -#endif // KMP_USE_ASSERT - -/* Dump debugging buffer to stderr */ -void -__kmp_dump_debug_buffer( void ) -{ - if ( __kmp_debug_buffer != NULL ) { - int i; - int dc = __kmp_debug_count; - char *db = & __kmp_debug_buffer[ (dc % __kmp_debug_buf_lines) * __kmp_debug_buf_chars ]; - char *db_end = & __kmp_debug_buffer[ __kmp_debug_buf_lines * __kmp_debug_buf_chars ]; - char *db2; - - __kmp_acquire_bootstrap_lock( & __kmp_stdio_lock ); - __kmp_printf_no_lock( "\nStart dump of debugging buffer (entry=%d):\n", - dc % __kmp_debug_buf_lines ); - - for ( i = 0; i < __kmp_debug_buf_lines; i++ ) { - - if ( *db != '\0' ) { - /* Fix up where no carriage return before string termination char */ - for ( db2 = db + 1; db2 < db + __kmp_debug_buf_chars - 1; db2 ++) { - if ( *db2 == '\0' ) { - if ( *(db2-1) != '\n' ) { *db2 = '\n'; *(db2+1) = '\0'; } - break; - } - } - /* Handle case at end by shortening the printed message by one char if necessary */ - if ( db2 == db + __kmp_debug_buf_chars - 1 && - *db2 == '\0' && *(db2-1) != '\n' ) { - *(db2-1) = '\n'; - } - - __kmp_printf_no_lock( "%4d: %.*s", i, __kmp_debug_buf_chars, db ); - *db = '\0'; /* only let it print once! */ - } - - db += __kmp_debug_buf_chars; - if ( db >= db_end ) - db = __kmp_debug_buffer; - } - - __kmp_printf_no_lock( "End dump of debugging buffer (entry=%d).\n\n", - ( dc+i-1 ) % __kmp_debug_buf_lines ); - __kmp_release_bootstrap_lock( & __kmp_stdio_lock ); - } -} Index: runtime/src/kmp_debug.cpp =================================================================== --- runtime/src/kmp_debug.cpp +++ runtime/src/kmp_debug.cpp @@ -1,5 +1,5 @@ /* - * kmp_debug.c -- debug utilities for the Guide library + * kmp_debug.cpp -- debug utilities for the Guide library */ Index: runtime/src/kmp_debugger.c =================================================================== --- runtime/src/kmp_debugger.c +++ runtime/src/kmp_debugger.c @@ -1,315 +0,0 @@ -#if USE_DEBUGGER -/* - * kmp_debugger.c -- debugger support. - */ - - -//===----------------------------------------------------------------------===// -// -// The LLVM Compiler Infrastructure -// -// This file is dual licensed under the MIT and the University of Illinois Open -// Source Licenses. See LICENSE.txt for details. -// -//===----------------------------------------------------------------------===// - - -#include "kmp.h" -#include "kmp_lock.h" -#include "kmp_omp.h" -#include "kmp_str.h" - -/* - NOTE: All variable names are known to the debugger, do not change! -*/ - -#ifdef __cplusplus - extern "C" { - extern kmp_omp_struct_info_t __kmp_omp_debug_struct_info; - } // extern "C" -#endif // __cplusplus - -int __kmp_debugging = FALSE; // Boolean whether currently debugging OpenMP RTL. 
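// The offset_and_size_of()/addr_and_size_of() macros defined next are how the
// runtime exports its internal layout: the debugger reads this table from the
// target process and decodes structures without needing the headers. A minimal
// compilable analogue of the pattern (demo_thread_t and FIELD_INFO are
// illustrative names, not part of the runtime):
#include <cstddef>
#include <cstdio>

struct field_info_t { std::size_t offset; std::size_t size; };

// Same shape as offset_and_size_of(): where the field lives, how big it is.
#define FIELD_INFO(S, f) { offsetof(S, f), sizeof(((S *)0)->f) }

struct demo_thread_t { int gtid; void *team; double stats[4]; };

static const field_info_t demo_struct_info[] = {
  FIELD_INFO(demo_thread_t, gtid),
  FIELD_INFO(demo_thread_t, team),
  FIELD_INFO(demo_thread_t, stats),
};

int main() {
  for (std::size_t i = 0; i < sizeof(demo_struct_info) / sizeof(demo_struct_info[0]); ++i)
    std::printf("field %zu: offset=%zu size=%zu\n",
                i, demo_struct_info[i].offset, demo_struct_info[i].size);
  return 0;
}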
- -#define offset_and_size_of( structure, field ) \ - { \ - offsetof( structure, field ), \ - sizeof( ( (structure *) NULL)->field ) \ - } - -#define offset_and_size_not_available \ - { -1, -1 } - -#define addr_and_size_of( var ) \ - { \ - (kmp_uint64)( & var ), \ - sizeof( var ) \ - } - -#define nthr_buffer_size 1024 -static kmp_int32 -kmp_omp_nthr_info_buffer[ nthr_buffer_size ] = - { nthr_buffer_size * sizeof( kmp_int32 ) }; - -/* TODO: Check punctuation for various platforms here */ -static char func_microtask[] = "__kmp_invoke_microtask"; -static char func_fork[] = "__kmpc_fork_call"; -static char func_fork_teams[] = "__kmpc_fork_teams"; - - -// Various info about runtime structures: addresses, field offsets, sizes, etc. -kmp_omp_struct_info_t -__kmp_omp_debug_struct_info = { - - /* Change this only if you make a fundamental data structure change here */ - KMP_OMP_VERSION, - - /* sanity check. Only should be checked if versions are identical - * This is also used for backward compatibility to get the runtime - * structure size if it the runtime is older than the interface */ - sizeof( kmp_omp_struct_info_t ), - - /* OpenMP RTL version info. */ - addr_and_size_of( __kmp_version_major ), - addr_and_size_of( __kmp_version_minor ), - addr_and_size_of( __kmp_version_build ), - addr_and_size_of( __kmp_openmp_version ), - { (kmp_uint64)( __kmp_copyright ) + KMP_VERSION_MAGIC_LEN, 0 }, // Skip magic prefix. - - /* Various globals. */ - addr_and_size_of( __kmp_threads ), - addr_and_size_of( __kmp_root ), - addr_and_size_of( __kmp_threads_capacity ), - addr_and_size_of( __kmp_monitor ), -#if ! KMP_USE_DYNAMIC_LOCK - addr_and_size_of( __kmp_user_lock_table ), -#endif - addr_and_size_of( func_microtask ), - addr_and_size_of( func_fork ), - addr_and_size_of( func_fork_teams ), - addr_and_size_of( __kmp_team_counter ), - addr_and_size_of( __kmp_task_counter ), - addr_and_size_of( kmp_omp_nthr_info_buffer ), - sizeof( void * ), - OMP_LOCK_T_SIZE < sizeof(void *), - bs_last_barrier, - INITIAL_TASK_DEQUE_SIZE, - - // thread structure information - sizeof( kmp_base_info_t ), - offset_and_size_of( kmp_base_info_t, th_info ), - offset_and_size_of( kmp_base_info_t, th_team ), - offset_and_size_of( kmp_base_info_t, th_root ), - offset_and_size_of( kmp_base_info_t, th_serial_team ), - offset_and_size_of( kmp_base_info_t, th_ident ), - offset_and_size_of( kmp_base_info_t, th_spin_here ), - offset_and_size_of( kmp_base_info_t, th_next_waiting ), - offset_and_size_of( kmp_base_info_t, th_task_team ), - offset_and_size_of( kmp_base_info_t, th_current_task ), - offset_and_size_of( kmp_base_info_t, th_task_state ), - offset_and_size_of( kmp_base_info_t, th_bar ), - offset_and_size_of( kmp_bstate_t, b_worker_arrived ), - -#if OMP_40_ENABLED - // teams information - offset_and_size_of( kmp_base_info_t, th_teams_microtask), - offset_and_size_of( kmp_base_info_t, th_teams_level), - offset_and_size_of( kmp_teams_size_t, nteams ), - offset_and_size_of( kmp_teams_size_t, nth ), -#endif - - // kmp_desc structure (for info field above) - sizeof( kmp_desc_base_t ), - offset_and_size_of( kmp_desc_base_t, ds_tid ), - offset_and_size_of( kmp_desc_base_t, ds_gtid ), - // On Windows* OS, ds_thread contains a thread /handle/, which is not usable, while thread /id/ - // is in ds_thread_id. 
- #if KMP_OS_WINDOWS - offset_and_size_of( kmp_desc_base_t, ds_thread_id), - #else - offset_and_size_of( kmp_desc_base_t, ds_thread), - #endif - - // team structure information - sizeof( kmp_base_team_t ), - offset_and_size_of( kmp_base_team_t, t_master_tid ), - offset_and_size_of( kmp_base_team_t, t_ident ), - offset_and_size_of( kmp_base_team_t, t_parent ), - offset_and_size_of( kmp_base_team_t, t_nproc ), - offset_and_size_of( kmp_base_team_t, t_threads ), - offset_and_size_of( kmp_base_team_t, t_serialized ), - offset_and_size_of( kmp_base_team_t, t_id ), - offset_and_size_of( kmp_base_team_t, t_pkfn ), - offset_and_size_of( kmp_base_team_t, t_task_team ), - offset_and_size_of( kmp_base_team_t, t_implicit_task_taskdata ), -#if OMP_40_ENABLED - offset_and_size_of( kmp_base_team_t, t_cancel_request ), -#endif - offset_and_size_of( kmp_base_team_t, t_bar ), - offset_and_size_of( kmp_balign_team_t, b_master_arrived ), - offset_and_size_of( kmp_balign_team_t, b_team_arrived ), - - // root structure information - sizeof( kmp_base_root_t ), - offset_and_size_of( kmp_base_root_t, r_root_team ), - offset_and_size_of( kmp_base_root_t, r_hot_team ), - offset_and_size_of( kmp_base_root_t, r_uber_thread ), - offset_and_size_not_available, - - // ident structure information - sizeof( ident_t ), - offset_and_size_of( ident_t, psource ), - offset_and_size_of( ident_t, flags ), - - // lock structure information - sizeof( kmp_base_queuing_lock_t ), - offset_and_size_of( kmp_base_queuing_lock_t, initialized ), - offset_and_size_of( kmp_base_queuing_lock_t, location ), - offset_and_size_of( kmp_base_queuing_lock_t, tail_id ), - offset_and_size_of( kmp_base_queuing_lock_t, head_id ), - offset_and_size_of( kmp_base_queuing_lock_t, next_ticket ), - offset_and_size_of( kmp_base_queuing_lock_t, now_serving ), - offset_and_size_of( kmp_base_queuing_lock_t, owner_id ), - offset_and_size_of( kmp_base_queuing_lock_t, depth_locked ), - offset_and_size_of( kmp_base_queuing_lock_t, flags ), - -#if ! KMP_USE_DYNAMIC_LOCK - /* Lock table. */ - sizeof( kmp_lock_table_t ), - offset_and_size_of( kmp_lock_table_t, used ), - offset_and_size_of( kmp_lock_table_t, allocated ), - offset_and_size_of( kmp_lock_table_t, table ), -#endif - - // Task team structure information. - sizeof( kmp_base_task_team_t ), - offset_and_size_of( kmp_base_task_team_t, tt_threads_data ), - offset_and_size_of( kmp_base_task_team_t, tt_found_tasks ), - offset_and_size_of( kmp_base_task_team_t, tt_nproc ), - offset_and_size_of( kmp_base_task_team_t, tt_unfinished_threads ), - offset_and_size_of( kmp_base_task_team_t, tt_active ), - - // task_data_t. 
- sizeof( kmp_taskdata_t ), - offset_and_size_of( kmp_taskdata_t, td_task_id ), - offset_and_size_of( kmp_taskdata_t, td_flags ), - offset_and_size_of( kmp_taskdata_t, td_team ), - offset_and_size_of( kmp_taskdata_t, td_parent ), - offset_and_size_of( kmp_taskdata_t, td_level ), - offset_and_size_of( kmp_taskdata_t, td_ident ), - offset_and_size_of( kmp_taskdata_t, td_allocated_child_tasks ), - offset_and_size_of( kmp_taskdata_t, td_incomplete_child_tasks ), - - offset_and_size_of( kmp_taskdata_t, td_taskwait_ident ), - offset_and_size_of( kmp_taskdata_t, td_taskwait_counter ), - offset_and_size_of( kmp_taskdata_t, td_taskwait_thread ), - -#if OMP_40_ENABLED - offset_and_size_of( kmp_taskdata_t, td_taskgroup ), - offset_and_size_of( kmp_taskgroup_t, count ), - offset_and_size_of( kmp_taskgroup_t, cancel_request ), - - offset_and_size_of( kmp_taskdata_t, td_depnode ), - offset_and_size_of( kmp_depnode_list_t, node ), - offset_and_size_of( kmp_depnode_list_t, next ), - offset_and_size_of( kmp_base_depnode_t, successors ), - offset_and_size_of( kmp_base_depnode_t, task ), - offset_and_size_of( kmp_base_depnode_t, npredecessors ), - offset_and_size_of( kmp_base_depnode_t, nrefs ), -#endif - offset_and_size_of( kmp_task_t, routine ), - - // thread_data_t. - sizeof( kmp_thread_data_t ), - offset_and_size_of( kmp_base_thread_data_t, td_deque ), - offset_and_size_of( kmp_base_thread_data_t, td_deque_size ), - offset_and_size_of( kmp_base_thread_data_t, td_deque_head ), - offset_and_size_of( kmp_base_thread_data_t, td_deque_tail ), - offset_and_size_of( kmp_base_thread_data_t, td_deque_ntasks ), - offset_and_size_of( kmp_base_thread_data_t, td_deque_last_stolen ), - - // The last field. - KMP_OMP_VERSION, - -}; // __kmp_omp_debug_struct_info - -#undef offset_and_size_of -#undef addr_and_size_of - -/* - Intel compiler on IA-32 architecture issues a warning "conversion - from "unsigned long long" to "char *" may lose significant bits" - when 64-bit value is assigned to 32-bit pointer. Use this function - to suppress the warning. -*/ -static inline -void * -__kmp_convert_to_ptr( - kmp_uint64 addr -) { - #if KMP_COMPILER_ICC - #pragma warning( push ) - #pragma warning( disable: 810 ) // conversion from "unsigned long long" to "char *" may lose significant bits - #pragma warning( disable: 1195 ) // conversion from integer to smaller pointer - #endif // KMP_COMPILER_ICC - return (void *) addr; - #if KMP_COMPILER_ICC - #pragma warning( pop ) - #endif // KMP_COMPILER_ICC -} // __kmp_convert_to_ptr - - -static int -kmp_location_match( - kmp_str_loc_t * loc, - kmp_omp_nthr_item_t * item -) { - - int file_match = 0; - int func_match = 0; - int line_match = 0; - - char * file = (char *) __kmp_convert_to_ptr( item->file ); - char * func = (char *) __kmp_convert_to_ptr( item->func ); - file_match = __kmp_str_fname_match( & loc->fname, file ); - func_match = - item->func == 0 // If item->func is NULL, it allows any func name. - || - strcmp( func, "*" ) == 0 - || - ( loc->func != NULL && strcmp( loc->func, func ) == 0 ); - line_match = - item->begin <= loc->line - && - ( item->end <= 0 || loc->line <= item->end ); // if item->end <= 0, it means "end of file". 
- - return ( file_match && func_match && line_match ); - -} // kmp_location_match - - -int -__kmp_omp_num_threads( - ident_t const * ident -) { - - int num_threads = 0; - - kmp_omp_nthr_info_t * info = - (kmp_omp_nthr_info_t *) __kmp_convert_to_ptr( __kmp_omp_debug_struct_info.nthr_info.addr ); - if ( info->num > 0 && info->array != 0 ) { - kmp_omp_nthr_item_t * items = (kmp_omp_nthr_item_t *) __kmp_convert_to_ptr( info->array ); - kmp_str_loc_t loc = __kmp_str_loc_init( ident->psource, 1 ); - int i; - for ( i = 0; i < info->num; ++ i ) { - if ( kmp_location_match( & loc, & items[ i ] ) ) { - num_threads = items[ i ].num_threads; - }; // if - }; // for - __kmp_str_loc_free( & loc ); - }; // if - - return num_threads;; - -} // __kmp_omp_num_threads -#endif /* USE_DEBUGGER */ Index: runtime/src/kmp_dispatch.cpp =================================================================== --- runtime/src/kmp_dispatch.cpp +++ runtime/src/kmp_dispatch.cpp @@ -2616,7 +2616,7 @@ /*! @} */ //----------------------------------------------------------------------------------------- -//Non-template routines from kmp_dispatch.c used in other sources +//Non-template routines from kmp_dispatch.cpp used in other sources kmp_uint32 __kmp_eq_4( kmp_uint32 value, kmp_uint32 checker) { return value == checker; Index: runtime/src/kmp_environment.c =================================================================== --- runtime/src/kmp_environment.c +++ runtime/src/kmp_environment.c @@ -1,595 +0,0 @@ -/* - * kmp_environment.c -- Handle environment variables OS-independently. - */ - - -//===----------------------------------------------------------------------===// -// -// The LLVM Compiler Infrastructure -// -// This file is dual licensed under the MIT and the University of Illinois Open -// Source Licenses. See LICENSE.txt for details. -// -//===----------------------------------------------------------------------===// - - -/* - ------------------------------------------------------------------------------------------------ - We use GetEnvironmentVariable for Windows* OS instead of getenv because the act of - loading a DLL on Windows* OS makes any user-set environment variables (i.e. with putenv()) - unavailable. getenv() apparently gets a clean copy of the env variables as they existed - at the start of the run. - JH 12/23/2002 - ------------------------------------------------------------------------------------------------ - On Windows* OS, there are two environments (at least, see below): - - 1. Environment maintained by Windows* OS on IA-32 architecture. - Accessible through GetEnvironmentVariable(), - SetEnvironmentVariable(), and GetEnvironmentStrings(). - - 2. Environment maintained by C RTL. Accessible through getenv(), putenv(). - - putenv() function updates both C and Windows* OS on IA-32 architecture. getenv() function - search for variables in C RTL environment only. Windows* OS on IA-32 architecture functions work *only* - with Windows* OS on IA-32 architecture. - - Windows* OS on IA-32 architecture maintained by OS, so there is always only one Windows* OS on - IA-32 architecture per process. Changes in Windows* OS on IA-32 architecture are process-visible. - - C environment maintained by C RTL. Multiple copies of C RTL may be present in the process, and - each C RTL maintains its own environment. :-( - - Thus, proper way to work with environment on Windows* OS is: - - 1. Set variables with putenv() function -- both C and Windows* OS on - IA-32 architecture are being updated. 
Windows* OS on
-       IA-32 architecture may be considered as primary target,
-       while updating C RTL environment is a free bonus.
-
-    2. Get variables with GetEnvironmentVariable() -- getenv() does not
-       search Windows* OS on IA-32 architecture, and can not see variables
-       set with SetEnvironmentVariable().
-
-    2007-04-05 -- lev
-    ------------------------------------------------------------------------------------------------
-*/
-
-#include "kmp_environment.h"
-
-#include "kmp_os.h"    // KMP_OS_*.
-#include "kmp.h"       //
-#include "kmp_str.h"   // __kmp_str_*().
-#include "kmp_i18n.h"
-
-#if KMP_OS_UNIX
-    #include <stdlib.h>    // getenv, setenv, unsetenv.
-    #include <string.h>    // strlen, strcpy.
-    #if KMP_OS_DARWIN
-        #include <crt_externs.h>
-        #define environ (*_NSGetEnviron())
-    #else
-        extern char * * environ;
-    #endif
-#elif KMP_OS_WINDOWS
-    #include <windows.h>   // GetEnvironmentVariable, SetEnvironmentVariable, GetLastError.
-#else
-    #error Unknown or unsupported OS.
-#endif
-
-
-// TODO: Eliminate direct memory allocations, use string operations instead.
-
-static inline
-void *
-allocate(
-    size_t size
-) {
-    void * ptr = KMP_INTERNAL_MALLOC( size );
-    if ( ptr == NULL ) {
-        KMP_FATAL( MemoryAllocFailed );
-    }; // if
-    return ptr;
-} // allocate
-
-
-char *
-__kmp_env_get( char const * name ) {
-
-    char * result = NULL;
-
-    #if KMP_OS_UNIX
-        char const * value = getenv( name );
-        if ( value != NULL ) {
-            size_t len = KMP_STRLEN( value ) + 1;
-            result = (char *) KMP_INTERNAL_MALLOC( len );
-            if ( result == NULL ) {
-                KMP_FATAL( MemoryAllocFailed );
-            }; // if
-            KMP_STRNCPY_S( result, len, value, len );
-        }; // if
-    #elif KMP_OS_WINDOWS
-        /*
-            We use GetEnvironmentVariable for Windows* OS instead of getenv because the act of
-            loading a DLL on Windows* OS makes any user-set environment variables (i.e. with putenv())
-            unavailable. getenv() apparently gets a clean copy of the env variables as they existed
-            at the start of the run.
-            JH 12/23/2002
-        */
-        DWORD rc;
-        rc = GetEnvironmentVariable( name, NULL, 0 );
-        if ( ! rc ) {
-            DWORD error = GetLastError();
-            if ( error != ERROR_ENVVAR_NOT_FOUND ) {
-                __kmp_msg(
-                    kmp_ms_fatal,
-                    KMP_MSG( CantGetEnvVar, name ),
-                    KMP_ERR( error ),
-                    __kmp_msg_null
-                );
-            }; // if
-            // Variable is not found, it's ok, just continue.
-        } else {
-            DWORD len = rc;
-            result = (char *) KMP_INTERNAL_MALLOC( len );
-            if ( result == NULL ) {
-                KMP_FATAL( MemoryAllocFailed );
-            }; // if
-            rc = GetEnvironmentVariable( name, result, len );
-            if ( ! rc ) {
-                // GetEnvironmentVariable() may return 0 if variable is empty.
-                // In such a case GetLastError() returns ERROR_SUCCESS.
-                DWORD error = GetLastError();
-                if ( error != ERROR_SUCCESS ) {
-                    // Unexpected error. The variable should be in the environment,
-                    // and buffer should be large enough.
-                    __kmp_msg(
-                        kmp_ms_fatal,
-                        KMP_MSG( CantGetEnvVar, name ),
-                        KMP_ERR( error ),
-                        __kmp_msg_null
-                    );
-                    KMP_INTERNAL_FREE( (void *) result );
-                    result = NULL;
-                }; // if
-            }; // if
-        }; // if
-    #else
-        #error Unknown or unsupported OS.
-    #endif
-
-    return result;
-
-} // func __kmp_env_get
-
-
-// TODO: Find and replace all regular free() with __kmp_env_free().
-
-void
-__kmp_env_free( char const * * value ) {
-
-    KMP_DEBUG_ASSERT( value != NULL );
-    KMP_INTERNAL_FREE( (void *) * value );
-    * value = NULL;
-
-} // func __kmp_env_free
-
-
-
-int
-__kmp_env_exists( char const * name ) {
-
-    #if KMP_OS_UNIX
-        char const * value = getenv( name );
-        return ( ( value == NULL ) ? 
( 0 ) : ( 1 ) ); - #elif KMP_OS_WINDOWS - DWORD rc; - rc = GetEnvironmentVariable( name, NULL, 0 ); - if ( rc == 0 ) { - DWORD error = GetLastError(); - if ( error != ERROR_ENVVAR_NOT_FOUND ) { - __kmp_msg( - kmp_ms_fatal, - KMP_MSG( CantGetEnvVar, name ), - KMP_ERR( error ), - __kmp_msg_null - ); - }; // if - return 0; - }; // if - return 1; - #else - #error Unknown or unsupported OS. - #endif - -} // func __kmp_env_exists - - - -void -__kmp_env_set( char const * name, char const * value, int overwrite ) { - - #if KMP_OS_UNIX - int rc = setenv( name, value, overwrite ); - if ( rc != 0 ) { - // Dead code. I tried to put too many variables into Linux* OS - // environment on IA-32 architecture. When application consumes - // more than ~2.5 GB of memory, entire system feels bad. Sometimes - // application is killed (by OS?), sometimes system stops - // responding... But this error message never appears. --ln - __kmp_msg( - kmp_ms_fatal, - KMP_MSG( CantSetEnvVar, name ), - KMP_HNT( NotEnoughMemory ), - __kmp_msg_null - ); - }; // if - #elif KMP_OS_WINDOWS - BOOL rc; - if ( ! overwrite ) { - rc = GetEnvironmentVariable( name, NULL, 0 ); - if ( rc ) { - // Variable exists, do not overwrite. - return; - }; // if - DWORD error = GetLastError(); - if ( error != ERROR_ENVVAR_NOT_FOUND ) { - __kmp_msg( - kmp_ms_fatal, - KMP_MSG( CantGetEnvVar, name ), - KMP_ERR( error ), - __kmp_msg_null - ); - }; // if - }; // if - rc = SetEnvironmentVariable( name, value ); - if ( ! rc ) { - DWORD error = GetLastError(); - __kmp_msg( - kmp_ms_fatal, - KMP_MSG( CantSetEnvVar, name ), - KMP_ERR( error ), - __kmp_msg_null - ); - }; // if - #else - #error Unknown or unsupported OS. - #endif - -} // func __kmp_env_set - - - -void -__kmp_env_unset( char const * name ) { - - #if KMP_OS_UNIX - unsetenv( name ); - #elif KMP_OS_WINDOWS - BOOL rc = SetEnvironmentVariable( name, NULL ); - if ( ! rc ) { - DWORD error = GetLastError(); - __kmp_msg( - kmp_ms_fatal, - KMP_MSG( CantSetEnvVar, name ), - KMP_ERR( error ), - __kmp_msg_null - ); - }; // if - #else - #error Unknown or unsupported OS. - #endif - -} // func __kmp_env_unset - -// ------------------------------------------------------------------------------------------------- - -/* - Intel OpenMP RTL string representation of environment: just a string of characters, variables - are separated with vertical bars, e. g.: - - "KMP_WARNINGS=0|KMP_AFFINITY=compact|" - - Empty variables are allowed and ignored: - - "||KMP_WARNINGS=1||" - -*/ - -static -void -___kmp_env_blk_parse_string( - kmp_env_blk_t * block, // M: Env block to fill. - char const * env // I: String to parse. -) { - - char const chr_delimiter = '|'; - char const str_delimiter[] = { chr_delimiter, 0 }; - - char * bulk = NULL; - kmp_env_var_t * vars = NULL; - int count = 0; // Number of used elements in vars array. - int delimiters = 0; // Number of delimiters in input string. - - // Copy original string, we will modify the copy. - bulk = __kmp_str_format( "%s", env ); - - // Loop thru all the vars in environment block. Count delimiters (maximum number of variables - // is number of delimiters plus one). - { - char const * ptr = bulk; - for ( ; ; ) { - ptr = strchr( ptr, chr_delimiter ); - if ( ptr == NULL ) { - break; - }; // if - ++ delimiters; - ptr += 1; - }; // forever - } - - // Allocate vars array. - vars = (kmp_env_var_t *) allocate( ( delimiters + 1 ) * sizeof( kmp_env_var_t ) ); - - // Loop thru all the variables. - { - char * var; // Pointer to variable (both name and value). 
- char * name; // Pointer to name of variable. - char * value; // Pointer to value. - char * buf; // Buffer for __kmp_str_token() function. - var = __kmp_str_token( bulk, str_delimiter, & buf ); // Get the first var. - while ( var != NULL ) { - // Save found variable in vars array. - __kmp_str_split( var, '=', & name, & value ); - KMP_DEBUG_ASSERT( count < delimiters + 1 ); - vars[ count ].name = name; - vars[ count ].value = value; - ++ count; - // Get the next var. - var = __kmp_str_token( NULL, str_delimiter, & buf ); - }; // while - } - - // Fill out result. - block->bulk = bulk; - block->vars = vars; - block->count = count; - -}; // ___kmp_env_blk_parse_string - - - -/* - Windows* OS (actually, DOS) environment block is a piece of memory with environment variables. Each - variable is terminated with zero byte, entire block is terminated with one extra zero byte, so - we have two zero bytes at the end of environment block, e. g.: - - "HOME=C:\\users\\lev\x00OS=Windows_NT\x00\x00" - - It is not clear how empty environment is represented. "\x00\x00"? -*/ - -#if KMP_OS_WINDOWS -static -void -___kmp_env_blk_parse_windows( - kmp_env_blk_t * block, // M: Env block to fill. - char const * env // I: Pointer to Windows* OS (DOS) environment block. -) { - - char * bulk = NULL; - kmp_env_var_t * vars = NULL; - int count = 0; // Number of used elements in vars array. - int size = 0; // Size of bulk. - - char * name; // Pointer to name of variable. - char * value; // Pointer to value. - - if ( env != NULL ) { - - // Loop thru all the vars in environment block. Count variables, find size of block. - { - char const * var; // Pointer to beginning of var. - int len; // Length of variable. - count = 0; - var = env; // The first variable starts and beginning of environment block. - len = KMP_STRLEN( var ); - while ( len != 0 ) { - ++ count; - size = size + len + 1; - var = var + len + 1; // Move pointer to the beginning of the next variable. - len = KMP_STRLEN( var ); - }; // while - size = size + 1; // Total size of env block, including terminating zero byte. - } - - // Copy original block to bulk, we will modify bulk, not original block. - bulk = (char *) allocate( size ); - KMP_MEMCPY_S( bulk, size, env, size ); - // Allocate vars array. - vars = (kmp_env_var_t *) allocate( count * sizeof( kmp_env_var_t ) ); - - // Loop thru all the vars, now in bulk. - { - char * var; // Pointer to beginning of var. - int len; // Length of variable. - count = 0; - var = bulk; - len = KMP_STRLEN( var ); - while ( len != 0 ) { - // Save variable in vars array. - __kmp_str_split( var, '=', & name, & value ); - vars[ count ].name = name; - vars[ count ].value = value; - ++ count; - // Get the next var. - var = var + len + 1; - len = KMP_STRLEN( var ); - }; // while - } - - }; // if - - // Fill out result. - block->bulk = bulk; - block->vars = vars; - block->count = count; - -}; // ___kmp_env_blk_parse_windows -#endif - - -/* - Unix environment block is a array of pointers to variables, last pointer in array is NULL: - - { "HOME=/home/lev", "TERM=xterm", NULL } -*/ - -static -void -___kmp_env_blk_parse_unix( - kmp_env_blk_t * block, // M: Env block to fill. - char * * env // I: Unix environment to parse. -) { - - char * bulk = NULL; - kmp_env_var_t * vars = NULL; - int count = 0; - int size = 0; // Size of bulk. - - // Count number of variables and length of required bulk. 
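// The two passes that follow -- count entries and bytes, then copy each
// "NAME=value" string into one bulk buffer and split it in place -- are the
// core of all three parsers in this file. A self-contained sketch of the same
// scheme (parse_env and env_var_t here are hypothetical simplifications):
#include <cstdio>
#include <cstdlib>
#include <cstring>

struct env_var_t { char *name; char *value; };

static int parse_env(char **env, char **bulk_out, env_var_t **vars_out) {
  int count = 0;
  std::size_t size = 0;
  while (env[count] != NULL) {            // pass 1: count entries and bytes
    size += std::strlen(env[count]) + 1;
    ++count;
  }
  char *bulk = (char *)std::malloc(size);
  env_var_t *vars = (env_var_t *)std::malloc(count * sizeof(env_var_t));
  char *p = bulk;
  for (int i = 0; i < count; ++i) {       // pass 2: copy, then split at '='
    std::size_t len = std::strlen(env[i]);
    std::memcpy(p, env[i], len + 1);
    char *eq = std::strchr(p, '=');
    if (eq != NULL) *eq = '\0';
    vars[i].name = p;
    vars[i].value = (eq != NULL) ? eq + 1 : NULL;
    p += len + 1;
  }
  *bulk_out = bulk;
  *vars_out = vars;
  return count;
}

int main() {
  char e0[] = "HOME=/home/lev", e1[] = "TERM=xterm";
  char *env[] = { e0, e1, NULL };
  char *bulk; env_var_t *vars;
  int n = parse_env(env, &bulk, &vars);
  for (int i = 0; i < n; ++i)
    std::printf("%s -> %s\n", vars[i].name, vars[i].value);
  std::free(vars); std::free(bulk);
  return 0;
}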
- { - count = 0; - size = 0; - while ( env[ count ] != NULL ) { - size += KMP_STRLEN( env[ count ] ) + 1; - ++ count; - }; // while - } - - // Allocate memory. - bulk = (char *) allocate( size ); - vars = (kmp_env_var_t *) allocate( count * sizeof( kmp_env_var_t ) ); - - // Loop thru all the vars. - { - char * var; // Pointer to beginning of var. - char * name; // Pointer to name of variable. - char * value; // Pointer to value. - int len; // Length of variable. - int i; - var = bulk; - for ( i = 0; i < count; ++ i ) { - // Copy variable to bulk. - len = KMP_STRLEN( env[ i ] ); - KMP_MEMCPY_S( var, size, env[ i ], len + 1 ); - // Save found variable in vars array. - __kmp_str_split( var, '=', & name, & value ); - vars[ i ].name = name; - vars[ i ].value = value; - // Move pointer. - var += len + 1; - }; // for - } - - // Fill out result. - block->bulk = bulk; - block->vars = vars; - block->count = count; - -}; // ___kmp_env_blk_parse_unix - - - -void -__kmp_env_blk_init( - kmp_env_blk_t * block, // M: Block to initialize. - char const * bulk // I: Initialization string, or NULL. -) { - - if ( bulk != NULL ) { - ___kmp_env_blk_parse_string( block, bulk ); - } else { - #if KMP_OS_UNIX - ___kmp_env_blk_parse_unix( block, environ ); - #elif KMP_OS_WINDOWS - { - char * mem = GetEnvironmentStrings(); - if ( mem == NULL ) { - DWORD error = GetLastError(); - __kmp_msg( - kmp_ms_fatal, - KMP_MSG( CantGetEnvironment ), - KMP_ERR( error ), - __kmp_msg_null - ); - }; // if - ___kmp_env_blk_parse_windows( block, mem ); - FreeEnvironmentStrings( mem ); - } - #else - #error Unknown or unsupported OS. - #endif - }; // if - -} // __kmp_env_blk_init - - - -static -int -___kmp_env_var_cmp( // Comparison function for qsort(). - kmp_env_var_t const * lhs, - kmp_env_var_t const * rhs -) { - return strcmp( lhs->name, rhs->name ); -} - -void -__kmp_env_blk_sort( - kmp_env_blk_t * block // M: Block of environment variables to sort. -) { - - qsort( - (void *) block->vars, - block->count, - sizeof( kmp_env_var_t ), - ( int ( * )( void const *, void const * ) ) & ___kmp_env_var_cmp - ); - -} // __kmp_env_block_sort - - - -void -__kmp_env_blk_free( - kmp_env_blk_t * block // M: Block of environment variables to free. -) { - - KMP_INTERNAL_FREE( (void *) block->vars ); - __kmp_str_free(&(block->bulk)); - - block->count = 0; - block->vars = NULL; - -} // __kmp_env_blk_free - - - -char const * // R: Value of variable or NULL if variable does not exist. -__kmp_env_blk_var( - kmp_env_blk_t * block, // I: Block of environment variables. - char const * name // I: Name of variable to find. -) { - - int i; - for ( i = 0; i < block->count; ++ i ) { - if ( strcmp( block->vars[ i ].name, name ) == 0 ) { - return block->vars[ i ].value; - }; // if - }; // for - return NULL; - -} // __kmp_env_block_var - - -// end of file // Index: runtime/src/kmp_environment.cpp =================================================================== --- runtime/src/kmp_environment.cpp +++ runtime/src/kmp_environment.cpp @@ -1,5 +1,5 @@ /* - * kmp_environment.c -- Handle environment variables OS-independently. + * kmp_environment.cpp -- Handle environment variables OS-independently. 
*/ Index: runtime/src/kmp_error.c =================================================================== --- runtime/src/kmp_error.c +++ runtime/src/kmp_error.c @@ -1,523 +0,0 @@ -/* - * kmp_error.c -- KPTS functions for error checking at runtime - */ - - -//===----------------------------------------------------------------------===// -// -// The LLVM Compiler Infrastructure -// -// This file is dual licensed under the MIT and the University of Illinois Open -// Source Licenses. See LICENSE.txt for details. -// -//===----------------------------------------------------------------------===// - - -#include "kmp.h" -#include "kmp_i18n.h" -#include "kmp_str.h" -#include "kmp_error.h" - -/* ------------------------------------------------------------------------ */ -/* ------------------------------------------------------------------------ */ - -#define MIN_STACK 100 - - -static char const * cons_text_c[] = { - "(none)", - "\"parallel\"", - "work-sharing", /* this is not called "for" because of lowering of "sections" pragmas */ - "\"ordered\" work-sharing", /* this is not called "for ordered" because of lowering of "sections" pragmas */ - "\"sections\"", - "work-sharing", /* this is not called "single" because of lowering of "sections" pragmas */ - "\"taskq\"", - "\"taskq\"", - "\"taskq ordered\"", - "\"critical\"", - "\"ordered\"", /* in PARALLEL */ - "\"ordered\"", /* in PDO */ - "\"ordered\"", /* in TASKQ */ - "\"master\"", - "\"reduce\"", - "\"barrier\"" -}; - -#define get_src( ident ) ( (ident) == NULL ? NULL : (ident)->psource ) - -#define PUSH_MSG( ct, ident ) \ - "\tpushing on stack: %s (%s)\n", cons_text_c[ (ct) ], get_src( (ident) ) -#define POP_MSG( p ) \ - "\tpopping off stack: %s (%s)\n", \ - cons_text_c[ (p)->stack_data[ tos ].type ], \ - get_src( (p)->stack_data[ tos ].ident ) - -static int const cons_text_c_num = sizeof( cons_text_c ) / sizeof( char const * ); - -/* ------------------------------------------------------------------------ */ -/* --------------- START OF STATIC LOCAL ROUTINES ------------------------- */ -/* ------------------------------------------------------------------------ */ - -static void -__kmp_check_null_func( void ) -{ - /* nothing to do */ -} - -static void -__kmp_expand_cons_stack( int gtid, struct cons_header *p ) -{ - int i; - struct cons_data *d; - - /* TODO for monitor perhaps? */ - if (gtid < 0) - __kmp_check_null_func(); - - KE_TRACE( 10, ("expand cons_stack (%d %d)\n", gtid, __kmp_get_gtid() ) ); - - d = p->stack_data; - - p->stack_size = (p->stack_size * 2) + 100; - - /* TODO free the old data */ - p->stack_data = (struct cons_data *) __kmp_allocate( sizeof( struct cons_data ) * (p->stack_size+1) ); - - for (i = p->stack_top; i >= 0; --i) - p->stack_data[i] = d[i]; - - /* NOTE: we do not free the old stack_data */ -} - -// NOTE: Function returns allocated memory, caller must free it! -static char const * -__kmp_pragma( - int ct, - ident_t const * ident -) { - char const * cons = NULL; // Construct name. - char * file = NULL; // File name. - char * func = NULL; // Function (routine) name. - char * line = NULL; // Line number. - kmp_str_buf_t buffer; - kmp_msg_t prgm; - __kmp_str_buf_init( & buffer ); - if ( 0 < ct && ct < cons_text_c_num ) { - cons = cons_text_c[ ct ]; - } else { - KMP_DEBUG_ASSERT( 0 ); - }; - if ( ident != NULL && ident->psource != NULL ) { - char * tail = NULL; - __kmp_str_buf_print( & buffer, "%s", ident->psource ); // Copy source to buffer. - // Split string in buffer to file, func, and line. 
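// A minimal sketch of the split performed here, assuming __kmp_str_split(s,
// c, &head, &tail) cuts s at the first c, NUL-terminates the head, and
// points tail just past the delimiter (psource has the shape
// ";file;func;line;col;;", cf. the MKLOC() macro in kmp_gsupport.c below):
#include <string.h>
static char *sketch_split(char *s, char sep, char **head) {
    char *p = (s != NULL) ? strchr(s, sep) : NULL;
    if (p != NULL)
        *p++ = '\0';              // terminate the head field in place
    if (head != NULL)
        *head = s;
    return p;                     // the tail, or NULL if sep was not found
}
// On a writable copy of ";foo.c;bar;42;0;;" the four successive splits skip
// the empty leading field, then yield file == "foo.c", func == "bar",
// line == "42".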
- tail = buffer.str; - __kmp_str_split( tail, ';', NULL, & tail ); - __kmp_str_split( tail, ';', & file, & tail ); - __kmp_str_split( tail, ';', & func, & tail ); - __kmp_str_split( tail, ';', & line, & tail ); - }; // if - prgm = __kmp_msg_format( kmp_i18n_fmt_Pragma, cons, file, func, line ); - __kmp_str_buf_free( & buffer ); - return prgm.str; -} // __kmp_pragma - -/* ------------------------------------------------------------------------ */ -/* ----------------- END OF STATIC LOCAL ROUTINES ------------------------- */ -/* ------------------------------------------------------------------------ */ - - -void -__kmp_error_construct( - kmp_i18n_id_t id, // Message identifier. - enum cons_type ct, // Construct type. - ident_t const * ident // Construct ident. -) { - char const * construct = __kmp_pragma( ct, ident ); - __kmp_msg( kmp_ms_fatal, __kmp_msg_format( id, construct ), __kmp_msg_null ); - KMP_INTERNAL_FREE( (void *) construct ); -} - -void -__kmp_error_construct2( - kmp_i18n_id_t id, // Message identifier. - enum cons_type ct, // First construct type. - ident_t const * ident, // First construct ident. - struct cons_data const * cons // Second construct. -) { - char const * construct1 = __kmp_pragma( ct, ident ); - char const * construct2 = __kmp_pragma( cons->type, cons->ident ); - __kmp_msg( kmp_ms_fatal, __kmp_msg_format( id, construct1, construct2 ), __kmp_msg_null ); - KMP_INTERNAL_FREE( (void *) construct1 ); - KMP_INTERNAL_FREE( (void *) construct2 ); -} - - -struct cons_header * -__kmp_allocate_cons_stack( int gtid ) -{ - struct cons_header *p; - - /* TODO for monitor perhaps? */ - if ( gtid < 0 ) { - __kmp_check_null_func(); - }; // if - KE_TRACE( 10, ("allocate cons_stack (%d)\n", gtid ) ); - p = (struct cons_header *) __kmp_allocate( sizeof( struct cons_header ) ); - p->p_top = p->w_top = p->s_top = 0; - p->stack_data = (struct cons_data *) __kmp_allocate( sizeof( struct cons_data ) * (MIN_STACK+1) ); - p->stack_size = MIN_STACK; - p->stack_top = 0; - p->stack_data[ 0 ].type = ct_none; - p->stack_data[ 0 ].prev = 0; - p->stack_data[ 0 ].ident = NULL; - return p; -} - -void -__kmp_free_cons_stack( void * ptr ) { - struct cons_header * p = (struct cons_header *) ptr; - if ( p != NULL ) { - if ( p->stack_data != NULL ) { - __kmp_free( p->stack_data ); - p->stack_data = NULL; - }; // if - __kmp_free( p ); - }; // if -} - - -#if KMP_DEBUG -static void -dump_cons_stack( int gtid, struct cons_header * p ) { - int i; - int tos = p->stack_top; - kmp_str_buf_t buffer; - __kmp_str_buf_init( & buffer ); - __kmp_str_buf_print( & buffer, "+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-\n" ); - __kmp_str_buf_print( & buffer, "Begin construct stack with %d items for thread %d\n", tos, gtid ); - __kmp_str_buf_print( & buffer, " stack_top=%d { P=%d, W=%d, S=%d }\n", tos, p->p_top, p->w_top, p->s_top ); - for ( i = tos; i > 0; i-- ) { - struct cons_data * c = & ( p->stack_data[ i ] ); - __kmp_str_buf_print( & buffer, " stack_data[%2d] = { %s (%s) %d %p }\n", i, cons_text_c[ c->type ], get_src( c->ident ), c->prev, c->name ); - }; // for i - __kmp_str_buf_print( & buffer, "End construct stack for thread %d\n", gtid ); - __kmp_str_buf_print( & buffer, "+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-\n" ); - __kmp_debug_printf( "%s", buffer.str ); - __kmp_str_buf_free( & buffer ); -} -#endif - -void -__kmp_push_parallel( int gtid, ident_t const * ident ) -{ - int tos; - struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons; - - KMP_DEBUG_ASSERT( 
__kmp_threads[ gtid ]-> th.th_cons ); - KE_TRACE( 10, ("__kmp_push_parallel (%d %d)\n", gtid, __kmp_get_gtid() ) ); - KE_TRACE( 100, ( PUSH_MSG( ct_parallel, ident ) ) ); - if ( p->stack_top >= p->stack_size ) { - __kmp_expand_cons_stack( gtid, p ); - }; // if - tos = ++p->stack_top; - p->stack_data[ tos ].type = ct_parallel; - p->stack_data[ tos ].prev = p->p_top; - p->stack_data[ tos ].ident = ident; - p->stack_data[ tos ].name = NULL; - p->p_top = tos; - KE_DUMP( 1000, dump_cons_stack( gtid, p ) ); -} - -void -__kmp_check_workshare( int gtid, enum cons_type ct, ident_t const * ident ) -{ - struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons; - - KMP_DEBUG_ASSERT( __kmp_threads[ gtid ]-> th.th_cons ); - KE_TRACE( 10, ("__kmp_check_workshare (%d %d)\n", gtid, __kmp_get_gtid() ) ); - - - if ( p->stack_top >= p->stack_size ) { - __kmp_expand_cons_stack( gtid, p ); - }; // if - if ( p->w_top > p->p_top && - !(IS_CONS_TYPE_TASKQ(p->stack_data[ p->w_top ].type) && IS_CONS_TYPE_TASKQ(ct))) { - // We are already in a WORKSHARE construct for this PARALLEL region. - __kmp_error_construct2( kmp_i18n_msg_CnsInvalidNesting, ct, ident, & p->stack_data[ p->w_top ] ); - }; // if - if ( p->s_top > p->p_top ) { - // We are already in a SYNC construct for this PARALLEL region. - __kmp_error_construct2( kmp_i18n_msg_CnsInvalidNesting, ct, ident, & p->stack_data[ p->s_top ] ); - }; // if -} - -void -__kmp_push_workshare( int gtid, enum cons_type ct, ident_t const * ident ) -{ - int tos; - struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons; - KE_TRACE( 10, ("__kmp_push_workshare (%d %d)\n", gtid, __kmp_get_gtid() ) ); - __kmp_check_workshare( gtid, ct, ident ); - KE_TRACE( 100, ( PUSH_MSG( ct, ident ) ) ); - tos = ++p->stack_top; - p->stack_data[ tos ].type = ct; - p->stack_data[ tos ].prev = p->w_top; - p->stack_data[ tos ].ident = ident; - p->stack_data[ tos ].name = NULL; - p->w_top = tos; - KE_DUMP( 1000, dump_cons_stack( gtid, p ) ); -} - -void -#if KMP_USE_DYNAMIC_LOCK -__kmp_check_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck, kmp_uint32 seq ) -#else -__kmp_check_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck ) -#endif -{ - struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons; - - KE_TRACE( 10, ("__kmp_check_sync (gtid=%d)\n", __kmp_get_gtid() ) ); - - if (p->stack_top >= p->stack_size) - __kmp_expand_cons_stack( gtid, p ); - - if (ct == ct_ordered_in_parallel || ct == ct_ordered_in_pdo || ct == ct_ordered_in_taskq ) { - if (p->w_top <= p->p_top) { - /* we are not in a worksharing construct */ - #ifdef BUILD_PARALLEL_ORDERED - /* do not report error messages for PARALLEL ORDERED */ - KMP_ASSERT( ct == ct_ordered_in_parallel ); - #else - __kmp_error_construct( kmp_i18n_msg_CnsBoundToWorksharing, ct, ident ); - #endif /* BUILD_PARALLEL_ORDERED */ - } else { - /* inside a WORKSHARING construct for this PARALLEL region */ - if (!IS_CONS_TYPE_ORDERED(p->stack_data[ p->w_top ].type)) { - if (p->stack_data[ p->w_top ].type == ct_taskq) { - __kmp_error_construct2( - kmp_i18n_msg_CnsNotInTaskConstruct, - ct, ident, - & p->stack_data[ p->w_top ] - ); - } else { - __kmp_error_construct2( - kmp_i18n_msg_CnsNoOrderedClause, - ct, ident, - & p->stack_data[ p->w_top ] - ); - } - } - } - if (p->s_top > p->p_top && p->s_top > p->w_top) { - /* inside a sync construct which is inside a worksharing construct */ - int index = p->s_top; - enum cons_type stack_type; - - stack_type = p->stack_data[ index ].type; - - if (stack_type == ct_critical 
|| - ( ( stack_type == ct_ordered_in_parallel || - stack_type == ct_ordered_in_pdo || - stack_type == ct_ordered_in_taskq ) && /* C doesn't allow named ordered; ordered in ordered gets error */ - p->stack_data[ index ].ident != NULL && - (p->stack_data[ index ].ident->flags & KMP_IDENT_KMPC ))) { - /* we are in ORDERED which is inside an ORDERED or CRITICAL construct */ - __kmp_error_construct2( - kmp_i18n_msg_CnsInvalidNesting, - ct, ident, - & p->stack_data[ index ] - ); - } - } - } else if ( ct == ct_critical ) { -#if KMP_USE_DYNAMIC_LOCK - if ( lck != NULL && __kmp_get_user_lock_owner( lck, seq ) == gtid ) { /* this same thread already has lock for this critical section */ -#else - if ( lck != NULL && __kmp_get_user_lock_owner( lck ) == gtid ) { /* this same thread already has lock for this critical section */ -#endif - int index = p->s_top; - struct cons_data cons = { NULL, ct_critical, 0, NULL }; - /* walk up construct stack and try to find critical with matching name */ - while ( index != 0 && p->stack_data[ index ].name != lck ) { - index = p->stack_data[ index ].prev; - } - if ( index != 0 ) { - /* found match on the stack (may not always because of interleaved critical for Fortran) */ - cons = p->stack_data[ index ]; - } - /* we are in CRITICAL which is inside a CRITICAL construct of the same name */ - __kmp_error_construct2( kmp_i18n_msg_CnsNestingSameName, ct, ident, & cons ); - } - } else if ( ct == ct_master || ct == ct_reduce ) { - if (p->w_top > p->p_top) { - /* inside a WORKSHARING construct for this PARALLEL region */ - __kmp_error_construct2( - kmp_i18n_msg_CnsInvalidNesting, - ct, ident, - & p->stack_data[ p->w_top ] - ); - } - if (ct == ct_reduce && p->s_top > p->p_top) { - /* inside a another SYNC construct for this PARALLEL region */ - __kmp_error_construct2( - kmp_i18n_msg_CnsInvalidNesting, - ct, ident, - & p->stack_data[ p->s_top ] - ); - }; // if - }; // if -} - -void -#if KMP_USE_DYNAMIC_LOCK -__kmp_push_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck, kmp_uint32 seq ) -#else -__kmp_push_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck ) -#endif -{ - int tos; - struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons; - - KMP_ASSERT( gtid == __kmp_get_gtid() ); - KE_TRACE( 10, ("__kmp_push_sync (gtid=%d)\n", gtid ) ); -#if KMP_USE_DYNAMIC_LOCK - __kmp_check_sync( gtid, ct, ident, lck, seq ); -#else - __kmp_check_sync( gtid, ct, ident, lck ); -#endif - KE_TRACE( 100, ( PUSH_MSG( ct, ident ) ) ); - tos = ++ p->stack_top; - p->stack_data[ tos ].type = ct; - p->stack_data[ tos ].prev = p->s_top; - p->stack_data[ tos ].ident = ident; - p->stack_data[ tos ].name = lck; - p->s_top = tos; - KE_DUMP( 1000, dump_cons_stack( gtid, p ) ); -} - -/* ------------------------------------------------------------------------ */ - -void -__kmp_pop_parallel( int gtid, ident_t const * ident ) -{ - int tos; - struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons; - tos = p->stack_top; - KE_TRACE( 10, ("__kmp_pop_parallel (%d %d)\n", gtid, __kmp_get_gtid() ) ); - if ( tos == 0 || p->p_top == 0 ) { - __kmp_error_construct( kmp_i18n_msg_CnsDetectedEnd, ct_parallel, ident ); - } - if ( tos != p->p_top || p->stack_data[ tos ].type != ct_parallel ) { - __kmp_error_construct2( - kmp_i18n_msg_CnsExpectedEnd, - ct_parallel, ident, - & p->stack_data[ tos ] - ); - } - KE_TRACE( 100, ( POP_MSG( p ) ) ); - p->p_top = p->stack_data[ tos ].prev; - p->stack_data[ tos ].type = ct_none; - p->stack_data[ tos ].ident = NULL; - p->stack_top 
= tos - 1; - KE_DUMP( 1000, dump_cons_stack( gtid, p ) ); -} - -enum cons_type -__kmp_pop_workshare( int gtid, enum cons_type ct, ident_t const * ident ) -{ - int tos; - struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons; - - tos = p->stack_top; - KE_TRACE( 10, ("__kmp_pop_workshare (%d %d)\n", gtid, __kmp_get_gtid() ) ); - if ( tos == 0 || p->w_top == 0 ) { - __kmp_error_construct( kmp_i18n_msg_CnsDetectedEnd, ct, ident ); - } - - if ( tos != p->w_top || - ( p->stack_data[ tos ].type != ct && - /* below are two exceptions to the rule that construct types must match */ - ! ( p->stack_data[ tos ].type == ct_pdo_ordered && ct == ct_pdo ) && - ! ( p->stack_data[ tos ].type == ct_task_ordered && ct == ct_task ) - ) - ) { - __kmp_check_null_func(); - __kmp_error_construct2( - kmp_i18n_msg_CnsExpectedEnd, - ct, ident, - & p->stack_data[ tos ] - ); - } - KE_TRACE( 100, ( POP_MSG( p ) ) ); - p->w_top = p->stack_data[ tos ].prev; - p->stack_data[ tos ].type = ct_none; - p->stack_data[ tos ].ident = NULL; - p->stack_top = tos - 1; - KE_DUMP( 1000, dump_cons_stack( gtid, p ) ); - return p->stack_data[ p->w_top ].type; -} - -void -__kmp_pop_sync( int gtid, enum cons_type ct, ident_t const * ident ) -{ - int tos; - struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons; - tos = p->stack_top; - KE_TRACE( 10, ("__kmp_pop_sync (%d %d)\n", gtid, __kmp_get_gtid() ) ); - if ( tos == 0 || p->s_top == 0 ) { - __kmp_error_construct( kmp_i18n_msg_CnsDetectedEnd, ct, ident ); - }; - if ( tos != p->s_top || p->stack_data[ tos ].type != ct ) { - __kmp_check_null_func(); - __kmp_error_construct2( - kmp_i18n_msg_CnsExpectedEnd, - ct, ident, - & p->stack_data[ tos ] - ); - }; - if ( gtid < 0 ) { - __kmp_check_null_func(); - }; - KE_TRACE( 100, ( POP_MSG( p ) ) ); - p->s_top = p->stack_data[ tos ].prev; - p->stack_data[ tos ].type = ct_none; - p->stack_data[ tos ].ident = NULL; - p->stack_top = tos - 1; - KE_DUMP( 1000, dump_cons_stack( gtid, p ) ); -} - -/* ------------------------------------------------------------------------ */ - -void -__kmp_check_barrier( int gtid, enum cons_type ct, ident_t const * ident ) -{ - struct cons_header *p = __kmp_threads[ gtid ]->th.th_cons; - KE_TRACE( 10, ("__kmp_check_barrier (loc: %p, gtid: %d %d)\n", ident, gtid, __kmp_get_gtid() ) ); - if ( ident != 0 ) { - __kmp_check_null_func(); - } - if ( p->w_top > p->p_top ) { - /* we are already in a WORKSHARING construct for this PARALLEL region */ - __kmp_error_construct2( - kmp_i18n_msg_CnsInvalidNesting, - ct, ident, - & p->stack_data[ p->w_top ] - ); - } - if (p->s_top > p->p_top) { - /* we are already in a SYNC construct for this PARALLEL region */ - __kmp_error_construct2( - kmp_i18n_msg_CnsInvalidNesting, - ct, ident, - & p->stack_data[ p->s_top ] - ); - } -} - -/* ------------------------------------------------------------------------ */ - - -/* ------------------------------------------------------------------------ */ -/* ------------------------------------------------------------------------ */ Index: runtime/src/kmp_error.cpp =================================================================== --- runtime/src/kmp_error.cpp +++ runtime/src/kmp_error.cpp @@ -1,5 +1,5 @@ /* - * kmp_error.c -- KPTS functions for error checking at runtime + * kmp_error.cpp -- KPTS functions for error checking at runtime */ Index: runtime/src/kmp_ftn_cdecl.c =================================================================== --- runtime/src/kmp_ftn_cdecl.c +++ runtime/src/kmp_ftn_cdecl.c @@ -1,36 +0,0 @@ -/* - * kmp_ftn_cdecl.c -- 
Fortran __cdecl linkage support for OpenMP. - */ - - -//===----------------------------------------------------------------------===// -// -// The LLVM Compiler Infrastructure -// -// This file is dual licensed under the MIT and the University of Illinois Open -// Source Licenses. See LICENSE.txt for details. -// -//===----------------------------------------------------------------------===// - - -#include "kmp.h" -#include "kmp_affinity.h" - -#if KMP_OS_WINDOWS -# if defined KMP_WIN_CDECL || !defined KMP_DYNAMIC_LIB -# define KMP_FTN_ENTRIES KMP_FTN_UPPER -# endif -#elif KMP_OS_UNIX -# define KMP_FTN_ENTRIES KMP_FTN_PLAIN -#endif - -// Note: This string is not printed when KMP_VERSION=1. -char const __kmp_version_ftncdecl[] = KMP_VERSION_PREFIX "Fortran __cdecl OMP support: " -#ifdef KMP_FTN_ENTRIES - "yes"; -# define FTN_STDCALL /* no stdcall */ -# include "kmp_ftn_os.h" -# include "kmp_ftn_entry.h" -#else - "no"; -#endif /* KMP_FTN_ENTRIES */ Index: runtime/src/kmp_ftn_cdecl.cpp =================================================================== --- runtime/src/kmp_ftn_cdecl.cpp +++ runtime/src/kmp_ftn_cdecl.cpp @@ -1,5 +1,5 @@ /* - * kmp_ftn_cdecl.c -- Fortran __cdecl linkage support for OpenMP. + * kmp_ftn_cdecl.cpp -- Fortran __cdecl linkage support for OpenMP. */ Index: runtime/src/kmp_ftn_extra.c =================================================================== --- runtime/src/kmp_ftn_extra.c +++ runtime/src/kmp_ftn_extra.c @@ -1,34 +0,0 @@ -/* - * kmp_ftn_extra.c -- Fortran 'extra' linkage support for OpenMP. - */ - - -//===----------------------------------------------------------------------===// -// -// The LLVM Compiler Infrastructure -// -// This file is dual licensed under the MIT and the University of Illinois Open -// Source Licenses. See LICENSE.txt for details. -// -//===----------------------------------------------------------------------===// - - -#include "kmp.h" -#include "kmp_affinity.h" - -#if KMP_OS_WINDOWS -# define KMP_FTN_ENTRIES KMP_FTN_PLAIN -#elif KMP_OS_UNIX -# define KMP_FTN_ENTRIES KMP_FTN_APPEND -#endif - -// Note: This string is not printed when KMP_VERSION=1. -char const __kmp_version_ftnextra[] = KMP_VERSION_PREFIX "Fortran \"extra\" OMP support: " -#ifdef KMP_FTN_ENTRIES - "yes"; -# define FTN_STDCALL /* nothing to do */ -# include "kmp_ftn_os.h" -# include "kmp_ftn_entry.h" -#else - "no"; -#endif /* KMP_FTN_ENTRIES */ Index: runtime/src/kmp_ftn_extra.cpp =================================================================== --- runtime/src/kmp_ftn_extra.cpp +++ runtime/src/kmp_ftn_extra.cpp @@ -1,5 +1,5 @@ /* - * kmp_ftn_extra.c -- Fortran 'extra' linkage support for OpenMP. + * kmp_ftn_extra.cpp -- Fortran 'extra' linkage support for OpenMP. */ Index: runtime/src/kmp_ftn_stdcall.c =================================================================== --- runtime/src/kmp_ftn_stdcall.c +++ runtime/src/kmp_ftn_stdcall.c @@ -1,35 +0,0 @@ -/* - * kmp_ftn_stdcall.c -- Fortran __stdcall linkage support for OpenMP. - */ - - -//===----------------------------------------------------------------------===// -// -// The LLVM Compiler Infrastructure -// -// This file is dual licensed under the MIT and the University of Illinois Open -// Source Licenses. See LICENSE.txt for details. -// -//===----------------------------------------------------------------------===// - - -#include "kmp.h" - -// Note: This string is not printed when KMP_VERSION=1. 
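// A note on the idiom used in the declaration below (and in the kmp_ftn_*.c
// siblings above): the initializing string literal is completed inside the
// #ifdef, so a single declaration ends up holding either "...: yes" or
// "...: no". In miniature, with a hypothetical DEMO_FEATURE macro:
#define SKETCH_PREFIX "demo: feature support: "
static const char sketch_version[] = SKETCH_PREFIX
#ifdef DEMO_FEATURE
    "yes";
#else
    "no";
#endif
// Adjacent string literals are concatenated by the compiler, so with
// -DDEMO_FEATURE the array holds "demo: feature support: yes".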
-char const __kmp_version_ftnstdcall[] = KMP_VERSION_PREFIX "Fortran __stdcall OMP support: " -#ifdef USE_FTN_STDCALL - "yes"; -#else - "no"; -#endif - -#ifdef USE_FTN_STDCALL - -#define FTN_STDCALL KMP_STDCALL -#define KMP_FTN_ENTRIES USE_FTN_STDCALL - -#include "kmp_ftn_os.h" -#include "kmp_ftn_entry.h" - -#endif /* USE_FTN_STDCALL */ - Index: runtime/src/kmp_ftn_stdcall.cpp =================================================================== --- runtime/src/kmp_ftn_stdcall.cpp +++ runtime/src/kmp_ftn_stdcall.cpp @@ -1,5 +1,5 @@ /* - * kmp_ftn_stdcall.c -- Fortran __stdcall linkage support for OpenMP. + * kmp_ftn_stdcall.cpp -- Fortran __stdcall linkage support for OpenMP. */ Index: runtime/src/kmp_global.c =================================================================== --- runtime/src/kmp_global.c +++ runtime/src/kmp_global.c @@ -1,497 +0,0 @@ -/* - * kmp_global.c -- KPTS global variables for runtime support library - */ - - -//===----------------------------------------------------------------------===// -// -// The LLVM Compiler Infrastructure -// -// This file is dual licensed under the MIT and the University of Illinois Open -// Source Licenses. See LICENSE.txt for details. -// -//===----------------------------------------------------------------------===// - - -#include "kmp.h" -#include "kmp_affinity.h" - -kmp_key_t __kmp_gtid_threadprivate_key; - -#if KMP_ARCH_X86 || KMP_ARCH_X86_64 -kmp_cpuinfo_t __kmp_cpuinfo = { 0 }; // Not initialized -#endif - -#if KMP_STATS_ENABLED -#include "kmp_stats.h" -// lock for modifying the global __kmp_stats_list -kmp_tas_lock_t __kmp_stats_lock; - -// global list of per thread stats, the head is a sentinel node which accumulates all stats produced before __kmp_create_worker is called. -kmp_stats_list* __kmp_stats_list; - -// thread local pointer to stats node within list -__thread kmp_stats_list* __kmp_stats_thread_ptr = NULL; - -// gives reference tick for all events (considered the 0 tick) -tsc_tick_count __kmp_stats_start_time; -#endif - -/* ----------------------------------------------------- */ -/* INITIALIZATION VARIABLES */ -/* they are syncronized to write during init, but read anytime */ -volatile int __kmp_init_serial = FALSE; -volatile int __kmp_init_gtid = FALSE; -volatile int __kmp_init_common = FALSE; -volatile int __kmp_init_middle = FALSE; -volatile int __kmp_init_parallel = FALSE; -#if KMP_USE_MONITOR -volatile int __kmp_init_monitor = 0; /* 1 - launched, 2 - actually started (Windows* OS only) */ -#endif -volatile int __kmp_init_user_locks = FALSE; - -/* list of address of allocated caches for commons */ -kmp_cached_addr_t *__kmp_threadpriv_cache_list = NULL; - -int __kmp_init_counter = 0; -int __kmp_root_counter = 0; -int __kmp_version = 0; - -volatile kmp_uint32 __kmp_team_counter = 0; -volatile kmp_uint32 __kmp_task_counter = 0; - -unsigned int __kmp_init_wait = KMP_DEFAULT_INIT_WAIT; /* initial number of spin-tests */ -unsigned int __kmp_next_wait = KMP_DEFAULT_NEXT_WAIT; /* susequent number of spin-tests */ - -size_t __kmp_stksize = KMP_DEFAULT_STKSIZE; -#if KMP_USE_MONITOR -size_t __kmp_monitor_stksize = 0; // auto adjust -#endif -size_t __kmp_stkoffset = KMP_DEFAULT_STKOFFSET; -int __kmp_stkpadding = KMP_MIN_STKPADDING; - -size_t __kmp_malloc_pool_incr = KMP_DEFAULT_MALLOC_POOL_INCR; - -/* Barrier method defaults, settings, and strings */ -/* branch factor = 2^branch_bits (only relevant for tree and hyper barrier types) */ -#if KMP_ARCH_X86_64 -kmp_uint32 __kmp_barrier_gather_bb_dflt = 2; /* branch_factor = 4 */ /* 
hyper2: C78980 */ -kmp_uint32 __kmp_barrier_release_bb_dflt = 2; /* branch_factor = 4 */ /* hyper2: C78980 */ -#else -kmp_uint32 __kmp_barrier_gather_bb_dflt = 2; /* branch_factor = 4 */ /* communication in core for MIC */ -kmp_uint32 __kmp_barrier_release_bb_dflt = 2; /* branch_factor = 4 */ /* communication in core for MIC */ -#endif // KMP_ARCH_X86_64 -#if KMP_ARCH_X86_64 -kmp_bar_pat_e __kmp_barrier_gather_pat_dflt = bp_hyper_bar; /* hyper2: C78980 */ -kmp_bar_pat_e __kmp_barrier_release_pat_dflt = bp_hyper_bar; /* hyper2: C78980 */ -#else -kmp_bar_pat_e __kmp_barrier_gather_pat_dflt = bp_linear_bar; -kmp_bar_pat_e __kmp_barrier_release_pat_dflt = bp_linear_bar; -#endif -kmp_uint32 __kmp_barrier_gather_branch_bits [ bs_last_barrier ] = { 0 }; -kmp_uint32 __kmp_barrier_release_branch_bits [ bs_last_barrier ] = { 0 }; -kmp_bar_pat_e __kmp_barrier_gather_pattern [ bs_last_barrier ] = { bp_linear_bar }; -kmp_bar_pat_e __kmp_barrier_release_pattern [ bs_last_barrier ] = { bp_linear_bar }; -char const *__kmp_barrier_branch_bit_env_name [ bs_last_barrier ] = - { "KMP_PLAIN_BARRIER", "KMP_FORKJOIN_BARRIER" - #if KMP_FAST_REDUCTION_BARRIER - , "KMP_REDUCTION_BARRIER" - #endif // KMP_FAST_REDUCTION_BARRIER - }; -char const *__kmp_barrier_pattern_env_name [ bs_last_barrier ] = - { "KMP_PLAIN_BARRIER_PATTERN", "KMP_FORKJOIN_BARRIER_PATTERN" - #if KMP_FAST_REDUCTION_BARRIER - , "KMP_REDUCTION_BARRIER_PATTERN" - #endif // KMP_FAST_REDUCTION_BARRIER - }; -char const *__kmp_barrier_type_name [ bs_last_barrier ] = - { "plain", "forkjoin" - #if KMP_FAST_REDUCTION_BARRIER - , "reduction" - #endif // KMP_FAST_REDUCTION_BARRIER - }; -char const *__kmp_barrier_pattern_name[bp_last_bar] = {"linear","tree","hyper","hierarchical"}; - -int __kmp_allThreadsSpecified = 0; -size_t __kmp_align_alloc = CACHE_LINE; - - -int __kmp_generate_warnings = kmp_warnings_low; -int __kmp_reserve_warn = 0; -int __kmp_xproc = 0; -int __kmp_avail_proc = 0; -size_t __kmp_sys_min_stksize = KMP_MIN_STKSIZE; -int __kmp_sys_max_nth = KMP_MAX_NTH; -int __kmp_max_nth = 0; -int __kmp_threads_capacity = 0; -int __kmp_dflt_team_nth = 0; -int __kmp_dflt_team_nth_ub = 0; -int __kmp_tp_capacity = 0; -int __kmp_tp_cached = 0; -int __kmp_dflt_nested = FALSE; -int __kmp_dispatch_num_buffers = KMP_DFLT_DISP_NUM_BUFF; -int __kmp_dflt_max_active_levels = KMP_MAX_ACTIVE_LEVELS_LIMIT; /* max_active_levels limit */ -#if KMP_NESTED_HOT_TEAMS -int __kmp_hot_teams_mode = 0; /* 0 - free extra threads when reduced */ - /* 1 - keep extra threads when reduced */ -int __kmp_hot_teams_max_level = 1; /* nesting level of hot teams */ -#endif -enum library_type __kmp_library = library_none; -enum sched_type __kmp_sched = kmp_sch_default; /* scheduling method for runtime scheduling */ -enum sched_type __kmp_static = kmp_sch_static_greedy; /* default static scheduling method */ -enum sched_type __kmp_guided = kmp_sch_guided_iterative_chunked; /* default guided scheduling method */ -enum sched_type __kmp_auto = kmp_sch_guided_analytical_chunked; /* default auto scheduling method */ -int __kmp_dflt_blocktime = KMP_DEFAULT_BLOCKTIME; -#if KMP_USE_MONITOR -int __kmp_monitor_wakeups = KMP_MIN_MONITOR_WAKEUPS; -int __kmp_bt_intervals = KMP_INTERVALS_FROM_BLOCKTIME( KMP_DEFAULT_BLOCKTIME, KMP_MIN_MONITOR_WAKEUPS ); -#endif -#ifdef KMP_ADJUST_BLOCKTIME -int __kmp_zero_bt = FALSE; -#endif /* KMP_ADJUST_BLOCKTIME */ -#ifdef KMP_DFLT_NTH_CORES -int __kmp_ncores = 0; -#endif -int __kmp_chunk = 0; -int __kmp_abort_delay = 0; -#if KMP_OS_LINUX && defined(KMP_TDATA_GTID) -int 
__kmp_gtid_mode = 3; /* use __declspec(thread) TLS to store gtid */ -int __kmp_adjust_gtid_mode = FALSE; -#elif KMP_OS_WINDOWS -int __kmp_gtid_mode = 2; /* use TLS functions to store gtid */ -int __kmp_adjust_gtid_mode = FALSE; -#else -int __kmp_gtid_mode = 0; /* select method to get gtid based on #threads */ -int __kmp_adjust_gtid_mode = TRUE; -#endif /* KMP_OS_LINUX && defined(KMP_TDATA_GTID) */ -#ifdef KMP_TDATA_GTID -#if KMP_OS_WINDOWS -__declspec(thread) int __kmp_gtid = KMP_GTID_DNE; -#else -__thread int __kmp_gtid = KMP_GTID_DNE; -#endif /* KMP_OS_WINDOWS - workaround because Intel(R) Many Integrated Core compiler 20110316 doesn't accept __declspec */ -#endif /* KMP_TDATA_GTID */ -int __kmp_tls_gtid_min = INT_MAX; -int __kmp_foreign_tp = TRUE; -#if KMP_ARCH_X86 || KMP_ARCH_X86_64 -int __kmp_inherit_fp_control = TRUE; -kmp_int16 __kmp_init_x87_fpu_control_word = 0; -kmp_uint32 __kmp_init_mxcsr = 0; -#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */ - -#ifdef USE_LOAD_BALANCE -double __kmp_load_balance_interval = 1.0; -#endif /* USE_LOAD_BALANCE */ - -kmp_nested_nthreads_t __kmp_nested_nth = { NULL, 0, 0 }; - -#if KMP_USE_ADAPTIVE_LOCKS - -kmp_adaptive_backoff_params_t __kmp_adaptive_backoff_params = { 1, 1024 }; // TODO: tune it! - -#if KMP_DEBUG_ADAPTIVE_LOCKS -char * __kmp_speculative_statsfile = "-"; -#endif - -#endif // KMP_USE_ADAPTIVE_LOCKS - -#if OMP_40_ENABLED -int __kmp_display_env = FALSE; -int __kmp_display_env_verbose = FALSE; -int __kmp_omp_cancellation = FALSE; -#endif - -/* map OMP 3.0 schedule types with our internal schedule types */ -enum sched_type __kmp_sch_map[ kmp_sched_upper - kmp_sched_lower_ext + kmp_sched_upper_std - kmp_sched_lower - 2 ] = { - kmp_sch_static_chunked, // ==> kmp_sched_static = 1 - kmp_sch_dynamic_chunked, // ==> kmp_sched_dynamic = 2 - kmp_sch_guided_chunked, // ==> kmp_sched_guided = 3 - kmp_sch_auto, // ==> kmp_sched_auto = 4 - kmp_sch_trapezoidal // ==> kmp_sched_trapezoidal = 101 - // will likely not used, introduced here just to debug the code - // of public intel extension schedules -}; - -#if KMP_OS_LINUX -enum clock_function_type __kmp_clock_function; -int __kmp_clock_function_param; -#endif /* KMP_OS_LINUX */ - -#if KMP_ARCH_X86_64 && (KMP_OS_LINUX || KMP_OS_WINDOWS) -enum mic_type __kmp_mic_type = non_mic; -#endif - -#if KMP_AFFINITY_SUPPORTED - -KMPAffinity* __kmp_affinity_dispatch = NULL; - -# if KMP_USE_HWLOC -int __kmp_hwloc_error = FALSE; -hwloc_topology_t __kmp_hwloc_topology = NULL; -# endif - -# if KMP_OS_WINDOWS -# if KMP_GROUP_AFFINITY -int __kmp_num_proc_groups = 1; -# endif /* KMP_GROUP_AFFINITY */ -kmp_GetActiveProcessorCount_t __kmp_GetActiveProcessorCount = NULL; -kmp_GetActiveProcessorGroupCount_t __kmp_GetActiveProcessorGroupCount = NULL; -kmp_GetThreadGroupAffinity_t __kmp_GetThreadGroupAffinity = NULL; -kmp_SetThreadGroupAffinity_t __kmp_SetThreadGroupAffinity = NULL; -# endif /* KMP_OS_WINDOWS */ - -size_t __kmp_affin_mask_size = 0; -enum affinity_type __kmp_affinity_type = affinity_default; -enum affinity_gran __kmp_affinity_gran = affinity_gran_default; -int __kmp_affinity_gran_levels = -1; -int __kmp_affinity_dups = TRUE; -enum affinity_top_method __kmp_affinity_top_method = affinity_top_method_default; -int __kmp_affinity_compact = 0; -int __kmp_affinity_offset = 0; -int __kmp_affinity_verbose = FALSE; -int __kmp_affinity_warnings = TRUE; -int __kmp_affinity_respect_mask = affinity_respect_mask_default; -char * __kmp_affinity_proclist = NULL; -kmp_affin_mask_t *__kmp_affinity_masks = NULL; -unsigned 
__kmp_affinity_num_masks = 0; - -char const * __kmp_cpuinfo_file = NULL; - -#endif /* KMP_AFFINITY_SUPPORTED */ - -#if OMP_40_ENABLED -kmp_nested_proc_bind_t __kmp_nested_proc_bind = { NULL, 0, 0 }; -int __kmp_affinity_num_places = 0; -#endif - -int __kmp_place_num_sockets = 0; -int __kmp_place_socket_offset = 0; -int __kmp_place_num_cores = 0; -int __kmp_place_core_offset = 0; -int __kmp_place_num_threads_per_core = 0; - -#if OMP_40_ENABLED -kmp_int32 __kmp_default_device = 0; -#endif - -kmp_tasking_mode_t __kmp_tasking_mode = tskm_task_teams; -#if OMP_45_ENABLED -kmp_int32 __kmp_max_task_priority = 0; -#endif - -/* This check ensures that the compiler is passing the correct data type - * for the flags formal parameter of the function kmpc_omp_task_alloc(). - * If the type is not a 4-byte type, then give an error message about - * a non-positive length array pointing here. If that happens, the - * kmp_tasking_flags_t structure must be redefined to have exactly 32 bits. - */ -KMP_BUILD_ASSERT( sizeof(kmp_tasking_flags_t) == 4 ); - -kmp_int32 __kmp_task_stealing_constraint = 1; /* Constrain task stealing by default */ - -#ifdef DEBUG_SUSPEND -int __kmp_suspend_count = 0; -#endif - -int __kmp_settings = FALSE; -int __kmp_duplicate_library_ok = 0; -#if USE_ITT_BUILD -int __kmp_forkjoin_frames = 1; -int __kmp_forkjoin_frames_mode = 3; -#endif -PACKED_REDUCTION_METHOD_T __kmp_force_reduction_method = reduction_method_not_defined; -int __kmp_determ_red = FALSE; - -#ifdef KMP_DEBUG -int kmp_a_debug = 0; -int kmp_b_debug = 0; -int kmp_c_debug = 0; -int kmp_d_debug = 0; -int kmp_e_debug = 0; -int kmp_f_debug = 0; -int kmp_diag = 0; -#endif - -/* For debug information logging using rotating buffer */ -int __kmp_debug_buf = FALSE; /* TRUE means use buffer, FALSE means print to stderr */ -int __kmp_debug_buf_lines = KMP_DEBUG_BUF_LINES_INIT; /* Lines of debug stored in buffer */ -int __kmp_debug_buf_chars = KMP_DEBUG_BUF_CHARS_INIT; /* Characters allowed per line in buffer */ -int __kmp_debug_buf_atomic = FALSE; /* TRUE means use atomic update of buffer entry pointer */ - -char *__kmp_debug_buffer = NULL; /* Debug buffer itself */ -int __kmp_debug_count = 0; /* Counter for number of lines printed in buffer so far */ -int __kmp_debug_buf_warn_chars = 0; /* Keep track of char increase recommended in warnings */ -/* end rotating debug buffer */ - -#ifdef KMP_DEBUG -int __kmp_par_range; /* +1 => only go par for constructs in range */ - /* -1 => only go par for constructs outside range */ -char __kmp_par_range_routine[KMP_PAR_RANGE_ROUTINE_LEN] = { '\0' }; -char __kmp_par_range_filename[KMP_PAR_RANGE_FILENAME_LEN] = { '\0' }; -int __kmp_par_range_lb = 0; -int __kmp_par_range_ub = INT_MAX; -#endif /* KMP_DEBUG */ - -/* For printing out dynamic storage map for threads and teams */ -int __kmp_storage_map = FALSE; /* True means print storage map for threads and teams */ -int __kmp_storage_map_verbose = FALSE; /* True means storage map includes placement info */ -int __kmp_storage_map_verbose_specified = FALSE; -/* Initialize the library data structures when we fork a child process, defaults to TRUE */ -int __kmp_need_register_atfork = TRUE; /* At initialization, call pthread_atfork to install fork handler */ -int __kmp_need_register_atfork_specified = TRUE; - -int __kmp_env_chunk = FALSE; /* KMP_CHUNK specified? */ -int __kmp_env_stksize = FALSE; /* KMP_STACKSIZE specified? */ -int __kmp_env_omp_stksize = FALSE; /* OMP_STACKSIZE specified? 
*/ -int __kmp_env_all_threads = FALSE;/* KMP_ALL_THREADS or KMP_MAX_THREADS specified? */ -int __kmp_env_omp_all_threads = FALSE;/* OMP_THREAD_LIMIT specified? */ -int __kmp_env_blocktime = FALSE; /* KMP_BLOCKTIME specified? */ -int __kmp_env_checks = FALSE; /* KMP_CHECKS specified? */ -int __kmp_env_consistency_check = FALSE; /* KMP_CONSISTENCY_CHECK specified? */ - -kmp_uint32 __kmp_yield_init = KMP_INIT_WAIT; -kmp_uint32 __kmp_yield_next = KMP_NEXT_WAIT; - -#if KMP_USE_MONITOR -kmp_uint32 __kmp_yielding_on = 1; -#if KMP_OS_CNK -kmp_uint32 __kmp_yield_cycle = 0; -#else -kmp_uint32 __kmp_yield_cycle = 1; /* Yield-cycle is on by default */ -#endif -kmp_int32 __kmp_yield_on_count = 10; /* By default, yielding is on for 10 monitor periods. */ -kmp_int32 __kmp_yield_off_count = 1; /* By default, yielding is off for 1 monitor periods. */ -#endif -/* ----------------------------------------------------- */ - - -/* ------------------------------------------------------ */ -/* STATE mostly syncronized with global lock */ -/* data written to rarely by masters, read often by workers */ -/* - * SHALL WE EDIT THE COMMENT BELOW IN SOME WAY? - * TODO: None of this global padding stuff works consistently because - * the order of declaration is not necessarily correlated to storage order. - * To fix this, all the important globals must be put in a big structure - * instead. - */ -KMP_ALIGN_CACHE - kmp_info_t **__kmp_threads = NULL; - kmp_root_t **__kmp_root = NULL; - -/* data read/written to often by masters */ -KMP_ALIGN_CACHE -volatile int __kmp_nth = 0; -volatile int __kmp_all_nth = 0; -int __kmp_thread_pool_nth = 0; -volatile kmp_info_t *__kmp_thread_pool = NULL; -volatile kmp_team_t *__kmp_team_pool = NULL; - -KMP_ALIGN_CACHE -volatile int __kmp_thread_pool_active_nth = 0; - -/* ------------------------------------------------- - * GLOBAL/ROOT STATE */ -KMP_ALIGN_CACHE -kmp_global_t __kmp_global = {{ 0 }}; - -/* ----------------------------------------------- */ -/* GLOBAL SYNCHRONIZATION LOCKS */ -/* TODO verify the need for these locks and if they need to be global */ - -#if KMP_USE_INTERNODE_ALIGNMENT -/* Multinode systems have larger cache line granularity which can cause - * false sharing if the alignment is not large enough for these locks */ -KMP_ALIGN_CACHE_INTERNODE - -kmp_bootstrap_lock_t __kmp_initz_lock = KMP_BOOTSTRAP_LOCK_INITIALIZER( __kmp_initz_lock ); /* Control initializations */ -KMP_ALIGN_CACHE_INTERNODE -kmp_bootstrap_lock_t __kmp_forkjoin_lock; /* control fork/join access */ -KMP_ALIGN_CACHE_INTERNODE -kmp_bootstrap_lock_t __kmp_exit_lock; /* exit() is not always thread-safe */ -#if KMP_USE_MONITOR -KMP_ALIGN_CACHE_INTERNODE -kmp_bootstrap_lock_t __kmp_monitor_lock; /* control monitor thread creation */ -#endif -KMP_ALIGN_CACHE_INTERNODE -kmp_bootstrap_lock_t __kmp_tp_cached_lock; /* used for the hack to allow threadprivate cache and __kmp_threads expansion to co-exist */ - -KMP_ALIGN_CACHE_INTERNODE -kmp_lock_t __kmp_global_lock; /* Control OS/global access */ -KMP_ALIGN_CACHE_INTERNODE -kmp_queuing_lock_t __kmp_dispatch_lock; /* Control dispatch access */ -KMP_ALIGN_CACHE_INTERNODE -kmp_lock_t __kmp_debug_lock; /* Control I/O access for KMP_DEBUG */ -#else -KMP_ALIGN_CACHE - -kmp_bootstrap_lock_t __kmp_initz_lock = KMP_BOOTSTRAP_LOCK_INITIALIZER( __kmp_initz_lock ); /* Control initializations */ -kmp_bootstrap_lock_t __kmp_forkjoin_lock; /* control fork/join access */ -kmp_bootstrap_lock_t __kmp_exit_lock; /* exit() is not always thread-safe */ -#if KMP_USE_MONITOR 
-kmp_bootstrap_lock_t __kmp_monitor_lock; /* control monitor thread creation */ -#endif -kmp_bootstrap_lock_t __kmp_tp_cached_lock; /* used for the hack to allow threadprivate cache and __kmp_threads expansion to co-exist */ - -KMP_ALIGN(128) -kmp_lock_t __kmp_global_lock; /* Control OS/global access */ -KMP_ALIGN(128) -kmp_queuing_lock_t __kmp_dispatch_lock; /* Control dispatch access */ -KMP_ALIGN(128) -kmp_lock_t __kmp_debug_lock; /* Control I/O access for KMP_DEBUG */ -#endif - -/* ----------------------------------------------- */ - -#if KMP_HANDLE_SIGNALS - /* - Signal handling is disabled by default, because it confuses users: In case of sigsegv - (or other trouble) in user code signal handler catches the signal, which then "appears" in - the monitor thread (when the monitor executes raise() function). Users see signal in the - monitor thread and blame OpenMP RTL. - - Grant said signal handling required on some older OSes (Irix?) supported by KAI, because - bad applications hung but not aborted. Currently it is not a problem for Linux* OS, OS X* and - Windows* OS. - - Grant: Found new hangs for EL4, EL5, and a Fedora Core machine. So I'm putting - the default back for now to see if that fixes hangs on those machines. - - 2010-04013 Lev: It was a bug in Fortran RTL. Fortran RTL prints a kind of stack backtrace - when program is aborting, but the code is not signal-safe. When multiple signals raised at - the same time (which occurs in dynamic negative tests because all the worker threads detects - the same error), Fortran RTL may hang. The bug finally fixed in Fortran RTL library provided - by Steve R., and will be available soon. - */ - int __kmp_handle_signals = FALSE; -#endif - -/* ----------------------------------------------- */ -#ifdef BUILD_TV -kmp_key_t __kmp_tv_key = 0; -#endif - -/* ------------------------------------------------------------------------ */ -/* ------------------------------------------------------------------------ */ - -#ifdef DEBUG_SUSPEND -int -get_suspend_count_( void ) { - int count = __kmp_suspend_count; - __kmp_suspend_count = 0; - return count; -} -void -set_suspend_count_( int * value ) { - __kmp_suspend_count = *value; -} -#endif - -// Symbols for MS mutual detection. -int _You_must_link_with_exactly_one_OpenMP_library = 1; -int _You_must_link_with_Intel_OpenMP_library = 1; -#if KMP_OS_WINDOWS && ( KMP_VERSION_MAJOR > 4 ) - int _You_must_link_with_Microsoft_OpenMP_library = 1; -#endif - -// end of file // Index: runtime/src/kmp_global.cpp =================================================================== --- runtime/src/kmp_global.cpp +++ runtime/src/kmp_global.cpp @@ -1,5 +1,5 @@ /* - * kmp_global.c -- KPTS global variables for runtime support library + * kmp_global.cpp -- KPTS global variables for runtime support library */ Index: runtime/src/kmp_gsupport.c =================================================================== --- runtime/src/kmp_gsupport.c +++ runtime/src/kmp_gsupport.c @@ -1,1621 +0,0 @@ -/* - * kmp_gsupport.c - */ - - -//===----------------------------------------------------------------------===// -// -// The LLVM Compiler Infrastructure -// -// This file is dual licensed under the MIT and the University of Illinois Open -// Source Licenses. See LICENSE.txt for details. 
-// -//===----------------------------------------------------------------------===// - - -#include "kmp.h" -#include "kmp_atomic.h" - -#if OMPT_SUPPORT -#include "ompt-specific.h" -#endif - -#ifdef __cplusplus - extern "C" { -#endif // __cplusplus - -#define MKLOC(loc,routine) \ - static ident_t (loc) = {0, KMP_IDENT_KMPC, 0, 0, ";unknown;unknown;0;0;;" }; - -#include "kmp_ftn_os.h" - -void -xexpand(KMP_API_NAME_GOMP_BARRIER)(void) -{ - int gtid = __kmp_entry_gtid(); - MKLOC(loc, "GOMP_barrier"); - KA_TRACE(20, ("GOMP_barrier: T#%d\n", gtid)); -#if OMPT_SUPPORT && OMPT_TRACE - ompt_frame_t * ompt_frame; - if (ompt_enabled ) { - ompt_frame = __ompt_get_task_frame_internal(0); - ompt_frame->reenter_runtime_frame = __builtin_frame_address(1); - } -#endif - __kmpc_barrier(&loc, gtid); -} - - -// -// Mutual exclusion -// - -// -// The symbol that icc/ifort generates for unnamed for unnamed critical -// sections - .gomp_critical_user_ - is defined using .comm in any objects -// reference it. We can't reference it directly here in C code, as the -// symbol contains a ".". -// -// The RTL contains an assembly language definition of .gomp_critical_user_ -// with another symbol __kmp_unnamed_critical_addr initialized with it's -// address. -// -extern kmp_critical_name *__kmp_unnamed_critical_addr; - - -void -xexpand(KMP_API_NAME_GOMP_CRITICAL_START)(void) -{ - int gtid = __kmp_entry_gtid(); - MKLOC(loc, "GOMP_critical_start"); - KA_TRACE(20, ("GOMP_critical_start: T#%d\n", gtid)); - __kmpc_critical(&loc, gtid, __kmp_unnamed_critical_addr); -} - - -void -xexpand(KMP_API_NAME_GOMP_CRITICAL_END)(void) -{ - int gtid = __kmp_get_gtid(); - MKLOC(loc, "GOMP_critical_end"); - KA_TRACE(20, ("GOMP_critical_end: T#%d\n", gtid)); - __kmpc_end_critical(&loc, gtid, __kmp_unnamed_critical_addr); -} - - -void -xexpand(KMP_API_NAME_GOMP_CRITICAL_NAME_START)(void **pptr) -{ - int gtid = __kmp_entry_gtid(); - MKLOC(loc, "GOMP_critical_name_start"); - KA_TRACE(20, ("GOMP_critical_name_start: T#%d\n", gtid)); - __kmpc_critical(&loc, gtid, (kmp_critical_name *)pptr); -} - - -void -xexpand(KMP_API_NAME_GOMP_CRITICAL_NAME_END)(void **pptr) -{ - int gtid = __kmp_get_gtid(); - MKLOC(loc, "GOMP_critical_name_end"); - KA_TRACE(20, ("GOMP_critical_name_end: T#%d\n", gtid)); - __kmpc_end_critical(&loc, gtid, (kmp_critical_name *)pptr); -} - - -// -// The Gnu codegen tries to use locked operations to perform atomic updates -// inline. If it can't, then it calls GOMP_atomic_start() before performing -// the update and GOMP_atomic_end() afterward, regardless of the data type. -// - -void -xexpand(KMP_API_NAME_GOMP_ATOMIC_START)(void) -{ - int gtid = __kmp_entry_gtid(); - KA_TRACE(20, ("GOMP_atomic_start: T#%d\n", gtid)); - -#if OMPT_SUPPORT - __ompt_thread_assign_wait_id(0); -#endif - - __kmp_acquire_atomic_lock(&__kmp_atomic_lock, gtid); -} - - -void -xexpand(KMP_API_NAME_GOMP_ATOMIC_END)(void) -{ - int gtid = __kmp_get_gtid(); - KA_TRACE(20, ("GOMP_atomic_start: T#%d\n", gtid)); - __kmp_release_atomic_lock(&__kmp_atomic_lock, gtid); -} - - -int -xexpand(KMP_API_NAME_GOMP_SINGLE_START)(void) -{ - int gtid = __kmp_entry_gtid(); - MKLOC(loc, "GOMP_single_start"); - KA_TRACE(20, ("GOMP_single_start: T#%d\n", gtid)); - - if (! TCR_4(__kmp_init_parallel)) - __kmp_parallel_initialize(); - - // - // 3rd parameter == FALSE prevents kmp_enter_single from pushing a - // workshare when USE_CHECKS is defined. We need to avoid the push, - // as there is no corresponding GOMP_single_end() call. 
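// For context: GCC lowers "#pragma omp single" to roughly
//
//     if (GOMP_single_start()) {
//         /* single-region body: executed by exactly one thread */
//     }
//     GOMP_barrier();   /* omitted when a nowait clause is present */
//
// so the value returned below is the "was I first?" flag, and the implicit
// barrier arrives as a separate call rather than as a GOMP_single_end()
// pairing -- which is why the workshare push must be avoided here.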
- // - return __kmp_enter_single(gtid, &loc, FALSE); -} - - -void * -xexpand(KMP_API_NAME_GOMP_SINGLE_COPY_START)(void) -{ - void *retval; - int gtid = __kmp_entry_gtid(); - MKLOC(loc, "GOMP_single_copy_start"); - KA_TRACE(20, ("GOMP_single_copy_start: T#%d\n", gtid)); - - if (! TCR_4(__kmp_init_parallel)) - __kmp_parallel_initialize(); - - // - // If this is the first thread to enter, return NULL. The generated - // code will then call GOMP_single_copy_end() for this thread only, - // with the copyprivate data pointer as an argument. - // - if (__kmp_enter_single(gtid, &loc, FALSE)) - return NULL; - - // - // Wait for the first thread to set the copyprivate data pointer, - // and for all other threads to reach this point. - // - __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL); - - // - // Retrieve the value of the copyprivate data point, and wait for all - // threads to do likewise, then return. - // - retval = __kmp_team_from_gtid(gtid)->t.t_copypriv_data; - __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL); - return retval; -} - - -void -xexpand(KMP_API_NAME_GOMP_SINGLE_COPY_END)(void *data) -{ - int gtid = __kmp_get_gtid(); - KA_TRACE(20, ("GOMP_single_copy_end: T#%d\n", gtid)); - - // - // Set the copyprivate data pointer fo the team, then hit the barrier - // so that the other threads will continue on and read it. Hit another - // barrier before continuing, so that the know that the copyprivate - // data pointer has been propagated to all threads before trying to - // reuse the t_copypriv_data field. - // - __kmp_team_from_gtid(gtid)->t.t_copypriv_data = data; - __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL); - __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL); -} - - -void -xexpand(KMP_API_NAME_GOMP_ORDERED_START)(void) -{ - int gtid = __kmp_entry_gtid(); - MKLOC(loc, "GOMP_ordered_start"); - KA_TRACE(20, ("GOMP_ordered_start: T#%d\n", gtid)); - __kmpc_ordered(&loc, gtid); -} - - -void -xexpand(KMP_API_NAME_GOMP_ORDERED_END)(void) -{ - int gtid = __kmp_get_gtid(); - MKLOC(loc, "GOMP_ordered_end"); - KA_TRACE(20, ("GOMP_ordered_start: T#%d\n", gtid)); - __kmpc_end_ordered(&loc, gtid); -} - - -// -// Dispatch macro defs -// -// They come in two flavors: 64-bit unsigned, and either 32-bit signed -// (IA-32 architecture) or 64-bit signed (Intel(R) 64). 
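// The same selection pattern in miniature, using <stdint.h> limits as a
// portable stand-in for the KMP_ARCH_* tests below (names are illustrative):
#include <stdint.h>
#if INTPTR_MAX == INT32_MAX            /* 32-bit target: 4-byte signed loops */
typedef int32_t sketch_loop_int;
#define SKETCH_DISPATCH_NEXT sketch_dispatch_next_4
#else                                  /* 64-bit target: 8-byte signed loops */
typedef int64_t sketch_loop_int;
#define SKETCH_DISPATCH_NEXT sketch_dispatch_next_8
#endif
// Unsigned 64-bit ("ULL") loops always use the _8u entry points regardless
// of target width, which is why KMP_DISPATCH_*_ULL is defined outside the
// architecture #if below.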
-// - -#if KMP_ARCH_X86 || KMP_ARCH_ARM || KMP_ARCH_MIPS -# define KMP_DISPATCH_INIT __kmp_aux_dispatch_init_4 -# define KMP_DISPATCH_FINI_CHUNK __kmp_aux_dispatch_fini_chunk_4 -# define KMP_DISPATCH_NEXT __kmpc_dispatch_next_4 -#else -# define KMP_DISPATCH_INIT __kmp_aux_dispatch_init_8 -# define KMP_DISPATCH_FINI_CHUNK __kmp_aux_dispatch_fini_chunk_8 -# define KMP_DISPATCH_NEXT __kmpc_dispatch_next_8 -#endif /* KMP_ARCH_X86 */ - -# define KMP_DISPATCH_INIT_ULL __kmp_aux_dispatch_init_8u -# define KMP_DISPATCH_FINI_CHUNK_ULL __kmp_aux_dispatch_fini_chunk_8u -# define KMP_DISPATCH_NEXT_ULL __kmpc_dispatch_next_8u - - -// -// The parallel contruct -// - -#ifndef KMP_DEBUG -static -#endif /* KMP_DEBUG */ -void -__kmp_GOMP_microtask_wrapper(int *gtid, int *npr, void (*task)(void *), - void *data) -{ -#if OMPT_SUPPORT - kmp_info_t *thr; - ompt_frame_t *ompt_frame; - ompt_state_t enclosing_state; - - if (ompt_enabled) { - // get pointer to thread data structure - thr = __kmp_threads[*gtid]; - - // save enclosing task state; set current state for task - enclosing_state = thr->th.ompt_thread_info.state; - thr->th.ompt_thread_info.state = ompt_state_work_parallel; - - // set task frame - ompt_frame = __ompt_get_task_frame_internal(0); - ompt_frame->exit_runtime_frame = __builtin_frame_address(0); - } -#endif - - task(data); - -#if OMPT_SUPPORT - if (ompt_enabled) { - // clear task frame - ompt_frame->exit_runtime_frame = NULL; - - // restore enclosing state - thr->th.ompt_thread_info.state = enclosing_state; - } -#endif -} - - -#ifndef KMP_DEBUG -static -#endif /* KMP_DEBUG */ -void -__kmp_GOMP_parallel_microtask_wrapper(int *gtid, int *npr, - void (*task)(void *), void *data, unsigned num_threads, ident_t *loc, - enum sched_type schedule, long start, long end, long incr, long chunk_size) -{ - // - // Intialize the loop worksharing construct. - // - KMP_DISPATCH_INIT(loc, *gtid, schedule, start, end, incr, chunk_size, - schedule != kmp_sch_static); - -#if OMPT_SUPPORT - kmp_info_t *thr; - ompt_frame_t *ompt_frame; - ompt_state_t enclosing_state; - - if (ompt_enabled) { - thr = __kmp_threads[*gtid]; - // save enclosing task state; set current state for task - enclosing_state = thr->th.ompt_thread_info.state; - thr->th.ompt_thread_info.state = ompt_state_work_parallel; - - // set task frame - ompt_frame = __ompt_get_task_frame_internal(0); - ompt_frame->exit_runtime_frame = __builtin_frame_address(0); - } -#endif - - // - // Now invoke the microtask. - // - task(data); - -#if OMPT_SUPPORT - if (ompt_enabled) { - // clear task frame - ompt_frame->exit_runtime_frame = NULL; - - // reset enclosing state - thr->th.ompt_thread_info.state = enclosing_state; - } -#endif -} - - -#ifndef KMP_DEBUG -static -#endif /* KMP_DEBUG */ -void -__kmp_GOMP_fork_call(ident_t *loc, int gtid, void (*unwrapped_task)(void *), microtask_t wrapper, int argc,...) 
-{ - int rc; - kmp_info_t *thr = __kmp_threads[gtid]; - kmp_team_t *team = thr->th.th_team; - int tid = __kmp_tid_from_gtid(gtid); - - va_list ap; - va_start(ap, argc); - - rc = __kmp_fork_call(loc, gtid, fork_context_gnu, argc, -#if OMPT_SUPPORT - VOLATILE_CAST(void *) unwrapped_task, -#endif - wrapper, __kmp_invoke_task_func, -#if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX - &ap -#else - ap -#endif - ); - - va_end(ap); - - if (rc) { - __kmp_run_before_invoked_task(gtid, tid, thr, team); - } - -#if OMPT_SUPPORT - if (ompt_enabled) { -#if OMPT_TRACE - ompt_team_info_t *team_info = __ompt_get_teaminfo(0, NULL); - ompt_task_info_t *task_info = __ompt_get_taskinfo(0); - - // implicit task callback - if (ompt_callbacks.ompt_callback(ompt_event_implicit_task_begin)) { - ompt_callbacks.ompt_callback(ompt_event_implicit_task_begin)( - team_info->parallel_id, task_info->task_id); - } -#endif - thr->th.ompt_thread_info.state = ompt_state_work_parallel; - } -#endif -} - -static void -__kmp_GOMP_serialized_parallel(ident_t *loc, kmp_int32 gtid, void (*task)(void *)) -{ -#if OMPT_SUPPORT - ompt_parallel_id_t ompt_parallel_id; - if (ompt_enabled) { - ompt_task_info_t *task_info = __ompt_get_taskinfo(0); - - ompt_parallel_id = __ompt_parallel_id_new(gtid); - - // parallel region callback - if (ompt_callbacks.ompt_callback(ompt_event_parallel_begin)) { - int team_size = 1; - ompt_callbacks.ompt_callback(ompt_event_parallel_begin)( - task_info->task_id, &task_info->frame, ompt_parallel_id, - team_size, (void *) task, - OMPT_INVOKER(fork_context_gnu)); - } - } -#endif - - __kmp_serialized_parallel(loc, gtid); - -#if OMPT_SUPPORT - if (ompt_enabled) { - kmp_info_t *thr = __kmp_threads[gtid]; - - ompt_task_id_t my_ompt_task_id = __ompt_task_id_new(gtid); - - // set up lightweight task - ompt_lw_taskteam_t *lwt = (ompt_lw_taskteam_t *) - __kmp_allocate(sizeof(ompt_lw_taskteam_t)); - __ompt_lw_taskteam_init(lwt, thr, gtid, (void *) task, ompt_parallel_id); - lwt->ompt_task_info.task_id = my_ompt_task_id; - __ompt_lw_taskteam_link(lwt, thr); - -#if OMPT_TRACE - // implicit task callback - if (ompt_callbacks.ompt_callback(ompt_event_implicit_task_begin)) { - ompt_callbacks.ompt_callback(ompt_event_implicit_task_begin)( - ompt_parallel_id, my_ompt_task_id); - } - thr->th.ompt_thread_info.state = ompt_state_work_parallel; -#endif - } -#endif -} - - -void -xexpand(KMP_API_NAME_GOMP_PARALLEL_START)(void (*task)(void *), void *data, unsigned num_threads) -{ - int gtid = __kmp_entry_gtid(); - -#if OMPT_SUPPORT - ompt_frame_t *parent_frame, *frame; - - if (ompt_enabled) { - parent_frame = __ompt_get_task_frame_internal(0); - parent_frame->reenter_runtime_frame = __builtin_frame_address(1); - } -#endif - - MKLOC(loc, "GOMP_parallel_start"); - KA_TRACE(20, ("GOMP_parallel_start: T#%d\n", gtid)); - - if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) { - if (num_threads != 0) { - __kmp_push_num_threads(&loc, gtid, num_threads); - } - __kmp_GOMP_fork_call(&loc, gtid, task, - (microtask_t)__kmp_GOMP_microtask_wrapper, 2, task, data); - } - else { - __kmp_GOMP_serialized_parallel(&loc, gtid, task); - } - -#if OMPT_SUPPORT - if (ompt_enabled) { - frame = __ompt_get_task_frame_internal(0); - frame->exit_runtime_frame = __builtin_frame_address(1); - } -#endif -} - - -void -xexpand(KMP_API_NAME_GOMP_PARALLEL_END)(void) -{ - int gtid = __kmp_get_gtid(); - kmp_info_t *thr; - - thr = __kmp_threads[gtid]; - - MKLOC(loc, "GOMP_parallel_end"); - KA_TRACE(20, ("GOMP_parallel_end: T#%d\n", gtid)); - - -#if 
OMPT_SUPPORT - ompt_parallel_id_t parallel_id; - ompt_task_id_t serialized_task_id; - ompt_frame_t *ompt_frame = NULL; - - if (ompt_enabled) { - ompt_team_info_t *team_info = __ompt_get_teaminfo(0, NULL); - parallel_id = team_info->parallel_id; - - ompt_task_info_t *task_info = __ompt_get_taskinfo(0); - serialized_task_id = task_info->task_id; - - // unlink if necessary. no-op if there is not a lightweight task. - ompt_lw_taskteam_t *lwt = __ompt_lw_taskteam_unlink(thr); - // GOMP allocates/frees lwt since it can't be kept on the stack - if (lwt) { - __kmp_free(lwt); - - } - } -#endif - - if (! thr->th.th_team->t.t_serialized) { - __kmp_run_after_invoked_task(gtid, __kmp_tid_from_gtid(gtid), thr, - thr->th.th_team); - -#if OMPT_SUPPORT - if (ompt_enabled) { - // Implicit task is finished here, in the barrier we might schedule deferred tasks, - // these don't see the implicit task on the stack - ompt_frame = __ompt_get_task_frame_internal(0); - ompt_frame->exit_runtime_frame = NULL; - } -#endif - - __kmp_join_call(&loc, gtid -#if OMPT_SUPPORT - , fork_context_gnu -#endif - ); - } - else { -#if OMPT_SUPPORT && OMPT_TRACE - if (ompt_enabled && - ompt_callbacks.ompt_callback(ompt_event_implicit_task_end)) { - ompt_callbacks.ompt_callback(ompt_event_implicit_task_end)( - parallel_id, serialized_task_id); - } -#endif - - __kmpc_end_serialized_parallel(&loc, gtid); - -#if OMPT_SUPPORT - if (ompt_enabled) { - // Record that we re-entered the runtime system in the frame that - // created the parallel region. - ompt_task_info_t *parent_task_info = __ompt_get_taskinfo(0); - - if (ompt_callbacks.ompt_callback(ompt_event_parallel_end)) { - ompt_callbacks.ompt_callback(ompt_event_parallel_end)( - parallel_id, parent_task_info->task_id, - OMPT_INVOKER(fork_context_gnu)); - } - - parent_task_info->frame.reenter_runtime_frame = NULL; - - thr->th.ompt_thread_info.state = - (((thr->th.th_team)->t.t_serialized) ? - ompt_state_work_serial : ompt_state_work_parallel); - } -#endif - } -} - - -// -// Loop worksharing constructs -// - -// -// The Gnu codegen passes in an exclusive upper bound for the overall range, -// but the libguide dispatch code expects an inclusive upper bound, hence the -// "end - incr" 5th argument to KMP_DISPATCH_INIT (and the " ub - str" 11th -// argument to __kmp_GOMP_fork_call). -// -// Conversely, KMP_DISPATCH_NEXT returns and inclusive upper bound in *p_ub, -// but the Gnu codegen expects an excluside upper bound, so the adjustment -// "*p_ub += stride" compenstates for the discrepancy. -// -// Correction: the gnu codegen always adjusts the upper bound by +-1, not the -// stride value. We adjust the dispatch parameters accordingly (by +-1), but -// we still adjust p_ub by the actual stride value. -// -// The "runtime" versions do not take a chunk_sz parameter. -// -// The profile lib cannot support construct checking of unordered loops that -// are predetermined by the compiler to be statically scheduled, as the gcc -// codegen will not always emit calls to GOMP_loop_static_next() to get the -// next iteration. Instead, it emits inline code to call omp_get_thread_num() -// num and calculate the iteration space using the result. It doesn't do this -// with ordered static loop, so they can be checked. 
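// The bound adjustment described above, as a standalone round trip: GOMP
// hands in a half-open range [lb, ub) while the dispatcher works on closed
// ranges [lb, ub'] (a sketch, mirroring the +-1 logic in LOOP_START below):
static long sketch_to_inclusive(long ub, long str) {
    return (str > 0) ? ub - 1 : ub + 1;  // [lb, ub) -> [lb, ub'] for INIT
}
static long sketch_to_exclusive(long ub, long str) {
    return (str > 0) ? ub + 1 : ub - 1;  // chunk bound back to GOMP's view
}
// E.g. iterating 0..9 arrives as lb = 0, ub = 10, str = 1: DISPATCH_INIT is
// called with upper bound 9, and each chunk's inclusive *p_ub from
// DISPATCH_NEXT is bumped by +1 before being returned to the GOMP caller.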
-// - -#define LOOP_START(func,schedule) \ - int func (long lb, long ub, long str, long chunk_sz, long *p_lb, \ - long *p_ub) \ - { \ - int status; \ - long stride; \ - int gtid = __kmp_entry_gtid(); \ - MKLOC(loc, #func); \ - KA_TRACE(20, ( #func ": T#%d, lb 0x%lx, ub 0x%lx, str 0x%lx, chunk_sz 0x%lx\n", \ - gtid, lb, ub, str, chunk_sz )); \ - \ - if ((str > 0) ? (lb < ub) : (lb > ub)) { \ - KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb, \ - (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, \ - (schedule) != kmp_sch_static); \ - status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, (kmp_int *)p_lb, \ - (kmp_int *)p_ub, (kmp_int *)&stride); \ - if (status) { \ - KMP_DEBUG_ASSERT(stride == str); \ - *p_ub += (str > 0) ? 1 : -1; \ - } \ - } \ - else { \ - status = 0; \ - } \ - \ - KA_TRACE(20, ( #func " exit: T#%d, *p_lb 0x%lx, *p_ub 0x%lx, returning %d\n", \ - gtid, *p_lb, *p_ub, status)); \ - return status; \ - } - - -#define LOOP_RUNTIME_START(func,schedule) \ - int func (long lb, long ub, long str, long *p_lb, long *p_ub) \ - { \ - int status; \ - long stride; \ - long chunk_sz = 0; \ - int gtid = __kmp_entry_gtid(); \ - MKLOC(loc, #func); \ - KA_TRACE(20, ( #func ": T#%d, lb 0x%lx, ub 0x%lx, str 0x%lx, chunk_sz %d\n", \ - gtid, lb, ub, str, chunk_sz )); \ - \ - if ((str > 0) ? (lb < ub) : (lb > ub)) { \ - KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb, \ - (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, TRUE); \ - status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, (kmp_int *)p_lb, \ - (kmp_int *)p_ub, (kmp_int *)&stride); \ - if (status) { \ - KMP_DEBUG_ASSERT(stride == str); \ - *p_ub += (str > 0) ? 1 : -1; \ - } \ - } \ - else { \ - status = 0; \ - } \ - \ - KA_TRACE(20, ( #func " exit: T#%d, *p_lb 0x%lx, *p_ub 0x%lx, returning %d\n", \ - gtid, *p_lb, *p_ub, status)); \ - return status; \ - } - - -#define LOOP_NEXT(func,fini_code) \ - int func(long *p_lb, long *p_ub) \ - { \ - int status; \ - long stride; \ - int gtid = __kmp_get_gtid(); \ - MKLOC(loc, #func); \ - KA_TRACE(20, ( #func ": T#%d\n", gtid)); \ - \ - fini_code \ - status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, (kmp_int *)p_lb, \ - (kmp_int *)p_ub, (kmp_int *)&stride); \ - if (status) { \ - *p_ub += (stride > 0) ? 
1 : -1; \ - } \ - \ - KA_TRACE(20, ( #func " exit: T#%d, *p_lb 0x%lx, *p_ub 0x%lx, stride 0x%lx, " \ - "returning %d\n", gtid, *p_lb, *p_ub, stride, status)); \ - return status; \ - } - - -LOOP_START(xexpand(KMP_API_NAME_GOMP_LOOP_STATIC_START), kmp_sch_static) -LOOP_NEXT(xexpand(KMP_API_NAME_GOMP_LOOP_STATIC_NEXT), {}) -LOOP_START(xexpand(KMP_API_NAME_GOMP_LOOP_DYNAMIC_START), kmp_sch_dynamic_chunked) -LOOP_NEXT(xexpand(KMP_API_NAME_GOMP_LOOP_DYNAMIC_NEXT), {}) -LOOP_START(xexpand(KMP_API_NAME_GOMP_LOOP_GUIDED_START), kmp_sch_guided_chunked) -LOOP_NEXT(xexpand(KMP_API_NAME_GOMP_LOOP_GUIDED_NEXT), {}) -LOOP_RUNTIME_START(xexpand(KMP_API_NAME_GOMP_LOOP_RUNTIME_START), kmp_sch_runtime) -LOOP_NEXT(xexpand(KMP_API_NAME_GOMP_LOOP_RUNTIME_NEXT), {}) - -LOOP_START(xexpand(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_START), kmp_ord_static) -LOOP_NEXT(xexpand(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_NEXT), \ - { KMP_DISPATCH_FINI_CHUNK(&loc, gtid); }) -LOOP_START(xexpand(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_START), kmp_ord_dynamic_chunked) -LOOP_NEXT(xexpand(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_NEXT), \ - { KMP_DISPATCH_FINI_CHUNK(&loc, gtid); }) -LOOP_START(xexpand(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_START), kmp_ord_guided_chunked) -LOOP_NEXT(xexpand(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_NEXT), \ - { KMP_DISPATCH_FINI_CHUNK(&loc, gtid); }) -LOOP_RUNTIME_START(xexpand(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_START), kmp_ord_runtime) -LOOP_NEXT(xexpand(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_NEXT), \ - { KMP_DISPATCH_FINI_CHUNK(&loc, gtid); }) - - -void -xexpand(KMP_API_NAME_GOMP_LOOP_END)(void) -{ - int gtid = __kmp_get_gtid(); - KA_TRACE(20, ("GOMP_loop_end: T#%d\n", gtid)) - - __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL); - - KA_TRACE(20, ("GOMP_loop_end exit: T#%d\n", gtid)) -} - - -void -xexpand(KMP_API_NAME_GOMP_LOOP_END_NOWAIT)(void) -{ - KA_TRACE(20, ("GOMP_loop_end_nowait: T#%d\n", __kmp_get_gtid())) -} - - -// -// Unsigned long long loop worksharing constructs -// -// These are new with gcc 4.4 -// - -#define LOOP_START_ULL(func,schedule) \ - int func (int up, unsigned long long lb, unsigned long long ub, \ - unsigned long long str, unsigned long long chunk_sz, \ - unsigned long long *p_lb, unsigned long long *p_ub) \ - { \ - int status; \ - long long str2 = up ? ((long long)str) : -((long long)str); \ - long long stride; \ - int gtid = __kmp_entry_gtid(); \ - MKLOC(loc, #func); \ - \ - KA_TRACE(20, ( #func ": T#%d, up %d, lb 0x%llx, ub 0x%llx, str 0x%llx, chunk_sz 0x%llx\n", \ - gtid, up, lb, ub, str, chunk_sz )); \ - \ - if ((str > 0) ? (lb < ub) : (lb > ub)) { \ - KMP_DISPATCH_INIT_ULL(&loc, gtid, (schedule), lb, \ - (str2 > 0) ? (ub - 1) : (ub + 1), str2, chunk_sz, \ - (schedule) != kmp_sch_static); \ - status = KMP_DISPATCH_NEXT_ULL(&loc, gtid, NULL, \ - (kmp_uint64 *)p_lb, (kmp_uint64 *)p_ub, (kmp_int64 *)&stride); \ - if (status) { \ - KMP_DEBUG_ASSERT(stride == str2); \ - *p_ub += (str > 0) ? 1 : -1; \ - } \ - } \ - else { \ - status = 0; \ - } \ - \ - KA_TRACE(20, ( #func " exit: T#%d, *p_lb 0x%llx, *p_ub 0x%llx, returning %d\n", \ - gtid, *p_lb, *p_ub, status)); \ - return status; \ - } - - -#define LOOP_RUNTIME_START_ULL(func,schedule) \ - int func (int up, unsigned long long lb, unsigned long long ub, \ - unsigned long long str, unsigned long long *p_lb, \ - unsigned long long *p_ub) \ - { \ - int status; \ - long long str2 = up ? 
((long long)str) : -((long long)str); \ - unsigned long long stride; \ - unsigned long long chunk_sz = 0; \ - int gtid = __kmp_entry_gtid(); \ - MKLOC(loc, #func); \ - \ - KA_TRACE(20, ( #func ": T#%d, up %d, lb 0x%llx, ub 0x%llx, str 0x%llx, chunk_sz 0x%llx\n", \ - gtid, up, lb, ub, str, chunk_sz )); \ - \ - if ((str > 0) ? (lb < ub) : (lb > ub)) { \ - KMP_DISPATCH_INIT_ULL(&loc, gtid, (schedule), lb, \ - (str2 > 0) ? (ub - 1) : (ub + 1), str2, chunk_sz, TRUE); \ - status = KMP_DISPATCH_NEXT_ULL(&loc, gtid, NULL, \ - (kmp_uint64 *)p_lb, (kmp_uint64 *)p_ub, (kmp_int64 *)&stride); \ - if (status) { \ - KMP_DEBUG_ASSERT((long long)stride == str2); \ - *p_ub += (str > 0) ? 1 : -1; \ - } \ - } \ - else { \ - status = 0; \ - } \ - \ - KA_TRACE(20, ( #func " exit: T#%d, *p_lb 0x%llx, *p_ub 0x%llx, returning %d\n", \ - gtid, *p_lb, *p_ub, status)); \ - return status; \ - } - - -#define LOOP_NEXT_ULL(func,fini_code) \ - int func(unsigned long long *p_lb, unsigned long long *p_ub) \ - { \ - int status; \ - long long stride; \ - int gtid = __kmp_get_gtid(); \ - MKLOC(loc, #func); \ - KA_TRACE(20, ( #func ": T#%d\n", gtid)); \ - \ - fini_code \ - status = KMP_DISPATCH_NEXT_ULL(&loc, gtid, NULL, (kmp_uint64 *)p_lb, \ - (kmp_uint64 *)p_ub, (kmp_int64 *)&stride); \ - if (status) { \ - *p_ub += (stride > 0) ? 1 : -1; \ - } \ - \ - KA_TRACE(20, ( #func " exit: T#%d, *p_lb 0x%llx, *p_ub 0x%llx, stride 0x%llx, " \ - "returning %d\n", gtid, *p_lb, *p_ub, stride, status)); \ - return status; \ - } - - -LOOP_START_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_START), kmp_sch_static) -LOOP_NEXT_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_NEXT), {}) -LOOP_START_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_START), kmp_sch_dynamic_chunked) -LOOP_NEXT_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_NEXT), {}) -LOOP_START_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_START), kmp_sch_guided_chunked) -LOOP_NEXT_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_NEXT), {}) -LOOP_RUNTIME_START_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_START), kmp_sch_runtime) -LOOP_NEXT_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_NEXT), {}) - -LOOP_START_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_START), kmp_ord_static) -LOOP_NEXT_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_NEXT), \ - { KMP_DISPATCH_FINI_CHUNK_ULL(&loc, gtid); }) -LOOP_START_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_START), kmp_ord_dynamic_chunked) -LOOP_NEXT_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_NEXT), \ - { KMP_DISPATCH_FINI_CHUNK_ULL(&loc, gtid); }) -LOOP_START_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_START), kmp_ord_guided_chunked) -LOOP_NEXT_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_NEXT), \ - { KMP_DISPATCH_FINI_CHUNK_ULL(&loc, gtid); }) -LOOP_RUNTIME_START_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_START), kmp_ord_runtime) -LOOP_NEXT_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_NEXT), \ - { KMP_DISPATCH_FINI_CHUNK_ULL(&loc, gtid); }) - - -// -// Combined parallel / loop worksharing constructs -// -// There are no ull versions (yet). 
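For orientation, the sketch below shows roughly the call sequence that GOMP_1.0-era gcc codegen produces for a combined `parallel for` with a dynamic schedule, which these combined entry points service. The entry-point signatures follow the macros above; the outlined function, the argument struct, and the example bounds are invented for the sketch.

```c
/* Roughly what "#pragma omp parallel for schedule(dynamic, 1)" lowers to
 * in the GOMP_1.0-era ABI (a hedged sketch, not actual compiler output). */
extern void GOMP_parallel_loop_dynamic_start(void (*)(void *), void *,
                                             unsigned, long, long, long, long);
extern int  GOMP_loop_dynamic_next(long *, long *);
extern void GOMP_loop_end_nowait(void);
extern void GOMP_parallel_end(void);

struct args { int *a; };

static void subfn(void *p) {
    int *a = ((struct args *)p)->a;
    long lb, ub;
    while (GOMP_loop_dynamic_next(&lb, &ub))  /* [lb, ub) chunk */
        for (long i = lb; i < ub; ++i)
            a[i] = (int)i;
    GOMP_loop_end_nowait();   /* the join barrier comes from parallel_end */
}

void run(int *a) {
    struct args data = { a };
    /* fork + initialize the workshare in one call; 0 = default threads */
    GOMP_parallel_loop_dynamic_start(subfn, &data, 0, 0L, 100L, 1L, 1L);
    subfn(&data);             /* the master runs the body too */
    GOMP_parallel_end();
}
```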
-// - -#define PARALLEL_LOOP_START(func, schedule, ompt_pre, ompt_post) \ - void func (void (*task) (void *), void *data, unsigned num_threads, \ - long lb, long ub, long str, long chunk_sz) \ - { \ - int gtid = __kmp_entry_gtid(); \ - MKLOC(loc, #func); \ - KA_TRACE(20, ( #func ": T#%d, lb 0x%lx, ub 0x%lx, str 0x%lx, chunk_sz 0x%lx\n", \ - gtid, lb, ub, str, chunk_sz )); \ - \ - ompt_pre(); \ - \ - if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) { \ - if (num_threads != 0) { \ - __kmp_push_num_threads(&loc, gtid, num_threads); \ - } \ - __kmp_GOMP_fork_call(&loc, gtid, task, \ - (microtask_t)__kmp_GOMP_parallel_microtask_wrapper, 9, \ - task, data, num_threads, &loc, (schedule), lb, \ - (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz); \ - } \ - else { \ - __kmp_GOMP_serialized_parallel(&loc, gtid, task); \ - } \ - \ - KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb, \ - (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, \ - (schedule) != kmp_sch_static); \ - \ - ompt_post(); \ - \ - KA_TRACE(20, ( #func " exit: T#%d\n", gtid)); \ - } - - - -#if OMPT_SUPPORT - -#define OMPT_LOOP_PRE() \ - ompt_frame_t *parent_frame; \ - if (ompt_enabled) { \ - parent_frame = __ompt_get_task_frame_internal(0); \ - parent_frame->reenter_runtime_frame = __builtin_frame_address(1); \ - } - - -#define OMPT_LOOP_POST() \ - if (ompt_enabled) { \ - parent_frame->reenter_runtime_frame = NULL; \ - } - -#else - -#define OMPT_LOOP_PRE() - -#define OMPT_LOOP_POST() - -#endif - - -PARALLEL_LOOP_START(xexpand(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC_START), - kmp_sch_static, OMPT_LOOP_PRE, OMPT_LOOP_POST) -PARALLEL_LOOP_START(xexpand(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC_START), - kmp_sch_dynamic_chunked, OMPT_LOOP_PRE, OMPT_LOOP_POST) -PARALLEL_LOOP_START(xexpand(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED_START), - kmp_sch_guided_chunked, OMPT_LOOP_PRE, OMPT_LOOP_POST) -PARALLEL_LOOP_START(xexpand(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME_START), - kmp_sch_runtime, OMPT_LOOP_PRE, OMPT_LOOP_POST) - - -// -// Tasking constructs -// - -void -xexpand(KMP_API_NAME_GOMP_TASK)(void (*func)(void *), void *data, void (*copy_func)(void *, void *), - long arg_size, long arg_align, bool if_cond, unsigned gomp_flags) -{ - MKLOC(loc, "GOMP_task"); - int gtid = __kmp_entry_gtid(); - kmp_int32 flags = 0; - kmp_tasking_flags_t *input_flags = (kmp_tasking_flags_t *) & flags; - - KA_TRACE(20, ("GOMP_task: T#%d\n", gtid)); - - // The low-order bit is the "tied" flag - if (gomp_flags & 1) { - input_flags->tiedness = 1; - } - // The second low-order bit is the "final" flag - if (gomp_flags & 2) { - input_flags->final = 1; - } - input_flags->native = 1; - // __kmp_task_alloc() sets up all other flags - - if (! if_cond) { - arg_size = 0; - } - - kmp_task_t *task = __kmp_task_alloc(&loc, gtid, input_flags, - sizeof(kmp_task_t), arg_size ? arg_size + arg_align - 1 : 0, - (kmp_routine_entry_t)func); - - if (arg_size > 0) { - if (arg_align > 0) { - task->shareds = (void *)((((size_t)task->shareds) - + arg_align - 1) / arg_align * arg_align); - } - //else error?? 
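//
// Worked example of the round-up above (illustrative values): with
// arg_align = 16 and task->shareds initially at 0x1001,
// (0x1001 + 15) / 16 * 16 = 0x1010. Integer division truncates, so the
// expression yields the smallest multiple of arg_align that is >= the
// original pointer, i.e. the next aligned address.
//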
-
-        if (copy_func) {
-            (*copy_func)(task->shareds, data);
-        }
-        else {
-            KMP_MEMCPY(task->shareds, data, arg_size);
-        }
-    }
-
-    if (if_cond) {
-        __kmpc_omp_task(&loc, gtid, task);
-    }
-    else {
-#if OMPT_SUPPORT
-        ompt_thread_info_t oldInfo;
-        kmp_info_t *thread;
-        kmp_taskdata_t *taskdata;
-        if (ompt_enabled) {
-            // Store the thread's state and restore it after the task
-            thread = __kmp_threads[ gtid ];
-            taskdata = KMP_TASK_TO_TASKDATA(task);
-            oldInfo = thread->th.ompt_thread_info;
-            thread->th.ompt_thread_info.wait_id = 0;
-            thread->th.ompt_thread_info.state = ompt_state_work_parallel;
-            taskdata->ompt_task_info.frame.exit_runtime_frame =
-                __builtin_frame_address(0);
-        }
-#endif
-
-        __kmpc_omp_task_begin_if0(&loc, gtid, task);
-        func(data);
-        __kmpc_omp_task_complete_if0(&loc, gtid, task);
-
-#if OMPT_SUPPORT
-        if (ompt_enabled) {
-            thread->th.ompt_thread_info = oldInfo;
-            taskdata->ompt_task_info.frame.exit_runtime_frame = NULL;
-        }
-#endif
-    }
-
-    KA_TRACE(20, ("GOMP_task exit: T#%d\n", gtid));
-}
-
-
-void
-xexpand(KMP_API_NAME_GOMP_TASKWAIT)(void)
-{
-    MKLOC(loc, "GOMP_taskwait");
-    int gtid = __kmp_entry_gtid();
-
-    KA_TRACE(20, ("GOMP_taskwait: T#%d\n", gtid));
-
-    __kmpc_omp_taskwait(&loc, gtid);
-
-    KA_TRACE(20, ("GOMP_taskwait exit: T#%d\n", gtid));
-}
-
-
-//
-// Sections worksharing constructs
-//
-
-//
-// For the sections construct, we initialize a dynamically scheduled loop
-// worksharing construct with lb 1 and stride 1, and use the iteration #'s
-// that it returns as section ids.
-//
-// There are no special entry points for ordered sections, so we always use
-// the dynamically scheduled workshare, even if the sections aren't ordered.
-//
-
-unsigned
-xexpand(KMP_API_NAME_GOMP_SECTIONS_START)(unsigned count)
-{
-    int status;
-    kmp_int lb, ub, stride;
-    int gtid = __kmp_entry_gtid();
-    MKLOC(loc, "GOMP_sections_start");
-    KA_TRACE(20, ("GOMP_sections_start: T#%d\n", gtid));
-
-    KMP_DISPATCH_INIT(&loc, gtid, kmp_nm_dynamic_chunked, 1, count, 1, 1, TRUE);
-
-    status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, &lb, &ub, &stride);
-    if (status) {
-        KMP_DEBUG_ASSERT(stride == 1);
-        KMP_DEBUG_ASSERT(lb > 0);
-        KMP_ASSERT(lb == ub);
-    }
-    else {
-        lb = 0;
-    }
-
-    KA_TRACE(20, ("GOMP_sections_start exit: T#%d returning %u\n", gtid,
-        (unsigned)lb));
-    return (unsigned)lb;
-}
-
-
-unsigned
-xexpand(KMP_API_NAME_GOMP_SECTIONS_NEXT)(void)
-{
-    int status;
-    kmp_int lb, ub, stride;
-    int gtid = __kmp_get_gtid();
-    MKLOC(loc, "GOMP_sections_next");
-    KA_TRACE(20, ("GOMP_sections_next: T#%d\n", gtid));
-
-    status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, &lb, &ub, &stride);
-    if (status) {
-        KMP_DEBUG_ASSERT(stride == 1);
-        KMP_DEBUG_ASSERT(lb > 0);
-        KMP_ASSERT(lb == ub);
-    }
-    else {
-        lb = 0;
-    }
-
-    KA_TRACE(20, ("GOMP_sections_next exit: T#%d returning %u\n", gtid,
-        (unsigned)lb));
-    return (unsigned)lb;
-}
-
-
-void
-xexpand(KMP_API_NAME_GOMP_PARALLEL_SECTIONS_START)(void (*task) (void *), void *data,
-    unsigned num_threads, unsigned count)
-{
-    int gtid = __kmp_entry_gtid();
-
-#if OMPT_SUPPORT
-    ompt_frame_t *parent_frame;
-
-    if (ompt_enabled) {
-        parent_frame = __ompt_get_task_frame_internal(0);
-        parent_frame->reenter_runtime_frame = __builtin_frame_address(1);
-    }
-#endif
-
-    MKLOC(loc, "GOMP_parallel_sections_start");
-    KA_TRACE(20, ("GOMP_parallel_sections_start: T#%d\n", gtid));
-
-    if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) {
-        if (num_threads != 0) {
-            __kmp_push_num_threads(&loc, gtid, num_threads);
-        }
-        __kmp_GOMP_fork_call(&loc, gtid, task,
-
(microtask_t)__kmp_GOMP_parallel_microtask_wrapper, 9, task, data, - num_threads, &loc, kmp_nm_dynamic_chunked, (kmp_int)1, - (kmp_int)count, (kmp_int)1, (kmp_int)1); - } - else { - __kmp_GOMP_serialized_parallel(&loc, gtid, task); - } - -#if OMPT_SUPPORT - if (ompt_enabled) { - parent_frame->reenter_runtime_frame = NULL; - } -#endif - - KMP_DISPATCH_INIT(&loc, gtid, kmp_nm_dynamic_chunked, 1, count, 1, 1, TRUE); - - KA_TRACE(20, ("GOMP_parallel_sections_start exit: T#%d\n", gtid)); -} - - -void -xexpand(KMP_API_NAME_GOMP_SECTIONS_END)(void) -{ - int gtid = __kmp_get_gtid(); - KA_TRACE(20, ("GOMP_sections_end: T#%d\n", gtid)) - - __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL); - - KA_TRACE(20, ("GOMP_sections_end exit: T#%d\n", gtid)) -} - - -void -xexpand(KMP_API_NAME_GOMP_SECTIONS_END_NOWAIT)(void) -{ - KA_TRACE(20, ("GOMP_sections_end_nowait: T#%d\n", __kmp_get_gtid())) -} - -// libgomp has an empty function for GOMP_taskyield as of 2013-10-10 -void -xexpand(KMP_API_NAME_GOMP_TASKYIELD)(void) -{ - KA_TRACE(20, ("GOMP_taskyield: T#%d\n", __kmp_get_gtid())) - return; -} - -#if OMP_40_ENABLED // these are new GOMP_4.0 entry points - -void -xexpand(KMP_API_NAME_GOMP_PARALLEL)(void (*task)(void *), void *data, unsigned num_threads, unsigned int flags) -{ - int gtid = __kmp_entry_gtid(); - MKLOC(loc, "GOMP_parallel"); - KA_TRACE(20, ("GOMP_parallel: T#%d\n", gtid)); - -#if OMPT_SUPPORT - ompt_task_info_t *parent_task_info, *task_info; - if (ompt_enabled) { - parent_task_info = __ompt_get_taskinfo(0); - parent_task_info->frame.reenter_runtime_frame = __builtin_frame_address(1); - } -#endif - if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) { - if (num_threads != 0) { - __kmp_push_num_threads(&loc, gtid, num_threads); - } - if(flags != 0) { - __kmp_push_proc_bind(&loc, gtid, (kmp_proc_bind_t)flags); - } - __kmp_GOMP_fork_call(&loc, gtid, task, - (microtask_t)__kmp_GOMP_microtask_wrapper, 2, task, data); - } - else { - __kmp_GOMP_serialized_parallel(&loc, gtid, task); - } -#if OMPT_SUPPORT - if (ompt_enabled) { - task_info = __ompt_get_taskinfo(0); - task_info->frame.exit_runtime_frame = __builtin_frame_address(0); - } -#endif - task(data); - xexpand(KMP_API_NAME_GOMP_PARALLEL_END)(); -#if OMPT_SUPPORT - if (ompt_enabled) { - task_info->frame.exit_runtime_frame = NULL; - parent_task_info->frame.reenter_runtime_frame = NULL; - } -#endif -} - -void -xexpand(KMP_API_NAME_GOMP_PARALLEL_SECTIONS)(void (*task) (void *), void *data, - unsigned num_threads, unsigned count, unsigned flags) -{ - int gtid = __kmp_entry_gtid(); - MKLOC(loc, "GOMP_parallel_sections"); - KA_TRACE(20, ("GOMP_parallel_sections: T#%d\n", gtid)); - - if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) { - if (num_threads != 0) { - __kmp_push_num_threads(&loc, gtid, num_threads); - } - if(flags != 0) { - __kmp_push_proc_bind(&loc, gtid, (kmp_proc_bind_t)flags); - } - __kmp_GOMP_fork_call(&loc, gtid, task, - (microtask_t)__kmp_GOMP_parallel_microtask_wrapper, 9, task, data, - num_threads, &loc, kmp_nm_dynamic_chunked, (kmp_int)1, - (kmp_int)count, (kmp_int)1, (kmp_int)1); - } - else { - __kmp_GOMP_serialized_parallel(&loc, gtid, task); - } - - KMP_DISPATCH_INIT(&loc, gtid, kmp_nm_dynamic_chunked, 1, count, 1, 1, TRUE); - - task(data); - xexpand(KMP_API_NAME_GOMP_PARALLEL_END)(); - KA_TRACE(20, ("GOMP_parallel_sections exit: T#%d\n", gtid)); -} - -#define PARALLEL_LOOP(func, schedule, ompt_pre, ompt_post) \ - void func (void (*task) (void *), void *data, unsigned num_threads, \ - long lb, long ub, long str, long 
chunk_sz, unsigned flags) \ - { \ - int gtid = __kmp_entry_gtid(); \ - MKLOC(loc, #func); \ - KA_TRACE(20, ( #func ": T#%d, lb 0x%lx, ub 0x%lx, str 0x%lx, chunk_sz 0x%lx\n", \ - gtid, lb, ub, str, chunk_sz )); \ - \ - ompt_pre(); \ - if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) { \ - if (num_threads != 0) { \ - __kmp_push_num_threads(&loc, gtid, num_threads); \ - } \ - if (flags != 0) { \ - __kmp_push_proc_bind(&loc, gtid, (kmp_proc_bind_t)flags); \ - } \ - __kmp_GOMP_fork_call(&loc, gtid, task, \ - (microtask_t)__kmp_GOMP_parallel_microtask_wrapper, 9, \ - task, data, num_threads, &loc, (schedule), lb, \ - (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz); \ - } \ - else { \ - __kmp_GOMP_serialized_parallel(&loc, gtid, task); \ - } \ - \ - KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb, \ - (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, \ - (schedule) != kmp_sch_static); \ - task(data); \ - xexpand(KMP_API_NAME_GOMP_PARALLEL_END)(); \ - ompt_post(); \ - \ - KA_TRACE(20, ( #func " exit: T#%d\n", gtid)); \ - } - -PARALLEL_LOOP(xexpand(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC), kmp_sch_static, - OMPT_LOOP_PRE, OMPT_LOOP_POST) -PARALLEL_LOOP(xexpand(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC), kmp_sch_dynamic_chunked, - OMPT_LOOP_PRE, OMPT_LOOP_POST) -PARALLEL_LOOP(xexpand(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED), kmp_sch_guided_chunked, - OMPT_LOOP_PRE, OMPT_LOOP_POST) -PARALLEL_LOOP(xexpand(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME), kmp_sch_runtime, - OMPT_LOOP_PRE, OMPT_LOOP_POST) - - -void -xexpand(KMP_API_NAME_GOMP_TASKGROUP_START)(void) -{ - int gtid = __kmp_entry_gtid(); - MKLOC(loc, "GOMP_taskgroup_start"); - KA_TRACE(20, ("GOMP_taskgroup_start: T#%d\n", gtid)); - - __kmpc_taskgroup(&loc, gtid); - - return; -} - -void -xexpand(KMP_API_NAME_GOMP_TASKGROUP_END)(void) -{ - int gtid = __kmp_get_gtid(); - MKLOC(loc, "GOMP_taskgroup_end"); - KA_TRACE(20, ("GOMP_taskgroup_end: T#%d\n", gtid)); - - __kmpc_end_taskgroup(&loc, gtid); - - return; -} - -#ifndef KMP_DEBUG -static -#endif /* KMP_DEBUG */ -kmp_int32 __kmp_gomp_to_omp_cancellation_kind(int gomp_kind) { - kmp_int32 cncl_kind = 0; - switch(gomp_kind) { - case 1: - cncl_kind = cancel_parallel; - break; - case 2: - cncl_kind = cancel_loop; - break; - case 4: - cncl_kind = cancel_sections; - break; - case 8: - cncl_kind = cancel_taskgroup; - break; - } - return cncl_kind; -} - -bool -xexpand(KMP_API_NAME_GOMP_CANCELLATION_POINT)(int which) -{ - if(__kmp_omp_cancellation) { - KMP_FATAL(NoGompCancellation); - } - int gtid = __kmp_get_gtid(); - MKLOC(loc, "GOMP_cancellation_point"); - KA_TRACE(20, ("GOMP_cancellation_point: T#%d\n", gtid)); - - kmp_int32 cncl_kind = __kmp_gomp_to_omp_cancellation_kind(which); - - return __kmpc_cancellationpoint(&loc, gtid, cncl_kind); -} - -bool -xexpand(KMP_API_NAME_GOMP_BARRIER_CANCEL)(void) -{ - if(__kmp_omp_cancellation) { - KMP_FATAL(NoGompCancellation); - } - KMP_FATAL(NoGompCancellation); - int gtid = __kmp_get_gtid(); - MKLOC(loc, "GOMP_barrier_cancel"); - KA_TRACE(20, ("GOMP_barrier_cancel: T#%d\n", gtid)); - - return __kmpc_cancel_barrier(&loc, gtid); -} - -bool -xexpand(KMP_API_NAME_GOMP_CANCEL)(int which, bool do_cancel) -{ - if(__kmp_omp_cancellation) { - KMP_FATAL(NoGompCancellation); - } else { - return FALSE; - } - - int gtid = __kmp_get_gtid(); - MKLOC(loc, "GOMP_cancel"); - KA_TRACE(20, ("GOMP_cancel: T#%d\n", gtid)); - - kmp_int32 cncl_kind = __kmp_gomp_to_omp_cancellation_kind(which); - - if(do_cancel == FALSE) { - return xexpand(KMP_API_NAME_GOMP_CANCELLATION_POINT)(which); - } else { - 
return __kmpc_cancel(&loc, gtid, cncl_kind); - } -} - -bool -xexpand(KMP_API_NAME_GOMP_SECTIONS_END_CANCEL)(void) -{ - if(__kmp_omp_cancellation) { - KMP_FATAL(NoGompCancellation); - } - int gtid = __kmp_get_gtid(); - MKLOC(loc, "GOMP_sections_end_cancel"); - KA_TRACE(20, ("GOMP_sections_end_cancel: T#%d\n", gtid)); - - return __kmpc_cancel_barrier(&loc, gtid); -} - -bool -xexpand(KMP_API_NAME_GOMP_LOOP_END_CANCEL)(void) -{ - if(__kmp_omp_cancellation) { - KMP_FATAL(NoGompCancellation); - } - int gtid = __kmp_get_gtid(); - MKLOC(loc, "GOMP_loop_end_cancel"); - KA_TRACE(20, ("GOMP_loop_end_cancel: T#%d\n", gtid)); - - return __kmpc_cancel_barrier(&loc, gtid); -} - -// All target functions are empty as of 2014-05-29 -void -xexpand(KMP_API_NAME_GOMP_TARGET)(int device, void (*fn) (void *), const void *openmp_target, - size_t mapnum, void **hostaddrs, size_t *sizes, unsigned char *kinds) -{ - return; -} - -void -xexpand(KMP_API_NAME_GOMP_TARGET_DATA)(int device, const void *openmp_target, size_t mapnum, - void **hostaddrs, size_t *sizes, unsigned char *kinds) -{ - return; -} - -void -xexpand(KMP_API_NAME_GOMP_TARGET_END_DATA)(void) -{ - return; -} - -void -xexpand(KMP_API_NAME_GOMP_TARGET_UPDATE)(int device, const void *openmp_target, size_t mapnum, - void **hostaddrs, size_t *sizes, unsigned char *kinds) -{ - return; -} - -void -xexpand(KMP_API_NAME_GOMP_TEAMS)(unsigned int num_teams, unsigned int thread_limit) -{ - return; -} -#endif // OMP_40_ENABLED - - -/* - The following sections of code create aliases for the GOMP_* functions, - then create versioned symbols using the assembler directive .symver. - This is only pertinent for ELF .so library - xaliasify and xversionify are defined in kmp_ftn_os.h -*/ - -#ifdef KMP_USE_VERSION_SYMBOLS - -// GOMP_1.0 aliases -xaliasify(KMP_API_NAME_GOMP_ATOMIC_END, 10); -xaliasify(KMP_API_NAME_GOMP_ATOMIC_START, 10); -xaliasify(KMP_API_NAME_GOMP_BARRIER, 10); -xaliasify(KMP_API_NAME_GOMP_CRITICAL_END, 10); -xaliasify(KMP_API_NAME_GOMP_CRITICAL_NAME_END, 10); -xaliasify(KMP_API_NAME_GOMP_CRITICAL_NAME_START, 10); -xaliasify(KMP_API_NAME_GOMP_CRITICAL_START, 10); -xaliasify(KMP_API_NAME_GOMP_LOOP_DYNAMIC_NEXT, 10); -xaliasify(KMP_API_NAME_GOMP_LOOP_DYNAMIC_START, 10); -xaliasify(KMP_API_NAME_GOMP_LOOP_END, 10); -xaliasify(KMP_API_NAME_GOMP_LOOP_END_NOWAIT, 10); -xaliasify(KMP_API_NAME_GOMP_LOOP_GUIDED_NEXT, 10); -xaliasify(KMP_API_NAME_GOMP_LOOP_GUIDED_START, 10); -xaliasify(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_NEXT, 10); -xaliasify(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_START, 10); -xaliasify(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_NEXT, 10); -xaliasify(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_START, 10); -xaliasify(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_NEXT, 10); -xaliasify(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_START, 10); -xaliasify(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_NEXT, 10); -xaliasify(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_START, 10); -xaliasify(KMP_API_NAME_GOMP_LOOP_RUNTIME_NEXT, 10); -xaliasify(KMP_API_NAME_GOMP_LOOP_RUNTIME_START, 10); -xaliasify(KMP_API_NAME_GOMP_LOOP_STATIC_NEXT, 10); -xaliasify(KMP_API_NAME_GOMP_LOOP_STATIC_START, 10); -xaliasify(KMP_API_NAME_GOMP_ORDERED_END, 10); -xaliasify(KMP_API_NAME_GOMP_ORDERED_START, 10); -xaliasify(KMP_API_NAME_GOMP_PARALLEL_END, 10); -xaliasify(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC_START, 10); -xaliasify(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED_START, 10); -xaliasify(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME_START, 10); -xaliasify(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC_START, 10); 
-xaliasify(KMP_API_NAME_GOMP_PARALLEL_SECTIONS_START, 10); -xaliasify(KMP_API_NAME_GOMP_PARALLEL_START, 10); -xaliasify(KMP_API_NAME_GOMP_SECTIONS_END, 10); -xaliasify(KMP_API_NAME_GOMP_SECTIONS_END_NOWAIT, 10); -xaliasify(KMP_API_NAME_GOMP_SECTIONS_NEXT, 10); -xaliasify(KMP_API_NAME_GOMP_SECTIONS_START, 10); -xaliasify(KMP_API_NAME_GOMP_SINGLE_COPY_END, 10); -xaliasify(KMP_API_NAME_GOMP_SINGLE_COPY_START, 10); -xaliasify(KMP_API_NAME_GOMP_SINGLE_START, 10); - -// GOMP_2.0 aliases -xaliasify(KMP_API_NAME_GOMP_TASK, 20); -xaliasify(KMP_API_NAME_GOMP_TASKWAIT, 20); -xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_NEXT, 20); -xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_START, 20); -xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_NEXT, 20); -xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_START, 20); -xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_NEXT, 20); -xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_START, 20); -xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_NEXT, 20); -xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_START, 20); -xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_NEXT, 20); -xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_START, 20); -xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_NEXT, 20); -xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_START, 20); -xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_NEXT, 20); -xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_START, 20); -xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_NEXT, 20); -xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_START, 20); - -// GOMP_3.0 aliases -xaliasify(KMP_API_NAME_GOMP_TASKYIELD, 30); - -// GOMP_4.0 aliases -// The GOMP_parallel* entry points below aren't OpenMP 4.0 related. -#if OMP_40_ENABLED -xaliasify(KMP_API_NAME_GOMP_PARALLEL, 40); -xaliasify(KMP_API_NAME_GOMP_PARALLEL_SECTIONS, 40); -xaliasify(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC, 40); -xaliasify(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED, 40); -xaliasify(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME, 40); -xaliasify(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC, 40); -xaliasify(KMP_API_NAME_GOMP_TASKGROUP_START, 40); -xaliasify(KMP_API_NAME_GOMP_TASKGROUP_END, 40); -xaliasify(KMP_API_NAME_GOMP_BARRIER_CANCEL, 40); -xaliasify(KMP_API_NAME_GOMP_CANCEL, 40); -xaliasify(KMP_API_NAME_GOMP_CANCELLATION_POINT, 40); -xaliasify(KMP_API_NAME_GOMP_LOOP_END_CANCEL, 40); -xaliasify(KMP_API_NAME_GOMP_SECTIONS_END_CANCEL, 40); -xaliasify(KMP_API_NAME_GOMP_TARGET, 40); -xaliasify(KMP_API_NAME_GOMP_TARGET_DATA, 40); -xaliasify(KMP_API_NAME_GOMP_TARGET_END_DATA, 40); -xaliasify(KMP_API_NAME_GOMP_TARGET_UPDATE, 40); -xaliasify(KMP_API_NAME_GOMP_TEAMS, 40); -#endif - -// GOMP_1.0 versioned symbols -xversionify(KMP_API_NAME_GOMP_ATOMIC_END, 10, "GOMP_1.0"); -xversionify(KMP_API_NAME_GOMP_ATOMIC_START, 10, "GOMP_1.0"); -xversionify(KMP_API_NAME_GOMP_BARRIER, 10, "GOMP_1.0"); -xversionify(KMP_API_NAME_GOMP_CRITICAL_END, 10, "GOMP_1.0"); -xversionify(KMP_API_NAME_GOMP_CRITICAL_NAME_END, 10, "GOMP_1.0"); -xversionify(KMP_API_NAME_GOMP_CRITICAL_NAME_START, 10, "GOMP_1.0"); -xversionify(KMP_API_NAME_GOMP_CRITICAL_START, 10, "GOMP_1.0"); -xversionify(KMP_API_NAME_GOMP_LOOP_DYNAMIC_NEXT, 10, "GOMP_1.0"); -xversionify(KMP_API_NAME_GOMP_LOOP_DYNAMIC_START, 10, "GOMP_1.0"); -xversionify(KMP_API_NAME_GOMP_LOOP_END, 10, "GOMP_1.0"); -xversionify(KMP_API_NAME_GOMP_LOOP_END_NOWAIT, 10, "GOMP_1.0"); -xversionify(KMP_API_NAME_GOMP_LOOP_GUIDED_NEXT, 10, "GOMP_1.0"); -xversionify(KMP_API_NAME_GOMP_LOOP_GUIDED_START, 10, "GOMP_1.0"); 
-xversionify(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_NEXT, 10, "GOMP_1.0");
-xversionify(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_START, 10, "GOMP_1.0");
-xversionify(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_NEXT, 10, "GOMP_1.0");
-xversionify(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_START, 10, "GOMP_1.0");
-xversionify(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_NEXT, 10, "GOMP_1.0");
-xversionify(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_START, 10, "GOMP_1.0");
-xversionify(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_NEXT, 10, "GOMP_1.0");
-xversionify(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_START, 10, "GOMP_1.0");
-xversionify(KMP_API_NAME_GOMP_LOOP_RUNTIME_NEXT, 10, "GOMP_1.0");
-xversionify(KMP_API_NAME_GOMP_LOOP_RUNTIME_START, 10, "GOMP_1.0");
-xversionify(KMP_API_NAME_GOMP_LOOP_STATIC_NEXT, 10, "GOMP_1.0");
-xversionify(KMP_API_NAME_GOMP_LOOP_STATIC_START, 10, "GOMP_1.0");
-xversionify(KMP_API_NAME_GOMP_ORDERED_END, 10, "GOMP_1.0");
-xversionify(KMP_API_NAME_GOMP_ORDERED_START, 10, "GOMP_1.0");
-xversionify(KMP_API_NAME_GOMP_PARALLEL_END, 10, "GOMP_1.0");
-xversionify(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC_START, 10, "GOMP_1.0");
-xversionify(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED_START, 10, "GOMP_1.0");
-xversionify(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME_START, 10, "GOMP_1.0");
-xversionify(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC_START, 10, "GOMP_1.0");
-xversionify(KMP_API_NAME_GOMP_PARALLEL_SECTIONS_START, 10, "GOMP_1.0");
-xversionify(KMP_API_NAME_GOMP_PARALLEL_START, 10, "GOMP_1.0");
-xversionify(KMP_API_NAME_GOMP_SECTIONS_END, 10, "GOMP_1.0");
-xversionify(KMP_API_NAME_GOMP_SECTIONS_END_NOWAIT, 10, "GOMP_1.0");
-xversionify(KMP_API_NAME_GOMP_SECTIONS_NEXT, 10, "GOMP_1.0");
-xversionify(KMP_API_NAME_GOMP_SECTIONS_START, 10, "GOMP_1.0");
-xversionify(KMP_API_NAME_GOMP_SINGLE_COPY_END, 10, "GOMP_1.0");
-xversionify(KMP_API_NAME_GOMP_SINGLE_COPY_START, 10, "GOMP_1.0");
-xversionify(KMP_API_NAME_GOMP_SINGLE_START, 10, "GOMP_1.0");
-
-// GOMP_2.0 versioned symbols
-xversionify(KMP_API_NAME_GOMP_TASK, 20, "GOMP_2.0");
-xversionify(KMP_API_NAME_GOMP_TASKWAIT, 20, "GOMP_2.0");
-xversionify(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_NEXT, 20, "GOMP_2.0");
-xversionify(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_START, 20, "GOMP_2.0");
-xversionify(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_NEXT, 20, "GOMP_2.0");
-xversionify(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_START, 20, "GOMP_2.0");
-xversionify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_NEXT, 20, "GOMP_2.0");
-xversionify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_START, 20, "GOMP_2.0");
-xversionify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_NEXT, 20, "GOMP_2.0");
-xversionify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_START, 20, "GOMP_2.0");
-xversionify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_NEXT, 20, "GOMP_2.0");
-xversionify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_START, 20, "GOMP_2.0");
-xversionify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_NEXT, 20, "GOMP_2.0");
-xversionify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_START, 20, "GOMP_2.0");
-xversionify(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_NEXT, 20, "GOMP_2.0");
-xversionify(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_START, 20, "GOMP_2.0");
-xversionify(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_NEXT, 20, "GOMP_2.0");
-xversionify(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_START, 20, "GOMP_2.0");
-
-// GOMP_3.0 versioned symbols
-xversionify(KMP_API_NAME_GOMP_TASKYIELD, 30, "GOMP_3.0");
-
-// GOMP_4.0 versioned symbols
-#if OMP_40_ENABLED
-xversionify(KMP_API_NAME_GOMP_PARALLEL, 40, "GOMP_4.0");
-xversionify(KMP_API_NAME_GOMP_PARALLEL_SECTIONS, 40, "GOMP_4.0");
-xversionify(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC, 40, "GOMP_4.0");
-xversionify(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED, 40, "GOMP_4.0");
-xversionify(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME, 40, "GOMP_4.0");
-xversionify(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC, 40, "GOMP_4.0");
-xversionify(KMP_API_NAME_GOMP_TASKGROUP_START, 40, "GOMP_4.0");
-xversionify(KMP_API_NAME_GOMP_TASKGROUP_END, 40, "GOMP_4.0");
-xversionify(KMP_API_NAME_GOMP_BARRIER_CANCEL, 40, "GOMP_4.0");
-xversionify(KMP_API_NAME_GOMP_CANCEL, 40, "GOMP_4.0");
-xversionify(KMP_API_NAME_GOMP_CANCELLATION_POINT, 40, "GOMP_4.0");
-xversionify(KMP_API_NAME_GOMP_LOOP_END_CANCEL, 40, "GOMP_4.0");
-xversionify(KMP_API_NAME_GOMP_SECTIONS_END_CANCEL, 40, "GOMP_4.0");
-xversionify(KMP_API_NAME_GOMP_TARGET, 40, "GOMP_4.0");
-xversionify(KMP_API_NAME_GOMP_TARGET_DATA, 40, "GOMP_4.0");
-xversionify(KMP_API_NAME_GOMP_TARGET_END_DATA, 40, "GOMP_4.0");
-xversionify(KMP_API_NAME_GOMP_TARGET_UPDATE, 40, "GOMP_4.0");
-xversionify(KMP_API_NAME_GOMP_TEAMS, 40, "GOMP_4.0");
-#endif
-
-#endif // KMP_USE_VERSION_SYMBOLS
-
-#ifdef __cplusplus
-    } //extern "C"
-#endif // __cplusplus
-
-
Index: runtime/src/kmp_gsupport.cpp
===================================================================
--- runtime/src/kmp_gsupport.cpp
+++ runtime/src/kmp_gsupport.cpp
@@ -1,5 +1,5 @@
 /*
- * kmp_gsupport.c
+ * kmp_gsupport.cpp
  */
Index: runtime/src/kmp_i18n.c
===================================================================
--- runtime/src/kmp_i18n.c
+++ runtime/src/kmp_i18n.c
@@ -1,995 +0,0 @@
-/*
- * kmp_i18n.c
- */
-
-
-//===----------------------------------------------------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is dual licensed under the MIT and the University of Illinois Open
-// Source Licenses. See LICENSE.txt for details.
-//
-//===----------------------------------------------------------------------===//
-
-
-
-#include "kmp_i18n.h"
-
-#include "kmp_os.h"
-#include "kmp_debug.h"
-#include "kmp.h"
-#include "kmp_lock.h"
-#include "kmp_io.h"            // __kmp_printf.
-
-#include <stdio.h>
-#include <errno.h>
-#include <string.h>
-#include <locale.h>
-#include <stdarg.h>
-
-#include "kmp_i18n_default.inc"
-#include "kmp_str.h"
-#include "kmp_environment.h"
-
-#undef KMP_I18N_OK
-
-#define get_section( id )    ( (id) >> 16 )
-#define get_number( id )     ( (id) & 0xFFFF )
-
-kmp_msg_t __kmp_msg_empty = { kmp_mt_dummy, 0, "", 0 };
-kmp_msg_t __kmp_msg_null  = { kmp_mt_dummy, 0, NULL, 0 };
-static char const * no_message_available = "(No message available)";
-
-enum kmp_i18n_cat_status {
-    KMP_I18N_CLOSED,    // Not yet opened or closed.
-    KMP_I18N_OPENED,    // Opened successfully, ready to use.
-    KMP_I18N_ABSENT     // Opening failed, message catalog should not be used.
-}; // enum kmp_i18n_cat_status
-typedef enum kmp_i18n_cat_status  kmp_i18n_cat_status_t;
-static volatile kmp_i18n_cat_status_t  status = KMP_I18N_CLOSED;
-
-/*
-    Message catalog is opened at first usage, so we have to synchronize opening to avoid race and
-    multiple openings.
-
-    Closing does not require synchronization, because catalog is closed very late at library
-    shutting down, when no other threads are alive.
-*/
-
-static void __kmp_i18n_do_catopen();
-static kmp_bootstrap_lock_t  lock = KMP_BOOTSTRAP_LOCK_INITIALIZER( lock );
-    // The `lock' variable could be placed into the __kmp_i18n_catopen function because it is used
-    // only by that function. But we are afraid a (buggy) compiler may treat it wrongly, so we put
-    // it outside of the function just in case.
-
-void
-__kmp_i18n_catopen(
-) {
-    if ( status == KMP_I18N_CLOSED ) {
-        __kmp_acquire_bootstrap_lock( & lock );
-        if ( status == KMP_I18N_CLOSED ) {
-            __kmp_i18n_do_catopen();
-        }; // if
-        __kmp_release_bootstrap_lock( & lock );
-    }; // if
-} // func __kmp_i18n_catopen
-
-
-/*
-    ================================================================================================
-    Linux* OS and OS X* part.
-    ================================================================================================
-*/
-
-#if KMP_OS_UNIX
-#define KMP_I18N_OK
-
-#include <nl_types.h>
-
-#define KMP_I18N_NULLCAT ((nl_catd)( -1 ))
-static nl_catd       cat  = KMP_I18N_NULLCAT;    // !!! Shall it be volatile?
-static char const *  name = ( KMP_VERSION_MAJOR == 4 ? "libguide.cat" : "libomp.cat" );
-
-/*
-    Useful links:
-        http://www.opengroup.org/onlinepubs/000095399/basedefs/xbd_chap08.html#tag_08_02
-        http://www.opengroup.org/onlinepubs/000095399/functions/catopen.html
-        http://www.opengroup.org/onlinepubs/000095399/functions/setlocale.html
-*/
-
-void
-__kmp_i18n_do_catopen(
-) {
-    int    english = 0;
-    char * lang    = __kmp_env_get( "LANG" );
-    // TODO: What about LC_ALL or LC_MESSAGES?
-
-    KMP_DEBUG_ASSERT( status == KMP_I18N_CLOSED );
-    KMP_DEBUG_ASSERT( cat == KMP_I18N_NULLCAT );
-
-    english =
-        lang == NULL                  ||  // In all these cases the English language is used.
-        strcmp( lang, "" )      == 0  ||
-        strcmp( lang, " " )     == 0  ||
-            // Workaround for Fortran RTL bug DPD200137873 "Fortran runtime resets LANG env var
-            // to space if it is not set".
-        strcmp( lang, "C" )     == 0  ||
-        strcmp( lang, "POSIX" ) == 0;
-
-    if ( ! english ) {  // The English language is not yet detected, let us continue.
-        // Format of LANG is: [language[_territory][.codeset][@modifier]]
-        // Strip all parts except language.
-        char * tail = NULL;
-        __kmp_str_split( lang, '@', & lang, & tail );
-        __kmp_str_split( lang, '.', & lang, & tail );
-        __kmp_str_split( lang, '_', & lang, & tail );
-        english = ( strcmp( lang, "en" ) == 0 );
-    }; // if
-
-    KMP_INTERNAL_FREE( lang );
-
-    // Do not try to open the English catalog because internal messages are
-    // an exact copy of the messages in the English catalog.
-    if ( english ) {
-        status = KMP_I18N_ABSENT;  // mark catalog as absent so it will not be re-opened.
-        return;
-    }
-
-    cat = catopen( name, 0 );
-    // TODO: Why do we pass 0 in flags?
-    status = ( cat == KMP_I18N_NULLCAT ? KMP_I18N_ABSENT : KMP_I18N_OPENED );
-
-    if ( status == KMP_I18N_ABSENT ) {
-        if (__kmp_generate_warnings > kmp_warnings_low) { // AC: only issue the warning if explicitly asked to
-            int    error   = errno; // Save errno immediately.
-            char * nlspath = __kmp_env_get( "NLSPATH" );
-            char * lang    = __kmp_env_get( "LANG" );
-
-            // Infinite recursion will not occur -- status is KMP_I18N_ABSENT now, so
-            // __kmp_i18n_catgets() will not try to open the catalog, but will return the default message.
-            kmp_msg_t err_code = KMP_ERR( error );
-            __kmp_msg(
-                kmp_ms_warning,
-                KMP_MSG( CantOpenMessageCatalog, name ),
-                err_code,
-                KMP_HNT( CheckEnvVar, "NLSPATH", nlspath ),
-                KMP_HNT( CheckEnvVar, "LANG", lang ),
-                __kmp_msg_null
-            );
-            if (__kmp_generate_warnings == kmp_warnings_off) {
-                __kmp_str_free(&err_code.str);
-            }
-
-            KMP_INFORM( WillUseDefaultMessages );
-            KMP_INTERNAL_FREE( nlspath );
-            KMP_INTERNAL_FREE( lang );
-        }
-    } else { // status == KMP_I18N_OPENED
-
-        int section = get_section( kmp_i18n_prp_Version );
-        int number  = get_number( kmp_i18n_prp_Version );
-        char const * expected = __kmp_i18n_default_table.sect[ section ].str[ number ];
-            // Expected version of the catalog.
-        kmp_str_buf_t version;   // Actual version of the catalog.
-        __kmp_str_buf_init( & version );
-        __kmp_str_buf_print( & version, "%s", catgets( cat, section, number, NULL ) );
-
-        // The string returned by catgets is invalid after closing the catalog, so copy it.
-        if ( strcmp( version.str, expected ) != 0 ) {
-            __kmp_i18n_catclose();     // Close bad catalog.
-            status = KMP_I18N_ABSENT;  // And mark it as absent.
-            if (__kmp_generate_warnings > kmp_warnings_low) { // AC: only issue the warning if explicitly asked to
-                // And now print a warning using default messages.
-                char const * name    = "NLSPATH";
-                char const * nlspath = __kmp_env_get( name );
-                __kmp_msg(
-                    kmp_ms_warning,
-                    KMP_MSG( WrongMessageCatalog, name, version.str, expected ),
-                    KMP_HNT( CheckEnvVar, name, nlspath ),
-                    __kmp_msg_null
-                );
-                KMP_INFORM( WillUseDefaultMessages );
-                KMP_INTERNAL_FREE( (void *) nlspath );
-            } // __kmp_generate_warnings
-        }; // if
-        __kmp_str_buf_free( & version );
-
-    }; // if
-
-} // func __kmp_i18n_do_catopen
-
-
-void
-__kmp_i18n_catclose(
-) {
-    if ( status == KMP_I18N_OPENED ) {
-        KMP_DEBUG_ASSERT( cat != KMP_I18N_NULLCAT );
-        catclose( cat );
-        cat = KMP_I18N_NULLCAT;
-    }; // if
-    status = KMP_I18N_CLOSED;
-} // func __kmp_i18n_catclose
-
-
-char const *
-__kmp_i18n_catgets(
-    kmp_i18n_id_t  id
-) {
-
-    int section = get_section( id );
-    int number  = get_number( id );
-    char const * message = NULL;
-
-    if ( 1 <= section && section <= __kmp_i18n_default_table.size ) {
-        if ( 1 <= number && number <= __kmp_i18n_default_table.sect[ section ].size ) {
-            if ( status == KMP_I18N_CLOSED ) {
-                __kmp_i18n_catopen();
-            }; // if
-            if ( status == KMP_I18N_OPENED ) {
-                message =
-                    catgets(
-                        cat,
-                        section, number,
-                        __kmp_i18n_default_table.sect[ section ].str[ number ]
-                    );
-            }; // if
-            if ( message == NULL ) {
-                message = __kmp_i18n_default_table.sect[ section ].str[ number ];
-            }; // if
-        }; // if
-    }; // if
-    if ( message == NULL ) {
-        message = no_message_available;
-    }; // if
-    return message;
-
-} // func __kmp_i18n_catgets
-
-
-#endif // KMP_OS_UNIX
-
-/*
-    ================================================================================================
-    Windows* OS part.
-    ================================================================================================
-*/
-
-#if KMP_OS_WINDOWS
-#define KMP_I18N_OK
-
-#include "kmp_environment.h"
-#include <windows.h>
-
-#define KMP_I18N_NULLCAT NULL
-static HMODULE       cat  = KMP_I18N_NULLCAT;    // !!! Shall it be volatile?
-static char const *  name = ( KMP_VERSION_MAJOR == 4 ? "libguide40ui.dll" : "libompui.dll" );
-
-static kmp_i18n_table_t  table = { 0, NULL };
-    // Messages formatted by FormatMessage() should be freed, but the catgets() interface assumes
-    // the user will not free messages. So we cache all the retrieved messages in the table, which
-    // are freed at catclose().
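The caching scheme described above reduces to a lazily grown two-level table. Here is a minimal sketch; the names and fixed bounds are stand-ins (the real code sizes both levels from __kmp_i18n_default_table and releases everything in kmp_i18n_table_free()):

```c
#include <stdlib.h>

#define NSECT 8     /* stand-ins; the runtime derives these bounds */
#define NMSG  64    /* from __kmp_i18n_default_table instead       */

typedef struct { int size; char const **str; } section_t;
static struct { int size; section_t *sect; } cache = { 0, NULL };

/* Return the cached message for (s, n), formatting it at most once.
 * fmt() stands in for the expensive FormatMessage()-based lookup.
 * Allocation-failure handling is elided for brevity. */
char const *cached_get(int s, int n, char const *(*fmt)(int, int)) {
    if (cache.sect == NULL) {                        /* level 1 on demand */
        cache.sect = calloc(NSECT, sizeof *cache.sect);
        cache.size = NSECT;
    }
    if (cache.sect[s].str == NULL) {                 /* level 2 on demand */
        cache.sect[s].str = calloc(NMSG, sizeof *cache.sect[s].str);
        cache.sect[s].size = NMSG;
    }
    if (cache.sect[s].str[n] == NULL)                /* format exactly once */
        cache.sect[s].str[n] = fmt(s, n);
    return cache.sect[s].str[n];                     /* owned by the cache */
}
```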
-static UINT const default_code_page = CP_OEMCP;
-static UINT       code_page         = default_code_page;
-
-static char const * ___catgets( kmp_i18n_id_t  id );
-static UINT get_code_page();
-static void kmp_i18n_table_free( kmp_i18n_table_t * table );
-
-
-static UINT
-get_code_page(
-) {
-
-    UINT cp = default_code_page;
-    char const * value = __kmp_env_get( "KMP_CODEPAGE" );
-    if ( value != NULL ) {
-        if ( _stricmp( value, "ANSI" ) == 0 ) {
-            cp = CP_ACP;
-        } else if ( _stricmp( value, "OEM" ) == 0 ) {
-            cp = CP_OEMCP;
-        } else if ( _stricmp( value, "UTF-8" ) == 0 || _stricmp( value, "UTF8" ) == 0 ) {
-            cp = CP_UTF8;
-        } else if ( _stricmp( value, "UTF-7" ) == 0 || _stricmp( value, "UTF7" ) == 0 ) {
-            cp = CP_UTF7;
-        } else {
-            // !!! TODO: Issue a warning?
-        }; // if
-    }; // if
-    KMP_INTERNAL_FREE( (void *) value );
-    return cp;
-
-} // func get_code_page
-
-
-static void
-kmp_i18n_table_free(
-    kmp_i18n_table_t * table
-) {
-    int s;
-    int m;
-    for ( s = 0; s < table->size; ++ s ) {
-        for ( m = 0; m < table->sect[ s ].size; ++ m ) {
-            // Free message.
-            KMP_INTERNAL_FREE( (void *) table->sect[ s ].str[ m ] );
-            table->sect[ s ].str[ m ] = NULL;
-        }; // for m
-        table->sect[ s ].size = 0;
-        // Free section itself.
-        KMP_INTERNAL_FREE ( (void *) table->sect[ s ].str );
-        table->sect[ s ].str = NULL;
-    }; // for s
-    table->size = 0;
-    KMP_INTERNAL_FREE( (void *) table->sect );
-    table->sect = NULL;
-} // kmp_i18n_table_free
-
-
-void
-__kmp_i18n_do_catopen(
-) {
-
-    LCID locale_id = GetThreadLocale();
-    WORD lang_id = LANGIDFROMLCID( locale_id );
-    WORD primary_lang_id = PRIMARYLANGID( lang_id );
-    kmp_str_buf_t path;
-
-    KMP_DEBUG_ASSERT( status == KMP_I18N_CLOSED );
-    KMP_DEBUG_ASSERT( cat == KMP_I18N_NULLCAT );
-
-    __kmp_str_buf_init( & path );
-
-    // Do not try to open the English catalog because internal messages are
-    // an exact copy of the messages in the English catalog.
-    if ( primary_lang_id == LANG_ENGLISH ) {
-        status = KMP_I18N_ABSENT;  // mark catalog as absent so it will not be re-opened.
-        goto end;
-    }; // if
-
-    // Construct resource DLL name.
-    /*
-        A simple LoadLibrary( name ) is not suitable due to a security issue (see
-        http://www.microsoft.com/technet/security/advisory/2269637.mspx). We have to specify the
-        full path to the message catalog.
-    */
-    {
-
-        // Get the handle of our DLL first.
-        HMODULE handle;
-        BOOL brc =
-            GetModuleHandleEx(
-                GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS | GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT,
-                reinterpret_cast< LPCSTR >( & __kmp_i18n_do_catopen ),
-                & handle
-            );
-        if ( ! brc ) {    // Error occurred.
-            status = KMP_I18N_ABSENT;  // mark catalog as absent so it will not be re-opened.
-            goto end;
-            // TODO: Enable multiple messages (KMP_MSG) to be passed to __kmp_msg; and print
-            // a proper warning.
-        }; // if
-
-        // Now get the path to our DLL.
-        for ( ; ; ) {
-            DWORD drc = GetModuleFileName( handle, path.str, path.size );
-            if ( drc == 0 ) {    // Error occurred.
-                status = KMP_I18N_ABSENT;
-                goto end;
-            }; // if
-            if ( drc < path.size ) {
-                path.used = drc;
-                break;
-            }; // if
-            __kmp_str_buf_reserve( & path, path.size * 2 );
-        }; // forever
-
-        // Now construct the name of the message catalog.
-        kmp_str_fname fname;
-        __kmp_str_fname_init( & fname, path.str );
-        __kmp_str_buf_clear( & path );
-        __kmp_str_buf_print( & path, "%s%lu/%s", fname.dir, (unsigned long)( locale_id ), name );
-        __kmp_str_fname_free( & fname );
-
-    }
-
-    // For security reasons, use LoadLibraryEx() and load the message catalog as a data file.
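// (Loading with LOAD_LIBRARY_AS_DATAFILE maps the module as plain data:
// DllMain is never called and no code from the file becomes executable,
// while its resources -- such as the message table -- stay readable.
// Combined with the full path built above, this defeats DLL planting on
// the library search path.)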
-    cat = LoadLibraryEx( path.str, NULL, LOAD_LIBRARY_AS_DATAFILE );
-    status = ( cat == KMP_I18N_NULLCAT ? KMP_I18N_ABSENT : KMP_I18N_OPENED );
-
-    if ( status == KMP_I18N_ABSENT ) {
-        if (__kmp_generate_warnings > kmp_warnings_low) { // AC: only issue the warning if explicitly asked to
-            DWORD error = GetLastError();
-            // Infinite recursion will not occur -- status is KMP_I18N_ABSENT now, so
-            // __kmp_i18n_catgets() will not try to open the catalog but will return the default message.
-            /*
-                If a message catalog for another architecture is found (e.g. the OpenMP RTL for the
-                IA-32 architecture opens libompui.dll for Intel(R) 64), Windows* OS returns
-                error 193 (ERROR_BAD_EXE_FORMAT). However, FormatMessage fails to return a
-                message for this error, so the user will see:
-
-                OMP: Warning #2: Cannot open message catalog "1041\libompui.dll":
-                OMP: System error #193: (No system error message available)
-                OMP: Info #3: Default messages will be used.
-
-                Issue a hint in this case to make the cause of the trouble more understandable.
-            */
-            kmp_msg_t err_code = KMP_SYSERRCODE(error);
-            __kmp_msg(
-                kmp_ms_warning,
-                KMP_MSG( CantOpenMessageCatalog, path.str ),
-                err_code,
-                ( error == ERROR_BAD_EXE_FORMAT ? KMP_HNT( BadExeFormat, path.str, KMP_ARCH_STR ) : __kmp_msg_null ),
-                __kmp_msg_null
-            );
-            if (__kmp_generate_warnings == kmp_warnings_off) {
-                __kmp_str_free(&err_code.str);
-            }
-
-            KMP_INFORM( WillUseDefaultMessages );
-        }
-    } else { // status == KMP_I18N_OPENED
-
-        int section = get_section( kmp_i18n_prp_Version );
-        int number  = get_number( kmp_i18n_prp_Version );
-        char const * expected = __kmp_i18n_default_table.sect[ section ].str[ number ];
-        kmp_str_buf_t version;   // Actual version of the catalog.
-        __kmp_str_buf_init( & version );
-        __kmp_str_buf_print( & version, "%s", ___catgets( kmp_i18n_prp_Version ) );
-        // The string returned by catgets is invalid after closing the catalog, so copy it.
-        if ( strcmp( version.str, expected ) != 0 ) {
-            // Close bad catalog.
-            __kmp_i18n_catclose();
-            status = KMP_I18N_ABSENT;  // And mark it as absent.
-            if (__kmp_generate_warnings > kmp_warnings_low) {
-                // And now print a warning using default messages.
-                __kmp_msg(
-                    kmp_ms_warning,
-                    KMP_MSG( WrongMessageCatalog, path.str, version.str, expected ),
-                    __kmp_msg_null
-                );
-                KMP_INFORM( WillUseDefaultMessages );
-            } // __kmp_generate_warnings
-        }; // if
-        __kmp_str_buf_free( & version );
-
-    }; // if
-    code_page = get_code_page();
-
-    end:
-    __kmp_str_buf_free( & path );
-    return;
-
-} // func __kmp_i18n_do_catopen
-
-
-void
-__kmp_i18n_catclose(
-) {
-    if ( status == KMP_I18N_OPENED ) {
-        KMP_DEBUG_ASSERT( cat != KMP_I18N_NULLCAT );
-        kmp_i18n_table_free( & table );
-        FreeLibrary( cat );
-        cat = KMP_I18N_NULLCAT;
-    }; // if
-    code_page = default_code_page;
-    status = KMP_I18N_CLOSED;
-} // func __kmp_i18n_catclose
-
-/*
-    We use FormatMessage() to get strings from the catalog, get system error messages, etc.
-    FormatMessage() tends to return Windows* OS-style end-of-lines, "\r\n". When the string is
-    printed, printf() also replaces all the occurrences of "\n" with "\r\n" (again!), so sequences
-    like "\r\r\r\n" appear in the output. That is not good.
-
-    Additional mess comes from the message catalog: our catalog source en_US.mc file (generated by
-    message-converter.pl) contains only "\n" characters, but the en_US_msg_1033.bin file (produced
-    by mc.exe) may contain "\r\n" or just "\n". This mess goes from the en_US_msg_1033.bin file to
-    the message catalog, libompui.dll.
For example, message - - Error - - (there is "\n" at the end) is compiled by mc.exe to "Error\r\n", while - - OMP: Error %1!d!: %2!s!\n - - (there is "\n" at the end as well) is compiled to "OMP: Error %1!d!: %2!s!\r\n\n". - - Thus, stripping all "\r" normalizes string and returns it to canonical form, so printf() will - produce correct end-of-line sequences. - - ___strip_crs() serves for this purpose: it removes all the occurrences of "\r" in-place and - returns new length of string. -*/ -static -int -___strip_crs( - char * str -) { - int in = 0; // Input character index. - int out = 0; // Output character index. - for ( ; ; ) { - if ( str[ in ] != '\r' ) { - str[ out ] = str[ in ]; - ++ out; - }; // if - if ( str[ in ] == 0 ) { - break; - }; // if - ++ in; - }; // forever - return out - 1; -} // func __strip_crs - - -static -char const * -___catgets( - kmp_i18n_id_t id -) { - - char * result = NULL; - PVOID addr = NULL; - wchar_t * wmsg = NULL; - DWORD wlen = 0; - char * msg = NULL; - int len = 0; - int rc; - - KMP_DEBUG_ASSERT( cat != KMP_I18N_NULLCAT ); - wlen = // wlen does *not* include terminating null. - FormatMessageW( - FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_HMODULE | - FORMAT_MESSAGE_IGNORE_INSERTS, - cat, - id, - 0, // LangId - (LPWSTR) & addr, - 0, // Size in elements, not in bytes. - NULL - ); - if ( wlen <= 0 ) { - goto end; - }; // if - wmsg = (wchar_t *) addr; // Warning: wmsg may be not nul-terminated! - - // Calculate length of multibyte message. - len = // Since wlen does not include terminating null, len does not include it also. - WideCharToMultiByte( - code_page, - 0, // Flags. - wmsg, wlen, // Wide buffer and size. - NULL, 0, // Buffer and size. - NULL, NULL // Default char and used default char. - ); - if ( len <= 0 ) { - goto end; - }; // if - - // Allocate memory. - msg = (char *) KMP_INTERNAL_MALLOC( len + 1 ); - - // Convert wide message to multibyte one. - rc = - WideCharToMultiByte( - code_page, - 0, // Flags. - wmsg, wlen, // Wide buffer and size. - msg, len, // Buffer and size. - NULL, NULL // Default char and used default char. - ); - if ( rc <= 0 || rc > len ) { - goto end; - }; // if - KMP_DEBUG_ASSERT( rc == len ); - len = rc; - msg[ len ] = 0; // Put terminating null to the end. - - // Stripping all "\r" before stripping last end-of-line simplifies the task. - len = ___strip_crs( msg ); - - // Every message in catalog is terminated with "\n". Strip it. - if ( len >= 1 && msg[ len - 1 ] == '\n' ) { - -- len; - msg[ len ] = 0; - }; // if - - // Everything looks ok. 
- result = msg; - msg = NULL; - - end: - - if ( msg != NULL ) { - KMP_INTERNAL_FREE( msg ); - }; // if - if ( wmsg != NULL ) { - LocalFree( wmsg ); - }; // if - - return result; - -} // ___catgets - - -char const * -__kmp_i18n_catgets( - kmp_i18n_id_t id -) { - - int section = get_section( id ); - int number = get_number( id ); - char const * message = NULL; - - if ( 1 <= section && section <= __kmp_i18n_default_table.size ) { - if ( 1 <= number && number <= __kmp_i18n_default_table.sect[ section ].size ) { - if ( status == KMP_I18N_CLOSED ) { - __kmp_i18n_catopen(); - }; // if - if ( cat != KMP_I18N_NULLCAT ) { - if ( table.size == 0 ) { - table.sect = (kmp_i18n_section_t *) - KMP_INTERNAL_CALLOC( - ( __kmp_i18n_default_table.size + 2 ), - sizeof( kmp_i18n_section_t ) - ); - table.size = __kmp_i18n_default_table.size; - }; // if - if ( table.sect[ section ].size == 0 ) { - table.sect[ section ].str = (const char **) - KMP_INTERNAL_CALLOC( - __kmp_i18n_default_table.sect[ section ].size + 2, - sizeof( char const * ) - ); - table.sect[ section ].size = __kmp_i18n_default_table.sect[ section ].size; - }; // if - if ( table.sect[ section ].str[ number ] == NULL ) { - table.sect[ section ].str[ number ] = ___catgets( id ); - }; // if - message = table.sect[ section ].str[ number ]; - }; // if - if ( message == NULL ) { - // Catalog is not opened or message is not found, return default message. - message = __kmp_i18n_default_table.sect[ section ].str[ number ]; - }; // if - }; // if - }; // if - if ( message == NULL ) { - message = no_message_available; - }; // if - return message; - -} // func __kmp_i18n_catgets - - -#endif // KMP_OS_WINDOWS - -// ------------------------------------------------------------------------------------------------- - -#ifndef KMP_I18N_OK - #error I18n support is not implemented for this OS. -#endif // KMP_I18N_OK - -// ------------------------------------------------------------------------------------------------- - -void -__kmp_i18n_dump_catalog( - kmp_str_buf_t * buffer -) { - - struct kmp_i18n_id_range_t { - kmp_i18n_id_t first; - kmp_i18n_id_t last; - }; // struct kmp_i18n_id_range_t - - static struct kmp_i18n_id_range_t ranges[] = { - { kmp_i18n_prp_first, kmp_i18n_prp_last }, - { kmp_i18n_str_first, kmp_i18n_str_last }, - { kmp_i18n_fmt_first, kmp_i18n_fmt_last }, - { kmp_i18n_msg_first, kmp_i18n_msg_last }, - { kmp_i18n_hnt_first, kmp_i18n_hnt_last } - }; // ranges - - int num_of_ranges = sizeof( ranges ) / sizeof( struct kmp_i18n_id_range_t ); - int range; - kmp_i18n_id_t id; - - for ( range = 0; range < num_of_ranges; ++ range ) { - __kmp_str_buf_print( buffer, "*** Set #%d ***\n", range + 1 ); - for ( id = (kmp_i18n_id_t)( ranges[ range ].first + 1 ); - id < ranges[ range ].last; - id = (kmp_i18n_id_t)( id + 1 ) ) { - __kmp_str_buf_print( buffer, "%d: <<%s>>\n", id, __kmp_i18n_catgets( id ) ); - }; // for id - }; // for range - - __kmp_printf( "%s", buffer->str ); - -} // __kmp_i18n_dump_catalog - -// ------------------------------------------------------------------------------------------------- - -kmp_msg_t -__kmp_msg_format( - unsigned id_arg, - ... -) { - - kmp_msg_t msg; - va_list args; - kmp_str_buf_t buffer; - __kmp_str_buf_init( & buffer ); - - va_start( args, id_arg ); - - // We use unsigned for the ID argument and explicitly cast it here to the - // right enumerator because variadic functions are not compatible with - // default promotions. 
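    // (Background on the promotion issue, as an illustrative aside: va_start's
    // last named parameter must not have a type that changes under the default
    // argument promotions, and an enum whose underlying type is narrower than
    // int would. Taking the id as plain unsigned and casting it back to
    // kmp_i18n_id_t keeps caller and callee in agreement on every
    // implementation; e.g. KMP_MSG(CantOpenMessageCatalog, name) presumably
    // expands to a call that arrives here as an unsigned id plus string
    // varargs.)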
-    kmp_i18n_id_t id = (kmp_i18n_id_t)id_arg;
-
-    #if KMP_OS_UNIX
-        // On Linux* OS and OS X*, printf() family functions process parameter numbers, for example:
-        // "%2$s %1$s".
-        __kmp_str_buf_vprint( & buffer, __kmp_i18n_catgets( id ), args );
-    #elif KMP_OS_WINDOWS
-        // On Windows, printf() family functions do not recognize GNU-style parameter numbers,
-        // so we have to use FormatMessage() instead. It recognizes parameter numbers, e.g.:
-        // "%2!s! %1!s!".
-        {
-            LPTSTR str = NULL;
-            int    len;
-            FormatMessage(
-                FORMAT_MESSAGE_FROM_STRING | FORMAT_MESSAGE_ALLOCATE_BUFFER,
-                __kmp_i18n_catgets( id ),
-                0, 0,
-                (LPTSTR)( & str ),
-                0,
-                & args
-            );
-            len = ___strip_crs( str );
-            __kmp_str_buf_cat( & buffer, str, len );
-            LocalFree( str );
-        }
-    #else
-        #error
-    #endif
-    va_end( args );
-    __kmp_str_buf_detach( & buffer );
-
-    msg.type = (kmp_msg_type_t)( id >> 16 );
-    msg.num  = id & 0xFFFF;
-    msg.str  = buffer.str;
-    msg.len  = buffer.used;
-
-    return msg;
-
-} // __kmp_msg_format
-
-// -------------------------------------------------------------------------------------------------
-
-static
-char *
-sys_error(
-    int err
-) {
-
-    char * message = NULL;
-
-    #if KMP_OS_WINDOWS
-
-        LPVOID buffer = NULL;
-        int    len;
-        DWORD  rc;
-        rc =
-            FormatMessage(
-                FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM,
-                NULL,
-                err,
-                MAKELANGID( LANG_NEUTRAL, SUBLANG_DEFAULT ), // Default language.
-                (LPTSTR) & buffer,
-                0,
-                NULL
-            );
-        if ( rc > 0 ) {
-            // Message formatted. Copy it (so we can free it later with a normal free()).
-            message = __kmp_str_format( "%s", (char *) buffer );
-            len = ___strip_crs( message ); // Delete carriage returns if any.
-            // Strip trailing newlines.
-            while ( len > 0 && message[ len - 1 ] == '\n' ) {
-                -- len;
-            }; // while
-            message[ len ] = 0;
-        } else {
-            // FormatMessage() failed to format the system error message. GetLastError() would give
-            // us an error code, which we would convert to a message... this is dangerous recursion,
-            // which cannot clarify the original error, so we will not even start it.
-        }; // if
-        if ( buffer != NULL ) {
-            LocalFree( buffer );
-        }; // if
-
-    #else // Non-Windows* OS: Linux* OS or OS X*
-
-        /*
-            There are 2 incompatible versions of strerror_r:
-
-                char * strerror_r( int, char *, size_t );  // GNU version
-                int    strerror_r( int, char *, size_t );  // XSI version
-        */
-
-        #if defined(__GLIBC__) && defined(_GNU_SOURCE)
-
-            // GNU version of strerror_r.
-
-            char   buffer[ 2048 ];
-            char * const err_msg = strerror_r( err, buffer, sizeof( buffer ) );
-                // Do not eliminate this assignment to a temporary variable, otherwise the compiler
-                // would not issue a warning if strerror_r() returns `int' instead of the expected
-                // `char *'.
-            message = __kmp_str_format( "%s", err_msg );
-
-        #else // OS X*, FreeBSD* etc.
-
-            // XSI version of strerror_r.
-
-            int    size   = 2048;
-            char * buffer = (char *) KMP_INTERNAL_MALLOC( size );
-            int    rc;
-            if (buffer == NULL) {
-                KMP_FATAL(MemoryAllocFailed);
-            }
-            rc = strerror_r( err, buffer, size );
-            if ( rc == -1 ) {
-                rc = errno;            // XSI version sets errno.
-            }; // if
-            while ( rc == ERANGE ) {   // ERANGE means the buffer is too small.
-                KMP_INTERNAL_FREE( buffer );
-                size *= 2;
-                buffer = (char *) KMP_INTERNAL_MALLOC( size );
-                if (buffer == NULL) {
-                    KMP_FATAL(MemoryAllocFailed);
-                }
-                rc = strerror_r( err, buffer, size );
-                if ( rc == -1 ) {
-                    rc = errno;        // XSI version sets errno.
-                }; // if
-            }; // while
-            if ( rc == 0 ) {
-                message = buffer;
-            } else {
-                // Buffer is unused. Free it.
- KMP_INTERNAL_FREE( buffer ); - }; // if - - #endif - - #endif /* KMP_OS_WINDOWS */ - - if ( message == NULL ) { - // TODO: I18n this message. - message = __kmp_str_format( "%s", "(No system error message available)" ); - }; // if - return message; - -} // sys_error - -// ------------------------------------------------------------------------------------------------- - -kmp_msg_t -__kmp_msg_error_code( - int code -) { - - kmp_msg_t msg; - msg.type = kmp_mt_syserr; - msg.num = code; - msg.str = sys_error( code ); - msg.len = KMP_STRLEN( msg.str ); - return msg; - -} // __kmp_msg_error_code - -// ------------------------------------------------------------------------------------------------- - -kmp_msg_t -__kmp_msg_error_mesg( - char const * mesg -) { - - kmp_msg_t msg; - msg.type = kmp_mt_syserr; - msg.num = 0; - msg.str = __kmp_str_format( "%s", mesg ); - msg.len = KMP_STRLEN( msg.str ); - return msg; - -} // __kmp_msg_error_mesg - -// ------------------------------------------------------------------------------------------------- - -void -__kmp_msg( - kmp_msg_severity_t severity, - kmp_msg_t message, - ... -) { - - va_list args; - kmp_i18n_id_t format; // format identifier - kmp_msg_t fmsg; // formatted message - kmp_str_buf_t buffer; - - if ( severity != kmp_ms_fatal && __kmp_generate_warnings == kmp_warnings_off ) - return; // no reason to form a string in order to not print it - - __kmp_str_buf_init( & buffer ); - - // Format the primary message. - switch ( severity ) { - case kmp_ms_inform : { - format = kmp_i18n_fmt_Info; - } break; - case kmp_ms_warning : { - format = kmp_i18n_fmt_Warning; - } break; - case kmp_ms_fatal : { - format = kmp_i18n_fmt_Fatal; - } break; - default : { - KMP_DEBUG_ASSERT( 0 ); - }; - }; // switch - fmsg = __kmp_msg_format( format, message.num, message.str ); - __kmp_str_free(&message.str); - __kmp_str_buf_cat( & buffer, fmsg.str, fmsg.len ); - __kmp_str_free(&fmsg.str); - - // Format other messages. - va_start( args, message ); - for ( ; ; ) { - message = va_arg( args, kmp_msg_t ); - if ( message.type == kmp_mt_dummy && message.str == NULL ) { - break; - }; // if - if ( message.type == kmp_mt_dummy && message.str == __kmp_msg_empty.str ) { - continue; - }; // if - switch ( message.type ) { - case kmp_mt_hint : { - format = kmp_i18n_fmt_Hint; - } break; - case kmp_mt_syserr : { - format = kmp_i18n_fmt_SysErr; - } break; - default : { - KMP_DEBUG_ASSERT( 0 ); - }; - }; // switch - fmsg = __kmp_msg_format( format, message.num, message.str ); - __kmp_str_free(&message.str); - __kmp_str_buf_cat( & buffer, fmsg.str, fmsg.len ); - __kmp_str_free(&fmsg.str); - }; // forever - va_end( args ); - - // Print formatted messages. - // This lock prevents multiple fatal errors on the same problem. - // __kmp_acquire_bootstrap_lock( & lock ); // GEH - This lock causing tests to hang on OS X*. - __kmp_printf( "%s", buffer.str ); - __kmp_str_buf_free( & buffer ); - - if ( severity == kmp_ms_fatal ) { - #if KMP_OS_WINDOWS - __kmp_thread_sleep( 500 ); /* Delay to give message a chance to appear before reaping */ - #endif - __kmp_abort_process(); - }; // if - - // __kmp_release_bootstrap_lock( & lock ); // GEH - this lock causing tests to hang on OS X*. 
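The va_arg loop above relies on a sentinel value (__kmp_msg_null, a kmp_mt_dummy message) to find the end of the argument list. A minimal standalone sketch of the same pattern, not part of this patch; the `note`/`note_null` names are hypothetical, and it passes a trivially-copyable struct through `...` just as the runtime does with kmp_msg_t:

#include <cstdarg>
#include <cstdio>

struct note { int type; const char *str; };   // type 0 plays the dummy role
static const note note_null = { 0, NULL };

// Print the first note, then every following note until the sentinel.
static void emit(note first, ...) {
    printf("%s\n", first.str);
    va_list args;
    va_start(args, first);
    for (;;) {
        note n = va_arg(args, note);
        if (n.type == 0 && n.str == NULL)
            break;                            // hit note_null: stop
        printf("  %s\n", n.str);
    }
    va_end(args);
}

int main() {
    emit(note{1, "primary message"}, note{2, "hint"}, note_null);
    return 0;
}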
- -} // __kmp_msg - -// ------------------------------------------------------------------------------------------------- - -// end of file // Index: runtime/src/kmp_i18n.cpp =================================================================== --- runtime/src/kmp_i18n.cpp +++ runtime/src/kmp_i18n.cpp @@ -1,5 +1,5 @@ /* - * kmp_i18n.c + * kmp_i18n.cpp */ Index: runtime/src/kmp_import.c =================================================================== --- runtime/src/kmp_import.c +++ runtime/src/kmp_import.c @@ -1,42 +0,0 @@ -/* - * kmp_import.c - */ - - -//===----------------------------------------------------------------------===// -// -// The LLVM Compiler Infrastructure -// -// This file is dual licensed under the MIT and the University of Illinois Open -// Source Licenses. See LICENSE.txt for details. -// -//===----------------------------------------------------------------------===// - - -/* - ------------------------------------------------------------------------------------------------ - Object generated from this source file is linked to Windows* OS DLL import library (libompmd.lib) - only! It is not a part of regular static or dynamic OpenMP RTL. Any code that just needs to go - in the libompmd.lib (but not in libompmt.lib and libompmd.dll) should be placed in this - file. - ------------------------------------------------------------------------------------------------ -*/ - -#ifdef __cplusplus -extern "C" { -#endif - -/* - These symbols are required for mutual exclusion with Microsoft OpenMP RTL (and compatibility - with MS Compiler). -*/ - -int _You_must_link_with_exactly_one_OpenMP_library = 1; -int _You_must_link_with_Intel_OpenMP_library = 1; -int _You_must_link_with_Microsoft_OpenMP_library = 1; - -#ifdef __cplusplus -} -#endif - -// end of file // Index: runtime/src/kmp_import.cpp =================================================================== --- runtime/src/kmp_import.cpp +++ runtime/src/kmp_import.cpp @@ -1,5 +1,5 @@ /* - * kmp_import.c + * kmp_import.cpp */ Index: runtime/src/kmp_io.c =================================================================== --- runtime/src/kmp_io.c +++ runtime/src/kmp_io.c @@ -1,248 +0,0 @@ -/* - * KMP_IO.c -- RTL IO - */ - - -//===----------------------------------------------------------------------===// -// -// The LLVM Compiler Infrastructure -// -// This file is dual licensed under the MIT and the University of Illinois Open -// Source Licenses. See LICENSE.txt for details. 
-//
-//===----------------------------------------------------------------------===//
-
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdarg.h>
-#include <string.h>
-#include <stddef.h>
-#ifndef __ABSOFT_WIN
-# include <sys/types.h>
-#endif
-
-#include "kmp_os.h"
-#include "kmp_lock.h"
-#include "kmp_str.h"
-#include "kmp_io.h"
-#include "kmp.h" // KMP_GTID_DNE, __kmp_debug_buf, etc
-
-#if KMP_OS_WINDOWS
-# pragma warning( push )
-# pragma warning( disable: 271 310 )
-# include <windows.h>
-# pragma warning( pop )
-#endif
-
-/* ------------------------------------------------------------------------ */
-/* ------------------------------------------------------------------------ */
-
-kmp_bootstrap_lock_t __kmp_stdio_lock = KMP_BOOTSTRAP_LOCK_INITIALIZER( __kmp_stdio_lock ); /* Control stdio functions */
-kmp_bootstrap_lock_t __kmp_console_lock = KMP_BOOTSTRAP_LOCK_INITIALIZER( __kmp_console_lock ); /* Control console initialization */
-
-#if KMP_OS_WINDOWS
-
- # ifdef KMP_DEBUG
- /* __kmp_stdout is used only for dev build */
- static HANDLE __kmp_stdout = NULL;
- # endif
- static HANDLE __kmp_stderr = NULL;
- static int __kmp_console_exists = FALSE;
- static kmp_str_buf_t __kmp_console_buf;
-
- static int
- is_console( void )
- {
- char buffer[ 128 ];
- DWORD rc = 0;
- DWORD err = 0;
- // Try to get console title.
- SetLastError( 0 );
- // GetConsoleTitle does not reset last error in case of success or short buffer,
- // so we need to clear it explicitly.
- rc = GetConsoleTitle( buffer, sizeof( buffer ) );
- if ( rc == 0 ) {
- // rc == 0 means getting console title failed. Let us find out why.
- err = GetLastError();
- // err == 0 means buffer too short (we suppose console exists).
- // In Windows applications we usually have err == 6 (invalid handle).
- }; // if
- return rc > 0 || err == 0;
- }
-
- void
- __kmp_close_console( void )
- {
- /* wait until user presses return before closing window */
- /* TODO only close if a window was opened */
- if( __kmp_console_exists ) {
- #ifdef KMP_DEBUG
- /* standard out is used only in dev build */
- __kmp_stdout = NULL;
- #endif
- __kmp_stderr = NULL;
- __kmp_str_buf_free( &__kmp_console_buf );
- __kmp_console_exists = FALSE;
- }
- }
-
- /* For Windows, call this before stdout, stderr, or stdin are used.
- * It opens a console window and starts processing */
- static void
- __kmp_redirect_output( void )
- {
- __kmp_acquire_bootstrap_lock( &__kmp_console_lock );
-
- if( ! __kmp_console_exists ) {
- #ifdef KMP_DEBUG
- /* standard out is used only in dev build */
- HANDLE ho;
- #endif
- HANDLE he;
-
- __kmp_str_buf_init( &__kmp_console_buf );
-
- AllocConsole();
- // We do not check the result of AllocConsole because
- // 1. the call is harmless
- // 2. it is not clear how to communicate failure
- // 3.
we will detect failure later when we get handle(s) - - #ifdef KMP_DEBUG - ho = GetStdHandle( STD_OUTPUT_HANDLE ); - if ( ho == INVALID_HANDLE_VALUE || ho == NULL ) { - - DWORD err = GetLastError(); - // TODO: output error somehow (maybe message box) - __kmp_stdout = NULL; - - } else { - - __kmp_stdout = ho; // temporary code, need new global for ho - - } - #endif - he = GetStdHandle( STD_ERROR_HANDLE ); - if ( he == INVALID_HANDLE_VALUE || he == NULL ) { - - DWORD err = GetLastError(); - // TODO: output error somehow (maybe message box) - __kmp_stderr = NULL; - - } else { - - __kmp_stderr = he; // temporary code, need new global - } - __kmp_console_exists = TRUE; - } - __kmp_release_bootstrap_lock( &__kmp_console_lock ); - } - -#else - #define __kmp_stderr (stderr) -#endif /* KMP_OS_WINDOWS */ - -void -__kmp_vprintf( enum kmp_io __kmp_io, char const * format, va_list ap ) -{ - #if KMP_OS_WINDOWS - if( !__kmp_console_exists ) { - __kmp_redirect_output(); - } - if( ! __kmp_stderr && __kmp_io == kmp_err ) { - return; - } - #ifdef KMP_DEBUG - if( ! __kmp_stdout && __kmp_io == kmp_out ) { - return; - } - #endif - #endif /* KMP_OS_WINDOWS */ - - if ( __kmp_debug_buf && __kmp_debug_buffer != NULL ) { - - int dc = ( __kmp_debug_buf_atomic ? - KMP_TEST_THEN_INC32( & __kmp_debug_count) : __kmp_debug_count++ ) - % __kmp_debug_buf_lines; - char *db = & __kmp_debug_buffer[ dc * __kmp_debug_buf_chars ]; - int chars = 0; - - #ifdef KMP_DEBUG_PIDS - chars = KMP_SNPRINTF( db, __kmp_debug_buf_chars, "pid=%d: ", (kmp_int32)getpid() ); - #endif - chars += KMP_VSNPRINTF( db, __kmp_debug_buf_chars, format, ap ); - - if ( chars + 1 > __kmp_debug_buf_chars ) { - if ( chars + 1 > __kmp_debug_buf_warn_chars ) { - #if KMP_OS_WINDOWS - DWORD count; - __kmp_str_buf_print( &__kmp_console_buf, - "OMP warning: Debugging buffer overflow; increase KMP_DEBUG_BUF_CHARS to %d\n", - chars + 1 ); - WriteFile( __kmp_stderr, __kmp_console_buf.str, __kmp_console_buf.used, &count, NULL ); - __kmp_str_buf_clear( &__kmp_console_buf ); - #else - fprintf( __kmp_stderr, - "OMP warning: Debugging buffer overflow; increase KMP_DEBUG_BUF_CHARS to %d\n", - chars + 1 ); - fflush( __kmp_stderr ); - #endif - __kmp_debug_buf_warn_chars = chars + 1; - } - /* terminate string if overflow occurred */ - db[ __kmp_debug_buf_chars - 2 ] = '\n'; - db[ __kmp_debug_buf_chars - 1 ] = '\0'; - } - } else { - #if KMP_OS_WINDOWS - DWORD count; - #ifdef KMP_DEBUG_PIDS - __kmp_str_buf_print( &__kmp_console_buf, "pid=%d: ", - (kmp_int32)getpid() ); - #endif - __kmp_str_buf_vprint( &__kmp_console_buf, format, ap ); - WriteFile( - __kmp_stderr, - __kmp_console_buf.str, - __kmp_console_buf.used, - &count, - NULL - ); - __kmp_str_buf_clear( &__kmp_console_buf ); - #else - #ifdef KMP_DEBUG_PIDS - fprintf( __kmp_stderr, "pid=%d: ", (kmp_int32)getpid() ); - #endif - vfprintf( __kmp_stderr, format, ap ); - fflush( __kmp_stderr ); - #endif - } -} - -void -__kmp_printf( char const * format, ... ) -{ - va_list ap; - va_start( ap, format ); - - __kmp_acquire_bootstrap_lock( & __kmp_stdio_lock ); - __kmp_vprintf( kmp_err, format, ap ); - __kmp_release_bootstrap_lock( & __kmp_stdio_lock ); - - va_end( ap ); -} - -void -__kmp_printf_no_lock( char const * format, ... 
)
-{
- va_list ap;
- va_start( ap, format );
-
- __kmp_vprintf( kmp_err, format, ap );
-
- va_end( ap );
-}
-
-/* ------------------------------------------------------------------------ */
-/* ------------------------------------------------------------------------ */

Index: runtime/src/kmp_io.cpp
===================================================================
--- runtime/src/kmp_io.cpp
+++ runtime/src/kmp_io.cpp
@@ -1,5 +1,5 @@
 /*
- * KMP_IO.c -- RTL IO
+ * kmp_io.cpp -- RTL IO
 */

Index: runtime/src/kmp_itt.c
===================================================================
--- runtime/src/kmp_itt.c
+++ runtime/src/kmp_itt.c
@@ -1,163 +0,0 @@
-#include "kmp_config.h"
-
-#if USE_ITT_BUILD
-/*
- * kmp_itt.c -- ITT Notify interface.
- */
-
-
-//===----------------------------------------------------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is dual licensed under the MIT and the University of Illinois Open
-// Source Licenses. See LICENSE.txt for details.
-//
-//===----------------------------------------------------------------------===//
-
-
-#include "kmp_itt.h"
-
-#if KMP_DEBUG
- #include "kmp_itt.inl"
-#endif
-
-
-#if USE_ITT_NOTIFY
-
- kmp_int32 __kmp_barrier_domain_count;
- kmp_int32 __kmp_region_domain_count;
- __itt_domain* __kmp_itt_barrier_domains[KMP_MAX_FRAME_DOMAINS];
- __itt_domain* __kmp_itt_region_domains[KMP_MAX_FRAME_DOMAINS];
- __itt_domain* __kmp_itt_imbalance_domains[KMP_MAX_FRAME_DOMAINS];
- kmp_int32 __kmp_itt_region_team_size[KMP_MAX_FRAME_DOMAINS];
- __itt_domain * metadata_domain = NULL;
- __itt_string_handle * string_handle_imbl = NULL;
- __itt_string_handle * string_handle_loop = NULL;
- __itt_string_handle * string_handle_sngl = NULL;
-
- #include "kmp_version.h"
- #include "kmp_i18n.h"
- #include "kmp_str.h"
-
- KMP_BUILD_ASSERT( sizeof( kmp_itt_mark_t ) == sizeof( __itt_mark_type ) );
-
- /*
- Previously used warnings:
-
- KMP_WARNING( IttAllNotifDisabled );
- KMP_WARNING( IttObjNotifDisabled );
- KMP_WARNING( IttMarkNotifDisabled );
- KMP_WARNING( IttUnloadLibFailed, libittnotify );
- */
-
-
- kmp_int32 __kmp_itt_prepare_delay = 0;
- kmp_bootstrap_lock_t __kmp_itt_debug_lock = KMP_BOOTSTRAP_LOCK_INITIALIZER( __kmp_itt_debug_lock );
-
-#endif // USE_ITT_NOTIFY
-
-void __kmp_itt_initialize() {
-
- // The ITTNotify library is loaded and initialized at the first call to any ittnotify function,
- // so we do not need to load it explicitly any more.
- // Just report the OMP RTL version to ITTNotify.
-
- #if USE_ITT_NOTIFY
- // Report OpenMP RTL version.
- kmp_str_buf_t buf; - __itt_mark_type version; - __kmp_str_buf_init( & buf ); - __kmp_str_buf_print( - & buf, - "OMP RTL Version %d.%d.%d", - __kmp_version_major, - __kmp_version_minor, - __kmp_version_build - ); - if ( __itt_api_version_ptr != NULL ) { - __kmp_str_buf_print( & buf, ":%s", __itt_api_version() ); - }; // if - version = __itt_mark_create( buf.str ); - __itt_mark( version, NULL ); - __kmp_str_buf_free( & buf ); - #endif - -} // __kmp_itt_initialize - - -void __kmp_itt_destroy() { - #if USE_ITT_NOTIFY - __kmp_itt_fini_ittlib(); - #endif -} // __kmp_itt_destroy - - -extern "C" -void -__itt_error_handler( - __itt_error_code err, - va_list args -) { - - switch ( err ) { - case __itt_error_no_module : { - char const * library = va_arg( args, char const * ); -#if KMP_OS_WINDOWS - int sys_err = va_arg( args, int ); - kmp_msg_t err_code = KMP_SYSERRCODE( sys_err ); - __kmp_msg( kmp_ms_warning, KMP_MSG( IttLoadLibFailed, library ), err_code, __kmp_msg_null ); - if (__kmp_generate_warnings == kmp_warnings_off) { - __kmp_str_free(&err_code.str); - } -#else - char const * sys_err = va_arg( args, char const * ); - kmp_msg_t err_code = KMP_SYSERRMESG( sys_err ); - __kmp_msg( kmp_ms_warning, KMP_MSG( IttLoadLibFailed, library ), err_code, __kmp_msg_null ); - if (__kmp_generate_warnings == kmp_warnings_off) { - __kmp_str_free(&err_code.str); - } -#endif - } break; - case __itt_error_no_symbol : { - char const * library = va_arg( args, char const * ); - char const * symbol = va_arg( args, char const * ); - KMP_WARNING( IttLookupFailed, symbol, library ); - } break; - case __itt_error_unknown_group : { - char const * var = va_arg( args, char const * ); - char const * group = va_arg( args, char const * ); - KMP_WARNING( IttUnknownGroup, var, group ); - } break; - case __itt_error_env_too_long : { - char const * var = va_arg( args, char const * ); - size_t act_len = va_arg( args, size_t ); - size_t max_len = va_arg( args, size_t ); - KMP_WARNING( IttEnvVarTooLong, var, (unsigned long) act_len, (unsigned long) max_len ); - } break; - case __itt_error_cant_read_env : { - char const * var = va_arg( args, char const * ); - int sys_err = va_arg( args, int ); - kmp_msg_t err_code = KMP_ERR( sys_err ); - __kmp_msg( kmp_ms_warning, KMP_MSG( CantGetEnvVar, var ), err_code, __kmp_msg_null ); - if (__kmp_generate_warnings == kmp_warnings_off) { - __kmp_str_free(&err_code.str); - } - } break; - case __itt_error_system : { - char const * func = va_arg( args, char const * ); - int sys_err = va_arg( args, int ); - kmp_msg_t err_code = KMP_SYSERRCODE( sys_err ); - __kmp_msg( kmp_ms_warning, KMP_MSG( IttFunctionError, func ), err_code, __kmp_msg_null ); - if (__kmp_generate_warnings == kmp_warnings_off) { - __kmp_str_free(&err_code.str); - } - } break; - default : { - KMP_WARNING( IttUnknownError, err ); - }; - }; // switch - -} // __itt_error_handler - -#endif /* USE_ITT_BUILD */ Index: runtime/src/kmp_itt.cpp =================================================================== --- runtime/src/kmp_itt.cpp +++ runtime/src/kmp_itt.cpp @@ -2,7 +2,7 @@ #if USE_ITT_BUILD /* - * kmp_itt.c -- ITT Notify interface. + * kmp_itt.cpp -- ITT Notify interface. */ Index: runtime/src/kmp_lock.h =================================================================== --- runtime/src/kmp_lock.h +++ runtime/src/kmp_lock.h @@ -1287,7 +1287,7 @@ ? 
KMP_GET_I_LOCK(KMP_EXTRACT_I_INDEX(l)) \ : *((kmp_indirect_lock_t **)(l)) ) -// Used once in kmp_error.c +// Used once in kmp_error.cpp extern kmp_int32 __kmp_get_user_lock_owner(kmp_user_lock_p, kmp_uint32); Index: runtime/src/kmp_lock.cpp =================================================================== --- runtime/src/kmp_lock.cpp +++ runtime/src/kmp_lock.cpp @@ -2528,7 +2528,7 @@ // polling area has been reconfigured. Unless it is reconfigured, the // reloads stay in L1 cache and are cheap. // - // Keep this code in sync with KMP_WAIT_YIELD, in kmp_dispatch.c !!! + // Keep this code in sync with KMP_WAIT_YIELD, in kmp_dispatch.cpp !!! // // The current implementation of KMP_WAIT_YIELD doesn't allow for mask // and poll to be re-read every spin iteration. @@ -3522,7 +3522,7 @@ kmp_dyna_lockseq_t __kmp_user_lock_seq = lockseq_queuing; -// This is used only in kmp_error.c when consistency checking is on. +// This is used only in kmp_error.cpp when consistency checking is on. kmp_int32 __kmp_get_user_lock_owner(kmp_user_lock_p lck, kmp_uint32 seq) { Index: runtime/src/kmp_runtime.c =================================================================== --- runtime/src/kmp_runtime.c +++ runtime/src/kmp_runtime.c @@ -1,7683 +0,0 @@ -/* - * kmp_runtime.c -- KPTS runtime support library - */ - - -//===----------------------------------------------------------------------===// -// -// The LLVM Compiler Infrastructure -// -// This file is dual licensed under the MIT and the University of Illinois Open -// Source Licenses. See LICENSE.txt for details. -// -//===----------------------------------------------------------------------===// - - -#include "kmp.h" -#include "kmp_atomic.h" -#include "kmp_wrapper_getpid.h" -#include "kmp_environment.h" -#include "kmp_itt.h" -#include "kmp_str.h" -#include "kmp_settings.h" -#include "kmp_i18n.h" -#include "kmp_io.h" -#include "kmp_error.h" -#include "kmp_stats.h" -#include "kmp_wait_release.h" -#include "kmp_affinity.h" - -#if OMPT_SUPPORT -#include "ompt-specific.h" -#endif - -/* these are temporary issues to be dealt with */ -#define KMP_USE_PRCTL 0 - -#if KMP_OS_WINDOWS -#include -#endif - -#include "tsan_annotations.h" - -#if defined(KMP_GOMP_COMPAT) -char const __kmp_version_alt_comp[] = KMP_VERSION_PREFIX "alternative compiler support: yes"; -#endif /* defined(KMP_GOMP_COMPAT) */ - -char const __kmp_version_omp_api[] = KMP_VERSION_PREFIX "API version: " -#if OMP_45_ENABLED - "4.5 (201511)"; -#elif OMP_40_ENABLED - "4.0 (201307)"; -#else - "3.1 (201107)"; -#endif - -#ifdef KMP_DEBUG -char const __kmp_version_lock[] = KMP_VERSION_PREFIX "lock type: run time selectable"; -#endif /* KMP_DEBUG */ - -#define KMP_MIN( x, y ) ( (x) < (y) ? 
(x) : (y) ) - -/* ------------------------------------------------------------------------ */ -/* ------------------------------------------------------------------------ */ - -kmp_info_t __kmp_monitor; - -/* ------------------------------------------------------------------------ */ -/* ------------------------------------------------------------------------ */ - -/* Forward declarations */ - -void __kmp_cleanup( void ); - -static void __kmp_initialize_info( kmp_info_t *, kmp_team_t *, int tid, int gtid ); -static void __kmp_initialize_team( kmp_team_t * team, int new_nproc, kmp_internal_control_t * new_icvs, ident_t * loc ); -#if OMP_40_ENABLED && KMP_AFFINITY_SUPPORTED -static void __kmp_partition_places( kmp_team_t *team, int update_master_only=0 ); -#endif -static void __kmp_do_serial_initialize( void ); -void __kmp_fork_barrier( int gtid, int tid ); -void __kmp_join_barrier( int gtid ); -void __kmp_setup_icv_copy( kmp_team_t *team, int new_nproc, kmp_internal_control_t * new_icvs, ident_t *loc ); - -#ifdef USE_LOAD_BALANCE -static int __kmp_load_balance_nproc( kmp_root_t * root, int set_nproc ); -#endif - -static int __kmp_expand_threads(int nWish, int nNeed); -#if KMP_OS_WINDOWS -static int __kmp_unregister_root_other_thread( int gtid ); -#endif -static void __kmp_unregister_library( void ); // called by __kmp_internal_end() -static void __kmp_reap_thread( kmp_info_t * thread, int is_root ); -static kmp_info_t *__kmp_thread_pool_insert_pt = NULL; - -/* ------------------------------------------------------------------------ */ -/* ------------------------------------------------------------------------ */ - -/* Calculate the identifier of the current thread */ -/* fast (and somewhat portable) way to get unique */ -/* identifier of executing thread. */ -/* returns KMP_GTID_DNE if we haven't been assigned a gtid */ - -int -__kmp_get_global_thread_id( ) -{ - int i; - kmp_info_t **other_threads; - size_t stack_data; - char *stack_addr; - size_t stack_size; - char *stack_base; - - KA_TRACE( 1000, ( "*** __kmp_get_global_thread_id: entering, nproc=%d all_nproc=%d\n", - __kmp_nth, __kmp_all_nth )); - - /* JPH - to handle the case where __kmpc_end(0) is called immediately prior to a - parallel region, made it return KMP_GTID_DNE to force serial_initialize by - caller. Had to handle KMP_GTID_DNE at all call-sites, or else guarantee - __kmp_init_gtid for this to work. */ - - if ( !TCR_4(__kmp_init_gtid) ) return KMP_GTID_DNE; - -#ifdef KMP_TDATA_GTID - if ( TCR_4(__kmp_gtid_mode) >= 3) { - KA_TRACE( 1000, ( "*** __kmp_get_global_thread_id: using TDATA\n" )); - return __kmp_gtid; - } -#endif - if ( TCR_4(__kmp_gtid_mode) >= 2) { - KA_TRACE( 1000, ( "*** __kmp_get_global_thread_id: using keyed TLS\n" )); - return __kmp_gtid_get_specific(); - } - KA_TRACE( 1000, ( "*** __kmp_get_global_thread_id: using internal alg.\n" )); - - stack_addr = (char*) & stack_data; - other_threads = __kmp_threads; - - /* - ATT: The code below is a source of potential bugs due to unsynchronized access to - __kmp_threads array. For example: - 1. Current thread loads other_threads[i] to thr and checks it, it is non-NULL. - 2. Current thread is suspended by OS. - 3. Another thread unregisters and finishes (debug versions of free() may fill memory - with something like 0xEF). - 4. Current thread is resumed. - 5. Current thread reads junk from *thr. - TODO: Fix it. 
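The scan below embodies a simple idea: take the address of a local variable and find the registered thread whose saved stack interval contains it. A minimal, hypothetical version of the core membership test, not part of this patch:

#include <cstddef>

// Sketch only: stacks grow downward, so an address lies on a thread's stack
// iff it is at or below the recorded base and within size bytes of it.
static bool addr_on_stack(const char *addr, const char *stack_base,
                          size_t stack_size) {
    return addr <= stack_base &&
           (size_t)(stack_base - addr) <= stack_size;
}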
- --ln
- */
-
- for( i = 0 ; i < __kmp_threads_capacity ; i++ ) {
-
- kmp_info_t *thr = (kmp_info_t *)TCR_SYNC_PTR(other_threads[i]);
- if( !thr ) continue;
-
- stack_size = (size_t)TCR_PTR(thr->th.th_info.ds.ds_stacksize);
- stack_base = (char *)TCR_PTR(thr->th.th_info.ds.ds_stackbase);
-
- /* stack grows down -- search through all of the active threads */
-
- if( stack_addr <= stack_base ) {
- size_t stack_diff = stack_base - stack_addr;
-
- if( stack_diff <= stack_size ) {
- /* The only way we can be closer than the allocated */
- /* stack size is if we are running on this thread. */
- KMP_DEBUG_ASSERT( __kmp_gtid_get_specific() == i );
- return i;
- }
- }
- }
-
- /* get specific to try and determine our gtid */
- KA_TRACE( 1000, ( "*** __kmp_get_global_thread_id: internal alg. failed to find "
- "thread, using TLS\n" ));
- i = __kmp_gtid_get_specific();
-
- /*fprintf( stderr, "=== %d\n", i ); */ /* GROO */
-
- /* if we haven't been assigned a gtid, then return the code */
- if( i<0 ) return i;
-
- /* dynamically updated stack window for uber threads to avoid get_specific call */
- if( ! TCR_4(other_threads[i]->th.th_info.ds.ds_stackgrow) ) {
- KMP_FATAL( StackOverflow, i );
- }
-
- stack_base = (char *) other_threads[i]->th.th_info.ds.ds_stackbase;
- if( stack_addr > stack_base ) {
- TCW_PTR(other_threads[i]->th.th_info.ds.ds_stackbase, stack_addr);
- TCW_PTR(other_threads[i]->th.th_info.ds.ds_stacksize,
- other_threads[i]->th.th_info.ds.ds_stacksize + stack_addr - stack_base);
- } else {
- TCW_PTR(other_threads[i]->th.th_info.ds.ds_stacksize, stack_base - stack_addr);
- }
-
- /* Reprint stack bounds for ubermaster since they have been refined */
- if ( __kmp_storage_map ) {
- char *stack_end = (char *) other_threads[i]->th.th_info.ds.ds_stackbase;
- char *stack_beg = stack_end - other_threads[i]->th.th_info.ds.ds_stacksize;
- __kmp_print_storage_map_gtid( i, stack_beg, stack_end,
- other_threads[i]->th.th_info.ds.ds_stacksize,
- "th_%d stack (refinement)", i );
- }
- return i;
-}
-
-int
-__kmp_get_global_thread_id_reg( )
-{
- int gtid;
-
- if ( !__kmp_init_serial ) {
- gtid = KMP_GTID_DNE;
- } else
-#ifdef KMP_TDATA_GTID
- if ( TCR_4(__kmp_gtid_mode) >= 3 ) {
- KA_TRACE( 1000, ( "*** __kmp_get_global_thread_id_reg: using TDATA\n" ));
- gtid = __kmp_gtid;
- } else
-#endif
- if ( TCR_4(__kmp_gtid_mode) >= 2 ) {
- KA_TRACE( 1000, ( "*** __kmp_get_global_thread_id_reg: using keyed TLS\n" ));
- gtid = __kmp_gtid_get_specific();
- } else {
- KA_TRACE( 1000, ( "*** __kmp_get_global_thread_id_reg: using internal alg.\n" ));
- gtid = __kmp_get_global_thread_id();
- }
-
- /* we must be a new uber master sibling thread */
- if( gtid == KMP_GTID_DNE ) {
- KA_TRACE( 10, ( "__kmp_get_global_thread_id_reg: Encountered new root thread.
" - "Registering a new gtid.\n" )); - __kmp_acquire_bootstrap_lock( &__kmp_initz_lock ); - if( !__kmp_init_serial ) { - __kmp_do_serial_initialize(); - gtid = __kmp_gtid_get_specific(); - } else { - gtid = __kmp_register_root(FALSE); - } - __kmp_release_bootstrap_lock( &__kmp_initz_lock ); - /*__kmp_printf( "+++ %d\n", gtid ); */ /* GROO */ - } - - KMP_DEBUG_ASSERT( gtid >=0 ); - - return gtid; -} - -/* caller must hold forkjoin_lock */ -void -__kmp_check_stack_overlap( kmp_info_t *th ) -{ - int f; - char *stack_beg = NULL; - char *stack_end = NULL; - int gtid; - - KA_TRACE(10,("__kmp_check_stack_overlap: called\n")); - if ( __kmp_storage_map ) { - stack_end = (char *) th->th.th_info.ds.ds_stackbase; - stack_beg = stack_end - th->th.th_info.ds.ds_stacksize; - - gtid = __kmp_gtid_from_thread( th ); - - if (gtid == KMP_GTID_MONITOR) { - __kmp_print_storage_map_gtid( gtid, stack_beg, stack_end, th->th.th_info.ds.ds_stacksize, - "th_%s stack (%s)", "mon", - ( th->th.th_info.ds.ds_stackgrow ) ? "initial" : "actual" ); - } else { - __kmp_print_storage_map_gtid( gtid, stack_beg, stack_end, th->th.th_info.ds.ds_stacksize, - "th_%d stack (%s)", gtid, - ( th->th.th_info.ds.ds_stackgrow ) ? "initial" : "actual" ); - } - } - - /* No point in checking ubermaster threads since they use refinement and cannot overlap */ - gtid = __kmp_gtid_from_thread( th ); - if ( __kmp_env_checks == TRUE && !KMP_UBER_GTID(gtid)) - { - KA_TRACE(10,("__kmp_check_stack_overlap: performing extensive checking\n")); - if ( stack_beg == NULL ) { - stack_end = (char *) th->th.th_info.ds.ds_stackbase; - stack_beg = stack_end - th->th.th_info.ds.ds_stacksize; - } - - for( f=0 ; f < __kmp_threads_capacity ; f++ ) { - kmp_info_t *f_th = (kmp_info_t *)TCR_SYNC_PTR(__kmp_threads[f]); - - if( f_th && f_th != th ) { - char *other_stack_end = (char *)TCR_PTR(f_th->th.th_info.ds.ds_stackbase); - char *other_stack_beg = other_stack_end - - (size_t)TCR_PTR(f_th->th.th_info.ds.ds_stacksize); - if((stack_beg > other_stack_beg && stack_beg < other_stack_end) || - (stack_end > other_stack_beg && stack_end < other_stack_end)) { - - /* Print the other stack values before the abort */ - if ( __kmp_storage_map ) - __kmp_print_storage_map_gtid( -1, other_stack_beg, other_stack_end, - (size_t)TCR_PTR(f_th->th.th_info.ds.ds_stacksize), - "th_%d stack (overlapped)", - __kmp_gtid_from_thread( f_th ) ); - - __kmp_msg( kmp_ms_fatal, KMP_MSG( StackOverlap ), KMP_HNT( ChangeStackLimit ), __kmp_msg_null ); - } - } - } - } - KA_TRACE(10,("__kmp_check_stack_overlap: returning\n")); -} - - -/* ------------------------------------------------------------------------ */ - -/* ------------------------------------------------------------------------ */ - -void -__kmp_infinite_loop( void ) -{ - static int done = FALSE; - - while (! done) { - KMP_YIELD( 1 ); - } -} - -#define MAX_MESSAGE 512 - -void -__kmp_print_storage_map_gtid( int gtid, void *p1, void *p2, size_t size, char const *format, ...) 
{
- char buffer[MAX_MESSAGE];
- va_list ap;
-
- va_start( ap, format);
- KMP_SNPRINTF( buffer, sizeof(buffer), "OMP storage map: %p %p%8lu %s\n", p1, p2, (unsigned long) size, format );
- __kmp_acquire_bootstrap_lock( & __kmp_stdio_lock );
- __kmp_vprintf( kmp_err, buffer, ap );
-#if KMP_PRINT_DATA_PLACEMENT
- int node;
- if(gtid >= 0) {
- if(p1 <= p2 && (char*)p2 - (char*)p1 == size) {
- if( __kmp_storage_map_verbose ) {
- node = __kmp_get_host_node(p1);
- if(node < 0) /* doesn't work, so don't try this next time */
- __kmp_storage_map_verbose = FALSE;
- else {
- char *last;
- int lastNode;
- int localProc = __kmp_get_cpu_from_gtid(gtid);
-
- const int page_size = KMP_GET_PAGE_SIZE();
-
- p1 = (void *)( (size_t)p1 & ~((size_t)page_size - 1) );
- p2 = (void *)( ((size_t) p2 - 1) & ~((size_t)page_size - 1) );
- if(localProc >= 0)
- __kmp_printf_no_lock(" GTID %d localNode %d\n", gtid, localProc>>1);
- else
- __kmp_printf_no_lock(" GTID %d\n", gtid);
-# if KMP_USE_PRCTL
-/* The more elaborate format is disabled for now because of the prctl hanging bug. */
- do {
- last = p1;
- lastNode = node;
- /* This loop collates adjacent pages with the same host node. */
- do {
- (char*)p1 += page_size;
- } while(p1 <= p2 && (node = __kmp_get_host_node(p1)) == lastNode);
- __kmp_printf_no_lock(" %p-%p memNode %d\n", last,
- (char*)p1 - 1, lastNode);
- } while(p1 <= p2);
-# else
- __kmp_printf_no_lock(" %p-%p memNode %d\n", p1,
- (char*)p1 + (page_size - 1), __kmp_get_host_node(p1));
- if(p1 < p2) {
- __kmp_printf_no_lock(" %p-%p memNode %d\n", p2,
- (char*)p2 + (page_size - 1), __kmp_get_host_node(p2));
- }
-# endif
- }
- }
- } else
- __kmp_printf_no_lock(" %s\n", KMP_I18N_STR( StorageMapWarning ) );
- }
-#endif /* KMP_PRINT_DATA_PLACEMENT */
- __kmp_release_bootstrap_lock( & __kmp_stdio_lock );
-}
-
-void
-__kmp_warn( char const * format, ... )
-{
- char buffer[MAX_MESSAGE];
- va_list ap;
-
- if ( __kmp_generate_warnings == kmp_warnings_off ) {
- return;
- }
-
- va_start( ap, format );
-
- KMP_SNPRINTF( buffer, sizeof(buffer) , "OMP warning: %s\n", format );
- __kmp_acquire_bootstrap_lock( & __kmp_stdio_lock );
- __kmp_vprintf( kmp_err, buffer, ap );
- __kmp_release_bootstrap_lock( & __kmp_stdio_lock );
-
- va_end( ap );
-}
-
-void
-__kmp_abort_process()
-{
-
- // Later threads may stall here, but that's ok because abort() will kill them.
- __kmp_acquire_bootstrap_lock( & __kmp_exit_lock );
-
- if ( __kmp_debug_buf ) {
- __kmp_dump_debug_buffer();
- }; // if
-
- if ( KMP_OS_WINDOWS ) {
- // Let other threads know of abnormal termination and prevent deadlock
- // if abort happened during library initialization or shutdown
- __kmp_global.g.g_abort = SIGABRT;
-
- /*
- On Windows* OS, abort() by default raises a pop-up error box, which stalls nightly testing.
- Unfortunately, we cannot reliably suppress pop-up error boxes. _set_abort_behavior()
- works well, but this function is not available in VS7 (this is not a problem for the DLL, but
- it is a problem for the static OpenMP RTL). SetErrorMode() (and thus the timelimit utility) does
- not help, at least in some versions of the MS C RTL.
-
- It seems the following sequence is the only way to simulate abort() and avoid the pop-up error
- box.
- */
- raise( SIGABRT );
- _exit( 3 ); // Just in case: if the signal is ignored, exit anyway.
- } else {
- abort();
- }; // if
-
- __kmp_infinite_loop();
- __kmp_release_bootstrap_lock( & __kmp_exit_lock );
-
-} // __kmp_abort_process
-
-void
-__kmp_abort_thread( void )
-{
- // TODO: Eliminate the g_abort global variable and this function.
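The Windows branch of __kmp_abort_process() above is compact enough to restate as a standalone sketch, not part of this patch, assuming a Microsoft CRT where _exit() is declared in <stdlib.h>:

#include <csignal>
#include <stdlib.h>

// Sketch only: simulate abort() without the Windows pop-up error box.
// raise() runs any installed SIGABRT handler; _exit() is the fallback
// in case the signal is ignored.
static void abort_without_popup() {
    raise(SIGABRT);
    _exit(3);
}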
- // In case of abort just call abort(), it will kill all the threads. - __kmp_infinite_loop(); -} // __kmp_abort_thread - -/* ------------------------------------------------------------------------ */ - -/* - * Print out the storage map for the major kmp_info_t thread data structures - * that are allocated together. - */ - -static void -__kmp_print_thread_storage_map( kmp_info_t *thr, int gtid ) -{ - __kmp_print_storage_map_gtid( gtid, thr, thr + 1, sizeof(kmp_info_t), "th_%d", gtid ); - - __kmp_print_storage_map_gtid( gtid, &thr->th.th_info, &thr->th.th_team, sizeof(kmp_desc_t), - "th_%d.th_info", gtid ); - - __kmp_print_storage_map_gtid( gtid, &thr->th.th_local, &thr->th.th_pri_head, sizeof(kmp_local_t), - "th_%d.th_local", gtid ); - - __kmp_print_storage_map_gtid( gtid, &thr->th.th_bar[0], &thr->th.th_bar[bs_last_barrier], - sizeof(kmp_balign_t) * bs_last_barrier, "th_%d.th_bar", gtid ); - - __kmp_print_storage_map_gtid( gtid, &thr->th.th_bar[bs_plain_barrier], - &thr->th.th_bar[bs_plain_barrier+1], - sizeof(kmp_balign_t), "th_%d.th_bar[plain]", gtid); - - __kmp_print_storage_map_gtid( gtid, &thr->th.th_bar[bs_forkjoin_barrier], - &thr->th.th_bar[bs_forkjoin_barrier+1], - sizeof(kmp_balign_t), "th_%d.th_bar[forkjoin]", gtid); - - #if KMP_FAST_REDUCTION_BARRIER - __kmp_print_storage_map_gtid( gtid, &thr->th.th_bar[bs_reduction_barrier], - &thr->th.th_bar[bs_reduction_barrier+1], - sizeof(kmp_balign_t), "th_%d.th_bar[reduction]", gtid); - #endif // KMP_FAST_REDUCTION_BARRIER -} - -/* - * Print out the storage map for the major kmp_team_t team data structures - * that are allocated together. - */ - -static void -__kmp_print_team_storage_map( const char *header, kmp_team_t *team, int team_id, int num_thr ) -{ - int num_disp_buff = team->t.t_max_nproc > 1 ? 
__kmp_dispatch_num_buffers : 2;
- __kmp_print_storage_map_gtid( -1, team, team + 1, sizeof(kmp_team_t), "%s_%d",
- header, team_id );
-
- __kmp_print_storage_map_gtid( -1, &team->t.t_bar[0], &team->t.t_bar[bs_last_barrier],
- sizeof(kmp_balign_team_t) * bs_last_barrier, "%s_%d.t_bar", header, team_id );
-
-
- __kmp_print_storage_map_gtid( -1, &team->t.t_bar[bs_plain_barrier], &team->t.t_bar[bs_plain_barrier+1],
- sizeof(kmp_balign_team_t), "%s_%d.t_bar[plain]", header, team_id );
-
- __kmp_print_storage_map_gtid( -1, &team->t.t_bar[bs_forkjoin_barrier], &team->t.t_bar[bs_forkjoin_barrier+1],
- sizeof(kmp_balign_team_t), "%s_%d.t_bar[forkjoin]", header, team_id );
-
- #if KMP_FAST_REDUCTION_BARRIER
- __kmp_print_storage_map_gtid( -1, &team->t.t_bar[bs_reduction_barrier], &team->t.t_bar[bs_reduction_barrier+1],
- sizeof(kmp_balign_team_t), "%s_%d.t_bar[reduction]", header, team_id );
- #endif // KMP_FAST_REDUCTION_BARRIER
-
- __kmp_print_storage_map_gtid( -1, &team->t.t_dispatch[0], &team->t.t_dispatch[num_thr],
- sizeof(kmp_disp_t) * num_thr, "%s_%d.t_dispatch", header, team_id );
-
- __kmp_print_storage_map_gtid( -1, &team->t.t_threads[0], &team->t.t_threads[num_thr],
- sizeof(kmp_info_t *) * num_thr, "%s_%d.t_threads", header, team_id );
-
- __kmp_print_storage_map_gtid( -1, &team->t.t_disp_buffer[0], &team->t.t_disp_buffer[num_disp_buff],
- sizeof(dispatch_shared_info_t) * num_disp_buff, "%s_%d.t_disp_buffer",
- header, team_id );
-
-
- __kmp_print_storage_map_gtid( -1, &team->t.t_taskq, &team->t.t_copypriv_data,
- sizeof(kmp_taskq_t), "%s_%d.t_taskq", header, team_id );
-}
-
-static void __kmp_init_allocator() {}
-static void __kmp_fini_allocator() {}
-
-/* ------------------------------------------------------------------------ */
-
-#ifdef KMP_DYNAMIC_LIB
-# if KMP_OS_WINDOWS
-
-static void
-__kmp_reset_lock( kmp_bootstrap_lock_t* lck ) {
- // TODO: Change to __kmp_break_bootstrap_lock().
- __kmp_init_bootstrap_lock( lck ); // make the lock released
-}
-
-static void
-__kmp_reset_locks_on_process_detach( int gtid_req ) {
- int i;
- int thread_count;
-
- // PROCESS_DETACH is expected to be called by a thread
- // that executes ProcessExit() or FreeLibrary().
- // The OS terminates the other threads (except the one calling ProcessExit or FreeLibrary).
- // So it might be safe to access __kmp_threads[] without taking the forkjoin_lock.
- // In fact, however, some threads may still be alive here, although they are about to be terminated.
- // The threads in the array with ds_thread==0 are most suspicious.
- // Actually, it may not be safe to access __kmp_threads[] at all.
-
- // TODO: does it make sense to check __kmp_roots[] ?
-
- // Check that no other live threads are still registered with the OMP library.
- while( 1 ) {
- thread_count = 0;
- for( i = 0; i < __kmp_threads_capacity; ++i ) {
- if( !__kmp_threads ) continue;
- kmp_info_t* th = __kmp_threads[ i ];
- if( th == NULL ) continue;
- int gtid = th->th.th_info.ds.ds_gtid;
- if( gtid == gtid_req ) continue;
- if( gtid < 0 ) continue;
- DWORD exit_val;
- int alive = __kmp_is_thread_alive( th, &exit_val );
- if( alive ) {
- ++thread_count;
- }
- }
- if( thread_count == 0 ) break; // success
- }
-
- // Assume that I'm alone.
-
- // Now it is probably safe to check and reset the locks.
- // __kmp_forkjoin_lock and __kmp_stdio_lock are expected to be reset.
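__kmp_is_thread_alive(), called in the scan above, is defined elsewhere in the runtime (z_Windows_NT_util.cpp). A plausible sketch of what such a check looks like on Windows, not part of this patch and with a hypothetical function name:

#include <windows.h>

// Sketch only: a thread counts as alive while its exit code still reads
// as STILL_ACTIVE. `handle` stands in for the thread handle the runtime keeps.
static int thread_is_alive(HANDLE handle, DWORD *exit_val) {
    if (!GetExitCodeThread(handle, exit_val))
        return 0;                  // cannot query -- treat as not alive
    return *exit_val == STILL_ACTIVE;
}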
- __kmp_reset_lock( &__kmp_forkjoin_lock ); - #ifdef KMP_DEBUG - __kmp_reset_lock( &__kmp_stdio_lock ); - #endif // KMP_DEBUG -} - -BOOL WINAPI -DllMain( HINSTANCE hInstDLL, DWORD fdwReason, LPVOID lpReserved ) { - //__kmp_acquire_bootstrap_lock( &__kmp_initz_lock ); - - switch( fdwReason ) { - - case DLL_PROCESS_ATTACH: - KA_TRACE( 10, ("DllMain: PROCESS_ATTACH\n" )); - - return TRUE; - - case DLL_PROCESS_DETACH: - KA_TRACE( 10, ("DllMain: PROCESS_DETACH T#%d\n", - __kmp_gtid_get_specific() )); - - if( lpReserved != NULL ) - { - // lpReserved is used for telling the difference: - // lpReserved == NULL when FreeLibrary() was called, - // lpReserved != NULL when the process terminates. - // When FreeLibrary() is called, worker threads remain alive. - // So they will release the forkjoin lock by themselves. - // When the process terminates, worker threads disappear triggering - // the problem of unreleased forkjoin lock as described below. - - // A worker thread can take the forkjoin lock. - // The problem comes up if that worker thread becomes dead - // before it releases the forkjoin lock. - // The forkjoin lock remains taken, while the thread - // executing DllMain()->PROCESS_DETACH->__kmp_internal_end_library() below - // will try to take the forkjoin lock and will always fail, - // so that the application will never finish [normally]. - // This scenario is possible if __kmpc_end() has not been executed. - // It looks like it's not a corner case, but common cases: - // - the main function was compiled by an alternative compiler; - // - the main function was compiled by icl but without /Qopenmp (application with plugins); - // - application terminates by calling C exit(), Fortran CALL EXIT() or Fortran STOP. - // - alive foreign thread prevented __kmpc_end from doing cleanup. - - // This is a hack to work around the problem. - // TODO: !!! to figure out something better. - __kmp_reset_locks_on_process_detach( __kmp_gtid_get_specific() ); - } - - __kmp_internal_end_library( __kmp_gtid_get_specific() ); - - return TRUE; - - case DLL_THREAD_ATTACH: - KA_TRACE( 10, ("DllMain: THREAD_ATTACH\n" )); - - /* if we wanted to register new siblings all the time here call - * __kmp_get_gtid(); */ - return TRUE; - - case DLL_THREAD_DETACH: - KA_TRACE( 10, ("DllMain: THREAD_DETACH T#%d\n", - __kmp_gtid_get_specific() )); - - __kmp_internal_end_thread( __kmp_gtid_get_specific() ); - return TRUE; - } - - return TRUE; -} - -# endif /* KMP_OS_WINDOWS */ -#endif /* KMP_DYNAMIC_LIB */ - - -/* ------------------------------------------------------------------------ */ - -/* Change the library type to "status" and return the old type */ -/* called from within initialization routines where __kmp_initz_lock is held */ -int -__kmp_change_library( int status ) -{ - int old_status; - - old_status = __kmp_yield_init & 1; // check whether KMP_LIBRARY=throughput (even init count) - - if (status) { - __kmp_yield_init |= 1; // throughput => turnaround (odd init count) - } - else { - __kmp_yield_init &= ~1; // turnaround => throughput (even init count) - } - - return old_status; // return previous setting of whether KMP_LIBRARY=throughput -} - -/* ------------------------------------------------------------------------ */ -/* ------------------------------------------------------------------------ */ - -/* __kmp_parallel_deo -- - * Wait until it's our turn. 
- */
-void
-__kmp_parallel_deo( int *gtid_ref, int *cid_ref, ident_t *loc_ref )
-{
- int gtid = *gtid_ref;
-#ifdef BUILD_PARALLEL_ORDERED
- kmp_team_t *team = __kmp_team_from_gtid( gtid );
-#endif /* BUILD_PARALLEL_ORDERED */
-
- if( __kmp_env_consistency_check ) {
- if( __kmp_threads[gtid]->th.th_root->r.r_active )
-#if KMP_USE_DYNAMIC_LOCK
- __kmp_push_sync( gtid, ct_ordered_in_parallel, loc_ref, NULL, 0 );
-#else
- __kmp_push_sync( gtid, ct_ordered_in_parallel, loc_ref, NULL );
-#endif
- }
-#ifdef BUILD_PARALLEL_ORDERED
- if( !team->t.t_serialized ) {
- KMP_MB();
- KMP_WAIT_YIELD(&team->t.t_ordered.dt.t_value, __kmp_tid_from_gtid( gtid ), KMP_EQ, NULL);
- KMP_MB();
- }
-#endif /* BUILD_PARALLEL_ORDERED */
-}
-
-/* __kmp_parallel_dxo --
- * Signal the next task.
- */
-
-void
-__kmp_parallel_dxo( int *gtid_ref, int *cid_ref, ident_t *loc_ref )
-{
- int gtid = *gtid_ref;
-#ifdef BUILD_PARALLEL_ORDERED
- int tid = __kmp_tid_from_gtid( gtid );
- kmp_team_t *team = __kmp_team_from_gtid( gtid );
-#endif /* BUILD_PARALLEL_ORDERED */
-
- if( __kmp_env_consistency_check ) {
- if( __kmp_threads[gtid]->th.th_root->r.r_active )
- __kmp_pop_sync( gtid, ct_ordered_in_parallel, loc_ref );
- }
-#ifdef BUILD_PARALLEL_ORDERED
- if ( ! team->t.t_serialized ) {
- KMP_MB(); /* Flush all pending memory write invalidates. */
-
- /* use the tid of the next thread in this team */
- /* TODO: replace with a general release procedure */
- team->t.t_ordered.dt.t_value = ((tid + 1) % team->t.t_nproc );
-
-#if OMPT_SUPPORT && OMPT_BLAME
- if (ompt_enabled &&
- ompt_callbacks.ompt_callback(ompt_event_release_ordered)) {
- /* accept blame for "ordered" waiting */
- kmp_info_t *this_thread = __kmp_threads[gtid];
- ompt_callbacks.ompt_callback(ompt_event_release_ordered)(
- this_thread->th.ompt_thread_info.wait_id);
- }
-#endif
-
- KMP_MB(); /* Flush all pending memory write invalidates. */
- }
-#endif /* BUILD_PARALLEL_ORDERED */
-}
-
-/* ------------------------------------------------------------------------ */
-/* ------------------------------------------------------------------------ */
-
-/* ------------------------------------------------------------------------ */
-/* ------------------------------------------------------------------------ */
-
-/* The BARRIER for a SINGLE process section is always explicit */
-
-int
-__kmp_enter_single( int gtid, ident_t *id_ref, int push_ws )
-{
- int status;
- kmp_info_t *th;
- kmp_team_t *team;
-
- if( ! TCR_4(__kmp_init_parallel) )
- __kmp_parallel_initialize();
-
- th = __kmp_threads[ gtid ];
- team = th->th.th_team;
- status = 0;
-
- th->th.th_ident = id_ref;
-
- if ( team->t.t_serialized ) {
- status = 1;
- } else {
- kmp_int32 old_this = th->th.th_local.this_construct;
-
- ++th->th.th_local.this_construct;
- /* try to set team count to thread count--success means thread got the
- single block
- */
- /* TODO: Should this be acquire or release?
*/ - if (team->t.t_construct == old_this) { - status = KMP_COMPARE_AND_STORE_ACQ32(&team->t.t_construct, old_this, - th->th.th_local.this_construct); - } -#if USE_ITT_BUILD - if ( __itt_metadata_add_ptr && __kmp_forkjoin_frames_mode == 3 && KMP_MASTER_GTID(gtid) && -#if OMP_40_ENABLED - th->th.th_teams_microtask == NULL && -#endif - team->t.t_active_level == 1 ) - { // Only report metadata by master of active team at level 1 - __kmp_itt_metadata_single( id_ref ); - } -#endif /* USE_ITT_BUILD */ - } - - if( __kmp_env_consistency_check ) { - if (status && push_ws) { - __kmp_push_workshare( gtid, ct_psingle, id_ref ); - } else { - __kmp_check_workshare( gtid, ct_psingle, id_ref ); - } - } -#if USE_ITT_BUILD - if ( status ) { - __kmp_itt_single_start( gtid ); - } -#endif /* USE_ITT_BUILD */ - return status; -} - -void -__kmp_exit_single( int gtid ) -{ -#if USE_ITT_BUILD - __kmp_itt_single_end( gtid ); -#endif /* USE_ITT_BUILD */ - if( __kmp_env_consistency_check ) - __kmp_pop_workshare( gtid, ct_psingle, NULL ); -} - - -/* - * determine if we can go parallel or must use a serialized parallel region and - * how many threads we can use - * set_nproc is the number of threads requested for the team - * returns 0 if we should serialize or only use one thread, - * otherwise the number of threads to use - * The forkjoin lock is held by the caller. - */ -static int -__kmp_reserve_threads( kmp_root_t *root, kmp_team_t *parent_team, - int master_tid, int set_nthreads -#if OMP_40_ENABLED - , int enter_teams -#endif /* OMP_40_ENABLED */ -) -{ - int capacity; - int new_nthreads; - KMP_DEBUG_ASSERT( __kmp_init_serial ); - KMP_DEBUG_ASSERT( root && parent_team ); - - // - // If dyn-var is set, dynamically adjust the number of desired threads, - // according to the method specified by dynamic_mode. - // - new_nthreads = set_nthreads; - if ( ! get__dynamic_2( parent_team, master_tid ) ) { - ; - } -#ifdef USE_LOAD_BALANCE - else if ( __kmp_global.g.g_dynamic_mode == dynamic_load_balance ) { - new_nthreads = __kmp_load_balance_nproc( root, set_nthreads ); - if ( new_nthreads == 1 ) { - KC_TRACE( 10, ( "__kmp_reserve_threads: T#%d load balance reduced reservation to 1 thread\n", - master_tid )); - return 1; - } - if ( new_nthreads < set_nthreads ) { - KC_TRACE( 10, ( "__kmp_reserve_threads: T#%d load balance reduced reservation to %d threads\n", - master_tid, new_nthreads )); - } - } -#endif /* USE_LOAD_BALANCE */ - else if ( __kmp_global.g.g_dynamic_mode == dynamic_thread_limit ) { - new_nthreads = __kmp_avail_proc - __kmp_nth + (root->r.r_active ? 
1 - : root->r.r_hot_team->t.t_nproc); - if ( new_nthreads <= 1 ) { - KC_TRACE( 10, ( "__kmp_reserve_threads: T#%d thread limit reduced reservation to 1 thread\n", - master_tid )); - return 1; - } - if ( new_nthreads < set_nthreads ) { - KC_TRACE( 10, ( "__kmp_reserve_threads: T#%d thread limit reduced reservation to %d threads\n", - master_tid, new_nthreads )); - } - else { - new_nthreads = set_nthreads; - } - } - else if ( __kmp_global.g.g_dynamic_mode == dynamic_random ) { - if ( set_nthreads > 2 ) { - new_nthreads = __kmp_get_random( parent_team->t.t_threads[master_tid] ); - new_nthreads = ( new_nthreads % set_nthreads ) + 1; - if ( new_nthreads == 1 ) { - KC_TRACE( 10, ( "__kmp_reserve_threads: T#%d dynamic random reduced reservation to 1 thread\n", - master_tid )); - return 1; - } - if ( new_nthreads < set_nthreads ) { - KC_TRACE( 10, ( "__kmp_reserve_threads: T#%d dynamic random reduced reservation to %d threads\n", - master_tid, new_nthreads )); - } - } - } - else { - KMP_ASSERT( 0 ); - } - - // - // Respect KMP_ALL_THREADS, KMP_MAX_THREADS, OMP_THREAD_LIMIT. - // - if ( __kmp_nth + new_nthreads - ( root->r.r_active ? 1 : - root->r.r_hot_team->t.t_nproc ) > __kmp_max_nth ) { - int tl_nthreads = __kmp_max_nth - __kmp_nth + ( root->r.r_active ? 1 : - root->r.r_hot_team->t.t_nproc ); - if ( tl_nthreads <= 0 ) { - tl_nthreads = 1; - } - - // - // If dyn-var is false, emit a 1-time warning. - // - if ( ! get__dynamic_2( parent_team, master_tid ) - && ( ! __kmp_reserve_warn ) ) { - __kmp_reserve_warn = 1; - __kmp_msg( - kmp_ms_warning, - KMP_MSG( CantFormThrTeam, set_nthreads, tl_nthreads ), - KMP_HNT( Unset_ALL_THREADS ), - __kmp_msg_null - ); - } - if ( tl_nthreads == 1 ) { - KC_TRACE( 10, ( "__kmp_reserve_threads: T#%d KMP_ALL_THREADS reduced reservation to 1 thread\n", - master_tid )); - return 1; - } - KC_TRACE( 10, ( "__kmp_reserve_threads: T#%d KMP_ALL_THREADS reduced reservation to %d threads\n", - master_tid, tl_nthreads )); - new_nthreads = tl_nthreads; - } - - // - // Check if the threads array is large enough, or needs expanding. - // - // See comment in __kmp_register_root() about the adjustment if - // __kmp_threads[0] == NULL. - // - capacity = __kmp_threads_capacity; - if ( TCR_PTR(__kmp_threads[0]) == NULL ) { - --capacity; - } - if ( __kmp_nth + new_nthreads - ( root->r.r_active ? 1 : - root->r.r_hot_team->t.t_nproc ) > capacity ) { - // - // Expand the threads array. - // - int slotsRequired = __kmp_nth + new_nthreads - ( root->r.r_active ? 1 : - root->r.r_hot_team->t.t_nproc ) - capacity; - int slotsAdded = __kmp_expand_threads(slotsRequired, slotsRequired); - if ( slotsAdded < slotsRequired ) { - // - // The threads array was not expanded enough. - // - new_nthreads -= ( slotsRequired - slotsAdded ); - KMP_ASSERT( new_nthreads >= 1 ); - - // - // If dyn-var is false, emit a 1-time warning. - // - if ( ! get__dynamic_2( parent_team, master_tid ) - && ( ! 
__kmp_reserve_warn ) ) { - __kmp_reserve_warn = 1; - if ( __kmp_tp_cached ) { - __kmp_msg( - kmp_ms_warning, - KMP_MSG( CantFormThrTeam, set_nthreads, new_nthreads ), - KMP_HNT( Set_ALL_THREADPRIVATE, __kmp_tp_capacity ), - KMP_HNT( PossibleSystemLimitOnThreads ), - __kmp_msg_null - ); - } - else { - __kmp_msg( - kmp_ms_warning, - KMP_MSG( CantFormThrTeam, set_nthreads, new_nthreads ), - KMP_HNT( SystemLimitOnThreads ), - __kmp_msg_null - ); - } - } - } - } - - if ( new_nthreads == 1 ) { - KC_TRACE( 10, ( "__kmp_reserve_threads: T#%d serializing team after reclaiming dead roots and rechecking; requested %d threads\n", - __kmp_get_gtid(), set_nthreads ) ); - return 1; - } - - KC_TRACE( 10, ( "__kmp_reserve_threads: T#%d allocating %d threads; requested %d threads\n", - __kmp_get_gtid(), new_nthreads, set_nthreads )); - return new_nthreads; -} - -/* ------------------------------------------------------------------------ */ -/* ------------------------------------------------------------------------ */ - -/* allocate threads from the thread pool and assign them to the new team */ -/* we are assured that there are enough threads available, because we - * checked on that earlier within critical section forkjoin */ - -static void -__kmp_fork_team_threads( kmp_root_t *root, kmp_team_t *team, - kmp_info_t *master_th, int master_gtid ) -{ - int i; - int use_hot_team; - - KA_TRACE( 10, ("__kmp_fork_team_threads: new_nprocs = %d\n", team->t.t_nproc ) ); - KMP_DEBUG_ASSERT( master_gtid == __kmp_get_gtid() ); - KMP_MB(); - - /* first, let's setup the master thread */ - master_th->th.th_info.ds.ds_tid = 0; - master_th->th.th_team = team; - master_th->th.th_team_nproc = team->t.t_nproc; - master_th->th.th_team_master = master_th; - master_th->th.th_team_serialized = FALSE; - master_th->th.th_dispatch = & team->t.t_dispatch[ 0 ]; - - /* make sure we are not the optimized hot team */ -#if KMP_NESTED_HOT_TEAMS - use_hot_team = 0; - kmp_hot_team_ptr_t *hot_teams = master_th->th.th_hot_teams; - if( hot_teams ) { // hot teams array is not allocated if KMP_HOT_TEAMS_MAX_LEVEL=0 - int level = team->t.t_active_level - 1; // index in array of hot teams - if( master_th->th.th_teams_microtask ) { // are we inside the teams? 
- if( master_th->th.th_teams_size.nteams > 1 ) { - ++level; // level was not increased in teams construct for team_of_masters - } - if( team->t.t_pkfn != (microtask_t)__kmp_teams_master && - master_th->th.th_teams_level == team->t.t_level ) { - ++level; // level was not increased in teams construct for team_of_workers before the parallel - } // team->t.t_level will be increased inside parallel - } - if( level < __kmp_hot_teams_max_level ) { - if( hot_teams[level].hot_team ) { - // hot team has already been allocated for given level - KMP_DEBUG_ASSERT(hot_teams[level].hot_team == team); - use_hot_team = 1; // the team is ready to use - } else { - use_hot_team = 0; // AC: threads are not allocated yet - hot_teams[level].hot_team = team; // remember new hot team - hot_teams[level].hot_team_nth = team->t.t_nproc; - } - } else { - use_hot_team = 0; - } - } -#else - use_hot_team = team == root->r.r_hot_team; -#endif - if ( !use_hot_team ) { - - /* install the master thread */ - team->t.t_threads[ 0 ] = master_th; - __kmp_initialize_info( master_th, team, 0, master_gtid ); - - /* now, install the worker threads */ - for ( i=1 ; i < team->t.t_nproc ; i++ ) { - - /* fork or reallocate a new thread and install it in team */ - kmp_info_t *thr = __kmp_allocate_thread( root, team, i ); - team->t.t_threads[ i ] = thr; - KMP_DEBUG_ASSERT( thr ); - KMP_DEBUG_ASSERT( thr->th.th_team == team ); - /* align team and thread arrived states */ - KA_TRACE( 20, ("__kmp_fork_team_threads: T#%d(%d:%d) init arrived T#%d(%d:%d) join =%llu, plain=%llu\n", - __kmp_gtid_from_tid( 0, team ), team->t.t_id, 0, - __kmp_gtid_from_tid( i, team ), team->t.t_id, i, - team->t.t_bar[ bs_forkjoin_barrier ].b_arrived, - team->t.t_bar[ bs_plain_barrier ].b_arrived ) ); -#if OMP_40_ENABLED - thr->th.th_teams_microtask = master_th->th.th_teams_microtask; - thr->th.th_teams_level = master_th->th.th_teams_level; - thr->th.th_teams_size = master_th->th.th_teams_size; -#endif - { // Initialize threads' barrier data. - int b; - kmp_balign_t * balign = team->t.t_threads[ i ]->th.th_bar; - for ( b = 0; b < bs_last_barrier; ++ b ) { - balign[ b ].bb.b_arrived = team->t.t_bar[ b ].b_arrived; - KMP_DEBUG_ASSERT(balign[b].bb.wait_flag != KMP_BARRIER_PARENT_FLAG); -#if USE_DEBUGGER - balign[ b ].bb.b_worker_arrived = team->t.t_bar[ b ].b_team_arrived; -#endif - }; // for b - } - } - -#if OMP_40_ENABLED && KMP_AFFINITY_SUPPORTED - __kmp_partition_places( team ); -#endif - - } - - KMP_MB(); -} - -#if KMP_ARCH_X86 || KMP_ARCH_X86_64 -// -// Propagate any changes to the floating point control registers out to the team -// We try to avoid unnecessary writes to the relevant cache line in the team structure, -// so we don't make changes unless they are needed. -// -inline static void -propagateFPControl(kmp_team_t * team) -{ - if ( __kmp_inherit_fp_control ) { - kmp_int16 x87_fpu_control_word; - kmp_uint32 mxcsr; - - // Get master values of FPU control flags (both X87 and vector) - __kmp_store_x87_fpu_control_word( &x87_fpu_control_word ); - __kmp_store_mxcsr( &mxcsr ); - mxcsr &= KMP_X86_MXCSR_MASK; - - // There is no point looking at t_fp_control_saved here. - // If it is TRUE, we still have to update the values if they are different from those we now have. - // If it is FALSE we didn't save anything yet, but our objective is the same. We have to ensure - // that the values in the team are the same as those we have. - // So, this code achieves what we need whether or not t_fp_control_saved is true. 
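KMP_CHECK_UPDATE, used just below, is the compare-before-store idiom this comment describes. A minimal sketch, not part of this patch; the runtime uses a macro, and the template name here is illustrative:

// Sketch only: store to shared state only when the value really changes,
// so an unchanged cache line is never flipped into the modified state.
template <typename T>
static inline void check_update(T &dst, const T &src) {
    if (dst != src)
        dst = src;
}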
- // By checking whether the value needs updating we avoid unnecessary writes that would put the - // cache-line into a written state, causing all threads in the team to have to read it again. - KMP_CHECK_UPDATE(team->t.t_x87_fpu_control_word, x87_fpu_control_word); - KMP_CHECK_UPDATE(team->t.t_mxcsr, mxcsr); - // Although we don't use this value, other code in the runtime wants to know whether it should restore them. - // So we must ensure it is correct. - KMP_CHECK_UPDATE(team->t.t_fp_control_saved, TRUE); - } - else { - // Similarly here. Don't write to this cache-line in the team structure unless we have to. - KMP_CHECK_UPDATE(team->t.t_fp_control_saved, FALSE); - } -} - -// Do the opposite, setting the hardware registers to the updated values from the team. -inline static void -updateHWFPControl(kmp_team_t * team) -{ - if ( __kmp_inherit_fp_control && team->t.t_fp_control_saved ) { - // - // Only reset the fp control regs if they have been changed in the team. - // the parallel region that we are exiting. - // - kmp_int16 x87_fpu_control_word; - kmp_uint32 mxcsr; - __kmp_store_x87_fpu_control_word( &x87_fpu_control_word ); - __kmp_store_mxcsr( &mxcsr ); - mxcsr &= KMP_X86_MXCSR_MASK; - - if ( team->t.t_x87_fpu_control_word != x87_fpu_control_word ) { - __kmp_clear_x87_fpu_status_word(); - __kmp_load_x87_fpu_control_word( &team->t.t_x87_fpu_control_word ); - } - - if ( team->t.t_mxcsr != mxcsr ) { - __kmp_load_mxcsr( &team->t.t_mxcsr ); - } - } -} -#else -# define propagateFPControl(x) ((void)0) -# define updateHWFPControl(x) ((void)0) -#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */ - -static void -__kmp_alloc_argv_entries( int argc, kmp_team_t *team, int realloc ); // forward declaration - -/* - * Run a parallel region that has been serialized, so runs only in a team of the single master thread. - */ -void -__kmp_serialized_parallel(ident_t *loc, kmp_int32 global_tid) -{ - kmp_info_t *this_thr; - kmp_team_t *serial_team; - - KC_TRACE( 10, ("__kmpc_serialized_parallel: called by T#%d\n", global_tid ) ); - - /* Skip all this code for autopar serialized loops since it results in - unacceptable overhead */ - if( loc != NULL && (loc->flags & KMP_IDENT_AUTOPAR ) ) - return; - - if( ! TCR_4( __kmp_init_parallel ) ) - __kmp_parallel_initialize(); - - this_thr = __kmp_threads[ global_tid ]; - serial_team = this_thr->th.th_serial_team; - - /* utilize the serialized team held by this thread */ - KMP_DEBUG_ASSERT( serial_team ); - KMP_MB(); - - if ( __kmp_tasking_mode != tskm_immediate_exec ) { - KMP_DEBUG_ASSERT(this_thr->th.th_task_team == this_thr->th.th_team->t.t_task_team[this_thr->th.th_task_state]); - KMP_DEBUG_ASSERT( serial_team->t.t_task_team[this_thr->th.th_task_state] == NULL ); - KA_TRACE( 20, ( "__kmpc_serialized_parallel: T#%d pushing task_team %p / team %p, new task_team = NULL\n", - global_tid, this_thr->th.th_task_team, this_thr->th.th_team ) ); - this_thr->th.th_task_team = NULL; - } - -#if OMP_40_ENABLED - kmp_proc_bind_t proc_bind = this_thr->th.th_set_proc_bind; - if ( this_thr->th.th_current_task->td_icvs.proc_bind == proc_bind_false ) { - proc_bind = proc_bind_false; - } - else if ( proc_bind == proc_bind_default ) { - // - // No proc_bind clause was specified, so use the current value - // of proc-bind-var for this parallel region. 
- //
- proc_bind = this_thr->th.th_current_task->td_icvs.proc_bind;
- }
- //
- // Reset for next parallel region
- //
- this_thr->th.th_set_proc_bind = proc_bind_default;
-#endif /* OMP_40_ENABLED */
-
- if( this_thr->th.th_team != serial_team ) {
- // Nested level will be an index in the nested nthreads array
- int level = this_thr->th.th_team->t.t_level;
-
- if( serial_team->t.t_serialized ) {
- /* this serial team was already used
- * TODO increase performance by making these locks more specific */
- kmp_team_t *new_team;
-
- __kmp_acquire_bootstrap_lock( &__kmp_forkjoin_lock );
-
-#if OMPT_SUPPORT
- ompt_parallel_id_t ompt_parallel_id = __ompt_parallel_id_new(global_tid);
-#endif
-
- new_team = __kmp_allocate_team(this_thr->th.th_root, 1, 1,
-#if OMPT_SUPPORT
- ompt_parallel_id,
-#endif
-#if OMP_40_ENABLED
- proc_bind,
-#endif
- & this_thr->th.th_current_task->td_icvs,
- 0 USE_NESTED_HOT_ARG(NULL) );
- __kmp_release_bootstrap_lock( &__kmp_forkjoin_lock );
- KMP_ASSERT( new_team );
-
- /* setup new serialized team and install it */
- new_team->t.t_threads[0] = this_thr;
- new_team->t.t_parent = this_thr->th.th_team;
- serial_team = new_team;
- this_thr->th.th_serial_team = serial_team;
-
- KF_TRACE( 10, ( "__kmpc_serialized_parallel: T#%d allocated new serial team %p\n",
- global_tid, serial_team ) );
-
-
- /* TODO the above breaks the requirement that if we run out of
- * resources, then we can still guarantee that serialized teams
- * are ok, since we may need to allocate a new one */
- } else {
- KF_TRACE( 10, ( "__kmpc_serialized_parallel: T#%d reusing cached serial team %p\n",
- global_tid, serial_team ) );
- }
-
- /* we have to initialize this serial team */
- KMP_DEBUG_ASSERT( serial_team->t.t_threads );
- KMP_DEBUG_ASSERT( serial_team->t.t_threads[0] == this_thr );
- KMP_DEBUG_ASSERT( this_thr->th.th_team != serial_team );
- serial_team->t.t_ident = loc;
- serial_team->t.t_serialized = 1;
- serial_team->t.t_nproc = 1;
- serial_team->t.t_parent = this_thr->th.th_team;
- serial_team->t.t_sched = this_thr->th.th_team->t.t_sched;
- this_thr->th.th_team = serial_team;
- serial_team->t.t_master_tid = this_thr->th.th_info.ds.ds_tid;
-
- KF_TRACE( 10, ( "__kmpc_serialized_parallel: T#%d curtask=%p\n",
- global_tid, this_thr->th.th_current_task ) );
- KMP_ASSERT( this_thr->th.th_current_task->td_flags.executing == 1 );
- this_thr->th.th_current_task->td_flags.executing = 0;
-
- __kmp_push_current_task_to_thread( this_thr, serial_team, 0 );
-
- /* TODO: GEH: do the ICVs work for nested serialized teams? Don't we need an implicit task for
- each serialized task represented by team->t.t_serialized? */
- copy_icvs(
- & this_thr->th.th_current_task->td_icvs,
- & this_thr->th.th_current_task->td_parent->td_icvs );
-
- // Thread value exists in the nested nthreads array for the next nested level
- if ( __kmp_nested_nth.used && ( level + 1 < __kmp_nested_nth.used ) ) {
- this_thr->th.th_current_task->td_icvs.nproc = __kmp_nested_nth.nth[ level + 1 ];
- }
-
-#if OMP_40_ENABLED
- if ( __kmp_nested_proc_bind.used && ( level + 1 < __kmp_nested_proc_bind.used ) ) {
- this_thr->th.th_current_task->td_icvs.proc_bind
- = __kmp_nested_proc_bind.bind_types[ level + 1 ];
- }
-#endif /* OMP_40_ENABLED */
-
-#if USE_DEBUGGER
- serial_team->t.t_pkfn = (microtask_t)( ~0 ); // For the debugger.
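// Aside: (microtask_t)( ~0 ) is a sentinel, presumably chosen as an
// impossible code address so an attached debugger can tell a serialized team,
// which has no outlined function, apart from a team with a real microtask.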
-#endif - this_thr->th.th_info.ds.ds_tid = 0; - - /* set thread cache values */ - this_thr->th.th_team_nproc = 1; - this_thr->th.th_team_master = this_thr; - this_thr->th.th_team_serialized = 1; - - serial_team->t.t_level = serial_team->t.t_parent->t.t_level + 1; - serial_team->t.t_active_level = serial_team->t.t_parent->t.t_active_level; - - propagateFPControl (serial_team); - - /* check if we need to allocate dispatch buffers stack */ - KMP_DEBUG_ASSERT(serial_team->t.t_dispatch); - if ( !serial_team->t.t_dispatch->th_disp_buffer ) { - serial_team->t.t_dispatch->th_disp_buffer = (dispatch_private_info_t *) - __kmp_allocate( sizeof( dispatch_private_info_t ) ); - } - this_thr->th.th_dispatch = serial_team->t.t_dispatch; - -#if OMPT_SUPPORT - ompt_parallel_id_t ompt_parallel_id = __ompt_parallel_id_new(global_tid); - __ompt_team_assign_id(serial_team, ompt_parallel_id); -#endif - - KMP_MB(); - - } else { - /* this serialized team is already being used, - * that's fine, just add another nested level */ - KMP_DEBUG_ASSERT( this_thr->th.th_team == serial_team ); - KMP_DEBUG_ASSERT( serial_team->t.t_threads ); - KMP_DEBUG_ASSERT( serial_team->t.t_threads[0] == this_thr ); - ++ serial_team->t.t_serialized; - this_thr->th.th_team_serialized = serial_team->t.t_serialized; - - // Nested level will be an index in the nested nthreads array - int level = this_thr->th.th_team->t.t_level; - // Thread value exists in the nested nthreads array for the next nested level - if ( __kmp_nested_nth.used && ( level + 1 < __kmp_nested_nth.used ) ) { - this_thr->th.th_current_task->td_icvs.nproc = __kmp_nested_nth.nth[ level + 1 ]; - } - serial_team->t.t_level++; - KF_TRACE( 10, ( "__kmpc_serialized_parallel: T#%d increasing nesting level of serial team %p to %d\n", - global_tid, serial_team, serial_team->t.t_level ) ); - - /* allocate/push dispatch buffers stack */ - KMP_DEBUG_ASSERT(serial_team->t.t_dispatch); - { - dispatch_private_info_t * disp_buffer = (dispatch_private_info_t *) - __kmp_allocate( sizeof( dispatch_private_info_t ) ); - disp_buffer->next = serial_team->t.t_dispatch->th_disp_buffer; - serial_team->t.t_dispatch->th_disp_buffer = disp_buffer; - } - this_thr->th.th_dispatch = serial_team->t.t_dispatch; - - KMP_MB(); - } - - if ( __kmp_env_consistency_check ) - __kmp_push_parallel( global_tid, NULL ); - -} - -/* most of the work for a fork */ -/* return true if we really went parallel, false if serialized */ -int -__kmp_fork_call( - ident_t * loc, - int gtid, - enum fork_context_e call_context, // Intel, GNU, ... 
- kmp_int32 argc, -#if OMPT_SUPPORT - void *unwrapped_task, -#endif - microtask_t microtask, - launch_t invoker, -/* TODO: revert workaround for Intel(R) 64 tracker #96 */ -#if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX - va_list * ap -#else - va_list ap -#endif - ) -{ - void **argv; - int i; - int master_tid; - int master_this_cons; - kmp_team_t *team; - kmp_team_t *parent_team; - kmp_info_t *master_th; - kmp_root_t *root; - int nthreads; - int master_active; - int master_set_numthreads; - int level; -#if OMP_40_ENABLED - int active_level; - int teams_level; -#endif -#if KMP_NESTED_HOT_TEAMS - kmp_hot_team_ptr_t **p_hot_teams; -#endif - { // KMP_TIME_BLOCK - KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(KMP_fork_call); - KMP_COUNT_VALUE(OMP_PARALLEL_args, argc); - - KA_TRACE( 20, ("__kmp_fork_call: enter T#%d\n", gtid )); - if ( __kmp_stkpadding > 0 && __kmp_root[gtid] != NULL ) { - /* Some systems prefer the stack for the root thread(s) to start with */ - /* some gap from the parent stack to prevent false sharing. */ - void *dummy = KMP_ALLOCA(__kmp_stkpadding); - /* These 2 lines below are so this does not get optimized out */ - if ( __kmp_stkpadding > KMP_MAX_STKPADDING ) - __kmp_stkpadding += (short)((kmp_int64)dummy); - } - - /* initialize if needed */ - KMP_DEBUG_ASSERT( __kmp_init_serial ); // AC: potentially unsafe, not in sync with shutdown - if( ! TCR_4(__kmp_init_parallel) ) - __kmp_parallel_initialize(); - - /* setup current data */ - master_th = __kmp_threads[ gtid ]; // AC: potentially unsafe, not in sync with shutdown - parent_team = master_th->th.th_team; - master_tid = master_th->th.th_info.ds.ds_tid; - master_this_cons = master_th->th.th_local.this_construct; - root = master_th->th.th_root; - master_active = root->r.r_active; - master_set_numthreads = master_th->th.th_set_nproc; - -#if OMPT_SUPPORT - ompt_parallel_id_t ompt_parallel_id; - ompt_task_id_t ompt_task_id; - ompt_frame_t *ompt_frame; - ompt_task_id_t my_task_id; - ompt_parallel_id_t my_parallel_id; - - if (ompt_enabled) { - ompt_parallel_id = __ompt_parallel_id_new(gtid); - ompt_task_id = __ompt_get_task_id_internal(0); - ompt_frame = __ompt_get_task_frame_internal(0); - } -#endif - - // Nested level will be an index in the nested nthreads array - level = parent_team->t.t_level; - active_level = parent_team->t.t_active_level; // is used to launch non-serial teams even if nested is not allowed -#if OMP_40_ENABLED - teams_level = master_th->th.th_teams_level; // needed to check nesting inside the teams -#endif -#if KMP_NESTED_HOT_TEAMS - p_hot_teams = &master_th->th.th_hot_teams; - if( *p_hot_teams == NULL && __kmp_hot_teams_max_level > 0 ) { - *p_hot_teams = (kmp_hot_team_ptr_t*)__kmp_allocate( - sizeof(kmp_hot_team_ptr_t) * __kmp_hot_teams_max_level); - (*p_hot_teams)[0].hot_team = root->r.r_hot_team; - (*p_hot_teams)[0].hot_team_nth = 1; // it is either actual or not needed (when active_level > 0) - } -#endif - -#if OMPT_SUPPORT - if (ompt_enabled && - ompt_callbacks.ompt_callback(ompt_event_parallel_begin)) { - int team_size = master_set_numthreads; - - ompt_callbacks.ompt_callback(ompt_event_parallel_begin)( - ompt_task_id, ompt_frame, ompt_parallel_id, - team_size, unwrapped_task, OMPT_INVOKER(call_context)); - } -#endif - - master_th->th.th_ident = loc; - -#if OMP_40_ENABLED - if ( master_th->th.th_teams_microtask && - ap && microtask != (microtask_t)__kmp_teams_master && level == teams_level ) { - // AC: This is start of parallel that is nested inside teams construct. 
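// Aside: the guard above, restated as a predicate with invented names. This
// fast path applies only to a parallel construct immediately nested inside
// 'teams', where the hot league team already has workers waiting at the fork
// barrier (a sketch, not runtime code):
static bool sketch_is_parallel_in_teams(bool in_teams_microtask, bool has_ap,
                                        bool microtask_is_teams_master,
                                        int level, int teams_level) {
    return in_teams_microtask && has_ap &&
           !microtask_is_teams_master && level == teams_level;
}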
- // The team is actual (hot), all workers are ready at the fork barrier. - // No lock needed to initialize the team a bit, then free workers. - parent_team->t.t_ident = loc; - __kmp_alloc_argv_entries( argc, parent_team, TRUE ); - parent_team->t.t_argc = argc; - argv = (void**)parent_team->t.t_argv; - for( i=argc-1; i >= 0; --i ) -/* TODO: revert workaround for Intel(R) 64 tracker #96 */ -#if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX - *argv++ = va_arg( *ap, void * ); -#else - *argv++ = va_arg( ap, void * ); -#endif - /* Increment our nested depth levels, but not increase the serialization */ - if ( parent_team == master_th->th.th_serial_team ) { - // AC: we are in serialized parallel - __kmpc_serialized_parallel(loc, gtid); - KMP_DEBUG_ASSERT( parent_team->t.t_serialized > 1 ); - parent_team->t.t_serialized--; // AC: need this in order enquiry functions - // work correctly, will restore at join time - -#if OMPT_SUPPORT - void *dummy; - void **exit_runtime_p; - - ompt_lw_taskteam_t lw_taskteam; - - if (ompt_enabled) { - __ompt_lw_taskteam_init(&lw_taskteam, master_th, gtid, - unwrapped_task, ompt_parallel_id); - lw_taskteam.ompt_task_info.task_id = __ompt_task_id_new(gtid); - exit_runtime_p = &(lw_taskteam.ompt_task_info.frame.exit_runtime_frame); - - __ompt_lw_taskteam_link(&lw_taskteam, master_th); - -#if OMPT_TRACE - /* OMPT implicit task begin */ - my_task_id = lw_taskteam.ompt_task_info.task_id; - my_parallel_id = parent_team->t.ompt_team_info.parallel_id; - if (ompt_callbacks.ompt_callback(ompt_event_implicit_task_begin)) { - ompt_callbacks.ompt_callback(ompt_event_implicit_task_begin)( - my_parallel_id, my_task_id); - } -#endif - - /* OMPT state */ - master_th->th.ompt_thread_info.state = ompt_state_work_parallel; - } else { - exit_runtime_p = &dummy; - } -#endif - - { - KMP_TIME_PARTITIONED_BLOCK(OMP_parallel); - KMP_SET_THREAD_STATE_BLOCK(IMPLICIT_TASK); - __kmp_invoke_microtask( microtask, gtid, 0, argc, parent_team->t.t_argv -#if OMPT_SUPPORT - , exit_runtime_p -#endif - ); - } - -#if OMPT_SUPPORT - *exit_runtime_p = NULL; - if (ompt_enabled) { -#if OMPT_TRACE - lw_taskteam.ompt_task_info.frame.exit_runtime_frame = NULL; - - if (ompt_callbacks.ompt_callback(ompt_event_implicit_task_end)) { - ompt_callbacks.ompt_callback(ompt_event_implicit_task_end)( - ompt_parallel_id, ompt_task_id); - } - - __ompt_lw_taskteam_unlink(master_th); - // reset clear the task id only after unlinking the task - lw_taskteam.ompt_task_info.task_id = ompt_task_id_none; -#endif - - if (ompt_callbacks.ompt_callback(ompt_event_parallel_end)) { - ompt_callbacks.ompt_callback(ompt_event_parallel_end)( - ompt_parallel_id, ompt_task_id, - OMPT_INVOKER(call_context)); - } - master_th->th.ompt_thread_info.state = ompt_state_overhead; - } -#endif - return TRUE; - } - - parent_team->t.t_pkfn = microtask; -#if OMPT_SUPPORT - parent_team->t.ompt_team_info.microtask = unwrapped_task; -#endif - parent_team->t.t_invoke = invoker; - KMP_TEST_THEN_INC32( (kmp_int32*) &root->r.r_in_parallel ); - parent_team->t.t_active_level ++; - parent_team->t.t_level ++; - - /* Change number of threads in the team if requested */ - if ( master_set_numthreads ) { // The parallel has num_threads clause - if ( master_set_numthreads < master_th->th.th_teams_size.nth ) { - // AC: only can reduce the number of threads dynamically, cannot increase - kmp_info_t **other_threads = parent_team->t.t_threads; - parent_team->t.t_nproc = master_set_numthreads; - for ( i = 0; i < master_set_numthreads; ++i ) { - 
other_threads[i]->th.th_team_nproc = master_set_numthreads; - } - // Keep extra threads hot in the team for possible next parallels - } - master_th->th.th_set_nproc = 0; - } - -#if USE_DEBUGGER - if ( __kmp_debugging ) { // Let debugger override number of threads. - int nth = __kmp_omp_num_threads( loc ); - if ( nth > 0 ) { // 0 means debugger does not want to change number of threads. - master_set_numthreads = nth; - }; // if - }; // if -#endif - - KF_TRACE( 10, ( "__kmp_fork_call: before internal fork: root=%p, team=%p, master_th=%p, gtid=%d\n", root, parent_team, master_th, gtid ) ); - __kmp_internal_fork( loc, gtid, parent_team ); - KF_TRACE( 10, ( "__kmp_fork_call: after internal fork: root=%p, team=%p, master_th=%p, gtid=%d\n", root, parent_team, master_th, gtid ) ); - - /* Invoke microtask for MASTER thread */ - KA_TRACE( 20, ("__kmp_fork_call: T#%d(%d:0) invoke microtask = %p\n", - gtid, parent_team->t.t_id, parent_team->t.t_pkfn ) ); - - { - KMP_TIME_PARTITIONED_BLOCK(OMP_parallel); - KMP_SET_THREAD_STATE_BLOCK(IMPLICIT_TASK); - if (! parent_team->t.t_invoke( gtid )) { - KMP_ASSERT2( 0, "cannot invoke microtask for MASTER thread" ); - } - } - KA_TRACE( 20, ("__kmp_fork_call: T#%d(%d:0) done microtask = %p\n", - gtid, parent_team->t.t_id, parent_team->t.t_pkfn ) ); - KMP_MB(); /* Flush all pending memory write invalidates. */ - - KA_TRACE( 20, ("__kmp_fork_call: parallel exit T#%d\n", gtid )); - - return TRUE; - } // Parallel closely nested in teams construct -#endif /* OMP_40_ENABLED */ - -#if KMP_DEBUG - if ( __kmp_tasking_mode != tskm_immediate_exec ) { - KMP_DEBUG_ASSERT(master_th->th.th_task_team == parent_team->t.t_task_team[master_th->th.th_task_state]); - } -#endif - - if ( parent_team->t.t_active_level >= master_th->th.th_current_task->td_icvs.max_active_levels ) { - nthreads = 1; - } else { -#if OMP_40_ENABLED - int enter_teams = ((ap==NULL && active_level==0)||(ap && teams_level>0 && teams_level==level)); -#endif - nthreads = master_set_numthreads ? - master_set_numthreads : get__nproc_2( parent_team, master_tid ); // TODO: get nproc directly from current task - - // Check if we need to take forkjoin lock? (no need for serialized parallel out of teams construct). - // This code moved here from __kmp_reserve_threads() to speedup nested serialized parallels. - if (nthreads > 1) { - if ( ( !get__nested(master_th) && (root->r.r_in_parallel -#if OMP_40_ENABLED - && !enter_teams -#endif /* OMP_40_ENABLED */ - ) ) || ( __kmp_library == library_serial ) ) { - KC_TRACE( 10, ( "__kmp_fork_call: T#%d serializing team; requested %d threads\n", - gtid, nthreads )); - nthreads = 1; - } - } - if ( nthreads > 1 ) { - /* determine how many new threads we can use */ - __kmp_acquire_bootstrap_lock( &__kmp_forkjoin_lock ); - - nthreads = __kmp_reserve_threads(root, parent_team, master_tid, nthreads -#if OMP_40_ENABLED -/* AC: If we execute teams from parallel region (on host), then teams should be created - but each can only have 1 thread if nesting is disabled. If teams called from serial region, - then teams and their threads should be created regardless of the nesting setting. 
*/ - , enter_teams -#endif /* OMP_40_ENABLED */ - ); - if ( nthreads == 1 ) { - // Free lock for single thread execution here; - // for multi-thread execution it will be freed later - // after team of threads created and initialized - __kmp_release_bootstrap_lock( &__kmp_forkjoin_lock ); - } - } - } - KMP_DEBUG_ASSERT( nthreads > 0 ); - - /* If we temporarily changed the set number of threads then restore it now */ - master_th->th.th_set_nproc = 0; - - /* create a serialized parallel region? */ - if ( nthreads == 1 ) { - /* josh todo: hypothetical question: what do we do for OS X*? */ -#if KMP_OS_LINUX && ( KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) - void * args[ argc ]; -#else - void * * args = (void**) KMP_ALLOCA( argc * sizeof( void * ) ); -#endif /* KMP_OS_LINUX && ( KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) */ - - KA_TRACE( 20, ("__kmp_fork_call: T#%d serializing parallel region\n", gtid )); - - __kmpc_serialized_parallel(loc, gtid); - - if ( call_context == fork_context_intel ) { - /* TODO this sucks, use the compiler itself to pass args! :) */ - master_th->th.th_serial_team->t.t_ident = loc; -#if OMP_40_ENABLED - if ( !ap ) { - // revert change made in __kmpc_serialized_parallel() - master_th->th.th_serial_team->t.t_level--; - // Get args from parent team for teams construct - -#if OMPT_SUPPORT - void *dummy; - void **exit_runtime_p; - - ompt_lw_taskteam_t lw_taskteam; - - if (ompt_enabled) { - __ompt_lw_taskteam_init(&lw_taskteam, master_th, gtid, - unwrapped_task, ompt_parallel_id); - lw_taskteam.ompt_task_info.task_id = __ompt_task_id_new(gtid); - exit_runtime_p = &(lw_taskteam.ompt_task_info.frame.exit_runtime_frame); - - __ompt_lw_taskteam_link(&lw_taskteam, master_th); - -#if OMPT_TRACE - my_task_id = lw_taskteam.ompt_task_info.task_id; - if (ompt_callbacks.ompt_callback(ompt_event_implicit_task_begin)) { - ompt_callbacks.ompt_callback(ompt_event_implicit_task_begin)( - ompt_parallel_id, my_task_id); - } -#endif - - /* OMPT state */ - master_th->th.ompt_thread_info.state = ompt_state_work_parallel; - } else { - exit_runtime_p = &dummy; - } -#endif - - { - KMP_TIME_PARTITIONED_BLOCK(OMP_parallel); - KMP_SET_THREAD_STATE_BLOCK(IMPLICIT_TASK); - __kmp_invoke_microtask( microtask, gtid, 0, argc, parent_team->t.t_argv -#if OMPT_SUPPORT - , exit_runtime_p -#endif - ); - } - -#if OMPT_SUPPORT - *exit_runtime_p = NULL; - if (ompt_enabled) { - lw_taskteam.ompt_task_info.frame.exit_runtime_frame = NULL; - -#if OMPT_TRACE - if (ompt_callbacks.ompt_callback(ompt_event_implicit_task_end)) { - ompt_callbacks.ompt_callback(ompt_event_implicit_task_end)( - ompt_parallel_id, ompt_task_id); - } -#endif - - __ompt_lw_taskteam_unlink(master_th); - // reset clear the task id only after unlinking the task - lw_taskteam.ompt_task_info.task_id = ompt_task_id_none; - - if (ompt_callbacks.ompt_callback(ompt_event_parallel_end)) { - ompt_callbacks.ompt_callback(ompt_event_parallel_end)( - ompt_parallel_id, ompt_task_id, - OMPT_INVOKER(call_context)); - } - master_th->th.ompt_thread_info.state = ompt_state_overhead; - } -#endif - } else if ( microtask == (microtask_t)__kmp_teams_master ) { - KMP_DEBUG_ASSERT( master_th->th.th_team == master_th->th.th_serial_team ); - team = master_th->th.th_team; - //team->t.t_pkfn = microtask; - team->t.t_invoke = invoker; - __kmp_alloc_argv_entries( argc, team, TRUE ); - team->t.t_argc = argc; - argv = (void**) team->t.t_argv; - if ( ap ) { - for( i=argc-1; i >= 0; --i ) -// TODO: revert workaround for Intel(R) 64 
tracker #96 -# if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX - *argv++ = va_arg( *ap, void * ); -# else - *argv++ = va_arg( ap, void * ); -# endif - } else { - for( i=0; i < argc; ++i ) - // Get args from parent team for teams construct - argv[i] = parent_team->t.t_argv[i]; - } - // AC: revert change made in __kmpc_serialized_parallel() - // because initial code in teams should have level=0 - team->t.t_level--; - // AC: call special invoker for outer "parallel" of the teams construct - { - KMP_TIME_PARTITIONED_BLOCK(OMP_parallel); - KMP_SET_THREAD_STATE_BLOCK(IMPLICIT_TASK); - invoker(gtid); - } - } else { -#endif /* OMP_40_ENABLED */ - argv = args; - for( i=argc-1; i >= 0; --i ) -// TODO: revert workaround for Intel(R) 64 tracker #96 -#if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX - *argv++ = va_arg( *ap, void * ); -#else - *argv++ = va_arg( ap, void * ); -#endif - KMP_MB(); - -#if OMPT_SUPPORT - void *dummy; - void **exit_runtime_p; - - ompt_lw_taskteam_t lw_taskteam; - - if (ompt_enabled) { - __ompt_lw_taskteam_init(&lw_taskteam, master_th, gtid, - unwrapped_task, ompt_parallel_id); - lw_taskteam.ompt_task_info.task_id = __ompt_task_id_new(gtid); - exit_runtime_p = &(lw_taskteam.ompt_task_info.frame.exit_runtime_frame); - - __ompt_lw_taskteam_link(&lw_taskteam, master_th); - -#if OMPT_TRACE - /* OMPT implicit task begin */ - my_task_id = lw_taskteam.ompt_task_info.task_id; - my_parallel_id = ompt_parallel_id; - if (ompt_callbacks.ompt_callback(ompt_event_implicit_task_begin)) { - ompt_callbacks.ompt_callback(ompt_event_implicit_task_begin)( - my_parallel_id, my_task_id); - } -#endif - - /* OMPT state */ - master_th->th.ompt_thread_info.state = ompt_state_work_parallel; - } else { - exit_runtime_p = &dummy; - } -#endif - - { - KMP_TIME_PARTITIONED_BLOCK(OMP_parallel); - KMP_SET_THREAD_STATE_BLOCK(IMPLICIT_TASK); - __kmp_invoke_microtask( microtask, gtid, 0, argc, args -#if OMPT_SUPPORT - , exit_runtime_p -#endif - ); - } - -#if OMPT_SUPPORT - *exit_runtime_p = NULL; - if (ompt_enabled) { -#if OMPT_TRACE - lw_taskteam.ompt_task_info.frame.exit_runtime_frame = NULL; - - if (ompt_callbacks.ompt_callback(ompt_event_implicit_task_end)) { - ompt_callbacks.ompt_callback(ompt_event_implicit_task_end)( - my_parallel_id, my_task_id); - } -#endif - - __ompt_lw_taskteam_unlink(master_th); - // reset clear the task id only after unlinking the task - lw_taskteam.ompt_task_info.task_id = ompt_task_id_none; - - if (ompt_callbacks.ompt_callback(ompt_event_parallel_end)) { - ompt_callbacks.ompt_callback(ompt_event_parallel_end)( - ompt_parallel_id, ompt_task_id, - OMPT_INVOKER(call_context)); - } - master_th->th.ompt_thread_info.state = ompt_state_overhead; - } -#endif -#if OMP_40_ENABLED - } -#endif /* OMP_40_ENABLED */ - } - else if ( call_context == fork_context_gnu ) { -#if OMPT_SUPPORT - ompt_lw_taskteam_t *lwt = (ompt_lw_taskteam_t *) - __kmp_allocate(sizeof(ompt_lw_taskteam_t)); - __ompt_lw_taskteam_init(lwt, master_th, gtid, - unwrapped_task, ompt_parallel_id); - - lwt->ompt_task_info.task_id = __ompt_task_id_new(gtid); - lwt->ompt_task_info.frame.exit_runtime_frame = NULL; - __ompt_lw_taskteam_link(lwt, master_th); -#endif - - // we were called from GNU native code - KA_TRACE( 20, ("__kmp_fork_call: T#%d serial exit\n", gtid )); - return FALSE; - } - else { - KMP_ASSERT2( call_context < fork_context_last, "__kmp_fork_call: unknown fork_context parameter" ); - } - - - KA_TRACE( 20, ("__kmp_fork_call: T#%d serial exit\n", gtid )); - KMP_MB(); - 
return FALSE; - } - - // GEH: only modify the executing flag in the case when not serialized - // serialized case is handled in kmpc_serialized_parallel - KF_TRACE( 10, ( "__kmp_fork_call: parent_team_aclevel=%d, master_th=%p, curtask=%p, curtask_max_aclevel=%d\n", - parent_team->t.t_active_level, master_th, master_th->th.th_current_task, - master_th->th.th_current_task->td_icvs.max_active_levels ) ); - // TODO: GEH - cannot do this assertion because root thread not set up as executing - // KMP_ASSERT( master_th->th.th_current_task->td_flags.executing == 1 ); - master_th->th.th_current_task->td_flags.executing = 0; - -#if OMP_40_ENABLED - if ( !master_th->th.th_teams_microtask || level > teams_level ) -#endif /* OMP_40_ENABLED */ - { - /* Increment our nested depth level */ - KMP_TEST_THEN_INC32( (kmp_int32*) &root->r.r_in_parallel ); - } - - // See if we need to make a copy of the ICVs. - int nthreads_icv = master_th->th.th_current_task->td_icvs.nproc; - if ((level+1 < __kmp_nested_nth.used) && (__kmp_nested_nth.nth[level+1] != nthreads_icv)) { - nthreads_icv = __kmp_nested_nth.nth[level+1]; - } - else { - nthreads_icv = 0; // don't update - } - -#if OMP_40_ENABLED - // Figure out the proc_bind_policy for the new team. - kmp_proc_bind_t proc_bind = master_th->th.th_set_proc_bind; - kmp_proc_bind_t proc_bind_icv = proc_bind_default; // proc_bind_default means don't update - if ( master_th->th.th_current_task->td_icvs.proc_bind == proc_bind_false ) { - proc_bind = proc_bind_false; - } - else { - if (proc_bind == proc_bind_default) { - // No proc_bind clause specified; use current proc-bind-var for this parallel region - proc_bind = master_th->th.th_current_task->td_icvs.proc_bind; - } - /* else: The proc_bind policy was specified explicitly on parallel clause. This - overrides proc-bind-var for this parallel region, but does not change proc-bind-var. */ - // Figure the value of proc-bind-var for the child threads. 
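// Aside: a sketch (invented helper name) of the per-level list lookup used
// here for __kmp_nested_nth (e.g. OMP_NUM_THREADS=4,2) and, just below, for
// __kmp_nested_proc_bind. 'used' is how many levels were specified; past the
// end of the list the current ICV simply stays in effect.
static int sketch_next_level_value(const int *list, int used, int level,
                                   int cur_icv) {
    if (level + 1 < used && list[level + 1] != cur_icv)
        return list[level + 1]; // the list overrides the ICV for the child level
    return 0;                   // 0 means "don't update", as in the code above
}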
- if ((level+1 < __kmp_nested_proc_bind.used) - && (__kmp_nested_proc_bind.bind_types[level+1] != master_th->th.th_current_task->td_icvs.proc_bind)) { - proc_bind_icv = __kmp_nested_proc_bind.bind_types[level+1]; - } - } - - // Reset for next parallel region - master_th->th.th_set_proc_bind = proc_bind_default; -#endif /* OMP_40_ENABLED */ - - if ((nthreads_icv > 0) -#if OMP_40_ENABLED - || (proc_bind_icv != proc_bind_default) -#endif /* OMP_40_ENABLED */ - ) { - kmp_internal_control_t new_icvs; - copy_icvs(&new_icvs, &master_th->th.th_current_task->td_icvs); - new_icvs.next = NULL; - if (nthreads_icv > 0) { - new_icvs.nproc = nthreads_icv; - } - -#if OMP_40_ENABLED - if (proc_bind_icv != proc_bind_default) { - new_icvs.proc_bind = proc_bind_icv; - } -#endif /* OMP_40_ENABLED */ - - /* allocate a new parallel team */ - KF_TRACE( 10, ( "__kmp_fork_call: before __kmp_allocate_team\n" ) ); - team = __kmp_allocate_team(root, nthreads, nthreads, -#if OMPT_SUPPORT - ompt_parallel_id, -#endif -#if OMP_40_ENABLED - proc_bind, -#endif - &new_icvs, argc USE_NESTED_HOT_ARG(master_th) ); - } else { - /* allocate a new parallel team */ - KF_TRACE( 10, ( "__kmp_fork_call: before __kmp_allocate_team\n" ) ); - team = __kmp_allocate_team(root, nthreads, nthreads, -#if OMPT_SUPPORT - ompt_parallel_id, -#endif -#if OMP_40_ENABLED - proc_bind, -#endif - &master_th->th.th_current_task->td_icvs, argc - USE_NESTED_HOT_ARG(master_th) ); - } - KF_TRACE( 10, ( "__kmp_fork_call: after __kmp_allocate_team - team = %p\n", team ) ); - - /* setup the new team */ - KMP_CHECK_UPDATE(team->t.t_master_tid, master_tid); - KMP_CHECK_UPDATE(team->t.t_master_this_cons, master_this_cons); - KMP_CHECK_UPDATE(team->t.t_ident, loc); - KMP_CHECK_UPDATE(team->t.t_parent, parent_team); - KMP_CHECK_UPDATE_SYNC(team->t.t_pkfn, microtask); -#if OMPT_SUPPORT - KMP_CHECK_UPDATE_SYNC(team->t.ompt_team_info.microtask, unwrapped_task); -#endif - KMP_CHECK_UPDATE(team->t.t_invoke, invoker); /* TODO move this to root, maybe */ - // TODO: parent_team->t.t_level == INT_MAX ??? -#if OMP_40_ENABLED - if ( !master_th->th.th_teams_microtask || level > teams_level ) { -#endif /* OMP_40_ENABLED */ - int new_level = parent_team->t.t_level + 1; - KMP_CHECK_UPDATE(team->t.t_level, new_level); - new_level = parent_team->t.t_active_level + 1; - KMP_CHECK_UPDATE(team->t.t_active_level, new_level); -#if OMP_40_ENABLED - } else { - // AC: Do not increase parallel level at start of the teams construct - int new_level = parent_team->t.t_level; - KMP_CHECK_UPDATE(team->t.t_level, new_level); - new_level = parent_team->t.t_active_level; - KMP_CHECK_UPDATE(team->t.t_active_level, new_level); - } -#endif /* OMP_40_ENABLED */ - kmp_r_sched_t new_sched = get__sched_2(parent_team, master_tid); - if (team->t.t_sched.r_sched_type != new_sched.r_sched_type || team->t.t_sched.chunk != new_sched.chunk) - team->t.t_sched = new_sched; // set master's schedule as new run-time schedule - -#if OMP_40_ENABLED - KMP_CHECK_UPDATE(team->t.t_cancel_request, cancel_noreq); -#endif - - // Update the floating point rounding in the team if required. - propagateFPControl(team); - - if ( __kmp_tasking_mode != tskm_immediate_exec ) { - // Set master's task team to team's task team. Unless this is hot team, it should be NULL. -#if 0 - // Patch out an assertion that trips while the runtime seems to operate correctly. - // Avoiding the preconditions that cause the assertion to trip has been promised as a forthcoming patch. 
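// Aside: the th_task_state memo stack manipulated just below (pushed, and
// grown by doubling, at fork; popped again in __kmp_join_call), reduced to a
// self-contained toy. All names here are invented; the runtime stores
// kmp_uint8 entries and uses __kmp_allocate/__kmp_free rather than libc.
#include <cstdlib>
#include <cstring>
struct sketch_state_stack {
    unsigned char *data = nullptr;
    unsigned top = 0, cap = 0;
    void push(unsigned char v) {
        if (top >= cap) {                      // out of room: grow by doubling
            unsigned new_cap = cap ? 2 * cap : 8;
            unsigned char *nd = (unsigned char *)calloc(new_cap, 1);
            memcpy(nd, data, cap);             // copy old entries; rest stays 0
            free(data);
            data = nd;
            cap = new_cap;
        }
        data[top++] = v;                       // store the current task_state
    }
    unsigned char pop() { return data[--top]; } // restore at join
};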
- KMP_DEBUG_ASSERT(master_th->th.th_task_team == parent_team->t.t_task_team[master_th->th.th_task_state]);
-#endif
- KA_TRACE( 20, ( "__kmp_fork_call: Master T#%d pushing task_team %p / team %p, new task_team %p / team %p\n",
- __kmp_gtid_from_thread( master_th ), master_th->th.th_task_team,
- parent_team, team->t.t_task_team[master_th->th.th_task_state], team ) );
-
- if ( active_level || master_th->th.th_task_team ) {
- // Take a memo of master's task_state
- KMP_DEBUG_ASSERT(master_th->th.th_task_state_memo_stack);
- if (master_th->th.th_task_state_top >= master_th->th.th_task_state_stack_sz) { // increase size
- kmp_uint32 new_size = 2*master_th->th.th_task_state_stack_sz;
- kmp_uint8 *old_stack, *new_stack;
- kmp_uint32 i;
- new_stack = (kmp_uint8 *)__kmp_allocate(new_size);
- for (i=0; i<master_th->th.th_task_state_stack_sz; ++i) {
- new_stack[i] = master_th->th.th_task_state_memo_stack[i];
- }
- for (i=master_th->th.th_task_state_stack_sz; i<new_size; ++i) { // zero-init rest of stack
- new_stack[i] = 0;
- }
- old_stack = master_th->th.th_task_state_memo_stack;
- master_th->th.th_task_state_memo_stack = new_stack;
- master_th->th.th_task_state_stack_sz = new_size;
- __kmp_free(old_stack);
- }
- // Store master's task_state on stack
- master_th->th.th_task_state_memo_stack[master_th->th.th_task_state_top] = master_th->th.th_task_state;
- master_th->th.th_task_state_top++;
-#if KMP_NESTED_HOT_TEAMS
- if (team == master_th->th.th_hot_teams[active_level].hot_team) { // Restore master's nested state if nested hot team
- master_th->th.th_task_state = master_th->th.th_task_state_memo_stack[master_th->th.th_task_state_top];
- }
- else {
-#endif
- master_th->th.th_task_state = 0;
-#if KMP_NESTED_HOT_TEAMS
- }
-#endif
- }
-#if !KMP_NESTED_HOT_TEAMS
- KMP_DEBUG_ASSERT((master_th->th.th_task_team == NULL) || (team == root->r.r_hot_team));
-#endif
- }
-
- KA_TRACE( 20, ("__kmp_fork_call: T#%d(%d:%d)->(%d:0) created a team of %d threads\n",
- gtid, parent_team->t.t_id, team->t.t_master_tid, team->t.t_id, team->t.t_nproc ));
- KMP_DEBUG_ASSERT( team != root->r.r_hot_team ||
- ( team->t.t_master_tid == 0 &&
- ( team->t.t_parent == root->r.r_root_team || team->t.t_parent->t.t_serialized ) ));
- KMP_MB();
-
- /* now, setup the arguments */
- argv = (void**)team->t.t_argv;
-#if OMP_40_ENABLED
- if ( ap ) {
-#endif /* OMP_40_ENABLED */
- for ( i=argc-1; i >= 0; --i ) {
-// TODO: revert workaround for Intel(R) 64 tracker #96
-#if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX
- void *new_argv = va_arg(*ap, void *);
-#else
- void *new_argv = va_arg(ap, void *);
-#endif
- KMP_CHECK_UPDATE(*argv, new_argv);
- argv++;
- }
-#if OMP_40_ENABLED
- } else {
- for ( i=0; i < argc; ++i ) {
- // Get args from parent team for teams construct
- KMP_CHECK_UPDATE(argv[i], team->t.t_parent->t.t_argv[i]);
- }
- }
-#endif /* OMP_40_ENABLED */
-
- /* now actually fork the threads */
- KMP_CHECK_UPDATE(team->t.t_master_active, master_active);
- if (!root->r.r_active) // Only do assignment if it prevents cache ping-pong
- root->r.r_active = TRUE;
-
- __kmp_fork_team_threads( root, team, master_th, gtid );
- __kmp_setup_icv_copy( team, nthreads, &master_th->th.th_current_task->td_icvs, loc );
-
-#if OMPT_SUPPORT
- master_th->th.ompt_thread_info.state = ompt_state_work_parallel;
-#endif
-
- __kmp_release_bootstrap_lock( &__kmp_forkjoin_lock );
-
-#if USE_ITT_BUILD
- if ( team->t.t_active_level == 1 // only report frames at level 1
-# if OMP_40_ENABLED
- && !master_th->th.th_teams_microtask // not in teams construct
-# endif /* OMP_40_ENABLED */
- ) {
-#if USE_ITT_NOTIFY
- if ( ( __itt_frame_submit_v3_ptr ||
KMP_ITT_DEBUG ) && - ( __kmp_forkjoin_frames_mode == 3 || __kmp_forkjoin_frames_mode == 1 ) ) - { - kmp_uint64 tmp_time = 0; - if ( __itt_get_timestamp_ptr ) - tmp_time = __itt_get_timestamp(); - // Internal fork - report frame begin - master_th->th.th_frame_time = tmp_time; - if ( __kmp_forkjoin_frames_mode == 3 ) - team->t.t_region_time = tmp_time; - } else // only one notification scheme (either "submit" or "forking/joined", not both) -#endif /* USE_ITT_NOTIFY */ - if ( ( __itt_frame_begin_v3_ptr || KMP_ITT_DEBUG ) && - __kmp_forkjoin_frames && !__kmp_forkjoin_frames_mode ) - { // Mark start of "parallel" region for VTune. - __kmp_itt_region_forking(gtid, team->t.t_nproc, 0); - } - } -#endif /* USE_ITT_BUILD */ - - /* now go on and do the work */ - KMP_DEBUG_ASSERT( team == __kmp_threads[gtid]->th.th_team ); - KMP_MB(); - KF_TRACE(10, ("__kmp_internal_fork : root=%p, team=%p, master_th=%p, gtid=%d\n", - root, team, master_th, gtid)); - -#if USE_ITT_BUILD - if ( __itt_stack_caller_create_ptr ) { - team->t.t_stack_id = __kmp_itt_stack_caller_create(); // create new stack stitching id before entering fork barrier - } -#endif /* USE_ITT_BUILD */ - -#if OMP_40_ENABLED - if ( ap ) // AC: skip __kmp_internal_fork at teams construct, let only master threads execute -#endif /* OMP_40_ENABLED */ - { - __kmp_internal_fork( loc, gtid, team ); - KF_TRACE(10, ("__kmp_internal_fork : after : root=%p, team=%p, master_th=%p, gtid=%d\n", - root, team, master_th, gtid)); - } - - if (call_context == fork_context_gnu) { - KA_TRACE( 20, ("__kmp_fork_call: parallel exit T#%d\n", gtid )); - return TRUE; - } - - /* Invoke microtask for MASTER thread */ - KA_TRACE( 20, ("__kmp_fork_call: T#%d(%d:0) invoke microtask = %p\n", - gtid, team->t.t_id, team->t.t_pkfn ) ); - } // END of timer KMP_fork_call block - - { - KMP_TIME_PARTITIONED_BLOCK(OMP_parallel); - KMP_SET_THREAD_STATE_BLOCK(IMPLICIT_TASK); - if (! team->t.t_invoke( gtid )) { - KMP_ASSERT2( 0, "cannot invoke microtask for MASTER thread" ); - } - } - KA_TRACE( 20, ("__kmp_fork_call: T#%d(%d:0) done microtask = %p\n", - gtid, team->t.t_id, team->t.t_pkfn ) ); - KMP_MB(); /* Flush all pending memory write invalidates. */ - - KA_TRACE( 20, ("__kmp_fork_call: parallel exit T#%d\n", gtid )); - -#if OMPT_SUPPORT - if (ompt_enabled) { - master_th->th.ompt_thread_info.state = ompt_state_overhead; - } -#endif - - return TRUE; -} - -#if OMPT_SUPPORT -static inline void -__kmp_join_restore_state( - kmp_info_t *thread, - kmp_team_t *team) -{ - // restore state outside the region - thread->th.ompt_thread_info.state = ((team->t.t_serialized) ? 
- ompt_state_work_serial : ompt_state_work_parallel); -} - -static inline void -__kmp_join_ompt( - kmp_info_t *thread, - kmp_team_t *team, - ompt_parallel_id_t parallel_id, - fork_context_e fork_context) -{ - ompt_task_info_t *task_info = __ompt_get_taskinfo(0); - if (ompt_callbacks.ompt_callback(ompt_event_parallel_end)) { - ompt_callbacks.ompt_callback(ompt_event_parallel_end)( - parallel_id, task_info->task_id, OMPT_INVOKER(fork_context)); - } - - task_info->frame.reenter_runtime_frame = NULL; - __kmp_join_restore_state(thread,team); -} -#endif - -void -__kmp_join_call(ident_t *loc, int gtid -#if OMPT_SUPPORT - , enum fork_context_e fork_context -#endif -#if OMP_40_ENABLED - , int exit_teams -#endif /* OMP_40_ENABLED */ -) -{ - KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(KMP_join_call); - kmp_team_t *team; - kmp_team_t *parent_team; - kmp_info_t *master_th; - kmp_root_t *root; - int master_active; - int i; - - KA_TRACE( 20, ("__kmp_join_call: enter T#%d\n", gtid )); - - /* setup current data */ - master_th = __kmp_threads[ gtid ]; - root = master_th->th.th_root; - team = master_th->th.th_team; - parent_team = team->t.t_parent; - - master_th->th.th_ident = loc; - -#if OMPT_SUPPORT - if (ompt_enabled) { - master_th->th.ompt_thread_info.state = ompt_state_overhead; - } -#endif - -#if KMP_DEBUG - if ( __kmp_tasking_mode != tskm_immediate_exec ) { - KA_TRACE( 20, ( "__kmp_join_call: T#%d, old team = %p old task_team = %p, th_task_team = %p\n", - __kmp_gtid_from_thread( master_th ), team, - team->t.t_task_team[master_th->th.th_task_state], master_th->th.th_task_team) ); - KMP_DEBUG_ASSERT( master_th->th.th_task_team == team->t.t_task_team[master_th->th.th_task_state] ); - } -#endif - - if( team->t.t_serialized ) { -#if OMP_40_ENABLED - if ( master_th->th.th_teams_microtask ) { - // We are in teams construct - int level = team->t.t_level; - int tlevel = master_th->th.th_teams_level; - if ( level == tlevel ) { - // AC: we haven't incremented it earlier at start of teams construct, - // so do it here - at the end of teams construct - team->t.t_level++; - } else if ( level == tlevel + 1 ) { - // AC: we are exiting parallel inside teams, need to increment serialization - // in order to restore it in the next call to __kmpc_end_serialized_parallel - team->t.t_serialized++; - } - } -#endif /* OMP_40_ENABLED */ - __kmpc_end_serialized_parallel( loc, gtid ); - -#if OMPT_SUPPORT - if (ompt_enabled) { - __kmp_join_restore_state(master_th, parent_team); - } -#endif - - return; - } - - master_active = team->t.t_master_active; - -#if OMP_40_ENABLED - if (!exit_teams) -#endif /* OMP_40_ENABLED */ - { - // AC: No barrier for internal teams at exit from teams construct. - // But there is barrier for external team (league). - __kmp_internal_join( loc, gtid, team ); - } -#if OMP_40_ENABLED - else { - master_th->th.th_task_state = 0; // AC: no tasking in teams (out of any parallel) - } -#endif /* OMP_40_ENABLED */ - - KMP_MB(); - -#if OMPT_SUPPORT - ompt_parallel_id_t parallel_id = team->t.ompt_team_info.parallel_id; -#endif - -#if USE_ITT_BUILD - if ( __itt_stack_caller_create_ptr ) { - __kmp_itt_stack_caller_destroy( (__itt_caller)team->t.t_stack_id ); // destroy the stack stitching id after join barrier - } - - // Mark end of "parallel" region for VTune. 
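// Aside: a sketch of how the two mutually exclusive ITT notification schemes
// are selected by __kmp_forkjoin_frames_mode. Names are invented; on the fork
// side modes 1 and 3 submit timed frames (the join code below only submits
// for mode 3), while mode 0 with frames enabled emits plain region marks.
enum sketch_itt_scheme { SKETCH_ITT_NONE, SKETCH_ITT_FRAME_SUBMIT,
                         SKETCH_ITT_REGION_MARKS };
static sketch_itt_scheme sketch_pick_itt_scheme(bool frames_enabled, int mode) {
    if (mode == 3 || mode == 1)      return SKETCH_ITT_FRAME_SUBMIT;
    if (frames_enabled && mode == 0) return SKETCH_ITT_REGION_MARKS;
    return SKETCH_ITT_NONE;
}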
- if ( team->t.t_active_level == 1 -# if OMP_40_ENABLED - && !master_th->th.th_teams_microtask /* not in teams construct */ -# endif /* OMP_40_ENABLED */ - ) { - master_th->th.th_ident = loc; - // only one notification scheme (either "submit" or "forking/joined", not both) - if ( ( __itt_frame_submit_v3_ptr || KMP_ITT_DEBUG ) && __kmp_forkjoin_frames_mode == 3 ) - __kmp_itt_frame_submit( gtid, team->t.t_region_time, master_th->th.th_frame_time, - 0, loc, master_th->th.th_team_nproc, 1 ); - else if ( ( __itt_frame_end_v3_ptr || KMP_ITT_DEBUG ) && - ! __kmp_forkjoin_frames_mode && __kmp_forkjoin_frames ) - __kmp_itt_region_joined( gtid ); - } // active_level == 1 -#endif /* USE_ITT_BUILD */ - -#if OMP_40_ENABLED - if ( master_th->th.th_teams_microtask && - !exit_teams && - team->t.t_pkfn != (microtask_t)__kmp_teams_master && - team->t.t_level == master_th->th.th_teams_level + 1 ) { - // AC: We need to leave the team structure intact at the end - // of parallel inside the teams construct, so that at the next - // parallel same (hot) team works, only adjust nesting levels - - /* Decrement our nested depth level */ - team->t.t_level --; - team->t.t_active_level --; - KMP_TEST_THEN_DEC32( (kmp_int32*) &root->r.r_in_parallel ); - - /* Restore number of threads in the team if needed */ - if ( master_th->th.th_team_nproc < master_th->th.th_teams_size.nth ) { - int old_num = master_th->th.th_team_nproc; - int new_num = master_th->th.th_teams_size.nth; - kmp_info_t **other_threads = team->t.t_threads; - team->t.t_nproc = new_num; - for ( i = 0; i < old_num; ++i ) { - other_threads[i]->th.th_team_nproc = new_num; - } - // Adjust states of non-used threads of the team - for ( i = old_num; i < new_num; ++i ) { - // Re-initialize thread's barrier data. - int b; - kmp_balign_t * balign = other_threads[i]->th.th_bar; - for ( b = 0; b < bs_last_barrier; ++ b ) { - balign[ b ].bb.b_arrived = team->t.t_bar[ b ].b_arrived; - KMP_DEBUG_ASSERT(balign[ b ].bb.wait_flag != KMP_BARRIER_PARENT_FLAG); -#if USE_DEBUGGER - balign[ b ].bb.b_worker_arrived = team->t.t_bar[ b ].b_team_arrived; -#endif - } - if ( __kmp_tasking_mode != tskm_immediate_exec ) { - // Synchronize thread's task state - other_threads[i]->th.th_task_state = master_th->th.th_task_state; - } - } - } - -#if OMPT_SUPPORT - if (ompt_enabled) { - __kmp_join_ompt(master_th, parent_team, parallel_id, fork_context); - } -#endif - - return; - } -#endif /* OMP_40_ENABLED */ - - /* do cleanup and restore the parent team */ - master_th->th.th_info .ds.ds_tid = team->t.t_master_tid; - master_th->th.th_local.this_construct = team->t.t_master_this_cons; - - master_th->th.th_dispatch = - & parent_team->t.t_dispatch[ team->t.t_master_tid ]; - - /* jc: The following lock has instructions with REL and ACQ semantics, - separating the parallel user code called in this parallel region - from the serial user code called after this function returns. 
- */ - __kmp_acquire_bootstrap_lock( &__kmp_forkjoin_lock ); - -#if OMP_40_ENABLED - if ( !master_th->th.th_teams_microtask || team->t.t_level > master_th->th.th_teams_level ) -#endif /* OMP_40_ENABLED */ - { - /* Decrement our nested depth level */ - KMP_TEST_THEN_DEC32( (kmp_int32*) &root->r.r_in_parallel ); - } - KMP_DEBUG_ASSERT( root->r.r_in_parallel >= 0 ); - -#if OMPT_SUPPORT && OMPT_TRACE - if(ompt_enabled){ - ompt_task_info_t *task_info = __ompt_get_taskinfo(0); - if (ompt_callbacks.ompt_callback(ompt_event_implicit_task_end)) { - ompt_callbacks.ompt_callback(ompt_event_implicit_task_end)( - parallel_id, task_info->task_id); - } - task_info->frame.exit_runtime_frame = NULL; - task_info->task_id = 0; - } -#endif - - KF_TRACE( 10, ("__kmp_join_call1: T#%d, this_thread=%p team=%p\n", - 0, master_th, team ) ); - __kmp_pop_current_task_from_thread( master_th ); - -#if OMP_40_ENABLED && KMP_AFFINITY_SUPPORTED - // - // Restore master thread's partition. - // - master_th->th.th_first_place = team->t.t_first_place; - master_th->th.th_last_place = team->t.t_last_place; -#endif /* OMP_40_ENABLED */ - - updateHWFPControl (team); - - if ( root->r.r_active != master_active ) - root->r.r_active = master_active; - - __kmp_free_team( root, team USE_NESTED_HOT_ARG(master_th) ); // this will free worker threads - - /* this race was fun to find. make sure the following is in the critical - * region otherwise assertions may fail occasionally since the old team - * may be reallocated and the hierarchy appears inconsistent. it is - * actually safe to run and won't cause any bugs, but will cause those - * assertion failures. it's only one deref&assign so might as well put this - * in the critical region */ - master_th->th.th_team = parent_team; - master_th->th.th_team_nproc = parent_team->t.t_nproc; - master_th->th.th_team_master = parent_team->t.t_threads[0]; - master_th->th.th_team_serialized = parent_team->t.t_serialized; - - /* restore serialized team, if need be */ - if( parent_team->t.t_serialized && - parent_team != master_th->th.th_serial_team && - parent_team != root->r.r_root_team ) { - __kmp_free_team( root, master_th->th.th_serial_team USE_NESTED_HOT_ARG(NULL) ); - master_th->th.th_serial_team = parent_team; - } - - if ( __kmp_tasking_mode != tskm_immediate_exec ) { - if (master_th->th.th_task_state_top > 0) { // Restore task state from memo stack - KMP_DEBUG_ASSERT(master_th->th.th_task_state_memo_stack); - // Remember master's state if we re-use this nested hot team - master_th->th.th_task_state_memo_stack[master_th->th.th_task_state_top] = master_th->th.th_task_state; - --master_th->th.th_task_state_top; // pop - // Now restore state at this level - master_th->th.th_task_state = master_th->th.th_task_state_memo_stack[master_th->th.th_task_state_top]; - } - // Copy the task team from the parent team to the master thread - master_th->th.th_task_team = parent_team->t.t_task_team[master_th->th.th_task_state]; - KA_TRACE( 20, ( "__kmp_join_call: Master T#%d restoring task_team %p / team %p\n", - __kmp_gtid_from_thread( master_th ), master_th->th.th_task_team, parent_team ) ); - } - - // TODO: GEH - cannot do this assertion because root thread not set up as executing - // KMP_ASSERT( master_th->th.th_current_task->td_flags.executing == 0 ); - master_th->th.th_current_task->td_flags.executing = 1; - - __kmp_release_bootstrap_lock( &__kmp_forkjoin_lock ); - -#if OMPT_SUPPORT - if (ompt_enabled) { - __kmp_join_ompt(master_th, parent_team, parallel_id, fork_context); - } -#endif - - KMP_MB(); - 
KA_TRACE( 20, ("__kmp_join_call: exit T#%d\n", gtid )); -} - -/* ------------------------------------------------------------------------ */ -/* ------------------------------------------------------------------------ */ - -/* Check whether we should push an internal control record onto the - serial team stack. If so, do it. */ -void -__kmp_save_internal_controls ( kmp_info_t * thread ) -{ - - if ( thread->th.th_team != thread->th.th_serial_team ) { - return; - } - if (thread->th.th_team->t.t_serialized > 1) { - int push = 0; - - if (thread->th.th_team->t.t_control_stack_top == NULL) { - push = 1; - } else { - if ( thread->th.th_team->t.t_control_stack_top->serial_nesting_level != - thread->th.th_team->t.t_serialized ) { - push = 1; - } - } - if (push) { /* push a record on the serial team's stack */ - kmp_internal_control_t * control = (kmp_internal_control_t *) __kmp_allocate(sizeof(kmp_internal_control_t)); - - copy_icvs( control, & thread->th.th_current_task->td_icvs ); - - control->serial_nesting_level = thread->th.th_team->t.t_serialized; - - control->next = thread->th.th_team->t.t_control_stack_top; - thread->th.th_team->t.t_control_stack_top = control; - } - } -} - -/* Changes set_nproc */ -void -__kmp_set_num_threads( int new_nth, int gtid ) -{ - kmp_info_t *thread; - kmp_root_t *root; - - KF_TRACE( 10, ("__kmp_set_num_threads: new __kmp_nth = %d\n", new_nth )); - KMP_DEBUG_ASSERT( __kmp_init_serial ); - - if (new_nth < 1) - new_nth = 1; - else if (new_nth > __kmp_max_nth) - new_nth = __kmp_max_nth; - - KMP_COUNT_VALUE(OMP_set_numthreads, new_nth); - thread = __kmp_threads[gtid]; - - __kmp_save_internal_controls( thread ); - - set__nproc( thread, new_nth ); - - // - // If this omp_set_num_threads() call will cause the hot team size to be - // reduced (in the absence of a num_threads clause), then reduce it now, - // rather than waiting for the next parallel region. - // - root = thread->th.th_root; - if ( __kmp_init_parallel && ( ! root->r.r_active ) - && ( root->r.r_hot_team->t.t_nproc > new_nth ) -#if KMP_NESTED_HOT_TEAMS - && __kmp_hot_teams_max_level && !__kmp_hot_teams_mode -#endif - ) { - kmp_team_t *hot_team = root->r.r_hot_team; - int f; - - __kmp_acquire_bootstrap_lock( &__kmp_forkjoin_lock ); - - // Release the extra threads we don't need any more. - for ( f = new_nth; f < hot_team->t.t_nproc; f++ ) { - KMP_DEBUG_ASSERT( hot_team->t.t_threads[f] != NULL ); - if ( __kmp_tasking_mode != tskm_immediate_exec) { - // When decreasing team size, threads no longer in the team should unref task team. - hot_team->t.t_threads[f]->th.th_task_team = NULL; - } - __kmp_free_thread( hot_team->t.t_threads[f] ); - hot_team->t.t_threads[f] = NULL; - } - hot_team->t.t_nproc = new_nth; -#if KMP_NESTED_HOT_TEAMS - if( thread->th.th_hot_teams ) { - KMP_DEBUG_ASSERT( hot_team == thread->th.th_hot_teams[0].hot_team ); - thread->th.th_hot_teams[0].hot_team_nth = new_nth; - } -#endif - - __kmp_release_bootstrap_lock( &__kmp_forkjoin_lock ); - - // - // Update the t_nproc field in the threads that are still active. 
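// Aside: the clamping applied at the top of __kmp_set_num_threads before any
// of the hot-team surgery above, restated with an invented name:
static int sketch_clamp_nth(int new_nth, int max_nth) {
    if (new_nth < 1) return 1;                       // at least the master
    return (new_nth > max_nth) ? max_nth : new_nth;  // capped at __kmp_max_nth
}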
- //
- for( f=0 ; f < new_nth; f++ ) {
- KMP_DEBUG_ASSERT( hot_team->t.t_threads[f] != NULL );
- hot_team->t.t_threads[f]->th.th_team_nproc = new_nth;
- }
- // Special flag in case omp_set_num_threads() call
- hot_team->t.t_size_changed = -1;
- }
-}
-
-/* Changes max_active_levels */
-void
-__kmp_set_max_active_levels( int gtid, int max_active_levels )
-{
- kmp_info_t *thread;
-
- KF_TRACE( 10, ( "__kmp_set_max_active_levels: new max_active_levels for thread %d = (%d)\n", gtid, max_active_levels ) );
- KMP_DEBUG_ASSERT( __kmp_init_serial );
-
- // validate max_active_levels
- if( max_active_levels < 0 ) {
- KMP_WARNING( ActiveLevelsNegative, max_active_levels );
- // We ignore this call if the user has specified a negative value.
- // The current setting won't be changed. The last valid setting will be used.
- // A warning will be issued (if warnings are allowed as controlled by the KMP_WARNINGS env var).
- KF_TRACE( 10, ( "__kmp_set_max_active_levels: the call is ignored: new max_active_levels for thread %d = (%d)\n", gtid, max_active_levels ) );
- return;
- }
- if( max_active_levels <= KMP_MAX_ACTIVE_LEVELS_LIMIT ) {
- // it's OK, the max_active_levels is within the valid range: [ 0; KMP_MAX_ACTIVE_LEVELS_LIMIT ]
- // We allow a zero value. (implementation defined behavior)
- } else {
- KMP_WARNING( ActiveLevelsExceedLimit, max_active_levels, KMP_MAX_ACTIVE_LEVELS_LIMIT );
- max_active_levels = KMP_MAX_ACTIVE_LEVELS_LIMIT;
- // Current upper limit is MAX_INT. (implementation defined behavior)
- // If the input exceeds the upper limit, we correct the input to be the upper limit. (implementation defined behavior)
- // Actually, the flow should never get here until we use MAX_INT limit.
- }
- KF_TRACE( 10, ( "__kmp_set_max_active_levels: after validation: new max_active_levels for thread %d = (%d)\n", gtid, max_active_levels ) );
-
- thread = __kmp_threads[ gtid ];
-
- __kmp_save_internal_controls( thread );
-
- set__max_active_levels( thread, max_active_levels );
-
-}
-
-/* Gets max_active_levels */
-int
-__kmp_get_max_active_levels( int gtid )
-{
- kmp_info_t *thread;
-
- KF_TRACE( 10, ( "__kmp_get_max_active_levels: thread %d\n", gtid ) );
- KMP_DEBUG_ASSERT( __kmp_init_serial );
-
- thread = __kmp_threads[ gtid ];
- KMP_DEBUG_ASSERT( thread->th.th_current_task );
- KF_TRACE( 10, ( "__kmp_get_max_active_levels: thread %d, curtask=%p, curtask_maxaclevel=%d\n",
- gtid, thread->th.th_current_task, thread->th.th_current_task->td_icvs.max_active_levels ) );
- return thread->th.th_current_task->td_icvs.max_active_levels;
-}
-
-/* Changes def_sched_var ICV values (run-time schedule kind and chunk) */
-void
-__kmp_set_schedule( int gtid, kmp_sched_t kind, int chunk )
-{
- kmp_info_t *thread;
-// kmp_team_t *team;
-
- KF_TRACE( 10, ("__kmp_set_schedule: new schedule for thread %d = (%d, %d)\n", gtid, (int)kind, chunk ));
- KMP_DEBUG_ASSERT( __kmp_init_serial );
-
- // Check if the kind parameter is valid, correct if needed.
- // Valid parameters should fit in one of two intervals - standard or extended:
- // <lower>, <valid>, <upper_std>, <lower_ext>, <valid>, <upper>
- // 2008-01-25: 0, 1 - 4, 5, 100, 101 - 102, 103
- if ( kind <= kmp_sched_lower || kind >= kmp_sched_upper ||
- ( kind <= kmp_sched_lower_ext && kind >= kmp_sched_upper_std ) )
- {
- // TODO: Hint needs attention in case we change the default schedule.
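// Aside: the interval check above as a positive predicate (invented name;
// with the 2008-01-25 values this accepts kinds 1..4 and 101..102):
static bool sketch_valid_sched_kind(kmp_sched_t kind) {
    return (kind > kmp_sched_lower && kind < kmp_sched_upper_std) ||  // standard
           (kind > kmp_sched_lower_ext && kind < kmp_sched_upper);    // extended
}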
- __kmp_msg( - kmp_ms_warning, - KMP_MSG( ScheduleKindOutOfRange, kind ), - KMP_HNT( DefaultScheduleKindUsed, "static, no chunk" ), - __kmp_msg_null - ); - kind = kmp_sched_default; - chunk = 0; // ignore chunk value in case of bad kind - } - - thread = __kmp_threads[ gtid ]; - - __kmp_save_internal_controls( thread ); - - if ( kind < kmp_sched_upper_std ) { - if ( kind == kmp_sched_static && chunk < KMP_DEFAULT_CHUNK ) { - // differ static chunked vs. unchunked: - // chunk should be invalid to indicate unchunked schedule (which is the default) - thread->th.th_current_task->td_icvs.sched.r_sched_type = kmp_sch_static; - } else { - thread->th.th_current_task->td_icvs.sched.r_sched_type = __kmp_sch_map[ kind - kmp_sched_lower - 1 ]; - } - } else { - // __kmp_sch_map[ kind - kmp_sched_lower_ext + kmp_sched_upper_std - kmp_sched_lower - 2 ]; - thread->th.th_current_task->td_icvs.sched.r_sched_type = - __kmp_sch_map[ kind - kmp_sched_lower_ext + kmp_sched_upper_std - kmp_sched_lower - 2 ]; - } - if ( kind == kmp_sched_auto ) { - // ignore parameter chunk for schedule auto - thread->th.th_current_task->td_icvs.sched.chunk = KMP_DEFAULT_CHUNK; - } else { - thread->th.th_current_task->td_icvs.sched.chunk = chunk; - } -} - -/* Gets def_sched_var ICV values */ -void -__kmp_get_schedule( int gtid, kmp_sched_t * kind, int * chunk ) -{ - kmp_info_t *thread; - enum sched_type th_type; - - KF_TRACE( 10, ("__kmp_get_schedule: thread %d\n", gtid )); - KMP_DEBUG_ASSERT( __kmp_init_serial ); - - thread = __kmp_threads[ gtid ]; - - th_type = thread->th.th_current_task->td_icvs.sched.r_sched_type; - - switch ( th_type ) { - case kmp_sch_static: - case kmp_sch_static_greedy: - case kmp_sch_static_balanced: - *kind = kmp_sched_static; - *chunk = 0; // chunk was not set, try to show this fact via zero value - return; - case kmp_sch_static_chunked: - *kind = kmp_sched_static; - break; - case kmp_sch_dynamic_chunked: - *kind = kmp_sched_dynamic; - break; - case kmp_sch_guided_chunked: - case kmp_sch_guided_iterative_chunked: - case kmp_sch_guided_analytical_chunked: - *kind = kmp_sched_guided; - break; - case kmp_sch_auto: - *kind = kmp_sched_auto; - break; - case kmp_sch_trapezoidal: - *kind = kmp_sched_trapezoidal; - break; -#if KMP_STATIC_STEAL_ENABLED - case kmp_sch_static_steal: - *kind = kmp_sched_static_steal; - break; -#endif - default: - KMP_FATAL( UnknownSchedulingType, th_type ); - } - - *chunk = thread->th.th_current_task->td_icvs.sched.chunk; -} - -int -__kmp_get_ancestor_thread_num( int gtid, int level ) { - - int ii, dd; - kmp_team_t *team; - kmp_info_t *thr; - - KF_TRACE( 10, ("__kmp_get_ancestor_thread_num: thread %d %d\n", gtid, level )); - KMP_DEBUG_ASSERT( __kmp_init_serial ); - - // validate level - if( level == 0 ) return 0; - if( level < 0 ) return -1; - thr = __kmp_threads[ gtid ]; - team = thr->th.th_team; - ii = team->t.t_level; - if( level > ii ) return -1; - -#if OMP_40_ENABLED - if( thr->th.th_teams_microtask ) { - // AC: we are in teams region where multiple nested teams have same level - int tlevel = thr->th.th_teams_level; // the level of the teams construct - if( level <= tlevel ) { // otherwise usual algorithm works (will not touch the teams) - KMP_DEBUG_ASSERT( ii >= tlevel ); - // AC: As we need to pass by the teams league, we need to artificially increase ii - if ( ii == tlevel ) { - ii += 2; // three teams have same level - } else { - ii ++; // two teams have same level - } - } - } -#endif - - if( ii == level ) return __kmp_tid_from_gtid( gtid ); - - dd = team->t.t_serialized; 
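// Aside, reading the walk below: each team contributes t_serialized
// serialized levels plus one parallel level. The loop consumes serialized
// levels through dd and one parallel level per hop to t_parent until ii
// reaches the requested level; a serialized level answers 0 (its only
// "thread"), otherwise the stored t_master_tid is the ancestor's tid.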
- level++; - while( ii > level ) - { - for( dd = team->t.t_serialized; ( dd > 0 ) && ( ii > level ); dd--, ii-- ) - { - } - if( ( team->t.t_serialized ) && ( !dd ) ) { - team = team->t.t_parent; - continue; - } - if( ii > level ) { - team = team->t.t_parent; - dd = team->t.t_serialized; - ii--; - } - } - - return ( dd > 1 ) ? ( 0 ) : ( team->t.t_master_tid ); -} - -int -__kmp_get_team_size( int gtid, int level ) { - - int ii, dd; - kmp_team_t *team; - kmp_info_t *thr; - - KF_TRACE( 10, ("__kmp_get_team_size: thread %d %d\n", gtid, level )); - KMP_DEBUG_ASSERT( __kmp_init_serial ); - - // validate level - if( level == 0 ) return 1; - if( level < 0 ) return -1; - thr = __kmp_threads[ gtid ]; - team = thr->th.th_team; - ii = team->t.t_level; - if( level > ii ) return -1; - -#if OMP_40_ENABLED - if( thr->th.th_teams_microtask ) { - // AC: we are in teams region where multiple nested teams have same level - int tlevel = thr->th.th_teams_level; // the level of the teams construct - if( level <= tlevel ) { // otherwise usual algorithm works (will not touch the teams) - KMP_DEBUG_ASSERT( ii >= tlevel ); - // AC: As we need to pass by the teams league, we need to artificially increase ii - if ( ii == tlevel ) { - ii += 2; // three teams have same level - } else { - ii ++; // two teams have same level - } - } - } -#endif - - while( ii > level ) - { - for( dd = team->t.t_serialized; ( dd > 0 ) && ( ii > level ); dd--, ii-- ) - { - } - if( team->t.t_serialized && ( !dd ) ) { - team = team->t.t_parent; - continue; - } - if( ii > level ) { - team = team->t.t_parent; - ii--; - } - } - - return team->t.t_nproc; -} - -kmp_r_sched_t -__kmp_get_schedule_global() { -// This routine created because pairs (__kmp_sched, __kmp_chunk) and (__kmp_static, __kmp_guided) -// may be changed by kmp_set_defaults independently. So one can get the updated schedule here. - - kmp_r_sched_t r_sched; - - // create schedule from 4 globals: __kmp_sched, __kmp_chunk, __kmp_static, __kmp_guided - // __kmp_sched should keep original value, so that user can set KMP_SCHEDULE multiple times, - // and thus have different run-time schedules in different roots (even in OMP 2.5) - if ( __kmp_sched == kmp_sch_static ) { - r_sched.r_sched_type = __kmp_static; // replace STATIC with more detailed schedule (balanced or greedy) - } else if ( __kmp_sched == kmp_sch_guided_chunked ) { - r_sched.r_sched_type = __kmp_guided; // replace GUIDED with more detailed schedule (iterative or analytical) - } else { - r_sched.r_sched_type = __kmp_sched; // (STATIC_CHUNKED), or (DYNAMIC_CHUNKED), or other - } - - if ( __kmp_chunk < KMP_DEFAULT_CHUNK ) { // __kmp_chunk may be wrong here (if it was not ever set) - r_sched.chunk = KMP_DEFAULT_CHUNK; - } else { - r_sched.chunk = __kmp_chunk; - } - - return r_sched; -} - -/* ------------------------------------------------------------------------ */ -/* ------------------------------------------------------------------------ */ - - -/* - * Allocate (realloc == FALSE) * or reallocate (realloc == TRUE) - * at least argc number of *t_argv entries for the requested team. - */ -static void -__kmp_alloc_argv_entries( int argc, kmp_team_t *team, int realloc ) -{ - - KMP_DEBUG_ASSERT( team ); - if( !realloc || argc > team->t.t_max_argc ) { - - KA_TRACE( 100, ( "__kmp_alloc_argv_entries: team %d: needed entries=%d, current entries=%d\n", - team->t.t_id, argc, ( realloc ) ? 
team->t.t_max_argc : 0 )); - /* if previously allocated heap space for args, free them */ - if ( realloc && team->t.t_argv != &team->t.t_inline_argv[0] ) - __kmp_free( (void *) team->t.t_argv ); - - if ( argc <= KMP_INLINE_ARGV_ENTRIES ) { - /* use unused space in the cache line for arguments */ - team->t.t_max_argc = KMP_INLINE_ARGV_ENTRIES; - KA_TRACE( 100, ( "__kmp_alloc_argv_entries: team %d: inline allocate %d argv entries\n", - team->t.t_id, team->t.t_max_argc )); - team->t.t_argv = &team->t.t_inline_argv[0]; - if ( __kmp_storage_map ) { - __kmp_print_storage_map_gtid( -1, &team->t.t_inline_argv[0], - &team->t.t_inline_argv[KMP_INLINE_ARGV_ENTRIES], - (sizeof(void *) * KMP_INLINE_ARGV_ENTRIES), - "team_%d.t_inline_argv", - team->t.t_id ); - } - } else { - /* allocate space for arguments in the heap */ - team->t.t_max_argc = ( argc <= (KMP_MIN_MALLOC_ARGV_ENTRIES >> 1 )) ? - KMP_MIN_MALLOC_ARGV_ENTRIES : 2 * argc; - KA_TRACE( 100, ( "__kmp_alloc_argv_entries: team %d: dynamic allocate %d argv entries\n", - team->t.t_id, team->t.t_max_argc )); - team->t.t_argv = (void**) __kmp_page_allocate( sizeof(void*) * team->t.t_max_argc ); - if ( __kmp_storage_map ) { - __kmp_print_storage_map_gtid( -1, &team->t.t_argv[0], &team->t.t_argv[team->t.t_max_argc], - sizeof(void *) * team->t.t_max_argc, "team_%d.t_argv", - team->t.t_id ); - } - } - } -} - -static void -__kmp_allocate_team_arrays(kmp_team_t *team, int max_nth) -{ - int i; - int num_disp_buff = max_nth > 1 ? __kmp_dispatch_num_buffers : 2; - team->t.t_threads = (kmp_info_t**) __kmp_allocate( sizeof(kmp_info_t*) * max_nth ); - team->t.t_disp_buffer = (dispatch_shared_info_t*) - __kmp_allocate( sizeof(dispatch_shared_info_t) * num_disp_buff ); - team->t.t_dispatch = (kmp_disp_t*) __kmp_allocate( sizeof(kmp_disp_t) * max_nth ); - team->t.t_implicit_task_taskdata = (kmp_taskdata_t*) __kmp_allocate( sizeof(kmp_taskdata_t) * max_nth ); - team->t.t_max_nproc = max_nth; - - /* setup dispatch buffers */ - for(i = 0 ; i < num_disp_buff; ++i) { - team->t.t_disp_buffer[i].buffer_index = i; -#if OMP_45_ENABLED - team->t.t_disp_buffer[i].doacross_buf_idx = i; -#endif - } -} - -static void -__kmp_free_team_arrays(kmp_team_t *team) { - /* Note: this does not free the threads in t_threads (__kmp_free_threads) */ - int i; - for ( i = 0; i < team->t.t_max_nproc; ++ i ) { - if ( team->t.t_dispatch[ i ].th_disp_buffer != NULL ) { - __kmp_free( team->t.t_dispatch[ i ].th_disp_buffer ); - team->t.t_dispatch[ i ].th_disp_buffer = NULL; - }; // if - }; // for - __kmp_free(team->t.t_threads); - __kmp_free(team->t.t_disp_buffer); - __kmp_free(team->t.t_dispatch); - __kmp_free(team->t.t_implicit_task_taskdata); - team->t.t_threads = NULL; - team->t.t_disp_buffer = NULL; - team->t.t_dispatch = NULL; - team->t.t_implicit_task_taskdata = 0; -} - -static void -__kmp_reallocate_team_arrays(kmp_team_t *team, int max_nth) { - kmp_info_t **oldThreads = team->t.t_threads; - - __kmp_free(team->t.t_disp_buffer); - __kmp_free(team->t.t_dispatch); - __kmp_free(team->t.t_implicit_task_taskdata); - __kmp_allocate_team_arrays(team, max_nth); - - KMP_MEMCPY(team->t.t_threads, oldThreads, team->t.t_nproc * sizeof (kmp_info_t*)); - - __kmp_free(oldThreads); -} - -static kmp_internal_control_t -__kmp_get_global_icvs( void ) { - - kmp_r_sched_t r_sched = __kmp_get_schedule_global(); // get current state of scheduling globals - -#if OMP_40_ENABLED - KMP_DEBUG_ASSERT( __kmp_nested_proc_bind.used > 0 ); -#endif /* OMP_40_ENABLED */ - - kmp_internal_control_t g_icvs = { - 0, //int 
serial_nesting_level; //corresponds to the value of the th_team_serialized field - (kmp_int8)__kmp_dflt_nested, //int nested; //internal control for nested parallelism (per thread) - (kmp_int8)__kmp_global.g.g_dynamic, //internal control for dynamic adjustment of threads (per thread) - (kmp_int8)__kmp_env_blocktime, //int bt_set; //internal control for whether blocktime is explicitly set - __kmp_dflt_blocktime, //int blocktime; //internal control for blocktime -#if KMP_USE_MONITOR - __kmp_bt_intervals, //int bt_intervals; //internal control for blocktime intervals -#endif - __kmp_dflt_team_nth, //int nproc; //internal control for # of threads for next parallel region (per thread) - // (use a max ub on value if __kmp_parallel_initialize not called yet) - __kmp_dflt_max_active_levels, //int max_active_levels; //internal control for max_active_levels - r_sched, //kmp_r_sched_t sched; //internal control for runtime schedule {sched,chunk} pair -#if OMP_40_ENABLED - __kmp_nested_proc_bind.bind_types[0], - __kmp_default_device, -#endif /* OMP_40_ENABLED */ - NULL //struct kmp_internal_control *next; - }; - - return g_icvs; -} - -static kmp_internal_control_t -__kmp_get_x_global_icvs( const kmp_team_t *team ) { - - kmp_internal_control_t gx_icvs; - gx_icvs.serial_nesting_level = 0; // probably =team->t.t_serial like in save_inter_controls - copy_icvs( & gx_icvs, & team->t.t_threads[0]->th.th_current_task->td_icvs ); - gx_icvs.next = NULL; - - return gx_icvs; -} - -static void -__kmp_initialize_root( kmp_root_t *root ) -{ - int f; - kmp_team_t *root_team; - kmp_team_t *hot_team; - int hot_team_max_nth; - kmp_r_sched_t r_sched = __kmp_get_schedule_global(); // get current state of scheduling globals - kmp_internal_control_t r_icvs = __kmp_get_global_icvs(); - KMP_DEBUG_ASSERT( root ); - KMP_ASSERT( ! root->r.r_begin ); - - /* setup the root state structure */ - __kmp_init_lock( &root->r.r_begin_lock ); - root->r.r_begin = FALSE; - root->r.r_active = FALSE; - root->r.r_in_parallel = 0; - root->r.r_blocktime = __kmp_dflt_blocktime; - root->r.r_nested = __kmp_dflt_nested; - - /* setup the root team for this task */ - /* allocate the root team structure */ - KF_TRACE( 10, ( "__kmp_initialize_root: before root_team\n" ) ); - - root_team = - __kmp_allocate_team( - root, - 1, // new_nproc - 1, // max_nproc -#if OMPT_SUPPORT - 0, // root parallel id -#endif -#if OMP_40_ENABLED - __kmp_nested_proc_bind.bind_types[0], -#endif - &r_icvs, - 0 // argc - USE_NESTED_HOT_ARG(NULL) // master thread is unknown - ); -#if USE_DEBUGGER - // Non-NULL value should be assigned to make the debugger display the root team. 
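-    // (The root team never runs a microtask, so any non-NULL marker works;
-    // ~0 is simply an obviously-invalid code address.)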
- TCW_SYNC_PTR(root_team->t.t_pkfn, (microtask_t)( ~ 0 )); -#endif - - KF_TRACE( 10, ( "__kmp_initialize_root: after root_team = %p\n", root_team ) ); - - root->r.r_root_team = root_team; - root_team->t.t_control_stack_top = NULL; - - /* initialize root team */ - root_team->t.t_threads[0] = NULL; - root_team->t.t_nproc = 1; - root_team->t.t_serialized = 1; - // TODO???: root_team->t.t_max_active_levels = __kmp_dflt_max_active_levels; - root_team->t.t_sched.r_sched_type = r_sched.r_sched_type; - root_team->t.t_sched.chunk = r_sched.chunk; - KA_TRACE( 20, ("__kmp_initialize_root: init root team %d arrived: join=%u, plain=%u\n", - root_team->t.t_id, KMP_INIT_BARRIER_STATE, KMP_INIT_BARRIER_STATE )); - - /* setup the hot team for this task */ - /* allocate the hot team structure */ - KF_TRACE( 10, ( "__kmp_initialize_root: before hot_team\n" ) ); - - hot_team = - __kmp_allocate_team( - root, - 1, // new_nproc - __kmp_dflt_team_nth_ub * 2, // max_nproc -#if OMPT_SUPPORT - 0, // root parallel id -#endif -#if OMP_40_ENABLED - __kmp_nested_proc_bind.bind_types[0], -#endif - &r_icvs, - 0 // argc - USE_NESTED_HOT_ARG(NULL) // master thread is unknown - ); - KF_TRACE( 10, ( "__kmp_initialize_root: after hot_team = %p\n", hot_team ) ); - - root->r.r_hot_team = hot_team; - root_team->t.t_control_stack_top = NULL; - - /* first-time initialization */ - hot_team->t.t_parent = root_team; - - /* initialize hot team */ - hot_team_max_nth = hot_team->t.t_max_nproc; - for ( f = 0; f < hot_team_max_nth; ++ f ) { - hot_team->t.t_threads[ f ] = NULL; - }; // for - hot_team->t.t_nproc = 1; - // TODO???: hot_team->t.t_max_active_levels = __kmp_dflt_max_active_levels; - hot_team->t.t_sched.r_sched_type = r_sched.r_sched_type; - hot_team->t.t_sched.chunk = r_sched.chunk; - hot_team->t.t_size_changed = 0; -} - -#ifdef KMP_DEBUG - - -typedef struct kmp_team_list_item { - kmp_team_p const * entry; - struct kmp_team_list_item * next; -} kmp_team_list_item_t; -typedef kmp_team_list_item_t * kmp_team_list_t; - - -static void -__kmp_print_structure_team_accum( // Add team to list of teams. - kmp_team_list_t list, // List of teams. - kmp_team_p const * team // Team to add. -) { - - // List must terminate with item where both entry and next are NULL. - // Team is added to the list only once. - // List is sorted in ascending order by team id. - // Team id is *not* a key. - - kmp_team_list_t l; - - KMP_DEBUG_ASSERT( list != NULL ); - if ( team == NULL ) { - return; - }; // if - - __kmp_print_structure_team_accum( list, team->t.t_parent ); - __kmp_print_structure_team_accum( list, team->t.t_next_pool ); - - // Search list for the team. - l = list; - while ( l->next != NULL && l->entry != team ) { - l = l->next; - }; // while - if ( l->next != NULL ) { - return; // Team has been added before, exit. - }; // if - - // Team is not found. Search list again for insertion point. - l = list; - while ( l->next != NULL && l->entry->t.t_id <= team->t.t_id ) { - l = l->next; - }; // while - - // Insert team. 
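-    // Insertion-before-l without a predecessor pointer: copy node l into the
-    // fresh item, then rewrite l in place to hold the new entry.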
- { - kmp_team_list_item_t * item = - (kmp_team_list_item_t *)KMP_INTERNAL_MALLOC( sizeof( kmp_team_list_item_t ) ); - * item = * l; - l->entry = team; - l->next = item; - } - -} - -static void -__kmp_print_structure_team( - char const * title, - kmp_team_p const * team - -) { - __kmp_printf( "%s", title ); - if ( team != NULL ) { - __kmp_printf( "%2x %p\n", team->t.t_id, team ); - } else { - __kmp_printf( " - (nil)\n" ); - }; // if -} - -static void -__kmp_print_structure_thread( - char const * title, - kmp_info_p const * thread - -) { - __kmp_printf( "%s", title ); - if ( thread != NULL ) { - __kmp_printf( "%2d %p\n", thread->th.th_info.ds.ds_gtid, thread ); - } else { - __kmp_printf( " - (nil)\n" ); - }; // if -} - -void -__kmp_print_structure( - void -) { - - kmp_team_list_t list; - - // Initialize list of teams. - list = (kmp_team_list_item_t *)KMP_INTERNAL_MALLOC( sizeof( kmp_team_list_item_t ) ); - list->entry = NULL; - list->next = NULL; - - __kmp_printf( "\n------------------------------\nGlobal Thread Table\n------------------------------\n" ); - { - int gtid; - for ( gtid = 0; gtid < __kmp_threads_capacity; ++ gtid ) { - __kmp_printf( "%2d", gtid ); - if ( __kmp_threads != NULL ) { - __kmp_printf( " %p", __kmp_threads[ gtid ] ); - }; // if - if ( __kmp_root != NULL ) { - __kmp_printf( " %p", __kmp_root[ gtid ] ); - }; // if - __kmp_printf( "\n" ); - }; // for gtid - } - - // Print out __kmp_threads array. - __kmp_printf( "\n------------------------------\nThreads\n------------------------------\n" ); - if ( __kmp_threads != NULL ) { - int gtid; - for ( gtid = 0; gtid < __kmp_threads_capacity; ++ gtid ) { - kmp_info_t const * thread = __kmp_threads[ gtid ]; - if ( thread != NULL ) { - __kmp_printf( "GTID %2d %p:\n", gtid, thread ); - __kmp_printf( " Our Root: %p\n", thread->th.th_root ); - __kmp_print_structure_team( " Our Team: ", thread->th.th_team ); - __kmp_print_structure_team( " Serial Team: ", thread->th.th_serial_team ); - __kmp_printf( " Threads: %2d\n", thread->th.th_team_nproc ); - __kmp_print_structure_thread( " Master: ", thread->th.th_team_master ); - __kmp_printf( " Serialized?: %2d\n", thread->th.th_team_serialized ); - __kmp_printf( " Set NProc: %2d\n", thread->th.th_set_nproc ); -#if OMP_40_ENABLED - __kmp_printf( " Set Proc Bind: %2d\n", thread->th.th_set_proc_bind ); -#endif - __kmp_print_structure_thread( " Next in pool: ", thread->th.th_next_pool ); - __kmp_printf( "\n" ); - __kmp_print_structure_team_accum( list, thread->th.th_team ); - __kmp_print_structure_team_accum( list, thread->th.th_serial_team ); - }; // if - }; // for gtid - } else { - __kmp_printf( "Threads array is not allocated.\n" ); - }; // if - - // Print out __kmp_root array. 
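-    // (Each registered root keeps a root team, a hot team and an uber thread;
-    // the dump below prints them along with the root's activity flags.)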
- __kmp_printf( "\n------------------------------\nUbers\n------------------------------\n" ); - if ( __kmp_root != NULL ) { - int gtid; - for ( gtid = 0; gtid < __kmp_threads_capacity; ++ gtid ) { - kmp_root_t const * root = __kmp_root[ gtid ]; - if ( root != NULL ) { - __kmp_printf( "GTID %2d %p:\n", gtid, root ); - __kmp_print_structure_team( " Root Team: ", root->r.r_root_team ); - __kmp_print_structure_team( " Hot Team: ", root->r.r_hot_team ); - __kmp_print_structure_thread( " Uber Thread: ", root->r.r_uber_thread ); - __kmp_printf( " Active?: %2d\n", root->r.r_active ); - __kmp_printf( " Nested?: %2d\n", root->r.r_nested ); - __kmp_printf( " In Parallel: %2d\n", root->r.r_in_parallel ); - __kmp_printf( "\n" ); - __kmp_print_structure_team_accum( list, root->r.r_root_team ); - __kmp_print_structure_team_accum( list, root->r.r_hot_team ); - }; // if - }; // for gtid - } else { - __kmp_printf( "Ubers array is not allocated.\n" ); - }; // if - - __kmp_printf( "\n------------------------------\nTeams\n------------------------------\n" ); - while ( list->next != NULL ) { - kmp_team_p const * team = list->entry; - int i; - __kmp_printf( "Team %2x %p:\n", team->t.t_id, team ); - __kmp_print_structure_team( " Parent Team: ", team->t.t_parent ); - __kmp_printf( " Master TID: %2d\n", team->t.t_master_tid ); - __kmp_printf( " Max threads: %2d\n", team->t.t_max_nproc ); - __kmp_printf( " Levels of serial: %2d\n", team->t.t_serialized ); - __kmp_printf( " Number threads: %2d\n", team->t.t_nproc ); - for ( i = 0; i < team->t.t_nproc; ++ i ) { - __kmp_printf( " Thread %2d: ", i ); - __kmp_print_structure_thread( "", team->t.t_threads[ i ] ); - }; // for i - __kmp_print_structure_team( " Next in pool: ", team->t.t_next_pool ); - __kmp_printf( "\n" ); - list = list->next; - }; // while - - // Print out __kmp_thread_pool and __kmp_team_pool. - __kmp_printf( "\n------------------------------\nPools\n------------------------------\n" ); - __kmp_print_structure_thread( "Thread pool: ", (kmp_info_t *)__kmp_thread_pool ); - __kmp_print_structure_team( "Team pool: ", (kmp_team_t *)__kmp_team_pool ); - __kmp_printf( "\n" ); - - // Free team list. - while ( list != NULL ) { - kmp_team_list_item_t * item = list; - list = list->next; - KMP_INTERNAL_FREE( item ); - }; // while - -} - -#endif - - -//--------------------------------------------------------------------------- -// Stuff for per-thread fast random number generator -// Table of primes - -static const unsigned __kmp_primes[] = { - 0x9e3779b1, 0xffe6cc59, 0x2109f6dd, 0x43977ab5, - 0xba5703f5, 0xb495a877, 0xe1626741, 0x79695e6b, - 0xbc98c09f, 0xd5bee2b3, 0x287488f9, 0x3af18231, - 0x9677cd4d, 0xbe3a6929, 0xadc6a877, 0xdcf0674b, - 0xbe4d6fe9, 0x5f15e201, 0x99afc3fd, 0xf3f16801, - 0xe222cfff, 0x24ba5fdb, 0x0620452d, 0x79f149e3, - 0xc8b93f49, 0x972702cd, 0xb07dd827, 0x6c97d5ed, - 0x085a3d61, 0x46eb5ea7, 0x3d9910ed, 0x2e687b5b, - 0x29609227, 0x6eb081f1, 0x0954c4e1, 0x9d114db9, - 0x542acfa9, 0xb3e6bd7b, 0x0742d917, 0xe9f3ffa7, - 0x54581edb, 0xf2480f45, 0x0bb9288f, 0xef1affc7, - 0x85fa0ca7, 0x3ccc14db, 0xe6baf34b, 0x343377f7, - 0x5ca19031, 0xe6d9293b, 0xf0a9f391, 0x5d2e980b, - 0xfc411073, 0xc3749363, 0xb892d829, 0x3549366b, - 0x629750ad, 0xb98294e5, 0x892d9483, 0xc235baf3, - 0x3d2402a3, 0x6bdef3c9, 0xbec333cd, 0x40c9520f -}; - -//--------------------------------------------------------------------------- -// __kmp_get_random: Get a random number using a linear congruential method. 
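-// (x is updated as x = a*x + 1, i.e. mod 2^32 in unsigned arithmetic, with a
-// per-thread multiplier a taken from __kmp_primes; the high 16 bits of the
-// previous x are returned.)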
-
-unsigned short
-__kmp_get_random( kmp_info_t * thread )
-{
-    unsigned x = thread->th.th_x;
-    unsigned short r = x>>16;
-
-    thread->th.th_x = x*thread->th.th_a+1;
-
-    KA_TRACE(30, ("__kmp_get_random: THREAD: %d, RETURN: %u\n",
-                  thread->th.th_info.ds.ds_tid, r) );
-
-    return r;
-}
-//--------------------------------------------------------
-// __kmp_init_random: Initialize a random number generator
-
-void
-__kmp_init_random( kmp_info_t * thread )
-{
-    unsigned seed = thread->th.th_info.ds.ds_tid;
-
-    thread->th.th_a = __kmp_primes[seed%(sizeof(__kmp_primes)/sizeof(__kmp_primes[0]))];
-    thread->th.th_x = (seed+1)*thread->th.th_a+1;
-    KA_TRACE(30, ("__kmp_init_random: THREAD: %u; A: %u\n", seed, thread->th.th_a) );
-}
-
-
-#if KMP_OS_WINDOWS
-/* reclaim array entries for root threads that are already dead, returns number reclaimed */
-static int
-__kmp_reclaim_dead_roots(void) {
-    int i, r = 0;
-
-    for(i = 0; i < __kmp_threads_capacity; ++i) {
-        if( KMP_UBER_GTID( i ) &&
-          !__kmp_still_running((kmp_info_t *)TCR_SYNC_PTR(__kmp_threads[i])) &&
-          !__kmp_root[i]->r.r_active ) { // AC: reclaim only roots that died in non-active state
-            r += __kmp_unregister_root_other_thread(i);
-        }
-    }
-    return r;
-}
-#endif
-
-/*
-   This function attempts to create free entries in __kmp_threads and __kmp_root, and returns the number of
-   free entries generated.
-
-   For Windows* OS static library, the first mechanism used is to reclaim array entries for root threads that are
-   already dead.
-
-   On all platforms, expansion is attempted on the arrays __kmp_threads and __kmp_root, with appropriate
-   update to __kmp_threads_capacity. Array capacity is increased by doubling with clipping to
-   __kmp_tp_capacity, if threadprivate cache array has been created.
-   Synchronization with __kmpc_threadprivate_cached is done using __kmp_tp_cached_lock.
-
-   After any dead root reclamation, if the clipping value allows array expansion to result in the generation
-   of a total of nWish free slots, the function does that expansion. If not, but the clipping value allows
-   array expansion to result in the generation of a total of nNeed free slots, the function does that expansion.
-   Otherwise, nothing is done beyond the possible initial root thread reclamation. However, if nNeed is zero,
-   a best-effort attempt is made to fulfil nWish as far as possible, i.e. the function will attempt to create
-   as many free slots as possible up to nWish.
-
-   If any argument is negative, the behavior is undefined.
-*/
-static int
-__kmp_expand_threads(int nWish, int nNeed) {
-    int added = 0;
-    int old_tp_cached;
-    int __kmp_actual_max_nth;
-
-    if(nNeed > nWish) /* normalize the arguments */
-        nWish = nNeed;
-#if KMP_OS_WINDOWS && !defined KMP_DYNAMIC_LIB
-/* only for Windows static library */
-    /* reclaim array entries for root threads that are already dead */
-    added = __kmp_reclaim_dead_roots();
-
-    if(nNeed) {
-        nNeed -= added;
-        if(nNeed < 0)
-            nNeed = 0;
-    }
-    if(nWish) {
-        nWish -= added;
-        if(nWish < 0)
-            nWish = 0;
-    }
-#endif
-    if(nWish <= 0)
-        return added;
-
-    while(1) {
-        int nTarget;
-        int minimumRequiredCapacity;
-        int newCapacity;
-        kmp_info_t **newThreads;
-        kmp_root_t **newRoot;
-
-        //
-        // Note that __kmp_threads_capacity is not bounded by __kmp_max_nth.
-        // If __kmp_max_nth is set to some value less than __kmp_sys_max_nth
-        // by the user via OMP_THREAD_LIMIT, then __kmp_threads_capacity may
-        // become > __kmp_max_nth in one of two ways:
-        //
-        // 1) The initialization thread (gtid = 0) exits.  __kmp_threads[0]
-        //    may not be reused by another thread, so we may need to increase
-        //    __kmp_threads_capacity to __kmp_max_threads + 1.
-        //
-        // 2) New foreign root(s) are encountered.  We always register new
-        //    foreign roots.  This may cause a smaller # of threads to be
-        //    allocated at subsequent parallel regions, but the worker threads
-        //    hang around (and eventually go to sleep) and need slots in the
-        //    __kmp_threads[] array.
-        //
-        // Anyway, that is the reason for moving the check to see if
-        // __kmp_max_threads was exceeded into __kmp_reserve_threads()
-        // instead of having it performed here. -BB
-        //
-        old_tp_cached = __kmp_tp_cached;
-        __kmp_actual_max_nth = old_tp_cached ? __kmp_tp_capacity : __kmp_sys_max_nth;
-        KMP_DEBUG_ASSERT(__kmp_actual_max_nth >= __kmp_threads_capacity);
-
-        /* compute expansion headroom to check if we can expand and whether to aim for nWish or nNeed */
-        nTarget = nWish;
-        if(__kmp_actual_max_nth - __kmp_threads_capacity < nTarget) {
-            /* can't fulfil nWish, so try nNeed */
-            if(nNeed) {
-                nTarget = nNeed;
-                if(__kmp_actual_max_nth - __kmp_threads_capacity < nTarget) {
-                    /* possible expansion too small -- give up */
-                    break;
-                }
-            } else {
-                /* best-effort */
-                nTarget = __kmp_actual_max_nth - __kmp_threads_capacity;
-                if(!nTarget) {
-                    /* can't expand at all -- give up */
-                    break;
-                }
-            }
-        }
-        minimumRequiredCapacity = __kmp_threads_capacity + nTarget;
-
-        newCapacity = __kmp_threads_capacity;
-        do{
-            newCapacity =
-                newCapacity <= (__kmp_actual_max_nth >> 1) ?
-                (newCapacity << 1) :
-                __kmp_actual_max_nth;
-        } while(newCapacity < minimumRequiredCapacity);
-        newThreads = (kmp_info_t**) __kmp_allocate((sizeof(kmp_info_t*) + sizeof(kmp_root_t*)) * newCapacity + CACHE_LINE);
-        newRoot = (kmp_root_t**) ((char*)newThreads + sizeof(kmp_info_t*) * newCapacity );
-        KMP_MEMCPY(newThreads, __kmp_threads, __kmp_threads_capacity * sizeof(kmp_info_t*));
-        KMP_MEMCPY(newRoot, __kmp_root, __kmp_threads_capacity * sizeof(kmp_root_t*));
-        memset(newThreads + __kmp_threads_capacity, 0,
-               (newCapacity - __kmp_threads_capacity) * sizeof(kmp_info_t*));
-        memset(newRoot + __kmp_threads_capacity, 0,
-               (newCapacity - __kmp_threads_capacity) * sizeof(kmp_root_t*));
-
-        if(!old_tp_cached && __kmp_tp_cached && newCapacity > __kmp_tp_capacity) {
-            /* __kmp_tp_cached has changed, i.e. __kmpc_threadprivate_cached has allocated a threadprivate cache
-               while we were allocating the expanded array, and our new capacity is larger than the threadprivate
-               cache capacity, so we should deallocate the expanded arrays and try again.  This is the first check
-               of a double-check pair.
-            */
-            __kmp_free(newThreads);
-            continue; /* start over and try again */
-        }
-        __kmp_acquire_bootstrap_lock(&__kmp_tp_cached_lock);
-        if(!old_tp_cached && __kmp_tp_cached && newCapacity > __kmp_tp_capacity) {
-            /* Same check as above, but this time with the lock so we can be sure if we can succeed. */
-            __kmp_release_bootstrap_lock(&__kmp_tp_cached_lock);
-            __kmp_free(newThreads);
-            continue; /* start over and try again */
-        } else {
-            /* success */
-            // __kmp_free( __kmp_threads ); // ATT: It leads to crash. Need to be investigated.
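-            // (Lock-free readers may still be traversing the old array, which
-            // is the likely reason freeing it crashes; leaking the old array
-            // is the conservative choice.)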
-            //
-            *(kmp_info_t**volatile*)&__kmp_threads = newThreads;
-            *(kmp_root_t**volatile*)&__kmp_root = newRoot;
-            added += newCapacity - __kmp_threads_capacity;
-            *(volatile int*)&__kmp_threads_capacity = newCapacity;
-            __kmp_release_bootstrap_lock(&__kmp_tp_cached_lock);
-            break; /* succeeded, so we can exit the loop */
-        }
-    }
-    return added;
-}
-
-/* register the current thread as a root thread and obtain our gtid */
-/* we must have the __kmp_initz_lock held at this point */
-/* Argument TRUE only if we are the thread that calls from __kmp_do_serial_initialize() */
-int
-__kmp_register_root( int initial_thread )
-{
-    kmp_info_t *root_thread;
-    kmp_root_t *root;
-    int gtid;
-    int capacity;
-    __kmp_acquire_bootstrap_lock( &__kmp_forkjoin_lock );
-    KA_TRACE( 20, ("__kmp_register_root: entered\n"));
-    KMP_MB();
-
-
-    /*
-        2007-03-02:
-
-        If initial thread did not invoke OpenMP RTL yet, and this thread is not an initial one,
-        "__kmp_all_nth >= __kmp_threads_capacity" condition does not work as expected -- it may
-        return false (that means there is at least one empty slot in __kmp_threads array), but it
-        is possible the only free slot is #0, which is reserved for initial thread and so cannot be
-        used for this one. The following code works around this bug.
-
-        However, the right solution seems to be not reserving slot #0 for the initial thread because:
-        (1) there is no magic in slot #0,
-        (2) we cannot detect the initial thread reliably (the first thread which does serial
-            initialization may not be a real initial thread).
-    */
-    capacity = __kmp_threads_capacity;
-    if ( ! initial_thread && TCR_PTR(__kmp_threads[0]) == NULL ) {
-        -- capacity;
-    }; // if
-
-    /* see if there are too many threads */
-    if ( __kmp_all_nth >= capacity && !__kmp_expand_threads( 1, 1 ) ) {
-        if ( __kmp_tp_cached ) {
-            __kmp_msg(
-                kmp_ms_fatal,
-                KMP_MSG( CantRegisterNewThread ),
-                KMP_HNT( Set_ALL_THREADPRIVATE, __kmp_tp_capacity ),
-                KMP_HNT( PossibleSystemLimitOnThreads ),
-                __kmp_msg_null
-            );
-        }
-        else {
-            __kmp_msg(
-                kmp_ms_fatal,
-                KMP_MSG( CantRegisterNewThread ),
-                KMP_HNT( SystemLimitOnThreads ),
-                __kmp_msg_null
-            );
-        }
-    }; // if
-
-    /* find an available thread slot */
-    /* Don't reassign the zero slot since we need that to only be used by initial
-       thread */
-    for( gtid=(initial_thread ? 0 : 1) ; TCR_PTR(__kmp_threads[gtid]) != NULL ; gtid++ )
-        ;
-    KA_TRACE( 1, ("__kmp_register_root: found slot in threads array: T#%d\n", gtid ));
-    KMP_ASSERT( gtid < __kmp_threads_capacity );
-
-    /* update global accounting */
-    __kmp_all_nth ++;
-    TCW_4(__kmp_nth, __kmp_nth + 1);
-
-    //
-    // if __kmp_adjust_gtid_mode is set, then we use method #1 (sp search)
-    // for low numbers of procs, and method #2 (keyed API call) for higher
-    // numbers of procs.
-    //
-    if ( __kmp_adjust_gtid_mode ) {
-        if ( __kmp_all_nth >= __kmp_tls_gtid_min ) {
-            if ( TCR_4(__kmp_gtid_mode) != 2) {
-                TCW_4(__kmp_gtid_mode, 2);
-            }
-        }
-        else {
-            if (TCR_4(__kmp_gtid_mode) != 1 ) {
-                TCW_4(__kmp_gtid_mode, 1);
-            }
-        }
-    }
-
-#ifdef KMP_ADJUST_BLOCKTIME
-    /* Adjust blocktime to zero if necessary            */
-    /* Middle initialization might not have occurred yet */
-    if ( !__kmp_env_blocktime && ( __kmp_avail_proc > 0 ) ) {
-        if ( __kmp_nth > __kmp_avail_proc ) {
-            __kmp_zero_bt = TRUE;
-        }
-    }
-#endif /* KMP_ADJUST_BLOCKTIME */
-
-    /* setup this new hierarchy */
-    if( ! ( root = __kmp_root[gtid] )) {
-        root = __kmp_root[gtid] = (kmp_root_t*) __kmp_allocate( sizeof(kmp_root_t) );
-        KMP_DEBUG_ASSERT( !
root->r.r_root_team ); - } - -#if KMP_STATS_ENABLED - // Initialize stats as soon as possible (right after gtid assignment). - __kmp_stats_thread_ptr = __kmp_stats_list->push_back(gtid); - KMP_START_EXPLICIT_TIMER(OMP_worker_thread_life); - KMP_SET_THREAD_STATE(SERIAL_REGION); - KMP_INIT_PARTITIONED_TIMERS(OMP_serial); -#endif - __kmp_initialize_root( root ); - - /* setup new root thread structure */ - if( root->r.r_uber_thread ) { - root_thread = root->r.r_uber_thread; - } else { - root_thread = (kmp_info_t*) __kmp_allocate( sizeof(kmp_info_t) ); - if ( __kmp_storage_map ) { - __kmp_print_thread_storage_map( root_thread, gtid ); - } - root_thread->th.th_info .ds.ds_gtid = gtid; - root_thread->th.th_root = root; - if( __kmp_env_consistency_check ) { - root_thread->th.th_cons = __kmp_allocate_cons_stack( gtid ); - } - #if USE_FAST_MEMORY - __kmp_initialize_fast_memory( root_thread ); - #endif /* USE_FAST_MEMORY */ - - #if KMP_USE_BGET - KMP_DEBUG_ASSERT( root_thread->th.th_local.bget_data == NULL ); - __kmp_initialize_bget( root_thread ); - #endif - __kmp_init_random( root_thread ); // Initialize random number generator - } - - /* setup the serial team held in reserve by the root thread */ - if( ! root_thread->th.th_serial_team ) { - kmp_internal_control_t r_icvs = __kmp_get_global_icvs(); - KF_TRACE( 10, ( "__kmp_register_root: before serial_team\n" ) ); - - root_thread->th.th_serial_team = __kmp_allocate_team( root, 1, 1, -#if OMPT_SUPPORT - 0, // root parallel id -#endif -#if OMP_40_ENABLED - proc_bind_default, -#endif - &r_icvs, - 0 USE_NESTED_HOT_ARG(NULL) ); - } - KMP_ASSERT( root_thread->th.th_serial_team ); - KF_TRACE( 10, ( "__kmp_register_root: after serial_team = %p\n", - root_thread->th.th_serial_team ) ); - - /* drop root_thread into place */ - TCW_SYNC_PTR(__kmp_threads[gtid], root_thread); - - root->r.r_root_team->t.t_threads[0] = root_thread; - root->r.r_hot_team ->t.t_threads[0] = root_thread; - root_thread->th.th_serial_team->t.t_threads[0] = root_thread; - root_thread->th.th_serial_team->t.t_serialized = 0; // AC: the team created in reserve, not for execution (it is unused for now). - root->r.r_uber_thread = root_thread; - - /* initialize the thread, get it ready to go */ - __kmp_initialize_info( root_thread, root->r.r_root_team, 0, gtid ); - TCW_4(__kmp_init_gtid, TRUE); - - /* prepare the master thread for get_gtid() */ - __kmp_gtid_set_specific( gtid ); - -#if USE_ITT_BUILD - __kmp_itt_thread_name( gtid ); -#endif /* USE_ITT_BUILD */ - - #ifdef KMP_TDATA_GTID - __kmp_gtid = gtid; - #endif - __kmp_create_worker( gtid, root_thread, __kmp_stksize ); - KMP_DEBUG_ASSERT( __kmp_gtid_get_specific() == gtid ); - - KA_TRACE( 20, ("__kmp_register_root: T#%d init T#%d(%d:%d) arrived: join=%u, plain=%u\n", - gtid, __kmp_gtid_from_tid( 0, root->r.r_hot_team ), - root->r.r_hot_team->t.t_id, 0, KMP_INIT_BARRIER_STATE, - KMP_INIT_BARRIER_STATE ) ); - { // Initialize barrier data. 
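-        // Seed the b_arrived counter of every barrier type with
-        // KMP_INIT_BARRIER_STATE so the first barrier starts from a known state.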
-        int b;
-        for ( b = 0; b < bs_last_barrier; ++ b ) {
-            root_thread->th.th_bar[ b ].bb.b_arrived        = KMP_INIT_BARRIER_STATE;
-#if USE_DEBUGGER
-            root_thread->th.th_bar[ b ].bb.b_worker_arrived = 0;
-#endif
-        }; // for
-    }
-    KMP_DEBUG_ASSERT( root->r.r_hot_team->t.t_bar[ bs_forkjoin_barrier ].b_arrived == KMP_INIT_BARRIER_STATE );
-
-#if KMP_AFFINITY_SUPPORTED
-# if OMP_40_ENABLED
-    root_thread->th.th_current_place = KMP_PLACE_UNDEFINED;
-    root_thread->th.th_new_place = KMP_PLACE_UNDEFINED;
-    root_thread->th.th_first_place = KMP_PLACE_UNDEFINED;
-    root_thread->th.th_last_place = KMP_PLACE_UNDEFINED;
-# endif
-
-    if ( TCR_4(__kmp_init_middle) ) {
-        __kmp_affinity_set_init_mask( gtid, TRUE );
-    }
-#endif /* KMP_AFFINITY_SUPPORTED */
-
-    __kmp_root_counter ++;
-
-    KMP_MB();
-    __kmp_release_bootstrap_lock( &__kmp_forkjoin_lock );
-
-    return gtid;
-}
-
-#if KMP_NESTED_HOT_TEAMS
-static int
-__kmp_free_hot_teams( kmp_root_t *root, kmp_info_t *thr, int level, const int max_level )
-{
-    int i, n, nth;
-    kmp_hot_team_ptr_t *hot_teams = thr->th.th_hot_teams;
-    if( !hot_teams || !hot_teams[level].hot_team ) {
-        return 0;
-    }
-    KMP_DEBUG_ASSERT( level < max_level );
-    kmp_team_t *team = hot_teams[level].hot_team;
-    nth = hot_teams[level].hot_team_nth;
-    n = nth - 1; // master is not freed
-    if( level < max_level - 1 ) {
-        for( i = 0; i < nth; ++i ) {
-            kmp_info_t *th = team->t.t_threads[i];
-            n += __kmp_free_hot_teams( root, th, level + 1, max_level );
-            if( i > 0 && th->th.th_hot_teams ) {
-                __kmp_free( th->th.th_hot_teams );
-                th->th.th_hot_teams = NULL;
-            }
-        }
-    }
-    __kmp_free_team( root, team, NULL );
-    return n;
-}
-#endif
-
-/* Resets a root thread and clears its root and hot teams.
-   Returns the number of __kmp_threads entries directly and indirectly freed.
-*/
-static int
-__kmp_reset_root(int gtid, kmp_root_t *root)
-{
-    kmp_team_t * root_team = root->r.r_root_team;
-    kmp_team_t * hot_team  = root->r.r_hot_team;
-    int          n         = hot_team->t.t_nproc;
-    int i;
-
-    KMP_DEBUG_ASSERT( ! root->r.r_active );
-
-    root->r.r_root_team = NULL;
-    root->r.r_hot_team  = NULL;
-    // __kmp_free_team() does not free hot teams, so we have to clear r_hot_team before the call
-    // to __kmp_free_team().
-    __kmp_free_team( root, root_team USE_NESTED_HOT_ARG(NULL) );
-#if KMP_NESTED_HOT_TEAMS
-    if( __kmp_hot_teams_max_level > 0 ) { // need to free nested hot teams and their threads if any
-        for( i = 0; i < hot_team->t.t_nproc; ++i ) {
-            kmp_info_t *th = hot_team->t.t_threads[i];
-            if( __kmp_hot_teams_max_level > 1 ) {
-                n += __kmp_free_hot_teams( root, th, 1, __kmp_hot_teams_max_level );
-            }
-            if( th->th.th_hot_teams ) {
-                __kmp_free( th->th.th_hot_teams );
-                th->th.th_hot_teams = NULL;
-            }
-        }
-    }
-#endif
-    __kmp_free_team( root, hot_team USE_NESTED_HOT_ARG(NULL) );
-
-    //
-    // Before we can reap the thread, we need to make certain that all
-    // other threads in the teams that had this root as ancestor have stopped trying to steal tasks.
-    //
-    if ( __kmp_tasking_mode != tskm_immediate_exec ) {
-        __kmp_wait_to_unref_task_teams();
-    }
-
-    #if KMP_OS_WINDOWS
-        /* Close Handle of root duplicated in __kmp_create_worker (tr #62919) */
-        KA_TRACE( 10, ("__kmp_reset_root: free handle, th = %p, handle = %" KMP_UINTPTR_SPEC "\n",
-            (LPVOID)&(root->r.r_uber_thread->th),
-            root->r.r_uber_thread->th.th_info.ds.ds_thread ) );
-        __kmp_free_handle( root->r.r_uber_thread->th.th_info.ds.ds_thread );
-    #endif /* KMP_OS_WINDOWS */
-
-#if OMPT_SUPPORT
-    if (ompt_enabled &&
-        ompt_callbacks.ompt_callback(ompt_event_thread_end)) {
-        int gtid = __kmp_get_gtid();
-        __ompt_thread_end(ompt_thread_initial, gtid);
-    }
-#endif
-
-    TCW_4(__kmp_nth, __kmp_nth - 1); // __kmp_reap_thread will decrement __kmp_all_nth.
-    __kmp_reap_thread( root->r.r_uber_thread, 1 );
-
-    // We cannot put root thread to __kmp_thread_pool, so we have to reap it instead of freeing.
-    root->r.r_uber_thread = NULL;
-    /* mark root as no longer in use */
-    root->r.r_begin = FALSE;
-
-    return n;
-}
-
-void
-__kmp_unregister_root_current_thread( int gtid )
-{
-    KA_TRACE( 1, ("__kmp_unregister_root_current_thread: enter T#%d\n", gtid ));
-    /* this lock should be ok, since unregister_root_current_thread is never called during
-     * an abort, only during a normal close.  furthermore, if you have the
-     * forkjoin lock, you should never try to get the initz lock */
-
-    __kmp_acquire_bootstrap_lock( &__kmp_forkjoin_lock );
-    if( TCR_4(__kmp_global.g.g_done) || !__kmp_init_serial ) {
-        KC_TRACE( 10, ("__kmp_unregister_root_current_thread: already finished, exiting T#%d\n", gtid ));
-        __kmp_release_bootstrap_lock( &__kmp_forkjoin_lock );
-        return;
-    }
-    kmp_root_t *root = __kmp_root[gtid];
-
-    KMP_DEBUG_ASSERT( __kmp_threads && __kmp_threads[gtid] );
-    KMP_ASSERT( KMP_UBER_GTID( gtid ));
-    KMP_ASSERT( root == __kmp_threads[gtid]->th.th_root );
-    KMP_ASSERT( root->r.r_active == FALSE );
-
-
-    KMP_MB();
-
-#if OMP_45_ENABLED
-    kmp_info_t * thread = __kmp_threads[gtid];
-    kmp_team_t * team = thread->th.th_team;
-    kmp_task_team_t * task_team = thread->th.th_task_team;
-
-    // we need to wait for the proxy tasks before finishing the thread
-    if ( task_team != NULL && task_team->tt.tt_found_proxy_tasks ) {
-#if OMPT_SUPPORT
-        // the runtime is shutting down so we won't report any events
-        thread->th.ompt_thread_info.state = ompt_state_undefined;
-#endif
-        __kmp_task_team_wait(thread, team USE_ITT_BUILD_ARG(NULL));
-    }
-#endif
-
-    __kmp_reset_root(gtid, root);
-
-    /* free up this thread slot */
-    __kmp_gtid_set_specific( KMP_GTID_DNE );
-#ifdef KMP_TDATA_GTID
-    __kmp_gtid = KMP_GTID_DNE;
-#endif
-
-    KMP_MB();
-    KC_TRACE( 10, ("__kmp_unregister_root_current_thread: T#%d unregistered\n", gtid ));
-
-    __kmp_release_bootstrap_lock( &__kmp_forkjoin_lock );
-}
-
-#if KMP_OS_WINDOWS
-/* __kmp_forkjoin_lock must be already held
-   Unregisters a root thread that is not the current thread.  Returns the number of
-   __kmp_threads entries freed as a result.
- */ -static int -__kmp_unregister_root_other_thread( int gtid ) -{ - kmp_root_t *root = __kmp_root[gtid]; - int r; - - KA_TRACE( 1, ("__kmp_unregister_root_other_thread: enter T#%d\n", gtid )); - KMP_DEBUG_ASSERT( __kmp_threads && __kmp_threads[gtid] ); - KMP_ASSERT( KMP_UBER_GTID( gtid )); - KMP_ASSERT( root == __kmp_threads[gtid]->th.th_root ); - KMP_ASSERT( root->r.r_active == FALSE ); - - r = __kmp_reset_root(gtid, root); - KC_TRACE( 10, ("__kmp_unregister_root_other_thread: T#%d unregistered\n", gtid )); - return r; -} -#endif - -#if KMP_DEBUG -void __kmp_task_info() { - - kmp_int32 gtid = __kmp_entry_gtid(); - kmp_int32 tid = __kmp_tid_from_gtid( gtid ); - kmp_info_t *this_thr = __kmp_threads[ gtid ]; - kmp_team_t *steam = this_thr->th.th_serial_team; - kmp_team_t *team = this_thr->th.th_team; - - __kmp_printf( "__kmp_task_info: gtid=%d tid=%d t_thread=%p team=%p curtask=%p ptask=%p\n", - gtid, tid, this_thr, team, this_thr->th.th_current_task, team->t.t_implicit_task_taskdata[tid].td_parent ); -} -#endif // KMP_DEBUG - -/* TODO optimize with one big memclr, take out what isn't needed, - * split responsibility to workers as much as possible, and delay - * initialization of features as much as possible */ -static void -__kmp_initialize_info( kmp_info_t *this_thr, kmp_team_t *team, int tid, int gtid ) -{ - /* this_thr->th.th_info.ds.ds_gtid is setup in kmp_allocate_thread/create_worker - * this_thr->th.th_serial_team is setup in __kmp_allocate_thread */ - kmp_info_t *master = team->t.t_threads[0]; - KMP_DEBUG_ASSERT( this_thr != NULL ); - KMP_DEBUG_ASSERT( this_thr->th.th_serial_team ); - KMP_DEBUG_ASSERT( team ); - KMP_DEBUG_ASSERT( team->t.t_threads ); - KMP_DEBUG_ASSERT( team->t.t_dispatch ); - KMP_DEBUG_ASSERT( master ); - KMP_DEBUG_ASSERT( master->th.th_root ); - - KMP_MB(); - - TCW_SYNC_PTR(this_thr->th.th_team, team); - - this_thr->th.th_info.ds.ds_tid = tid; - this_thr->th.th_set_nproc = 0; -#if OMP_40_ENABLED - this_thr->th.th_set_proc_bind = proc_bind_default; -# if KMP_AFFINITY_SUPPORTED - this_thr->th.th_new_place = this_thr->th.th_current_place; -# endif -#endif - this_thr->th.th_root = master->th.th_root; - - /* setup the thread's cache of the team structure */ - this_thr->th.th_team_nproc = team->t.t_nproc; - this_thr->th.th_team_master = master; - this_thr->th.th_team_serialized = team->t.t_serialized; - TCW_PTR(this_thr->th.th_sleep_loc, NULL); - - KMP_DEBUG_ASSERT( team->t.t_implicit_task_taskdata ); - - KF_TRACE( 10, ( "__kmp_initialize_info1: T#%d:%d this_thread=%p curtask=%p\n", - tid, gtid, this_thr, this_thr->th.th_current_task ) ); - - __kmp_init_implicit_task( this_thr->th.th_team_master->th.th_ident, this_thr, team, tid, TRUE ); - - KF_TRACE( 10, ( "__kmp_initialize_info2: T#%d:%d this_thread=%p curtask=%p\n", - tid, gtid, this_thr, this_thr->th.th_current_task ) ); - // TODO: Initialize ICVs from parent; GEH - isn't that already done in __kmp_initialize_team()? - - /* TODO no worksharing in speculative threads */ - this_thr->th.th_dispatch = &team->t.t_dispatch[ tid ]; - - this_thr->th.th_local.this_construct = 0; - -#ifdef BUILD_TV - this_thr->th.th_local.tv_data = 0; -#endif - - if ( ! 
this_thr->th.th_pri_common ) {
-        this_thr->th.th_pri_common = (struct common_table *) __kmp_allocate( sizeof(struct common_table) );
-        if ( __kmp_storage_map ) {
-            __kmp_print_storage_map_gtid(
-                gtid, this_thr->th.th_pri_common, this_thr->th.th_pri_common + 1,
-                sizeof( struct common_table ), "th_%d.th_pri_common\n", gtid
-            );
-        }; // if
-        this_thr->th.th_pri_head = NULL;
-    }; // if
-
-    /* Initialize dynamic dispatch */
-    {
-        volatile kmp_disp_t *dispatch = this_thr->th.th_dispatch;
-        /*
-         * Use team max_nproc since this will never change for the team.
-         */
-        size_t disp_size = sizeof( dispatch_private_info_t ) *
-            ( team->t.t_max_nproc == 1 ? 1 : __kmp_dispatch_num_buffers );
-        KD_TRACE( 10, ("__kmp_initialize_info: T#%d max_nproc: %d\n", gtid, team->t.t_max_nproc ) );
-        KMP_ASSERT( dispatch );
-        KMP_DEBUG_ASSERT( team->t.t_dispatch );
-        KMP_DEBUG_ASSERT( dispatch == &team->t.t_dispatch[ tid ] );
-
-        dispatch->th_disp_index = 0;
-#if OMP_45_ENABLED
-        dispatch->th_doacross_buf_idx = 0;
-#endif
-        if( ! dispatch->th_disp_buffer )  {
-            dispatch->th_disp_buffer = (dispatch_private_info_t *) __kmp_allocate( disp_size );
-
-            if ( __kmp_storage_map ) {
-                __kmp_print_storage_map_gtid( gtid, &dispatch->th_disp_buffer[ 0 ],
-                                              &dispatch->th_disp_buffer[ team->t.t_max_nproc == 1 ? 1 : __kmp_dispatch_num_buffers ],
-                                              disp_size, "th_%d.th_dispatch.th_disp_buffer "
-                                              "(team_%d.t_dispatch[%d].th_disp_buffer)",
-                                              gtid, team->t.t_id, gtid );
-            }
-        } else {
-            memset( & dispatch->th_disp_buffer[0], '\0', disp_size );
-        }
-
-        dispatch->th_dispatch_pr_current = 0;
-        dispatch->th_dispatch_sh_current = 0;
-
-        dispatch->th_deo_fcn = 0;             /* ORDERED     */
-        dispatch->th_dxo_fcn = 0;             /* END ORDERED */
-    }
-
-    this_thr->th.th_next_pool = NULL;
-
-    if (!this_thr->th.th_task_state_memo_stack) {
-        size_t i;
-        this_thr->th.th_task_state_memo_stack = (kmp_uint8 *) __kmp_allocate( 4*sizeof(kmp_uint8) );
-        this_thr->th.th_task_state_top = 0;
-        this_thr->th.th_task_state_stack_sz = 4;
-        for (i=0; i<this_thr->th.th_task_state_stack_sz; ++i) // zero init the stack
-            this_thr->th.th_task_state_memo_stack[i] = 0;
-    }
-
-    KMP_DEBUG_ASSERT( !this_thr->th.th_spin_here );
-    KMP_DEBUG_ASSERT( this_thr->th.th_next_waiting == 0 );
-
-    KMP_MB();
-}
-
-
-/* allocate a new thread for the requesting team.  this is only called from within a
- * forkjoin critical section.  we will first try to get an available thread from the
- * thread pool.  if none is available, we will fork a new one assuming we are able
- * to create a new one.  this should be assured, as the caller should check on this
- * first.
- */
-kmp_info_t *
-__kmp_allocate_thread( kmp_root_t *root, kmp_team_t *team, int new_tid )
-{
-    kmp_team_t *serial_team;
-    kmp_info_t *new_thr;
-    int new_gtid;
-
-    KA_TRACE( 20, ("__kmp_allocate_thread: T#%d\n", __kmp_get_gtid() ));
-    KMP_DEBUG_ASSERT( root && team );
-#if !KMP_NESTED_HOT_TEAMS
-    KMP_DEBUG_ASSERT( KMP_MASTER_GTID( __kmp_get_gtid() ));
-#endif
-    KMP_MB();
-
-    /* first, try to get one from the thread pool */
-    if ( __kmp_thread_pool ) {
-
-        new_thr = (kmp_info_t*)__kmp_thread_pool;
-        __kmp_thread_pool = (volatile kmp_info_t *) new_thr->th.th_next_pool;
-        if ( new_thr == __kmp_thread_pool_insert_pt ) {
-            __kmp_thread_pool_insert_pt = NULL;
-        }
-        TCW_4(new_thr->th.th_in_pool, FALSE);
-        //
-        // Don't touch th_active_in_pool or th_active.
-        // The worker thread adjusts those flags as it sleeps/awakens.
-        //
-        __kmp_thread_pool_nth--;
-
-        KA_TRACE( 20, ("__kmp_allocate_thread: T#%d using thread T#%d\n",
-                       __kmp_get_gtid(), new_thr->th.th_info.ds.ds_gtid ));
-        KMP_ASSERT( ! new_thr->th.th_team );
-        KMP_DEBUG_ASSERT( __kmp_nth < __kmp_threads_capacity );
-        KMP_DEBUG_ASSERT( __kmp_thread_pool_nth >= 0 );
-
-        /* setup the thread structure */
-        __kmp_initialize_info( new_thr, team, new_tid, new_thr->th.th_info.ds.ds_gtid );
-        KMP_DEBUG_ASSERT( new_thr->th.th_serial_team );
-
-        TCW_4(__kmp_nth, __kmp_nth + 1);
-
-        new_thr->th.th_task_state = 0;
-        new_thr->th.th_task_state_top = 0;
-        new_thr->th.th_task_state_stack_sz = 4;
-
-#ifdef KMP_ADJUST_BLOCKTIME
-        /* Adjust blocktime back to zero if necessary */
-        /* Middle initialization might not have occurred yet */
-        if ( !__kmp_env_blocktime && ( __kmp_avail_proc > 0 ) ) {
-            if ( __kmp_nth > __kmp_avail_proc ) {
-                __kmp_zero_bt = TRUE;
-            }
-        }
-#endif /* KMP_ADJUST_BLOCKTIME */
-
-#if KMP_DEBUG
-        // If thread entered pool via __kmp_free_thread, wait_flag should != KMP_BARRIER_PARENT_FLAG.
-        int b;
-        kmp_balign_t * balign = new_thr->th.th_bar;
-        for( b = 0; b < bs_last_barrier; ++ b )
-            KMP_DEBUG_ASSERT(balign[b].bb.wait_flag != KMP_BARRIER_PARENT_FLAG);
-#endif
-
-        KF_TRACE( 10, ("__kmp_allocate_thread: T#%d using thread %p T#%d\n",
-                       __kmp_get_gtid(), new_thr, new_thr->th.th_info.ds.ds_gtid ));
-
-        KMP_MB();
-        return new_thr;
-    }
-
-
-    /* no, we'll fork a new one */
-    KMP_ASSERT( __kmp_nth == __kmp_all_nth );
-    KMP_ASSERT( __kmp_all_nth < __kmp_threads_capacity );
-
-#if KMP_USE_MONITOR
-    //
-    // If this is the first worker thread the RTL is creating, then also
-    // launch the monitor thread.  We try to do this as early as possible.
-    //
-    if ( ! TCR_4( __kmp_init_monitor ) ) {
-        __kmp_acquire_bootstrap_lock( & __kmp_monitor_lock );
-        if ( ! TCR_4( __kmp_init_monitor ) ) {
-            KF_TRACE( 10, ( "before __kmp_create_monitor\n" ) );
-            TCW_4( __kmp_init_monitor, 1 );
-            __kmp_create_monitor( & __kmp_monitor );
-            KF_TRACE( 10, ( "after __kmp_create_monitor\n" ) );
-            #if KMP_OS_WINDOWS
-                // AC: wait until monitor has started. This is a fix for CQ232808.
-                // The reason is that if the library is loaded/unloaded in a loop with small (parallel)
-                // work in between, then there is a high probability that the monitor thread starts after
-                // the library shutdown. At shutdown it is too late to cope with the problem, because
-                // when the master is in DllMain (process detach) the monitor has no chance to start
-                // (it is blocked), and the master has no means to inform the monitor that the library
-                // has gone, because all the memory which the monitor can access is going to be
-                // released/reset.
-                while ( TCR_4(__kmp_init_monitor) < 2 ) {
-                    KMP_YIELD( TRUE );
-                }
-                KF_TRACE( 10, ( "after monitor thread has started\n" ) );
-            #endif
-        }
-        __kmp_release_bootstrap_lock( & __kmp_monitor_lock );
-    }
-#endif
-
-    KMP_MB();
-    for( new_gtid=1 ; TCR_PTR(__kmp_threads[new_gtid]) != NULL; ++new_gtid ) {
-        KMP_DEBUG_ASSERT( new_gtid < __kmp_threads_capacity );
-    }
-
-    /* allocate space for it. */
-    new_thr = (kmp_info_t*) __kmp_allocate( sizeof(kmp_info_t) );
-
-    TCW_SYNC_PTR(__kmp_threads[new_gtid], new_thr);
-
-    if ( __kmp_storage_map ) {
-        __kmp_print_thread_storage_map( new_thr, new_gtid );
-    }
-
-    /* add the reserve serialized team, initialized from the team's master thread */
-    {
-    kmp_internal_control_t r_icvs = __kmp_get_x_global_icvs( team );
-    KF_TRACE( 10, ( "__kmp_allocate_thread: before th_serial/serial_team\n" ) );
-
-    new_thr->th.th_serial_team = serial_team =
-        (kmp_team_t*) __kmp_allocate_team( root, 1, 1,
-#if OMPT_SUPPORT
-                                           0, // root parallel id
-#endif
-#if OMP_40_ENABLED
-                                           proc_bind_default,
-#endif
-                                           &r_icvs,
-                                           0 USE_NESTED_HOT_ARG(NULL) );
-    }
-    KMP_ASSERT ( serial_team );
-    serial_team->t.t_serialized = 0;   // AC: the team created in reserve, not for execution (it is unused for now).
-    serial_team->t.t_threads[0] = new_thr;
-    KF_TRACE( 10, ( "__kmp_allocate_thread: after th_serial/serial_team : new_thr=%p\n",
-                    new_thr ) );
-
-    /* setup the thread structures */
-    __kmp_initialize_info( new_thr, team, new_tid, new_gtid );
-
-    #if USE_FAST_MEMORY
-        __kmp_initialize_fast_memory( new_thr );
-    #endif /* USE_FAST_MEMORY */
-
-    #if KMP_USE_BGET
-        KMP_DEBUG_ASSERT( new_thr->th.th_local.bget_data == NULL );
-        __kmp_initialize_bget( new_thr );
-    #endif
-
-    __kmp_init_random( new_thr );  // Initialize random number generator
-
-    /* Initialize these only once when thread is grabbed for a team allocation */
-    KA_TRACE( 20, ("__kmp_allocate_thread: T#%d init go fork=%u, plain=%u\n",
-                   __kmp_get_gtid(), KMP_INIT_BARRIER_STATE, KMP_INIT_BARRIER_STATE ));
-
-    int b;
-    kmp_balign_t * balign = new_thr->th.th_bar;
-    for(b=0; b<bs_last_barrier; ++b) {
-        balign[b].bb.b_go = KMP_INIT_BARRIER_STATE;
-        balign[b].bb.team = NULL;
-        balign[b].bb.wait_flag = KMP_BARRIER_NOT_WAITING;
-        balign[b].bb.use_oncore_barrier = 0;
-    }
-
-    new_thr->th.th_spin_here = FALSE;
-    new_thr->th.th_next_waiting = 0;
-
-#if OMP_40_ENABLED && KMP_AFFINITY_SUPPORTED
-    new_thr->th.th_current_place = KMP_PLACE_UNDEFINED;
-    new_thr->th.th_new_place = KMP_PLACE_UNDEFINED;
-    new_thr->th.th_first_place = KMP_PLACE_UNDEFINED;
-    new_thr->th.th_last_place = KMP_PLACE_UNDEFINED;
-#endif
-
-    TCW_4(new_thr->th.th_in_pool, FALSE);
-    new_thr->th.th_active_in_pool = FALSE;
-    TCW_4(new_thr->th.th_active, TRUE);
-
-    /* adjust the global counters */
-    __kmp_all_nth ++;
-    __kmp_nth ++;
-
-    //
-    // if __kmp_adjust_gtid_mode is set, then we use method #1 (sp search)
-    // for low numbers of procs, and method #2 (keyed API call) for higher
-    // numbers of procs.
-    //
-    if ( __kmp_adjust_gtid_mode ) {
-        if ( __kmp_all_nth >= __kmp_tls_gtid_min ) {
-            if ( TCR_4(__kmp_gtid_mode) != 2) {
-                TCW_4(__kmp_gtid_mode, 2);
-            }
-        }
-        else {
-            if (TCR_4(__kmp_gtid_mode) != 1 ) {
-                TCW_4(__kmp_gtid_mode, 1);
-            }
-        }
-    }
-
-#ifdef KMP_ADJUST_BLOCKTIME
-    /* Adjust blocktime back to zero if necessary */
-    /* Middle initialization might not have occurred yet */
-    if ( !__kmp_env_blocktime && ( __kmp_avail_proc > 0 ) ) {
-        if ( __kmp_nth > __kmp_avail_proc ) {
-            __kmp_zero_bt = TRUE;
-        }
-    }
-#endif /* KMP_ADJUST_BLOCKTIME */
-
-    /* actually fork it and create the new worker thread */
-    KF_TRACE( 10, ("__kmp_allocate_thread: before __kmp_create_worker: %p\n", new_thr ));
-    __kmp_create_worker( new_gtid, new_thr, __kmp_stksize );
-    KF_TRACE( 10, ("__kmp_allocate_thread: after __kmp_create_worker: %p\n", new_thr ));
-
-    KA_TRACE( 20, ("__kmp_allocate_thread: T#%d forked T#%d\n", __kmp_get_gtid(), new_gtid ));
-    KMP_MB();
-    return new_thr;
-}
-
-/*
- * reinitialize team for reuse.
- *
- * The hot team code calls this case at every fork barrier, so EPCC barrier
- * tests are extremely sensitive to changes in it, esp.
writes to the team - * struct, which cause a cache invalidation in all threads. - * - * IF YOU TOUCH THIS ROUTINE, RUN EPCC C SYNCBENCH ON A BIG-IRON MACHINE!!! - */ -static void -__kmp_reinitialize_team( kmp_team_t *team, kmp_internal_control_t *new_icvs, ident_t *loc ) { - KF_TRACE( 10, ( "__kmp_reinitialize_team: enter this_thread=%p team=%p\n", - team->t.t_threads[0], team ) ); - KMP_DEBUG_ASSERT( team && new_icvs); - KMP_DEBUG_ASSERT( ( ! TCR_4(__kmp_init_parallel) ) || new_icvs->nproc ); - KMP_CHECK_UPDATE(team->t.t_ident, loc); - - KMP_CHECK_UPDATE(team->t.t_id, KMP_GEN_TEAM_ID()); - - // Copy ICVs to the master thread's implicit taskdata - __kmp_init_implicit_task( loc, team->t.t_threads[0], team, 0, FALSE ); - copy_icvs(&team->t.t_implicit_task_taskdata[0].td_icvs, new_icvs); - - KF_TRACE( 10, ( "__kmp_reinitialize_team: exit this_thread=%p team=%p\n", - team->t.t_threads[0], team ) ); -} - - -/* initialize the team data structure - * this assumes the t_threads and t_max_nproc are already set - * also, we don't touch the arguments */ -static void -__kmp_initialize_team( - kmp_team_t * team, - int new_nproc, - kmp_internal_control_t * new_icvs, - ident_t * loc -) { - KF_TRACE( 10, ( "__kmp_initialize_team: enter: team=%p\n", team ) ); - - /* verify */ - KMP_DEBUG_ASSERT( team ); - KMP_DEBUG_ASSERT( new_nproc <= team->t.t_max_nproc ); - KMP_DEBUG_ASSERT( team->t.t_threads ); - KMP_MB(); - - team->t.t_master_tid = 0; /* not needed */ - /* team->t.t_master_bar; not needed */ - team->t.t_serialized = new_nproc > 1 ? 0 : 1; - team->t.t_nproc = new_nproc; - - /* team->t.t_parent = NULL; TODO not needed & would mess up hot team */ - team->t.t_next_pool = NULL; - /* memset( team->t.t_threads, 0, sizeof(kmp_info_t*)*new_nproc ); would mess up hot team */ - - TCW_SYNC_PTR(team->t.t_pkfn, NULL); /* not needed */ - team->t.t_invoke = NULL; /* not needed */ - - // TODO???: team->t.t_max_active_levels = new_max_active_levels; - team->t.t_sched = new_icvs->sched; - -#if KMP_ARCH_X86 || KMP_ARCH_X86_64 - team->t.t_fp_control_saved = FALSE; /* not needed */ - team->t.t_x87_fpu_control_word = 0; /* not needed */ - team->t.t_mxcsr = 0; /* not needed */ -#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */ - - team->t.t_construct = 0; - __kmp_init_lock( & team->t.t_single_lock ); - - team->t.t_ordered .dt.t_value = 0; - team->t.t_master_active = FALSE; - - memset( & team->t.t_taskq, '\0', sizeof( kmp_taskq_t )); - -#ifdef KMP_DEBUG - team->t.t_copypriv_data = NULL; /* not necessary, but nice for debugging */ -#endif - team->t.t_copyin_counter = 0; /* for barrier-free copyin implementation */ - - team->t.t_control_stack_top = NULL; - - __kmp_reinitialize_team( team, new_icvs, loc ); - - KMP_MB(); - KF_TRACE( 10, ( "__kmp_initialize_team: exit: team=%p\n", team ) ); -} - -#if KMP_OS_LINUX && KMP_AFFINITY_SUPPORTED -/* Sets full mask for thread and returns old mask, no changes to structures. */ -static void -__kmp_set_thread_affinity_mask_full_tmp( kmp_affin_mask_t *old_mask ) -{ - if ( KMP_AFFINITY_CAPABLE() ) { - int status; - if ( old_mask != NULL ) { - status = __kmp_get_system_affinity( old_mask, TRUE ); - int error = errno; - if ( status != 0 ) { - __kmp_msg( - kmp_ms_fatal, - KMP_MSG( ChangeThreadAffMaskError ), - KMP_ERR( error ), - __kmp_msg_null - ); - } - } - __kmp_set_system_affinity( __kmp_affin_fullMask, TRUE ); - } -} -#endif - -#if OMP_40_ENABLED && KMP_AFFINITY_SUPPORTED - -// -// __kmp_partition_places() is the heart of the OpenMP 4.0 affinity mechanism. 
-// It calculates the worker + master thread's partition based upon the parent
-// thread's partition, and binds each worker to a thread in their partition.
-// The master thread's partition should already include its current binding.
-//
-static void
-__kmp_partition_places( kmp_team_t *team, int update_master_only )
-{
-    //
-    // Copy the master thread's place partition to the team struct
-    //
-    kmp_info_t *master_th = team->t.t_threads[0];
-    KMP_DEBUG_ASSERT( master_th != NULL );
-    kmp_proc_bind_t proc_bind = team->t.t_proc_bind;
-    int first_place = master_th->th.th_first_place;
-    int last_place = master_th->th.th_last_place;
-    int masters_place = master_th->th.th_current_place;
-    team->t.t_first_place = first_place;
-    team->t.t_last_place = last_place;
-
-    KA_TRACE( 20, ("__kmp_partition_places: enter: proc_bind = %d T#%d(%d:0) bound to place %d partition = [%d,%d]\n",
-       proc_bind, __kmp_gtid_from_thread( team->t.t_threads[0] ), team->t.t_id,
-       masters_place, first_place, last_place ) );
-
-    switch ( proc_bind ) {
-
-    case proc_bind_default:
-        //
-        // serial teams might have the proc_bind policy set to
-        // proc_bind_default.  It doesn't matter, as we don't
-        // rebind the master thread for any proc_bind policy.
-        //
-        KMP_DEBUG_ASSERT( team->t.t_nproc == 1 );
-        break;
-
-    case proc_bind_master:
-        {
-            int f;
-            int n_th = team->t.t_nproc;
-            for ( f = 1; f < n_th; f++ ) {
-                kmp_info_t *th = team->t.t_threads[f];
-                KMP_DEBUG_ASSERT( th != NULL );
-                th->th.th_first_place = first_place;
-                th->th.th_last_place = last_place;
-                th->th.th_new_place = masters_place;
-
-                KA_TRACE( 100, ("__kmp_partition_places: master: T#%d(%d:%d) place %d partition = [%d,%d]\n",
-                                __kmp_gtid_from_thread( team->t.t_threads[f] ),
-                                team->t.t_id, f, masters_place, first_place, last_place ) );
-            }
-        }
-        break;
-
-    case proc_bind_close:
-        {
-            int f;
-            int n_th = team->t.t_nproc;
-            int n_places;
-            if ( first_place <= last_place ) {
-                n_places = last_place - first_place + 1;
-            }
-            else {
-                n_places = __kmp_affinity_num_masks - first_place + last_place + 1;
-            }
-            if ( n_th <= n_places ) {
-                int place = masters_place;
-                for ( f = 1; f < n_th; f++ ) {
-                    kmp_info_t *th = team->t.t_threads[f];
-                    KMP_DEBUG_ASSERT( th != NULL );
-
-                    if ( place == last_place ) {
-                        place = first_place;
-                    }
-                    else if ( place == (int)(__kmp_affinity_num_masks - 1) ) {
-                        place = 0;
-                    }
-                    else {
-                        place++;
-                    }
-                    th->th.th_first_place = first_place;
-                    th->th.th_last_place = last_place;
-                    th->th.th_new_place = place;
-
-                    KA_TRACE( 100, ("__kmp_partition_places: close: T#%d(%d:%d) place %d partition = [%d,%d]\n",
-                                    __kmp_gtid_from_thread( team->t.t_threads[f] ),
-                                    team->t.t_id, f, place, first_place, last_place ) );
-                }
-            }
-            else {
-                int S, rem, gap, s_count;
-                S = n_th / n_places;
-                s_count = 0;
-                rem = n_th - ( S * n_places );
-                gap = rem > 0 ?
n_places/rem : n_places; - int place = masters_place; - int gap_ct = gap; - for ( f = 0; f < n_th; f++ ) { - kmp_info_t *th = team->t.t_threads[f]; - KMP_DEBUG_ASSERT( th != NULL ); - - th->th.th_first_place = first_place; - th->th.th_last_place = last_place; - th->th.th_new_place = place; - s_count++; - - if ( (s_count == S) && rem && (gap_ct == gap) ) { - // do nothing, add an extra thread to place on next iteration - } - else if ( (s_count == S+1) && rem && (gap_ct == gap) ) { - // we added an extra thread to this place; move to next place - if ( place == last_place ) { - place = first_place; - } - else if ( place == (int)(__kmp_affinity_num_masks - 1) ) { - place = 0; - } - else { - place++; - } - s_count = 0; - gap_ct = 1; - rem--; - } - else if (s_count == S) { // place full; don't add extra - if ( place == last_place ) { - place = first_place; - } - else if ( place == (int)(__kmp_affinity_num_masks - 1) ) { - place = 0; - } - else { - place++; - } - gap_ct++; - s_count = 0; - } - - KA_TRACE( 100, ("__kmp_partition_places: close: T#%d(%d:%d) place %d partition = [%d,%d]\n", - __kmp_gtid_from_thread( team->t.t_threads[f] ), - team->t.t_id, f, th->th.th_new_place, first_place, - last_place ) ); - } - KMP_DEBUG_ASSERT( place == masters_place ); - } - } - break; - - case proc_bind_spread: - { - int f; - int n_th = team->t.t_nproc; - int n_places; - int thidx; - if ( first_place <= last_place ) { - n_places = last_place - first_place + 1; - } - else { - n_places = __kmp_affinity_num_masks - first_place + last_place + 1; - } - if ( n_th <= n_places ) { - int place = masters_place; - int S = n_places/n_th; - int s_count, rem, gap, gap_ct; - rem = n_places - n_th*S; - gap = rem ? n_th/rem : 1; - gap_ct = gap; - thidx = n_th; - if (update_master_only == 1) - thidx = 1; - for ( f = 0; f < thidx; f++ ) { - kmp_info_t *th = team->t.t_threads[f]; - KMP_DEBUG_ASSERT( th != NULL ); - - th->th.th_first_place = place; - th->th.th_new_place = place; - s_count = 1; - while (s_count < S) { - if ( place == last_place ) { - place = first_place; - } - else if ( place == (int)(__kmp_affinity_num_masks - 1) ) { - place = 0; - } - else { - place++; - } - s_count++; - } - if (rem && (gap_ct == gap)) { - if ( place == last_place ) { - place = first_place; - } - else if ( place == (int)(__kmp_affinity_num_masks - 1) ) { - place = 0; - } - else { - place++; - } - rem--; - gap_ct = 0; - } - th->th.th_last_place = place; - gap_ct++; - - if ( place == last_place ) { - place = first_place; - } - else if ( place == (int)(__kmp_affinity_num_masks - 1) ) { - place = 0; - } - else { - place++; - } - - KA_TRACE( 100, ("__kmp_partition_places: spread: T#%d(%d:%d) place %d partition = [%d,%d]\n", - __kmp_gtid_from_thread( team->t.t_threads[f] ), - team->t.t_id, f, th->th.th_new_place, - th->th.th_first_place, th->th.th_last_place ) ); - } - KMP_DEBUG_ASSERT( update_master_only || place == masters_place ); - } - else { - int S, rem, gap, s_count; - S = n_th / n_places; - s_count = 0; - rem = n_th - ( S * n_places ); - gap = rem > 0 ? 
n_places/rem : n_places; - int place = masters_place; - int gap_ct = gap; - thidx = n_th; - if (update_master_only == 1) - thidx = 1; - for ( f = 0; f < thidx; f++ ) { - kmp_info_t *th = team->t.t_threads[f]; - KMP_DEBUG_ASSERT( th != NULL ); - - th->th.th_first_place = place; - th->th.th_last_place = place; - th->th.th_new_place = place; - s_count++; - - if ( (s_count == S) && rem && (gap_ct == gap) ) { - // do nothing, add an extra thread to place on next iteration - } - else if ( (s_count == S+1) && rem && (gap_ct == gap) ) { - // we added an extra thread to this place; move on to next place - if ( place == last_place ) { - place = first_place; - } - else if ( place == (int)(__kmp_affinity_num_masks - 1) ) { - place = 0; - } - else { - place++; - } - s_count = 0; - gap_ct = 1; - rem--; - } - else if (s_count == S) { // place is full; don't add extra thread - if ( place == last_place ) { - place = first_place; - } - else if ( place == (int)(__kmp_affinity_num_masks - 1) ) { - place = 0; - } - else { - place++; - } - gap_ct++; - s_count = 0; - } - - KA_TRACE( 100, ("__kmp_partition_places: spread: T#%d(%d:%d) place %d partition = [%d,%d]\n", - __kmp_gtid_from_thread( team->t.t_threads[f] ), - team->t.t_id, f, th->th.th_new_place, - th->th.th_first_place, th->th.th_last_place) ); - } - KMP_DEBUG_ASSERT( update_master_only || place == masters_place ); - } - } - break; - - default: - break; - } - - KA_TRACE( 20, ("__kmp_partition_places: exit T#%d\n", team->t.t_id ) ); -} - -#endif /* OMP_40_ENABLED && KMP_AFFINITY_SUPPORTED */ - -/* allocate a new team data structure to use. take one off of the free pool if available */ -kmp_team_t * -__kmp_allocate_team( kmp_root_t *root, int new_nproc, int max_nproc, -#if OMPT_SUPPORT - ompt_parallel_id_t ompt_parallel_id, -#endif -#if OMP_40_ENABLED - kmp_proc_bind_t new_proc_bind, -#endif - kmp_internal_control_t *new_icvs, - int argc USE_NESTED_HOT_ARG(kmp_info_t *master) ) -{ - KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(KMP_allocate_team); - int f; - kmp_team_t *team; - int use_hot_team = ! root->r.r_active; - int level = 0; - - KA_TRACE( 20, ("__kmp_allocate_team: called\n")); - KMP_DEBUG_ASSERT( new_nproc >=1 && argc >=0 ); - KMP_DEBUG_ASSERT( max_nproc >= new_nproc ); - KMP_MB(); - -#if KMP_NESTED_HOT_TEAMS - kmp_hot_team_ptr_t *hot_teams; - if( master ) { - team = master->th.th_team; - level = team->t.t_active_level; - if( master->th.th_teams_microtask ) { // in teams construct? - if( master->th.th_teams_size.nteams > 1 && ( // #teams > 1 - team->t.t_pkfn == (microtask_t)__kmp_teams_master || // inner fork of the teams - master->th.th_teams_level < team->t.t_level ) ) { // or nested parallel inside the teams - ++level; // not increment if #teams==1, or for outer fork of the teams; increment otherwise - } - } - hot_teams = master->th.th_hot_teams; - if( level < __kmp_hot_teams_max_level && hot_teams && hot_teams[level].hot_team ) - { // hot team has already been allocated for given level - use_hot_team = 1; - } else { - use_hot_team = 0; - } - } -#endif - // Optimization to use a "hot" team - if( use_hot_team && new_nproc > 1 ) { - KMP_DEBUG_ASSERT( new_nproc == max_nproc ); -#if KMP_NESTED_HOT_TEAMS - team = hot_teams[level].hot_team; -#else - team = root->r.r_hot_team; -#endif -#if KMP_DEBUG - if ( __kmp_tasking_mode != tskm_immediate_exec ) { - KA_TRACE( 20, ("__kmp_allocate_team: hot team task_team[0] = %p task_team[1] = %p before reinit\n", - team->t.t_task_team[0], team->t.t_task_team[1] )); - } -#endif - - // Has the number of threads changed? 
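// A minimal standalone sketch of the S/rem/gap distribution arithmetic used by
// __kmp_partition_places above for proc_bind_close and proc_bind_spread when
// n_th > n_places: each place receives S = n_th/n_places threads, and the rem
// leftover threads are handed out one extra per place every `gap` places. The
// helper name distribute_threads and the simple modulo wrap over [0, n_places)
// are illustrative assumptions; the real code wraps within the
// [first_place, last_place] window of the affinity masks.
#include <cstdio>
#include <vector>

static std::vector<int> distribute_threads(int n_th, int n_places, int masters_place) {
    int S = n_th / n_places;                       // base threads per place
    int rem = n_th - S * n_places;                 // places owed one extra thread
    int gap = rem > 0 ? n_places / rem : n_places; // spacing between "extra" places
    int place = masters_place, s_count = 0, gap_ct = gap;
    std::vector<int> assignment(n_th);
    for (int f = 0; f < n_th; f++) {
        assignment[f] = place;
        s_count++;
        if (s_count == S && rem && gap_ct == gap) {
            // hold: this place accepts one extra thread on the next iteration
        } else if (s_count == S + 1 && rem && gap_ct == gap) {
            place = (place + 1) % n_places;        // extra thread consumed, move on
            s_count = 0; gap_ct = 1; rem--;
        } else if (s_count == S) {
            place = (place + 1) % n_places;        // place full, no extra here
            gap_ct++; s_count = 0;
        }
    }
    return assignment; // scan ends back at masters_place, as the assert above checks
}

int main() {
    // 10 threads over 4 places starting at place 1: places 1,2,3,0 get 3,2,3,2 threads
    for (int p : distribute_threads(10, 4, 1)) std::printf("%d ", p);
    std::printf("\n"); // prints: 1 1 1 2 2 3 3 3 0 0
}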
- /* Let's assume the most common case is that the number of threads is unchanged, and - put that case first. */ - if (team->t.t_nproc == new_nproc) { // Check changes in number of threads - KA_TRACE( 20, ("__kmp_allocate_team: reusing hot team\n" )); - // This case can mean that omp_set_num_threads() was called and the hot team size - // was already reduced, so we check the special flag - if ( team->t.t_size_changed == -1 ) { - team->t.t_size_changed = 1; - } else { - KMP_CHECK_UPDATE(team->t.t_size_changed, 0); - } - - // TODO???: team->t.t_max_active_levels = new_max_active_levels; - kmp_r_sched_t new_sched = new_icvs->sched; - if (team->t.t_sched.r_sched_type != new_sched.r_sched_type || - team->t.t_sched.chunk != new_sched.chunk) - team->t.t_sched = new_sched; // set master's schedule as new run-time schedule - - __kmp_reinitialize_team( team, new_icvs, root->r.r_uber_thread->th.th_ident ); - - KF_TRACE( 10, ("__kmp_allocate_team2: T#%d, this_thread=%p team=%p\n", - 0, team->t.t_threads[0], team ) ); - __kmp_push_current_task_to_thread( team->t.t_threads[ 0 ], team, 0 ); - -#if OMP_40_ENABLED -# if KMP_AFFINITY_SUPPORTED - if ( ( team->t.t_size_changed == 0 ) - && ( team->t.t_proc_bind == new_proc_bind ) ) { - if (new_proc_bind == proc_bind_spread) { - __kmp_partition_places(team, 1); // add flag to update only master for spread - } - KA_TRACE( 200, ("__kmp_allocate_team: reusing hot team #%d bindings: proc_bind = %d, partition = [%d,%d]\n", - team->t.t_id, new_proc_bind, team->t.t_first_place, - team->t.t_last_place ) ); - } - else { - KMP_CHECK_UPDATE(team->t.t_proc_bind, new_proc_bind); - __kmp_partition_places( team ); - } -# else - KMP_CHECK_UPDATE(team->t.t_proc_bind, new_proc_bind); -# endif /* KMP_AFFINITY_SUPPORTED */ -#endif /* OMP_40_ENABLED */ - } - else if( team->t.t_nproc > new_nproc ) { - KA_TRACE( 20, ("__kmp_allocate_team: decreasing hot team thread count to %d\n", new_nproc )); - - team->t.t_size_changed = 1; -#if KMP_NESTED_HOT_TEAMS - if( __kmp_hot_teams_mode == 0 ) { - // AC: saved number of threads should correspond to team's value in this mode, - // can be bigger in mode 1, when hot team has some threads in reserve - KMP_DEBUG_ASSERT(hot_teams[level].hot_team_nth == team->t.t_nproc); - hot_teams[level].hot_team_nth = new_nproc; -#endif // KMP_NESTED_HOT_TEAMS - /* release the extra threads we don't need any more */ - for( f = new_nproc ; f < team->t.t_nproc ; f++ ) { - KMP_DEBUG_ASSERT( team->t.t_threads[ f ] ); - if ( __kmp_tasking_mode != tskm_immediate_exec) { - // When decreasing team size, threads no longer in the team should unref task team. 
- team->t.t_threads[f]->th.th_task_team = NULL;
- }
- __kmp_free_thread( team->t.t_threads[ f ] );
- team->t.t_threads[ f ] = NULL;
- }
-#if KMP_NESTED_HOT_TEAMS
- } // (__kmp_hot_teams_mode == 0)
- else {
- // When keeping extra threads in team, switch threads to wait on own b_go flag
- for (f=new_nproc; f<team->t.t_nproc; ++f) {
- KMP_DEBUG_ASSERT(team->t.t_threads[f]);
- kmp_balign_t *balign = team->t.t_threads[f]->th.th_bar;
- for (int b=0; b<bs_last_barrier; ++b) {
- if (balign[b].bb.wait_flag == KMP_BARRIER_PARENT_FLAG) {
- balign[b].bb.wait_flag = KMP_BARRIER_SWITCH_TO_OWN_FLAG;
- }
- KMP_DEBUG_ASSERT(balign[b].bb.leaf_kids == 0);
- }
- }
- }
-#endif // KMP_NESTED_HOT_TEAMS
- team->t.t_nproc = new_nproc;
- // TODO???: team->t.t_max_active_levels = new_max_active_levels;
- if (team->t.t_sched.r_sched_type != new_icvs->sched.r_sched_type ||
- team->t.t_sched.chunk != new_icvs->sched.chunk)
- team->t.t_sched = new_icvs->sched;
- __kmp_reinitialize_team( team, new_icvs, root->r.r_uber_thread->th.th_ident );
-
- /* update the remaining threads */
- for(f = 0; f < new_nproc; ++f) {
- team->t.t_threads[f]->th.th_team_nproc = new_nproc;
- }
- // restore the current task state of the master thread: should be the implicit task
- KF_TRACE( 10, ("__kmp_allocate_team: T#%d, this_thread=%p team=%p\n",
- 0, team->t.t_threads[0], team ) );
-
- __kmp_push_current_task_to_thread( team->t.t_threads[ 0 ], team, 0 );
-
-#ifdef KMP_DEBUG
- for ( f = 0; f < team->t.t_nproc; f++ ) {
- KMP_DEBUG_ASSERT( team->t.t_threads[f] &&
- team->t.t_threads[f]->th.th_team_nproc == team->t.t_nproc );
- }
-#endif
-
-#if OMP_40_ENABLED
- KMP_CHECK_UPDATE(team->t.t_proc_bind, new_proc_bind);
-# if KMP_AFFINITY_SUPPORTED
- __kmp_partition_places( team );
-# endif
-#endif
- }
- else { // team->t.t_nproc < new_nproc
-#if KMP_OS_LINUX && KMP_AFFINITY_SUPPORTED
- kmp_affin_mask_t *old_mask;
- if ( KMP_AFFINITY_CAPABLE() ) {
- KMP_CPU_ALLOC(old_mask);
- }
-#endif
-
- KA_TRACE( 20, ("__kmp_allocate_team: increasing hot team thread count to %d\n", new_nproc ));
-
- team->t.t_size_changed = 1;
-
-#if KMP_NESTED_HOT_TEAMS
- int avail_threads = hot_teams[level].hot_team_nth;
- if( new_nproc < avail_threads )
- avail_threads = new_nproc;
- kmp_info_t **other_threads = team->t.t_threads;
- for ( f = team->t.t_nproc; f < avail_threads; ++f ) {
- // Adjust barrier data of reserved threads (if any) of the team
- // Other data will be set in __kmp_initialize_info() below.
- int b;
- kmp_balign_t * balign = other_threads[f]->th.th_bar;
- for ( b = 0; b < bs_last_barrier; ++ b ) {
- balign[b].bb.b_arrived = team->t.t_bar[b].b_arrived;
- KMP_DEBUG_ASSERT(balign[b].bb.wait_flag != KMP_BARRIER_PARENT_FLAG);
-#if USE_DEBUGGER
- balign[b].bb.b_worker_arrived = team->t.t_bar[b].b_team_arrived;
-#endif
- }
- }
- if( hot_teams[level].hot_team_nth >= new_nproc ) {
- // we have all needed threads in reserve, no need to allocate any
- // this is only possible in mode 1; cannot have reserved threads in mode 0
- KMP_DEBUG_ASSERT(__kmp_hot_teams_mode == 1);
- team->t.t_nproc = new_nproc; // just get reserved threads involved
- } else {
- // we may have some threads in reserve, but not enough
- team->t.t_nproc = hot_teams[level].hot_team_nth; // get reserved threads involved if any
- hot_teams[level].hot_team_nth = new_nproc; // adjust hot team max size
-#endif // KMP_NESTED_HOT_TEAMS
- if(team->t.t_max_nproc < new_nproc) {
- /* reallocate larger arrays */
- __kmp_reallocate_team_arrays(team, new_nproc);
- __kmp_reinitialize_team( team, new_icvs, NULL );
- }
-
-#if KMP_OS_LINUX && KMP_AFFINITY_SUPPORTED
- /* Temporarily set full mask for master thread before
- creation of workers.
The reason is that workers inherit
- the affinity from master, so if a lot of workers are
- created on the single core quickly, they don't get
- a chance to set their own affinity for a long time.
- */
- __kmp_set_thread_affinity_mask_full_tmp( old_mask );
-#endif
-
- /* allocate new threads for the hot team */
- for( f = team->t.t_nproc ; f < new_nproc ; f++ ) {
- kmp_info_t * new_worker = __kmp_allocate_thread( root, team, f );
- KMP_DEBUG_ASSERT( new_worker );
- team->t.t_threads[ f ] = new_worker;
-
- KA_TRACE( 20, ("__kmp_allocate_team: team %d init T#%d arrived: join=%llu, plain=%llu\n",
- team->t.t_id, __kmp_gtid_from_tid( f, team ), team->t.t_id, f,
- team->t.t_bar[bs_forkjoin_barrier].b_arrived,
- team->t.t_bar[bs_plain_barrier].b_arrived ) );
-
- { // Initialize barrier data for new threads.
- int b;
- kmp_balign_t * balign = new_worker->th.th_bar;
- for( b = 0; b < bs_last_barrier; ++ b ) {
- balign[ b ].bb.b_arrived = team->t.t_bar[ b ].b_arrived;
- KMP_DEBUG_ASSERT(balign[b].bb.wait_flag != KMP_BARRIER_PARENT_FLAG);
-#if USE_DEBUGGER
- balign[ b ].bb.b_worker_arrived = team->t.t_bar[ b ].b_team_arrived;
-#endif
- }
- }
- }
-
-#if KMP_OS_LINUX && KMP_AFFINITY_SUPPORTED
- if ( KMP_AFFINITY_CAPABLE() ) {
- /* Restore initial master thread's affinity mask */
- __kmp_set_system_affinity( old_mask, TRUE );
- KMP_CPU_FREE(old_mask);
- }
-#endif
-#if KMP_NESTED_HOT_TEAMS
- } // end of check of t_nproc vs. new_nproc vs. hot_team_nth
-#endif // KMP_NESTED_HOT_TEAMS
- /* make sure everyone is synchronized */
- int old_nproc = team->t.t_nproc; // save old value and use to update only new threads below
- __kmp_initialize_team( team, new_nproc, new_icvs, root->r.r_uber_thread->th.th_ident );
-
- /* reinitialize the threads */
- KMP_DEBUG_ASSERT(team->t.t_nproc == new_nproc);
- for (f=0; f < team->t.t_nproc; ++f)
- __kmp_initialize_info( team->t.t_threads[ f ], team, f, __kmp_gtid_from_tid( f, team ) );
- if (level) { // set th_task_state for new threads in nested hot team
- // __kmp_initialize_info() no longer zeroes th_task_state, so we should only need to set the
- // th_task_state for the new threads. th_task_state for master thread will not be accurate until
- // after this in __kmp_fork_call(), so we look to the master's memo_stack to get the correct value.
- for (f=old_nproc; f < team->t.t_nproc; ++f) - team->t.t_threads[f]->th.th_task_state = team->t.t_threads[0]->th.th_task_state_memo_stack[level]; - } - else { // set th_task_state for new threads in non-nested hot team - int old_state = team->t.t_threads[0]->th.th_task_state; // copy master's state - for (f=old_nproc; f < team->t.t_nproc; ++f) - team->t.t_threads[f]->th.th_task_state = old_state; - } - -#ifdef KMP_DEBUG - for ( f = 0; f < team->t.t_nproc; ++ f ) { - KMP_DEBUG_ASSERT( team->t.t_threads[f] && - team->t.t_threads[f]->th.th_team_nproc == team->t.t_nproc ); - } -#endif - -#if OMP_40_ENABLED - KMP_CHECK_UPDATE(team->t.t_proc_bind, new_proc_bind); -# if KMP_AFFINITY_SUPPORTED - __kmp_partition_places( team ); -# endif -#endif - } // Check changes in number of threads - -#if OMP_40_ENABLED - kmp_info_t *master = team->t.t_threads[0]; - if( master->th.th_teams_microtask ) { - for( f = 1; f < new_nproc; ++f ) { - // propagate teams construct specific info to workers - kmp_info_t *thr = team->t.t_threads[f]; - thr->th.th_teams_microtask = master->th.th_teams_microtask; - thr->th.th_teams_level = master->th.th_teams_level; - thr->th.th_teams_size = master->th.th_teams_size; - } - } -#endif /* OMP_40_ENABLED */ -#if KMP_NESTED_HOT_TEAMS - if( level ) { - // Sync barrier state for nested hot teams, not needed for outermost hot team. - for( f = 1; f < new_nproc; ++f ) { - kmp_info_t *thr = team->t.t_threads[f]; - int b; - kmp_balign_t * balign = thr->th.th_bar; - for( b = 0; b < bs_last_barrier; ++ b ) { - balign[ b ].bb.b_arrived = team->t.t_bar[ b ].b_arrived; - KMP_DEBUG_ASSERT(balign[b].bb.wait_flag != KMP_BARRIER_PARENT_FLAG); -#if USE_DEBUGGER - balign[ b ].bb.b_worker_arrived = team->t.t_bar[ b ].b_team_arrived; -#endif - } - } - } -#endif // KMP_NESTED_HOT_TEAMS - - /* reallocate space for arguments if necessary */ - __kmp_alloc_argv_entries( argc, team, TRUE ); - KMP_CHECK_UPDATE(team->t.t_argc, argc); - // - // The hot team re-uses the previous task team, - // if untouched during the previous release->gather phase. - // - - KF_TRACE( 10, ( " hot_team = %p\n", team ) ); - -#if KMP_DEBUG - if ( __kmp_tasking_mode != tskm_immediate_exec ) { - KA_TRACE( 20, ("__kmp_allocate_team: hot team task_team[0] = %p task_team[1] = %p after reinit\n", - team->t.t_task_team[0], team->t.t_task_team[1] )); - } -#endif - -#if OMPT_SUPPORT - __ompt_team_assign_id(team, ompt_parallel_id); -#endif - - KMP_MB(); - - return team; - } - - /* next, let's try to take one from the team pool */ - KMP_MB(); - for( team = (kmp_team_t*) __kmp_team_pool ; (team) ; ) - { - /* TODO: consider resizing undersized teams instead of reaping them, now that we have a resizing mechanism */ - if ( team->t.t_max_nproc >= max_nproc ) { - /* take this team from the team pool */ - __kmp_team_pool = team->t.t_next_pool; - - /* setup the team for fresh use */ - __kmp_initialize_team( team, new_nproc, new_icvs, NULL ); - - KA_TRACE( 20, ( "__kmp_allocate_team: setting task_team[0] %p and task_team[1] %p to NULL\n", - &team->t.t_task_team[0], &team->t.t_task_team[1]) ); - team->t.t_task_team[0] = NULL; - team->t.t_task_team[1] = NULL; - - /* reallocate space for arguments if necessary */ - __kmp_alloc_argv_entries( argc, team, TRUE ); - KMP_CHECK_UPDATE(team->t.t_argc, argc); - - KA_TRACE( 20, ("__kmp_allocate_team: team %d init arrived: join=%u, plain=%u\n", - team->t.t_id, KMP_INIT_BARRIER_STATE, KMP_INIT_BARRIER_STATE )); - { // Initialize barrier data. 
- int b; - for ( b = 0; b < bs_last_barrier; ++ b) { - team->t.t_bar[ b ].b_arrived = KMP_INIT_BARRIER_STATE; -#if USE_DEBUGGER - team->t.t_bar[ b ].b_master_arrived = 0; - team->t.t_bar[ b ].b_team_arrived = 0; -#endif - } - } - -#if OMP_40_ENABLED - team->t.t_proc_bind = new_proc_bind; -#endif - - KA_TRACE( 20, ("__kmp_allocate_team: using team from pool %d.\n", team->t.t_id )); - -#if OMPT_SUPPORT - __ompt_team_assign_id(team, ompt_parallel_id); -#endif - - KMP_MB(); - - return team; - } - - /* reap team if it is too small, then loop back and check the next one */ - /* not sure if this is wise, but, will be redone during the hot-teams rewrite. */ - /* TODO: Use technique to find the right size hot-team, don't reap them */ - team = __kmp_reap_team( team ); - __kmp_team_pool = team; - } - - /* nothing available in the pool, no matter, make a new team! */ - KMP_MB(); - team = (kmp_team_t*) __kmp_allocate( sizeof( kmp_team_t ) ); - - /* and set it up */ - team->t.t_max_nproc = max_nproc; - /* NOTE well, for some reason allocating one big buffer and dividing it - * up seems to really hurt performance a lot on the P4, so, let's not use - * this... */ - __kmp_allocate_team_arrays( team, max_nproc ); - - KA_TRACE( 20, ( "__kmp_allocate_team: making a new team\n" ) ); - __kmp_initialize_team( team, new_nproc, new_icvs, NULL ); - - KA_TRACE( 20, ( "__kmp_allocate_team: setting task_team[0] %p and task_team[1] %p to NULL\n", - &team->t.t_task_team[0], &team->t.t_task_team[1] ) ); - team->t.t_task_team[0] = NULL; // to be removed, as __kmp_allocate zeroes memory, no need to duplicate - team->t.t_task_team[1] = NULL; // to be removed, as __kmp_allocate zeroes memory, no need to duplicate - - if ( __kmp_storage_map ) { - __kmp_print_team_storage_map( "team", team, team->t.t_id, new_nproc ); - } - - /* allocate space for arguments */ - __kmp_alloc_argv_entries( argc, team, FALSE ); - team->t.t_argc = argc; - - KA_TRACE( 20, ("__kmp_allocate_team: team %d init arrived: join=%u, plain=%u\n", - team->t.t_id, KMP_INIT_BARRIER_STATE, KMP_INIT_BARRIER_STATE )); - { // Initialize barrier data. - int b; - for ( b = 0; b < bs_last_barrier; ++ b ) { - team->t.t_bar[ b ].b_arrived = KMP_INIT_BARRIER_STATE; -#if USE_DEBUGGER - team->t.t_bar[ b ].b_master_arrived = 0; - team->t.t_bar[ b ].b_team_arrived = 0; -#endif - } - } - -#if OMP_40_ENABLED - team->t.t_proc_bind = new_proc_bind; -#endif - -#if OMPT_SUPPORT - __ompt_team_assign_id(team, ompt_parallel_id); - team->t.ompt_serialized_team_info = NULL; -#endif - - KMP_MB(); - - KA_TRACE( 20, ("__kmp_allocate_team: done creating a new team %d.\n", team->t.t_id )); - - return team; -} - -/* TODO implement hot-teams at all levels */ -/* TODO implement lazy thread release on demand (disband request) */ - -/* free the team. return it to the team pool. release all the threads - * associated with it */ -void -__kmp_free_team( kmp_root_t *root, kmp_team_t *team USE_NESTED_HOT_ARG(kmp_info_t *master) ) -{ - int f; - KA_TRACE( 20, ("__kmp_free_team: T#%d freeing team %d\n", __kmp_get_gtid(), team->t.t_id )); - - /* verify state */ - KMP_DEBUG_ASSERT( root ); - KMP_DEBUG_ASSERT( team ); - KMP_DEBUG_ASSERT( team->t.t_nproc <= team->t.t_max_nproc ); - KMP_DEBUG_ASSERT( team->t.t_threads ); - - int use_hot_team = team == root->r.r_hot_team; -#if KMP_NESTED_HOT_TEAMS - int level; - kmp_hot_team_ptr_t *hot_teams; - if( master ) { - level = team->t.t_active_level - 1; - if( master->th.th_teams_microtask ) { // in teams construct? 
- if( master->th.th_teams_size.nteams > 1 ) {
- ++level; // level was not increased in teams construct for team_of_masters
- }
- if( team->t.t_pkfn != (microtask_t)__kmp_teams_master &&
- master->th.th_teams_level == team->t.t_level ) {
- ++level; // level was not increased in teams construct for team_of_workers before the parallel
- } // team->t.t_level will be increased inside parallel
- }
- hot_teams = master->th.th_hot_teams;
- if( level < __kmp_hot_teams_max_level ) {
- KMP_DEBUG_ASSERT( team == hot_teams[level].hot_team );
- use_hot_team = 1;
- }
- }
-#endif // KMP_NESTED_HOT_TEAMS
-
- /* team is done working */
- TCW_SYNC_PTR(team->t.t_pkfn, NULL); // Important for Debugging Support Library.
- team->t.t_copyin_counter = 0; // init counter for possible reuse
- // Do not reset pointer to parent team to NULL for hot teams.
-
- /* if we are non-hot team, release our threads */
- if( ! use_hot_team ) {
- if ( __kmp_tasking_mode != tskm_immediate_exec ) {
- // Delete task teams
- int tt_idx;
- for (tt_idx=0; tt_idx<2; ++tt_idx) {
- kmp_task_team_t *task_team = team->t.t_task_team[tt_idx];
- if ( task_team != NULL ) {
- for (f=0; f<team->t.t_nproc; ++f) { // Have all threads unref task teams
- team->t.t_threads[f]->th.th_task_team = NULL;
- }
- KA_TRACE( 20, ( "__kmp_free_team: T#%d deactivating task_team %p on team %d\n", __kmp_get_gtid(), task_team, team->t.t_id ) );
-#if KMP_NESTED_HOT_TEAMS
- __kmp_free_task_team( master, task_team );
-#endif
- team->t.t_task_team[tt_idx] = NULL;
- }
- }
- }
-
- // Reset pointer to parent team only for non-hot teams.
- team->t.t_parent = NULL;
- team->t.t_level = 0;
- team->t.t_active_level = 0;
-
- /* free the worker threads */
- for ( f = 1; f < team->t.t_nproc; ++ f ) {
- KMP_DEBUG_ASSERT( team->t.t_threads[ f ] );
- __kmp_free_thread( team->t.t_threads[ f ] );
- team->t.t_threads[ f ] = NULL;
- }
-
- /* put the team back in the team pool */
- /* TODO limit size of team pool, call reap_team if pool too large */
- team->t.t_next_pool = (kmp_team_t*) __kmp_team_pool;
- __kmp_team_pool = (volatile kmp_team_t*) team;
- }
-
- KMP_MB();
-}
-
-
-/* reap the team. destroy it, reclaim all its resources and free its memory */
-kmp_team_t *
-__kmp_reap_team( kmp_team_t *team )
-{
- kmp_team_t *next_pool = team->t.t_next_pool;
-
- KMP_DEBUG_ASSERT( team );
- KMP_DEBUG_ASSERT( team->t.t_dispatch );
- KMP_DEBUG_ASSERT( team->t.t_disp_buffer );
- KMP_DEBUG_ASSERT( team->t.t_threads );
- KMP_DEBUG_ASSERT( team->t.t_argv );
-
- /* TODO clean the threads that are a part of this? */
-
- /* free stuff */
-
- __kmp_free_team_arrays( team );
- if ( team->t.t_argv != &team->t.t_inline_argv[0] )
- __kmp_free( (void*) team->t.t_argv );
- __kmp_free( team );
-
- KMP_MB();
- return next_pool;
-}
-
-//
-// Free the thread. Don't reap it, just place it on the pool of available
-// threads.
-//
-// Changes for Quad issue 527845: We need a predictable OMP tid <-> gtid
-// binding for the affinity mechanism to be useful.
-//
-// Now, we always keep the free list (__kmp_thread_pool) sorted by gtid.
-// However, we want to avoid a potential performance problem by always
-// scanning through the list to find the correct point at which to insert
-// the thread (potential N**2 behavior). To do this we keep track of the
-// last place a thread struct was inserted (__kmp_thread_pool_insert_pt).
-// With single-level parallelism, threads will always be added to the tail
-// of the list, kept track of by __kmp_thread_pool_insert_pt.
With nested
-// parallelism, all bets are off and we may need to scan through the entire
-// free list.
-//
-// This change also has a potentially large performance benefit, for some
-// applications. Previously, as threads were freed from the hot team, they
-// would be placed back on the free list in inverse order. If the hot team
-// grew back to its original size, then the freed thread would be placed
-// back on the hot team in reverse order. This could cause bad cache
-// locality problems on programs where the size of the hot team regularly
-// grew and shrunk.
-//
-// Now, for single-level parallelism, the OMP tid is always == gtid.
-//
-void
-__kmp_free_thread( kmp_info_t *this_th )
-{
- int gtid;
- kmp_info_t **scan;
-
- KA_TRACE( 20, ("__kmp_free_thread: T#%d putting T#%d back on free pool.\n",
- __kmp_get_gtid(), this_th->th.th_info.ds.ds_gtid ));
-
- KMP_DEBUG_ASSERT( this_th );
-
- // When moving thread to pool, switch thread to wait on own b_go flag, and uninitialized (NULL team).
- int b;
- kmp_balign_t *balign = this_th->th.th_bar;
- for (b=0; b<bs_last_barrier; ++b) {
- if (balign[b].bb.wait_flag == KMP_BARRIER_PARENT_FLAG)
- balign[b].bb.wait_flag = KMP_BARRIER_SWITCH_TO_OWN_FLAG;
- balign[b].bb.team = NULL;
- balign[b].bb.leaf_kids = 0;
- }
- this_th->th.th_task_state = 0;
-
- /* put thread back on the free pool */
- TCW_PTR(this_th->th.th_team, NULL);
- TCW_PTR(this_th->th.th_root, NULL);
- TCW_PTR(this_th->th.th_dispatch, NULL); /* NOT NEEDED */
-
- //
- // If the __kmp_thread_pool_insert_pt is already past the new insert
- // point, then we need to re-scan the entire list.
- //
- gtid = this_th->th.th_info.ds.ds_gtid;
- if ( __kmp_thread_pool_insert_pt != NULL ) {
- KMP_DEBUG_ASSERT( __kmp_thread_pool != NULL );
- if ( __kmp_thread_pool_insert_pt->th.th_info.ds.ds_gtid > gtid ) {
- __kmp_thread_pool_insert_pt = NULL;
- }
- }
-
- //
- // Scan down the list to find the place to insert the thread.
- // scan is the address of a link in the list, possibly the address of
- // __kmp_thread_pool itself.
- //
- // In the absence of nested parallelism, the for loop will have 0 iterations.
- //
- if ( __kmp_thread_pool_insert_pt != NULL ) {
- scan = &( __kmp_thread_pool_insert_pt->th.th_next_pool );
- }
- else {
- scan = (kmp_info_t **)&__kmp_thread_pool;
- }
- for (; ( *scan != NULL ) && ( (*scan)->th.th_info.ds.ds_gtid < gtid );
- scan = &( (*scan)->th.th_next_pool ) );
-
- //
- // Insert the new element on the list, and set __kmp_thread_pool_insert_pt
- // to its address.
- //
- TCW_PTR(this_th->th.th_next_pool, *scan);
- __kmp_thread_pool_insert_pt = *scan = this_th;
- KMP_DEBUG_ASSERT( ( this_th->th.th_next_pool == NULL )
- || ( this_th->th.th_info.ds.ds_gtid
- < this_th->th.th_next_pool->th.th_info.ds.ds_gtid ) );
- TCW_4(this_th->th.th_in_pool, TRUE);
- __kmp_thread_pool_nth++;
-
- TCW_4(__kmp_nth, __kmp_nth - 1);
-
-#ifdef KMP_ADJUST_BLOCKTIME
- /* Adjust blocktime back to user setting or default if necessary */
- /* Middle initialization might never have occurred */
- if ( !__kmp_env_blocktime && ( __kmp_avail_proc > 0 ) ) {
- KMP_DEBUG_ASSERT( __kmp_avail_proc > 0 );
- if ( __kmp_nth <= __kmp_avail_proc ) {
- __kmp_zero_bt = FALSE;
- }
- }
-#endif /* KMP_ADJUST_BLOCKTIME */
-
- KMP_MB();
-}
-
-
-/* ------------------------------------------------------------------------ */
-
-void *
-__kmp_launch_thread( kmp_info_t *this_thr )
-{
- int gtid = this_thr->th.th_info.ds.ds_gtid;
-/* void *stack_data;*/
- kmp_team_t *(*volatile pteam);
-
- KMP_MB();
- KA_TRACE( 10, ("__kmp_launch_thread: T#%d start\n", gtid ) );
-
- if( __kmp_env_consistency_check ) {
- this_thr->th.th_cons = __kmp_allocate_cons_stack( gtid ); // ATT: Memory leak?
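// A minimal sketch of the sorted-insert-with-cached-hint scheme that the
// comment above __kmp_free_thread describes: the free pool is a singly
// linked list kept sorted by gtid, and a remembered insertion point makes
// the common single-level case (append at the tail) O(1) instead of a full
// scan. Node, pool, insert_hint and insert_sorted are hypothetical names,
// not the runtime's actual data structures.
#include <cstdio>

struct Node { int gtid; Node *next; };

static Node *pool = nullptr;        // stands in for __kmp_thread_pool
static Node *insert_hint = nullptr; // stands in for __kmp_thread_pool_insert_pt

static void insert_sorted(Node *n) {
    // If the cached hint is already past the new key, fall back to a full scan.
    if (insert_hint && insert_hint->gtid > n->gtid)
        insert_hint = nullptr;
    Node **scan = insert_hint ? &insert_hint->next : &pool;
    // With single-level parallelism this loop runs zero iterations.
    while (*scan && (*scan)->gtid < n->gtid)
        scan = &(*scan)->next;
    n->next = *scan;
    *scan = n;
    insert_hint = n; // remember where we inserted for the next call
}

int main() {
    Node a{3, nullptr}, b{1, nullptr}, c{2, nullptr};
    insert_sorted(&a); insert_sorted(&b); insert_sorted(&c);
    for (Node *p = pool; p; p = p->next) std::printf("%d ", p->gtid); // 1 2 3
    std::printf("\n");
}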
- } - -#if OMPT_SUPPORT - if (ompt_enabled) { - this_thr->th.ompt_thread_info.state = ompt_state_overhead; - this_thr->th.ompt_thread_info.wait_id = 0; - this_thr->th.ompt_thread_info.idle_frame = __builtin_frame_address(0); - if (ompt_callbacks.ompt_callback(ompt_event_thread_begin)) { - __ompt_thread_begin(ompt_thread_worker, gtid); - } - } -#endif - - /* This is the place where threads wait for work */ - while( ! TCR_4(__kmp_global.g.g_done) ) { - KMP_DEBUG_ASSERT( this_thr == __kmp_threads[ gtid ] ); - KMP_MB(); - - /* wait for work to do */ - KA_TRACE( 20, ("__kmp_launch_thread: T#%d waiting for work\n", gtid )); - -#if OMPT_SUPPORT - if (ompt_enabled) { - this_thr->th.ompt_thread_info.state = ompt_state_idle; - } -#endif - - /* No tid yet since not part of a team */ - __kmp_fork_barrier( gtid, KMP_GTID_DNE ); - -#if OMPT_SUPPORT - if (ompt_enabled) { - this_thr->th.ompt_thread_info.state = ompt_state_overhead; - } -#endif - - pteam = (kmp_team_t *(*))(& this_thr->th.th_team); - - /* have we been allocated? */ - if ( TCR_SYNC_PTR(*pteam) && !TCR_4(__kmp_global.g.g_done) ) { -#if OMPT_SUPPORT - ompt_task_info_t *task_info; - ompt_parallel_id_t my_parallel_id; - if (ompt_enabled) { - task_info = __ompt_get_taskinfo(0); - my_parallel_id = (*pteam)->t.ompt_team_info.parallel_id; - } -#endif - /* we were just woken up, so run our new task */ - if ( TCR_SYNC_PTR((*pteam)->t.t_pkfn) != NULL ) { - int rc; - KA_TRACE(20, ("__kmp_launch_thread: T#%d(%d:%d) invoke microtask = %p\n", - gtid, (*pteam)->t.t_id, __kmp_tid_from_gtid(gtid), (*pteam)->t.t_pkfn)); - - updateHWFPControl (*pteam); - -#if OMPT_SUPPORT - if (ompt_enabled) { - this_thr->th.ompt_thread_info.state = ompt_state_work_parallel; - // Initialize OMPT task id for implicit task. - int tid = __kmp_tid_from_gtid(gtid); - task_info->task_id = __ompt_task_id_new(tid); - } -#endif - - { - KMP_TIME_PARTITIONED_BLOCK(OMP_parallel); - KMP_SET_THREAD_STATE_BLOCK(IMPLICIT_TASK); - rc = (*pteam)->t.t_invoke( gtid ); - } - KMP_ASSERT( rc ); - -#if OMPT_SUPPORT - if (ompt_enabled) { - /* no frame set while outside task */ - task_info->frame.exit_runtime_frame = NULL; - - this_thr->th.ompt_thread_info.state = ompt_state_overhead; - } -#endif - KMP_MB(); - KA_TRACE(20, ("__kmp_launch_thread: T#%d(%d:%d) done microtask = %p\n", - gtid, (*pteam)->t.t_id, __kmp_tid_from_gtid(gtid), (*pteam)->t.t_pkfn)); - } - /* join barrier after parallel region */ - __kmp_join_barrier( gtid ); -#if OMPT_SUPPORT && OMPT_TRACE - if (ompt_enabled) { - if (ompt_callbacks.ompt_callback(ompt_event_implicit_task_end)) { - // don't access *pteam here: it may have already been freed - // by the master thread behind the barrier (possible race) - ompt_callbacks.ompt_callback(ompt_event_implicit_task_end)( - my_parallel_id, task_info->task_id); - } - task_info->frame.exit_runtime_frame = NULL; - task_info->task_id = 0; - } -#endif - } - } - TCR_SYNC_PTR((intptr_t)__kmp_global.g.g_done); - -#if OMPT_SUPPORT - if (ompt_enabled && - ompt_callbacks.ompt_callback(ompt_event_thread_end)) { - __ompt_thread_end(ompt_thread_worker, gtid); - } -#endif - - this_thr->th.th_task_team = NULL; - /* run the destructors for the threadprivate data for this thread */ - __kmp_common_destroy_gtid( gtid ); - - KA_TRACE( 10, ("__kmp_launch_thread: T#%d done\n", gtid ) ); - KMP_MB(); - return this_thr; -} - -/* ------------------------------------------------------------------------ */ -/* ------------------------------------------------------------------------ */ - -void -__kmp_internal_end_dest( 
void *specific_gtid )
-{
- #if KMP_COMPILER_ICC
- #pragma warning( push )
- #pragma warning( disable: 810 ) // conversion from "void *" to "int" may lose significant bits
- #endif
- // Make sure no significant bits are lost
- int gtid = (kmp_intptr_t)specific_gtid - 1;
- #if KMP_COMPILER_ICC
- #pragma warning( pop )
- #endif
-
- KA_TRACE( 30, ("__kmp_internal_end_dest: T#%d\n", gtid));
- /* NOTE: the gtid is stored as gtid+1 in the thread-local-storage
- * this is because 0 is reserved for the nothing-stored case */
-
- /* josh: One reason for setting the gtid specific data even when it is being
- destroyed by pthread is to allow gtid lookup through thread specific data
- (__kmp_gtid_get_specific). Some of the code, especially stat code,
- that gets executed in the call to __kmp_internal_end_thread, actually
- gets the gtid through the thread specific data. Setting it here seems
- rather inelegant and perhaps wrong, but allows __kmp_internal_end_thread
- to run smoothly.
- todo: get rid of this after we remove the dependence on
- __kmp_gtid_get_specific
- */
- if(gtid >= 0 && KMP_UBER_GTID(gtid))
- __kmp_gtid_set_specific( gtid );
- #ifdef KMP_TDATA_GTID
- __kmp_gtid = gtid;
- #endif
- __kmp_internal_end_thread( gtid );
-}
-
-#if KMP_OS_UNIX && KMP_DYNAMIC_LIB
-
-// 2009-09-08 (lev): It looks like the destructor does not work. In simple test cases destructors work
-// perfectly, but in real libomp.so I have no evidence it is ever called. However, -fini linker
-// option in makefile.mk works fine.
-
-__attribute__(( destructor ))
-void
-__kmp_internal_end_dtor( void )
-{
- __kmp_internal_end_atexit();
-}
-
-void
-__kmp_internal_end_fini( void )
-{
- __kmp_internal_end_atexit();
-}
-
-#endif
-
-/* [Windows] josh: when the atexit handler is called, there may still be more than one thread alive */
-void
-__kmp_internal_end_atexit( void )
-{
- KA_TRACE( 30, ( "__kmp_internal_end_atexit\n" ) );
- /* [Windows]
- josh: ideally, we want to completely shut down the library in this atexit handler, but
- stat code that depends on thread specific data for gtid fails because that data becomes
- unavailable at some point during the shutdown, so we call __kmp_internal_end_thread
- instead. We should eventually remove the dependency on __kmp_get_specific_gtid in the
- stat code and use __kmp_internal_end_library to cleanly shut down the library.
-
-// TODO: Can some of this comment about GVS be removed?
- I suspect that the offending stat code is executed when the calling thread tries to
- clean up a dead root thread's data structures, resulting in GVS code trying to close
- the GVS structures for that thread, but since the stat code uses
- __kmp_get_specific_gtid to get the gtid with the assumption that the calling thread is
- cleaning up itself instead of another thread, it gets confused. This happens because
- allowing a thread to unregister and clean up another thread is a recent modification for
- addressing an issue with Maxon Cinema4D. Based on the current design (20050722), a
- thread may end up trying to unregister another thread only if thread death does not
- trigger the calling of __kmp_internal_end_thread. For Linux* OS, there is the thread
- specific data destructor function to detect thread death. For Windows dynamic, there
- is DllMain(THREAD_DETACH). For Windows static, there is nothing. Thus, the
- workaround is applicable only for Windows static stat library.
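// A tiny sketch of the gtid-in-TLS convention noted in __kmp_internal_end_dest
// above: the value is stored biased by +1 so that 0 keeps its usual
// "nothing stored" meaning for thread-specific data. encode_gtid/decode_gtid
// are hypothetical helper names used only for illustration.
#include <cassert>
#include <cstdint>

static inline void *encode_gtid(int gtid)   { return (void *)(intptr_t)(gtid + 1); }
static inline int   decode_gtid(void *spec) { return (int)(intptr_t)spec - 1; }

int main() {
    assert(encode_gtid(0) != nullptr);        // gtid 0 is stored as 1, not NULL
    assert(decode_gtid(encode_gtid(0)) == 0);
    assert(decode_gtid(encode_gtid(42)) == 42);
}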
- */
- __kmp_internal_end_library( -1 );
- #if KMP_OS_WINDOWS
- __kmp_close_console();
- #endif
-}
-
-static void
-__kmp_reap_thread(
- kmp_info_t * thread,
- int is_root
-) {
-
- // It is assumed __kmp_forkjoin_lock is acquired.
-
- int gtid;
-
- KMP_DEBUG_ASSERT( thread != NULL );
-
- gtid = thread->th.th_info.ds.ds_gtid;
-
- if ( ! is_root ) {
-
- if ( __kmp_dflt_blocktime != KMP_MAX_BLOCKTIME ) {
- /* Assume the threads are at the fork barrier here */
- KA_TRACE( 20, ("__kmp_reap_thread: releasing T#%d from fork barrier for reap\n", gtid ) );
- /* Need release fence here to prevent seg faults for tree forkjoin barrier (GEH) */
- ANNOTATE_HAPPENS_BEFORE(thread);
- kmp_flag_64 flag(&thread->th.th_bar[ bs_forkjoin_barrier ].bb.b_go, thread);
- __kmp_release_64(&flag);
- }; // if
-
- // Terminate OS thread.
- __kmp_reap_worker( thread );
-
- //
- // The thread was killed asynchronously. If it was actively
- // spinning in the thread pool, decrement the global count.
- //
- // There is a small timing hole here - if the worker thread was
- // just waking up after sleeping in the pool, had reset its
- // th_active_in_pool flag but not decremented the global counter
- // __kmp_thread_pool_active_nth yet, then the global counter
- // might not get updated.
- //
- // Currently, this can only happen as the library is unloaded,
- // so there are no harmful side effects.
- //
- if ( thread->th.th_active_in_pool ) {
- thread->th.th_active_in_pool = FALSE;
- KMP_TEST_THEN_DEC32(
- (kmp_int32 *) &__kmp_thread_pool_active_nth );
- KMP_DEBUG_ASSERT( TCR_4(__kmp_thread_pool_active_nth) >= 0 );
- }
-
- // Decrement # of [worker] threads in the pool.
- KMP_DEBUG_ASSERT( __kmp_thread_pool_nth > 0 );
- --__kmp_thread_pool_nth;
- }; // if
-
- __kmp_free_implicit_task(thread);
-
- // Free the fast memory for tasking
- #if USE_FAST_MEMORY
- __kmp_free_fast_memory( thread );
- #endif /* USE_FAST_MEMORY */
-
- __kmp_suspend_uninitialize_thread( thread );
-
- KMP_DEBUG_ASSERT( __kmp_threads[ gtid ] == thread );
- TCW_SYNC_PTR(__kmp_threads[gtid], NULL);
-
- -- __kmp_all_nth;
- // __kmp_nth was decremented when thread is added to the pool.
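// A minimal sketch of the benign race described in the "timing hole" comment
// above: a pooled worker clears its per-thread flag before decrementing the
// shared active count, so a reaper that kills the worker between the two
// steps sees flag == false, skips the decrement, and the counter stays one
// too high. Worker, worker_wakes and reaper are hypothetical names.
#include <atomic>

static std::atomic<int> pool_active_count{0}; // analogue of __kmp_thread_pool_active_nth

struct Worker { std::atomic<bool> active_in_pool{false}; };

static void worker_wakes(Worker &w) {
    w.active_in_pool.store(false);   // step 1: clear the per-thread flag
    // <-- killed here, the worker never reaches step 2 and the count leaks
    pool_active_count.fetch_sub(1);  // step 2: update the shared counter
}

static void reaper(Worker &w) {
    // Mirrors __kmp_reap_thread: decrement only if the flag is still set.
    if (w.active_in_pool.exchange(false))
        pool_active_count.fetch_sub(1);
}

int main() {
    Worker w;
    w.active_in_pool.store(true);
    pool_active_count.store(1);
    worker_wakes(w);               // worker-side path: count returns to 0
    w.active_in_pool.store(true);
    pool_active_count.store(1);
    reaper(w);                     // reaper-side path: count returns to 0
    return pool_active_count.load() == 0 ? 0 : 1;
}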
-
-#ifdef KMP_ADJUST_BLOCKTIME
- /* Adjust blocktime back to user setting or default if necessary */
- /* Middle initialization might never have occurred */
- if ( !__kmp_env_blocktime && ( __kmp_avail_proc > 0 ) ) {
- KMP_DEBUG_ASSERT( __kmp_avail_proc > 0 );
- if ( __kmp_nth <= __kmp_avail_proc ) {
- __kmp_zero_bt = FALSE;
- }
- }
-#endif /* KMP_ADJUST_BLOCKTIME */
-
- /* free the memory being used */
- if( __kmp_env_consistency_check ) {
- if ( thread->th.th_cons ) {
- __kmp_free_cons_stack( thread->th.th_cons );
- thread->th.th_cons = NULL;
- }; // if
- }
-
- if ( thread->th.th_pri_common != NULL ) {
- __kmp_free( thread->th.th_pri_common );
- thread->th.th_pri_common = NULL;
- }; // if
-
- if (thread->th.th_task_state_memo_stack != NULL) {
- __kmp_free(thread->th.th_task_state_memo_stack);
- thread->th.th_task_state_memo_stack = NULL;
- }
-
- #if KMP_USE_BGET
- if ( thread->th.th_local.bget_data != NULL ) {
- __kmp_finalize_bget( thread );
- }; // if
- #endif
-
-#if KMP_AFFINITY_SUPPORTED
- if ( thread->th.th_affin_mask != NULL ) {
- KMP_CPU_FREE( thread->th.th_affin_mask );
- thread->th.th_affin_mask = NULL;
- }; // if
-#endif /* KMP_AFFINITY_SUPPORTED */
-
- __kmp_reap_team( thread->th.th_serial_team );
- thread->th.th_serial_team = NULL;
- __kmp_free( thread );
-
- KMP_MB();
-
-} // __kmp_reap_thread
-
-static void
-__kmp_internal_end(void)
-{
- int i;
-
- /* First, unregister the library */
- __kmp_unregister_library();
-
- #if KMP_OS_WINDOWS
- /* In Win static library, we can't tell when a root actually dies, so we
- reclaim the data structures for any root threads that have died but not
- unregistered themselves, in order to shut down cleanly.
- In Win dynamic library we also can't tell when a thread dies.
- */
- __kmp_reclaim_dead_roots(); // AC: moved here to always clean resources of dead roots
- #endif
-
- for( i=0 ; i<__kmp_threads_capacity ; i++ )
- if( __kmp_root[i] )
- if( __kmp_root[i]->r.r_active )
- break;
- KMP_MB(); /* Flush all pending memory write invalidates. */
- TCW_SYNC_4(__kmp_global.g.g_done, TRUE);
-
- if ( i < __kmp_threads_capacity ) {
-#if KMP_USE_MONITOR
- // 2009-09-08 (lev): Other alive roots found. Why do we kill the monitor??
- KMP_MB(); /* Flush all pending memory write invalidates. */
-
- //
- // Need to check that monitor was initialized before reaping it.
- // If we are called from __kmp_atfork_child (which sets
- // __kmp_init_parallel = 0), then __kmp_monitor will appear to
- // contain valid data, but it is only valid in the parent process,
- // not the child.
- //
- // New behavior (201008): instead of keying off of the flag
- // __kmp_init_parallel, the monitor thread creation is keyed off
- // of the new flag __kmp_init_monitor.
- //
- __kmp_acquire_bootstrap_lock( & __kmp_monitor_lock );
- if ( TCR_4( __kmp_init_monitor ) ) {
- __kmp_reap_monitor( & __kmp_monitor );
- TCW_4( __kmp_init_monitor, 0 );
- }
- __kmp_release_bootstrap_lock( & __kmp_monitor_lock );
- KA_TRACE( 10, ("__kmp_internal_end: monitor reaped\n" ) );
-#endif // KMP_USE_MONITOR
- } else {
- /* TODO move this to cleanup code */
- #ifdef KMP_DEBUG
- /* make sure that everything has properly ended */
- for ( i = 0; i < __kmp_threads_capacity; i++ ) {
- if( __kmp_root[i] ) {
-// KMP_ASSERT( ! KMP_UBER_GTID( i ) ); // AC: there can be uber threads alive here
- KMP_ASSERT( ! __kmp_root[i]->r.r_active ); // TODO: can they be active?
- }
- }
- #endif
-
- KMP_MB();
-
- // Reap the worker threads.
- // This is valid for now, but be careful if threads are reaped sooner.
- while ( __kmp_thread_pool != NULL ) { // Loop thru all the threads in the pool.
- // Get the next thread from the pool.
- kmp_info_t * thread = (kmp_info_t *) __kmp_thread_pool;
- __kmp_thread_pool = thread->th.th_next_pool;
- // Reap it.
- thread->th.th_next_pool = NULL;
- thread->th.th_in_pool = FALSE;
- __kmp_reap_thread( thread, 0 );
- }; // while
- __kmp_thread_pool_insert_pt = NULL;
-
- // Reap teams.
- while ( __kmp_team_pool != NULL ) { // Loop thru all the teams in the pool.
- // Get the next team from the pool.
- kmp_team_t * team = (kmp_team_t *) __kmp_team_pool;
- __kmp_team_pool = team->t.t_next_pool;
- // Reap it.
- team->t.t_next_pool = NULL;
- __kmp_reap_team( team );
- }; // while
-
- __kmp_reap_task_teams( );
-
- for ( i = 0; i < __kmp_threads_capacity; ++ i ) {
- // TBD: Add some checking...
- // Something like KMP_DEBUG_ASSERT( __kmp_thread[ i ] == NULL );
- }
-
- /* Make sure all threadprivate destructors get run by joining with all worker
- threads before resetting this flag */
- TCW_SYNC_4(__kmp_init_common, FALSE);
-
- KA_TRACE( 10, ("__kmp_internal_end: all workers reaped\n" ) );
- KMP_MB();
-
-#if KMP_USE_MONITOR
- //
- // See note above: One of the possible fixes for CQ138434 / CQ140126
- //
- // FIXME: push both code fragments down and CSE them?
- // push them into __kmp_cleanup() ?
- //
- __kmp_acquire_bootstrap_lock( & __kmp_monitor_lock );
- if ( TCR_4( __kmp_init_monitor ) ) {
- __kmp_reap_monitor( & __kmp_monitor );
- TCW_4( __kmp_init_monitor, 0 );
- }
- __kmp_release_bootstrap_lock( & __kmp_monitor_lock );
- KA_TRACE( 10, ("__kmp_internal_end: monitor reaped\n" ) );
-#endif
- } /* else !__kmp_global.t_active */
- TCW_4(__kmp_init_gtid, FALSE);
- KMP_MB(); /* Flush all pending memory write invalidates. */
-
- __kmp_cleanup();
-#if OMPT_SUPPORT
- ompt_fini();
-#endif
-}
-
-void
-__kmp_internal_end_library( int gtid_req )
-{
- /* if we have already cleaned up, don't try again, it wouldn't be pretty */
- /* this shouldn't be a race condition because __kmp_internal_end() is the
- * only place to clear __kmp_serial_init */
- /* we'll check this later too, after we get the lock */
- // 2009-09-06: We do not set g_abort without setting g_done. This check looks redundant,
- // because the next check will work in any case.
- if( __kmp_global.g.g_abort ) {
- KA_TRACE( 11, ("__kmp_internal_end_library: abort, exiting\n" ));
- /* TODO abort? */
- return;
- }
- if( TCR_4(__kmp_global.g.g_done) || !__kmp_init_serial ) {
- KA_TRACE( 10, ("__kmp_internal_end_library: already finished\n" ));
- return;
- }
-
-
- KMP_MB(); /* Flush all pending memory write invalidates. */
-
- /* find out who we are and what we should do */
- {
- int gtid = (gtid_req>=0) ? gtid_req : __kmp_gtid_get_specific();
- KA_TRACE( 10, ("__kmp_internal_end_library: enter T#%d (%d)\n", gtid, gtid_req ));
- if( gtid == KMP_GTID_SHUTDOWN ) {
- KA_TRACE( 10, ("__kmp_internal_end_library: !__kmp_init_runtime, system already shutdown\n" ));
- return;
- } else if( gtid == KMP_GTID_MONITOR ) {
- KA_TRACE( 10, ("__kmp_internal_end_library: monitor thread, gtid not registered, or system shutdown\n" ));
- return;
- } else if( gtid == KMP_GTID_DNE ) {
- KA_TRACE( 10, ("__kmp_internal_end_library: gtid not registered or system shutdown\n" ));
- /* we don't know who we are, but we may still shutdown the library */
- } else if( KMP_UBER_GTID( gtid )) {
- /* unregister ourselves as an uber thread.
gtid is no longer valid */ - if( __kmp_root[gtid]->r.r_active ) { - __kmp_global.g.g_abort = -1; - TCW_SYNC_4(__kmp_global.g.g_done, TRUE); - KA_TRACE( 10, ("__kmp_internal_end_library: root still active, abort T#%d\n", gtid )); - return; - } else { - KA_TRACE( 10, ("__kmp_internal_end_library: unregistering sibling T#%d\n", gtid )); - __kmp_unregister_root_current_thread( gtid ); - } - } else { - /* worker threads may call this function through the atexit handler, if they call exit() */ - /* For now, skip the usual subsequent processing and just dump the debug buffer. - TODO: do a thorough shutdown instead - */ - #ifdef DUMP_DEBUG_ON_EXIT - if ( __kmp_debug_buf ) - __kmp_dump_debug_buffer( ); - #endif - return; - } - } - /* synchronize the termination process */ - __kmp_acquire_bootstrap_lock( &__kmp_initz_lock ); - - /* have we already finished */ - if( __kmp_global.g.g_abort ) { - KA_TRACE( 10, ("__kmp_internal_end_library: abort, exiting\n" )); - /* TODO abort? */ - __kmp_release_bootstrap_lock( &__kmp_initz_lock ); - return; - } - if( TCR_4(__kmp_global.g.g_done) || !__kmp_init_serial ) { - __kmp_release_bootstrap_lock( &__kmp_initz_lock ); - return; - } - - /* We need this lock to enforce mutex between this reading of - __kmp_threads_capacity and the writing by __kmp_register_root. - Alternatively, we can use a counter of roots that is - atomically updated by __kmp_get_global_thread_id_reg, - __kmp_do_serial_initialize and __kmp_internal_end_*. - */ - __kmp_acquire_bootstrap_lock( &__kmp_forkjoin_lock ); - - /* now we can safely conduct the actual termination */ - __kmp_internal_end(); - - __kmp_release_bootstrap_lock( &__kmp_forkjoin_lock ); - __kmp_release_bootstrap_lock( &__kmp_initz_lock ); - - KA_TRACE( 10, ("__kmp_internal_end_library: exit\n" ) ); - - #ifdef DUMP_DEBUG_ON_EXIT - if ( __kmp_debug_buf ) - __kmp_dump_debug_buffer(); - #endif - - #if KMP_OS_WINDOWS - __kmp_close_console(); - #endif - - __kmp_fini_allocator(); - -} // __kmp_internal_end_library - -void -__kmp_internal_end_thread( int gtid_req ) -{ - int i; - - /* if we have already cleaned up, don't try again, it wouldn't be pretty */ - /* this shouldn't be a race condition because __kmp_internal_end() is the - * only place to clear __kmp_serial_init */ - /* we'll check this later too, after we get the lock */ - // 2009-09-06: We do not set g_abort without setting g_done. This check looks redundant, - // because the next check will work in any case. - if( __kmp_global.g.g_abort ) { - KA_TRACE( 11, ("__kmp_internal_end_thread: abort, exiting\n" )); - /* TODO abort? */ - return; - } - if( TCR_4(__kmp_global.g.g_done) || !__kmp_init_serial ) { - KA_TRACE( 10, ("__kmp_internal_end_thread: already finished\n" )); - return; - } - - KMP_MB(); /* Flush all pending memory write invalidates. */ - - /* find out who we are and what we should do */ - { - int gtid = (gtid_req>=0) ? 
gtid_req : __kmp_gtid_get_specific();
- KA_TRACE( 10, ("__kmp_internal_end_thread: enter T#%d (%d)\n", gtid, gtid_req ));
- if( gtid == KMP_GTID_SHUTDOWN ) {
- KA_TRACE( 10, ("__kmp_internal_end_thread: !__kmp_init_runtime, system already shutdown\n" ));
- return;
- } else if( gtid == KMP_GTID_MONITOR ) {
- KA_TRACE( 10, ("__kmp_internal_end_thread: monitor thread, gtid not registered, or system shutdown\n" ));
- return;
- } else if( gtid == KMP_GTID_DNE ) {
- KA_TRACE( 10, ("__kmp_internal_end_thread: gtid not registered or system shutdown\n" ));
- return;
- /* we don't know who we are */
- } else if( KMP_UBER_GTID( gtid )) {
- /* unregister ourselves as an uber thread. gtid is no longer valid */
- if( __kmp_root[gtid]->r.r_active ) {
- __kmp_global.g.g_abort = -1;
- TCW_SYNC_4(__kmp_global.g.g_done, TRUE);
- KA_TRACE( 10, ("__kmp_internal_end_thread: root still active, abort T#%d\n", gtid ));
- return;
- } else {
- KA_TRACE( 10, ("__kmp_internal_end_thread: unregistering sibling T#%d\n", gtid ));
- __kmp_unregister_root_current_thread( gtid );
- }
- } else {
- /* just a worker thread, let's leave */
- KA_TRACE( 10, ("__kmp_internal_end_thread: worker thread T#%d\n", gtid ));
-
- if ( gtid >= 0 ) {
- __kmp_threads[gtid]->th.th_task_team = NULL;
- }
-
- KA_TRACE( 10, ("__kmp_internal_end_thread: worker thread done, exiting T#%d\n", gtid ));
- return;
- }
- }
- #if defined KMP_DYNAMIC_LIB
- // AC: let's not shut down the Linux* OS dynamic library at the exit of an uber thread,
- // because we can better shut down later, in the library destructor.
- // The reason for this change is a performance problem seen when a non-OpenMP thread
- // in a loop forks and joins many OpenMP threads. We can save a lot of time
- // by keeping worker threads alive until program shutdown.
- // OM: Removed Linux* OS restriction to fix the crash on OS X* (DPD200239966) and
- // Windows (DPD200287443) that occurs when using critical sections from foreign threads.
- KA_TRACE( 10, ("__kmp_internal_end_thread: exiting T#%d\n", gtid_req) );
- return;
- #endif
- /* synchronize the termination process */
- __kmp_acquire_bootstrap_lock( &__kmp_initz_lock );
-
- /* have we already finished */
- if( __kmp_global.g.g_abort ) {
- KA_TRACE( 10, ("__kmp_internal_end_thread: abort, exiting\n" ));
- /* TODO abort? */
- __kmp_release_bootstrap_lock( &__kmp_initz_lock );
- return;
- }
- if( TCR_4(__kmp_global.g.g_done) || !__kmp_init_serial ) {
- __kmp_release_bootstrap_lock( &__kmp_initz_lock );
- return;
- }
-
- /* We need this lock to enforce mutex between this reading of
- __kmp_threads_capacity and the writing by __kmp_register_root.
- Alternatively, we can use a counter of roots that is
- atomically updated by __kmp_get_global_thread_id_reg,
- __kmp_do_serial_initialize and __kmp_internal_end_*.
- */
-
- /* should we finish the run-time? are all siblings done?
*/
- __kmp_acquire_bootstrap_lock( &__kmp_forkjoin_lock );
-
- for ( i = 0; i < __kmp_threads_capacity; ++ i ) {
- if ( KMP_UBER_GTID( i ) ) {
- KA_TRACE( 10, ("__kmp_internal_end_thread: remaining sibling task: gtid==%d\n", i ));
- __kmp_release_bootstrap_lock( &__kmp_forkjoin_lock );
- __kmp_release_bootstrap_lock( &__kmp_initz_lock );
- return;
- };
- }
-
- /* now we can safely conduct the actual termination */
-
- __kmp_internal_end();
-
- __kmp_release_bootstrap_lock( &__kmp_forkjoin_lock );
- __kmp_release_bootstrap_lock( &__kmp_initz_lock );
-
- KA_TRACE( 10, ("__kmp_internal_end_thread: exit T#%d\n", gtid_req ) );
-
- #ifdef DUMP_DEBUG_ON_EXIT
- if ( __kmp_debug_buf )
- __kmp_dump_debug_buffer();
- #endif
-} // __kmp_internal_end_thread
-
-// -------------------------------------------------------------------------------------------------
-// Library registration stuff.
-
-static long __kmp_registration_flag = 0;
- // Random value used to indicate library initialization.
-static char * __kmp_registration_str = NULL;
- // Value to be saved in env var __KMP_REGISTERED_LIB_<pid>.
-
-
-static inline
-char *
-__kmp_reg_status_name() {
- /*
- On RHEL 3u5 if linked statically, getpid() returns different values in each thread.
- If registration and unregistration go in different threads (omp_misc_other_root_exit.cpp test case),
- the name of registered_lib_env env var cannot be found, because the name will contain a different pid.
- */
- return __kmp_str_format( "__KMP_REGISTERED_LIB_%d", (int) getpid() );
-} // __kmp_reg_status_get
-
-
-void
-__kmp_register_library_startup(
- void
-) {
-
- char * name = __kmp_reg_status_name(); // Name of the environment variable.
- int done = 0;
- union {
- double dtime;
- long ltime;
- } time;
- #if KMP_ARCH_X86 || KMP_ARCH_X86_64
- __kmp_initialize_system_tick();
- #endif
- __kmp_read_system_time( & time.dtime );
- __kmp_registration_flag = 0xCAFE0000L | ( time.ltime & 0x0000FFFFL );
- __kmp_registration_str =
- __kmp_str_format(
- "%p-%lx-%s",
- & __kmp_registration_flag,
- __kmp_registration_flag,
- KMP_LIBRARY_FILE
- );
-
- KA_TRACE( 50, ( "__kmp_register_library_startup: %s=\"%s\"\n", name, __kmp_registration_str ) );
-
- while ( ! done ) {
-
- char * value = NULL; // Actual value of the environment variable.
-
- // Set the environment variable, but do not overwrite it if it already exists.
- __kmp_env_set( name, __kmp_registration_str, 0 );
- // Check that the variable was written.
- value = __kmp_env_get( name );
- if ( value != NULL && strcmp( value, __kmp_registration_str ) == 0 ) {
-
- done = 1; // Ok, environment variable set successfully, exit the loop.
-
- } else {
-
- // Oops. Write failed. Another copy of OpenMP RTL is in memory.
- // Check whether it is alive or dead.
- int neighbor = 0; // 0 -- unknown status, 1 -- alive, 2 -- dead.
- char * tail = value;
- char * flag_addr_str = NULL;
- char * flag_val_str = NULL;
- char const * file_name = NULL;
- __kmp_str_split( tail, '-', & flag_addr_str, & tail );
- __kmp_str_split( tail, '-', & flag_val_str, & tail );
- file_name = tail;
- if ( tail != NULL ) {
- long * flag_addr = 0;
- long flag_val = 0;
- KMP_SSCANF( flag_addr_str, "%p", & flag_addr );
- KMP_SSCANF( flag_val_str, "%lx", & flag_val );
- if ( flag_addr != 0 && flag_val != 0 && strcmp( file_name, "" ) != 0 ) {
- // First, check whether environment-encoded address is mapped into addr space.
- // If so, dereference it to see if it still has the right value.
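// A standalone sketch of the handshake being checked here: the value stored
// in __KMP_REGISTERED_LIB_<pid> is "<flag address>-<flag value>-<library
// file>", and a second runtime decides whether its "neighbor" is alive by
// parsing the string and comparing the value found at the encoded address.
// make_reg_str and neighbor_status are hypothetical helpers; a real check
// must first verify the address is mapped into this process
// (__kmp_is_address_mapped, as below) before dereferencing it.
#include <cstdio>
#include <string>

static long registration_flag; // analogue of __kmp_registration_flag

static std::string make_reg_str(const char *lib_file) {
    char buf[256];
    std::snprintf(buf, sizeof buf, "%p-%lx-%s",
                  (void *)&registration_flag, registration_flag, lib_file);
    return buf;
}

// 0 = unparsable (assume alive), 1 = alive, 2 = dead -- same convention as above.
static int neighbor_status(const char *value) {
    void *addr = nullptr;
    long val = 0;
    char file[128] = "";
    if (std::sscanf(value, "%p-%lx-%127s", &addr, &val, file) != 3)
        return 0;
    return (addr != nullptr && *(long *)addr == val) ? 1 : 2;
}

int main() {
    registration_flag = 0xCAFE1234L;
    std::string v = make_reg_str("libomp.so");
    std::printf("%s -> status %d\n", v.c_str(), neighbor_status(v.c_str())); // status 1
}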
-
- if ( __kmp_is_address_mapped( flag_addr ) && * flag_addr == flag_val ) {
- neighbor = 1;
- } else {
- // If not, then we know the other copy of the library is no longer running.
- neighbor = 2;
- }; // if
- }; // if
- }; // if
- switch ( neighbor ) {
- case 0 : // Cannot parse environment variable -- neighbor status unknown.
- // Assume it is the incompatible format of a future version of the library.
- // Assume the other library is alive.
- // WARN( ... ); // TODO: Issue a warning.
- file_name = "unknown library";
- // Attention! Falling through to the next case. That's intentional.
- case 1 : { // Neighbor is alive.
- // Check it is allowed.
- char * duplicate_ok = __kmp_env_get( "KMP_DUPLICATE_LIB_OK" );
- if ( ! __kmp_str_match_true( duplicate_ok ) ) {
- // That's not allowed. Issue fatal error.
- __kmp_msg(
- kmp_ms_fatal,
- KMP_MSG( DuplicateLibrary, KMP_LIBRARY_FILE, file_name ),
- KMP_HNT( DuplicateLibrary ),
- __kmp_msg_null
- );
- }; // if
- KMP_INTERNAL_FREE( duplicate_ok );
- __kmp_duplicate_library_ok = 1;
- done = 1; // Exit the loop.
- } break;
- case 2 : { // Neighbor is dead.
- // Clear the variable and try to register library again.
- __kmp_env_unset( name );
- } break;
- default : {
- KMP_DEBUG_ASSERT( 0 );
- } break;
- }; // switch
-
- }; // if
- KMP_INTERNAL_FREE( (void *) value );
-
- }; // while
- KMP_INTERNAL_FREE( (void *) name );
-
-} // func __kmp_register_library_startup
-
-
-void
-__kmp_unregister_library( void ) {
-
- char * name = __kmp_reg_status_name();
- char * value = __kmp_env_get( name );
-
- KMP_DEBUG_ASSERT( __kmp_registration_flag != 0 );
- KMP_DEBUG_ASSERT( __kmp_registration_str != NULL );
- if ( value != NULL && strcmp( value, __kmp_registration_str ) == 0 ) {
- // Ok, this is our variable. Delete it.
- __kmp_env_unset( name );
- }; // if
-
- KMP_INTERNAL_FREE( __kmp_registration_str );
- KMP_INTERNAL_FREE( value );
- KMP_INTERNAL_FREE( name );
-
- __kmp_registration_flag = 0;
- __kmp_registration_str = NULL;
-
-} // __kmp_unregister_library
-
-
-// End of Library registration stuff.
-// -------------------------------------------------------------------------------------------------
-
-#if KMP_ARCH_X86_64 && (KMP_OS_LINUX || KMP_OS_WINDOWS)
-
-static void __kmp_check_mic_type()
-{
- kmp_cpuid_t cpuid_state = {0};
- kmp_cpuid_t * cs_p = &cpuid_state;
- __kmp_x86_cpuid(1, 0, cs_p);
- // We don't support mic1 at the moment
- if( (cs_p->eax & 0xff0) == 0xB10 ) {
- __kmp_mic_type = mic2;
- } else if( (cs_p->eax & 0xf0ff0) == 0x50670 ) {
- __kmp_mic_type = mic3;
- } else {
- __kmp_mic_type = non_mic;
- }
-}
-
-#endif /* KMP_ARCH_X86_64 && (KMP_OS_LINUX || KMP_OS_WINDOWS) */
-
-static void
-__kmp_do_serial_initialize( void )
-{
- int i, gtid;
- int size;
-
- KA_TRACE( 10, ("__kmp_do_serial_initialize: enter\n" ) );
-
- KMP_DEBUG_ASSERT( sizeof( kmp_int32 ) == 4 );
- KMP_DEBUG_ASSERT( sizeof( kmp_uint32 ) == 4 );
- KMP_DEBUG_ASSERT( sizeof( kmp_int64 ) == 8 );
- KMP_DEBUG_ASSERT( sizeof( kmp_uint64 ) == 8 );
- KMP_DEBUG_ASSERT( sizeof( kmp_intptr_t ) == sizeof( void * ) );
-
-#if OMPT_SUPPORT
- ompt_pre_init();
-#endif
-
- __kmp_validate_locks();
-
- /* Initialize internal memory allocator */
- __kmp_init_allocator();
-
- /* Register the library startup via an environment variable
- and check to see whether another copy of the library is already
- registered.
*/
-
- __kmp_register_library_startup( );
-
- /* TODO reinitialization of library */
- if( TCR_4(__kmp_global.g.g_done) ) {
- KA_TRACE( 10, ("__kmp_do_serial_initialize: reinitialization of library\n" ) );
- }
-
- __kmp_global.g.g_abort = 0;
- TCW_SYNC_4(__kmp_global.g.g_done, FALSE);
-
- /* initialize the locks */
-#if KMP_USE_ADAPTIVE_LOCKS
-#if KMP_DEBUG_ADAPTIVE_LOCKS
- __kmp_init_speculative_stats();
-#endif
-#endif
-#if KMP_STATS_ENABLED
- __kmp_stats_init();
-#endif
- __kmp_init_lock( & __kmp_global_lock );
- __kmp_init_queuing_lock( & __kmp_dispatch_lock );
- __kmp_init_lock( & __kmp_debug_lock );
- __kmp_init_atomic_lock( & __kmp_atomic_lock );
- __kmp_init_atomic_lock( & __kmp_atomic_lock_1i );
- __kmp_init_atomic_lock( & __kmp_atomic_lock_2i );
- __kmp_init_atomic_lock( & __kmp_atomic_lock_4i );
- __kmp_init_atomic_lock( & __kmp_atomic_lock_4r );
- __kmp_init_atomic_lock( & __kmp_atomic_lock_8i );
- __kmp_init_atomic_lock( & __kmp_atomic_lock_8r );
- __kmp_init_atomic_lock( & __kmp_atomic_lock_8c );
- __kmp_init_atomic_lock( & __kmp_atomic_lock_10r );
- __kmp_init_atomic_lock( & __kmp_atomic_lock_16r );
- __kmp_init_atomic_lock( & __kmp_atomic_lock_16c );
- __kmp_init_atomic_lock( & __kmp_atomic_lock_20c );
- __kmp_init_atomic_lock( & __kmp_atomic_lock_32c );
- __kmp_init_bootstrap_lock( & __kmp_forkjoin_lock );
- __kmp_init_bootstrap_lock( & __kmp_exit_lock );
-#if KMP_USE_MONITOR
- __kmp_init_bootstrap_lock( & __kmp_monitor_lock );
-#endif
- __kmp_init_bootstrap_lock( & __kmp_tp_cached_lock );
-
- /* conduct initialization and initial setup of configuration */
-
- __kmp_runtime_initialize();
-
-#if KMP_ARCH_X86_64 && (KMP_OS_LINUX || KMP_OS_WINDOWS)
- __kmp_check_mic_type();
-#endif
-
- // Some global variable initialization moved here from kmp_env_initialize()
-#ifdef KMP_DEBUG
- kmp_diag = 0;
-#endif
- __kmp_abort_delay = 0;
-
- // From __kmp_init_dflt_team_nth()
- /* assume the entire machine will be used */
- __kmp_dflt_team_nth_ub = __kmp_xproc;
- if( __kmp_dflt_team_nth_ub < KMP_MIN_NTH ) {
- __kmp_dflt_team_nth_ub = KMP_MIN_NTH;
- }
- if( __kmp_dflt_team_nth_ub > __kmp_sys_max_nth ) {
- __kmp_dflt_team_nth_ub = __kmp_sys_max_nth;
- }
- __kmp_max_nth = __kmp_sys_max_nth;
-
- // Three vars below moved here from __kmp_env_initialize() "KMP_BLOCKTIME" part
- __kmp_dflt_blocktime = KMP_DEFAULT_BLOCKTIME;
-#if KMP_USE_MONITOR
- __kmp_monitor_wakeups = KMP_WAKEUPS_FROM_BLOCKTIME( __kmp_dflt_blocktime, __kmp_monitor_wakeups );
- __kmp_bt_intervals = KMP_INTERVALS_FROM_BLOCKTIME( __kmp_dflt_blocktime, __kmp_monitor_wakeups );
-#endif
- // From "KMP_LIBRARY" part of __kmp_env_initialize()
- __kmp_library = library_throughput;
- // From KMP_SCHEDULE initialization
- __kmp_static = kmp_sch_static_balanced;
- // AC: do not use analytical here, because it is non-monotonic
- //__kmp_guided = kmp_sch_guided_iterative_chunked;
- //__kmp_auto = kmp_sch_guided_analytical_chunked; // AC: it is the default, no need to repeat assignment
- // Barrier initialization.
Moved here from __kmp_env_initialize() Barrier branch bit control and barrier method - // control parts - #if KMP_FAST_REDUCTION_BARRIER - #define kmp_reduction_barrier_gather_bb ((int)1) - #define kmp_reduction_barrier_release_bb ((int)1) - #define kmp_reduction_barrier_gather_pat bp_hyper_bar - #define kmp_reduction_barrier_release_pat bp_hyper_bar - #endif // KMP_FAST_REDUCTION_BARRIER - for ( i=bs_plain_barrier; i < bs_last_barrier; i++ ) { /* ... remainder of __kmp_do_serial_initialize and start of __kmp_do_middle_initialize missing from this extract ... */ - KMP_ASSERT( __kmp_xproc > 0 ); - if ( __kmp_avail_proc == 0 ) { - __kmp_avail_proc = __kmp_xproc; - } - - // If there were empty places in num_threads list (OMP_NUM_THREADS=,,2,3), correct them now - j = 0; - while ( ( j < __kmp_nested_nth.used ) && ! __kmp_nested_nth.nth[ j ] ) { - __kmp_nested_nth.nth[ j ] = __kmp_dflt_team_nth = __kmp_dflt_team_nth_ub = __kmp_avail_proc; - j++; - } - - if ( __kmp_dflt_team_nth == 0 ) { -#ifdef KMP_DFLT_NTH_CORES - // - // Default #threads = #cores - // - __kmp_dflt_team_nth = __kmp_ncores; - KA_TRACE( 20, ("__kmp_middle_initialize: setting __kmp_dflt_team_nth = __kmp_ncores (%d)\n", - __kmp_dflt_team_nth ) ); -#else - // - // Default #threads = #available OS procs - // - __kmp_dflt_team_nth = __kmp_avail_proc; - KA_TRACE( 20, ("__kmp_middle_initialize: setting __kmp_dflt_team_nth = __kmp_avail_proc(%d)\n", - __kmp_dflt_team_nth ) ); -#endif /* KMP_DFLT_NTH_CORES */ - } - - if ( __kmp_dflt_team_nth < KMP_MIN_NTH ) { - __kmp_dflt_team_nth = KMP_MIN_NTH; - } - if( __kmp_dflt_team_nth > __kmp_sys_max_nth ) { - __kmp_dflt_team_nth = __kmp_sys_max_nth; - } - - // - // There's no harm in continuing if the following check fails, - // but it indicates an error in the previous logic. - // - KMP_DEBUG_ASSERT( __kmp_dflt_team_nth <= __kmp_dflt_team_nth_ub ); - - if ( __kmp_dflt_team_nth != prev_dflt_team_nth ) { - // - // Run through the __kmp_threads array and set the num threads icv - // for each root thread that is currently registered with the RTL - // (which has not already explicitly set its nthreads-var with a - // call to omp_set_num_threads()).
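// A self-contained sketch of the "correct empty places" step above, assuming
// the parser stored 0 for each empty leading slot of OMP_NUM_THREADS
// (e.g. ",,2,3") to be patched later with the processor count:
#include <stdio.h>

int main( void ) {
    int nth[ 4 ] = { 0, 0, 2, 3 };   // parsed from OMP_NUM_THREADS=,,2,3
    int used = 4;
    int avail_proc = 8;              // stand-in for __kmp_avail_proc
    int j = 0;
    while ( j < used && ! nth[ j ] ) {
        nth[ j ] = avail_proc;       // leading placeholder -> all available procs
        j++;
    }
    for ( j = 0; j < used; j++ )
        printf( "level %d: %d threads\n", j, nth[ j ] );   // prints 8, 8, 2, 3
    return 0;
}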
- // - for ( i = 0; i < __kmp_threads_capacity; i++ ) { - kmp_info_t *thread = __kmp_threads[ i ]; - if ( thread == NULL ) continue; - if ( thread->th.th_current_task->td_icvs.nproc != 0 ) continue; - - set__nproc( __kmp_threads[ i ], __kmp_dflt_team_nth ); - } - } - KA_TRACE( 20, ("__kmp_middle_initialize: final value for __kmp_dflt_team_nth = %d\n", - __kmp_dflt_team_nth) ); - -#ifdef KMP_ADJUST_BLOCKTIME - /* Adjust blocktime to zero if necessary */ - /* now that __kmp_avail_proc is set */ - if ( !__kmp_env_blocktime && ( __kmp_avail_proc > 0 ) ) { - KMP_DEBUG_ASSERT( __kmp_avail_proc > 0 ); - if ( __kmp_nth > __kmp_avail_proc ) { - __kmp_zero_bt = TRUE; - } - } -#endif /* KMP_ADJUST_BLOCKTIME */ - - /* we have finished middle initialization */ - TCW_SYNC_4(__kmp_init_middle, TRUE); - - KA_TRACE( 10, ("__kmp_do_middle_initialize: exit\n" ) ); -} - -void -__kmp_middle_initialize( void ) -{ - if ( __kmp_init_middle ) { - return; - } - __kmp_acquire_bootstrap_lock( &__kmp_initz_lock ); - if ( __kmp_init_middle ) { - __kmp_release_bootstrap_lock( &__kmp_initz_lock ); - return; - } - __kmp_do_middle_initialize(); - __kmp_release_bootstrap_lock( &__kmp_initz_lock ); -} - -void -__kmp_parallel_initialize( void ) -{ - int gtid = __kmp_entry_gtid(); // this might be a new root - - /* synchronize parallel initialization (for sibling) */ - if( TCR_4(__kmp_init_parallel) ) return; - __kmp_acquire_bootstrap_lock( &__kmp_initz_lock ); - if( TCR_4(__kmp_init_parallel) ) { __kmp_release_bootstrap_lock( &__kmp_initz_lock ); return; } - - /* TODO reinitialization after we have already shut down */ - if( TCR_4(__kmp_global.g.g_done) ) { - KA_TRACE( 10, ("__kmp_parallel_initialize: attempt to init while shutting down\n" ) ); - __kmp_infinite_loop(); - } - - /* jc: The lock __kmp_initz_lock is already held, so calling __kmp_serial_initialize - would cause a deadlock. So we call __kmp_do_serial_initialize directly. - */ - if( !__kmp_init_middle ) { - __kmp_do_middle_initialize(); - } - - /* begin initialization */ - KA_TRACE( 10, ("__kmp_parallel_initialize: enter\n" ) ); - KMP_ASSERT( KMP_UBER_GTID( gtid ) ); - -#if KMP_ARCH_X86 || KMP_ARCH_X86_64 - // - // Save the FP control regs. - // Worker threads will set theirs to these values at thread startup. - // - __kmp_store_x87_fpu_control_word( &__kmp_init_x87_fpu_control_word ); - __kmp_store_mxcsr( &__kmp_init_mxcsr ); - __kmp_init_mxcsr &= KMP_X86_MXCSR_MASK; -#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */ - -#if KMP_OS_UNIX -# if KMP_HANDLE_SIGNALS - /* must be after __kmp_serial_initialize */ - __kmp_install_signals( TRUE ); -# endif -#endif - - __kmp_suspend_initialize(); - -#if defined(USE_LOAD_BALANCE) - if ( __kmp_global.g.g_dynamic_mode == dynamic_default ) { - __kmp_global.g.g_dynamic_mode = dynamic_load_balance; - } -#else - if ( __kmp_global.g.g_dynamic_mode == dynamic_default ) { - __kmp_global.g.g_dynamic_mode = dynamic_thread_limit; - } -#endif - - if ( __kmp_version ) { - __kmp_print_version_2(); - } - - /* we have finished parallel initialization */ - TCW_SYNC_4(__kmp_init_parallel, TRUE); - - KMP_MB(); - KA_TRACE( 10, ("__kmp_parallel_initialize: exit\n" ) ); - - __kmp_release_bootstrap_lock( &__kmp_initz_lock ); -} - - -/* ------------------------------------------------------------------------ */ - -void -__kmp_run_before_invoked_task( int gtid, int tid, kmp_info_t *this_thr, - kmp_team_t *team ) -{ - kmp_disp_t *dispatch; - - KMP_MB(); - - /* none of the threads have encountered any constructs, yet. 
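// The once-only pattern used by __kmp_middle_initialize and
// __kmp_parallel_initialize above, reduced to a minimal sketch with C11
// atomics and a pthread mutex standing in for the bootstrap lock (the real
// code uses TCR_4/TCW_SYNC_4 and its own lock primitives):
#include <pthread.h>
#include <stdatomic.h>

static atomic_int init_done;
static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;

static void do_initialize( void ) { /* one-time setup goes here */ }

void ensure_initialized( void ) {
    if ( atomic_load( &init_done ) )      // fast path: already initialized
        return;
    pthread_mutex_lock( &init_lock );
    if ( ! atomic_load( &init_done ) ) {  // re-check under the lock
        do_initialize();
        atomic_store( &init_done, 1 );
    }
    pthread_mutex_unlock( &init_lock );
}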
*/ - this_thr->th.th_local.this_construct = 0; -#if KMP_CACHE_MANAGE - KMP_CACHE_PREFETCH( &this_thr->th.th_bar[ bs_forkjoin_barrier ].bb.b_arrived ); -#endif /* KMP_CACHE_MANAGE */ - dispatch = (kmp_disp_t *)TCR_PTR(this_thr->th.th_dispatch); - KMP_DEBUG_ASSERT( dispatch ); - KMP_DEBUG_ASSERT( team->t.t_dispatch ); - //KMP_DEBUG_ASSERT( this_thr->th.th_dispatch == &team->t.t_dispatch[ this_thr->th.th_info.ds.ds_tid ] ); - - dispatch->th_disp_index = 0; /* reset the dispatch buffer counter */ -#if OMP_45_ENABLED - dispatch->th_doacross_buf_idx = 0; /* reset the doacross dispatch buffer counter */ -#endif - if( __kmp_env_consistency_check ) - __kmp_push_parallel( gtid, team->t.t_ident ); - - KMP_MB(); /* Flush all pending memory write invalidates. */ -} - -void -__kmp_run_after_invoked_task( int gtid, int tid, kmp_info_t *this_thr, - kmp_team_t *team ) -{ - if( __kmp_env_consistency_check ) - __kmp_pop_parallel( gtid, team->t.t_ident ); - - __kmp_finish_implicit_task(this_thr); -} - -int -__kmp_invoke_task_func( int gtid ) -{ - int rc; - int tid = __kmp_tid_from_gtid( gtid ); - kmp_info_t *this_thr = __kmp_threads[ gtid ]; - kmp_team_t *team = this_thr->th.th_team; - - __kmp_run_before_invoked_task( gtid, tid, this_thr, team ); -#if USE_ITT_BUILD - if ( __itt_stack_caller_create_ptr ) { - __kmp_itt_stack_callee_enter( (__itt_caller)team->t.t_stack_id ); // inform ittnotify about entering user's code - } -#endif /* USE_ITT_BUILD */ -#if INCLUDE_SSC_MARKS - SSC_MARK_INVOKING(); -#endif - -#if OMPT_SUPPORT - void *dummy; - void **exit_runtime_p; - ompt_task_id_t my_task_id; - ompt_parallel_id_t my_parallel_id; - - if (ompt_enabled) { - exit_runtime_p = &(team->t.t_implicit_task_taskdata[tid]. - ompt_task_info.frame.exit_runtime_frame); - } else { - exit_runtime_p = &dummy; - } - -#if OMPT_TRACE - my_task_id = team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_id; - my_parallel_id = team->t.ompt_team_info.parallel_id; - if (ompt_enabled && - ompt_callbacks.ompt_callback(ompt_event_implicit_task_begin)) { - ompt_callbacks.ompt_callback(ompt_event_implicit_task_begin)( - my_parallel_id, my_task_id); - } -#endif -#endif - - { - KMP_TIME_PARTITIONED_BLOCK(OMP_parallel); - KMP_SET_THREAD_STATE_BLOCK(IMPLICIT_TASK); - rc = __kmp_invoke_microtask( (microtask_t) TCR_SYNC_PTR(team->t.t_pkfn), - gtid, tid, (int) team->t.t_argc, (void **) team->t.t_argv -#if OMPT_SUPPORT - , exit_runtime_p -#endif - ); -#if OMPT_SUPPORT - *exit_runtime_p = NULL; -#endif - } - -#if USE_ITT_BUILD - if ( __itt_stack_caller_create_ptr ) { - __kmp_itt_stack_callee_leave( (__itt_caller)team->t.t_stack_id ); // inform ittnotify about leaving user's code - } -#endif /* USE_ITT_BUILD */ - __kmp_run_after_invoked_task( gtid, tid, this_thr, team ); - - return rc; -} - -#if OMP_40_ENABLED -void -__kmp_teams_master( int gtid ) -{ - // This routine is called by all master threads in teams construct - kmp_info_t *thr = __kmp_threads[ gtid ]; - kmp_team_t *team = thr->th.th_team; - ident_t *loc = team->t.t_ident; - thr->th.th_set_nproc = thr->th.th_teams_size.nth; - KMP_DEBUG_ASSERT( thr->th.th_teams_microtask ); - KMP_DEBUG_ASSERT( thr->th.th_set_nproc ); - KA_TRACE( 20, ("__kmp_teams_master: T#%d, Tid %d, microtask %p\n", - gtid, __kmp_tid_from_gtid( gtid ), thr->th.th_teams_microtask ) ); - // Launch league of teams now, but not let workers execute - // (they hang on fork barrier until next parallel) -#if INCLUDE_SSC_MARKS - SSC_MARK_FORKING(); -#endif - __kmp_fork_call( loc, gtid, fork_context_intel, - team->t.t_argc, -#if 
OMPT_SUPPORT - (void *)thr->th.th_teams_microtask, // "unwrapped" task -#endif - (microtask_t)thr->th.th_teams_microtask, // "wrapped" task - VOLATILE_CAST(launch_t) __kmp_invoke_task_func, - NULL ); -#if INCLUDE_SSC_MARKS - SSC_MARK_JOINING(); -#endif - - // AC: last parameter "1" eliminates join barrier which won't work because - // worker threads are in a fork barrier waiting for more parallel regions - __kmp_join_call( loc, gtid -#if OMPT_SUPPORT - , fork_context_intel -#endif - , 1 ); -} - -int -__kmp_invoke_teams_master( int gtid ) -{ - kmp_info_t *this_thr = __kmp_threads[ gtid ]; - kmp_team_t *team = this_thr->th.th_team; - #if KMP_DEBUG - if ( !__kmp_threads[gtid]-> th.th_team->t.t_serialized ) - KMP_DEBUG_ASSERT( (void*)__kmp_threads[gtid]-> th.th_team->t.t_pkfn == (void*)__kmp_teams_master ); - #endif - __kmp_run_before_invoked_task( gtid, 0, this_thr, team ); - __kmp_teams_master( gtid ); - __kmp_run_after_invoked_task( gtid, 0, this_thr, team ); - return 1; -} -#endif /* OMP_40_ENABLED */ - -/* this sets the requested number of threads for the next parallel region - * encountered by this team */ -/* since this should be enclosed in the forkjoin critical section it - * should avoid race conditions with asymmetrical nested parallelism */ - -void -__kmp_push_num_threads( ident_t *id, int gtid, int num_threads ) -{ - kmp_info_t *thr = __kmp_threads[gtid]; - - if( num_threads > 0 ) - thr->th.th_set_nproc = num_threads; -} - -#if OMP_40_ENABLED - -/* this sets the requested number of teams for the teams region and/or - * the number of threads for the next parallel region encountered */ -void -__kmp_push_num_teams( ident_t *id, int gtid, int num_teams, int num_threads ) -{ - kmp_info_t *thr = __kmp_threads[gtid]; - KMP_DEBUG_ASSERT(num_teams >= 0); - KMP_DEBUG_ASSERT(num_threads >= 0); - - if( num_teams == 0 ) - num_teams = 1; // default number of teams is 1. - if( num_teams > __kmp_max_nth ) { // too many teams requested - if ( !__kmp_reserve_warn ) { - __kmp_reserve_warn = 1; - __kmp_msg( - kmp_ms_warning, - KMP_MSG( CantFormThrTeam, num_teams, __kmp_max_nth ), - KMP_HNT( Unset_ALL_THREADS ), - __kmp_msg_null - ); - } - num_teams = __kmp_max_nth; - } - // Set number of teams (number of threads in the outer "parallel" of the teams) - thr->th.th_set_nproc = thr->th.th_teams_size.nteams = num_teams; - - // Remember the number of threads for inner parallel regions - if( num_threads == 0 ) { - if( !TCR_4(__kmp_init_middle) ) - __kmp_middle_initialize(); // get __kmp_avail_proc calculated - num_threads = __kmp_avail_proc / num_teams; - if( num_teams * num_threads > __kmp_max_nth ) { - // adjust num_threads w/o warning as it is not a user setting - num_threads = __kmp_max_nth / num_teams; - } - } else { - if( num_teams * num_threads > __kmp_max_nth ) { - int new_threads = __kmp_max_nth / num_teams; - if ( !__kmp_reserve_warn ) { // user asked for too many threads - __kmp_reserve_warn = 1; // that conflicts with OMP_THREAD_LIMIT - __kmp_msg( - kmp_ms_warning, - KMP_MSG( CantFormThrTeam, num_threads, new_threads ), - KMP_HNT( Unset_ALL_THREADS ), - __kmp_msg_null - ); - } - num_threads = new_threads; - } - } - thr->th.th_teams_size.nth = num_threads; -} - - -// -// Set the proc_bind var to use in the following parallel region. -// -void -__kmp_push_proc_bind( ident_t *id, int gtid, kmp_proc_bind_t proc_bind ) -{ - kmp_info_t *thr = __kmp_threads[gtid]; - thr->th.th_set_proc_bind = proc_bind; -} - -#endif /* OMP_40_ENABLED */ - -/* Launch the worker threads into the microtask.
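// Worked sketch of the sizing rules in __kmp_push_num_teams above: when
// num_threads is unspecified the per-team size defaults to
// avail_proc / num_teams, and in both branches the product
// num_teams * num_threads is kept under the thread limit.
#include <stdio.h>

int main( void ) {
    int avail_proc = 16, max_nth = 64;   // stand-ins for __kmp_avail_proc, __kmp_max_nth
    int num_teams = 4, num_threads = 0;  // num_threads == 0 means "not specified"
    if ( num_threads == 0 )
        num_threads = avail_proc / num_teams;   // 16 / 4 = 4
    if ( num_teams * num_threads > max_nth )
        num_threads = max_nth / num_teams;      // clamp to respect the limit
    printf( "%d teams x %d threads\n", num_teams, num_threads );  // 4 teams x 4 threads
    return 0;
}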
*/ - -void -__kmp_internal_fork( ident_t *id, int gtid, kmp_team_t *team ) -{ - kmp_info_t *this_thr = __kmp_threads[gtid]; - -#ifdef KMP_DEBUG - int f; -#endif /* KMP_DEBUG */ - - KMP_DEBUG_ASSERT( team ); - KMP_DEBUG_ASSERT( this_thr->th.th_team == team ); - KMP_ASSERT( KMP_MASTER_GTID(gtid) ); - KMP_MB(); /* Flush all pending memory write invalidates. */ - - team->t.t_construct = 0; /* no single directives seen yet */ - team->t.t_ordered.dt.t_value = 0; /* thread 0 enters the ordered section first */ - - /* Reset the identifiers on the dispatch buffer */ - KMP_DEBUG_ASSERT( team->t.t_disp_buffer ); - if ( team->t.t_max_nproc > 1 ) { - int i; - for (i = 0; i < __kmp_dispatch_num_buffers; ++i) { - team->t.t_disp_buffer[ i ].buffer_index = i; -#if OMP_45_ENABLED - team->t.t_disp_buffer[i].doacross_buf_idx = i; -#endif - } - } else { - team->t.t_disp_buffer[ 0 ].buffer_index = 0; -#if OMP_45_ENABLED - team->t.t_disp_buffer[0].doacross_buf_idx = 0; -#endif - } - - KMP_MB(); /* Flush all pending memory write invalidates. */ - KMP_ASSERT( this_thr->th.th_team == team ); - -#ifdef KMP_DEBUG - for( f=0 ; f < team->t.t_nproc ; f++ ) { - KMP_DEBUG_ASSERT( team->t.t_threads[f] && - team->t.t_threads[f]->th.th_team_nproc == team->t.t_nproc ); - } -#endif /* KMP_DEBUG */ - - /* release the worker threads so they may begin working */ - __kmp_fork_barrier( gtid, 0 ); -} - - -void -__kmp_internal_join( ident_t *id, int gtid, kmp_team_t *team ) -{ - kmp_info_t *this_thr = __kmp_threads[gtid]; - - KMP_DEBUG_ASSERT( team ); - KMP_DEBUG_ASSERT( this_thr->th.th_team == team ); - KMP_ASSERT( KMP_MASTER_GTID(gtid) ); - KMP_MB(); /* Flush all pending memory write invalidates. */ - - /* Join barrier after fork */ - -#ifdef KMP_DEBUG - if (__kmp_threads[gtid] && __kmp_threads[gtid]->th.th_team_nproc != team->t.t_nproc ) { - __kmp_printf("GTID: %d, __kmp_threads[%d]=%p\n",gtid, gtid, __kmp_threads[gtid]); - __kmp_printf("__kmp_threads[%d]->th.th_team_nproc=%d, TEAM: %p, team->t.t_nproc=%d\n", - gtid, __kmp_threads[gtid]->th.th_team_nproc, team, team->t.t_nproc); - __kmp_print_structure(); - } - KMP_DEBUG_ASSERT( __kmp_threads[gtid] && - __kmp_threads[gtid]->th.th_team_nproc == team->t.t_nproc ); -#endif /* KMP_DEBUG */ - - __kmp_join_barrier( gtid ); /* wait for everyone */ - - KMP_MB(); /* Flush all pending memory write invalidates. */ - KMP_ASSERT( this_thr->th.th_team == team ); -} - - -/* ------------------------------------------------------------------------ */ -/* ------------------------------------------------------------------------ */ - -#ifdef USE_LOAD_BALANCE - -// -// Return the worker threads actively spinning in the hot team, if we -// are at the outermost level of parallelism. Otherwise, return 0. -// -static int -__kmp_active_hot_team_nproc( kmp_root_t *root ) -{ - int i; - int retval; - kmp_team_t *hot_team; - - if ( root->r.r_active ) { - return 0; - } - hot_team = root->r.r_hot_team; - if ( __kmp_dflt_blocktime == KMP_MAX_BLOCKTIME ) { - return hot_team->t.t_nproc - 1; // Don't count master thread - } - - // - // Skip the master thread - it is accounted for elsewhere. - // - retval = 0; - for ( i = 1; i < hot_team->t.t_nproc; i++ ) { - if ( hot_team->t.t_threads[i]->th.th_active ) { - retval++; - } - } - return retval; -} - -// -// Perform an automatic adjustment to the number of -// threads used by the next parallel region.
-// -static int -__kmp_load_balance_nproc( kmp_root_t *root, int set_nproc ) -{ - int retval; - int pool_active; - int hot_team_active; - int team_curr_active; - int system_active; - - KB_TRACE( 20, ("__kmp_load_balance_nproc: called root:%p set_nproc:%d\n", - root, set_nproc ) ); - KMP_DEBUG_ASSERT( root ); - KMP_DEBUG_ASSERT( root->r.r_root_team->t.t_threads[0]->th.th_current_task->td_icvs.dynamic == TRUE ); - KMP_DEBUG_ASSERT( set_nproc > 1 ); - - if ( set_nproc == 1) { - KB_TRACE( 20, ("__kmp_load_balance_nproc: serial execution.\n" ) ); - return 1; - } - - // - // Threads that are active in the thread pool, active in the hot team - // for this particular root (if we are at the outer par level), and - // the currently executing thread (to become the master) are available - // to add to the new team, but are currently contributing to the system - // load, and must be accounted for. - // - pool_active = TCR_4(__kmp_thread_pool_active_nth); - hot_team_active = __kmp_active_hot_team_nproc( root ); - team_curr_active = pool_active + hot_team_active + 1; - - // - // Check the system load. - // - system_active = __kmp_get_load_balance( __kmp_avail_proc + team_curr_active ); - KB_TRACE( 30, ("__kmp_load_balance_nproc: system active = %d pool active = %d hot team active = %d\n", - system_active, pool_active, hot_team_active ) ); - - if ( system_active < 0 ) { - // - // There was an error reading the necessary info from /proc, - // so use the thread limit algorithm instead. Once we set - // __kmp_global.g.g_dynamic_mode = dynamic_thread_limit, - // we shouldn't wind up getting back here. - // - __kmp_global.g.g_dynamic_mode = dynamic_thread_limit; - KMP_WARNING( CantLoadBalUsing, "KMP_DYNAMIC_MODE=thread limit" ); - - // - // Make this call behave like the thread limit algorithm. - // - retval = __kmp_avail_proc - __kmp_nth + (root->r.r_active ? 1 - : root->r.r_hot_team->t.t_nproc); - if ( retval > set_nproc ) { - retval = set_nproc; - } - if ( retval < KMP_MIN_NTH ) { - retval = KMP_MIN_NTH; - } - - KB_TRACE( 20, ("__kmp_load_balance_nproc: thread limit exit. retval:%d\n", retval ) ); - return retval; - } - - // - // There is a slight delay in the load balance algorithm in detecting - // new running procs. The real system load at this instant should be - // at least as large as the #active omp thread that are available to - // add to the team. - // - if ( system_active < team_curr_active ) { - system_active = team_curr_active; - } - retval = __kmp_avail_proc - system_active + team_curr_active; - if ( retval > set_nproc ) { - retval = set_nproc; - } - if ( retval < KMP_MIN_NTH ) { - retval = KMP_MIN_NTH; - } - - KB_TRACE( 20, ("__kmp_load_balance_nproc: exit. 
retval:%d\n", retval ) ); - return retval; -} // __kmp_load_balance_nproc() - -#endif /* USE_LOAD_BALANCE */ - -/* ------------------------------------------------------------------------ */ -/* ------------------------------------------------------------------------ */ - -/* NOTE: this is called with the __kmp_init_lock held */ -void -__kmp_cleanup( void ) -{ - int f; - - KA_TRACE( 10, ("__kmp_cleanup: enter\n" ) ); - - if (TCR_4(__kmp_init_parallel)) { -#if KMP_HANDLE_SIGNALS - __kmp_remove_signals(); -#endif - TCW_4(__kmp_init_parallel, FALSE); - } - - if (TCR_4(__kmp_init_middle)) { -#if KMP_AFFINITY_SUPPORTED - __kmp_affinity_uninitialize(); -#endif /* KMP_AFFINITY_SUPPORTED */ - __kmp_cleanup_hierarchy(); - TCW_4(__kmp_init_middle, FALSE); - } - - KA_TRACE( 10, ("__kmp_cleanup: go serial cleanup\n" ) ); - - if (__kmp_init_serial) { - __kmp_runtime_destroy(); - __kmp_init_serial = FALSE; - } - - for ( f = 0; f < __kmp_threads_capacity; f++ ) { - if ( __kmp_root[ f ] != NULL ) { - __kmp_free( __kmp_root[ f ] ); - __kmp_root[ f ] = NULL; - } - } - __kmp_free( __kmp_threads ); - // __kmp_threads and __kmp_root were allocated at once, as single block, so there is no need in - // freeing __kmp_root. - __kmp_threads = NULL; - __kmp_root = NULL; - __kmp_threads_capacity = 0; - -#if KMP_USE_DYNAMIC_LOCK - __kmp_cleanup_indirect_user_locks(); -#else - __kmp_cleanup_user_locks(); -#endif - - #if KMP_AFFINITY_SUPPORTED - KMP_INTERNAL_FREE( (void *) __kmp_cpuinfo_file ); - __kmp_cpuinfo_file = NULL; - #endif /* KMP_AFFINITY_SUPPORTED */ - - #if KMP_USE_ADAPTIVE_LOCKS - #if KMP_DEBUG_ADAPTIVE_LOCKS - __kmp_print_speculative_stats(); - #endif - #endif - KMP_INTERNAL_FREE( __kmp_nested_nth.nth ); - __kmp_nested_nth.nth = NULL; - __kmp_nested_nth.size = 0; - __kmp_nested_nth.used = 0; - - __kmp_i18n_catclose(); - -#if KMP_STATS_ENABLED - __kmp_stats_fini(); -#endif - - KA_TRACE( 10, ("__kmp_cleanup: exit\n" ) ); -} - -/* ------------------------------------------------------------------------ */ -/* ------------------------------------------------------------------------ */ - -int -__kmp_ignore_mppbeg( void ) -{ - char *env; - - if ((env = getenv( "KMP_IGNORE_MPPBEG" )) != NULL) { - if (__kmp_str_match_false( env )) - return FALSE; - } - // By default __kmpc_begin() is no-op. - return TRUE; -} - -int -__kmp_ignore_mppend( void ) -{ - char *env; - - if ((env = getenv( "KMP_IGNORE_MPPEND" )) != NULL) { - if (__kmp_str_match_false( env )) - return FALSE; - } - // By default __kmpc_end() is no-op. 
- return TRUE; -} - -void -__kmp_internal_begin( void ) -{ - int gtid; - kmp_root_t *root; - - /* this is a very important step as it will register new sibling threads - * and assign these new uber threads a new gtid */ - gtid = __kmp_entry_gtid(); - root = __kmp_threads[ gtid ]->th.th_root; - KMP_ASSERT( KMP_UBER_GTID( gtid )); - - if( root->r.r_begin ) return; - __kmp_acquire_lock( &root->r.r_begin_lock, gtid ); - if( root->r.r_begin ) { - __kmp_release_lock( & root->r.r_begin_lock, gtid ); - return; - } - - root->r.r_begin = TRUE; - - __kmp_release_lock( & root->r.r_begin_lock, gtid ); -} - - -/* ------------------------------------------------------------------------ */ -/* ------------------------------------------------------------------------ */ - -void -__kmp_user_set_library (enum library_type arg) -{ - int gtid; - kmp_root_t *root; - kmp_info_t *thread; - - /* first, make sure we are initialized so we can get our gtid */ - - gtid = __kmp_entry_gtid(); - thread = __kmp_threads[ gtid ]; - - root = thread->th.th_root; - - KA_TRACE( 20, ("__kmp_user_set_library: enter T#%d, arg: %d, %d\n", gtid, arg, library_serial )); - if (root->r.r_in_parallel) { /* Must be called in serial section of top-level thread */ - KMP_WARNING( SetLibraryIncorrectCall ); - return; - } - - switch ( arg ) { - case library_serial : - thread->th.th_set_nproc = 0; - set__nproc( thread, 1 ); - break; - case library_turnaround : - thread->th.th_set_nproc = 0; - set__nproc( thread, __kmp_dflt_team_nth ? __kmp_dflt_team_nth : __kmp_dflt_team_nth_ub ); - break; - case library_throughput : - thread->th.th_set_nproc = 0; - set__nproc( thread, __kmp_dflt_team_nth ? __kmp_dflt_team_nth : __kmp_dflt_team_nth_ub ); - break; - default: - KMP_FATAL( UnknownLibraryType, arg ); - } - - __kmp_aux_set_library ( arg ); -} - -void -__kmp_aux_set_stacksize( size_t arg ) -{ - if (! __kmp_init_serial) - __kmp_serial_initialize(); - -#if KMP_OS_DARWIN - if (arg & (0x1000 - 1)) { - arg &= ~(0x1000 - 1); - if(arg + 0x1000) /* check for overflow if we round up */ - arg += 0x1000; - } -#endif - __kmp_acquire_bootstrap_lock( &__kmp_initz_lock ); - - /* only change the default stacksize before the first parallel region */ - if (! TCR_4(__kmp_init_parallel)) { - size_t value = arg; /* argument is in bytes */ - - if (value < __kmp_sys_min_stksize ) - value = __kmp_sys_min_stksize ; - else if (value > KMP_MAX_STKSIZE) - value = KMP_MAX_STKSIZE; - - __kmp_stksize = value; - - __kmp_env_stksize = TRUE; /* was KMP_STACKSIZE specified? */ - } - - __kmp_release_bootstrap_lock( &__kmp_initz_lock ); -} - -/* set the behaviour of the runtime library */ -/* TODO this can cause some odd behaviour with sibling parallelism... 
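// The Darwin page rounding above as a standalone helper (behaviorally the
// same as the __kmp_round4k helper deleted from kmp_settings.c below):
// round up to the next 4K boundary, but only when the addition cannot
// overflow; otherwise keep the truncated value.
#include <stddef.h>
#include <stdint.h>

static size_t round_up_4k( size_t size ) {
    const size_t page = 0x1000;
    if ( size & ( page - 1 ) ) {          // not already page aligned
        size &= ~( page - 1 );            // truncate down to a page boundary
        if ( size <= SIZE_MAX - page )    // round up only if it cannot overflow
            size += page;
    }
    return size;
}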
*/ -void -__kmp_aux_set_library (enum library_type arg) -{ - __kmp_library = arg; - - switch ( __kmp_library ) { - case library_serial : - { - KMP_INFORM( LibraryIsSerial ); - (void) __kmp_change_library( TRUE ); - } - break; - case library_turnaround : - (void) __kmp_change_library( TRUE ); - break; - case library_throughput : - (void) __kmp_change_library( FALSE ); - break; - default: - KMP_FATAL( UnknownLibraryType, arg ); - } -} - -/* ------------------------------------------------------------------------ */ -/* ------------------------------------------------------------------------ */ - -void -__kmp_aux_set_blocktime (int arg, kmp_info_t *thread, int tid) -{ - int blocktime = arg; /* argument is in milliseconds */ -#if KMP_USE_MONITOR - int bt_intervals; -#endif - int bt_set; - - __kmp_save_internal_controls( thread ); - - /* Normalize and set blocktime for the teams */ - if (blocktime < KMP_MIN_BLOCKTIME) - blocktime = KMP_MIN_BLOCKTIME; - else if (blocktime > KMP_MAX_BLOCKTIME) - blocktime = KMP_MAX_BLOCKTIME; - - set__blocktime_team( thread->th.th_team, tid, blocktime ); - set__blocktime_team( thread->th.th_serial_team, 0, blocktime ); - -#if KMP_USE_MONITOR - /* Calculate and set blocktime intervals for the teams */ - bt_intervals = KMP_INTERVALS_FROM_BLOCKTIME(blocktime, __kmp_monitor_wakeups); - - set__bt_intervals_team( thread->th.th_team, tid, bt_intervals ); - set__bt_intervals_team( thread->th.th_serial_team, 0, bt_intervals ); -#endif - - /* Set whether blocktime has been set to "TRUE" */ - bt_set = TRUE; - - set__bt_set_team( thread->th.th_team, tid, bt_set ); - set__bt_set_team( thread->th.th_serial_team, 0, bt_set ); -#if KMP_USE_MONITOR - KF_TRACE(10, ("kmp_set_blocktime: T#%d(%d:%d), blocktime=%d, " - "bt_intervals=%d, monitor_updates=%d\n", - __kmp_gtid_from_tid(tid, thread->th.th_team), - thread->th.th_team->t.t_id, tid, blocktime, bt_intervals, - __kmp_monitor_wakeups)); -#else - KF_TRACE(10, ("kmp_set_blocktime: T#%d(%d:%d), blocktime=%d\n", - __kmp_gtid_from_tid(tid, thread->th.th_team), - thread->th.th_team->t.t_id, tid, blocktime)); -#endif -} - -void -__kmp_aux_set_defaults( - char const * str, - int len -) { - if ( ! __kmp_init_serial ) { - __kmp_serial_initialize(); - }; - __kmp_env_initialize( str ); - - if (__kmp_settings -#if OMP_40_ENABLED - || __kmp_display_env || __kmp_display_env_verbose -#endif // OMP_40_ENABLED - ) { - __kmp_env_print(); - } -} // __kmp_aux_set_defaults - -/* ------------------------------------------------------------------------ */ - -/* - * internal fast reduction routines - */ - -PACKED_REDUCTION_METHOD_T -__kmp_determine_reduction_method( ident_t *loc, kmp_int32 global_tid, - kmp_int32 num_vars, size_t reduce_size, void *reduce_data, void (*reduce_func)(void *lhs_data, void *rhs_data), - kmp_critical_name *lck ) -{ - - // Default reduction method: critical construct ( lck != NULL, like in current PAROPT ) - // If ( reduce_data!=NULL && reduce_func!=NULL ): the tree-reduction method can be selected by RTL - // If loc->flags contains KMP_IDENT_ATOMIC_REDUCE, the atomic reduce method can be selected by RTL - // Finally, it's up to OpenMP RTL to make a decision on which method to select among generated by PAROPT. 
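// A condensed sketch of the choice made below for one configuration
// (x86_64 Linux): serialized teams need no synchronization, small teams
// prefer the atomic method when the compiler generated one, larger teams
// prefer the tree + reduction-barrier path, and the critical-section
// method is the always-valid fallback.
enum red_method { METHOD_CRITICAL, METHOD_EMPTY, METHOD_ATOMIC, METHOD_TREE };

static enum red_method pick_reduction_method( int team_size, int atomic_available,
                                              int tree_available, int cutoff ) {
    if ( team_size == 1 )
        return METHOD_EMPTY;                    // serialized: no synchronization
    if ( tree_available && team_size > cutoff )
        return METHOD_TREE;                     // big team: tree reduction
    if ( atomic_available )
        return METHOD_ATOMIC;                   // small team: atomic updates
    return METHOD_CRITICAL;                     // fallback
}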
- - PACKED_REDUCTION_METHOD_T retval; - - int team_size; - - KMP_DEBUG_ASSERT( loc ); // it would be nice to test ( loc != 0 ) - KMP_DEBUG_ASSERT( lck ); // it would be nice to test ( lck != 0 ) - - #define FAST_REDUCTION_ATOMIC_METHOD_GENERATED ( ( loc->flags & ( KMP_IDENT_ATOMIC_REDUCE ) ) == ( KMP_IDENT_ATOMIC_REDUCE ) ) - #define FAST_REDUCTION_TREE_METHOD_GENERATED ( ( reduce_data ) && ( reduce_func ) ) - - retval = critical_reduce_block; - - team_size = __kmp_get_team_num_threads( global_tid ); // another choice of getting a team size ( with 1 dynamic dereference ) is slower - - if( team_size == 1 ) { - - retval = empty_reduce_block; - - } else { - - int atomic_available = FAST_REDUCTION_ATOMIC_METHOD_GENERATED; - int tree_available = FAST_REDUCTION_TREE_METHOD_GENERATED; - - #if KMP_ARCH_X86_64 || KMP_ARCH_PPC64 || KMP_ARCH_AARCH64 || KMP_ARCH_MIPS64 - - #if KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD || KMP_OS_WINDOWS || KMP_OS_DARWIN - - int teamsize_cutoff = 4; - -#if KMP_ARCH_X86_64 && (KMP_OS_LINUX || KMP_OS_WINDOWS) - if( __kmp_mic_type != non_mic ) { - teamsize_cutoff = 8; - } -#endif - if( tree_available ) { - if( team_size <= teamsize_cutoff ) { - if ( atomic_available ) { - retval = atomic_reduce_block; - } - } else { - retval = TREE_REDUCE_BLOCK_WITH_REDUCTION_BARRIER; - } - } else if ( atomic_available ) { - retval = atomic_reduce_block; - } - #else - #error "Unknown or unsupported OS" - #endif // KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD || KMP_OS_WINDOWS || KMP_OS_DARWIN - - #elif KMP_ARCH_X86 || KMP_ARCH_ARM || KMP_ARCH_AARCH || KMP_ARCH_MIPS - - #if KMP_OS_LINUX || KMP_OS_WINDOWS - - // basic tuning - - if( atomic_available ) { - if( num_vars <= 2 ) { // && ( team_size <= 8 ) due to false-sharing ??? - retval = atomic_reduce_block; - } - } // otherwise: use critical section - - #elif KMP_OS_DARWIN - - if( atomic_available && ( num_vars <= 3 ) ) { - retval = atomic_reduce_block; - } else if( tree_available ) { - if( ( reduce_size > ( 9 * sizeof( kmp_real64 ) ) ) && ( reduce_size < ( 2000 * sizeof( kmp_real64 ) ) ) ) { - retval = TREE_REDUCE_BLOCK_WITH_PLAIN_BARRIER; - } - } // otherwise: use critical section - - #else - #error "Unknown or unsupported OS" - #endif - - #else - #error "Unknown or unsupported architecture" - #endif - - } - - // KMP_FORCE_REDUCTION - - // If the team is serialized (team_size == 1), ignore the forced reduction - // method and stay with the unsynchronized method (empty_reduce_block) - if( __kmp_force_reduction_method != reduction_method_not_defined && team_size != 1) { - - PACKED_REDUCTION_METHOD_T forced_retval = critical_reduce_block; - - int atomic_available, tree_available; - - switch( ( forced_retval = __kmp_force_reduction_method ) ) - { - case critical_reduce_block: - KMP_ASSERT( lck ); // lck should be != 0 - break; - - case atomic_reduce_block: - atomic_available = FAST_REDUCTION_ATOMIC_METHOD_GENERATED; - if( ! atomic_available ) { - KMP_WARNING(RedMethodNotSupported, "atomic"); - forced_retval = critical_reduce_block; - } - break; - - case tree_reduce_block: - tree_available = FAST_REDUCTION_TREE_METHOD_GENERATED; - if( !
tree_available ) { - KMP_WARNING(RedMethodNotSupported, "tree"); - forced_retval = critical_reduce_block; - } else { - #if KMP_FAST_REDUCTION_BARRIER - forced_retval = TREE_REDUCE_BLOCK_WITH_REDUCTION_BARRIER; - #endif - } - break; - - default: - KMP_ASSERT( 0 ); // "unsupported method specified" - } - - retval = forced_retval; - } - - KA_TRACE(10, ( "reduction method selected=%08x\n", retval ) ); - - #undef FAST_REDUCTION_TREE_METHOD_GENERATED - #undef FAST_REDUCTION_ATOMIC_METHOD_GENERATED - - return ( retval ); -} - -// this function is for testing set/get/determine reduce method -kmp_int32 -__kmp_get_reduce_method( void ) { - return ( ( __kmp_entry_thread()->th.th_local.packed_reduction_method ) >> 8 ); -} - -/* ------------------------------------------------------------------------ */ Index: runtime/src/kmp_runtime.cpp =================================================================== --- runtime/src/kmp_runtime.cpp +++ runtime/src/kmp_runtime.cpp @@ -1,5 +1,5 @@ /* - * kmp_runtime.c -- KPTS runtime support library + * kmp_runtime.cpp -- KPTS runtime support library */ Index: runtime/src/kmp_sched.cpp =================================================================== --- runtime/src/kmp_sched.cpp +++ runtime/src/kmp_sched.cpp @@ -1,5 +1,5 @@ /* - * kmp_sched.c -- static scheduling -- iteration initialization + * kmp_sched.cpp -- static scheduling -- iteration initialization */ Index: runtime/src/kmp_settings.c =================================================================== --- runtime/src/kmp_settings.c +++ runtime/src/kmp_settings.c @@ -1,5631 +0,0 @@ -/* - * kmp_settings.c -- Initialize environment variables - */ - - -//===----------------------------------------------------------------------===// -// -// The LLVM Compiler Infrastructure -// -// This file is dual licensed under the MIT and the University of Illinois Open -// Source Licenses. See LICENSE.txt for details. -// -//===----------------------------------------------------------------------===// - - -#include "kmp.h" -#include "kmp_wrapper_getpid.h" -#include "kmp_environment.h" -#include "kmp_atomic.h" -#include "kmp_itt.h" -#include "kmp_str.h" -#include "kmp_settings.h" -#include "kmp_i18n.h" -#include "kmp_lock.h" -#include "kmp_io.h" -#include "kmp_affinity.h" - -static int __kmp_env_toPrint( char const * name, int flag ); - -bool __kmp_env_format = 0; // 0 - old format; 1 - new format -// ------------------------------------------------------------------------------------------------- -// Helper string functions. Subject to move to kmp_str. 
-// ------------------------------------------------------------------------------------------------- - -static double -__kmp_convert_to_double( char const * s ) -{ - double result; - - if ( KMP_SSCANF( s, "%lf", &result ) < 1 ) { - result = 0.0; - } - - return result; -} - -#ifdef KMP_DEBUG -static unsigned int -__kmp_readstr_with_sentinel(char *dest, char const * src, size_t len, char sentinel) { - unsigned int i; - for (i = 0; i < len; i++) { - if ((*src == '\0') || (*src == sentinel)) { - break; - } - *(dest++) = *(src++); - } - *dest = '\0'; - return i; -} -#endif - -static int -__kmp_match_with_sentinel( char const * a, char const * b, size_t len, char sentinel ) { - size_t l = 0; - - if(a == NULL) - a = ""; - if(b == NULL) - b = ""; - while(*a && *b && *b != sentinel) { - char ca = *a, cb = *b; - - if(ca >= 'a' && ca <= 'z') - ca -= 'a' - 'A'; - if(cb >= 'a' && cb <= 'z') - cb -= 'a' - 'A'; - if(ca != cb) - return FALSE; - ++l; - ++a; - ++b; - } - return l >= len; -} - -// -// Expected usage: -// token is the token to check for. -// buf is the string being parsed. -// *end returns the char after the end of the token. -// it is not modified unless a match occurs. -// -// -// Example 1: -// -// if (__kmp_match_str("token", buf, *end) { -// -// buf = end; -// } -// -// Example 2: -// -// if (__kmp_match_str("token", buf, *end) { -// char *save = **end; -// **end = sentinel; -// -// **end = save; -// buf = end; -// } -// - -static int -__kmp_match_str( char const *token, char const *buf, const char **end) { - - KMP_ASSERT(token != NULL); - KMP_ASSERT(buf != NULL); - KMP_ASSERT(end != NULL); - - while (*token && *buf) { - char ct = *token, cb = *buf; - - if(ct >= 'a' && ct <= 'z') - ct -= 'a' - 'A'; - if(cb >= 'a' && cb <= 'z') - cb -= 'a' - 'A'; - if (ct != cb) - return FALSE; - ++token; - ++buf; - } - if (*token) { - return FALSE; - } - *end = buf; - return TRUE; -} - - -static size_t -__kmp_round4k( size_t size ) { - size_t _4k = 4 * 1024; - if ( size & ( _4k - 1 ) ) { - size &= ~ ( _4k - 1 ); - if ( size <= KMP_SIZE_T_MAX - _4k ) { - size += _4k; // Round up if there is no overflow. - }; // if - }; // if - return size; -} // __kmp_round4k - - -/* - Here, multipliers are like __kmp_convert_to_seconds, but floating-point - values are allowed, and the return value is in milliseconds. The default - multiplier is milliseconds. Returns INT_MAX only if the value specified - matches "infinit*". Returns -1 if specified string is invalid. 
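// Expected behavior of the converter documented above (definition follows),
// written as a small self-check; these values follow from the comment and
// the unit multipliers, but treat them as an assumption, not a tested
// guarantee:
#include <assert.h>
#include <limits.h>

extern int __kmp_convert_to_milliseconds( char const * data );

static void check_blocktime_parsing( void ) {
    assert( __kmp_convert_to_milliseconds( "200" )      == 200 );     // default unit is ms
    assert( __kmp_convert_to_milliseconds( "2s" )       == 2000 );    // seconds
    assert( __kmp_convert_to_milliseconds( "1m" )       == 60000 );   // minutes
    assert( __kmp_convert_to_milliseconds( "infinite" ) == INT_MAX ); // matches "infinit*"
    assert( __kmp_convert_to_milliseconds( "oops" )     == -1 );      // invalid string
}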
-*/ -int -__kmp_convert_to_milliseconds( char const * data ) -{ - int ret, nvalues, factor; - char mult, extra; - double value; - - if (data == NULL) return (-1); - if ( __kmp_str_match( "infinit", -1, data)) return (INT_MAX); - value = (double) 0.0; - mult = '\0'; - nvalues = KMP_SSCANF (data, "%lf%c%c", &value, &mult, &extra); - if (nvalues < 1) return (-1); - if (nvalues == 1) mult = '\0'; - if (nvalues == 3) return (-1); - - if (value < 0) return (-1); - - switch (mult) { - case '\0': - /* default is milliseconds */ - factor = 1; - break; - case 's': case 'S': - factor = 1000; - break; - case 'm': case 'M': - factor = 1000 * 60; - break; - case 'h': case 'H': - factor = 1000 * 60 * 60; - break; - case 'd': case 'D': - factor = 1000 * 24 * 60 * 60; - break; - default: - return (-1); - } - - if ( value >= ( (INT_MAX-1) / factor) ) - ret = INT_MAX-1; /* Don't allow infinite value here */ - else - ret = (int) (value * (double) factor); /* truncate to int */ - - return ret; -} - - -static int -__kmp_strcasecmp_with_sentinel( char const * a, char const * b, char sentinel ) { - if(a == NULL) - a = ""; - if(b == NULL) - b = ""; - while(*a && *b && *b != sentinel) { - char ca = *a, cb = *b; - - if(ca >= 'a' && ca <= 'z') - ca -= 'a' - 'A'; - if(cb >= 'a' && cb <= 'z') - cb -= 'a' - 'A'; - if(ca != cb) - return (int)(unsigned char)*a - (int)(unsigned char)*b; - ++a; - ++b; - } - return *a ? - (*b && *b != sentinel) ? (int)(unsigned char)*a - (int)(unsigned char)*b : 1 : - (*b && *b != sentinel) ? -1 : 0; -} - - -// ================================================================================================= -// Table structures and helper functions. -// ================================================================================================= - -typedef struct __kmp_setting kmp_setting_t; -typedef struct __kmp_stg_ss_data kmp_stg_ss_data_t; -typedef struct __kmp_stg_wp_data kmp_stg_wp_data_t; -typedef struct __kmp_stg_fr_data kmp_stg_fr_data_t; - -typedef void ( * kmp_stg_parse_func_t )( char const * name, char const * value, void * data ); -typedef void ( * kmp_stg_print_func_t )( kmp_str_buf_t * buffer, char const * name, void * data ); - -struct __kmp_setting { - char const * name; // Name of setting (environment variable). - kmp_stg_parse_func_t parse; // Parser function. - kmp_stg_print_func_t print; // Print function. - void * data; // Data passed to parser and printer. - int set; // Variable set during this "session" - // (__kmp_env_initialize() or kmp_set_defaults() call). - int defined; // Variable set in any "session". -}; // struct __kmp_setting - -struct __kmp_stg_ss_data { - size_t factor; // Default factor: 1 for KMP_STACKSIZE, 1024 for others. - kmp_setting_t * * rivals; // Array of pointers to rivals (including itself). -}; // struct __kmp_stg_ss_data - -struct __kmp_stg_wp_data { - int omp; // 0 -- KMP_LIBRARY, 1 -- OMP_WAIT_POLICY. - kmp_setting_t * * rivals; // Array of pointers to rivals (including itself). -}; // struct __kmp_stg_wp_data - -struct __kmp_stg_fr_data { - int force; // 0 -- KMP_DETERMINISTIC_REDUCTION, 1 -- KMP_FORCE_REDUCTION. - kmp_setting_t * * rivals; // Array of pointers to rivals (including itself). -}; // struct __kmp_stg_fr_data - -static int -__kmp_stg_check_rivals( // 0 -- Ok, 1 -- errors found. - char const * name, // Name of variable. - char const * value, // Value of the variable. - kmp_setting_t * * rivals // List of rival settings (the list must include current one). 
-); - - -// ------------------------------------------------------------------------------------------------- -// Helper parse functions. -// ------------------------------------------------------------------------------------------------- - -static void -__kmp_stg_parse_bool( - char const * name, - char const * value, - int * out -) { - if ( __kmp_str_match_true( value ) ) { - * out = TRUE; - } else if (__kmp_str_match_false( value ) ) { - * out = FALSE; - } else { - __kmp_msg( - kmp_ms_warning, - KMP_MSG( BadBoolValue, name, value ), - KMP_HNT( ValidBoolValues ), - __kmp_msg_null - ); - }; // if -} // __kmp_stg_parse_bool - -static void -__kmp_stg_parse_size( - char const * name, - char const * value, - size_t size_min, - size_t size_max, - int * is_specified, - size_t * out, - size_t factor -) { - char const * msg = NULL; - #if KMP_OS_DARWIN - size_min = __kmp_round4k( size_min ); - size_max = __kmp_round4k( size_max ); - #endif // KMP_OS_DARWIN - if ( value ) { - if ( is_specified != NULL ) { - * is_specified = 1; - }; // if - __kmp_str_to_size( value, out, factor, & msg ); - if ( msg == NULL ) { - if ( * out > size_max ) { - * out = size_max; - msg = KMP_I18N_STR( ValueTooLarge ); - } else if ( * out < size_min ) { - * out = size_min; - msg = KMP_I18N_STR( ValueTooSmall ); - } else { - #if KMP_OS_DARWIN - size_t round4k = __kmp_round4k( * out ); - if ( * out != round4k ) { - * out = round4k; - msg = KMP_I18N_STR( NotMultiple4K ); - }; // if - #endif - }; // if - } else { - // If integer overflow occurred, * out == KMP_SIZE_T_MAX. Cut it to size_max silently. - if ( * out < size_min ) { - * out = size_max; - } - else if ( * out > size_max ) { - * out = size_max; - }; // if - }; // if - if ( msg != NULL ) { - // Message is not empty. Print warning. - kmp_str_buf_t buf; - __kmp_str_buf_init( & buf ); - __kmp_str_buf_print_size( & buf, * out ); - KMP_WARNING( ParseSizeIntWarn, name, value, msg ); - KMP_INFORM( Using_str_Value, name, buf.str ); - __kmp_str_buf_free( & buf ); - }; // if - }; // if -} // __kmp_stg_parse_size - -#if KMP_AFFINITY_SUPPORTED -static void -__kmp_stg_parse_str( - char const * name, - char const * value, - char const * * out -) { - __kmp_str_free(out); - * out = __kmp_str_format( "%s", value ); -} // __kmp_stg_parse_str -#endif - -static void -__kmp_stg_parse_int( - char const * name, // I: Name of environment variable (used in warning messages). - char const * value, // I: Value of environment variable to parse. - int min, // I: Minimal allowed value. - int max, // I: Maximum allowed value. - int * out // O: Output (parsed) value. -) { - char const * msg = NULL; - kmp_uint64 uint = * out; - __kmp_str_to_uint( value, & uint, & msg ); - if ( msg == NULL ) { - if ( uint < (unsigned int)min ) { - msg = KMP_I18N_STR( ValueTooSmall ); - uint = min; - } else if ( uint > (unsigned int)max ) { - msg = KMP_I18N_STR( ValueTooLarge ); - uint = max; - }; // if - } else { - // If overflow occurred, msg contains the error message and uint is very big. Cut it - // to INT_MAX. - if ( uint < (unsigned int)min ) { - uint = min; - } - else if ( uint > (unsigned int)max ) { - uint = max; - }; // if - }; // if - if ( msg != NULL ) { - // Message is not empty. Print warning.
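// The clamp-and-warn pattern above in isolation: out-of-range values are
// pulled to the nearest bound and a note says which bound applied (a
// sketch; the real code goes through the KMP_WARNING/KMP_INFORM catalog):
#include <stdio.h>

static int clamp_with_warning( long parsed, int min, int max ) {
    if ( parsed < min ) {
        fprintf( stderr, "value too small, using %d\n", min );
        return min;
    }
    if ( parsed > max ) {
        fprintf( stderr, "value too large, using %d\n", max );
        return max;
    }
    return (int) parsed;
}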
- kmp_str_buf_t buf; - KMP_WARNING( ParseSizeIntWarn, name, value, msg ); - __kmp_str_buf_init( & buf ); - __kmp_str_buf_print( &buf, "%" KMP_UINT64_SPEC "", uint ); - KMP_INFORM( Using_uint64_Value, name, buf.str ); - __kmp_str_buf_free( &buf ); - }; // if - * out = uint; -} // __kmp_stg_parse_int - - -#if KMP_DEBUG_ADAPTIVE_LOCKS -static void -__kmp_stg_parse_file( - char const * name, - char const * value, - char * suffix, - char * * out -) { - char buffer[256]; - char *t; - int hasSuffix; - __kmp_str_free(out); - t = (char *) strrchr(value, '.'); - hasSuffix = t && __kmp_str_eqf( t, suffix ); - t = __kmp_str_format( "%s%s", value, hasSuffix ? "" : suffix ); - __kmp_expand_file_name( buffer, sizeof(buffer), t); - __kmp_str_free(&t); - * out = __kmp_str_format( "%s", buffer ); -} // __kmp_stg_parse_file -#endif - -#ifdef KMP_DEBUG -static char * par_range_to_print = NULL; - -static void -__kmp_stg_parse_par_range( - char const * name, - char const * value, - int * out_range, - char * out_routine, - char * out_file, - int * out_lb, - int * out_ub -) { - size_t len = KMP_STRLEN( value + 1 ); - par_range_to_print = (char *) KMP_INTERNAL_MALLOC( len +1 ); - KMP_STRNCPY_S( par_range_to_print, len + 1, value, len + 1); - __kmp_par_range = +1; - __kmp_par_range_lb = 0; - __kmp_par_range_ub = INT_MAX; - for (;;) { - unsigned int len; - if (( value == NULL ) || ( *value == '\0' )) { - break; - } - if ( ! __kmp_strcasecmp_with_sentinel( "routine", value, '=' )) { - value = strchr( value, '=' ) + 1; - len = __kmp_readstr_with_sentinel( out_routine, - value, KMP_PAR_RANGE_ROUTINE_LEN - 1, ',' ); - if ( len == 0 ) { - goto par_range_error; - } - value = strchr( value, ',' ); - if ( value != NULL ) { - value++; - } - continue; - } - if ( ! __kmp_strcasecmp_with_sentinel( "filename", value, '=' )) { - value = strchr( value, '=' ) + 1; - len = __kmp_readstr_with_sentinel( out_file, - value, KMP_PAR_RANGE_FILENAME_LEN - 1, ',' ); - if ( len == 0) { - goto par_range_error; - } - value = strchr( value, ',' ); - if ( value != NULL ) { - value++; - } - continue; - } - if (( ! __kmp_strcasecmp_with_sentinel( "range", value, '=' )) - || ( ! __kmp_strcasecmp_with_sentinel( "incl_range", value, '=' ))) { - value = strchr( value, '=' ) + 1; - if ( KMP_SSCANF( value, "%d:%d", out_lb, out_ub ) != 2 ) { - goto par_range_error; - } - *out_range = +1; - value = strchr( value, ',' ); - if ( value != NULL ) { - value++; - } - continue; - } - if ( ! 
__kmp_strcasecmp_with_sentinel( "excl_range", value, '=' )) { - value = strchr( value, '=' ) + 1; - if ( KMP_SSCANF( value, "%d:%d", out_lb, out_ub) != 2 ) { - goto par_range_error; - } - *out_range = -1; - value = strchr( value, ',' ); - if ( value != NULL ) { - value++; - } - continue; - } - par_range_error: - KMP_WARNING( ParRangeSyntax, name ); - __kmp_par_range = 0; - break; - } -} // __kmp_stg_parse_par_range -#endif - -int -__kmp_initial_threads_capacity( int req_nproc ) -{ - int nth = 32; - - /* MIN( MAX( 32, 4 * $OMP_NUM_THREADS, 4 * omp_get_num_procs() ), __kmp_max_nth) */ - if (nth < (4 * req_nproc)) - nth = (4 * req_nproc); - if (nth < (4 * __kmp_xproc)) - nth = (4 * __kmp_xproc); - - if (nth > __kmp_max_nth) - nth = __kmp_max_nth; - - return nth; -} - - -int -__kmp_default_tp_capacity( int req_nproc, int max_nth, int all_threads_specified) { - int nth = 128; - - if(all_threads_specified) - return max_nth; - /* MIN( MAX (128, 4 * $OMP_NUM_THREADS, 4 * omp_get_num_procs() ), __kmp_max_nth ) */ - if (nth < (4 * req_nproc)) - nth = (4 * req_nproc); - if (nth < (4 * __kmp_xproc)) - nth = (4 * __kmp_xproc); - - if (nth > __kmp_max_nth) - nth = __kmp_max_nth; - - return nth; -} - - -// ------------------------------------------------------------------------------------------------- -// Helper print functions. -// ------------------------------------------------------------------------------------------------- - -static void -__kmp_stg_print_bool( kmp_str_buf_t * buffer, char const * name, int value ) { - if( __kmp_env_format ) { - KMP_STR_BUF_PRINT_BOOL; - } else { - __kmp_str_buf_print( buffer, " %s=%s\n", name, value ? "true" : "false" ); - } -} // __kmp_stg_print_bool - -static void -__kmp_stg_print_int( kmp_str_buf_t * buffer, char const * name, int value ) { - if( __kmp_env_format ) { - KMP_STR_BUF_PRINT_INT; - } else { - __kmp_str_buf_print( buffer, " %s=%d\n", name, value ); - } -} // __kmp_stg_print_int - -static void -__kmp_stg_print_uint64( kmp_str_buf_t * buffer, char const * name, kmp_uint64 value ) { - if( __kmp_env_format ) { - KMP_STR_BUF_PRINT_UINT64; - } else { - __kmp_str_buf_print( buffer, " %s=%" KMP_UINT64_SPEC "\n", name, value ); - } -} // __kmp_stg_print_uint64 - -static void -__kmp_stg_print_str( kmp_str_buf_t * buffer, char const * name, char const * value ) { - if( __kmp_env_format ) { - KMP_STR_BUF_PRINT_STR; - } else { - __kmp_str_buf_print( buffer, " %s=%s\n", name, value ); - } -} // __kmp_stg_print_str - -static void -__kmp_stg_print_size( kmp_str_buf_t * buffer, char const * name, size_t value ) { - if( __kmp_env_format ) { - KMP_STR_BUF_PRINT_NAME_EX(name); - __kmp_str_buf_print_size( buffer, value ); - __kmp_str_buf_print( buffer, "'\n" ); - } else { - __kmp_str_buf_print( buffer, " %s=", name ); - __kmp_str_buf_print_size( buffer, value ); - __kmp_str_buf_print( buffer, "\n" ); - return; - } -} // __kmp_stg_print_size - - -// ================================================================================================= -// Parse and print functions. 
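// The table-driven scheme these parse/print helpers plug into, reduced to a
// sketch: one row per environment variable, holding its name plus parse and
// print callbacks (mirroring kmp_setting_t above; the row content here is
// illustrative only):
#include <stdio.h>
#include <stdlib.h>

typedef struct setting {
    const char * name;
    void ( * parse )( const char * name, const char * value, void * data );
    void ( * print )( const char * name, void * data );
    void * data;
} setting_t;

static int settings_flag = 0;

static void parse_bool_stub( const char * name, const char * value, void * data ) {
    * (int *) data = ( value[ 0 ] == '1' );   // trivial stand-in parser
}

static void print_bool_stub( const char * name, void * data ) {
    printf( "   %s=%s\n", name, * (int *) data ? "true" : "false" );
}

static setting_t table[] = {
    { "KMP_SETTINGS", parse_bool_stub, print_bool_stub, & settings_flag },
};

int main( void ) {
    for ( size_t i = 0; i < sizeof( table ) / sizeof( table[ 0 ] ); i++ ) {
        const char * value = getenv( table[ i ].name );
        if ( value != NULL )
            table[ i ].parse( table[ i ].name, value, table[ i ].data );
        table[ i ].print( table[ i ].name, table[ i ].data );
    }
    return 0;
}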
-// ================================================================================================= - -// ------------------------------------------------------------------------------------------------- -// KMP_ALL_THREADS, KMP_MAX_THREADS, OMP_THREAD_LIMIT -// ------------------------------------------------------------------------------------------------- - -static void -__kmp_stg_parse_all_threads( char const * name, char const * value, void * data ) { - - kmp_setting_t * * rivals = (kmp_setting_t * *) data; - int rc; - rc = __kmp_stg_check_rivals( name, value, rivals ); - if ( rc ) { - return; - }; // if - if ( ! __kmp_strcasecmp_with_sentinel( "all", value, 0 ) ) { - __kmp_max_nth = __kmp_xproc; - __kmp_allThreadsSpecified = 1; - } else { - __kmp_stg_parse_int( name, value, 1, __kmp_sys_max_nth, & __kmp_max_nth ); - __kmp_allThreadsSpecified = 0; - } - K_DIAG( 1, ( "__kmp_max_nth == %d\n", __kmp_max_nth ) ); - -} // __kmp_stg_parse_all_threads - -static void -__kmp_stg_print_all_threads( kmp_str_buf_t * buffer, char const * name, void * data ) { - __kmp_stg_print_int( buffer, name, __kmp_max_nth ); -} // __kmp_stg_print_all_threads - -// ------------------------------------------------------------------------------------------------- -// KMP_BLOCKTIME -// ------------------------------------------------------------------------------------------------- - -static void -__kmp_stg_parse_blocktime( char const * name, char const * value, void * data ) { - __kmp_dflt_blocktime = __kmp_convert_to_milliseconds( value ); - if ( __kmp_dflt_blocktime < 0 ) { - __kmp_dflt_blocktime = KMP_DEFAULT_BLOCKTIME; - __kmp_msg( kmp_ms_warning, KMP_MSG( InvalidValue, name, value ), __kmp_msg_null ); - KMP_INFORM( Using_int_Value, name, __kmp_dflt_blocktime ); - __kmp_env_blocktime = FALSE; // Revert to default as if var not set. - } else { - if ( __kmp_dflt_blocktime < KMP_MIN_BLOCKTIME ) { - __kmp_dflt_blocktime = KMP_MIN_BLOCKTIME; - __kmp_msg( kmp_ms_warning, KMP_MSG( SmallValue, name, value ), __kmp_msg_null ); - KMP_INFORM( MinValueUsing, name, __kmp_dflt_blocktime ); - } else if ( __kmp_dflt_blocktime > KMP_MAX_BLOCKTIME ) { - __kmp_dflt_blocktime = KMP_MAX_BLOCKTIME; - __kmp_msg( kmp_ms_warning, KMP_MSG( LargeValue, name, value ), __kmp_msg_null ); - KMP_INFORM( MaxValueUsing, name, __kmp_dflt_blocktime ); - }; // if - __kmp_env_blocktime = TRUE; // KMP_BLOCKTIME was specified. - }; // if -#if KMP_USE_MONITOR - // calculate number of monitor thread wakeup intervals corresponding to blocktime. 
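// What those two conversions compute, assuming a monitor thread that wakes
// __kmp_monitor_wakeups times per second (the real macros in kmp.h also
// clamp the wakeup rate; this is only the core arithmetic):
static int intervals_from_blocktime( int blocktime_ms, int wakeups_per_sec ) {
    // ceil( blocktime_ms * wakeups_per_sec / 1000 ): the blocktime expressed
    // as a count of monitor wakeup intervals, e.g. 200 ms at 10 wakeups/sec
    // gives 2 intervals.
    return ( blocktime_ms * wakeups_per_sec + 999 ) / 1000;
}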
- __kmp_monitor_wakeups = KMP_WAKEUPS_FROM_BLOCKTIME( __kmp_dflt_blocktime, __kmp_monitor_wakeups ); - __kmp_bt_intervals = KMP_INTERVALS_FROM_BLOCKTIME( __kmp_dflt_blocktime, __kmp_monitor_wakeups ); -#endif - K_DIAG( 1, ( "__kmp_env_blocktime == %d\n", __kmp_env_blocktime ) ); - if ( __kmp_env_blocktime ) { - K_DIAG( 1, ( "__kmp_dflt_blocktime == %d\n", __kmp_dflt_blocktime ) ); - } -} // __kmp_stg_parse_blocktime - -static void -__kmp_stg_print_blocktime( kmp_str_buf_t * buffer, char const * name, void * data ) { - __kmp_stg_print_int( buffer, name, __kmp_dflt_blocktime ); -} // __kmp_stg_print_blocktime - -// ------------------------------------------------------------------------------------------------- -// KMP_DUPLICATE_LIB_OK -// ------------------------------------------------------------------------------------------------- - -static void -__kmp_stg_parse_duplicate_lib_ok( char const * name, char const * value, void * data ) { - /* actually this variable is not supported, - put here for compatibility with earlier builds and for static/dynamic combination */ - __kmp_stg_parse_bool( name, value, & __kmp_duplicate_library_ok ); -} // __kmp_stg_parse_duplicate_lib_ok - -static void -__kmp_stg_print_duplicate_lib_ok( kmp_str_buf_t * buffer, char const * name, void * data ) { - __kmp_stg_print_bool( buffer, name, __kmp_duplicate_library_ok ); -} // __kmp_stg_print_duplicate_lib_ok - -// ------------------------------------------------------------------------------------------------- -// KMP_INHERIT_FP_CONTROL -// ------------------------------------------------------------------------------------------------- - -#if KMP_ARCH_X86 || KMP_ARCH_X86_64 - -static void -__kmp_stg_parse_inherit_fp_control( char const * name, char const * value, void * data ) { - __kmp_stg_parse_bool( name, value, & __kmp_inherit_fp_control ); -} // __kmp_stg_parse_inherit_fp_control - -static void -__kmp_stg_print_inherit_fp_control( kmp_str_buf_t * buffer, char const * name, void * data ) { -#if KMP_DEBUG - __kmp_stg_print_bool( buffer, name, __kmp_inherit_fp_control ); -#endif /* KMP_DEBUG */ -} // __kmp_stg_print_inherit_fp_control - -#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */ - -// ------------------------------------------------------------------------------------------------- -// KMP_LIBRARY, OMP_WAIT_POLICY -// ------------------------------------------------------------------------------------------------- - -static char const *blocktime_str = NULL; - -static void -__kmp_stg_parse_wait_policy( char const * name, char const * value, void * data ) { - - kmp_stg_wp_data_t * wait = (kmp_stg_wp_data_t *) data; - int rc; - - rc = __kmp_stg_check_rivals( name, value, wait->rivals ); - if ( rc ) { - return; - }; // if - - if ( wait->omp ) { - if ( __kmp_str_match( "ACTIVE", 1, value ) ) { - __kmp_library = library_turnaround; - if ( blocktime_str == NULL ) { - // KMP_BLOCKTIME not specified, so set default to "infinite". - __kmp_dflt_blocktime = KMP_MAX_BLOCKTIME; - } - } else if ( __kmp_str_match( "PASSIVE", 1, value ) ) { - __kmp_library = library_throughput; - if ( blocktime_str == NULL ) { - // KMP_BLOCKTIME not specified, so set default to 0. 
- __kmp_dflt_blocktime = 0; - } - } else { - KMP_WARNING( StgInvalidValue, name, value ); - }; // if - } else { - if ( __kmp_str_match( "serial", 1, value ) ) { /* S */ - __kmp_library = library_serial; - } else if ( __kmp_str_match( "throughput", 2, value ) ) { /* TH */ - __kmp_library = library_throughput; - } else if ( __kmp_str_match( "turnaround", 2, value ) ) { /* TU */ - __kmp_library = library_turnaround; - } else if ( __kmp_str_match( "dedicated", 1, value ) ) { /* D */ - __kmp_library = library_turnaround; - } else if ( __kmp_str_match( "multiuser", 1, value ) ) { /* M */ - __kmp_library = library_throughput; - } else { - KMP_WARNING( StgInvalidValue, name, value ); - }; // if - }; // if - __kmp_aux_set_library( __kmp_library ); - -} // __kmp_stg_parse_wait_policy - -static void -__kmp_stg_print_wait_policy( kmp_str_buf_t * buffer, char const * name, void * data ) { - - kmp_stg_wp_data_t * wait = (kmp_stg_wp_data_t *) data; - char const * value = NULL; - - if ( wait->omp ) { - switch ( __kmp_library ) { - case library_turnaround : { - value = "ACTIVE"; - } break; - case library_throughput : { - value = "PASSIVE"; - } break; - }; // switch - } else { - switch ( __kmp_library ) { - case library_serial : { - value = "serial"; - } break; - case library_turnaround : { - value = "turnaround"; - } break; - case library_throughput : { - value = "throughput"; - } break; - }; // switch - }; // if - if ( value != NULL ) { - __kmp_stg_print_str( buffer, name, value ); - }; // if - -} // __kmp_stg_print_wait_policy - -#if KMP_USE_MONITOR -// ------------------------------------------------------------------------------------------------- -// KMP_MONITOR_STACKSIZE -// ------------------------------------------------------------------------------------------------- - -static void -__kmp_stg_parse_monitor_stacksize( char const * name, char const * value, void * data ) { - __kmp_stg_parse_size( - name, - value, - __kmp_sys_min_stksize, - KMP_MAX_STKSIZE, - NULL, - & __kmp_monitor_stksize, - 1 - ); -} // __kmp_stg_parse_monitor_stacksize - -static void -__kmp_stg_print_monitor_stacksize( kmp_str_buf_t * buffer, char const * name, void * data ) { - if( __kmp_env_format ) { - if ( __kmp_monitor_stksize > 0 ) - KMP_STR_BUF_PRINT_NAME_EX(name); - else - KMP_STR_BUF_PRINT_NAME; - } else { - __kmp_str_buf_print( buffer, " %s", name ); - } - if ( __kmp_monitor_stksize > 0 ) { - __kmp_str_buf_print_size( buffer, __kmp_monitor_stksize ); - } else { - __kmp_str_buf_print( buffer, ": %s\n", KMP_I18N_STR( NotDefined ) ); - } - if( __kmp_env_format && __kmp_monitor_stksize ) { - __kmp_str_buf_print( buffer, "'\n"); - } - -} // __kmp_stg_print_monitor_stacksize -#endif // KMP_USE_MONITOR - -// ------------------------------------------------------------------------------------------------- -// KMP_SETTINGS -// ------------------------------------------------------------------------------------------------- - -static void -__kmp_stg_parse_settings( char const * name, char const * value, void * data ) { - __kmp_stg_parse_bool( name, value, & __kmp_settings ); -} // __kmp_stg_parse_settings - -static void -__kmp_stg_print_settings( kmp_str_buf_t * buffer, char const * name, void * data ) { - __kmp_stg_print_bool( buffer, name, __kmp_settings ); -} // __kmp_stg_print_settings - -// ------------------------------------------------------------------------------------------------- -// KMP_STACKPAD -// ------------------------------------------------------------------------------------------------- - -static void 
-__kmp_stg_parse_stackpad( char const * name, char const * value, void * data ) { - __kmp_stg_parse_int( - name, // Env var name - value, // Env var value - KMP_MIN_STKPADDING, // Min value - KMP_MAX_STKPADDING, // Max value - & __kmp_stkpadding // Var to initialize - ); -} // __kmp_stg_parse_stackpad - -static void -__kmp_stg_print_stackpad( kmp_str_buf_t * buffer, char const * name, void * data ) { - __kmp_stg_print_int( buffer, name, __kmp_stkpadding ); -} // __kmp_stg_print_stackpad - -// ------------------------------------------------------------------------------------------------- -// KMP_STACKOFFSET -// ------------------------------------------------------------------------------------------------- - -static void -__kmp_stg_parse_stackoffset( char const * name, char const * value, void * data ) { - __kmp_stg_parse_size( - name, // Env var name - value, // Env var value - KMP_MIN_STKOFFSET, // Min value - KMP_MAX_STKOFFSET, // Max value - NULL, // - & __kmp_stkoffset, // Var to initialize - 1 - ); -} // __kmp_stg_parse_stackoffset - -static void -__kmp_stg_print_stackoffset( kmp_str_buf_t * buffer, char const * name, void * data ) { - __kmp_stg_print_size( buffer, name, __kmp_stkoffset ); -} // __kmp_stg_print_stackoffset - -// ------------------------------------------------------------------------------------------------- -// KMP_STACKSIZE, OMP_STACKSIZE, GOMP_STACKSIZE -// ------------------------------------------------------------------------------------------------- - -static void -__kmp_stg_parse_stacksize( char const * name, char const * value, void * data ) { - - kmp_stg_ss_data_t * stacksize = (kmp_stg_ss_data_t *) data; - int rc; - - rc = __kmp_stg_check_rivals( name, value, stacksize->rivals ); - if ( rc ) { - return; - }; // if - __kmp_stg_parse_size( - name, // Env var name - value, // Env var value - __kmp_sys_min_stksize, // Min value - KMP_MAX_STKSIZE, // Max value - & __kmp_env_stksize, // - & __kmp_stksize, // Var to initialize - stacksize->factor - ); - -} // __kmp_stg_parse_stacksize - -// This function is called for printing both KMP_STACKSIZE (factor is 1) and OMP_STACKSIZE (factor is 1024). -// Currently it is not possible to print OMP_STACKSIZE value in bytes. We can consider adding this -// possibility by a customer request in future. -static void -__kmp_stg_print_stacksize( kmp_str_buf_t * buffer, char const * name, void * data ) { - kmp_stg_ss_data_t * stacksize = (kmp_stg_ss_data_t *) data; - if( __kmp_env_format ) { - KMP_STR_BUF_PRINT_NAME_EX(name); - __kmp_str_buf_print_size( buffer, (__kmp_stksize % 1024) ? __kmp_stksize / stacksize->factor : __kmp_stksize ); - __kmp_str_buf_print( buffer, "'\n" ); - } else { - __kmp_str_buf_print( buffer, " %s=", name ); - __kmp_str_buf_print_size( buffer, (__kmp_stksize % 1024) ? 
-        __kmp_str_buf_print( buffer, "\n" );
-    }
-} // __kmp_stg_print_stacksize
-
-// -------------------------------------------------------------------------------------------------
-// KMP_VERSION
-// -------------------------------------------------------------------------------------------------
-
-static void
-__kmp_stg_parse_version( char const * name, char const * value, void * data ) {
-    __kmp_stg_parse_bool( name, value, & __kmp_version );
-} // __kmp_stg_parse_version
-
-static void
-__kmp_stg_print_version( kmp_str_buf_t * buffer, char const * name, void * data ) {
-    __kmp_stg_print_bool( buffer, name, __kmp_version );
-} // __kmp_stg_print_version
-
-// -------------------------------------------------------------------------------------------------
-// KMP_WARNINGS
-// -------------------------------------------------------------------------------------------------
-
-static void
-__kmp_stg_parse_warnings( char const * name, char const * value, void * data ) {
-    __kmp_stg_parse_bool( name, value, & __kmp_generate_warnings );
-    if (__kmp_generate_warnings != kmp_warnings_off) {   // AC: we have only 0/1 values documented,
-        __kmp_generate_warnings = kmp_warnings_explicit; // so reset it to explicit in order to
-    }                                                    // distinguish it from the default setting
-} // __kmp_stg_parse_warnings
-
-static void
-__kmp_stg_print_warnings( kmp_str_buf_t * buffer, char const * name, void * data ) {
-    __kmp_stg_print_bool( buffer, name, __kmp_generate_warnings ); // AC: TODO: change to print_int?
-} // __kmp_stg_print_warnings // (needs documentation change)...
-
-// -------------------------------------------------------------------------------------------------
-// OMP_NESTED, OMP_NUM_THREADS
-// -------------------------------------------------------------------------------------------------
-
-static void
-__kmp_stg_parse_nested( char const * name, char const * value, void * data ) {
-    __kmp_stg_parse_bool( name, value, & __kmp_dflt_nested );
-} // __kmp_stg_parse_nested
-
-static void
-__kmp_stg_print_nested( kmp_str_buf_t * buffer, char const * name, void * data ) {
-    __kmp_stg_print_bool( buffer, name, __kmp_dflt_nested );
-} // __kmp_stg_print_nested
-
-static void
-__kmp_parse_nested_num_threads( const char *var, const char *env, kmp_nested_nthreads_t *nth_array )
-{
-    const char *next = env;
-    const char *scan = next;
-
-    int total = 0;          // Count elements that were set. It'll be used as an array size
-    int prev_comma = FALSE; // For correct processing of sequential commas
-
-    // Count the number of values in the env. var string
-    for ( ; ; ) {
-        SKIP_WS( next );
-
-        if ( *next == '\0' ) {
-            break;
-        }
-        // Next character is not an integer or not a comma => end of list
-        if ( ( ( *next < '0' ) || ( *next > '9' ) ) && ( *next !=',') ) {
-            KMP_WARNING( NthSyntaxError, var, env );
-            return;
-        }
-        // The next character is ','
-        if ( *next == ',' ) {
-            // ',' is the first character
-            if ( total == 0 || prev_comma ) {
-                total++;
-            }
-            prev_comma = TRUE;
-            next++; // skip ','
-            SKIP_WS( next );
-        }
-        // Next character is a digit
-        if ( *next >= '0' && *next <= '9' ) {
-            prev_comma = FALSE;
-            SKIP_DIGITS( next );
-            total++;
-            const char *tmp = next;
-            SKIP_WS( tmp );
-            if ( ( *next == ' ' || *next == '\t' ) && ( *tmp >= '0' && *tmp <= '9' ) ) {
-                KMP_WARNING( NthSpacesNotAllowed, var, env );
-                return;
-            }
-        }
-    }
-    KMP_DEBUG_ASSERT( total > 0 );
-    if( total <= 0 ) {
-        KMP_WARNING( NthSyntaxError, var, env );
-        return;
-    }
-
-    // Check if the nested nthreads array exists
-    if ( ! nth_array->nth ) {
-        // Allocate an array of double size
-        nth_array->nth = ( int * )KMP_INTERNAL_MALLOC( sizeof( int ) * total * 2 );
-        if ( nth_array->nth == NULL ) {
-            KMP_FATAL( MemoryAllocFailed );
-        }
-        nth_array->size = total * 2;
-    } else {
-        if ( nth_array->size < total ) {
-            // Increase the array size
-            do {
-                nth_array->size *= 2;
-            } while ( nth_array->size < total );
-
-            nth_array->nth = (int *) KMP_INTERNAL_REALLOC(
-                nth_array->nth, sizeof( int ) * nth_array->size );
-            if ( nth_array->nth == NULL ) {
-                KMP_FATAL( MemoryAllocFailed );
-            }
-        }
-    }
-    nth_array->used = total;
-    int i = 0;
-
-    prev_comma = FALSE;
-    total = 0;
-    // Save values in the array
-    for ( ; ; ) {
-        SKIP_WS( scan );
-        if ( *scan == '\0' ) {
-            break;
-        }
-        // The next character is ','
-        if ( *scan == ',' ) {
-            // ',' in the beginning of the list
-            if ( total == 0 ) {
-                // The value is supposed to be equal to __kmp_avail_proc but it is unknown at the moment.
-                // So let's put a placeholder (#threads = 0) to correct it later.
-                nth_array->nth[i++] = 0;
-                total++;
-            } else if ( prev_comma ) {
-                // Num threads is inherited from the previous level
-                nth_array->nth[i] = nth_array->nth[i - 1];
-                i++;
-                total++;
-            }
-            prev_comma = TRUE;
-            scan++; // skip ','
-            SKIP_WS( scan );
-        }
-        // Next character is a digit
-        if ( *scan >= '0' && *scan <= '9' ) {
-            int num;
-            const char *buf = scan;
-            char const * msg = NULL;
-            prev_comma = FALSE;
-            SKIP_DIGITS( scan );
-            total++;
-
-            num = __kmp_str_to_int( buf, *scan );
-            if ( num < KMP_MIN_NTH ) {
-                msg = KMP_I18N_STR( ValueTooSmall );
-                num = KMP_MIN_NTH;
-            } else if ( num > __kmp_sys_max_nth ) {
-                msg = KMP_I18N_STR( ValueTooLarge );
-                num = __kmp_sys_max_nth;
-            }
-            if ( msg != NULL ) {
-                // Message is not empty. Print warning.
-                KMP_WARNING( ParseSizeIntWarn, var, env, msg );
-                KMP_INFORM( Using_int_Value, var, num );
-            }
-            nth_array->nth[i++] = num;
-        }
-    }
-}
-
-static void
-__kmp_stg_parse_num_threads( char const * name, char const * value, void * data ) {
-    // TODO: Remove this option. OMP_NUM_THREADS is a list of positive integers!
-    if ( !
__kmp_strcasecmp_with_sentinel( "all", value, 0 ) ) { - // The array of 1 element - __kmp_nested_nth.nth = ( int* )KMP_INTERNAL_MALLOC( sizeof( int ) ); - __kmp_nested_nth.size = __kmp_nested_nth.used = 1; - __kmp_nested_nth.nth[0] = __kmp_dflt_team_nth = __kmp_dflt_team_nth_ub = __kmp_xproc; - } else { - __kmp_parse_nested_num_threads( name, value, & __kmp_nested_nth ); - if ( __kmp_nested_nth.nth ) { - __kmp_dflt_team_nth = __kmp_nested_nth.nth[0]; - if ( __kmp_dflt_team_nth_ub < __kmp_dflt_team_nth ) { - __kmp_dflt_team_nth_ub = __kmp_dflt_team_nth; - } - } - }; // if - K_DIAG( 1, ( "__kmp_dflt_team_nth == %d\n", __kmp_dflt_team_nth ) ); -} // __kmp_stg_parse_num_threads - -static void -__kmp_stg_print_num_threads( kmp_str_buf_t * buffer, char const * name, void * data ) { - if( __kmp_env_format ) { - KMP_STR_BUF_PRINT_NAME; - } else { - __kmp_str_buf_print( buffer, " %s", name ); - } - if ( __kmp_nested_nth.used ) { - kmp_str_buf_t buf; - __kmp_str_buf_init( &buf ); - for ( int i = 0; i < __kmp_nested_nth.used; i++) { - __kmp_str_buf_print( &buf, "%d", __kmp_nested_nth.nth[i] ); - if ( i < __kmp_nested_nth.used - 1 ) { - __kmp_str_buf_print( &buf, "," ); - } - } - __kmp_str_buf_print( buffer, "='%s'\n", buf.str ); - __kmp_str_buf_free(&buf); - } else { - __kmp_str_buf_print( buffer, ": %s\n", KMP_I18N_STR( NotDefined ) ); - } -} // __kmp_stg_print_num_threads - -// ------------------------------------------------------------------------------------------------- -// OpenMP 3.0: KMP_TASKING, OMP_MAX_ACTIVE_LEVELS, -// ------------------------------------------------------------------------------------------------- - -static void -__kmp_stg_parse_tasking( char const * name, char const * value, void * data ) { - __kmp_stg_parse_int( name, value, 0, (int)tskm_max, (int *)&__kmp_tasking_mode ); -} // __kmp_stg_parse_tasking - -static void -__kmp_stg_print_tasking( kmp_str_buf_t * buffer, char const * name, void * data ) { - __kmp_stg_print_int( buffer, name, __kmp_tasking_mode ); -} // __kmp_stg_print_tasking - -static void -__kmp_stg_parse_task_stealing( char const * name, char const * value, void * data ) { - __kmp_stg_parse_int( name, value, 0, 1, (int *)&__kmp_task_stealing_constraint ); -} // __kmp_stg_parse_task_stealing - -static void -__kmp_stg_print_task_stealing( kmp_str_buf_t * buffer, char const * name, void * data ) { - __kmp_stg_print_int( buffer, name, __kmp_task_stealing_constraint ); -} // __kmp_stg_print_task_stealing - -static void -__kmp_stg_parse_max_active_levels( char const * name, char const * value, void * data ) { - __kmp_stg_parse_int( name, value, 0, KMP_MAX_ACTIVE_LEVELS_LIMIT, & __kmp_dflt_max_active_levels ); -} // __kmp_stg_parse_max_active_levels - -static void -__kmp_stg_print_max_active_levels( kmp_str_buf_t * buffer, char const * name, void * data ) { - __kmp_stg_print_int( buffer, name, __kmp_dflt_max_active_levels ); -} // __kmp_stg_print_max_active_levels - -#if OMP_40_ENABLED -// ------------------------------------------------------------------------------------------------- -// OpenMP 4.0: OMP_DEFAULT_DEVICE -// ------------------------------------------------------------------------------------------------- -static void __kmp_stg_parse_default_device(char const *name, char const *value, void *data) { - __kmp_stg_parse_int(name, value, 0, KMP_MAX_DEFAULT_DEVICE_LIMIT, &__kmp_default_device); -} // __kmp_stg_parse_default_device - -static void __kmp_stg_print_default_device(kmp_str_buf_t *buffer, char const *name, void *data) { - 
__kmp_stg_print_int(buffer, name, __kmp_default_device); -} // __kmp_stg_print_default_device -#endif - -#if OMP_45_ENABLED -// ------------------------------------------------------------------------------------------------- -// OpenMP 4.5: OMP_MAX_TASK_PRIORITY -// ------------------------------------------------------------------------------------------------- -static void -__kmp_stg_parse_max_task_priority(char const *name, char const *value, void *data) { - __kmp_stg_parse_int(name, value, 0, KMP_MAX_TASK_PRIORITY_LIMIT, &__kmp_max_task_priority); -} // __kmp_stg_parse_max_task_priority - -static void -__kmp_stg_print_max_task_priority(kmp_str_buf_t *buffer, char const *name, void *data) { - __kmp_stg_print_int(buffer, name, __kmp_max_task_priority); -} // __kmp_stg_print_max_task_priority -#endif // OMP_45_ENABLED - -// ------------------------------------------------------------------------------------------------- -// KMP_DISP_NUM_BUFFERS -// ------------------------------------------------------------------------------------------------- -static void -__kmp_stg_parse_disp_buffers( char const * name, char const * value, void * data ) { - if ( TCR_4(__kmp_init_serial) ) { - KMP_WARNING( EnvSerialWarn, name ); - return; - } // read value before serial initialization only - __kmp_stg_parse_int( name, value, 1, KMP_MAX_NTH, & __kmp_dispatch_num_buffers ); -} // __kmp_stg_parse_disp_buffers - -static void -__kmp_stg_print_disp_buffers( kmp_str_buf_t * buffer, char const * name, void * data ) { - __kmp_stg_print_int( buffer, name, __kmp_dispatch_num_buffers ); -} // __kmp_stg_print_disp_buffers - -#if KMP_NESTED_HOT_TEAMS -// ------------------------------------------------------------------------------------------------- -// KMP_HOT_TEAMS_MAX_LEVEL, KMP_HOT_TEAMS_MODE -// ------------------------------------------------------------------------------------------------- - -static void -__kmp_stg_parse_hot_teams_level( char const * name, char const * value, void * data ) { - if ( TCR_4(__kmp_init_parallel) ) { - KMP_WARNING( EnvParallelWarn, name ); - return; - } // read value before first parallel only - __kmp_stg_parse_int( name, value, 0, KMP_MAX_ACTIVE_LEVELS_LIMIT, & __kmp_hot_teams_max_level ); -} // __kmp_stg_parse_hot_teams_level - -static void -__kmp_stg_print_hot_teams_level( kmp_str_buf_t * buffer, char const * name, void * data ) { - __kmp_stg_print_int( buffer, name, __kmp_hot_teams_max_level ); -} // __kmp_stg_print_hot_teams_level - -static void -__kmp_stg_parse_hot_teams_mode( char const * name, char const * value, void * data ) { - if ( TCR_4(__kmp_init_parallel) ) { - KMP_WARNING( EnvParallelWarn, name ); - return; - } // read value before first parallel only - __kmp_stg_parse_int( name, value, 0, KMP_MAX_ACTIVE_LEVELS_LIMIT, & __kmp_hot_teams_mode ); -} // __kmp_stg_parse_hot_teams_mode - -static void -__kmp_stg_print_hot_teams_mode( kmp_str_buf_t * buffer, char const * name, void * data ) { - __kmp_stg_print_int( buffer, name, __kmp_hot_teams_mode ); -} // __kmp_stg_print_hot_teams_mode - -#endif // KMP_NESTED_HOT_TEAMS - -// ------------------------------------------------------------------------------------------------- -// KMP_HANDLE_SIGNALS -// ------------------------------------------------------------------------------------------------- - -#if KMP_HANDLE_SIGNALS - -static void -__kmp_stg_parse_handle_signals( char const * name, char const * value, void * data ) { - __kmp_stg_parse_bool( name, value, & __kmp_handle_signals ); -} // __kmp_stg_parse_handle_signals 
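Every environment variable handled in this file follows the same handler-pair convention seen above: a __kmp_stg_parse_* function that validates the string and writes a global, and a matching __kmp_stg_print_* function that renders the current value back into a buffer, both dispatched through a table keyed by the variable name. The standalone C++ sketch below shows the shape of that table-driven scheme; it is illustrative only — the Setting struct, parse_bool, print_bool, and g_handle_signals names are invented here and are not the runtime's actual kmp_setting_t machinery:

    #include <cstdio>
    #include <cstring>

    // One table entry: a name plus a parse/print handler pair sharing `data`.
    struct Setting {
        const char *name;
        void (*parse)(const char *name, const char *value, void *data);
        void (*print)(const char *name, void *data);
        void *data; // points at the global the handlers read and write
    };

    static int g_handle_signals = 0;

    // Crude stand-in for a boolean parser: accept "1" or "true" as TRUE.
    static void parse_bool(const char *, const char *value, void *data) {
        *(int *)data = (std::strcmp(value, "1") == 0 ||
                        std::strcmp(value, "true") == 0);
    }

    static void print_bool(const char *name, void *data) {
        std::printf("   %s=%s\n", name, *(int *)data ? "true" : "false");
    }

    int main() {
        Setting table[] = {
            {"KMP_HANDLE_SIGNALS", parse_bool, print_bool, &g_handle_signals},
        };
        table[0].parse(table[0].name, "true", table[0].data); // simulate a getenv() result
        table[0].print(table[0].name, table[0].data);         // prints KMP_HANDLE_SIGNALS=true
        return 0;
    }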
-
-static void
-__kmp_stg_print_handle_signals( kmp_str_buf_t * buffer, char const * name, void * data ) {
-    __kmp_stg_print_bool( buffer, name, __kmp_handle_signals );
-} // __kmp_stg_print_handle_signals
-
-#endif // KMP_HANDLE_SIGNALS
-
-// -------------------------------------------------------------------------------------------------
-// KMP_X_DEBUG, KMP_DEBUG, KMP_DEBUG_BUF_*, KMP_DIAG
-// -------------------------------------------------------------------------------------------------
-
-#ifdef KMP_DEBUG
-
-#define KMP_STG_X_DEBUG( x )                                                                             \
-    static void __kmp_stg_parse_##x##_debug( char const * name, char const * value, void * data ) {     \
-        __kmp_stg_parse_int( name, value, 0, INT_MAX, & kmp_##x##_debug );                               \
-    } /* __kmp_stg_parse_x_debug */                                                                      \
-    static void __kmp_stg_print_##x##_debug( kmp_str_buf_t * buffer, char const * name, void * data ) { \
-        __kmp_stg_print_int( buffer, name, kmp_##x##_debug );                                            \
-    } /* __kmp_stg_print_x_debug */
-
-KMP_STG_X_DEBUG( a )
-KMP_STG_X_DEBUG( b )
-KMP_STG_X_DEBUG( c )
-KMP_STG_X_DEBUG( d )
-KMP_STG_X_DEBUG( e )
-KMP_STG_X_DEBUG( f )
-
-#undef KMP_STG_X_DEBUG
-
-static void
-__kmp_stg_parse_debug( char const * name, char const * value, void * data ) {
-    int debug = 0;
-    __kmp_stg_parse_int( name, value, 0, INT_MAX, & debug );
-    if ( kmp_a_debug < debug ) {
-        kmp_a_debug = debug;
-    }; // if
-    if ( kmp_b_debug < debug ) {
-        kmp_b_debug = debug;
-    }; // if
-    if ( kmp_c_debug < debug ) {
-        kmp_c_debug = debug;
-    }; // if
-    if ( kmp_d_debug < debug ) {
-        kmp_d_debug = debug;
-    }; // if
-    if ( kmp_e_debug < debug ) {
-        kmp_e_debug = debug;
-    }; // if
-    if ( kmp_f_debug < debug ) {
-        kmp_f_debug = debug;
-    }; // if
-} // __kmp_stg_parse_debug
-
-static void
-__kmp_stg_parse_debug_buf( char const * name, char const * value, void * data ) {
-    __kmp_stg_parse_bool( name, value, & __kmp_debug_buf );
-    // !!! TODO: Move buffer initialization out of this file! It may work incorrectly if
-    // KMP_DEBUG_BUF is parsed before KMP_DEBUG_BUF_LINES or KMP_DEBUG_BUF_CHARS.
-    if ( __kmp_debug_buf ) {
-        int i;
-        int elements = __kmp_debug_buf_lines * __kmp_debug_buf_chars;
-
-        /* allocate and initialize all entries in debug buffer to empty */
-        __kmp_debug_buffer = (char *) __kmp_page_allocate( elements * sizeof( char ) );
-        for ( i = 0; i < elements; i += __kmp_debug_buf_chars )
-            __kmp_debug_buffer[i] = '\0';
-
-        __kmp_debug_count = 0;
-    }
-    K_DIAG( 1, ( "__kmp_debug_buf = %d\n", __kmp_debug_buf ) );
-} // __kmp_stg_parse_debug_buf
-
-static void
-__kmp_stg_print_debug_buf( kmp_str_buf_t * buffer, char const * name, void * data ) {
-    __kmp_stg_print_bool( buffer, name, __kmp_debug_buf );
-} // __kmp_stg_print_debug_buf
-
-static void
-__kmp_stg_parse_debug_buf_atomic( char const * name, char const * value, void * data ) {
-    __kmp_stg_parse_bool( name, value, & __kmp_debug_buf_atomic );
-} // __kmp_stg_parse_debug_buf_atomic
-
-static void
-__kmp_stg_print_debug_buf_atomic( kmp_str_buf_t * buffer, char const * name, void * data ) {
-    __kmp_stg_print_bool( buffer, name, __kmp_debug_buf_atomic );
-} // __kmp_stg_print_debug_buf_atomic
-
-static void
-__kmp_stg_parse_debug_buf_chars( char const * name, char const * value, void * data ) {
-    __kmp_stg_parse_int(
-        name,
-        value,
-        KMP_DEBUG_BUF_CHARS_MIN,
-        INT_MAX,
-        & __kmp_debug_buf_chars
-    );
-} // __kmp_stg_parse_debug_buf_chars
-
-static void
-__kmp_stg_print_debug_buf_chars( kmp_str_buf_t * buffer, char const * name, void * data ) {
-    __kmp_stg_print_int( buffer, name, __kmp_debug_buf_chars );
-} // __kmp_stg_print_debug_buf_chars
-
-static void
-__kmp_stg_parse_debug_buf_lines( char const * name, char const * value, void * data ) {
-    __kmp_stg_parse_int(
-        name,
-        value,
-        KMP_DEBUG_BUF_LINES_MIN,
-        INT_MAX,
-        & __kmp_debug_buf_lines
-    );
-} // __kmp_stg_parse_debug_buf_lines
-
-static void
-__kmp_stg_print_debug_buf_lines( kmp_str_buf_t * buffer, char const * name, void * data ) {
-    __kmp_stg_print_int( buffer, name, __kmp_debug_buf_lines );
-} // __kmp_stg_print_debug_buf_lines
-
-static void
-__kmp_stg_parse_diag( char const * name, char const * value, void * data ) {
-    __kmp_stg_parse_int( name, value, 0, INT_MAX, & kmp_diag );
-} // __kmp_stg_parse_diag
-
-static void
-__kmp_stg_print_diag( kmp_str_buf_t * buffer, char const * name, void * data ) {
-    __kmp_stg_print_int( buffer, name, kmp_diag );
-} // __kmp_stg_print_diag
-
-#endif // KMP_DEBUG
-
-// -------------------------------------------------------------------------------------------------
-// KMP_ALIGN_ALLOC
-// -------------------------------------------------------------------------------------------------
-
-static void
-__kmp_stg_parse_align_alloc( char const * name, char const * value, void * data ) {
-    __kmp_stg_parse_size(
-        name,
-        value,
-        CACHE_LINE,
-        INT_MAX,
-        NULL,
-        & __kmp_align_alloc,
-        1
-    );
-} // __kmp_stg_parse_align_alloc
-
-static void
-__kmp_stg_print_align_alloc( kmp_str_buf_t * buffer, char const * name, void * data ) {
-    __kmp_stg_print_size( buffer, name, __kmp_align_alloc );
-} // __kmp_stg_print_align_alloc
-
-// -------------------------------------------------------------------------------------------------
-// KMP_PLAIN_BARRIER, KMP_FORKJOIN_BARRIER, KMP_REDUCTION_BARRIER
-// -------------------------------------------------------------------------------------------------
-
-// TODO: Remove the __kmp_barrier_branch_bit_env_name variable, remove the loops from the parse
-// and print functions, and pass the required info through the data argument.
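For context on the parser that follows: KMP_PLAIN_BARRIER, KMP_FORKJOIN_BARRIER, and KMP_REDUCTION_BARRIER each take a "gather,release" pair of branch bits, where 2^bits is the fan-in/fan-out of the corresponding barrier tree; the release part is optional, and each value is checked against KMP_MAX_BRANCH_BITS. Below is a minimal standalone sketch of that comma-split-and-clamp step — illustrative only, with kMaxBranchBits and kDefaultBits as stand-ins for KMP_MAX_BRANCH_BITS and the __kmp_barrier_*_bb_dflt defaults:

    #include <cstdio>
    #include <cstdlib>
    #include <cstring>

    int main() {
        const unsigned kMaxBranchBits = 31; // stand-in for KMP_MAX_BRANCH_BITS
        const unsigned kDefaultBits   = 2;  // stand-in for the built-in default
        const char *value = "2,4";          // e.g. KMP_PLAIN_BARRIER=2,4

        unsigned gather = (unsigned)std::atoi(value);          // digits before the comma
        const char *comma = std::strchr(value, ',');
        unsigned release = comma ? (unsigned)std::atoi(comma + 1)
                                 : kDefaultBits;               // no comma: use the default
        if (gather > kMaxBranchBits)  gather  = kDefaultBits;  // clamp invalid input
        if (release > kMaxBranchBits) release = kDefaultBits;

        std::printf("gather fan-in 2^%u, release fan-out 2^%u\n", gather, release);
        return 0;
    }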
-
-static void
-__kmp_stg_parse_barrier_branch_bit( char const * name, char const * value, void * data ) {
-    const char *var;
-
-    /* ---------- Barrier branch bit control ------------ */
-    for ( int i=bs_plain_barrier; i<bs_last_barrier; i++ ) {
-        var = __kmp_barrier_branch_bit_env_name[ i ];
-        if ( ( strcmp( var, name ) == 0 ) && ( value != 0 ) ) {
-            char *comma;
-
-            comma = (char *) strchr( value, ',' );
-            __kmp_barrier_gather_branch_bits[ i ] = ( kmp_uint32 ) __kmp_str_to_int( value, ',' );
-            /* is there a specified release parameter? */
-            if ( comma == NULL ) {
-                __kmp_barrier_release_branch_bits[ i ] = __kmp_barrier_release_bb_dflt;
-            } else {
-                __kmp_barrier_release_branch_bits[ i ] = ( kmp_uint32 ) __kmp_str_to_int( comma + 1, 0 );
-
-                if ( __kmp_barrier_release_branch_bits[ i ] > KMP_MAX_BRANCH_BITS ) {
-                    __kmp_msg( kmp_ms_warning, KMP_MSG( BarrReleaseValueInvalid, name, comma + 1 ), __kmp_msg_null );
-                    __kmp_barrier_release_branch_bits[ i ] = __kmp_barrier_release_bb_dflt;
-                }
-            }
-            if ( __kmp_barrier_gather_branch_bits[ i ] > KMP_MAX_BRANCH_BITS ) {
-                KMP_WARNING( BarrGatherValueInvalid, name, value );
-                KMP_INFORM( Using_uint_Value, name, __kmp_barrier_gather_bb_dflt );
-                __kmp_barrier_gather_branch_bits[ i ] = __kmp_barrier_gather_bb_dflt;
-            }
-        }
-        K_DIAG(1, ("%s == %d,%d\n", __kmp_barrier_branch_bit_env_name[ i ], \
-                   __kmp_barrier_gather_branch_bits [ i ], \
-                   __kmp_barrier_release_branch_bits [ i ]))
-    }
-} // __kmp_stg_parse_barrier_branch_bit
-
-static void
-__kmp_stg_print_barrier_branch_bit( kmp_str_buf_t * buffer, char const * name, void * data ) {
-    const char *var;
-    for ( int i=bs_plain_barrier; i<bs_last_barrier; i++ ) {
-        var = __kmp_barrier_branch_bit_env_name[ i ];
-        if ( strcmp( var, name ) == 0 ) {
-            if( __kmp_env_format ) {
-                KMP_STR_BUF_PRINT_NAME_EX(__kmp_barrier_branch_bit_env_name[ i ]);
-            } else {
-                __kmp_str_buf_print( buffer, "   %s='", __kmp_barrier_branch_bit_env_name[ i ] );
-            }
-            __kmp_str_buf_print( buffer, "%d,%d'\n", __kmp_barrier_gather_branch_bits [ i ],
-                                 __kmp_barrier_release_branch_bits [ i ] );
-        }
-    }
-} // __kmp_stg_print_barrier_branch_bit
-
-// -------------------------------------------------------------------------------------------------
-// KMP_FORCE_REDUCTION, KMP_DETERMINISTIC_REDUCTION
-// -------------------------------------------------------------------------------------------------
-
-static void
-__kmp_stg_parse_force_reduction( char const * name, char const * value, void * data ) {
-    kmp_stg_fr_data_t * reduction = (kmp_stg_fr_data_t *) data;
-    int rc;
-
-    rc = __kmp_stg_check_rivals( name, value, reduction->rivals );
-    if ( rc ) {
-        return;
-    }; // if
-    if ( reduction->force ) {
-        if( value != 0 ) {
-            if( __kmp_str_match( "critical", 0, value ) )
-                __kmp_force_reduction_method = critical_reduce_block;
-            else if( __kmp_str_match( "atomic", 0, value ) )
-                __kmp_force_reduction_method = atomic_reduce_block;
-            else if( __kmp_str_match( "tree", 0, value ) )
-                __kmp_force_reduction_method = tree_reduce_block;
-            else {
-                KMP_FATAL( UnknownForceReduction, name, value );
-            }
-        }
-    } else {
-        __kmp_stg_parse_bool( name, value, & __kmp_determ_red );
-        if( __kmp_determ_red ) {
-            __kmp_force_reduction_method = tree_reduce_block;
-        } else {
-            __kmp_force_reduction_method = reduction_method_not_defined;
-        }
-    }
-    K_DIAG( 1, ( "__kmp_force_reduction_method == %d\n", __kmp_force_reduction_method ) );
-} // __kmp_stg_parse_force_reduction
-
-static void
-__kmp_stg_print_force_reduction( kmp_str_buf_t * buffer, char const * name, void * data ) {
-
-    kmp_stg_fr_data_t * reduction = (kmp_stg_fr_data_t *) data;
-    if ( reduction->force ) {
-        if( __kmp_force_reduction_method == critical_reduce_block) {
-            __kmp_stg_print_str( buffer, name, "critical");
-        } else if ( __kmp_force_reduction_method == atomic_reduce_block ) {
-            __kmp_stg_print_str( buffer, name, "atomic");
-        } else if ( __kmp_force_reduction_method == tree_reduce_block ) {
-            __kmp_stg_print_str( buffer, name, "tree");
-        } else {
-            if( __kmp_env_format ) {
-                KMP_STR_BUF_PRINT_NAME;
-            } else {
-                __kmp_str_buf_print( buffer, "   %s", name );
-            }
-            __kmp_str_buf_print( buffer, ": %s\n", KMP_I18N_STR( NotDefined ) );
-        }
-    } else {
-        __kmp_stg_print_bool( buffer, name, __kmp_determ_red );
-    }
-
-
-} // __kmp_stg_print_force_reduction
-
-// -------------------------------------------------------------------------------------------------
-// KMP_STORAGE_MAP
-// -------------------------------------------------------------------------------------------------
-
-static void
-__kmp_stg_parse_storage_map( char const * name, char const * value, void * data ) {
-    if ( __kmp_str_match( "verbose", 1, value ) ) {
-        __kmp_storage_map         = TRUE;
-        __kmp_storage_map_verbose = TRUE;
-        __kmp_storage_map_verbose_specified = TRUE;
-
-    } else {
-        __kmp_storage_map_verbose = FALSE;
-        __kmp_stg_parse_bool( name, value, & __kmp_storage_map ); // !!!
- }; // if -} // __kmp_stg_parse_storage_map - -static void -__kmp_stg_print_storage_map( kmp_str_buf_t * buffer, char const * name, void * data ) { - if ( __kmp_storage_map_verbose || __kmp_storage_map_verbose_specified ) { - __kmp_stg_print_str( buffer, name, "verbose" ); - } else { - __kmp_stg_print_bool( buffer, name, __kmp_storage_map ); - } -} // __kmp_stg_print_storage_map - -// ------------------------------------------------------------------------------------------------- -// KMP_ALL_THREADPRIVATE -// ------------------------------------------------------------------------------------------------- - -static void -__kmp_stg_parse_all_threadprivate( char const * name, char const * value, void * data ) { - __kmp_stg_parse_int( name, value, __kmp_allThreadsSpecified ? __kmp_max_nth : 1, __kmp_max_nth, - & __kmp_tp_capacity ); -} // __kmp_stg_parse_all_threadprivate - -static void -__kmp_stg_print_all_threadprivate( kmp_str_buf_t * buffer, char const * name, void * data ) { - __kmp_stg_print_int( buffer, name, __kmp_tp_capacity ); - -} - -// ------------------------------------------------------------------------------------------------- -// KMP_FOREIGN_THREADS_THREADPRIVATE -// ------------------------------------------------------------------------------------------------- - -static void -__kmp_stg_parse_foreign_threads_threadprivate( char const * name, char const * value, void * data ) { - __kmp_stg_parse_bool( name, value, & __kmp_foreign_tp ); -} // __kmp_stg_parse_foreign_threads_threadprivate - -static void -__kmp_stg_print_foreign_threads_threadprivate( kmp_str_buf_t * buffer, char const * name, void * data ) { - __kmp_stg_print_bool( buffer, name, __kmp_foreign_tp ); -} // __kmp_stg_print_foreign_threads_threadprivate - - -// ------------------------------------------------------------------------------------------------- -// KMP_AFFINITY, GOMP_CPU_AFFINITY, KMP_TOPOLOGY_METHOD -// ------------------------------------------------------------------------------------------------- - -#if KMP_AFFINITY_SUPPORTED -// -// Parse the proc id list. Return TRUE if successful, FALSE otherwise. -// -static int -__kmp_parse_affinity_proc_id_list( const char *var, const char *env, - const char **nextEnv, char **proclist ) -{ - const char *scan = env; - const char *next = scan; - int empty = TRUE; - - *proclist = NULL; - - for (;;) { - int start, end, stride; - - SKIP_WS(scan); - next = scan; - if (*next == '\0') { - break; - } - - if (*next == '{') { - int num; - next++; // skip '{' - SKIP_WS(next); - scan = next; - - // - // Read the first integer in the set. - // - if ((*next < '0') || (*next > '9')) { - KMP_WARNING( AffSyntaxError, var ); - return FALSE; - } - SKIP_DIGITS(next); - num = __kmp_str_to_int(scan, *next); - KMP_ASSERT(num >= 0); - - for (;;) { - // - // Check for end of set. - // - SKIP_WS(next); - if (*next == '}') { - next++; // skip '}' - break; - } - - // - // Skip optional comma. - // - if (*next == ',') { - next++; - } - SKIP_WS(next); - - // - // Read the next integer in the set. 
- // - scan = next; - if ((*next < '0') || (*next > '9')) { - KMP_WARNING( AffSyntaxError, var ); - return FALSE; - } - - SKIP_DIGITS(next); - num = __kmp_str_to_int(scan, *next); - KMP_ASSERT(num >= 0); - } - empty = FALSE; - - SKIP_WS(next); - if (*next == ',') { - next++; - } - scan = next; - continue; - } - - // - // Next character is not an integer => end of list - // - if ((*next < '0') || (*next > '9')) { - if (empty) { - KMP_WARNING( AffSyntaxError, var ); - return FALSE; - } - break; - } - - // - // Read the first integer. - // - SKIP_DIGITS(next); - start = __kmp_str_to_int(scan, *next); - KMP_ASSERT(start >= 0); - SKIP_WS(next); - - // - // If this isn't a range, then go on. - // - if (*next != '-') { - empty = FALSE; - - // - // Skip optional comma. - // - if (*next == ',') { - next++; - } - scan = next; - continue; - } - - // - // This is a range. Skip over the '-' and read in the 2nd int. - // - next++; // skip '-' - SKIP_WS(next); - scan = next; - if ((*next < '0') || (*next > '9')) { - KMP_WARNING( AffSyntaxError, var ); - return FALSE; - } - SKIP_DIGITS(next); - end = __kmp_str_to_int(scan, *next); - KMP_ASSERT(end >= 0); - - // - // Check for a stride parameter - // - stride = 1; - SKIP_WS(next); - if (*next == ':') { - // - // A stride is specified. Skip over the ':" and read the 3rd int. - // - int sign = +1; - next++; // skip ':' - SKIP_WS(next); - scan = next; - if (*next == '-') { - sign = -1; - next++; - SKIP_WS(next); - scan = next; - } - if ((*next < '0') || (*next > '9')) { - KMP_WARNING( AffSyntaxError, var ); - return FALSE; - } - SKIP_DIGITS(next); - stride = __kmp_str_to_int(scan, *next); - KMP_ASSERT(stride >= 0); - stride *= sign; - } - - // - // Do some range checks. - // - if (stride == 0) { - KMP_WARNING( AffZeroStride, var ); - return FALSE; - } - if (stride > 0) { - if (start > end) { - KMP_WARNING( AffStartGreaterEnd, var, start, end ); - return FALSE; - } - } - else { - if (start < end) { - KMP_WARNING( AffStrideLessZero, var, start, end ); - return FALSE; - } - } - if ((end - start) / stride > 65536 ) { - KMP_WARNING( AffRangeTooBig, var, end, start, stride ); - return FALSE; - } - - empty = FALSE; - - // - // Skip optional comma. - // - SKIP_WS(next); - if (*next == ',') { - next++; - } - scan = next; - } - - *nextEnv = next; - - { - int len = next - env; - char *retlist = (char *)__kmp_allocate((len + 1) * sizeof(char)); - KMP_MEMCPY_S(retlist, (len+1)*sizeof(char), env, len * sizeof(char)); - retlist[len] = '\0'; - *proclist = retlist; - } - return TRUE; -} - - -// -// If KMP_AFFINITY is specified without a type, then -// __kmp_affinity_notype should point to its setting. -// -static kmp_setting_t *__kmp_affinity_notype = NULL; - -static void -__kmp_parse_affinity_env( char const * name, char const * value, - enum affinity_type * out_type, - char ** out_proclist, - int * out_verbose, - int * out_warn, - int * out_respect, - enum affinity_gran * out_gran, - int * out_gran_levels, - int * out_dups, - int * out_compact, - int * out_offset -) -{ - char * buffer = NULL; // Copy of env var value. - char * buf = NULL; // Buffer for strtok_r() function. - char * next = NULL; // end of token / start of next. - const char * start; // start of current token (for err msgs) - int count = 0; // Counter of parsed integer numbers. - int number[ 2 ]; // Parsed numbers. - - // Guards. 
-    int type         = 0;
-    int proclist     = 0;
-    int max_proclist = 0;
-    int verbose      = 0;
-    int warnings     = 0;
-    int respect      = 0;
-    int gran         = 0;
-    int dups         = 0;
-
-    KMP_ASSERT( value != NULL );
-
-    if ( TCR_4(__kmp_init_middle) ) {
-        KMP_WARNING( EnvMiddleWarn, name );
-        __kmp_env_toPrint( name, 0 );
-        return;
-    }
-    __kmp_env_toPrint( name, 1 );
-
-    buffer = __kmp_str_format( "%s", value );    // Copy env var to keep original intact.
-    buf = buffer;
-    SKIP_WS(buf);
-
-    // Helper macros.
-
-    //
-    // If we see a parse error, emit a warning and scan to the next ",".
-    //
-    // FIXME - there's got to be a better way to print an error
-    // message, hopefully without overwriting pieces of buf.
-    //
-    #define EMIT_WARN(skip,errlist) \
-    { \
-        char ch; \
-        if (skip) { \
-            SKIP_TO(next, ','); \
-        } \
-        ch = *next; \
-        *next = '\0'; \
-        KMP_WARNING errlist; \
-        *next = ch; \
-        if (skip) { \
-            if (ch == ',') next++; \
-        } \
-        buf = next; \
-    }
-
-    #define _set_param(_guard,_var,_val) \
-    { \
-        if ( _guard == 0 ) { \
-            _var = _val; \
-        } else { \
-            EMIT_WARN( FALSE, ( AffParamDefined, name, start ) ); \
-        }; \
-        ++ _guard; \
-    }
-
-    #define set_type(val)     _set_param( type, *out_type, val )
-    #define set_verbose(val)  _set_param( verbose, *out_verbose, val )
-    #define set_warnings(val) _set_param( warnings, *out_warn, val )
-    #define set_respect(val)  _set_param( respect, *out_respect, val )
-    #define set_dups(val)     _set_param( dups, *out_dups, val )
-    #define set_proclist(val) _set_param( proclist, *out_proclist, val )
-
-    #define set_gran(val,levels) \
-    { \
-        if ( gran == 0 ) { \
-            *out_gran = val; \
-            *out_gran_levels = levels; \
-        } else { \
-            EMIT_WARN( FALSE, ( AffParamDefined, name, start ) ); \
-        }; \
-        ++ gran; \
-    }
-
-# if OMP_40_ENABLED
-    KMP_DEBUG_ASSERT( ( __kmp_nested_proc_bind.bind_types != NULL )
-      && ( __kmp_nested_proc_bind.used > 0 ) );
-# endif
-
-    while ( *buf != '\0' ) {
-        start = next = buf;
-
-        if (__kmp_match_str("none", buf, (const char **)&next)) {
-            set_type( affinity_none );
-# if OMP_40_ENABLED
-            __kmp_nested_proc_bind.bind_types[0] = proc_bind_false;
-# endif
-            buf = next;
-        } else if (__kmp_match_str("scatter", buf, (const char **)&next)) {
-            set_type( affinity_scatter );
-# if OMP_40_ENABLED
-            __kmp_nested_proc_bind.bind_types[0] = proc_bind_intel;
-# endif
-            buf = next;
-        } else if (__kmp_match_str("compact", buf, (const char **)&next)) {
-            set_type( affinity_compact );
-# if OMP_40_ENABLED
-            __kmp_nested_proc_bind.bind_types[0] = proc_bind_intel;
-# endif
-            buf = next;
-        } else if (__kmp_match_str("logical", buf, (const char **)&next)) {
-            set_type( affinity_logical );
-# if OMP_40_ENABLED
-            __kmp_nested_proc_bind.bind_types[0] = proc_bind_intel;
-# endif
-            buf = next;
-        } else if (__kmp_match_str("physical", buf, (const char **)&next)) {
-            set_type( affinity_physical );
-# if OMP_40_ENABLED
-            __kmp_nested_proc_bind.bind_types[0] = proc_bind_intel;
-# endif
-            buf = next;
-        } else if (__kmp_match_str("explicit", buf, (const char **)&next)) {
-            set_type( affinity_explicit );
-# if OMP_40_ENABLED
-            __kmp_nested_proc_bind.bind_types[0] = proc_bind_intel;
-# endif
-            buf = next;
-        } else if (__kmp_match_str("balanced", buf, (const char **)&next)) {
-            set_type( affinity_balanced );
-# if OMP_40_ENABLED
-            __kmp_nested_proc_bind.bind_types[0] = proc_bind_intel;
-# endif
-            buf = next;
-        } else if (__kmp_match_str("disabled", buf, (const char **)&next)) {
-            set_type( affinity_disabled );
-# if OMP_40_ENABLED
-            __kmp_nested_proc_bind.bind_types[0] = proc_bind_false;
-# endif
-            buf = next;
-        }
else if (__kmp_match_str("verbose", buf, (const char **)&next)) { - set_verbose( TRUE ); - buf = next; - } else if (__kmp_match_str("noverbose", buf, (const char **)&next)) { - set_verbose( FALSE ); - buf = next; - } else if (__kmp_match_str("warnings", buf, (const char **)&next)) { - set_warnings( TRUE ); - buf = next; - } else if (__kmp_match_str("nowarnings", buf, (const char **)&next)) { - set_warnings( FALSE ); - buf = next; - } else if (__kmp_match_str("respect", buf, (const char **)&next)) { - set_respect( TRUE ); - buf = next; - } else if (__kmp_match_str("norespect", buf, (const char **)&next)) { - set_respect( FALSE ); - buf = next; - } else if (__kmp_match_str("duplicates", buf, (const char **)&next) - || __kmp_match_str("dups", buf, (const char **)&next)) { - set_dups( TRUE ); - buf = next; - } else if (__kmp_match_str("noduplicates", buf, (const char **)&next) - || __kmp_match_str("nodups", buf, (const char **)&next)) { - set_dups( FALSE ); - buf = next; - } else if (__kmp_match_str("granularity", buf, (const char **)&next) - || __kmp_match_str("gran", buf, (const char **)&next)) { - SKIP_WS(next); - if (*next != '=') { - EMIT_WARN( TRUE, ( AffInvalidParam, name, start ) ); - continue; - } - next++; // skip '=' - SKIP_WS(next); - - buf = next; - if (__kmp_match_str("fine", buf, (const char **)&next)) { - set_gran( affinity_gran_fine, -1 ); - buf = next; - } else if (__kmp_match_str("thread", buf, (const char **)&next)) { - set_gran( affinity_gran_thread, -1 ); - buf = next; - } else if (__kmp_match_str("core", buf, (const char **)&next)) { - set_gran( affinity_gran_core, -1 ); - buf = next; - } else if (__kmp_match_str("package", buf, (const char **)&next)) { - set_gran( affinity_gran_package, -1 ); - buf = next; - } else if (__kmp_match_str("node", buf, (const char **)&next)) { - set_gran( affinity_gran_node, -1 ); - buf = next; -# if KMP_GROUP_AFFINITY - } else if (__kmp_match_str("group", buf, (const char **)&next)) { - set_gran( affinity_gran_group, -1 ); - buf = next; -# endif /* KMP_GROUP AFFINITY */ - } else if ((*buf >= '0') && (*buf <= '9')) { - int n; - next = buf; - SKIP_DIGITS(next); - n = __kmp_str_to_int( buf, *next ); - KMP_ASSERT(n >= 0); - buf = next; - set_gran( affinity_gran_default, n ); - } else { - EMIT_WARN( TRUE, ( AffInvalidParam, name, start ) ); - continue; - } - } else if (__kmp_match_str("proclist", buf, (const char **)&next)) { - char *temp_proclist; - - SKIP_WS(next); - if (*next != '=') { - EMIT_WARN( TRUE, ( AffInvalidParam, name, start ) ); - continue; - } - next++; // skip '=' - SKIP_WS(next); - if (*next != '[') { - EMIT_WARN( TRUE, ( AffInvalidParam, name, start ) ); - continue; - } - next++; // skip '[' - buf = next; - if (! __kmp_parse_affinity_proc_id_list(name, buf, - (const char **)&next, &temp_proclist)) { - // - // warning already emitted. - // - SKIP_TO(next, ']'); - if (*next == ']') next++; - SKIP_TO(next, ','); - if (*next == ',') next++; - buf = next; - continue; - } - if (*next != ']') { - EMIT_WARN( TRUE, ( AffInvalidParam, name, start ) ); - continue; - } - next++; // skip ']' - set_proclist( temp_proclist ); - } else if ((*buf >= '0') && (*buf <= '9')) { - // Parse integer numbers -- permute and offset. 
-            int n;
-            next = buf;
-            SKIP_DIGITS(next);
-            n = __kmp_str_to_int( buf, *next );
-            KMP_ASSERT(n >= 0);
-            buf = next;
-            if ( count < 2 ) {
-                number[ count ] = n;
-            } else {
-                KMP_WARNING( AffManyParams, name, start );
-            }; // if
-            ++ count;
-        } else {
-            EMIT_WARN( TRUE, ( AffInvalidParam, name, start ) );
-            continue;
-        }
-
-        SKIP_WS(next);
-        if (*next == ',') {
-            next++;
-            SKIP_WS(next);
-        }
-        else if (*next != '\0') {
-            const char *temp = next;
-            EMIT_WARN( TRUE, ( ParseExtraCharsWarn, name, temp ) );
-            continue;
-        }
-        buf = next;
-    } // while
-
-    #undef EMIT_WARN
-    #undef _set_param
-    #undef set_type
-    #undef set_verbose
-    #undef set_warnings
-    #undef set_respect
-    #undef set_dups
-    #undef set_proclist
-    #undef set_gran
-
-    __kmp_str_free((const char **) &buffer);
-
-    if ( proclist ) {
-        if ( ! type ) {
-            KMP_WARNING( AffProcListNoType, name );
-            __kmp_affinity_type = affinity_explicit;
-        }
-        else if ( __kmp_affinity_type != affinity_explicit ) {
-            KMP_WARNING( AffProcListNotExplicit, name );
-            KMP_ASSERT( *out_proclist != NULL );
-            KMP_INTERNAL_FREE( *out_proclist );
-            *out_proclist = NULL;
-        }
-    }
-    switch ( *out_type ) {
-        case affinity_logical:
-        case affinity_physical: {
-            if ( count > 0 ) {
-                *out_offset = number[ 0 ];
-            }; // if
-            if ( count > 1 ) {
-                KMP_WARNING( AffManyParamsForLogic, name, number[ 1 ] );
-            }; // if
-        } break;
-        case affinity_balanced: {
-            if ( count > 0 ) {
-                *out_compact = number[ 0 ];
-            }; // if
-            if ( count > 1 ) {
-                *out_offset = number[ 1 ];
-            }; // if
-
-            if ( __kmp_affinity_gran == affinity_gran_default ) {
-#if KMP_ARCH_X86_64 && (KMP_OS_LINUX || KMP_OS_WINDOWS)
-                if( __kmp_mic_type != non_mic ) {
-                    if( __kmp_affinity_verbose || __kmp_affinity_warnings ) {
-                        KMP_WARNING( AffGranUsing, "KMP_AFFINITY", "fine" );
-                    }
-                    __kmp_affinity_gran = affinity_gran_fine;
-                } else
-#endif
-                {
-                    if( __kmp_affinity_verbose || __kmp_affinity_warnings ) {
-                        KMP_WARNING( AffGranUsing, "KMP_AFFINITY", "core" );
-                    }
-                    __kmp_affinity_gran = affinity_gran_core;
-                }
-            }
-        } break;
-        case affinity_scatter:
-        case affinity_compact: {
-            if ( count > 0 ) {
-                *out_compact = number[ 0 ];
-            }; // if
-            if ( count > 1 ) {
-                *out_offset = number[ 1 ];
-            }; // if
-        } break;
-        case affinity_explicit: {
-            if ( *out_proclist == NULL ) {
-                KMP_WARNING( AffNoProcList, name );
-                __kmp_affinity_type = affinity_none;
-            }
-            if ( count > 0 ) {
-                KMP_WARNING( AffNoParam, name, "explicit" );
-            }
-        } break;
-        case affinity_none: {
-            if ( count > 0 ) {
-                KMP_WARNING( AffNoParam, name, "none" );
-            }; // if
-        } break;
-        case affinity_disabled: {
-            if ( count > 0 ) {
-                KMP_WARNING( AffNoParam, name, "disabled" );
-            }; // if
-        } break;
-        case affinity_default: {
-            if ( count > 0 ) {
-                KMP_WARNING( AffNoParam, name, "default" );
-            }; // if
-        } break;
-        default: {
-            KMP_ASSERT( 0 );
-        };
-    }; // switch
-} // __kmp_parse_affinity_env
-
-static void
-__kmp_stg_parse_affinity( char const * name, char const * value, void * data )
-{
-    kmp_setting_t **rivals = (kmp_setting_t **) data;
-    int rc;
-
-    rc = __kmp_stg_check_rivals( name, value, rivals );
-    if ( rc ) {
-        return;
-    }
-
-    __kmp_parse_affinity_env( name, value, & __kmp_affinity_type,
-      & __kmp_affinity_proclist, & __kmp_affinity_verbose,
-      & __kmp_affinity_warnings, & __kmp_affinity_respect_mask,
-      & __kmp_affinity_gran, & __kmp_affinity_gran_levels,
-      & __kmp_affinity_dups, & __kmp_affinity_compact,
-      & __kmp_affinity_offset );
-
-} // __kmp_stg_parse_affinity
-
-static void
-__kmp_stg_print_affinity( kmp_str_buf_t * buffer, char const * name, void * data ) {
-    if( __kmp_env_format ) {
- KMP_STR_BUF_PRINT_NAME_EX(name); - } else { - __kmp_str_buf_print( buffer, " %s='", name ); - } - if ( __kmp_affinity_verbose ) { - __kmp_str_buf_print( buffer, "%s,", "verbose"); - } else { - __kmp_str_buf_print( buffer, "%s,", "noverbose"); - } - if ( __kmp_affinity_warnings ) { - __kmp_str_buf_print( buffer, "%s,", "warnings"); - } else { - __kmp_str_buf_print( buffer, "%s,", "nowarnings"); - } - if ( KMP_AFFINITY_CAPABLE() ) { - if ( __kmp_affinity_respect_mask ) { - __kmp_str_buf_print( buffer, "%s,", "respect"); - } else { - __kmp_str_buf_print( buffer, "%s,", "norespect"); - } - switch ( __kmp_affinity_gran ) { - case affinity_gran_default: - __kmp_str_buf_print( buffer, "%s", "granularity=default,"); - break; - case affinity_gran_fine: - __kmp_str_buf_print( buffer, "%s", "granularity=fine,"); - break; - case affinity_gran_thread: - __kmp_str_buf_print( buffer, "%s", "granularity=thread,"); - break; - case affinity_gran_core: - __kmp_str_buf_print( buffer, "%s", "granularity=core,"); - break; - case affinity_gran_package: - __kmp_str_buf_print( buffer, "%s", "granularity=package,"); - break; - case affinity_gran_node: - __kmp_str_buf_print( buffer, "%s", "granularity=node,"); - break; -# if KMP_GROUP_AFFINITY - case affinity_gran_group: - __kmp_str_buf_print( buffer, "%s", "granularity=group,"); - break; -# endif /* KMP_GROUP_AFFINITY */ - } - if ( __kmp_affinity_dups ) { - __kmp_str_buf_print( buffer, "%s,", "duplicates"); - } else { - __kmp_str_buf_print( buffer, "%s,", "noduplicates"); - } - } - if ( ! KMP_AFFINITY_CAPABLE() ) { - __kmp_str_buf_print( buffer, "%s", "disabled" ); - } - else switch ( __kmp_affinity_type ){ - case affinity_none: - __kmp_str_buf_print( buffer, "%s", "none"); - break; - case affinity_physical: - __kmp_str_buf_print( buffer, "%s,%d", "physical", - __kmp_affinity_offset ); - break; - case affinity_logical: - __kmp_str_buf_print( buffer, "%s,%d", "logical", - __kmp_affinity_offset ); - break; - case affinity_compact: - __kmp_str_buf_print( buffer, "%s,%d,%d", "compact", - __kmp_affinity_compact, __kmp_affinity_offset ); - break; - case affinity_scatter: - __kmp_str_buf_print( buffer, "%s,%d,%d", "scatter", - __kmp_affinity_compact, __kmp_affinity_offset ); - break; - case affinity_explicit: - __kmp_str_buf_print( buffer, "%s=[%s],%s", "proclist", - __kmp_affinity_proclist, "explicit" ); - break; - case affinity_balanced: - __kmp_str_buf_print( buffer, "%s,%d,%d", "balanced", - __kmp_affinity_compact, __kmp_affinity_offset ); - break; - case affinity_disabled: - __kmp_str_buf_print( buffer, "%s", "disabled"); - break; - case affinity_default: - __kmp_str_buf_print( buffer, "%s", "default"); - break; - default: - __kmp_str_buf_print( buffer, "%s", ""); - break; - } - __kmp_str_buf_print( buffer, "'\n" ); -} //__kmp_stg_print_affinity - -# ifdef KMP_GOMP_COMPAT - -static void -__kmp_stg_parse_gomp_cpu_affinity( char const * name, char const * value, void * data ) -{ - const char * next = NULL; - char * temp_proclist; - kmp_setting_t **rivals = (kmp_setting_t **) data; - int rc; - - rc = __kmp_stg_check_rivals( name, value, rivals ); - if ( rc ) { - return; - } - - if ( TCR_4(__kmp_init_middle) ) { - KMP_WARNING( EnvMiddleWarn, name ); - __kmp_env_toPrint( name, 0 ); - return; - } - - __kmp_env_toPrint( name, 1 ); - - if ( __kmp_parse_affinity_proc_id_list( name, value, &next, - &temp_proclist )) { - SKIP_WS(next); - if (*next == '\0') { - // - // GOMP_CPU_AFFINITY => granularity=fine,explicit,proclist=... 
- // - __kmp_affinity_proclist = temp_proclist; - __kmp_affinity_type = affinity_explicit; - __kmp_affinity_gran = affinity_gran_fine; -# if OMP_40_ENABLED - __kmp_nested_proc_bind.bind_types[0] = proc_bind_intel; -# endif - } - else { - KMP_WARNING( AffSyntaxError, name ); - if (temp_proclist != NULL) { - KMP_INTERNAL_FREE((void *)temp_proclist); - } - } - } - else { - // - // Warning already emitted - // - __kmp_affinity_type = affinity_none; -# if OMP_40_ENABLED - __kmp_nested_proc_bind.bind_types[0] = proc_bind_false; -# endif - } -} // __kmp_stg_parse_gomp_cpu_affinity - -# endif /* KMP_GOMP_COMPAT */ - - -# if OMP_40_ENABLED - -/*----------------------------------------------------------------------------- - -The OMP_PLACES proc id list parser. Here is the grammar: - -place_list := place -place_list := place , place_list -place := num -place := place : num -place := place : num : signed -place := { subplacelist } -place := ! place // (lowest priority) -subplace_list := subplace -subplace_list := subplace , subplace_list -subplace := num -subplace := num : num -subplace := num : num : signed -signed := num -signed := + signed -signed := - signed - ------------------------------------------------------------------------------*/ - -static int -__kmp_parse_subplace_list( const char *var, const char **scan ) -{ - const char *next; - - for (;;) { - int start, count, stride; - - // - // Read in the starting proc id - // - SKIP_WS(*scan); - if ((**scan < '0') || (**scan > '9')) { - KMP_WARNING( SyntaxErrorUsing, var, "\"threads\"" ); - return FALSE; - } - next = *scan; - SKIP_DIGITS(next); - start = __kmp_str_to_int(*scan, *next); - KMP_ASSERT(start >= 0); - *scan = next; - - // - // valid follow sets are ',' ':' and '}' - // - SKIP_WS(*scan); - if (**scan == '}') { - break; - } - if (**scan == ',') { - (*scan)++; // skip ',' - continue; - } - if (**scan != ':') { - KMP_WARNING( SyntaxErrorUsing, var, "\"threads\"" ); - return FALSE; - } - (*scan)++; // skip ':' - - // - // Read count parameter - // - SKIP_WS(*scan); - if ((**scan < '0') || (**scan > '9')) { - KMP_WARNING( SyntaxErrorUsing, var, "\"threads\"" ); - return FALSE; - } - next = *scan; - SKIP_DIGITS(next); - count = __kmp_str_to_int(*scan, *next); - KMP_ASSERT(count >= 0); - *scan = next; - - // - // valid follow sets are ',' ':' and '}' - // - SKIP_WS(*scan); - if (**scan == '}') { - break; - } - if (**scan == ',') { - (*scan)++; // skip ',' - continue; - } - if (**scan != ':') { - KMP_WARNING( SyntaxErrorUsing, var, "\"threads\"" ); - return FALSE; - } - (*scan)++; // skip ':' - - // - // Read stride parameter - // - int sign = +1; - for (;;) { - SKIP_WS(*scan); - if (**scan == '+') { - (*scan)++; // skip '+' - continue; - } - if (**scan == '-') { - sign *= -1; - (*scan)++; // skip '-' - continue; - } - break; - } - SKIP_WS(*scan); - if ((**scan < '0') || (**scan > '9')) { - KMP_WARNING( SyntaxErrorUsing, var, "\"threads\"" ); - return FALSE; - } - next = *scan; - SKIP_DIGITS(next); - stride = __kmp_str_to_int(*scan, *next); - KMP_ASSERT(stride >= 0); - *scan = next; - stride *= sign; - - // - // valid follow sets are ',' and '}' - // - SKIP_WS(*scan); - if (**scan == '}') { - break; - } - if (**scan == ',') { - (*scan)++; // skip ',' - continue; - } - - KMP_WARNING( SyntaxErrorUsing, var, "\"threads\"" ); - return FALSE; - } - return TRUE; -} - -static int -__kmp_parse_place( const char *var, const char ** scan ) -{ - const char *next; - - // - // valid follow sets are '{' '!' 
and num - // - SKIP_WS(*scan); - if (**scan == '{') { - (*scan)++; // skip '{' - if (! __kmp_parse_subplace_list(var, scan)) { - return FALSE; - } - if (**scan != '}') { - KMP_WARNING( SyntaxErrorUsing, var, "\"threads\"" ); - return FALSE; - } - (*scan)++; // skip '}' - } - else if (**scan == '!') { - (*scan)++; // skip '!' - return __kmp_parse_place(var, scan); //'!' has lower precedence than ':' - } - else if ((**scan >= '0') && (**scan <= '9')) { - next = *scan; - SKIP_DIGITS(next); - int proc = __kmp_str_to_int(*scan, *next); - KMP_ASSERT(proc >= 0); - *scan = next; - } - else { - KMP_WARNING( SyntaxErrorUsing, var, "\"threads\"" ); - return FALSE; - } - return TRUE; -} - -static int -__kmp_parse_place_list( const char *var, const char *env, char **place_list ) -{ - const char *scan = env; - const char *next = scan; - - for (;;) { - int start, count, stride; - - if (! __kmp_parse_place(var, &scan)) { - return FALSE; - } - - // - // valid follow sets are ',' ':' and EOL - // - SKIP_WS(scan); - if (*scan == '\0') { - break; - } - if (*scan == ',') { - scan++; // skip ',' - continue; - } - if (*scan != ':') { - KMP_WARNING( SyntaxErrorUsing, var, "\"threads\"" ); - return FALSE; - } - scan++; // skip ':' - - // - // Read count parameter - // - SKIP_WS(scan); - if ((*scan < '0') || (*scan > '9')) { - KMP_WARNING( SyntaxErrorUsing, var, "\"threads\"" ); - return FALSE; - } - next = scan; - SKIP_DIGITS(next); - count = __kmp_str_to_int(scan, *next); - KMP_ASSERT(count >= 0); - scan = next; - - // - // valid follow sets are ',' ':' and EOL - // - SKIP_WS(scan); - if (*scan == '\0') { - break; - } - if (*scan == ',') { - scan++; // skip ',' - continue; - } - if (*scan != ':') { - KMP_WARNING( SyntaxErrorUsing, var, "\"threads\"" ); - return FALSE; - } - scan++; // skip ':' - - // - // Read stride parameter - // - int sign = +1; - for (;;) { - SKIP_WS(scan); - if (*scan == '+') { - scan++; // skip '+' - continue; - } - if (*scan == '-') { - sign *= -1; - scan++; // skip '-' - continue; - } - break; - } - SKIP_WS(scan); - if ((*scan < '0') || (*scan > '9')) { - KMP_WARNING( SyntaxErrorUsing, var, "\"threads\"" ); - return FALSE; - } - next = scan; - SKIP_DIGITS(next); - stride = __kmp_str_to_int(scan, *next); - KMP_ASSERT(stride >= 0); - scan = next; - stride *= sign; - - // - // valid follow sets are ',' and EOL - // - SKIP_WS(scan); - if (*scan == '\0') { - break; - } - if (*scan == ',') { - scan++; // skip ',' - continue; - } - - KMP_WARNING( SyntaxErrorUsing, var, "\"threads\"" ); - return FALSE; - } - - { - int len = scan - env; - char *retlist = (char *)__kmp_allocate((len + 1) * sizeof(char)); - KMP_MEMCPY_S(retlist, (len+1)*sizeof(char), env, len * sizeof(char)); - retlist[len] = '\0'; - *place_list = retlist; - } - return TRUE; -} - -static void -__kmp_stg_parse_places( char const * name, char const * value, void * data ) -{ - int count; - const char *scan = value; - const char *next = scan; - const char *kind = "\"threads\""; - kmp_setting_t **rivals = (kmp_setting_t **) data; - int rc; - - rc = __kmp_stg_check_rivals( name, value, rivals ); - if ( rc ) { - return; - } - - // - // If OMP_PROC_BIND is not specified but OMP_PLACES is, - // then let OMP_PROC_BIND default to true. 
- // - if ( __kmp_nested_proc_bind.bind_types[0] == proc_bind_default ) { - __kmp_nested_proc_bind.bind_types[0] = proc_bind_true; - } - - //__kmp_affinity_num_places = 0; - - if ( __kmp_match_str( "threads", scan, &next ) ) { - scan = next; - __kmp_affinity_type = affinity_compact; - __kmp_affinity_gran = affinity_gran_thread; - __kmp_affinity_dups = FALSE; - kind = "\"threads\""; - } - else if ( __kmp_match_str( "cores", scan, &next ) ) { - scan = next; - __kmp_affinity_type = affinity_compact; - __kmp_affinity_gran = affinity_gran_core; - __kmp_affinity_dups = FALSE; - kind = "\"cores\""; - } - else if ( __kmp_match_str( "sockets", scan, &next ) ) { - scan = next; - __kmp_affinity_type = affinity_compact; - __kmp_affinity_gran = affinity_gran_package; - __kmp_affinity_dups = FALSE; - kind = "\"sockets\""; - } - else { - if ( __kmp_affinity_proclist != NULL ) { - KMP_INTERNAL_FREE( (void *)__kmp_affinity_proclist ); - __kmp_affinity_proclist = NULL; - } - if ( __kmp_parse_place_list( name, value, &__kmp_affinity_proclist ) ) { - __kmp_affinity_type = affinity_explicit; - __kmp_affinity_gran = affinity_gran_fine; - __kmp_affinity_dups = FALSE; - if ( __kmp_nested_proc_bind.bind_types[0] == proc_bind_default ) { - __kmp_nested_proc_bind.bind_types[0] = proc_bind_true; - } - } - return; - } - - if ( __kmp_nested_proc_bind.bind_types[0] == proc_bind_default ) { - __kmp_nested_proc_bind.bind_types[0] = proc_bind_true; - } - - SKIP_WS(scan); - if ( *scan == '\0' ) { - return; - } - - // - // Parse option count parameter in parentheses - // - if ( *scan != '(' ) { - KMP_WARNING( SyntaxErrorUsing, name, kind ); - return; - } - scan++; // skip '(' - - SKIP_WS(scan); - next = scan; - SKIP_DIGITS(next); - count = __kmp_str_to_int(scan, *next); - KMP_ASSERT(count >= 0); - scan = next; - - SKIP_WS(scan); - if ( *scan != ')' ) { - KMP_WARNING( SyntaxErrorUsing, name, kind ); - return; - } - scan++; // skip ')' - - SKIP_WS(scan); - if ( *scan != '\0' ) { - KMP_WARNING( ParseExtraCharsWarn, name, scan ); - } - __kmp_affinity_num_places = count; -} - -static void -__kmp_stg_print_places( kmp_str_buf_t * buffer, char const * name, - void * data ) -{ - if( __kmp_env_format ) { - KMP_STR_BUF_PRINT_NAME; - } else { - __kmp_str_buf_print( buffer, " %s", name ); - } - if ( ( __kmp_nested_proc_bind.used == 0 ) - || ( __kmp_nested_proc_bind.bind_types == NULL ) - || ( __kmp_nested_proc_bind.bind_types[0] == proc_bind_false ) ) { - __kmp_str_buf_print( buffer, ": %s\n", KMP_I18N_STR( NotDefined ) ); - } - else if ( __kmp_affinity_type == affinity_explicit ) { - if ( __kmp_affinity_proclist != NULL ) { - __kmp_str_buf_print( buffer, "='%s'\n", __kmp_affinity_proclist ); - } - else { - __kmp_str_buf_print( buffer, ": %s\n", KMP_I18N_STR( NotDefined ) ); - } - } - else if ( __kmp_affinity_type == affinity_compact ) { - int num; - if ( __kmp_affinity_num_masks > 0 ) { - num = __kmp_affinity_num_masks; - } - else if ( __kmp_affinity_num_places > 0 ) { - num = __kmp_affinity_num_places; - } - else { - num = 0; - } - if ( __kmp_affinity_gran == affinity_gran_thread ) { - if ( num > 0 ) { - __kmp_str_buf_print( buffer, "='threads(%d)'\n", num ); - } - else { - __kmp_str_buf_print( buffer, "='threads'\n" ); - } - } - else if ( __kmp_affinity_gran == affinity_gran_core ) { - if ( num > 0 ) { - __kmp_str_buf_print( buffer, "='cores(%d)' \n", num ); - } - else { - __kmp_str_buf_print( buffer, "='cores'\n" ); - } - } - else if ( __kmp_affinity_gran == affinity_gran_package ) { - if ( num > 0 ) { - __kmp_str_buf_print( buffer, 
"='sockets(%d)'\n", num ); - } - else { - __kmp_str_buf_print( buffer, "='sockets'\n" ); - } - } - else { - __kmp_str_buf_print( buffer, ": %s\n", KMP_I18N_STR( NotDefined ) ); - } - } - else { - __kmp_str_buf_print( buffer, ": %s\n", KMP_I18N_STR( NotDefined ) ); - } -} - -# endif /* OMP_40_ENABLED */ - -# if (! OMP_40_ENABLED) - -static void -__kmp_stg_parse_proc_bind( char const * name, char const * value, void * data ) -{ - int enabled; - kmp_setting_t **rivals = (kmp_setting_t **) data; - int rc; - - rc = __kmp_stg_check_rivals( name, value, rivals ); - if ( rc ) { - return; - } - - // - // in OMP 3.1, OMP_PROC_BIND is strictly a boolean - // - __kmp_stg_parse_bool( name, value, & enabled ); - if ( enabled ) { - // - // OMP_PROC_BIND => granularity=fine,scatter on MIC - // OMP_PROC_BIND => granularity=core,scatter elsewhere - // - __kmp_affinity_type = affinity_scatter; -# if KMP_ARCH_X86_64 && (KMP_OS_LINUX || KMP_OS_WINDOWS) - if( __kmp_mic_type != non_mic ) - __kmp_affinity_gran = affinity_gran_fine; - else -# endif - __kmp_affinity_gran = affinity_gran_core; - } - else { - __kmp_affinity_type = affinity_none; - } -} // __kmp_parse_proc_bind - -# endif /* if (! OMP_40_ENABLED) */ - - -static void -__kmp_stg_parse_topology_method( char const * name, char const * value, - void * data ) { - if ( __kmp_str_match( "all", 1, value ) ) { - __kmp_affinity_top_method = affinity_top_method_all; - } -# if KMP_ARCH_X86 || KMP_ARCH_X86_64 - else if ( __kmp_str_match( "x2apic id", 9, value ) - || __kmp_str_match( "x2apic_id", 9, value ) - || __kmp_str_match( "x2apic-id", 9, value ) - || __kmp_str_match( "x2apicid", 8, value ) - || __kmp_str_match( "cpuid leaf 11", 13, value ) - || __kmp_str_match( "cpuid_leaf_11", 13, value ) - || __kmp_str_match( "cpuid-leaf-11", 13, value ) - || __kmp_str_match( "cpuid leaf11", 12, value ) - || __kmp_str_match( "cpuid_leaf11", 12, value ) - || __kmp_str_match( "cpuid-leaf11", 12, value ) - || __kmp_str_match( "cpuidleaf 11", 12, value ) - || __kmp_str_match( "cpuidleaf_11", 12, value ) - || __kmp_str_match( "cpuidleaf-11", 12, value ) - || __kmp_str_match( "cpuidleaf11", 11, value ) - || __kmp_str_match( "cpuid 11", 8, value ) - || __kmp_str_match( "cpuid_11", 8, value ) - || __kmp_str_match( "cpuid-11", 8, value ) - || __kmp_str_match( "cpuid11", 7, value ) - || __kmp_str_match( "leaf 11", 7, value ) - || __kmp_str_match( "leaf_11", 7, value ) - || __kmp_str_match( "leaf-11", 7, value ) - || __kmp_str_match( "leaf11", 6, value ) ) { - __kmp_affinity_top_method = affinity_top_method_x2apicid; - } - else if ( __kmp_str_match( "apic id", 7, value ) - || __kmp_str_match( "apic_id", 7, value ) - || __kmp_str_match( "apic-id", 7, value ) - || __kmp_str_match( "apicid", 6, value ) - || __kmp_str_match( "cpuid leaf 4", 12, value ) - || __kmp_str_match( "cpuid_leaf_4", 12, value ) - || __kmp_str_match( "cpuid-leaf-4", 12, value ) - || __kmp_str_match( "cpuid leaf4", 11, value ) - || __kmp_str_match( "cpuid_leaf4", 11, value ) - || __kmp_str_match( "cpuid-leaf4", 11, value ) - || __kmp_str_match( "cpuidleaf 4", 11, value ) - || __kmp_str_match( "cpuidleaf_4", 11, value ) - || __kmp_str_match( "cpuidleaf-4", 11, value ) - || __kmp_str_match( "cpuidleaf4", 10, value ) - || __kmp_str_match( "cpuid 4", 7, value ) - || __kmp_str_match( "cpuid_4", 7, value ) - || __kmp_str_match( "cpuid-4", 7, value ) - || __kmp_str_match( "cpuid4", 6, value ) - || __kmp_str_match( "leaf 4", 6, value ) - || __kmp_str_match( "leaf_4", 6, value ) - || __kmp_str_match( "leaf-4", 6, value ) - || 
__kmp_str_match( "leaf4", 5, value ) ) { - __kmp_affinity_top_method = affinity_top_method_apicid; - } -# endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */ - else if ( __kmp_str_match( "/proc/cpuinfo", 2, value ) - || __kmp_str_match( "cpuinfo", 5, value )) { - __kmp_affinity_top_method = affinity_top_method_cpuinfo; - } -# if KMP_GROUP_AFFINITY - else if ( __kmp_str_match( "group", 1, value ) ) { - __kmp_affinity_top_method = affinity_top_method_group; - } -# endif /* KMP_GROUP_AFFINITY */ - else if ( __kmp_str_match( "flat", 1, value ) ) { - __kmp_affinity_top_method = affinity_top_method_flat; - } -# if KMP_USE_HWLOC - else if ( __kmp_str_match( "hwloc", 1, value) ) { - __kmp_affinity_top_method = affinity_top_method_hwloc; - } -# endif - else { - KMP_WARNING( StgInvalidValue, name, value ); - } -} // __kmp_stg_parse_topology_method - -static void -__kmp_stg_print_topology_method( kmp_str_buf_t * buffer, char const * name, - void * data ) { -# if KMP_DEBUG - char const * value = NULL; - - switch ( __kmp_affinity_top_method ) { - case affinity_top_method_default: - value = "default"; - break; - - case affinity_top_method_all: - value = "all"; - break; - -# if KMP_ARCH_X86 || KMP_ARCH_X86_64 - case affinity_top_method_x2apicid: - value = "x2APIC id"; - break; - - case affinity_top_method_apicid: - value = "APIC id"; - break; -# endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */ - - case affinity_top_method_cpuinfo: - value = "cpuinfo"; - break; - -# if KMP_GROUP_AFFINITY - case affinity_top_method_group: - value = "group"; - break; -# endif /* KMP_GROUP_AFFINITY */ - - case affinity_top_method_flat: - value = "flat"; - break; - } - - if ( value != NULL ) { - __kmp_stg_print_str( buffer, name, value ); - } -# endif /* KMP_DEBUG */ -} // __kmp_stg_print_topology_method - -#endif /* KMP_AFFINITY_SUPPORTED */ - - -#if OMP_40_ENABLED - -// -// OMP_PROC_BIND / bind-var is functional on all 4.0 builds, including OS X* -// OMP_PLACES / place-partition-var is not. -// -static void -__kmp_stg_parse_proc_bind( char const * name, char const * value, void * data ) -{ - kmp_setting_t **rivals = (kmp_setting_t **) data; - int rc; - - rc = __kmp_stg_check_rivals( name, value, rivals ); - if ( rc ) { - return; - } - - // - // in OMP 4.0 OMP_PROC_BIND is a vector of proc_bind types. 
- // - KMP_DEBUG_ASSERT( (__kmp_nested_proc_bind.bind_types != NULL) - && ( __kmp_nested_proc_bind.used > 0 ) ); - - const char *buf = value; - const char *next; - int num; - SKIP_WS( buf ); - if ( (*buf >= '0') && (*buf <= '9') ) { - next = buf; - SKIP_DIGITS( next ); - num = __kmp_str_to_int( buf, *next ); - KMP_ASSERT( num >= 0 ); - buf = next; - SKIP_WS( buf ); - } - else { - num = -1; - } - - next = buf; - if ( __kmp_match_str( "disabled", buf, &next ) ) { - buf = next; - SKIP_WS( buf ); -# if KMP_AFFINITY_SUPPORTED - __kmp_affinity_type = affinity_disabled; -# endif /* KMP_AFFINITY_SUPPORTED */ - __kmp_nested_proc_bind.used = 1; - __kmp_nested_proc_bind.bind_types[0] = proc_bind_false; - } - else if ( ( num == (int)proc_bind_false ) - || __kmp_match_str( "false", buf, &next ) ) { - buf = next; - SKIP_WS( buf ); -# if KMP_AFFINITY_SUPPORTED - __kmp_affinity_type = affinity_none; -# endif /* KMP_AFFINITY_SUPPORTED */ - __kmp_nested_proc_bind.used = 1; - __kmp_nested_proc_bind.bind_types[0] = proc_bind_false; - } - else if ( ( num == (int)proc_bind_true ) - || __kmp_match_str( "true", buf, &next ) ) { - buf = next; - SKIP_WS( buf ); - __kmp_nested_proc_bind.used = 1; - __kmp_nested_proc_bind.bind_types[0] = proc_bind_true; - } - else { - // - // Count the number of values in the env var string - // - const char *scan; - int nelem = 1; - for ( scan = buf; *scan != '\0'; scan++ ) { - if ( *scan == ',' ) { - nelem++; - } - } - - // - // Create / expand the nested proc_bind array as needed - // - if ( __kmp_nested_proc_bind.size < nelem ) { - __kmp_nested_proc_bind.bind_types = (kmp_proc_bind_t *) - KMP_INTERNAL_REALLOC( __kmp_nested_proc_bind.bind_types, - sizeof(kmp_proc_bind_t) * nelem ); - if ( __kmp_nested_proc_bind.bind_types == NULL ) { - KMP_FATAL( MemoryAllocFailed ); - } - __kmp_nested_proc_bind.size = nelem; - } - __kmp_nested_proc_bind.used = nelem; - - // - // Save values in the nested proc_bind array - // - int i = 0; - for (;;) { - enum kmp_proc_bind_t bind; - - if ( ( num == (int)proc_bind_master ) - || __kmp_match_str( "master", buf, &next ) ) { - buf = next; - SKIP_WS( buf ); - bind = proc_bind_master; - } - else if ( ( num == (int)proc_bind_close ) - || __kmp_match_str( "close", buf, &next ) ) { - buf = next; - SKIP_WS( buf ); - bind = proc_bind_close; - } - else if ( ( num == (int)proc_bind_spread ) - || __kmp_match_str( "spread", buf, &next ) ) { - buf = next; - SKIP_WS( buf ); - bind = proc_bind_spread; - } - else { - KMP_WARNING( StgInvalidValue, name, value ); - __kmp_nested_proc_bind.bind_types[0] = proc_bind_false; - __kmp_nested_proc_bind.used = 1; - return; - } - - __kmp_nested_proc_bind.bind_types[i++] = bind; - if ( i >= nelem ) { - break; - } - KMP_DEBUG_ASSERT( *buf == ',' ); - buf++; - SKIP_WS( buf ); - - // - // Read next value if it was specified as an integer - // - if ( (*buf >= '0') && (*buf <= '9') ) { - next = buf; - SKIP_DIGITS( next ); - num = __kmp_str_to_int( buf, *next ); - KMP_ASSERT( num >= 0 ); - buf = next; - SKIP_WS( buf ); - } - else { - num = -1; - } - } - SKIP_WS( buf ); - } - if ( *buf != '\0' ) { - KMP_WARNING( ParseExtraCharsWarn, name, buf ); - } -} - - -static void -__kmp_stg_print_proc_bind( kmp_str_buf_t * buffer, char const * name, - void * data ) -{ - int nelem = __kmp_nested_proc_bind.used; - if( __kmp_env_format ) { - KMP_STR_BUF_PRINT_NAME; - } else { - __kmp_str_buf_print( buffer, " %s", name ); - } - if ( nelem == 0 ) { - __kmp_str_buf_print( buffer, ": %s\n", KMP_I18N_STR( NotDefined ) ); - } - else { - int i; - 
__kmp_str_buf_print( buffer, "='", name ); - for ( i = 0; i < nelem; i++ ) { - switch ( __kmp_nested_proc_bind.bind_types[i] ) { - case proc_bind_false: - __kmp_str_buf_print( buffer, "false" ); - break; - - case proc_bind_true: - __kmp_str_buf_print( buffer, "true" ); - break; - - case proc_bind_master: - __kmp_str_buf_print( buffer, "master" ); - break; - - case proc_bind_close: - __kmp_str_buf_print( buffer, "close" ); - break; - - case proc_bind_spread: - __kmp_str_buf_print( buffer, "spread" ); - break; - - case proc_bind_intel: - __kmp_str_buf_print( buffer, "intel" ); - break; - - case proc_bind_default: - __kmp_str_buf_print( buffer, "default" ); - break; - } - if ( i < nelem - 1 ) { - __kmp_str_buf_print( buffer, "," ); - } - } - __kmp_str_buf_print( buffer, "'\n" ); - } -} - -#endif /* OMP_40_ENABLED */ - - -// ------------------------------------------------------------------------------------------------- -// OMP_DYNAMIC -// ------------------------------------------------------------------------------------------------- - -static void -__kmp_stg_parse_omp_dynamic( char const * name, char const * value, void * data ) -{ - __kmp_stg_parse_bool( name, value, & (__kmp_global.g.g_dynamic) ); -} // __kmp_stg_parse_omp_dynamic - -static void -__kmp_stg_print_omp_dynamic( kmp_str_buf_t * buffer, char const * name, void * data ) -{ - __kmp_stg_print_bool( buffer, name, __kmp_global.g.g_dynamic ); -} // __kmp_stg_print_omp_dynamic - -static void -__kmp_stg_parse_kmp_dynamic_mode( char const * name, char const * value, void * data ) -{ - if ( TCR_4(__kmp_init_parallel) ) { - KMP_WARNING( EnvParallelWarn, name ); - __kmp_env_toPrint( name, 0 ); - return; - } -#ifdef USE_LOAD_BALANCE - else if ( __kmp_str_match( "load balance", 2, value ) - || __kmp_str_match( "load_balance", 2, value ) - || __kmp_str_match( "load-balance", 2, value ) - || __kmp_str_match( "loadbalance", 2, value ) - || __kmp_str_match( "balance", 1, value ) ) { - __kmp_global.g.g_dynamic_mode = dynamic_load_balance; - } -#endif /* USE_LOAD_BALANCE */ - else if ( __kmp_str_match( "thread limit", 1, value ) - || __kmp_str_match( "thread_limit", 1, value ) - || __kmp_str_match( "thread-limit", 1, value ) - || __kmp_str_match( "threadlimit", 1, value ) - || __kmp_str_match( "limit", 2, value ) ) { - __kmp_global.g.g_dynamic_mode = dynamic_thread_limit; - } - else if ( __kmp_str_match( "random", 1, value ) ) { - __kmp_global.g.g_dynamic_mode = dynamic_random; - } - else { - KMP_WARNING( StgInvalidValue, name, value ); - } -} //__kmp_stg_parse_kmp_dynamic_mode - -static void -__kmp_stg_print_kmp_dynamic_mode( kmp_str_buf_t * buffer, char const * name, void * data ) -{ -#if KMP_DEBUG - if ( __kmp_global.g.g_dynamic_mode == dynamic_default ) { - __kmp_str_buf_print( buffer, " %s: %s \n", name, KMP_I18N_STR( NotDefined ) ); - } -# ifdef USE_LOAD_BALANCE - else if ( __kmp_global.g.g_dynamic_mode == dynamic_load_balance ) { - __kmp_stg_print_str( buffer, name, "load balance" ); - } -# endif /* USE_LOAD_BALANCE */ - else if ( __kmp_global.g.g_dynamic_mode == dynamic_thread_limit ) { - __kmp_stg_print_str( buffer, name, "thread limit" ); - } - else if ( __kmp_global.g.g_dynamic_mode == dynamic_random ) { - __kmp_stg_print_str( buffer, name, "random" ); - } - else { - KMP_ASSERT(0); - } -#endif /* KMP_DEBUG */ -} // __kmp_stg_print_kmp_dynamic_mode - - -#ifdef USE_LOAD_BALANCE - -// ------------------------------------------------------------------------------------------------- -// KMP_LOAD_BALANCE_INTERVAL -// 
------------------------------------------------------------------------------------------------- - -static void -__kmp_stg_parse_ld_balance_interval( char const * name, char const * value, void * data ) -{ - double interval = __kmp_convert_to_double( value ); - if ( interval >= 0 ) { - __kmp_load_balance_interval = interval; - } else { - KMP_WARNING( StgInvalidValue, name, value ); - }; // if -} // __kmp_stg_parse_load_balance_interval - -static void -__kmp_stg_print_ld_balance_interval( kmp_str_buf_t * buffer, char const * name, void * data ) { -#if KMP_DEBUG - __kmp_str_buf_print( buffer, " %s=%8.6f\n", name, __kmp_load_balance_interval ); -#endif /* KMP_DEBUG */ -} // __kmp_stg_print_load_balance_interval - -#endif /* USE_LOAD_BALANCE */ - -// ------------------------------------------------------------------------------------------------- -// KMP_INIT_AT_FORK -// ------------------------------------------------------------------------------------------------- - -static void -__kmp_stg_parse_init_at_fork( char const * name, char const * value, void * data ) { - __kmp_stg_parse_bool( name, value, & __kmp_need_register_atfork ); - if ( __kmp_need_register_atfork ) { - __kmp_need_register_atfork_specified = TRUE; - }; -} // __kmp_stg_parse_init_at_fork - -static void -__kmp_stg_print_init_at_fork( kmp_str_buf_t * buffer, char const * name, void * data ) { - __kmp_stg_print_bool( buffer, name, __kmp_need_register_atfork_specified ); -} // __kmp_stg_print_init_at_fork - -// ------------------------------------------------------------------------------------------------- -// KMP_SCHEDULE -// ------------------------------------------------------------------------------------------------- - -static void -__kmp_stg_parse_schedule( char const * name, char const * value, void * data ) { - - if ( value != NULL ) { - size_t length = KMP_STRLEN( value ); - if ( length > INT_MAX ) { - KMP_WARNING( LongValue, name ); - } else { - char *semicolon; - if( value[ length - 1 ] == '"' || value[ length -1 ] == '\'' ) - KMP_WARNING( UnbalancedQuotes, name ); - do { - char sentinel; - - semicolon = (char *) strchr( value, ';' ); - if( *value && semicolon != value ) { - char *comma = (char *) strchr( value, ',' ); - - if ( comma ) { - ++comma; - sentinel = ','; - } else - sentinel = ';'; - if ( !__kmp_strcasecmp_with_sentinel( "static", value, sentinel ) ) { - if( !__kmp_strcasecmp_with_sentinel( "greedy", comma, ';' ) ) { - __kmp_static = kmp_sch_static_greedy; - continue; - } else if( !__kmp_strcasecmp_with_sentinel( "balanced", comma, ';' ) ) { - __kmp_static = kmp_sch_static_balanced; - continue; - } - } else if ( !__kmp_strcasecmp_with_sentinel( "guided", value, sentinel ) ) { - if ( !__kmp_strcasecmp_with_sentinel( "iterative", comma, ';' ) ) { - __kmp_guided = kmp_sch_guided_iterative_chunked; - continue; - } else if ( !__kmp_strcasecmp_with_sentinel( "analytical", comma, ';' ) ) { - /* analytical not allowed for too many threads */ - __kmp_guided = kmp_sch_guided_analytical_chunked; - continue; - } - } - KMP_WARNING( InvalidClause, name, value ); - } else - KMP_WARNING( EmptyClause, name ); - } while ( (value = semicolon ? 
semicolon + 1 : NULL) ); - } - }; // if - -} // __kmp_stg_parse_schedule - -static void -__kmp_stg_print_schedule( kmp_str_buf_t * buffer, char const * name, void * data ) { - if( __kmp_env_format ) { - KMP_STR_BUF_PRINT_NAME_EX(name); - } else { - __kmp_str_buf_print( buffer, " %s='", name ); - } - if ( __kmp_static == kmp_sch_static_greedy ) { - __kmp_str_buf_print( buffer, "%s", "static,greedy"); - } else if ( __kmp_static == kmp_sch_static_balanced ) { - __kmp_str_buf_print ( buffer, "%s", "static,balanced"); - } - if ( __kmp_guided == kmp_sch_guided_iterative_chunked ) { - __kmp_str_buf_print( buffer, ";%s'\n", "guided,iterative"); - } else if ( __kmp_guided == kmp_sch_guided_analytical_chunked ) { - __kmp_str_buf_print( buffer, ";%s'\n", "guided,analytical"); - } -} // __kmp_stg_print_schedule
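// Editor's illustration, not part of the patch: KMP_SCHEDULE, handled above,
// takes semicolon-separated clauses of the form "<kind>,<modifier>", e.g.
// KMP_SCHEDULE="static,greedy;guided,analytical". A minimal standalone sketch
// of that two-level split (all names here are hypothetical):
#include <iostream>
#include <sstream>
#include <string>

int main() {
    std::string value = "static,greedy;guided,analytical";
    std::stringstream clauses(value);
    std::string clause;
    while (std::getline(clauses, clause, ';')) {      // one clause per ';'
        std::size_t comma = clause.find(',');
        std::cout << clause.substr(0, comma) << " -> "
                  << clause.substr(comma + 1) << '\n';
    }
    // prints: static -> greedy
    //         guided -> analytical
}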
 - -// ------------------------------------------------------------------------------------------------- -// OMP_SCHEDULE -// ------------------------------------------------------------------------------------------------- - -static void -__kmp_stg_parse_omp_schedule( char const * name, char const * value, void * data ) -{ - size_t length; - if( value ) { - length = KMP_STRLEN( value ); - if( length ) { - char *comma = (char *) strchr( value, ',' ); - if( value[ length - 1 ] == '"' || value[ length -1 ] == '\'') - KMP_WARNING( UnbalancedQuotes, name ); - /* get the specified scheduling style */ - if (!__kmp_strcasecmp_with_sentinel("dynamic", value, ',')) /* DYNAMIC */ - __kmp_sched = kmp_sch_dynamic_chunked; - else if (!__kmp_strcasecmp_with_sentinel("guided", value, ',')) /* GUIDED */ - __kmp_sched = kmp_sch_guided_chunked; -// AC: TODO: add AUTO schedule, and probably remove TRAPEZOIDAL (OMP 3.0 does not allow it) - else if (!__kmp_strcasecmp_with_sentinel("auto", value, ',')) { /* AUTO */ - __kmp_sched = kmp_sch_auto; - if( comma ) { - __kmp_msg( kmp_ms_warning, KMP_MSG( IgnoreChunk, name, comma ), __kmp_msg_null ); - comma = NULL; - } - } - else if (!__kmp_strcasecmp_with_sentinel("trapezoidal", value, ',')) /* TRAPEZOIDAL */ - __kmp_sched = kmp_sch_trapezoidal; - else if (!__kmp_strcasecmp_with_sentinel("static", value, ',')) /* STATIC */ - __kmp_sched = kmp_sch_static; -#if KMP_STATIC_STEAL_ENABLED - else if (!__kmp_strcasecmp_with_sentinel("static_steal", value, ',')) - __kmp_sched = kmp_sch_static_steal; -#endif - else { - KMP_WARNING( StgInvalidValue, name, value ); - value = NULL; /* skip processing of comma */ - } - if( value && comma ) { - __kmp_env_chunk = TRUE; - - if(__kmp_sched == kmp_sch_static) - __kmp_sched = kmp_sch_static_chunked; - ++comma; - __kmp_chunk = __kmp_str_to_int( comma, 0 ); - if ( __kmp_chunk < 1 ) { - __kmp_chunk = KMP_DEFAULT_CHUNK; - __kmp_msg( kmp_ms_warning, KMP_MSG( InvalidChunk, name, comma ), __kmp_msg_null ); - KMP_INFORM( Using_int_Value, name, __kmp_chunk ); -// AC: next block commented out until KMP_DEFAULT_CHUNK != KMP_MIN_CHUNK (to improve code coverage :) -// The default chunk size is 1 according to the standard, thus making KMP_MIN_CHUNK not 1 would introduce a mess: -// a wrong chunk becomes 1, but it would be impossible to explicitly set 1, because it becomes KMP_MIN_CHUNK... -// } else if ( __kmp_chunk < KMP_MIN_CHUNK ) { -// __kmp_chunk = KMP_MIN_CHUNK; - } else if ( __kmp_chunk > KMP_MAX_CHUNK ) { - __kmp_chunk = KMP_MAX_CHUNK; - __kmp_msg( kmp_ms_warning, KMP_MSG( LargeChunk, name, comma ), __kmp_msg_null ); - KMP_INFORM( Using_int_Value, name, __kmp_chunk ); - } - } else - __kmp_env_chunk = FALSE; - } else - KMP_WARNING( EmptyString, name ); - } - K_DIAG(1, ("__kmp_static == %d\n", __kmp_static)) - K_DIAG(1, ("__kmp_guided == %d\n", __kmp_guided)) - K_DIAG(1, ("__kmp_sched == %d\n", __kmp_sched)) - K_DIAG(1, ("__kmp_chunk == %d\n", __kmp_chunk)) -} // __kmp_stg_parse_omp_schedule - -static void -__kmp_stg_print_omp_schedule( kmp_str_buf_t * buffer, char const * name, void * data ) { - if( __kmp_env_format ) { - KMP_STR_BUF_PRINT_NAME_EX(name); - } else { - __kmp_str_buf_print( buffer, " %s='", name ); - } - if ( __kmp_chunk ) { - switch ( __kmp_sched ) { - case kmp_sch_dynamic_chunked: - __kmp_str_buf_print( buffer, "%s,%d'\n", "dynamic", __kmp_chunk); - break; - case kmp_sch_guided_iterative_chunked: - case kmp_sch_guided_analytical_chunked: - __kmp_str_buf_print( buffer, "%s,%d'\n", "guided", __kmp_chunk); - break; - case kmp_sch_trapezoidal: - __kmp_str_buf_print( buffer, "%s,%d'\n", "trapezoidal", __kmp_chunk); - break; - case kmp_sch_static: - case kmp_sch_static_chunked: - case kmp_sch_static_balanced: - case kmp_sch_static_greedy: - __kmp_str_buf_print( buffer, "%s,%d'\n", "static", __kmp_chunk); - break; - case kmp_sch_static_steal: - __kmp_str_buf_print( buffer, "%s,%d'\n", "static_steal", __kmp_chunk); - break; - case kmp_sch_auto: - __kmp_str_buf_print( buffer, "%s,%d'\n", "auto", __kmp_chunk); - break; - } - } else { - switch ( __kmp_sched ) { - case kmp_sch_dynamic_chunked: - __kmp_str_buf_print( buffer, "%s'\n", "dynamic"); - break; - case kmp_sch_guided_iterative_chunked: - case kmp_sch_guided_analytical_chunked: - __kmp_str_buf_print( buffer, "%s'\n", "guided"); - break; - case kmp_sch_trapezoidal: - __kmp_str_buf_print( buffer, "%s'\n", "trapezoidal"); - break; - case kmp_sch_static: - case kmp_sch_static_chunked: - case kmp_sch_static_balanced: - case kmp_sch_static_greedy: - __kmp_str_buf_print( buffer, "%s'\n", "static"); - break; - case kmp_sch_static_steal: - __kmp_str_buf_print( buffer, "%s'\n", "static_steal"); - break; - case kmp_sch_auto: - __kmp_str_buf_print( buffer, "%s'\n", "auto"); - break; - } - } -} // __kmp_stg_print_omp_schedule
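// Editor's illustration, not part of the patch: OMP_SCHEDULE, as parsed and
// printed above, has the form "<kind>[,<chunk>]"; a chunk below 1 falls back
// to the default and an oversized chunk is clamped. The bounds below are
// hypothetical stand-ins, not the runtime's real KMP_DEFAULT_CHUNK/KMP_MAX_CHUNK.
#include <algorithm>
#include <iostream>
#include <string>

int main() {
    const int default_chunk = 1, max_chunk = 1 << 20;        // assumed limits
    std::string value = "dynamic,4";                         // e.g. OMP_SCHEDULE=dynamic,4
    std::size_t comma = value.find(',');
    std::string kind = value.substr(0, comma);
    int chunk = (comma == std::string::npos)
                    ? 0 : std::stoi(value.substr(comma + 1));
    if (chunk < 1) chunk = default_chunk;                    // mirrors the InvalidChunk path
    chunk = std::min(chunk, max_chunk);                      // mirrors the LargeChunk clamp
    std::cout << kind << " chunk=" << chunk << '\n';         // prints: dynamic chunk=4
}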
 - -// ------------------------------------------------------------------------------------------------- -// KMP_ATOMIC_MODE -// ------------------------------------------------------------------------------------------------- - -static void -__kmp_stg_parse_atomic_mode( char const * name, char const * value, void * data ) { - // Modes: 0 -- do not change default; 1 -- Intel perf mode, 2 -- GOMP compatibility mode. - int mode = 0; - int max = 1; - #ifdef KMP_GOMP_COMPAT - max = 2; - #endif /* KMP_GOMP_COMPAT */ - __kmp_stg_parse_int( name, value, 0, max, & mode ); - // TODO: parse_int is not very suitable for this case. In case of overflow it is better to use - // 0 rather than the max value. - if ( mode > 0 ) { - __kmp_atomic_mode = mode; - }; // if -} // __kmp_stg_parse_atomic_mode - -static void -__kmp_stg_print_atomic_mode( kmp_str_buf_t * buffer, char const * name, void * data ) { - __kmp_stg_print_int( buffer, name, __kmp_atomic_mode ); -} // __kmp_stg_print_atomic_mode - - -// ------------------------------------------------------------------------------------------------- -// KMP_CONSISTENCY_CHECK -// ------------------------------------------------------------------------------------------------- - -static void -__kmp_stg_parse_consistency_check( char const * name, char const * value, void * data ) { - if ( ! __kmp_strcasecmp_with_sentinel( "all", value, 0 ) ) { - // Note, this will not work from kmp_set_defaults because the th_cons stack was not allocated - // for existing thread(s), thus the first __kmp_push_ will break with an assertion. - // TODO: allocate th_cons if called from kmp_set_defaults. - __kmp_env_consistency_check = TRUE; - } else if ( ! __kmp_strcasecmp_with_sentinel( "none", value, 0 ) ) { - __kmp_env_consistency_check = FALSE; - } else { - KMP_WARNING( StgInvalidValue, name, value ); - }; // if -} // __kmp_stg_parse_consistency_check - -static void -__kmp_stg_print_consistency_check( kmp_str_buf_t * buffer, char const * name, void * data ) { -#if KMP_DEBUG - const char *value = NULL; - - if ( __kmp_env_consistency_check ) { - value = "all"; - } else { - value = "none"; - } - - if ( value != NULL ) { - __kmp_stg_print_str( buffer, name, value ); - } -#endif /* KMP_DEBUG */ -} // __kmp_stg_print_consistency_check - - -#if USE_ITT_BUILD -// ------------------------------------------------------------------------------------------------- -// KMP_ITT_PREPARE_DELAY -// ------------------------------------------------------------------------------------------------- - -#if USE_ITT_NOTIFY - -static void -__kmp_stg_parse_itt_prepare_delay( char const * name, char const * value, void * data ) -{ - // Experimental code: KMP_ITT_PREPARE_DELAY specifies the number of loop iterations.
- int delay = 0; - __kmp_stg_parse_int( name, value, 0, INT_MAX, & delay ); - __kmp_itt_prepare_delay = delay; -} // __kmp_str_parse_itt_prepare_delay - -static void -__kmp_stg_print_itt_prepare_delay( kmp_str_buf_t * buffer, char const * name, void * data ) { - __kmp_stg_print_uint64( buffer, name, __kmp_itt_prepare_delay ); - -} // __kmp_str_print_itt_prepare_delay - -#endif // USE_ITT_NOTIFY -#endif /* USE_ITT_BUILD */ - -// ------------------------------------------------------------------------------------------------- -// KMP_MALLOC_POOL_INCR -// ------------------------------------------------------------------------------------------------- - -static void -__kmp_stg_parse_malloc_pool_incr( char const * name, char const * value, void * data ) { - __kmp_stg_parse_size( - name, - value, - KMP_MIN_MALLOC_POOL_INCR, - KMP_MAX_MALLOC_POOL_INCR, - NULL, - & __kmp_malloc_pool_incr, - 1 - ); -} // __kmp_stg_parse_malloc_pool_incr - -static void -__kmp_stg_print_malloc_pool_incr( kmp_str_buf_t * buffer, char const * name, void * data ) { - __kmp_stg_print_size( buffer, name, __kmp_malloc_pool_incr ); - -} // _kmp_stg_print_malloc_pool_incr - - -#ifdef KMP_DEBUG - -// ------------------------------------------------------------------------------------------------- -// KMP_PAR_RANGE -// ------------------------------------------------------------------------------------------------- - -static void -__kmp_stg_parse_par_range_env( char const * name, char const * value, void * data ) { - __kmp_stg_parse_par_range( - name, - value, - & __kmp_par_range, - __kmp_par_range_routine, - __kmp_par_range_filename, - & __kmp_par_range_lb, - & __kmp_par_range_ub - ); -} // __kmp_stg_parse_par_range_env - -static void -__kmp_stg_print_par_range_env( kmp_str_buf_t * buffer, char const * name, void * data ) { - if (__kmp_par_range != 0) { - __kmp_stg_print_str( buffer, name, par_range_to_print ); - } -} // __kmp_stg_print_par_range_env - -#if KMP_USE_MONITOR -// ------------------------------------------------------------------------------------------------- -// KMP_YIELD_CYCLE, KMP_YIELD_ON, KMP_YIELD_OFF -// ------------------------------------------------------------------------------------------------- - -static void -__kmp_stg_parse_yield_cycle( char const * name, char const * value, void * data ) { - int flag = __kmp_yield_cycle; - __kmp_stg_parse_bool( name, value, & flag ); - __kmp_yield_cycle = flag; -} // __kmp_stg_parse_yield_cycle - -static void -__kmp_stg_print_yield_cycle( kmp_str_buf_t * buffer, char const * name, void * data ) { - __kmp_stg_print_bool( buffer, name, __kmp_yield_cycle ); -} // __kmp_stg_print_yield_cycle - -static void -__kmp_stg_parse_yield_on( char const * name, char const * value, void * data ) { - __kmp_stg_parse_int( name, value, 2, INT_MAX, & __kmp_yield_on_count ); -} // __kmp_stg_parse_yield_on - -static void -__kmp_stg_print_yield_on( kmp_str_buf_t * buffer, char const * name, void * data ) { - __kmp_stg_print_int( buffer, name, __kmp_yield_on_count ); -} // __kmp_stg_print_yield_on - -static void -__kmp_stg_parse_yield_off( char const * name, char const * value, void * data ) { - __kmp_stg_parse_int( name, value, 2, INT_MAX, & __kmp_yield_off_count ); -} // __kmp_stg_parse_yield_off - -static void -__kmp_stg_print_yield_off( kmp_str_buf_t * buffer, char const * name, void * data ) { - __kmp_stg_print_int( buffer, name, __kmp_yield_off_count ); -} // __kmp_stg_print_yield_off -#endif // KMP_USE_MONITOR - -#endif - -// 
------------------------------------------------------------------------------------------------- -// KMP_INIT_WAIT, KMP_NEXT_WAIT -// ------------------------------------------------------------------------------------------------- - -static void -__kmp_stg_parse_init_wait( char const * name, char const * value, void * data ) { - int wait; - KMP_ASSERT( ( __kmp_init_wait & 1 ) == 0 ); - wait = __kmp_init_wait / 2; - __kmp_stg_parse_int( name, value, KMP_MIN_INIT_WAIT, KMP_MAX_INIT_WAIT, & wait ); - __kmp_init_wait = wait * 2; - KMP_ASSERT( ( __kmp_init_wait & 1 ) == 0 ); - __kmp_yield_init = __kmp_init_wait; -} // __kmp_stg_parse_init_wait - -static void -__kmp_stg_print_init_wait( kmp_str_buf_t * buffer, char const * name, void * data ) { - __kmp_stg_print_int( buffer, name, __kmp_init_wait ); -} // __kmp_stg_print_init_wait - -static void -__kmp_stg_parse_next_wait( char const * name, char const * value, void * data ) { - int wait; - KMP_ASSERT( ( __kmp_next_wait & 1 ) == 0 ); - wait = __kmp_next_wait / 2; - __kmp_stg_parse_int( name, value, KMP_MIN_NEXT_WAIT, KMP_MAX_NEXT_WAIT, & wait ); - __kmp_next_wait = wait * 2; - KMP_ASSERT( ( __kmp_next_wait & 1 ) == 0 ); - __kmp_yield_next = __kmp_next_wait; -} // __kmp_stg_parse_next_wait - -static void -__kmp_stg_print_next_wait( kmp_str_buf_t * buffer, char const * name, void * data ) { - __kmp_stg_print_int( buffer, name, __kmp_next_wait ); -} // __kmp_stg_print_next_wait - - -// ------------------------------------------------------------------------------------------------- -// KMP_GTID_MODE -// ------------------------------------------------------------------------------------------------- - -static void -__kmp_stg_parse_gtid_mode( char const * name, char const * value, void * data ) { - // - // Modes: - // 0 -- do not change default - // 1 -- sp search - // 2 -- use "keyed" TLS var, i.e. - // pthread_getspecific(Linux* OS/OS X*) or TlsGetValue(Windows* OS) - // 3 -- __declspec(thread) TLS var in tdata section - // - int mode = 0; - int max = 2; - #ifdef KMP_TDATA_GTID - max = 3; - #endif /* KMP_TDATA_GTID */ - __kmp_stg_parse_int( name, value, 0, max, & mode ); - // TODO: parse_int is not very suitable for this case. In case of overflow it is better to use - // 0 rather than the max value.
- if ( mode == 0 ) { - __kmp_adjust_gtid_mode = TRUE; - } - else { - __kmp_gtid_mode = mode; - __kmp_adjust_gtid_mode = FALSE; - }; // if -} // __kmp_str_parse_gtid_mode - -static void -__kmp_stg_print_gtid_mode( kmp_str_buf_t * buffer, char const * name, void * data ) { - if ( __kmp_adjust_gtid_mode ) { - __kmp_stg_print_int( buffer, name, 0 ); - } - else { - __kmp_stg_print_int( buffer, name, __kmp_gtid_mode ); - } -} // __kmp_stg_print_gtid_mode - -// ------------------------------------------------------------------------------------------------- -// KMP_NUM_LOCKS_IN_BLOCK -// ------------------------------------------------------------------------------------------------- - -static void -__kmp_stg_parse_lock_block( char const * name, char const * value, void * data ) { - __kmp_stg_parse_int( name, value, 0, KMP_INT_MAX, & __kmp_num_locks_in_block ); -} // __kmp_str_parse_lock_block - -static void -__kmp_stg_print_lock_block( kmp_str_buf_t * buffer, char const * name, void * data ) { - __kmp_stg_print_int( buffer, name, __kmp_num_locks_in_block ); -} // __kmp_stg_print_lock_block - -// ------------------------------------------------------------------------------------------------- -// KMP_LOCK_KIND -// ------------------------------------------------------------------------------------------------- - -#if KMP_USE_DYNAMIC_LOCK -# define KMP_STORE_LOCK_SEQ(a) (__kmp_user_lock_seq = lockseq_##a) -#else -# define KMP_STORE_LOCK_SEQ(a) -#endif - -static void -__kmp_stg_parse_lock_kind( char const * name, char const * value, void * data ) { - if ( __kmp_init_user_locks ) { - KMP_WARNING( EnvLockWarn, name ); - return; - } - - if ( __kmp_str_match( "tas", 2, value ) - || __kmp_str_match( "test and set", 2, value ) - || __kmp_str_match( "test_and_set", 2, value ) - || __kmp_str_match( "test-and-set", 2, value ) - || __kmp_str_match( "test andset", 2, value ) - || __kmp_str_match( "test_andset", 2, value ) - || __kmp_str_match( "test-andset", 2, value ) - || __kmp_str_match( "testand set", 2, value ) - || __kmp_str_match( "testand_set", 2, value ) - || __kmp_str_match( "testand-set", 2, value ) - || __kmp_str_match( "testandset", 2, value ) ) { - __kmp_user_lock_kind = lk_tas; - KMP_STORE_LOCK_SEQ(tas); - } -#if KMP_USE_FUTEX - else if ( __kmp_str_match( "futex", 1, value ) ) { - if ( __kmp_futex_determine_capable() ) { - __kmp_user_lock_kind = lk_futex; - KMP_STORE_LOCK_SEQ(futex); - } - else { - KMP_WARNING( FutexNotSupported, name, value ); - } - } -#endif - else if ( __kmp_str_match( "ticket", 2, value ) ) { - __kmp_user_lock_kind = lk_ticket; - KMP_STORE_LOCK_SEQ(ticket); - } - else if ( __kmp_str_match( "queuing", 1, value ) - || __kmp_str_match( "queue", 1, value ) ) { - __kmp_user_lock_kind = lk_queuing; - KMP_STORE_LOCK_SEQ(queuing); - } - else if ( __kmp_str_match( "drdpa ticket", 1, value ) - || __kmp_str_match( "drdpa_ticket", 1, value ) - || __kmp_str_match( "drdpa-ticket", 1, value ) - || __kmp_str_match( "drdpaticket", 1, value ) - || __kmp_str_match( "drdpa", 1, value ) ) { - __kmp_user_lock_kind = lk_drdpa; - KMP_STORE_LOCK_SEQ(drdpa); - } -#if KMP_USE_ADAPTIVE_LOCKS - else if ( __kmp_str_match( "adaptive", 1, value ) ) { - if( __kmp_cpuinfo.rtm ) { // ??? Is cpuinfo available here? 
- __kmp_user_lock_kind = lk_adaptive; - KMP_STORE_LOCK_SEQ(adaptive); - } else { - KMP_WARNING( AdaptiveNotSupported, name, value ); - __kmp_user_lock_kind = lk_queuing; - KMP_STORE_LOCK_SEQ(queuing); - } - } -#endif // KMP_USE_ADAPTIVE_LOCKS -#if KMP_USE_DYNAMIC_LOCK && KMP_USE_TSX - else if ( __kmp_str_match("rtm", 1, value) ) { - if ( __kmp_cpuinfo.rtm ) { - __kmp_user_lock_kind = lk_rtm; - KMP_STORE_LOCK_SEQ(rtm); - } else { - KMP_WARNING( AdaptiveNotSupported, name, value ); - __kmp_user_lock_kind = lk_queuing; - KMP_STORE_LOCK_SEQ(queuing); - } - } - else if ( __kmp_str_match("hle", 1, value) ) { - __kmp_user_lock_kind = lk_hle; - KMP_STORE_LOCK_SEQ(hle); - } -#endif - else { - KMP_WARNING( StgInvalidValue, name, value ); - } -} - -static void -__kmp_stg_print_lock_kind( kmp_str_buf_t * buffer, char const * name, void * data ) { - const char *value = NULL; - - switch ( __kmp_user_lock_kind ) { - case lk_default: - value = "default"; - break; - - case lk_tas: - value = "tas"; - break; - -#if KMP_USE_FUTEX - case lk_futex: - value = "futex"; - break; -#endif - -#if KMP_USE_DYNAMIC_LOCK && KMP_USE_TSX - case lk_rtm: - value = "rtm"; - break; - - case lk_hle: - value = "hle"; - break; -#endif - - case lk_ticket: - value = "ticket"; - break; - - case lk_queuing: - value = "queuing"; - break; - - case lk_drdpa: - value = "drdpa"; - break; -#if KMP_USE_ADAPTIVE_LOCKS - case lk_adaptive: - value = "adaptive"; - break; -#endif - } - - if ( value != NULL ) { - __kmp_stg_print_str( buffer, name, value ); - } -} - -// ------------------------------------------------------------------------------------------------- -// KMP_SPIN_BACKOFF_PARAMS -// ------------------------------------------------------------------------------------------------- - -// KMP_SPIN_BACKOFF_PARAMS=max_backoff[,min_tick] (max backoff size, min tick for machine pause) -static void -__kmp_stg_parse_spin_backoff_params(const char* name, const char* value, void* data) -{ - const char *next = value; - - int total = 0; // Count elements that were set. 
It'll be used as an array size - int prev_comma = FALSE; // For correct processing of sequential commas - int i; - - kmp_uint32 max_backoff = __kmp_spin_backoff_params.max_backoff; - kmp_uint32 min_tick = __kmp_spin_backoff_params.min_tick; - - // Run only 3 iterations because it is enough to read two values or find a syntax error - for ( i = 0; i < 3 ; i++) { - SKIP_WS( next ); - - if ( *next == '\0' ) { - break; - } - // Next character is not an integer or not a comma OR number of values > 2 => end of list - if ( ( ( *next < '0' || *next > '9' ) && *next !=',' ) || total > 2 ) { - KMP_WARNING( EnvSyntaxError, name, value ); - return; - } - // The next character is ',' - if ( *next == ',' ) { - // ',' is the first character - if ( total == 0 || prev_comma ) { - total++; - } - prev_comma = TRUE; - next++; // skip ',' - SKIP_WS( next ); - } - // Next character is a digit - if ( *next >= '0' && *next <= '9' ) { - int num; - const char *buf = next; - char const * msg = NULL; - prev_comma = FALSE; - SKIP_DIGITS( next ); - total++; - - const char *tmp = next; - SKIP_WS( tmp ); - if ( ( *next == ' ' || *next == '\t' ) && ( *tmp >= '0' && *tmp <= '9' ) ) { - KMP_WARNING( EnvSpacesNotAllowed, name, value ); - return; - } - - num = __kmp_str_to_int( buf, *next ); - if ( num <= 0 ) { // The number of retries should be > 0 - msg = KMP_I18N_STR( ValueTooSmall ); - num = 1; - } else if ( num > KMP_INT_MAX ) { - msg = KMP_I18N_STR( ValueTooLarge ); - num = KMP_INT_MAX; - } - if ( msg != NULL ) { - // Message is not empty. Print warning. - KMP_WARNING( ParseSizeIntWarn, name, value, msg ); - KMP_INFORM( Using_int_Value, name, num ); - } - if( total == 1 ) { - max_backoff = num; - } else if( total == 2 ) { - min_tick = num; - } - } - } - KMP_DEBUG_ASSERT( total > 0 ); - if( total <= 0 ) { - KMP_WARNING( EnvSyntaxError, name, value ); - return; - } - __kmp_spin_backoff_params.max_backoff = max_backoff; - __kmp_spin_backoff_params.min_tick = min_tick; -} - -static void -__kmp_stg_print_spin_backoff_params(kmp_str_buf_t *buffer, char const* name, void* data) -{ - if( __kmp_env_format ) { - KMP_STR_BUF_PRINT_NAME_EX(name); - } else { - __kmp_str_buf_print( buffer, " %s='", name ); - } - __kmp_str_buf_print( buffer, "%d,%d'\n", __kmp_spin_backoff_params.max_backoff, - __kmp_spin_backoff_params.min_tick ); -}
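// Editor's illustration, not part of the patch: KMP_SPIN_BACKOFF_PARAMS is
// "max_backoff[,min_tick]"; when the second field is omitted, the previous
// min_tick is kept, matching the parser above. Defaults here are hypothetical.
#include <cstdint>
#include <iostream>
#include <string>

int main() {
    std::uint32_t max_backoff = 16, min_tick = 100;          // assumed prior values
    std::string value = "64,10";                             // e.g. KMP_SPIN_BACKOFF_PARAMS=64,10
    std::size_t comma = value.find(',');
    max_backoff = std::stoul(value.substr(0, comma));
    if (comma != std::string::npos)
        min_tick = std::stoul(value.substr(comma + 1));      // second value is optional
    std::cout << max_backoff << ',' << min_tick << '\n';     // prints: 64,10
}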
 - -#if KMP_USE_ADAPTIVE_LOCKS - -// ------------------------------------------------------------------------------------------------- -// KMP_ADAPTIVE_LOCK_PROPS, KMP_SPECULATIVE_STATSFILE -// ------------------------------------------------------------------------------------------------- - -// Parse out values for the tunable parameters from a string of the form -// KMP_ADAPTIVE_LOCK_PROPS=max_soft_retries[,max_badness] -static void -__kmp_stg_parse_adaptive_lock_props( const char *name, const char *value, void *data ) -{ - int max_retries = 0; - int max_badness = 0; - - const char *next = value; - - int total = 0; // Count elements that were set. It'll be used as an array size - int prev_comma = FALSE; // For correct processing of sequential commas - int i; - - // Save values in the structure __kmp_adaptive_backoff_params - // Run only 3 iterations because it is enough to read two values or find a syntax error - for ( i = 0; i < 3 ; i++) { - SKIP_WS( next ); - - if ( *next == '\0' ) { - break; - } - // Next character is not an integer or not a comma OR number of values > 2 => end of list - if ( ( ( *next < '0' || *next > '9' ) && *next !=',' ) || total > 2 ) { - KMP_WARNING( EnvSyntaxError, name, value ); - return; - } - // The next character is ',' - if ( *next == ',' ) { - // ',' is the first character - if ( total == 0 || prev_comma ) { - total++; - } - prev_comma = TRUE; - next++; // skip ',' - SKIP_WS( next ); - } - // Next character is a digit - if ( *next >= '0' && *next <= '9' ) { - int num; - const char *buf = next; - char const * msg = NULL; - prev_comma = FALSE; - SKIP_DIGITS( next ); - total++; - - const char *tmp = next; - SKIP_WS( tmp ); - if ( ( *next == ' ' || *next == '\t' ) && ( *tmp >= '0' && *tmp <= '9' ) ) { - KMP_WARNING( EnvSpacesNotAllowed, name, value ); - return; - } - - num = __kmp_str_to_int( buf, *next ); - if ( num < 0 ) { // The number of retries should be >= 0 - msg = KMP_I18N_STR( ValueTooSmall ); - num = 1; - } else if ( num > KMP_INT_MAX ) { - msg = KMP_I18N_STR( ValueTooLarge ); - num = KMP_INT_MAX; - } - if ( msg != NULL ) { - // Message is not empty. Print warning. - KMP_WARNING( ParseSizeIntWarn, name, value, msg ); - KMP_INFORM( Using_int_Value, name, num ); - } - if( total == 1 ) { - max_retries = num; - } else if( total == 2 ) { - max_badness = num; - } - } - } - KMP_DEBUG_ASSERT( total > 0 ); - if( total <= 0 ) { - KMP_WARNING( EnvSyntaxError, name, value ); - return; - } - __kmp_adaptive_backoff_params.max_soft_retries = max_retries; - __kmp_adaptive_backoff_params.max_badness = max_badness; -} - - -static void -__kmp_stg_print_adaptive_lock_props(kmp_str_buf_t * buffer, char const * name, void * data ) -{ - if( __kmp_env_format ) { - KMP_STR_BUF_PRINT_NAME_EX(name); - } else { - __kmp_str_buf_print( buffer, " %s='", name ); - } - __kmp_str_buf_print( buffer, "%d,%d'\n", __kmp_adaptive_backoff_params.max_soft_retries, - __kmp_adaptive_backoff_params.max_badness ); -} // __kmp_stg_print_adaptive_lock_props - -#if KMP_DEBUG_ADAPTIVE_LOCKS - -static void -__kmp_stg_parse_speculative_statsfile( char const * name, char const * value, void * data ) { - __kmp_stg_parse_file( name, value, "", & __kmp_speculative_statsfile ); -} // __kmp_stg_parse_speculative_statsfile - -static void -__kmp_stg_print_speculative_statsfile( kmp_str_buf_t * buffer, char const * name, void * data ) { - if ( __kmp_str_match( "-", 0, __kmp_speculative_statsfile ) ) { - __kmp_stg_print_str( buffer, name, "stdout" ); - } else { - __kmp_stg_print_str( buffer, name, __kmp_speculative_statsfile ); - } - -} // __kmp_stg_print_speculative_statsfile - -#endif // KMP_DEBUG_ADAPTIVE_LOCKS - -#endif // KMP_USE_ADAPTIVE_LOCKS
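// Editor's illustration, not part of the patch: KMP_ADAPTIVE_LOCK_PROPS takes
// "max_soft_retries[,max_badness]". The values below are arbitrary examples;
// the variable is read once at runtime initialization, so it must be set
// before the first parallel region. setenv is POSIX.
#include <cstdlib>
#include <iostream>

int main() {
    setenv("KMP_ADAPTIVE_LOCK_PROPS", "3,9", 1);                 // hypothetical values
    std::cout << std::getenv("KMP_ADAPTIVE_LOCK_PROPS") << '\n'; // prints: 3,9
}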
 - -// ------------------------------------------------------------------------------------------------- -// KMP_HW_SUBSET (was KMP_PLACE_THREADS) -// ------------------------------------------------------------------------------------------------- - -static void -__kmp_stg_parse_hw_subset( char const * name, char const * value, void * data ) { - // Value example: 5Cx2Tx15O - // Which means "use 5 cores with offset 15, 2 threads per core" - // AC: extended to sockets level, examples of - // "use 2 sockets with offset 6, 2 cores with offset 2 per socket, 2 threads per core": - // 2s,6o,2c,2o,2t; 2s,6o,2c,2t,2o; 2s@6,2c@2,2t - // To not break legacy code core-offset can be last; - // postfix "o" or prefix @ can be offset designator. - // Note: not all syntax errors are analyzed, some may be skipped. -#define CHECK_DELIM(_x) (*(_x) == ',' || *(_x) == 'x') - static int parsed = 0; - int num; - int single_warning = 0; - int flagS = 0, flagC = 0, flagT = 0, flagSO = 0, flagCO = 0; - const char *next = value; - const char *prev; - - if( strcmp(name, "KMP_PLACE_THREADS") == 0 ) { - KMP_INFORM(EnvVarDeprecated,name,"KMP_HW_SUBSET"); - if( parsed == 1 ) { - return; // already parsed KMP_HW_SUBSET - } - } - parsed = 1; - - SKIP_WS(next); // skip white spaces - if (*next == '\0') - return; // no data provided, retain default values - // Get num_sockets first (or whatever specified) - if (*next >= '0' && *next <= '9') { - prev = next; - SKIP_DIGITS(next); - num = __kmp_str_to_int(prev, *next); - SKIP_WS(next); - if (*next == 's' || *next == 'S') { // e.g. "2s" - __kmp_place_num_sockets = num; - flagS = 1; // got num sockets - next++; - if (*next == '@') { // socket offset, e.g. "2s@4" - flagSO = 1; - prev = ++next; // don't allow spaces for simplicity - if (!(*next >= '0' && *next <= '9')) { - KMP_WARNING(AffHWSubsetInvalid, name, value); - return; - } - SKIP_DIGITS(next); - num = __kmp_str_to_int(prev, *next); - __kmp_place_socket_offset = num; - } - } else if (*next == 'c' || *next == 'C') { - __kmp_place_num_cores = num; - flagS = flagC = 1; // sockets were not specified - use default - next++; - if (*next == '@') { // core offset, e.g. "2c@6" - flagCO = 1; - prev = ++next; // don't allow spaces for simplicity - if (!(*next >= '0' && *next <= '9')) { - KMP_WARNING(AffHWSubsetInvalid, name, value); - return; - } - SKIP_DIGITS(next); - num = __kmp_str_to_int(prev, *next); - __kmp_place_core_offset = num; - } - } else if (CHECK_DELIM(next)) { - __kmp_place_num_cores = num; // no letter-designator - num cores - flagS = flagC = 1; // sockets were not specified - use default - next++; - } else if (*next == 't' || *next == 'T') { - __kmp_place_num_threads_per_core = num; - // sockets, cores were not specified - use default - return; // we ignore offset value in case all cores are used - } else if (*next == '\0') { - __kmp_place_num_cores = num; - return; // the only value provided - set num cores - } else { - KMP_WARNING(AffHWSubsetInvalid, name, value); - return; - } - } else { - KMP_WARNING(AffHWSubsetInvalid, name, value); - return; - } - KMP_DEBUG_ASSERT(flagS); // num sockets should already be set here - SKIP_WS(next); - if (*next == '\0') - return; // " n " - something like this - if (CHECK_DELIM(next)) { - next++; // skip delimiter - SKIP_WS(next); - } - - // Get second value (could be offset, num_cores, num_threads) - if (*next >= '0' && *next <= '9') { - prev = next; - SKIP_DIGITS(next); - num = __kmp_str_to_int(prev, *next); - SKIP_WS(next); - if (*next == 'c' || *next == 'C') { - KMP_DEBUG_ASSERT(flagC == 0); - __kmp_place_num_cores = num; - flagC = 1; - next++; - if (*next == '@') { // core offset, e.g. 
"2c@6" - flagCO = 1; - prev = ++next; // don't allow spaces for simplicity - if (!(*next >= '0' && *next <= '9')) { - KMP_WARNING(AffHWSubsetInvalid, name, value); - return; - } - SKIP_DIGITS(next); - num = __kmp_str_to_int(prev, *next); - __kmp_place_core_offset = num; - } - } else if (*next == 'o' || *next == 'O') { // offset specified - KMP_WARNING(AffHWSubsetDeprecated); - single_warning = 1; - if (flagC) { // whether num_cores already specified (sockets skipped) - KMP_DEBUG_ASSERT(!flagCO); // either "o" or @, not both - __kmp_place_core_offset = num; - } else { - KMP_DEBUG_ASSERT(!flagSO); // either "o" or @, not both - __kmp_place_socket_offset = num; - } - next++; - } else if (*next == 't' || *next == 'T') { - KMP_DEBUG_ASSERT(flagT == 0); - __kmp_place_num_threads_per_core = num; - flagC = 1; // num_cores could be skipped ? - flagT = 1; - next++; // can have core-offset specified after num threads - } else if (*next == '\0') { - KMP_DEBUG_ASSERT(flagC); // 4x2 means 4 cores 2 threads per core - __kmp_place_num_threads_per_core = num; - return; // two values provided without letter-designator - } else { - KMP_WARNING(AffHWSubsetInvalid, name, value); - return; - } - } else { - KMP_WARNING(AffHWSubsetInvalid, name, value); - return; - } - SKIP_WS(next); - if (*next == '\0') - return; // " Ns,Nc " - something like this - if (CHECK_DELIM(next)) { - next++; // skip delimiter - SKIP_WS(next); - } - - // Get third value (could be core-offset, num_cores, num_threads) - if (*next >= '0' && *next <= '9') { - prev = next; - SKIP_DIGITS(next); - num = __kmp_str_to_int(prev, *next); - SKIP_WS(next); - if (*next == 't' || *next == 'T') { - KMP_DEBUG_ASSERT(flagT == 0); - __kmp_place_num_threads_per_core = num; - if (flagC == 0) - return; // num_cores could be skipped (e.g. 2s,4o,2t) - flagT = 1; - next++; // can have core-offset specified later (e.g. 
2s,1c,2t,3o) - } else if (*next == 'c' || *next == 'C') { - KMP_DEBUG_ASSERT(flagC == 0); - __kmp_place_num_cores = num; - flagC = 1; - next++; - //KMP_DEBUG_ASSERT(*next != '@'); // socket offset used "o" designator - } else if (*next == 'o' || *next == 'O') { - KMP_WARNING(AffHWSubsetDeprecated); - single_warning = 1; - KMP_DEBUG_ASSERT(flagC); - //KMP_DEBUG_ASSERT(!flagSO); // socket offset couldn't use @ designator - __kmp_place_core_offset = num; - next++; - } else { - KMP_WARNING(AffHWSubsetInvalid, name, value); - return; - } - } else { - KMP_WARNING(AffHWSubsetInvalid, name, value); - return; - } - KMP_DEBUG_ASSERT(flagC); - SKIP_WS(next); - if ( *next == '\0' ) - return; - if (CHECK_DELIM(next)) { - next++; // skip delimiter - SKIP_WS(next); - } - - // Get 4-th value (could be core-offset, num_threads) - if (*next >= '0' && *next <= '9') { - prev = next; - SKIP_DIGITS(next); - num = __kmp_str_to_int(prev, *next); - SKIP_WS(next); - if (*next == 'o' || *next == 'O') { - if (!single_warning) { // warn once - KMP_WARNING(AffHWSubsetDeprecated); - } - KMP_DEBUG_ASSERT(!flagSO); // socket offset couldn't use @ designator - __kmp_place_core_offset = num; - next++; - } else if (*next == 't' || *next == 'T') { - KMP_DEBUG_ASSERT(flagT == 0); - __kmp_place_num_threads_per_core = num; - flagT = 1; - next++; // can have core-offset specified after num threads - } else { - KMP_WARNING(AffHWSubsetInvalid, name, value); - return; - } - } else { - KMP_WARNING(AffHWSubsetInvalid, name, value); - return; - } - SKIP_WS(next); - if ( *next == '\0' ) - return; - if (CHECK_DELIM(next)) { - next++; // skip delimiter - SKIP_WS(next); - } - - // Get 5-th value (could be core-offset, num_threads) - if (*next >= '0' && *next <= '9') { - prev = next; - SKIP_DIGITS(next); - num = __kmp_str_to_int(prev, *next); - SKIP_WS(next); - if (*next == 'o' || *next == 'O') { - if (!single_warning) { // warn once - KMP_WARNING(AffHWSubsetDeprecated); - } - KMP_DEBUG_ASSERT(flagT); - KMP_DEBUG_ASSERT(!flagSO); // socket offset couldn't use @ designator - __kmp_place_core_offset = num; - } else if (*next == 't' || *next == 'T') { - KMP_DEBUG_ASSERT(flagT == 0); - __kmp_place_num_threads_per_core = num; - } else { - KMP_WARNING(AffHWSubsetInvalid, name, value); - } - } else { - KMP_WARNING(AffHWSubsetInvalid, name, value); - } - return; -#undef CHECK_DELIM -} - -static void -__kmp_stg_print_hw_subset( kmp_str_buf_t * buffer, char const * name, void * data ) { - if (__kmp_place_num_sockets + __kmp_place_num_cores + __kmp_place_num_threads_per_core) { - int comma = 0; - kmp_str_buf_t buf; - __kmp_str_buf_init(&buf); - if(__kmp_env_format) - KMP_STR_BUF_PRINT_NAME_EX(name); - else - __kmp_str_buf_print(buffer, " %s='", name); - if (__kmp_place_num_sockets) { - __kmp_str_buf_print(&buf, "%ds", __kmp_place_num_sockets); - if (__kmp_place_socket_offset) - __kmp_str_buf_print(&buf, "@%d", __kmp_place_socket_offset); - comma = 1; - } - if (__kmp_place_num_cores) { - __kmp_str_buf_print(&buf, "%s%dc", comma?",":"", __kmp_place_num_cores); - if (__kmp_place_core_offset) - __kmp_str_buf_print(&buf, "@%d", __kmp_place_core_offset); - comma = 1; - } - if (__kmp_place_num_threads_per_core) - __kmp_str_buf_print(&buf, "%s%dt", comma?",":"", __kmp_place_num_threads_per_core); - __kmp_str_buf_print(buffer, "%s'\n", buf.str ); - __kmp_str_buf_free(&buf); -/* - } else { - __kmp_str_buf_print( buffer, " %s: %s \n", name, KMP_I18N_STR( NotDefined ) ); -*/ - } -} - -#if USE_ITT_BUILD -// 
------------------------------------------------------------------------------------------------- -// KMP_FORKJOIN_FRAMES -// ------------------------------------------------------------------------------------------------- - -static void -__kmp_stg_parse_forkjoin_frames( char const * name, char const * value, void * data ) { - __kmp_stg_parse_bool( name, value, & __kmp_forkjoin_frames ); -} // __kmp_stg_parse_forkjoin_frames - -static void -__kmp_stg_print_forkjoin_frames( kmp_str_buf_t * buffer, char const * name, void * data ) { - __kmp_stg_print_bool( buffer, name, __kmp_forkjoin_frames ); -} // __kmp_stg_print_forkjoin_frames - -// ------------------------------------------------------------------------------------------------- -// KMP_FORKJOIN_FRAMES_MODE -// ------------------------------------------------------------------------------------------------- - -static void -__kmp_stg_parse_forkjoin_frames_mode( char const * name, char const * value, void * data ) { - __kmp_stg_parse_int( name, value, 0, 3, & __kmp_forkjoin_frames_mode ); -} // __kmp_stg_parse_forkjoin_frames - -static void -__kmp_stg_print_forkjoin_frames_mode( kmp_str_buf_t * buffer, char const * name, void * data ) { - __kmp_stg_print_int( buffer, name, __kmp_forkjoin_frames_mode ); -} // __kmp_stg_print_forkjoin_frames -#endif /* USE_ITT_BUILD */ - -// ------------------------------------------------------------------------------------------------- -// OMP_DISPLAY_ENV -// ------------------------------------------------------------------------------------------------- - -#if OMP_40_ENABLED - -static void -__kmp_stg_parse_omp_display_env( char const * name, char const * value, void * data ) -{ - if ( __kmp_str_match( "VERBOSE", 1, value ) ) - { - __kmp_display_env_verbose = TRUE; - } else { - __kmp_stg_parse_bool( name, value, & __kmp_display_env ); - } - -} // __kmp_stg_parse_omp_display_env - -static void -__kmp_stg_print_omp_display_env( kmp_str_buf_t * buffer, char const * name, void * data ) -{ - if ( __kmp_display_env_verbose ) - { - __kmp_stg_print_str( buffer, name, "VERBOSE" ); - } else { - __kmp_stg_print_bool( buffer, name, __kmp_display_env ); - } -} // __kmp_stg_print_omp_display_env - -static void -__kmp_stg_parse_omp_cancellation( char const * name, char const * value, void * data ) { - if ( TCR_4(__kmp_init_parallel) ) { - KMP_WARNING( EnvParallelWarn, name ); - return; - } // read value before first parallel only - __kmp_stg_parse_bool( name, value, & __kmp_omp_cancellation ); -} // __kmp_stg_parse_omp_cancellation - -static void -__kmp_stg_print_omp_cancellation( kmp_str_buf_t * buffer, char const * name, void * data ) { - __kmp_stg_print_bool( buffer, name, __kmp_omp_cancellation ); -} // __kmp_stg_print_omp_cancellation - -#endif - -// ------------------------------------------------------------------------------------------------- -// Table. 
-// ------------------------------------------------------------------------------------------------- - - -static kmp_setting_t __kmp_stg_table[] = { - - { "KMP_ALL_THREADS", __kmp_stg_parse_all_threads, __kmp_stg_print_all_threads, NULL, 0, 0 }, - { "KMP_BLOCKTIME", __kmp_stg_parse_blocktime, __kmp_stg_print_blocktime, NULL, 0, 0 }, - { "KMP_DUPLICATE_LIB_OK", __kmp_stg_parse_duplicate_lib_ok, __kmp_stg_print_duplicate_lib_ok, NULL, 0, 0 }, - { "KMP_LIBRARY", __kmp_stg_parse_wait_policy, __kmp_stg_print_wait_policy, NULL, 0, 0 }, - { "KMP_MAX_THREADS", __kmp_stg_parse_all_threads, NULL, NULL, 0, 0 }, // For backward compatibility -#if KMP_USE_MONITOR - { "KMP_MONITOR_STACKSIZE", __kmp_stg_parse_monitor_stacksize, __kmp_stg_print_monitor_stacksize, NULL, 0, 0 }, -#endif - { "KMP_SETTINGS", __kmp_stg_parse_settings, __kmp_stg_print_settings, NULL, 0, 0 }, - { "KMP_STACKOFFSET", __kmp_stg_parse_stackoffset, __kmp_stg_print_stackoffset, NULL, 0, 0 }, - { "KMP_STACKSIZE", __kmp_stg_parse_stacksize, __kmp_stg_print_stacksize, NULL, 0, 0 }, - { "KMP_STACKPAD", __kmp_stg_parse_stackpad, __kmp_stg_print_stackpad, NULL, 0, 0 }, - { "KMP_VERSION", __kmp_stg_parse_version, __kmp_stg_print_version, NULL, 0, 0 }, - { "KMP_WARNINGS", __kmp_stg_parse_warnings, __kmp_stg_print_warnings, NULL, 0, 0 }, - - { "OMP_NESTED", __kmp_stg_parse_nested, __kmp_stg_print_nested, NULL, 0, 0 }, - { "OMP_NUM_THREADS", __kmp_stg_parse_num_threads, __kmp_stg_print_num_threads, NULL, 0, 0 }, - { "OMP_STACKSIZE", __kmp_stg_parse_stacksize, __kmp_stg_print_stacksize, NULL, 0, 0 }, - - { "KMP_TASKING", __kmp_stg_parse_tasking, __kmp_stg_print_tasking, NULL, 0, 0 }, - { "KMP_TASK_STEALING_CONSTRAINT", __kmp_stg_parse_task_stealing, __kmp_stg_print_task_stealing, NULL, 0, 0 }, - { "OMP_MAX_ACTIVE_LEVELS", __kmp_stg_parse_max_active_levels, __kmp_stg_print_max_active_levels, NULL, 0, 0 }, -#if OMP_40_ENABLED - { "OMP_DEFAULT_DEVICE", __kmp_stg_parse_default_device, __kmp_stg_print_default_device, NULL, 0, 0 }, -#endif -#if OMP_45_ENABLED - { "OMP_MAX_TASK_PRIORITY", __kmp_stg_parse_max_task_priority, __kmp_stg_print_max_task_priority, NULL, 0, 0 }, -#endif - { "OMP_THREAD_LIMIT", __kmp_stg_parse_all_threads, __kmp_stg_print_all_threads, NULL, 0, 0 }, - { "OMP_WAIT_POLICY", __kmp_stg_parse_wait_policy, __kmp_stg_print_wait_policy, NULL, 0, 0 }, - { "KMP_DISP_NUM_BUFFERS", __kmp_stg_parse_disp_buffers, __kmp_stg_print_disp_buffers, NULL, 0, 0 }, -#if KMP_NESTED_HOT_TEAMS - { "KMP_HOT_TEAMS_MAX_LEVEL", __kmp_stg_parse_hot_teams_level, __kmp_stg_print_hot_teams_level, NULL, 0, 0 }, - { "KMP_HOT_TEAMS_MODE", __kmp_stg_parse_hot_teams_mode, __kmp_stg_print_hot_teams_mode, NULL, 0, 0 }, -#endif // KMP_NESTED_HOT_TEAMS - -#if KMP_HANDLE_SIGNALS - { "KMP_HANDLE_SIGNALS", __kmp_stg_parse_handle_signals, __kmp_stg_print_handle_signals, NULL, 0, 0 }, -#endif - -#if KMP_ARCH_X86 || KMP_ARCH_X86_64 - { "KMP_INHERIT_FP_CONTROL", __kmp_stg_parse_inherit_fp_control, __kmp_stg_print_inherit_fp_control, NULL, 0, 0 }, -#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */ - -#ifdef KMP_GOMP_COMPAT - { "GOMP_STACKSIZE", __kmp_stg_parse_stacksize, NULL, NULL, 0, 0 }, -#endif - -#ifdef KMP_DEBUG - { "KMP_A_DEBUG", __kmp_stg_parse_a_debug, __kmp_stg_print_a_debug, NULL, 0, 0 }, - { "KMP_B_DEBUG", __kmp_stg_parse_b_debug, __kmp_stg_print_b_debug, NULL, 0, 0 }, - { "KMP_C_DEBUG", __kmp_stg_parse_c_debug, __kmp_stg_print_c_debug, NULL, 0, 0 }, - { "KMP_D_DEBUG", __kmp_stg_parse_d_debug, __kmp_stg_print_d_debug, NULL, 0, 0 }, - { "KMP_E_DEBUG", 
__kmp_stg_parse_e_debug, __kmp_stg_print_e_debug, NULL, 0, 0 }, - { "KMP_F_DEBUG", __kmp_stg_parse_f_debug, __kmp_stg_print_f_debug, NULL, 0, 0 }, - { "KMP_DEBUG", __kmp_stg_parse_debug, NULL, /* no print */ NULL, 0, 0 }, - { "KMP_DEBUG_BUF", __kmp_stg_parse_debug_buf, __kmp_stg_print_debug_buf, NULL, 0, 0 }, - { "KMP_DEBUG_BUF_ATOMIC", __kmp_stg_parse_debug_buf_atomic, __kmp_stg_print_debug_buf_atomic, NULL, 0, 0 }, - { "KMP_DEBUG_BUF_CHARS", __kmp_stg_parse_debug_buf_chars, __kmp_stg_print_debug_buf_chars, NULL, 0, 0 }, - { "KMP_DEBUG_BUF_LINES", __kmp_stg_parse_debug_buf_lines, __kmp_stg_print_debug_buf_lines, NULL, 0, 0 }, - { "KMP_DIAG", __kmp_stg_parse_diag, __kmp_stg_print_diag, NULL, 0, 0 }, - - { "KMP_PAR_RANGE", __kmp_stg_parse_par_range_env, __kmp_stg_print_par_range_env, NULL, 0, 0 }, -#if KMP_USE_MONITOR - { "KMP_YIELD_CYCLE", __kmp_stg_parse_yield_cycle, __kmp_stg_print_yield_cycle, NULL, 0, 0 }, - { "KMP_YIELD_ON", __kmp_stg_parse_yield_on, __kmp_stg_print_yield_on, NULL, 0, 0 }, - { "KMP_YIELD_OFF", __kmp_stg_parse_yield_off, __kmp_stg_print_yield_off, NULL, 0, 0 }, -#endif -#endif // KMP_DEBUG - - { "KMP_ALIGN_ALLOC", __kmp_stg_parse_align_alloc, __kmp_stg_print_align_alloc, NULL, 0, 0 }, - - { "KMP_PLAIN_BARRIER", __kmp_stg_parse_barrier_branch_bit, __kmp_stg_print_barrier_branch_bit, NULL, 0, 0 }, - { "KMP_PLAIN_BARRIER_PATTERN", __kmp_stg_parse_barrier_pattern, __kmp_stg_print_barrier_pattern, NULL, 0, 0 }, - { "KMP_FORKJOIN_BARRIER", __kmp_stg_parse_barrier_branch_bit, __kmp_stg_print_barrier_branch_bit, NULL, 0, 0 }, - { "KMP_FORKJOIN_BARRIER_PATTERN", __kmp_stg_parse_barrier_pattern, __kmp_stg_print_barrier_pattern, NULL, 0, 0 }, -#if KMP_FAST_REDUCTION_BARRIER - { "KMP_REDUCTION_BARRIER", __kmp_stg_parse_barrier_branch_bit, __kmp_stg_print_barrier_branch_bit, NULL, 0, 0 }, - { "KMP_REDUCTION_BARRIER_PATTERN", __kmp_stg_parse_barrier_pattern, __kmp_stg_print_barrier_pattern, NULL, 0, 0 }, -#endif - - { "KMP_ABORT_DELAY", __kmp_stg_parse_abort_delay, __kmp_stg_print_abort_delay, NULL, 0, 0 }, - { "KMP_CPUINFO_FILE", __kmp_stg_parse_cpuinfo_file, __kmp_stg_print_cpuinfo_file, NULL, 0, 0 }, - { "KMP_FORCE_REDUCTION", __kmp_stg_parse_force_reduction, __kmp_stg_print_force_reduction, NULL, 0, 0 }, - { "KMP_DETERMINISTIC_REDUCTION", __kmp_stg_parse_force_reduction, __kmp_stg_print_force_reduction, NULL, 0, 0 }, - { "KMP_STORAGE_MAP", __kmp_stg_parse_storage_map, __kmp_stg_print_storage_map, NULL, 0, 0 }, - { "KMP_ALL_THREADPRIVATE", __kmp_stg_parse_all_threadprivate, __kmp_stg_print_all_threadprivate, NULL, 0, 0 }, - { "KMP_FOREIGN_THREADS_THREADPRIVATE", __kmp_stg_parse_foreign_threads_threadprivate, __kmp_stg_print_foreign_threads_threadprivate, NULL, 0, 0 }, - -#if KMP_AFFINITY_SUPPORTED - { "KMP_AFFINITY", __kmp_stg_parse_affinity, __kmp_stg_print_affinity, NULL, 0, 0 }, -# ifdef KMP_GOMP_COMPAT - { "GOMP_CPU_AFFINITY", __kmp_stg_parse_gomp_cpu_affinity, NULL, /* no print */ NULL, 0, 0 }, -# endif /* KMP_GOMP_COMPAT */ -# if OMP_40_ENABLED - { "OMP_PROC_BIND", __kmp_stg_parse_proc_bind, __kmp_stg_print_proc_bind, NULL, 0, 0 }, - { "OMP_PLACES", __kmp_stg_parse_places, __kmp_stg_print_places, NULL, 0, 0 }, -# else - { "OMP_PROC_BIND", __kmp_stg_parse_proc_bind, NULL, /* no print */ NULL, 0, 0 }, -# endif /* OMP_40_ENABLED */ - - { "KMP_TOPOLOGY_METHOD", __kmp_stg_parse_topology_method, __kmp_stg_print_topology_method, NULL, 0, 0 }, - -#else - - // - // KMP_AFFINITY is not supported on OS X*, nor is OMP_PLACES. - // OMP_PROC_BIND and proc-bind-var are supported, however. 
- // -# if OMP_40_ENABLED - { "OMP_PROC_BIND", __kmp_stg_parse_proc_bind, __kmp_stg_print_proc_bind, NULL, 0, 0 }, -# endif - -#endif // KMP_AFFINITY_SUPPORTED - - { "KMP_INIT_AT_FORK", __kmp_stg_parse_init_at_fork, __kmp_stg_print_init_at_fork, NULL, 0, 0 }, - { "KMP_SCHEDULE", __kmp_stg_parse_schedule, __kmp_stg_print_schedule, NULL, 0, 0 }, - { "OMP_SCHEDULE", __kmp_stg_parse_omp_schedule, __kmp_stg_print_omp_schedule, NULL, 0, 0 }, - { "KMP_ATOMIC_MODE", __kmp_stg_parse_atomic_mode, __kmp_stg_print_atomic_mode, NULL, 0, 0 }, - { "KMP_CONSISTENCY_CHECK", __kmp_stg_parse_consistency_check, __kmp_stg_print_consistency_check, NULL, 0, 0 }, - -#if USE_ITT_BUILD && USE_ITT_NOTIFY - { "KMP_ITT_PREPARE_DELAY", __kmp_stg_parse_itt_prepare_delay, __kmp_stg_print_itt_prepare_delay, NULL, 0, 0 }, -#endif /* USE_ITT_BUILD && USE_ITT_NOTIFY */ - { "KMP_MALLOC_POOL_INCR", __kmp_stg_parse_malloc_pool_incr, __kmp_stg_print_malloc_pool_incr, NULL, 0, 0 }, - { "KMP_INIT_WAIT", __kmp_stg_parse_init_wait, __kmp_stg_print_init_wait, NULL, 0, 0 }, - { "KMP_NEXT_WAIT", __kmp_stg_parse_next_wait, __kmp_stg_print_next_wait, NULL, 0, 0 }, - { "KMP_GTID_MODE", __kmp_stg_parse_gtid_mode, __kmp_stg_print_gtid_mode, NULL, 0, 0 }, - { "OMP_DYNAMIC", __kmp_stg_parse_omp_dynamic, __kmp_stg_print_omp_dynamic, NULL, 0, 0 }, - { "KMP_DYNAMIC_MODE", __kmp_stg_parse_kmp_dynamic_mode, __kmp_stg_print_kmp_dynamic_mode, NULL, 0, 0 }, - -#ifdef USE_LOAD_BALANCE - { "KMP_LOAD_BALANCE_INTERVAL", __kmp_stg_parse_ld_balance_interval,__kmp_stg_print_ld_balance_interval,NULL, 0, 0 }, -#endif - - { "KMP_NUM_LOCKS_IN_BLOCK", __kmp_stg_parse_lock_block, __kmp_stg_print_lock_block, NULL, 0, 0 }, - { "KMP_LOCK_KIND", __kmp_stg_parse_lock_kind, __kmp_stg_print_lock_kind, NULL, 0, 0 }, - { "KMP_SPIN_BACKOFF_PARAMS", __kmp_stg_parse_spin_backoff_params, __kmp_stg_print_spin_backoff_params, NULL, 0, 0 }, -#if KMP_USE_ADAPTIVE_LOCKS - { "KMP_ADAPTIVE_LOCK_PROPS", __kmp_stg_parse_adaptive_lock_props,__kmp_stg_print_adaptive_lock_props, NULL, 0, 0 }, -#if KMP_DEBUG_ADAPTIVE_LOCKS - { "KMP_SPECULATIVE_STATSFILE", __kmp_stg_parse_speculative_statsfile,__kmp_stg_print_speculative_statsfile, NULL, 0, 0 }, -#endif -#endif // KMP_USE_ADAPTIVE_LOCKS - { "KMP_PLACE_THREADS", __kmp_stg_parse_hw_subset, __kmp_stg_print_hw_subset, NULL, 0, 0 }, - { "KMP_HW_SUBSET", __kmp_stg_parse_hw_subset, __kmp_stg_print_hw_subset, NULL, 0, 0 }, -#if USE_ITT_BUILD - { "KMP_FORKJOIN_FRAMES", __kmp_stg_parse_forkjoin_frames, __kmp_stg_print_forkjoin_frames, NULL, 0, 0 }, - { "KMP_FORKJOIN_FRAMES_MODE", __kmp_stg_parse_forkjoin_frames_mode,__kmp_stg_print_forkjoin_frames_mode, NULL, 0, 0 }, -#endif - -# if OMP_40_ENABLED - { "OMP_DISPLAY_ENV", __kmp_stg_parse_omp_display_env, __kmp_stg_print_omp_display_env, NULL, 0, 0 }, - { "OMP_CANCELLATION", __kmp_stg_parse_omp_cancellation, __kmp_stg_print_omp_cancellation, NULL, 0, 0 }, -#endif - { "", NULL, NULL, NULL, 0, 0 } -}; // settings - -static int const __kmp_stg_count = sizeof( __kmp_stg_table ) / sizeof( kmp_setting_t ); - -static inline -kmp_setting_t * -__kmp_stg_find( char const * name ) { - - int i; - if ( name != NULL ) { - for ( i = 0; i < __kmp_stg_count; ++ i ) { - if ( strcmp( __kmp_stg_table[ i ].name, name ) == 0 ) { - return & __kmp_stg_table[ i ]; - }; // if - }; // for - }; // if - return NULL; - -} // __kmp_stg_find - - -static int -__kmp_stg_cmp( void const * _a, void const * _b ) { - kmp_setting_t * a = (kmp_setting_t *) _a; - kmp_setting_t * b = (kmp_setting_t *) _b; - - // - // Process KMP_AFFINITY last. 
- // It needs to come after OMP_PLACES and GOMP_CPU_AFFINITY. - // - if ( strcmp( a->name, "KMP_AFFINITY" ) == 0 ) { - if ( strcmp( b->name, "KMP_AFFINITY" ) == 0 ) { - return 0; - } - return 1; - } - else if ( strcmp( b->name, "KMP_AFFINITY" ) == 0 ) { - return -1; - } - return strcmp( a->name, b->name ); -} // __kmp_stg_cmp - - -static void -__kmp_stg_init( void -) { - - static int initialized = 0; - - if ( ! initialized ) { - - // Sort table. - qsort( __kmp_stg_table, __kmp_stg_count - 1, sizeof( kmp_setting_t ), __kmp_stg_cmp ); - - { // Initialize *_STACKSIZE data. - - kmp_setting_t * kmp_stacksize = __kmp_stg_find( "KMP_STACKSIZE" ); // 1st priority. -#ifdef KMP_GOMP_COMPAT - kmp_setting_t * gomp_stacksize = __kmp_stg_find( "GOMP_STACKSIZE" ); // 2nd priority. -#endif - kmp_setting_t * omp_stacksize = __kmp_stg_find( "OMP_STACKSIZE" ); // 3rd priority. - - // !!! volatile keyword is Intel (R) C Compiler bug CQ49908 workaround. - // !!! Compiler does not understand rivals is used and optimizes out assignments - // !!! rivals[ i ++ ] = ...; - static kmp_setting_t * volatile rivals[ 4 ]; - static kmp_stg_ss_data_t kmp_data = { 1, (kmp_setting_t **)rivals }; -#ifdef KMP_GOMP_COMPAT - static kmp_stg_ss_data_t gomp_data = { 1024, (kmp_setting_t **)rivals }; -#endif - static kmp_stg_ss_data_t omp_data = { 1024, (kmp_setting_t **)rivals }; - int i = 0; - - rivals[ i ++ ] = kmp_stacksize; -#ifdef KMP_GOMP_COMPAT - if ( gomp_stacksize != NULL ) { - rivals[ i ++ ] = gomp_stacksize; - }; // if -#endif - rivals[ i ++ ] = omp_stacksize; - rivals[ i ++ ] = NULL; - - kmp_stacksize->data = & kmp_data; -#ifdef KMP_GOMP_COMPAT - if ( gomp_stacksize != NULL ) { - gomp_stacksize->data = & gomp_data; - }; // if -#endif - omp_stacksize->data = & omp_data; - - } - - { // Initialize KMP_LIBRARY and OMP_WAIT_POLICY data. - - kmp_setting_t * kmp_library = __kmp_stg_find( "KMP_LIBRARY" ); // 1st priority. - kmp_setting_t * omp_wait_policy = __kmp_stg_find( "OMP_WAIT_POLICY" ); // 2nd priority. - - // !!! volatile keyword is Intel (R) C Compiler bug CQ49908 workaround. - static kmp_setting_t * volatile rivals[ 3 ]; - static kmp_stg_wp_data_t kmp_data = { 0, (kmp_setting_t **)rivals }; - static kmp_stg_wp_data_t omp_data = { 1, (kmp_setting_t **)rivals }; - int i = 0; - - rivals[ i ++ ] = kmp_library; - if ( omp_wait_policy != NULL ) { - rivals[ i ++ ] = omp_wait_policy; - }; // if - rivals[ i ++ ] = NULL; - - kmp_library->data = & kmp_data; - if ( omp_wait_policy != NULL ) { - omp_wait_policy->data = & omp_data; - }; // if - - } - - { // Initialize KMP_ALL_THREADS, KMP_MAX_THREADS, and OMP_THREAD_LIMIT data. - - kmp_setting_t * kmp_all_threads = __kmp_stg_find( "KMP_ALL_THREADS" ); // 1st priority. - kmp_setting_t * kmp_max_threads = __kmp_stg_find( "KMP_MAX_THREADS" ); // 2nd priority. - kmp_setting_t * omp_thread_limit = __kmp_stg_find( "OMP_THREAD_LIMIT" ); // 3rd priority. - - // !!! volatile keyword is Intel (R) C Compiler bug CQ49908 workaround. - static kmp_setting_t * volatile rivals[ 4 ]; - int i = 0; - - rivals[ i ++ ] = kmp_all_threads; - rivals[ i ++ ] = kmp_max_threads; - if ( omp_thread_limit != NULL ) { - rivals[ i ++ ] = omp_thread_limit; - }; // if - rivals[ i ++ ] = NULL; - - kmp_all_threads->data = (void*)& rivals; - kmp_max_threads->data = (void*)& rivals; - if ( omp_thread_limit != NULL ) { - omp_thread_limit->data = (void*)& rivals; - }; // if - - } - -#if KMP_AFFINITY_SUPPORTED - { // Initialize KMP_AFFINITY, GOMP_CPU_AFFINITY, and OMP_PROC_BIND data. 
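        /*
           The wiring below repeats the pattern used above for the
           *_STACKSIZE, KMP_LIBRARY/OMP_WAIT_POLICY, and *_THREADS groups.
           A sketch of the resulting layout for the stack-size group
           (priorities as in the comments above):

               rivals[0] = KMP_STACKSIZE entry;    // 1st priority
               rivals[1] = GOMP_STACKSIZE entry;   // 2nd priority, if present
               rivals[2] = OMP_STACKSIZE entry;    // 3rd priority
               rivals[3] = NULL;                   // terminator

           Each rival's ->data then carries, directly or inside a small
           per-variable struct, a pointer to this shared, NULL-terminated,
           priority-ordered array, so the parser for any one variable can
           see whether a higher-priority rival was already set.
        */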
- - kmp_setting_t * kmp_affinity = __kmp_stg_find( "KMP_AFFINITY" ); // 1st priority. - KMP_DEBUG_ASSERT( kmp_affinity != NULL ); - -# ifdef KMP_GOMP_COMPAT - kmp_setting_t * gomp_cpu_affinity = __kmp_stg_find( "GOMP_CPU_AFFINITY" ); // 2nd priority. - KMP_DEBUG_ASSERT( gomp_cpu_affinity != NULL ); -# endif - - kmp_setting_t * omp_proc_bind = __kmp_stg_find( "OMP_PROC_BIND" ); // 3rd priority. - KMP_DEBUG_ASSERT( omp_proc_bind != NULL ); - - // !!! volatile keyword is Intel (R) C Compiler bug CQ49908 workaround. - static kmp_setting_t * volatile rivals[ 4 ]; - int i = 0; - - rivals[ i ++ ] = kmp_affinity; - -# ifdef KMP_GOMP_COMPAT - rivals[ i ++ ] = gomp_cpu_affinity; - gomp_cpu_affinity->data = (void*)& rivals; -# endif - - rivals[ i ++ ] = omp_proc_bind; - omp_proc_bind->data = (void*)& rivals; - rivals[ i ++ ] = NULL; - -# if OMP_40_ENABLED - static kmp_setting_t * volatile places_rivals[ 4 ]; - i = 0; - - kmp_setting_t * omp_places = __kmp_stg_find( "OMP_PLACES" ); // 3rd priority. - KMP_DEBUG_ASSERT( omp_places != NULL ); - - places_rivals[ i ++ ] = kmp_affinity; -# ifdef KMP_GOMP_COMPAT - places_rivals[ i ++ ] = gomp_cpu_affinity; -# endif - places_rivals[ i ++ ] = omp_places; - omp_places->data = (void*)& places_rivals; - places_rivals[ i ++ ] = NULL; -# endif - } -#else - // KMP_AFFINITY not supported, so OMP_PROC_BIND has no rivals. - // OMP_PLACES not supported yet. -#endif // KMP_AFFINITY_SUPPORTED - - { // Initialize KMP_DETERMINISTIC_REDUCTION and KMP_FORCE_REDUCTION data. - - kmp_setting_t * kmp_force_red = __kmp_stg_find( "KMP_FORCE_REDUCTION" ); // 1st priority. - kmp_setting_t * kmp_determ_red = __kmp_stg_find( "KMP_DETERMINISTIC_REDUCTION" ); // 2nd priority. - - // !!! volatile keyword is Intel (R) C Compiler bug CQ49908 workaround. - static kmp_setting_t * volatile rivals[ 3 ]; - static kmp_stg_fr_data_t force_data = { 1, (kmp_setting_t **)rivals }; - static kmp_stg_fr_data_t determ_data = { 0, (kmp_setting_t **)rivals }; - int i = 0; - - rivals[ i ++ ] = kmp_force_red; - if ( kmp_determ_red != NULL ) { - rivals[ i ++ ] = kmp_determ_red; - }; // if - rivals[ i ++ ] = NULL; - - kmp_force_red->data = & force_data; - if ( kmp_determ_red != NULL ) { - kmp_determ_red->data = & determ_data; - }; // if - } - - initialized = 1; - - }; // if - - // Reset flags. - int i; - for ( i = 0; i < __kmp_stg_count; ++ i ) { - __kmp_stg_table[ i ].set = 0; - }; // for - -} // __kmp_stg_init - - -static void -__kmp_stg_parse( - char const * name, - char const * value -) { - - // On Windows* OS there are some nameless variables like "C:=C:\" (yeah, really nameless, they are - // presented in environment block as "=C:=C\\\x00=D:=D:\\\x00...", so let us skip them. - if ( name[ 0 ] == 0 ) { - return; - }; // if - - if ( value != NULL ) { - kmp_setting_t * setting = __kmp_stg_find( name ); - if ( setting != NULL ) { - setting->parse( name, value, setting->data ); - setting->defined = 1; - }; // if - }; // if - -} // __kmp_stg_parse - - -static int -__kmp_stg_check_rivals( // 0 -- Ok, 1 -- errors found. - char const * name, // Name of variable. - char const * value, // Value of the variable. - kmp_setting_t * * rivals // List of rival settings (the list must include current one). -) { - - if ( rivals == NULL ) { - return 0; - } - - // Loop thru higher priority settings (listed before current). 
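    /*
       A worked example of the walk below, using the stack-size rivals as a
       concrete case: when OMP_STACKSIZE is being parsed, the loop first
       visits KMP_STACKSIZE and (if present) GOMP_STACKSIZE; if either has
       its ->set flag raised, this value loses, a StgIgnored warning names
       the winning variable, and 1 is returned.
    */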
- int i = 0; - for ( ; strcmp( rivals[ i ]->name, name ) != 0; i++ ) { - KMP_DEBUG_ASSERT( rivals[ i ] != NULL ); - -#if KMP_AFFINITY_SUPPORTED - if ( rivals[ i ] == __kmp_affinity_notype ) { - // - // If KMP_AFFINITY is specified without a type name, - // it does not rival OMP_PROC_BIND or GOMP_CPU_AFFINITY. - // - continue; - } -#endif - - if ( rivals[ i ]->set ) { - KMP_WARNING( StgIgnored, name, rivals[ i ]->name ); - return 1; - }; // if - }; // while - - ++ i; // Skip current setting. - return 0; - -}; // __kmp_stg_check_rivals - - -static int -__kmp_env_toPrint( char const * name, int flag ) { - int rc = 0; - kmp_setting_t * setting = __kmp_stg_find( name ); - if ( setting != NULL ) { - rc = setting->defined; - if ( flag >= 0 ) { - setting->defined = flag; - }; // if - }; // if - return rc; -} - - -static void -__kmp_aux_env_initialize( kmp_env_blk_t* block ) { - - char const * value; - - /* OMP_NUM_THREADS */ - value = __kmp_env_blk_var( block, "OMP_NUM_THREADS" ); - if ( value ) { - ompc_set_num_threads( __kmp_dflt_team_nth ); - } - - /* KMP_BLOCKTIME */ - value = __kmp_env_blk_var( block, "KMP_BLOCKTIME" ); - if ( value ) { - kmpc_set_blocktime( __kmp_dflt_blocktime ); - } - - /* OMP_NESTED */ - value = __kmp_env_blk_var( block, "OMP_NESTED" ); - if ( value ) { - ompc_set_nested( __kmp_dflt_nested ); - } - - /* OMP_DYNAMIC */ - value = __kmp_env_blk_var( block, "OMP_DYNAMIC" ); - if ( value ) { - ompc_set_dynamic( __kmp_global.g.g_dynamic ); - } - -} - -void -__kmp_env_initialize( char const * string ) { - - kmp_env_blk_t block; - int i; - - __kmp_stg_init(); - - // Hack!!! - if ( string == NULL ) { - // __kmp_max_nth = __kmp_sys_max_nth; - __kmp_threads_capacity = __kmp_initial_threads_capacity( __kmp_dflt_team_nth_ub ); - }; // if - __kmp_env_blk_init( & block, string ); - - // - // update the set flag on all entries that have an env var - // - for ( i = 0; i < block.count; ++ i ) { - if (( block.vars[ i ].name == NULL ) - || ( *block.vars[ i ].name == '\0')) { - continue; - } - if ( block.vars[ i ].value == NULL ) { - continue; - } - kmp_setting_t * setting = __kmp_stg_find( block.vars[ i ].name ); - if ( setting != NULL ) { - setting->set = 1; - } - }; // for i - - // We need to know if blocktime was set when processing OMP_WAIT_POLICY - blocktime_str = __kmp_env_blk_var( & block, "KMP_BLOCKTIME" ); - - // Special case. If we parse environment, not a string, process KMP_WARNINGS first. - if ( string == NULL ) { - char const * name = "KMP_WARNINGS"; - char const * value = __kmp_env_blk_var( & block, name ); - __kmp_stg_parse( name, value ); - }; // if - -#if KMP_AFFINITY_SUPPORTED - // - // Special case. KMP_AFFINITY is not a rival to other affinity env vars - // if no affinity type is specified. We want to allow - // KMP_AFFINITY=[no],verbose/[no]warnings/etc. to be enabled when - // specifying the affinity type via GOMP_CPU_AFFINITY or the OMP 4.0 - // affinity mechanism. - // - __kmp_affinity_notype = NULL; - char const *aff_str = __kmp_env_blk_var( & block, "KMP_AFFINITY" ); - if ( aff_str != NULL ) { - // - // Check if the KMP_AFFINITY type is specified in the string. - // We just search the string for "compact", "scatter", etc. - // without really parsing the string. The syntax of the - // KMP_AFFINITY env var is such that none of the affinity - // type names can appear anywhere other that the type - // specifier, even as substrings. - // - // I can't find a case-insensitive version of strstr on Windows* OS. - // Use the case-sensitive version for now. 
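    /*
       Sketch of a portable fallback, in case a case-insensitive search is
       ever wanted here; this helper is hypothetical and not part of the
       runtime (tolower() from <ctype.h>):

           static char const *
           str_find_nocase( char const * hay, char const * ndl ) {
               size_t n = strlen( ndl );
               size_t i;
               for ( ; * hay; ++ hay ) {
                   for ( i = 0; i < n; ++ i ) {
                       if ( tolower( (unsigned char) hay[ i ] )
                            != tolower( (unsigned char) ndl[ i ] ) ) {
                           break;
                       }
                   }
                   if ( i == n ) {
                       return hay;   // match starting at this position
                   }
               }
               return NULL;          // needle not found
           }
    */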
- // - -# if KMP_OS_WINDOWS -# define FIND strstr -# else -# define FIND strcasestr -# endif - - if ( ( FIND( aff_str, "none" ) == NULL ) - && ( FIND( aff_str, "physical" ) == NULL ) - && ( FIND( aff_str, "logical" ) == NULL ) - && ( FIND( aff_str, "compact" ) == NULL ) - && ( FIND( aff_str, "scatter" ) == NULL ) - && ( FIND( aff_str, "explicit" ) == NULL ) - && ( FIND( aff_str, "balanced" ) == NULL ) - && ( FIND( aff_str, "disabled" ) == NULL ) ) { - __kmp_affinity_notype = __kmp_stg_find( "KMP_AFFINITY" ); - } - else { - // - // A new affinity type is specified. - // Reset the affinity flags to their default values, - // in case this is called from kmp_set_defaults(). - // - __kmp_affinity_type = affinity_default; - __kmp_affinity_gran = affinity_gran_default; - __kmp_affinity_top_method = affinity_top_method_default; - __kmp_affinity_respect_mask = affinity_respect_mask_default; - } -# undef FIND - -#if OMP_40_ENABLED - // - // Also reset the affinity flags if OMP_PROC_BIND is specified. - // - aff_str = __kmp_env_blk_var( & block, "OMP_PROC_BIND" ); - if ( aff_str != NULL ) { - __kmp_affinity_type = affinity_default; - __kmp_affinity_gran = affinity_gran_default; - __kmp_affinity_top_method = affinity_top_method_default; - __kmp_affinity_respect_mask = affinity_respect_mask_default; - } -#endif /* OMP_40_ENABLED */ - } - -#endif /* KMP_AFFINITY_SUPPORTED */ - -#if OMP_40_ENABLED - // - // Set up the nested proc bind type vector. - // - if ( __kmp_nested_proc_bind.bind_types == NULL ) { - __kmp_nested_proc_bind.bind_types = (kmp_proc_bind_t *) - KMP_INTERNAL_MALLOC( sizeof(kmp_proc_bind_t) ); - if ( __kmp_nested_proc_bind.bind_types == NULL ) { - KMP_FATAL( MemoryAllocFailed ); - } - __kmp_nested_proc_bind.size = 1; - __kmp_nested_proc_bind.used = 1; -# if KMP_AFFINITY_SUPPORTED - __kmp_nested_proc_bind.bind_types[0] = proc_bind_default; -# else - // default proc bind is false if affinity not supported - __kmp_nested_proc_bind.bind_types[0] = proc_bind_false; -# endif - - } -#endif /* OMP_40_ENABLED */ - - // - // Now process all of the settings. - // - for ( i = 0; i < block.count; ++ i ) { - __kmp_stg_parse( block.vars[ i ].name, block.vars[ i ].value ); - }; // for i - - // - // If user locks have been allocated yet, don't reset the lock vptr table. - // - if ( ! __kmp_init_user_locks ) { - if ( __kmp_user_lock_kind == lk_default ) { - __kmp_user_lock_kind = lk_queuing; - } -#if KMP_USE_DYNAMIC_LOCK - __kmp_init_dynamic_user_locks(); -#else - __kmp_set_user_lock_vptrs( __kmp_user_lock_kind ); -#endif - } - else { - KMP_DEBUG_ASSERT( string != NULL); // kmp_set_defaults() was called - KMP_DEBUG_ASSERT( __kmp_user_lock_kind != lk_default ); - // Binds lock functions again to follow the transition between different - // KMP_CONSISTENCY_CHECK values. Calling this again is harmless as long - // as we do not allow lock kind changes after making a call to any - // user lock functions (true). -#if KMP_USE_DYNAMIC_LOCK - __kmp_init_dynamic_user_locks(); -#else - __kmp_set_user_lock_vptrs( __kmp_user_lock_kind ); -#endif - } - -#if KMP_AFFINITY_SUPPORTED - - if ( ! TCR_4(__kmp_init_middle) ) { - // - // Determine if the machine/OS is actually capable of supporting - // affinity. - // - const char *var = "KMP_AFFINITY"; - KMPAffinity::pick_api(); - if ( __kmp_affinity_type == affinity_disabled ) { - KMP_AFFINITY_DISABLE(); - } - else if ( ! KMP_AFFINITY_CAPABLE() ) { - __kmp_affinity_dispatch->determine_capable(var); - if ( ! 
KMP_AFFINITY_CAPABLE() ) { - if ( __kmp_affinity_verbose || ( __kmp_affinity_warnings - && ( __kmp_affinity_type != affinity_default ) - && ( __kmp_affinity_type != affinity_none ) - && ( __kmp_affinity_type != affinity_disabled ) ) ) { - KMP_WARNING( AffNotSupported, var ); - } - __kmp_affinity_type = affinity_disabled; - __kmp_affinity_respect_mask = 0; - __kmp_affinity_gran = affinity_gran_fine; - } - } - -# if OMP_40_ENABLED - if ( __kmp_affinity_type == affinity_disabled ) { - __kmp_nested_proc_bind.bind_types[0] = proc_bind_false; - } - else if ( __kmp_nested_proc_bind.bind_types[0] == proc_bind_true ) { - // - // OMP_PROC_BIND=true maps to OMP_PROC_BIND=spread. - // - __kmp_nested_proc_bind.bind_types[0] = proc_bind_spread; - } -# endif /* OMP_40_ENABLED */ - - if ( KMP_AFFINITY_CAPABLE() ) { - -# if KMP_GROUP_AFFINITY - - // - // Handle the Win 64 group affinity stuff if there are multiple - // processor groups, or if the user requested it, and OMP 4.0 - // affinity is not in effect. - // - if ( ( ( __kmp_num_proc_groups > 1 ) - && ( __kmp_affinity_type == affinity_default ) -# if OMP_40_ENABLED - && ( __kmp_nested_proc_bind.bind_types[0] == proc_bind_default ) ) -# endif - || ( __kmp_affinity_top_method == affinity_top_method_group ) ) { - if ( __kmp_affinity_respect_mask == affinity_respect_mask_default ) { - __kmp_affinity_respect_mask = FALSE; - } - if ( __kmp_affinity_type == affinity_default ) { - __kmp_affinity_type = affinity_compact; -# if OMP_40_ENABLED - __kmp_nested_proc_bind.bind_types[0] = proc_bind_intel; -# endif - } - if ( __kmp_affinity_top_method == affinity_top_method_default ) { - if ( __kmp_affinity_gran == affinity_gran_default ) { - __kmp_affinity_top_method = affinity_top_method_group; - __kmp_affinity_gran = affinity_gran_group; - } - else if ( __kmp_affinity_gran == affinity_gran_group ) { - __kmp_affinity_top_method = affinity_top_method_group; - } - else { - __kmp_affinity_top_method = affinity_top_method_all; - } - } - else if ( __kmp_affinity_top_method == affinity_top_method_group ) { - if ( __kmp_affinity_gran == affinity_gran_default ) { - __kmp_affinity_gran = affinity_gran_group; - } - else if ( ( __kmp_affinity_gran != affinity_gran_group ) - && ( __kmp_affinity_gran != affinity_gran_fine ) - && ( __kmp_affinity_gran != affinity_gran_thread ) ) { - const char *str = NULL; - switch ( __kmp_affinity_gran ) { - case affinity_gran_core: str = "core"; break; - case affinity_gran_package: str = "package"; break; - case affinity_gran_node: str = "node"; break; - default: KMP_DEBUG_ASSERT( 0 ); - } - KMP_WARNING( AffGranTopGroup, var, str ); - __kmp_affinity_gran = affinity_gran_fine; - } - } - else { - if ( __kmp_affinity_gran == affinity_gran_default ) { - __kmp_affinity_gran = affinity_gran_core; - } - else if ( __kmp_affinity_gran == affinity_gran_group ) { - const char *str = NULL; - switch ( __kmp_affinity_type ) { - case affinity_physical: str = "physical"; break; - case affinity_logical: str = "logical"; break; - case affinity_compact: str = "compact"; break; - case affinity_scatter: str = "scatter"; break; - case affinity_explicit: str = "explicit"; break; - // No MIC on windows, so no affinity_balanced case - default: KMP_DEBUG_ASSERT( 0 ); - } - KMP_WARNING( AffGranGroupType, var, str ); - __kmp_affinity_gran = affinity_gran_core; - } - } - } - else - -# endif /* KMP_GROUP_AFFINITY */ - - { - if ( __kmp_affinity_respect_mask == affinity_respect_mask_default ) { -# if KMP_GROUP_AFFINITY - if ( __kmp_num_proc_groups > 1 ) { - 
__kmp_affinity_respect_mask = FALSE; - } - else -# endif /* KMP_GROUP_AFFINITY */ - { - __kmp_affinity_respect_mask = TRUE; - } - } -# if OMP_40_ENABLED - if ( ( __kmp_nested_proc_bind.bind_types[0] != proc_bind_intel ) - && ( __kmp_nested_proc_bind.bind_types[0] != proc_bind_default ) ) { - if ( __kmp_affinity_type == affinity_default ) { - __kmp_affinity_type = affinity_compact; - __kmp_affinity_dups = FALSE; - } - } - else -# endif /* OMP_40_ENABLED */ - if ( __kmp_affinity_type == affinity_default ) { -#if OMP_40_ENABLED -#if KMP_ARCH_X86_64 && (KMP_OS_LINUX || KMP_OS_WINDOWS) - if( __kmp_mic_type != non_mic ) { - __kmp_nested_proc_bind.bind_types[0] = proc_bind_intel; - } else -#endif - { - __kmp_nested_proc_bind.bind_types[0] = proc_bind_false; - } -#endif /* OMP_40_ENABLED */ -#if KMP_ARCH_X86_64 && (KMP_OS_LINUX || KMP_OS_WINDOWS) - if( __kmp_mic_type != non_mic ) { - __kmp_affinity_type = affinity_scatter; - } else -#endif - { - __kmp_affinity_type = affinity_none; - } - - } - if ( ( __kmp_affinity_gran == affinity_gran_default ) - && ( __kmp_affinity_gran_levels < 0 ) ) { -#if KMP_ARCH_X86_64 && (KMP_OS_LINUX || KMP_OS_WINDOWS) - if( __kmp_mic_type != non_mic ) { - __kmp_affinity_gran = affinity_gran_fine; - } else -#endif - { - __kmp_affinity_gran = affinity_gran_core; - } - } - if ( __kmp_affinity_top_method == affinity_top_method_default ) { - __kmp_affinity_top_method = affinity_top_method_all; - } - } - } - - K_DIAG( 1, ( "__kmp_affinity_type == %d\n", __kmp_affinity_type ) ); - K_DIAG( 1, ( "__kmp_affinity_compact == %d\n", __kmp_affinity_compact ) ); - K_DIAG( 1, ( "__kmp_affinity_offset == %d\n", __kmp_affinity_offset ) ); - K_DIAG( 1, ( "__kmp_affinity_verbose == %d\n", __kmp_affinity_verbose ) ); - K_DIAG( 1, ( "__kmp_affinity_warnings == %d\n", __kmp_affinity_warnings ) ); - K_DIAG( 1, ( "__kmp_affinity_respect_mask == %d\n", __kmp_affinity_respect_mask ) ); - K_DIAG( 1, ( "__kmp_affinity_gran == %d\n", __kmp_affinity_gran ) ); - - KMP_DEBUG_ASSERT( __kmp_affinity_type != affinity_default); -# if OMP_40_ENABLED - KMP_DEBUG_ASSERT( __kmp_nested_proc_bind.bind_types[0] != proc_bind_default ); -# endif - } - -#endif /* KMP_AFFINITY_SUPPORTED */ - - if ( __kmp_version ) { - __kmp_print_version_1(); - }; // if - - // Post-initialization step: some env. vars need their value's further processing - if ( string != NULL) { // kmp_set_defaults() was called - __kmp_aux_env_initialize( &block ); - } - - __kmp_env_blk_free( & block ); - - KMP_MB(); - -} // __kmp_env_initialize - - -void -__kmp_env_print() { - - kmp_env_blk_t block; - int i; - kmp_str_buf_t buffer; - - __kmp_stg_init(); - __kmp_str_buf_init( & buffer ); - - __kmp_env_blk_init( & block, NULL ); - __kmp_env_blk_sort( & block ); - - // Print real environment values. - __kmp_str_buf_print( & buffer, "\n%s\n\n", KMP_I18N_STR( UserSettings ) ); - for ( i = 0; i < block.count; ++ i ) { - char const * name = block.vars[ i ].name; - char const * value = block.vars[ i ].value; - if ( - ( KMP_STRLEN( name ) > 4 && strncmp( name, "KMP_", 4 ) == 0 ) - || strncmp( name, "OMP_", 4 ) == 0 - #ifdef KMP_GOMP_COMPAT - || strncmp( name, "GOMP_", 5 ) == 0 - #endif // KMP_GOMP_COMPAT - ) { - __kmp_str_buf_print( & buffer, " %s=%s\n", name, value ); - }; // if - }; // for - __kmp_str_buf_print( & buffer, "\n" ); - - // Print internal (effective) settings. 
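    /*
       Illustrative shape of the report this builds (variable names and
       values are examples, not captured output): a "User settings:" section
       listing the KMP_/OMP_/GOMP_ variables found in the environment, e.g.

           KMP_STACKSIZE=8m
           OMP_NUM_THREADS=4

       followed by an "Effective settings:" section in which every entry of
       __kmp_stg_table prints its current internal value.
    */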
- __kmp_str_buf_print( & buffer, "%s\n\n", KMP_I18N_STR( EffectiveSettings ) ); - for ( int i = 0; i < __kmp_stg_count; ++ i ) { - if ( __kmp_stg_table[ i ].print != NULL ) { - __kmp_stg_table[ i ].print( & buffer, __kmp_stg_table[ i ].name, __kmp_stg_table[ i ].data ); - }; // if - }; // for - - __kmp_printf( "%s", buffer.str ); - - __kmp_env_blk_free( & block ); - __kmp_str_buf_free( & buffer ); - - __kmp_printf("\n"); - -} // __kmp_env_print - - -#if OMP_40_ENABLED -void -__kmp_env_print_2() { - - kmp_env_blk_t block; - kmp_str_buf_t buffer; - - __kmp_env_format = 1; - - __kmp_stg_init(); - __kmp_str_buf_init( & buffer ); - - __kmp_env_blk_init( & block, NULL ); - __kmp_env_blk_sort( & block ); - - __kmp_str_buf_print( & buffer, "\n%s\n", KMP_I18N_STR( DisplayEnvBegin ) ); - __kmp_str_buf_print( & buffer, " _OPENMP='%d'\n", __kmp_openmp_version ); - - for ( int i = 0; i < __kmp_stg_count; ++ i ) { - if ( __kmp_stg_table[ i ].print != NULL && - ( ( __kmp_display_env && strncmp( __kmp_stg_table[ i ].name, "OMP_", 4 ) == 0 ) || __kmp_display_env_verbose ) ) { - __kmp_stg_table[ i ].print( & buffer, __kmp_stg_table[ i ].name, __kmp_stg_table[ i ].data ); - }; // if - }; // for - - __kmp_str_buf_print( & buffer, "%s\n", KMP_I18N_STR( DisplayEnvEnd ) ); - __kmp_str_buf_print( & buffer, "\n" ); - - __kmp_printf( "%s", buffer.str ); - - __kmp_env_blk_free( & block ); - __kmp_str_buf_free( & buffer ); - - __kmp_printf("\n"); - -} // __kmp_env_print_2 -#endif // OMP_40_ENABLED - -// end of file - Index: runtime/src/kmp_settings.cpp =================================================================== --- runtime/src/kmp_settings.cpp +++ runtime/src/kmp_settings.cpp @@ -1,5 +1,5 @@ /* - * kmp_settings.c -- Initialize environment variables + * kmp_settings.cpp -- Initialize environment variables */ Index: runtime/src/kmp_str.c =================================================================== --- runtime/src/kmp_str.c +++ runtime/src/kmp_str.c @@ -1,880 +0,0 @@ -/* - * kmp_str.c -- String manipulation routines. - */ - - -//===----------------------------------------------------------------------===// -// -// The LLVM Compiler Infrastructure -// -// This file is dual licensed under the MIT and the University of Illinois Open -// Source Licenses. See LICENSE.txt for details. -// -//===----------------------------------------------------------------------===// - - -#include "kmp_str.h" - -#include // va_* -#include // vsnprintf() -#include // malloc(), realloc() - -#include "kmp.h" -#include "kmp_i18n.h" - -/* - ------------------------------------------------------------------------------------------------ - String buffer. - ------------------------------------------------------------------------------------------------ - - Usage: - - // Declare buffer and initialize it. - kmp_str_buf_t buffer; - __kmp_str_buf_init( & buffer ); - - // Print to buffer. - __kmp_str_buf_print( & buffer, "Error in file \"%s\" line %d\n", "foo.c", 12 ); - __kmp_str_buf_print( & buffer, " <%s>\n", line ); - - // Use buffer contents. buffer.str is a pointer to data, buffer.used is a number of printed - // characters (not including terminating zero). - write( fd, buffer.str, buffer.used ); - - // Free buffer. - __kmp_str_buf_free( & buffer ); - - // Alternatively, you can detach allocated memory from buffer: - __kmp_str_buf_detach( & buffer ); - return buffer.str; // That memory should be freed eventually. - - - Notes: - - * Buffer users may use buffer.str and buffer.used. 
Users should not change any fields of - buffer directly. - - * buffer.str is never NULL. If buffer is empty, buffer.str points to empty string (""). - - * For performance reasons, buffer uses stack memory (buffer.bulk) first. If stack memory is - exhausted, buffer allocates memory on heap by malloc(), and reallocates it by realloc() - as amount of used memory grows. - - * Buffer doubles amount of allocated memory each time it is exhausted. - - ------------------------------------------------------------------------------------------------ -*/ - -// TODO: __kmp_str_buf_print() can use thread local memory allocator. - -#define KMP_STR_BUF_INVARIANT( b ) \ - { \ - KMP_DEBUG_ASSERT( (b)->str != NULL ); \ - KMP_DEBUG_ASSERT( (b)->size >= sizeof( (b)->bulk ) ); \ - KMP_DEBUG_ASSERT( (b)->size % sizeof( (b)->bulk ) == 0 ); \ - KMP_DEBUG_ASSERT( (unsigned)(b)->used < (b)->size ); \ - KMP_DEBUG_ASSERT( (b)->size == sizeof( (b)->bulk ) ? (b)->str == & (b)->bulk[ 0 ] : 1 ); \ - KMP_DEBUG_ASSERT( (b)->size > sizeof( (b)->bulk ) ? (b)->str != & (b)->bulk[ 0 ] : 1 ); \ - } - -void - __kmp_str_buf_clear( - kmp_str_buf_t * buffer -) { - KMP_STR_BUF_INVARIANT( buffer ); - if ( buffer->used > 0 ) { - buffer->used = 0; - buffer->str[ 0 ] = 0; - }; // if - KMP_STR_BUF_INVARIANT( buffer ); -} // __kmp_str_buf_clear - - -void -__kmp_str_buf_reserve( - kmp_str_buf_t * buffer, - int size -) { - - KMP_STR_BUF_INVARIANT( buffer ); - KMP_DEBUG_ASSERT( size >= 0 ); - - if ( buffer->size < (unsigned int)size ) { - - // Calculate buffer size. - do { - buffer->size *= 2; - } while ( buffer->size < (unsigned int)size ); - - // Enlarge buffer. - if ( buffer->str == & buffer->bulk[ 0 ] ) { - buffer->str = (char *) KMP_INTERNAL_MALLOC( buffer->size ); - if ( buffer->str == NULL ) { - KMP_FATAL( MemoryAllocFailed ); - }; // if - KMP_MEMCPY_S( buffer->str, buffer->size, buffer->bulk, buffer->used + 1 ); - } else { - buffer->str = (char *) KMP_INTERNAL_REALLOC( buffer->str, buffer->size ); - if ( buffer->str == NULL ) { - KMP_FATAL( MemoryAllocFailed ); - }; // if - }; // if - - }; // if - - KMP_DEBUG_ASSERT( buffer->size > 0 ); - KMP_DEBUG_ASSERT( buffer->size >= (unsigned)size ); - KMP_STR_BUF_INVARIANT( buffer ); - -} // __kmp_str_buf_reserve - - -void -__kmp_str_buf_detach( - kmp_str_buf_t * buffer -) { - - KMP_STR_BUF_INVARIANT( buffer ); - - // If internal bulk is used, allocate memory and copy it. 
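    /*
       The copy below is what makes detach safe: afterwards buffer->str no
       longer aliases the inline bulk array.  Minimal usage sketch, following
       the notes in the header comment above (format string illustrative):

           kmp_str_buf_t b;
           __kmp_str_buf_init( & b );
           __kmp_str_buf_print( & b, "pid=%d", 42 );
           __kmp_str_buf_detach( & b );
           return b.str;   // Heap memory now; caller frees it eventually.
    */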
- if ( buffer->size <= sizeof( buffer->bulk ) ) { - buffer->str = (char *) KMP_INTERNAL_MALLOC( buffer->size ); - if ( buffer->str == NULL ) { - KMP_FATAL( MemoryAllocFailed ); - }; // if - KMP_MEMCPY_S( buffer->str, buffer->size, buffer->bulk, buffer->used + 1 ); - }; // if - -} // __kmp_str_buf_detach - - -void -__kmp_str_buf_free( - kmp_str_buf_t * buffer -) { - KMP_STR_BUF_INVARIANT( buffer ); - if ( buffer->size > sizeof( buffer->bulk ) ) { - KMP_INTERNAL_FREE( buffer->str ); - }; // if - buffer->str = buffer->bulk; - buffer->size = sizeof( buffer->bulk ); - buffer->used = 0; - KMP_STR_BUF_INVARIANT( buffer ); -} // __kmp_str_buf_free - - -void -__kmp_str_buf_cat( - kmp_str_buf_t * buffer, - char const * str, - int len -) { - KMP_STR_BUF_INVARIANT( buffer ); - KMP_DEBUG_ASSERT( str != NULL ); - KMP_DEBUG_ASSERT( len >= 0 ); - __kmp_str_buf_reserve( buffer, buffer->used + len + 1 ); - KMP_MEMCPY( buffer->str + buffer->used, str, len ); - buffer->str[ buffer->used + len ] = 0; - buffer->used += len; - KMP_STR_BUF_INVARIANT( buffer ); -} // __kmp_str_buf_cat - - -void -__kmp_str_buf_vprint( - kmp_str_buf_t * buffer, - char const * format, - va_list args -) { - - KMP_STR_BUF_INVARIANT( buffer ); - - for ( ; ; ) { - - int const free = buffer->size - buffer->used; - int rc; - int size; - - // Try to format string. - { - /* - On Linux* OS Intel(R) 64, vsnprintf() modifies args argument, so vsnprintf() crashes if it - is called for the second time with the same args. To prevent the crash, we have to - pass a fresh intact copy of args to vsnprintf() on each iteration. - - Unfortunately, standard va_copy() macro is not available on Windows* OS. However, it - seems vsnprintf() does not modify args argument on Windows* OS. - */ - - #if ! KMP_OS_WINDOWS - va_list _args; - __va_copy( _args, args ); // Make copy of args. - #define args _args // Substitute args with its copy, _args. - #endif // KMP_OS_WINDOWS - rc = KMP_VSNPRINTF( buffer->str + buffer->used, free, format, args ); - #if ! KMP_OS_WINDOWS - #undef args // Remove substitution. - va_end( _args ); - #endif // KMP_OS_WINDOWS - } - - // No errors, string has been formatted. - if ( rc >= 0 && rc < free ) { - buffer->used += rc; - break; - }; // if - - // Error occurred, buffer is too small. - if ( rc >= 0 ) { - // C99-conforming implementation of vsnprintf returns required buffer size. - size = buffer->used + rc + 1; - } else { - // Older implementations just return -1. Double buffer size. - size = buffer->size * 2; - }; // if - - // Enlarge buffer. - __kmp_str_buf_reserve( buffer, size ); - - // And try again. - - }; // forever - - KMP_DEBUG_ASSERT( buffer->size > 0 ); - KMP_STR_BUF_INVARIANT( buffer ); - -} // __kmp_str_buf_vprint - - -void -__kmp_str_buf_print( - kmp_str_buf_t * buffer, - char const * format, - ... -) { - - va_list args; - va_start( args, format ); - __kmp_str_buf_vprint( buffer, format, args ); - va_end( args ); - -} // __kmp_str_buf_print - - -/* - The function prints specified size to buffer. Size is expressed using biggest possible unit, for - example 1024 is printed as "1k". 
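    More illustrative cases of the same rule (divide by 1024 only while the
    remainder is zero): 1048576 is printed as "1M", 1536 as "1536" (it is
    not a multiple of 1024), and 0 as "0".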
-*/ - -void -__kmp_str_buf_print_size( - kmp_str_buf_t * buf, - size_t size -) { - - char const * names[] = { "", "k", "M", "G", "T", "P", "E", "Z", "Y" }; - int const units = sizeof( names ) / sizeof( char const * ); - int u = 0; - if ( size > 0 ) { - while ( ( size % 1024 == 0 ) && ( u + 1 < units ) ) { - size = size / 1024; - ++ u; - }; // while - }; // if - - __kmp_str_buf_print( buf, "%" KMP_SIZE_T_SPEC "%s", size, names[ u ] ); - -} // __kmp_str_buf_print_size - - -void -__kmp_str_fname_init( - kmp_str_fname_t * fname, - char const * path -) { - - fname->path = NULL; - fname->dir = NULL; - fname->base = NULL; - - if ( path != NULL ) { - char * slash = NULL; // Pointer to the last character of dir. - char * base = NULL; // Pointer to the beginning of basename. - fname->path = __kmp_str_format( "%s", path ); - // Original code used strdup() function to copy a string, but on Windows* OS Intel(R) 64 it - // causes assertioon id debug heap, so I had to replace strdup with __kmp_str_format(). - if ( KMP_OS_WINDOWS ) { - __kmp_str_replace( fname->path, '\\', '/' ); - }; // if - fname->dir = __kmp_str_format( "%s", fname->path ); - slash = strrchr( fname->dir, '/' ); - if ( KMP_OS_WINDOWS && slash == NULL ) { // On Windows* OS, if slash not found, - char first = TOLOWER( fname->dir[ 0 ] ); // look for drive. - if ( 'a' <= first && first <= 'z' && fname->dir[ 1 ] == ':' ) { - slash = & fname->dir[ 1 ]; - }; // if - }; // if - base = ( slash == NULL ? fname->dir : slash + 1 ); - fname->base = __kmp_str_format( "%s", base ); // Copy basename - * base = 0; // and truncate dir. - }; // if - -} // kmp_str_fname_init - - -void -__kmp_str_fname_free( - kmp_str_fname_t * fname -) { - __kmp_str_free( (char const **)( & fname->path ) ); - __kmp_str_free( (char const **)( & fname->dir ) ); - __kmp_str_free( (char const **)( & fname->base ) ); -} // kmp_str_fname_free - - -int -__kmp_str_fname_match( - kmp_str_fname_t const * fname, - char const * pattern -) { - - int dir_match = 1; - int base_match = 1; - - if ( pattern != NULL ) { - kmp_str_fname_t ptrn; - __kmp_str_fname_init( & ptrn, pattern ); - dir_match = - strcmp( ptrn.dir, "*/" ) == 0 - || - ( fname->dir != NULL && __kmp_str_eqf( fname->dir, ptrn.dir ) ); - base_match = - strcmp( ptrn.base, "*" ) == 0 - || - ( fname->base != NULL && __kmp_str_eqf( fname->base, ptrn.base ) ); - __kmp_str_fname_free( & ptrn ); - }; // if - - return dir_match && base_match; - -} // __kmp_str_fname_match - - -kmp_str_loc_t -__kmp_str_loc_init( - char const * psource, - int init_fname -) { - - kmp_str_loc_t loc; - - loc._bulk = NULL; - loc.file = NULL; - loc.func = NULL; - loc.line = 0; - loc.col = 0; - - if ( psource != NULL ) { - - char * str = NULL; - char * dummy = NULL; - char * line = NULL; - char * col = NULL; - - // Copy psource to keep it intact. - loc._bulk = __kmp_str_format( "%s", psource ); - - // Parse psource string: ";file;func;line;col;;" - str = loc._bulk; - __kmp_str_split( str, ';', & dummy, & str ); - __kmp_str_split( str, ';', & loc.file, & str ); - __kmp_str_split( str, ';', & loc.func, & str ); - __kmp_str_split( str, ';', & line, & str ); - __kmp_str_split( str, ';', & col, & str ); - - // Convert line and col into numberic values. - if ( line != NULL ) { - loc.line = atoi( line ); - if ( loc.line < 0 ) { - loc.line = 0; - }; // if - }; // if - if ( col != NULL ) { - loc.col = atoi( col ); - if ( loc.col < 0 ) { - loc.col = 0; - }; // if - }; // if - - }; // if - - __kmp_str_fname_init( & loc.fname, init_fname ? 
loc.file : NULL ); - - return loc; - -} // kmp_str_loc_init - - -void -__kmp_str_loc_free( - kmp_str_loc_t * loc -) { - __kmp_str_fname_free( & loc->fname ); - __kmp_str_free((const char **) &(loc->_bulk)); - loc->file = NULL; - loc->func = NULL; -} // kmp_str_loc_free - - - -/* - This function is intended to compare file names. On Windows* OS file names are case-insensitive, - so functions performs case-insensitive comparison. On Linux* OS it performs case-sensitive - comparison. - Note: The function returns *true* if strings are *equal*. -*/ - -int -__kmp_str_eqf( // True, if strings are equal, false otherwise. - char const * lhs, // First string. - char const * rhs // Second string. -) { - int result; - #if KMP_OS_WINDOWS - result = ( _stricmp( lhs, rhs ) == 0 ); - #else - result = ( strcmp( lhs, rhs ) == 0 ); - #endif - return result; -} // __kmp_str_eqf - - -/* - This function is like sprintf, but it *allocates* new buffer, which must be freed eventually by - __kmp_str_free(). The function is very convenient for constructing strings, it successfully - replaces strdup(), strcat(), it frees programmer from buffer allocations and helps to avoid - buffer overflows. Examples: - - str = __kmp_str_format( "%s", orig ); // strdup(), do not care about buffer size. - __kmp_str_free( & str ); - str = __kmp_str_format( "%s%s", orig1, orig2 ); // strcat(), do not care about buffer size. - __kmp_str_free( & str ); - str = __kmp_str_format( "%s/%s.txt", path, file ); // constructing string. - __kmp_str_free( & str ); - - Performance note: - This function allocates memory with malloc() calls, so do not call it from - performance-critical code. In performance-critical code consider using kmp_str_buf_t - instead, since it uses stack-allocated buffer for short strings. - - Why does this function use malloc()? - 1. __kmp_allocate() returns cache-aligned memory allocated with malloc(). There are no - reasons in using __kmp_allocate() for strings due to extra overhead while cache-aligned - memory is not necessary. - 2. __kmp_thread_malloc() cannot be used because it requires pointer to thread structure. - We need to perform string operations during library startup (for example, in - __kmp_register_library_startup()) when no thread structures are allocated yet. - So standard malloc() is the only available option. -*/ - -char * -__kmp_str_format( // Allocated string. - char const * format, // Format string. - ... // Other parameters. -) { - - va_list args; - int size = 512; - char * buffer = NULL; - int rc; - - // Allocate buffer. - buffer = (char *) KMP_INTERNAL_MALLOC( size ); - if ( buffer == NULL ) { - KMP_FATAL( MemoryAllocFailed ); - }; // if - - for ( ; ; ) { - - // Try to format string. - va_start( args, format ); - rc = KMP_VSNPRINTF( buffer, size, format, args ); - va_end( args ); - - // No errors, string has been formatted. - if ( rc >= 0 && rc < size ) { - break; - }; // if - - // Error occurred, buffer is too small. - if ( rc >= 0 ) { - // C99-conforming implementation of vsnprintf returns required buffer size. - size = rc + 1; - } else { - // Older implementations just return -1. - size = size * 2; - }; // if - - // Enlarge buffer and try again. 
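        /*
           Illustrative trace of this retry protocol: with the initial
           512-byte buffer and a 700-character result, a C99-conforming
           vsnprintf() returns 700, so size becomes 701 and the second pass
           succeeds; an older vsnprintf() that returns -1 instead doubles
           size (512 -> 1024 -> ...) until the string fits.
        */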
- buffer = (char *) KMP_INTERNAL_REALLOC( buffer, size ); - if ( buffer == NULL ) { - KMP_FATAL( MemoryAllocFailed ); - }; // if - - }; // forever - - return buffer; - -} // func __kmp_str_format - - -void -__kmp_str_free( - char const * * str -) { - KMP_DEBUG_ASSERT( str != NULL ); - KMP_INTERNAL_FREE( (void *) * str ); - * str = NULL; -} // func __kmp_str_free - - -/* If len is zero, returns true iff target and data have exact case-insensitive match. - If len is negative, returns true iff target is a case-insensitive substring of data. - If len is positive, returns true iff target is a case-insensitive substring of data or - vice versa, and neither is shorter than len. -*/ -int -__kmp_str_match( - char const * target, - int len, - char const * data -) { - int i; - if ( target == NULL || data == NULL ) { - return FALSE; - }; // if - for ( i = 0; target[i] && data[i]; ++ i ) { - if ( TOLOWER( target[i] ) != TOLOWER( data[i] ) ) { - return FALSE; - }; // if - }; // for i - return ( ( len > 0 ) ? i >= len : ( ! target[i] && ( len || ! data[i] ) ) ); -} // __kmp_str_match - - -int -__kmp_str_match_false( char const * data ) { - int result = - __kmp_str_match( "false", 1, data ) || - __kmp_str_match( "off", 2, data ) || - __kmp_str_match( "0", 1, data ) || - __kmp_str_match( ".false.", 2, data ) || - __kmp_str_match( ".f.", 2, data ) || - __kmp_str_match( "no", 1, data ); - return result; -} // __kmp_str_match_false - - -int -__kmp_str_match_true( char const * data ) { - int result = - __kmp_str_match( "true", 1, data ) || - __kmp_str_match( "on", 2, data ) || - __kmp_str_match( "1", 1, data ) || - __kmp_str_match( ".true.", 2, data ) || - __kmp_str_match( ".t.", 2, data ) || - __kmp_str_match( "yes", 1, data ); - return result; -} // __kmp_str_match_true - -void -__kmp_str_replace( - char * str, - char search_for, - char replace_with -) { - - char * found = NULL; - - found = strchr( str, search_for ); - while ( found ) { - * found = replace_with; - found = strchr( found + 1, search_for ); - }; // while - -} // __kmp_str_replace - - -void -__kmp_str_split( - char * str, // I: String to split. - char delim, // I: Character to split on. - char ** head, // O: Pointer to head (may be NULL). - char ** tail // O: Pointer to tail (may be NULL). -) { - char * h = str; - char * t = NULL; - if ( str != NULL ) { - char * ptr = strchr( str, delim ); - if ( ptr != NULL ) { - * ptr = 0; - t = ptr + 1; - }; // if - }; // if - if ( head != NULL ) { - * head = h; - }; // if - if ( tail != NULL ) { - * tail = t; - }; // if -} // __kmp_str_split - -/* - strtok_r() is not available on Windows* OS. This function reimplements strtok_r(). -*/ -char * -__kmp_str_token( - char * str, // String to split into tokens. Note: String *is* modified! - char const * delim, // Delimiters. - char ** buf // Internal buffer. -) { - char * token = NULL; - #if KMP_OS_WINDOWS - // On Windows* OS there is no strtok_r() function. Let us implement it. - if ( str != NULL ) { - * buf = str; // First call, initialize buf. - }; // if - * buf += strspn( * buf, delim ); // Skip leading delimiters. - if ( ** buf != 0 ) { // Rest of the string is not yet empty. - token = * buf; // Use it as result. - * buf += strcspn( * buf, delim ); // Skip non-delimiters. - if ( ** buf != 0 ) { // Rest of the string is not yet empty. - ** buf = 0; // Terminate token here. - * buf += 1; // Advance buf to start with the next token next time. - }; // if - }; // if - #else - // On Linux* OS and OS X*, strtok_r() is available. Let us use it. 
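    /*
       Usage sketch (either branch behaves the same; values illustrative):

           char   line[] = "4,8 ,16";
           char * buf;
           char * tok = __kmp_str_token( line, ", ", & buf );   // "4"
           while ( tok != NULL ) {
               tok = __kmp_str_token( NULL, ", ", & buf );      // "8", "16", then NULL
           }

       As with strtok_r(), the source string is modified in place.
    */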
-    token = strtok_r( str, delim, buf );
-    #endif
-    return token;
-}; // __kmp_str_token
-
-
-int
-__kmp_str_to_int(
-    char const * str,
-    char         sentinel
-) {
-    int result, factor;
-    char const * t;
-
-    result = 0;
-
-    for (t = str; *t != '\0'; ++t) {
-        if (*t < '0' || *t > '9')
-            break;
-        result = (result * 10) + (*t - '0');
-    }
-
-    switch (*t) {
-      case '\0':    /* the current default for no suffix is bytes */
-        factor = 1;
-        break;
-      case 'b': case 'B':    /* bytes */
-        ++t;
-        factor = 1;
-        break;
-      case 'k': case 'K':    /* kilo-bytes */
-        ++t;
-        factor = 1024;
-        break;
-      case 'm': case 'M':    /* mega-bytes */
-        ++t;
-        factor = (1024 * 1024);
-        break;
-      default:
-        if(*t != sentinel)
-            return (-1);
-        t = "";
-        factor = 1;
-    }
-
-    if (result > (INT_MAX / factor))
-        result = INT_MAX;
-    else
-        result *= factor;
-
-    return (*t != 0 ? 0 : result);
-
-} // __kmp_str_to_int
-
-
-/*
-    The routine parses the input string. It is expected to be an unsigned integer with an
-    optional unit. Units are: "b" for bytes, "kb" or just "k" for kilobytes, "mb" or "m" for
-    megabytes, ..., "yb" or "y" for yottabytes. :-) Unit names are case-insensitive. On success
-    *error is set to NULL and *out receives the parsed value; on failure *error points to an
-    error message. In case of overflow *out is set to KMP_SIZE_T_MAX; for any other parse error
-    *out is left unchanged.
-*/
-void
-__kmp_str_to_size(         // Reports result via *out and *error.
-    char const *   str,    // I: String of characters, unsigned number and unit ("b", "kb", etc).
-    size_t *       out,    // O: Parsed number.
-    size_t         dfactor, // I: The factor if none of the letters specified.
-    char const * * error   // O: Null if everything is ok, error message otherwise.
-) {
-
-    size_t value    = 0;
-    size_t factor   = 0;
-    int    overflow = 0;
-    int    i        = 0;
-    int    digit;
-
-
-    KMP_DEBUG_ASSERT( str != NULL );
-
-    // Skip spaces.
-    while ( str[ i ] == ' ' || str[ i ] == '\t') {
-        ++ i;
-    }; // while
-
-    // Parse number.
-    if ( str[ i ] < '0' || str[ i ] > '9' ) {
-        * error = KMP_I18N_STR( NotANumber );
-        return;
-    }; // if
-    do {
-        digit = str[ i ] - '0';
-        overflow = overflow || ( value > ( KMP_SIZE_T_MAX - digit ) / 10 );
-        value = ( value * 10 ) + digit;
-        ++ i;
-    } while ( str[ i ] >= '0' && str[ i ] <= '9' );
-
-    // Skip spaces.
-    while ( str[ i ] == ' ' || str[ i ] == '\t' ) {
-        ++ i;
-    }; // while
-
-    // Parse unit.
-    #define _case( ch, exp ) \
-        case ch : \
-        case ch - ( 'a' - 'A' ) : { \
-            size_t shift = (exp) * 10; \
-            ++ i; \
-            if ( shift < sizeof( size_t ) * 8 ) { \
-                factor = (size_t)( 1 ) << shift; \
-            } else { \
-                overflow = 1; \
-            }; \
-        } break;
-    switch ( str[ i ] ) {
-        _case( 'k', 1 ); // Kilo
-        _case( 'm', 2 ); // Mega
-        _case( 'g', 3 ); // Giga
-        _case( 't', 4 ); // Tera
-        _case( 'p', 5 ); // Peta
-        _case( 'e', 6 ); // Exa
-        _case( 'z', 7 ); // Zetta
-        _case( 'y', 8 ); // Yotta
-        // Oops. No more units...
-    }; // switch
-    #undef _case
-    if ( str[ i ] == 'b' || str[ i ] == 'B' ) { // Skip optional "b".
-        if ( factor == 0 ) {
-            factor = 1;
-        }
-        ++ i;
-    }; // if
-    if ( ! ( str[ i ] == ' ' || str[ i ] == '\t' || str[ i ] == 0 ) ) { // Bad unit
-        * error = KMP_I18N_STR( BadUnit );
-        return;
-    }; // if
-
-    if ( factor == 0 ) {
-        factor = dfactor;
-    }
-
-    // Apply factor.
-    overflow = overflow || ( value > ( KMP_SIZE_T_MAX / factor ) );
-    value *= factor;
-
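    /*
       Worked examples for this parser: "8m" yields 8 * 1024 * 1024 with
       *error set to NULL; "16k" and "16kb" both yield 16384; plain "16"
       with dfactor == 1024 also yields 16384; and "16q" sets *error to the
       BadUnit message and returns with *out unchanged.
    */

-    // Skip spaces.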
- while ( str[ i ] == ' ' || str[ i ] == '\t' ) { - ++ i; - }; // while - - if ( str[ i ] != 0 ) { - * error = KMP_I18N_STR( IllegalCharacters ); - return; - }; // if - - if ( overflow ) { - * error = KMP_I18N_STR( ValueTooLarge ); - * out = KMP_SIZE_T_MAX; - return; - }; // if - - * error = NULL; - * out = value; - -} // __kmp_str_to_size - - -void -__kmp_str_to_uint( // R: Error code. - char const * str, // I: String of characters, unsigned number. - kmp_uint64 * out, // O: Parsed number. - char const * * error // O: Null if everything is ok, error message otherwise. -) { - - size_t value = 0; - int overflow = 0; - int i = 0; - int digit; - - - KMP_DEBUG_ASSERT( str != NULL ); - - // Skip spaces. - while ( str[ i ] == ' ' || str[ i ] == '\t' ) { - ++ i; - }; // while - - // Parse number. - if ( str[ i ] < '0' || str[ i ] > '9' ) { - * error = KMP_I18N_STR( NotANumber ); - return; - }; // if - do { - digit = str[ i ] - '0'; - overflow = overflow || ( value > ( KMP_SIZE_T_MAX - digit ) / 10 ); - value = ( value * 10 ) + digit; - ++ i; - } while ( str[ i ] >= '0' && str[ i ] <= '9' ); - - // Skip spaces. - while ( str[ i ] == ' ' || str[ i ] == '\t' ) { - ++ i; - }; // while - - if ( str[ i ] != 0 ) { - * error = KMP_I18N_STR( IllegalCharacters ); - return; - }; // if - - if ( overflow ) { - * error = KMP_I18N_STR( ValueTooLarge ); - * out = (kmp_uint64) -1; - return; - }; // if - - * error = NULL; - * out = value; - -} // __kmp_str_to_unit - - - -// end of file // Index: runtime/src/kmp_str.cpp =================================================================== --- runtime/src/kmp_str.cpp +++ runtime/src/kmp_str.cpp @@ -1,5 +1,5 @@ /* - * kmp_str.c -- String manipulation routines. + * kmp_str.cpp -- String manipulation routines. */ Index: runtime/src/kmp_stub.c =================================================================== --- runtime/src/kmp_stub.c +++ runtime/src/kmp_stub.c @@ -1,270 +0,0 @@ -/* - * kmp_stub.c -- stub versions of user-callable OpenMP RT functions. - */ - - -//===----------------------------------------------------------------------===// -// -// The LLVM Compiler Infrastructure -// -// This file is dual licensed under the MIT and the University of Illinois Open -// Source Licenses. See LICENSE.txt for details. -// -//===----------------------------------------------------------------------===// - - -#include -#include -#include - -#include "omp.h" // Function renamings. -#include "kmp.h" // KMP_DEFAULT_STKSIZE -#include "kmp_stub.h" - -#if KMP_OS_WINDOWS - #include -#else - #include -#endif - -// Moved from omp.h -#define omp_set_max_active_levels ompc_set_max_active_levels -#define omp_set_schedule ompc_set_schedule -#define omp_get_ancestor_thread_num ompc_get_ancestor_thread_num -#define omp_get_team_size ompc_get_team_size - -#define omp_set_num_threads ompc_set_num_threads -#define omp_set_dynamic ompc_set_dynamic -#define omp_set_nested ompc_set_nested -#define kmp_set_stacksize kmpc_set_stacksize -#define kmp_set_stacksize_s kmpc_set_stacksize_s -#define kmp_set_blocktime kmpc_set_blocktime -#define kmp_set_library kmpc_set_library -#define kmp_set_defaults kmpc_set_defaults -#define kmp_set_disp_num_buffers kmpc_set_disp_num_buffers -#define kmp_malloc kmpc_malloc -#define kmp_aligned_malloc kmpc_aligned_malloc -#define kmp_calloc kmpc_calloc -#define kmp_realloc kmpc_realloc -#define kmp_free kmpc_free - -static double frequency = 0.0; - -// Helper functions. -static size_t __kmps_init() { - static int initialized = 0; - static size_t dummy = 0; - if ( ! 
initialized ) { - - // TODO: Analyze KMP_VERSION environment variable, print __kmp_version_copyright and - // __kmp_version_build_time. - // WARNING: Do not use "fprintf( stderr, ... )" because it will cause unresolved "__iob" - // symbol (see C70080). We need to extract __kmp_printf() stuff from kmp_runtime.c and use - // it. - - // Trick with dummy variable forces linker to keep __kmp_version_copyright and - // __kmp_version_build_time strings in executable file (in case of static linkage). - // When KMP_VERSION analyze is implemented, dummy variable should be deleted, function - // should return void. - dummy = __kmp_version_copyright - __kmp_version_build_time; - - #if KMP_OS_WINDOWS - LARGE_INTEGER freq; - BOOL status = QueryPerformanceFrequency( & freq ); - if ( status ) { - frequency = double( freq.QuadPart ); - }; // if - #endif - - initialized = 1; - }; // if - return dummy; -}; // __kmps_init - -#define i __kmps_init(); - -/* set API functions */ -void omp_set_num_threads( omp_int_t num_threads ) { i; } -void omp_set_dynamic( omp_int_t dynamic ) { i; __kmps_set_dynamic( dynamic ); } -void omp_set_nested( omp_int_t nested ) { i; __kmps_set_nested( nested ); } -void omp_set_max_active_levels( omp_int_t max_active_levels ) { i; } -void omp_set_schedule( omp_sched_t kind, omp_int_t modifier ) { i; __kmps_set_schedule( (kmp_sched_t)kind, modifier ); } -int omp_get_ancestor_thread_num( omp_int_t level ) { i; return ( level ) ? ( -1 ) : ( 0 ); } -int omp_get_team_size( omp_int_t level ) { i; return ( level ) ? ( -1 ) : ( 1 ); } -int kmpc_set_affinity_mask_proc( int proc, void **mask ) { i; return -1; } -int kmpc_unset_affinity_mask_proc( int proc, void **mask ) { i; return -1; } -int kmpc_get_affinity_mask_proc( int proc, void **mask ) { i; return -1; } - -/* kmp API functions */ -void kmp_set_stacksize( omp_int_t arg ) { i; __kmps_set_stacksize( arg ); } -void kmp_set_stacksize_s( size_t arg ) { i; __kmps_set_stacksize( arg ); } -void kmp_set_blocktime( omp_int_t arg ) { i; __kmps_set_blocktime( arg ); } -void kmp_set_library( omp_int_t arg ) { i; __kmps_set_library( arg ); } -void kmp_set_defaults( char const * str ) { i; } -void kmp_set_disp_num_buffers( omp_int_t arg ) { i; } - -/* KMP memory management functions. 
*/ -void * kmp_malloc( size_t size ) { i; return malloc( size ); } -void * kmp_aligned_malloc( size_t sz, size_t a ) { - i; -#if KMP_OS_WINDOWS - errno = ENOSYS; // not supported - return NULL; // no standard aligned allocator on Windows (pre - C11) -#else - void *res; - int err; - if( err = posix_memalign( &res, a, sz ) ) { - errno = err; // can be EINVAL or ENOMEM - return NULL; - } - return res; -#endif -} -void * kmp_calloc( size_t nelem, size_t elsize ) { i; return calloc( nelem, elsize ); } -void * kmp_realloc( void *ptr, size_t size ) { i; return realloc( ptr, size ); } -void kmp_free( void * ptr ) { i; free( ptr ); } - -static int __kmps_blocktime = INT_MAX; - -void __kmps_set_blocktime( int arg ) { - i; - __kmps_blocktime = arg; -} // __kmps_set_blocktime - -int __kmps_get_blocktime( void ) { - i; - return __kmps_blocktime; -} // __kmps_get_blocktime - -static int __kmps_dynamic = 0; - -void __kmps_set_dynamic( int arg ) { - i; - __kmps_dynamic = arg; -} // __kmps_set_dynamic - -int __kmps_get_dynamic( void ) { - i; - return __kmps_dynamic; -} // __kmps_get_dynamic - -static int __kmps_library = 1000; - -void __kmps_set_library( int arg ) { - i; - __kmps_library = arg; -} // __kmps_set_library - -int __kmps_get_library( void ) { - i; - return __kmps_library; -} // __kmps_get_library - -static int __kmps_nested = 0; - -void __kmps_set_nested( int arg ) { - i; - __kmps_nested = arg; -} // __kmps_set_nested - -int __kmps_get_nested( void ) { - i; - return __kmps_nested; -} // __kmps_get_nested - -static size_t __kmps_stacksize = KMP_DEFAULT_STKSIZE; - -void __kmps_set_stacksize( int arg ) { - i; - __kmps_stacksize = arg; -} // __kmps_set_stacksize - -int __kmps_get_stacksize( void ) { - i; - return __kmps_stacksize; -} // __kmps_get_stacksize - -static kmp_sched_t __kmps_sched_kind = kmp_sched_default; -static int __kmps_sched_modifier = 0; - - void __kmps_set_schedule( kmp_sched_t kind, int modifier ) { - i; - __kmps_sched_kind = kind; - __kmps_sched_modifier = modifier; - } // __kmps_set_schedule - - void __kmps_get_schedule( kmp_sched_t *kind, int *modifier ) { - i; - *kind = __kmps_sched_kind; - *modifier = __kmps_sched_modifier; - } // __kmps_get_schedule - -#if OMP_40_ENABLED - -static kmp_proc_bind_t __kmps_proc_bind = proc_bind_false; - -void __kmps_set_proc_bind( kmp_proc_bind_t arg ) { - i; - __kmps_proc_bind = arg; -} // __kmps_set_proc_bind - -kmp_proc_bind_t __kmps_get_proc_bind( void ) { - i; - return __kmps_proc_bind; -} // __kmps_get_proc_bind - -#endif /* OMP_40_ENABLED */ - -double __kmps_get_wtime( void ) { - // Elapsed wall clock time (in second) from "sometime in the past". - double wtime = 0.0; - i; - #if KMP_OS_WINDOWS - if ( frequency > 0.0 ) { - LARGE_INTEGER now; - BOOL status = QueryPerformanceCounter( & now ); - if ( status ) { - wtime = double( now.QuadPart ) / frequency; - }; // if - }; // if - #else - // gettimeofday() returns seconds and microseconds since the Epoch. - struct timeval tval; - int rc; - rc = gettimeofday( & tval, NULL ); - if ( rc == 0 ) { - wtime = (double)( tval.tv_sec ) + 1.0E-06 * (double)( tval.tv_usec ); - } else { - // TODO: Assert or abort here. - }; // if - #endif - return wtime; -}; // __kmps_get_wtime - -double __kmps_get_wtick( void ) { - // Number of seconds between successive clock ticks. 
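    /*
       Background for the constants below (a summary, not new behavior):
       GetSystemTimeAdjustment() reports its times in 100-nanosecond units,
       hence the 1.0E-07 scale factor; 1.0E-03 is a conservative
       one-millisecond fallback used when that call fails; and on other OSes
       one microsecond (1.0E-06) is assumed, matching the resolution of
       gettimeofday().
    */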
- double wtick = 0.0; - i; - #if KMP_OS_WINDOWS - { - DWORD increment; - DWORD adjustment; - BOOL disabled; - BOOL rc; - rc = GetSystemTimeAdjustment( & adjustment, & increment, & disabled ); - if ( rc ) { - wtick = 1.0E-07 * (double)( disabled ? increment : adjustment ); - } else { - // TODO: Assert or abort here. - wtick = 1.0E-03; - }; // if - } - #else - // TODO: gettimeofday() returns in microseconds, but what the precision? - wtick = 1.0E-06; - #endif - return wtick; -}; // __kmps_get_wtick - -// end of file // - Index: runtime/src/kmp_stub.cpp =================================================================== --- runtime/src/kmp_stub.cpp +++ runtime/src/kmp_stub.cpp @@ -1,5 +1,5 @@ /* - * kmp_stub.c -- stub versions of user-callable OpenMP RT functions. + * kmp_stub.cpp -- stub versions of user-callable OpenMP RT functions. */ @@ -56,16 +56,16 @@ static size_t dummy = 0; if ( ! initialized ) { - // TODO: Analyze KMP_VERSION environment variable, print __kmp_version_copyright and - // __kmp_version_build_time. - // WARNING: Do not use "fprintf( stderr, ... )" because it will cause unresolved "__iob" - // symbol (see C70080). We need to extract __kmp_printf() stuff from kmp_runtime.c and use - // it. - - // Trick with dummy variable forces linker to keep __kmp_version_copyright and - // __kmp_version_build_time strings in executable file (in case of static linkage). - // When KMP_VERSION analyze is implemented, dummy variable should be deleted, function - // should return void. + // TODO: Analyze KMP_VERSION environment variable, print + // __kmp_version_copyright and __kmp_version_build_time. + // WARNING: Do not use "fprintf( stderr, ... )" because it will cause + // unresolved "__iob" symbol (see C70080). We need to extract + // __kmp_printf() stuff from kmp_runtime.cpp and use it. + + // Trick with dummy variable forces linker to keep __kmp_version_copyright + // and __kmp_version_build_time strings in executable file (in case of + // static linkage). When KMP_VERSION analysis is implemented, dummy + // variable should be deleted, function should return void. dummy = __kmp_version_copyright - __kmp_version_build_time; #if KMP_OS_WINDOWS Index: runtime/src/kmp_tasking.c =================================================================== --- runtime/src/kmp_tasking.c +++ runtime/src/kmp_tasking.c @@ -1,3162 +0,0 @@ -/* - * kmp_tasking.c -- OpenMP 3.0 tasking support. - */ - - -//===----------------------------------------------------------------------===// -// -// The LLVM Compiler Infrastructure -// -// This file is dual licensed under the MIT and the University of Illinois Open -// Source Licenses. See LICENSE.txt for details. 
-//
-//===----------------------------------------------------------------------===//
-
-
-#include "kmp.h"
-#include "kmp_i18n.h"
-#include "kmp_itt.h"
-#include "kmp_wait_release.h"
-#include "kmp_stats.h"
-
-#if OMPT_SUPPORT
-#include "ompt-specific.h"
-#endif
-
-#include "tsan_annotations.h"
-
-/* ------------------------------------------------------------------------ */
-/* ------------------------------------------------------------------------ */
-
-
-/* forward declaration */
-static void __kmp_enable_tasking( kmp_task_team_t *task_team, kmp_info_t *this_thr );
-static void __kmp_alloc_task_deque( kmp_info_t *thread, kmp_thread_data_t *thread_data );
-static int  __kmp_realloc_task_threads_data( kmp_info_t *thread, kmp_task_team_t *task_team );
-
-#ifdef OMP_45_ENABLED
-static void __kmp_bottom_half_finish_proxy( kmp_int32 gtid, kmp_task_t * ptask );
-#endif
-
-#ifdef BUILD_TIED_TASK_STACK
-
-//---------------------------------------------------------------------------
-//  __kmp_trace_task_stack: print the tied tasks from the task stack in order
-//  from top to bottom
-//
-//  gtid: global thread identifier for thread containing stack
-//  thread_data: thread data for task team thread containing stack
-//  threshold: value above which the trace statement triggers
-//  location: string identifying call site of this function (for trace)
-
-static void
-__kmp_trace_task_stack( kmp_int32 gtid, kmp_thread_data_t *thread_data, int threshold, char *location )
-{
-    kmp_task_stack_t *task_stack = & thread_data->td.td_susp_tied_tasks;
-    kmp_taskdata_t **stack_top = task_stack -> ts_top;
-    kmp_int32 entries = task_stack -> ts_entries;
-    kmp_taskdata_t *tied_task;
-
-    KA_TRACE(threshold, ("__kmp_trace_task_stack(start): location = %s, gtid = %d, entries = %d, "
-                         "first_block = %p, stack_top = %p \n",
-                         location, gtid, entries, task_stack->ts_first_block, stack_top ) );
-
-    KMP_DEBUG_ASSERT( stack_top != NULL );
-    KMP_DEBUG_ASSERT( entries > 0 );
-
-    while ( entries != 0 )
-    {
-        KMP_DEBUG_ASSERT( stack_top != & task_stack->ts_first_block.sb_block[0] );
-        // fix up ts_top if we need to pop from previous block
-        if ( ( entries & TASK_STACK_INDEX_MASK ) == 0 )
-        {
-            kmp_stack_block_t *stack_block = (kmp_stack_block_t *) (stack_top) ;
-
-            stack_block = stack_block -> sb_prev;
-            stack_top = & stack_block -> sb_block[TASK_STACK_BLOCK_SIZE];
-        }
-
-        // finish bookkeeping
-        stack_top--;
-        entries--;
-
-        tied_task = * stack_top;
-
-        KMP_DEBUG_ASSERT( tied_task != NULL );
-        KMP_DEBUG_ASSERT( tied_task -> td_flags.tasktype == TASK_TIED );
-
-        KA_TRACE(threshold, ("__kmp_trace_task_stack(%s):             gtid=%d, entry=%d, "
-                             "stack_top=%p, tied_task=%p\n",
-                             location, gtid, entries, stack_top, tied_task ) );
-    }
-    KMP_DEBUG_ASSERT( stack_top == & task_stack->ts_first_block.sb_block[0] );
-
-    KA_TRACE(threshold, ("__kmp_trace_task_stack(exit): location = %s, gtid = %d\n",
-                         location, gtid ) );
-}
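//---------------------------------------------------------------------------
//  Shape of the structure these routines maintain (a sketch; the real block
//  size comes from TASK_STACK_BLOCK_SIZE in kmp.h):
//
//      ts_first_block                          heap-allocated second block
//      [ t0 ][ t1 ] ... [ tN-1 ]  <-- sb_next/sb_prev -->  [ tN ] ...
//
//  ts_entries counts the tied tasks currently on the stack, and ts_top
//  points one past the most recently pushed entry, crossing between blocks
//  as the stack grows and shrinks.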
-//
-// gtid: global thread identifier of calling thread
-// thread_data: thread data for task team thread containing stack
-
-static void
-__kmp_init_task_stack( kmp_int32 gtid, kmp_thread_data_t *thread_data )
-{
- kmp_task_stack_t *task_stack = & thread_data->td.td_susp_tied_tasks;
- kmp_stack_block_t *first_block;
-
- // set up the first block of the stack
- first_block = & task_stack -> ts_first_block;
- task_stack -> ts_top = (kmp_taskdata_t **) first_block;
- memset( (void *) first_block, '\0', TASK_STACK_BLOCK_SIZE * sizeof(kmp_taskdata_t *));
-
- // initialize the stack to be empty
- task_stack -> ts_entries = TASK_STACK_EMPTY;
- first_block -> sb_next = NULL;
- first_block -> sb_prev = NULL;
-}
-
-
-//---------------------------------------------------------------------------
-// __kmp_free_task_stack: free the task stack when thread_data is destroyed.
-//
-// gtid: global thread identifier for calling thread
-// thread_data: thread info for thread containing stack
-
-static void
-__kmp_free_task_stack( kmp_int32 gtid, kmp_thread_data_t *thread_data )
-{
- kmp_task_stack_t *task_stack = & thread_data->td.td_susp_tied_tasks;
- kmp_stack_block_t *stack_block = & task_stack -> ts_first_block;
-
- KMP_DEBUG_ASSERT( task_stack -> ts_entries == TASK_STACK_EMPTY );
- // free from the second block of the stack
- while ( stack_block != NULL ) {
- kmp_stack_block_t *next_block = (stack_block) ? stack_block -> sb_next : NULL;
-
- stack_block -> sb_next = NULL;
- stack_block -> sb_prev = NULL;
- if (stack_block != & task_stack -> ts_first_block) {
- __kmp_thread_free( __kmp_threads[ gtid ], stack_block ); // free the block, if not the first
- }
- stack_block = next_block;
- }
- // initialize the stack to be empty
- task_stack -> ts_entries = 0;
- task_stack -> ts_top = NULL;
-}
-
-
-//---------------------------------------------------------------------------
-// __kmp_push_task_stack: Push the tied task onto the task stack.
-// Grow the stack if necessary by allocating another block.
-//
-// gtid: global thread identifier for calling thread
-// thread: thread info for thread containing stack
-// tied_task: the task to push on the stack
-
-static void
-__kmp_push_task_stack( kmp_int32 gtid, kmp_info_t *thread, kmp_taskdata_t * tied_task )
-{
- // GEH - need to consider what to do if tt_threads_data not allocated yet
- kmp_thread_data_t *thread_data = & thread -> th.th_task_team ->
- tt.tt_threads_data[ __kmp_tid_from_gtid( gtid ) ];
- kmp_task_stack_t *task_stack = & thread_data->td.td_susp_tied_tasks ;
-
- if ( tied_task->td_flags.team_serial || tied_task->td_flags.tasking_ser ) {
- return; // Don't push anything on stack if team or team tasks are serialized
- }
-
- KMP_DEBUG_ASSERT( tied_task -> td_flags.tasktype == TASK_TIED );
- KMP_DEBUG_ASSERT( task_stack -> ts_top != NULL );
-
- KA_TRACE(20, ("__kmp_push_task_stack(enter): GTID: %d; THREAD: %p; TASK: %p\n",
- gtid, thread, tied_task ) );
- // Store entry
- * (task_stack -> ts_top) = tied_task;
-
- // Do bookkeeping for next push
- task_stack -> ts_top++;
- task_stack -> ts_entries++;
-
- if ( ( task_stack -> ts_entries & TASK_STACK_INDEX_MASK ) == 0 )
- {
- // Find beginning of this task block
- kmp_stack_block_t *stack_block =
- (kmp_stack_block_t *) (task_stack -> ts_top - TASK_STACK_BLOCK_SIZE);
-
- // Check if we already have a block
- if ( stack_block -> sb_next != NULL )
- { // reset ts_top to beginning of next block
- task_stack -> ts_top = & stack_block -> sb_next -> sb_block[0];
- }
- else
- { // Alloc new block and link it up
- kmp_stack_block_t *new_block = (kmp_stack_block_t *)
- __kmp_thread_calloc(thread, sizeof(kmp_stack_block_t));
-
- task_stack -> ts_top = & new_block -> sb_block[0];
- stack_block -> sb_next = new_block;
- new_block -> sb_prev = stack_block;
- new_block -> sb_next = NULL;
-
- KA_TRACE(30, ("__kmp_push_task_stack(): GTID: %d; TASK: %p; Alloc new block: %p\n",
- gtid, tied_task, new_block ) );
- }
- }
- KA_TRACE(20, ("__kmp_push_task_stack(exit): GTID: %d; TASK: %p\n", gtid, tied_task ) );
-}
-
-//---------------------------------------------------------------------------
-// __kmp_pop_task_stack: Pop the tied task from the task stack. Don't return
-// the task, just check to make sure it matches the ending task passed in.
-//
-// gtid: global thread identifier for the calling thread
-// thread: thread info structure containing stack
-// tied_task: the task popped off the stack
-// ending_task: the task that is ending (should match popped task)
-
-static void
-__kmp_pop_task_stack( kmp_int32 gtid, kmp_info_t *thread, kmp_taskdata_t *ending_task )
-{
- // GEH - need to consider what to do if tt_threads_data not allocated yet
- kmp_thread_data_t *thread_data = & thread -> th.th_task_team -> tt.tt_threads_data[ __kmp_tid_from_gtid( gtid ) ];
- kmp_task_stack_t *task_stack = & thread_data->td.td_susp_tied_tasks ;
- kmp_taskdata_t *tied_task;
-
- if ( ending_task->td_flags.team_serial || ending_task->td_flags.tasking_ser ) {
- return; // Don't pop anything from stack if team or team tasks are serialized
- }
-
- KMP_DEBUG_ASSERT( task_stack -> ts_top != NULL );
- KMP_DEBUG_ASSERT( task_stack -> ts_entries > 0 );
-
- KA_TRACE(20, ("__kmp_pop_task_stack(enter): GTID: %d; THREAD: %p\n", gtid, thread ) );
-
- // fix up ts_top if we need to pop from previous block
- if ( ( task_stack -> ts_entries & TASK_STACK_INDEX_MASK ) == 0 )
- {
- kmp_stack_block_t *stack_block =
- (kmp_stack_block_t *) (task_stack -> ts_top) ;
-
- stack_block = stack_block -> sb_prev;
- task_stack -> ts_top = & stack_block -> sb_block[TASK_STACK_BLOCK_SIZE];
- }
-
- // finish bookkeeping
- task_stack -> ts_top--;
- task_stack -> ts_entries--;
-
- tied_task = * (task_stack -> ts_top );
-
- KMP_DEBUG_ASSERT( tied_task != NULL );
- KMP_DEBUG_ASSERT( tied_task -> td_flags.tasktype == TASK_TIED );
- KMP_DEBUG_ASSERT( tied_task == ending_task ); // If we built the stack correctly
-
- KA_TRACE(20, ("__kmp_pop_task_stack(exit): GTID: %d; TASK: %p\n", gtid, tied_task ) );
- return;
-}
-#endif /* BUILD_TIED_TASK_STACK */
-
-//---------------------------------------------------
-// __kmp_push_task: Add a task to the thread's deque
-
-static kmp_int32
-__kmp_push_task(kmp_int32 gtid, kmp_task_t * task )
-{
- kmp_info_t * thread = __kmp_threads[ gtid ];
- kmp_taskdata_t * taskdata = KMP_TASK_TO_TASKDATA(task);
- kmp_task_team_t * task_team = thread->th.th_task_team;
- kmp_int32 tid = __kmp_tid_from_gtid( gtid );
- kmp_thread_data_t * thread_data;
-
- KA_TRACE(20, ("__kmp_push_task: T#%d trying to push task %p.\n", gtid, taskdata ) );
-
- if ( taskdata->td_flags.tiedness == TASK_UNTIED ) {
- // untied task needs to increment counter so that the task structure is not freed prematurely
- kmp_int32 counter = 1 + KMP_TEST_THEN_INC32(&taskdata->td_untied_count);
- KA_TRACE(20, ( "__kmp_push_task: T#%d untied_count (%d) incremented for task %p\n",
- gtid, counter, taskdata ) );
- }
-
- // The first check avoids building task_team thread data if serialized
- if ( taskdata->td_flags.task_serial ) {
- KA_TRACE(20, ( "__kmp_push_task: T#%d team serialized; returning TASK_NOT_PUSHED for task %p\n",
- gtid, taskdata ) );
- return TASK_NOT_PUSHED;
- }
-
- // Now that serialized tasks have returned, we can assume that we are not in immediate exec mode
- KMP_DEBUG_ASSERT( __kmp_tasking_mode != tskm_immediate_exec );
- if ( !
KMP_TASKING_ENABLED(task_team) ) { - __kmp_enable_tasking( task_team, thread ); - } - KMP_DEBUG_ASSERT( TCR_4(task_team -> tt.tt_found_tasks) == TRUE ); - KMP_DEBUG_ASSERT( TCR_PTR(task_team -> tt.tt_threads_data) != NULL ); - - // Find tasking deque specific to encountering thread - thread_data = & task_team -> tt.tt_threads_data[ tid ]; - - // No lock needed since only owner can allocate - if (thread_data -> td.td_deque == NULL ) { - __kmp_alloc_task_deque( thread, thread_data ); - } - - // Check if deque is full - if ( TCR_4(thread_data -> td.td_deque_ntasks) >= TASK_DEQUE_SIZE(thread_data->td) ) - { - KA_TRACE(20, ( "__kmp_push_task: T#%d deque is full; returning TASK_NOT_PUSHED for task %p\n", - gtid, taskdata ) ); - return TASK_NOT_PUSHED; - } - - // Lock the deque for the task push operation - __kmp_acquire_bootstrap_lock( & thread_data -> td.td_deque_lock ); - -#if OMP_45_ENABLED - // Need to recheck as we can get a proxy task from a thread outside of OpenMP - if ( TCR_4(thread_data -> td.td_deque_ntasks) >= TASK_DEQUE_SIZE(thread_data->td) ) - { - __kmp_release_bootstrap_lock( & thread_data -> td.td_deque_lock ); - KA_TRACE(20, ( "__kmp_push_task: T#%d deque is full on 2nd check; returning TASK_NOT_PUSHED for task %p\n", - gtid, taskdata ) ); - return TASK_NOT_PUSHED; - } -#else - // Must have room since no thread can add tasks but calling thread - KMP_DEBUG_ASSERT( TCR_4(thread_data -> td.td_deque_ntasks) < TASK_DEQUE_SIZE(thread_data->td) ); -#endif - - thread_data -> td.td_deque[ thread_data -> td.td_deque_tail ] = taskdata; // Push taskdata - // Wrap index. - thread_data -> td.td_deque_tail = ( thread_data -> td.td_deque_tail + 1 ) & TASK_DEQUE_MASK(thread_data->td); - TCW_4(thread_data -> td.td_deque_ntasks, TCR_4(thread_data -> td.td_deque_ntasks) + 1); // Adjust task count - - KA_TRACE(20, ("__kmp_push_task: T#%d returning TASK_SUCCESSFULLY_PUSHED: " - "task=%p ntasks=%d head=%u tail=%u\n", - gtid, taskdata, thread_data->td.td_deque_ntasks, - thread_data->td.td_deque_head, thread_data->td.td_deque_tail) ); - - __kmp_release_bootstrap_lock( & thread_data->td.td_deque_lock ); - - return TASK_SUCCESSFULLY_PUSHED; -} - - -//----------------------------------------------------------------------------------------- -// __kmp_pop_current_task_from_thread: set up current task from called thread when team ends -// this_thr: thread structure to set current_task in. 
- -void -__kmp_pop_current_task_from_thread( kmp_info_t *this_thr ) -{ - KF_TRACE( 10, ("__kmp_pop_current_task_from_thread(enter): T#%d this_thread=%p, curtask=%p, " - "curtask_parent=%p\n", - 0, this_thr, this_thr -> th.th_current_task, - this_thr -> th.th_current_task -> td_parent ) ); - - this_thr -> th.th_current_task = this_thr -> th.th_current_task -> td_parent; - - KF_TRACE( 10, ("__kmp_pop_current_task_from_thread(exit): T#%d this_thread=%p, curtask=%p, " - "curtask_parent=%p\n", - 0, this_thr, this_thr -> th.th_current_task, - this_thr -> th.th_current_task -> td_parent ) ); -} - - -//--------------------------------------------------------------------------------------- -// __kmp_push_current_task_to_thread: set up current task in called thread for a new team -// this_thr: thread structure to set up -// team: team for implicit task data -// tid: thread within team to set up - -void -__kmp_push_current_task_to_thread( kmp_info_t *this_thr, kmp_team_t *team, int tid ) -{ - // current task of the thread is a parent of the new just created implicit tasks of new team - KF_TRACE( 10, ( "__kmp_push_current_task_to_thread(enter): T#%d this_thread=%p curtask=%p " - "parent_task=%p\n", - tid, this_thr, this_thr->th.th_current_task, - team->t.t_implicit_task_taskdata[tid].td_parent ) ); - - KMP_DEBUG_ASSERT (this_thr != NULL); - - if( tid == 0 ) { - if( this_thr->th.th_current_task != & team -> t.t_implicit_task_taskdata[ 0 ] ) { - team -> t.t_implicit_task_taskdata[ 0 ].td_parent = this_thr->th.th_current_task; - this_thr->th.th_current_task = & team -> t.t_implicit_task_taskdata[ 0 ]; - } - } else { - team -> t.t_implicit_task_taskdata[ tid ].td_parent = team -> t.t_implicit_task_taskdata[ 0 ].td_parent; - this_thr->th.th_current_task = & team -> t.t_implicit_task_taskdata[ tid ]; - } - - KF_TRACE( 10, ( "__kmp_push_current_task_to_thread(exit): T#%d this_thread=%p curtask=%p " - "parent_task=%p\n", - tid, this_thr, this_thr->th.th_current_task, - team->t.t_implicit_task_taskdata[tid].td_parent ) ); -} - - -//---------------------------------------------------------------------- -// __kmp_task_start: bookkeeping for a task starting execution -// GTID: global thread id of calling thread -// task: task starting execution -// current_task: task suspending - -static void -__kmp_task_start( kmp_int32 gtid, kmp_task_t * task, kmp_taskdata_t * current_task ) -{ - kmp_taskdata_t * taskdata = KMP_TASK_TO_TASKDATA(task); - kmp_info_t * thread = __kmp_threads[ gtid ]; - - KA_TRACE(10, ("__kmp_task_start(enter): T#%d starting task %p: current_task=%p\n", - gtid, taskdata, current_task) ); - - KMP_DEBUG_ASSERT( taskdata -> td_flags.tasktype == TASK_EXPLICIT ); - - // mark currently executing task as suspended - // TODO: GEH - make sure root team implicit task is initialized properly. 
- // KMP_DEBUG_ASSERT( current_task -> td_flags.executing == 1 ); - current_task -> td_flags.executing = 0; - - // Add task to stack if tied -#ifdef BUILD_TIED_TASK_STACK - if ( taskdata -> td_flags.tiedness == TASK_TIED ) - { - __kmp_push_task_stack( gtid, thread, taskdata ); - } -#endif /* BUILD_TIED_TASK_STACK */ - - // mark starting task as executing and as current task - thread -> th.th_current_task = taskdata; - - KMP_DEBUG_ASSERT( taskdata->td_flags.started == 0 || taskdata->td_flags.tiedness == TASK_UNTIED ); - KMP_DEBUG_ASSERT( taskdata->td_flags.executing == 0 || taskdata->td_flags.tiedness == TASK_UNTIED ); - taskdata -> td_flags.started = 1; - taskdata -> td_flags.executing = 1; - KMP_DEBUG_ASSERT( taskdata -> td_flags.complete == 0 ); - KMP_DEBUG_ASSERT( taskdata -> td_flags.freed == 0 ); - - // GEH TODO: shouldn't we pass some sort of location identifier here? - // APT: yes, we will pass location here. - // need to store current thread state (in a thread or taskdata structure) - // before setting work_state, otherwise wrong state is set after end of task - - KA_TRACE(10, ("__kmp_task_start(exit): T#%d task=%p\n", - gtid, taskdata ) ); - -#if OMPT_SUPPORT - if (ompt_enabled && - ompt_callbacks.ompt_callback(ompt_event_task_begin)) { - kmp_taskdata_t *parent = taskdata->td_parent; - ompt_callbacks.ompt_callback(ompt_event_task_begin)( - parent ? parent->ompt_task_info.task_id : ompt_task_id_none, - parent ? &(parent->ompt_task_info.frame) : NULL, - taskdata->ompt_task_info.task_id, - taskdata->ompt_task_info.function); - } -#endif -#if OMP_40_ENABLED && OMPT_SUPPORT && OMPT_TRACE - /* OMPT emit all dependences if requested by the tool */ - if (ompt_enabled && taskdata->ompt_task_info.ndeps > 0 && - ompt_callbacks.ompt_callback(ompt_event_task_dependences)) - { - ompt_callbacks.ompt_callback(ompt_event_task_dependences)( - taskdata->ompt_task_info.task_id, - taskdata->ompt_task_info.deps, - taskdata->ompt_task_info.ndeps - ); - /* We can now free the allocated memory for the dependencies */ - KMP_OMPT_DEPS_FREE (thread, taskdata->ompt_task_info.deps); - taskdata->ompt_task_info.deps = NULL; - taskdata->ompt_task_info.ndeps = 0; - } -#endif /* OMP_40_ENABLED && OMPT_SUPPORT && OMPT_TRACE */ - - return; -} - - -//---------------------------------------------------------------------- -// __kmpc_omp_task_begin_if0: report that a given serialized task has started execution -// loc_ref: source location information; points to beginning of task block. -// gtid: global thread number. -// task: task thunk for the started task. - -void -__kmpc_omp_task_begin_if0( ident_t *loc_ref, kmp_int32 gtid, kmp_task_t * task ) -{ - kmp_taskdata_t * taskdata = KMP_TASK_TO_TASKDATA(task); - kmp_taskdata_t * current_task = __kmp_threads[ gtid ] -> th.th_current_task; - - KA_TRACE(10, ("__kmpc_omp_task_begin_if0(enter): T#%d loc=%p task=%p current_task=%p\n", - gtid, loc_ref, taskdata, current_task ) ); - - if ( taskdata->td_flags.tiedness == TASK_UNTIED ) { - // untied task needs to increment counter so that the task structure is not freed prematurely - kmp_int32 counter = 1 + KMP_TEST_THEN_INC32(&taskdata->td_untied_count); - KA_TRACE(20, ( "__kmpc_omp_task_begin_if0: T#%d untied_count (%d) incremented for task %p\n", - gtid, counter, taskdata ) ); - } - - taskdata -> td_flags.task_serial = 1; // Execute this task immediately, not deferred. 
- __kmp_task_start( gtid, task, current_task ); - - KA_TRACE(10, ("__kmpc_omp_task_begin_if0(exit): T#%d loc=%p task=%p,\n", - gtid, loc_ref, taskdata ) ); - - return; -} - -#ifdef TASK_UNUSED -//---------------------------------------------------------------------- -// __kmpc_omp_task_begin: report that a given task has started execution -// NEVER GENERATED BY COMPILER, DEPRECATED!!! - -void -__kmpc_omp_task_begin( ident_t *loc_ref, kmp_int32 gtid, kmp_task_t * task ) -{ - kmp_taskdata_t * current_task = __kmp_threads[ gtid ] -> th.th_current_task; - - KA_TRACE(10, ("__kmpc_omp_task_begin(enter): T#%d loc=%p task=%p current_task=%p\n", - gtid, loc_ref, KMP_TASK_TO_TASKDATA(task), current_task ) ); - - __kmp_task_start( gtid, task, current_task ); - - KA_TRACE(10, ("__kmpc_omp_task_begin(exit): T#%d loc=%p task=%p,\n", - gtid, loc_ref, KMP_TASK_TO_TASKDATA(task) ) ); - - return; -} -#endif // TASK_UNUSED - - -//------------------------------------------------------------------------------------- -// __kmp_free_task: free the current task space and the space for shareds -// gtid: Global thread ID of calling thread -// taskdata: task to free -// thread: thread data structure of caller - -static void -__kmp_free_task( kmp_int32 gtid, kmp_taskdata_t * taskdata, kmp_info_t * thread ) -{ - KA_TRACE(30, ("__kmp_free_task: T#%d freeing data from task %p\n", - gtid, taskdata) ); - - // Check to make sure all flags and counters have the correct values - KMP_DEBUG_ASSERT( taskdata->td_flags.tasktype == TASK_EXPLICIT ); - KMP_DEBUG_ASSERT( taskdata->td_flags.executing == 0 ); - KMP_DEBUG_ASSERT( taskdata->td_flags.complete == 1 ); - KMP_DEBUG_ASSERT( taskdata->td_flags.freed == 0 ); - KMP_DEBUG_ASSERT( TCR_4(taskdata->td_allocated_child_tasks) == 0 || taskdata->td_flags.task_serial == 1); - KMP_DEBUG_ASSERT( TCR_4(taskdata->td_incomplete_child_tasks) == 0 ); - - taskdata->td_flags.freed = 1; - ANNOTATE_HAPPENS_BEFORE(taskdata); - // deallocate the taskdata and shared variable blocks associated with this task - #if USE_FAST_MEMORY - __kmp_fast_free( thread, taskdata ); - #else /* ! USE_FAST_MEMORY */ - __kmp_thread_free( thread, taskdata ); - #endif - - KA_TRACE(20, ("__kmp_free_task: T#%d freed task %p\n", - gtid, taskdata) ); -} - -//------------------------------------------------------------------------------------- -// __kmp_free_task_and_ancestors: free the current task and ancestors without children -// -// gtid: Global thread ID of calling thread -// taskdata: task to free -// thread: thread data structure of caller - -static void -__kmp_free_task_and_ancestors( kmp_int32 gtid, kmp_taskdata_t * taskdata, kmp_info_t * thread ) -{ -#if OMP_45_ENABLED - // Proxy tasks must always be allowed to free their parents - // because they can be run in background even in serial mode. - kmp_int32 team_serial = ( taskdata->td_flags.team_serial || - taskdata->td_flags.tasking_ser ) && !taskdata->td_flags.proxy; -#else - kmp_int32 team_serial = taskdata->td_flags.team_serial || - taskdata->td_flags.tasking_ser; -#endif - KMP_DEBUG_ASSERT( taskdata -> td_flags.tasktype == TASK_EXPLICIT ); - - kmp_int32 children = KMP_TEST_THEN_DEC32( (kmp_int32 *)(& taskdata -> td_allocated_child_tasks) ) - 1; - KMP_DEBUG_ASSERT( children >= 0 ); - - // Now, go up the ancestor tree to see if any ancestors can now be freed. 
- while ( children == 0 ) - { - kmp_taskdata_t * parent_taskdata = taskdata -> td_parent; - - KA_TRACE(20, ("__kmp_free_task_and_ancestors(enter): T#%d task %p complete " - "and freeing itself\n", gtid, taskdata) ); - - // --- Deallocate my ancestor task --- - __kmp_free_task( gtid, taskdata, thread ); - - taskdata = parent_taskdata; - - // Stop checking ancestors at implicit task - // instead of walking up ancestor tree to avoid premature deallocation of ancestors. - if ( team_serial || taskdata -> td_flags.tasktype == TASK_IMPLICIT ) - return; - - // Predecrement simulated by "- 1" calculation - children = KMP_TEST_THEN_DEC32( (kmp_int32 *)(& taskdata -> td_allocated_child_tasks) ) - 1; - KMP_DEBUG_ASSERT( children >= 0 ); - } - - KA_TRACE(20, ("__kmp_free_task_and_ancestors(exit): T#%d task %p has %d children; " - "not freeing it yet\n", gtid, taskdata, children) ); -} - -//--------------------------------------------------------------------- -// __kmp_task_finish: bookkeeping to do when a task finishes execution -// gtid: global thread ID for calling thread -// task: task to be finished -// resumed_task: task to be resumed. (may be NULL if task is serialized) - -static void -__kmp_task_finish( kmp_int32 gtid, kmp_task_t *task, kmp_taskdata_t *resumed_task ) -{ - kmp_taskdata_t * taskdata = KMP_TASK_TO_TASKDATA(task); - kmp_info_t * thread = __kmp_threads[ gtid ]; - kmp_task_team_t * task_team = thread->th.th_task_team; // might be NULL for serial teams... - kmp_int32 children = 0; - -#if OMPT_SUPPORT - if (ompt_enabled && - ompt_callbacks.ompt_callback(ompt_event_task_end)) { - kmp_taskdata_t *parent = taskdata->td_parent; - ompt_callbacks.ompt_callback(ompt_event_task_end)( - taskdata->ompt_task_info.task_id); - } -#endif - - KA_TRACE(10, ("__kmp_task_finish(enter): T#%d finishing task %p and resuming task %p\n", - gtid, taskdata, resumed_task) ); - - KMP_DEBUG_ASSERT( taskdata -> td_flags.tasktype == TASK_EXPLICIT ); - - // Pop task from stack if tied -#ifdef BUILD_TIED_TASK_STACK - if ( taskdata -> td_flags.tiedness == TASK_TIED ) - { - __kmp_pop_task_stack( gtid, thread, taskdata ); - } -#endif /* BUILD_TIED_TASK_STACK */ - - if ( taskdata->td_flags.tiedness == TASK_UNTIED ) { - // untied task needs to check the counter so that the task structure is not freed prematurely - kmp_int32 counter = KMP_TEST_THEN_DEC32(&taskdata->td_untied_count) - 1; - KA_TRACE(20, ( "__kmp_task_finish: T#%d untied_count (%d) decremented for task %p\n", - gtid, counter, taskdata ) ); - if ( counter > 0 ) { - // untied task is not done, to be continued possibly by other thread, do not free it now - if (resumed_task == NULL) { - KMP_DEBUG_ASSERT( taskdata->td_flags.task_serial ); - resumed_task = taskdata->td_parent; // In a serialized task, the resumed task is the parent - } - thread->th.th_current_task = resumed_task; // restore current_task - resumed_task->td_flags.executing = 1; // resume previous task - KA_TRACE(10, ("__kmp_task_finish(exit): T#%d partially done task %p, resuming task %p\n", - gtid, taskdata, resumed_task) ); - return; - } - } - - KMP_DEBUG_ASSERT( taskdata -> td_flags.complete == 0 ); - taskdata -> td_flags.complete = 1; // mark the task as completed - KMP_DEBUG_ASSERT( taskdata -> td_flags.started == 1 ); - KMP_DEBUG_ASSERT( taskdata -> td_flags.freed == 0 ); - - // Only need to keep track of count if team parallel and tasking not serialized - if ( !( taskdata -> td_flags.team_serial || taskdata -> td_flags.tasking_ser ) ) { - // Predecrement simulated by "- 1" calculation - 
children = KMP_TEST_THEN_DEC32( (kmp_int32 *)(& taskdata -> td_parent -> td_incomplete_child_tasks) ) - 1;
- KMP_DEBUG_ASSERT( children >= 0 );
-#if OMP_40_ENABLED
- if ( taskdata->td_taskgroup )
- KMP_TEST_THEN_DEC32( (kmp_int32 *)(& taskdata->td_taskgroup->count) );
-#if OMP_45_ENABLED
- }
- // if we found proxy tasks there could exist a dependency chain
- // with the proxy task as origin
- if ( !( taskdata -> td_flags.team_serial || taskdata -> td_flags.tasking_ser ) || (task_team && task_team->tt.tt_found_proxy_tasks) ) {
-#endif
- __kmp_release_deps(gtid,taskdata);
-#endif
- }
-
- // td_flags.executing must be marked as 0 after __kmp_release_deps has been called
- // Otherwise, if a task is executed immediately from the release_deps code
- // the flag will be reset to 1 again by this same function
- KMP_DEBUG_ASSERT( taskdata -> td_flags.executing == 1 );
- taskdata -> td_flags.executing = 0; // suspend the finishing task
-
- KA_TRACE(20, ("__kmp_task_finish: T#%d finished task %p, %d incomplete children\n",
- gtid, taskdata, children) );
-
-#if OMP_40_ENABLED
- /* If the task's destructor thunk flag has been set, we need to invoke the
- destructor thunk that has been generated by the compiler.
- The code is placed here, since at this point other tasks might have been released
- hence overlapping the destructor invocations with some other work in the
- released tasks. The OpenMP spec is not specific on when the destructors are
- invoked, so we should be free to choose.
- */
- if (taskdata->td_flags.destructors_thunk) {
- kmp_routine_entry_t destr_thunk = task->data1.destructors;
- KMP_ASSERT(destr_thunk);
- destr_thunk(gtid, task);
- }
-#endif // OMP_40_ENABLED
-
- // bookkeeping for resuming task:
- // GEH - note tasking_ser => task_serial
- KMP_DEBUG_ASSERT( (taskdata->td_flags.tasking_ser || taskdata->td_flags.task_serial) ==
- taskdata->td_flags.task_serial);
- if ( taskdata->td_flags.task_serial )
- {
- if (resumed_task == NULL) {
- resumed_task = taskdata->td_parent; // In a serialized task, the resumed task is the parent
- }
- else
-#if OMP_45_ENABLED
- if ( !(task_team && task_team->tt.tt_found_proxy_tasks) )
-#endif
- {
- // verify resumed task passed in points to parent
- KMP_DEBUG_ASSERT( resumed_task == taskdata->td_parent );
- }
- }
- else {
- KMP_DEBUG_ASSERT( resumed_task != NULL ); // verify that resumed task is passed as argument
- }
-
- // Free this task and then ancestor tasks if they have no children.
- // Restore th_current_task first as suggested by John:
- // johnmc: if an asynchronous inquiry peers into the runtime system
- // it doesn't see the freed task as the current task.
- thread->th.th_current_task = resumed_task;
- __kmp_free_task_and_ancestors(gtid, taskdata, thread);
-
- // TODO: GEH - make sure root team implicit task is initialized properly.
- // KMP_DEBUG_ASSERT( resumed_task->td_flags.executing == 0 );
- resumed_task->td_flags.executing = 1; // resume previous task
-
- KA_TRACE(10, ("__kmp_task_finish(exit): T#%d finished task %p, resuming task %p\n",
- gtid, taskdata, resumed_task) );
-
- return;
-}
-
-//---------------------------------------------------------------------
-// __kmpc_omp_task_complete_if0: report that a task has completed execution
-// loc_ref: source location information; points to end of task block.
-// gtid: global thread number.
-// task: task thunk for the completed task.
- -void -__kmpc_omp_task_complete_if0( ident_t *loc_ref, kmp_int32 gtid, kmp_task_t *task ) -{ - KA_TRACE(10, ("__kmpc_omp_task_complete_if0(enter): T#%d loc=%p task=%p\n", - gtid, loc_ref, KMP_TASK_TO_TASKDATA(task) ) ); - - __kmp_task_finish( gtid, task, NULL ); // this routine will provide task to resume - - KA_TRACE(10, ("__kmpc_omp_task_complete_if0(exit): T#%d loc=%p task=%p\n", - gtid, loc_ref, KMP_TASK_TO_TASKDATA(task) ) ); - - return; -} - -#ifdef TASK_UNUSED -//--------------------------------------------------------------------- -// __kmpc_omp_task_complete: report that a task has completed execution -// NEVER GENERATED BY COMPILER, DEPRECATED!!! - -void -__kmpc_omp_task_complete( ident_t *loc_ref, kmp_int32 gtid, kmp_task_t *task ) -{ - KA_TRACE(10, ("__kmpc_omp_task_complete(enter): T#%d loc=%p task=%p\n", - gtid, loc_ref, KMP_TASK_TO_TASKDATA(task) ) ); - - __kmp_task_finish( gtid, task, NULL ); // Not sure how to find task to resume - - KA_TRACE(10, ("__kmpc_omp_task_complete(exit): T#%d loc=%p task=%p\n", - gtid, loc_ref, KMP_TASK_TO_TASKDATA(task) ) ); - return; -} -#endif // TASK_UNUSED - - -#if OMPT_SUPPORT -//---------------------------------------------------------------------------------------------------- -// __kmp_task_init_ompt: -// Initialize OMPT fields maintained by a task. This will only be called after -// ompt_tool, so we already know whether ompt is enabled or not. - -static inline void -__kmp_task_init_ompt( kmp_taskdata_t * task, int tid, void * function ) -{ - if (ompt_enabled) { - task->ompt_task_info.task_id = __ompt_task_id_new(tid); - task->ompt_task_info.function = function; - task->ompt_task_info.frame.exit_runtime_frame = NULL; - task->ompt_task_info.frame.reenter_runtime_frame = NULL; -#if OMP_40_ENABLED - task->ompt_task_info.ndeps = 0; - task->ompt_task_info.deps = NULL; -#endif /* OMP_40_ENABLED */ - } -} -#endif - - -//---------------------------------------------------------------------------------------------------- -// __kmp_init_implicit_task: Initialize the appropriate fields in the implicit task for a given thread -// -// loc_ref: reference to source location of parallel region -// this_thr: thread data structure corresponding to implicit task -// team: team for this_thr -// tid: thread id of given thread within team -// set_curr_task: TRUE if need to push current task to thread -// NOTE: Routine does not set up the implicit task ICVS. This is assumed to have already been done elsewhere. -// TODO: Get better loc_ref. Value passed in may be NULL - -void -__kmp_init_implicit_task( ident_t *loc_ref, kmp_info_t *this_thr, kmp_team_t *team, int tid, int set_curr_task ) -{ - kmp_taskdata_t * task = & team->t.t_implicit_task_taskdata[ tid ]; - - KF_TRACE(10, ("__kmp_init_implicit_task(enter): T#:%d team=%p task=%p, reinit=%s\n", - tid, team, task, set_curr_task ? "TRUE" : "FALSE" ) ); - - task->td_task_id = KMP_GEN_TASK_ID(); - task->td_team = team; -// task->td_parent = NULL; // fix for CQ230101 (broken parent task info in debugger) - task->td_ident = loc_ref; - task->td_taskwait_ident = NULL; - task->td_taskwait_counter = 0; - task->td_taskwait_thread = 0; - - task->td_flags.tiedness = TASK_TIED; - task->td_flags.tasktype = TASK_IMPLICIT; -#if OMP_45_ENABLED - task->td_flags.proxy = TASK_FULL; -#endif - - // All implicit tasks are executed immediately, not deferred - task->td_flags.task_serial = 1; - task->td_flags.tasking_ser = ( __kmp_tasking_mode == tskm_immediate_exec ); - task->td_flags.team_serial = ( team->t.t_serialized ) ? 
1 : 0;
-
- task->td_flags.started = 1;
- task->td_flags.executing = 1;
- task->td_flags.complete = 0;
- task->td_flags.freed = 0;
-
-#if OMP_40_ENABLED
- task->td_depnode = NULL;
-#endif
-
- if (set_curr_task) { // only do this initialization the first time a thread is created
- task->td_incomplete_child_tasks = 0;
- task->td_allocated_child_tasks = 0; // Not used because do not need to deallocate implicit task
-#if OMP_40_ENABLED
- task->td_taskgroup = NULL; // An implicit task does not have taskgroup
- task->td_dephash = NULL;
-#endif
- __kmp_push_current_task_to_thread( this_thr, team, tid );
- } else {
- KMP_DEBUG_ASSERT(task->td_incomplete_child_tasks == 0);
- KMP_DEBUG_ASSERT(task->td_allocated_child_tasks == 0);
- }
-
-#if OMPT_SUPPORT
- __kmp_task_init_ompt(task, tid, NULL);
-#endif
-
- KF_TRACE(10, ("__kmp_init_implicit_task(exit): T#:%d team=%p task=%p\n",
- tid, team, task ) );
-}
-
-
-//-----------------------------------------------------------------------------
-// __kmp_finish_implicit_task: Release resources associated with implicit tasks
-// at the end of parallel regions. Some resources are kept for reuse in the
-// next parallel region.
-//
-// thread: thread data structure corresponding to implicit task
-
-void
-__kmp_finish_implicit_task(kmp_info_t *thread)
-{
- kmp_taskdata_t *task = thread->th.th_current_task;
- if (task->td_dephash)
- __kmp_dephash_free_entries(thread, task->td_dephash);
-}
-
-
-//-----------------------------------------------------------------------------
-// __kmp_free_implicit_task: Release resources associated with implicit tasks
-// when these are destroyed
-//
-// thread: thread data structure corresponding to implicit task
-
-void
-__kmp_free_implicit_task(kmp_info_t *thread)
-{
- kmp_taskdata_t *task = thread->th.th_current_task;
- if (task->td_dephash)
- __kmp_dephash_free(thread, task->td_dephash);
- task->td_dephash = NULL;
-}
-
-
-// Round up a size to a multiple of val, where val is a power of two
-// Used to insert padding between structures co-allocated using a single malloc() call
-static size_t
-__kmp_round_up_to_val( size_t size, size_t val ) {
- if ( size & ( val - 1 ) ) {
- size &= ~ ( val - 1 );
- if ( size <= KMP_SIZE_T_MAX - val ) {
- size += val; // Round up if there is no overflow.
- }; // if
- }; // if
- return size;
-} // __kmp_round_up_to_val
-
-
-//---------------------------------------------------------------------------------
-// __kmp_task_alloc: Allocate the taskdata and task data structures for a task
-//
-// loc_ref: source location information
-// gtid: global thread number.
-// flags: include tiedness & task type (explicit vs. implicit) of the ''new'' task encountered.
-// Converted from kmp_int32 to kmp_tasking_flags_t in routine.
-// sizeof_kmp_task_t: Size in bytes of kmp_task_t data structure including private vars accessed in task.
-// sizeof_shareds: Size in bytes of array of pointers to shared vars accessed in task.
-// task_entry: Pointer to task code entry point generated by compiler.
-// returns: a pointer to the allocated kmp_task_t structure (task).
- -kmp_task_t * -__kmp_task_alloc( ident_t *loc_ref, kmp_int32 gtid, kmp_tasking_flags_t *flags, - size_t sizeof_kmp_task_t, size_t sizeof_shareds, - kmp_routine_entry_t task_entry ) -{ - kmp_task_t *task; - kmp_taskdata_t *taskdata; - kmp_info_t *thread = __kmp_threads[ gtid ]; - kmp_team_t *team = thread->th.th_team; - kmp_taskdata_t *parent_task = thread->th.th_current_task; - size_t shareds_offset; - - KA_TRACE(10, ("__kmp_task_alloc(enter): T#%d loc=%p, flags=(0x%x) " - "sizeof_task=%ld sizeof_shared=%ld entry=%p\n", - gtid, loc_ref, *((kmp_int32 *)flags), sizeof_kmp_task_t, - sizeof_shareds, task_entry) ); - - if ( parent_task->td_flags.final ) { - if (flags->merged_if0) { - } - flags->final = 1; - } - -#if OMP_45_ENABLED - if ( flags->proxy == TASK_PROXY ) { - flags->tiedness = TASK_UNTIED; - flags->merged_if0 = 1; - - /* are we running in a sequential parallel or tskm_immediate_exec... we need tasking support enabled */ - if ( (thread->th.th_task_team) == NULL ) { - /* This should only happen if the team is serialized - setup a task team and propagate it to the thread - */ - KMP_DEBUG_ASSERT(team->t.t_serialized); - KA_TRACE(30,("T#%d creating task team in __kmp_task_alloc for proxy task\n", gtid)); - __kmp_task_team_setup(thread,team,1); // 1 indicates setup the current team regardless of nthreads - thread->th.th_task_team = team->t.t_task_team[thread->th.th_task_state]; - } - kmp_task_team_t * task_team = thread->th.th_task_team; - - /* tasking must be enabled now as the task might not be pushed */ - if ( !KMP_TASKING_ENABLED( task_team ) ) { - KA_TRACE(30,("T#%d enabling tasking in __kmp_task_alloc for proxy task\n", gtid)); - __kmp_enable_tasking( task_team, thread ); - kmp_int32 tid = thread->th.th_info.ds.ds_tid; - kmp_thread_data_t * thread_data = & task_team -> tt.tt_threads_data[ tid ]; - // No lock needed since only owner can allocate - if (thread_data -> td.td_deque == NULL ) { - __kmp_alloc_task_deque( thread, thread_data ); - } - } - - if ( task_team->tt.tt_found_proxy_tasks == FALSE ) - TCW_4(task_team -> tt.tt_found_proxy_tasks, TRUE); - } -#endif - - // Calculate shared structure offset including padding after kmp_task_t struct - // to align pointers in shared struct - shareds_offset = sizeof( kmp_taskdata_t ) + sizeof_kmp_task_t; - shareds_offset = __kmp_round_up_to_val( shareds_offset, sizeof( void * )); - - // Allocate a kmp_taskdata_t block and a kmp_task_t block. - KA_TRACE(30, ("__kmp_task_alloc: T#%d First malloc size: %ld\n", - gtid, shareds_offset) ); - KA_TRACE(30, ("__kmp_task_alloc: T#%d Second malloc size: %ld\n", - gtid, sizeof_shareds) ); - - // Avoid double allocation here by combining shareds with taskdata - #if USE_FAST_MEMORY - taskdata = (kmp_taskdata_t *) __kmp_fast_allocate( thread, shareds_offset + sizeof_shareds ); - #else /* ! 
USE_FAST_MEMORY */ - taskdata = (kmp_taskdata_t *) __kmp_thread_malloc( thread, shareds_offset + sizeof_shareds ); - #endif /* USE_FAST_MEMORY */ - ANNOTATE_HAPPENS_AFTER(taskdata); - - task = KMP_TASKDATA_TO_TASK(taskdata); - - // Make sure task & taskdata are aligned appropriately -#if KMP_ARCH_X86 || KMP_ARCH_PPC64 || !KMP_HAVE_QUAD - KMP_DEBUG_ASSERT( ( ((kmp_uintptr_t)taskdata) & (sizeof(double)-1) ) == 0 ); - KMP_DEBUG_ASSERT( ( ((kmp_uintptr_t)task) & (sizeof(double)-1) ) == 0 ); -#else - KMP_DEBUG_ASSERT( ( ((kmp_uintptr_t)taskdata) & (sizeof(_Quad)-1) ) == 0 ); - KMP_DEBUG_ASSERT( ( ((kmp_uintptr_t)task) & (sizeof(_Quad)-1) ) == 0 ); -#endif - if (sizeof_shareds > 0) { - // Avoid double allocation here by combining shareds with taskdata - task->shareds = & ((char *) taskdata)[ shareds_offset ]; - // Make sure shareds struct is aligned to pointer size - KMP_DEBUG_ASSERT( ( ((kmp_uintptr_t)task->shareds) & (sizeof(void *)-1) ) == 0 ); - } else { - task->shareds = NULL; - } - task->routine = task_entry; - task->part_id = 0; // AC: Always start with 0 part id - - taskdata->td_task_id = KMP_GEN_TASK_ID(); - taskdata->td_team = team; - taskdata->td_alloc_thread = thread; - taskdata->td_parent = parent_task; - taskdata->td_level = parent_task->td_level + 1; // increment nesting level - taskdata->td_untied_count = 0; - taskdata->td_ident = loc_ref; - taskdata->td_taskwait_ident = NULL; - taskdata->td_taskwait_counter = 0; - taskdata->td_taskwait_thread = 0; - KMP_DEBUG_ASSERT( taskdata->td_parent != NULL ); -#if OMP_45_ENABLED - // avoid copying icvs for proxy tasks - if ( flags->proxy == TASK_FULL ) -#endif - copy_icvs( &taskdata->td_icvs, &taskdata->td_parent->td_icvs ); - - taskdata->td_flags.tiedness = flags->tiedness; - taskdata->td_flags.final = flags->final; - taskdata->td_flags.merged_if0 = flags->merged_if0; -#if OMP_40_ENABLED - taskdata->td_flags.destructors_thunk = flags->destructors_thunk; -#endif // OMP_40_ENABLED -#if OMP_45_ENABLED - taskdata->td_flags.proxy = flags->proxy; - taskdata->td_task_team = thread->th.th_task_team; - taskdata->td_size_alloc = shareds_offset + sizeof_shareds; -#endif - taskdata->td_flags.tasktype = TASK_EXPLICIT; - - // GEH - TODO: fix this to copy parent task's value of tasking_ser flag - taskdata->td_flags.tasking_ser = ( __kmp_tasking_mode == tskm_immediate_exec ); - - // GEH - TODO: fix this to copy parent task's value of team_serial flag - taskdata->td_flags.team_serial = ( team->t.t_serialized ) ? 1 : 0; - - // GEH - Note we serialize the task if the team is serialized to make sure implicit parallel region - // tasks are not left until program termination to execute. Also, it helps locality to execute - // immediately. 
- taskdata->td_flags.task_serial = ( parent_task->td_flags.final - || taskdata->td_flags.team_serial || taskdata->td_flags.tasking_ser ); - - taskdata->td_flags.started = 0; - taskdata->td_flags.executing = 0; - taskdata->td_flags.complete = 0; - taskdata->td_flags.freed = 0; - - taskdata->td_flags.native = flags->native; - - taskdata->td_incomplete_child_tasks = 0; - taskdata->td_allocated_child_tasks = 1; // start at one because counts current task and children -#if OMP_40_ENABLED - taskdata->td_taskgroup = parent_task->td_taskgroup; // task inherits the taskgroup from the parent task - taskdata->td_dephash = NULL; - taskdata->td_depnode = NULL; -#endif - - // Only need to keep track of child task counts if team parallel and tasking not serialized or if it is a proxy task -#if OMP_45_ENABLED - if ( flags->proxy == TASK_PROXY || !( taskdata -> td_flags.team_serial || taskdata -> td_flags.tasking_ser ) ) -#else - if ( !( taskdata -> td_flags.team_serial || taskdata -> td_flags.tasking_ser ) ) -#endif - { - KMP_TEST_THEN_INC32( (kmp_int32 *)(& parent_task->td_incomplete_child_tasks) ); -#if OMP_40_ENABLED - if ( parent_task->td_taskgroup ) - KMP_TEST_THEN_INC32( (kmp_int32 *)(& parent_task->td_taskgroup->count) ); -#endif - // Only need to keep track of allocated child tasks for explicit tasks since implicit not deallocated - if ( taskdata->td_parent->td_flags.tasktype == TASK_EXPLICIT ) { - KMP_TEST_THEN_INC32( (kmp_int32 *)(& taskdata->td_parent->td_allocated_child_tasks) ); - } - } - - KA_TRACE(20, ("__kmp_task_alloc(exit): T#%d created task %p parent=%p\n", - gtid, taskdata, taskdata->td_parent) ); - ANNOTATE_HAPPENS_BEFORE(task); - -#if OMPT_SUPPORT - __kmp_task_init_ompt(taskdata, gtid, (void*) task_entry); -#endif - - return task; -} - - -kmp_task_t * -__kmpc_omp_task_alloc( ident_t *loc_ref, kmp_int32 gtid, kmp_int32 flags, - size_t sizeof_kmp_task_t, size_t sizeof_shareds, - kmp_routine_entry_t task_entry ) -{ - kmp_task_t *retval; - kmp_tasking_flags_t *input_flags = (kmp_tasking_flags_t *) & flags; - - input_flags->native = FALSE; - // __kmp_task_alloc() sets up all other runtime flags - -#if OMP_45_ENABLED - KA_TRACE(10, ("__kmpc_omp_task_alloc(enter): T#%d loc=%p, flags=(%s %s) " - "sizeof_task=%ld sizeof_shared=%ld entry=%p\n", - gtid, loc_ref, input_flags->tiedness ? "tied " : "untied", - input_flags->proxy ? "proxy" : "", - sizeof_kmp_task_t, sizeof_shareds, task_entry) ); -#else - KA_TRACE(10, ("__kmpc_omp_task_alloc(enter): T#%d loc=%p, flags=(%s) " - "sizeof_task=%ld sizeof_shared=%ld entry=%p\n", - gtid, loc_ref, input_flags->tiedness ? 
"tied " : "untied", - sizeof_kmp_task_t, sizeof_shareds, task_entry) ); -#endif - - retval = __kmp_task_alloc( loc_ref, gtid, input_flags, sizeof_kmp_task_t, - sizeof_shareds, task_entry ); - - KA_TRACE(20, ("__kmpc_omp_task_alloc(exit): T#%d retval %p\n", gtid, retval) ); - - return retval; -} - -//----------------------------------------------------------- -// __kmp_invoke_task: invoke the specified task -// -// gtid: global thread ID of caller -// task: the task to invoke -// current_task: the task to resume after task invokation - -static void -__kmp_invoke_task( kmp_int32 gtid, kmp_task_t *task, kmp_taskdata_t * current_task ) -{ - kmp_taskdata_t * taskdata = KMP_TASK_TO_TASKDATA(task); - kmp_uint64 cur_time; -#if OMP_40_ENABLED - int discard = 0 /* false */; -#endif - KA_TRACE(30, ("__kmp_invoke_task(enter): T#%d invoking task %p, current_task=%p\n", - gtid, taskdata, current_task) ); - KMP_DEBUG_ASSERT(task); -#if OMP_45_ENABLED - if ( taskdata->td_flags.proxy == TASK_PROXY && - taskdata->td_flags.complete == 1) - { - // This is a proxy task that was already completed but it needs to run - // its bottom-half finish - KA_TRACE(30, ("__kmp_invoke_task: T#%d running bottom finish for proxy task %p\n", - gtid, taskdata) ); - - __kmp_bottom_half_finish_proxy(gtid,task); - - KA_TRACE(30, ("__kmp_invoke_task(exit): T#%d completed bottom finish for proxy task %p, resuming task %p\n", gtid, taskdata, current_task) ); - - return; - } -#endif - -#if USE_ITT_BUILD && USE_ITT_NOTIFY - if(__kmp_forkjoin_frames_mode == 3) { - // Get the current time stamp to measure task execution time to correct barrier imbalance time - cur_time = __itt_get_timestamp(); - } -#endif - -#if OMP_45_ENABLED - // Proxy tasks are not handled by the runtime - if ( taskdata->td_flags.proxy != TASK_PROXY ) { -#endif - ANNOTATE_HAPPENS_AFTER(task); - __kmp_task_start( gtid, task, current_task ); -#if OMP_45_ENABLED - } -#endif - -#if OMPT_SUPPORT - ompt_thread_info_t oldInfo; - kmp_info_t * thread; - if (ompt_enabled) { - // Store the threads states and restore them after the task - thread = __kmp_threads[ gtid ]; - oldInfo = thread->th.ompt_thread_info; - thread->th.ompt_thread_info.wait_id = 0; - thread->th.ompt_thread_info.state = ompt_state_work_parallel; - taskdata->ompt_task_info.frame.exit_runtime_frame = __builtin_frame_address(0); - } -#endif - -#if OMP_40_ENABLED - // TODO: cancel tasks if the parallel region has also been cancelled - // TODO: check if this sequence can be hoisted above __kmp_task_start - // if cancellation has been enabled for this run ... - if (__kmp_omp_cancellation) { - kmp_info_t *this_thr = __kmp_threads [ gtid ]; - kmp_team_t * this_team = this_thr->th.th_team; - kmp_taskgroup_t * taskgroup = taskdata->td_taskgroup; - if ((taskgroup && taskgroup->cancel_request) || (this_team->t.t_cancel_request == cancel_parallel)) { - KMP_COUNT_BLOCK(TASK_cancelled); - // this task belongs to a task group and we need to cancel it - discard = 1 /* true */; - } - } - - // - // Invoke the task routine and pass in relevant data. - // Thunks generated by gcc take a different argument list. 
- // - if (!discard) { -#if KMP_STATS_ENABLED - KMP_COUNT_BLOCK(TASK_executed); - switch(KMP_GET_THREAD_STATE()) { - case FORK_JOIN_BARRIER: KMP_PUSH_PARTITIONED_TIMER(OMP_task_join_bar); break; - case PLAIN_BARRIER: KMP_PUSH_PARTITIONED_TIMER(OMP_task_plain_bar); break; - case TASKYIELD: KMP_PUSH_PARTITIONED_TIMER(OMP_task_taskyield); break; - case TASKWAIT: KMP_PUSH_PARTITIONED_TIMER(OMP_task_taskwait); break; - case TASKGROUP: KMP_PUSH_PARTITIONED_TIMER(OMP_task_taskgroup); break; - default: KMP_PUSH_PARTITIONED_TIMER(OMP_task_immediate); break; - } -#endif // KMP_STATS_ENABLED -#endif // OMP_40_ENABLED - -#if OMPT_SUPPORT && OMPT_TRACE - /* let OMPT know that we're about to run this task */ - if (ompt_enabled && - ompt_callbacks.ompt_callback(ompt_event_task_switch)) - { - ompt_callbacks.ompt_callback(ompt_event_task_switch)( - current_task->ompt_task_info.task_id, - taskdata->ompt_task_info.task_id); - } -#endif - -#ifdef KMP_GOMP_COMPAT - if (taskdata->td_flags.native) { - ((void (*)(void *))(*(task->routine)))(task->shareds); - } - else -#endif /* KMP_GOMP_COMPAT */ - { - (*(task->routine))(gtid, task); - } - KMP_POP_PARTITIONED_TIMER(); - -#if OMPT_SUPPORT && OMPT_TRACE - /* let OMPT know that we're returning to the callee task */ - if (ompt_enabled && - ompt_callbacks.ompt_callback(ompt_event_task_switch)) - { - ompt_callbacks.ompt_callback(ompt_event_task_switch)( - taskdata->ompt_task_info.task_id, - current_task->ompt_task_info.task_id); - } -#endif - -#if OMP_40_ENABLED - } -#endif // OMP_40_ENABLED - - -#if OMPT_SUPPORT - if (ompt_enabled) { - thread->th.ompt_thread_info = oldInfo; - taskdata->ompt_task_info.frame.exit_runtime_frame = NULL; - } -#endif - -#if OMP_45_ENABLED - // Proxy tasks are not handled by the runtime - if ( taskdata->td_flags.proxy != TASK_PROXY ) { -#endif - ANNOTATE_HAPPENS_BEFORE(taskdata->td_parent); - __kmp_task_finish( gtid, task, current_task ); -#if OMP_45_ENABLED - } -#endif - -#if USE_ITT_BUILD && USE_ITT_NOTIFY - // Barrier imbalance - correct arrive time after the task finished - if(__kmp_forkjoin_frames_mode == 3) { - kmp_info_t *this_thr = __kmp_threads [ gtid ]; - if(this_thr->th.th_bar_arrive_time) { - this_thr->th.th_bar_arrive_time += (__itt_get_timestamp() - cur_time); - } - } -#endif - KA_TRACE(30, ("__kmp_invoke_task(exit): T#%d completed task %p, resuming task %p\n", - gtid, taskdata, current_task) ); - return; -} - -//----------------------------------------------------------------------- -// __kmpc_omp_task_parts: Schedule a thread-switchable task for execution -// -// loc_ref: location of original task pragma (ignored) -// gtid: Global Thread ID of encountering thread -// new_task: task thunk allocated by __kmp_omp_task_alloc() for the ''new task'' -// Returns: -// TASK_CURRENT_NOT_QUEUED (0) if did not suspend and queue current task to be resumed later. -// TASK_CURRENT_QUEUED (1) if suspended and queued the current task to be resumed later. - -kmp_int32 -__kmpc_omp_task_parts( ident_t *loc_ref, kmp_int32 gtid, kmp_task_t * new_task) -{ - kmp_taskdata_t * new_taskdata = KMP_TASK_TO_TASKDATA(new_task); - - KA_TRACE(10, ("__kmpc_omp_task_parts(enter): T#%d loc=%p task=%p\n", - gtid, loc_ref, new_taskdata ) ); - - /* Should we execute the new task or queue it? For now, let's just always try to - queue it. If the queue fills up, then we'll execute it. 
*/ - - if ( __kmp_push_task( gtid, new_task ) == TASK_NOT_PUSHED ) // if cannot defer - { // Execute this task immediately - kmp_taskdata_t * current_task = __kmp_threads[ gtid ] -> th.th_current_task; - new_taskdata->td_flags.task_serial = 1; - __kmp_invoke_task( gtid, new_task, current_task ); - } - - KA_TRACE(10, ("__kmpc_omp_task_parts(exit): T#%d returning TASK_CURRENT_NOT_QUEUED: " - "loc=%p task=%p, return: TASK_CURRENT_NOT_QUEUED\n", gtid, loc_ref, - new_taskdata ) ); - - ANNOTATE_HAPPENS_BEFORE(new_task); - return TASK_CURRENT_NOT_QUEUED; -} - -//--------------------------------------------------------------------- -// __kmp_omp_task: Schedule a non-thread-switchable task for execution -// gtid: Global Thread ID of encountering thread -// new_task: non-thread-switchable task thunk allocated by __kmp_omp_task_alloc() -// serialize_immediate: if TRUE then if the task is executed immediately its execution will be serialized -// returns: -// -// TASK_CURRENT_NOT_QUEUED (0) if did not suspend and queue current task to be resumed later. -// TASK_CURRENT_QUEUED (1) if suspended and queued the current task to be resumed later. -kmp_int32 -__kmp_omp_task( kmp_int32 gtid, kmp_task_t * new_task, bool serialize_immediate ) -{ - kmp_taskdata_t * new_taskdata = KMP_TASK_TO_TASKDATA(new_task); - -#if OMPT_SUPPORT - if (ompt_enabled) { - new_taskdata->ompt_task_info.frame.reenter_runtime_frame = - __builtin_frame_address(1); - } -#endif - - /* Should we execute the new task or queue it? For now, let's just always try to - queue it. If the queue fills up, then we'll execute it. */ -#if OMP_45_ENABLED - if ( new_taskdata->td_flags.proxy == TASK_PROXY || __kmp_push_task( gtid, new_task ) == TASK_NOT_PUSHED ) // if cannot defer -#else - if ( __kmp_push_task( gtid, new_task ) == TASK_NOT_PUSHED ) // if cannot defer -#endif - { // Execute this task immediately - kmp_taskdata_t * current_task = __kmp_threads[ gtid ] -> th.th_current_task; - if ( serialize_immediate ) - new_taskdata -> td_flags.task_serial = 1; - __kmp_invoke_task( gtid, new_task, current_task ); - } - -#if OMPT_SUPPORT - if (ompt_enabled) { - new_taskdata->ompt_task_info.frame.reenter_runtime_frame = NULL; - } -#endif - - ANNOTATE_HAPPENS_BEFORE(new_task); - return TASK_CURRENT_NOT_QUEUED; -} - -//--------------------------------------------------------------------- -// __kmpc_omp_task: Wrapper around __kmp_omp_task to schedule a non-thread-switchable task from -// the parent thread only! -// loc_ref: location of original task pragma (ignored) -// gtid: Global Thread ID of encountering thread -// new_task: non-thread-switchable task thunk allocated by __kmp_omp_task_alloc() -// returns: -// -// TASK_CURRENT_NOT_QUEUED (0) if did not suspend and queue current task to be resumed later. -// TASK_CURRENT_QUEUED (1) if suspended and queued the current task to be resumed later. 
- -kmp_int32 -__kmpc_omp_task( ident_t *loc_ref, kmp_int32 gtid, kmp_task_t * new_task) -{ - kmp_int32 res; - KMP_SET_THREAD_STATE_BLOCK(EXPLICIT_TASK); - -#if KMP_DEBUG - kmp_taskdata_t * new_taskdata = KMP_TASK_TO_TASKDATA(new_task); -#endif - KA_TRACE(10, ("__kmpc_omp_task(enter): T#%d loc=%p task=%p\n", - gtid, loc_ref, new_taskdata ) ); - - res = __kmp_omp_task(gtid,new_task,true); - - KA_TRACE(10, ("__kmpc_omp_task(exit): T#%d returning TASK_CURRENT_NOT_QUEUED: loc=%p task=%p\n", - gtid, loc_ref, new_taskdata ) ); - return res; -} - -//------------------------------------------------------------------------------------- -// __kmpc_omp_taskwait: Wait until all tasks generated by the current task are complete - -kmp_int32 -__kmpc_omp_taskwait( ident_t *loc_ref, kmp_int32 gtid ) -{ - kmp_taskdata_t * taskdata; - kmp_info_t * thread; - int thread_finished = FALSE; - KMP_SET_THREAD_STATE_BLOCK(TASKWAIT); - - KA_TRACE(10, ("__kmpc_omp_taskwait(enter): T#%d loc=%p\n", gtid, loc_ref) ); - - if ( __kmp_tasking_mode != tskm_immediate_exec ) { - // GEH TODO: shouldn't we have some sort of OMPRAP API calls here to mark begin wait? - - thread = __kmp_threads[ gtid ]; - taskdata = thread -> th.th_current_task; - -#if OMPT_SUPPORT && OMPT_TRACE - ompt_task_id_t my_task_id; - ompt_parallel_id_t my_parallel_id; - - if (ompt_enabled) { - kmp_team_t *team = thread->th.th_team; - my_task_id = taskdata->ompt_task_info.task_id; - my_parallel_id = team->t.ompt_team_info.parallel_id; - - taskdata->ompt_task_info.frame.reenter_runtime_frame = __builtin_frame_address(1); - if (ompt_callbacks.ompt_callback(ompt_event_taskwait_begin)) { - ompt_callbacks.ompt_callback(ompt_event_taskwait_begin)( - my_parallel_id, my_task_id); - } - } -#endif - - // Debugger: The taskwait is active. Store location and thread encountered the taskwait. -#if USE_ITT_BUILD - // Note: These values are used by ITT events as well. -#endif /* USE_ITT_BUILD */ - taskdata->td_taskwait_counter += 1; - taskdata->td_taskwait_ident = loc_ref; - taskdata->td_taskwait_thread = gtid + 1; - -#if USE_ITT_BUILD - void * itt_sync_obj = __kmp_itt_taskwait_object( gtid ); - if ( itt_sync_obj != NULL ) - __kmp_itt_taskwait_starting( gtid, itt_sync_obj ); -#endif /* USE_ITT_BUILD */ - - bool must_wait = ! taskdata->td_flags.team_serial && ! taskdata->td_flags.final; - -#if OMP_45_ENABLED - must_wait = must_wait || (thread->th.th_task_team != NULL && thread->th.th_task_team->tt.tt_found_proxy_tasks); -#endif - if (must_wait) - { - kmp_flag_32 flag(&(taskdata->td_incomplete_child_tasks), 0U); - while ( TCR_4(taskdata -> td_incomplete_child_tasks) != 0 ) { - flag.execute_tasks(thread, gtid, FALSE, &thread_finished - USE_ITT_BUILD_ARG(itt_sync_obj), __kmp_task_stealing_constraint ); - } - } -#if USE_ITT_BUILD - if ( itt_sync_obj != NULL ) - __kmp_itt_taskwait_finished( gtid, itt_sync_obj ); -#endif /* USE_ITT_BUILD */ - - // GEH TODO: shouldn't we have some sort of OMPRAP API calls here to mark end of wait? - // Debugger: The taskwait is completed. Location remains, but thread is negated. 
- taskdata->td_taskwait_thread = - taskdata->td_taskwait_thread; - -#if OMPT_SUPPORT && OMPT_TRACE - if (ompt_enabled) { - if (ompt_callbacks.ompt_callback(ompt_event_taskwait_end)) { - ompt_callbacks.ompt_callback(ompt_event_taskwait_end)( - my_parallel_id, my_task_id); - } - taskdata->ompt_task_info.frame.reenter_runtime_frame = NULL; - } -#endif - ANNOTATE_HAPPENS_AFTER(taskdata); - } - - KA_TRACE(10, ("__kmpc_omp_taskwait(exit): T#%d task %p finished waiting, " - "returning TASK_CURRENT_NOT_QUEUED\n", gtid, taskdata) ); - - return TASK_CURRENT_NOT_QUEUED; -} - - -//------------------------------------------------- -// __kmpc_omp_taskyield: switch to a different task - -kmp_int32 -__kmpc_omp_taskyield( ident_t *loc_ref, kmp_int32 gtid, int end_part ) -{ - kmp_taskdata_t * taskdata; - kmp_info_t * thread; - int thread_finished = FALSE; - - KMP_COUNT_BLOCK(OMP_TASKYIELD); - KMP_SET_THREAD_STATE_BLOCK(TASKYIELD); - - KA_TRACE(10, ("__kmpc_omp_taskyield(enter): T#%d loc=%p end_part = %d\n", - gtid, loc_ref, end_part) ); - - if ( __kmp_tasking_mode != tskm_immediate_exec && __kmp_init_parallel ) { - // GEH TODO: shouldn't we have some sort of OMPRAP API calls here to mark begin wait? - - thread = __kmp_threads[ gtid ]; - taskdata = thread -> th.th_current_task; - // Should we model this as a task wait or not? - // Debugger: The taskwait is active. Store location and thread encountered the taskwait. -#if USE_ITT_BUILD - // Note: These values are used by ITT events as well. -#endif /* USE_ITT_BUILD */ - taskdata->td_taskwait_counter += 1; - taskdata->td_taskwait_ident = loc_ref; - taskdata->td_taskwait_thread = gtid + 1; - -#if USE_ITT_BUILD - void * itt_sync_obj = __kmp_itt_taskwait_object( gtid ); - if ( itt_sync_obj != NULL ) - __kmp_itt_taskwait_starting( gtid, itt_sync_obj ); -#endif /* USE_ITT_BUILD */ - if ( ! taskdata->td_flags.team_serial ) { - kmp_task_team_t * task_team = thread->th.th_task_team; - if (task_team != NULL) { - if (KMP_TASKING_ENABLED(task_team)) { - __kmp_execute_tasks_32( thread, gtid, NULL, FALSE, &thread_finished - USE_ITT_BUILD_ARG(itt_sync_obj), __kmp_task_stealing_constraint ); - } - } - } -#if USE_ITT_BUILD - if ( itt_sync_obj != NULL ) - __kmp_itt_taskwait_finished( gtid, itt_sync_obj ); -#endif /* USE_ITT_BUILD */ - - // GEH TODO: shouldn't we have some sort of OMPRAP API calls here to mark end of wait? - // Debugger: The taskwait is completed. Location remains, but thread is negated. 
- taskdata->td_taskwait_thread = - taskdata->td_taskwait_thread; - } - - KA_TRACE(10, ("__kmpc_omp_taskyield(exit): T#%d task %p resuming, " - "returning TASK_CURRENT_NOT_QUEUED\n", gtid, taskdata) ); - - return TASK_CURRENT_NOT_QUEUED; -} - - -#if OMP_40_ENABLED -//------------------------------------------------------------------------------------- -// __kmpc_taskgroup: Start a new taskgroup - -void -__kmpc_taskgroup( ident_t* loc, int gtid ) -{ - kmp_info_t * thread = __kmp_threads[ gtid ]; - kmp_taskdata_t * taskdata = thread->th.th_current_task; - kmp_taskgroup_t * tg_new = - (kmp_taskgroup_t *)__kmp_thread_malloc( thread, sizeof( kmp_taskgroup_t ) ); - KA_TRACE(10, ("__kmpc_taskgroup: T#%d loc=%p group=%p\n", gtid, loc, tg_new) ); - tg_new->count = 0; - tg_new->cancel_request = cancel_noreq; - tg_new->parent = taskdata->td_taskgroup; - taskdata->td_taskgroup = tg_new; -} - - -//------------------------------------------------------------------------------------- -// __kmpc_end_taskgroup: Wait until all tasks generated by the current task -// and its descendants are complete - -void -__kmpc_end_taskgroup( ident_t* loc, int gtid ) -{ - kmp_info_t * thread = __kmp_threads[ gtid ]; - kmp_taskdata_t * taskdata = thread->th.th_current_task; - kmp_taskgroup_t * taskgroup = taskdata->td_taskgroup; - int thread_finished = FALSE; - - KA_TRACE(10, ("__kmpc_end_taskgroup(enter): T#%d loc=%p\n", gtid, loc) ); - KMP_DEBUG_ASSERT( taskgroup != NULL ); - KMP_SET_THREAD_STATE_BLOCK(TASKGROUP); - - if ( __kmp_tasking_mode != tskm_immediate_exec ) { -#if USE_ITT_BUILD - // For ITT the taskgroup wait is similar to taskwait until we need to distinguish them - void * itt_sync_obj = __kmp_itt_taskwait_object( gtid ); - if ( itt_sync_obj != NULL ) - __kmp_itt_taskwait_starting( gtid, itt_sync_obj ); -#endif /* USE_ITT_BUILD */ - -#if OMP_45_ENABLED - if ( ! taskdata->td_flags.team_serial || (thread->th.th_task_team != NULL && thread->th.th_task_team->tt.tt_found_proxy_tasks) ) -#else - if ( ! 
taskdata->td_flags.team_serial ) -#endif - { - kmp_flag_32 flag(&(taskgroup->count), 0U); - while ( TCR_4(taskgroup->count) != 0 ) { - flag.execute_tasks(thread, gtid, FALSE, &thread_finished - USE_ITT_BUILD_ARG(itt_sync_obj), __kmp_task_stealing_constraint ); - } - } - -#if USE_ITT_BUILD - if ( itt_sync_obj != NULL ) - __kmp_itt_taskwait_finished( gtid, itt_sync_obj ); -#endif /* USE_ITT_BUILD */ - } - KMP_DEBUG_ASSERT( taskgroup->count == 0 ); - - // Restore parent taskgroup for the current task - taskdata->td_taskgroup = taskgroup->parent; - __kmp_thread_free( thread, taskgroup ); - - KA_TRACE(10, ("__kmpc_end_taskgroup(exit): T#%d task %p finished waiting\n", gtid, taskdata) ); - ANNOTATE_HAPPENS_AFTER(taskdata); -} -#endif - - -//------------------------------------------------------ -// __kmp_remove_my_task: remove a task from my own deque - -static kmp_task_t * -__kmp_remove_my_task( kmp_info_t * thread, kmp_int32 gtid, kmp_task_team_t *task_team, - kmp_int32 is_constrained ) -{ - kmp_task_t * task; - kmp_taskdata_t * taskdata; - kmp_thread_data_t *thread_data; - kmp_uint32 tail; - - KMP_DEBUG_ASSERT( __kmp_tasking_mode != tskm_immediate_exec ); - KMP_DEBUG_ASSERT( task_team -> tt.tt_threads_data != NULL ); // Caller should check this condition - - thread_data = & task_team -> tt.tt_threads_data[ __kmp_tid_from_gtid( gtid ) ]; - - KA_TRACE(10, ("__kmp_remove_my_task(enter): T#%d ntasks=%d head=%u tail=%u\n", - gtid, thread_data->td.td_deque_ntasks, thread_data->td.td_deque_head, - thread_data->td.td_deque_tail) ); - - if (TCR_4(thread_data -> td.td_deque_ntasks) == 0) { - KA_TRACE(10, ("__kmp_remove_my_task(exit #1): T#%d No tasks to remove: ntasks=%d head=%u tail=%u\n", - gtid, thread_data->td.td_deque_ntasks, thread_data->td.td_deque_head, - thread_data->td.td_deque_tail) ); - return NULL; - } - - __kmp_acquire_bootstrap_lock( & thread_data -> td.td_deque_lock ); - - if (TCR_4(thread_data -> td.td_deque_ntasks) == 0) { - __kmp_release_bootstrap_lock( & thread_data -> td.td_deque_lock ); - KA_TRACE(10, ("__kmp_remove_my_task(exit #2): T#%d No tasks to remove: ntasks=%d head=%u tail=%u\n", - gtid, thread_data->td.td_deque_ntasks, thread_data->td.td_deque_head, - thread_data->td.td_deque_tail) ); - return NULL; - } - - tail = ( thread_data -> td.td_deque_tail - 1 ) & TASK_DEQUE_MASK(thread_data->td); // Wrap index. - taskdata = thread_data -> td.td_deque[ tail ]; - - if (is_constrained && (taskdata->td_flags.tiedness == TASK_TIED)) { - // we need to check if the candidate obeys task scheduling constraint: - // only child of current task can be scheduled - kmp_taskdata_t * current = thread->th.th_current_task; - kmp_int32 level = current->td_level; - kmp_taskdata_t * parent = taskdata->td_parent; - while ( parent != current && parent->td_level > level ) { - parent = parent->td_parent; // check generation up to the level of the current task - KMP_DEBUG_ASSERT(parent != NULL); - } - if ( parent != current ) { - // If the tail task is not a child, then no other child can appear in the deque. 
- __kmp_release_bootstrap_lock( & thread_data -> td.td_deque_lock ); - KA_TRACE(10, ("__kmp_remove_my_task(exit #2): T#%d No tasks to remove: ntasks=%d head=%u tail=%u\n", - gtid, thread_data->td.td_deque_ntasks, thread_data->td.td_deque_head, - thread_data->td.td_deque_tail) ); - return NULL; - } - } - - thread_data -> td.td_deque_tail = tail; - TCW_4(thread_data -> td.td_deque_ntasks, thread_data -> td.td_deque_ntasks - 1); - - __kmp_release_bootstrap_lock( & thread_data->td.td_deque_lock ); - - KA_TRACE(10, ("__kmp_remove_my_task(exit #2): T#%d task %p removed: ntasks=%d head=%u tail=%u\n", - gtid, taskdata, thread_data->td.td_deque_ntasks, thread_data->td.td_deque_head, - thread_data->td.td_deque_tail) ); - - task = KMP_TASKDATA_TO_TASK( taskdata ); - return task; -} - - -//----------------------------------------------------------- -// __kmp_steal_task: remove a task from another thread's deque -// Assume that calling thread has already checked existence of -// task_team thread_data before calling this routine. - -static kmp_task_t * -__kmp_steal_task( kmp_info_t *victim, kmp_int32 gtid, kmp_task_team_t *task_team, - volatile kmp_uint32 *unfinished_threads, int *thread_finished, - kmp_int32 is_constrained ) -{ - kmp_task_t * task; - kmp_taskdata_t * taskdata; - kmp_thread_data_t *victim_td, *threads_data; - kmp_int32 victim_tid; - - KMP_DEBUG_ASSERT( __kmp_tasking_mode != tskm_immediate_exec ); - - threads_data = task_team -> tt.tt_threads_data; - KMP_DEBUG_ASSERT( threads_data != NULL ); // Caller should check this condition - - victim_tid = victim->th.th_info.ds.ds_tid; - victim_td = & threads_data[ victim_tid ]; - - KA_TRACE(10, ("__kmp_steal_task(enter): T#%d try to steal from T#%d: task_team=%p ntasks=%d " - "head=%u tail=%u\n", - gtid, __kmp_gtid_from_thread( victim ), task_team, victim_td->td.td_deque_ntasks, - victim_td->td.td_deque_head, victim_td->td.td_deque_tail) ); - - if ( (TCR_4(victim_td -> td.td_deque_ntasks) == 0) || // Caller should not check this condition - (TCR_PTR(victim->th.th_task_team) != task_team)) // GEH: why would this happen? - { - KA_TRACE(10, ("__kmp_steal_task(exit #1): T#%d could not steal from T#%d: task_team=%p " - "ntasks=%d head=%u tail=%u\n", - gtid, __kmp_gtid_from_thread( victim ), task_team, victim_td->td.td_deque_ntasks, - victim_td->td.td_deque_head, victim_td->td.td_deque_tail) ); - return NULL; - } - - __kmp_acquire_bootstrap_lock( & victim_td -> td.td_deque_lock ); - - // Check again after we acquire the lock - if ( (TCR_4(victim_td -> td.td_deque_ntasks) == 0) || - (TCR_PTR(victim->th.th_task_team) != task_team)) // GEH: why would this happen? 
-    {
-        __kmp_release_bootstrap_lock( & victim_td -> td.td_deque_lock );
-        KA_TRACE(10, ("__kmp_steal_task(exit #2): T#%d could not steal from T#%d: task_team=%p "
-                      "ntasks=%d head=%u tail=%u\n",
-                      gtid, __kmp_gtid_from_thread( victim ), task_team, victim_td->td.td_deque_ntasks,
-                      victim_td->td.td_deque_head, victim_td->td.td_deque_tail) );
-        return NULL;
-    }
-
-    KMP_DEBUG_ASSERT( victim_td -> td.td_deque != NULL );
-
-    taskdata = victim_td->td.td_deque[victim_td->td.td_deque_head];
-    if ( is_constrained ) {
-        // we need to check if the candidate obeys task scheduling constraint:
-        // only descendant of current task can be scheduled
-        kmp_taskdata_t * current = __kmp_threads[ gtid ]->th.th_current_task;
-        kmp_int32 level = current->td_level;
-        kmp_taskdata_t * parent = taskdata->td_parent;
-        while ( parent != current && parent->td_level > level ) {
-            parent = parent->td_parent; // check generation up to the level of the current task
-            KMP_DEBUG_ASSERT(parent != NULL);
-        }
-        if ( parent != current ) {
-            // If the head task is not a descendant of the current task then do not
-            // steal it. No other task in victim's deque can be a descendant of the
-            // current task.
-            __kmp_release_bootstrap_lock( & victim_td -> td.td_deque_lock );
-            KA_TRACE(10, ("__kmp_steal_task(exit #2): T#%d could not steal from T#%d: task_team=%p "
-                          "ntasks=%d head=%u tail=%u\n",
-                          gtid, __kmp_gtid_from_thread( threads_data[victim_tid].td.td_thr ),
-                          task_team, victim_td->td.td_deque_ntasks,
-                          victim_td->td.td_deque_head, victim_td->td.td_deque_tail) );
-            return NULL;
-        }
-    }
-    // Bump head pointer and Wrap.
-    victim_td->td.td_deque_head = (victim_td->td.td_deque_head + 1) & TASK_DEQUE_MASK(victim_td->td);
-    if (*thread_finished) {
-        // We need to un-mark this victim as a finished victim. This must be done before
-        // releasing the lock, or else other threads (starting with the master thread)
-        // might be prematurely released from the barrier!!!
-        kmp_uint32 count;
-
-        count = KMP_TEST_THEN_INC32( (kmp_int32 *)unfinished_threads );
-
-        KA_TRACE(20, ("__kmp_steal_task: T#%d inc unfinished_threads to %d: task_team=%p\n",
-                      gtid, count + 1, task_team) );
-
-        *thread_finished = FALSE;
-    }
-    TCW_4(victim_td -> td.td_deque_ntasks, TCR_4(victim_td -> td.td_deque_ntasks) - 1);
-
-    __kmp_release_bootstrap_lock( & victim_td -> td.td_deque_lock );
-
-    KMP_COUNT_BLOCK(TASK_stolen);
-    KA_TRACE(10, ("__kmp_steal_task(exit #3): T#%d stole task %p from T#%d: task_team=%p "
-                  "ntasks=%d head=%u tail=%u\n",
-                  gtid, taskdata, __kmp_gtid_from_thread( victim ), task_team,
-                  victim_td->td.td_deque_ntasks, victim_td->td.td_deque_head,
-                  victim_td->td.td_deque_tail) );
-
-    task = KMP_TASKDATA_TO_TASK( taskdata );
-    return task;
-}
-
-
-//-----------------------------------------------------------------------------
-// __kmp_execute_tasks_template: Choose and execute tasks until either the condition
-// is satisfied (return true) or there are none left (return false).
-// final_spin is TRUE if this is the spin at the release barrier.
-// thread_finished indicates whether the thread is finished executing all
-// the tasks it has on its deque, and is at the release barrier.
-// spinner is the location on which to spin.
-// spinner == NULL means only execute a single task and return.
-// checker is the value to check to terminate the spin.
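The contract in the comment above is easier to see stripped of the runtime machinery. The following is a minimal, self-contained C++ sketch of the same spin-and-execute pattern; the names (ToyFlag, toy_execute_tasks, toy_deque) are invented for illustration and are not the runtime's actual API. The real template definition follows.

    #include <atomic>
    #include <deque>
    #include <functional>

    // Stand-in for kmp_flag_32/kmp_flag_64: a spin location plus the
    // "checker" value that terminates the spin.
    struct ToyFlag {
        std::atomic<unsigned> *spinner;
        unsigned checker;
        bool done_check() const { return spinner->load() == checker; }
    };

    static std::deque<std::function<void()>> toy_deque; // stands in for the per-thread deque

    // Mirrors the contract above: returns true when the spin condition is
    // satisfied, false when the task source is exhausted. flag == nullptr
    // means "execute a single task and return".
    static bool toy_execute_tasks(ToyFlag *flag, bool final_spin) {
        while (!toy_deque.empty()) {
            std::function<void()> task = toy_deque.front();
            toy_deque.pop_front();
            task(); // the __kmp_invoke_task() step
            if (flag == nullptr) return true; // single-task mode
            // Skip the check in the final spin: as the comment above notes,
            // the condition cannot be satisfied partway through the barrier.
            if (!final_spin && flag->done_check()) return true;
        }
        return final_spin && flag != nullptr && flag->done_check();
    }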
-template <class C>
-static inline int __kmp_execute_tasks_template(kmp_info_t *thread, kmp_int32 gtid, C *flag, int final_spin,
-                                               int *thread_finished
-                                               USE_ITT_BUILD_ARG(void * itt_sync_obj), kmp_int32 is_constrained)
-{
-    kmp_task_team_t * task_team = thread->th.th_task_team;
-    kmp_thread_data_t * threads_data;
-    kmp_task_t * task;
-    kmp_info_t * other_thread;
-    kmp_taskdata_t * current_task = thread -> th.th_current_task;
-    volatile kmp_uint32 * unfinished_threads;
-    kmp_int32 nthreads, victim=-2, use_own_tasks=1, new_victim=0, tid=thread->th.th_info.ds.ds_tid;
-
-    KMP_DEBUG_ASSERT( __kmp_tasking_mode != tskm_immediate_exec );
-    KMP_DEBUG_ASSERT( thread == __kmp_threads[ gtid ] );
-
-    if (task_team == NULL) return FALSE;
-
-    KA_TRACE(15, ("__kmp_execute_tasks_template(enter): T#%d final_spin=%d *thread_finished=%d\n",
-                  gtid, final_spin, *thread_finished) );
-
-    threads_data = (kmp_thread_data_t *)TCR_PTR(task_team -> tt.tt_threads_data);
-    KMP_DEBUG_ASSERT( threads_data != NULL );
-
-    nthreads = task_team -> tt.tt_nproc;
-    unfinished_threads = &(task_team -> tt.tt_unfinished_threads);
-#if OMP_45_ENABLED
-    KMP_DEBUG_ASSERT( nthreads > 1 || task_team->tt.tt_found_proxy_tasks);
-#else
-    KMP_DEBUG_ASSERT( nthreads > 1 );
-#endif
-    KMP_DEBUG_ASSERT( (int)(TCR_4(*unfinished_threads)) >= 0 );
-
-    while (1) { // Outer loop keeps trying to find tasks in case of single thread getting tasks from target constructs
-        while (1) { // Inner loop to find a task and execute it
-            task = NULL;
-            if (use_own_tasks) { // check on own queue first
-                task = __kmp_remove_my_task( thread, gtid, task_team, is_constrained );
-            }
-            if ((task == NULL) && (nthreads > 1)) { // Steal a task
-                int asleep = 1;
-                use_own_tasks = 0;
-                // Try to steal from the last place I stole from successfully.
-                if (victim == -2) { // haven't stolen anything yet
-                    victim = threads_data[tid].td.td_deque_last_stolen;
-                    if (victim != -1) // if we have a last stolen from victim, get the thread
-                        other_thread = threads_data[victim].td.td_thr;
-                }
-                if (victim != -1) { // found last victim
-                    asleep = 0;
-                }
-                else if (!new_victim) { // no recent steals and we haven't already used a new victim; select a random thread
-                    do { // Find a different thread to steal work from.
-                        // Pick a random thread. Initial plan was to cycle through all the threads, and only return if
-                        // we tried to steal from every thread, and failed. Arch says that's not such a great idea.
-                        victim = __kmp_get_random(thread) % (nthreads - 1);
-                        if (victim >= tid) {
-                            ++victim; // Adjusts random distribution to exclude self
-                        }
-                        // Found a potential victim
-                        other_thread = threads_data[victim].td.td_thr;
-                        // There is a slight chance that __kmp_enable_tasking() did not wake up all threads
-                        // waiting at the barrier. If victim is sleeping, then wake it up. Since we were going to
-                        // pay the cache miss penalty for referencing another thread's kmp_info_t struct anyway,
-                        // the check shouldn't cost too much performance at this point. In extra barrier mode, tasks
-                        // do not sleep at the separate tasking barrier, so this isn't a problem.
-                        asleep = 0;
-                        if ( ( __kmp_tasking_mode == tskm_task_teams ) &&
-                             (__kmp_dflt_blocktime != KMP_MAX_BLOCKTIME) &&
-                             (TCR_PTR(other_thread->th.th_sleep_loc) != NULL)) {
-                            asleep = 1;
-                            __kmp_null_resume_wrapper(__kmp_gtid_from_thread(other_thread), other_thread->th.th_sleep_loc);
-                            // A sleeping thread should not have any tasks on its queue. There is a slight
-                            // possibility that it resumes, steals a task from another thread, which spawns more
-                            // tasks, all in the time that it takes this thread to check => don't write an assertion
-                            // that the victim's queue is empty. Try stealing from a different thread.
-                        }
-                    } while (asleep);
-                }
-
-                if (!asleep) {
-                    // We have a victim to try to steal from
-                    task = __kmp_steal_task(other_thread, gtid, task_team, unfinished_threads, thread_finished, is_constrained);
-                }
-                if (task != NULL) { // set last stolen to victim
-                    if (threads_data[tid].td.td_deque_last_stolen != victim) {
-                        threads_data[tid].td.td_deque_last_stolen = victim;
-                        // The pre-refactored code did not try more than 1 successful new victim,
-                        // unless the last one generated more local tasks; new_victim keeps track of this
-                        new_victim = 1;
-                    }
-                }
-                else { // No tasks found; unset last_stolen
-                    KMP_CHECK_UPDATE(threads_data[tid].td.td_deque_last_stolen, -1);
-                    victim = -2; // no successful victim found
-                }
-            }
-
-            if (task == NULL) // break out of tasking loop
-                break;
-
-            // Found a task; execute it
-#if USE_ITT_BUILD && USE_ITT_NOTIFY
-            if ( __itt_sync_create_ptr || KMP_ITT_DEBUG ) {
-                if ( itt_sync_obj == NULL ) { // we are at fork barrier where we could not get the object reliably
-                    itt_sync_obj = __kmp_itt_barrier_object( gtid, bs_forkjoin_barrier );
-                }
-                __kmp_itt_task_starting( itt_sync_obj );
-            }
-#endif /* USE_ITT_BUILD && USE_ITT_NOTIFY */
-            __kmp_invoke_task( gtid, task, current_task );
-#if USE_ITT_BUILD
-            if ( itt_sync_obj != NULL ) __kmp_itt_task_finished( itt_sync_obj );
-#endif /* USE_ITT_BUILD */
-            // If this thread is only partway through the barrier and the condition is met, then return now,
-            // so that the barrier gather/release pattern can proceed. If this thread is in the last spin loop
-            // in the barrier, waiting to be released, we know that the termination condition will not be
-            // satisfied, so don't waste any cycles checking it.
-            if (flag == NULL || (!final_spin && flag->done_check())) {
-                KA_TRACE(15, ("__kmp_execute_tasks_template: T#%d spin condition satisfied\n", gtid) );
-                return TRUE;
-            }
-            if (thread->th.th_task_team == NULL) {
-                break;
-            }
-            KMP_YIELD( __kmp_library == library_throughput ); // Yield before executing next task
-            // If execution of a stolen task results in more tasks being placed on our run queue, reset use_own_tasks
-            if (!use_own_tasks && TCR_4(threads_data[tid].td.td_deque_ntasks) != 0) {
-                KA_TRACE(20, ("__kmp_execute_tasks_template: T#%d stolen task spawned other tasks, restart\n", gtid));
-                use_own_tasks = 1;
-                new_victim = 0;
-            }
-        }
-
-        // The task source has been exhausted. If in final spin loop of barrier, check if termination condition is satisfied.
-#if OMP_45_ENABLED
-        // The work queue may be empty but there might be proxy tasks still executing
-        if (final_spin && TCR_4(current_task->td_incomplete_child_tasks) == 0)
-#else
-        if (final_spin)
-#endif
-        {
-            // First, decrement the #unfinished threads, if that has not already been done. This decrement
-            // might be to the spin location, and result in the termination condition being satisfied.
-            if (! *thread_finished) {
-                kmp_uint32 count;
-
-                count = KMP_TEST_THEN_DEC32( (kmp_int32 *)unfinished_threads ) - 1;
-                KA_TRACE(20, ("__kmp_execute_tasks_template: T#%d dec unfinished_threads to %d task_team=%p\n",
-                              gtid, count, task_team) );
-                *thread_finished = TRUE;
-            }
-
-            // It is now unsafe to reference thread->th.th_team !!!
- // Decrementing task_team->tt.tt_unfinished_threads can allow the master thread to pass through - // the barrier, where it might reset each thread's th.th_team field for the next parallel region. - // If we can steal more work, we know that this has not happened yet. - if (flag != NULL && flag->done_check()) { - KA_TRACE(15, ("__kmp_execute_tasks_template: T#%d spin condition satisfied\n", gtid) ); - return TRUE; - } - } - - // If this thread's task team is NULL, master has recognized that there are no more tasks; bail out - if (thread->th.th_task_team == NULL) { - KA_TRACE(15, ("__kmp_execute_tasks_template: T#%d no more tasks\n", gtid) ); - return FALSE; - } - -#if OMP_45_ENABLED - // We could be getting tasks from target constructs; if this is the only thread, keep trying to execute - // tasks from own queue - if (nthreads == 1) - use_own_tasks = 1; - else -#endif - { - KA_TRACE(15, ("__kmp_execute_tasks_template: T#%d can't find work\n", gtid) ); - return FALSE; - } - } -} - -int __kmp_execute_tasks_32(kmp_info_t *thread, kmp_int32 gtid, kmp_flag_32 *flag, int final_spin, - int *thread_finished - USE_ITT_BUILD_ARG(void * itt_sync_obj), kmp_int32 is_constrained) -{ - return __kmp_execute_tasks_template(thread, gtid, flag, final_spin, thread_finished - USE_ITT_BUILD_ARG(itt_sync_obj), is_constrained); -} - -int __kmp_execute_tasks_64(kmp_info_t *thread, kmp_int32 gtid, kmp_flag_64 *flag, int final_spin, - int *thread_finished - USE_ITT_BUILD_ARG(void * itt_sync_obj), kmp_int32 is_constrained) -{ - return __kmp_execute_tasks_template(thread, gtid, flag, final_spin, thread_finished - USE_ITT_BUILD_ARG(itt_sync_obj), is_constrained); -} - -int __kmp_execute_tasks_oncore(kmp_info_t *thread, kmp_int32 gtid, kmp_flag_oncore *flag, int final_spin, - int *thread_finished - USE_ITT_BUILD_ARG(void * itt_sync_obj), kmp_int32 is_constrained) -{ - return __kmp_execute_tasks_template(thread, gtid, flag, final_spin, thread_finished - USE_ITT_BUILD_ARG(itt_sync_obj), is_constrained); -} - - - -//----------------------------------------------------------------------------- -// __kmp_enable_tasking: Allocate task team and resume threads sleeping at the -// next barrier so they can assist in executing enqueued tasks. -// First thread in allocates the task team atomically. - -static void -__kmp_enable_tasking( kmp_task_team_t *task_team, kmp_info_t *this_thr ) -{ - kmp_thread_data_t *threads_data; - int nthreads, i, is_init_thread; - - KA_TRACE( 10, ( "__kmp_enable_tasking(enter): T#%d\n", - __kmp_gtid_from_thread( this_thr ) ) ); - - KMP_DEBUG_ASSERT(task_team != NULL); - KMP_DEBUG_ASSERT(this_thr->th.th_team != NULL); - - nthreads = task_team->tt.tt_nproc; - KMP_DEBUG_ASSERT(nthreads > 0); - KMP_DEBUG_ASSERT(nthreads == this_thr->th.th_team->t.t_nproc); - - // Allocate or increase the size of threads_data if necessary - is_init_thread = __kmp_realloc_task_threads_data( this_thr, task_team ); - - if (!is_init_thread) { - // Some other thread already set up the array. - KA_TRACE( 20, ( "__kmp_enable_tasking(exit): T#%d: threads array already set up.\n", - __kmp_gtid_from_thread( this_thr ) ) ); - return; - } - threads_data = (kmp_thread_data_t *)TCR_PTR(task_team -> tt.tt_threads_data); - KMP_DEBUG_ASSERT( threads_data != NULL ); - - if ( ( __kmp_tasking_mode == tskm_task_teams ) && - ( __kmp_dflt_blocktime != KMP_MAX_BLOCKTIME ) ) - { - // Release any threads sleeping at the barrier, so that they can steal - // tasks and execute them. 
In extra barrier mode, tasks do not sleep
-        // at the separate tasking barrier, so this isn't a problem.
-        for (i = 0; i < nthreads; i++) {
-            volatile void *sleep_loc;
-            kmp_info_t *thread = threads_data[i].td.td_thr;
-
-            if (i == this_thr->th.th_info.ds.ds_tid) {
-                continue;
-            }
-            // Since we haven't locked the thread's suspend mutex lock at this
-            // point, there is a small window where a thread might be putting
-            // itself to sleep, but hasn't set the th_sleep_loc field yet.
-            // To work around this, __kmp_execute_tasks_template() periodically checks
-            // to see if other threads are sleeping (using the same random
-            // mechanism that is used for task stealing) and awakens them if
-            // they are.
-            if ( ( sleep_loc = TCR_PTR( thread -> th.th_sleep_loc) ) != NULL )
-            {
-                KF_TRACE( 50, ( "__kmp_enable_tasking: T#%d waking up thread T#%d\n",
-                                __kmp_gtid_from_thread( this_thr ),
-                                __kmp_gtid_from_thread( thread ) ) );
-                __kmp_null_resume_wrapper(__kmp_gtid_from_thread(thread), sleep_loc);
-            }
-            else {
-                KF_TRACE( 50, ( "__kmp_enable_tasking: T#%d don't wake up thread T#%d\n",
-                                __kmp_gtid_from_thread( this_thr ),
-                                __kmp_gtid_from_thread( thread ) ) );
-            }
-        }
-    }
-
-    KA_TRACE( 10, ( "__kmp_enable_tasking(exit): T#%d\n",
-                    __kmp_gtid_from_thread( this_thr ) ) );
-}
-
-
-/* ------------------------------------------------------------------------ */
-/* // TODO: Check the comment consistency
- * Utility routines for "task teams". A task team (kmp_task_team_t) is kind of
- * like a shadow of the kmp_team_t data struct, with a different lifetime.
- * After a child thread checks into a barrier and calls __kmp_release() from
- * the particular variant of __kmp_<barrier_kind>_barrier_gather(), it can no
- * longer assume that the kmp_team_t structure is intact (at any moment, the
- * master thread may exit the barrier code and free the team data structure,
- * and return the threads to the thread pool).
- *
- * This does not work with the tasking code, as the thread is still
- * expected to participate in the execution of any tasks that may have been
- * spawned by a member of the team, and the thread still needs access to each
- * thread in the team, so that it can steal work from it.
- *
- * Enter the existence of the kmp_task_team_t struct. It employs a reference
- * counting mechanism, and is allocated by the master thread before calling
- * __kmp_<barrier_kind>_release, and then is released by the last thread to
- * exit __kmp_<barrier_kind>_release at the next barrier. I.e. the lifetimes
- * of the kmp_task_team_t structs for consecutive barriers can overlap
- * (and will, unless the master thread is the last thread to exit the barrier
- * release phase, which is not typical).
- *
- * The existence of such a struct is useful outside the context of tasking,
- * but for now, I'm trying to keep it specific to the OMP_30_ENABLED macro,
- * so that any performance differences show up when comparing the 2.5 vs. 3.0
- * libraries.
- *
- * We currently use the existence of the threads array as an indicator that
- * tasks were spawned since the last barrier. If the structure is to be
- * useful outside the context of tasking, then this will have to change, but
- * not setting the field minimizes the performance impact of tasking on
- * barriers, when no explicit tasks were spawned (pushed, actually).
- */
-
-
-static kmp_task_team_t *__kmp_free_task_teams = NULL; // Free list for task_team data structures
-// Lock for task team data structures
-static kmp_bootstrap_lock_t __kmp_task_team_lock = KMP_BOOTSTRAP_LOCK_INITIALIZER( __kmp_task_team_lock );
-
-
-//------------------------------------------------------------------------------
-// __kmp_alloc_task_deque:
-// Allocates a task deque for a particular thread, and initializes the necessary
-// data structures relating to the deque. This only happens once per thread
-// per task team since task teams are recycled.
-// No lock is needed during allocation since each thread allocates its own
-// deque.
-
-static void
-__kmp_alloc_task_deque( kmp_info_t *thread, kmp_thread_data_t *thread_data )
-{
-    __kmp_init_bootstrap_lock( & thread_data -> td.td_deque_lock );
-    KMP_DEBUG_ASSERT( thread_data -> td.td_deque == NULL );
-
-    // Initialize last stolen task field to "none"
-    thread_data -> td.td_deque_last_stolen = -1;
-
-    KMP_DEBUG_ASSERT( TCR_4(thread_data -> td.td_deque_ntasks) == 0 );
-    KMP_DEBUG_ASSERT( thread_data -> td.td_deque_head == 0 );
-    KMP_DEBUG_ASSERT( thread_data -> td.td_deque_tail == 0 );
-
-    KE_TRACE( 10, ( "__kmp_alloc_task_deque: T#%d allocating deque[%d] for thread_data %p\n",
-                    __kmp_gtid_from_thread( thread ), INITIAL_TASK_DEQUE_SIZE, thread_data ) );
-    // Allocate space for task deque, and zero the deque
-    // Cannot use __kmp_thread_calloc() because threads not around for
-    // kmp_reap_task_team( ).
-    thread_data -> td.td_deque = (kmp_taskdata_t **)
-        __kmp_allocate( INITIAL_TASK_DEQUE_SIZE * sizeof(kmp_taskdata_t *));
-    thread_data -> td.td_deque_size = INITIAL_TASK_DEQUE_SIZE;
-}
-
-//------------------------------------------------------------------------------
-// __kmp_realloc_task_deque:
-// Re-allocates a task deque for a particular thread, copies the content from the old deque
-// and adjusts the necessary data structures relating to the deque.
-// This operation must be done with the deque_lock being held
-
-static void __kmp_realloc_task_deque ( kmp_info_t *thread, kmp_thread_data_t *thread_data )
-{
-    kmp_int32 size = TASK_DEQUE_SIZE(thread_data->td);
-    kmp_int32 new_size = 2 * size;
-
-    KE_TRACE( 10, ( "__kmp_realloc_task_deque: T#%d reallocating deque[from %d to %d] for thread_data %p\n",
-                    __kmp_gtid_from_thread( thread ), size, new_size, thread_data ) );
-
-    kmp_taskdata_t ** new_deque = (kmp_taskdata_t **) __kmp_allocate( new_size * sizeof(kmp_taskdata_t *));
-
-    int i,j;
-    for ( i = thread_data->td.td_deque_head, j = 0; j < size; i = (i+1) & TASK_DEQUE_MASK(thread_data->td), j++ )
-        new_deque[j] = thread_data->td.td_deque[i];
-
-    __kmp_free(thread_data->td.td_deque);
-
-    thread_data -> td.td_deque_head = 0;
-    thread_data -> td.td_deque_tail = size;
-    thread_data -> td.td_deque = new_deque;
-    thread_data -> td.td_deque_size = new_size;
-}
-
-//------------------------------------------------------------------------------
-// __kmp_free_task_deque:
-// Deallocates a task deque for a particular thread.
-// Happens at library deallocation so don't need to reset all thread data fields.
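Before the deallocation routine below, a note on the deque arithmetic used throughout these routines: head and tail indices are wrapped with a bitmask, which only works while the deque size stays a power of two (the doubling in __kmp_realloc_task_deque preserves this). Here is a minimal, self-contained C++ sketch of the same ring-buffer discipline; the names are invented and the runtime's locking is omitted.

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Toy double-ended ring buffer: the owner pushes and pops at the tail,
    // thieves pop at the head, and "& (size - 1)" does what TASK_DEQUE_MASK
    // does in the routines above. Size must be a power of two.
    struct ToyDeque {
        std::vector<void *> slots;
        uint32_t head = 0, tail = 0, ntasks = 0;

        explicit ToyDeque(uint32_t size) : slots(size) {
            assert(size != 0 && (size & (size - 1)) == 0); // power of two
        }
        uint32_t mask() const { return (uint32_t)slots.size() - 1; }

        void push_tail(void *t) {            // owner end
            assert(ntasks < slots.size());
            slots[tail] = t;
            tail = (tail + 1) & mask();      // wrap index
            ++ntasks;
        }
        void *pop_tail() {                   // owner end (cf. __kmp_remove_my_task)
            if (ntasks == 0) return nullptr;
            tail = (tail - 1) & mask();      // wrap index
            --ntasks;
            return slots[tail];
        }
        void *pop_head() {                   // thief end (cf. __kmp_steal_task)
            if (ntasks == 0) return nullptr;
            void *t = slots[head];
            head = (head + 1) & mask();      // bump head pointer and wrap
            --ntasks;
            return t;
        }
    };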
-
-static void
-__kmp_free_task_deque( kmp_thread_data_t *thread_data )
-{
-    __kmp_acquire_bootstrap_lock( & thread_data -> td.td_deque_lock );
-
-    if ( thread_data -> td.td_deque != NULL ) {
-        TCW_4(thread_data -> td.td_deque_ntasks, 0);
-        __kmp_free( thread_data -> td.td_deque );
-        thread_data -> td.td_deque = NULL;
-    }
-    __kmp_release_bootstrap_lock( & thread_data -> td.td_deque_lock );
-
-#ifdef BUILD_TIED_TASK_STACK
-    // GEH: Figure out what to do here for td_susp_tied_tasks
-    if ( thread_data -> td.td_susp_tied_tasks.ts_entries != TASK_STACK_EMPTY ) {
-        __kmp_free_task_stack( __kmp_thread_from_gtid( gtid ), thread_data );
-    }
-#endif // BUILD_TIED_TASK_STACK
-}
-
-
-//------------------------------------------------------------------------------
-// __kmp_realloc_task_threads_data:
-// Allocates a threads_data array for a task team, either by allocating an initial
-// array or enlarging an existing array. Only the first thread to get the lock
-// allocs or enlarges the array and re-initializes the array elements.
-// That thread returns "TRUE", the rest return "FALSE".
-// Assumes that the new array size is given by task_team -> tt.tt_nproc.
-// The current size is given by task_team -> tt.tt_max_threads.
-
-static int
-__kmp_realloc_task_threads_data( kmp_info_t *thread, kmp_task_team_t *task_team )
-{
-    kmp_thread_data_t ** threads_data_p;
-    kmp_int32 nthreads, maxthreads;
-    int is_init_thread = FALSE;
-
-    if ( TCR_4(task_team -> tt.tt_found_tasks) ) {
-        // Already reallocated and initialized.
-        return FALSE;
-    }
-
-    threads_data_p = & task_team -> tt.tt_threads_data;
-    nthreads = task_team -> tt.tt_nproc;
-    maxthreads = task_team -> tt.tt_max_threads;
-
-    // All threads must lock when they encounter the first task of the implicit task
-    // region to make sure threads_data fields are (re)initialized before used.
-    __kmp_acquire_bootstrap_lock( & task_team -> tt.tt_threads_lock );
-
-    if ( ! TCR_4(task_team -> tt.tt_found_tasks) ) {
-        // first thread to enable tasking
-        kmp_team_t *team = thread -> th.th_team;
-        int i;
-
-        is_init_thread = TRUE;
-        if ( maxthreads < nthreads ) {
-
-            if ( *threads_data_p != NULL ) {
-                kmp_thread_data_t *old_data = *threads_data_p;
-                kmp_thread_data_t *new_data = NULL;
-
-                KE_TRACE( 10, ( "__kmp_realloc_task_threads_data: T#%d reallocating "
-                                "threads data for task_team %p, new_size = %d, old_size = %d\n",
-                                __kmp_gtid_from_thread( thread ), task_team,
-                                nthreads, maxthreads ) );
-                // Reallocate threads_data to have more elements than current array
-                // Cannot use __kmp_thread_realloc() because threads not around for
-                // kmp_reap_task_team( ). Note all new array entries are initialized
-                // to zero by __kmp_allocate().
- new_data = (kmp_thread_data_t *) - __kmp_allocate( nthreads * sizeof(kmp_thread_data_t) ); - // copy old data to new data - KMP_MEMCPY_S( (void *) new_data, nthreads * sizeof(kmp_thread_data_t), - (void *) old_data, - maxthreads * sizeof(kmp_taskdata_t *) ); - -#ifdef BUILD_TIED_TASK_STACK - // GEH: Figure out if this is the right thing to do - for (i = maxthreads; i < nthreads; i++) { - kmp_thread_data_t *thread_data = & (*threads_data_p)[i]; - __kmp_init_task_stack( __kmp_gtid_from_thread( thread ), thread_data ); - } -#endif // BUILD_TIED_TASK_STACK - // Install the new data and free the old data - (*threads_data_p) = new_data; - __kmp_free( old_data ); - } - else { - KE_TRACE( 10, ( "__kmp_realloc_task_threads_data: T#%d allocating " - "threads data for task_team %p, size = %d\n", - __kmp_gtid_from_thread( thread ), task_team, nthreads ) ); - // Make the initial allocate for threads_data array, and zero entries - // Cannot use __kmp_thread_calloc() because threads not around for - // kmp_reap_task_team( ). - ANNOTATE_IGNORE_WRITES_BEGIN(); - *threads_data_p = (kmp_thread_data_t *) - __kmp_allocate( nthreads * sizeof(kmp_thread_data_t) ); - ANNOTATE_IGNORE_WRITES_END(); -#ifdef BUILD_TIED_TASK_STACK - // GEH: Figure out if this is the right thing to do - for (i = 0; i < nthreads; i++) { - kmp_thread_data_t *thread_data = & (*threads_data_p)[i]; - __kmp_init_task_stack( __kmp_gtid_from_thread( thread ), thread_data ); - } -#endif // BUILD_TIED_TASK_STACK - } - task_team -> tt.tt_max_threads = nthreads; - } - else { - // If array has (more than) enough elements, go ahead and use it - KMP_DEBUG_ASSERT( *threads_data_p != NULL ); - } - - // initialize threads_data pointers back to thread_info structures - for (i = 0; i < nthreads; i++) { - kmp_thread_data_t *thread_data = & (*threads_data_p)[i]; - thread_data -> td.td_thr = team -> t.t_threads[i]; - - if ( thread_data -> td.td_deque_last_stolen >= nthreads) { - // The last stolen field survives across teams / barrier, and the number - // of threads may have changed. It's possible (likely?) that a new - // parallel region will exhibit the same behavior as the previous region. - thread_data -> td.td_deque_last_stolen = -1; - } - } - - KMP_MB(); - TCW_SYNC_4(task_team -> tt.tt_found_tasks, TRUE); - } - - __kmp_release_bootstrap_lock( & task_team -> tt.tt_threads_lock ); - return is_init_thread; -} - - -//------------------------------------------------------------------------------ -// __kmp_free_task_threads_data: -// Deallocates a threads_data array for a task team, including any attached -// tasking deques. Only occurs at library shutdown. - -static void -__kmp_free_task_threads_data( kmp_task_team_t *task_team ) -{ - __kmp_acquire_bootstrap_lock( & task_team -> tt.tt_threads_lock ); - if ( task_team -> tt.tt_threads_data != NULL ) { - int i; - for (i = 0; i < task_team->tt.tt_max_threads; i++ ) { - __kmp_free_task_deque( & task_team -> tt.tt_threads_data[i] ); - } - __kmp_free( task_team -> tt.tt_threads_data ); - task_team -> tt.tt_threads_data = NULL; - } - __kmp_release_bootstrap_lock( & task_team -> tt.tt_threads_lock ); -} - - -//------------------------------------------------------------------------------ -// __kmp_allocate_task_team: -// Allocates a task team associated with a specific team, taking it from -// the global task team free list if possible. Also initializes data structures. 
- -static kmp_task_team_t * -__kmp_allocate_task_team( kmp_info_t *thread, kmp_team_t *team ) -{ - kmp_task_team_t *task_team = NULL; - int nthreads; - - KA_TRACE( 20, ( "__kmp_allocate_task_team: T#%d entering; team = %p\n", - (thread ? __kmp_gtid_from_thread( thread ) : -1), team ) ); - - if (TCR_PTR(__kmp_free_task_teams) != NULL) { - // Take a task team from the task team pool - __kmp_acquire_bootstrap_lock( &__kmp_task_team_lock ); - if (__kmp_free_task_teams != NULL) { - task_team = __kmp_free_task_teams; - TCW_PTR(__kmp_free_task_teams, task_team -> tt.tt_next); - task_team -> tt.tt_next = NULL; - } - __kmp_release_bootstrap_lock( &__kmp_task_team_lock ); - } - - if (task_team == NULL) { - KE_TRACE( 10, ( "__kmp_allocate_task_team: T#%d allocating " - "task team for team %p\n", - __kmp_gtid_from_thread( thread ), team ) ); - // Allocate a new task team if one is not available. - // Cannot use __kmp_thread_malloc() because threads not around for - // kmp_reap_task_team( ). - task_team = (kmp_task_team_t *) __kmp_allocate( sizeof(kmp_task_team_t) ); - __kmp_init_bootstrap_lock( & task_team -> tt.tt_threads_lock ); - //task_team -> tt.tt_threads_data = NULL; // AC: __kmp_allocate zeroes returned memory - //task_team -> tt.tt_max_threads = 0; - //task_team -> tt.tt_next = NULL; - } - - TCW_4(task_team -> tt.tt_found_tasks, FALSE); -#if OMP_45_ENABLED - TCW_4(task_team -> tt.tt_found_proxy_tasks, FALSE); -#endif - task_team -> tt.tt_nproc = nthreads = team->t.t_nproc; - - TCW_4( task_team -> tt.tt_unfinished_threads, nthreads ); - TCW_4( task_team -> tt.tt_active, TRUE ); - - KA_TRACE( 20, ( "__kmp_allocate_task_team: T#%d exiting; task_team = %p unfinished_threads init'd to %d\n", - (thread ? __kmp_gtid_from_thread( thread ) : -1), task_team, task_team -> tt.tt_unfinished_threads) ); - return task_team; -} - - -//------------------------------------------------------------------------------ -// __kmp_free_task_team: -// Frees the task team associated with a specific thread, and adds it -// to the global task team free list. - -void -__kmp_free_task_team( kmp_info_t *thread, kmp_task_team_t *task_team ) -{ - KA_TRACE( 20, ( "__kmp_free_task_team: T#%d task_team = %p\n", - thread ? __kmp_gtid_from_thread( thread ) : -1, task_team ) ); - - // Put task team back on free list - __kmp_acquire_bootstrap_lock( & __kmp_task_team_lock ); - - KMP_DEBUG_ASSERT( task_team -> tt.tt_next == NULL ); - task_team -> tt.tt_next = __kmp_free_task_teams; - TCW_PTR(__kmp_free_task_teams, task_team); - - __kmp_release_bootstrap_lock( & __kmp_task_team_lock ); -} - - -//------------------------------------------------------------------------------ -// __kmp_reap_task_teams: -// Free all the task teams on the task team free list. -// Should only be done during library shutdown. -// Cannot do anything that needs a thread structure or gtid since they are already gone. 
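Taken together, __kmp_allocate_task_team, __kmp_free_task_team, and the reaping code below implement a simple locked free list for recycling task-team structures. A self-contained sketch of that recycling scheme follows; ToyTaskTeam, toy_allocate_task_team, and toy_free_task_team are illustrative stand-ins, and std::mutex stands in for the bootstrap lock.

    #include <mutex>

    // Recycled node, like kmp_task_team_t with its tt_next link.
    struct ToyTaskTeam {
        ToyTaskTeam *next = nullptr;
    };

    static ToyTaskTeam *toy_free_list = nullptr; // cf. __kmp_free_task_teams
    static std::mutex toy_free_list_lock;        // cf. __kmp_task_team_lock

    // Pop a recycled node if one exists, else allocate fresh.
    static ToyTaskTeam *toy_allocate_task_team() {
        {
            std::lock_guard<std::mutex> guard(toy_free_list_lock);
            if (toy_free_list != nullptr) {
                ToyTaskTeam *t = toy_free_list;
                toy_free_list = t->next;
                t->next = nullptr;
                return t;
            }
        }
        return new ToyTaskTeam();
    }

    // Push a finished node back onto the free list for reuse.
    static void toy_free_task_team(ToyTaskTeam *t) {
        std::lock_guard<std::mutex> guard(toy_free_list_lock);
        t->next = toy_free_list;
        toy_free_list = t;
    }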
-
-void
-__kmp_reap_task_teams( void )
-{
-    kmp_task_team_t *task_team;
-
-    if ( TCR_PTR(__kmp_free_task_teams) != NULL ) {
-        // Free all task_teams on the free list
-        __kmp_acquire_bootstrap_lock( &__kmp_task_team_lock );
-        while ( ( task_team = __kmp_free_task_teams ) != NULL ) {
-            __kmp_free_task_teams = task_team -> tt.tt_next;
-            task_team -> tt.tt_next = NULL;
-
-            // Free threads_data if necessary
-            if ( task_team -> tt.tt_threads_data != NULL ) {
-                __kmp_free_task_threads_data( task_team );
-            }
-            __kmp_free( task_team );
-        }
-        __kmp_release_bootstrap_lock( &__kmp_task_team_lock );
-    }
-}
-
-//------------------------------------------------------------------------------
-// __kmp_wait_to_unref_task_teams:
-// Some threads could still be in the fork barrier release code, possibly
-// trying to steal tasks. Wait for each thread to unreference its task team.
-//
-void
-__kmp_wait_to_unref_task_teams(void)
-{
-    kmp_info_t *thread;
-    kmp_uint32 spins;
-    int done;
-
-    KMP_INIT_YIELD( spins );
-
-    for (;;) {
-        done = TRUE;
-
-        // TODO: GEH - this may be wrong because some sync would be necessary
-        // in case threads are added to the pool during the traversal.
-        // Need to verify that lock for thread pool is held when calling
-        // this routine.
-        for (thread = (kmp_info_t *)__kmp_thread_pool;
-             thread != NULL;
-             thread = thread->th.th_next_pool)
-        {
-#if KMP_OS_WINDOWS
-            DWORD exit_val;
-#endif
-            if ( TCR_PTR(thread->th.th_task_team) == NULL ) {
-                KA_TRACE( 10, ("__kmp_wait_to_unref_task_team: T#%d task_team == NULL\n",
-                               __kmp_gtid_from_thread( thread ) ) );
-                continue;
-            }
-#if KMP_OS_WINDOWS
-            // TODO: GEH - add this check for Linux* OS / OS X* as well?
-            if (!__kmp_is_thread_alive(thread, &exit_val)) {
-                thread->th.th_task_team = NULL;
-                continue;
-            }
-#endif
-
-            done = FALSE; // Because th_task_team pointer is not NULL for this thread
-
-            KA_TRACE( 10, ("__kmp_wait_to_unref_task_team: Waiting for T#%d to unreference task_team\n",
-                           __kmp_gtid_from_thread( thread ) ) );
-
-            if ( __kmp_dflt_blocktime != KMP_MAX_BLOCKTIME ) {
-                volatile void *sleep_loc;
-                // If the thread is sleeping, awaken it.
-                if ( ( sleep_loc = TCR_PTR( thread->th.th_sleep_loc) ) != NULL ) {
-                    KA_TRACE( 10, ( "__kmp_wait_to_unref_task_team: T#%d waking up thread T#%d\n",
-                                    __kmp_gtid_from_thread( thread ), __kmp_gtid_from_thread( thread ) ) );
-                    __kmp_null_resume_wrapper(__kmp_gtid_from_thread(thread), sleep_loc);
-                }
-            }
-        }
-        if (done) {
-            break;
-        }
-
-        // If we are oversubscribed,
-        // or have waited a bit (and library mode is throughput), yield.
-        // Pause is in the following code.
-        KMP_YIELD( TCR_4(__kmp_nth) > __kmp_avail_proc );
-        KMP_YIELD_SPIN( spins ); // Yields only if KMP_LIBRARY=throughput
-    }
-}
-
-
-//------------------------------------------------------------------------------
-// __kmp_task_team_setup: Create a task_team for the current team, but use
-// an already created, unused one if it already exists.
-void
-__kmp_task_team_setup( kmp_info_t *this_thr, kmp_team_t *team, int always )
-{
-    KMP_DEBUG_ASSERT( __kmp_tasking_mode != tskm_immediate_exec );
-
-    // If this task_team hasn't been created yet, allocate it. It will be used in the region after the next.
-    // If it exists, it is the current task team and shouldn't be touched yet as it may still be in use.
- if (team->t.t_task_team[this_thr->th.th_task_state] == NULL && (always || team->t.t_nproc > 1) ) { - team->t.t_task_team[this_thr->th.th_task_state] = __kmp_allocate_task_team( this_thr, team ); - KA_TRACE(20, ("__kmp_task_team_setup: Master T#%d created new task_team %p for team %d at parity=%d\n", - __kmp_gtid_from_thread(this_thr), team->t.t_task_team[this_thr->th.th_task_state], - ((team != NULL) ? team->t.t_id : -1), this_thr->th.th_task_state)); - } - - // After threads exit the release, they will call sync, and then point to this other task_team; make sure it is - // allocated and properly initialized. As threads spin in the barrier release phase, they will continue to use the - // previous task_team struct(above), until they receive the signal to stop checking for tasks (they can't safely - // reference the kmp_team_t struct, which could be reallocated by the master thread). No task teams are formed for - // serialized teams. - if (team->t.t_nproc > 1) { - int other_team = 1 - this_thr->th.th_task_state; - if (team->t.t_task_team[other_team] == NULL) { // setup other team as well - team->t.t_task_team[other_team] = __kmp_allocate_task_team( this_thr, team ); - KA_TRACE(20, ("__kmp_task_team_setup: Master T#%d created second new task_team %p for team %d at parity=%d\n", - __kmp_gtid_from_thread( this_thr ), team->t.t_task_team[other_team], - ((team != NULL) ? team->t.t_id : -1), other_team )); - } - else { // Leave the old task team struct in place for the upcoming region; adjust as needed - kmp_task_team_t *task_team = team->t.t_task_team[other_team]; - if (!task_team->tt.tt_active || team->t.t_nproc != task_team->tt.tt_nproc) { - TCW_4(task_team->tt.tt_nproc, team->t.t_nproc); - TCW_4(task_team->tt.tt_found_tasks, FALSE); -#if OMP_45_ENABLED - TCW_4(task_team->tt.tt_found_proxy_tasks, FALSE); -#endif - TCW_4(task_team->tt.tt_unfinished_threads, team->t.t_nproc ); - TCW_4(task_team->tt.tt_active, TRUE ); - } - // if team size has changed, the first thread to enable tasking will realloc threads_data if necessary - KA_TRACE(20, ("__kmp_task_team_setup: Master T#%d reset next task_team %p for team %d at parity=%d\n", - __kmp_gtid_from_thread( this_thr ), team->t.t_task_team[other_team], - ((team != NULL) ? team->t.t_id : -1), other_team )); - } - } -} - - -//------------------------------------------------------------------------------ -// __kmp_task_team_sync: Propagation of task team data from team to threads -// which happens just after the release phase of a team barrier. This may be -// called by any thread, but only for teams with # threads > 1. - -void -__kmp_task_team_sync( kmp_info_t *this_thr, kmp_team_t *team ) -{ - KMP_DEBUG_ASSERT( __kmp_tasking_mode != tskm_immediate_exec ); - - // Toggle the th_task_state field, to switch which task_team this thread refers to - this_thr->th.th_task_state = 1 - this_thr->th.th_task_state; - // It is now safe to propagate the task team pointer from the team struct to the current thread. - TCW_PTR(this_thr->th.th_task_team, team->t.t_task_team[this_thr->th.th_task_state]); - KA_TRACE(20, ("__kmp_task_team_sync: Thread T#%d task team switched to task_team %p from Team #%d (parity=%d)\n", - __kmp_gtid_from_thread( this_thr ), this_thr->th.th_task_team, - ((team != NULL) ? team->t.t_id : -1), this_thr->th.th_task_state)); -} - - -//-------------------------------------------------------------------------------------------- -// __kmp_task_team_wait: Master thread waits for outstanding tasks after the barrier gather -// phase. 
Only called by master thread if #threads in team > 1 or if proxy tasks were created.
-// wait is a flag that defaults to 1 (see kmp.h), but waiting can be turned off by passing in 0
-// optionally as the last argument. When wait is zero, master thread does not wait for
-// unfinished_threads to reach 0.
-void
-__kmp_task_team_wait( kmp_info_t *this_thr, kmp_team_t *team
-                      USE_ITT_BUILD_ARG(void * itt_sync_obj)
-                      , int wait)
-{
-    kmp_task_team_t *task_team = team->t.t_task_team[this_thr->th.th_task_state];
-
-    KMP_DEBUG_ASSERT( __kmp_tasking_mode != tskm_immediate_exec );
-    KMP_DEBUG_ASSERT( task_team == this_thr->th.th_task_team );
-
-    if ( ( task_team != NULL ) && KMP_TASKING_ENABLED(task_team) ) {
-        if (wait) {
-            KA_TRACE(20, ("__kmp_task_team_wait: Master T#%d waiting for all tasks (for unfinished_threads to reach 0) on task_team = %p\n",
-                          __kmp_gtid_from_thread(this_thr), task_team));
-            // Worker threads may have dropped through to release phase, but could still be executing tasks. Wait
-            // here for tasks to complete. To avoid memory contention, only master thread checks termination condition.
-            kmp_flag_32 flag(&task_team->tt.tt_unfinished_threads, 0U);
-            flag.wait(this_thr, TRUE
-                      USE_ITT_BUILD_ARG(itt_sync_obj));
-        }
-        // Deactivate the old task team, so that the worker threads will stop referencing it while spinning.
-        KA_TRACE(20, ("__kmp_task_team_wait: Master T#%d deactivating task_team %p: "
-                      "setting active to false, setting local and team's pointer to NULL\n",
-                      __kmp_gtid_from_thread(this_thr), task_team));
-#if OMP_45_ENABLED
-        KMP_DEBUG_ASSERT( task_team->tt.tt_nproc > 1 || task_team->tt.tt_found_proxy_tasks == TRUE );
-        TCW_SYNC_4( task_team->tt.tt_found_proxy_tasks, FALSE );
-#else
-        KMP_DEBUG_ASSERT( task_team->tt.tt_nproc > 1 );
-#endif
-        TCW_SYNC_4( task_team->tt.tt_active, FALSE );
-        KMP_MB();
-
-        TCW_PTR(this_thr->th.th_task_team, NULL);
-    }
-}
-
-
-//------------------------------------------------------------------------------
-// __kmp_tasking_barrier:
-// This routine may only be called when __kmp_tasking_mode == tskm_extra_barrier.
-// Internal function to execute all tasks prior to a regular barrier or a
-// join barrier. It is a full barrier itself, which unfortunately turns
-// regular barriers into double barriers and join barriers into 1 1/2
-// barriers.
-void
-__kmp_tasking_barrier( kmp_team_t *team, kmp_info_t *thread, int gtid )
-{
-    volatile kmp_uint32 *spin = &team->t.t_task_team[thread->th.th_task_state]->tt.tt_unfinished_threads;
-    int flag = FALSE;
-    KMP_DEBUG_ASSERT( __kmp_tasking_mode == tskm_extra_barrier );
-
-#if USE_ITT_BUILD
-    KMP_FSYNC_SPIN_INIT( spin, (kmp_uint32*) NULL );
-#endif /* USE_ITT_BUILD */
-    kmp_flag_32 spin_flag(spin, 0U);
-    while (! spin_flag.execute_tasks(thread, gtid, TRUE, &flag
-                                     USE_ITT_BUILD_ARG(NULL), 0 ) ) {
-#if USE_ITT_BUILD
-        // TODO: What about itt_sync_obj??
-        KMP_FSYNC_SPIN_PREPARE( spin );
-#endif /* USE_ITT_BUILD */
-
-        if( TCR_4(__kmp_global.g.g_done) ) {
-            if( __kmp_global.g.g_abort )
-                __kmp_abort_thread( );
-            break;
-        }
-        KMP_YIELD( TRUE ); // GH: We always yield here
-    }
-#if USE_ITT_BUILD
-    KMP_FSYNC_SPIN_ACQUIRED( (void*) spin );
-#endif /* USE_ITT_BUILD */
-}
-
-
-#if OMP_45_ENABLED
-
-/* __kmp_give_task puts a task into a given thread queue if:
-    - the queue for that thread was created
-    - there's space in that queue
-
-   Because of this, __kmp_push_task needs to check if there's space after getting the lock
- */
-static bool __kmp_give_task ( kmp_info_t *thread, kmp_int32 tid, kmp_task_t * task, kmp_int32 pass )
-{
-    kmp_taskdata_t * taskdata = KMP_TASK_TO_TASKDATA(task);
-    kmp_task_team_t * task_team = taskdata->td_task_team;
-
-    KA_TRACE(20, ("__kmp_give_task: trying to give task %p to thread %d.\n", taskdata, tid ) );
-
-    // If task_team is NULL something went really bad...
-    KMP_DEBUG_ASSERT( task_team != NULL );
-
-    bool result = false;
-    kmp_thread_data_t * thread_data = & task_team -> tt.tt_threads_data[ tid ];
-
-    if (thread_data -> td.td_deque == NULL ) {
-        // There's no queue in this thread, go find another one
-        // We're guaranteed that at least one thread has a queue
-        KA_TRACE(30, ("__kmp_give_task: thread %d has no queue while giving task %p.\n", tid, taskdata ) );
-        return result;
-    }
-
-    if ( TCR_4(thread_data -> td.td_deque_ntasks) >= TASK_DEQUE_SIZE(thread_data->td) )
-    {
-        KA_TRACE(30, ("__kmp_give_task: queue is full while giving task %p to thread %d.\n", taskdata, tid ) );
-
-        // if this deque is bigger than the pass ratio give a chance to another thread
-        if ( TASK_DEQUE_SIZE(thread_data->td)/INITIAL_TASK_DEQUE_SIZE >= pass ) return result;
-
-        __kmp_acquire_bootstrap_lock( & thread_data-> td.td_deque_lock );
-        __kmp_realloc_task_deque(thread,thread_data);
-
-    } else {
-
-        __kmp_acquire_bootstrap_lock( & thread_data-> td.td_deque_lock );
-
-        if ( TCR_4(thread_data -> td.td_deque_ntasks) >= TASK_DEQUE_SIZE(thread_data->td) )
-        {
-            KA_TRACE(30, ("__kmp_give_task: queue is full while giving task %p to thread %d.\n", taskdata, tid ) );
-
-            // if this deque is bigger than the pass ratio give a chance to another thread
-            if ( TASK_DEQUE_SIZE(thread_data->td)/INITIAL_TASK_DEQUE_SIZE >= pass )
-                goto release_and_exit;
-
-            __kmp_realloc_task_deque(thread,thread_data);
-        }
-    }
-
-    // lock is held here, and there is space in the deque
-
-    thread_data -> td.td_deque[ thread_data -> td.td_deque_tail ] = taskdata;
-    // Wrap index.
-    thread_data -> td.td_deque_tail = ( thread_data -> td.td_deque_tail + 1 ) & TASK_DEQUE_MASK(thread_data->td);
-    TCW_4(thread_data -> td.td_deque_ntasks, TCR_4(thread_data -> td.td_deque_ntasks) + 1);
-
-    result = true;
-    KA_TRACE(30, ("__kmp_give_task: successfully gave task %p to thread %d.\n", taskdata, tid ) );
-
-release_and_exit:
-    __kmp_release_bootstrap_lock( & thread_data-> td.td_deque_lock );
-
-    return result;
-}
-
-
-/* The finish of a proxy task is divided into two pieces:
-    - the top half is the one that can be done from a thread outside the team
-    - the bottom half must be run from a thread within the team
-
-   In order to run the bottom half the task gets queued back into one of the threads of the team.
-   Once the td_incomplete_child_tasks counter of the parent is decremented the threads can leave the barriers.
-   So, the bottom half needs to be queued before the counter is decremented. The top half is therefore divided in two parts:
-    - things that can be run before queuing the bottom half
-    - things that must be run after queuing the bottom half
-
-   This creates a second race as the bottom half can free the task before the second top half is executed. To avoid this
-   we use the td_incomplete_child_tasks of the proxy task to synchronize the top and bottom half.
-*/
-
-static void __kmp_first_top_half_finish_proxy( kmp_taskdata_t * taskdata )
-{
-    KMP_DEBUG_ASSERT( taskdata -> td_flags.tasktype == TASK_EXPLICIT );
-    KMP_DEBUG_ASSERT( taskdata -> td_flags.proxy == TASK_PROXY );
-    KMP_DEBUG_ASSERT( taskdata -> td_flags.complete == 0 );
-    KMP_DEBUG_ASSERT( taskdata -> td_flags.freed == 0 );
-
-    taskdata -> td_flags.complete = 1; // mark the task as completed
-
-    if ( taskdata->td_taskgroup )
-        KMP_TEST_THEN_DEC32( (kmp_int32 *)(& taskdata->td_taskgroup->count) );
-
-    // Create an imaginary child for this task so the bottom half cannot release the task before we have completed the second top half
-    TCI_4(taskdata->td_incomplete_child_tasks);
-}
-
-static void __kmp_second_top_half_finish_proxy( kmp_taskdata_t * taskdata )
-{
-    kmp_int32 children = 0;
-
-    // Predecrement simulated by "- 1" calculation
-    children = KMP_TEST_THEN_DEC32( (kmp_int32 *)(& taskdata -> td_parent -> td_incomplete_child_tasks) ) - 1;
-    KMP_DEBUG_ASSERT( children >= 0 );
-
-    // Remove the imaginary child
-    TCD_4(taskdata->td_incomplete_child_tasks);
-}
-
-static void __kmp_bottom_half_finish_proxy( kmp_int32 gtid, kmp_task_t * ptask )
-{
-    kmp_taskdata_t * taskdata = KMP_TASK_TO_TASKDATA(ptask);
-    kmp_info_t * thread = __kmp_threads[ gtid ];
-
-    KMP_DEBUG_ASSERT( taskdata -> td_flags.proxy == TASK_PROXY );
-    KMP_DEBUG_ASSERT( taskdata -> td_flags.complete == 1 ); // top half must run before bottom half
-
-    // We need to wait to make sure the top half is finished
-    // Spinning here should be ok as this should happen quickly
-    while ( TCR_4(taskdata->td_incomplete_child_tasks) > 0 ) ;
-
-    __kmp_release_deps(gtid,taskdata);
-    __kmp_free_task_and_ancestors(gtid, taskdata, thread);
-}
-
-/*!
-@ingroup TASKING
-@param gtid Global Thread ID of encountering thread
-@param ptask Task whose execution is completed
-
-Execute the completion of a proxy task from a thread that is part of the team. Run the top and bottom halves directly.
-*/
-void __kmpc_proxy_task_completed( kmp_int32 gtid, kmp_task_t *ptask )
-{
-    KMP_DEBUG_ASSERT( ptask != NULL );
-    kmp_taskdata_t * taskdata = KMP_TASK_TO_TASKDATA(ptask);
-    KA_TRACE(10, ("__kmp_proxy_task_completed(enter): T#%d proxy task %p completing\n", gtid, taskdata ) );
-
-    KMP_DEBUG_ASSERT( taskdata->td_flags.proxy == TASK_PROXY );
-
-    __kmp_first_top_half_finish_proxy(taskdata);
-    __kmp_second_top_half_finish_proxy(taskdata);
-    __kmp_bottom_half_finish_proxy(gtid,ptask);
-
-    KA_TRACE(10, ("__kmp_proxy_task_completed(exit): T#%d proxy task %p completing\n", gtid, taskdata ) );
-}
-
-/*!
-@ingroup TASKING
-@param ptask Task whose execution is completed
-
-Execute the completion of a proxy task from a thread that could not belong to the team.
-*/ -void __kmpc_proxy_task_completed_ooo ( kmp_task_t *ptask ) -{ - KMP_DEBUG_ASSERT( ptask != NULL ); - kmp_taskdata_t * taskdata = KMP_TASK_TO_TASKDATA(ptask); - - KA_TRACE(10, ("__kmp_proxy_task_completed_ooo(enter): proxy task completing ooo %p\n", taskdata ) ); - - KMP_DEBUG_ASSERT( taskdata->td_flags.proxy == TASK_PROXY ); - - __kmp_first_top_half_finish_proxy(taskdata); - - // Enqueue task to complete bottom half completion from a thread within the corresponding team - kmp_team_t * team = taskdata->td_team; - kmp_int32 nthreads = team->t.t_nproc; - kmp_info_t *thread; - - //This should be similar to start_k = __kmp_get_random( thread ) % nthreads but we cannot use __kmp_get_random here - kmp_int32 start_k = 0; - kmp_int32 pass = 1; - kmp_int32 k = start_k; - - do { - //For now we're just linearly trying to find a thread - thread = team->t.t_threads[k]; - k = (k+1) % nthreads; - - // we did a full pass through all the threads - if ( k == start_k ) pass = pass << 1; - - } while ( !__kmp_give_task( thread, k, ptask, pass ) ); - - __kmp_second_top_half_finish_proxy(taskdata); - - KA_TRACE(10, ("__kmp_proxy_task_completed_ooo(exit): proxy task completing ooo %p\n", taskdata ) ); -} - -//--------------------------------------------------------------------------------- -// __kmp_task_dup_alloc: Allocate the taskdata and make a copy of source task for taskloop -// -// thread: allocating thread -// task_src: pointer to source task to be duplicated -// returns: a pointer to the allocated kmp_task_t structure (task). -kmp_task_t * -__kmp_task_dup_alloc( kmp_info_t *thread, kmp_task_t *task_src ) -{ - kmp_task_t *task; - kmp_taskdata_t *taskdata; - kmp_taskdata_t *taskdata_src; - kmp_taskdata_t *parent_task = thread->th.th_current_task; - size_t shareds_offset; - size_t task_size; - - KA_TRACE(10, ("__kmp_task_dup_alloc(enter): Th %p, source task %p\n", thread, task_src) ); - taskdata_src = KMP_TASK_TO_TASKDATA( task_src ); - KMP_DEBUG_ASSERT( taskdata_src->td_flags.proxy == TASK_FULL ); // it should not be proxy task - KMP_DEBUG_ASSERT( taskdata_src->td_flags.tasktype == TASK_EXPLICIT ); - task_size = taskdata_src->td_size_alloc; - - // Allocate a kmp_taskdata_t block and a kmp_task_t block. 
- KA_TRACE(30, ("__kmp_task_dup_alloc: Th %p, malloc size %ld\n", thread, task_size) ); - #if USE_FAST_MEMORY - taskdata = (kmp_taskdata_t *)__kmp_fast_allocate( thread, task_size ); - #else - taskdata = (kmp_taskdata_t *)__kmp_thread_malloc( thread, task_size ); - #endif /* USE_FAST_MEMORY */ - KMP_MEMCPY(taskdata, taskdata_src, task_size); - - task = KMP_TASKDATA_TO_TASK(taskdata); - - // Initialize new task (only specific fields not affected by memcpy) - taskdata->td_task_id = KMP_GEN_TASK_ID(); - if( task->shareds != NULL ) { // need setup shareds pointer - shareds_offset = (char*)task_src->shareds - (char*)taskdata_src; - task->shareds = &((char*)taskdata)[shareds_offset]; - KMP_DEBUG_ASSERT( (((kmp_uintptr_t)task->shareds) & (sizeof(void*)-1)) == 0 ); - } - taskdata->td_alloc_thread = thread; - taskdata->td_taskgroup = parent_task->td_taskgroup; // task inherits the taskgroup from the parent task - - // Only need to keep track of child task counts if team parallel and tasking not serialized - if ( !( taskdata->td_flags.team_serial || taskdata->td_flags.tasking_ser ) ) { - KMP_TEST_THEN_INC32( (kmp_int32 *)(& parent_task->td_incomplete_child_tasks) ); - if ( parent_task->td_taskgroup ) - KMP_TEST_THEN_INC32( (kmp_int32 *)(& parent_task->td_taskgroup->count) ); - // Only need to keep track of allocated child tasks for explicit tasks since implicit not deallocated - if ( taskdata->td_parent->td_flags.tasktype == TASK_EXPLICIT ) - KMP_TEST_THEN_INC32( (kmp_int32 *)(& taskdata->td_parent->td_allocated_child_tasks) ); - } - - KA_TRACE(20, ("__kmp_task_dup_alloc(exit): Th %p, created task %p, parent=%p\n", - thread, taskdata, taskdata->td_parent) ); -#if OMPT_SUPPORT - __kmp_task_init_ompt(taskdata, thread->th.th_info.ds.ds_gtid, (void*)task->routine); -#endif - return task; -} - -// Routine optionally generated by th ecompiler for setting the lastprivate flag -// and calling needed constructors for private/firstprivate objects -// (used to form taskloop tasks from pattern task) -typedef void(*p_task_dup_t)(kmp_task_t *, kmp_task_t *, kmp_int32); - -//--------------------------------------------------------------------------------- -// __kmp_taskloop_linear: Start tasks of the taskloop linearly -// -// loc Source location information -// gtid Global thread ID -// task Task with whole loop iteration range -// lb Pointer to loop lower bound -// ub Pointer to loop upper bound -// st Loop stride -// sched Schedule specified 0/1/2 for none/grainsize/num_tasks -// grainsize Schedule value if specified -// task_dup Tasks duplication routine -void -__kmp_taskloop_linear(ident_t *loc, int gtid, kmp_task_t *task, - kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, - int sched, kmp_uint64 grainsize, void *task_dup ) -{ - KMP_COUNT_BLOCK(OMP_TASKLOOP); - KMP_TIME_PARTITIONED_BLOCK(OMP_taskloop_scheduling); - p_task_dup_t ptask_dup = (p_task_dup_t)task_dup; - kmp_uint64 tc; - kmp_uint64 lower = *lb; // compiler provides global bounds here - kmp_uint64 upper = *ub; - kmp_uint64 i, num_tasks = 0, extras = 0; - kmp_info_t *thread = __kmp_threads[gtid]; - kmp_taskdata_t *current_task = thread->th.th_current_task; - kmp_task_t *next_task; - kmp_int32 lastpriv = 0; - size_t lower_offset = (char*)lb - (char*)task; // remember offset of lb in the task structure - size_t upper_offset = (char*)ub - (char*)task; // remember offset of ub in the task structure - - // compute trip count - if ( st == 1 ) { // most common case - tc = upper - lower + 1; - } else if ( st < 0 ) { - tc = (lower - upper) / (-st) + 1; - } else { 
-        // st > 0
-        tc = (upper - lower) / st + 1;
-    }
-    if(tc == 0) {
-        KA_TRACE(20, ("__kmpc_taskloop(exit): T#%d zero-trip loop\n", gtid));
-        // free the pattern task and exit
-        __kmp_task_start( gtid, task, current_task );
-        // do not execute anything for zero-trip loop
-        __kmp_task_finish( gtid, task, current_task );
-        return;
-    }
-
-    // compute num_tasks/grainsize based on the input provided
-    switch( sched ) {
-    case 0: // no schedule clause specified, we can choose the default
-        // let's try to schedule (team_size*10) tasks
-        grainsize = thread->th.th_team_nproc * 10;
-        // fall through to the num_tasks case below
-    case 2: // num_tasks provided
-        if( grainsize > tc ) {
-            num_tasks = tc;   // too big num_tasks requested, adjust values
-            grainsize = 1;
-            extras = 0;
-        } else {
-            num_tasks = grainsize;
-            grainsize = tc / num_tasks;
-            extras = tc % num_tasks;
-        }
-        break;
-    case 1: // grainsize provided
-        if( grainsize > tc ) {
-            num_tasks = 1;    // too big grainsize requested, adjust values
-            grainsize = tc;
-            extras = 0;
-        } else {
-            num_tasks = tc / grainsize;
-            grainsize = tc / num_tasks; // adjust grainsize for balanced distribution of iterations
-            extras = tc % num_tasks;
-        }
-        break;
-    default:
-        KMP_ASSERT2(0, "unknown scheduling of taskloop");
-    }
-    KMP_DEBUG_ASSERT(tc == num_tasks * grainsize + extras);
-    KMP_DEBUG_ASSERT(num_tasks > extras);
-    KMP_DEBUG_ASSERT(num_tasks > 0);
-    KA_TRACE(20, ("__kmpc_taskloop: T#%d will launch: num_tasks %lld, grainsize %lld, extras %lld\n",
-                  gtid, num_tasks, grainsize, extras));
-
-    // Main loop, launch num_tasks tasks, assign grainsize iterations to each task
-    for( i = 0; i < num_tasks; ++i ) {
-        kmp_uint64 chunk_minus_1;
-        if( extras == 0 ) {
-            chunk_minus_1 = grainsize - 1;
-        } else {
-            chunk_minus_1 = grainsize;
-            --extras; // first 'extras' tasks get a bigger chunk (grainsize+1)
-        }
-        upper = lower + st * chunk_minus_1;
-        if( i == num_tasks - 1 ) {
-            // schedule the last task, set lastprivate flag
-            lastpriv = 1;
-#if KMP_DEBUG
-            if( st == 1 )
-                KMP_DEBUG_ASSERT(upper == *ub);
-            else if( st > 0 )
-                KMP_DEBUG_ASSERT(upper+st > *ub);
-            else
-                KMP_DEBUG_ASSERT(upper+st < *ub);
-#endif
-        }
-        next_task = __kmp_task_dup_alloc(thread, task); // allocate new task
-        *(kmp_uint64*)((char*)next_task + lower_offset) = lower; // adjust task-specific bounds
-        *(kmp_uint64*)((char*)next_task + upper_offset) = upper;
-        if( ptask_dup != NULL )
-            ptask_dup(next_task, task, lastpriv); // set lastprivate flag, construct firstprivates, etc.
-        KA_TRACE(20, ("__kmpc_taskloop: T#%d schedule task %p: lower %lld, upper %lld (offsets %p %p)\n",
-                      gtid, next_task, lower, upper, lower_offset, upper_offset));
-        __kmp_omp_task(gtid, next_task, true); // schedule new task
-        lower = upper + st; // adjust lower bound for the next iteration
-    }
-    // free the pattern task and exit
-    __kmp_task_start( gtid, task, current_task );
-    // do not execute the pattern task, just do bookkeeping
-    __kmp_task_finish( gtid, task, current_task );
-}
-
-/*!
-@ingroup TASKING
-@param loc       Source location information
-@param gtid      Global thread ID
-@param task      Task structure
-@param if_val    Value of the if clause
-@param lb        Pointer to loop lower bound
-@param ub        Pointer to loop upper bound
-@param st        Loop stride
-@param nogroup   Flag, 1 if nogroup clause specified, 0 otherwise
-@param sched     Schedule specified 0/1/2 for none/grainsize/num_tasks
-@param grainsize Schedule value if specified
-@param task_dup  Task duplication routine
-
-Execute the taskloop construct.
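-
-A worked example added by the editor (not part of the original sources): for
-tc=10 iterations with sched=1 and grainsize=4, __kmp_taskloop_linear computes
-num_tasks = 10/4 = 2, rebalances grainsize = 10/2 = 5 and extras = 10%2 = 0,
-so two tasks of five iterations each are created; with grainsize=3 it computes
-num_tasks = 3, grainsize = 3, extras = 1, and the one extra iteration goes to
-the first task (chunks of 4, 3, and 3).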
-*/ -void -__kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t *task, int if_val, - kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, - int nogroup, int sched, kmp_uint64 grainsize, void *task_dup ) -{ - kmp_taskdata_t * taskdata = KMP_TASK_TO_TASKDATA(task); - KMP_DEBUG_ASSERT( task != NULL ); - - KA_TRACE(10, ("__kmpc_taskloop(enter): T#%d, pattern task %p, lb %lld ub %lld st %lld, grain %llu(%d)\n", - gtid, taskdata, *lb, *ub, st, grainsize, sched)); - - // check if clause value first - if( if_val == 0 ) { // if(0) specified, mark task as serial - taskdata->td_flags.task_serial = 1; - taskdata->td_flags.tiedness = TASK_TIED; // AC: serial task cannot be untied - } - if( nogroup == 0 ) { - __kmpc_taskgroup( loc, gtid ); - } - - if( 1 /* AC: use some heuristic here to choose task scheduling method */ ) { - __kmp_taskloop_linear( loc, gtid, task, lb, ub, st, sched, grainsize, task_dup ); - } - - if( nogroup == 0 ) { - __kmpc_end_taskgroup( loc, gtid ); - } - KA_TRACE(10, ("__kmpc_taskloop(exit): T#%d\n", gtid)); -} - -#endif Index: runtime/src/kmp_tasking.cpp =================================================================== --- runtime/src/kmp_tasking.cpp +++ runtime/src/kmp_tasking.cpp @@ -1,5 +1,5 @@ /* - * kmp_tasking.c -- OpenMP 3.0 tasking support. + * kmp_tasking.cpp -- OpenMP 3.0 tasking support. */ Index: runtime/src/kmp_taskq.c =================================================================== --- runtime/src/kmp_taskq.c +++ runtime/src/kmp_taskq.c @@ -1,2032 +0,0 @@ -/* - * kmp_taskq.c -- TASKQ support for OpenMP. - */ - - -//===----------------------------------------------------------------------===// -// -// The LLVM Compiler Infrastructure -// -// This file is dual licensed under the MIT and the University of Illinois Open -// Source Licenses. See LICENSE.txt for details. -// -//===----------------------------------------------------------------------===// - - -#include "kmp.h" -#include "kmp_i18n.h" -#include "kmp_io.h" -#include "kmp_error.h" - -#define MAX_MESSAGE 512 - -/* ------------------------------------------------------------------------ */ -/* ------------------------------------------------------------------------ */ - -/* - * Taskq routines and global variables - */ - -#define KMP_DEBUG_REF_CTS(x) KF_TRACE(1, x); - -#define THREAD_ALLOC_FOR_TASKQ - -static int -in_parallel_context( kmp_team_t *team ) -{ - return ! team -> t.t_serialized; -} - -static void -__kmp_taskq_eo( int *gtid_ref, int *cid_ref, ident_t *loc_ref ) -{ - int gtid = *gtid_ref; - int tid = __kmp_tid_from_gtid( gtid ); - kmp_uint32 my_token; - kmpc_task_queue_t *taskq; - kmp_taskq_t *tq = & __kmp_threads[gtid] -> th.th_team -> t.t_taskq; - - if ( __kmp_env_consistency_check ) -#if KMP_USE_DYNAMIC_LOCK - __kmp_push_sync( gtid, ct_ordered_in_taskq, loc_ref, NULL, 0 ); -#else - __kmp_push_sync( gtid, ct_ordered_in_taskq, loc_ref, NULL ); -#endif - - if ( ! __kmp_threads[ gtid ]-> th.th_team -> t.t_serialized ) { - KMP_MB(); /* Flush all pending memory write invalidates. 
*/ - - /* GEH - need check here under stats to make sure */ - /* inside task (curr_thunk[*tid_ref] != NULL) */ - - my_token =tq->tq_curr_thunk[ tid ]-> th_tasknum; - - taskq = tq->tq_curr_thunk[ tid ]-> th.th_shareds -> sv_queue; - - KMP_WAIT_YIELD(&taskq->tq_tasknum_serving, my_token, KMP_EQ, NULL); - KMP_MB(); - } -} - -static void -__kmp_taskq_xo( int *gtid_ref, int *cid_ref, ident_t *loc_ref ) -{ - int gtid = *gtid_ref; - int tid = __kmp_tid_from_gtid( gtid ); - kmp_uint32 my_token; - kmp_taskq_t *tq = & __kmp_threads[gtid] -> th.th_team -> t.t_taskq; - - if ( __kmp_env_consistency_check ) - __kmp_pop_sync( gtid, ct_ordered_in_taskq, loc_ref ); - - if ( ! __kmp_threads[ gtid ]-> th.th_team -> t.t_serialized ) { - KMP_MB(); /* Flush all pending memory write invalidates. */ - - /* GEH - need check here under stats to make sure */ - /* inside task (curr_thunk[tid] != NULL) */ - - my_token = tq->tq_curr_thunk[ tid ]->th_tasknum; - - KMP_MB(); /* Flush all pending memory write invalidates. */ - - tq->tq_curr_thunk[ tid ]-> th.th_shareds -> sv_queue -> tq_tasknum_serving = my_token + 1; - - KMP_MB(); /* Flush all pending memory write invalidates. */ - } -} - -static void -__kmp_taskq_check_ordered( kmp_int32 gtid, kmpc_thunk_t *thunk ) -{ - kmp_uint32 my_token; - kmpc_task_queue_t *taskq; - - /* assume we are always called from an active parallel context */ - - KMP_MB(); /* Flush all pending memory write invalidates. */ - - my_token = thunk -> th_tasknum; - - taskq = thunk -> th.th_shareds -> sv_queue; - - if(taskq->tq_tasknum_serving <= my_token) { - KMP_WAIT_YIELD(&taskq->tq_tasknum_serving, my_token, KMP_GE, NULL); - KMP_MB(); - taskq->tq_tasknum_serving = my_token +1; - KMP_MB(); - } -} - -#ifdef KMP_DEBUG - -static void -__kmp_dump_TQF(kmp_int32 flags) -{ - if (flags & TQF_IS_ORDERED) - __kmp_printf("ORDERED "); - if (flags & TQF_IS_LASTPRIVATE) - __kmp_printf("LAST_PRIV "); - if (flags & TQF_IS_NOWAIT) - __kmp_printf("NOWAIT "); - if (flags & TQF_HEURISTICS) - __kmp_printf("HEURIST "); - if (flags & TQF_INTERFACE_RESERVED1) - __kmp_printf("RESERV1 "); - if (flags & TQF_INTERFACE_RESERVED2) - __kmp_printf("RESERV2 "); - if (flags & TQF_INTERFACE_RESERVED3) - __kmp_printf("RESERV3 "); - if (flags & TQF_INTERFACE_RESERVED4) - __kmp_printf("RESERV4 "); - if (flags & TQF_IS_LAST_TASK) - __kmp_printf("LAST_TASK "); - if (flags & TQF_TASKQ_TASK) - __kmp_printf("TASKQ_TASK "); - if (flags & TQF_RELEASE_WORKERS) - __kmp_printf("RELEASE "); - if (flags & TQF_ALL_TASKS_QUEUED) - __kmp_printf("ALL_QUEUED "); - if (flags & TQF_PARALLEL_CONTEXT) - __kmp_printf("PARALLEL "); - if (flags & TQF_DEALLOCATED) - __kmp_printf("DEALLOC "); - if (!(flags & (TQF_INTERNAL_FLAGS|TQF_INTERFACE_FLAGS))) - __kmp_printf("(NONE)"); -} - -static void -__kmp_dump_thunk( kmp_taskq_t *tq, kmpc_thunk_t *thunk, kmp_int32 global_tid ) -{ - int i; - int nproc = __kmp_threads[global_tid] -> th.th_team -> t.t_nproc; - - __kmp_printf("\tThunk at %p on (%d): ", thunk, global_tid); - - if (thunk != NULL) { - for (i = 0; i < nproc; i++) { - if( tq->tq_curr_thunk[i] == thunk ) { - __kmp_printf("[%i] ", i); - } - } - __kmp_printf("th_shareds=%p, ", thunk->th.th_shareds); - __kmp_printf("th_task=%p, ", thunk->th_task); - __kmp_printf("th_encl_thunk=%p, ", thunk->th_encl_thunk); - __kmp_printf("th_status=%d, ", thunk->th_status); - __kmp_printf("th_tasknum=%u, ", thunk->th_tasknum); - __kmp_printf("th_flags="); __kmp_dump_TQF(thunk->th_flags); - } - - __kmp_printf("\n"); -} - -static void -__kmp_dump_thunk_stack(kmpc_thunk_t *thunk, 
kmp_int32 thread_num) -{ - kmpc_thunk_t *th; - - __kmp_printf(" Thunk stack for T#%d: ", thread_num); - - for (th = thunk; th != NULL; th = th->th_encl_thunk ) - __kmp_printf("%p ", th); - - __kmp_printf("\n"); -} - -static void -__kmp_dump_task_queue( kmp_taskq_t *tq, kmpc_task_queue_t *queue, kmp_int32 global_tid ) -{ - int qs, count, i; - kmpc_thunk_t *thunk; - kmpc_task_queue_t *taskq; - - __kmp_printf("Task Queue at %p on (%d):\n", queue, global_tid); - - if (queue != NULL) { - int in_parallel = queue->tq_flags & TQF_PARALLEL_CONTEXT; - - if ( __kmp_env_consistency_check ) { - __kmp_printf(" tq_loc : "); - } - if (in_parallel) { - - //if (queue->tq.tq_parent != 0) - //__kmp_acquire_lock(& queue->tq.tq_parent->tq_link_lck, global_tid); - - //__kmp_acquire_lock(& queue->tq_link_lck, global_tid); - - KMP_MB(); /* make sure data structures are in consistent state before querying them */ - /* Seems to work fine without this call for digital/alpha, needed for IBM/RS6000 */ - - __kmp_printf(" tq_parent : %p\n", queue->tq.tq_parent); - __kmp_printf(" tq_first_child : %p\n", queue->tq_first_child); - __kmp_printf(" tq_next_child : %p\n", queue->tq_next_child); - __kmp_printf(" tq_prev_child : %p\n", queue->tq_prev_child); - __kmp_printf(" tq_ref_count : %d\n", queue->tq_ref_count); - - //__kmp_release_lock(& queue->tq_link_lck, global_tid); - - //if (queue->tq.tq_parent != 0) - //__kmp_release_lock(& queue->tq.tq_parent->tq_link_lck, global_tid); - - //__kmp_acquire_lock(& queue->tq_free_thunks_lck, global_tid); - //__kmp_acquire_lock(& queue->tq_queue_lck, global_tid); - - KMP_MB(); /* make sure data structures are in consistent state before querying them */ - /* Seems to work fine without this call for digital/alpha, needed for IBM/RS6000 */ - } - - __kmp_printf(" tq_shareds : "); - for (i=0; i<((queue == tq->tq_root) ? 
queue->tq_nproc : 1); i++)
-            __kmp_printf("%p ", queue->tq_shareds[i].ai_data);
-        __kmp_printf("\n");
-
-        if (in_parallel) {
-            __kmp_printf("    tq_tasknum_queuing : %u\n", queue->tq_tasknum_queuing);
-            __kmp_printf("    tq_tasknum_serving : %u\n", queue->tq_tasknum_serving);
-        }
-
-        __kmp_printf("    tq_queue           : %p\n", queue->tq_queue);
-        __kmp_printf("    tq_thunk_space     : %p\n", queue->tq_thunk_space);
-        __kmp_printf("    tq_taskq_slot      : %p\n", queue->tq_taskq_slot);
-
-        __kmp_printf("    tq_free_thunks     : ");
-        for (thunk = queue->tq_free_thunks; thunk != NULL; thunk = thunk->th.th_next_free )
-            __kmp_printf("%p ", thunk);
-        __kmp_printf("\n");
-
-        __kmp_printf("    tq_nslots          : %d\n", queue->tq_nslots);
-        __kmp_printf("    tq_head            : %d\n", queue->tq_head);
-        __kmp_printf("    tq_tail            : %d\n", queue->tq_tail);
-        __kmp_printf("    tq_nfull           : %d\n", queue->tq_nfull);
-        __kmp_printf("    tq_hiwat           : %d\n", queue->tq_hiwat);
-        __kmp_printf("    tq_flags           : "); __kmp_dump_TQF(queue->tq_flags);
-        __kmp_printf("\n");
-
-        if (in_parallel) {
-            __kmp_printf("    tq_th_thunks       : ");
-            for (i = 0; i < queue->tq_nproc; i++) {
-                __kmp_printf("%d ", queue->tq_th_thunks[i].ai_data);
-            }
-            __kmp_printf("\n");
-        }
-
-        __kmp_printf("\n");
-        __kmp_printf("    Queue slots:\n");
-
-
-        qs = queue->tq_tail;
-        for ( count = 0; count < queue->tq_nfull; ++count ) {
-            __kmp_printf("(%d)", qs);
-            __kmp_dump_thunk( tq, queue->tq_queue[qs].qs_thunk, global_tid );
-            qs = (qs+1) % queue->tq_nslots;
-        }
-
-        __kmp_printf("\n");
-
-        if (in_parallel) {
-            if (queue->tq_taskq_slot != NULL) {
-                __kmp_printf("    TaskQ slot:\n");
-                __kmp_dump_thunk( tq, (kmpc_thunk_t *) queue->tq_taskq_slot, global_tid );
-                __kmp_printf("\n");
-            }
-            //__kmp_release_lock(& queue->tq_queue_lck, global_tid);
-            //__kmp_release_lock(& queue->tq_free_thunks_lck, global_tid);
-        }
-    }
-
-    __kmp_printf("    Taskq freelist: ");
-
-    //__kmp_acquire_lock( & tq->tq_freelist_lck, global_tid );
-
-    KMP_MB();  /* make sure data structures are in consistent state before querying them */
-               /* Seems to work fine without this call for digital/alpha, needed for IBM/RS6000 */
-
-    for( taskq = tq->tq_freelist; taskq != NULL; taskq = taskq->tq.tq_next_free )
-        __kmp_printf("%p ", taskq);
-
-    //__kmp_release_lock( & tq->tq_freelist_lck, global_tid );
-
-    __kmp_printf("\n\n");
-}
-
-static void
-__kmp_aux_dump_task_queue_tree( kmp_taskq_t *tq, kmpc_task_queue_t *curr_queue, kmp_int32 level, kmp_int32 global_tid )
-{
-    int i, count, qs;
-    int nproc = __kmp_threads[global_tid] -> th.th_team -> t.t_nproc;
-    kmpc_task_queue_t *queue = curr_queue;
-
-    if (curr_queue == NULL)
-        return;
-
-    __kmp_printf("    ");
-
-    for (i = 0; i < level; i++)
-        __kmp_printf("  ");
-
-    __kmp_printf("%p", curr_queue);
-
-    for (i = 0; i < nproc; i++) {
-        if( tq->tq_curr_thunk[i] && tq->tq_curr_thunk[i]->th.th_shareds->sv_queue == curr_queue ) {
-            __kmp_printf(" [%i]", i);
-        }
-    }
-
-    __kmp_printf(":");
-
-    //__kmp_acquire_lock(& curr_queue->tq_queue_lck, global_tid);
-
-    KMP_MB();  /* make sure data structures are in consistent state before querying them */
-               /* Seems to work fine without this call for digital/alpha, needed for IBM/RS6000 */
-
-    qs = curr_queue->tq_tail;
-
-    for ( count = 0; count < curr_queue->tq_nfull; ++count ) {
-        __kmp_printf("%p ", curr_queue->tq_queue[qs].qs_thunk);
-        qs = (qs+1) % curr_queue->tq_nslots;
-    }
-
-    //__kmp_release_lock(& curr_queue->tq_queue_lck, global_tid);
-
-    __kmp_printf("\n");
-
-    if (curr_queue->tq_first_child) {
-        //__kmp_acquire_lock(& curr_queue->tq_link_lck, global_tid);
-
-        KMP_MB();  /* make sure data structures are in consistent state before querying them */
-                   /* Seems to work fine without this call for digital/alpha,
needed for IBM/RS6000 */ - - if (curr_queue->tq_first_child) { - for(queue = (kmpc_task_queue_t *)curr_queue->tq_first_child; - queue != NULL; - queue = queue->tq_next_child) { - __kmp_aux_dump_task_queue_tree( tq, queue, level+1, global_tid ); - } - } - - //__kmp_release_lock(& curr_queue->tq_link_lck, global_tid); - } -} - -static void -__kmp_dump_task_queue_tree( kmp_taskq_t *tq, kmpc_task_queue_t *tqroot, kmp_int32 global_tid) -{ - __kmp_printf("TaskQ Tree at root %p on (%d):\n", tqroot, global_tid); - - __kmp_aux_dump_task_queue_tree( tq, tqroot, 0, global_tid ); - - __kmp_printf("\n"); -} -#endif - -/* --------------------------------------------------------------------------- */ - -/* - New taskq storage routines that try to minimize overhead of mallocs but - still provide cache line alignment. -*/ - - -static void * -__kmp_taskq_allocate(size_t size, kmp_int32 global_tid) -{ - void *addr, *orig_addr; - size_t bytes; - - KB_TRACE( 5, ("__kmp_taskq_allocate: called size=%d, gtid=%d\n", (int) size, global_tid ) ); - - bytes = sizeof(void *) + CACHE_LINE + size; - -#ifdef THREAD_ALLOC_FOR_TASKQ - orig_addr = (void *) __kmp_thread_malloc( __kmp_thread_from_gtid(global_tid), bytes ); -#else - KE_TRACE( 10, ("%%%%%% MALLOC( %d )\n", bytes ) ); - orig_addr = (void *) KMP_INTERNAL_MALLOC( bytes ); -#endif /* THREAD_ALLOC_FOR_TASKQ */ - - if (orig_addr == 0) - KMP_FATAL( OutOfHeapMemory ); - - addr = orig_addr; - - if (((kmp_uintptr_t) addr & ( CACHE_LINE - 1 )) != 0) { - KB_TRACE( 50, ("__kmp_taskq_allocate: adjust for cache alignment\n" ) ); - addr = (void *) (((kmp_uintptr_t) addr + CACHE_LINE) & ~( CACHE_LINE - 1 )); - } - - (* (void **) addr) = orig_addr; - - KB_TRACE( 10, ("__kmp_taskq_allocate: allocate: %p, use: %p - %p, size: %d, gtid: %d\n", - orig_addr, ((void **) addr) + 1, ((char *)(((void **) addr) + 1)) + size-1, - (int) size, global_tid )); - - return ( ((void **) addr) + 1 ); -} - -static void -__kmpc_taskq_free(void *p, kmp_int32 global_tid) -{ - KB_TRACE( 5, ("__kmpc_taskq_free: called addr=%p, gtid=%d\n", p, global_tid ) ); - - KB_TRACE(10, ("__kmpc_taskq_free: freeing: %p, gtid: %d\n", (*( ((void **) p)-1)), global_tid )); - -#ifdef THREAD_ALLOC_FOR_TASKQ - __kmp_thread_free( __kmp_thread_from_gtid(global_tid), *( ((void **) p)-1) ); -#else - KMP_INTERNAL_FREE( *( ((void **) p)-1) ); -#endif /* THREAD_ALLOC_FOR_TASKQ */ -} - -/* --------------------------------------------------------------------------- */ - -/* - * Keep freed kmpc_task_queue_t on an internal freelist and recycle since - * they're of constant size. 
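- *
- * Editor's note, not part of the original sources: __kmp_taskq_allocate()
- * above uses the common align-and-stash pattern. It over-allocates by
- * sizeof(void *) + CACHE_LINE, rounds the address up to the next cache-line
- * boundary, stores the original allocation pointer at that boundary, and
- * returns the word just past it, so __kmpc_taskq_free() can recover the
- * original pointer as *(((void **) p) - 1). For example, with CACHE_LINE=64
- * and orig_addr=0x1008, the aligned address is 0x1040, the caller receives
- * 0x1048, and *(void **)0x1040 == 0x1008.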
- */
-
-static kmpc_task_queue_t *
-__kmp_alloc_taskq ( kmp_taskq_t *tq, int in_parallel, kmp_int32 nslots, kmp_int32 nthunks,
-                    kmp_int32 nshareds, kmp_int32 nproc, size_t sizeof_thunk,
-                    size_t sizeof_shareds, kmpc_thunk_t **new_taskq_thunk, kmp_int32 global_tid )
-{
-    kmp_int32                   i;
-    size_t                      bytes;
-    kmpc_task_queue_t          *new_queue;
-    kmpc_aligned_shared_vars_t *shared_var_array;
-    char                       *shared_var_storage;
-    char                       *pt; /* for doing byte-adjusted address computations */
-
-    __kmp_acquire_lock( & tq->tq_freelist_lck, global_tid );
-
-    KMP_MB();  /* make sure data structures are in consistent state before querying them */
-               /* Seems to work fine without this call for digital/alpha, needed for IBM/RS6000 */
-
-    if( tq->tq_freelist ) {
-        new_queue = tq -> tq_freelist;
-        tq -> tq_freelist = tq -> tq_freelist -> tq.tq_next_free;
-
-        KMP_DEBUG_ASSERT(new_queue->tq_flags & TQF_DEALLOCATED);
-
-        new_queue->tq_flags = 0;
-
-        __kmp_release_lock( & tq->tq_freelist_lck, global_tid );
-    }
-    else {
-        __kmp_release_lock( & tq->tq_freelist_lck, global_tid );
-
-        new_queue = (kmpc_task_queue_t *) __kmp_taskq_allocate (sizeof (kmpc_task_queue_t), global_tid);
-        new_queue->tq_flags = 0;
-    }
-
-    /* space in the task queue for queue slots (allocate as one big chunk */
-    /* of storage including new_taskq_task space)                         */
-
-    sizeof_thunk += (CACHE_LINE - (sizeof_thunk % CACHE_LINE)); /* pad to cache line size */
-    pt = (char *) __kmp_taskq_allocate (nthunks * sizeof_thunk, global_tid);
-    new_queue->tq_thunk_space = (kmpc_thunk_t *)pt;
-    *new_taskq_thunk = (kmpc_thunk_t *)(pt + (nthunks - 1) * sizeof_thunk);
-
-    /* chain the allocated thunks into a freelist for this queue */
-
-    new_queue->tq_free_thunks = (kmpc_thunk_t *)pt;
-
-    for (i = 0; i < (nthunks - 2); i++) {
-        ((kmpc_thunk_t *)(pt+i*sizeof_thunk))->th.th_next_free = (kmpc_thunk_t *)(pt + (i+1)*sizeof_thunk);
-#ifdef KMP_DEBUG
-        ((kmpc_thunk_t *)(pt+i*sizeof_thunk))->th_flags = TQF_DEALLOCATED;
-#endif
-    }
-
-    ((kmpc_thunk_t *)(pt+(nthunks-2)*sizeof_thunk))->th.th_next_free = NULL;
-#ifdef KMP_DEBUG
-    ((kmpc_thunk_t *)(pt+(nthunks-2)*sizeof_thunk))->th_flags = TQF_DEALLOCATED;
-#endif
-
-    /* initialize the locks */
-
-    if (in_parallel) {
-        __kmp_init_lock( & new_queue->tq_link_lck );
-        __kmp_init_lock( & new_queue->tq_free_thunks_lck );
-        __kmp_init_lock( & new_queue->tq_queue_lck );
-    }
-
-    /* now allocate the slots */
-
-    bytes = nslots * sizeof (kmpc_aligned_queue_slot_t);
-    new_queue->tq_queue = (kmpc_aligned_queue_slot_t *) __kmp_taskq_allocate( bytes, global_tid );
-
-    /* space for array of pointers to shared variable structures */
-    sizeof_shareds += sizeof(kmpc_task_queue_t *);
-    sizeof_shareds += (CACHE_LINE - (sizeof_shareds % CACHE_LINE)); /* pad to cache line size */
-
-    bytes = nshareds * sizeof (kmpc_aligned_shared_vars_t);
-    shared_var_array = (kmpc_aligned_shared_vars_t *) __kmp_taskq_allocate ( bytes, global_tid);
-
-    bytes = nshareds * sizeof_shareds;
-    shared_var_storage = (char *) __kmp_taskq_allocate ( bytes, global_tid);
-
-    for (i = 0; i < nshareds; i++) {
-        shared_var_array[i].ai_data = (kmpc_shared_vars_t *) (shared_var_storage + i * sizeof_shareds);
-        shared_var_array[i].ai_data->sv_queue = new_queue;
-    }
-    new_queue->tq_shareds = shared_var_array;
-
-
-    /* array for number of outstanding thunks per thread */
-
-    if (in_parallel) {
-        bytes = nproc * sizeof(kmpc_aligned_int32_t);
-        new_queue->tq_th_thunks = (kmpc_aligned_int32_t *) __kmp_taskq_allocate ( bytes, global_tid);
-        new_queue->tq_nproc     = nproc;
-
-        for (i = 0; i < nproc; i++)
-            new_queue->tq_th_thunks[i].ai_data = 0;
-    }
-
-    return new_queue;
-}
-
-static void
-__kmp_free_taskq (kmp_taskq_t *tq, kmpc_task_queue_t *p, int in_parallel, kmp_int32 global_tid)
-{
-    __kmpc_taskq_free(p->tq_thunk_space, global_tid);
-    __kmpc_taskq_free(p->tq_queue, global_tid);
-
-    /* free shared var structure storage */
-    __kmpc_taskq_free((void *) p->tq_shareds[0].ai_data, global_tid);
-
-    /* free array of pointers to shared vars storage */
-    __kmpc_taskq_free(p->tq_shareds, global_tid);
-
-#ifdef KMP_DEBUG
-    p->tq_first_child = NULL;
-    p->tq_next_child = NULL;
-    p->tq_prev_child = NULL;
-    p->tq_ref_count = -10;
-    p->tq_shareds = NULL;
-    p->tq_tasknum_queuing = 0;
-    p->tq_tasknum_serving = 0;
-    p->tq_queue = NULL;
-    p->tq_thunk_space = NULL;
-    p->tq_taskq_slot = NULL;
-    p->tq_free_thunks = NULL;
-    p->tq_nslots = 0;
-    p->tq_head = 0;
-    p->tq_tail = 0;
-    p->tq_nfull = 0;
-    p->tq_hiwat = 0;
-
-    if (in_parallel) {
-        int i;
-
-        for (i = 0; i < p->tq_nproc; i++)
-            p->tq_th_thunks[i].ai_data = 0;
-    }
-    if ( __kmp_env_consistency_check )
-        p->tq_loc = NULL;
-    KMP_DEBUG_ASSERT( p->tq_flags & TQF_DEALLOCATED );
-    p->tq_flags = TQF_DEALLOCATED;
-#endif /* KMP_DEBUG */
-
-    if (in_parallel) {
-        __kmpc_taskq_free(p->tq_th_thunks, global_tid);
-        __kmp_destroy_lock(& p->tq_link_lck);
-        __kmp_destroy_lock(& p->tq_queue_lck);
-        __kmp_destroy_lock(& p->tq_free_thunks_lck);
-    }
-#ifdef KMP_DEBUG
-    p->tq_th_thunks = NULL;
-#endif /* KMP_DEBUG */
-
-    KMP_MB();  /* make sure data structures are in consistent state before querying them */
-               /* Seems to work fine without this call for digital/alpha, needed for IBM/RS6000 */
-
-    __kmp_acquire_lock( & tq->tq_freelist_lck, global_tid );
-    p->tq.tq_next_free = tq->tq_freelist;
-
-    tq->tq_freelist = p;
-    __kmp_release_lock( & tq->tq_freelist_lck, global_tid );
-}
-
-/*
- * Once a group of thunks has been allocated for use in a particular queue,
- * these are managed via a per-queue freelist.
- * We force a check that there's always a thunk free if we need one.
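- *
- * Editor's note, added commentary rather than original text: the
- * KMP_DEBUG_ASSERT below can insist the freelist is never empty because
- * __kmpc_taskq sizes a parallel queue with
- * nthunks = nslots + nproc * __KMP_TASKQ_THUNKS_PER_TH + 1, and a thread
- * stops dequeuing once it holds __KMP_TASKQ_THUNKS_PER_TH outstanding
- * thunks, so queued slots plus in-flight thunks plus the taskq task can
- * never exhaust the per-queue pool.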
- */ - -static kmpc_thunk_t * -__kmp_alloc_thunk (kmpc_task_queue_t *queue, int in_parallel, kmp_int32 global_tid) -{ - kmpc_thunk_t *fl; - - if (in_parallel) { - __kmp_acquire_lock(& queue->tq_free_thunks_lck, global_tid); - - KMP_MB(); /* make sure data structures are in consistent state before querying them */ - /* Seems to work fine without this call for digital/alpha, needed for IBM/RS6000 */ - } - - fl = queue->tq_free_thunks; - - KMP_DEBUG_ASSERT (fl != NULL); - - queue->tq_free_thunks = fl->th.th_next_free; - fl->th_flags = 0; - - if (in_parallel) - __kmp_release_lock(& queue->tq_free_thunks_lck, global_tid); - - return fl; -} - -static void -__kmp_free_thunk (kmpc_task_queue_t *queue, kmpc_thunk_t *p, int in_parallel, kmp_int32 global_tid) -{ -#ifdef KMP_DEBUG - p->th_task = 0; - p->th_encl_thunk = 0; - p->th_status = 0; - p->th_tasknum = 0; - /* Also could zero pointers to private vars */ -#endif - - if (in_parallel) { - __kmp_acquire_lock(& queue->tq_free_thunks_lck, global_tid); - - KMP_MB(); /* make sure data structures are in consistent state before querying them */ - /* Seems to work fine without this call for digital/alpha, needed for IBM/RS6000 */ - } - - p->th.th_next_free = queue->tq_free_thunks; - queue->tq_free_thunks = p; - -#ifdef KMP_DEBUG - p->th_flags = TQF_DEALLOCATED; -#endif - - if (in_parallel) - __kmp_release_lock(& queue->tq_free_thunks_lck, global_tid); -} - -/* --------------------------------------------------------------------------- */ - -/* returns nonzero if the queue just became full after the enqueue */ - -static kmp_int32 -__kmp_enqueue_task ( kmp_taskq_t *tq, kmp_int32 global_tid, kmpc_task_queue_t *queue, kmpc_thunk_t *thunk, int in_parallel ) -{ - kmp_int32 ret; - - /* dkp: can we get around the lock in the TQF_RELEASE_WORKERS case (only the master is executing then) */ - if (in_parallel) { - __kmp_acquire_lock(& queue->tq_queue_lck, global_tid); - - KMP_MB(); /* make sure data structures are in consistent state before querying them */ - /* Seems to work fine without this call for digital/alpha, needed for IBM/RS6000 */ - } - - KMP_DEBUG_ASSERT (queue->tq_nfull < queue->tq_nslots); /* check queue not full */ - - queue->tq_queue[(queue->tq_head)++].qs_thunk = thunk; - - if (queue->tq_head >= queue->tq_nslots) - queue->tq_head = 0; - - (queue->tq_nfull)++; - - KMP_MB(); /* to assure that nfull is seen to increase before TQF_ALL_TASKS_QUEUED is set */ - - ret = (in_parallel) ? (queue->tq_nfull == queue->tq_nslots) : FALSE; - - if (in_parallel) { - /* don't need to wait until workers are released before unlocking */ - __kmp_release_lock(& queue->tq_queue_lck, global_tid); - - if( tq->tq_global_flags & TQF_RELEASE_WORKERS ) { - /* If just creating the root queue, the worker threads are waiting at */ - /* a join barrier until now, when there's something in the queue for */ - /* them to do; release them now to do work. */ - /* This should only be done when this is the first task enqueued, */ - /* so reset the flag here also. 
*/ - - tq->tq_global_flags &= ~TQF_RELEASE_WORKERS; /* no lock needed, workers are still in spin mode */ - - KMP_MB(); /* avoid releasing barrier twice if taskq_task switches threads */ - - __kmpc_end_barrier_master( NULL, global_tid); - } - } - - return ret; -} - -static kmpc_thunk_t * -__kmp_dequeue_task (kmp_int32 global_tid, kmpc_task_queue_t *queue, int in_parallel) -{ - kmpc_thunk_t *pt; - int tid = __kmp_tid_from_gtid( global_tid ); - - KMP_DEBUG_ASSERT (queue->tq_nfull > 0); /* check queue not empty */ - - if (queue->tq.tq_parent != NULL && in_parallel) { - int ct; - __kmp_acquire_lock(& queue->tq.tq_parent->tq_link_lck, global_tid); - ct = ++(queue->tq_ref_count); - __kmp_release_lock(& queue->tq.tq_parent->tq_link_lck, global_tid); - KMP_DEBUG_REF_CTS(("line %d gtid %d: Q %p inc %d\n", - __LINE__, global_tid, queue, ct)); - } - - pt = queue->tq_queue[(queue->tq_tail)++].qs_thunk; - - if (queue->tq_tail >= queue->tq_nslots) - queue->tq_tail = 0; - - if (in_parallel) { - queue->tq_th_thunks[tid].ai_data++; - - KMP_MB(); /* necessary so ai_data increment is propagated to other threads immediately (digital) */ - - KF_TRACE(200, ("__kmp_dequeue_task: T#%d(:%d) now has %d outstanding thunks from queue %p\n", - global_tid, tid, queue->tq_th_thunks[tid].ai_data, queue)); - } - - (queue->tq_nfull)--; - -#ifdef KMP_DEBUG - KMP_MB(); - - /* necessary so (queue->tq_nfull > 0) above succeeds after tq_nfull is decremented */ - - KMP_DEBUG_ASSERT(queue->tq_nfull >= 0); - - if (in_parallel) { - KMP_DEBUG_ASSERT(queue->tq_th_thunks[tid].ai_data <= __KMP_TASKQ_THUNKS_PER_TH); - } -#endif - - return pt; -} - -/* - * Find the next (non-null) task to dequeue and return it. - * This is never called unless in_parallel=TRUE - * - * Here are the rules for deciding which queue to take the task from: - * 1. Walk up the task queue tree from the current queue's parent and look - * on the way up (for loop, below). - * 2. Do a depth-first search back down the tree from the root and - * look (find_task_in_descendant_queue()). - * - * Here are the rules for deciding which task to take from a queue - * (__kmp_find_task_in_queue ()): - * 1. Never take the last task from a queue if TQF_IS_LASTPRIVATE; this task - * must be staged to make sure we execute the last one with - * TQF_IS_LAST_TASK at the end of task queue execution. - * 2. If the queue length is below some high water mark and the taskq task - * is enqueued, prefer running the taskq task. - * 3. Otherwise, take a (normal) task from the queue. - * - * If we do all this and return pt == NULL at the bottom of this routine, - * this means there are no more tasks to execute (except possibly for - * TQF_IS_LASTPRIVATE). 
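- *
- * Editor's example (not in the original sources): a queue holding exactly one
- * task with TQF_IS_LASTPRIVATE set yields pt == NULL here until
- * __kmpc_end_taskq_task() marks the queue with TQF_IS_LAST_TASK; only then is
- * the staged final task dequeued, with TQF_IS_LAST_TASK OR'ed into its thunk
- * flags so the lastprivate copy-out runs exactly once.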
- */ - -static kmpc_thunk_t * -__kmp_find_task_in_queue (kmp_int32 global_tid, kmpc_task_queue_t *queue) -{ - kmpc_thunk_t *pt = NULL; - int tid = __kmp_tid_from_gtid( global_tid ); - - /* To prevent deadlock from tq_queue_lck if queue already deallocated */ - if ( !(queue->tq_flags & TQF_DEALLOCATED) ) { - - __kmp_acquire_lock(& queue->tq_queue_lck, global_tid); - - /* Check again to avoid race in __kmpc_end_taskq() */ - if ( !(queue->tq_flags & TQF_DEALLOCATED) ) { - - KMP_MB(); /* make sure data structures are in consistent state before querying them */ - /* Seems to work fine without this call for digital/alpha, needed for IBM/RS6000 */ - - if ((queue->tq_taskq_slot != NULL) && (queue->tq_nfull <= queue->tq_hiwat)) { - /* if there's enough room in the queue and the dispatcher */ - /* (taskq task) is available, schedule more tasks */ - pt = (kmpc_thunk_t *) queue->tq_taskq_slot; - queue->tq_taskq_slot = NULL; - } - else if (queue->tq_nfull == 0 || - queue->tq_th_thunks[tid].ai_data >= __KMP_TASKQ_THUNKS_PER_TH) { - /* do nothing if no thunks available or this thread can't */ - /* run any because it already is executing too many */ - - pt = NULL; - } - else if (queue->tq_nfull > 1) { - /* always safe to schedule a task even if TQF_IS_LASTPRIVATE */ - - pt = __kmp_dequeue_task (global_tid, queue, TRUE); - } - else if (!(queue->tq_flags & TQF_IS_LASTPRIVATE)) { - /* one thing in queue, always safe to schedule if !TQF_IS_LASTPRIVATE */ - - pt = __kmp_dequeue_task (global_tid, queue, TRUE); - } - else if (queue->tq_flags & TQF_IS_LAST_TASK) { - /* TQF_IS_LASTPRIVATE, one thing in queue, kmpc_end_taskq_task() */ - /* has been run so this is last task, run with TQF_IS_LAST_TASK so */ - /* instrumentation does copy-out. */ - - pt = __kmp_dequeue_task (global_tid, queue, TRUE); - pt->th_flags |= TQF_IS_LAST_TASK; /* don't need test_then_or since already locked */ - } - } - - /* GEH - What happens here if is lastprivate, but not last task? */ - __kmp_release_lock(& queue->tq_queue_lck, global_tid); - } - - return pt; -} - -/* - * Walk a tree of queues starting at queue's first child - * and return a non-NULL thunk if one can be scheduled. 
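- * Editor's note (added commentary, not from the sources): each visited
- * child's tq_ref_count is incremented under the parent's tq_link_lck before
- * that lock is dropped for the actual search, and decremented afterwards, so
- * a queue cannot be freed out from under the walk even though the lock is
- * released between siblings; __kmp_remove_queue_from_tree() waits for the
- * count to drop back to 1 before deallocating.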
- * Must only be called when in_parallel=TRUE - */ - -static kmpc_thunk_t * -__kmp_find_task_in_descendant_queue (kmp_int32 global_tid, kmpc_task_queue_t *curr_queue) -{ - kmpc_thunk_t *pt = NULL; - kmpc_task_queue_t *queue = curr_queue; - - if (curr_queue->tq_first_child != NULL) { - __kmp_acquire_lock(& curr_queue->tq_link_lck, global_tid); - - KMP_MB(); /* make sure data structures are in consistent state before querying them */ - /* Seems to work fine without this call for digital/alpha, needed for IBM/RS6000 */ - - queue = (kmpc_task_queue_t *) curr_queue->tq_first_child; - if (queue == NULL) { - __kmp_release_lock(& curr_queue->tq_link_lck, global_tid); - return NULL; - } - - while (queue != NULL) { - int ct; - kmpc_task_queue_t *next; - - ct= ++(queue->tq_ref_count); - __kmp_release_lock(& curr_queue->tq_link_lck, global_tid); - KMP_DEBUG_REF_CTS(("line %d gtid %d: Q %p inc %d\n", - __LINE__, global_tid, queue, ct)); - - pt = __kmp_find_task_in_queue (global_tid, queue); - - if (pt != NULL) { - int ct; - - __kmp_acquire_lock(& curr_queue->tq_link_lck, global_tid); - - KMP_MB(); /* make sure data structures are in consistent state before querying them */ - /* Seems to work fine without this call for digital/alpha, needed for IBM/RS6000 */ - - ct = --(queue->tq_ref_count); - KMP_DEBUG_REF_CTS(("line %d gtid %d: Q %p dec %d\n", - __LINE__, global_tid, queue, ct)); - KMP_DEBUG_ASSERT( queue->tq_ref_count >= 0 ); - - __kmp_release_lock(& curr_queue->tq_link_lck, global_tid); - - return pt; - } - - /* although reference count stays active during descendant walk, shouldn't matter */ - /* since if children still exist, reference counts aren't being monitored anyway */ - - pt = __kmp_find_task_in_descendant_queue (global_tid, queue); - - if (pt != NULL) { - int ct; - - __kmp_acquire_lock(& curr_queue->tq_link_lck, global_tid); - - KMP_MB(); /* make sure data structures are in consistent state before querying them */ - /* Seems to work fine without this call for digital/alpha, needed for IBM/RS6000 */ - - ct = --(queue->tq_ref_count); - KMP_DEBUG_REF_CTS(("line %d gtid %d: Q %p dec %d\n", - __LINE__, global_tid, queue, ct)); - KMP_DEBUG_ASSERT( ct >= 0 ); - - __kmp_release_lock(& curr_queue->tq_link_lck, global_tid); - - return pt; - } - - __kmp_acquire_lock(& curr_queue->tq_link_lck, global_tid); - - KMP_MB(); /* make sure data structures are in consistent state before querying them */ - /* Seems to work fine without this call for digital/alpha, needed for IBM/RS6000 */ - - next = queue->tq_next_child; - - ct = --(queue->tq_ref_count); - KMP_DEBUG_REF_CTS(("line %d gtid %d: Q %p dec %d\n", - __LINE__, global_tid, queue, ct)); - KMP_DEBUG_ASSERT( ct >= 0 ); - - queue = next; - } - - __kmp_release_lock(& curr_queue->tq_link_lck, global_tid); - } - - return pt; -} - -/* - * Walk up the taskq tree looking for a task to execute. - * If we get to the root, search the tree for a descendent queue task. 
- * Must only be called when in_parallel=TRUE - */ - -static kmpc_thunk_t * -__kmp_find_task_in_ancestor_queue (kmp_taskq_t *tq, kmp_int32 global_tid, kmpc_task_queue_t *curr_queue) -{ - kmpc_task_queue_t *queue; - kmpc_thunk_t *pt; - - pt = NULL; - - if (curr_queue->tq.tq_parent != NULL) { - queue = curr_queue->tq.tq_parent; - - while (queue != NULL) { - if (queue->tq.tq_parent != NULL) { - int ct; - __kmp_acquire_lock(& queue->tq.tq_parent->tq_link_lck, global_tid); - - KMP_MB(); /* make sure data structures are in consistent state before querying them */ - /* Seems to work fine without this call for digital/alpha, needed for IBM/RS6000 */ - - ct = ++(queue->tq_ref_count); - __kmp_release_lock(& queue->tq.tq_parent->tq_link_lck, global_tid); - KMP_DEBUG_REF_CTS(("line %d gtid %d: Q %p inc %d\n", - __LINE__, global_tid, queue, ct)); - } - - pt = __kmp_find_task_in_queue (global_tid, queue); - if (pt != NULL) { - if (queue->tq.tq_parent != NULL) { - int ct; - __kmp_acquire_lock(& queue->tq.tq_parent->tq_link_lck, global_tid); - - KMP_MB(); /* make sure data structures are in consistent state before querying them */ - /* Seems to work without this call for digital/alpha, needed for IBM/RS6000 */ - - ct = --(queue->tq_ref_count); - KMP_DEBUG_REF_CTS(("line %d gtid %d: Q %p dec %d\n", - __LINE__, global_tid, queue, ct)); - KMP_DEBUG_ASSERT( ct >= 0 ); - - __kmp_release_lock(& queue->tq.tq_parent->tq_link_lck, global_tid); - } - - return pt; - } - - if (queue->tq.tq_parent != NULL) { - int ct; - __kmp_acquire_lock(& queue->tq.tq_parent->tq_link_lck, global_tid); - - KMP_MB(); /* make sure data structures are in consistent state before querying them */ - /* Seems to work fine without this call for digital/alpha, needed for IBM/RS6000 */ - - ct = --(queue->tq_ref_count); - KMP_DEBUG_REF_CTS(("line %d gtid %d: Q %p dec %d\n", - __LINE__, global_tid, queue, ct)); - KMP_DEBUG_ASSERT( ct >= 0 ); - } - queue = queue->tq.tq_parent; - - if (queue != NULL) - __kmp_release_lock(& queue->tq_link_lck, global_tid); - } - - } - - pt = __kmp_find_task_in_descendant_queue( global_tid, tq->tq_root ); - - return pt; -} - -static int -__kmp_taskq_tasks_finished (kmpc_task_queue_t *queue) -{ - int i; - - /* KMP_MB(); *//* is this really necessary? 
 */
-
-    for (i = 0; i < queue->tq_nproc; i++) {
-        if (queue->tq_th_thunks[i].ai_data != 0)
-            return FALSE;
-    }
-
-    return TRUE;
-}
-
-static int
-__kmp_taskq_has_any_children (kmpc_task_queue_t *queue)
-{
-    return (queue->tq_first_child != NULL);
-}
-
-static void
-__kmp_remove_queue_from_tree( kmp_taskq_t *tq, kmp_int32 global_tid, kmpc_task_queue_t *queue, int in_parallel )
-{
-#ifdef KMP_DEBUG
-    kmp_int32     i;
-    kmpc_thunk_t *thunk;
-#endif
-
-    KF_TRACE(50, ("Before Deletion of TaskQ at %p on (%d):\n", queue, global_tid));
-    KF_DUMP(50, __kmp_dump_task_queue( tq, queue, global_tid ));
-
-    /* sub-queue in a recursion, not the root task queue */
-    KMP_DEBUG_ASSERT (queue->tq.tq_parent != NULL);
-
-    if (in_parallel) {
-        __kmp_acquire_lock(& queue->tq.tq_parent->tq_link_lck, global_tid);
-
-        KMP_MB();  /* make sure data structures are in consistent state before querying them */
-                   /* Seems to work fine without this call for digital/alpha, needed for IBM/RS6000 */
-    }
-
-    KMP_DEBUG_ASSERT (queue->tq_first_child == NULL);
-
-    /* unlink queue from its siblings if any at this level */
-    if (queue->tq_prev_child != NULL)
-        queue->tq_prev_child->tq_next_child = queue->tq_next_child;
-    if (queue->tq_next_child != NULL)
-        queue->tq_next_child->tq_prev_child = queue->tq_prev_child;
-    if (queue->tq.tq_parent->tq_first_child == queue)
-        queue->tq.tq_parent->tq_first_child = queue->tq_next_child;
-
-    queue->tq_prev_child = NULL;
-    queue->tq_next_child = NULL;
-
-    if (in_parallel) {
-        KMP_DEBUG_REF_CTS(("line %d gtid %d: Q %p waiting for ref_count of %d to reach 1\n",
-                           __LINE__, global_tid, queue, queue->tq_ref_count));
-
-        /* wait until all other threads have stopped accessing this queue */
-        while (queue->tq_ref_count > 1) {
-            __kmp_release_lock(& queue->tq.tq_parent->tq_link_lck, global_tid);
-
-            KMP_WAIT_YIELD((volatile kmp_uint32*)&queue->tq_ref_count, 1, KMP_LE, NULL);
-
-            __kmp_acquire_lock(& queue->tq.tq_parent->tq_link_lck, global_tid);
-
-            KMP_MB();  /* make sure data structures are in consistent state before querying them */
-                       /* Seems to work fine without this call for digital/alpha, needed for IBM/RS6000 */
-        }
-
-        __kmp_release_lock(& queue->tq.tq_parent->tq_link_lck, global_tid);
-    }
-
-    KMP_DEBUG_REF_CTS(("line %d gtid %d: Q %p freeing queue\n",
-                       __LINE__, global_tid, queue));
-
-#ifdef KMP_DEBUG
-    KMP_DEBUG_ASSERT(queue->tq_flags & TQF_ALL_TASKS_QUEUED);
-    KMP_DEBUG_ASSERT(queue->tq_nfull == 0);
-
-    for (i = 0; i < queue->tq_nproc; i++) {
-        KMP_DEBUG_ASSERT(queue->tq_th_thunks[i].ai_data == 0);
-    }
-
-    i = 0;
-    for (thunk = queue->tq_free_thunks; thunk != NULL; thunk = thunk->th.th_next_free)
-        ++i;
-
-    KMP_ASSERT (i == queue->tq_nslots + (queue->tq_nproc * __KMP_TASKQ_THUNKS_PER_TH));
-#endif
-
-    /* release storage for queue entry */
-    __kmp_free_taskq ( tq, queue, TRUE, global_tid );
-
-    KF_TRACE(50, ("After Deletion of TaskQ at %p on (%d):\n", queue, global_tid));
-    KF_DUMP(50, __kmp_dump_task_queue_tree( tq, tq->tq_root, global_tid ));
-}
-
-/*
- * Starting from indicated queue, proceed downward through tree and
- * remove all taskqs which are finished, but only go down to taskqs
- * which have the "nowait" clause present.  Assume this is only called
- * when in_parallel=TRUE.
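- *
- * Editor's note (added commentary, not from the sources): "finished" here
- * means TQF_ALL_TASKS_QUEUED is set, tq_nfull == 0, no thread still holds an
- * outstanding thunk from the queue, and the queue has no children; only such
- * queues are marked TQF_DEALLOCATED and unlinked by the routine below.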
- */
-
-static void
-__kmp_find_and_remove_finished_child_taskq( kmp_taskq_t *tq, kmp_int32 global_tid, kmpc_task_queue_t *curr_queue )
-{
-    kmpc_task_queue_t *queue = curr_queue;
-
-    if (curr_queue->tq_first_child != NULL) {
-        __kmp_acquire_lock(& curr_queue->tq_link_lck, global_tid);
-
-        KMP_MB();  /* make sure data structures are in consistent state before querying them */
-                   /* Seems to work fine without this call for digital/alpha, needed for IBM/RS6000 */
-
-        queue = (kmpc_task_queue_t *) curr_queue->tq_first_child;
-        if (queue == NULL) {
-            __kmp_release_lock(& curr_queue->tq_link_lck, global_tid);
-            return;
-        }
-
-        while (queue != NULL)  {
-            kmpc_task_queue_t *next;
-            int ct = ++(queue->tq_ref_count);
-            KMP_DEBUG_REF_CTS(("line %d gtid %d: Q %p inc %d\n",
-                               __LINE__, global_tid, queue, ct));
-
-
-            /* although reference count stays active during descendant walk, */
-            /* shouldn't matter since if children still exist, reference     */
-            /* counts aren't being monitored anyway                          */
-
-            if (queue->tq_flags & TQF_IS_NOWAIT) {
-                __kmp_find_and_remove_finished_child_taskq ( tq, global_tid, queue );
-
-                if ((queue->tq_flags & TQF_ALL_TASKS_QUEUED) && (queue->tq_nfull == 0) &&
-                    __kmp_taskq_tasks_finished(queue) && ! __kmp_taskq_has_any_children(queue)) {
-
-                    /*
-                       Only remove this if we have not already marked it for deallocation.
-                       This should prevent multiple threads from trying to free this.
-                     */
-
-                    if ( __kmp_test_lock(& queue->tq_queue_lck, global_tid) ) {
-                        if ( !(queue->tq_flags & TQF_DEALLOCATED) ) {
-                            queue->tq_flags |= TQF_DEALLOCATED;
-                            __kmp_release_lock(& queue->tq_queue_lck, global_tid);
-
-                            __kmp_remove_queue_from_tree( tq, global_tid, queue, TRUE );
-
-                            /* Can't do any more here since can't be sure where sibling queue is so just exit this level */
-                            return;
-                        }
-                        else {
-                            __kmp_release_lock(& queue->tq_queue_lck, global_tid);
-                        }
-                    }
-                    /* otherwise, just fall through and decrement reference count */
-                }
-            }
-
-            __kmp_acquire_lock(& curr_queue->tq_link_lck, global_tid);
-
-            KMP_MB();  /* make sure data structures are in consistent state before querying them */
-                       /* Seems to work fine without this call for digital/alpha, needed for IBM/RS6000 */
-
-            next = queue->tq_next_child;
-
-            ct = --(queue->tq_ref_count);
-            KMP_DEBUG_REF_CTS(("line %d gtid %d: Q %p dec %d\n",
-                               __LINE__, global_tid, queue, ct));
-            KMP_DEBUG_ASSERT( ct >= 0 );
-
-            queue = next;
-        }
-
-        __kmp_release_lock(& curr_queue->tq_link_lck, global_tid);
-    }
-}
-
-/*
- * Starting from indicated queue, proceed downward through tree and
- * remove all taskqs assuming all are finished and
- * assuming NO other threads are executing at this point.
- */ - -static void -__kmp_remove_all_child_taskq( kmp_taskq_t *tq, kmp_int32 global_tid, kmpc_task_queue_t *queue ) -{ - kmpc_task_queue_t *next_child; - - queue = (kmpc_task_queue_t *) queue->tq_first_child; - - while (queue != NULL) { - __kmp_remove_all_child_taskq ( tq, global_tid, queue ); - - next_child = queue->tq_next_child; - queue->tq_flags |= TQF_DEALLOCATED; - __kmp_remove_queue_from_tree ( tq, global_tid, queue, FALSE ); - queue = next_child; - } -} - -static void -__kmp_execute_task_from_queue( kmp_taskq_t *tq, ident_t *loc, kmp_int32 global_tid, kmpc_thunk_t *thunk, int in_parallel ) -{ - kmpc_task_queue_t *queue = thunk->th.th_shareds->sv_queue; - kmp_int32 tid = __kmp_tid_from_gtid( global_tid ); - - KF_TRACE(100, ("After dequeueing this Task on (%d):\n", global_tid)); - KF_DUMP(100, __kmp_dump_thunk( tq, thunk, global_tid )); - KF_TRACE(100, ("Task Queue: %p looks like this (%d):\n", queue, global_tid)); - KF_DUMP(100, __kmp_dump_task_queue( tq, queue, global_tid )); - - /* - * For the taskq task, the curr_thunk pushes and pop pairs are set up as follows: - * - * happens exactly once: - * 1) __kmpc_taskq : push (if returning thunk only) - * 4) __kmpc_end_taskq_task : pop - * - * optionally happens *each* time taskq task is dequeued/enqueued: - * 2) __kmpc_taskq_task : pop - * 3) __kmp_execute_task_from_queue : push - * - * execution ordering: 1,(2,3)*,4 - */ - - if (!(thunk->th_flags & TQF_TASKQ_TASK)) { - kmp_int32 index = (queue == tq->tq_root) ? tid : 0; - thunk->th.th_shareds = (kmpc_shared_vars_t *) queue->tq_shareds[index].ai_data; - - if ( __kmp_env_consistency_check ) { - __kmp_push_workshare( global_tid, - (queue->tq_flags & TQF_IS_ORDERED) ? ct_task_ordered : ct_task, - queue->tq_loc ); - } - } - else { - if ( __kmp_env_consistency_check ) - __kmp_push_workshare( global_tid, ct_taskq, queue->tq_loc ); - } - - if (in_parallel) { - thunk->th_encl_thunk = tq->tq_curr_thunk[tid]; - tq->tq_curr_thunk[tid] = thunk; - - KF_DUMP( 200, __kmp_dump_thunk_stack( tq->tq_curr_thunk[tid], global_tid )); - } - - KF_TRACE( 50, ("Begin Executing Thunk %p from queue %p on (%d)\n", thunk, queue, global_tid)); - thunk->th_task (global_tid, thunk); - KF_TRACE( 50, ("End Executing Thunk %p from queue %p on (%d)\n", thunk, queue, global_tid)); - - if (!(thunk->th_flags & TQF_TASKQ_TASK)) { - if ( __kmp_env_consistency_check ) - __kmp_pop_workshare( global_tid, (queue->tq_flags & TQF_IS_ORDERED) ? ct_task_ordered : ct_task, - queue->tq_loc ); - - if (in_parallel) { - tq->tq_curr_thunk[tid] = thunk->th_encl_thunk; - thunk->th_encl_thunk = NULL; - KF_DUMP( 200, __kmp_dump_thunk_stack( tq->tq_curr_thunk[tid], global_tid )); - } - - if ((thunk->th_flags & TQF_IS_ORDERED) && in_parallel) { - __kmp_taskq_check_ordered(global_tid, thunk); - } - - __kmp_free_thunk (queue, thunk, in_parallel, global_tid); - - KF_TRACE(100, ("T#%d After freeing thunk: %p, TaskQ looks like this:\n", global_tid, thunk)); - KF_DUMP(100, __kmp_dump_task_queue( tq, queue, global_tid )); - - if (in_parallel) { - KMP_MB(); /* needed so thunk put on free list before outstanding thunk count is decremented */ - - KMP_DEBUG_ASSERT(queue->tq_th_thunks[tid].ai_data >= 1); - - KF_TRACE( 200, ("__kmp_execute_task_from_queue: T#%d has %d thunks in queue %p\n", - global_tid, queue->tq_th_thunks[tid].ai_data-1, queue)); - - queue->tq_th_thunks[tid].ai_data--; - - /* KMP_MB(); */ /* is MB really necessary ? 
*/ - } - - if (queue->tq.tq_parent != NULL && in_parallel) { - int ct; - __kmp_acquire_lock(& queue->tq.tq_parent->tq_link_lck, global_tid); - ct = --(queue->tq_ref_count); - __kmp_release_lock(& queue->tq.tq_parent->tq_link_lck, global_tid); - KMP_DEBUG_REF_CTS(("line %d gtid %d: Q %p dec %d\n", - __LINE__, global_tid, queue, ct)); - KMP_DEBUG_ASSERT( ct >= 0 ); - } - } -} - -/* --------------------------------------------------------------------------- */ - -/* starts a taskq; creates and returns a thunk for the taskq_task */ -/* also, returns pointer to shared vars for this thread in "shareds" arg */ - -kmpc_thunk_t * -__kmpc_taskq( ident_t *loc, kmp_int32 global_tid, kmpc_task_t taskq_task, - size_t sizeof_thunk, size_t sizeof_shareds, - kmp_int32 flags, kmpc_shared_vars_t **shareds ) -{ - int in_parallel; - kmp_int32 nslots, nthunks, nshareds, nproc; - kmpc_task_queue_t *new_queue, *curr_queue; - kmpc_thunk_t *new_taskq_thunk; - kmp_info_t *th; - kmp_team_t *team; - kmp_taskq_t *tq; - kmp_int32 tid; - - KE_TRACE( 10, ("__kmpc_taskq called (%d)\n", global_tid)); - - th = __kmp_threads[ global_tid ]; - team = th -> th.th_team; - tq = & team -> t.t_taskq; - nproc = team -> t.t_nproc; - tid = __kmp_tid_from_gtid( global_tid ); - - /* find out whether this is a parallel taskq or serialized one. */ - in_parallel = in_parallel_context( team ); - - if( ! tq->tq_root ) { - if (in_parallel) { - /* Vector ORDERED SECTION to taskq version */ - th->th.th_dispatch->th_deo_fcn = __kmp_taskq_eo; - - /* Vector ORDERED SECTION to taskq version */ - th->th.th_dispatch->th_dxo_fcn = __kmp_taskq_xo; - } - - if (in_parallel) { - /* This shouldn't be a barrier region boundary, it will confuse the user. */ - /* Need the boundary to be at the end taskq instead. */ - if ( __kmp_barrier( bs_plain_barrier, global_tid, TRUE, 0, NULL, NULL )) { - /* Creating the active root queue, and we are not the master thread. */ - /* The master thread below created the queue and tasks have been */ - /* enqueued, and the master thread released this barrier. This */ - /* worker thread can now proceed and execute tasks. See also the */ - /* TQF_RELEASE_WORKERS which is used to handle this case. */ - - *shareds = (kmpc_shared_vars_t *) tq->tq_root->tq_shareds[tid].ai_data; - - KE_TRACE( 10, ("__kmpc_taskq return (%d)\n", global_tid)); - - return NULL; - } - } - - /* master thread only executes this code */ - - if( tq->tq_curr_thunk_capacity < nproc ) { - if(tq->tq_curr_thunk) - __kmp_free(tq->tq_curr_thunk); - else { - /* only need to do this once at outer level, i.e. when tq_curr_thunk is still NULL */ - __kmp_init_lock( & tq->tq_freelist_lck ); - } - - tq->tq_curr_thunk = (kmpc_thunk_t **) __kmp_allocate( nproc * sizeof(kmpc_thunk_t *) ); - tq -> tq_curr_thunk_capacity = nproc; - } - - if (in_parallel) - tq->tq_global_flags = TQF_RELEASE_WORKERS; - } - - /* dkp: in future, if flags & TQF_HEURISTICS, will choose nslots based */ - /* on some heuristics (e.g., depth of queue nesting?). */ - - nslots = (in_parallel) ? (2 * nproc) : 1; - - /* There must be nproc * __KMP_TASKQ_THUNKS_PER_TH extra slots for pending */ - /* jobs being executed by other threads, and one extra for taskq slot */ - - nthunks = (in_parallel) ? (nslots + (nproc * __KMP_TASKQ_THUNKS_PER_TH) + 1) : nslots + 2; - - /* Only the root taskq gets a per-thread array of shareds. */ - /* The rest of the taskq's only get one copy of the shared vars. */ - - nshareds = ( !tq->tq_root && in_parallel) ? 
nproc : 1; - - /* create overall queue data structure and its components that require allocation */ - - new_queue = __kmp_alloc_taskq ( tq, in_parallel, nslots, nthunks, nshareds, nproc, - sizeof_thunk, sizeof_shareds, &new_taskq_thunk, global_tid ); - - /* rest of new_queue initializations */ - - new_queue->tq_flags = flags & TQF_INTERFACE_FLAGS; - - if (in_parallel) { - new_queue->tq_tasknum_queuing = 0; - new_queue->tq_tasknum_serving = 0; - new_queue->tq_flags |= TQF_PARALLEL_CONTEXT; - } - - new_queue->tq_taskq_slot = NULL; - new_queue->tq_nslots = nslots; - new_queue->tq_hiwat = HIGH_WATER_MARK (nslots); - new_queue->tq_nfull = 0; - new_queue->tq_head = 0; - new_queue->tq_tail = 0; - new_queue->tq_loc = loc; - - if ((new_queue->tq_flags & TQF_IS_ORDERED) && in_parallel) { - /* prepare to serve the first-queued task's ORDERED directive */ - new_queue->tq_tasknum_serving = 1; - - /* Vector ORDERED SECTION to taskq version */ - th->th.th_dispatch->th_deo_fcn = __kmp_taskq_eo; - - /* Vector ORDERED SECTION to taskq version */ - th->th.th_dispatch->th_dxo_fcn = __kmp_taskq_xo; - } - - /* create a new thunk for the taskq_task in the new_queue */ - *shareds = (kmpc_shared_vars_t *) new_queue->tq_shareds[0].ai_data; - - new_taskq_thunk->th.th_shareds = *shareds; - new_taskq_thunk->th_task = taskq_task; - new_taskq_thunk->th_flags = new_queue->tq_flags | TQF_TASKQ_TASK; - new_taskq_thunk->th_status = 0; - - KMP_DEBUG_ASSERT (new_taskq_thunk->th_flags & TQF_TASKQ_TASK); - - /* KMP_MB(); */ /* make sure these inits complete before threads start using this queue (necessary?) */ - - /* insert the new task queue into the tree, but only after all fields initialized */ - - if (in_parallel) { - if( ! tq->tq_root ) { - new_queue->tq.tq_parent = NULL; - new_queue->tq_first_child = NULL; - new_queue->tq_next_child = NULL; - new_queue->tq_prev_child = NULL; - new_queue->tq_ref_count = 1; - tq->tq_root = new_queue; - } - else { - curr_queue = tq->tq_curr_thunk[tid]->th.th_shareds->sv_queue; - new_queue->tq.tq_parent = curr_queue; - new_queue->tq_first_child = NULL; - new_queue->tq_prev_child = NULL; - new_queue->tq_ref_count = 1; /* for this the thread that built the queue */ - - KMP_DEBUG_REF_CTS(("line %d gtid %d: Q %p alloc %d\n", - __LINE__, global_tid, new_queue, new_queue->tq_ref_count)); - - __kmp_acquire_lock(& curr_queue->tq_link_lck, global_tid); - - KMP_MB(); /* make sure data structures are in consistent state before querying them */ - /* Seems to work fine without this call for digital/alpha, needed for IBM/RS6000 */ - - new_queue->tq_next_child = (struct kmpc_task_queue_t *) curr_queue->tq_first_child; - - if (curr_queue->tq_first_child != NULL) - curr_queue->tq_first_child->tq_prev_child = new_queue; - - curr_queue->tq_first_child = new_queue; - - __kmp_release_lock(& curr_queue->tq_link_lck, global_tid); - } - - /* set up thunk stack only after code that determines curr_queue above */ - new_taskq_thunk->th_encl_thunk = tq->tq_curr_thunk[tid]; - tq->tq_curr_thunk[tid] = new_taskq_thunk; - - KF_DUMP( 200, __kmp_dump_thunk_stack( tq->tq_curr_thunk[tid], global_tid )); - } - else { - new_taskq_thunk->th_encl_thunk = 0; - new_queue->tq.tq_parent = NULL; - new_queue->tq_first_child = NULL; - new_queue->tq_next_child = NULL; - new_queue->tq_prev_child = NULL; - new_queue->tq_ref_count = 1; - } - -#ifdef KMP_DEBUG - KF_TRACE(150, ("Creating TaskQ Task on (%d):\n", global_tid)); - KF_DUMP(150, __kmp_dump_thunk( tq, new_taskq_thunk, global_tid )); - - if (in_parallel) { - KF_TRACE(25, ("After TaskQ 
at %p Creation on (%d):\n", new_queue, global_tid)); - } else { - KF_TRACE(25, ("After Serial TaskQ at %p Creation on (%d):\n", new_queue, global_tid)); - } - - KF_DUMP(25, __kmp_dump_task_queue( tq, new_queue, global_tid )); - - if (in_parallel) { - KF_DUMP(50, __kmp_dump_task_queue_tree( tq, tq->tq_root, global_tid )); - } -#endif /* KMP_DEBUG */ - - if ( __kmp_env_consistency_check ) - __kmp_push_workshare( global_tid, ct_taskq, new_queue->tq_loc ); - - KE_TRACE( 10, ("__kmpc_taskq return (%d)\n", global_tid)); - - return new_taskq_thunk; -} - - -/* ends a taskq; last thread out destroys the queue */ - -void -__kmpc_end_taskq(ident_t *loc, kmp_int32 global_tid, kmpc_thunk_t *taskq_thunk) -{ -#ifdef KMP_DEBUG - kmp_int32 i; -#endif - kmp_taskq_t *tq; - int in_parallel; - kmp_info_t *th; - kmp_int32 is_outermost; - kmpc_task_queue_t *queue; - kmpc_thunk_t *thunk; - int nproc; - - KE_TRACE( 10, ("__kmpc_end_taskq called (%d)\n", global_tid)); - - tq = & __kmp_threads[global_tid] -> th.th_team -> t.t_taskq; - nproc = __kmp_threads[global_tid] -> th.th_team -> t.t_nproc; - - /* For the outermost taskq only, all but one thread will have taskq_thunk == NULL */ - queue = (taskq_thunk == NULL) ? tq->tq_root : taskq_thunk->th.th_shareds->sv_queue; - - KE_TRACE( 50, ("__kmpc_end_taskq queue=%p (%d) \n", queue, global_tid)); - is_outermost = (queue == tq->tq_root); - in_parallel = (queue->tq_flags & TQF_PARALLEL_CONTEXT); - - if (in_parallel) { - kmp_uint32 spins; - - /* this is just a safeguard to release the waiting threads if */ - /* the outermost taskq never queues a task */ - - if (is_outermost && (KMP_MASTER_GTID( global_tid ))) { - if( tq->tq_global_flags & TQF_RELEASE_WORKERS ) { - /* no lock needed, workers are still in spin mode */ - tq->tq_global_flags &= ~TQF_RELEASE_WORKERS; - - __kmp_end_split_barrier( bs_plain_barrier, global_tid ); - } - } - - /* keep dequeueing work until all tasks are queued and dequeued */ - - do { - /* wait until something is available to dequeue */ - KMP_INIT_YIELD(spins); - - while ( (queue->tq_nfull == 0) - && (queue->tq_taskq_slot == NULL) - && (! __kmp_taskq_has_any_children(queue) ) - && (! (queue->tq_flags & TQF_ALL_TASKS_QUEUED) ) - ) { - KMP_YIELD_WHEN( TRUE, spins ); - } - - /* check to see if we can execute tasks in the queue */ - while ( ( (queue->tq_nfull != 0) || (queue->tq_taskq_slot != NULL) ) - && (thunk = __kmp_find_task_in_queue(global_tid, queue)) != NULL - ) { - KF_TRACE(50, ("Found thunk: %p in primary queue %p (%d)\n", thunk, queue, global_tid)); - __kmp_execute_task_from_queue( tq, loc, global_tid, thunk, in_parallel ); - } - - /* see if work found can be found in a descendant queue */ - if ( (__kmp_taskq_has_any_children(queue)) - && (thunk = __kmp_find_task_in_descendant_queue(global_tid, queue)) != NULL - ) { - - KF_TRACE(50, ("Stole thunk: %p in descendant queue: %p while waiting in queue: %p (%d)\n", - thunk, thunk->th.th_shareds->sv_queue, queue, global_tid )); - - __kmp_execute_task_from_queue( tq, loc, global_tid, thunk, in_parallel ); - } - - } while ( (! 
(queue->tq_flags & TQF_ALL_TASKS_QUEUED)) - || (queue->tq_nfull != 0) - ); - - KF_TRACE(50, ("All tasks queued and dequeued in queue: %p (%d)\n", queue, global_tid)); - - /* wait while all tasks are not finished and more work found - in descendant queues */ - - while ( (!__kmp_taskq_tasks_finished(queue)) - && (thunk = __kmp_find_task_in_descendant_queue(global_tid, queue)) != NULL - ) { - - KF_TRACE(50, ("Stole thunk: %p in descendant queue: %p while waiting in queue: %p (%d)\n", - thunk, thunk->th.th_shareds->sv_queue, queue, global_tid)); - - __kmp_execute_task_from_queue( tq, loc, global_tid, thunk, in_parallel ); - } - - KF_TRACE(50, ("No work found in descendent queues or all work finished in queue: %p (%d)\n", queue, global_tid)); - - if (!is_outermost) { - /* need to return if NOWAIT present and not outermost taskq */ - - if (queue->tq_flags & TQF_IS_NOWAIT) { - __kmp_acquire_lock(& queue->tq.tq_parent->tq_link_lck, global_tid); - queue->tq_ref_count--; - KMP_DEBUG_ASSERT( queue->tq_ref_count >= 0 ); - __kmp_release_lock(& queue->tq.tq_parent->tq_link_lck, global_tid); - - KE_TRACE( 10, ("__kmpc_end_taskq return for nowait case (%d)\n", global_tid)); - - return; - } - - __kmp_find_and_remove_finished_child_taskq( tq, global_tid, queue ); - - /* WAIT until all tasks are finished and no child queues exist before proceeding */ - KMP_INIT_YIELD(spins); - - while (!__kmp_taskq_tasks_finished(queue) || __kmp_taskq_has_any_children(queue)) { - thunk = __kmp_find_task_in_ancestor_queue( tq, global_tid, queue ); - - if (thunk != NULL) { - KF_TRACE(50, ("Stole thunk: %p in ancestor queue: %p while waiting in queue: %p (%d)\n", - thunk, thunk->th.th_shareds->sv_queue, queue, global_tid)); - __kmp_execute_task_from_queue( tq, loc, global_tid, thunk, in_parallel ); - } - - KMP_YIELD_WHEN( thunk == NULL, spins ); - - __kmp_find_and_remove_finished_child_taskq( tq, global_tid, queue ); - } - - __kmp_acquire_lock(& queue->tq_queue_lck, global_tid); - if ( !(queue->tq_flags & TQF_DEALLOCATED) ) { - queue->tq_flags |= TQF_DEALLOCATED; - } - __kmp_release_lock(& queue->tq_queue_lck, global_tid); - - /* only the allocating thread can deallocate the queue */ - if (taskq_thunk != NULL) { - __kmp_remove_queue_from_tree( tq, global_tid, queue, TRUE ); - } - - KE_TRACE( 10, ("__kmpc_end_taskq return for non_outermost queue, wait case (%d)\n", global_tid)); - - return; - } - - /* Outermost Queue: steal work from descendants until all tasks are finished */ - - KMP_INIT_YIELD(spins); - - while (!__kmp_taskq_tasks_finished(queue)) { - thunk = __kmp_find_task_in_descendant_queue(global_tid, queue); - - if (thunk != NULL) { - KF_TRACE(50, ("Stole thunk: %p in descendant queue: %p while waiting in queue: %p (%d)\n", - thunk, thunk->th.th_shareds->sv_queue, queue, global_tid)); - - __kmp_execute_task_from_queue( tq, loc, global_tid, thunk, in_parallel ); - } - - KMP_YIELD_WHEN( thunk == NULL, spins ); - } - - /* Need this barrier to prevent destruction of queue before threads have all executed above code */ - /* This may need to be done earlier when NOWAIT is implemented for the outermost level */ - - if ( !__kmp_barrier( bs_plain_barrier, global_tid, TRUE, 0, NULL, NULL )) { - /* the queue->tq_flags & TQF_IS_NOWAIT case is not yet handled here; */ - /* for right now, everybody waits, and the master thread destroys the */ - /* remaining queues. 
*/ - - __kmp_remove_all_child_taskq( tq, global_tid, queue ); - - /* Now destroy the root queue */ - KF_TRACE(100, ("T#%d Before Deletion of top-level TaskQ at %p:\n", global_tid, queue )); - KF_DUMP(100, __kmp_dump_task_queue( tq, queue, global_tid )); - -#ifdef KMP_DEBUG - /* the root queue entry */ - KMP_DEBUG_ASSERT ((queue->tq.tq_parent == NULL) && (queue->tq_next_child == NULL)); - - /* children must all be gone by now because of barrier above */ - KMP_DEBUG_ASSERT (queue->tq_first_child == NULL); - - for (i=0; i<nproc; i++) { - KMP_DEBUG_ASSERT (queue->tq_th_thunks[i].ai_data == 0); - } - - for (i=0, thunk=queue->tq_free_thunks; thunk != NULL; i++, thunk=thunk->th.th_next_free); - - KMP_DEBUG_ASSERT (i == queue->tq_nslots + (nproc * __KMP_TASKQ_THUNKS_PER_TH)); - - for (i = 0; i < nproc; i++) { - KMP_DEBUG_ASSERT( ! tq->tq_curr_thunk[i] ); - } -#endif - /* unlink the root queue entry */ - tq -> tq_root = NULL; - - /* release storage for root queue entry */ - KF_TRACE(50, ("After Deletion of top-level TaskQ at %p on (%d):\n", queue, global_tid)); - - queue->tq_flags |= TQF_DEALLOCATED; - __kmp_free_taskq ( tq, queue, in_parallel, global_tid ); - - KF_DUMP(50, __kmp_dump_task_queue_tree( tq, tq->tq_root, global_tid )); - - /* release the workers now that the data structures are up to date */ - __kmp_end_split_barrier( bs_plain_barrier, global_tid ); - } - - th = __kmp_threads[ global_tid ]; - - /* Reset ORDERED SECTION to parallel version */ - th->th.th_dispatch->th_deo_fcn = 0; - - /* Reset ORDERED SECTION to parallel version */ - th->th.th_dispatch->th_dxo_fcn = 0; - } - else { - /* in serial execution context, dequeue the last task */ - /* and execute it, if there were any tasks encountered */ - - if (queue->tq_nfull > 0) { - KMP_DEBUG_ASSERT(queue->tq_nfull == 1); - - thunk = __kmp_dequeue_task(global_tid, queue, in_parallel); - - if (queue->tq_flags & TQF_IS_LAST_TASK) { - /* TQF_IS_LASTPRIVATE, one thing in queue, __kmpc_end_taskq_task() */ - /* has been run so this is last task, run with TQF_IS_LAST_TASK so */ - /* instrumentation does copy-out. */ - - /* no need for test_then_or call since already locked */ - thunk->th_flags |= TQF_IS_LAST_TASK; - } - - KF_TRACE(50, ("T#%d found thunk: %p in serial queue: %p\n", global_tid, thunk, queue)); - - __kmp_execute_task_from_queue( tq, loc, global_tid, thunk, in_parallel ); - } - - /* destroy the unattached serial queue now that there is no more work to do */ - KF_TRACE(100, ("Before Deletion of Serialized TaskQ at %p on (%d):\n", queue, global_tid)); - KF_DUMP(100, __kmp_dump_task_queue( tq, queue, global_tid )); - -#ifdef KMP_DEBUG - i = 0; - for (thunk=queue->tq_free_thunks; thunk != NULL; thunk=thunk->th.th_next_free) - ++i; - KMP_DEBUG_ASSERT (i == queue->tq_nslots + 1); -#endif - /* release storage for unattached serial queue */ - KF_TRACE(50, ("Serialized TaskQ at %p deleted on (%d).\n", queue, global_tid)); - - queue->tq_flags |= TQF_DEALLOCATED; - __kmp_free_taskq ( tq, queue, in_parallel, global_tid ); - } - - KE_TRACE( 10, ("__kmpc_end_taskq return (%d)\n", global_tid)); -} - -/* Enqueues a task for thunk previously created by __kmpc_task_buffer. 
*/ -/* Returns nonzero if just filled up queue */ - -kmp_int32 -__kmpc_task(ident_t *loc, kmp_int32 global_tid, kmpc_thunk_t *thunk) -{ - kmp_int32 ret; - kmpc_task_queue_t *queue; - int in_parallel; - kmp_taskq_t *tq; - - KE_TRACE( 10, ("__kmpc_task called (%d)\n", global_tid)); - - KMP_DEBUG_ASSERT (!(thunk->th_flags & TQF_TASKQ_TASK)); /* thunk->th_task is a regular task */ - - tq = &__kmp_threads[global_tid] -> th.th_team -> t.t_taskq; - queue = thunk->th.th_shareds->sv_queue; - in_parallel = (queue->tq_flags & TQF_PARALLEL_CONTEXT); - - if (in_parallel && (thunk->th_flags & TQF_IS_ORDERED)) - thunk->th_tasknum = ++queue->tq_tasknum_queuing; - - /* For serial execution dequeue the preceding task and execute it, if one exists */ - /* This cannot be the last task. That one is handled in __kmpc_end_taskq */ - - if (!in_parallel && queue->tq_nfull > 0) { - kmpc_thunk_t *prev_thunk; - - KMP_DEBUG_ASSERT(queue->tq_nfull == 1); - - prev_thunk = __kmp_dequeue_task(global_tid, queue, in_parallel); - - KF_TRACE(50, ("T#%d found thunk: %p in serial queue: %p\n", global_tid, prev_thunk, queue)); - - __kmp_execute_task_from_queue( tq, loc, global_tid, prev_thunk, in_parallel ); - } - - /* The instrumentation sequence is: __kmpc_task_buffer(), initialize private */ - /* variables, __kmpc_task(). The __kmpc_task_buffer routine checks that the */ - /* task queue is not full and allocates a thunk (which is then passed to */ - /* __kmpc_task()). So, the enqueue below should never fail due to a full queue. */ - - KF_TRACE(100, ("After enqueueing this Task on (%d):\n", global_tid)); - KF_DUMP(100, __kmp_dump_thunk( tq, thunk, global_tid )); - - ret = __kmp_enqueue_task ( tq, global_tid, queue, thunk, in_parallel ); - - KF_TRACE(100, ("Task Queue looks like this on (%d):\n", global_tid)); - KF_DUMP(100, __kmp_dump_task_queue( tq, queue, global_tid )); - - KE_TRACE( 10, ("__kmpc_task return (%d)\n", global_tid)); - - return ret; -} - -/* enqueues a taskq_task for thunk previously created by __kmpc_taskq */ -/* this should never be called unless in a parallel context */ - -void -__kmpc_taskq_task(ident_t *loc, kmp_int32 global_tid, kmpc_thunk_t *thunk, kmp_int32 status) -{ - kmpc_task_queue_t *queue; - kmp_taskq_t *tq = &__kmp_threads[global_tid] -> th.th_team -> t.t_taskq; - int tid = __kmp_tid_from_gtid( global_tid ); - - KE_TRACE( 10, ("__kmpc_taskq_task called (%d)\n", global_tid)); - KF_TRACE(100, ("TaskQ Task argument thunk on (%d):\n", global_tid)); - KF_DUMP(100, __kmp_dump_thunk( tq, thunk, global_tid )); - - queue = thunk->th.th_shareds->sv_queue; - - if ( __kmp_env_consistency_check ) - __kmp_pop_workshare( global_tid, ct_taskq, loc ); - - /* thunk->th_task is the taskq_task */ - KMP_DEBUG_ASSERT (thunk->th_flags & TQF_TASKQ_TASK); - - /* not supposed to call __kmpc_taskq_task if it's already enqueued */ - KMP_DEBUG_ASSERT (queue->tq_taskq_slot == NULL); - - /* dequeue taskq thunk from curr_thunk stack */ - tq->tq_curr_thunk[tid] = thunk->th_encl_thunk; - thunk->th_encl_thunk = NULL; - - KF_DUMP( 200, __kmp_dump_thunk_stack( tq->tq_curr_thunk[tid], global_tid )); - - thunk->th_status = status; - - KMP_MB(); /* flush thunk->th_status before taskq_task enqueued to avoid race condition */ - - /* enqueue taskq_task in thunk into special slot in queue */ - /* GEH - probably don't need to lock taskq slot since only one */ - /* thread enqueues & already a lock set at dequeue point */ - - queue->tq_taskq_slot = thunk; - - KE_TRACE( 10, ("__kmpc_taskq_task return (%d)\n", global_tid)); -} - -/* ends a 
taskq_task; done generating tasks */ - -void -__kmpc_end_taskq_task(ident_t *loc, kmp_int32 global_tid, kmpc_thunk_t *thunk) -{ - kmp_taskq_t *tq; - kmpc_task_queue_t *queue; - int in_parallel; - int tid; - - KE_TRACE( 10, ("__kmpc_end_taskq_task called (%d)\n", global_tid)); - - tq = &__kmp_threads[global_tid] -> th.th_team -> t.t_taskq; - queue = thunk->th.th_shareds->sv_queue; - in_parallel = (queue->tq_flags & TQF_PARALLEL_CONTEXT); - tid = __kmp_tid_from_gtid( global_tid ); - - if ( __kmp_env_consistency_check ) - __kmp_pop_workshare( global_tid, ct_taskq, loc ); - - if (in_parallel) { -#if KMP_ARCH_X86 || \ - KMP_ARCH_X86_64 - - KMP_TEST_THEN_OR32( &queue->tq_flags, (kmp_int32) TQF_ALL_TASKS_QUEUED ); -#else - { - __kmp_acquire_lock(& queue->tq_queue_lck, global_tid); - - KMP_MB(); /* make sure data structures are in consistent state before querying them */ - /* Seems to work fine without this call for digital/alpha, needed for IBM/RS6000 */ - - queue->tq_flags |= TQF_ALL_TASKS_QUEUED; - - __kmp_release_lock(& queue->tq_queue_lck, global_tid); - } -#endif - } - - if (thunk->th_flags & TQF_IS_LASTPRIVATE) { - /* Normally, __kmp_find_task_in_queue() refuses to schedule the last task in the */ - /* queue if TQF_IS_LASTPRIVATE so we can positively identify that last task */ - /* and run it with its TQF_IS_LAST_TASK bit turned on in th_flags. When */ - /* __kmpc_end_taskq_task() is called we are done generating all the tasks, so */ - /* we know the last one in the queue is the lastprivate task. Mark the queue */ - /* as having gotten to this state via tq_flags & TQF_IS_LAST_TASK; when that */ - /* task actually executes mark it via th_flags & TQF_IS_LAST_TASK (this th_flags */ - /* bit signals the instrumented code to do copy-outs after execution). */ - - if (! 
in_parallel) { - /* No synchronization needed for serial context */ - queue->tq_flags |= TQF_IS_LAST_TASK; - } - else { -#if KMP_ARCH_X86 || \ - KMP_ARCH_X86_64 - - KMP_TEST_THEN_OR32( &queue->tq_flags, (kmp_int32) TQF_IS_LAST_TASK ); -#else - { - __kmp_acquire_lock(& queue->tq_queue_lck, global_tid); - - KMP_MB(); /* make sure data structures are in consistent state before querying them */ - /* Seems to work without this call for digital/alpha, needed for IBM/RS6000 */ - - queue->tq_flags |= TQF_IS_LAST_TASK; - - __kmp_release_lock(& queue->tq_queue_lck, global_tid); - } -#endif - /* to prevent race condition where last task is dequeued but */ - /* flag isn't visible yet (not sure about this) */ - KMP_MB(); - } - } - - /* dequeue taskq thunk from curr_thunk stack */ - if (in_parallel) { - tq->tq_curr_thunk[tid] = thunk->th_encl_thunk; - thunk->th_encl_thunk = NULL; - - KF_DUMP( 200, __kmp_dump_thunk_stack( tq->tq_curr_thunk[tid], global_tid )); - } - - KE_TRACE( 10, ("__kmpc_end_taskq_task return (%d)\n", global_tid)); -} - -/* returns thunk for a regular task based on taskq_thunk */ -/* (__kmpc_taskq_task does the analogous thing for a TQF_TASKQ_TASK) */ - -kmpc_thunk_t * -__kmpc_task_buffer(ident_t *loc, kmp_int32 global_tid, kmpc_thunk_t *taskq_thunk, kmpc_task_t task) -{ - kmp_taskq_t *tq; - kmpc_task_queue_t *queue; - kmpc_thunk_t *new_thunk; - int in_parallel; - - KE_TRACE( 10, ("__kmpc_task_buffer called (%d)\n", global_tid)); - - KMP_DEBUG_ASSERT (taskq_thunk->th_flags & TQF_TASKQ_TASK); /* taskq_thunk->th_task is the taskq_task */ - - tq = &__kmp_threads[global_tid] -> th.th_team -> t.t_taskq; - queue = taskq_thunk->th.th_shareds->sv_queue; - in_parallel = (queue->tq_flags & TQF_PARALLEL_CONTEXT); - - /* The instrumentation sequence is: __kmpc_task_buffer(), initialize private */ - /* variables, __kmpc_task(). The __kmpc_task_buffer routine checks that the */ - /* task queue is not full and allocates a thunk (which is then passed to */ - /* __kmpc_task()). So, we can pre-allocate a thunk here assuming it will be */ - /* the next to be enqueued in __kmpc_task(). */ - - new_thunk = __kmp_alloc_thunk (queue, in_parallel, global_tid); - new_thunk->th.th_shareds = (kmpc_shared_vars_t *) queue->tq_shareds[0].ai_data; - new_thunk->th_encl_thunk = NULL; - new_thunk->th_task = task; - - /* GEH - shouldn't need to lock the read of tq_flags here */ - new_thunk->th_flags = queue->tq_flags & TQF_INTERFACE_FLAGS; - - new_thunk->th_status = 0; - - KMP_DEBUG_ASSERT (!(new_thunk->th_flags & TQF_TASKQ_TASK)); - - KF_TRACE(100, ("Creating Regular Task on (%d):\n", global_tid)); - KF_DUMP(100, __kmp_dump_thunk( tq, new_thunk, global_tid )); - - KE_TRACE( 10, ("__kmpc_task_buffer return (%d)\n", global_tid)); - - return new_thunk; -} - -/* --------------------------------------------------------------------------- */ Index: runtime/src/kmp_taskq.cpp =================================================================== --- runtime/src/kmp_taskq.cpp +++ runtime/src/kmp_taskq.cpp @@ -1,5 +1,5 @@ /* - * kmp_taskq.c -- TASKQ support for OpenMP. + * kmp_taskq.cpp -- TASKQ support for OpenMP. 
*/ Index: runtime/src/kmp_threadprivate.c =================================================================== --- runtime/src/kmp_threadprivate.c +++ runtime/src/kmp_threadprivate.c @@ -1,733 +0,0 @@ -/* - * kmp_threadprivate.c -- OpenMP threadprivate support library - */ - - -//===----------------------------------------------------------------------===// -// -// The LLVM Compiler Infrastructure -// -// This file is dual licensed under the MIT and the University of Illinois Open -// Source Licenses. See LICENSE.txt for details. -// -//===----------------------------------------------------------------------===// - - -#include "kmp.h" -#include "kmp_itt.h" -#include "kmp_i18n.h" - -/* ------------------------------------------------------------------------ */ -/* ------------------------------------------------------------------------ */ - -#define USE_CHECKS_COMMON - -#define KMP_INLINE_SUBR 1 - - -/* ------------------------------------------------------------------------ */ -/* ------------------------------------------------------------------------ */ - -void -kmp_threadprivate_insert_private_data( int gtid, void *pc_addr, void *data_addr, size_t pc_size ); -struct private_common * -kmp_threadprivate_insert( int gtid, void *pc_addr, void *data_addr, size_t pc_size ); - -struct shared_table __kmp_threadprivate_d_table; - -/* ------------------------------------------------------------------------ */ -/* ------------------------------------------------------------------------ */ - -static -#ifdef KMP_INLINE_SUBR -__forceinline -#endif -struct private_common * -__kmp_threadprivate_find_task_common( struct common_table *tbl, int gtid, void *pc_addr ) - -{ - struct private_common *tn; - -#ifdef KMP_TASK_COMMON_DEBUG - KC_TRACE( 10, ( "__kmp_threadprivate_find_task_common: thread#%d, called with address %p\n", - gtid, pc_addr ) ); - dump_list(); -#endif - - for (tn = tbl->data[ KMP_HASH(pc_addr) ]; tn; tn = tn->next) { - if (tn->gbl_addr == pc_addr) { -#ifdef KMP_TASK_COMMON_DEBUG - KC_TRACE( 10, ( "__kmp_threadprivate_find_task_common: thread#%d, found node %p on list\n", - gtid, pc_addr ) ); -#endif - return tn; - } - } - return 0; -} - -static -#ifdef KMP_INLINE_SUBR -__forceinline -#endif -struct shared_common * -__kmp_find_shared_task_common( struct shared_table *tbl, int gtid, void *pc_addr ) -{ - struct shared_common *tn; - - for (tn = tbl->data[ KMP_HASH(pc_addr) ]; tn; tn = tn->next) { - if (tn->gbl_addr == pc_addr) { -#ifdef KMP_TASK_COMMON_DEBUG - KC_TRACE( 10, ( "__kmp_find_shared_task_common: thread#%d, found node %p on list\n", - gtid, pc_addr ) ); -#endif - return tn; - } - } - return 0; -} - - -/* - * Create a template for the data initialized storage. - * Either the template is NULL indicating zero fill, - * or the template is a copy of the original data. - */ - -static struct private_data * -__kmp_init_common_data( void *pc_addr, size_t pc_size ) -{ - struct private_data *d; - size_t i; - char *p; - - d = (struct private_data *) __kmp_allocate( sizeof( struct private_data ) ); -/* - d->data = 0; // AC: commented out because __kmp_allocate zeroes the memory - d->next = 0; -*/ - d->size = pc_size; - d->more = 1; - - p = (char*)pc_addr; - - for (i = pc_size; i > 0; --i) { - if (*p++ != '\0') { - d->data = __kmp_allocate( pc_size ); - KMP_MEMCPY( d->data, pc_addr, pc_size ); - break; - } - } - - return d; -} - -/* - * Initialize the data area from the template. 
- */ - -static void -__kmp_copy_common_data( void *pc_addr, struct private_data *d ) -{ - char *addr = (char *) pc_addr; - int i, offset; - - for (offset = 0; d != 0; d = d->next) { - for (i = d->more; i > 0; --i) { - if (d->data == 0) - memset( & addr[ offset ], '\0', d->size ); - else - KMP_MEMCPY( & addr[ offset ], d->data, d->size ); - offset += d->size; - } - } -} - -/* ------------------------------------------------------------------------ */ -/* ------------------------------------------------------------------------ */ - -/* we are called from __kmp_serial_initialize() with __kmp_initz_lock held. */ -void -__kmp_common_initialize( void ) -{ - if( ! TCR_4(__kmp_init_common) ) { - int q; -#ifdef KMP_DEBUG - int gtid; -#endif - - __kmp_threadpriv_cache_list = NULL; - -#ifdef KMP_DEBUG - /* verify the uber masters were initialized */ - for(gtid = 0 ; gtid < __kmp_threads_capacity; gtid++ ) - if( __kmp_root[gtid] ) { - KMP_DEBUG_ASSERT( __kmp_root[gtid]->r.r_uber_thread ); - for ( q = 0; q< KMP_HASH_TABLE_SIZE; ++q) - KMP_DEBUG_ASSERT( !__kmp_root[gtid]->r.r_uber_thread->th.th_pri_common->data[q] ); -/* __kmp_root[ gitd ]-> r.r_uber_thread -> th.th_pri_common -> data[ q ] = 0;*/ - } -#endif /* KMP_DEBUG */ - - for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q) - __kmp_threadprivate_d_table.data[ q ] = 0; - - TCW_4(__kmp_init_common, TRUE); - } -} - -/* Call all destructors for threadprivate data belonging to all threads. - Currently unused! */ -void -__kmp_common_destroy( void ) -{ - if( TCR_4(__kmp_init_common) ) { - int q; - - TCW_4(__kmp_init_common, FALSE); - - for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q) { - int gtid; - struct private_common *tn; - struct shared_common *d_tn; - - /* C++ destructors need to be called once per thread before exiting */ - /* don't call destructors for master thread though unless we used copy constructor */ - - for (d_tn = __kmp_threadprivate_d_table.data[ q ]; d_tn; d_tn = d_tn->next) { - if (d_tn->is_vec) { - if (d_tn->dt.dtorv != 0) { - for (gtid = 0; gtid < __kmp_all_nth; ++gtid) { - if( __kmp_threads[gtid] ) { - if( (__kmp_foreign_tp) ? (! KMP_INITIAL_GTID (gtid)) : - (! KMP_UBER_GTID (gtid)) ) { - tn = __kmp_threadprivate_find_task_common( __kmp_threads[ gtid ]->th.th_pri_common, - gtid, d_tn->gbl_addr ); - if (tn) { - (*d_tn->dt.dtorv) (tn->par_addr, d_tn->vec_len); - } - } - } - } - if (d_tn->obj_init != 0) { - (*d_tn->dt.dtorv) (d_tn->obj_init, d_tn->vec_len); - } - } - } else { - if (d_tn->dt.dtor != 0) { - for (gtid = 0; gtid < __kmp_all_nth; ++gtid) { - if( __kmp_threads[gtid] ) { - if( (__kmp_foreign_tp) ? (! KMP_INITIAL_GTID (gtid)) : - (! KMP_UBER_GTID (gtid)) ) { - tn = __kmp_threadprivate_find_task_common( __kmp_threads[ gtid ]->th.th_pri_common, - gtid, d_tn->gbl_addr ); - if (tn) { - (*d_tn->dt.dtor) (tn->par_addr); - } - } - } - } - if (d_tn->obj_init != 0) { - (*d_tn->dt.dtor) (d_tn->obj_init); - } - } - } - } - __kmp_threadprivate_d_table.data[ q ] = 0; - } - } -} - -/* Call all destructors for threadprivate data belonging to this thread */ -void -__kmp_common_destroy_gtid( int gtid ) -{ - struct private_common *tn; - struct shared_common *d_tn; - - KC_TRACE( 10, ("__kmp_common_destroy_gtid: T#%d called\n", gtid ) ); - if( (__kmp_foreign_tp) ? (! KMP_INITIAL_GTID (gtid)) : - (! 
KMP_UBER_GTID (gtid)) ) { - - if( TCR_4(__kmp_init_common) ) { - - /* Cannot do this here since not all threads have destroyed their data */ - /* TCW_4(__kmp_init_common, FALSE); */ - - for (tn = __kmp_threads[ gtid ]->th.th_pri_head; tn; tn = tn->link) { - - d_tn = __kmp_find_shared_task_common( &__kmp_threadprivate_d_table, - gtid, tn->gbl_addr ); - - KMP_DEBUG_ASSERT( d_tn ); - - if (d_tn->is_vec) { - if (d_tn->dt.dtorv != 0) { - (void) (*d_tn->dt.dtorv) (tn->par_addr, d_tn->vec_len); - } - if (d_tn->obj_init != 0) { - (void) (*d_tn->dt.dtorv) (d_tn->obj_init, d_tn->vec_len); - } - } else { - if (d_tn->dt.dtor != 0) { - (void) (*d_tn->dt.dtor) (tn->par_addr); - } - if (d_tn->obj_init != 0) { - (void) (*d_tn->dt.dtor) (d_tn->obj_init); - } - } - } - KC_TRACE( 30, ("__kmp_common_destroy_gtid: T#%d threadprivate destructors complete\n", - gtid ) ); - } - } -} - -/* ------------------------------------------------------------------------ */ -/* ------------------------------------------------------------------------ */ - -#ifdef KMP_TASK_COMMON_DEBUG -static void -dump_list( void ) -{ - int p, q; - - for (p = 0; p < __kmp_all_nth; ++p) { - if( !__kmp_threads[p] ) continue; - for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q) { - if (__kmp_threads[ p ]->th.th_pri_common->data[ q ]) { - struct private_common *tn; - - KC_TRACE( 10, ( "\tdump_list: gtid:%d addresses\n", p ) ); - - for (tn = __kmp_threads[ p ]->th.th_pri_common->data[ q ]; tn; tn = tn->next) { - KC_TRACE( 10, ( "\tdump_list: THREADPRIVATE: Serial %p -> Parallel %p\n", - tn->gbl_addr, tn->par_addr ) ); - } - } - } - } -} -#endif /* KMP_TASK_COMMON_DEBUG */ - - -/* - * NOTE: this routine is to be called only from the serial part of the program. - */ - -void -kmp_threadprivate_insert_private_data( int gtid, void *pc_addr, void *data_addr, size_t pc_size ) -{ - struct shared_common **lnk_tn, *d_tn; - KMP_DEBUG_ASSERT( __kmp_threads[ gtid ] && - __kmp_threads[ gtid ] -> th.th_root -> r.r_active == 0 ); - - d_tn = __kmp_find_shared_task_common( &__kmp_threadprivate_d_table, - gtid, pc_addr ); - - if (d_tn == 0) { - d_tn = (struct shared_common *) __kmp_allocate( sizeof( struct shared_common ) ); - - d_tn->gbl_addr = pc_addr; - d_tn->pod_init = __kmp_init_common_data( data_addr, pc_size ); -/* - d_tn->obj_init = 0; // AC: commented out because __kmp_allocate zeroes the memory - d_tn->ct.ctor = 0; - d_tn->cct.cctor = 0;; - d_tn->dt.dtor = 0; - d_tn->is_vec = FALSE; - d_tn->vec_len = 0L; -*/ - d_tn->cmn_size = pc_size; - - __kmp_acquire_lock( &__kmp_global_lock, gtid ); - - lnk_tn = &(__kmp_threadprivate_d_table.data[ KMP_HASH(pc_addr) ]); - - d_tn->next = *lnk_tn; - *lnk_tn = d_tn; - - __kmp_release_lock( &__kmp_global_lock, gtid ); - } -} - -struct private_common * -kmp_threadprivate_insert( int gtid, void *pc_addr, void *data_addr, size_t pc_size ) -{ - struct private_common *tn, **tt; - struct shared_common *d_tn; - - /* +++++++++ START OF CRITICAL SECTION +++++++++ */ - - __kmp_acquire_lock( & __kmp_global_lock, gtid ); - - tn = (struct private_common *) __kmp_allocate( sizeof (struct private_common) ); - - tn->gbl_addr = pc_addr; - - d_tn = __kmp_find_shared_task_common( &__kmp_threadprivate_d_table, - gtid, pc_addr ); /* Only the MASTER data table exists. */ - - if (d_tn != 0) { - /* This threadprivate variable has already been seen. 
*/ - - if ( d_tn->pod_init == 0 && d_tn->obj_init == 0 ) { - d_tn->cmn_size = pc_size; - - if (d_tn->is_vec) { - if (d_tn->ct.ctorv != 0) { - /* Construct from scratch so no prototype exists */ - d_tn->obj_init = 0; - } - else if (d_tn->cct.cctorv != 0) { - /* Now data initialize the prototype since it was previously registered */ - d_tn->obj_init = (void *) __kmp_allocate( d_tn->cmn_size ); - (void) (*d_tn->cct.cctorv) (d_tn->obj_init, pc_addr, d_tn->vec_len); - } - else { - d_tn->pod_init = __kmp_init_common_data( data_addr, d_tn->cmn_size ); - } - } else { - if (d_tn->ct.ctor != 0) { - /* Construct from scratch so no prototype exists */ - d_tn->obj_init = 0; - } - else if (d_tn->cct.cctor != 0) { - /* Now data initialize the prototype since it was previously registered */ - d_tn->obj_init = (void *) __kmp_allocate( d_tn->cmn_size ); - (void) (*d_tn->cct.cctor) (d_tn->obj_init, pc_addr); - } - else { - d_tn->pod_init = __kmp_init_common_data( data_addr, d_tn->cmn_size ); - } - } - } - } - else { - struct shared_common **lnk_tn; - - d_tn = (struct shared_common *) __kmp_allocate( sizeof( struct shared_common ) ); - d_tn->gbl_addr = pc_addr; - d_tn->cmn_size = pc_size; - d_tn->pod_init = __kmp_init_common_data( data_addr, pc_size ); -/* - d_tn->obj_init = 0; // AC: commented out because __kmp_allocate zeroes the memory - d_tn->ct.ctor = 0; - d_tn->cct.cctor = 0; - d_tn->dt.dtor = 0; - d_tn->is_vec = FALSE; - d_tn->vec_len = 0L; -*/ - lnk_tn = &(__kmp_threadprivate_d_table.data[ KMP_HASH(pc_addr) ]); - - d_tn->next = *lnk_tn; - *lnk_tn = d_tn; - } - - tn->cmn_size = d_tn->cmn_size; - - if ( (__kmp_foreign_tp) ? (KMP_INITIAL_GTID (gtid)) : (KMP_UBER_GTID (gtid)) ) { - tn->par_addr = (void *) pc_addr; - } - else { - tn->par_addr = (void *) __kmp_allocate( tn->cmn_size ); - } - - __kmp_release_lock( & __kmp_global_lock, gtid ); - - /* +++++++++ END OF CRITICAL SECTION +++++++++ */ - -#ifdef USE_CHECKS_COMMON - if (pc_size > d_tn->cmn_size) { - KC_TRACE( 10, ( "__kmp_threadprivate_insert: THREADPRIVATE: %p (%" - KMP_UINTPTR_SPEC " ,%" KMP_UINTPTR_SPEC ")\n", - pc_addr, pc_size, d_tn->cmn_size ) ); - KMP_FATAL( TPCommonBlocksInconsist ); - } -#endif /* USE_CHECKS_COMMON */ - - tt = &(__kmp_threads[ gtid ]->th.th_pri_common->data[ KMP_HASH(pc_addr) ]); - -#ifdef KMP_TASK_COMMON_DEBUG - if (*tt != 0) { - KC_TRACE( 10, ( "__kmp_threadprivate_insert: WARNING! thread#%d: collision on %p\n", - gtid, pc_addr ) ); - } -#endif - tn->next = *tt; - *tt = tn; - -#ifdef KMP_TASK_COMMON_DEBUG - KC_TRACE( 10, ( "__kmp_threadprivate_insert: thread#%d, inserted node %p on list\n", - gtid, pc_addr ) ); - dump_list( ); -#endif - - /* Link the node into a simple list */ - - tn->link = __kmp_threads[ gtid ]->th.th_pri_head; - __kmp_threads[ gtid ]->th.th_pri_head = tn; - -#ifdef BUILD_TV - __kmp_tv_threadprivate_store( __kmp_threads[ gtid ], tn->gbl_addr, tn->par_addr ); -#endif - - if( (__kmp_foreign_tp) ? 
(KMP_INITIAL_GTID (gtid)) : (KMP_UBER_GTID (gtid)) ) - return tn; - - /* - * if C++ object with copy constructor, use it; - * else if C++ object with constructor, use it for the non-master copies only; - * else use pod_init and memcpy - * - * C++ constructors need to be called once for each non-master thread on allocate - * C++ copy constructors need to be called once for each thread on allocate - */ - - /* - * C++ object with constructors/destructors; - * don't call constructors for master thread though - */ - if (d_tn->is_vec) { - if ( d_tn->ct.ctorv != 0) { - (void) (*d_tn->ct.ctorv) (tn->par_addr, d_tn->vec_len); - } else if (d_tn->cct.cctorv != 0) { - (void) (*d_tn->cct.cctorv) (tn->par_addr, d_tn->obj_init, d_tn->vec_len); - } else if (tn->par_addr != tn->gbl_addr) { - __kmp_copy_common_data( tn->par_addr, d_tn->pod_init ); - } - } else { - if ( d_tn->ct.ctor != 0 ) { - (void) (*d_tn->ct.ctor) (tn->par_addr); - } else if (d_tn->cct.cctor != 0) { - (void) (*d_tn->cct.cctor) (tn->par_addr, d_tn->obj_init); - } else if (tn->par_addr != tn->gbl_addr) { - __kmp_copy_common_data( tn->par_addr, d_tn->pod_init ); - } - } -/* !BUILD_OPENMP_C - if (tn->par_addr != tn->gbl_addr) - __kmp_copy_common_data( tn->par_addr, d_tn->pod_init ); */ - - return tn; -} - -/* ------------------------------------------------------------------------ */ -/* We are currently parallel, and we know the thread id. */ -/* ------------------------------------------------------------------------ */ - -/*! - @ingroup THREADPRIVATE - - @param loc source location information - @param data pointer to data being privatized - @param ctor pointer to constructor function for data - @param cctor pointer to copy constructor function for data - @param dtor pointer to destructor function for data - - Register constructors and destructors for thread private data. - This function is called when executing in parallel, when we know the thread id. -*/ -void -__kmpc_threadprivate_register(ident_t *loc, void *data, kmpc_ctor ctor, kmpc_cctor cctor, kmpc_dtor dtor) -{ - struct shared_common *d_tn, **lnk_tn; - - KC_TRACE( 10, ("__kmpc_threadprivate_register: called\n" ) ); - -#ifdef USE_CHECKS_COMMON - /* copy constructor must be zero for current code gen (Nov 2002 - jph) */ - KMP_ASSERT( cctor == 0); -#endif /* USE_CHECKS_COMMON */ - - /* Only the global data table exists. */ - d_tn = __kmp_find_shared_task_common( &__kmp_threadprivate_d_table, -1, data ); - - if (d_tn == 0) { - d_tn = (struct shared_common *) __kmp_allocate( sizeof( struct shared_common ) ); - d_tn->gbl_addr = data; - - d_tn->ct.ctor = ctor; - d_tn->cct.cctor = cctor; - d_tn->dt.dtor = dtor; -/* - d_tn->is_vec = FALSE; // AC: commented out because __kmp_allocate zeroes the memory - d_tn->vec_len = 0L; - d_tn->obj_init = 0; - d_tn->pod_init = 0; -*/ - lnk_tn = &(__kmp_threadprivate_d_table.data[ KMP_HASH(data) ]); - - d_tn->next = *lnk_tn; - *lnk_tn = d_tn; - } -} - -void * -__kmpc_threadprivate(ident_t *loc, kmp_int32 global_tid, void *data, size_t size) -{ - void *ret; - struct private_common *tn; - - KC_TRACE( 10, ("__kmpc_threadprivate: T#%d called\n", global_tid ) ); - -#ifdef USE_CHECKS_COMMON - if (! __kmp_init_serial) - KMP_FATAL( RTLNotInitialized ); -#endif /* USE_CHECKS_COMMON */ - - if ( ! __kmp_threads[global_tid] -> th.th_root -> r.r_active && ! 
__kmp_foreign_tp ) { - /* The parallel address will NEVER overlap with the data_address */ - /* dkp: 3rd arg to kmp_threadprivate_insert_private_data() is the data_address; use data_address = data */ - - KC_TRACE( 20, ("__kmpc_threadprivate: T#%d inserting private data\n", global_tid ) ); - kmp_threadprivate_insert_private_data( global_tid, data, data, size ); - - ret = data; - } - else { - KC_TRACE( 50, ("__kmpc_threadprivate: T#%d try to find private data at address %p\n", - global_tid, data ) ); - tn = __kmp_threadprivate_find_task_common( __kmp_threads[ global_tid ]->th.th_pri_common, global_tid, data ); - - if ( tn ) { - KC_TRACE( 20, ("__kmpc_threadprivate: T#%d found data\n", global_tid ) ); -#ifdef USE_CHECKS_COMMON - if ((size_t) size > tn->cmn_size) { - KC_TRACE( 10, ( "THREADPRIVATE: %p (%" KMP_UINTPTR_SPEC " ,%" KMP_UINTPTR_SPEC ")\n", - data, size, tn->cmn_size ) ); - KMP_FATAL( TPCommonBlocksInconsist ); - } -#endif /* USE_CHECKS_COMMON */ - } - else { - /* The parallel address will NEVER overlap with the data_address */ - /* dkp: 3rd arg to kmp_threadprivate_insert() is the data_address; use data_address = data */ - KC_TRACE( 20, ("__kmpc_threadprivate: T#%d inserting data\n", global_tid ) ); - tn = kmp_threadprivate_insert( global_tid, data, data, size ); - } - - ret = tn->par_addr; - } - KC_TRACE( 10, ("__kmpc_threadprivate: T#%d exiting; return value = %p\n", - global_tid, ret ) ); - - return ret; -} - -/*! - @ingroup THREADPRIVATE - @param loc source location information - @param global_tid global thread number - @param data pointer to data to privatize - @param size size of data to privatize - @param cache pointer to cache - @return pointer to private storage - - Allocate private storage for threadprivate data. -*/ -void * -__kmpc_threadprivate_cached( - ident_t * loc, - kmp_int32 global_tid, // gtid. - void * data, // Pointer to original global variable. - size_t size, // Size of original global variable. - void *** cache -) { - KC_TRACE( 10, ("__kmpc_threadprivate_cached: T#%d called with cache: %p, address: %p, size: %" - KMP_SIZE_T_SPEC "\n", - global_tid, *cache, data, size ) ); - - if ( TCR_PTR(*cache) == 0) { - __kmp_acquire_lock( & __kmp_global_lock, global_tid ); - - if ( TCR_PTR(*cache) == 0) { - __kmp_acquire_bootstrap_lock(&__kmp_tp_cached_lock); - __kmp_tp_cached = 1; - __kmp_release_bootstrap_lock(&__kmp_tp_cached_lock); - void ** my_cache; - KMP_ITT_IGNORE( - my_cache = (void**) - __kmp_allocate(sizeof( void * ) * __kmp_tp_capacity + sizeof ( kmp_cached_addr_t )); - ); - // No need to zero the allocated memory; __kmp_allocate does that. 
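The cache publication just above follows the classic double-checked locking pattern: one unsynchronized read of *cache, a second read under __kmp_global_lock, and KMP_MB() fences around the TCW_PTR() store so that no thread can observe the cache pointer before the zeroed array behind it. A minimal standalone sketch of the same pattern in portable C++11 follows; the names get_cache and cache_lock are illustrative, not part of the runtime:

    #include <atomic>
    #include <mutex>

    static std::mutex cache_lock;

    // Lazily allocate and publish a per-variable cache array; a stand-in for
    // the TCR_PTR/TCW_PTR + KMP_MB() sequence in __kmpc_threadprivate_cached.
    void **get_cache(std::atomic<void **> &cache, int capacity) {
        void **c = cache.load(std::memory_order_acquire);  // first check, no lock
        if (c == nullptr) {
            std::lock_guard<std::mutex> guard(cache_lock);
            c = cache.load(std::memory_order_relaxed);     // second check, locked
            if (c == nullptr) {
                c = new void *[capacity]();                // zero-initialized slots
                cache.store(c, std::memory_order_release); // publish after init
            }
        }
        return c;
    }

The release store pairs with the acquire load, which is the portable spelling of the ordering the two KMP_MB() calls provide here.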
- KC_TRACE( 50, ("__kmpc_threadprivate_cached: T#%d allocated cache at address %p\n", - global_tid, my_cache ) ); - - /* TODO: free all this memory in __kmp_common_destroy using __kmp_threadpriv_cache_list */ - /* Add address of mycache to linked list for cleanup later */ - kmp_cached_addr_t *tp_cache_addr; - - tp_cache_addr = (kmp_cached_addr_t *) & my_cache[__kmp_tp_capacity]; - tp_cache_addr -> addr = my_cache; - tp_cache_addr -> next = __kmp_threadpriv_cache_list; - __kmp_threadpriv_cache_list = tp_cache_addr; - - KMP_MB(); - - TCW_PTR( *cache, my_cache); - - KMP_MB(); - } - - __kmp_release_lock( & __kmp_global_lock, global_tid ); - } - - void *ret; - if ((ret = TCR_PTR((*cache)[ global_tid ])) == 0) { - ret = __kmpc_threadprivate( loc, global_tid, data, (size_t) size); - - TCW_PTR( (*cache)[ global_tid ], ret); - } - KC_TRACE( 10, ("__kmpc_threadprivate_cached: T#%d exiting; return value = %p\n", - global_tid, ret ) ); - - return ret; -} - -/*! - @ingroup THREADPRIVATE - @param loc source location information - @param data pointer to data being privatized - @param ctor pointer to constructor function for data - @param cctor pointer to copy constructor function for data - @param dtor pointer to destructor function for data - @param vector_length length of the vector (bytes or elements?) - Register vector constructors and destructors for thread private data. -*/ -void -__kmpc_threadprivate_register_vec( ident_t *loc, void *data, kmpc_ctor_vec ctor, - kmpc_cctor_vec cctor, kmpc_dtor_vec dtor, - size_t vector_length ) -{ - struct shared_common *d_tn, **lnk_tn; - - KC_TRACE( 10, ("__kmpc_threadprivate_register_vec: called\n" ) ); - -#ifdef USE_CHECKS_COMMON - /* copy constructor must be zero for current code gen (Nov 2002 - jph) */ - KMP_ASSERT( cctor == 0); -#endif /* USE_CHECKS_COMMON */ - - d_tn = __kmp_find_shared_task_common( &__kmp_threadprivate_d_table, - -1, data ); /* Only the global data table exists. */ - - if (d_tn == 0) { - d_tn = (struct shared_common *) __kmp_allocate( sizeof( struct shared_common ) ); - d_tn->gbl_addr = data; - - d_tn->ct.ctorv = ctor; - d_tn->cct.cctorv = cctor; - d_tn->dt.dtorv = dtor; - d_tn->is_vec = TRUE; - d_tn->vec_len = (size_t) vector_length; -/* - d_tn->obj_init = 0; // AC: commented out because __kmp_allocate zeroes the memory - d_tn->pod_init = 0; -*/ - lnk_tn = &(__kmp_threadprivate_d_table.data[ KMP_HASH(data) ]); - - d_tn->next = *lnk_tn; - *lnk_tn = d_tn; - } -} Index: runtime/src/kmp_threadprivate.cpp =================================================================== --- runtime/src/kmp_threadprivate.cpp +++ runtime/src/kmp_threadprivate.cpp @@ -1,5 +1,5 @@ /* - * kmp_threadprivate.c -- OpenMP threadprivate support library + * kmp_threadprivate.cpp -- OpenMP threadprivate support library */ Index: runtime/src/kmp_utility.c =================================================================== --- runtime/src/kmp_utility.c +++ runtime/src/kmp_utility.c @@ -1,429 +0,0 @@ -/* - * kmp_utility.c -- Utility routines for the OpenMP support library. - */ - - -//===----------------------------------------------------------------------===// -// -// The LLVM Compiler Infrastructure -// -// This file is dual licensed under the MIT and the University of Illinois Open -// Source Licenses. See LICENSE.txt for details. 
-// -//===----------------------------------------------------------------------===// - - -#include "kmp.h" -#include "kmp_wrapper_getpid.h" -#include "kmp_str.h" -#include <float.h> -#include "kmp_i18n.h" - -/* ------------------------------------------------------------------------ */ -/* ------------------------------------------------------------------------ */ - -static const char *unknown = "unknown"; - -#if KMP_ARCH_X86 || KMP_ARCH_X86_64 - -/* NOTE: If called before serial_initialize (i.e. from runtime_initialize), then */ -/* the debugging package has not been initialized yet, and only "0" will print */ -/* debugging output since the environment variables have not been read. */ - -#ifdef KMP_DEBUG -static int trace_level = 5; -#endif - -/* - * LOG_ID_BITS = ( 1 + floor( log_2( max( log_per_phy - 1, 1 )))) - * APIC_ID = (PHY_ID << LOG_ID_BITS) | LOG_ID - * PHY_ID = APIC_ID >> LOG_ID_BITS - */ -int -__kmp_get_physical_id( int log_per_phy, int apic_id ) -{ - int index_lsb, index_msb, temp; - - if (log_per_phy > 1) { - index_lsb = 0; - index_msb = 31; - - temp = log_per_phy; - while ( (temp & 1) == 0 ) { - temp >>= 1; - index_lsb++; - } - - temp = log_per_phy; - while ( (temp & 0x80000000)==0 ) { - temp <<= 1; - index_msb--; - } - - /* If >1 bits were set in log_per_phy, choose next higher power of 2 */ - if (index_lsb != index_msb) index_msb++; - - return ( (int) (apic_id >> index_msb) ); - } - - return apic_id; -} - - -/* - * LOG_ID_BITS = ( 1 + floor( log_2( max( log_per_phy - 1, 1 )))) - * APIC_ID = (PHY_ID << LOG_ID_BITS) | LOG_ID - * LOG_ID = APIC_ID & (( 1 << LOG_ID_BITS ) - 1 ) - */ -int -__kmp_get_logical_id( int log_per_phy, int apic_id ) -{ - unsigned current_bit; - int bits_seen; - - if (log_per_phy <= 1) return ( 0 ); - - bits_seen = 0; - - for (current_bit = 1; log_per_phy != 0; current_bit <<= 1) { - if ( log_per_phy & current_bit ) { - log_per_phy &= ~current_bit; - bits_seen++; - } - } - - /* If exactly 1 bit was set in log_per_phy, choose next lower power of 2 */ - if (bits_seen == 1) { - current_bit >>= 1; - } - - return ( (int) ((current_bit - 1) & apic_id) ); } - - -static -kmp_uint64 -__kmp_parse_frequency( // R: Frequency in Hz. - char const * frequency // I: Float number and unit: MHz, GHz, or THz. -) { - - double value = 0.0; - char const * unit = NULL; - kmp_uint64 result = 0; /* Zero is a better unknown value than all ones. */ - - if ( frequency == NULL ) { - return result; - }; // if - value = strtod( frequency, (char * *) & unit ); // strtod() does not like "char const *". - if ( 0 < value && value <= DBL_MAX ) { // Good value (not overflow, underflow, etc). - if ( strcmp( unit, "MHz" ) == 0 ) { - value = value * 1.0E+6; - } else if ( strcmp( unit, "GHz" ) == 0 ) { - value = value * 1.0E+9; - } else if ( strcmp( unit, "THz" ) == 0 ) { - value = value * 1.0E+12; - } else { // Wrong unit. - return result; - }; // if - result = value; - }; // if - return result; - -}; // func __kmp_parse_frequency - -void -__kmp_query_cpuid( kmp_cpuinfo_t *p ) -{ - struct kmp_cpuid buf; - int max_arg; - int log_per_phy; -#ifdef KMP_DEBUG - int cflush_size; -#endif - - p->initialized = 1; - - p->sse2 = 1; // Assume SSE2 by default. 
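For the usual case where log_per_phy is a power of two, the two routines above reduce to a shift and a mask with LOG_ID_BITS = log2(log_per_phy). A worked example with made-up numbers: log_per_phy = 4 gives two logical-ID bits, so apic_id = 13 (binary 1101) splits into PHY_ID = 13 >> 2 = 3 and LOG_ID = 13 & 3 = 1. As a self-contained check:

    #include <cassert>

    // Power-of-two case of __kmp_get_physical_id()/__kmp_get_logical_id();
    // the log_per_phy and apic_id values are invented for illustration.
    int main() {
        const int log_per_phy = 4;                  // logical CPUs per package
        const int id_bits = 2;                      // log2(log_per_phy)
        const int apic_id = 13;                     // binary 1101
        assert((apic_id >> id_bits) == 3);          // PHY_ID
        assert((apic_id & (log_per_phy - 1)) == 1); // LOG_ID
        return 0;
    }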
- - __kmp_x86_cpuid( 0, 0, &buf ); - - KA_TRACE( trace_level, ("INFO: CPUID %d: EAX=0x%08X EBX=0x%08X ECX=0x%08X EDX=0x%08X\n", - 0, buf.eax, buf.ebx, buf.ecx, buf.edx ) ); - - max_arg = buf.eax; - - p->apic_id = -1; - - if (max_arg >= 1) { - int i; - kmp_uint32 t, data[ 4 ]; - - __kmp_x86_cpuid( 1, 0, &buf ); - KA_TRACE( trace_level, ("INFO: CPUID %d: EAX=0x%08X EBX=0x%08X ECX=0x%08X EDX=0x%08X\n", - 1, buf.eax, buf.ebx, buf.ecx, buf.edx ) ); - - { -#define get_value(reg,lo,mask) ( ( ( reg ) >> ( lo ) ) & ( mask ) ) - - p->signature = buf.eax; - p->family = get_value( buf.eax, 20, 0xff ) + get_value( buf.eax, 8, 0x0f ); - p->model = ( get_value( buf.eax, 16, 0x0f ) << 4 ) + get_value( buf.eax, 4, 0x0f ); - p->stepping = get_value( buf.eax, 0, 0x0f ); - -#undef get_value - - KA_TRACE( trace_level, (" family = %d, model = %d, stepping = %d\n", p->family, p->model, p->stepping ) ); - } - - for ( t = buf.ebx, i = 0; i < 4; t >>= 8, ++i ) { - data[ i ] = (t & 0xff); - }; // for - - p->sse2 = ( buf.edx >> 26 ) & 1; - -#ifdef KMP_DEBUG - - if ( (buf.edx >> 4) & 1 ) { - /* TSC - Timestamp Counter Available */ - KA_TRACE( trace_level, (" TSC" ) ); - } - if ( (buf.edx >> 8) & 1 ) { - /* CX8 - CMPXCHG8B Instruction Available */ - KA_TRACE( trace_level, (" CX8" ) ); - } - if ( (buf.edx >> 9) & 1 ) { - /* APIC - Local APIC Present (multi-processor operation support */ - KA_TRACE( trace_level, (" APIC" ) ); - } - if ( (buf.edx >> 15) & 1 ) { - /* CMOV - Conditional MOVe Instruction Available */ - KA_TRACE( trace_level, (" CMOV" ) ); - } - if ( (buf.edx >> 18) & 1 ) { - /* PSN - Processor Serial Number Available */ - KA_TRACE( trace_level, (" PSN" ) ); - } - if ( (buf.edx >> 19) & 1 ) { - /* CLFULSH - Cache Flush Instruction Available */ - cflush_size = data[ 1 ] * 8; /* Bits 15-08: CLFLUSH line size = 8 (64 bytes) */ - KA_TRACE( trace_level, (" CLFLUSH(%db)", cflush_size ) ); - - } - if ( (buf.edx >> 21) & 1 ) { - /* DTES - Debug Trace & EMON Store */ - KA_TRACE( trace_level, (" DTES" ) ); - } - if ( (buf.edx >> 22) & 1 ) { - /* ACPI - ACPI Support Available */ - KA_TRACE( trace_level, (" ACPI" ) ); - } - if ( (buf.edx >> 23) & 1 ) { - /* MMX - Multimedia Extensions */ - KA_TRACE( trace_level, (" MMX" ) ); - } - if ( (buf.edx >> 25) & 1 ) { - /* SSE - SSE Instructions */ - KA_TRACE( trace_level, (" SSE" ) ); - } - if ( (buf.edx >> 26) & 1 ) { - /* SSE2 - SSE2 Instructions */ - KA_TRACE( trace_level, (" SSE2" ) ); - } - if ( (buf.edx >> 27) & 1 ) { - /* SLFSNP - Self-Snooping Cache */ - KA_TRACE( trace_level, (" SLFSNP" ) ); - } -#endif /* KMP_DEBUG */ - - if ( (buf.edx >> 28) & 1 ) { - /* Bits 23-16: Logical Processors per Physical Processor (1 for P4) */ - log_per_phy = data[ 2 ]; - p->apic_id = data[ 3 ]; /* Bits 31-24: Processor Initial APIC ID (X) */ - KA_TRACE( trace_level, (" HT(%d TPUs)", log_per_phy ) ); - - if( log_per_phy > 1 ) { - /* default to 1k FOR JT-enabled processors (4k on OS X*) */ -#if KMP_OS_DARWIN - p->cpu_stackoffset = 4 * 1024; -#else - p->cpu_stackoffset = 1 * 1024; -#endif - } - - p->physical_id = __kmp_get_physical_id( log_per_phy, p->apic_id ); - p->logical_id = __kmp_get_logical_id( log_per_phy, p->apic_id ); - } -#ifdef KMP_DEBUG - if ( (buf.edx >> 29) & 1 ) { - /* ATHROTL - Automatic Throttle Control */ - KA_TRACE( trace_level, (" ATHROTL" ) ); - } - KA_TRACE( trace_level, (" ]\n" ) ); - - for (i = 2; i <= max_arg; ++i) { - __kmp_x86_cpuid( i, 0, &buf ); - KA_TRACE( trace_level, - ( "INFO: CPUID %d: EAX=0x%08X EBX=0x%08X ECX=0x%08X EDX=0x%08X\n", - i, buf.eax, buf.ebx, 
buf.ecx, buf.edx ) ); - } -#endif -#if KMP_USE_ADAPTIVE_LOCKS - p->rtm = 0; - if (max_arg > 7) - { - /* RTM bit CPUID.07:EBX, bit 11 */ - __kmp_x86_cpuid(7, 0, &buf); - p->rtm = (buf.ebx >> 11) & 1; - KA_TRACE( trace_level, (" RTM" ) ); - } -#endif - }; // if - - { // Parse CPU brand string for frequency, saving the string for later. - int i; - kmp_cpuid_t * base = (kmp_cpuid_t *)&p->name[0]; - - // Get CPU brand string. - for ( i = 0; i < 3; ++ i ) { - __kmp_x86_cpuid( 0x80000002 + i, 0, base+i ); - }; // for - p->name[ sizeof(p->name) - 1 ] = 0; // Just in case. ;-) - KA_TRACE( trace_level, ( "cpu brand string: \"%s\"\n", &p->name[0] ) ); - - // Parse frequency. - p->frequency = __kmp_parse_frequency( strrchr( &p->name[0], ' ' ) ); - KA_TRACE( trace_level, ( "cpu frequency from brand string: %" KMP_UINT64_SPEC "\n", p->frequency ) ); - } -} - -#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */ - -/* ------------------------------------------------------------------------------------ */ -/* ------------------------------------------------------------------------------------ */ - -void -__kmp_expand_host_name( char *buffer, size_t size ) -{ - KMP_DEBUG_ASSERT(size >= sizeof(unknown)); -#if KMP_OS_WINDOWS - { - DWORD s = size; - - if (! GetComputerNameA( buffer, & s )) - KMP_STRCPY_S( buffer, size, unknown ); - } -#else - buffer[size - 2] = 0; - if (gethostname( buffer, size ) || buffer[size - 2] != 0) - KMP_STRCPY_S( buffer, size, unknown ); -#endif -} - -/* Expand the meta characters in the filename: - * - * Currently defined characters are: - * - * %H the hostname - * %P the number of threads used. - * %I the unique identifier for this run. - */ - -void -__kmp_expand_file_name( char *result, size_t rlen, char *pattern ) -{ - char *pos = result, *end = result + rlen - 1; - char buffer[256]; - int default_cpu_width = 1; - int snp_result; - - KMP_DEBUG_ASSERT(rlen > 0); - *end = 0; - { - int i; - for(i = __kmp_xproc; i >= 10; i /= 10, ++default_cpu_width); - } - - if (pattern != NULL) { - while (*pattern != '\0' && pos < end) { - if (*pattern != '%') { - *pos++ = *pattern++; - } else { - char *old_pattern = pattern; - int width = 1; - int cpu_width = default_cpu_width; - - ++pattern; - - if (*pattern >= '0' && *pattern <= '9') { - width = 0; - do { - width = (width * 10) + *pattern++ - '0'; - } while (*pattern >= '0' && *pattern <= '9'); - if (width < 0 || width > 1024) - width = 1; - - cpu_width = width; - } - - switch (*pattern) { - case 'H': - case 'h': - { - __kmp_expand_host_name( buffer, sizeof( buffer ) ); - KMP_STRNCPY( pos, buffer, end - pos + 1); - if(*end == 0) { - while ( *pos ) - ++pos; - ++pattern; - } else - pos = end; - } - break; - case 'P': - case 'p': - { - snp_result = KMP_SNPRINTF( pos, end - pos + 1, "%0*d", cpu_width, __kmp_dflt_team_nth ); - if(snp_result >= 0 && snp_result <= end - pos) { - while ( *pos ) - ++pos; - ++pattern; - } else - pos = end; - } - break; - case 'I': - case 'i': - { - pid_t id = getpid(); - snp_result = KMP_SNPRINTF( pos, end - pos + 1, "%0*d", width, id ); - if(snp_result >= 0 && snp_result <= end - pos) { - while ( *pos ) - ++pos; - ++pattern; - } else - pos = end; - break; - } - case '%': - { - *pos++ = '%'; - ++pattern; - break; - } - default: - { - *pos++ = '%'; - pattern = old_pattern + 1; - break; - } - } - } - } - /* TODO: How do we get rid of this? 
*/ - if(*pattern != '\0') - KMP_FATAL( FileNameTooLong ); - } - - *pos = '\0'; -} - Index: runtime/src/kmp_utility.cpp =================================================================== --- runtime/src/kmp_utility.cpp +++ runtime/src/kmp_utility.cpp @@ -1,5 +1,5 @@ /* - * kmp_utility.c -- Utility routines for the OpenMP support library. + * kmp_utility.cpp -- Utility routines for the OpenMP support library. */ Index: runtime/src/kmp_version.c =================================================================== --- runtime/src/kmp_version.c +++ runtime/src/kmp_version.c @@ -1,214 +0,0 @@ -/* - * kmp_version.c - */ - - -//===----------------------------------------------------------------------===// -// -// The LLVM Compiler Infrastructure -// -// This file is dual licensed under the MIT and the University of Illinois Open -// Source Licenses. See LICENSE.txt for details. -// -//===----------------------------------------------------------------------===// - - -#include "kmp.h" -#include "kmp_io.h" -#include "kmp_version.h" - -// Replace with snapshot date YYYYMMDD for promotion build. -#define KMP_VERSION_BUILD 20140926 - -// Helper macros to convert value of macro to string literal. -#define _stringer( x ) #x -#define stringer( x ) _stringer( x ) - -// Detect compiler. -#if KMP_COMPILER_ICC - #if __INTEL_COMPILER == 1010 - #define KMP_COMPILER "Intel C++ Compiler 10.1" - #elif __INTEL_COMPILER == 1100 - #define KMP_COMPILER "Intel C++ Compiler 11.0" - #elif __INTEL_COMPILER == 1110 - #define KMP_COMPILER "Intel C++ Compiler 11.1" - #elif __INTEL_COMPILER == 1200 - #define KMP_COMPILER "Intel C++ Compiler 12.0" - #elif __INTEL_COMPILER == 1210 - #define KMP_COMPILER "Intel C++ Compiler 12.1" - #elif __INTEL_COMPILER == 1300 - #define KMP_COMPILER "Intel C++ Compiler 13.0" - #elif __INTEL_COMPILER == 1310 - #define KMP_COMPILER "Intel C++ Compiler 13.1" - #elif __INTEL_COMPILER == 1400 - #define KMP_COMPILER "Intel C++ Compiler 14.0" - #elif __INTEL_COMPILER == 1410 - #define KMP_COMPILER "Intel C++ Compiler 14.1" - #elif __INTEL_COMPILER == 1500 - #define KMP_COMPILER "Intel C++ Compiler 15.0" - #elif __INTEL_COMPILER == 1600 - #define KMP_COMPILER "Intel C++ Compiler 16.0" - #elif __INTEL_COMPILER == 1700 - #define KMP_COMPILER "Intel C++ Compiler 17.0" - #elif __INTEL_COMPILER == 9998 - #define KMP_COMPILER "Intel C++ Compiler mainline" - #elif __INTEL_COMPILER == 9999 - #define KMP_COMPILER "Intel C++ Compiler mainline" - #endif -#elif KMP_COMPILER_CLANG - #define KMP_COMPILER "Clang " stringer( __clang_major__ ) "." stringer( __clang_minor__ ) -#elif KMP_COMPILER_GCC - #define KMP_COMPILER "GCC " stringer( __GNUC__ ) "." stringer( __GNUC_MINOR__ ) -#elif KMP_COMPILER_MSVC - #define KMP_COMPILER "MSVC " stringer( _MSC_FULL_VER ) -#endif -#ifndef KMP_COMPILER - #warning "Unknown compiler" - #define KMP_COMPILER "unknown compiler" -#endif - -// Detect library type (perf, stub). -#ifdef KMP_STUB - #define KMP_LIB_TYPE "stub" -#else - #define KMP_LIB_TYPE "performance" -#endif // KMP_LIB_TYPE - -// Detect link type (static, dynamic). -#ifdef KMP_DYNAMIC_LIB - #define KMP_LINK_TYPE "dynamic" -#else - #define KMP_LINK_TYPE "static" -#endif // KMP_LINK_TYPE - -// Finally, define strings. 
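The _stringer/stringer pair defined above is the standard two-level stringification idiom: # is applied before macro expansion, so a single level would produce the macro's name rather than its value. A small illustration, with a hypothetical value for KMP_VERSION_MAJOR:

    #define _stringer(x) #x
    #define stringer(x) _stringer(x)

    #define KMP_VERSION_MAJOR 5   // hypothetical value for the example

    // _stringer(KMP_VERSION_MAJOR) yields "KMP_VERSION_MAJOR" (no expansion);
    // stringer(KMP_VERSION_MAJOR) expands the argument first and yields "5".
    static const char ver[] = "ver. " stringer(KMP_VERSION_MAJOR);  // "ver. 5"

This is exactly how the version strings below splice numeric macros into string literals.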
-#define KMP_LIBRARY KMP_LIB_TYPE " library (" KMP_LINK_TYPE ")" -#define KMP_COPYRIGHT "" - -int const __kmp_version_major = KMP_VERSION_MAJOR; -int const __kmp_version_minor = KMP_VERSION_MINOR; -int const __kmp_version_build = KMP_VERSION_BUILD; -int const __kmp_openmp_version = - #if OMP_45_ENABLED - 201511; - #elif OMP_40_ENABLED - 201307; - #else - 201107; - #endif - -/* Do NOT change the format of this string! Intel(R) Thread Profiler checks for a - specific format some changes in the recognition routine there need to - be made before this is changed. -*/ -char const __kmp_copyright[] = - KMP_VERSION_PREFIX KMP_LIBRARY - " ver. " stringer( KMP_VERSION_MAJOR ) "." stringer( KMP_VERSION_MINOR ) - "." stringer( KMP_VERSION_BUILD ) " " - KMP_COPYRIGHT; - -char const __kmp_version_copyright[] = KMP_VERSION_PREFIX KMP_COPYRIGHT; -char const __kmp_version_lib_ver[] = KMP_VERSION_PREFIX "version: " stringer( KMP_VERSION_MAJOR ) "." stringer( KMP_VERSION_MINOR ) "." stringer( KMP_VERSION_BUILD ); -char const __kmp_version_lib_type[] = KMP_VERSION_PREFIX "library type: " KMP_LIB_TYPE; -char const __kmp_version_link_type[] = KMP_VERSION_PREFIX "link type: " KMP_LINK_TYPE; -char const __kmp_version_build_time[] = KMP_VERSION_PREFIX "build time: " "no_timestamp"; -#if KMP_MIC2 - char const __kmp_version_target_env[] = KMP_VERSION_PREFIX "target environment: MIC2"; -#endif -char const __kmp_version_build_compiler[] = KMP_VERSION_PREFIX "build compiler: " KMP_COMPILER; - -// -// Called at serial initialization time. -// -static int __kmp_version_1_printed = FALSE; - -void -__kmp_print_version_1( void ) -{ - if ( __kmp_version_1_printed ) { - return; - }; // if - __kmp_version_1_printed = TRUE; - - #ifndef KMP_STUB - kmp_str_buf_t buffer; - __kmp_str_buf_init( & buffer ); - // Print version strings skipping initial magic. - __kmp_str_buf_print( & buffer, "%s\n", & __kmp_version_lib_ver[ KMP_VERSION_MAGIC_LEN ] ); - __kmp_str_buf_print( & buffer, "%s\n", & __kmp_version_lib_type[ KMP_VERSION_MAGIC_LEN ] ); - __kmp_str_buf_print( & buffer, "%s\n", & __kmp_version_link_type[ KMP_VERSION_MAGIC_LEN ] ); - __kmp_str_buf_print( & buffer, "%s\n", & __kmp_version_build_time[ KMP_VERSION_MAGIC_LEN ] ); - #if KMP_MIC - __kmp_str_buf_print( & buffer, "%s\n", & __kmp_version_target_env[ KMP_VERSION_MAGIC_LEN ] ); - #endif - __kmp_str_buf_print( & buffer, "%s\n", & __kmp_version_build_compiler[ KMP_VERSION_MAGIC_LEN ] ); - #if defined(KMP_GOMP_COMPAT) - __kmp_str_buf_print( & buffer, "%s\n", & __kmp_version_alt_comp[ KMP_VERSION_MAGIC_LEN ] ); - #endif /* defined(KMP_GOMP_COMPAT) */ - __kmp_str_buf_print( & buffer, "%s\n", & __kmp_version_omp_api[ KMP_VERSION_MAGIC_LEN ] ); - __kmp_str_buf_print( & buffer, "%sdynamic error checking: %s\n", KMP_VERSION_PREF_STR, ( __kmp_env_consistency_check ? 
"yes" : "no" ) ); - #ifdef KMP_DEBUG - for ( int i = bs_plain_barrier; i < bs_last_barrier; ++ i ) { - __kmp_str_buf_print( - & buffer, - "%s%s barrier branch bits: gather=%u, release=%u\n", - KMP_VERSION_PREF_STR, - __kmp_barrier_type_name[ i ], - __kmp_barrier_gather_branch_bits[ i ], - __kmp_barrier_release_branch_bits[ i ] - ); // __kmp_str_buf_print - }; // for i - for ( int i = bs_plain_barrier; i < bs_last_barrier; ++ i ) { - __kmp_str_buf_print( - & buffer, - "%s%s barrier pattern: gather=%s, release=%s\n", - KMP_VERSION_PREF_STR, - __kmp_barrier_type_name[ i ], - __kmp_barrier_pattern_name[ __kmp_barrier_gather_pattern[ i ] ], - __kmp_barrier_pattern_name[ __kmp_barrier_release_pattern[ i ] ] - ); // __kmp_str_buf_print - }; // for i - __kmp_str_buf_print( & buffer, "%s\n", & __kmp_version_lock[ KMP_VERSION_MAGIC_LEN ] ); - #endif - __kmp_str_buf_print( - & buffer, - "%sthread affinity support: %s\n", - KMP_VERSION_PREF_STR, - #if KMP_AFFINITY_SUPPORTED - ( - KMP_AFFINITY_CAPABLE() - ? - ( - __kmp_affinity_type == affinity_none - ? - "not used" - : - "yes" - ) - : - "no" - ) - #else - "no" - #endif - ); - __kmp_printf( "%s", buffer.str ); - __kmp_str_buf_free( & buffer ); - K_DIAG( 1, ( "KMP_VERSION is true\n" ) ); - #endif // KMP_STUB -} // __kmp_print_version_1 - -// -// Called at parallel initialization time. -// -static int __kmp_version_2_printed = FALSE; - -void -__kmp_print_version_2( void ) { - if ( __kmp_version_2_printed ) { - return; - }; // if - __kmp_version_2_printed = TRUE; -} // __kmp_print_version_2 - -// end of file // Index: runtime/src/kmp_version.cpp =================================================================== --- runtime/src/kmp_version.cpp +++ runtime/src/kmp_version.cpp @@ -1,5 +1,5 @@ /* - * kmp_version.c + * kmp_version.cpp */ Index: runtime/src/ompt-general.c =================================================================== --- runtime/src/ompt-general.c +++ runtime/src/ompt-general.c @@ -1,535 +0,0 @@ -/***************************************************************************** - * system include files - ****************************************************************************/ - -#include - -#include -#include -#include -#include - - - -/***************************************************************************** - * ompt include files - ****************************************************************************/ - -#include "ompt-specific.c" - - - -/***************************************************************************** - * macros - ****************************************************************************/ - -#define ompt_get_callback_success 1 -#define ompt_get_callback_failure 0 - -#define no_tool_present 0 - -#define OMPT_API_ROUTINE static - -#ifndef OMPT_STR_MATCH -#define OMPT_STR_MATCH(haystack, needle) (!strcasecmp(haystack, needle)) -#endif - - -/***************************************************************************** - * types - ****************************************************************************/ - -typedef struct { - const char *state_name; - ompt_state_t state_id; -} ompt_state_info_t; - - -enum tool_setting_e { - omp_tool_error, - omp_tool_unset, - omp_tool_disabled, - omp_tool_enabled -}; - - -typedef void (*ompt_initialize_t) ( - ompt_function_lookup_t ompt_fn_lookup, - const char *version, - unsigned int ompt_version -); - - - -/***************************************************************************** - * global variables - 
****************************************************************************/ - -int ompt_enabled = 0; - -ompt_state_info_t ompt_state_info[] = { -#define ompt_state_macro(state, code) { # state, state }, - FOREACH_OMPT_STATE(ompt_state_macro) -#undef ompt_state_macro -}; - -ompt_callbacks_t ompt_callbacks; - -static ompt_initialize_t ompt_initialize_fn = NULL; - - - -/***************************************************************************** - * forward declarations - ****************************************************************************/ - -static ompt_interface_fn_t ompt_fn_lookup(const char *s); - -OMPT_API_ROUTINE ompt_thread_id_t ompt_get_thread_id(void); - - -/***************************************************************************** - * initialization and finalization (private operations) - ****************************************************************************/ - -/* On Unix-like systems that support weak symbols the following implementation - * of ompt_tool() will be used in case no tool-supplied implementation of - * this function is present in the address space of a process. - * - * On Windows, the ompt_tool_windows function is used to find the - * ompt_tool symbol across all modules loaded by a process. If ompt_tool is - * found, ompt_tool's return value is used to initialize the tool. Otherwise, - * NULL is returned and OMPT won't be enabled */ -#if OMPT_HAVE_WEAK_ATTRIBUTE -_OMP_EXTERN -__attribute__ (( weak )) -ompt_initialize_t ompt_tool() -{ -#if OMPT_DEBUG - printf("ompt_tool() is called from the RTL\n"); -#endif - return NULL; -} - -#elif OMPT_HAVE_PSAPI - -#include -#pragma comment(lib, "psapi.lib") -#define ompt_tool ompt_tool_windows - -// The number of loaded modules to start enumeration with EnumProcessModules() -#define NUM_MODULES 128 - -static -ompt_initialize_t ompt_tool_windows() -{ - int i; - DWORD needed, new_size; - HMODULE *modules; - HANDLE process = GetCurrentProcess(); - modules = (HMODULE*)malloc( NUM_MODULES * sizeof(HMODULE) ); - ompt_initialize_t (*ompt_tool_p)() = NULL; - -#if OMPT_DEBUG - printf("ompt_tool_windows(): looking for ompt_tool\n"); -#endif - if (!EnumProcessModules( process, modules, NUM_MODULES * sizeof(HMODULE), - &needed)) { - // Regardless of the error reason use the stub initialization function - free(modules); - return NULL; - } - // Check if NUM_MODULES is enough to list all modules - new_size = needed / sizeof(HMODULE); - if (new_size > NUM_MODULES) { -#if OMPT_DEBUG - printf("ompt_tool_windows(): resize buffer to %d bytes\n", needed); -#endif - modules = (HMODULE*)realloc( modules, needed ); - // If resizing failed use the stub function. 
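On platforms with weak symbols, the discovery contract above is simple: a tool enables OMPT by providing a strong definition of ompt_tool() anywhere in the process image, and the runtime's weak NULL-returning stub loses to it at link or load time. A sketch of the tool side, reusing the ompt_initialize_t signature typedef'd earlier in this file; the header name and the initializer body are assumptions for illustration only:

    #include <ompt.h>  // assumed to declare ompt_function_lookup_t and
                       // ompt_initialize_t for tool code

    extern "C" {

    static void my_tool_init(ompt_function_lookup_t lookup,
                             const char *runtime_version,
                             unsigned int ompt_version) {
        // Typically: fetch ompt_set_callback through lookup() and register
        // callbacks for the events this tool wants to observe.
    }

    // A non-NULL return makes the runtime set ompt_enabled and later invoke
    // my_tool_init() from ompt_post_init().
    ompt_initialize_t ompt_tool(void) { return my_tool_init; }

    } // extern "C"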
- if (!EnumProcessModules(process, modules, needed, &needed)) { - free(modules); - return NULL; - } - } - for (i = 0; i < new_size; ++i) { - (FARPROC &)ompt_tool_p = GetProcAddress(modules[i], "ompt_tool"); - if (ompt_tool_p) { -#if OMPT_DEBUG - TCHAR modName[MAX_PATH]; - if (GetModuleFileName(modules[i], modName, MAX_PATH)) - printf("ompt_tool_windows(): ompt_tool found in module %s\n", - modName); -#endif - free(modules); - return ompt_tool_p(); - } -#if OMPT_DEBUG - else { - TCHAR modName[MAX_PATH]; - if (GetModuleFileName(modules[i], modName, MAX_PATH)) - printf("ompt_tool_windows(): ompt_tool not found in module %s\n", - modName); - } -#endif - } - free(modules); - return NULL; -} -#else -# error Either __attribute__((weak)) or psapi.dll are required for OMPT support -#endif // OMPT_HAVE_WEAK_ATTRIBUTE - -void ompt_pre_init() -{ - //-------------------------------------------------- - // Execute the pre-initialization logic only once. - //-------------------------------------------------- - static int ompt_pre_initialized = 0; - - if (ompt_pre_initialized) return; - - ompt_pre_initialized = 1; - - //-------------------------------------------------- - // Use a tool iff a tool is enabled and available. - //-------------------------------------------------- - const char *ompt_env_var = getenv("OMP_TOOL"); - tool_setting_e tool_setting = omp_tool_error; - - if (!ompt_env_var || !strcmp(ompt_env_var, "")) - tool_setting = omp_tool_unset; - else if (OMPT_STR_MATCH(ompt_env_var, "disabled")) - tool_setting = omp_tool_disabled; - else if (OMPT_STR_MATCH(ompt_env_var, "enabled")) - tool_setting = omp_tool_enabled; - -#if OMPT_DEBUG - printf("ompt_pre_init(): tool_setting = %d\n", tool_setting); -#endif - switch(tool_setting) { - case omp_tool_disabled: - break; - - case omp_tool_unset: - case omp_tool_enabled: - ompt_initialize_fn = ompt_tool(); - if (ompt_initialize_fn) { - ompt_enabled = 1; - } - break; - - case omp_tool_error: - fprintf(stderr, - "Warning: OMP_TOOL has invalid value \"%s\".\n" - " legal values are (NULL,\"\",\"disabled\"," - "\"enabled\").\n", ompt_env_var); - break; - } -#if OMPT_DEBUG - printf("ompt_pre_init(): ompt_enabled = %d\n", ompt_enabled); -#endif -} - - -void ompt_post_init() -{ - //-------------------------------------------------- - // Execute the post-initialization logic only once. - //-------------------------------------------------- - static int ompt_post_initialized = 0; - - if (ompt_post_initialized) return; - - ompt_post_initialized = 1; - - //-------------------------------------------------- - // Initialize the tool if so indicated. 
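The OMP_TOOL decision table implemented by ompt_pre_init() above: unset or empty behaves like "enabled" (the runtime probes for a tool), "disabled" skips the probe entirely, and any other value earns the warning. A standalone sketch of the same dispatch; strcasecmp is the POSIX spelling behind OMPT_STR_MATCH, which the #ifndef earlier in the file lets a platform override:

    #include <cstdio>
    #include <cstdlib>
    #include <strings.h>  // strcasecmp (POSIX)

    // OMP_TOOL dispatch as in ompt_pre_init(), reduced to a runnable sketch.
    int main() {
        const char *v = std::getenv("OMP_TOOL");
        if (v == nullptr || *v == '\0')
            std::puts("unset: probe for a tool; enable OMPT if one is found");
        else if (strcasecmp(v, "disabled") == 0)
            std::puts("disabled: never call ompt_tool()");
        else if (strcasecmp(v, "enabled") == 0)
            std::puts("enabled: probe for a tool; enable OMPT if one is found");
        else
            std::puts("error: warn that OMP_TOOL has an invalid value");
        return 0;
    }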
- //-------------------------------------------------- - if (ompt_enabled) { - ompt_initialize_fn(ompt_fn_lookup, ompt_get_runtime_version(), - OMPT_VERSION); - - ompt_thread_t *root_thread = ompt_get_thread(); - - ompt_set_thread_state(root_thread, ompt_state_overhead); - - if (ompt_callbacks.ompt_callback(ompt_event_thread_begin)) { - ompt_callbacks.ompt_callback(ompt_event_thread_begin) - (ompt_thread_initial, ompt_get_thread_id()); - } - - ompt_set_thread_state(root_thread, ompt_state_work_serial); - } -} - - -void ompt_fini() -{ - if (ompt_enabled) { - if (ompt_callbacks.ompt_callback(ompt_event_runtime_shutdown)) { - ompt_callbacks.ompt_callback(ompt_event_runtime_shutdown)(); - } - } - - ompt_enabled = 0; -} - - -/***************************************************************************** - * interface operations - ****************************************************************************/ - -/***************************************************************************** - * state - ****************************************************************************/ - -OMPT_API_ROUTINE int ompt_enumerate_state(int current_state, int *next_state, - const char **next_state_name) -{ - const static int len = sizeof(ompt_state_info) / sizeof(ompt_state_info_t); - int i = 0; - - for (i = 0; i < len - 1; i++) { - if (ompt_state_info[i].state_id == current_state) { - *next_state = ompt_state_info[i+1].state_id; - *next_state_name = ompt_state_info[i+1].state_name; - return 1; - } - } - - return 0; -} - - - -/***************************************************************************** - * callbacks - ****************************************************************************/ - -OMPT_API_ROUTINE int ompt_set_callback(ompt_event_t evid, ompt_callback_t cb) -{ - switch (evid) { - -#define ompt_event_macro(event_name, callback_type, event_id) \ - case event_name: \ - if (ompt_event_implementation_status(event_name)) { \ - ompt_callbacks.ompt_callback(event_name) = (callback_type) cb; \ - } \ - return ompt_event_implementation_status(event_name); - - FOREACH_OMPT_EVENT(ompt_event_macro) - -#undef ompt_event_macro - - default: return ompt_set_result_registration_error; - } -} - - -OMPT_API_ROUTINE int ompt_get_callback(ompt_event_t evid, ompt_callback_t *cb) -{ - switch (evid) { - -#define ompt_event_macro(event_name, callback_type, event_id) \ - case event_name: \ - if (ompt_event_implementation_status(event_name)) { \ - ompt_callback_t mycb = \ - (ompt_callback_t) ompt_callbacks.ompt_callback(event_name); \ - if (mycb) { \ - *cb = mycb; \ - return ompt_get_callback_success; \ - } \ - } \ - return ompt_get_callback_failure; - - FOREACH_OMPT_EVENT(ompt_event_macro) - -#undef ompt_event_macro - - default: return ompt_get_callback_failure; - } -} - - -/***************************************************************************** - * parallel regions - ****************************************************************************/ - -OMPT_API_ROUTINE ompt_parallel_id_t ompt_get_parallel_id(int ancestor_level) -{ - return __ompt_get_parallel_id_internal(ancestor_level); -} - - -OMPT_API_ROUTINE int ompt_get_parallel_team_size(int ancestor_level) -{ - return __ompt_get_parallel_team_size_internal(ancestor_level); -} - - -OMPT_API_ROUTINE void *ompt_get_parallel_function(int ancestor_level) -{ - return __ompt_get_parallel_function_internal(ancestor_level); -} - - -OMPT_API_ROUTINE ompt_state_t ompt_get_state(ompt_wait_id_t *ompt_wait_id) -{ - ompt_state_t thread_state = 
__ompt_get_state_internal(ompt_wait_id); - - if (thread_state == ompt_state_undefined) { - thread_state = ompt_state_work_serial; - } - - return thread_state; -} - - - -/***************************************************************************** - * threads - ****************************************************************************/ - - -OMPT_API_ROUTINE void *ompt_get_idle_frame() -{ - return __ompt_get_idle_frame_internal(); -} - - - -/***************************************************************************** - * tasks - ****************************************************************************/ - - -OMPT_API_ROUTINE ompt_thread_id_t ompt_get_thread_id(void) -{ - return __ompt_get_thread_id_internal(); -} - -OMPT_API_ROUTINE ompt_task_id_t ompt_get_task_id(int depth) -{ - return __ompt_get_task_id_internal(depth); -} - - -OMPT_API_ROUTINE ompt_frame_t *ompt_get_task_frame(int depth) -{ - return __ompt_get_task_frame_internal(depth); -} - - -OMPT_API_ROUTINE void *ompt_get_task_function(int depth) -{ - return __ompt_get_task_function_internal(depth); -} - - -/***************************************************************************** - * placeholders - ****************************************************************************/ - -// Don't define this as static. The loader may choose to eliminate the symbol -// even though it is needed by tools. -#define OMPT_API_PLACEHOLDER - -// Ensure that placeholders don't have mangled names in the symbol table. -#ifdef __cplusplus -extern "C" { -#endif - - -OMPT_API_PLACEHOLDER void ompt_idle(void) -{ - // This function is a placeholder used to represent the calling context of - // idle OpenMP worker threads. It is not meant to be invoked. - assert(0); -} - - -OMPT_API_PLACEHOLDER void ompt_overhead(void) -{ - // This function is a placeholder used to represent the OpenMP context of - // threads working in the OpenMP runtime. It is not meant to be invoked. - assert(0); -} - - -OMPT_API_PLACEHOLDER void ompt_barrier_wait(void) -{ - // This function is a placeholder used to represent the OpenMP context of - // threads waiting for a barrier in the OpenMP runtime. It is not meant - // to be invoked. - assert(0); -} - - -OMPT_API_PLACEHOLDER void ompt_task_wait(void) -{ - // This function is a placeholder used to represent the OpenMP context of - // threads waiting for a task in the OpenMP runtime. It is not meant - // to be invoked. - assert(0); -} - - -OMPT_API_PLACEHOLDER void ompt_mutex_wait(void) -{ - // This function is a placeholder used to represent the OpenMP context of - // threads waiting for a mutex in the OpenMP runtime. It is not meant - // to be invoked. 
- assert(0); -} - -#ifdef __cplusplus -}; -#endif - - -/***************************************************************************** - * compatability - ****************************************************************************/ - -OMPT_API_ROUTINE int ompt_get_ompt_version() -{ - return OMPT_VERSION; -} - - - -/***************************************************************************** - * application-facing API - ****************************************************************************/ - - -/*---------------------------------------------------------------------------- - | control - ---------------------------------------------------------------------------*/ - -_OMP_EXTERN void ompt_control(uint64_t command, uint64_t modifier) -{ - if (ompt_enabled && ompt_callbacks.ompt_callback(ompt_event_control)) { - ompt_callbacks.ompt_callback(ompt_event_control)(command, modifier); - } -} - - - -/***************************************************************************** - * API inquiry for tool - ****************************************************************************/ - -static ompt_interface_fn_t ompt_fn_lookup(const char *s) -{ - -#define ompt_interface_fn(fn) \ - if (strcmp(s, #fn) == 0) return (ompt_interface_fn_t) fn; - - FOREACH_OMPT_INQUIRY_FN(ompt_interface_fn) - - FOREACH_OMPT_PLACEHOLDER_FN(ompt_interface_fn) - - return (ompt_interface_fn_t) 0; -} Index: runtime/src/ompt-general.cpp =================================================================== --- runtime/src/ompt-general.cpp +++ runtime/src/ompt-general.cpp @@ -15,7 +15,7 @@ * ompt include files ****************************************************************************/ -#include "ompt-specific.c" +#include "ompt-specific.cpp" Index: runtime/src/ompt-specific.c =================================================================== --- runtime/src/ompt-specific.c +++ runtime/src/ompt-specific.c @@ -1,337 +0,0 @@ -//****************************************************************************** -// include files -//****************************************************************************** - -#include "kmp.h" -#include "ompt-internal.h" -#include "ompt-specific.h" - -//****************************************************************************** -// macros -//****************************************************************************** - -#define GTID_TO_OMPT_THREAD_ID(id) ((ompt_thread_id_t) (id >=0) ? id + 1: 0) - -#define LWT_FROM_TEAM(team) (team)->t.ompt_serialized_team_info; - -#define OMPT_THREAD_ID_BITS 16 - -// 2013 08 24 - John Mellor-Crummey -// ideally, a thread should assign its own ids based on thread private data. -// however, the way the intel runtime reinitializes thread data structures -// when it creates teams makes it difficult to maintain persistent thread -// data. using a shared variable instead is simple. I leave it to intel to -// sort out how to implement a higher performance version in their runtime. - -// when using fetch_and_add to generate the IDs, there isn't any reason to waste -// bits for thread id. 
-#if 0 -#define NEXT_ID(id_ptr,tid) \ - ((KMP_TEST_THEN_INC64(id_ptr) << OMPT_THREAD_ID_BITS) | (tid)) -#else -#define NEXT_ID(id_ptr,tid) (KMP_TEST_THEN_INC64((volatile kmp_int64 *)id_ptr)) -#endif - -//****************************************************************************** -// private operations -//****************************************************************************** - -//---------------------------------------------------------- -// traverse the team and task hierarchy -// note: __ompt_get_teaminfo and __ompt_get_taskinfo -// traverse the hierarchy similarly and need to be -// kept consistent -//---------------------------------------------------------- - -ompt_team_info_t * -__ompt_get_teaminfo(int depth, int *size) -{ - kmp_info_t *thr = ompt_get_thread(); - - if (thr) { - kmp_team *team = thr->th.th_team; - if (team == NULL) return NULL; - - ompt_lw_taskteam_t *lwt = LWT_FROM_TEAM(team); - - while(depth > 0) { - // next lightweight team (if any) - if (lwt) lwt = lwt->parent; - - // next heavyweight team (if any) after - // lightweight teams are exhausted - if (!lwt && team) { - team=team->t.t_parent; - if (team) { - lwt = LWT_FROM_TEAM(team); - } - } - - depth--; - } - - if (lwt) { - // lightweight teams have one task - if (size) *size = 1; - - // return team info for lightweight team - return &lwt->ompt_team_info; - } else if (team) { - // extract size from heavyweight team - if (size) *size = team->t.t_nproc; - - // return team info for heavyweight team - return &team->t.ompt_team_info; - } - } - - return NULL; -} - - -ompt_task_info_t * -__ompt_get_taskinfo(int depth) -{ - ompt_task_info_t *info = NULL; - kmp_info_t *thr = ompt_get_thread(); - - if (thr) { - kmp_taskdata_t *taskdata = thr->th.th_current_task; - ompt_lw_taskteam_t *lwt = LWT_FROM_TEAM(taskdata->td_team); - - while (depth > 0) { - // next lightweight team (if any) - if (lwt) lwt = lwt->parent; - - // next heavyweight team (if any) after - // lightweight teams are exhausted - if (!lwt && taskdata) { - taskdata = taskdata->td_parent; - if (taskdata) { - lwt = LWT_FROM_TEAM(taskdata->td_team); - } - } - depth--; - } - - if (lwt) { - info = &lwt->ompt_task_info; - } else if (taskdata) { - info = &taskdata->ompt_task_info; - } - } - - return info; -} - - - -//****************************************************************************** -// interface operations -//****************************************************************************** - -//---------------------------------------------------------- -// thread support -//---------------------------------------------------------- - -ompt_parallel_id_t -__ompt_thread_id_new() -{ - static uint64_t ompt_thread_id = 1; - return NEXT_ID(&ompt_thread_id, 0); -} - -void -__ompt_thread_begin(ompt_thread_type_t thread_type, int gtid) -{ - ompt_callbacks.ompt_callback(ompt_event_thread_begin)( - thread_type, GTID_TO_OMPT_THREAD_ID(gtid)); -} - - -void -__ompt_thread_end(ompt_thread_type_t thread_type, int gtid) -{ - ompt_callbacks.ompt_callback(ompt_event_thread_end)( - thread_type, GTID_TO_OMPT_THREAD_ID(gtid)); -} - - -ompt_thread_id_t -__ompt_get_thread_id_internal() -{ - // FIXME - // until we have a better way of assigning ids, use __kmp_get_gtid - // since the return value might be negative, we need to test that before - // assigning it to an ompt_thread_id_t, which is unsigned. 
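ID generation, as the NEXT_ID macro above shows, is nothing more than a shared 64-bit counter bumped with an atomic fetch-and-add; the #if 0 variant would additionally pack the caller's thread id into the low OMPT_THREAD_ID_BITS. Both flavors in miniature, sketched with std::atomic in place of KMP_TEST_THEN_INC64:

#include <atomic>
#include <cstdint>

static const int kTidBits = 16; // OMPT_THREAD_ID_BITS

// Disabled variant: counter in the high bits, thread id in the low bits.
static uint64_t next_id_packed(std::atomic<uint64_t> *counter, int tid) {
    return (counter->fetch_add(1) << kTidBits) | (uint64_t)tid;
}

// Active variant: a plain fetch-and-add, no bits wasted on the thread id.
static uint64_t next_id_flat(std::atomic<uint64_t> *counter) {
    return counter->fetch_add(1);
}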
- int id = __kmp_get_gtid(); - assert(id >= 0); - - return GTID_TO_OMPT_THREAD_ID(id); -} - -//---------------------------------------------------------- -// state support -//---------------------------------------------------------- - -void -__ompt_thread_assign_wait_id(void *variable) -{ - int gtid = __kmp_gtid_get_specific(); - kmp_info_t *ti = ompt_get_thread_gtid(gtid); - - ti->th.ompt_thread_info.wait_id = (ompt_wait_id_t) variable; -} - -ompt_state_t -__ompt_get_state_internal(ompt_wait_id_t *ompt_wait_id) -{ - kmp_info_t *ti = ompt_get_thread(); - - if (ti) { - if (ompt_wait_id) - *ompt_wait_id = ti->th.ompt_thread_info.wait_id; - return ti->th.ompt_thread_info.state; - } - return ompt_state_undefined; -} - -//---------------------------------------------------------- -// idle frame support -//---------------------------------------------------------- - -void * -__ompt_get_idle_frame_internal(void) -{ - kmp_info_t *ti = ompt_get_thread(); - return ti ? ti->th.ompt_thread_info.idle_frame : NULL; -} - - -//---------------------------------------------------------- -// parallel region support -//---------------------------------------------------------- - -ompt_parallel_id_t -__ompt_parallel_id_new(int gtid) -{ - static uint64_t ompt_parallel_id = 1; - return gtid >= 0 ? NEXT_ID(&ompt_parallel_id, gtid) : 0; -} - - -void * -__ompt_get_parallel_function_internal(int depth) -{ - ompt_team_info_t *info = __ompt_get_teaminfo(depth, NULL); - void *function = info ? info->microtask : NULL; - return function; -} - - -ompt_parallel_id_t -__ompt_get_parallel_id_internal(int depth) -{ - ompt_team_info_t *info = __ompt_get_teaminfo(depth, NULL); - ompt_parallel_id_t id = info ? info->parallel_id : 0; - return id; -} - - -int -__ompt_get_parallel_team_size_internal(int depth) -{ - // initialize the return value with the error value. - // if there is a team at the specified depth, the default - // value will be overwritten the size of that team. 
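Seen from a tool, the depth argument threaded through these internal routines is the ancestor level of the inquiry API: level 0 is the innermost parallel region, and an id of 0 means the traversal ran out of both lightweight and heavyweight teams. A sketch of a tool walking outward, with the typedefs mirrored locally rather than taken from ompt.h:

#include <stdio.h>

typedef unsigned long long parallel_id_t;         // mirrors ompt_parallel_id_t
typedef parallel_id_t (*get_parallel_id_fn)(int); // ompt_get_parallel_id above

static void print_region_stack(get_parallel_id_fn get_parallel_id) {
    for (int level = 0;; ++level) {
        parallel_id_t id = get_parallel_id(level);
        if (id == 0)
            break; // __ompt_get_parallel_id_internal found no team here
        printf("ancestor %d: region id %llu\n", level,
               (unsigned long long)id);
    }
}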
- int size = -1; - (void) __ompt_get_teaminfo(depth, &size); - return size; -} - - -//---------------------------------------------------------- -// lightweight task team support -//---------------------------------------------------------- - -void -__ompt_lw_taskteam_init(ompt_lw_taskteam_t *lwt, kmp_info_t *thr, - int gtid, void *microtask, - ompt_parallel_id_t ompt_pid) -{ - lwt->ompt_team_info.parallel_id = ompt_pid; - lwt->ompt_team_info.microtask = microtask; - lwt->ompt_task_info.task_id = 0; - lwt->ompt_task_info.frame.reenter_runtime_frame = NULL; - lwt->ompt_task_info.frame.exit_runtime_frame = NULL; - lwt->ompt_task_info.function = NULL; - lwt->parent = 0; -} - - -void -__ompt_lw_taskteam_link(ompt_lw_taskteam_t *lwt, kmp_info_t *thr) -{ - ompt_lw_taskteam_t *my_parent = thr->th.th_team->t.ompt_serialized_team_info; - lwt->parent = my_parent; - thr->th.th_team->t.ompt_serialized_team_info = lwt; -} - - -ompt_lw_taskteam_t * -__ompt_lw_taskteam_unlink(kmp_info_t *thr) -{ - ompt_lw_taskteam_t *lwtask = thr->th.th_team->t.ompt_serialized_team_info; - if (lwtask) thr->th.th_team->t.ompt_serialized_team_info = lwtask->parent; - return lwtask; -} - - -//---------------------------------------------------------- -// task support -//---------------------------------------------------------- - -ompt_task_id_t -__ompt_task_id_new(int gtid) -{ - static uint64_t ompt_task_id = 1; - return NEXT_ID(&ompt_task_id, gtid); -} - - -ompt_task_id_t -__ompt_get_task_id_internal(int depth) -{ - ompt_task_info_t *info = __ompt_get_taskinfo(depth); - ompt_task_id_t task_id = info ? info->task_id : 0; - return task_id; -} - - -void * -__ompt_get_task_function_internal(int depth) -{ - ompt_task_info_t *info = __ompt_get_taskinfo(depth); - void *function = info ? info->function : NULL; - return function; -} - - -ompt_frame_t * -__ompt_get_task_frame_internal(int depth) -{ - ompt_task_info_t *info = __ompt_get_taskinfo(depth); - ompt_frame_t *frame = info ? frame = &info->frame : NULL; - return frame; -} - - -//---------------------------------------------------------- -// team support -//---------------------------------------------------------- - -void -__ompt_team_assign_id(kmp_team_t *team, ompt_parallel_id_t ompt_pid) -{ - team->t.ompt_team_info.parallel_id = ompt_pid; -} Index: runtime/src/tsan_annotations.c =================================================================== --- runtime/src/tsan_annotations.c +++ runtime/src/tsan_annotations.c @@ -1,63 +0,0 @@ -/* - * tsan_annotations.c -- ThreadSanitizer annotations to support data - * race detection in OpenMP programs. - */ - - -//===----------------------------------------------------------------------===// -// -// The LLVM Compiler Infrastructure -// -// This file is dual licensed under the MIT and the University of Illinois Open -// Source Licenses. See LICENSE.txt for details. 
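tsan_annotations.c, deleted here and recreated verbatim as tsan_annotations.cpp, consists entirely of weak no-op entry points: runtime code calls the annotations unconditionally, and when the process runs under ThreadSanitizer the strong definitions inside the TSan runtime take over. A self-contained sketch of what a caller of two of these annotations looks like (the runtime itself goes through wrapper macros in tsan_annotations.h):

#include <atomic>

typedef unsigned long uptr;
extern "C" void AnnotateHappensBefore(const char *f, int l, uptr addr);
extern "C" void AnnotateHappensAfter(const char *f, int l, uptr addr);

static std::atomic<int> flag(0);
static int payload; // ordinary data handed from producer to consumer

void producer(void) {
    payload = 42;
    // Declare the ordering to TSan; the runtime uses this for synchronization
    // (fences, futex waits, ...) that TSan cannot deduce on its own.
    AnnotateHappensBefore(__FILE__, __LINE__, (uptr)&flag);
    flag.store(1, std::memory_order_release);
}

void consumer(void) {
    while (!flag.load(std::memory_order_acquire)) {
    }
    AnnotateHappensAfter(__FILE__, __LINE__, (uptr)&flag);
    payload++; // no race report: the annotations pair up on &flag
}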
-// -//===----------------------------------------------------------------------===// - -#include "tsan_annotations.h" - -#include <stdio.h> - -typedef unsigned long uptr; -typedef signed long sptr; - -extern "C" __attribute__((weak)) void AnnotateHappensBefore(const char *f, int l, uptr addr) {} -extern "C" __attribute__((weak)) void AnnotateHappensAfter(const char *f, int l, uptr addr) {} -extern "C" __attribute__((weak)) void AnnotateCondVarSignal(const char *f, int l, uptr cv) {} -extern "C" __attribute__((weak)) void AnnotateCondVarSignalAll(const char *f, int l, uptr cv) {} -extern "C" __attribute__((weak)) void AnnotateMutexIsNotPHB(const char *f, int l, uptr mu) {} -extern "C" __attribute__((weak)) void AnnotateCondVarWait(const char *f, int l, uptr cv, uptr lock) {} -extern "C" __attribute__((weak)) void AnnotateRWLockCreate(const char *f, int l, uptr m) {} -extern "C" __attribute__((weak)) void AnnotateRWLockCreateStatic(const char *f, int l, uptr m) {} -extern "C" __attribute__((weak)) void AnnotateRWLockDestroy(const char *f, int l, uptr m) {} -extern "C" __attribute__((weak)) void AnnotateRWLockAcquired(const char *f, int l, uptr m, uptr is_w) {} -extern "C" __attribute__((weak)) void AnnotateRWLockReleased(const char *f, int l, uptr m, uptr is_w) {} -extern "C" __attribute__((weak)) void AnnotateTraceMemory(const char *f, int l, uptr mem) {} -extern "C" __attribute__((weak)) void AnnotateFlushState(const char *f, int l) {} -extern "C" __attribute__((weak)) void AnnotateNewMemory(const char *f, int l, uptr mem, uptr size) {} -extern "C" __attribute__((weak)) void AnnotateNoOp(const char *f, int l, uptr mem) {} -extern "C" __attribute__((weak)) void AnnotateFlushExpectedRaces(const char *f, int l) {} -extern "C" __attribute__((weak)) void AnnotateEnableRaceDetection( const char *f, int l, int enable) {} -extern "C" __attribute__((weak)) void AnnotateMutexIsUsedAsCondVar( const char *f, int l, uptr mu) {} -extern "C" __attribute__((weak)) void AnnotatePCQGet( const char *f, int l, uptr pcq) {} -extern "C" __attribute__((weak)) void AnnotatePCQPut( const char *f, int l, uptr pcq) {} -extern "C" __attribute__((weak)) void AnnotatePCQDestroy( const char *f, int l, uptr pcq) {} -extern "C" __attribute__((weak)) void AnnotatePCQCreate( const char *f, int l, uptr pcq) {} -extern "C" __attribute__((weak)) void AnnotateExpectRace( const char *f, int l, uptr mem, char *desc) {} -extern "C" __attribute__((weak)) void AnnotateBenignRaceSized( const char *f, int l, uptr mem, uptr size, char *desc) {} -extern "C" __attribute__((weak)) void AnnotateBenignRace( const char *f, int l, uptr mem, char *desc) {} -extern "C" __attribute__((weak)) void AnnotateIgnoreReadsBegin(const char *f, int l) {} -extern "C" __attribute__((weak)) void AnnotateIgnoreReadsEnd(const char *f, int l) {} -extern "C" __attribute__((weak)) void AnnotateIgnoreWritesBegin(const char *f, int l) {} -extern "C" __attribute__((weak)) void AnnotateIgnoreWritesEnd(const char *f, int l) {} -extern "C" __attribute__((weak)) void AnnotateIgnoreSyncBegin(const char *f, int l) {} -extern "C" __attribute__((weak)) void AnnotateIgnoreSyncEnd(const char *f, int l) {} -extern "C" __attribute__((weak)) void AnnotatePublishMemoryRange( const char *f, int l, uptr addr, uptr size) {} -extern "C" __attribute__((weak)) void AnnotateUnpublishMemoryRange( const char *f, int l, uptr addr, uptr size) {} -extern "C" __attribute__((weak)) void AnnotateThreadName( const char *f, int l, char *name) {} -extern "C" __attribute__((weak)) void
WTFAnnotateHappensBefore(const char *f, int l, uptr addr) {} -extern "C" __attribute__((weak)) void WTFAnnotateHappensAfter(const char *f, int l, uptr addr) {} -extern "C" __attribute__((weak)) void WTFAnnotateBenignRaceSized( const char *f, int l, uptr mem, uptr sz, char *desc) {} -extern "C" __attribute__((weak)) int RunningOnValgrind() {return 0;} -extern "C" __attribute__((weak)) double ValgrindSlowdown(void) {return 0;} -extern "C" __attribute__((weak)) const char __attribute__((weak))* ThreadSanitizerQuery(const char *query) {return 0;} -extern "C" __attribute__((weak)) void AnnotateMemoryIsInitialized(const char *f, int l, uptr mem, uptr sz) {} Index: runtime/src/tsan_annotations.cpp =================================================================== --- runtime/src/tsan_annotations.cpp +++ runtime/src/tsan_annotations.cpp @@ -1,5 +1,5 @@ /* - * tsan_annotations.c -- ThreadSanitizer annotations to support data + * tsan_annotations.cpp -- ThreadSanitizer annotations to support data * race detection in OpenMP programs. */ Index: runtime/src/z_Linux_util.c =================================================================== --- runtime/src/z_Linux_util.c +++ runtime/src/z_Linux_util.c @@ -1,2612 +0,0 @@ -/* - * z_Linux_util.c -- platform specific routines. - */ - - -//===----------------------------------------------------------------------===// -// -// The LLVM Compiler Infrastructure -// -// This file is dual licensed under the MIT and the University of Illinois Open -// Source Licenses. See LICENSE.txt for details. -// -//===----------------------------------------------------------------------===// - - -#include "kmp.h" -#include "kmp_wrapper_getpid.h" -#include "kmp_itt.h" -#include "kmp_str.h" -#include "kmp_i18n.h" -#include "kmp_lock.h" -#include "kmp_io.h" -#include "kmp_stats.h" -#include "kmp_wait_release.h" -#include "kmp_affinity.h" - -#if !KMP_OS_FREEBSD && !KMP_OS_NETBSD -# include <alloca.h> -#endif -#include <unistd.h> -#include <math.h> // HUGE_VAL. -#include <sys/time.h> -#include <sys/times.h> -#include <sys/resource.h> -#include <sys/syscall.h> - -#if KMP_OS_LINUX && !KMP_OS_CNK -# include <sys/sysinfo.h> -# if KMP_USE_FUTEX -// We should really include <futex.h>, but that causes compatibility problems on different -// Linux* OS distributions that either require that you include (or break when you try to include) -// <pci/types.h>. -// Since all we need is the two macros below (which are part of the kernel ABI, so can't change) -// we just define the constants here and don't include <futex.h> -# ifndef FUTEX_WAIT -# define FUTEX_WAIT 0 -# endif -# ifndef FUTEX_WAKE -# define FUTEX_WAKE 1 -# endif -# endif -#elif KMP_OS_DARWIN -# include <sys/sysctl.h> -# include <mach/mach.h> -#elif KMP_OS_FREEBSD -# include <pthread_np.h> -#endif - -#include <dirent.h> -#include <ctype.h> -#include <fcntl.h> - -#include "tsan_annotations.h" - -/* ------------------------------------------------------------------------ */ -/* ------------------------------------------------------------------------ */ - -struct kmp_sys_timer { - struct timespec start; -}; - -// Convert timespec to nanoseconds.
-#define TS2NS(timespec) (((timespec).tv_sec * 1e9) + (timespec).tv_nsec) - -static struct kmp_sys_timer __kmp_sys_timer_data; - -#if KMP_HANDLE_SIGNALS - typedef void (* sig_func_t )( int ); - STATIC_EFI2_WORKAROUND struct sigaction __kmp_sighldrs[ NSIG ]; - static sigset_t __kmp_sigset; -#endif - -static int __kmp_init_runtime = FALSE; - -static int __kmp_fork_count = 0; - -static pthread_condattr_t __kmp_suspend_cond_attr; -static pthread_mutexattr_t __kmp_suspend_mutex_attr; - -static kmp_cond_align_t __kmp_wait_cv; -static kmp_mutex_align_t __kmp_wait_mx; - -double __kmp_ticks_per_nsec; - -/* ------------------------------------------------------------------------ */ -/* ------------------------------------------------------------------------ */ - -#ifdef DEBUG_SUSPEND -static void -__kmp_print_cond( char *buffer, kmp_cond_align_t *cond ) -{ - KMP_SNPRINTF( buffer, 128, "(cond (lock (%ld, %d)), (descr (%p)))", - cond->c_cond.__c_lock.__status, cond->c_cond.__c_lock.__spinlock, - cond->c_cond.__c_waiting ); -} -#endif - -/* ------------------------------------------------------------------------ */ -/* ------------------------------------------------------------------------ */ - -#if ( KMP_OS_LINUX && KMP_AFFINITY_SUPPORTED) - -/* - * Affinity support - */ - -void -__kmp_affinity_bind_thread( int which ) -{ - KMP_ASSERT2(KMP_AFFINITY_CAPABLE(), - "Illegal set affinity operation when not capable"); - - kmp_affin_mask_t *mask; - KMP_CPU_ALLOC_ON_STACK(mask); - KMP_CPU_ZERO(mask); - KMP_CPU_SET(which, mask); - __kmp_set_system_affinity(mask, TRUE); - KMP_CPU_FREE_FROM_STACK(mask); -} - -/* - * Determine if we can access affinity functionality on this version of - * Linux* OS by checking __NR_sched_{get,set}affinity system calls, and set - * __kmp_affin_mask_size to the appropriate value (0 means not capable). - */ -void -__kmp_affinity_determine_capable(const char *env_var) -{ - // - // Check and see if the OS supports thread affinity. - // - -# define KMP_CPU_SET_SIZE_LIMIT (1024*1024) - - int gCode; - int sCode; - unsigned char *buf; - buf = ( unsigned char * ) KMP_INTERNAL_MALLOC( KMP_CPU_SET_SIZE_LIMIT ); - - // If Linux* OS: - // If the syscall fails or returns a suggestion for the size, - // then we don't have to search for an appropriate size. - gCode = syscall( __NR_sched_getaffinity, 0, KMP_CPU_SET_SIZE_LIMIT, buf ); - KA_TRACE(30, ( "__kmp_affinity_determine_capable: " - "initial getaffinity call returned %d errno = %d\n", - gCode, errno)); - - //if ((gCode < 0) && (errno == ENOSYS)) - if (gCode < 0) { - // - // System call not supported - // - if (__kmp_affinity_verbose || (__kmp_affinity_warnings - && (__kmp_affinity_type != affinity_none) - && (__kmp_affinity_type != affinity_default) - && (__kmp_affinity_type != affinity_disabled))) { - int error = errno; - kmp_msg_t err_code = KMP_ERR( error ); - __kmp_msg( - kmp_ms_warning, - KMP_MSG( GetAffSysCallNotSupported, env_var ), - err_code, - __kmp_msg_null - ); - if (__kmp_generate_warnings == kmp_warnings_off) { - __kmp_str_free(&err_code.str); - } - } - KMP_AFFINITY_DISABLE(); - KMP_INTERNAL_FREE(buf); - return; - } - if (gCode > 0) { // Linux* OS only - // The optimal situation: the OS returns the size of the buffer - // it expects. - // - // A verification of correct behavior is that Isetaffinity on a NULL - // buffer with the same size fails with errno set to EFAULT. 
- sCode = syscall( __NR_sched_setaffinity, 0, gCode, NULL ); - KA_TRACE(30, ( "__kmp_affinity_determine_capable: " - "setaffinity for mask size %d returned %d errno = %d\n", - gCode, sCode, errno)); - if (sCode < 0) { - if (errno == ENOSYS) { - if (__kmp_affinity_verbose || (__kmp_affinity_warnings - && (__kmp_affinity_type != affinity_none) - && (__kmp_affinity_type != affinity_default) - && (__kmp_affinity_type != affinity_disabled))) { - int error = errno; - kmp_msg_t err_code = KMP_ERR( error ); - __kmp_msg( - kmp_ms_warning, - KMP_MSG( SetAffSysCallNotSupported, env_var ), - err_code, - __kmp_msg_null - ); - if (__kmp_generate_warnings == kmp_warnings_off) { - __kmp_str_free(&err_code.str); - } - } - KMP_AFFINITY_DISABLE(); - KMP_INTERNAL_FREE(buf); - } - if (errno == EFAULT) { - KMP_AFFINITY_ENABLE(gCode); - KA_TRACE(10, ( "__kmp_affinity_determine_capable: " - "affinity supported (mask size %d)\n", - (int)__kmp_affin_mask_size)); - KMP_INTERNAL_FREE(buf); - return; - } - } - } - - // - // Call the getaffinity system call repeatedly with increasing set sizes - // until we succeed, or reach an upper bound on the search. - // - KA_TRACE(30, ( "__kmp_affinity_determine_capable: " - "searching for proper set size\n")); - int size; - for (size = 1; size <= KMP_CPU_SET_SIZE_LIMIT; size *= 2) { - gCode = syscall( __NR_sched_getaffinity, 0, size, buf ); - KA_TRACE(30, ( "__kmp_affinity_determine_capable: " - "getaffinity for mask size %d returned %d errno = %d\n", size, - gCode, errno)); - - if (gCode < 0) { - if ( errno == ENOSYS ) - { - // - // We shouldn't get here - // - KA_TRACE(30, ( "__kmp_affinity_determine_capable: " - "inconsistent OS call behavior: errno == ENOSYS for mask size %d\n", - size)); - if (__kmp_affinity_verbose || (__kmp_affinity_warnings - && (__kmp_affinity_type != affinity_none) - && (__kmp_affinity_type != affinity_default) - && (__kmp_affinity_type != affinity_disabled))) { - int error = errno; - kmp_msg_t err_code = KMP_ERR( error ); - __kmp_msg( - kmp_ms_warning, - KMP_MSG( GetAffSysCallNotSupported, env_var ), - err_code, - __kmp_msg_null - ); - if (__kmp_generate_warnings == kmp_warnings_off) { - __kmp_str_free(&err_code.str); - } - } - KMP_AFFINITY_DISABLE(); - KMP_INTERNAL_FREE(buf); - return; - } - continue; - } - - sCode = syscall( __NR_sched_setaffinity, 0, gCode, NULL ); - KA_TRACE(30, ( "__kmp_affinity_determine_capable: " - "setaffinity for mask size %d returned %d errno = %d\n", - gCode, sCode, errno)); - if (sCode < 0) { - if (errno == ENOSYS) { // Linux* OS only - // - // We shouldn't get here - // - KA_TRACE(30, ( "__kmp_affinity_determine_capable: " - "inconsistent OS call behavior: errno == ENOSYS for mask size %d\n", - size)); - if (__kmp_affinity_verbose || (__kmp_affinity_warnings - && (__kmp_affinity_type != affinity_none) - && (__kmp_affinity_type != affinity_default) - && (__kmp_affinity_type != affinity_disabled))) { - int error = errno; - kmp_msg_t err_code = KMP_ERR( error ); - __kmp_msg( - kmp_ms_warning, - KMP_MSG( SetAffSysCallNotSupported, env_var ), - err_code, - __kmp_msg_null - ); - if (__kmp_generate_warnings == kmp_warnings_off) { - __kmp_str_free(&err_code.str); - } - } - KMP_AFFINITY_DISABLE(); - KMP_INTERNAL_FREE(buf); - return; - } - if (errno == EFAULT) { - KMP_AFFINITY_ENABLE(gCode); - KA_TRACE(10, ( "__kmp_affinity_determine_capable: " - "affinity supported (mask size %d)\n", - (int)__kmp_affin_mask_size)); - KMP_INTERNAL_FREE(buf); - return; - } - } - } - //int error = errno; // save uncaught error code - 
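The probing strategy above, reduced to its essentials: call the raw sched_getaffinity syscall with growing buffer sizes until the kernel accepts one and returns its cpumask size (the glibc wrapper hides this negotiation, which is why the runtime calls syscall() directly). A Linux-only sketch:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/syscall.h>
#include <unistd.h>

static int probe_affinity_mask_size(void) {
    const int limit = 1024 * 1024; // KMP_CPU_SET_SIZE_LIMIT above
    unsigned char *buf = (unsigned char *)malloc(limit);
    for (int size = 1; size <= limit; size *= 2) {
        long rc = syscall(__NR_sched_getaffinity, 0, size, buf);
        if (rc > 0) { // kernel accepted this size: rc is the mask size
            free(buf);
            return (int)rc;
        }
        if (errno != EINVAL)
            break; // ENOSYS and friends: affinity is not supported
    }
    free(buf);
    return 0; // could not determine a usable mask size
}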
KMP_INTERNAL_FREE(buf); - // errno = error; // restore uncaught error code, will be printed at the next KMP_WARNING below - - // - // Affinity is not supported - // - KMP_AFFINITY_DISABLE(); - KA_TRACE(10, ( "__kmp_affinity_determine_capable: " - "cannot determine mask size - affinity not supported\n")); - if (__kmp_affinity_verbose || (__kmp_affinity_warnings - && (__kmp_affinity_type != affinity_none) - && (__kmp_affinity_type != affinity_default) - && (__kmp_affinity_type != affinity_disabled))) { - KMP_WARNING( AffCantGetMaskSize, env_var ); - } -} - -#endif // KMP_OS_LINUX && KMP_AFFINITY_SUPPORTED - -/* ------------------------------------------------------------------------ */ -/* ------------------------------------------------------------------------ */ - -#if KMP_USE_FUTEX - -int -__kmp_futex_determine_capable() -{ - int loc = 0; - int rc = syscall( __NR_futex, &loc, FUTEX_WAKE, 1, NULL, NULL, 0 ); - int retval = ( rc == 0 ) || ( errno != ENOSYS ); - - KA_TRACE(10, ( "__kmp_futex_determine_capable: rc = %d errno = %d\n", rc, - errno ) ); - KA_TRACE(10, ( "__kmp_futex_determine_capable: futex syscall%s supported\n", - retval ? "" : " not" ) ); - - return retval; -} - -#endif // KMP_USE_FUTEX - -/* ------------------------------------------------------------------------ */ -/* ------------------------------------------------------------------------ */ - -#if (KMP_ARCH_X86 || KMP_ARCH_X86_64) && (! KMP_ASM_INTRINS) -/* - * Only 32-bit "add-exchange" instruction on IA-32 architecture causes us to - * use compare_and_store for these routines - */ - -kmp_int8 -__kmp_test_then_or8( volatile kmp_int8 *p, kmp_int8 d ) -{ - kmp_int8 old_value, new_value; - - old_value = TCR_1( *p ); - new_value = old_value | d; - - while ( ! KMP_COMPARE_AND_STORE_REL8 ( p, old_value, new_value ) ) - { - KMP_CPU_PAUSE(); - old_value = TCR_1( *p ); - new_value = old_value | d; - } - return old_value; -} - -kmp_int8 -__kmp_test_then_and8( volatile kmp_int8 *p, kmp_int8 d ) -{ - kmp_int8 old_value, new_value; - - old_value = TCR_1( *p ); - new_value = old_value & d; - - while ( ! KMP_COMPARE_AND_STORE_REL8 ( p, old_value, new_value ) ) - { - KMP_CPU_PAUSE(); - old_value = TCR_1( *p ); - new_value = old_value & d; - } - return old_value; -} - -kmp_int32 -__kmp_test_then_or32( volatile kmp_int32 *p, kmp_int32 d ) -{ - kmp_int32 old_value, new_value; - - old_value = TCR_4( *p ); - new_value = old_value | d; - - while ( ! KMP_COMPARE_AND_STORE_REL32 ( p, old_value, new_value ) ) - { - KMP_CPU_PAUSE(); - old_value = TCR_4( *p ); - new_value = old_value | d; - } - return old_value; -} - -kmp_int32 -__kmp_test_then_and32( volatile kmp_int32 *p, kmp_int32 d ) -{ - kmp_int32 old_value, new_value; - - old_value = TCR_4( *p ); - new_value = old_value & d; - - while ( ! KMP_COMPARE_AND_STORE_REL32 ( p, old_value, new_value ) ) - { - KMP_CPU_PAUSE(); - old_value = TCR_4( *p ); - new_value = old_value & d; - } - return old_value; -} - -# if KMP_ARCH_X86 || KMP_ARCH_PPC64 || (KMP_OS_LINUX && KMP_ARCH_AARCH64) -kmp_int8 -__kmp_test_then_add8( volatile kmp_int8 *p, kmp_int8 d ) -{ - kmp_int8 old_value, new_value; - - old_value = TCR_1( *p ); - new_value = old_value + d; - - while ( ! KMP_COMPARE_AND_STORE_REL8 ( p, old_value, new_value ) ) - { - KMP_CPU_PAUSE(); - old_value = TCR_1( *p ); - new_value = old_value + d; - } - return old_value; -} - -kmp_int64 -__kmp_test_then_add64( volatile kmp_int64 *p, kmp_int64 d ) -{ - kmp_int64 old_value, new_value; - - old_value = TCR_8( *p ); - new_value = old_value + d; - - while ( ! 
KMP_COMPARE_AND_STORE_REL64 ( p, old_value, new_value ) ) - { - KMP_CPU_PAUSE(); - old_value = TCR_8( *p ); - new_value = old_value + d; - } - return old_value; -} -# endif /* KMP_ARCH_X86 || KMP_ARCH_PPC64 || (KMP_OS_LINUX && KMP_ARCH_AARCH64) */ - -kmp_int64 -__kmp_test_then_or64( volatile kmp_int64 *p, kmp_int64 d ) -{ - kmp_int64 old_value, new_value; - - old_value = TCR_8( *p ); - new_value = old_value | d; - while ( ! KMP_COMPARE_AND_STORE_REL64 ( p, old_value, new_value ) ) - { - KMP_CPU_PAUSE(); - old_value = TCR_8( *p ); - new_value = old_value | d; - } - return old_value; -} - -kmp_int64 -__kmp_test_then_and64( volatile kmp_int64 *p, kmp_int64 d ) -{ - kmp_int64 old_value, new_value; - - old_value = TCR_8( *p ); - new_value = old_value & d; - while ( ! KMP_COMPARE_AND_STORE_REL64 ( p, old_value, new_value ) ) - { - KMP_CPU_PAUSE(); - old_value = TCR_8( *p ); - new_value = old_value & d; - } - return old_value; -} - -#endif /* (KMP_ARCH_X86 || KMP_ARCH_X86_64) && (! KMP_ASM_INTRINS) */ - -void -__kmp_terminate_thread( int gtid ) -{ - int status; - kmp_info_t *th = __kmp_threads[ gtid ]; - - if ( !th ) return; - - #ifdef KMP_CANCEL_THREADS - KA_TRACE( 10, ("__kmp_terminate_thread: kill (%d)\n", gtid ) ); - status = pthread_cancel( th->th.th_info.ds.ds_thread ); - if ( status != 0 && status != ESRCH ) { - __kmp_msg( - kmp_ms_fatal, - KMP_MSG( CantTerminateWorkerThread ), - KMP_ERR( status ), - __kmp_msg_null - ); - }; // if - #endif - __kmp_yield( TRUE ); -} // - -/* ------------------------------------------------------------------------ */ -/* ------------------------------------------------------------------------ */ - -/* ------------------------------------------------------------------------ */ -/* ------------------------------------------------------------------------ */ - -/* - * Set thread stack info according to values returned by - * pthread_getattr_np(). - * If values are unreasonable, assume call failed and use - * incremental stack refinement method instead. - * Returns TRUE if the stack parameters could be determined exactly, - * FALSE if incremental refinement is necessary. - */ -static kmp_int32 -__kmp_set_stack_info( int gtid, kmp_info_t *th ) -{ - int stack_data; -#if KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD - /* Linux* OS only -- no pthread_getattr_np support on OS X* */ - pthread_attr_t attr; - int status; - size_t size = 0; - void * addr = 0; - - /* Always do incremental stack refinement for ubermaster threads since the initial - thread stack range can be reduced by sibling thread creation so pthread_attr_getstack - may cause thread gtid aliasing */ - if ( ! KMP_UBER_GTID(gtid) ) { - - /* Fetch the real thread attributes */ - status = pthread_attr_init( &attr ); - KMP_CHECK_SYSFAIL( "pthread_attr_init", status ); -#if KMP_OS_FREEBSD || KMP_OS_NETBSD - status = pthread_attr_get_np( pthread_self(), &attr ); - KMP_CHECK_SYSFAIL( "pthread_attr_get_np", status ); -#else - status = pthread_getattr_np( pthread_self(), &attr ); - KMP_CHECK_SYSFAIL( "pthread_getattr_np", status ); -#endif - status = pthread_attr_getstack( &attr, &addr, &size ); - KMP_CHECK_SYSFAIL( "pthread_attr_getstack", status ); - KA_TRACE( 60, ( "__kmp_set_stack_info: T#%d pthread_attr_getstack returned size: %lu, " - "low addr: %p\n", - gtid, size, addr )); - - status = pthread_attr_destroy( &attr ); - KMP_CHECK_SYSFAIL( "pthread_attr_destroy", status ); - } - - if ( size != 0 && addr != 0 ) { /* was stack parameter determination successful? 
*/ - /* Store the correct base and size */ - TCW_PTR(th->th.th_info.ds.ds_stackbase, (((char *)addr) + size)); - TCW_PTR(th->th.th_info.ds.ds_stacksize, size); - TCW_4(th->th.th_info.ds.ds_stackgrow, FALSE); - return TRUE; - } -#endif /* KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD */ - /* Use incremental refinement starting from initial conservative estimate */ - TCW_PTR(th->th.th_info.ds.ds_stacksize, 0); - TCW_PTR(th -> th.th_info.ds.ds_stackbase, &stack_data); - TCW_4(th->th.th_info.ds.ds_stackgrow, TRUE); - return FALSE; -} - -static void* -__kmp_launch_worker( void *thr ) -{ - int status, old_type, old_state; -#ifdef KMP_BLOCK_SIGNALS - sigset_t new_set, old_set; -#endif /* KMP_BLOCK_SIGNALS */ - void *exit_val; -#if KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD - void * volatile padding = 0; -#endif - int gtid; - - gtid = ((kmp_info_t*)thr) -> th.th_info.ds.ds_gtid; - __kmp_gtid_set_specific( gtid ); -#ifdef KMP_TDATA_GTID - __kmp_gtid = gtid; -#endif -#if KMP_STATS_ENABLED - // set __thread local index to point to thread-specific stats - __kmp_stats_thread_ptr = ((kmp_info_t*)thr)->th.th_stats; - KMP_START_EXPLICIT_TIMER(OMP_worker_thread_life); - KMP_SET_THREAD_STATE(IDLE); - KMP_INIT_PARTITIONED_TIMERS(OMP_idle); -#endif - -#if USE_ITT_BUILD - __kmp_itt_thread_name( gtid ); -#endif /* USE_ITT_BUILD */ - -#if KMP_AFFINITY_SUPPORTED - __kmp_affinity_set_init_mask( gtid, FALSE ); -#endif - -#ifdef KMP_CANCEL_THREADS - status = pthread_setcanceltype( PTHREAD_CANCEL_ASYNCHRONOUS, & old_type ); - KMP_CHECK_SYSFAIL( "pthread_setcanceltype", status ); - /* josh todo: isn't PTHREAD_CANCEL_ENABLE default for newly-created threads? */ - status = pthread_setcancelstate( PTHREAD_CANCEL_ENABLE, & old_state ); - KMP_CHECK_SYSFAIL( "pthread_setcancelstate", status ); -#endif - -#if KMP_ARCH_X86 || KMP_ARCH_X86_64 - // - // Set the FP control regs to be a copy of - // the parallel initialization thread's. - // - __kmp_clear_x87_fpu_status_word(); - __kmp_load_x87_fpu_control_word( &__kmp_init_x87_fpu_control_word ); - __kmp_load_mxcsr( &__kmp_init_mxcsr ); -#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */ - -#ifdef KMP_BLOCK_SIGNALS - status = sigfillset( & new_set ); - KMP_CHECK_SYSFAIL_ERRNO( "sigfillset", status ); - status = pthread_sigmask( SIG_BLOCK, & new_set, & old_set ); - KMP_CHECK_SYSFAIL( "pthread_sigmask", status ); -#endif /* KMP_BLOCK_SIGNALS */ - -#if KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD - if ( __kmp_stkoffset > 0 && gtid > 0 ) { - padding = KMP_ALLOCA( gtid * __kmp_stkoffset ); - } -#endif - - KMP_MB(); - __kmp_set_stack_info( gtid, (kmp_info_t*)thr ); - - __kmp_check_stack_overlap( (kmp_info_t*)thr ); - - exit_val = __kmp_launch_thread( (kmp_info_t *) thr ); - -#ifdef KMP_BLOCK_SIGNALS - status = pthread_sigmask( SIG_SETMASK, & old_set, NULL ); - KMP_CHECK_SYSFAIL( "pthread_sigmask", status ); -#endif /* KMP_BLOCK_SIGNALS */ - - return exit_val; -} - -#if KMP_USE_MONITOR -/* The monitor thread controls all of the threads in the complex */ - -static void* -__kmp_launch_monitor( void *thr ) -{ - int status, old_type, old_state; -#ifdef KMP_BLOCK_SIGNALS - sigset_t new_set; -#endif /* KMP_BLOCK_SIGNALS */ - struct timespec interval; - int yield_count; - int yield_cycles = 0; - - KMP_MB(); /* Flush all pending memory write invalidates. 
*/ - - KA_TRACE( 10, ("__kmp_launch_monitor: #1 launched\n" ) ); - - /* register us as the monitor thread */ - __kmp_gtid_set_specific( KMP_GTID_MONITOR ); -#ifdef KMP_TDATA_GTID - __kmp_gtid = KMP_GTID_MONITOR; -#endif - - KMP_MB(); - -#if USE_ITT_BUILD - __kmp_itt_thread_ignore(); // Instruct Intel(R) Threading Tools to ignore monitor thread. -#endif /* USE_ITT_BUILD */ - - __kmp_set_stack_info( ((kmp_info_t*)thr)->th.th_info.ds.ds_gtid, (kmp_info_t*)thr ); - - __kmp_check_stack_overlap( (kmp_info_t*)thr ); - -#ifdef KMP_CANCEL_THREADS - status = pthread_setcanceltype( PTHREAD_CANCEL_ASYNCHRONOUS, & old_type ); - KMP_CHECK_SYSFAIL( "pthread_setcanceltype", status ); - /* josh todo: isn't PTHREAD_CANCEL_ENABLE default for newly-created threads? */ - status = pthread_setcancelstate( PTHREAD_CANCEL_ENABLE, & old_state ); - KMP_CHECK_SYSFAIL( "pthread_setcancelstate", status ); -#endif - - #if KMP_REAL_TIME_FIX - // This is a potential fix which allows application with real-time scheduling policy work. - // However, decision about the fix is not made yet, so it is disabled by default. - { // Are program started with real-time scheduling policy? - int sched = sched_getscheduler( 0 ); - if ( sched == SCHED_FIFO || sched == SCHED_RR ) { - // Yes, we are a part of real-time application. Try to increase the priority of the - // monitor. - struct sched_param param; - int max_priority = sched_get_priority_max( sched ); - int rc; - KMP_WARNING( RealTimeSchedNotSupported ); - sched_getparam( 0, & param ); - if ( param.sched_priority < max_priority ) { - param.sched_priority += 1; - rc = sched_setscheduler( 0, sched, & param ); - if ( rc != 0 ) { - int error = errno; - kmp_msg_t err_code = KMP_ERR( error ); - __kmp_msg( - kmp_ms_warning, - KMP_MSG( CantChangeMonitorPriority ), - err_code, - KMP_MSG( MonitorWillStarve ), - __kmp_msg_null - ); - if (__kmp_generate_warnings == kmp_warnings_off) { - __kmp_str_free(&err_code.str); - } - }; // if - } else { - // We cannot abort here, because number of CPUs may be enough for all the threads, - // including the monitor thread, so application could potentially work... - __kmp_msg( - kmp_ms_warning, - KMP_MSG( RunningAtMaxPriority ), - KMP_MSG( MonitorWillStarve ), - KMP_HNT( RunningAtMaxPriority ), - __kmp_msg_null - ); - }; // if - }; // if - TCW_4( __kmp_global.g.g_time.dt.t_value, 0 ); // AC: free thread that waits for monitor started - } - #endif // KMP_REAL_TIME_FIX - - KMP_MB(); /* Flush all pending memory write invalidates. */ - - if ( __kmp_monitor_wakeups == 1 ) { - interval.tv_sec = 1; - interval.tv_nsec = 0; - } else { - interval.tv_sec = 0; - interval.tv_nsec = (KMP_NSEC_PER_SEC / __kmp_monitor_wakeups); - } - - KA_TRACE( 10, ("__kmp_launch_monitor: #2 monitor\n" ) ); - - if (__kmp_yield_cycle) { - __kmp_yielding_on = 0; /* Start out with yielding shut off */ - yield_count = __kmp_yield_off_count; - } else { - __kmp_yielding_on = 1; /* Yielding is on permanently */ - } - - while( ! 
TCR_4( __kmp_global.g.g_done ) ) { - struct timespec now; - struct timeval tval; - - /* This thread monitors the state of the system */ - - KA_TRACE( 15, ( "__kmp_launch_monitor: update\n" ) ); - - status = gettimeofday( &tval, NULL ); - KMP_CHECK_SYSFAIL_ERRNO( "gettimeofday", status ); - TIMEVAL_TO_TIMESPEC( &tval, &now ); - - now.tv_sec += interval.tv_sec; - now.tv_nsec += interval.tv_nsec; - - if (now.tv_nsec >= KMP_NSEC_PER_SEC) { - now.tv_sec += 1; - now.tv_nsec -= KMP_NSEC_PER_SEC; - } - - status = pthread_mutex_lock( & __kmp_wait_mx.m_mutex ); - KMP_CHECK_SYSFAIL( "pthread_mutex_lock", status ); - // AC: the monitor should not fall asleep if g_done has been set - if ( !TCR_4(__kmp_global.g.g_done) ) { // check once more under mutex - status = pthread_cond_timedwait( &__kmp_wait_cv.c_cond, &__kmp_wait_mx.m_mutex, &now ); - if ( status != 0 ) { - if ( status != ETIMEDOUT && status != EINTR ) { - KMP_SYSFAIL( "pthread_cond_timedwait", status ); - }; - }; - }; - status = pthread_mutex_unlock( & __kmp_wait_mx.m_mutex ); - KMP_CHECK_SYSFAIL( "pthread_mutex_unlock", status ); - - if (__kmp_yield_cycle) { - yield_cycles++; - if ( (yield_cycles % yield_count) == 0 ) { - if (__kmp_yielding_on) { - __kmp_yielding_on = 0; /* Turn it off now */ - yield_count = __kmp_yield_off_count; - } else { - __kmp_yielding_on = 1; /* Turn it on now */ - yield_count = __kmp_yield_on_count; - } - yield_cycles = 0; - } - } else { - __kmp_yielding_on = 1; - } - - TCW_4( __kmp_global.g.g_time.dt.t_value, - TCR_4( __kmp_global.g.g_time.dt.t_value ) + 1 ); - - KMP_MB(); /* Flush all pending memory write invalidates. */ - } - - KA_TRACE( 10, ("__kmp_launch_monitor: #3 cleanup\n" ) ); - -#ifdef KMP_BLOCK_SIGNALS - status = sigfillset( & new_set ); - KMP_CHECK_SYSFAIL_ERRNO( "sigfillset", status ); - status = pthread_sigmask( SIG_UNBLOCK, & new_set, NULL ); - KMP_CHECK_SYSFAIL( "pthread_sigmask", status ); -#endif /* KMP_BLOCK_SIGNALS */ - - KA_TRACE( 10, ("__kmp_launch_monitor: #4 finished\n" ) ); - - if( __kmp_global.g.g_abort != 0 ) { - /* now we need to terminate the worker threads */ - /* the value of t_abort is the signal we caught */ - - int gtid; - - KA_TRACE( 10, ("__kmp_launch_monitor: #5 terminate sig=%d\n", __kmp_global.g.g_abort ) ); - - /* terminate the OpenMP worker threads */ - /* TODO this is not valid for sibling threads!! - * the uber master might not be 0 anymore.. */ - for (gtid = 1; gtid < __kmp_threads_capacity; ++gtid) - __kmp_terminate_thread( gtid ); - - __kmp_cleanup(); - - KA_TRACE( 10, ("__kmp_launch_monitor: #6 raise sig=%d\n", __kmp_global.g.g_abort ) ); - - if (__kmp_global.g.g_abort > 0) - raise( __kmp_global.g.g_abort ); - - } - - KA_TRACE( 10, ("__kmp_launch_monitor: #7 exit\n" ) ); - - return thr; -} -#endif // KMP_USE_MONITOR - -void -__kmp_create_worker( int gtid, kmp_info_t *th, size_t stack_size ) -{ - pthread_t handle; - pthread_attr_t thread_attr; - int status; - - - th->th.th_info.ds.ds_gtid = gtid; - -#if KMP_STATS_ENABLED - // sets up worker thread stats - __kmp_acquire_tas_lock(&__kmp_stats_lock, gtid); - - // th->th.th_stats is used to transfer thread specific stats-pointer to __kmp_launch_worker - // So when thread is created (goes into __kmp_launch_worker) it will - // set it's __thread local pointer to th->th.th_stats - if(!KMP_UBER_GTID(gtid)) { - th->th.th_stats = __kmp_stats_list->push_back(gtid); - } else { - // For root threads, the __kmp_stats_thread_ptr is set in __kmp_register_root(), so - // set the th->th.th_stats field to it. 
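The monitor's sleep primitive above, in isolation: compute an absolute deadline, then block in pthread_cond_timedwait so that a shutdown path can cut the sleep short by signalling the condition variable. A minimal sketch (interval_nsec is assumed to be below one second, matching the runtime's wakeup intervals):

#include <pthread.h>
#include <sys/time.h>

static pthread_mutex_t mx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
static int done = 0; // set under mx and signalled on cv at shutdown

static void monitor_tick(long interval_nsec) {
    struct timeval tval;
    struct timespec deadline;
    gettimeofday(&tval, NULL); // cond_timedwait deadlines use CLOCK_REALTIME
    deadline.tv_sec = tval.tv_sec;
    deadline.tv_nsec = tval.tv_usec * 1000 + interval_nsec;
    if (deadline.tv_nsec >= 1000000000L) { // normalize, as the loop above does
        deadline.tv_sec += 1;
        deadline.tv_nsec -= 1000000000L;
    }
    pthread_mutex_lock(&mx);
    if (!done) // re-check under the mutex, exactly like the g_done test above
        pthread_cond_timedwait(&cv, &mx, &deadline);
    pthread_mutex_unlock(&mx);
}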
- th->th.th_stats = __kmp_stats_thread_ptr; - } - __kmp_release_tas_lock(&__kmp_stats_lock, gtid); - -#endif // KMP_STATS_ENABLED - - if ( KMP_UBER_GTID(gtid) ) { - KA_TRACE( 10, ("__kmp_create_worker: uber thread (%d)\n", gtid ) ); - th -> th.th_info.ds.ds_thread = pthread_self(); - __kmp_set_stack_info( gtid, th ); - __kmp_check_stack_overlap( th ); - return; - }; // if - - KA_TRACE( 10, ("__kmp_create_worker: try to create thread (%d)\n", gtid ) ); - - KMP_MB(); /* Flush all pending memory write invalidates. */ - -#ifdef KMP_THREAD_ATTR - status = pthread_attr_init( &thread_attr ); - if ( status != 0 ) { - __kmp_msg(kmp_ms_fatal, KMP_MSG( CantInitThreadAttrs ), KMP_ERR( status ), __kmp_msg_null); - }; // if - status = pthread_attr_setdetachstate( & thread_attr, PTHREAD_CREATE_JOINABLE ); - if ( status != 0 ) { - __kmp_msg(kmp_ms_fatal, KMP_MSG( CantSetWorkerState ), KMP_ERR( status ), __kmp_msg_null); - }; // if - - /* Set stack size for this thread now. - * The multiple of 2 is there because on some machines, requesting an unusual stacksize - * causes the thread to have an offset before the dummy alloca() takes place to create the - * offset. Since we want the user to have a sufficient stacksize AND support a stack offset, we - * alloca() twice the offset so that the upcoming alloca() does not eliminate any premade - * offset, and also gives the user the stack space they requested for all threads */ - stack_size += gtid * __kmp_stkoffset * 2; - - KA_TRACE( 10, ( "__kmp_create_worker: T#%d, default stacksize = %lu bytes, " - "__kmp_stksize = %lu bytes, final stacksize = %lu bytes\n", - gtid, KMP_DEFAULT_STKSIZE, __kmp_stksize, stack_size ) ); - -# ifdef _POSIX_THREAD_ATTR_STACKSIZE - status = pthread_attr_setstacksize( & thread_attr, stack_size ); -# ifdef KMP_BACKUP_STKSIZE - if ( status != 0 ) { - if ( ! __kmp_env_stksize ) { - stack_size = KMP_BACKUP_STKSIZE + gtid * __kmp_stkoffset; - __kmp_stksize = KMP_BACKUP_STKSIZE; - KA_TRACE( 10, ("__kmp_create_worker: T#%d, default stacksize = %lu bytes, " - "__kmp_stksize = %lu bytes, (backup) final stacksize = %lu " - "bytes\n", - gtid, KMP_DEFAULT_STKSIZE, __kmp_stksize, stack_size ) - ); - status = pthread_attr_setstacksize( &thread_attr, stack_size ); - }; // if - }; // if -# endif /* KMP_BACKUP_STKSIZE */ - if ( status != 0 ) { - __kmp_msg(kmp_ms_fatal, KMP_MSG( CantSetWorkerStackSize, stack_size ), KMP_ERR( status ), - KMP_HNT( ChangeWorkerStackSize ), __kmp_msg_null); - }; // if -# endif /* _POSIX_THREAD_ATTR_STACKSIZE */ - -#endif /* KMP_THREAD_ATTR */ - - status = pthread_create( & handle, & thread_attr, __kmp_launch_worker, (void *) th ); - if ( status != 0 || ! handle ) { // ??? Why do we check handle?? 
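The stack-size handling above follows a common pthreads pattern: request the size through a thread attribute, and fall back when the implementation rejects it. A condensed sketch of that pattern, including the doubling retry that the monitor-creation code below applies on EINVAL:

#include <errno.h>
#include <pthread.h>
#include <stddef.h>

static void *worker(void *arg) { return arg; }

static int create_with_stack(pthread_t *t, size_t stksize) {
    pthread_attr_t attr;
    pthread_attr_init(&attr);
    pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
    int status = EINVAL;
    while (stksize <= (size_t)0x40000000) { // same 1GB cap as the retry below
        if (pthread_attr_setstacksize(&attr, stksize) == 0)
            status = pthread_create(t, &attr, worker, NULL);
        if (status != EINVAL)
            break;    // success, or an error a larger stack will not fix
        stksize *= 2; // the auto_adj_size path: double and try again
    }
    pthread_attr_destroy(&attr);
    return status;
}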
-#ifdef _POSIX_THREAD_ATTR_STACKSIZE - if ( status == EINVAL ) { - __kmp_msg(kmp_ms_fatal, KMP_MSG( CantSetWorkerStackSize, stack_size ), KMP_ERR( status ), - KMP_HNT( IncreaseWorkerStackSize ), __kmp_msg_null); - }; - if ( status == ENOMEM ) { - __kmp_msg(kmp_ms_fatal, KMP_MSG( CantSetWorkerStackSize, stack_size ), KMP_ERR( status ), - KMP_HNT( DecreaseWorkerStackSize ), __kmp_msg_null); - }; -#endif /* _POSIX_THREAD_ATTR_STACKSIZE */ - if ( status == EAGAIN ) { - __kmp_msg(kmp_ms_fatal, KMP_MSG( NoResourcesForWorkerThread ), KMP_ERR( status ), - KMP_HNT( Decrease_NUM_THREADS ), __kmp_msg_null); - }; // if - KMP_SYSFAIL( "pthread_create", status ); - }; // if - - th->th.th_info.ds.ds_thread = handle; - -#ifdef KMP_THREAD_ATTR - status = pthread_attr_destroy( & thread_attr ); - if ( status ) { - kmp_msg_t err_code = KMP_ERR( status ); - __kmp_msg(kmp_ms_warning, KMP_MSG( CantDestroyThreadAttrs ), err_code, __kmp_msg_null); - if (__kmp_generate_warnings == kmp_warnings_off) { - __kmp_str_free(&err_code.str); - } - }; // if -#endif /* KMP_THREAD_ATTR */ - - KMP_MB(); /* Flush all pending memory write invalidates. */ - - KA_TRACE( 10, ("__kmp_create_worker: done creating thread (%d)\n", gtid ) ); - -} // __kmp_create_worker - - -#if KMP_USE_MONITOR -void -__kmp_create_monitor( kmp_info_t *th ) -{ - pthread_t handle; - pthread_attr_t thread_attr; - size_t size; - int status; - int auto_adj_size = FALSE; - - if( __kmp_dflt_blocktime == KMP_MAX_BLOCKTIME ) { - // We don't need monitor thread in case of MAX_BLOCKTIME - KA_TRACE( 10, ("__kmp_create_monitor: skipping monitor thread because of MAX blocktime\n" ) ); - th->th.th_info.ds.ds_tid = 0; // this makes reap_monitor no-op - th->th.th_info.ds.ds_gtid = 0; - return; - } - KA_TRACE( 10, ("__kmp_create_monitor: try to create monitor\n" ) ); - - KMP_MB(); /* Flush all pending memory write invalidates. */ - - th->th.th_info.ds.ds_tid = KMP_GTID_MONITOR; - th->th.th_info.ds.ds_gtid = KMP_GTID_MONITOR; - #if KMP_REAL_TIME_FIX - TCW_4( __kmp_global.g.g_time.dt.t_value, -1 ); // Will use it for synchronization a bit later. - #else - TCW_4( __kmp_global.g.g_time.dt.t_value, 0 ); - #endif // KMP_REAL_TIME_FIX - - #ifdef KMP_THREAD_ATTR - if ( __kmp_monitor_stksize == 0 ) { - __kmp_monitor_stksize = KMP_DEFAULT_MONITOR_STKSIZE; - auto_adj_size = TRUE; - } - status = pthread_attr_init( &thread_attr ); - if ( status != 0 ) { - __kmp_msg( - kmp_ms_fatal, - KMP_MSG( CantInitThreadAttrs ), - KMP_ERR( status ), - __kmp_msg_null - ); - }; // if - status = pthread_attr_setdetachstate( & thread_attr, PTHREAD_CREATE_JOINABLE ); - if ( status != 0 ) { - __kmp_msg( - kmp_ms_fatal, - KMP_MSG( CantSetMonitorState ), - KMP_ERR( status ), - __kmp_msg_null - ); - }; // if - - #ifdef _POSIX_THREAD_ATTR_STACKSIZE - status = pthread_attr_getstacksize( & thread_attr, & size ); - KMP_CHECK_SYSFAIL( "pthread_attr_getstacksize", status ); - #else - size = __kmp_sys_min_stksize; - #endif /* _POSIX_THREAD_ATTR_STACKSIZE */ - #endif /* KMP_THREAD_ATTR */ - - if ( __kmp_monitor_stksize == 0 ) { - __kmp_monitor_stksize = KMP_DEFAULT_MONITOR_STKSIZE; - } - if ( __kmp_monitor_stksize < __kmp_sys_min_stksize ) { - __kmp_monitor_stksize = __kmp_sys_min_stksize; - } - - KA_TRACE( 10, ( "__kmp_create_monitor: default stacksize = %lu bytes," - "requested stacksize = %lu bytes\n", - size, __kmp_monitor_stksize ) ); - - retry: - - /* Set stack size for this thread now. 
*/ - - #ifdef _POSIX_THREAD_ATTR_STACKSIZE - KA_TRACE( 10, ( "__kmp_create_monitor: setting stacksize = %lu bytes,", - __kmp_monitor_stksize ) ); - status = pthread_attr_setstacksize( & thread_attr, __kmp_monitor_stksize ); - if ( status != 0 ) { - if ( auto_adj_size ) { - __kmp_monitor_stksize *= 2; - goto retry; - } - kmp_msg_t err_code = KMP_ERR( status ); - __kmp_msg( - kmp_ms_warning, // should this be fatal? BB - KMP_MSG( CantSetMonitorStackSize, (long int) __kmp_monitor_stksize ), - err_code, - KMP_HNT( ChangeMonitorStackSize ), - __kmp_msg_null - ); - if (__kmp_generate_warnings == kmp_warnings_off) { - __kmp_str_free(&err_code.str); - } - }; // if - #endif /* _POSIX_THREAD_ATTR_STACKSIZE */ - - status = pthread_create( &handle, & thread_attr, __kmp_launch_monitor, (void *) th ); - - if ( status != 0 ) { - #ifdef _POSIX_THREAD_ATTR_STACKSIZE - if ( status == EINVAL ) { - if ( auto_adj_size && ( __kmp_monitor_stksize < (size_t)0x40000000 ) ) { - __kmp_monitor_stksize *= 2; - goto retry; - } - __kmp_msg( - kmp_ms_fatal, - KMP_MSG( CantSetMonitorStackSize, __kmp_monitor_stksize ), - KMP_ERR( status ), - KMP_HNT( IncreaseMonitorStackSize ), - __kmp_msg_null - ); - }; // if - if ( status == ENOMEM ) { - __kmp_msg( - kmp_ms_fatal, - KMP_MSG( CantSetMonitorStackSize, __kmp_monitor_stksize ), - KMP_ERR( status ), - KMP_HNT( DecreaseMonitorStackSize ), - __kmp_msg_null - ); - }; // if - #endif /* _POSIX_THREAD_ATTR_STACKSIZE */ - if ( status == EAGAIN ) { - __kmp_msg( - kmp_ms_fatal, - KMP_MSG( NoResourcesForMonitorThread ), - KMP_ERR( status ), - KMP_HNT( DecreaseNumberOfThreadsInUse ), - __kmp_msg_null - ); - }; // if - KMP_SYSFAIL( "pthread_create", status ); - }; // if - - th->th.th_info.ds.ds_thread = handle; - - #if KMP_REAL_TIME_FIX - // Wait for the monitor thread is really started and set its *priority*. - KMP_DEBUG_ASSERT( sizeof( kmp_uint32 ) == sizeof( __kmp_global.g.g_time.dt.t_value ) ); - __kmp_wait_yield_4( - (kmp_uint32 volatile *) & __kmp_global.g.g_time.dt.t_value, -1, & __kmp_neq_4, NULL - ); - #endif // KMP_REAL_TIME_FIX - - #ifdef KMP_THREAD_ATTR - status = pthread_attr_destroy( & thread_attr ); - if ( status != 0 ) { - kmp_msg_t err_code = KMP_ERR( status ); - __kmp_msg( - kmp_ms_warning, - KMP_MSG( CantDestroyThreadAttrs ), - err_code, - __kmp_msg_null - ); - if (__kmp_generate_warnings == kmp_warnings_off) { - __kmp_str_free(&err_code.str); - } - }; // if - #endif - - KMP_MB(); /* Flush all pending memory write invalidates. */ - - KA_TRACE( 10, ( "__kmp_create_monitor: monitor created %#.8lx\n", th->th.th_info.ds.ds_thread ) ); - -} // __kmp_create_monitor -#endif // KMP_USE_MONITOR - -void -__kmp_exit_thread( - int exit_status -) { - pthread_exit( (void *)(intptr_t) exit_status ); -} // __kmp_exit_thread - -#if KMP_USE_MONITOR -void __kmp_resume_monitor(); - -void -__kmp_reap_monitor( kmp_info_t *th ) -{ - int status; - void *exit_val; - - KA_TRACE( 10, ("__kmp_reap_monitor: try to reap monitor thread with handle %#.8lx\n", - th->th.th_info.ds.ds_thread ) ); - - // If monitor has been created, its tid and gtid should be KMP_GTID_MONITOR. - // If both tid and gtid are 0, it means the monitor did not ever start. - // If both tid and gtid are KMP_GTID_DNE, the monitor has been shut down. 
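Reaping, as the code just below shows, is a three-step pattern: probe whether the thread still exists with pthread_kill(thread, 0), wake it if it does, then pthread_join and sanity-check the exit value. Condensed, with wake_target standing in for __kmp_resume_monitor():

#include <errno.h>
#include <pthread.h>
#include <signal.h>

extern void wake_target(void); // hypothetical stand-in for __kmp_resume_monitor

static int reap(pthread_t t, void *expected_exit_val) {
    void *exit_val;
    if (pthread_kill(t, 0) != ESRCH) // signal 0: existence test only
        wake_target();               // don't join a thread sleeping out its blocktime
    int status = pthread_join(t, &exit_val);
    return (status == 0 && exit_val == expected_exit_val) ? 0 : -1;
}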
- KMP_DEBUG_ASSERT( th->th.th_info.ds.ds_tid == th->th.th_info.ds.ds_gtid ); - if ( th->th.th_info.ds.ds_gtid != KMP_GTID_MONITOR ) { - KA_TRACE( 10, ("__kmp_reap_monitor: monitor did not start, returning\n") ); - return; - }; // if - - KMP_MB(); /* Flush all pending memory write invalidates. */ - - - /* First, check to see whether the monitor thread exists to wake it up. This is - to avoid performance problem when the monitor sleeps during blocktime-size - interval */ - - status = pthread_kill( th->th.th_info.ds.ds_thread, 0 ); - if (status != ESRCH) { - __kmp_resume_monitor(); // Wake up the monitor thread - } - KA_TRACE( 10, ("__kmp_reap_monitor: try to join with monitor\n") ); - status = pthread_join( th->th.th_info.ds.ds_thread, & exit_val); - if (exit_val != th) { - __kmp_msg( - kmp_ms_fatal, - KMP_MSG( ReapMonitorError ), - KMP_ERR( status ), - __kmp_msg_null - ); - } - - th->th.th_info.ds.ds_tid = KMP_GTID_DNE; - th->th.th_info.ds.ds_gtid = KMP_GTID_DNE; - - KA_TRACE( 10, ("__kmp_reap_monitor: done reaping monitor thread with handle %#.8lx\n", - th->th.th_info.ds.ds_thread ) ); - - KMP_MB(); /* Flush all pending memory write invalidates. */ - -} -#endif // KMP_USE_MONITOR - -void -__kmp_reap_worker( kmp_info_t *th ) -{ - int status; - void *exit_val; - - KMP_MB(); /* Flush all pending memory write invalidates. */ - - KA_TRACE( 10, ("__kmp_reap_worker: try to reap T#%d\n", th->th.th_info.ds.ds_gtid ) ); - - status = pthread_join( th->th.th_info.ds.ds_thread, & exit_val); -#ifdef KMP_DEBUG - /* Don't expose these to the user until we understand when they trigger */ - if ( status != 0 ) { - __kmp_msg(kmp_ms_fatal, KMP_MSG( ReapWorkerError ), KMP_ERR( status ), __kmp_msg_null); - } - if ( exit_val != th ) { - KA_TRACE( 10, ( "__kmp_reap_worker: worker T#%d did not reap properly, exit_val = %p\n", - th->th.th_info.ds.ds_gtid, exit_val ) ); - } -#endif /* KMP_DEBUG */ - - KA_TRACE( 10, ("__kmp_reap_worker: done reaping T#%d\n", th->th.th_info.ds.ds_gtid ) ); - - KMP_MB(); /* Flush all pending memory write invalidates. */ -} - - -/* ------------------------------------------------------------------------ */ -/* ------------------------------------------------------------------------ */ - -#if KMP_HANDLE_SIGNALS - - -static void -__kmp_null_handler( int signo ) -{ - // Do nothing, for doing SIG_IGN-type actions. -} // __kmp_null_handler - - -static void -__kmp_team_handler( int signo ) -{ - if ( __kmp_global.g.g_abort == 0 ) { - /* Stage 1 signal handler, let's shut down all of the threads */ - #ifdef KMP_DEBUG - __kmp_debug_printf( "__kmp_team_handler: caught signal = %d\n", signo ); - #endif - switch ( signo ) { - case SIGHUP : - case SIGINT : - case SIGQUIT : - case SIGILL : - case SIGABRT : - case SIGFPE : - case SIGBUS : - case SIGSEGV : - #ifdef SIGSYS - case SIGSYS : - #endif - case SIGTERM : - if ( __kmp_debug_buf ) { - __kmp_dump_debug_buffer( ); - }; // if - KMP_MB(); // Flush all pending memory write invalidates. - TCW_4( __kmp_global.g.g_abort, signo ); - KMP_MB(); // Flush all pending memory write invalidates. - TCW_4( __kmp_global.g.g_done, TRUE ); - KMP_MB(); // Flush all pending memory write invalidates. 
-            break;
-        default:
-            #ifdef KMP_DEBUG
-                __kmp_debug_printf( "__kmp_team_handler: unknown signal type" );
-            #endif
-            break;
-    }; // switch
-    }; // if
-} // __kmp_team_handler
-
-
-static
-void __kmp_sigaction( int signum, const struct sigaction * act, struct sigaction * oldact ) {
-    int rc = sigaction( signum, act, oldact );
-    KMP_CHECK_SYSFAIL_ERRNO( "sigaction", rc );
-}
-
-
-static void
-__kmp_install_one_handler( int sig, sig_func_t handler_func, int parallel_init )
-{
-    KMP_MB(); // Flush all pending memory write invalidates.
-    KB_TRACE( 60, ( "__kmp_install_one_handler( %d, ..., %d )\n", sig, parallel_init ) );
-    if ( parallel_init ) {
-        struct sigaction new_action;
-        struct sigaction old_action;
-        new_action.sa_handler = handler_func;
-        new_action.sa_flags   = 0;
-        sigfillset( & new_action.sa_mask );
-        __kmp_sigaction( sig, & new_action, & old_action );
-        if ( old_action.sa_handler == __kmp_sighldrs[ sig ].sa_handler ) {
-            sigaddset( & __kmp_sigset, sig );
-        } else {
-            // Restore/keep user's handler if one was previously installed.
-            __kmp_sigaction( sig, & old_action, NULL );
-        }; // if
-    } else {
-        // Save initial/system signal handlers to see if user handlers were installed.
-        __kmp_sigaction( sig, NULL, & __kmp_sighldrs[ sig ] );
-    }; // if
-    KMP_MB(); // Flush all pending memory write invalidates.
-} // __kmp_install_one_handler
-
-
-static void
-__kmp_remove_one_handler( int sig )
-{
-    KB_TRACE( 60, ( "__kmp_remove_one_handler( %d )\n", sig ) );
-    if ( sigismember( & __kmp_sigset, sig ) ) {
-        struct sigaction old;
-        KMP_MB(); // Flush all pending memory write invalidates.
-        __kmp_sigaction( sig, & __kmp_sighldrs[ sig ], & old );
-        if ( ( old.sa_handler != __kmp_team_handler ) && ( old.sa_handler != __kmp_null_handler ) ) {
-            // Restore the user's signal handler.
-            KB_TRACE( 10, ( "__kmp_remove_one_handler: oops, not our handler, restoring: sig=%d\n", sig ) );
-            __kmp_sigaction( sig, & old, NULL );
-        }; // if
-        sigdelset( & __kmp_sigset, sig );
-        KMP_MB(); // Flush all pending memory write invalidates.
-    }; // if
-} // __kmp_remove_one_handler
-
-
-void
-__kmp_install_signals( int parallel_init )
-{
-    KB_TRACE( 10, ( "__kmp_install_signals( %d )\n", parallel_init ) );
-    if ( __kmp_handle_signals || ! parallel_init ) {
-        // If ! parallel_init, we do not install handlers, just save original handlers.
-        // Let us do it even if __kmp_handle_signals is 0.
- sigemptyset( & __kmp_sigset ); - __kmp_install_one_handler( SIGHUP, __kmp_team_handler, parallel_init ); - __kmp_install_one_handler( SIGINT, __kmp_team_handler, parallel_init ); - __kmp_install_one_handler( SIGQUIT, __kmp_team_handler, parallel_init ); - __kmp_install_one_handler( SIGILL, __kmp_team_handler, parallel_init ); - __kmp_install_one_handler( SIGABRT, __kmp_team_handler, parallel_init ); - __kmp_install_one_handler( SIGFPE, __kmp_team_handler, parallel_init ); - __kmp_install_one_handler( SIGBUS, __kmp_team_handler, parallel_init ); - __kmp_install_one_handler( SIGSEGV, __kmp_team_handler, parallel_init ); - #ifdef SIGSYS - __kmp_install_one_handler( SIGSYS, __kmp_team_handler, parallel_init ); - #endif // SIGSYS - __kmp_install_one_handler( SIGTERM, __kmp_team_handler, parallel_init ); - #ifdef SIGPIPE - __kmp_install_one_handler( SIGPIPE, __kmp_team_handler, parallel_init ); - #endif // SIGPIPE - }; // if -} // __kmp_install_signals - - -void -__kmp_remove_signals( void ) -{ - int sig; - KB_TRACE( 10, ( "__kmp_remove_signals()\n" ) ); - for ( sig = 1; sig < NSIG; ++ sig ) { - __kmp_remove_one_handler( sig ); - }; // for sig -} // __kmp_remove_signals - - -#endif // KMP_HANDLE_SIGNALS - -/* ------------------------------------------------------------------------ */ -/* ------------------------------------------------------------------------ */ - -void -__kmp_enable( int new_state ) -{ - #ifdef KMP_CANCEL_THREADS - int status, old_state; - status = pthread_setcancelstate( new_state, & old_state ); - KMP_CHECK_SYSFAIL( "pthread_setcancelstate", status ); - KMP_DEBUG_ASSERT( old_state == PTHREAD_CANCEL_DISABLE ); - #endif -} - -void -__kmp_disable( int * old_state ) -{ - #ifdef KMP_CANCEL_THREADS - int status; - status = pthread_setcancelstate( PTHREAD_CANCEL_DISABLE, old_state ); - KMP_CHECK_SYSFAIL( "pthread_setcancelstate", status ); - #endif -} - -/* ------------------------------------------------------------------------ */ -/* ------------------------------------------------------------------------ */ - -static void -__kmp_atfork_prepare (void) -{ - /* nothing to do */ -} - -static void -__kmp_atfork_parent (void) -{ - /* nothing to do */ -} - -/* - Reset the library so execution in the child starts "all over again" with - clean data structures in initial states. Don't worry about freeing memory - allocated by parent, just abandon it to be safe. -*/ -static void -__kmp_atfork_child (void) -{ - /* TODO make sure this is done right for nested/sibling */ - // ATT: Memory leaks are here? TODO: Check it and fix. - /* KMP_ASSERT( 0 ); */ - - ++__kmp_fork_count; - - __kmp_init_runtime = FALSE; -#if KMP_USE_MONITOR - __kmp_init_monitor = 0; -#endif - __kmp_init_parallel = FALSE; - __kmp_init_middle = FALSE; - __kmp_init_serial = FALSE; - TCW_4(__kmp_init_gtid, FALSE); - __kmp_init_common = FALSE; - - TCW_4(__kmp_init_user_locks, FALSE); -#if ! 
KMP_USE_DYNAMIC_LOCK
-    __kmp_user_lock_table.used = 1;
-    __kmp_user_lock_table.allocated = 0;
-    __kmp_user_lock_table.table = NULL;
-    __kmp_lock_blocks = NULL;
-#endif
-
-    __kmp_all_nth = 0;
-    TCW_4(__kmp_nth, 0);
-
-    /* Must actually zero all the *cache arguments passed to __kmpc_threadprivate here
-       so threadprivate doesn't use stale data */
-    KA_TRACE( 10, ( "__kmp_atfork_child: checking cache address list %p\n",
-                 __kmp_threadpriv_cache_list ) );
-
-    while ( __kmp_threadpriv_cache_list != NULL ) {
-
-        if ( *__kmp_threadpriv_cache_list -> addr != NULL ) {
-            KC_TRACE( 50, ( "__kmp_atfork_child: zeroing cache at address %p\n",
-                        &(*__kmp_threadpriv_cache_list -> addr) ) );
-
-            *__kmp_threadpriv_cache_list -> addr = NULL;
-        }
-        __kmp_threadpriv_cache_list = __kmp_threadpriv_cache_list -> next;
-    }
-
-    __kmp_init_runtime = FALSE;
-
-    /* reset statically initialized locks */
-    __kmp_init_bootstrap_lock( &__kmp_initz_lock );
-    __kmp_init_bootstrap_lock( &__kmp_stdio_lock );
-    __kmp_init_bootstrap_lock( &__kmp_console_lock );
-
-    /* This is necessary to make sure no stale data is left around */
-    /* AC: customers complain that we use unsafe routines in the atfork
-       handler. Mathworks: dlsym() is unsafe. We call dlsym and dlopen
-       in dynamic_link when checking the presence of the shared tbbmalloc library.
-       Suggestion is to make the library initialization lazier, similar
-       to what is done for __kmpc_begin(). */
-    // TODO: synchronize all static initializations with regular library
-    //       startup; look at kmp_global.c and etc.
-    //__kmp_internal_begin ();
-
-}
-
-void
-__kmp_register_atfork(void) {
-    if ( __kmp_need_register_atfork ) {
-        int status = pthread_atfork( __kmp_atfork_prepare, __kmp_atfork_parent, __kmp_atfork_child );
-        KMP_CHECK_SYSFAIL( "pthread_atfork", status );
-        __kmp_need_register_atfork = FALSE;
-    }
-}
-
-void
-__kmp_suspend_initialize( void )
-{
-    int status;
-    status = pthread_mutexattr_init( &__kmp_suspend_mutex_attr );
-    KMP_CHECK_SYSFAIL( "pthread_mutexattr_init", status );
-    status = pthread_condattr_init( &__kmp_suspend_cond_attr );
-    KMP_CHECK_SYSFAIL( "pthread_condattr_init", status );
-}
-
-static void
-__kmp_suspend_initialize_thread( kmp_info_t *th )
-{
-    ANNOTATE_HAPPENS_AFTER(&th->th.th_suspend_init_count);
-    if ( th->th.th_suspend_init_count <= __kmp_fork_count ) {
-        /* this means we haven't initialized the suspension pthread objects for this thread
-           in this instance of the process */
-        int status;
-        status = pthread_cond_init( &th->th.th_suspend_cv.c_cond, &__kmp_suspend_cond_attr );
-        KMP_CHECK_SYSFAIL( "pthread_cond_init", status );
-        status = pthread_mutex_init( &th->th.th_suspend_mx.m_mutex, & __kmp_suspend_mutex_attr );
-        KMP_CHECK_SYSFAIL( "pthread_mutex_init", status );
-        *(volatile int*)&th->th.th_suspend_init_count = __kmp_fork_count + 1;
-        ANNOTATE_HAPPENS_BEFORE(&th->th.th_suspend_init_count);
-    };
-}
-
-void
-__kmp_suspend_uninitialize_thread( kmp_info_t *th )
-{
-    if(th->th.th_suspend_init_count > __kmp_fork_count) {
-        /* this means we have initialized the suspension pthread objects for this thread
-           in this instance of the process */
-        int status;
-
-        status = pthread_cond_destroy( &th->th.th_suspend_cv.c_cond );
-        if ( status != 0 && status != EBUSY ) {
-            KMP_SYSFAIL( "pthread_cond_destroy", status );
-        };
-        status = pthread_mutex_destroy( &th->th.th_suspend_mx.m_mutex );
-        if ( status != 0 && status != EBUSY ) {
-            KMP_SYSFAIL( "pthread_mutex_destroy", status );
-        };
-        --th->th.th_suspend_init_count;
-        KMP_DEBUG_ASSERT(th->th.th_suspend_init_count == __kmp_fork_count);
-    }
-}
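
The init/uninit pair above hinges on one idea: a per-thread counter is compared against a
process-wide fork count, so the condition variable and mutex are lazily re-created the first
time a thread suspends after a fork(). A minimal standalone sketch of that pattern follows;
the names (process_fork_count, thread_sync) are illustrative, not the runtime's own.

    #include <pthread.h>

    static int process_fork_count = 0;  // incremented in the atfork child handler

    struct thread_sync {
        pthread_cond_t  cv;
        pthread_mutex_t mx;
        int             init_count;    // == process_fork_count + 1 while valid
    };

    // (Re-)create the primitives iff they were last initialized before a fork().
    static void sync_lazy_init(thread_sync *s) {
        if (s->init_count <= process_fork_count) {
            pthread_cond_init(&s->cv, NULL);
            pthread_mutex_init(&s->mx, NULL);
            s->init_count = process_fork_count + 1;
        }
    }

    // Destroy only what was initialized in this process image.
    static void sync_uninit(thread_sync *s) {
        if (s->init_count > process_fork_count) {
            pthread_cond_destroy(&s->cv);
            pthread_mutex_destroy(&s->mx);
            --s->init_count;
        }
    }
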

-/* This routine puts the calling thread to sleep after setting the
- * sleep bit for the indicated flag variable to true.
- */
-template <class C>
-static inline void __kmp_suspend_template( int th_gtid, C *flag )
-{
-    KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(USER_suspend);
-    kmp_info_t *th = __kmp_threads[th_gtid];
-    int status;
-    typename C::flag_t old_spin;
-
-    KF_TRACE( 30, ("__kmp_suspend_template: T#%d enter for flag = %p\n", th_gtid, flag->get() ) );
-
-    __kmp_suspend_initialize_thread( th );
-
-    status = pthread_mutex_lock( &th->th.th_suspend_mx.m_mutex );
-    KMP_CHECK_SYSFAIL( "pthread_mutex_lock", status );
-
-    KF_TRACE( 10, ( "__kmp_suspend_template: T#%d setting sleep bit for spin(%p)\n",
-                    th_gtid, flag->get() ) );
-
-    /* TODO: shouldn't this use release semantics to ensure that
-     * __kmp_suspend_initialize_thread gets called first?
-     */
-    old_spin = flag->set_sleeping();
-
-    KF_TRACE( 5, ( "__kmp_suspend_template: T#%d set sleep bit for spin(%p)==%x, was %x\n",
-                   th_gtid, flag->get(), *(flag->get()), old_spin ) );
-
-    if ( flag->done_check_val(old_spin) ) {
-        old_spin = flag->unset_sleeping();
-        KF_TRACE( 5, ( "__kmp_suspend_template: T#%d false alarm, reset sleep bit for spin(%p)\n",
-                       th_gtid, flag->get()) );
-    } else {
-        /* Encapsulate in a loop as the documentation states that this may
-         * "with low probability" return when the condition variable has
-         * not been signaled or broadcast
-         */
-        int deactivated = FALSE;
-        TCW_PTR(th->th.th_sleep_loc, (void *)flag);
-        while ( flag->is_sleeping() ) {
-#ifdef DEBUG_SUSPEND
-            char buffer[128];
-            __kmp_suspend_count++;
-            __kmp_print_cond( buffer, &th->th.th_suspend_cv );
-            __kmp_printf( "__kmp_suspend_template: suspending T#%d: %s\n", th_gtid, buffer );
-#endif
-            // Mark the thread as no longer active (only in the first iteration of the loop).
-            if ( ! deactivated ) {
-                th->th.th_active = FALSE;
-                if ( th->th.th_active_in_pool ) {
-                    th->th.th_active_in_pool = FALSE;
-                    KMP_TEST_THEN_DEC32(
-                        (kmp_int32 *) &__kmp_thread_pool_active_nth );
-                    KMP_DEBUG_ASSERT( TCR_4(__kmp_thread_pool_active_nth) >= 0 );
-                }
-                deactivated = TRUE;
-            }
-
-#if USE_SUSPEND_TIMEOUT
-            struct timespec now;
-            struct timeval  tval;
-            int msecs;
-
-            status = gettimeofday( &tval, NULL );
-            KMP_CHECK_SYSFAIL_ERRNO( "gettimeofday", status );
-            TIMEVAL_TO_TIMESPEC( &tval, &now );
-
-            msecs = (4*__kmp_dflt_blocktime) + 200;
-            now.tv_sec  += msecs / 1000;
-            now.tv_nsec += (msecs % 1000)*1000;
-
-            KF_TRACE( 15, ( "__kmp_suspend_template: T#%d about to perform pthread_cond_timedwait\n",
-                            th_gtid ) );
-            status = pthread_cond_timedwait( &th->th.th_suspend_cv.c_cond, &th->th.th_suspend_mx.m_mutex, & now );
-#else
-            KF_TRACE( 15, ( "__kmp_suspend_template: T#%d about to perform pthread_cond_wait\n",
-                            th_gtid ) );
-            status = pthread_cond_wait( &th->th.th_suspend_cv.c_cond, &th->th.th_suspend_mx.m_mutex );
-#endif
-
-            if ( (status != 0) && (status != EINTR) && (status != ETIMEDOUT) ) {
-                KMP_SYSFAIL( "pthread_cond_wait", status );
-            }
-#ifdef KMP_DEBUG
-            if (status == ETIMEDOUT) {
-                if ( flag->is_sleeping() ) {
-                    KF_TRACE( 100, ( "__kmp_suspend_template: T#%d timeout wakeup\n", th_gtid ) );
-                } else {
-                    KF_TRACE( 2, ( "__kmp_suspend_template: T#%d timeout wakeup, sleep bit not set!\n",
-                                   th_gtid ) );
-                }
-            } else if ( flag->is_sleeping() ) {
-                KF_TRACE( 100, ( "__kmp_suspend_template: T#%d spurious wakeup\n", th_gtid ) );
-            }
-#endif
-        } // while
-
-        // Mark the thread as active again (if it was previously marked as inactive)
-        if ( deactivated ) {
-            th->th.th_active = TRUE;
-            if ( TCR_4(th->th.th_in_pool) ) {
-                KMP_TEST_THEN_INC32( (kmp_int32 *) &__kmp_thread_pool_active_nth );
-                th->th.th_active_in_pool = TRUE;
-            }
-        }
-    }
-
-#ifdef DEBUG_SUSPEND
-    {
-        char buffer[128];
-        __kmp_print_cond( buffer, &th->th.th_suspend_cv);
-        __kmp_printf( "__kmp_suspend_template: T#%d has awakened: %s\n", th_gtid, buffer );
-    }
-#endif
-
-    status = pthread_mutex_unlock( &th->th.th_suspend_mx.m_mutex );
-    KMP_CHECK_SYSFAIL( "pthread_mutex_unlock", status );
-
-    KF_TRACE( 30, ("__kmp_suspend_template: T#%d exit\n", th_gtid ) );
-}
-
-void __kmp_suspend_32(int th_gtid, kmp_flag_32 *flag) {
-    __kmp_suspend_template(th_gtid, flag);
-}
-void __kmp_suspend_64(int th_gtid, kmp_flag_64 *flag) {
-    __kmp_suspend_template(th_gtid, flag);
-}
-void __kmp_suspend_oncore(int th_gtid, kmp_flag_oncore *flag) {
-    __kmp_suspend_template(th_gtid, flag);
-}
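
Below, the resume path mirrors the suspend loop above: the sleeper publishes a sleep bit in
the flag, re-checks the wait condition, and only then blocks on the condition variable, while
the waker clears the bit under the same mutex before signaling. A reduced sketch of that
handshake, under the simplifying assumption of a single 32-bit flag word (the runtime's
templated flag classes generalize this):

    #include <atomic>
    #include <pthread.h>

    static pthread_mutex_t       mx = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t        cv = PTHREAD_COND_INITIALIZER;
    static std::atomic<unsigned> flag_word{0};
    static const unsigned        SLEEP_BIT = 1u << 31;

    static void suspend_thread(unsigned seen_value) {
        pthread_mutex_lock(&mx);
        unsigned old_value = flag_word.fetch_or(SLEEP_BIT);  // set_sleeping()
        if (old_value != seen_value) {
            flag_word.fetch_and(~SLEEP_BIT);                 // false alarm: unset_sleeping()
        } else {
            while (flag_word.load() & SLEEP_BIT)             // loop guards spurious wakeups
                pthread_cond_wait(&cv, &mx);
        }
        pthread_mutex_unlock(&mx);
    }

    static void resume_thread(unsigned new_value) {
        pthread_mutex_lock(&mx);
        unsigned old_value = flag_word.exchange(new_value);  // also clears the sleep bit
        if (old_value & SLEEP_BIT)                           // signal only if someone slept
            pthread_cond_signal(&cv);
        pthread_mutex_unlock(&mx);
    }
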

-
-/* This routine signals the thread specified by target_gtid to wake up
- * after setting the sleep bit indicated by the flag argument to FALSE.
- * The target thread must already have called __kmp_suspend_template()
- */
-template <class C>
-static inline void __kmp_resume_template( int target_gtid, C *flag )
-{
-    KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(USER_resume);
-    kmp_info_t *th = __kmp_threads[target_gtid];
-    int status;
-
-#ifdef KMP_DEBUG
-    int gtid = TCR_4(__kmp_init_gtid) ? __kmp_get_gtid() : -1;
-#endif
-
-    KF_TRACE( 30, ( "__kmp_resume_template: T#%d wants to wakeup T#%d enter\n", gtid, target_gtid ) );
-    KMP_DEBUG_ASSERT( gtid != target_gtid );
-
-    __kmp_suspend_initialize_thread( th );
-
-    status = pthread_mutex_lock( &th->th.th_suspend_mx.m_mutex );
-    KMP_CHECK_SYSFAIL( "pthread_mutex_lock", status );
-
-    if (!flag) { // coming from __kmp_null_resume_wrapper
-        flag = (C *)th->th.th_sleep_loc;
-    }
-
-    // First, check if the flag is null or its type has changed. If so, someone else woke it up.
-    if (!flag || flag->get_type() != flag->get_ptr_type()) { // get_ptr_type simply shows what flag was cast to
-        KF_TRACE( 5, ( "__kmp_resume_template: T#%d exiting, thread T#%d already awake: flag(%p)\n",
-                       gtid, target_gtid, NULL ) );
-        status = pthread_mutex_unlock( &th->th.th_suspend_mx.m_mutex );
-        KMP_CHECK_SYSFAIL( "pthread_mutex_unlock", status );
-        return;
-    }
-    else { // if multiple threads are sleeping, flag should be internally referring to a specific thread here
-        typename C::flag_t old_spin = flag->unset_sleeping();
-        if ( ! flag->is_sleeping_val(old_spin) ) {
-            KF_TRACE( 5, ( "__kmp_resume_template: T#%d exiting, thread T#%d already awake: flag(%p): "
-                           "%u => %u\n",
-                           gtid, target_gtid, flag->get(), old_spin, *flag->get() ) );
-            status = pthread_mutex_unlock( &th->th.th_suspend_mx.m_mutex );
-            KMP_CHECK_SYSFAIL( "pthread_mutex_unlock", status );
-            return;
-        }
-        KF_TRACE( 5, ( "__kmp_resume_template: T#%d about to wakeup T#%d, reset sleep bit for flag's loc(%p): "
-                       "%u => %u\n",
-                       gtid, target_gtid, flag->get(), old_spin, *flag->get() ) );
-    }
-    TCW_PTR(th->th.th_sleep_loc, NULL);
-
-
-#ifdef DEBUG_SUSPEND
-    {
-        char buffer[128];
-        __kmp_print_cond( buffer, &th->th.th_suspend_cv );
-        __kmp_printf( "__kmp_resume_template: T#%d resuming T#%d: %s\n", gtid, target_gtid, buffer );
-    }
-#endif
-
-    status = pthread_cond_signal( &th->th.th_suspend_cv.c_cond );
-    KMP_CHECK_SYSFAIL( "pthread_cond_signal", status );
-    status = pthread_mutex_unlock( &th->th.th_suspend_mx.m_mutex );
-    KMP_CHECK_SYSFAIL( "pthread_mutex_unlock", status );
-    KF_TRACE( 30, ( "__kmp_resume_template: T#%d exiting after signaling wake up for T#%d\n",
-                    gtid, target_gtid ) );
-}
-
-void __kmp_resume_32(int target_gtid, kmp_flag_32 *flag) {
-    __kmp_resume_template(target_gtid, flag);
-}
-void __kmp_resume_64(int target_gtid, kmp_flag_64 *flag) {
-    __kmp_resume_template(target_gtid, flag);
-}
-void __kmp_resume_oncore(int target_gtid, kmp_flag_oncore *flag) {
-    __kmp_resume_template(target_gtid, flag);
-}
-
-#if KMP_USE_MONITOR
-void
-__kmp_resume_monitor()
-{
-    KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(USER_resume);
-    int status;
-#ifdef KMP_DEBUG
-    int gtid = TCR_4(__kmp_init_gtid) ? 
__kmp_get_gtid() : -1; - KF_TRACE( 30, ( "__kmp_resume_monitor: T#%d wants to wakeup T#%d enter\n", - gtid, KMP_GTID_MONITOR ) ); - KMP_DEBUG_ASSERT( gtid != KMP_GTID_MONITOR ); -#endif - status = pthread_mutex_lock( &__kmp_wait_mx.m_mutex ); - KMP_CHECK_SYSFAIL( "pthread_mutex_lock", status ); -#ifdef DEBUG_SUSPEND - { - char buffer[128]; - __kmp_print_cond( buffer, &__kmp_wait_cv.c_cond ); - __kmp_printf( "__kmp_resume_monitor: T#%d resuming T#%d: %s\n", gtid, KMP_GTID_MONITOR, buffer ); - } -#endif - status = pthread_cond_signal( &__kmp_wait_cv.c_cond ); - KMP_CHECK_SYSFAIL( "pthread_cond_signal", status ); - status = pthread_mutex_unlock( &__kmp_wait_mx.m_mutex ); - KMP_CHECK_SYSFAIL( "pthread_mutex_unlock", status ); - KF_TRACE( 30, ( "__kmp_resume_monitor: T#%d exiting after signaling wake up for T#%d\n", - gtid, KMP_GTID_MONITOR ) ); -} -#endif // KMP_USE_MONITOR - -/* ------------------------------------------------------------------------ */ -/* ------------------------------------------------------------------------ */ - -void -__kmp_yield( int cond ) -{ - if (cond -#if KMP_USE_MONITOR - && __kmp_yielding_on -#endif - ) { - sched_yield(); - } -} - -/* ------------------------------------------------------------------------ */ -/* ------------------------------------------------------------------------ */ - -void -__kmp_gtid_set_specific( int gtid ) -{ - if( __kmp_init_gtid ) { - int status; - status = pthread_setspecific( __kmp_gtid_threadprivate_key, (void*)(intptr_t)(gtid+1) ); - KMP_CHECK_SYSFAIL( "pthread_setspecific", status ); - } else { - KA_TRACE( 50, ("__kmp_gtid_set_specific: runtime shutdown, returning\n" ) ); - } -} - -int -__kmp_gtid_get_specific() -{ - int gtid; - if ( !__kmp_init_gtid ) { - KA_TRACE( 50, ("__kmp_gtid_get_specific: runtime shutdown, returning KMP_GTID_SHUTDOWN\n" ) ); - return KMP_GTID_SHUTDOWN; - } - gtid = (int)(size_t)pthread_getspecific( __kmp_gtid_threadprivate_key ); - if ( gtid == 0 ) { - gtid = KMP_GTID_DNE; - } - else { - gtid--; - } - KA_TRACE( 50, ("__kmp_gtid_get_specific: key:%d gtid:%d\n", - __kmp_gtid_threadprivate_key, gtid )); - return gtid; -} - -/* ------------------------------------------------------------------------ */ -/* ------------------------------------------------------------------------ */ - -double -__kmp_read_cpu_time( void ) -{ - /*clock_t t;*/ - struct tms buffer; - - /*t =*/ times( & buffer ); - - return (buffer.tms_utime + buffer.tms_cutime) / (double) CLOCKS_PER_SEC; -} - -int -__kmp_read_system_info( struct kmp_sys_info *info ) -{ - int status; - struct rusage r_usage; - - memset( info, 0, sizeof( *info ) ); - - status = getrusage( RUSAGE_SELF, &r_usage); - KMP_CHECK_SYSFAIL_ERRNO( "getrusage", status ); - - info->maxrss = r_usage.ru_maxrss; /* the maximum resident set size utilized (in kilobytes) */ - info->minflt = r_usage.ru_minflt; /* the number of page faults serviced without any I/O */ - info->majflt = r_usage.ru_majflt; /* the number of page faults serviced that required I/O */ - info->nswap = r_usage.ru_nswap; /* the number of times a process was "swapped" out of memory */ - info->inblock = r_usage.ru_inblock; /* the number of times the file system had to perform input */ - info->oublock = r_usage.ru_oublock; /* the number of times the file system had to perform output */ - info->nvcsw = r_usage.ru_nvcsw; /* the number of times a context switch was voluntarily */ - info->nivcsw = r_usage.ru_nivcsw; /* the number of times a context switch was forced */ - - return (status != 0); -} - -/* 
------------------------------------------------------------------------ */ -/* ------------------------------------------------------------------------ */ - -void -__kmp_read_system_time( double *delta ) -{ - double t_ns; - struct timeval tval; - struct timespec stop; - int status; - - status = gettimeofday( &tval, NULL ); - KMP_CHECK_SYSFAIL_ERRNO( "gettimeofday", status ); - TIMEVAL_TO_TIMESPEC( &tval, &stop ); - t_ns = TS2NS(stop) - TS2NS(__kmp_sys_timer_data.start); - *delta = (t_ns * 1e-9); -} - -void -__kmp_clear_system_time( void ) -{ - struct timeval tval; - int status; - status = gettimeofday( &tval, NULL ); - KMP_CHECK_SYSFAIL_ERRNO( "gettimeofday", status ); - TIMEVAL_TO_TIMESPEC( &tval, &__kmp_sys_timer_data.start ); -} - -/* ------------------------------------------------------------------------ */ -/* ------------------------------------------------------------------------ */ - -#ifdef BUILD_TV - -void -__kmp_tv_threadprivate_store( kmp_info_t *th, void *global_addr, void *thread_addr ) -{ - struct tv_data *p; - - p = (struct tv_data *) __kmp_allocate( sizeof( *p ) ); - - p->u.tp.global_addr = global_addr; - p->u.tp.thread_addr = thread_addr; - - p->type = (void *) 1; - - p->next = th->th.th_local.tv_data; - th->th.th_local.tv_data = p; - - if ( p->next == 0 ) { - int rc = pthread_setspecific( __kmp_tv_key, p ); - KMP_CHECK_SYSFAIL( "pthread_setspecific", rc ); - } -} - -#endif /* BUILD_TV */ - -/* ------------------------------------------------------------------------ */ -/* ------------------------------------------------------------------------ */ - -static int -__kmp_get_xproc( void ) { - - int r = 0; - - #if KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD - - r = sysconf( _SC_NPROCESSORS_ONLN ); - - #elif KMP_OS_DARWIN - - // Bug C77011 High "OpenMP Threads and number of active cores". - - // Find the number of available CPUs. - kern_return_t rc; - host_basic_info_data_t info; - mach_msg_type_number_t num = HOST_BASIC_INFO_COUNT; - rc = host_info( mach_host_self(), HOST_BASIC_INFO, (host_info_t) & info, & num ); - if ( rc == 0 && num == HOST_BASIC_INFO_COUNT ) { - // Cannot use KA_TRACE() here because this code works before trace support is - // initialized. - r = info.avail_cpus; - } else { - KMP_WARNING( CantGetNumAvailCPU ); - KMP_INFORM( AssumedNumCPU ); - }; // if - - #else - - #error "Unknown or unsupported OS." - - #endif - - return r > 0 ? r : 2; /* guess value of 2 if OS told us 0 */ - -} // __kmp_get_xproc - -int -__kmp_read_from_file( char const *path, char const *format, ... ) -{ - int result; - va_list args; - - va_start(args, format); - FILE *f = fopen(path, "rb"); - if ( f == NULL ) - return 0; - result = vfscanf(f, format, args); - fclose(f); - - return result; -} - -void -__kmp_runtime_initialize( void ) -{ - int status; - pthread_mutexattr_t mutex_attr; - pthread_condattr_t cond_attr; - - if ( __kmp_init_runtime ) { - return; - }; // if - - #if ( KMP_ARCH_X86 || KMP_ARCH_X86_64 ) - if ( ! 
__kmp_cpuinfo.initialized ) { - __kmp_query_cpuid( &__kmp_cpuinfo ); - }; // if - #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */ - - __kmp_xproc = __kmp_get_xproc(); - - if ( sysconf( _SC_THREADS ) ) { - - /* Query the maximum number of threads */ - __kmp_sys_max_nth = sysconf( _SC_THREAD_THREADS_MAX ); - if ( __kmp_sys_max_nth == -1 ) { - /* Unlimited threads for NPTL */ - __kmp_sys_max_nth = INT_MAX; - } - else if ( __kmp_sys_max_nth <= 1 ) { - /* Can't tell, just use PTHREAD_THREADS_MAX */ - __kmp_sys_max_nth = KMP_MAX_NTH; - } - - /* Query the minimum stack size */ - __kmp_sys_min_stksize = sysconf( _SC_THREAD_STACK_MIN ); - if ( __kmp_sys_min_stksize <= 1 ) { - __kmp_sys_min_stksize = KMP_MIN_STKSIZE; - } - } - - /* Set up minimum number of threads to switch to TLS gtid */ - __kmp_tls_gtid_min = KMP_TLS_GTID_MIN; - - #ifdef BUILD_TV - { - int rc = pthread_key_create( & __kmp_tv_key, 0 ); - KMP_CHECK_SYSFAIL( "pthread_key_create", rc ); - } - #endif - - status = pthread_key_create( &__kmp_gtid_threadprivate_key, __kmp_internal_end_dest ); - KMP_CHECK_SYSFAIL( "pthread_key_create", status ); - status = pthread_mutexattr_init( & mutex_attr ); - KMP_CHECK_SYSFAIL( "pthread_mutexattr_init", status ); - status = pthread_mutex_init( & __kmp_wait_mx.m_mutex, & mutex_attr ); - KMP_CHECK_SYSFAIL( "pthread_mutex_init", status ); - status = pthread_condattr_init( & cond_attr ); - KMP_CHECK_SYSFAIL( "pthread_condattr_init", status ); - status = pthread_cond_init( & __kmp_wait_cv.c_cond, & cond_attr ); - KMP_CHECK_SYSFAIL( "pthread_cond_init", status ); -#if USE_ITT_BUILD - __kmp_itt_initialize(); -#endif /* USE_ITT_BUILD */ - - __kmp_init_runtime = TRUE; -} - -void -__kmp_runtime_destroy( void ) -{ - int status; - - if ( ! __kmp_init_runtime ) { - return; // Nothing to do. 
-    };
-
-#if USE_ITT_BUILD
-    __kmp_itt_destroy();
-#endif /* USE_ITT_BUILD */
-
-    status = pthread_key_delete( __kmp_gtid_threadprivate_key );
-    KMP_CHECK_SYSFAIL( "pthread_key_delete", status );
-    #ifdef BUILD_TV
-        status = pthread_key_delete( __kmp_tv_key );
-        KMP_CHECK_SYSFAIL( "pthread_key_delete", status );
-    #endif
-
-    status = pthread_mutex_destroy( & __kmp_wait_mx.m_mutex );
-    if ( status != 0 && status != EBUSY ) {
-        KMP_SYSFAIL( "pthread_mutex_destroy", status );
-    }
-    status = pthread_cond_destroy( & __kmp_wait_cv.c_cond );
-    if ( status != 0 && status != EBUSY ) {
-        KMP_SYSFAIL( "pthread_cond_destroy", status );
-    }
-    #if KMP_AFFINITY_SUPPORTED
-        __kmp_affinity_uninitialize();
-    #endif
-
-    __kmp_init_runtime = FALSE;
-}
-
-
-/* Put the thread to sleep for a time period */
-/* NOTE: not currently used anywhere */
-void
-__kmp_thread_sleep( int millis )
-{
-    sleep( ( millis + 500 ) / 1000 );
-}
-
-/* Calculate the elapsed wall clock time for the user */
-void
-__kmp_elapsed( double *t )
-{
-    int status;
-# ifdef FIX_SGI_CLOCK
-    struct timespec ts;
-
-    status = clock_gettime( CLOCK_PROCESS_CPUTIME_ID, &ts );
-    KMP_CHECK_SYSFAIL_ERRNO( "clock_gettime", status );
-    *t = (double) ts.tv_nsec * (1.0 / (double) KMP_NSEC_PER_SEC) +
-        (double) ts.tv_sec;
-# else
-    struct timeval tv;
-
-    status = gettimeofday( & tv, NULL );
-    KMP_CHECK_SYSFAIL_ERRNO( "gettimeofday", status );
-    *t = (double) tv.tv_usec * (1.0 / (double) KMP_USEC_PER_SEC) +
-        (double) tv.tv_sec;
-# endif
-}
-
-/* Calculate the elapsed wall clock tick for the user */
-void
-__kmp_elapsed_tick( double *t )
-{
-    *t = 1 / (double) CLOCKS_PER_SEC;
-}
-
-/* Return the current time stamp in nsec */
-kmp_uint64
-__kmp_now_nsec()
-{
-    struct timeval t;
-    gettimeofday(&t, NULL);
-    return KMP_NSEC_PER_SEC*t.tv_sec + 1000*t.tv_usec;
-}
-
-#if KMP_ARCH_X86 || KMP_ARCH_X86_64
-/* Measure clock ticks per nanosecond */
-void
-__kmp_initialize_system_tick()
-{
-    kmp_uint64 delay = 100000; // 50~100 usec on most machines.
-    kmp_uint64 nsec = __kmp_now_nsec();
-    kmp_uint64 goal = __kmp_hardware_timestamp() + delay;
-    kmp_uint64 now;
-    while ((now = __kmp_hardware_timestamp()) < goal);
-    __kmp_ticks_per_nsec = 1.0 * (delay + (now - goal)) / (__kmp_now_nsec() - nsec);
-}
-#endif
-
-/*
-    Determine whether the given address is mapped into the current address space.
-*/
-
-int
-__kmp_is_address_mapped( void * addr ) {
-
-    int found = 0;
-    int rc;
-
-    #if KMP_OS_LINUX || KMP_OS_FREEBSD
-
-        /*
-            On Linux* OS, read the /proc/<pid>/maps pseudo-file to get all the address ranges mapped
-            into the address space.
-        */
-
-        char * name = __kmp_str_format( "/proc/%d/maps", getpid() );
-        FILE * file = NULL;
-
-        file = fopen( name, "r" );
-        KMP_ASSERT( file != NULL );
-
-        for ( ; ; ) {
-
-            void * beginning = NULL;
-            void * ending    = NULL;
-            char   perms[ 5 ];
-
-            rc = fscanf( file, "%p-%p %4s %*[^\n]\n", & beginning, & ending, perms );
-            if ( rc == EOF ) {
-                break;
-            }; // if
-            KMP_ASSERT( rc == 3 && KMP_STRLEN( perms ) == 4 ); // Make sure all fields are read.
-
-            // Ending address is not included in the region, but beginning is.
-            if ( ( addr >= beginning ) && ( addr < ending ) ) {
-                perms[ 2 ] = 0;    // 3rd and 4th characters do not matter.
-                if ( strcmp( perms, "rw" ) == 0 ) {
-                    // Memory we are looking for should be readable and writable.
-                    found = 1;
-                }; // if
-                break;
-            }; // if
-
-        }; // forever
-
-        // Free resources.
-        fclose( file );
-        KMP_INTERNAL_FREE( name );
-
-    #elif KMP_OS_DARWIN
-
-        /*
-            On OS X*, the /proc pseudo filesystem is not available. 
Try to read memory using the vm
-            interface.
-        */
-
-        int buffer;
-        vm_size_t count;
-        rc =
-            vm_read_overwrite(
-                mach_task_self(),           // Task to read memory of.
-                (vm_address_t)( addr ),     // Address to read from.
-                1,                          // Number of bytes to be read.
-                (vm_address_t)( & buffer ), // Address of buffer to save read bytes in.
-                & count                     // Address of var to save number of read bytes in.
-            );
-        if ( rc == 0 ) {
-            // Memory successfully read.
-            found = 1;
-        }; // if
-
-    #elif KMP_OS_FREEBSD || KMP_OS_NETBSD
-
-        // FIXME(FreeBSD, NetBSD): Implement this
-        found = 1;
-
-    #else
-
-        #error "Unknown or unsupported OS"
-
-    #endif
-
-    return found;
-
-} // __kmp_is_address_mapped
-
-#ifdef USE_LOAD_BALANCE
-
-
-# if KMP_OS_DARWIN
-
-// The function returns the rounded value of the system load average
-// during a given time interval, which depends on the value of the
-// __kmp_load_balance_interval variable (default is 60 sec, other values
-// may be 300 sec or 900 sec).
-// It returns -1 in case of error.
-int
-__kmp_get_load_balance( int max )
-{
-    double averages[3];
-    int ret_avg = 0;
-
-    int res = getloadavg( averages, 3 );
-
-    // Check __kmp_load_balance_interval to determine which of the averages to use.
-    // getloadavg() may return fewer samples than requested, that is, fewer than 3.
-    if ( __kmp_load_balance_interval < 180 && ( res >= 1 ) ) {
-        ret_avg = averages[0];// 1 min
-    } else if ( ( __kmp_load_balance_interval >= 180
-                  && __kmp_load_balance_interval < 600 ) && ( res >= 2 ) ) {
-        ret_avg = averages[1];// 5 min
-    } else if ( ( __kmp_load_balance_interval >= 600 ) && ( res == 3 ) ) {
-        ret_avg = averages[2];// 15 min
-    } else {// Error occurred
-        return -1;
-    }
-
-    return ret_avg;
-}
-
-# else // Linux* OS
-
-// The function returns the number of running (not sleeping) threads, or -1 in case of error.
-// An error could be reported if the Linux* OS kernel is too old (without "/proc" support).
-// Counting running threads stops if max running threads is encountered.
-int
-__kmp_get_load_balance( int max )
-{
-    static int permanent_error = 0;
-
-    static int    glb_running_threads = 0;  /* Saved count of the running threads for the thread balance algorithm */
-    static double glb_call_time       = 0;  /* Thread balance algorithm call time */
-
-    int running_threads = 0;              // Number of running threads in the system.
-
-    DIR  *          proc_dir   = NULL;    // Handle of "/proc/" directory.
-    struct dirent * proc_entry = NULL;
-
-    kmp_str_buf_t   task_path;            // "/proc/<pid>/task/<tid>/" path.
-    DIR  *          task_dir   = NULL;    // Handle of "/proc/<pid>/task/<tid>/" directory.
-    struct dirent * task_entry = NULL;
-    int             task_path_fixed_len;
-
-    kmp_str_buf_t   stat_path;            // "/proc/<pid>/task/<tid>/stat" path.
-    int             stat_file = -1;
-    int             stat_path_fixed_len;
-
-    int total_processes = 0;              // Total number of processes in system.
-    int total_threads   = 0;              // Total number of threads in system.
-
-    double call_time = 0.0;
-
-    __kmp_str_buf_init( & task_path );
-    __kmp_str_buf_init( & stat_path );
-
-    __kmp_elapsed( & call_time );
-
-    if ( glb_call_time &&
-            ( call_time - glb_call_time < __kmp_load_balance_interval ) ) {
-        running_threads = glb_running_threads;
-        goto finish;
-    }
-
-    glb_call_time = call_time;
-
-    // Do not spend time on scanning "/proc/" if we have a permanent error.
-    if ( permanent_error ) {
-        running_threads = -1;
-        goto finish;
-    }; // if
-
-    if ( max <= 0 ) {
-        max = INT_MAX;
-    }; // if
-
-    // Open "/proc/" directory.
-    proc_dir = opendir( "/proc" );
-    if ( proc_dir == NULL ) {
-        // Cannot open "/proc/". Probably the kernel does not support it. 
Return an error now and
-        // in subsequent calls.
-        running_threads = -1;
-        permanent_error = 1;
-        goto finish;
-    }; // if
-
-    // Initialize the fixed part of task_path. This part will not change.
-    __kmp_str_buf_cat( & task_path, "/proc/", 6 );
-    task_path_fixed_len = task_path.used;    // Remember number of used characters.
-
-    proc_entry = readdir( proc_dir );
-    while ( proc_entry != NULL ) {
-        // Proc entry is a directory and its name starts with a digit. Assume it is a process'
-        // directory.
-        if ( proc_entry->d_type == DT_DIR && isdigit( proc_entry->d_name[ 0 ] ) ) {
-
-            ++ total_processes;
-            // Make sure the init process is the very first in "/proc", so we can replace
-            // strcmp( proc_entry->d_name, "1" ) == 0 with the simpler total_processes == 1.
-            // We are going to check that total_processes == 1 => d_name == "1" is true (where
-            // "=>" is implication). Since C++ does not have an => operator, let us replace it with its
-            // equivalent: a => b  ==  ! a || b.
-            KMP_DEBUG_ASSERT( total_processes != 1 || strcmp( proc_entry->d_name, "1" ) == 0 );
-
-            // Construct task_path.
-            task_path.used = task_path_fixed_len;    // Reset task_path to "/proc/".
-            __kmp_str_buf_cat( & task_path, proc_entry->d_name, KMP_STRLEN( proc_entry->d_name ) );
-            __kmp_str_buf_cat( & task_path, "/task", 5 );
-
-            task_dir = opendir( task_path.str );
-            if ( task_dir == NULL ) {
-                // A process can finish between reading the "/proc/" directory entry and opening the process'
-                // "task/" directory. So, in the general case we should not complain, but have to skip
-                // this process and read the next one.
-                // But on systems with no "task/" support we would spend a lot of time scanning the "/proc/"
-                // tree again and again without any benefit. The "init" process (its pid is 1) should
-                // always exist, so, if we cannot open the "/proc/1/task/" directory, it means "task/"
-                // is not supported by the kernel. Report an error now and in the future.
-                if ( strcmp( proc_entry->d_name, "1" ) == 0 ) {
-                    running_threads = -1;
-                    permanent_error = 1;
-                    goto finish;
-                }; // if
-            } else {
-                // Construct the fixed part of the stat file path.
-                __kmp_str_buf_clear( & stat_path );
-                __kmp_str_buf_cat( & stat_path, task_path.str, task_path.used );
-                __kmp_str_buf_cat( & stat_path, "/", 1 );
-                stat_path_fixed_len = stat_path.used;
-
-                task_entry = readdir( task_dir );
-                while ( task_entry != NULL ) {
-                    // It is a directory and its name starts with a digit.
-                    if ( proc_entry->d_type == DT_DIR && isdigit( task_entry->d_name[ 0 ] ) ) {
-
-                        ++ total_threads;
-
-                        // Construct the complete stat file path. The easiest way would be:
-                        //   __kmp_str_buf_print( & stat_path, "%s/%s/stat", task_path.str, task_entry->d_name );
-                        // but a series of __kmp_str_buf_cat calls works a bit faster.
-                        stat_path.used = stat_path_fixed_len;    // Reset stat path to its fixed part.
-                        __kmp_str_buf_cat( & stat_path, task_entry->d_name, KMP_STRLEN( task_entry->d_name ) );
-                        __kmp_str_buf_cat( & stat_path, "/stat", 5 );
-
-                        // Note: The low-level API (open/read/close) is used. The high-level API
-                        // (fopen/fclose) works ~ 30 % slower.
-                        stat_file = open( stat_path.str, O_RDONLY );
-                        if ( stat_file == -1 ) {
-                            // We cannot report an error because the task (thread) can terminate just
-                            // before reading this file.
-                        } else {
-                            /*
-                                Content of the "stat" file looks like:
-
-                                    24285 (program) S ...
-
-                                It is a single line (if the program name does not include funny
-                                symbols). The first number is a thread id, then the executable file
-                                name in parentheses, then the state of the thread. We need just the
-                                thread state.
-
-                                Good news: Length of the program name is 15 characters max. 
Longer
-                                names are truncated.
-
-                                Thus, we need a rather short buffer: 15 chars for the program name +
-                                2 parentheses + 3 spaces + ~7 digits of pid = 37.
-
-                                Bad news: The program name may contain special symbols like a space,
-                                a closing parenthesis, or even a new line. This makes parsing the "stat"
-                                file not 100 % reliable. In case of funny program names, parsing
-                                may fail (report an incorrect thread state).
-
-                                Parsing the "status" file looks more promising (due to a different
-                                file structure and escaping of special symbols) but reading and
-                                parsing of the "status" file works slower.
-
-                                -- ln
-                            */
-                            char buffer[ 65 ];
-                            int len;
-                            len = read( stat_file, buffer, sizeof( buffer ) - 1 );
-                            if ( len >= 0 ) {
-                                buffer[ len ] = 0;
-                                // Using scanf:
-                                //     sscanf( buffer, "%*d (%*s) %c ", & state );
-                                // looks very nice, but searching for a closing parenthesis works a
-                                // bit faster.
-                                char * close_parent = strstr( buffer, ") " );
-                                if ( close_parent != NULL ) {
-                                    char state = * ( close_parent + 2 );
-                                    if ( state == 'R' ) {
-                                        ++ running_threads;
-                                        if ( running_threads >= max ) {
-                                            goto finish;
-                                        }; // if
-                                    }; // if
-                                }; // if
-                            }; // if
-                            close( stat_file );
-                            stat_file = -1;
-                        }; // if
-                    }; // if
-                    task_entry = readdir( task_dir );
-                }; // while
-                closedir( task_dir );
-                task_dir = NULL;
-            }; // if
-        }; // if
-        proc_entry = readdir( proc_dir );
-    }; // while
-
-    //
-    // There _might_ be a timing hole where the thread executing this
-    // code gets skipped in the load balance, and running_threads is 0.
-    // Assert in the debug builds only!!!
-    //
-    KMP_DEBUG_ASSERT( running_threads > 0 );
-    if ( running_threads <= 0 ) {
-        running_threads = 1;
-    }
-
-    finish: // Clean up and exit.
-        if ( proc_dir != NULL ) {
-            closedir( proc_dir );
-        }; // if
-        __kmp_str_buf_free( & task_path );
-        if ( task_dir != NULL ) {
-            closedir( task_dir );
-        }; // if
-        __kmp_str_buf_free( & stat_path );
-        if ( stat_file != -1 ) {
-            close( stat_file );
-        }; // if
-
-    glb_running_threads = running_threads;
-
-    return running_threads;
-
-} // __kmp_get_load_balance
-
-# endif // KMP_OS_DARWIN
-
-#endif // USE_LOAD_BALANCE
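
The "stat" parsing described above can be exercised in isolation. The sketch below counts the
running tasks of the current process the same way: scan /proc/self/task, read each stat file
with the low-level API, and take the character after the ") " that closes the command name as
the thread state. The paths and the buffer size here are illustrative, not the runtime's.

    #include <ctype.h>
    #include <dirent.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    // Returns the number of this process's threads in state 'R', or -1 on error.
    static int count_running_threads(void) {
        DIR *task_dir = opendir("/proc/self/task");
        if (task_dir == NULL)
            return -1;                    // no "/proc" task support
        int running = 0;
        struct dirent *e;
        while ((e = readdir(task_dir)) != NULL) {
            if (!isdigit((unsigned char)e->d_name[0]))
                continue;                 // skip "." and ".."
            char path[64];
            snprintf(path, sizeof(path), "/proc/self/task/%s/stat", e->d_name);
            int fd = open(path, O_RDONLY);
            if (fd == -1)
                continue;                 // thread may have exited; not an error
            char buf[65];
            ssize_t len = read(fd, buf, sizeof(buf) - 1);
            close(fd);
            if (len <= 0)
                continue;
            buf[len] = '\0';
            const char *p = strstr(buf, ") ");  // state follows the command name
            if (p != NULL && p[2] == 'R')
                ++running;
        }
        closedir(task_dir);
        return running;
    }
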

-#if !(KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_MIC || (KMP_OS_LINUX && KMP_ARCH_AARCH64) || KMP_ARCH_PPC64)
-
-// we really only need the case with 1 argument, because CLANG always builds
-// a struct of pointers to shared variables referenced in the outlined function
-int
-__kmp_invoke_microtask( microtask_t pkfn,
-                        int gtid, int tid,
-                        int argc, void *p_argv[]
-#if OMPT_SUPPORT
-                        , void **exit_frame_ptr
-#endif
-)
-{
-#if OMPT_SUPPORT
-    *exit_frame_ptr = __builtin_frame_address(0);
-#endif
-
-    switch (argc) {
-    default:
-        fprintf(stderr, "Too many args to microtask: %d!\n", argc);
-        fflush(stderr);
-        exit(-1);
-    case 0:
-        (*pkfn)(&gtid, &tid);
-        break;
-    case 1:
-        (*pkfn)(&gtid, &tid, p_argv[0]);
-        break;
-    case 2:
-        (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1]);
-        break;
-    case 3:
-        (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2]);
-        break;
-    case 4:
-        (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3]);
-        break;
-    case 5:
-        (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4]);
-        break;
-    case 6:
-        (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
-                p_argv[5]);
-        break;
-    case 7:
-        (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
-                p_argv[5], p_argv[6]);
-        break;
-    case 8:
-        (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
-                p_argv[5], p_argv[6], p_argv[7]);
-        break;
-    case 9:
-        (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
-                p_argv[5], p_argv[6], p_argv[7], p_argv[8]);
-        break;
-    case 10:
-        (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
-                p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9]);
-        break;
-    case 11:
-        (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
-                p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10]);
-        break;
-    case 12:
-        (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
-                p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
-                p_argv[11]);
-        break;
-    case 13:
-        (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
-                p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
-                p_argv[11], p_argv[12]);
-        break;
-    case 14:
-        (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
-                p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
-                p_argv[11], p_argv[12], p_argv[13]);
-        break;
-    case 15:
-        (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
-                p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
-                p_argv[11], p_argv[12], p_argv[13], p_argv[14]);
-        break;
-    }
-
-#if OMPT_SUPPORT
-    *exit_frame_ptr = 0;
-#endif
-
-    return 1;
-}
-
-#endif
-
-// end of file //
-
Index: runtime/src/z_Linux_util.cpp
===================================================================
--- runtime/src/z_Linux_util.cpp
+++ runtime/src/z_Linux_util.cpp
@@ -1,5 +1,5 @@
 /*
- * z_Linux_util.c -- platform specific routines.
+ * z_Linux_util.cpp -- platform specific routines.
  */
 
 
@@ -1503,7 +1503,7 @@
        Suggestion is to make the library initialization lazier, similar
        to what done for __kmpc_begin(). */
     // TODO: synchronize all static initializations with regular library
-    //       startup; look at kmp_global.c and etc.
+    //       startup; look at kmp_global.cpp and etc.
     //__kmp_internal_begin ();
 
 }
Index: runtime/src/z_Windows_NT-586_util.c
===================================================================
--- runtime/src/z_Windows_NT-586_util.c
+++ runtime/src/z_Windows_NT-586_util.c
@@ -1,163 +0,0 @@
-/*
- * z_Windows_NT-586_util.c -- platform specific routines.
- */
-
-
-//===----------------------------------------------------------------------===//
-//
-//                     The LLVM Compiler Infrastructure
-//
-// This file is dual licensed under the MIT and the University of Illinois Open
-// Source Licenses. See LICENSE.txt for details.
-//
-//===----------------------------------------------------------------------===//
-
-
-#include "kmp.h"
-
-#if (KMP_ARCH_X86 || KMP_ARCH_X86_64)
-/* Only 32-bit "add-exchange" instruction on IA-32 architecture causes us to
- * use compare_and_store for these routines
- */
-
-kmp_int8
-__kmp_test_then_or8( volatile kmp_int8 *p, kmp_int8 d )
-{
-    kmp_int8 old_value, new_value;
-
-    old_value = TCR_1( *p );
-    new_value = old_value | d;
-
-    while ( ! __kmp_compare_and_store8 ( p, old_value, new_value ) )
-    {
-        KMP_CPU_PAUSE();
-        old_value = TCR_1( *p );
-        new_value = old_value | d;
-    }
-    return old_value;
-}
-
-kmp_int8
-__kmp_test_then_and8( volatile kmp_int8 *p, kmp_int8 d )
-{
-    kmp_int8 old_value, new_value;
-
-    old_value = TCR_1( *p );
-    new_value = old_value & d;
-
-    while ( ! 
__kmp_compare_and_store8 ( p, old_value, new_value ) ) - { - KMP_CPU_PAUSE(); - old_value = TCR_1( *p ); - new_value = old_value & d; - } - return old_value; -} - -kmp_int32 -__kmp_test_then_or32( volatile kmp_int32 *p, kmp_int32 d ) -{ - kmp_int32 old_value, new_value; - - old_value = TCR_4( *p ); - new_value = old_value | d; - - while ( ! __kmp_compare_and_store32 ( p, old_value, new_value ) ) - { - KMP_CPU_PAUSE(); - old_value = TCR_4( *p ); - new_value = old_value | d; - } - return old_value; -} - -kmp_int32 -__kmp_test_then_and32( volatile kmp_int32 *p, kmp_int32 d ) -{ - kmp_int32 old_value, new_value; - - old_value = TCR_4( *p ); - new_value = old_value & d; - - while ( ! __kmp_compare_and_store32 ( p, old_value, new_value ) ) - { - KMP_CPU_PAUSE(); - old_value = TCR_4( *p ); - new_value = old_value & d; - } - return old_value; -} - -kmp_int8 -__kmp_test_then_add8( volatile kmp_int8 *p, kmp_int8 d ) -{ - kmp_int64 old_value, new_value; - - old_value = TCR_1( *p ); - new_value = old_value + d; - while ( ! __kmp_compare_and_store8 ( p, old_value, new_value ) ) - { - KMP_CPU_PAUSE(); - old_value = TCR_1( *p ); - new_value = old_value + d; - } - return old_value; -} - -#if KMP_ARCH_X86 -kmp_int64 -__kmp_test_then_add64( volatile kmp_int64 *p, kmp_int64 d ) -{ - kmp_int64 old_value, new_value; - - old_value = TCR_8( *p ); - new_value = old_value + d; - while ( ! __kmp_compare_and_store64 ( p, old_value, new_value ) ) - { - KMP_CPU_PAUSE(); - old_value = TCR_8( *p ); - new_value = old_value + d; - } - return old_value; -} -#endif /* KMP_ARCH_X86 */ - -kmp_int64 -__kmp_test_then_or64( volatile kmp_int64 *p, kmp_int64 d ) -{ - kmp_int64 old_value, new_value; - - old_value = TCR_8( *p ); - new_value = old_value | d; - while ( ! __kmp_compare_and_store64 ( p, old_value, new_value ) ) - { - KMP_CPU_PAUSE(); - old_value = TCR_8( *p ); - new_value = old_value | d; - } - - return old_value; -} - -kmp_int64 -__kmp_test_then_and64( volatile kmp_int64 *p, kmp_int64 d ) -{ - kmp_int64 old_value, new_value; - - old_value = TCR_8( *p ); - new_value = old_value & d; - while ( ! __kmp_compare_and_store64 ( p, old_value, new_value ) ) - { - KMP_CPU_PAUSE(); - old_value = TCR_8( *p ); - new_value = old_value & d; - } - - return old_value; -} - -#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */ - -/* ------------------------------------------------------------------------ */ -/* ------------------------------------------------------------------------ */ - Index: runtime/src/z_Windows_NT-586_util.cpp =================================================================== --- runtime/src/z_Windows_NT-586_util.cpp +++ runtime/src/z_Windows_NT-586_util.cpp @@ -1,5 +1,5 @@ /* - * z_Windows_NT-586_util.c -- platform specific routines. + * z_Windows_NT-586_util.cpp -- platform specific routines. */ Index: runtime/src/z_Windows_NT_util.c =================================================================== --- runtime/src/z_Windows_NT_util.c +++ runtime/src/z_Windows_NT_util.c @@ -1,1772 +0,0 @@ -/* - * z_Windows_NT_util.c -- platform specific routines. - */ - - -//===----------------------------------------------------------------------===// -// -// The LLVM Compiler Infrastructure -// -// This file is dual licensed under the MIT and the University of Illinois Open -// Source Licenses. See LICENSE.txt for details. 
-//
-//===----------------------------------------------------------------------===//
-
-
-#include "kmp.h"
-#include "kmp_itt.h"
-#include "kmp_i18n.h"
-#include "kmp_io.h"
-#include "kmp_wait_release.h"
-#include "kmp_affinity.h"
-
-/* This code is related to NtQuerySystemInformation() function. This function
-   is used in the Load balance algorithm for OMP_DYNAMIC=true to find the
-   number of running threads in the system. */
-
-#include <ntstatus.h>
-#include <ntsecapi.h>   // UNICODE_STRING
-
-enum SYSTEM_INFORMATION_CLASS {
-    SystemProcessInformation = 5
-}; // SYSTEM_INFORMATION_CLASS
-
-struct CLIENT_ID {
-    HANDLE UniqueProcess;
-    HANDLE UniqueThread;
-}; // struct CLIENT_ID
-
-enum THREAD_STATE {
-    StateInitialized,
-    StateReady,
-    StateRunning,
-    StateStandby,
-    StateTerminated,
-    StateWait,
-    StateTransition,
-    StateUnknown
-}; // enum THREAD_STATE
-
-struct VM_COUNTERS {
-    SIZE_T PeakVirtualSize;
-    SIZE_T VirtualSize;
-    ULONG  PageFaultCount;
-    SIZE_T PeakWorkingSetSize;
-    SIZE_T WorkingSetSize;
-    SIZE_T QuotaPeakPagedPoolUsage;
-    SIZE_T QuotaPagedPoolUsage;
-    SIZE_T QuotaPeakNonPagedPoolUsage;
-    SIZE_T QuotaNonPagedPoolUsage;
-    SIZE_T PagefileUsage;
-    SIZE_T PeakPagefileUsage;
-    SIZE_T PrivatePageCount;
-}; // struct VM_COUNTERS
-
-struct SYSTEM_THREAD {
-    LARGE_INTEGER KernelTime;
-    LARGE_INTEGER UserTime;
-    LARGE_INTEGER CreateTime;
-    ULONG         WaitTime;
-    LPVOID        StartAddress;
-    CLIENT_ID     ClientId;
-    DWORD         Priority;
-    LONG          BasePriority;
-    ULONG         ContextSwitchCount;
-    THREAD_STATE  State;
-    ULONG         WaitReason;
-}; // SYSTEM_THREAD
-
-KMP_BUILD_ASSERT( offsetof( SYSTEM_THREAD, KernelTime ) == 0 );
-#if KMP_ARCH_X86
-    KMP_BUILD_ASSERT( offsetof( SYSTEM_THREAD, StartAddress ) == 28 );
-    KMP_BUILD_ASSERT( offsetof( SYSTEM_THREAD, State ) == 52 );
-#else
-    KMP_BUILD_ASSERT( offsetof( SYSTEM_THREAD, StartAddress ) == 32 );
-    KMP_BUILD_ASSERT( offsetof( SYSTEM_THREAD, State ) == 68 );
-#endif
-
-struct SYSTEM_PROCESS_INFORMATION {
-    ULONG          NextEntryOffset;
-    ULONG          NumberOfThreads;
-    LARGE_INTEGER  Reserved[ 3 ];
-    LARGE_INTEGER  CreateTime;
-    LARGE_INTEGER  UserTime;
-    LARGE_INTEGER  KernelTime;
-    UNICODE_STRING ImageName;
-    DWORD          BasePriority;
-    HANDLE         ProcessId;
-    HANDLE         ParentProcessId;
-    ULONG          HandleCount;
-    ULONG          Reserved2[ 2 ];
-    VM_COUNTERS    VMCounters;
-    IO_COUNTERS    IOCounters;
-    SYSTEM_THREAD  Threads[ 1 ];
-}; // SYSTEM_PROCESS_INFORMATION
-typedef SYSTEM_PROCESS_INFORMATION * PSYSTEM_PROCESS_INFORMATION;
-
-KMP_BUILD_ASSERT( offsetof( SYSTEM_PROCESS_INFORMATION, NextEntryOffset ) ==  0 );
-KMP_BUILD_ASSERT( offsetof( SYSTEM_PROCESS_INFORMATION, CreateTime      ) == 32 );
-KMP_BUILD_ASSERT( offsetof( SYSTEM_PROCESS_INFORMATION, ImageName       ) == 56 );
-#if KMP_ARCH_X86
-    KMP_BUILD_ASSERT( offsetof( SYSTEM_PROCESS_INFORMATION, ProcessId   ) ==  68 );
-    KMP_BUILD_ASSERT( offsetof( SYSTEM_PROCESS_INFORMATION, HandleCount ) ==  76 );
-    KMP_BUILD_ASSERT( offsetof( SYSTEM_PROCESS_INFORMATION, VMCounters  ) ==  88 );
-    KMP_BUILD_ASSERT( offsetof( SYSTEM_PROCESS_INFORMATION, IOCounters  ) == 136 );
-    KMP_BUILD_ASSERT( offsetof( SYSTEM_PROCESS_INFORMATION, Threads     ) == 184 );
-#else
-    KMP_BUILD_ASSERT( offsetof( SYSTEM_PROCESS_INFORMATION, ProcessId   ) ==  80 );
-    KMP_BUILD_ASSERT( offsetof( SYSTEM_PROCESS_INFORMATION, HandleCount ) ==  96 );
-    KMP_BUILD_ASSERT( offsetof( SYSTEM_PROCESS_INFORMATION, VMCounters  ) == 112 );
-    KMP_BUILD_ASSERT( offsetof( SYSTEM_PROCESS_INFORMATION, IOCounters  ) == 208 );
-    KMP_BUILD_ASSERT( offsetof( SYSTEM_PROCESS_INFORMATION, Threads     ) == 256 );
-#endif
-
-typedef NTSTATUS (NTAPI *NtQuerySystemInformation_t)( 
SYSTEM_INFORMATION_CLASS, PVOID, ULONG, PULONG );
-NtQuerySystemInformation_t NtQuerySystemInformation = NULL;
-
-HMODULE ntdll = NULL;
-
-/* End of NtQuerySystemInformation()-related code */
-
-static HMODULE kernel32 = NULL;
-
-/* ----------------------------------------------------------------------------------- */
-/* ----------------------------------------------------------------------------------- */
-
-#if KMP_HANDLE_SIGNALS
-    typedef void (* sig_func_t )( int );
-    static sig_func_t __kmp_sighldrs[ NSIG ];
-    static int __kmp_siginstalled[ NSIG ];
-#endif
-
-#if KMP_USE_MONITOR
-static HANDLE __kmp_monitor_ev;
-#endif
-static kmp_int64 __kmp_win32_time;
-double __kmp_win32_tick;
-
-int __kmp_init_runtime = FALSE;
-CRITICAL_SECTION __kmp_win32_section;
-
-void
-__kmp_win32_mutex_init( kmp_win32_mutex_t *mx )
-{
-    InitializeCriticalSection( & mx->cs );
-#if USE_ITT_BUILD
-    __kmp_itt_system_object_created( & mx->cs, "Critical Section" );
-#endif /* USE_ITT_BUILD */
-}
-
-void
-__kmp_win32_mutex_destroy( kmp_win32_mutex_t *mx )
-{
-    DeleteCriticalSection( & mx->cs );
-}
-
-void
-__kmp_win32_mutex_lock( kmp_win32_mutex_t *mx )
-{
-    EnterCriticalSection( & mx->cs );
-}
-
-void
-__kmp_win32_mutex_unlock( kmp_win32_mutex_t *mx )
-{
-    LeaveCriticalSection( & mx->cs );
-}
-
-void
-__kmp_win32_cond_init( kmp_win32_cond_t *cv )
-{
-    cv->waiters_count_         = 0;
-    cv->wait_generation_count_ = 0;
-    cv->release_count_         = 0;
-
-    /* Initialize the critical section */
-    __kmp_win32_mutex_init( & cv->waiters_count_lock_ );
-
-    /* Create a manual-reset event. */
-    cv->event_ = CreateEvent( NULL,     // no security
-                              TRUE,     // manual-reset
-                              FALSE,    // non-signaled initially
-                              NULL );   // unnamed
-#if USE_ITT_BUILD
-    __kmp_itt_system_object_created( cv->event_, "Event" );
-#endif /* USE_ITT_BUILD */
-}
-
-void
-__kmp_win32_cond_destroy( kmp_win32_cond_t *cv )
-{
-    __kmp_win32_mutex_destroy( & cv->waiters_count_lock_ );
-    __kmp_free_handle( cv->event_ );
-    memset( cv, '\0', sizeof( *cv ) );
-}
-
-/* TODO associate cv with a team instead of a thread so as to optimize
- * the case where we wake up a whole team */
-
-void
-__kmp_win32_cond_wait( kmp_win32_cond_t *cv, kmp_win32_mutex_t *mx, kmp_info_t *th, int need_decrease_load )
-{
-    int my_generation;
-    int last_waiter;
-
-    /* Avoid race conditions */
-    __kmp_win32_mutex_lock( &cv->waiters_count_lock_ );
-
-    /* Increment count of waiters */
-    cv->waiters_count_++;
-
-    /* Store current generation in our activation record. */
-    my_generation = cv->wait_generation_count_;
-
-    __kmp_win32_mutex_unlock( &cv->waiters_count_lock_ );
-    __kmp_win32_mutex_unlock( mx );
-
-    for (;;) {
-        int wait_done;
-
-        /* Wait until the event is signaled */
-        WaitForSingleObject( cv->event_, INFINITE );
-
-        __kmp_win32_mutex_lock( &cv->waiters_count_lock_ );
-
-        /* Exit the loop when the cv->event_ is signaled and there are
-         * still waiting threads from this wait generation that haven't
-         * been released from this wait yet. 
*/
-        wait_done = ( cv->release_count_ > 0 ) &&
-                    ( cv->wait_generation_count_ != my_generation );
-
-        __kmp_win32_mutex_unlock( &cv->waiters_count_lock_);
-
-        /* there used to be a semicolon after the if statement,
-         * it looked like a bug, so I removed it */
-        if( wait_done )
-            break;
-    }
-
-    __kmp_win32_mutex_lock( mx );
-    __kmp_win32_mutex_lock( &cv->waiters_count_lock_ );
-
-    cv->waiters_count_--;
-    cv->release_count_--;
-
-    last_waiter = ( cv->release_count_ == 0 );
-
-    __kmp_win32_mutex_unlock( &cv->waiters_count_lock_ );
-
-    if( last_waiter ) {
-        /* We're the last waiter to be notified, so reset the manual event. */
-        ResetEvent( cv->event_ );
-    }
-}
-
-void
-__kmp_win32_cond_broadcast( kmp_win32_cond_t *cv )
-{
-    __kmp_win32_mutex_lock( &cv->waiters_count_lock_ );
-
-    if( cv->waiters_count_ > 0 ) {
-        SetEvent( cv->event_ );
-        /* Release all the threads in this generation. */
-
-        cv->release_count_ = cv->waiters_count_;
-
-        /* Start a new generation. */
-        cv->wait_generation_count_++;
-    }
-
-    __kmp_win32_mutex_unlock( &cv->waiters_count_lock_ );
-}
-
-void
-__kmp_win32_cond_signal( kmp_win32_cond_t *cv )
-{
-    __kmp_win32_cond_broadcast( cv );
-}
-
-/* ------------------------------------------------------------------------ */
-/* ------------------------------------------------------------------------ */
-
-void
-__kmp_enable( int new_state )
-{
-    if (__kmp_init_runtime)
-        LeaveCriticalSection( & __kmp_win32_section );
-}
-
-void
-__kmp_disable( int *old_state )
-{
-    *old_state = 0;
-
-    if (__kmp_init_runtime)
-        EnterCriticalSection( & __kmp_win32_section );
-}
-
-void
-__kmp_suspend_initialize( void )
-{
-    /* do nothing */
-}
-
-static void
-__kmp_suspend_initialize_thread( kmp_info_t *th )
-{
-    if ( ! TCR_4( th->th.th_suspend_init ) ) {
-        /* this means we haven't initialized the suspension pthread objects for this thread
-           in this instance of the process */
-        __kmp_win32_cond_init( &th->th.th_suspend_cv );
-        __kmp_win32_mutex_init( &th->th.th_suspend_mx );
-        TCW_4( th->th.th_suspend_init, TRUE );
-    }
-}
-
-void
-__kmp_suspend_uninitialize_thread( kmp_info_t *th )
-{
-    if ( TCR_4( th->th.th_suspend_init ) ) {
-        /* this means we have initialized the suspension pthread objects for this thread
-           in this instance of the process */
-        __kmp_win32_cond_destroy( & th->th.th_suspend_cv );
-        __kmp_win32_mutex_destroy( & th->th.th_suspend_mx );
-        TCW_4( th->th.th_suspend_init, FALSE );
-    }
-}
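
The generation-counting scheme above is the classic way to emulate a condition variable with
a single Win32 manual-reset event. A compressed sketch of just that bookkeeping, with member
names matching the fields used above (error handling omitted):

    #include <windows.h>

    struct win32_cond {
        CRITICAL_SECTION waiters_count_lock_;
        HANDLE           event_;                 // manual-reset event
        int              waiters_count_;
        int              wait_generation_count_;
        int              release_count_;
    };

    static void cond_init(win32_cond *cv) {
        InitializeCriticalSection(&cv->waiters_count_lock_);
        cv->event_ = CreateEvent(NULL, TRUE, FALSE, NULL);  // manual-reset, non-signaled
        cv->waiters_count_ = cv->wait_generation_count_ = cv->release_count_ = 0;
    }

    static void cond_broadcast(win32_cond *cv) {
        EnterCriticalSection(&cv->waiters_count_lock_);
        if (cv->waiters_count_ > 0) {
            SetEvent(cv->event_);                     // wake every current waiter ...
            cv->release_count_ = cv->waiters_count_;  // ... but only that many of them
            cv->wait_generation_count_++;             // later waiters are a new generation
        }
        LeaveCriticalSection(&cv->waiters_count_lock_);
    }

    static void cond_wait(win32_cond *cv, CRITICAL_SECTION *mx) {
        EnterCriticalSection(&cv->waiters_count_lock_);
        cv->waiters_count_++;
        int my_generation = cv->wait_generation_count_;
        LeaveCriticalSection(&cv->waiters_count_lock_);
        LeaveCriticalSection(mx);
        for (;;) {
            WaitForSingleObject(cv->event_, INFINITE);
            EnterCriticalSection(&cv->waiters_count_lock_);
            // Done only if a release is pending and it targets an older generation.
            int wait_done = cv->release_count_ > 0 &&
                            cv->wait_generation_count_ != my_generation;
            LeaveCriticalSection(&cv->waiters_count_lock_);
            if (wait_done)
                break;
        }
        EnterCriticalSection(mx);
        EnterCriticalSection(&cv->waiters_count_lock_);
        cv->waiters_count_--;
        cv->release_count_--;
        if (cv->release_count_ == 0)
            ResetEvent(cv->event_);                   // last released waiter re-arms
        LeaveCriticalSection(&cv->waiters_count_lock_);
    }
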
- */ - old_spin = flag->set_sleeping(); - - KF_TRACE( 5, ( "__kmp_suspend_template: T#%d set sleep bit for flag's loc(%p)==%d\n", - th_gtid, flag->get(), *(flag->get()) ) ); - - if ( flag->done_check_val(old_spin) ) { - old_spin = flag->unset_sleeping(); - KF_TRACE( 5, ( "__kmp_suspend_template: T#%d false alarm, reset sleep bit for flag's loc(%p)\n", - th_gtid, flag->get()) ); - } else { -#ifdef DEBUG_SUSPEND - __kmp_suspend_count++; -#endif - /* Encapsulate in a loop as the documentation states that this may - * "with low probability" return when the condition variable has - * not been signaled or broadcast - */ - int deactivated = FALSE; - TCW_PTR(th->th.th_sleep_loc, (void *)flag); - while ( flag->is_sleeping() ) { - KF_TRACE( 15, ("__kmp_suspend_template: T#%d about to perform kmp_win32_cond_wait()\n", - th_gtid ) ); - // Mark the thread as no longer active (only in the first iteration of the loop). - if ( ! deactivated ) { - th->th.th_active = FALSE; - if ( th->th.th_active_in_pool ) { - th->th.th_active_in_pool = FALSE; - KMP_TEST_THEN_DEC32( - (kmp_int32 *) &__kmp_thread_pool_active_nth ); - KMP_DEBUG_ASSERT( TCR_4(__kmp_thread_pool_active_nth) >= 0 ); - } - deactivated = TRUE; - - __kmp_win32_cond_wait( &th->th.th_suspend_cv, &th->th.th_suspend_mx, 0, 0 ); - } - else { - __kmp_win32_cond_wait( &th->th.th_suspend_cv, &th->th.th_suspend_mx, 0, 0 ); - } - -#ifdef KMP_DEBUG - if( flag->is_sleeping() ) { - KF_TRACE( 100, ("__kmp_suspend_template: T#%d spurious wakeup\n", th_gtid )); - } -#endif /* KMP_DEBUG */ - - } // while - - // Mark the thread as active again (if it was previously marked as inactive) - if ( deactivated ) { - th->th.th_active = TRUE; - if ( TCR_4(th->th.th_in_pool) ) { - KMP_TEST_THEN_INC32( - (kmp_int32 *) &__kmp_thread_pool_active_nth ); - th->th.th_active_in_pool = TRUE; - } - } - } - - __kmp_win32_mutex_unlock( &th->th.th_suspend_mx ); - - KF_TRACE( 30, ("__kmp_suspend_template: T#%d exit\n", th_gtid ) ); -} - -void __kmp_suspend_32(int th_gtid, kmp_flag_32 *flag) { - __kmp_suspend_template(th_gtid, flag); -} -void __kmp_suspend_64(int th_gtid, kmp_flag_64 *flag) { - __kmp_suspend_template(th_gtid, flag); -} -void __kmp_suspend_oncore(int th_gtid, kmp_flag_oncore *flag) { - __kmp_suspend_template(th_gtid, flag); -} - - -/* This routine signals the thread specified by target_gtid to wake up - * after setting the sleep bit indicated by the flag argument to FALSE - */ -template <class C> -static inline void __kmp_resume_template( int target_gtid, C *flag ) -{ - kmp_info_t *th = __kmp_threads[target_gtid]; - int status; - -#ifdef KMP_DEBUG - int gtid = TCR_4(__kmp_init_gtid) ? __kmp_get_gtid() : -1; -#endif - - KF_TRACE( 30, ( "__kmp_resume_template: T#%d wants to wakeup T#%d enter\n", gtid, target_gtid ) ); - - __kmp_suspend_initialize_thread( th ); - __kmp_win32_mutex_lock( &th->th.th_suspend_mx ); - - if (!flag) { // coming from __kmp_null_resume_wrapper - flag = (C *)th->th.th_sleep_loc; - } - - // First, check if the flag is null or its type has changed. If so, someone else woke it up.
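The suspend path above is a set-flag-then-recheck protocol: publish the sleep bit, re-test the wait condition to catch a racing release, then loop on the condition variable to absorb spurious wakeups. Here is a portable analog of that protocol using the standard library; this is illustrative only (the runtime uses its own templated flag classes and the Win32 primitives above), with bit 0 standing in for "condition met" and bit 1 for the sleep bit:

    #include <atomic>
    #include <condition_variable>
    #include <mutex>

    std::mutex mx;
    std::condition_variable cv;
    std::atomic<unsigned> flag{0};          // bit 0: condition met, bit 1: waiter asleep

    void suspend() {
        std::unique_lock<std::mutex> lk(mx);
        unsigned old = flag.fetch_or(2);    // publish the sleep bit first
        if (old & 1) {                      // condition already met: false alarm,
            flag.fetch_and(~2u);            // clear the sleep bit and return
            return;
        }
        while (flag.load() & 2)             // loop absorbs spurious wakeups
            cv.wait(lk);
    }

    void resume() {
        std::lock_guard<std::mutex> lk(mx);
        unsigned old = flag.fetch_or(1);    // satisfy the condition
        if (old & 2) {                      // a sleeper published the sleep bit:
            flag.fetch_and(~2u);            // clear it and wake the sleeper
            cv.notify_one();
        }
    }

The recheck after setting the sleep bit is the critical step: without it, a release that lands between the caller's last test and the wait would be lost.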
- if (!flag || flag->get_type() != flag->get_ptr_type()) { // get_ptr_type simply shows what flag was cast to - KF_TRACE( 5, ( "__kmp_resume_template: T#%d exiting, thread T#%d already awake: flag's loc(%p)\n", - gtid, target_gtid, NULL ) ); - __kmp_win32_mutex_unlock( &th->th.th_suspend_mx ); - return; - } - else { - typename C::flag_t old_spin = flag->unset_sleeping(); - if ( !flag->is_sleeping_val(old_spin) ) { - KF_TRACE( 5, ( "__kmp_resume_template: T#%d exiting, thread T#%d already awake: flag's loc(%p): " - "%u => %u\n", - gtid, target_gtid, flag->get(), old_spin, *(flag->get()) ) ); - __kmp_win32_mutex_unlock( &th->th.th_suspend_mx ); - return; - } - } - TCW_PTR(th->th.th_sleep_loc, NULL); - - KF_TRACE( 5, ( "__kmp_resume_template: T#%d about to wakeup T#%d, reset sleep bit for flag's loc(%p)\n", - gtid, target_gtid, flag->get() ) ); - - __kmp_win32_cond_signal( &th->th.th_suspend_cv ); - __kmp_win32_mutex_unlock( &th->th.th_suspend_mx ); - - KF_TRACE( 30, ( "__kmp_resume_template: T#%d exiting after signaling wake up for T#%d\n", - gtid, target_gtid ) ); -} - -void __kmp_resume_32(int target_gtid, kmp_flag_32 *flag) { - __kmp_resume_template(target_gtid, flag); -} -void __kmp_resume_64(int target_gtid, kmp_flag_64 *flag) { - __kmp_resume_template(target_gtid, flag); -} -void __kmp_resume_oncore(int target_gtid, kmp_flag_oncore *flag) { - __kmp_resume_template(target_gtid, flag); -} - - -/* ------------------------------------------------------------------------ */ -/* ------------------------------------------------------------------------ */ - -void -__kmp_yield( int cond ) -{ - if (cond) - Sleep(0); -} - -/* ------------------------------------------------------------------------ */ -/* ------------------------------------------------------------------------ */ - -void -__kmp_gtid_set_specific( int gtid ) -{ - if( __kmp_init_gtid ) { - KA_TRACE( 50, ("__kmp_gtid_set_specific: T#%d key:%d\n", - gtid, __kmp_gtid_threadprivate_key )); - if( ! TlsSetValue( __kmp_gtid_threadprivate_key, (LPVOID)(gtid+1)) ) - KMP_FATAL( TLSSetValueFailed ); - } else { - KA_TRACE( 50, ("__kmp_gtid_set_specific: runtime shutdown, returning\n" ) ); - } -} - -int -__kmp_gtid_get_specific() -{ - int gtid; - if( !__kmp_init_gtid ) { - KA_TRACE( 50, ("__kmp_gtid_get_specific: runtime shutdown, returning KMP_GTID_SHUTDOWN\n" ) ); - return KMP_GTID_SHUTDOWN; - } - gtid = (int)(kmp_intptr_t)TlsGetValue( __kmp_gtid_threadprivate_key ); - if ( gtid == 0 ) { - gtid = KMP_GTID_DNE; - } - else { - gtid--; - } - KA_TRACE( 50, ("__kmp_gtid_get_specific: key:%d gtid:%d\n", - __kmp_gtid_threadprivate_key, gtid )); - return gtid; -} - -/* ------------------------------------------------------------------------ */ -/* ------------------------------------------------------------------------ */ - -void -__kmp_affinity_bind_thread( int proc ) -{ - if (__kmp_num_proc_groups > 1) { - // - // Form the GROUP_AFFINITY struct directly, rather than filling - // out a bit vector and calling __kmp_set_system_affinity(). 
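One subtlety in __kmp_gtid_set_specific()/__kmp_gtid_get_specific() above: TlsGetValue() returns 0 both for "nothing stored" and for a stored value of 0, so the code stores gtid+1 and decodes on the way out. A minimal sketch of the same trick (hypothetical wrapper names; gtid_key stands in for __kmp_gtid_threadprivate_key):

    #include <windows.h>

    static DWORD gtid_key;

    void gtid_key_create() {
        gtid_key = TlsAlloc();
        // A real runtime must treat TLS_OUT_OF_INDEXES as a fatal error here.
    }

    void gtid_set(int gtid) {
        // Store gtid+1: 0 is reserved to mean "no gtid stored yet", since
        // TlsGetValue() cannot distinguish a stored 0 from an empty slot.
        TlsSetValue(gtid_key, (LPVOID)(INT_PTR)(gtid + 1));
    }

    int gtid_get() {                        // -1 means "does not exist"
        int v = (int)(INT_PTR)TlsGetValue(gtid_key);
        return v == 0 ? -1 : v - 1;
    }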
- // - GROUP_AFFINITY ga; - KMP_DEBUG_ASSERT((proc >= 0) && (proc < (__kmp_num_proc_groups - * CHAR_BIT * sizeof(DWORD_PTR)))); - ga.Group = proc / (CHAR_BIT * sizeof(DWORD_PTR)); - ga.Mask = (unsigned long long)1 << (proc % (CHAR_BIT * sizeof(DWORD_PTR))); - ga.Reserved[0] = ga.Reserved[1] = ga.Reserved[2] = 0; - - KMP_DEBUG_ASSERT(__kmp_SetThreadGroupAffinity != NULL); - if (__kmp_SetThreadGroupAffinity(GetCurrentThread(), &ga, NULL) == 0) { - DWORD error = GetLastError(); - if (__kmp_affinity_verbose) { // AC: continue silently if not verbose - kmp_msg_t err_code = KMP_ERR( error ); - __kmp_msg( - kmp_ms_warning, - KMP_MSG( CantSetThreadAffMask ), - err_code, - __kmp_msg_null - ); - if (__kmp_generate_warnings == kmp_warnings_off) { - __kmp_str_free(&err_code.str); - } - } - } - } else { - kmp_affin_mask_t *mask; - KMP_CPU_ALLOC_ON_STACK(mask); - KMP_CPU_ZERO(mask); - KMP_CPU_SET(proc, mask); - __kmp_set_system_affinity(mask, TRUE); - KMP_CPU_FREE_FROM_STACK(mask); - } -} - -void -__kmp_affinity_determine_capable( const char *env_var ) -{ - // - // All versions of Windows* OS (since Win '95) support SetThreadAffinityMask(). - // - -#if KMP_GROUP_AFFINITY - KMP_AFFINITY_ENABLE(__kmp_num_proc_groups*sizeof(DWORD_PTR)); -#else - KMP_AFFINITY_ENABLE(sizeof(DWORD_PTR)); -#endif - - KA_TRACE( 10, ( - "__kmp_affinity_determine_capable: " - "Windows* OS affinity interface functional (mask size = %" KMP_SIZE_T_SPEC ").\n", - __kmp_affin_mask_size - ) ); -} - -double -__kmp_read_cpu_time( void ) -{ - FILETIME CreationTime, ExitTime, KernelTime, UserTime; - int status; - double cpu_time; - - cpu_time = 0; - - status = GetProcessTimes( GetCurrentProcess(), &CreationTime, - &ExitTime, &KernelTime, &UserTime ); - - if (status) { - double sec = 0; - - sec += KernelTime.dwHighDateTime; - sec += UserTime.dwHighDateTime; - - /* Shift left by 32 bits */ - sec *= (double) (1 << 16) * (double) (1 << 16); - - sec += KernelTime.dwLowDateTime; - sec += UserTime.dwLowDateTime; - - cpu_time += (sec * 100.0) / KMP_NSEC_PER_SEC; - } - - return cpu_time; -} - -int -__kmp_read_system_info( struct kmp_sys_info *info ) -{ - info->maxrss = 0; /* the maximum resident set size utilized (in kilobytes) */ - info->minflt = 0; /* the number of page faults serviced without any I/O */ - info->majflt = 0; /* the number of page faults serviced that required I/O */ - info->nswap = 0; /* the number of times a process was "swapped" out of memory */ - info->inblock = 0; /* the number of times the file system had to perform input */ - info->oublock = 0; /* the number of times the file system had to perform output */ - info->nvcsw = 0; /* the number of times a context switch was voluntarily */ - info->nivcsw = 0; /* the number of times a context switch was forced */ - - return 1; -} - -/* ------------------------------------------------------------------------ */ -/* ------------------------------------------------------------------------ */ - - -void -__kmp_runtime_initialize( void ) -{ - SYSTEM_INFO info; - kmp_str_buf_t path; - UINT path_size; - - if ( __kmp_init_runtime ) { - return; - }; - -#if KMP_DYNAMIC_LIB - /* Pin dynamic library for the lifetime of application */ - { - // First, turn off error message boxes - UINT err_mode = SetErrorMode (SEM_FAILCRITICALERRORS); - HMODULE h; - BOOL ret = GetModuleHandleEx( GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS - |GET_MODULE_HANDLE_EX_FLAG_PIN, - (LPCTSTR)&__kmp_serial_initialize, &h); - KMP_DEBUG_ASSERT2(h && ret, "OpenMP RTL cannot find itself loaded"); - SetErrorMode (err_mode); // 
Restore error mode - KA_TRACE( 10, ("__kmp_runtime_initialize: dynamic library pinned\n") ); - } -#endif - - InitializeCriticalSection( & __kmp_win32_section ); -#if USE_ITT_BUILD - __kmp_itt_system_object_created( & __kmp_win32_section, "Critical Section" ); -#endif /* USE_ITT_BUILD */ - __kmp_initialize_system_tick(); - - #if (KMP_ARCH_X86 || KMP_ARCH_X86_64) - if ( ! __kmp_cpuinfo.initialized ) { - __kmp_query_cpuid( & __kmp_cpuinfo ); - }; // if - #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */ - - /* Set up minimum number of threads to switch to TLS gtid */ - #if KMP_OS_WINDOWS && ! defined KMP_DYNAMIC_LIB - // Windows* OS, static library. - /* - A new thread may use stack space previously used by another thread that has since terminated. - On Windows* OS, in case of static linking, we do not know the moment of thread termination, - and our structures (__kmp_threads and __kmp_root arrays) still keep info about dead - threads. This leads to a problem in the __kmp_get_global_thread_id() function: it wrongly - finds gtid (by searching through stack addresses of all known threads) for an unregistered - foreign thread. - - Setting __kmp_tls_gtid_min to 0 works around this problem: __kmp_get_global_thread_id() - does not search through stacks, but gets the gtid from TLS immediately. - - --ln - */ - __kmp_tls_gtid_min = 0; - #else - __kmp_tls_gtid_min = KMP_TLS_GTID_MIN; - #endif - - /* for the static library */ - if ( !__kmp_gtid_threadprivate_key ) { - __kmp_gtid_threadprivate_key = TlsAlloc(); - if( __kmp_gtid_threadprivate_key == TLS_OUT_OF_INDEXES ) { - KMP_FATAL( TLSOutOfIndexes ); - } - } - - - // - // Load ntdll.dll. - // - /* - Simple - GetModuleHandle( "ntdll.dll" ) - is not suitable due to security issue (see - http://www.microsoft.com/technet/security/advisory/2269637.mspx). We have to specify full - path to the library. - */ - __kmp_str_buf_init( & path ); - path_size = GetSystemDirectory( path.str, path.size ); - KMP_DEBUG_ASSERT( path_size > 0 ); - if ( path_size >= path.size ) { - // - // Buffer is too short. Expand the buffer and try again. - // - __kmp_str_buf_reserve( & path, path_size ); - path_size = GetSystemDirectory( path.str, path.size ); - KMP_DEBUG_ASSERT( path_size > 0 ); - }; // if - if ( path_size > 0 && path_size < path.size ) { - // - // Now we have system directory name in the buffer. - // Append backslash and name of dll to form full path, - // - path.used = path_size; - __kmp_str_buf_print( & path, "\\%s", "ntdll.dll" ); - - // - // Now load ntdll using full path. - // - ntdll = GetModuleHandle( path.str ); - } - - KMP_DEBUG_ASSERT( ntdll != NULL ); - if ( ntdll != NULL ) { - NtQuerySystemInformation = (NtQuerySystemInformation_t) GetProcAddress( ntdll, "NtQuerySystemInformation" ); - } - KMP_DEBUG_ASSERT( NtQuerySystemInformation != NULL ); - -#if KMP_GROUP_AFFINITY - // - // Load kernel32.dll. - // Same caveat - must use full system path name. - // - if ( path_size > 0 && path_size < path.size ) { - // - // Truncate the buffer back to just the system path length, - // discarding "\\ntdll.dll", and replacing it with "kernel32.dll". - // - path.used = path_size; - __kmp_str_buf_print( & path, "\\%s", "kernel32.dll" ); - - // - // Load kernel32.dll using full path. - // - kernel32 = GetModuleHandle( path.str ); - KA_TRACE( 10, ("__kmp_runtime_initialize: kernel32.dll = %s\n", path.str ) ); - - // - // Load the function pointers to kernel32.dll routines - // that may or may not exist on this system.
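The GetSystemDirectory()-then-append dance above exists because, per the referenced advisory, system DLLs should be referenced by absolute path rather than by bare name. A compacted sketch of the same lookup (illustrative fixed-size buffer; the runtime grows a kmp_str_buf_t instead, and GetModuleHandle() works here only because ntdll/kernel32 are always already loaded into the process):

    #include <windows.h>

    HMODULE get_system_module(const char *dll_name) {
        char path[MAX_PATH];
        UINT len = GetSystemDirectoryA(path, MAX_PATH);   // e.g. C:\Windows\system32
        if (len == 0 || len + 1 + lstrlenA(dll_name) >= MAX_PATH)
            return NULL;                                  // failed or would not fit
        path[len] = '\\';
        lstrcpyA(path + len + 1, dll_name);               // append "\ntdll.dll" etc.
        return GetModuleHandleA(path);                    // resolve the loaded module
    }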
- // - if ( kernel32 != NULL ) { - __kmp_GetActiveProcessorCount = (kmp_GetActiveProcessorCount_t) GetProcAddress( kernel32, "GetActiveProcessorCount" ); - __kmp_GetActiveProcessorGroupCount = (kmp_GetActiveProcessorGroupCount_t) GetProcAddress( kernel32, "GetActiveProcessorGroupCount" ); - __kmp_GetThreadGroupAffinity = (kmp_GetThreadGroupAffinity_t) GetProcAddress( kernel32, "GetThreadGroupAffinity" ); - __kmp_SetThreadGroupAffinity = (kmp_SetThreadGroupAffinity_t) GetProcAddress( kernel32, "SetThreadGroupAffinity" ); - - KA_TRACE( 10, ("__kmp_runtime_initialize: __kmp_GetActiveProcessorCount = %p\n", __kmp_GetActiveProcessorCount ) ); - KA_TRACE( 10, ("__kmp_runtime_initialize: __kmp_GetActiveProcessorGroupCount = %p\n", __kmp_GetActiveProcessorGroupCount ) ); - KA_TRACE( 10, ("__kmp_runtime_initialize:__kmp_GetThreadGroupAffinity = %p\n", __kmp_GetThreadGroupAffinity ) ); - KA_TRACE( 10, ("__kmp_runtime_initialize: __kmp_SetThreadGroupAffinity = %p\n", __kmp_SetThreadGroupAffinity ) ); - KA_TRACE( 10, ("__kmp_runtime_initialize: sizeof(kmp_affin_mask_t) = %d\n", sizeof(kmp_affin_mask_t) ) ); - - // - // See if group affinity is supported on this system. - // If so, calculate the #groups and #procs. - // - // Group affinity was introduced with Windows* 7 OS and - // Windows* Server 2008 R2 OS. - // - if ( ( __kmp_GetActiveProcessorCount != NULL ) - && ( __kmp_GetActiveProcessorGroupCount != NULL ) - && ( __kmp_GetThreadGroupAffinity != NULL ) - && ( __kmp_SetThreadGroupAffinity != NULL ) - && ( ( __kmp_num_proc_groups - = __kmp_GetActiveProcessorGroupCount() ) > 1 ) ) { - // - // Calculate the total number of active OS procs. - // - int i; - - KA_TRACE( 10, ("__kmp_runtime_initialize: %d processor groups detected\n", __kmp_num_proc_groups ) ); - - __kmp_xproc = 0; - - for ( i = 0; i < __kmp_num_proc_groups; i++ ) { - DWORD size = __kmp_GetActiveProcessorCount( i ); - __kmp_xproc += size; - KA_TRACE( 10, ("__kmp_runtime_initialize: proc group %d size = %d\n", i, size ) ); - } - } - else { - KA_TRACE( 10, ("__kmp_runtime_initialize: %d processor groups detected\n", __kmp_num_proc_groups ) ); - } - } - } - if ( __kmp_num_proc_groups <= 1 ) { - GetSystemInfo( & info ); - __kmp_xproc = info.dwNumberOfProcessors; - } -#else - GetSystemInfo( & info ); - __kmp_xproc = info.dwNumberOfProcessors; -#endif /* KMP_GROUP_AFFINITY */ - - // - // If the OS said there were 0 procs, take a guess and use a value of 2. - // This is done for Linux* OS, also. Do we need error / warning? - // - if ( __kmp_xproc <= 0 ) { - __kmp_xproc = 2; - } - - KA_TRACE( 5, ("__kmp_runtime_initialize: total processors = %d\n", __kmp_xproc) ); - - __kmp_str_buf_free( & path ); - -#if USE_ITT_BUILD - __kmp_itt_initialize(); -#endif /* USE_ITT_BUILD */ - - __kmp_init_runtime = TRUE; -} // __kmp_runtime_initialize - -void -__kmp_runtime_destroy( void ) -{ - if ( ! 
__kmp_init_runtime ) { - return; - } - -#if USE_ITT_BUILD - __kmp_itt_destroy(); -#endif /* USE_ITT_BUILD */ - - /* we can't DeleteCriticalsection( & __kmp_win32_section ); */ - /* due to the KX_TRACE() commands */ - KA_TRACE( 40, ("__kmp_runtime_destroy\n" )); - - if( __kmp_gtid_threadprivate_key ) { - TlsFree( __kmp_gtid_threadprivate_key ); - __kmp_gtid_threadprivate_key = 0; - } - - __kmp_affinity_uninitialize(); - DeleteCriticalSection( & __kmp_win32_section ); - - ntdll = NULL; - NtQuerySystemInformation = NULL; - -#if KMP_ARCH_X86_64 - kernel32 = NULL; - __kmp_GetActiveProcessorCount = NULL; - __kmp_GetActiveProcessorGroupCount = NULL; - __kmp_GetThreadGroupAffinity = NULL; - __kmp_SetThreadGroupAffinity = NULL; -#endif // KMP_ARCH_X86_64 - - __kmp_init_runtime = FALSE; -} - - -void -__kmp_terminate_thread( int gtid ) -{ - kmp_info_t *th = __kmp_threads[ gtid ]; - - if( !th ) return; - - KA_TRACE( 10, ("__kmp_terminate_thread: kill (%d)\n", gtid ) ); - - if (TerminateThread( th->th.th_info.ds.ds_thread, (DWORD) -1) == FALSE) { - /* It's OK, the thread may have exited already */ - } - __kmp_free_handle( th->th.th_info.ds.ds_thread ); -} - -/* ------------------------------------------------------------------------ */ -/* ------------------------------------------------------------------------ */ - -void -__kmp_clear_system_time( void ) -{ - BOOL status; - LARGE_INTEGER time; - status = QueryPerformanceCounter( & time ); - __kmp_win32_time = (kmp_int64) time.QuadPart; -} - -void -__kmp_initialize_system_tick( void ) -{ - { - BOOL status; - LARGE_INTEGER freq; - - status = QueryPerformanceFrequency( & freq ); - if (! status) { - DWORD error = GetLastError(); - __kmp_msg( - kmp_ms_fatal, - KMP_MSG( FunctionError, "QueryPerformanceFrequency()" ), - KMP_ERR( error ), - __kmp_msg_null - ); - - } - else { - __kmp_win32_tick = ((double) 1.0) / (double) freq.QuadPart; - } - } -} - -/* Calculate the elapsed wall clock time for the user */ - -void -__kmp_elapsed( double *t ) -{ - BOOL status; - LARGE_INTEGER now; - status = QueryPerformanceCounter( & now ); - *t = ((double) now.QuadPart) * __kmp_win32_tick; -} - -/* Calculate the elapsed wall clock tick for the user */ - -void -__kmp_elapsed_tick( double *t ) -{ - *t = __kmp_win32_tick; -} - -void -__kmp_read_system_time( double *delta ) -{ - if (delta != NULL) { - BOOL status; - LARGE_INTEGER now; - - status = QueryPerformanceCounter( & now ); - - *delta = ((double) (((kmp_int64) now.QuadPart) - __kmp_win32_time)) - * __kmp_win32_tick; - } -} - -/* Return the current time stamp in nsec */ -kmp_uint64 -__kmp_now_nsec() -{ - LARGE_INTEGER now; - QueryPerformanceCounter(&now); - return 1e9 * __kmp_win32_tick * now.QuadPart; -} - -/* ------------------------------------------------------------------------ */ -/* ------------------------------------------------------------------------ */ - -void * __stdcall -__kmp_launch_worker( void *arg ) -{ - volatile void *stack_data; - void *exit_val; - void *padding = 0; - kmp_info_t *this_thr = (kmp_info_t *) arg; - int gtid; - - gtid = this_thr->th.th_info.ds.ds_gtid; - __kmp_gtid_set_specific( gtid ); -#ifdef KMP_TDATA_GTID - #error "This define causes problems with LoadLibrary() + declspec(thread) " \ - "on Windows* OS. 
See CQ50564, tests kmp_load_library*.c and this MSDN " \ - "reference: http://support.microsoft.com/kb/118816" - //__kmp_gtid = gtid; -#endif - -#if USE_ITT_BUILD - __kmp_itt_thread_name( gtid ); -#endif /* USE_ITT_BUILD */ - - __kmp_affinity_set_init_mask( gtid, FALSE ); - -#if KMP_ARCH_X86 || KMP_ARCH_X86_64 - // - // Set the FP control regs to be a copy of - // the parallel initialization thread's. - // - __kmp_clear_x87_fpu_status_word(); - __kmp_load_x87_fpu_control_word( &__kmp_init_x87_fpu_control_word ); - __kmp_load_mxcsr( &__kmp_init_mxcsr ); -#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */ - - if ( __kmp_stkoffset > 0 && gtid > 0 ) { - padding = KMP_ALLOCA( gtid * __kmp_stkoffset ); - } - - KMP_FSYNC_RELEASING( &this_thr -> th.th_info.ds.ds_alive ); - this_thr -> th.th_info.ds.ds_thread_id = GetCurrentThreadId(); - TCW_4( this_thr -> th.th_info.ds.ds_alive, TRUE ); - - if ( TCR_4(__kmp_gtid_mode) < 2 ) { // check stack only if it is used to get gtid - TCW_PTR(this_thr->th.th_info.ds.ds_stackbase, &stack_data); - KMP_ASSERT( this_thr -> th.th_info.ds.ds_stackgrow == FALSE ); - __kmp_check_stack_overlap( this_thr ); - } - KMP_MB(); - exit_val = __kmp_launch_thread( this_thr ); - KMP_FSYNC_RELEASING( &this_thr -> th.th_info.ds.ds_alive ); - TCW_4( this_thr -> th.th_info.ds.ds_alive, FALSE ); - KMP_MB(); - return exit_val; -} - -#if KMP_USE_MONITOR -/* The monitor thread controls all of the threads in the complex */ - -void * __stdcall -__kmp_launch_monitor( void *arg ) -{ - DWORD wait_status; - kmp_thread_t monitor; - int status; - int interval; - kmp_info_t *this_thr = (kmp_info_t *) arg; - - KMP_DEBUG_ASSERT(__kmp_init_monitor); - TCW_4( __kmp_init_monitor, 2 ); // AC: Signal the library that monitor has started - // TODO: hide "2" in enum (like {true,false,started}) - this_thr -> th.th_info.ds.ds_thread_id = GetCurrentThreadId(); - TCW_4( this_thr -> th.th_info.ds.ds_alive, TRUE ); - - KMP_MB(); /* Flush all pending memory write invalidates. */ - KA_TRACE( 10, ("__kmp_launch_monitor: launched\n" ) ); - - monitor = GetCurrentThread(); - - /* set thread priority */ - status = SetThreadPriority( monitor, THREAD_PRIORITY_HIGHEST ); - if (! status) { - DWORD error = GetLastError(); - __kmp_msg( - kmp_ms_fatal, - KMP_MSG( CantSetThreadPriority ), - KMP_ERR( error ), - __kmp_msg_null - ); - } - - /* register us as monitor */ - __kmp_gtid_set_specific( KMP_GTID_MONITOR ); -#ifdef KMP_TDATA_GTID - #error "This define causes problems with LoadLibrary() + declspec(thread) " \ - "on Windows* OS. See CQ50564, tests kmp_load_library*.c and this MSDN " \ - "reference: http://support.microsoft.com/kb/118816" - //__kmp_gtid = KMP_GTID_MONITOR; -#endif - -#if USE_ITT_BUILD - __kmp_itt_thread_ignore(); // Instruct Intel(R) Threading Tools to ignore monitor thread. -#endif /* USE_ITT_BUILD */ - - KMP_MB(); /* Flush all pending memory write invalidates. */ - - interval = ( 1000 / __kmp_monitor_wakeups ); /* in milliseconds */ - - while (! TCR_4(__kmp_global.g.g_done)) { - /* This thread monitors the state of the system */ - - KA_TRACE( 15, ( "__kmp_launch_monitor: update\n" ) ); - - wait_status = WaitForSingleObject( __kmp_monitor_ev, interval ); - - if (wait_status == WAIT_TIMEOUT) { - TCW_4( __kmp_global.g.g_time.dt.t_value, - TCR_4( __kmp_global.g.g_time.dt.t_value ) + 1 ); - } - - KMP_MB(); /* Flush all pending memory write invalidates. */ - } - - KA_TRACE( 10, ("__kmp_launch_monitor: finished\n" ) ); - - status = SetThreadPriority( monitor, THREAD_PRIORITY_NORMAL ); - if (! 
status) { - DWORD error = GetLastError(); - __kmp_msg( - kmp_ms_fatal, - KMP_MSG( CantSetThreadPriority ), - KMP_ERR( error ), - __kmp_msg_null - ); - } - - if (__kmp_global.g.g_abort != 0) { - /* now we need to terminate the worker threads */ - /* the value of t_abort is the signal we caught */ - - int gtid; - - KA_TRACE( 10, ("__kmp_launch_monitor: terminate sig=%d\n", (__kmp_global.g.g_abort) ) ); - - /* terminate the OpenMP worker threads */ - /* TODO this is not valid for sibling threads!! - * the uber master might not be 0 anymore.. */ - for (gtid = 1; gtid < __kmp_threads_capacity; ++gtid) - __kmp_terminate_thread( gtid ); - - __kmp_cleanup(); - - Sleep( 0 ); - - KA_TRACE( 10, ("__kmp_launch_monitor: raise sig=%d\n", (__kmp_global.g.g_abort) ) ); - - if (__kmp_global.g.g_abort > 0) { - raise( __kmp_global.g.g_abort ); - } - } - - TCW_4( this_thr -> th.th_info.ds.ds_alive, FALSE ); - - KMP_MB(); - return arg; -} -#endif - -void -__kmp_create_worker( int gtid, kmp_info_t *th, size_t stack_size ) -{ - kmp_thread_t handle; - DWORD idThread; - - KA_TRACE( 10, ("__kmp_create_worker: try to create thread (%d)\n", gtid ) ); - - th->th.th_info.ds.ds_gtid = gtid; - - if ( KMP_UBER_GTID(gtid) ) { - int stack_data; - - /* TODO: GetCurrentThread() returns a pseudo-handle that is unsuitable for other threads to use. - Is it appropriate to just use GetCurrentThread? When should we close this handle? When - unregistering the root? - */ - { - BOOL rc; - rc = DuplicateHandle( - GetCurrentProcess(), - GetCurrentThread(), - GetCurrentProcess(), - &th->th.th_info.ds.ds_thread, - 0, - FALSE, - DUPLICATE_SAME_ACCESS - ); - KMP_ASSERT( rc ); - KA_TRACE( 10, (" __kmp_create_worker: ROOT Handle duplicated, th = %p, handle = %" KMP_UINTPTR_SPEC "\n", - (LPVOID)th, - th->th.th_info.ds.ds_thread ) ); - th->th.th_info.ds.ds_thread_id = GetCurrentThreadId(); - } - if ( TCR_4(__kmp_gtid_mode) < 2 ) { // check stack only if it is used to get gtid - /* we will dynamically update the stack range if gtid_mode == 1 */ - TCW_PTR(th->th.th_info.ds.ds_stackbase, &stack_data); - TCW_PTR(th->th.th_info.ds.ds_stacksize, 0); - TCW_4(th->th.th_info.ds.ds_stackgrow, TRUE); - __kmp_check_stack_overlap( th ); - } - } - else { - KMP_MB(); /* Flush all pending memory write invalidates. */ - - /* Set stack size for this thread now. 
*/ - KA_TRACE( 10, ( "__kmp_create_worker: stack_size = %" KMP_SIZE_T_SPEC - " bytes\n", stack_size ) ); - - stack_size += gtid * __kmp_stkoffset; - - TCW_PTR(th->th.th_info.ds.ds_stacksize, stack_size); - TCW_4(th->th.th_info.ds.ds_stackgrow, FALSE); - - KA_TRACE( 10, ( "__kmp_create_worker: (before) stack_size = %" - KMP_SIZE_T_SPEC - " bytes, &__kmp_launch_worker = %p, th = %p, " - "&idThread = %p\n", - (SIZE_T) stack_size, - (LPTHREAD_START_ROUTINE) & __kmp_launch_worker, - (LPVOID) th, &idThread ) ); - - handle = CreateThread( NULL, (SIZE_T) stack_size, - (LPTHREAD_START_ROUTINE) __kmp_launch_worker, - (LPVOID) th, STACK_SIZE_PARAM_IS_A_RESERVATION, &idThread ); - - KA_TRACE( 10, ( "__kmp_create_worker: (after) stack_size = %" - KMP_SIZE_T_SPEC - " bytes, &__kmp_launch_worker = %p, th = %p, " - "idThread = %u, handle = %" KMP_UINTPTR_SPEC "\n", - (SIZE_T) stack_size, - (LPTHREAD_START_ROUTINE) & __kmp_launch_worker, - (LPVOID) th, idThread, handle ) ); - - if ( handle == 0 ) { - DWORD error = GetLastError(); - __kmp_msg(kmp_ms_fatal, KMP_MSG( CantCreateThread ), KMP_ERR( error ), __kmp_msg_null); - } else { - th->th.th_info.ds.ds_thread = handle; - } - - KMP_MB(); /* Flush all pending memory write invalidates. */ - } - - KA_TRACE( 10, ("__kmp_create_worker: done creating thread (%d)\n", gtid ) ); -} - -int -__kmp_still_running(kmp_info_t *th) { - return (WAIT_TIMEOUT == WaitForSingleObject( th->th.th_info.ds.ds_thread, 0)); -} - -#if KMP_USE_MONITOR -void -__kmp_create_monitor( kmp_info_t *th ) -{ - kmp_thread_t handle; - DWORD idThread; - int ideal, new_ideal; - - if( __kmp_dflt_blocktime == KMP_MAX_BLOCKTIME ) { - // We don't need monitor thread in case of MAX_BLOCKTIME - KA_TRACE( 10, ("__kmp_create_monitor: skipping monitor thread because of MAX blocktime\n" ) ); - th->th.th_info.ds.ds_tid = 0; // this makes reap_monitor no-op - th->th.th_info.ds.ds_gtid = 0; - TCW_4( __kmp_init_monitor, 2 ); // Signal to stop waiting for monitor creation - return; - } - KA_TRACE( 10, ("__kmp_create_monitor: try to create monitor\n" ) ); - - KMP_MB(); /* Flush all pending memory write invalidates. */ - - __kmp_monitor_ev = CreateEvent( NULL, TRUE, FALSE, NULL ); - if ( __kmp_monitor_ev == NULL ) { - DWORD error = GetLastError(); - __kmp_msg( - kmp_ms_fatal, - KMP_MSG( CantCreateEvent ), - KMP_ERR( error ), - __kmp_msg_null - ); - }; // if -#if USE_ITT_BUILD - __kmp_itt_system_object_created( __kmp_monitor_ev, "Event" ); -#endif /* USE_ITT_BUILD */ - - th->th.th_info.ds.ds_tid = KMP_GTID_MONITOR; - th->th.th_info.ds.ds_gtid = KMP_GTID_MONITOR; - - // FIXME - on Windows* OS, if __kmp_monitor_stksize = 0, figure out how - // to automatically expand stacksize based on CreateThread error code. - if ( __kmp_monitor_stksize == 0 ) { - __kmp_monitor_stksize = KMP_DEFAULT_MONITOR_STKSIZE; - } - if ( __kmp_monitor_stksize < __kmp_sys_min_stksize ) { - __kmp_monitor_stksize = __kmp_sys_min_stksize; - } - - KA_TRACE( 10, ("__kmp_create_monitor: requested stacksize = %d bytes\n", - (int) __kmp_monitor_stksize ) ); - - TCW_4( __kmp_global.g.g_time.dt.t_value, 0 ); - - handle = CreateThread( NULL, (SIZE_T) __kmp_monitor_stksize, - (LPTHREAD_START_ROUTINE) __kmp_launch_monitor, - (LPVOID) th, STACK_SIZE_PARAM_IS_A_RESERVATION, &idThread ); - if (handle == 0) { - DWORD error = GetLastError(); - __kmp_msg( - kmp_ms_fatal, - KMP_MSG( CantCreateThread ), - KMP_ERR( error ), - __kmp_msg_null - ); - } - else - th->th.th_info.ds.ds_thread = handle; - - KMP_MB(); /* Flush all pending memory write invalidates. 
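Both CreateThread() calls above pass STACK_SIZE_PARAM_IS_A_RESERVATION, which makes the size argument a reservation rather than an up-front commit, so honoring a large requested stack does not immediately consume committed memory. A minimal sketch of that call shape (hypothetical entry point; error handling trimmed):

    #include <windows.h>

    static DWORD WINAPI worker_entry(LPVOID arg) {
        // ... thread body ...
        return 0;
    }

    HANDLE spawn_worker(SIZE_T stack_bytes, LPVOID arg) {
        DWORD tid;
        // stack_bytes is reserved, not committed, for the new thread's stack.
        return CreateThread(NULL, stack_bytes, worker_entry, arg,
                            STACK_SIZE_PARAM_IS_A_RESERVATION, &tid);
        // NULL return means failure; the runtime reports GetLastError().
    }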
*/ - - KA_TRACE( 10, ("__kmp_create_monitor: monitor created %p\n", - (void *) th->th.th_info.ds.ds_thread ) ); -} -#endif - -/* - Check to see if thread is still alive. - - NOTE: The ExitProcess(code) system call causes all threads to terminate - with an exit_val = code. Because of this we cannot rely on - exit_val having any particular value. So this routine may - return STILL_ACTIVE in exit_val even after the thread is dead. -*/ - -int -__kmp_is_thread_alive( kmp_info_t * th, DWORD *exit_val ) -{ - DWORD rc; - rc = GetExitCodeThread( th->th.th_info.ds.ds_thread, exit_val ); - if ( rc == 0 ) { - DWORD error = GetLastError(); - __kmp_msg( - kmp_ms_fatal, - KMP_MSG( FunctionError, "GetExitCodeThread()" ), - KMP_ERR( error ), - __kmp_msg_null - ); - }; // if - return ( *exit_val == STILL_ACTIVE ); -} - - -void -__kmp_exit_thread( - int exit_status -) { - ExitThread( exit_status ); -} // __kmp_exit_thread - -/* - This is a common part for both __kmp_reap_worker() and __kmp_reap_monitor(). -*/ -static void -__kmp_reap_common( kmp_info_t * th ) -{ - DWORD exit_val; - - KMP_MB(); /* Flush all pending memory write invalidates. */ - - KA_TRACE( 10, ( "__kmp_reap_common: try to reap (%d)\n", th->th.th_info.ds.ds_gtid ) ); - - /* - 2006-10-19: - - There are two opposite situations: - - 1. Windows* OS keeps the thread alive after it resets the ds_alive flag and exits from the thread - function. (For example, see C70770/Q394281 "unloading of dll based on OMP is very - slow".) - 2. Windows* OS may kill the thread before it resets the ds_alive flag. - - The right solution seems to be waiting for *either* thread termination *or* ds_alive resetting. - - */ - - { - // TODO: This code is very similar to KMP_WAIT_YIELD. Need to generalize KMP_WAIT_YIELD to - // cover this usage also. - void * obj = NULL; - register kmp_uint32 spins; -#if USE_ITT_BUILD - KMP_FSYNC_SPIN_INIT( obj, (void*) & th->th.th_info.ds.ds_alive ); -#endif /* USE_ITT_BUILD */ - KMP_INIT_YIELD( spins ); - do { -#if USE_ITT_BUILD - KMP_FSYNC_SPIN_PREPARE( obj ); -#endif /* USE_ITT_BUILD */ - __kmp_is_thread_alive( th, &exit_val ); - KMP_YIELD( TCR_4(__kmp_nth) > __kmp_avail_proc ); - KMP_YIELD_SPIN( spins ); - } while ( exit_val == STILL_ACTIVE && TCR_4( th->th.th_info.ds.ds_alive ) ); -#if USE_ITT_BUILD - if ( exit_val == STILL_ACTIVE ) { - KMP_FSYNC_CANCEL( obj ); - } else { - KMP_FSYNC_SPIN_ACQUIRED( obj ); - }; // if -#endif /* USE_ITT_BUILD */ - } - - __kmp_free_handle( th->th.th_info.ds.ds_thread ); - - /* - * NOTE: The ExitProcess(code) system call causes all threads to terminate - * with an exit_val = code. Because of this we cannot rely on - * exit_val having any particular value. - */ - if ( exit_val == STILL_ACTIVE ) { - KA_TRACE( 1, ( "__kmp_reap_common: thread still active.\n" ) ); - } else if ( (void *) exit_val != (void *) th) { - KA_TRACE( 1, ( "__kmp_reap_common: ExitProcess / TerminateThread used?\n" ) ); - }; // if - - KA_TRACE( 10, - ( - "__kmp_reap_common: done reaping (%d), handle = %" KMP_UINTPTR_SPEC "\n", - th->th.th_info.ds.ds_gtid, - th->th.th_info.ds.ds_thread - ) - ); - - th->th.th_info.ds.ds_thread = 0; - th->th.th_info.ds.ds_tid = KMP_GTID_DNE; - th->th.th_info.ds.ds_gtid = KMP_GTID_DNE; - th->th.th_info.ds.ds_thread_id = 0; - - KMP_MB(); /* Flush all pending memory write invalidates.
*/ -} - -#if KMP_USE_MONITOR -void -__kmp_reap_monitor( kmp_info_t *th ) -{ - int status; - - KA_TRACE( 10, ("__kmp_reap_monitor: try to reap %p\n", - (void *) th->th.th_info.ds.ds_thread ) ); - - // If monitor has been created, its tid and gtid should be KMP_GTID_MONITOR. - // If both tid and gtid are 0, it means the monitor did not ever start. - // If both tid and gtid are KMP_GTID_DNE, the monitor has been shut down. - KMP_DEBUG_ASSERT( th->th.th_info.ds.ds_tid == th->th.th_info.ds.ds_gtid ); - if ( th->th.th_info.ds.ds_gtid != KMP_GTID_MONITOR ) { - KA_TRACE( 10, ("__kmp_reap_monitor: monitor did not start, returning\n") ); - return; - }; // if - - KMP_MB(); /* Flush all pending memory write invalidates. */ - - status = SetEvent( __kmp_monitor_ev ); - if ( status == FALSE ) { - DWORD error = GetLastError(); - __kmp_msg( - kmp_ms_fatal, - KMP_MSG( CantSetEvent ), - KMP_ERR( error ), - __kmp_msg_null - ); - } - KA_TRACE( 10, ( "__kmp_reap_monitor: reaping thread (%d)\n", th->th.th_info.ds.ds_gtid ) ); - __kmp_reap_common( th ); - - __kmp_free_handle( __kmp_monitor_ev ); - - KMP_MB(); /* Flush all pending memory write invalidates. */ -} -#endif - -void -__kmp_reap_worker( kmp_info_t * th ) -{ - KA_TRACE( 10, ( "__kmp_reap_worker: reaping thread (%d)\n", th->th.th_info.ds.ds_gtid ) ); - __kmp_reap_common( th ); -} - -/* ------------------------------------------------------------------------ */ -/* ------------------------------------------------------------------------ */ - -#if KMP_HANDLE_SIGNALS - - -static void -__kmp_team_handler( int signo ) -{ - if ( __kmp_global.g.g_abort == 0 ) { - // Stage 1 signal handler, let's shut down all of the threads. - if ( __kmp_debug_buf ) { - __kmp_dump_debug_buffer(); - }; // if - KMP_MB(); // Flush all pending memory write invalidates. - TCW_4( __kmp_global.g.g_abort, signo ); - KMP_MB(); // Flush all pending memory write invalidates. - TCW_4( __kmp_global.g.g_done, TRUE ); - KMP_MB(); // Flush all pending memory write invalidates. - } -} // __kmp_team_handler - - - -static -sig_func_t __kmp_signal( int signum, sig_func_t handler ) { - sig_func_t old = signal( signum, handler ); - if ( old == SIG_ERR ) { - int error = errno; - __kmp_msg( kmp_ms_fatal, KMP_MSG( FunctionError, "signal" ), KMP_ERR( error ), __kmp_msg_null ); - }; // if - return old; -} - -static void -__kmp_install_one_handler( - int sig, - sig_func_t handler, - int parallel_init -) { - sig_func_t old; - KMP_MB(); /* Flush all pending memory write invalidates. */ - KB_TRACE( 60, ("__kmp_install_one_handler: called: sig=%d\n", sig ) ); - if ( parallel_init ) { - old = __kmp_signal( sig, handler ); - // SIG_DFL on Windows* OS is NULL or 0. - if ( old == __kmp_sighldrs[ sig ] ) { - __kmp_siginstalled[ sig ] = 1; - } else { - // Restore/keep the user's handler if one was previously installed. - old = __kmp_signal( sig, old ); - }; // if - } else { - // Save initial/system signal handlers to see if user handlers were installed. - // 2009-09-23: It is dead code. On Windows* OS, __kmp_install_signals is called once with - // parallel_init == TRUE. - old = __kmp_signal( sig, SIG_DFL ); - __kmp_sighldrs[ sig ] = old; - __kmp_signal( sig, old ); - }; // if - KMP_MB(); /* Flush all pending memory write invalidates. */ -} // __kmp_install_one_handler - -static void -__kmp_remove_one_handler( int sig ) { - if ( __kmp_siginstalled[ sig ] ) { - sig_func_t old; - KMP_MB(); // Flush all pending memory write invalidates.
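The install/remove pair above follows a be-a-good-citizen protocol: install our handler, and if the slot did not hold the expected previous handler, put the user's handler straight back. A portable sketch of that idiom with signal() (illustrative names; saved_handler stands for the recorded original handler, i.e. __kmp_sighldrs[sig] above):

    #include <csignal>

    typedef void (*sig_func_t)(int);

    static sig_func_t saved_handler;     // handler recorded before we installed ours
    static int installed = 0;

    static void team_handler(int signo) { /* set shutdown flags, as above */ }

    void install_handler(int sig) {
        sig_func_t old = signal(sig, team_handler);
        if (old == saved_handler) {      // slot held what we expected:
            installed = 1;               // ours stays installed
        } else {
            signal(sig, old);            // a user handler was there; restore it
        }
    }

    void remove_handler(int sig) {
        if (installed) {
            sig_func_t old = signal(sig, saved_handler);
            if (old != team_handler)     // someone replaced us meanwhile;
                signal(sig, old);        // put their handler back
            installed = 0;
        }
    }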
- KB_TRACE( 60, ( "__kmp_remove_one_handler: called: sig=%d\n", sig ) ); - old = __kmp_signal( sig, __kmp_sighldrs[ sig ] ); - if ( old != __kmp_team_handler ) { - KB_TRACE( 10, ( "__kmp_remove_one_handler: oops, not our handler, restoring: sig=%d\n", sig ) ); - old = __kmp_signal( sig, old ); - }; // if - __kmp_sighldrs[ sig ] = NULL; - __kmp_siginstalled[ sig ] = 0; - KMP_MB(); // Flush all pending memory write invalidates. - }; // if -} // __kmp_remove_one_handler - - -void -__kmp_install_signals( int parallel_init ) -{ - KB_TRACE( 10, ( "__kmp_install_signals: called\n" ) ); - if ( ! __kmp_handle_signals ) { - KB_TRACE( 10, ( "__kmp_install_signals: KMP_HANDLE_SIGNALS is false - handlers not installed\n" ) ); - return; - }; // if - __kmp_install_one_handler( SIGINT, __kmp_team_handler, parallel_init ); - __kmp_install_one_handler( SIGILL, __kmp_team_handler, parallel_init ); - __kmp_install_one_handler( SIGABRT, __kmp_team_handler, parallel_init ); - __kmp_install_one_handler( SIGFPE, __kmp_team_handler, parallel_init ); - __kmp_install_one_handler( SIGSEGV, __kmp_team_handler, parallel_init ); - __kmp_install_one_handler( SIGTERM, __kmp_team_handler, parallel_init ); -} // __kmp_install_signals - - -void -__kmp_remove_signals( void ) -{ - int sig; - KB_TRACE( 10, ("__kmp_remove_signals: called\n" ) ); - for ( sig = 1; sig < NSIG; ++ sig ) { - __kmp_remove_one_handler( sig ); - }; // for sig -} // __kmp_remove_signals - - -#endif // KMP_HANDLE_SIGNALS - -/* Put the thread to sleep for a time period */ -void -__kmp_thread_sleep( int millis ) -{ - DWORD status; - - status = SleepEx( (DWORD) millis, FALSE ); - if ( status ) { - DWORD error = GetLastError(); - __kmp_msg( - kmp_ms_fatal, - KMP_MSG( FunctionError, "SleepEx()" ), - KMP_ERR( error ), - __kmp_msg_null - ); - } -} - -/* Determine whether the given address is mapped into the current address space. */ -int -__kmp_is_address_mapped( void * addr ) -{ - DWORD status; - MEMORY_BASIC_INFORMATION lpBuffer; - SIZE_T dwLength; - - dwLength = sizeof(MEMORY_BASIC_INFORMATION); - - status = VirtualQuery( addr, &lpBuffer, dwLength ); - - return !((( lpBuffer.State == MEM_RESERVE) || ( lpBuffer.State == MEM_FREE )) || - (( lpBuffer.Protect == PAGE_NOACCESS ) || ( lpBuffer.Protect == PAGE_EXECUTE ))); -} - -kmp_uint64 -__kmp_hardware_timestamp(void) -{ - kmp_uint64 r = 0; - - QueryPerformanceCounter((LARGE_INTEGER*) &r); - return r; -} - -/* Free handle and check the error code */ -void -__kmp_free_handle( kmp_thread_t tHandle ) -{ -/* called with parameter of type HANDLE also, thus we assume kmp_thread_t is defined as HANDLE */ - BOOL rc; - rc = CloseHandle( tHandle ); - if ( !rc ) { - DWORD error = GetLastError(); - __kmp_msg( - kmp_ms_fatal, - KMP_MSG( CantCloseHandle ), - KMP_ERR( error ), - __kmp_msg_null - ); - } -} - -int -__kmp_get_load_balance( int max ) { - - static ULONG glb_buff_size = 100 * 1024; - - static int glb_running_threads = 0; /* Saved count of the running threads for the thread balance algorithm */ - static double glb_call_time = 0; /* Thread balance algorithm call time */ - - int running_threads = 0; // Number of running threads in the system.
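A note on __kmp_is_address_mapped() above: it treats an address as mapped only if the region is neither reserved nor free and is not execute-only or no-access. A compacted sketch of the same test (with the VirtualQuery() return value checked, which the deleted code skips):

    #include <windows.h>

    int is_address_mapped(void *addr) {
        MEMORY_BASIC_INFORMATION mbi;
        if (VirtualQuery(addr, &mbi, sizeof(mbi)) == 0)
            return 0;                      // query failed: treat as unmapped
        if (mbi.State == MEM_RESERVE || mbi.State == MEM_FREE)
            return 0;                      // no backing store committed
        if (mbi.Protect == PAGE_NOACCESS || mbi.Protect == PAGE_EXECUTE)
            return 0;                      // committed but not readable data
        return 1;
    }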
- NTSTATUS status = 0; - ULONG buff_size = 0; - ULONG info_size = 0; - void * buffer = NULL; - PSYSTEM_PROCESS_INFORMATION spi = NULL; - int first_time = 1; - - double call_time = 0.0; //start, finish; - - __kmp_elapsed( & call_time ); - - if ( glb_call_time && - ( call_time - glb_call_time < __kmp_load_balance_interval ) ) { - running_threads = glb_running_threads; - goto finish; - } - glb_call_time = call_time; - - // Do not spend time on running algorithm if we have a permanent error. - if ( NtQuerySystemInformation == NULL ) { - running_threads = -1; - goto finish; - }; // if - - if ( max <= 0 ) { - max = INT_MAX; - }; // if - - do { - - if ( first_time ) { - buff_size = glb_buff_size; - } else { - buff_size = 2 * buff_size; - } - - buffer = KMP_INTERNAL_REALLOC( buffer, buff_size ); - if ( buffer == NULL ) { - running_threads = -1; - goto finish; - }; // if - status = NtQuerySystemInformation( SystemProcessInformation, buffer, buff_size, & info_size ); - first_time = 0; - - } while ( status == STATUS_INFO_LENGTH_MISMATCH ); - glb_buff_size = buff_size; - - #define CHECK( cond ) \ - { \ - KMP_DEBUG_ASSERT( cond ); \ - if ( ! ( cond ) ) { \ - running_threads = -1; \ - goto finish; \ - } \ - } - - CHECK( buff_size >= info_size ); - spi = PSYSTEM_PROCESS_INFORMATION( buffer ); - for ( ; ; ) { - ptrdiff_t offset = uintptr_t( spi ) - uintptr_t( buffer ); - CHECK( 0 <= offset && offset + sizeof( SYSTEM_PROCESS_INFORMATION ) < info_size ); - HANDLE pid = spi->ProcessId; - ULONG num = spi->NumberOfThreads; - CHECK( num >= 1 ); - size_t spi_size = sizeof( SYSTEM_PROCESS_INFORMATION ) + sizeof( SYSTEM_THREAD ) * ( num - 1 ); - CHECK( offset + spi_size < info_size ); // Make sure process info record fits the buffer. - if ( spi->NextEntryOffset != 0 ) { - CHECK( spi_size <= spi->NextEntryOffset ); // And do not overlap with the next record. - }; // if - // pid == 0 corresponds to the System Idle Process. It always has running threads - // on all cores. So, we don't consider the running threads of this process. - if ( pid != 0 ) { - for ( int i = 0; i < num; ++ i ) { - THREAD_STATE state = spi->Threads[ i ].State; - // Count threads that have Ready or Running state. - // !!! TODO: Why comment does not match the code??? - if ( state == StateRunning ) { - ++ running_threads; - // Stop counting running threads if the number is already greater than - // the number of available cores - if ( running_threads >= max ) { - goto finish; - } - } // if - }; // for i - } // if - if ( spi->NextEntryOffset == 0 ) { - break; - }; // if - spi = PSYSTEM_PROCESS_INFORMATION( uintptr_t( spi ) + spi->NextEntryOffset ); - }; // forever - - #undef CHECK - - finish: // Clean up and exit. - - if ( buffer != NULL ) { - KMP_INTERNAL_FREE( buffer ); - }; // if - - glb_running_threads = running_threads; - - return running_threads; - -} //__kmp_get_load_balance() - Index: runtime/src/z_Windows_NT_util.cpp =================================================================== --- runtime/src/z_Windows_NT_util.cpp +++ runtime/src/z_Windows_NT_util.cpp @@ -1,5 +1,5 @@ /* - * z_Windows_NT_util.c -- platform specific routines. + * z_Windows_NT_util.cpp -- platform specific routines. */
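One closing implementation note on __kmp_get_load_balance() above: its do/while around NtQuerySystemInformation() is the standard protocol for that API, namely call with a guessed buffer and double it for as long as STATUS_INFO_LENGTH_MISMATCH (0xC0000004) comes back. A stripped-down sketch under the assumption that the function pointer was resolved from ntdll as shown earlier (illustrative names; 5 is the documented SystemProcessInformation class):

    #include <windows.h>
    #include <cstdlib>

    typedef LONG (NTAPI *NtQSI_t)(ULONG, PVOID, ULONG, PULONG);

    void *query_system_processes(NtQSI_t NtQSI, ULONG *info_size) {
        const ULONG SystemProcessInformation = 5;
        const LONG STATUS_INFO_LENGTH_MISMATCH = (LONG)0xC0000004;
        ULONG buff_size = 100 * 1024;                 // initial guess, as above
        void *buffer = NULL;
        for (;;) {
            void *p = realloc(buffer, buff_size);
            if (!p) { free(buffer); return NULL; }
            buffer = p;
            LONG status = NtQSI(SystemProcessInformation, buffer,
                                buff_size, info_size);
            if (status != STATUS_INFO_LENGTH_MISMATCH) {
                if (status < 0) { free(buffer); return NULL; }  // hard failure
                return buffer;                        // success: caller frees
            }
            buff_size *= 2;                           // undersized: double and retry
        }
    }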