Index: openmp/trunk/runtime/src/kmp_csupport.c
===================================================================
--- openmp/trunk/runtime/src/kmp_csupport.c
+++ openmp/trunk/runtime/src/kmp_csupport.c
@@ -1801,6 +1801,13 @@
     INIT_LOCK( lck );
     __kmp_set_user_lock_location( lck, loc );

+#if OMPT_SUPPORT && OMPT_TRACE
+    if (ompt_enabled &&
+        ompt_callbacks.ompt_callback(ompt_event_init_lock)) {
+        ompt_callbacks.ompt_callback(ompt_event_init_lock)((uint64_t) lck);
+    }
+#endif
+
 #if USE_ITT_BUILD
     __kmp_itt_lock_creating( lck );
 #endif /* USE_ITT_BUILD */
@@ -1870,6 +1877,13 @@
     INIT_NESTED_LOCK( lck );
     __kmp_set_user_lock_location( lck, loc );

+#if OMPT_SUPPORT && OMPT_TRACE
+    if (ompt_enabled &&
+        ompt_callbacks.ompt_callback(ompt_event_init_nest_lock)) {
+        ompt_callbacks.ompt_callback(ompt_event_init_nest_lock)((uint64_t) lck);
+    }
+#endif
+
 #if USE_ITT_BUILD
     __kmp_itt_lock_creating( lck );
 #endif /* USE_ITT_BUILD */
@@ -1908,6 +1922,13 @@
         lck = __kmp_lookup_user_lock( user_lock, "omp_destroy_lock" );
     }

+#if OMPT_SUPPORT && OMPT_TRACE
+    if (ompt_enabled &&
+        ompt_callbacks.ompt_callback(ompt_event_destroy_lock)) {
+        ompt_callbacks.ompt_callback(ompt_event_destroy_lock)((uint64_t) lck);
+    }
+#endif
+
 #if USE_ITT_BUILD
     __kmp_itt_lock_destroyed( lck );
 #endif /* USE_ITT_BUILD */
@@ -1959,6 +1980,13 @@
         lck = __kmp_lookup_user_lock( user_lock, "omp_destroy_nest_lock" );
     }

+#if OMPT_SUPPORT && OMPT_TRACE
+    if (ompt_enabled &&
+        ompt_callbacks.ompt_callback(ompt_event_destroy_nest_lock)) {
+        ompt_callbacks.ompt_callback(ompt_event_destroy_nest_lock)((uint64_t) lck);
+    }
+#endif
+
 #if USE_ITT_BUILD
     __kmp_itt_lock_destroyed( lck );
 #endif /* USE_ITT_BUILD */
@@ -2034,6 +2062,13 @@
     __kmp_itt_lock_acquired( lck );
 #endif /* USE_ITT_BUILD */

+#if OMPT_SUPPORT && OMPT_TRACE
+    if (ompt_enabled &&
+        ompt_callbacks.ompt_callback(ompt_event_acquired_lock)) {
+        ompt_callbacks.ompt_callback(ompt_event_acquired_lock)((uint64_t) lck);
+    }
+#endif
+
 #endif // KMP_USE_DYNAMIC_LOCK
 }
@@ -2050,6 +2085,7 @@
 #endif

 #else // KMP_USE_DYNAMIC_LOCK
+    int acquire_status;
     kmp_user_lock_p lck;

     if ( ( __kmp_user_lock_kind == lk_tas ) && ( sizeof( lck->tas.lk.poll )
@@ -2071,12 +2107,24 @@
     __kmp_itt_lock_acquiring( lck );
 #endif /* USE_ITT_BUILD */

-    ACQUIRE_NESTED_LOCK( lck, gtid );
+    ACQUIRE_NESTED_LOCK( lck, gtid, &acquire_status );

 #if USE_ITT_BUILD
     __kmp_itt_lock_acquired( lck );
 #endif /* USE_ITT_BUILD */

 #endif // KMP_USE_DYNAMIC_LOCK
+
+#if OMPT_SUPPORT && OMPT_TRACE
+    if (ompt_enabled) {
+        if (acquire_status == KMP_LOCK_ACQUIRED_FIRST) {
+            if(ompt_callbacks.ompt_callback(ompt_event_acquired_nest_lock_first))
+                ompt_callbacks.ompt_callback(ompt_event_acquired_nest_lock_first)((uint64_t) lck);
+        } else {
+            if(ompt_callbacks.ompt_callback(ompt_event_acquired_nest_lock_next))
+                ompt_callbacks.ompt_callback(ompt_event_acquired_nest_lock_next)((uint64_t) lck);
+        }
+    }
+#endif
 }

 void
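The user-visible effect of the kmp_csupport.c changes: each OpenMP lock API call now reports an OMPT event when a tool is attached and the runtime was built with OMPT_SUPPORT and OMPT_TRACE. A small program that drives every newly wired event (standard OpenMP lock API; the comments map each call to the event fired by the code above):

#include <omp.h>

int main(void)
{
    omp_lock_t l;
    omp_nest_lock_t nl;

    omp_init_lock(&l);            // ompt_event_init_lock
    omp_set_lock(&l);             // ompt_event_acquired_lock
    omp_unset_lock(&l);
    omp_destroy_lock(&l);         // ompt_event_destroy_lock

    omp_init_nest_lock(&nl);      // ompt_event_init_nest_lock
    omp_set_nest_lock(&nl);       // ompt_event_acquired_nest_lock_first
    omp_set_nest_lock(&nl);       // ompt_event_acquired_nest_lock_next (owner re-acquires)
    omp_unset_nest_lock(&nl);
    omp_unset_nest_lock(&nl);
    omp_destroy_nest_lock(&nl);   // ompt_event_destroy_nest_lock
    return 0;
}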
Index: openmp/trunk/runtime/src/kmp_lock.h
===================================================================
--- openmp/trunk/runtime/src/kmp_lock.h
+++ openmp/trunk/runtime/src/kmp_lock.h
@@ -159,13 +159,13 @@
 //
 #define KMP_TAS_LOCK_INITIALIZER( lock ) { { 0, 0 } }

-extern void __kmp_acquire_tas_lock( kmp_tas_lock_t *lck, kmp_int32 gtid );
+extern int __kmp_acquire_tas_lock( kmp_tas_lock_t *lck, kmp_int32 gtid );
 extern int __kmp_test_tas_lock( kmp_tas_lock_t *lck, kmp_int32 gtid );
 extern int __kmp_release_tas_lock( kmp_tas_lock_t *lck, kmp_int32 gtid );
 extern void __kmp_init_tas_lock( kmp_tas_lock_t *lck );
 extern void __kmp_destroy_tas_lock( kmp_tas_lock_t *lck );

-extern void __kmp_acquire_nested_tas_lock( kmp_tas_lock_t *lck, kmp_int32 gtid );
+extern int __kmp_acquire_nested_tas_lock( kmp_tas_lock_t *lck, kmp_int32 gtid );
 extern int __kmp_test_nested_tas_lock( kmp_tas_lock_t *lck, kmp_int32 gtid );
 extern int __kmp_release_nested_tas_lock( kmp_tas_lock_t *lck, kmp_int32 gtid );
 extern void __kmp_init_nested_tas_lock( kmp_tas_lock_t *lck );
@@ -173,7 +173,8 @@

 #define KMP_LOCK_RELEASED 1
 #define KMP_LOCK_STILL_HELD 0
-
+#define KMP_LOCK_ACQUIRED_FIRST 1
+#define KMP_LOCK_ACQUIRED_NEXT 0

 #if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
@@ -213,13 +214,13 @@
 //
 #define KMP_FUTEX_LOCK_INITIALIZER( lock ) { { 0, 0 } }

-extern void __kmp_acquire_futex_lock( kmp_futex_lock_t *lck, kmp_int32 gtid );
+extern int __kmp_acquire_futex_lock( kmp_futex_lock_t *lck, kmp_int32 gtid );
 extern int __kmp_test_futex_lock( kmp_futex_lock_t *lck, kmp_int32 gtid );
 extern int __kmp_release_futex_lock( kmp_futex_lock_t *lck, kmp_int32 gtid );
 extern void __kmp_init_futex_lock( kmp_futex_lock_t *lck );
 extern void __kmp_destroy_futex_lock( kmp_futex_lock_t *lck );

-extern void __kmp_acquire_nested_futex_lock( kmp_futex_lock_t *lck, kmp_int32 gtid );
+extern int __kmp_acquire_nested_futex_lock( kmp_futex_lock_t *lck, kmp_int32 gtid );
 extern int __kmp_test_nested_futex_lock( kmp_futex_lock_t *lck, kmp_int32 gtid );
 extern int __kmp_release_nested_futex_lock( kmp_futex_lock_t *lck, kmp_int32 gtid );
 extern void __kmp_init_nested_futex_lock( kmp_futex_lock_t *lck );
@@ -261,14 +262,14 @@
 //
 #define KMP_TICKET_LOCK_INITIALIZER( lock ) { { (kmp_ticket_lock_t *) & (lock), NULL, 0, 0, 0, -1 } }

-extern void __kmp_acquire_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid );
+extern int __kmp_acquire_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid );
 extern int __kmp_test_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid );
 extern int __kmp_test_ticket_lock_with_cheks( kmp_ticket_lock_t *lck, kmp_int32 gtid );
 extern int __kmp_release_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid );
 extern void __kmp_init_ticket_lock( kmp_ticket_lock_t *lck );
 extern void __kmp_destroy_ticket_lock( kmp_ticket_lock_t *lck );

-extern void __kmp_acquire_nested_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid );
+extern int __kmp_acquire_nested_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid );
 extern int __kmp_test_nested_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid );
 extern int __kmp_release_nested_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid );
 extern void __kmp_init_nested_ticket_lock( kmp_ticket_lock_t *lck );
@@ -364,13 +365,13 @@

 typedef union kmp_queuing_lock kmp_queuing_lock_t;

-extern void __kmp_acquire_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid );
+extern int __kmp_acquire_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid );
 extern int __kmp_test_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid );
 extern int __kmp_release_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid );
 extern void __kmp_init_queuing_lock( kmp_queuing_lock_t *lck );
 extern void __kmp_destroy_queuing_lock( kmp_queuing_lock_t *lck );

-extern void __kmp_acquire_nested_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid );
+extern int __kmp_acquire_nested_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid );
 extern int __kmp_test_nested_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid );
 extern int __kmp_release_nested_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid );
 extern void __kmp_init_nested_queuing_lock( kmp_queuing_lock_t *lck );
@@ -468,13 +469,13 @@

 typedef union kmp_drdpa_lock kmp_drdpa_lock_t;

-extern void __kmp_acquire_drdpa_lock( kmp_drdpa_lock_t *lck, kmp_int32 gtid );
+extern int __kmp_acquire_drdpa_lock( kmp_drdpa_lock_t *lck, kmp_int32 gtid );
 extern int __kmp_test_drdpa_lock( kmp_drdpa_lock_t *lck, kmp_int32 gtid );
 extern int __kmp_release_drdpa_lock( kmp_drdpa_lock_t *lck, kmp_int32 gtid );
 extern void __kmp_init_drdpa_lock( kmp_drdpa_lock_t *lck );
 extern void __kmp_destroy_drdpa_lock( kmp_drdpa_lock_t *lck );

-extern void __kmp_acquire_nested_drdpa_lock( kmp_drdpa_lock_t *lck, kmp_int32 gtid );
+extern int __kmp_acquire_nested_drdpa_lock( kmp_drdpa_lock_t *lck, kmp_int32 gtid );
 extern int __kmp_test_nested_drdpa_lock( kmp_drdpa_lock_t *lck, kmp_int32 gtid );
 extern int __kmp_release_nested_drdpa_lock( kmp_drdpa_lock_t *lck, kmp_int32 gtid );
 extern void __kmp_init_nested_drdpa_lock( kmp_drdpa_lock_t *lck );
@@ -500,10 +501,10 @@

 #define KMP_BOOTSTRAP_LOCK_INITIALIZER( lock ) KMP_TICKET_LOCK_INITIALIZER( (lock) )

-static inline void
+static inline int
 __kmp_acquire_bootstrap_lock( kmp_bootstrap_lock_t *lck )
 {
-    __kmp_acquire_ticket_lock( lck, KMP_GTID_DNE );
+    return __kmp_acquire_ticket_lock( lck, KMP_GTID_DNE );
 }

 static inline int
@@ -545,10 +546,10 @@

 typedef kmp_ticket_lock_t kmp_lock_t;

-static inline void
+static inline int
 __kmp_acquire_lock( kmp_lock_t *lck, kmp_int32 gtid )
 {
-    __kmp_acquire_ticket_lock( lck, gtid );
+    return __kmp_acquire_ticket_lock( lck, gtid );
 }

 static inline int
@@ -636,7 +637,7 @@
     return ( *__kmp_get_user_lock_owner_ )( lck );
 }

-extern void ( *__kmp_acquire_user_lock_with_checks_ )( kmp_user_lock_p lck, kmp_int32 gtid );
+extern int ( *__kmp_acquire_user_lock_with_checks_ )( kmp_user_lock_p lck, kmp_int32 gtid );

 #if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
@@ -678,11 +679,11 @@
 }

 #else
-static inline void
+static inline int
 __kmp_acquire_user_lock_with_checks( kmp_user_lock_p lck, kmp_int32 gtid )
 {
     KMP_DEBUG_ASSERT( __kmp_acquire_user_lock_with_checks_ != NULL );
-    ( *__kmp_acquire_user_lock_with_checks_ )( lck, gtid );
+    return ( *__kmp_acquire_user_lock_with_checks_ )( lck, gtid );
 }
 #endif

@@ -759,11 +760,11 @@
     ( *__kmp_destroy_user_lock_with_checks_ )( lck );
 }

-extern void ( *__kmp_acquire_nested_user_lock_with_checks_ )( kmp_user_lock_p lck, kmp_int32 gtid );
+extern int ( *__kmp_acquire_nested_user_lock_with_checks_ )( kmp_user_lock_p lck, kmp_int32 gtid );

 #if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64)

-#define __kmp_acquire_nested_user_lock_with_checks(lck,gtid) \
+#define __kmp_acquire_nested_user_lock_with_checks(lck,gtid,depth) \
     if (__kmp_user_lock_kind == lk_tas) { \
         if ( __kmp_env_consistency_check ) { \
             char const * const func = "omp_set_nest_lock"; \
@@ -774,6 +775,7 @@
         } \
         if ( lck->tas.lk.poll - 1 == gtid ) { \
             lck->tas.lk.depth_locked += 1; \
+            *depth = KMP_LOCK_ACQUIRED_NEXT; \
         } else { \
             if ( ( lck->tas.lk.poll != 0 ) || \
               ( ! KMP_COMPARE_AND_STORE_ACQ32( &(lck->tas.lk.poll), 0, gtid + 1 ) ) ) { \
@@ -795,19 +797,20 @@
             } \
         } \
         lck->tas.lk.depth_locked = 1; \
+        *depth = KMP_LOCK_ACQUIRED_FIRST; \
     } \
     KMP_FSYNC_ACQUIRED( lck ); \
 } else { \
     KMP_DEBUG_ASSERT( __kmp_acquire_nested_user_lock_with_checks_ != NULL ); \
-    ( *__kmp_acquire_nested_user_lock_with_checks_ )( lck, gtid ); \
+    *depth = ( *__kmp_acquire_nested_user_lock_with_checks_ )( lck, gtid ); \
 }

 #else
 static inline void
-__kmp_acquire_nested_user_lock_with_checks( kmp_user_lock_p lck, kmp_int32 gtid )
+__kmp_acquire_nested_user_lock_with_checks( kmp_user_lock_p lck, kmp_int32 gtid, int* depth )
 {
     KMP_DEBUG_ASSERT( __kmp_acquire_nested_user_lock_with_checks_ != NULL );
-    ( *__kmp_acquire_nested_user_lock_with_checks_ )( lck, gtid );
+    *depth = ( *__kmp_acquire_nested_user_lock_with_checks_ )( lck, gtid );
 }
 #endif

@@ -940,7 +943,7 @@
 // Macros for binding user lock functions.
 //
 #define KMP_BIND_USER_LOCK_TEMPLATE(nest, kind, suffix) { \
-    __kmp_acquire##nest##user_lock_with_checks_ = ( void (*)( kmp_user_lock_p, kmp_int32 ) ) \
+    __kmp_acquire##nest##user_lock_with_checks_ = ( int (*)( kmp_user_lock_p, kmp_int32 ) ) \
                                                   __kmp_acquire##nest##kind##_##suffix; \
     __kmp_release##nest##user_lock_with_checks_ = ( int (*)( kmp_user_lock_p, kmp_int32 ) ) \
                                                   __kmp_release##nest##kind##_##suffix; \
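The header changes establish a small protocol: every acquire routine now returns an int, and the nested variants use it to report whether the calling thread just took the lock (KMP_LOCK_ACQUIRED_FIRST) or merely re-entered a lock it already owns (KMP_LOCK_ACQUIRED_NEXT). A standalone sketch of that contract, using a hypothetical toy_nest_lock_t rather than the real runtime types:

#define KMP_LOCK_ACQUIRED_FIRST 1
#define KMP_LOCK_ACQUIRED_NEXT  0

typedef struct {
    int owner;    /* gtid of the owning thread, -1 when free */
    int depth;    /* recursive acquisition count */
} toy_nest_lock_t;

static int
toy_acquire_nested( toy_nest_lock_t *lck, int gtid )
{
    if ( lck->owner == gtid ) {
        /* owner re-acquires: only the depth changes */
        lck->depth += 1;
        return KMP_LOCK_ACQUIRED_NEXT;
    }
    /* a real implementation blocks here until the lock is free */
    lck->owner = gtid;
    lck->depth = 1;
    return KMP_LOCK_ACQUIRED_FIRST;
}

This is the same branch each __kmp_acquire_nested_*_lock implementation below takes, and the returned status is what lets the nested set-lock entry point above choose between the _first and _next OMPT events.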
Index: openmp/trunk/runtime/src/kmp_lock.cpp
===================================================================
--- openmp/trunk/runtime/src/kmp_lock.cpp
+++ openmp/trunk/runtime/src/kmp_lock.cpp
@@ -84,7 +84,7 @@
     return lck->lk.depth_locked != -1;
 }

-__forceinline static void
+__forceinline static int
 __kmp_acquire_tas_lock_timed_template( kmp_tas_lock_t *lck, kmp_int32 gtid )
 {
     KMP_MB();
@@ -99,7 +99,7 @@
     if ( ( lck->lk.poll == DYNA_LOCK_FREE(tas) )
      && KMP_COMPARE_AND_STORE_ACQ32( & ( lck->lk.poll ), DYNA_LOCK_FREE(tas), DYNA_LOCK_BUSY(gtid+1, tas) ) ) {
         KMP_FSYNC_ACQUIRED(lck);
-        return;
+        return KMP_LOCK_ACQUIRED_FIRST;
     }

     kmp_uint32 spins;
@@ -127,15 +127,16 @@
         }
     }
     KMP_FSYNC_ACQUIRED( lck );
+    return KMP_LOCK_ACQUIRED_FIRST;
 }

-void
+int
 __kmp_acquire_tas_lock( kmp_tas_lock_t *lck, kmp_int32 gtid )
 {
-    __kmp_acquire_tas_lock_timed_template( lck, gtid );
+    return __kmp_acquire_tas_lock_timed_template( lck, gtid );
 }

-static void
+static int
 __kmp_acquire_tas_lock_with_checks( kmp_tas_lock_t *lck, kmp_int32 gtid )
 {
     char const * const func = "omp_set_lock";
@@ -146,7 +147,7 @@
     if ( ( gtid >= 0 ) && ( __kmp_get_tas_lock_owner( lck ) == gtid ) ) {
         KMP_FATAL( LockIsAlreadyOwned, func );
     }
-    __kmp_acquire_tas_lock( lck, gtid );
+    return __kmp_acquire_tas_lock( lck, gtid );
 }

 int
@@ -241,28 +242,30 @@
 // nested test and set locks
 //

-void
+int
 __kmp_acquire_nested_tas_lock( kmp_tas_lock_t *lck, kmp_int32 gtid )
 {
     KMP_DEBUG_ASSERT( gtid >= 0 );

     if ( __kmp_get_tas_lock_owner( lck ) == gtid ) {
         lck->lk.depth_locked += 1;
+        return KMP_LOCK_ACQUIRED_NEXT;
     }
     else {
         __kmp_acquire_tas_lock_timed_template( lck, gtid );
         lck->lk.depth_locked = 1;
+        return KMP_LOCK_ACQUIRED_FIRST;
     }
 }

-static void
+static int
 __kmp_acquire_nested_tas_lock_with_checks( kmp_tas_lock_t *lck, kmp_int32 gtid )
 {
     char const * const func = "omp_set_nest_lock";
     if ( ! __kmp_is_tas_lock_nestable( lck ) ) {
         KMP_FATAL( LockSimpleUsedAsNestable, func );
     }
-    __kmp_acquire_nested_tas_lock( lck, gtid );
+    return __kmp_acquire_nested_tas_lock( lck, gtid );
 }

 int
@@ -381,7 +384,7 @@
     return lck->lk.depth_locked != -1;
 }

-__forceinline static void
+__forceinline static int
 __kmp_acquire_futex_lock_timed_template( kmp_futex_lock_t *lck, kmp_int32 gtid )
 {
     kmp_int32 gtid_code = ( gtid + 1 ) << 1;
@@ -457,15 +460,16 @@
     KMP_FSYNC_ACQUIRED( lck );
     KA_TRACE( 1000, ("__kmp_acquire_futex_lock: lck:%p(0x%x), T#%d exiting\n",
       lck, lck->lk.poll, gtid ) );
+    return KMP_LOCK_ACQUIRED_FIRST;
 }

-void
+int
 __kmp_acquire_futex_lock( kmp_futex_lock_t *lck, kmp_int32 gtid )
 {
-    __kmp_acquire_futex_lock_timed_template( lck, gtid );
+    return __kmp_acquire_futex_lock_timed_template( lck, gtid );
 }

-static void
+static int
 __kmp_acquire_futex_lock_with_checks( kmp_futex_lock_t *lck, kmp_int32 gtid )
 {
     char const * const func = "omp_set_lock";
@@ -476,7 +480,7 @@
     if ( ( gtid >= 0 ) && ( __kmp_get_futex_lock_owner( lck ) == gtid ) ) {
         KMP_FATAL( LockIsAlreadyOwned, func );
     }
-    __kmp_acquire_futex_lock( lck, gtid );
+    return __kmp_acquire_futex_lock( lck, gtid );
 }

 int
@@ -587,28 +591,30 @@
 // nested futex locks
 //

-void
+int
 __kmp_acquire_nested_futex_lock( kmp_futex_lock_t *lck, kmp_int32 gtid )
 {
     KMP_DEBUG_ASSERT( gtid >= 0 );

     if ( __kmp_get_futex_lock_owner( lck ) == gtid ) {
         lck->lk.depth_locked += 1;
+        return KMP_LOCK_ACQUIRED_NEXT;
     }
     else {
         __kmp_acquire_futex_lock_timed_template( lck, gtid );
         lck->lk.depth_locked = 1;
+        return KMP_LOCK_ACQUIRED_FIRST;
     }
 }

-static void
+static int
 __kmp_acquire_nested_futex_lock_with_checks( kmp_futex_lock_t *lck, kmp_int32 gtid )
 {
     char const * const func = "omp_set_nest_lock";
     if ( ! __kmp_is_futex_lock_nestable( lck ) ) {
         KMP_FATAL( LockSimpleUsedAsNestable, func );
     }
-    __kmp_acquire_nested_futex_lock( lck, gtid );
+    return __kmp_acquire_nested_futex_lock( lck, gtid );
 }

 int
@@ -734,7 +740,7 @@
     return FALSE;
 }

-__forceinline static void
+__forceinline static int
 __kmp_acquire_ticket_lock_timed_template( kmp_ticket_lock_t *lck, kmp_int32 gtid )
 {
     kmp_uint32 my_ticket;
@@ -750,19 +756,20 @@

     if ( TCR_4( lck->lk.now_serving ) == my_ticket ) {
         KMP_FSYNC_ACQUIRED(lck);
-        return;
+        return KMP_LOCK_ACQUIRED_FIRST;
     }
     KMP_WAIT_YIELD( &lck->lk.now_serving, my_ticket, __kmp_bakery_check, lck );
     KMP_FSYNC_ACQUIRED(lck);
+    return KMP_LOCK_ACQUIRED_FIRST;
 }

-void
+int
 __kmp_acquire_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid )
 {
-    __kmp_acquire_ticket_lock_timed_template( lck, gtid );
+    return __kmp_acquire_ticket_lock_timed_template( lck, gtid );
 }

-static void
+static int
 __kmp_acquire_ticket_lock_with_checks( kmp_ticket_lock_t *lck, kmp_int32 gtid )
 {
     char const * const func = "omp_set_lock";
@@ -779,6 +786,7 @@
     __kmp_acquire_ticket_lock( lck, gtid );

     lck->lk.owner_id = gtid + 1;
+    return KMP_LOCK_ACQUIRED_FIRST;
 }

 int
@@ -905,13 +913,14 @@
 // nested ticket locks
 //

-void
+int
 __kmp_acquire_nested_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid )
 {
     KMP_DEBUG_ASSERT( gtid >= 0 );

     if ( __kmp_get_ticket_lock_owner( lck ) == gtid ) {
         lck->lk.depth_locked += 1;
+        return KMP_LOCK_ACQUIRED_NEXT;
     }
     else {
         __kmp_acquire_ticket_lock_timed_template( lck, gtid );
@@ -919,10 +928,11 @@
         lck->lk.depth_locked = 1;
         KMP_MB();
         lck->lk.owner_id = gtid + 1;
+        return KMP_LOCK_ACQUIRED_FIRST;
     }
 }

-static void
+static int
 __kmp_acquire_nested_ticket_lock_with_checks( kmp_ticket_lock_t *lck, kmp_int32 gtid )
 {
     char const * const func = "omp_set_nest_lock";
@@ -932,7 +942,7 @@
     if ( ! __kmp_is_ticket_lock_nestable( lck ) ) {
         KMP_FATAL( LockSimpleUsedAsNestable, func );
     }
-    __kmp_acquire_nested_ticket_lock( lck, gtid );
+    return __kmp_acquire_nested_ticket_lock( lck, gtid );
 }

 int
@@ -1196,7 +1206,7 @@
 template <bool takeTime>
 /* [TLW] The unused template above is left behind because of what BEB believes is a
    potential compiler problem with __forceinline. */
-__forceinline static void
+__forceinline static int
 __kmp_acquire_queuing_lock_timed_template( kmp_queuing_lock_t *lck,
   kmp_int32 gtid )
 {
@@ -1323,7 +1333,7 @@
 #endif

                 KMP_FSYNC_ACQUIRED( lck );
-                return; /* lock holder cannot be on queue */
+                return KMP_LOCK_ACQUIRED_FIRST; /* lock holder cannot be on queue */
             }
             enqueued = FALSE;
         }
@@ -1376,7 +1386,7 @@
 #endif

             /* got lock, we were dequeued by the thread that released lock */
-            return;
+            return KMP_LOCK_ACQUIRED_FIRST;
         }

         /* Yield if number of threads > number of logical processors */
@@ -1390,17 +1400,18 @@
     }

     KMP_ASSERT2( 0, "should not get here" );
+    return KMP_LOCK_ACQUIRED_FIRST;
 }

-void
+int
 __kmp_acquire_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid )
 {
     KMP_DEBUG_ASSERT( gtid >= 0 );

-    __kmp_acquire_queuing_lock_timed_template<false>( lck, gtid );
+    return __kmp_acquire_queuing_lock_timed_template<false>( lck, gtid );
 }

-static void
+static int
 __kmp_acquire_queuing_lock_with_checks( kmp_queuing_lock_t *lck,
   kmp_int32 gtid )
 {
@@ -1418,6 +1429,7 @@
     __kmp_acquire_queuing_lock( lck, gtid );

     lck->lk.owner_id = gtid + 1;
+    return KMP_LOCK_ACQUIRED_FIRST;
 }

 int
@@ -1689,13 +1701,14 @@
 // nested queuing locks
 //

-void
+int
 __kmp_acquire_nested_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid )
 {
     KMP_DEBUG_ASSERT( gtid >= 0 );

     if ( __kmp_get_queuing_lock_owner( lck ) == gtid ) {
         lck->lk.depth_locked += 1;
+        return KMP_LOCK_ACQUIRED_NEXT;
     }
     else {
         __kmp_acquire_queuing_lock_timed_template<false>( lck, gtid );
@@ -1703,10 +1716,11 @@
         lck->lk.depth_locked = 1;
         KMP_MB();
         lck->lk.owner_id = gtid + 1;
+        return KMP_LOCK_ACQUIRED_FIRST;
     }
 }

-static void
+static int
 __kmp_acquire_nested_queuing_lock_with_checks( kmp_queuing_lock_t *lck, kmp_int32 gtid )
 {
     char const * const func = "omp_set_nest_lock";
@@ -1716,7 +1730,7 @@
     if ( ! __kmp_is_queuing_lock_nestable( lck ) ) {
         KMP_FATAL( LockSimpleUsedAsNestable, func );
     }
-    __kmp_acquire_nested_queuing_lock( lck, gtid );
+    return __kmp_acquire_nested_queuing_lock( lck, gtid );
 }

 int
@@ -2458,7 +2472,7 @@
     return lck->lk.depth_locked != -1;
 }

-__forceinline static void
+__forceinline static int
 __kmp_acquire_drdpa_lock_timed_template( kmp_drdpa_lock_t *lck, kmp_int32 gtid )
 {
     kmp_uint64 ticket = KMP_TEST_THEN_INC64((kmp_int64 *)&lck->lk.next_ticket);
@@ -2621,15 +2635,16 @@
             lck->lk.cleanup_ticket = TCR_8(lck->lk.next_ticket);
         }
     }
+    return KMP_LOCK_ACQUIRED_FIRST;
 }

-void
+int
 __kmp_acquire_drdpa_lock( kmp_drdpa_lock_t *lck, kmp_int32 gtid )
 {
-    __kmp_acquire_drdpa_lock_timed_template( lck, gtid );
+    return __kmp_acquire_drdpa_lock_timed_template( lck, gtid );
 }

-static void
+static int
 __kmp_acquire_drdpa_lock_with_checks( kmp_drdpa_lock_t *lck, kmp_int32 gtid )
 {
     char const * const func = "omp_set_lock";
@@ -2646,6 +2661,7 @@
     __kmp_acquire_drdpa_lock( lck, gtid );

     lck->lk.owner_id = gtid + 1;
+    return KMP_LOCK_ACQUIRED_FIRST;
 }

 int
@@ -2813,13 +2829,14 @@
 // nested drdpa ticket locks
 //

-void
+int
 __kmp_acquire_nested_drdpa_lock( kmp_drdpa_lock_t *lck, kmp_int32 gtid )
 {
     KMP_DEBUG_ASSERT( gtid >= 0 );

     if ( __kmp_get_drdpa_lock_owner( lck ) == gtid ) {
         lck->lk.depth_locked += 1;
+        return KMP_LOCK_ACQUIRED_NEXT;
     }
     else {
         __kmp_acquire_drdpa_lock_timed_template( lck, gtid );
@@ -2827,6 +2844,7 @@
         lck->lk.depth_locked = 1;
         KMP_MB();
         lck->lk.owner_id = gtid + 1;
+        return KMP_LOCK_ACQUIRED_FIRST;
     }
 }

@@ -3567,14 +3585,14 @@
 size_t __kmp_user_lock_size = 0;

 kmp_int32 ( *__kmp_get_user_lock_owner_ )( kmp_user_lock_p lck ) = NULL;
-void ( *__kmp_acquire_user_lock_with_checks_ )( kmp_user_lock_p lck, kmp_int32 gtid ) = NULL;
+int ( *__kmp_acquire_user_lock_with_checks_ )( kmp_user_lock_p lck, kmp_int32 gtid ) = NULL;
 int ( *__kmp_test_user_lock_with_checks_ )( kmp_user_lock_p lck, kmp_int32 gtid ) = NULL;
 int ( *__kmp_release_user_lock_with_checks_ )( kmp_user_lock_p lck, kmp_int32 gtid ) = NULL;
 void ( *__kmp_init_user_lock_with_checks_ )( kmp_user_lock_p lck ) = NULL;
 void ( *__kmp_destroy_user_lock_ )( kmp_user_lock_p lck ) = NULL;
 void ( *__kmp_destroy_user_lock_with_checks_ )( kmp_user_lock_p lck ) = NULL;

-void ( *__kmp_acquire_nested_user_lock_with_checks_ )( kmp_user_lock_p lck, kmp_int32 gtid ) = NULL;
+int ( *__kmp_acquire_nested_user_lock_with_checks_ )( kmp_user_lock_p lck, kmp_int32 gtid ) = NULL;
 int ( *__kmp_test_nested_user_lock_with_checks_ )( kmp_user_lock_p lck, kmp_int32 gtid ) = NULL;
 int ( *__kmp_release_nested_user_lock_with_checks_ )( kmp_user_lock_p lck, kmp_int32 gtid ) = NULL;
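On the tool side, these events are consumed through the draft OMPT interface shipped with this runtime. A minimal sketch, assuming the ompt.h of this revision provides ompt_event_t, ompt_callback_t, ompt_wait_id_t, ompt_function_lookup_t, and an ompt_initialize entry point with the signature shown; the counter names and the local set_callback typedef are illustrative:

#include <stdint.h>
#include <ompt.h>

/* Function-pointer type for the runtime's ompt_set_callback entry,
 * declared locally for this sketch. */
typedef int (*set_callback_fn_t)( ompt_event_t, ompt_callback_t );

static uint64_t nest_first_count = 0;
static uint64_t nest_next_count  = 0;

/* The runtime passes the lock address cast to (uint64_t) as the wait
 * id; it identifies the lock and must not be dereferenced. */
static void on_nest_first( ompt_wait_id_t wait_id ) { nest_first_count++; }
static void on_nest_next( ompt_wait_id_t wait_id )  { nest_next_count++; }

int
ompt_initialize( ompt_function_lookup_t lookup,
                 const char *runtime_version, unsigned int ompt_version )
{
    set_callback_fn_t set_callback =
        (set_callback_fn_t) lookup( "ompt_set_callback" );
    set_callback( ompt_event_acquired_nest_lock_first,
                  (ompt_callback_t) on_nest_first );
    set_callback( ompt_event_acquired_nest_lock_next,
                  (ompt_callback_t) on_nest_next );
    return 1;   /* nonzero keeps the tool active */
}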
Index: openmp/trunk/runtime/src/ompt-event-specific.h
===================================================================
--- openmp/trunk/runtime/src/ompt-event-specific.h
+++ openmp/trunk/runtime/src/ompt-event-specific.h
@@ -126,18 +126,18 @@
 #define ompt_event_wait_atomic_implemented              ompt_event_MAY_ALWAYS_TRACE
 #define ompt_event_wait_ordered_implemented             ompt_event_MAY_ALWAYS_TRACE

-#define ompt_event_acquired_lock_implemented            ompt_event_UNIMPLEMENTED
-#define ompt_event_acquired_nest_lock_first_implemented ompt_event_UNIMPLEMENTED
-#define ompt_event_acquired_nest_lock_next_implemented  ompt_event_UNIMPLEMENTED
+#define ompt_event_acquired_lock_implemented            ompt_event_MAY_ALWAYS_TRACE
+#define ompt_event_acquired_nest_lock_first_implemented ompt_event_MAY_ALWAYS_TRACE
+#define ompt_event_acquired_nest_lock_next_implemented  ompt_event_MAY_ALWAYS_TRACE
 #define ompt_event_acquired_critical_implemented        ompt_event_UNIMPLEMENTED
 #define ompt_event_acquired_atomic_implemented          ompt_event_MAY_ALWAYS_TRACE
 #define ompt_event_acquired_ordered_implemented         ompt_event_MAY_ALWAYS_TRACE

-#define ompt_event_init_lock_implemented                ompt_event_UNIMPLEMENTED
-#define ompt_event_init_nest_lock_implemented           ompt_event_UNIMPLEMENTED
+#define ompt_event_init_lock_implemented                ompt_event_MAY_ALWAYS_TRACE
+#define ompt_event_init_nest_lock_implemented           ompt_event_MAY_ALWAYS_TRACE

-#define ompt_event_destroy_lock_implemented             ompt_event_UNIMPLEMENTED
-#define ompt_event_destroy_nest_lock_implemented        ompt_event_UNIMPLEMENTED
+#define ompt_event_destroy_lock_implemented             ompt_event_MAY_ALWAYS_TRACE
+#define ompt_event_destroy_nest_lock_implemented        ompt_event_MAY_ALWAYS_TRACE

 #define ompt_event_flush_implemented                    ompt_event_UNIMPLEMENTED
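None of the new callback code is compiled in unless the runtime is built with both OMPT_SUPPORT and OMPT_TRACE defined; with OMPT_SUPPORT alone, the #if OMPT_SUPPORT && OMPT_TRACE guards keep the lock events out while the rest of the OMPT interface remains available. Assuming the LIBOMP_* CMake options documented for this era's build system (option names may differ in other revisions), a build that enables the events above would look like:

cmake -DLIBOMP_OMPT_SUPPORT=on -DLIBOMP_OMPT_TRACE=on <runtime-src-dir>
make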