diff --git a/openmp/runtime/src/dllexports b/openmp/runtime/src/dllexports
--- a/openmp/runtime/src/dllexports
+++ b/openmp/runtime/src/dllexports
@@ -817,10 +817,7 @@
 %endif

-    # These are specific to x86 and x64
-    %ifdef IS_IA_ARCH
-
-        # ATOMIC extensions for OpenMP 3.1 spec (x86 and x64 only)
+    # ATOMIC extensions for OpenMP 3.1 spec

         __kmpc_atomic_fixed1_rd 2265
         __kmpc_atomic_fixed2_rd 2266
@@ -1047,6 +1044,9 @@
             __kmpc_atomic_float10_div_cpt_fp
         %endif

+    # These are specific to x86 and x64
+    %ifdef IS_IA_ARCH
+
         # ATOMIC extensions for OpenMP 4.0 spec (x86 and x64 only)

         __kmpc_atomic_fixed1_swp 2412
diff --git a/openmp/runtime/src/kmp_atomic.h b/openmp/runtime/src/kmp_atomic.h
--- a/openmp/runtime/src/kmp_atomic.h
+++ b/openmp/runtime/src/kmp_atomic.h
@@ -1005,8 +1005,7 @@
 void __kmpc_atomic_32(ident_t *id_ref, int gtid, void *lhs, void *rhs,
                       void (*f)(void *, void *, void *));

-// READ, WRITE, CAPTURE are supported only on IA-32 architecture and Intel(R) 64
-#if KMP_ARCH_X86 || KMP_ARCH_X86_64
+// READ, WRITE, CAPTURE

 // Below routines for atomic READ are listed
 char __kmpc_atomic_fixed1_rd(ident_t *id_ref, int gtid, char *loc);
@@ -1030,10 +1029,12 @@
 kmp_cmplx32 __kmpc_atomic_cmplx4_rd(ident_t *id_ref, int gtid,
                                     kmp_cmplx32 *loc);
 #endif
+#if KMP_ARCH_X86 || KMP_ARCH_X86_64
 kmp_cmplx64 __kmpc_atomic_cmplx8_rd(ident_t *id_ref, int gtid,
                                     kmp_cmplx64 *loc);
 kmp_cmplx80 __kmpc_atomic_cmplx10_rd(ident_t *id_ref, int gtid,
                                      kmp_cmplx80 *loc);
+#endif
 #if KMP_HAVE_QUAD
 CPLX128_LEG __kmpc_atomic_cmplx16_rd(ident_t *id_ref, int gtid,
                                      CPLX128_LEG *loc);
@@ -1337,7 +1338,7 @@
                                 kmp_cmplx32 rhs, kmp_cmplx32 *out, int flag);
 void __kmpc_atomic_cmplx4_div_cpt(ident_t *id_ref, int gtid, kmp_cmplx32 *lhs,
                                   kmp_cmplx32 rhs, kmp_cmplx32 *out, int flag);
-
+#if KMP_ARCH_X86 || KMP_ARCH_X86_64
 kmp_cmplx64 __kmpc_atomic_cmplx8_add_cpt(ident_t *id_ref, int gtid,
                                          kmp_cmplx64 *lhs, kmp_cmplx64 rhs,
                                          int flag);
@@ -1362,6 +1363,7 @@
 kmp_cmplx80 __kmpc_atomic_cmplx10_div_cpt(ident_t *id_ref, int gtid,
                                           kmp_cmplx80 *lhs, kmp_cmplx80 rhs,
                                           int flag);
+#endif
 #if KMP_HAVE_QUAD
 CPLX128_LEG __kmpc_atomic_cmplx16_add_cpt(ident_t *id_ref, int gtid,
                                           CPLX128_LEG *lhs, CPLX128_LEG rhs,
@@ -1419,7 +1421,7 @@
 // OpenMP 4.0: v = x = expr binop x; { v = x; x = expr binop x; } { x = expr
 // binop x; v = x; } for non-commutative operations.
-
+#if KMP_ARCH_X86 || KMP_ARCH_X86_64
 char __kmpc_atomic_fixed1_sub_cpt_rev(ident_t *id_ref, int gtid, char *lhs,
                                       char rhs, int flag);
 char __kmpc_atomic_fixed1_div_cpt_rev(ident_t *id_ref, int gtid, char *lhs,
diff --git a/openmp/runtime/src/kmp_atomic.cpp b/openmp/runtime/src/kmp_atomic.cpp
--- a/openmp/runtime/src/kmp_atomic.cpp
+++ b/openmp/runtime/src/kmp_atomic.cpp
@@ -1914,8 +1914,7 @@
 ATOMIC_CMPXCHG_CMPLX(cmplx4, kmp_cmplx32, div, 64, /, cmplx8, kmp_cmplx64, 8c,
                      7, KMP_ARCH_X86) // __kmpc_atomic_cmplx4_div_cmplx8

-// READ, WRITE, CAPTURE are supported only on IA-32 architecture and Intel(R) 64
-#if KMP_ARCH_X86 || KMP_ARCH_X86_64
+// READ, WRITE, CAPTURE

 // ------------------------------------------------------------------------
 // Atomic READ routines
@@ -2925,6 +2924,7 @@
 // binop x; v = x; } for non-commutative operations.
 // Supported only on IA-32 architecture and Intel(R) 64
+#if KMP_ARCH_X86 || KMP_ARCH_X86_64

 // -------------------------------------------------------------------------
 // Operation on *lhs, rhs bound by critical section
 // OP - operator (it's supposed to contain an assignment)
diff --git a/openmp/runtime/src/kmp_os.h b/openmp/runtime/src/kmp_os.h
--- a/openmp/runtime/src/kmp_os.h
+++ b/openmp/runtime/src/kmp_os.h
@@ -596,27 +596,26 @@
 }

 // The _RET versions return the value instead of a bool
-/*
+
 #define KMP_COMPARE_AND_STORE_RET8(p, cv, sv)                                 \
   _InterlockedCompareExchange8((p), (sv), (cv))
 #define KMP_COMPARE_AND_STORE_RET16(p, cv, sv)                                \
   _InterlockedCompareExchange16((p), (sv), (cv))
-*/
+
 #define KMP_COMPARE_AND_STORE_RET64(p, cv, sv)                                \
   _InterlockedCompareExchange64((volatile kmp_int64 *)(p), (kmp_int64)(sv),   \
                                 (kmp_int64)(cv))
-/*
+
 #define KMP_XCHG_FIXED8(p, v)                                                 \
   _InterlockedExchange8((volatile kmp_int8 *)(p), (kmp_int8)(v));
-*/
-// #define KMP_XCHG_FIXED16(p, v) _InterlockedExchange16((p), (v));
-// #define KMP_XCHG_REAL64(p, v) __kmp_xchg_real64((p), (v)));
+#define KMP_XCHG_FIXED16(p, v) _InterlockedExchange16((p), (v))
+#define KMP_XCHG_REAL64(p, v) __kmp_xchg_real64((p), (v))

-// inline kmp_real64 __kmp_xchg_real64(volatile kmp_real64 *p, kmp_real64 v) {
-// kmp_int64 tmp = _InterlockedExchange64((volatile kmp_int64 *)p, *(kmp_int64
-// *)&v); return *(kmp_real64 *)&tmp;
-// }
+inline kmp_real64 __kmp_xchg_real64(volatile kmp_real64 *p, kmp_real64 v) {
+  kmp_int64 tmp = _InterlockedExchange64((volatile kmp_int64 *)p, *(kmp_int64 *)&v);
+  return *(kmp_real64 *)&tmp;
+}

 #else // !KMP_ARCH_AARCH64
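
Note on the newly enabled KMP_XCHG_REAL64 path: _InterlockedExchange64 only
operates on 64-bit integers, so __kmp_xchg_real64 bit-casts the kmp_real64
argument to kmp_int64, performs the atomic exchange on the integer
representation, and bit-casts the old value back. Below is a minimal
standalone sketch of the same idiom, written against std::atomic instead of
the MSVC intrinsic so it compiles on any platform; the name
xchg_real64_sketch and the use of memcpy for the bit reinterpretation are
illustrative choices, not part of the runtime.

  #include <atomic>
  #include <cstdio>
  #include <cstring>

  // Atomically exchange a double by exchanging its 64-bit integer
  // representation. memcpy performs a well-defined bit reinterpretation;
  // the runtime's pointer cast *(kmp_int64 *)&v relies on the same layout.
  static double xchg_real64_sketch(std::atomic<long long> &loc, double v) {
    long long bits;
    std::memcpy(&bits, &v, sizeof(bits));            // double -> int64 bits
    long long old_bits = loc.exchange(bits);         // atomic 64-bit exchange
    double old_value;
    std::memcpy(&old_value, &old_bits, sizeof(old_value)); // bits -> double
    return old_value;
  }

  int main() {
    double init = 1.5;
    long long init_bits;
    std::memcpy(&init_bits, &init, sizeof(init_bits));
    std::atomic<long long> loc(init_bits);
    double old = xchg_real64_sketch(loc, 2.5); // returns 1.5; loc now holds 2.5
    std::printf("old = %g\n", old);
  }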