diff --git a/clang/lib/Headers/__clang_cuda_intrinsics.h b/clang/lib/Headers/__clang_cuda_intrinsics.h
--- a/clang/lib/Headers/__clang_cuda_intrinsics.h
+++ b/clang/lib/Headers/__clang_cuda_intrinsics.h
@@ -45,7 +45,7 @@
     _Static_assert(sizeof(__val) == sizeof(__Bits));                          \
     _Static_assert(sizeof(__Bits) == 2 * sizeof(int));                        \
     __Bits __tmp;                                                             \
-    memcpy(&__tmp, &__val, sizeof(__val));  \
+    memcpy(&__tmp, &__val, sizeof(__val));                                    \
     __tmp.__a = ::__FnName(__tmp.__a, __offset, __width);                     \
     __tmp.__b = ::__FnName(__tmp.__b, __offset, __width);                     \
     long long __ret;                                                          \
@@ -234,8 +234,8 @@
   return __nvvm_match_any_sync_i32(mask, value);
 }
 
-inline __device__ unsigned int
-__match64_any_sync(unsigned int mask, unsigned long long value) {
+inline __device__ unsigned int __match64_any_sync(unsigned int mask,
+                                                  unsigned long long value) {
   return __nvvm_match_any_sync_i64(mask, value);
 }
 
@@ -573,10 +573,137 @@
 // know what exactly it is supposed to do. However, CUDA headers suggest that
 // just passing through __ptr should not affect correctness. They do so on
 // pre-sm80 GPUs where this builtin is not available.
-  return (void*)__ptr;
+  return (void *)__ptr;
 }
 #endif // !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 800
 
+#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 900
+__device__ inline unsigned __isCtaShared(const void *ptr) {
+  return __isShared(ptr);
+}
+
+__device__ inline unsigned __isClusterShared(const void *__ptr) {
+  return __nvvm_isspacep_shared_cluster(__ptr);
+}
+
+__device__ inline void *__cluster_map_shared_rank(const void *__ptr,
+                                                  unsigned __rank) {
+  return __nvvm_mapa((void *)__ptr, __rank);
+}
+
+__device__ inline unsigned __cluster_query_shared_rank(const void *__ptr) {
+  return __nvvm_getctarank((void *)__ptr);
+}
+
+__device__ inline uint2
+__cluster_map_shared_multicast(const void *__ptr,
+                               unsigned int __cluster_cta_mask) {
+  return make_uint2((unsigned)__cvta_generic_to_shared(__ptr),
+                    __cluster_cta_mask);
+}
+
+__device__ inline unsigned __clusterDimIsSpecified() {
+  return __nvvm_is_explicit_cluster();
+}
+
+__device__ inline dim3 __clusterDim() {
+  return {__nvvm_read_ptx_sreg_cluster_nctaid_x(),
+          __nvvm_read_ptx_sreg_cluster_nctaid_y(),
+          __nvvm_read_ptx_sreg_cluster_nctaid_z()};
+}
+
+__device__ inline dim3 __clusterRelativeBlockIdx() {
+  return {__nvvm_read_ptx_sreg_cluster_ctaid_x(),
+          __nvvm_read_ptx_sreg_cluster_ctaid_y(),
+          __nvvm_read_ptx_sreg_cluster_ctaid_z()};
+}
+
+__device__ inline dim3 __clusterGridDimInClusters() {
+  return {__nvvm_read_ptx_sreg_nclusterid_x(),
+          __nvvm_read_ptx_sreg_nclusterid_y(),
+          __nvvm_read_ptx_sreg_nclusterid_z()};
+}
+
+__device__ inline dim3 __clusterIdx() {
+  return {__nvvm_read_ptx_sreg_clusterid_x(),
+          __nvvm_read_ptx_sreg_clusterid_y(),
+          __nvvm_read_ptx_sreg_clusterid_z()};
+}
+
+__device__ inline unsigned __clusterRelativeBlockRank() {
+  return __nvvm_read_ptx_sreg_cluster_ctarank();
+}
+
+__device__ inline unsigned __clusterSizeInBlocks() {
+  return __nvvm_read_ptx_sreg_cluster_nctarank();
+}
+
+__device__ inline void __cluster_barrier_arrive() {
+  __nvvm_barrier_cluster_arrive();
+}
+
+__device__ inline void __cluster_barrier_arrive_relaxed() {
+  __nvvm_barrier_cluster_arrive_relaxed();
+}
+
+__device__ inline void __cluster_barrier_wait() {
+  __nvvm_barrier_cluster_wait();
+}
+
+__device__ inline void __threadfence_cluster() { __nvvm_fence_sc_cluster(); }
+
+__device__ inline float2 atomicAdd(float2 *__ptr, float2 __val) {
+  float2 __ret;
+  __asm__("atom.add.v2.f32 {%0, %1}, [%2], {%3, %4};"
+          : "=f"(__ret.x), "=f"(__ret.y)
+          : "l"(__ptr), "f"(__val.x), "f"(__val.y));
+  return __ret;
+}
+
+__device__ inline float2 atomicAdd_block(float2 *__ptr, float2 __val) {
+  float2 __ret;
+  __asm__("atom.cta.add.v2.f32 {%0, %1}, [%2], {%3, %4};"
+          : "=f"(__ret.x), "=f"(__ret.y)
+          : "l"(__ptr), "f"(__val.x), "f"(__val.y));
+  return __ret;
+}
+
+__device__ inline float2 atomicAdd_system(float2 *__ptr, float2 __val) {
+  float2 __ret;
+  __asm__("atom.sys.add.v2.f32 {%0, %1}, [%2], {%3, %4};"
+          : "=f"(__ret.x), "=f"(__ret.y)
+          : "l"(__ptr), "f"(__val.x), "f"(__val.y));
+  return __ret;
+}
+
+__device__ inline float4 atomicAdd(float4 *__ptr, float4 __val) {
+  float4 __ret;
+  __asm__("atom.add.v4.f32 {%0, %1, %2, %3}, [%4], {%5, %6, %7, %8};"
+          : "=f"(__ret.x), "=f"(__ret.y), "=f"(__ret.z), "=f"(__ret.w)
+          : "l"(__ptr), "f"(__val.x), "f"(__val.y), "f"(__val.z), "f"(__val.w));
+  return __ret;
+}
+
+__device__ inline float4 atomicAdd_block(float4 *__ptr, float4 __val) {
+  float4 __ret;
+  __asm__(
+      "atom.cta.add.v4.f32 {%0, %1, %2, %3}, [%4], {%5, %6, %7, %8};"
+      : "=f"(__ret.x), "=f"(__ret.y), "=f"(__ret.z), "=f"(__ret.w)
+      : "l"(__ptr), "f"(__val.x), "f"(__val.y), "f"(__val.z), "f"(__val.w));
+  return __ret;
+}
+
+__device__ inline float4 atomicAdd_system(float4 *__ptr, float4 __val) {
+  float4 __ret;
+  __asm__(
+      "atom.sys.add.v4.f32 {%0, %1, %2, %3}, [%4], {%5, %6, %7, %8};"
+      : "=f"(__ret.x), "=f"(__ret.y), "=f"(__ret.z), "=f"(__ret.w)
+      : "l"(__ptr), "f"(__val.x), "f"(__val.y), "f"(__val.z), "f"(__val.w)
+      :);
+  return __ret;
+}
+
+#endif // !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 900
 #endif // CUDA_VERSION >= 11000
 
 #endif // defined(__CLANG_CUDA_INTRINSICS_H__)