diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -12867,12 +12867,23 @@
   return DenormMode == DenormalMode::getIEEE();
 }
 
+// The amdgpu-unsafe-fp-atomics attribute enables generation of unsafe
+// floating point atomic instructions. May generate more efficient code,
+// but may not respect rounding and denormal modes, and may give incorrect
+// results for certain memory destinations.
+static bool unsafeFPAtomicsDisabled(Function *F) {
+  return F->getFnAttribute("amdgpu-unsafe-fp-atomics").getValueAsString() !=
+         "true";
+}
+
 TargetLowering::AtomicExpansionKind
 SITargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
   unsigned AS = RMW->getPointerAddressSpace();
   if (AS == AMDGPUAS::PRIVATE_ADDRESS)
     return AtomicExpansionKind::NotAtomic;
 
+  auto SSID = RMW->getSyncScopeID();
+
   auto ReportUnsafeHWInst = [&](TargetLowering::AtomicExpansionKind Kind) {
     OptimizationRemarkEmitter ORE(RMW->getFunction());
     LLVMContext &Ctx = RMW->getFunction()->getContext();
@@ -12891,6 +12902,10 @@
     return Kind;
   };
 
+  bool HasSystemScope =
+      SSID == SyncScope::System ||
+      SSID == RMW->getContext().getOrInsertSyncScopeID("one-as");
+
   switch (RMW->getOperation()) {
   case AtomicRMWInst::FAdd: {
     Type *Ty = RMW->getType();
@@ -12901,21 +12916,13 @@
     if (!Ty->isFloatTy() && (!Subtarget->hasGFX90AInsts() || !Ty->isDoubleTy()))
       return AtomicExpansionKind::CmpXChg;
 
-    if ((AS == AMDGPUAS::GLOBAL_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS) &&
+    if (AMDGPU::isFlatGlobalAddrSpace(AS) &&
         Subtarget->hasAtomicFaddNoRtnInsts()) {
-      // The amdgpu-unsafe-fp-atomics attribute enables generation of unsafe
-      // floating point atomic instructions. May generate more efficient code,
-      // but may not respect rounding and denormal modes, and may give incorrect
-      // results for certain memory destinations.
-      if (RMW->getFunction()
-              ->getFnAttribute("amdgpu-unsafe-fp-atomics")
-              .getValueAsString() != "true")
+      if (unsafeFPAtomicsDisabled(RMW->getFunction()))
         return AtomicExpansionKind::CmpXChg;
 
       // Always expand system scope fp atomics.
-      auto SSID = RMW->getSyncScopeID();
-      if (SSID == SyncScope::System ||
-          SSID == RMW->getContext().getOrInsertSyncScopeID("one-as"))
+      if (HasSystemScope)
         return AtomicExpansionKind::CmpXChg;
 
       if (AS == AMDGPUAS::GLOBAL_ADDRESS && Ty->isFloatTy()) {
@@ -12971,6 +12978,23 @@
     return AtomicExpansionKind::CmpXChg;
   }
+  case AtomicRMWInst::FMin:
+  case AtomicRMWInst::FMax:
+  case AtomicRMWInst::Min:
+  case AtomicRMWInst::Max:
+  case AtomicRMWInst::UMin:
+  case AtomicRMWInst::UMax: {
+    if (AMDGPU::isFlatGlobalAddrSpace(AS)) {
+      if (RMW->getType()->isFloatTy() &&
+          unsafeFPAtomicsDisabled(RMW->getFunction()))
+        return AtomicExpansionKind::CmpXChg;
+
+      // Always expand system scope min/max atomics.
+      if (HasSystemScope)
+        return AtomicExpansionKind::CmpXChg;
+    }
+    break;
+  }
   default:
     break;
   }
diff --git a/llvm/test/CodeGen/AMDGPU/dag-divergence-atomic.ll b/llvm/test/CodeGen/AMDGPU/dag-divergence-atomic.ll
--- a/llvm/test/CodeGen/AMDGPU/dag-divergence-atomic.ll
+++ b/llvm/test/CodeGen/AMDGPU/dag-divergence-atomic.ll
@@ -151,8 +151,8 @@
   ret void
 }
 
-define protected amdgpu_kernel void @max(i32 addrspace(1)* %p, %S addrspace(1)* %q) {
-; CHECK-LABEL: max:
+define protected amdgpu_kernel void @max_workgroup(i32 addrspace(1)* %p, %S addrspace(1)* %q) {
+; CHECK-LABEL: max_workgroup:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
 ; CHECK-NEXT:    v_mov_b32_e32 v0, 0
@@ -165,6 +165,41 @@
 ; CHECK-NEXT:    v_mad_u64_u32 v[0:1], s[0:1], v2, 12, v[0:1]
 ; CHECK-NEXT:    v_mov_b32_e32 v2, 1.0
 ; CHECK-NEXT:    global_store_dword v[0:1], v2, off
+; CHECK-NEXT:    s_endpgm
+  %n32 = atomicrmw max i32 addrspace(1)* %p, i32 1 syncscope("workgroup") monotonic
+  %n64 = zext i32 %n32 to i64
+  %p1 = getelementptr inbounds %S, %S addrspace(1)* %q, i64 %n64, i32 0
+  store float 1.0, float addrspace(1)* %p1
+  ret void
+}
+
+define protected amdgpu_kernel void @max(i32 addrspace(1)* %p, %S addrspace(1)* %q) {
+; CHECK-LABEL: max:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; CHECK-NEXT:    s_mov_b64 s[4:5], 0
+; CHECK-NEXT:    v_mov_b32_e32 v1, 0
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    s_load_dword s6, s[0:1], 0x0
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    v_mov_b32_e32 v0, s6
+; CHECK-NEXT:  .LBB7_1: ; %atomicrmw.start
+; CHECK-NEXT:    ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    v_mov_b32_e32 v3, v0
+; CHECK-NEXT:    v_max_i32_e32 v2, 1, v3
+; CHECK-NEXT:    global_atomic_cmpswap v0, v1, v[2:3], s[0:1] glc
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v3
+; CHECK-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; CHECK-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; CHECK-NEXT:    s_cbranch_execnz .LBB7_1
+; CHECK-NEXT:  ; %bb.2: ; %atomicrmw.end
+; CHECK-NEXT:    s_or_b64 exec, exec, s[4:5]
+; CHECK-NEXT:    v_mov_b32_e32 v2, s2
+; CHECK-NEXT:    v_mov_b32_e32 v3, s3
+; CHECK-NEXT:    v_mad_u64_u32 v[0:1], s[0:1], v0, 12, v[2:3]
+; CHECK-NEXT:    v_mov_b32_e32 v2, 1.0
+; CHECK-NEXT:    global_store_dword v[0:1], v2, off
 ; CHECK-NEXT:    s_endpgm
   %n32 = atomicrmw max i32 addrspace(1)* %p, i32 1 monotonic
   %n64 = zext i32 %n32 to i64
@@ -173,8 +208,8 @@
   ret void
 }
 
-define protected amdgpu_kernel void @min(i32 addrspace(1)* %p, %S addrspace(1)* %q) {
-; CHECK-LABEL: min:
+define protected amdgpu_kernel void @min_workgroup(i32 addrspace(1)* %p, %S addrspace(1)* %q) {
+; CHECK-LABEL: min_workgroup:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
 ; CHECK-NEXT:    v_mov_b32_e32 v0, 0
@@ -187,6 +222,41 @@
 ; CHECK-NEXT:    v_mad_u64_u32 v[0:1], s[0:1], v2, 12, v[0:1]
 ; CHECK-NEXT:    v_mov_b32_e32 v2, 1.0
 ; CHECK-NEXT:    global_store_dword v[0:1], v2, off
+; CHECK-NEXT:    s_endpgm
+  %n32 = atomicrmw min i32 addrspace(1)* %p, i32 1 syncscope("workgroup") monotonic
+  %n64 = zext i32 %n32 to i64
+  %p1 = getelementptr inbounds %S, %S addrspace(1)* %q, i64 %n64, i32 0
+  store float 1.0, float addrspace(1)* %p1
+  ret void
+}
+
+define protected amdgpu_kernel void @min(i32 addrspace(1)* %p, %S addrspace(1)* %q) {
+; CHECK-LABEL: min:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; CHECK-NEXT:    s_mov_b64 s[4:5], 0
+; CHECK-NEXT:    v_mov_b32_e32 v1, 0
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    s_load_dword s6, s[0:1], 0x0
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    v_mov_b32_e32 v0, s6
+; CHECK-NEXT:  .LBB9_1: ; %atomicrmw.start
+; CHECK-NEXT:    ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    v_mov_b32_e32 v3, v0
+; CHECK-NEXT:    v_min_i32_e32 v2, 1, v3
+; CHECK-NEXT:    global_atomic_cmpswap v0, v1, v[2:3], s[0:1] glc
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v3
+; CHECK-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; CHECK-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; CHECK-NEXT:    s_cbranch_execnz .LBB9_1
+; CHECK-NEXT:  ; %bb.2: ; %atomicrmw.end
+; CHECK-NEXT:    s_or_b64 exec, exec, s[4:5]
+; CHECK-NEXT:    v_mov_b32_e32 v2, s2
+; CHECK-NEXT:    v_mov_b32_e32 v3, s3
+; CHECK-NEXT:    v_mad_u64_u32 v[0:1], s[0:1], v0, 12, v[2:3]
+; CHECK-NEXT:    v_mov_b32_e32 v2, 1.0
+; CHECK-NEXT:    global_store_dword v[0:1], v2, off
 ; CHECK-NEXT:    s_endpgm
   %n32 = atomicrmw min i32 addrspace(1)* %p, i32 1 monotonic
   %n64 = zext i32 %n32 to i64
@@ -195,8 +265,8 @@
   ret void
 }
 
-define protected amdgpu_kernel void @umax(i32 addrspace(1)* %p, %S addrspace(1)* %q) {
-; CHECK-LABEL: umax:
+define protected amdgpu_kernel void @umax_workgroup(i32 addrspace(1)* %p, %S addrspace(1)* %q) {
+; CHECK-LABEL: umax_workgroup:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
 ; CHECK-NEXT:    v_mov_b32_e32 v0, 0
@@ -209,6 +279,41 @@
 ; CHECK-NEXT:    v_mad_u64_u32 v[0:1], s[0:1], v2, 12, v[0:1]
 ; CHECK-NEXT:    v_mov_b32_e32 v2, 1.0
 ; CHECK-NEXT:    global_store_dword v[0:1], v2, off
+; CHECK-NEXT:    s_endpgm
+  %n32 = atomicrmw umax i32 addrspace(1)* %p, i32 1 syncscope("workgroup") monotonic
+  %n64 = zext i32 %n32 to i64
+  %p1 = getelementptr inbounds %S, %S addrspace(1)* %q, i64 %n64, i32 0
+  store float 1.0, float addrspace(1)* %p1
+  ret void
+}
+
+define protected amdgpu_kernel void @umax(i32 addrspace(1)* %p, %S addrspace(1)* %q) {
+; CHECK-LABEL: umax:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; CHECK-NEXT:    s_mov_b64 s[4:5], 0
+; CHECK-NEXT:    v_mov_b32_e32 v1, 0
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    s_load_dword s6, s[0:1], 0x0
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    v_mov_b32_e32 v0, s6
+; CHECK-NEXT:  .LBB11_1: ; %atomicrmw.start
+; CHECK-NEXT:    ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    v_mov_b32_e32 v3, v0
+; CHECK-NEXT:    v_max_u32_e32 v2, 1, v3
+; CHECK-NEXT:    global_atomic_cmpswap v0, v1, v[2:3], s[0:1] glc
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v3
+; CHECK-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; CHECK-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; CHECK-NEXT:    s_cbranch_execnz .LBB11_1
+; CHECK-NEXT:  ; %bb.2: ; %atomicrmw.end
+; CHECK-NEXT:    s_or_b64 exec, exec, s[4:5]
+; CHECK-NEXT:    v_mov_b32_e32 v2, s2
+; CHECK-NEXT:    v_mov_b32_e32 v3, s3
+; CHECK-NEXT:    v_mad_u64_u32 v[0:1], s[0:1], v0, 12, v[2:3]
+; CHECK-NEXT:    v_mov_b32_e32 v2, 1.0
+; CHECK-NEXT:    global_store_dword v[0:1], v2, off
 ; CHECK-NEXT:    s_endpgm
   %n32 = atomicrmw umax i32 addrspace(1)* %p, i32 1 monotonic
   %n64 = zext i32 %n32 to i64
@@ -217,8 +322,8 @@
   ret void
 }
 
-define protected amdgpu_kernel void @umin(i32 addrspace(1)* %p, %S addrspace(1)* %q) {
-; CHECK-LABEL: umin:
+define protected amdgpu_kernel void @umin_workgroup(i32 addrspace(1)* %p, %S addrspace(1)* %q) {
+; CHECK-LABEL: umin_workgroup:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
 ; CHECK-NEXT:    v_mov_b32_e32 v0, 0
@@ -231,6 +336,41 @@
 ; CHECK-NEXT:    v_mad_u64_u32 v[0:1], s[0:1], v2, 12, v[0:1]
 ; CHECK-NEXT:    v_mov_b32_e32 v2, 1.0
 ; CHECK-NEXT:    global_store_dword v[0:1], v2, off
+; CHECK-NEXT:    s_endpgm
+  %n32 = atomicrmw umin i32 addrspace(1)* %p, i32 1 syncscope("workgroup") monotonic
+  %n64 = zext i32 %n32 to i64
+  %p1 = getelementptr inbounds %S, %S addrspace(1)* %q, i64 %n64, i32 0
+  store float 1.0, float addrspace(1)* %p1
+  ret void
+}
+
+define protected amdgpu_kernel void @umin(i32 addrspace(1)* %p, %S addrspace(1)* %q) {
+; CHECK-LABEL: umin:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; CHECK-NEXT:    s_mov_b64 s[4:5], 0
+; CHECK-NEXT:    v_mov_b32_e32 v1, 0
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    s_load_dword s6, s[0:1], 0x0
+; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
+; CHECK-NEXT:    v_mov_b32_e32 v0, s6
+; CHECK-NEXT:  .LBB13_1: ; %atomicrmw.start
+; CHECK-NEXT:    ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    v_mov_b32_e32 v3, v0
+; CHECK-NEXT:    v_min_u32_e32 v2, 1, v3
+; CHECK-NEXT:    global_atomic_cmpswap v0, v1, v[2:3], s[0:1] glc
+; CHECK-NEXT:    s_waitcnt vmcnt(0)
+; CHECK-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v3
+; CHECK-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; CHECK-NEXT:    s_andn2_b64 exec, exec, s[4:5]
+; CHECK-NEXT:    s_cbranch_execnz .LBB13_1
+; CHECK-NEXT:  ; %bb.2: ; %atomicrmw.end
+; CHECK-NEXT:    s_or_b64 exec, exec, s[4:5]
+; CHECK-NEXT:    v_mov_b32_e32 v2, s2
+; CHECK-NEXT:    v_mov_b32_e32 v3, s3
+; CHECK-NEXT:    v_mad_u64_u32 v[0:1], s[0:1], v0, 12, v[2:3]
+; CHECK-NEXT:    v_mov_b32_e32 v2, 1.0
+; CHECK-NEXT:    global_store_dword v[0:1], v2, off
 ; CHECK-NEXT:    s_endpgm
   %n32 = atomicrmw umin i32 addrspace(1)* %p, i32 1 monotonic
   %n64 = zext i32 %n32 to i64
@@ -337,7 +477,7 @@
 ; CHECK-NEXT:    s_load_dword s6, s[0:1], 0x0
 ; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
 ; CHECK-NEXT:    v_mov_b32_e32 v0, s6
-; CHECK-NEXT:  .LBB14_1: ; %atomicrmw.start
+; CHECK-NEXT:  .LBB18_1: ; %atomicrmw.start
 ; CHECK-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    v_mov_b32_e32 v3, v0
 ; CHECK-NEXT:    v_add_f32_e32 v2, 1.0, v3
@@ -346,7 +486,7 @@
 ; CHECK-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v3
 ; CHECK-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
 ; CHECK-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; CHECK-NEXT:    s_cbranch_execnz .LBB14_1
+; CHECK-NEXT:    s_cbranch_execnz .LBB18_1
 ; CHECK-NEXT:  ; %bb.2: ; %atomicrmw.end
 ; CHECK-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; CHECK-NEXT:    v_cvt_u32_f32_e32 v2, v0
@@ -374,7 +514,7 @@
 ; CHECK-NEXT:    s_load_dword s6, s[0:1], 0x0
 ; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
 ; CHECK-NEXT:    v_mov_b32_e32 v0, s6
-; CHECK-NEXT:  .LBB15_1: ; %atomicrmw.start
+; CHECK-NEXT:  .LBB19_1: ; %atomicrmw.start
 ; CHECK-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    v_mov_b32_e32 v3, v0
 ; CHECK-NEXT:    v_add_f32_e32 v2, -1.0, v3
@@ -383,7 +523,7 @@
 ; CHECK-NEXT:    v_cmp_eq_u32_e32 vcc, v0, v3
 ; CHECK-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
 ; CHECK-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; CHECK-NEXT:    s_cbranch_execnz .LBB15_1
+; CHECK-NEXT:    s_cbranch_execnz .LBB19_1
 ; CHECK-NEXT:  ; %bb.2: ; %atomicrmw.end
 ; CHECK-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; CHECK-NEXT:    v_cvt_u32_f32_e32 v2, v0
diff --git a/llvm/test/CodeGen/AMDGPU/flat_atomics.ll b/llvm/test/CodeGen/AMDGPU/flat_atomics.ll
--- a/llvm/test/CodeGen/AMDGPU/flat_atomics.ll
+++ b/llvm/test/CodeGen/AMDGPU/flat_atomics.ll
@@ -1587,10 +1587,9 @@
 ; GCN1-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN1-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN1-NEXT:    v_mov_b32_e32 v2, s4
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT:    flat_atomic_smax v[0:1], v2
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT:    s_endpgm
 ;
 ; GCN2-LABEL: atomic_max_i32_offset:
@@ -1603,10 +1602,9 @@
 ; GCN2-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN2-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN2-NEXT:    v_mov_b32_e32 v2, s4
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT:    flat_atomic_smax v[0:1], v2
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT:    s_endpgm
 ;
 ; GCN3-LABEL: atomic_max_i32_offset:
@@ -1617,14 +1615,13 @@
 ; GCN3-NEXT:    v_mov_b32_e32 v0, s2
 ; GCN3-NEXT:    v_mov_b32_e32 v1, s3
 ; GCN3-NEXT:    v_mov_b32_e32 v2, s4
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN3-NEXT:    flat_atomic_smax v[0:1], v2 offset:16
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN3-NEXT:    s_endpgm
 entry:
   %gep = getelementptr i32, i32* %out, i32 4
-  %val = atomicrmw volatile max i32* %gep, i32 %in seq_cst
+  %val = atomicrmw volatile max i32* %gep, i32 %in syncscope("workgroup") seq_cst
   ret void
 }
 
@@ -1639,12 +1636,12 @@
 ; GCN1-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN1-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN1-NEXT:    v_mov_b32_e32 v2, s2
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT:    flat_atomic_smax v2, v[0:1], v2 glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT:    v_mov_b32_e32 v0, s6
 ; GCN1-NEXT:    v_mov_b32_e32 v1, s7
+; GCN1-NEXT:    s_waitcnt vmcnt(0)
 ; GCN1-NEXT:    flat_store_dword v[0:1], v2
 ; GCN1-NEXT:    s_endpgm
 ;
@@ -1658,12 +1655,12 @@
 ; GCN2-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN2-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN2-NEXT:    v_mov_b32_e32 v2, s2
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT:    flat_atomic_smax v2, v[0:1], v2 glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT:    v_mov_b32_e32 v0, s6
 ; GCN2-NEXT:    v_mov_b32_e32 v1, s7
+; GCN2-NEXT:    s_waitcnt vmcnt(0)
 ; GCN2-NEXT:    flat_store_dword v[0:1], v2
 ; GCN2-NEXT:    s_endpgm
 ;
@@ -1675,17 +1672,17 @@
 ; GCN3-NEXT:    v_mov_b32_e32 v0, s4
 ; GCN3-NEXT:    v_mov_b32_e32 v1, s5
 ; GCN3-NEXT:    v_mov_b32_e32 v2, s2
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN3-NEXT:    flat_atomic_smax v2, v[0:1], v2 offset:16 glc
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN3-NEXT:    v_mov_b32_e32 v0, s6
 ; GCN3-NEXT:    v_mov_b32_e32 v1, s7
+; GCN3-NEXT:    s_waitcnt vmcnt(0)
 ; GCN3-NEXT:    flat_store_dword v[0:1], v2
 ; GCN3-NEXT:    s_endpgm
 entry:
   %gep = getelementptr i32, i32* %out, i32 4
-  %val = atomicrmw volatile max i32* %gep, i32 %in seq_cst
+  %val = atomicrmw volatile max i32* %gep, i32 %in syncscope("workgroup") seq_cst
   store i32 %val, i32* %out2
   ret void
 }
 
@@ -1705,10 +1702,9 @@
 ; GCN1-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN1-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN1-NEXT:    v_mov_b32_e32 v2, s6
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT:    flat_atomic_smax v[0:1], v2
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT:    s_endpgm
 ;
 ; GCN2-LABEL: atomic_max_i32_addr64_offset:
@@ -1725,10 +1721,9 @@
 ; GCN2-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN2-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN2-NEXT:    v_mov_b32_e32 v2, s6
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT:    flat_atomic_smax v[0:1], v2
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT:    s_endpgm
 ;
 ; GCN3-LABEL: atomic_max_i32_addr64_offset:
@@ -1743,15 +1738,14 @@
 ; GCN3-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN3-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN3-NEXT:    v_mov_b32_e32 v2, s6
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN3-NEXT:    flat_atomic_smax v[0:1], v2 offset:16
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN3-NEXT:    s_endpgm
 entry:
   %ptr = getelementptr i32, i32* %out, i64 %index
   %gep = getelementptr i32, i32* %ptr, i32 4
-  %val = atomicrmw volatile max i32* %gep, i32 %in seq_cst
+  %val = atomicrmw volatile max i32* %gep, i32 %in syncscope("workgroup") seq_cst
   ret void
 }
 
@@ -1770,12 +1764,12 @@
 ; GCN1-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN1-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN1-NEXT:    v_mov_b32_e32 v2, s8
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT:    flat_atomic_smax v2, v[0:1], v2 glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT:    v_mov_b32_e32 v0, s6
 ; GCN1-NEXT:    v_mov_b32_e32 v1, s7
+; GCN1-NEXT:    s_waitcnt vmcnt(0)
 ; GCN1-NEXT:    flat_store_dword v[0:1], v2
 ; GCN1-NEXT:    s_endpgm
 ;
@@ -1793,12 +1787,12 @@
 ; GCN2-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN2-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN2-NEXT:    v_mov_b32_e32 v2, s8
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT:    flat_atomic_smax v2, v[0:1], v2 glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT:    v_mov_b32_e32 v0, s6
 ; GCN2-NEXT:    v_mov_b32_e32 v1, s7
+; GCN2-NEXT:    s_waitcnt vmcnt(0)
 ; GCN2-NEXT:    flat_store_dword v[0:1], v2
 ; GCN2-NEXT:    s_endpgm
 ;
@@ -1814,18 +1808,18 @@
 ; GCN3-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN3-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN3-NEXT:    v_mov_b32_e32 v2, s8
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN3-NEXT:    flat_atomic_smax v2, v[0:1], v2 offset:16 glc
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN3-NEXT:    v_mov_b32_e32 v0, s6
 ; GCN3-NEXT:    v_mov_b32_e32 v1, s7
+; GCN3-NEXT:    s_waitcnt vmcnt(0)
 ; GCN3-NEXT:    flat_store_dword v[0:1], v2
 ; GCN3-NEXT:    s_endpgm
 entry:
   %ptr = getelementptr i32, i32* %out, i64 %index
   %gep = getelementptr i32, i32* %ptr, i32 4
-  %val = atomicrmw volatile max i32* %gep, i32 %in seq_cst
+  %val = atomicrmw volatile max i32* %gep, i32 %in syncscope("workgroup") seq_cst
   store i32 %val, i32* %out2
   ret void
 }
 
@@ -1839,10 +1833,9 @@
 ; GCN1-NEXT:    v_mov_b32_e32 v0, s2
 ; GCN1-NEXT:    v_mov_b32_e32 v1, s3
 ; GCN1-NEXT:    v_mov_b32_e32 v2, s0
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT:    flat_atomic_smax v[0:1], v2
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT:    s_endpgm
 ;
 ; GCN2-LABEL: atomic_max_i32:
@@ -1853,10 +1846,9 @@
 ; GCN2-NEXT:    v_mov_b32_e32 v0, s2
 ; GCN2-NEXT:    v_mov_b32_e32 v1, s3
 ; GCN2-NEXT:    v_mov_b32_e32 v2, s0
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT:    flat_atomic_smax v[0:1], v2
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT:    s_endpgm
 ;
 ; GCN3-LABEL: atomic_max_i32:
@@ -1867,13 +1859,12 @@
 ; GCN3-NEXT:    v_mov_b32_e32 v0, s2
 ; GCN3-NEXT:    v_mov_b32_e32 v1, s3
 ; GCN3-NEXT:    v_mov_b32_e32 v2, s4
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN3-NEXT:    flat_atomic_smax v[0:1], v2
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN3-NEXT:    s_endpgm
 entry:
-  %val = atomicrmw volatile max i32* %out, i32 %in seq_cst
+  %val = atomicrmw volatile max i32* %out, i32 %in syncscope("workgroup") seq_cst
   ret void
 }
 
@@ -1886,12 +1877,12 @@
 ; GCN1-NEXT:    v_mov_b32_e32 v0, s4
 ; GCN1-NEXT:    v_mov_b32_e32 v1, s5
 ; GCN1-NEXT:    v_mov_b32_e32 v2, s0
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT:    flat_atomic_smax v2, v[0:1], v2 glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT:    v_mov_b32_e32 v0, s6
 ; GCN1-NEXT:    v_mov_b32_e32 v1, s7
+; GCN1-NEXT:    s_waitcnt vmcnt(0)
 ; GCN1-NEXT:    flat_store_dword v[0:1], v2
 ; GCN1-NEXT:    s_endpgm
 ;
@@ -1903,12 +1894,12 @@
 ; GCN2-NEXT:    v_mov_b32_e32 v0, s4
 ; GCN2-NEXT:    v_mov_b32_e32 v1, s5
 ; GCN2-NEXT:    v_mov_b32_e32 v2, s0
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT:    flat_atomic_smax v2, v[0:1], v2 glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT:    v_mov_b32_e32 v0, s6
 ; GCN2-NEXT:    v_mov_b32_e32 v1, s7
+; GCN2-NEXT:    s_waitcnt vmcnt(0)
 ; GCN2-NEXT:    flat_store_dword v[0:1], v2
 ; GCN2-NEXT:    s_endpgm
 ;
@@ -1920,16 +1911,16 @@
 ; GCN3-NEXT:    v_mov_b32_e32 v0, s4
 ; GCN3-NEXT:    v_mov_b32_e32 v1, s5
 ; GCN3-NEXT:    v_mov_b32_e32 v2, s2
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN3-NEXT:    flat_atomic_smax v2, v[0:1], v2 glc
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN3-NEXT:    v_mov_b32_e32 v0, s6
 ; GCN3-NEXT:    v_mov_b32_e32 v1, s7
+; GCN3-NEXT:    s_waitcnt vmcnt(0)
 ; GCN3-NEXT:    flat_store_dword v[0:1], v2
 ; GCN3-NEXT:    s_endpgm
 entry:
-  %val = atomicrmw volatile max i32* %out, i32 %in seq_cst
+  %val = atomicrmw volatile max i32* %out, i32 %in syncscope("workgroup") seq_cst
   store i32 %val, i32* %out2
   ret void
 }
 
@@ -1947,10 +1938,9 @@
 ; GCN1-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN1-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN1-NEXT:    v_mov_b32_e32 v2, s6
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT:    flat_atomic_smax v[0:1], v2
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT:    s_endpgm
 ;
 ; GCN2-LABEL: atomic_max_i32_addr64:
@@ -1965,10 +1955,9 @@
 ; GCN2-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN2-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN2-NEXT:    v_mov_b32_e32 v2, s6
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT:    flat_atomic_smax v[0:1], v2
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT:    s_endpgm
 ;
 ; GCN3-LABEL: atomic_max_i32_addr64:
@@ -1983,14 +1972,13 @@
 ; GCN3-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN3-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN3-NEXT:    v_mov_b32_e32 v2, s6
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN3-NEXT:    flat_atomic_smax v[0:1], v2
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN3-NEXT:    s_endpgm
 entry:
   %ptr = getelementptr i32, i32* %out, i64 %index
-  %val = atomicrmw volatile max i32* %ptr, i32 %in seq_cst
+  %val = atomicrmw volatile max i32* %ptr, i32 %in syncscope("workgroup") seq_cst
   ret void
 }
 
@@ -2007,12 +1995,12 @@
 ; GCN1-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN1-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN1-NEXT:    v_mov_b32_e32 v2, s8
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT:    flat_atomic_smax v2, v[0:1], v2 glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT:    v_mov_b32_e32 v0, s6
 ; GCN1-NEXT:    v_mov_b32_e32 v1, s7
+; GCN1-NEXT:    s_waitcnt vmcnt(0)
 ; GCN1-NEXT:    flat_store_dword v[0:1], v2
 ; GCN1-NEXT:    s_endpgm
 ;
@@ -2028,12 +2016,12 @@
 ; GCN2-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN2-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN2-NEXT:    v_mov_b32_e32 v2, s8
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT:    flat_atomic_smax v2, v[0:1], v2 glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT:    v_mov_b32_e32 v0, s6
 ; GCN2-NEXT:    v_mov_b32_e32 v1, s7
+; GCN2-NEXT:    s_waitcnt vmcnt(0)
 ; GCN2-NEXT:    flat_store_dword v[0:1], v2
 ; GCN2-NEXT:    s_endpgm
 ;
@@ -2049,17 +2037,17 @@
 ; GCN3-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN3-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN3-NEXT:    v_mov_b32_e32 v2, s8
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN3-NEXT:    flat_atomic_smax v2, v[0:1], v2 offset:16 glc
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN3-NEXT:    v_mov_b32_e32 v0, s6
 ; GCN3-NEXT:    v_mov_b32_e32 v1, s7
+; GCN3-NEXT:    s_waitcnt vmcnt(0)
 ; GCN3-NEXT:    flat_store_dword v[0:1], v2
 ; GCN3-NEXT:    s_endpgm
 entry:
   %ptr = getelementptr i32, i32* %out, i64 %index
   %gep = getelementptr i32, i32* %ptr, i32 4
-  %val = atomicrmw volatile max i32* %gep, i32 %in seq_cst
+  %val = atomicrmw volatile max i32* %gep, i32 %in syncscope("workgroup") seq_cst
   store i32 %val, i32* %out2
   ret void
 }
 
@@ -2075,10 +2063,9 @@
 ; GCN1-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN1-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN1-NEXT:    v_mov_b32_e32 v2, s4
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT:    flat_atomic_umax v[0:1], v2
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT:    s_endpgm
 ;
 ; GCN2-LABEL: atomic_umax_i32_offset:
@@ -2091,10 +2078,9 @@
 ; GCN2-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN2-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN2-NEXT:    v_mov_b32_e32 v2, s4
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT:    flat_atomic_umax v[0:1], v2
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT:    s_endpgm
 ;
 ; GCN3-LABEL: atomic_umax_i32_offset:
@@ -2105,14 +2091,13 @@
 ; GCN3-NEXT:    v_mov_b32_e32 v0, s2
 ; GCN3-NEXT:    v_mov_b32_e32 v1, s3
 ; GCN3-NEXT:    v_mov_b32_e32 v2, s4
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN3-NEXT:    flat_atomic_umax v[0:1], v2 offset:16
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN3-NEXT:    s_endpgm
 entry:
   %gep = getelementptr i32, i32* %out, i32 4
-  %val = atomicrmw volatile umax i32* %gep, i32 %in seq_cst
+  %val = atomicrmw volatile umax i32* %gep, i32 %in syncscope("workgroup") seq_cst
   ret void
 }
 
@@ -2127,12 +2112,12 @@
 ; GCN1-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN1-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN1-NEXT:    v_mov_b32_e32 v2, s2
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT:    flat_atomic_umax v2, v[0:1], v2 glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT:    v_mov_b32_e32 v0, s6
 ; GCN1-NEXT:    v_mov_b32_e32 v1, s7
+; GCN1-NEXT:    s_waitcnt vmcnt(0)
 ; GCN1-NEXT:    flat_store_dword v[0:1], v2
 ; GCN1-NEXT:    s_endpgm
 ;
@@ -2146,12 +2131,12 @@
 ; GCN2-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN2-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN2-NEXT:    v_mov_b32_e32 v2, s2
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT:    flat_atomic_umax v2, v[0:1], v2 glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT:    v_mov_b32_e32 v0, s6
 ; GCN2-NEXT:    v_mov_b32_e32 v1, s7
+; GCN2-NEXT:    s_waitcnt vmcnt(0)
 ; GCN2-NEXT:    flat_store_dword v[0:1], v2
 ; GCN2-NEXT:    s_endpgm
 ;
@@ -2163,17 +2148,17 @@
 ; GCN3-NEXT:    v_mov_b32_e32 v0, s4
 ; GCN3-NEXT:    v_mov_b32_e32 v1, s5
 ; GCN3-NEXT:    v_mov_b32_e32 v2, s2
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN3-NEXT:    flat_atomic_umax v2, v[0:1], v2 offset:16 glc
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN3-NEXT:    v_mov_b32_e32 v0, s6
 ; GCN3-NEXT:    v_mov_b32_e32 v1, s7
+; GCN3-NEXT:    s_waitcnt vmcnt(0)
 ; GCN3-NEXT:    flat_store_dword v[0:1], v2
 ; GCN3-NEXT:    s_endpgm
 entry:
   %gep = getelementptr i32, i32* %out, i32 4
-  %val = atomicrmw volatile umax i32* %gep, i32 %in seq_cst
+  %val = atomicrmw volatile umax i32* %gep, i32 %in syncscope("workgroup") seq_cst
   store i32 %val, i32* %out2
   ret void
 }
 
@@ -2193,10 +2178,9 @@
 ; GCN1-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN1-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN1-NEXT:    v_mov_b32_e32 v2, s6
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT:    flat_atomic_umax v[0:1], v2
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT:    s_endpgm
 ;
 ; GCN2-LABEL: atomic_umax_i32_addr64_offset:
@@ -2213,10 +2197,9 @@
 ; GCN2-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN2-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN2-NEXT:    v_mov_b32_e32 v2, s6
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT:    flat_atomic_umax v[0:1], v2
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT:    s_endpgm
 ;
 ; GCN3-LABEL: atomic_umax_i32_addr64_offset:
@@ -2231,15 +2214,14 @@
 ; GCN3-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN3-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN3-NEXT:    v_mov_b32_e32 v2, s6
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN3-NEXT:    flat_atomic_umax v[0:1], v2 offset:16
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN3-NEXT:    s_endpgm
 entry:
   %ptr = getelementptr i32, i32* %out, i64 %index
   %gep = getelementptr i32, i32* %ptr, i32 4
-  %val = atomicrmw volatile umax i32* %gep, i32 %in seq_cst
+  %val = atomicrmw volatile umax i32* %gep, i32 %in syncscope("workgroup") seq_cst
   ret void
 }
 
@@ -2258,12 +2240,12 @@
 ; GCN1-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN1-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN1-NEXT:    v_mov_b32_e32 v2, s8
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT:    flat_atomic_umax v2, v[0:1], v2 glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT:    v_mov_b32_e32 v0, s6
 ; GCN1-NEXT:    v_mov_b32_e32 v1, s7
+; GCN1-NEXT:    s_waitcnt vmcnt(0)
 ; GCN1-NEXT:    flat_store_dword v[0:1], v2
 ; GCN1-NEXT:    s_endpgm
 ;
@@ -2281,12 +2263,12 @@
 ; GCN2-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN2-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN2-NEXT:    v_mov_b32_e32 v2, s8
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT:    flat_atomic_umax v2, v[0:1], v2 glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT:    v_mov_b32_e32 v0, s6
 ; GCN2-NEXT:    v_mov_b32_e32 v1, s7
+; GCN2-NEXT:    s_waitcnt vmcnt(0)
 ; GCN2-NEXT:    flat_store_dword v[0:1], v2
 ; GCN2-NEXT:    s_endpgm
 ;
@@ -2302,18 +2284,18 @@
 ; GCN3-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN3-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN3-NEXT:    v_mov_b32_e32 v2, s8
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN3-NEXT:    flat_atomic_umax v2, v[0:1], v2 offset:16 glc
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN3-NEXT:    v_mov_b32_e32 v0, s6
 ; GCN3-NEXT:    v_mov_b32_e32 v1, s7
+; GCN3-NEXT:    s_waitcnt vmcnt(0)
 ; GCN3-NEXT:    flat_store_dword v[0:1], v2
 ; GCN3-NEXT:    s_endpgm
 entry:
   %ptr = getelementptr i32, i32* %out, i64 %index
   %gep = getelementptr i32, i32* %ptr, i32 4
-  %val = atomicrmw volatile umax i32* %gep, i32 %in seq_cst
+  %val = atomicrmw volatile umax i32* %gep, i32 %in syncscope("workgroup") seq_cst
   store i32 %val, i32* %out2
   ret void
 }
 
@@ -2327,10 +2309,9 @@
 ; GCN1-NEXT:    v_mov_b32_e32 v0, s2
 ; GCN1-NEXT:    v_mov_b32_e32 v1, s3
 ; GCN1-NEXT:    v_mov_b32_e32 v2, s0
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT:    flat_atomic_umax v[0:1], v2
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT:    s_endpgm
 ;
 ; GCN2-LABEL: atomic_umax_i32:
@@ -2341,10 +2322,9 @@
 ; GCN2-NEXT:    v_mov_b32_e32 v0, s2
 ; GCN2-NEXT:    v_mov_b32_e32 v1, s3
 ; GCN2-NEXT:    v_mov_b32_e32 v2, s0
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT:    flat_atomic_umax v[0:1], v2
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT:    s_endpgm
 ;
 ; GCN3-LABEL: atomic_umax_i32:
@@ -2355,13 +2335,12 @@
 ; GCN3-NEXT:    v_mov_b32_e32 v0, s2
 ; GCN3-NEXT:    v_mov_b32_e32 v1, s3
 ; GCN3-NEXT:    v_mov_b32_e32 v2, s4
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN3-NEXT:    flat_atomic_umax v[0:1], v2
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN3-NEXT:    s_endpgm
 entry:
-  %val = atomicrmw volatile umax i32* %out, i32 %in seq_cst
+  %val = atomicrmw volatile umax i32* %out, i32 %in syncscope("workgroup") seq_cst
   ret void
 }
 
@@ -2374,12 +2353,12 @@
 ; GCN1-NEXT:    v_mov_b32_e32 v0, s4
 ; GCN1-NEXT:    v_mov_b32_e32 v1, s5
 ; GCN1-NEXT:    v_mov_b32_e32 v2, s0
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT:    flat_atomic_umax v2, v[0:1], v2 glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT:    v_mov_b32_e32 v0, s6
 ; GCN1-NEXT:    v_mov_b32_e32 v1, s7
+; GCN1-NEXT:    s_waitcnt vmcnt(0)
 ; GCN1-NEXT:    flat_store_dword v[0:1], v2
 ; GCN1-NEXT:    s_endpgm
 ;
@@ -2391,12 +2370,12 @@
 ; GCN2-NEXT:    v_mov_b32_e32 v0, s4
 ; GCN2-NEXT:    v_mov_b32_e32 v1, s5
 ; GCN2-NEXT:    v_mov_b32_e32 v2, s0
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT:    flat_atomic_umax v2, v[0:1], v2 glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT:    v_mov_b32_e32 v0, s6
 ; GCN2-NEXT:    v_mov_b32_e32 v1, s7
+; GCN2-NEXT:    s_waitcnt vmcnt(0)
 ; GCN2-NEXT:    flat_store_dword v[0:1], v2
 ; GCN2-NEXT:    s_endpgm
 ;
@@ -2408,16 +2387,16 @@
 ; GCN3-NEXT:    v_mov_b32_e32 v0, s4
 ; GCN3-NEXT:    v_mov_b32_e32 v1, s5
 ; GCN3-NEXT:    v_mov_b32_e32 v2, s2
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN3-NEXT:    flat_atomic_umax v2, v[0:1], v2 glc
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN3-NEXT:    v_mov_b32_e32 v0, s6
 ; GCN3-NEXT:    v_mov_b32_e32 v1, s7
+; GCN3-NEXT:    s_waitcnt vmcnt(0)
 ; GCN3-NEXT:    flat_store_dword v[0:1], v2
 ; GCN3-NEXT:    s_endpgm
 entry:
-  %val = atomicrmw volatile umax i32* %out, i32 %in seq_cst
+  %val = atomicrmw volatile umax i32* %out, i32 %in syncscope("workgroup") seq_cst
   store i32 %val, i32* %out2
   ret void
 }
 
@@ -2435,10 +2414,9 @@
 ; GCN1-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN1-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN1-NEXT:    v_mov_b32_e32 v2, s6
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT:    flat_atomic_umax v[0:1], v2
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT:    s_endpgm
 ;
 ; GCN2-LABEL: atomic_umax_i32_addr64:
@@ -2453,10 +2431,9 @@
 ; GCN2-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN2-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN2-NEXT:    v_mov_b32_e32 v2, s6
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT:    flat_atomic_umax v[0:1], v2
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT:    s_endpgm
 ;
 ; GCN3-LABEL: atomic_umax_i32_addr64:
@@ -2471,14 +2448,13 @@
 ; GCN3-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN3-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN3-NEXT:    v_mov_b32_e32 v2, s6
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN3-NEXT:    flat_atomic_umax v[0:1], v2
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN3-NEXT:    s_endpgm
 entry:
   %ptr = getelementptr i32, i32* %out, i64 %index
-  %val = atomicrmw volatile umax i32* %ptr, i32 %in seq_cst
+  %val = atomicrmw volatile umax i32* %ptr, i32 %in syncscope("workgroup") seq_cst
   ret void
 }
 
@@ -2495,12 +2471,12 @@
 ; GCN1-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN1-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN1-NEXT:    v_mov_b32_e32 v2, s8
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT:    flat_atomic_umax v2, v[0:1], v2 glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT:    v_mov_b32_e32 v0, s6
 ; GCN1-NEXT:    v_mov_b32_e32 v1, s7
+; GCN1-NEXT:    s_waitcnt vmcnt(0)
 ; GCN1-NEXT:    flat_store_dword v[0:1], v2
 ; GCN1-NEXT:    s_endpgm
 ;
@@ -2516,12 +2492,12 @@
 ; GCN2-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN2-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN2-NEXT:    v_mov_b32_e32 v2, s8
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT:    flat_atomic_umax v2, v[0:1], v2 glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT:    v_mov_b32_e32 v0, s6
 ; GCN2-NEXT:    v_mov_b32_e32 v1, s7
+; GCN2-NEXT:    s_waitcnt vmcnt(0)
 ; GCN2-NEXT:    flat_store_dword v[0:1], v2
 ; GCN2-NEXT:    s_endpgm
 ;
@@ -2537,17 +2513,17 @@
 ; GCN3-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN3-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN3-NEXT:    v_mov_b32_e32 v2, s8
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN3-NEXT:    flat_atomic_umax v2, v[0:1], v2 glc
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN3-NEXT:    v_mov_b32_e32 v0, s6
 ; GCN3-NEXT:    v_mov_b32_e32 v1, s7
+; GCN3-NEXT:    s_waitcnt vmcnt(0)
 ; GCN3-NEXT:    flat_store_dword v[0:1], v2
 ; GCN3-NEXT:    s_endpgm
 entry:
   %ptr = getelementptr i32, i32* %out, i64 %index
-  %val = atomicrmw volatile umax i32* %ptr, i32 %in seq_cst
+  %val = atomicrmw volatile umax i32* %ptr, i32 %in syncscope("workgroup") seq_cst
   store i32 %val, i32* %out2
   ret void
 }
 
@@ -2563,10 +2539,9 @@
 ; GCN1-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN1-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN1-NEXT:    v_mov_b32_e32 v2, s4
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT:    flat_atomic_smin v[0:1], v2
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT:    s_endpgm
 ;
 ; GCN2-LABEL: atomic_min_i32_offset:
@@ -2579,10 +2554,9 @@
 ; GCN2-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN2-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN2-NEXT:    v_mov_b32_e32 v2, s4
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT:    flat_atomic_smin v[0:1], v2
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT:    s_endpgm
 ;
 ; GCN3-LABEL: atomic_min_i32_offset:
@@ -2593,14 +2567,13 @@
 ; GCN3-NEXT:    v_mov_b32_e32 v0, s2
 ; GCN3-NEXT:    v_mov_b32_e32 v1, s3
 ; GCN3-NEXT:    v_mov_b32_e32 v2, s4
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN3-NEXT:    flat_atomic_smin v[0:1], v2 offset:16
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN3-NEXT:    s_endpgm
 entry:
   %gep = getelementptr i32, i32* %out, i32 4
-  %val = atomicrmw volatile min i32* %gep, i32 %in seq_cst
+  %val = atomicrmw volatile min i32* %gep, i32 %in syncscope("workgroup") seq_cst
   ret void
 }
 
@@ -2615,12 +2588,12 @@
 ; GCN1-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN1-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN1-NEXT:    v_mov_b32_e32 v2, s2
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT:    flat_atomic_smin v2, v[0:1], v2 glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT:    v_mov_b32_e32 v0, s6
 ; GCN1-NEXT:    v_mov_b32_e32 v1, s7
+; GCN1-NEXT:    s_waitcnt vmcnt(0)
 ; GCN1-NEXT:    flat_store_dword v[0:1], v2
 ; GCN1-NEXT:    s_endpgm
 ;
@@ -2634,12 +2607,12 @@
 ; GCN2-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN2-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN2-NEXT:    v_mov_b32_e32 v2, s2
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT:    flat_atomic_smin v2, v[0:1], v2 glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT:    v_mov_b32_e32 v0, s6
 ; GCN2-NEXT:    v_mov_b32_e32 v1, s7
+; GCN2-NEXT:    s_waitcnt vmcnt(0)
 ; GCN2-NEXT:    flat_store_dword v[0:1], v2
 ; GCN2-NEXT:    s_endpgm
 ;
@@ -2651,17 +2624,17 @@
 ; GCN3-NEXT:    v_mov_b32_e32 v0, s4
 ; GCN3-NEXT:    v_mov_b32_e32 v1, s5
 ; GCN3-NEXT:    v_mov_b32_e32 v2, s2
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN3-NEXT:    flat_atomic_smin v2, v[0:1], v2 offset:16 glc
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN3-NEXT:    v_mov_b32_e32 v0, s6
 ; GCN3-NEXT:    v_mov_b32_e32 v1, s7
+; GCN3-NEXT:    s_waitcnt vmcnt(0)
 ; GCN3-NEXT:    flat_store_dword v[0:1], v2
 ; GCN3-NEXT:    s_endpgm
 entry:
   %gep = getelementptr i32, i32* %out, i32 4
-  %val = atomicrmw volatile min i32* %gep, i32 %in seq_cst
+  %val = atomicrmw volatile min i32* %gep, i32 %in syncscope("workgroup") seq_cst
   store i32 %val, i32* %out2
   ret void
 }
 
@@ -2681,10 +2654,9 @@
 ; GCN1-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN1-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN1-NEXT:    v_mov_b32_e32 v2, s6
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT:    flat_atomic_smin v[0:1], v2
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT:    s_endpgm
 ;
 ; GCN2-LABEL: atomic_min_i32_addr64_offset:
@@ -2701,10 +2673,9 @@
 ; GCN2-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN2-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN2-NEXT:    v_mov_b32_e32 v2, s6
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT:    flat_atomic_smin v[0:1], v2
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT:    s_endpgm
 ;
 ; GCN3-LABEL: atomic_min_i32_addr64_offset:
@@ -2719,15 +2690,14 @@
 ; GCN3-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN3-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN3-NEXT:    v_mov_b32_e32 v2, s6
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN3-NEXT:    flat_atomic_smin v[0:1], v2 offset:16
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN3-NEXT:    s_endpgm
 entry:
   %ptr = getelementptr i32, i32* %out, i64 %index
   %gep = getelementptr i32, i32* %ptr, i32 4
-  %val = atomicrmw volatile min i32* %gep, i32 %in seq_cst
+  %val = atomicrmw volatile min i32* %gep, i32 %in syncscope("workgroup") seq_cst
   ret void
 }
 
@@ -2746,12 +2716,12 @@
 ; GCN1-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN1-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN1-NEXT:    v_mov_b32_e32 v2, s8
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT:    flat_atomic_smin v2, v[0:1], v2 glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT:    v_mov_b32_e32 v0, s6
 ; GCN1-NEXT:    v_mov_b32_e32 v1, s7
+; GCN1-NEXT:    s_waitcnt vmcnt(0)
 ; GCN1-NEXT:    flat_store_dword v[0:1], v2
 ; GCN1-NEXT:    s_endpgm
 ;
@@ -2769,12 +2739,12 @@
 ; GCN2-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN2-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN2-NEXT:    v_mov_b32_e32 v2, s8
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT:    flat_atomic_smin v2, v[0:1], v2 glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT:    v_mov_b32_e32 v0, s6
 ; GCN2-NEXT:    v_mov_b32_e32 v1, s7
+; GCN2-NEXT:    s_waitcnt vmcnt(0)
 ; GCN2-NEXT:    flat_store_dword v[0:1], v2
 ; GCN2-NEXT:    s_endpgm
 ;
@@ -2790,18 +2760,18 @@
 ; GCN3-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN3-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN3-NEXT:    v_mov_b32_e32 v2, s8
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN3-NEXT:    flat_atomic_smin v2, v[0:1], v2 offset:16 glc
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN3-NEXT:    v_mov_b32_e32 v0, s6
 ; GCN3-NEXT:    v_mov_b32_e32 v1, s7
+; GCN3-NEXT:    s_waitcnt vmcnt(0)
 ; GCN3-NEXT:    flat_store_dword v[0:1], v2
 ; GCN3-NEXT:    s_endpgm
 entry:
   %ptr = getelementptr i32, i32* %out, i64 %index
   %gep = getelementptr i32, i32* %ptr, i32 4
-  %val = atomicrmw volatile min i32* %gep, i32 %in seq_cst
+  %val = atomicrmw volatile min i32* %gep, i32 %in syncscope("workgroup") seq_cst
   store i32 %val, i32* %out2
   ret void
 }
 
@@ -2815,10 +2785,9 @@
 ; GCN1-NEXT:    v_mov_b32_e32 v0, s2
 ; GCN1-NEXT:    v_mov_b32_e32 v1, s3
 ; GCN1-NEXT:    v_mov_b32_e32 v2, s0
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT:    flat_atomic_smin v[0:1], v2
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT:    s_endpgm
 ;
 ; GCN2-LABEL: atomic_min_i32:
@@ -2829,10 +2798,9 @@
 ; GCN2-NEXT:    v_mov_b32_e32 v0, s2
 ; GCN2-NEXT:    v_mov_b32_e32 v1, s3
 ; GCN2-NEXT:    v_mov_b32_e32 v2, s0
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT:    flat_atomic_smin v[0:1], v2
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT:    s_endpgm
 ;
 ; GCN3-LABEL: atomic_min_i32:
@@ -2843,13 +2811,12 @@
 ; GCN3-NEXT:    v_mov_b32_e32 v0, s2
 ; GCN3-NEXT:    v_mov_b32_e32 v1, s3
 ; GCN3-NEXT:    v_mov_b32_e32 v2, s4
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN3-NEXT:    flat_atomic_smin v[0:1], v2
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN3-NEXT:    s_endpgm
 entry:
-  %val = atomicrmw volatile min i32* %out, i32 %in seq_cst
+  %val = atomicrmw volatile min i32* %out, i32 %in syncscope("workgroup") seq_cst
   ret void
 }
 
@@ -2862,12 +2829,12 @@
 ; GCN1-NEXT:    v_mov_b32_e32 v0, s4
 ; GCN1-NEXT:    v_mov_b32_e32 v1, s5
 ; GCN1-NEXT:    v_mov_b32_e32 v2, s0
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT:    flat_atomic_smin v2, v[0:1], v2 glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT:    v_mov_b32_e32 v0, s6
 ; GCN1-NEXT:    v_mov_b32_e32 v1, s7
+; GCN1-NEXT:    s_waitcnt vmcnt(0)
 ; GCN1-NEXT:    flat_store_dword v[0:1], v2
 ; GCN1-NEXT:    s_endpgm
 ;
@@ -2879,12 +2846,12 @@
 ; GCN2-NEXT:    v_mov_b32_e32 v0, s4
 ; GCN2-NEXT:    v_mov_b32_e32 v1, s5
 ; GCN2-NEXT:    v_mov_b32_e32 v2, s0
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT:    flat_atomic_smin v2, v[0:1], v2 glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT:    v_mov_b32_e32 v0, s6
 ; GCN2-NEXT:    v_mov_b32_e32 v1, s7
+; GCN2-NEXT:    s_waitcnt vmcnt(0)
 ; GCN2-NEXT:    flat_store_dword v[0:1], v2
 ; GCN2-NEXT:    s_endpgm
 ;
@@ -2896,16 +2863,16 @@
 ; GCN3-NEXT:    v_mov_b32_e32 v0, s4
 ; GCN3-NEXT:    v_mov_b32_e32 v1, s5
 ; GCN3-NEXT:    v_mov_b32_e32 v2, s2
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN3-NEXT:    flat_atomic_smin v2, v[0:1], v2 glc
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN3-NEXT:    v_mov_b32_e32 v0, s6
 ; GCN3-NEXT:    v_mov_b32_e32 v1, s7
+; GCN3-NEXT:    s_waitcnt vmcnt(0)
 ; GCN3-NEXT:    flat_store_dword v[0:1], v2
 ; GCN3-NEXT:    s_endpgm
 entry:
-  %val = atomicrmw volatile min i32* %out, i32 %in seq_cst
+  %val = atomicrmw volatile min i32* %out, i32 %in syncscope("workgroup") seq_cst
   store i32 %val, i32* %out2
   ret void
 }
 
@@ -2923,10 +2890,9 @@
 ; GCN1-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN1-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN1-NEXT:    v_mov_b32_e32 v2, s6
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT:    flat_atomic_smin v[0:1], v2
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT:    s_endpgm
 ;
 ; GCN2-LABEL: atomic_min_i32_addr64:
@@ -2941,10 +2907,9 @@
 ; GCN2-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN2-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN2-NEXT:    v_mov_b32_e32 v2, s6
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT:    flat_atomic_smin v[0:1], v2
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT:    s_endpgm
 ;
 ; GCN3-LABEL: atomic_min_i32_addr64:
@@ -2959,14 +2924,13 @@
 ; GCN3-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN3-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN3-NEXT:    v_mov_b32_e32 v2, s6
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN3-NEXT:    flat_atomic_smin v[0:1], v2
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN3-NEXT:    s_endpgm
 entry:
   %ptr = getelementptr i32, i32* %out, i64 %index
-  %val = atomicrmw volatile min i32* %ptr, i32 %in seq_cst
+  %val = atomicrmw volatile min i32* %ptr, i32 %in syncscope("workgroup") seq_cst
   ret void
 }
 
@@ -2983,12 +2947,12 @@
 ; GCN1-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN1-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN1-NEXT:    v_mov_b32_e32 v2, s8
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT:    flat_atomic_smin v2, v[0:1], v2 glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT:    v_mov_b32_e32 v0, s6
 ; GCN1-NEXT:    v_mov_b32_e32 v1, s7
+; GCN1-NEXT:    s_waitcnt vmcnt(0)
 ; GCN1-NEXT:    flat_store_dword v[0:1], v2
 ; GCN1-NEXT:    s_endpgm
 ;
@@ -3004,12 +2968,12 @@
 ; GCN2-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN2-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN2-NEXT:    v_mov_b32_e32 v2, s8
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT:    flat_atomic_smin v2, v[0:1], v2 glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT:    v_mov_b32_e32 v0, s6
 ; GCN2-NEXT:    v_mov_b32_e32 v1, s7
+; GCN2-NEXT:    s_waitcnt vmcnt(0)
 ; GCN2-NEXT:    flat_store_dword v[0:1], v2
 ; GCN2-NEXT:    s_endpgm
 ;
@@ -3025,17 +2989,17 @@
 ; GCN3-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN3-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN3-NEXT:    v_mov_b32_e32 v2, s8
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN3-NEXT:    flat_atomic_smin v2, v[0:1], v2 glc
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN3-NEXT:    v_mov_b32_e32 v0, s6
 ; GCN3-NEXT:    v_mov_b32_e32 v1, s7
+; GCN3-NEXT:    s_waitcnt vmcnt(0)
 ; GCN3-NEXT:    flat_store_dword v[0:1], v2
 ; GCN3-NEXT:    s_endpgm
 entry:
   %ptr = getelementptr i32, i32* %out, i64 %index
-  %val = atomicrmw volatile min i32* %ptr, i32 %in seq_cst
+  %val = atomicrmw volatile min i32* %ptr, i32 %in syncscope("workgroup") seq_cst
   store i32 %val, i32* %out2
   ret void
 }
 
@@ -3051,10 +3015,9 @@
 ; GCN1-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN1-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN1-NEXT:    v_mov_b32_e32 v2, s4
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT:    flat_atomic_umin v[0:1], v2
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT:    s_endpgm
 ;
 ; GCN2-LABEL: atomic_umin_i32_offset:
@@ -3067,10 +3030,9 @@
 ; GCN2-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN2-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN2-NEXT:    v_mov_b32_e32 v2, s4
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT:    flat_atomic_umin v[0:1], v2
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT:    s_endpgm
 ;
 ; GCN3-LABEL: atomic_umin_i32_offset:
@@ -3081,14 +3043,13 @@
 ; GCN3-NEXT:    v_mov_b32_e32 v0, s2
 ; GCN3-NEXT:    v_mov_b32_e32 v1, s3
 ; GCN3-NEXT:    v_mov_b32_e32 v2, s4
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN3-NEXT:    flat_atomic_umin v[0:1], v2 offset:16
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN3-NEXT:    s_endpgm
 entry:
   %gep = getelementptr i32, i32* %out, i32 4
-  %val = atomicrmw volatile umin i32* %gep, i32 %in seq_cst
+  %val = atomicrmw volatile umin i32* %gep, i32 %in syncscope("workgroup") seq_cst
   ret void
 }
 
@@ -3103,12 +3064,12 @@
 ; GCN1-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN1-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN1-NEXT:    v_mov_b32_e32 v2, s2
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT:    flat_atomic_umin v2, v[0:1], v2 glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT:    v_mov_b32_e32 v0, s6
 ; GCN1-NEXT:    v_mov_b32_e32 v1, s7
+; GCN1-NEXT:    s_waitcnt vmcnt(0)
 ; GCN1-NEXT:    flat_store_dword v[0:1], v2
 ; GCN1-NEXT:    s_endpgm
 ;
@@ -3122,12 +3083,12 @@
 ; GCN2-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN2-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN2-NEXT:    v_mov_b32_e32 v2, s2
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT:    flat_atomic_umin v2, v[0:1], v2 glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT:    v_mov_b32_e32 v0, s6
 ; GCN2-NEXT:    v_mov_b32_e32 v1, s7
+; GCN2-NEXT:    s_waitcnt vmcnt(0)
 ; GCN2-NEXT:    flat_store_dword v[0:1], v2
 ; GCN2-NEXT:    s_endpgm
 ;
@@ -3139,17 +3100,17 @@
 ; GCN3-NEXT:    v_mov_b32_e32 v0, s4
 ; GCN3-NEXT:    v_mov_b32_e32 v1, s5
 ; GCN3-NEXT:    v_mov_b32_e32 v2, s2
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN3-NEXT:    flat_atomic_umin v2, v[0:1], v2 offset:16 glc
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN3-NEXT:    v_mov_b32_e32 v0, s6
 ; GCN3-NEXT:    v_mov_b32_e32 v1, s7
+; GCN3-NEXT:    s_waitcnt vmcnt(0)
 ; GCN3-NEXT:    flat_store_dword v[0:1], v2
 ; GCN3-NEXT:    s_endpgm
 entry:
   %gep = getelementptr i32, i32* %out, i32 4
-  %val = atomicrmw volatile umin i32* %gep, i32 %in seq_cst
+  %val = atomicrmw volatile umin i32* %gep, i32 %in syncscope("workgroup") seq_cst
   store i32 %val, i32* %out2
   ret void
 }
 
@@ -3169,10 +3130,9 @@
 ; GCN1-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN1-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN1-NEXT:    v_mov_b32_e32 v2, s6
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT:    flat_atomic_umin v[0:1], v2
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT:    s_endpgm
 ;
 ; GCN2-LABEL: atomic_umin_i32_addr64_offset:
@@ -3189,10 +3149,9 @@
 ; GCN2-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN2-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN2-NEXT:    v_mov_b32_e32 v2, s6
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT:    flat_atomic_umin v[0:1], v2
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT:    s_endpgm
 ;
 ; GCN3-LABEL: atomic_umin_i32_addr64_offset:
@@ -3207,15 +3166,14 @@
 ; GCN3-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN3-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN3-NEXT:    v_mov_b32_e32 v2, s6
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN3-NEXT:    flat_atomic_umin v[0:1], v2 offset:16
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN3-NEXT:    s_endpgm
 entry:
   %ptr = getelementptr i32, i32* %out, i64 %index
   %gep = getelementptr i32, i32* %ptr, i32 4
-  %val = atomicrmw volatile umin i32* %gep, i32 %in seq_cst
+  %val = atomicrmw volatile umin i32* %gep, i32 %in syncscope("workgroup") seq_cst
   ret void
 }
 
@@ -3234,12 +3192,12 @@
 ; GCN1-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN1-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN1-NEXT:    v_mov_b32_e32 v2, s8
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT:    flat_atomic_umin v2, v[0:1], v2 glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT:    v_mov_b32_e32 v0, s6
 ; GCN1-NEXT:    v_mov_b32_e32 v1, s7
+; GCN1-NEXT:    s_waitcnt vmcnt(0)
 ; GCN1-NEXT:    flat_store_dword v[0:1], v2
 ; GCN1-NEXT:    s_endpgm
 ;
@@ -3257,12 +3215,12 @@
 ; GCN2-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN2-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN2-NEXT:    v_mov_b32_e32 v2, s8
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT:    flat_atomic_umin v2, v[0:1], v2 glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT:    v_mov_b32_e32 v0, s6
 ; GCN2-NEXT:    v_mov_b32_e32 v1, s7
+; GCN2-NEXT:    s_waitcnt vmcnt(0)
 ; GCN2-NEXT:    flat_store_dword v[0:1], v2
 ; GCN2-NEXT:    s_endpgm
 ;
@@ -3278,18 +3236,18 @@
 ; GCN3-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN3-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN3-NEXT:    v_mov_b32_e32 v2, s8
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN3-NEXT:    flat_atomic_umin v2, v[0:1], v2 offset:16 glc
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN3-NEXT:    v_mov_b32_e32 v0, s6
 ; GCN3-NEXT:    v_mov_b32_e32 v1, s7
+; GCN3-NEXT:    s_waitcnt vmcnt(0)
 ; GCN3-NEXT:    flat_store_dword v[0:1], v2
 ; GCN3-NEXT:    s_endpgm
 entry:
   %ptr = getelementptr i32, i32* %out, i64 %index
   %gep = getelementptr i32, i32* %ptr, i32 4
-  %val = atomicrmw volatile umin i32* %gep, i32 %in seq_cst
+  %val = atomicrmw volatile umin i32* %gep, i32 %in syncscope("workgroup") seq_cst
   store i32 %val, i32* %out2
   ret void
 }
 
@@ -3303,10 +3261,9 @@
 ; GCN1-NEXT:    v_mov_b32_e32 v0, s2
 ; GCN1-NEXT:    v_mov_b32_e32 v1, s3
 ; GCN1-NEXT:    v_mov_b32_e32 v2, s0
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT:    flat_atomic_umin v[0:1], v2
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT:    s_endpgm
 ;
 ; GCN2-LABEL: atomic_umin_i32:
@@ -3317,10 +3274,9 @@
 ; GCN2-NEXT:    v_mov_b32_e32 v0, s2
 ; GCN2-NEXT:    v_mov_b32_e32 v1, s3
 ; GCN2-NEXT:    v_mov_b32_e32 v2, s0
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT:    flat_atomic_umin v[0:1], v2
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT:    s_endpgm
 ;
 ; GCN3-LABEL: atomic_umin_i32:
@@ -3331,13 +3287,12 @@
 ; GCN3-NEXT:    v_mov_b32_e32 v0, s2
 ; GCN3-NEXT:    v_mov_b32_e32 v1, s3
 ; GCN3-NEXT:    v_mov_b32_e32 v2, s4
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN3-NEXT:    flat_atomic_umin v[0:1], v2
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN3-NEXT:    s_endpgm
 entry:
-  %val = atomicrmw volatile umin i32* %out, i32 %in seq_cst
+  %val = atomicrmw volatile umin i32* %out, i32 %in syncscope("workgroup") seq_cst
   ret void
 }
 
@@ -3350,12 +3305,12 @@
 ; GCN1-NEXT:    v_mov_b32_e32 v0, s4
 ; GCN1-NEXT:    v_mov_b32_e32 v1, s5
 ; GCN1-NEXT:    v_mov_b32_e32 v2, s0
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT:    flat_atomic_umin v2, v[0:1], v2 glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT:    v_mov_b32_e32 v0, s6
 ; GCN1-NEXT:    v_mov_b32_e32 v1, s7
+; GCN1-NEXT:    s_waitcnt vmcnt(0)
 ; GCN1-NEXT:    flat_store_dword v[0:1], v2
 ; GCN1-NEXT:    s_endpgm
 ;
@@ -3367,12 +3322,12 @@
 ; GCN2-NEXT:    v_mov_b32_e32 v0, s4
 ; GCN2-NEXT:    v_mov_b32_e32 v1, s5
 ; GCN2-NEXT:    v_mov_b32_e32 v2, s0
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT:    flat_atomic_umin v2, v[0:1], v2 glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT:    v_mov_b32_e32 v0, s6
 ; GCN2-NEXT:    v_mov_b32_e32 v1, s7
+; GCN2-NEXT:    s_waitcnt vmcnt(0)
 ; GCN2-NEXT:    flat_store_dword v[0:1], v2
 ; GCN2-NEXT:    s_endpgm
 ;
@@ -3384,16 +3339,16 @@
 ; GCN3-NEXT:    v_mov_b32_e32 v0, s4
 ; GCN3-NEXT:    v_mov_b32_e32 v1, s5
 ; GCN3-NEXT:    v_mov_b32_e32 v2, s2
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN3-NEXT:    flat_atomic_umin v2, v[0:1], v2 glc
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN3-NEXT:    v_mov_b32_e32 v0, s6
 ; GCN3-NEXT:    v_mov_b32_e32 v1, s7
+; GCN3-NEXT:    s_waitcnt vmcnt(0)
 ; GCN3-NEXT:    flat_store_dword v[0:1], v2
 ; GCN3-NEXT:    s_endpgm
 entry:
-  %val = atomicrmw volatile umin i32* %out, i32 %in seq_cst
+  %val = atomicrmw volatile umin i32* %out, i32 %in syncscope("workgroup") seq_cst
   store i32 %val, i32* %out2
   ret void
 }
 
@@ -3411,10 +3366,9 @@
 ; GCN1-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN1-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN1-NEXT:    v_mov_b32_e32 v2, s6
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT:    flat_atomic_umin v[0:1], v2
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT:    s_endpgm
 ;
 ; GCN2-LABEL: atomic_umin_i32_addr64:
@@ -3429,10 +3383,9 @@
 ; GCN2-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN2-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN2-NEXT:    v_mov_b32_e32 v2, s6
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT:    flat_atomic_umin v[0:1], v2
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT:    s_endpgm
 ;
 ; GCN3-LABEL: atomic_umin_i32_addr64:
@@ -3447,14 +3400,13 @@
 ; GCN3-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN3-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN3-NEXT:    v_mov_b32_e32 v2, s6
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN3-NEXT:    flat_atomic_umin v[0:1], v2
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN3-NEXT:    s_endpgm
 entry:
   %ptr = getelementptr i32, i32* %out, i64 %index
-  %val = atomicrmw volatile umin i32* %ptr, i32 %in seq_cst
+  %val = atomicrmw volatile umin i32* %ptr, i32 %in syncscope("workgroup") seq_cst
   ret void
 }
 
@@ -3471,12 +3423,12 @@
 ; GCN1-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN1-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN1-NEXT:    v_mov_b32_e32 v2, s8
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT:    flat_atomic_umin v2, v[0:1], v2 glc
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT:    v_mov_b32_e32 v0, s6
 ; GCN1-NEXT:    v_mov_b32_e32 v1, s7
+; GCN1-NEXT:    s_waitcnt vmcnt(0)
 ; GCN1-NEXT:    flat_store_dword v[0:1], v2
 ; GCN1-NEXT:    s_endpgm
 ;
@@ -3492,12 +3444,12 @@
 ; GCN2-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN2-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN2-NEXT:    v_mov_b32_e32 v2, s8
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT:    flat_atomic_umin v2, v[0:1], v2 glc
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT:    v_mov_b32_e32 v0, s6
 ; GCN2-NEXT:    v_mov_b32_e32 v1, s7
+; GCN2-NEXT:    s_waitcnt vmcnt(0)
 ; GCN2-NEXT:    flat_store_dword v[0:1], v2
 ; GCN2-NEXT:    s_endpgm
 ;
@@ -3513,17 +3465,17 @@
 ; GCN3-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN3-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN3-NEXT:    v_mov_b32_e32 v2, s8
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN3-NEXT:    flat_atomic_umin v2, v[0:1], v2 glc
-; GCN3-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN3-NEXT:    buffer_wbinvl1_vol
+; GCN3-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN3-NEXT:    v_mov_b32_e32 v0, s6
 ; GCN3-NEXT:    v_mov_b32_e32 v1, s7
+; GCN3-NEXT:    s_waitcnt vmcnt(0)
 ; GCN3-NEXT:    flat_store_dword v[0:1], v2
 ; GCN3-NEXT:    s_endpgm
 entry:
   %ptr = getelementptr i32, i32* %out, i64 %index
-  %val = atomicrmw volatile umin i32* %ptr, i32 %in seq_cst
+  %val = atomicrmw volatile umin i32* %ptr, i32 %in syncscope("workgroup") seq_cst
   store i32 %val, i32* %out2
   ret void
 }
diff --git a/llvm/test/CodeGen/AMDGPU/flat_atomics_i64.ll b/llvm/test/CodeGen/AMDGPU/flat_atomics_i64.ll
--- a/llvm/test/CodeGen/AMDGPU/flat_atomics_i64.ll
+++ b/llvm/test/CodeGen/AMDGPU/flat_atomics_i64.ll
@@ -1057,10 +1057,9 @@
 ; GCN1-NEXT:    v_mov_b32_e32 v0, s2
 ; GCN1-NEXT:    v_mov_b32_e32 v1, s3
 ; GCN1-NEXT:    v_mov_b32_e32 v2, s0
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT:    flat_atomic_smax_x2 v[2:3], v[0:1]
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT:    buffer_wbinvl1_vol
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT:    s_endpgm
 ;
 ; GCN2-LABEL: atomic_max_i64_offset:
@@ -1073,14 +1072,13 @@
 ; GCN2-NEXT:    v_mov_b32_e32 v0, s2
 ; GCN2-NEXT:    v_mov_b32_e32 v1, s3
 ; GCN2-NEXT:    v_mov_b32_e32 v2, s0
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT:    flat_atomic_smax_x2 v[2:3], v[0:1]
-; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT:    buffer_wbinvl1_vol
+; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT:    s_endpgm
 entry:
   %gep = getelementptr i64, i64* %out, i64 4
-  %tmp0 = atomicrmw volatile max i64* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile max i64* %gep, i64 %in syncscope("workgroup") seq_cst
   ret void
 }
 
@@ -1096,12 +1094,12 @@
 ; GCN1-NEXT:    v_mov_b32_e32 v3, s1
 ; GCN1-NEXT:    v_mov_b32_e32 v1, s5
 ; GCN1-NEXT:    v_mov_b32_e32 v2, s0
-; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT:    flat_atomic_smax_x2 v[0:1], v[2:3], v[0:1] glc
-; GCN1-NEXT:    s_waitcnt
lgkmcnt(0) ; GCN1-NEXT: v_mov_b32_e32 v2, s2 ; GCN1-NEXT: v_mov_b32_e32 v3, s3 +; GCN1-NEXT: s_waitcnt vmcnt(0) ; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1] ; GCN1-NEXT: s_endpgm ; @@ -1116,17 +1114,17 @@ ; GCN2-NEXT: v_mov_b32_e32 v3, s1 ; GCN2-NEXT: v_mov_b32_e32 v1, s5 ; GCN2-NEXT: v_mov_b32_e32 v2, s0 -; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: s_waitcnt lgkmcnt(0) ; GCN2-NEXT: flat_atomic_smax_x2 v[0:1], v[2:3], v[0:1] glc -; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) -; GCN2-NEXT: buffer_wbinvl1_vol +; GCN2-NEXT: s_waitcnt lgkmcnt(0) ; GCN2-NEXT: v_mov_b32_e32 v2, s2 ; GCN2-NEXT: v_mov_b32_e32 v3, s3 +; GCN2-NEXT: s_waitcnt vmcnt(0) ; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1] ; GCN2-NEXT: s_endpgm entry: %gep = getelementptr i64, i64* %out, i64 4 - %tmp0 = atomicrmw volatile max i64* %gep, i64 %in seq_cst + %tmp0 = atomicrmw volatile max i64* %gep, i64 %in syncscope("workgroup") seq_cst store i64 %tmp0, i64* %out2 ret void } @@ -1146,10 +1144,9 @@ ; GCN1-NEXT: v_mov_b32_e32 v3, s1 ; GCN1-NEXT: v_mov_b32_e32 v1, s7 ; GCN1-NEXT: v_mov_b32_e32 v2, s0 -; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: s_waitcnt lgkmcnt(0) ; GCN1-NEXT: flat_atomic_smax_x2 v[2:3], v[0:1] -; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) -; GCN1-NEXT: buffer_wbinvl1_vol +; GCN1-NEXT: s_waitcnt lgkmcnt(0) ; GCN1-NEXT: s_endpgm ; ; GCN2-LABEL: atomic_max_i64_addr64_offset: @@ -1166,15 +1163,14 @@ ; GCN2-NEXT: v_mov_b32_e32 v3, s1 ; GCN2-NEXT: v_mov_b32_e32 v1, s7 ; GCN2-NEXT: v_mov_b32_e32 v2, s0 -; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: s_waitcnt lgkmcnt(0) ; GCN2-NEXT: flat_atomic_smax_x2 v[2:3], v[0:1] -; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) -; GCN2-NEXT: buffer_wbinvl1_vol +; GCN2-NEXT: s_waitcnt lgkmcnt(0) ; GCN2-NEXT: s_endpgm entry: %ptr = getelementptr i64, i64* %out, i64 %index %gep = getelementptr i64, i64* %ptr, i64 4 - %tmp0 = atomicrmw volatile max i64* %gep, i64 %in seq_cst + %tmp0 = atomicrmw volatile max i64* %gep, i64 %in syncscope("workgroup") seq_cst ret void } @@ -1192,12 +1188,12 @@ ; GCN1-NEXT: s_addc_u32 s1, s1, 0 ; GCN1-NEXT: v_mov_b32_e32 v3, s1 ; GCN1-NEXT: v_mov_b32_e32 v2, s0 -; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: s_waitcnt lgkmcnt(0) ; GCN1-NEXT: flat_atomic_smax_x2 v[0:1], v[2:3], v[0:1] glc -; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) -; GCN1-NEXT: buffer_wbinvl1_vol +; GCN1-NEXT: s_waitcnt lgkmcnt(0) ; GCN1-NEXT: v_mov_b32_e32 v2, s2 ; GCN1-NEXT: v_mov_b32_e32 v3, s3 +; GCN1-NEXT: s_waitcnt vmcnt(0) ; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1] ; GCN1-NEXT: s_endpgm ; @@ -1214,18 +1210,18 @@ ; GCN2-NEXT: s_addc_u32 s1, s1, 0 ; GCN2-NEXT: v_mov_b32_e32 v3, s1 ; GCN2-NEXT: v_mov_b32_e32 v2, s0 -; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: s_waitcnt lgkmcnt(0) ; GCN2-NEXT: flat_atomic_smax_x2 v[0:1], v[2:3], v[0:1] glc -; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) -; GCN2-NEXT: buffer_wbinvl1_vol +; GCN2-NEXT: s_waitcnt lgkmcnt(0) ; GCN2-NEXT: v_mov_b32_e32 v2, s2 ; GCN2-NEXT: v_mov_b32_e32 v3, s3 +; GCN2-NEXT: s_waitcnt vmcnt(0) ; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1] ; GCN2-NEXT: s_endpgm entry: %ptr = getelementptr i64, i64* %out, i64 %index %gep = getelementptr i64, i64* %ptr, i64 4 - %tmp0 = atomicrmw volatile max i64* %gep, i64 %in seq_cst + %tmp0 = atomicrmw volatile max i64* %gep, i64 %in syncscope("workgroup") seq_cst store i64 %tmp0, i64* %out2 ret void } @@ -1239,10 +1235,9 @@ ; GCN1-NEXT: v_mov_b32_e32 v1, s1 ; GCN1-NEXT: v_mov_b32_e32 v2, s2 ; GCN1-NEXT: v_mov_b32_e32 v3, s3 -; GCN1-NEXT: 
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT: flat_atomic_smax_x2 v[0:1], v[2:3]
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT: s_endpgm
 ;
 ; GCN2-LABEL: atomic_max_i64:
@@ -1253,13 +1248,12 @@
 ; GCN2-NEXT: v_mov_b32_e32 v1, s1
 ; GCN2-NEXT: v_mov_b32_e32 v2, s2
 ; GCN2-NEXT: v_mov_b32_e32 v3, s3
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT: flat_atomic_smax_x2 v[0:1], v[2:3]
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT: s_endpgm
 entry:
-  %tmp0 = atomicrmw volatile max i64* %out, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile max i64* %out, i64 %in syncscope("workgroup") seq_cst
   ret void
 }
@@ -1273,12 +1267,12 @@
 ; GCN1-NEXT: v_mov_b32_e32 v1, s5
 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT: flat_atomic_smax_x2 v[0:1], v[0:1], v[2:3] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT: v_mov_b32_e32 v2, s6
 ; GCN1-NEXT: v_mov_b32_e32 v3, s7
+; GCN1-NEXT: s_waitcnt vmcnt(0)
 ; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
 ; GCN1-NEXT: s_endpgm
 ;
@@ -1291,16 +1285,16 @@
 ; GCN2-NEXT: v_mov_b32_e32 v1, s5
 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT: flat_atomic_smax_x2 v[0:1], v[0:1], v[2:3] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT: v_mov_b32_e32 v2, s6
 ; GCN2-NEXT: v_mov_b32_e32 v3, s7
+; GCN2-NEXT: s_waitcnt vmcnt(0)
 ; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
 ; GCN2-NEXT: s_endpgm
 entry:
-  %tmp0 = atomicrmw volatile max i64* %out, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile max i64* %out, i64 %in syncscope("workgroup") seq_cst
   store i64 %tmp0, i64* %out2
   ret void
 }
@@ -1318,10 +1312,9 @@
 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
 ; GCN1-NEXT: v_mov_b32_e32 v1, s7
 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT: flat_atomic_smax_x2 v[2:3], v[0:1]
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT: s_endpgm
 ;
 ; GCN2-LABEL: atomic_max_i64_addr64:
@@ -1336,14 +1329,13 @@
 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
 ; GCN2-NEXT: v_mov_b32_e32 v1, s7
 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT: flat_atomic_smax_x2 v[2:3], v[0:1]
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT: s_endpgm
 entry:
   %ptr = getelementptr i64, i64* %out, i64 %index
-  %tmp0 = atomicrmw volatile max i64* %ptr, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile max i64* %ptr, i64 %in syncscope("workgroup") seq_cst
   ret void
 }
@@ -1359,12 +1351,12 @@
 ; GCN1-NEXT: s_addc_u32 s1, s1, s5
 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT: flat_atomic_smax_x2 v[0:1], v[2:3], v[0:1] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT: v_mov_b32_e32 v2, s2
 ; GCN1-NEXT: v_mov_b32_e32 v3, s3
+; GCN1-NEXT: s_waitcnt vmcnt(0)
 ; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
 ; GCN1-NEXT: s_endpgm
 ;
@@ -1379,17 +1371,17 @@
 ; GCN2-NEXT: s_addc_u32 s1, s1, s5
 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT: flat_atomic_smax_x2 v[0:1], v[2:3], v[0:1] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT: v_mov_b32_e32 v2, s2
 ; GCN2-NEXT: v_mov_b32_e32 v3, s3
+; GCN2-NEXT: s_waitcnt vmcnt(0)
 ; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
 ; GCN2-NEXT: s_endpgm
 entry:
   %ptr = getelementptr i64, i64* %out, i64 %index
-  %tmp0 = atomicrmw volatile max i64* %ptr, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile max i64* %ptr, i64 %in syncscope("workgroup") seq_cst
   store i64 %tmp0, i64* %out2
   ret void
 }
@@ -1405,10 +1397,9 @@
 ; GCN1-NEXT: v_mov_b32_e32 v0, s2
 ; GCN1-NEXT: v_mov_b32_e32 v1, s3
 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT: flat_atomic_umax_x2 v[2:3], v[0:1]
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT: s_endpgm
 ;
 ; GCN2-LABEL: atomic_umax_i64_offset:
@@ -1421,14 +1412,13 @@
 ; GCN2-NEXT: v_mov_b32_e32 v0, s2
 ; GCN2-NEXT: v_mov_b32_e32 v1, s3
 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT: flat_atomic_umax_x2 v[2:3], v[0:1]
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT: s_endpgm
 entry:
   %gep = getelementptr i64, i64* %out, i64 4
-  %tmp0 = atomicrmw volatile umax i64* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile umax i64* %gep, i64 %in syncscope("workgroup") seq_cst
   ret void
 }
@@ -1444,12 +1434,12 @@
 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
 ; GCN1-NEXT: v_mov_b32_e32 v1, s5
 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT: flat_atomic_umax_x2 v[0:1], v[2:3], v[0:1] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT: v_mov_b32_e32 v2, s2
 ; GCN1-NEXT: v_mov_b32_e32 v3, s3
+; GCN1-NEXT: s_waitcnt vmcnt(0)
 ; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
 ; GCN1-NEXT: s_endpgm
 ;
@@ -1464,17 +1454,17 @@
 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
 ; GCN2-NEXT: v_mov_b32_e32 v1, s5
 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT: flat_atomic_umax_x2 v[0:1], v[2:3], v[0:1] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT: v_mov_b32_e32 v2, s2
 ; GCN2-NEXT: v_mov_b32_e32 v3, s3
+; GCN2-NEXT: s_waitcnt vmcnt(0)
 ; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
 ; GCN2-NEXT: s_endpgm
 entry:
   %gep = getelementptr i64, i64* %out, i64 4
-  %tmp0 = atomicrmw volatile umax i64* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile umax i64* %gep, i64 %in syncscope("workgroup") seq_cst
   store i64 %tmp0, i64* %out2
   ret void
 }
@@ -1494,10 +1484,9 @@
 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
 ; GCN1-NEXT: v_mov_b32_e32 v1, s7
 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT: flat_atomic_umax_x2 v[2:3], v[0:1]
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT: s_endpgm
 ;
 ; GCN2-LABEL: atomic_umax_i64_addr64_offset:
@@ -1514,15 +1503,14 @@
 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
 ; GCN2-NEXT: v_mov_b32_e32 v1, s7
 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT: flat_atomic_umax_x2 v[2:3], v[0:1]
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT: s_endpgm
 entry:
   %ptr = getelementptr i64, i64* %out, i64 %index
   %gep = getelementptr i64, i64* %ptr, i64 4
-  %tmp0 = atomicrmw volatile umax i64* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile umax i64* %gep, i64 %in syncscope("workgroup") seq_cst
   ret void
 }
@@ -1540,12 +1528,12 @@
 ; GCN1-NEXT: s_addc_u32 s1, s1, 0
 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT: flat_atomic_umax_x2 v[0:1], v[2:3], v[0:1] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT: v_mov_b32_e32 v2, s2
 ; GCN1-NEXT: v_mov_b32_e32 v3, s3
+; GCN1-NEXT: s_waitcnt vmcnt(0)
 ; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
 ; GCN1-NEXT: s_endpgm
 ;
@@ -1562,18 +1550,18 @@
 ; GCN2-NEXT: s_addc_u32 s1, s1, 0
 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT: flat_atomic_umax_x2 v[0:1], v[2:3], v[0:1] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT: v_mov_b32_e32 v2, s2
 ; GCN2-NEXT: v_mov_b32_e32 v3, s3
+; GCN2-NEXT: s_waitcnt vmcnt(0)
 ; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
 ; GCN2-NEXT: s_endpgm
 entry:
   %ptr = getelementptr i64, i64* %out, i64 %index
   %gep = getelementptr i64, i64* %ptr, i64 4
-  %tmp0 = atomicrmw volatile umax i64* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile umax i64* %gep, i64 %in syncscope("workgroup") seq_cst
   store i64 %tmp0, i64* %out2
   ret void
 }
@@ -1587,10 +1575,9 @@
 ; GCN1-NEXT: v_mov_b32_e32 v1, s1
 ; GCN1-NEXT: v_mov_b32_e32 v2, s2
 ; GCN1-NEXT: v_mov_b32_e32 v3, s3
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT: flat_atomic_umax_x2 v[0:1], v[2:3]
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT: s_endpgm
 ;
 ; GCN2-LABEL: atomic_umax_i64:
@@ -1601,13 +1588,12 @@
 ; GCN2-NEXT: v_mov_b32_e32 v1, s1
 ; GCN2-NEXT: v_mov_b32_e32 v2, s2
 ; GCN2-NEXT: v_mov_b32_e32 v3, s3
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT: flat_atomic_umax_x2 v[0:1], v[2:3]
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT: s_endpgm
 entry:
-  %tmp0 = atomicrmw volatile umax i64* %out, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile umax i64* %out, i64 %in syncscope("workgroup") seq_cst
   ret void
 }
@@ -1621,12 +1607,12 @@
 ; GCN1-NEXT: v_mov_b32_e32 v1, s5
 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT: flat_atomic_umax_x2 v[0:1], v[0:1], v[2:3] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT: v_mov_b32_e32 v2, s6
 ; GCN1-NEXT: v_mov_b32_e32 v3, s7
+; GCN1-NEXT: s_waitcnt vmcnt(0)
 ; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
 ; GCN1-NEXT: s_endpgm
 ;
@@ -1639,16 +1625,16 @@
 ; GCN2-NEXT: v_mov_b32_e32 v1, s5
 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT: flat_atomic_umax_x2 v[0:1], v[0:1], v[2:3] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT: v_mov_b32_e32 v2, s6
 ; GCN2-NEXT: v_mov_b32_e32 v3, s7
+; GCN2-NEXT: s_waitcnt vmcnt(0)
 ; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
 ; GCN2-NEXT: s_endpgm
 entry:
-  %tmp0 = atomicrmw volatile umax i64* %out, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile umax i64* %out, i64 %in syncscope("workgroup") seq_cst
   store i64 %tmp0, i64* %out2
   ret void
 }
@@ -1666,10 +1652,9 @@
 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
 ; GCN1-NEXT: v_mov_b32_e32 v1, s7
 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT: flat_atomic_umax_x2 v[2:3], v[0:1]
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT: s_endpgm
 ;
 ; GCN2-LABEL: atomic_umax_i64_addr64:
@@ -1684,14 +1669,13 @@
 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
 ; GCN2-NEXT: v_mov_b32_e32 v1, s7
 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT: flat_atomic_umax_x2 v[2:3], v[0:1]
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT: s_endpgm
 entry:
   %ptr = getelementptr i64, i64* %out, i64 %index
-  %tmp0 = atomicrmw volatile umax i64* %ptr, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile umax i64* %ptr, i64 %in syncscope("workgroup") seq_cst
   ret void
 }
@@ -1707,12 +1691,12 @@
 ; GCN1-NEXT: s_addc_u32 s1, s1, s5
 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT: flat_atomic_umax_x2 v[0:1], v[2:3], v[0:1] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT: v_mov_b32_e32 v2, s2
 ; GCN1-NEXT: v_mov_b32_e32 v3, s3
+; GCN1-NEXT: s_waitcnt vmcnt(0)
 ; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
 ; GCN1-NEXT: s_endpgm
 ;
@@ -1727,17 +1711,17 @@
 ; GCN2-NEXT: s_addc_u32 s1, s1, s5
 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT: flat_atomic_umax_x2 v[0:1], v[2:3], v[0:1] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT: v_mov_b32_e32 v2, s2
 ; GCN2-NEXT: v_mov_b32_e32 v3, s3
+; GCN2-NEXT: s_waitcnt vmcnt(0)
 ; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
 ; GCN2-NEXT: s_endpgm
 entry:
   %ptr = getelementptr i64, i64* %out, i64 %index
-  %tmp0 = atomicrmw volatile umax i64* %ptr, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile umax i64* %ptr, i64 %in syncscope("workgroup") seq_cst
   store i64 %tmp0, i64* %out2
   ret void
 }
@@ -1753,10 +1737,9 @@
 ; GCN1-NEXT: v_mov_b32_e32 v0, s2
 ; GCN1-NEXT: v_mov_b32_e32 v1, s3
 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT: flat_atomic_smin_x2 v[2:3], v[0:1]
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT: s_endpgm
 ;
 ; GCN2-LABEL: atomic_min_i64_offset:
@@ -1769,14 +1752,13 @@
 ; GCN2-NEXT: v_mov_b32_e32 v0, s2
 ; GCN2-NEXT: v_mov_b32_e32 v1, s3
 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT: flat_atomic_smin_x2 v[2:3], v[0:1]
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT: s_endpgm
 entry:
   %gep = getelementptr i64, i64* %out, i64 4
-  %tmp0 = atomicrmw volatile min i64* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile min i64* %gep, i64 %in syncscope("workgroup") seq_cst
   ret void
 }
@@ -1792,12 +1774,12 @@
 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
 ; GCN1-NEXT: v_mov_b32_e32 v1, s5
 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT: flat_atomic_smin_x2 v[0:1], v[2:3], v[0:1] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT: v_mov_b32_e32 v2, s2
 ; GCN1-NEXT: v_mov_b32_e32 v3, s3
+; GCN1-NEXT: s_waitcnt vmcnt(0)
 ; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
 ; GCN1-NEXT: s_endpgm
 ;
@@ -1812,17 +1794,17 @@
 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
 ; GCN2-NEXT: v_mov_b32_e32 v1, s5
 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT: flat_atomic_smin_x2 v[0:1], v[2:3], v[0:1] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT: v_mov_b32_e32 v2, s2
 ; GCN2-NEXT: v_mov_b32_e32 v3, s3
+; GCN2-NEXT: s_waitcnt vmcnt(0)
 ; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
 ; GCN2-NEXT: s_endpgm
 entry:
   %gep = getelementptr i64, i64* %out, i64 4
-  %tmp0 = atomicrmw volatile min i64* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile min i64* %gep, i64 %in syncscope("workgroup") seq_cst
   store i64 %tmp0, i64* %out2
   ret void
 }
@@ -1842,10 +1824,9 @@
 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
 ; GCN1-NEXT: v_mov_b32_e32 v1, s7
 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT: flat_atomic_smin_x2 v[2:3], v[0:1]
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT: s_endpgm
 ;
 ; GCN2-LABEL: atomic_min_i64_addr64_offset:
@@ -1862,15 +1843,14 @@
 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
 ; GCN2-NEXT: v_mov_b32_e32 v1, s7
 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT: flat_atomic_smin_x2 v[2:3], v[0:1]
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT: s_endpgm
 entry:
   %ptr = getelementptr i64, i64* %out, i64 %index
   %gep = getelementptr i64, i64* %ptr, i64 4
-  %tmp0 = atomicrmw volatile min i64* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile min i64* %gep, i64 %in syncscope("workgroup") seq_cst
   ret void
 }
@@ -1888,12 +1868,12 @@
 ; GCN1-NEXT: s_addc_u32 s1, s1, 0
 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT: flat_atomic_smin_x2 v[0:1], v[2:3], v[0:1] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT: v_mov_b32_e32 v2, s2
 ; GCN1-NEXT: v_mov_b32_e32 v3, s3
+; GCN1-NEXT: s_waitcnt vmcnt(0)
 ; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
 ; GCN1-NEXT: s_endpgm
 ;
@@ -1910,18 +1890,18 @@
 ; GCN2-NEXT: s_addc_u32 s1, s1, 0
 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT: flat_atomic_smin_x2 v[0:1], v[2:3], v[0:1] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT: v_mov_b32_e32 v2, s2
 ; GCN2-NEXT: v_mov_b32_e32 v3, s3
+; GCN2-NEXT: s_waitcnt vmcnt(0)
 ; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
 ; GCN2-NEXT: s_endpgm
 entry:
   %ptr = getelementptr i64, i64* %out, i64 %index
   %gep = getelementptr i64, i64* %ptr, i64 4
-  %tmp0 = atomicrmw volatile min i64* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile min i64* %gep, i64 %in syncscope("workgroup") seq_cst
   store i64 %tmp0, i64* %out2
   ret void
 }
@@ -1935,10 +1915,9 @@
 ; GCN1-NEXT: v_mov_b32_e32 v1, s1
 ; GCN1-NEXT: v_mov_b32_e32 v2, s2
 ; GCN1-NEXT: v_mov_b32_e32 v3, s3
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT: flat_atomic_smin_x2 v[0:1], v[2:3]
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT: s_endpgm
 ;
 ; GCN2-LABEL: atomic_min_i64:
@@ -1949,13 +1928,12 @@
 ; GCN2-NEXT: v_mov_b32_e32 v1, s1
 ; GCN2-NEXT: v_mov_b32_e32 v2, s2
 ; GCN2-NEXT: v_mov_b32_e32 v3, s3
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT: flat_atomic_smin_x2 v[0:1], v[2:3]
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT: s_endpgm
 entry:
-  %tmp0 = atomicrmw volatile min i64* %out, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile min i64* %out, i64 %in syncscope("workgroup") seq_cst
   ret void
 }
@@ -1969,12 +1947,12 @@
 ; GCN1-NEXT: v_mov_b32_e32 v1, s5
 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT: flat_atomic_smin_x2 v[0:1], v[0:1], v[2:3] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT: v_mov_b32_e32 v2, s6
 ; GCN1-NEXT: v_mov_b32_e32 v3, s7
+; GCN1-NEXT: s_waitcnt vmcnt(0)
 ; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
 ; GCN1-NEXT: s_endpgm
 ;
@@ -1987,16 +1965,16 @@
 ; GCN2-NEXT: v_mov_b32_e32 v1, s5
 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT: flat_atomic_smin_x2 v[0:1], v[0:1], v[2:3] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT: v_mov_b32_e32 v2, s6
 ; GCN2-NEXT: v_mov_b32_e32 v3, s7
+; GCN2-NEXT: s_waitcnt vmcnt(0)
 ; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
 ; GCN2-NEXT: s_endpgm
 entry:
-  %tmp0 = atomicrmw volatile min i64* %out, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile min i64* %out, i64 %in syncscope("workgroup") seq_cst
   store i64 %tmp0, i64* %out2
   ret void
 }
@@ -2014,10 +1992,9 @@
 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
 ; GCN1-NEXT: v_mov_b32_e32 v1, s7
 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT: flat_atomic_smin_x2 v[2:3], v[0:1]
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT: s_endpgm
 ;
 ; GCN2-LABEL: atomic_min_i64_addr64:
@@ -2032,14 +2009,13 @@
 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
 ; GCN2-NEXT: v_mov_b32_e32 v1, s7
 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT: flat_atomic_smin_x2 v[2:3], v[0:1]
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT: s_endpgm
 entry:
   %ptr = getelementptr i64, i64* %out, i64 %index
-  %tmp0 = atomicrmw volatile min i64* %ptr, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile min i64* %ptr, i64 %in syncscope("workgroup") seq_cst
   ret void
 }
@@ -2055,12 +2031,12 @@
 ; GCN1-NEXT: s_addc_u32 s1, s1, s5
 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT: flat_atomic_smin_x2 v[0:1], v[2:3], v[0:1] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT: v_mov_b32_e32 v2, s2
 ; GCN1-NEXT: v_mov_b32_e32 v3, s3
+; GCN1-NEXT: s_waitcnt vmcnt(0)
 ; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
 ; GCN1-NEXT: s_endpgm
 ;
@@ -2075,17 +2051,17 @@
 ; GCN2-NEXT: s_addc_u32 s1, s1, s5
 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT: flat_atomic_smin_x2 v[0:1], v[2:3], v[0:1] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT: v_mov_b32_e32 v2, s2
 ; GCN2-NEXT: v_mov_b32_e32 v3, s3
+; GCN2-NEXT: s_waitcnt vmcnt(0)
 ; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
 ; GCN2-NEXT: s_endpgm
 entry:
   %ptr = getelementptr i64, i64* %out, i64 %index
-  %tmp0 = atomicrmw volatile min i64* %ptr, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile min i64* %ptr, i64 %in syncscope("workgroup") seq_cst
   store i64 %tmp0, i64* %out2
   ret void
 }
@@ -2101,10 +2077,9 @@
 ; GCN1-NEXT: v_mov_b32_e32 v0, s2
 ; GCN1-NEXT: v_mov_b32_e32 v1, s3
 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT: flat_atomic_umin_x2 v[2:3], v[0:1]
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT: s_endpgm
 ;
 ; GCN2-LABEL: atomic_umin_i64_offset:
@@ -2117,14 +2092,13 @@
 ; GCN2-NEXT: v_mov_b32_e32 v0, s2
 ; GCN2-NEXT: v_mov_b32_e32 v1, s3
 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT: flat_atomic_umin_x2 v[2:3], v[0:1]
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT: s_endpgm
 entry:
   %gep = getelementptr i64, i64* %out, i64 4
-  %tmp0 = atomicrmw volatile umin i64* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile umin i64* %gep, i64 %in syncscope("workgroup") seq_cst
   ret void
 }
@@ -2140,12 +2114,12 @@
 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
 ; GCN1-NEXT: v_mov_b32_e32 v1, s5
 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT: flat_atomic_umin_x2 v[0:1], v[2:3], v[0:1] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT: v_mov_b32_e32 v2, s2
 ; GCN1-NEXT: v_mov_b32_e32 v3, s3
+; GCN1-NEXT: s_waitcnt vmcnt(0)
 ; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
 ; GCN1-NEXT: s_endpgm
 ;
@@ -2160,17 +2134,17 @@
 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
 ; GCN2-NEXT: v_mov_b32_e32 v1, s5
 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT: flat_atomic_umin_x2 v[0:1], v[2:3], v[0:1] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT: v_mov_b32_e32 v2, s2
 ; GCN2-NEXT: v_mov_b32_e32 v3, s3
+; GCN2-NEXT: s_waitcnt vmcnt(0)
 ; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
 ; GCN2-NEXT: s_endpgm
 entry:
   %gep = getelementptr i64, i64* %out, i64 4
-  %tmp0 = atomicrmw volatile umin i64* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile umin i64* %gep, i64 %in syncscope("workgroup") seq_cst
   store i64 %tmp0, i64* %out2
   ret void
 }
@@ -2190,10 +2164,9 @@
 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
 ; GCN1-NEXT: v_mov_b32_e32 v1, s7
 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT: flat_atomic_umin_x2 v[2:3], v[0:1]
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT: s_endpgm
 ;
 ; GCN2-LABEL: atomic_umin_i64_addr64_offset:
@@ -2210,15 +2183,14 @@
 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
 ; GCN2-NEXT: v_mov_b32_e32 v1, s7
 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT: flat_atomic_umin_x2 v[2:3], v[0:1]
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT: s_endpgm
 entry:
   %ptr = getelementptr i64, i64* %out, i64 %index
   %gep = getelementptr i64, i64* %ptr, i64 4
-  %tmp0 = atomicrmw volatile umin i64* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile umin i64* %gep, i64 %in syncscope("workgroup") seq_cst
   ret void
 }
@@ -2236,12 +2208,12 @@
 ; GCN1-NEXT: s_addc_u32 s1, s1, 0
 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT: flat_atomic_umin_x2 v[0:1], v[2:3], v[0:1] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT: v_mov_b32_e32 v2, s2
 ; GCN1-NEXT: v_mov_b32_e32 v3, s3
+; GCN1-NEXT: s_waitcnt vmcnt(0)
 ; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
 ; GCN1-NEXT: s_endpgm
 ;
@@ -2258,18 +2230,18 @@
 ; GCN2-NEXT: s_addc_u32 s1, s1, 0
 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT: flat_atomic_umin_x2 v[0:1], v[2:3], v[0:1] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT: v_mov_b32_e32 v2, s2
 ; GCN2-NEXT: v_mov_b32_e32 v3, s3
+; GCN2-NEXT: s_waitcnt vmcnt(0)
 ; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
 ; GCN2-NEXT: s_endpgm
 entry:
   %ptr = getelementptr i64, i64* %out, i64 %index
   %gep = getelementptr i64, i64* %ptr, i64 4
-  %tmp0 = atomicrmw volatile umin i64* %gep, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile umin i64* %gep, i64 %in syncscope("workgroup") seq_cst
   store i64 %tmp0, i64* %out2
   ret void
 }
@@ -2283,10 +2255,9 @@
 ; GCN1-NEXT: v_mov_b32_e32 v1, s1
 ; GCN1-NEXT: v_mov_b32_e32 v2, s2
 ; GCN1-NEXT: v_mov_b32_e32 v3, s3
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT: flat_atomic_umin_x2 v[0:1], v[2:3]
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT: s_endpgm
 ;
 ; GCN2-LABEL: atomic_umin_i64:
@@ -2297,13 +2268,12 @@
 ; GCN2-NEXT: v_mov_b32_e32 v1, s1
 ; GCN2-NEXT: v_mov_b32_e32 v2, s2
 ; GCN2-NEXT: v_mov_b32_e32 v3, s3
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT: flat_atomic_umin_x2 v[0:1], v[2:3]
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT: s_endpgm
 entry:
-  %tmp0 = atomicrmw volatile umin i64* %out, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile umin i64* %out, i64 %in syncscope("workgroup") seq_cst
   ret void
 }
@@ -2317,12 +2287,12 @@
 ; GCN1-NEXT: v_mov_b32_e32 v1, s5
 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT: flat_atomic_umin_x2 v[0:1], v[0:1], v[2:3] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT: v_mov_b32_e32 v2, s6
 ; GCN1-NEXT: v_mov_b32_e32 v3, s7
+; GCN1-NEXT: s_waitcnt vmcnt(0)
 ; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
 ; GCN1-NEXT: s_endpgm
 ;
@@ -2335,16 +2305,16 @@
 ; GCN2-NEXT: v_mov_b32_e32 v1, s5
 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT: flat_atomic_umin_x2 v[0:1], v[0:1], v[2:3] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT: v_mov_b32_e32 v2, s6
 ; GCN2-NEXT: v_mov_b32_e32 v3, s7
+; GCN2-NEXT: s_waitcnt vmcnt(0)
 ; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
 ; GCN2-NEXT: s_endpgm
 entry:
-  %tmp0 = atomicrmw volatile umin i64* %out, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile umin i64* %out, i64 %in syncscope("workgroup") seq_cst
   store i64 %tmp0, i64* %out2
   ret void
 }
@@ -2362,10 +2332,9 @@
 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
 ; GCN1-NEXT: v_mov_b32_e32 v1, s7
 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT: flat_atomic_umin_x2 v[2:3], v[0:1]
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT: s_endpgm
 ;
 ; GCN2-LABEL: atomic_umin_i64_addr64:
@@ -2380,14 +2349,13 @@
 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
 ; GCN2-NEXT: v_mov_b32_e32 v1, s7
 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT: flat_atomic_umin_x2 v[2:3], v[0:1]
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT: s_endpgm
 entry:
   %ptr = getelementptr i64, i64* %out, i64 %index
-  %tmp0 = atomicrmw volatile umin i64* %ptr, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile umin i64* %ptr, i64 %in syncscope("workgroup") seq_cst
   ret void
 }
@@ -2403,12 +2371,12 @@
 ; GCN1-NEXT: s_addc_u32 s1, s1, s5
 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT: flat_atomic_umin_x2 v[0:1], v[2:3], v[0:1] glc
-; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN1-NEXT: v_mov_b32_e32 v2, s2
 ; GCN1-NEXT: v_mov_b32_e32 v3, s3
+; GCN1-NEXT: s_waitcnt vmcnt(0)
 ; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
 ; GCN1-NEXT: s_endpgm
 ;
@@ -2423,17 +2391,17 @@
 ; GCN2-NEXT: s_addc_u32 s1, s1, s5
 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT: flat_atomic_umin_x2 v[0:1], v[2:3], v[0:1] glc
-; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
 ; GCN2-NEXT: v_mov_b32_e32 v2, s2
 ; GCN2-NEXT: v_mov_b32_e32 v3, s3
+; GCN2-NEXT: s_waitcnt vmcnt(0)
 ; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
 ; GCN2-NEXT: s_endpgm
 entry:
   %ptr = getelementptr i64, i64* %out, i64 %index
-  %tmp0 = atomicrmw volatile umin i64* %ptr, i64 %in seq_cst
+  %tmp0 = atomicrmw volatile umin i64* %ptr, i64 %in syncscope("workgroup") seq_cst
   store i64 %tmp0, i64* %out2
   ret void
 }
diff --git a/llvm/test/CodeGen/AMDGPU/flat_atomics_i64_min_max_system.ll b/llvm/test/CodeGen/AMDGPU/flat_atomics_i64_min_max_system.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/flat_atomics_i64_min_max_system.ll
@@ -0,0 +1,2515 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=GCN1 %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN2 %s
+
+define amdgpu_kernel void @atomic_max_i64_offset(i64* %out, i64 %in) {
+; GCN1-LABEL: atomic_max_i64_offset:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN1-NEXT: s_mov_b64 s[4:5], 0
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: s_add_u32 s0, s0, 32
+; GCN1-NEXT: s_addc_u32 s1, s1, 0
+; GCN1-NEXT: v_mov_b32_e32 v0, s0
+; GCN1-NEXT: v_mov_b32_e32 v1, s1
+; GCN1-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GCN1-NEXT: .LBB0_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_cmp_lt_i64_e32 vcc, s[2:3], v[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v0, s3
+; GCN1-NEXT: v_mov_b32_e32 v6, s2
+; GCN1-NEXT: v_mov_b32_e32 v5, s1
+; GCN1-NEXT: v_mov_b32_e32 v4, s0
+; GCN1-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v3, v1
+; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT: v_mov_b32_e32 v2, v0
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT: s_cbranch_execnz .LBB0_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_max_i64_offset:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GCN2-NEXT: s_mov_b64 s[4:5], 0
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: s_add_u32 s0, s0, 32
+; GCN2-NEXT: s_addc_u32 s1, s1, 0
+; GCN2-NEXT: v_mov_b32_e32 v0, s0
+; GCN2-NEXT: v_mov_b32_e32 v1, s1
+; GCN2-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GCN2-NEXT: .LBB0_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_cmp_lt_i64_e32 vcc, s[2:3], v[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v0, s3
+; GCN2-NEXT: v_mov_b32_e32 v6, s2
+; GCN2-NEXT: v_mov_b32_e32 v5, s1
+; GCN2-NEXT: v_mov_b32_e32 v4, s0
+; GCN2-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN2-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v3, v1
+; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT: v_mov_b32_e32 v2, v0
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT: s_cbranch_execnz .LBB0_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_endpgm
+entry:
+  %gep = getelementptr i64, i64* %out, i64 4
+  %tmp0 = atomicrmw volatile max i64* %gep, i64 %in seq_cst
+  ret void
+}
+
+define amdgpu_kernel void @atomic_max_i64_ret_offset(i64* %out, i64* %out2, i64 %in) {
+; GCN1-LABEL: atomic_max_i64_ret_offset:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: s_add_u32 s2, s4, 32
+; GCN1-NEXT: s_addc_u32 s3, s5, 0
+; GCN1-NEXT: v_mov_b32_e32 v0, s2
+; GCN1-NEXT: v_mov_b32_e32 v1, s3
+; GCN1-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
+; GCN1-NEXT: s_mov_b64 s[4:5], 0
+; GCN1-NEXT: .LBB1_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_mov_b32_e32 v3, v1
+; GCN1-NEXT: v_mov_b32_e32 v2, v0
+; GCN1-NEXT: v_cmp_lt_i64_e32 vcc, s[0:1], v[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v0, s1
+; GCN1-NEXT: v_mov_b32_e32 v6, s0
+; GCN1-NEXT: v_mov_b32_e32 v5, s3
+; GCN1-NEXT: v_mov_b32_e32 v4, s2
+; GCN1-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT: s_cbranch_execnz .LBB1_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_or_b64 exec, exec, s[4:5]
+; GCN1-NEXT: v_mov_b32_e32 v2, s6
+; GCN1-NEXT: v_mov_b32_e32 v3, s7
+; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_max_i64_ret_offset:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN2-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: s_add_u32 s2, s4, 32
+; GCN2-NEXT: s_addc_u32 s3, s5, 0
+; GCN2-NEXT: v_mov_b32_e32 v0, s2
+; GCN2-NEXT: v_mov_b32_e32 v1, s3
+; GCN2-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
+; GCN2-NEXT: s_mov_b64 s[4:5], 0
+; GCN2-NEXT: .LBB1_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_mov_b32_e32 v3, v1
+; GCN2-NEXT: v_mov_b32_e32 v2, v0
+; GCN2-NEXT: v_cmp_lt_i64_e32 vcc, s[0:1], v[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v0, s1
+; GCN2-NEXT: v_mov_b32_e32 v6, s0
+; GCN2-NEXT: v_mov_b32_e32 v5, s3
+; GCN2-NEXT: v_mov_b32_e32 v4, s2
+; GCN2-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN2-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT: s_cbranch_execnz .LBB1_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_or_b64 exec, exec, s[4:5]
+; GCN2-NEXT: v_mov_b32_e32 v2, s6
+; GCN2-NEXT: v_mov_b32_e32 v3, s7
+; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GCN2-NEXT: s_endpgm
+entry:
+  %gep = getelementptr i64, i64* %out, i64 4
+  %tmp0 = atomicrmw volatile max i64* %gep, i64 %in seq_cst
+  store i64 %tmp0, i64* %out2
+  ret void
+}
+
+define amdgpu_kernel void @atomic_max_i64_addr64_offset(i64* %out, i64 %in, i64 %index) {
+; GCN1-LABEL: atomic_max_i64_addr64_offset:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0xd
+; GCN1-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: s_lshl_b64 s[4:5], s[4:5], 3
+; GCN1-NEXT: s_add_u32 s0, s0, s4
+; GCN1-NEXT: s_addc_u32 s1, s1, s5
+; GCN1-NEXT: s_add_u32 s0, s0, 32
+; GCN1-NEXT: s_addc_u32 s1, s1, 0
+; GCN1-NEXT: v_mov_b32_e32 v0, s0
+; GCN1-NEXT: v_mov_b32_e32 v1, s1
+; GCN1-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GCN1-NEXT: s_mov_b64 s[4:5], 0
+; GCN1-NEXT: .LBB2_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_cmp_lt_i64_e32 vcc, s[2:3], v[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v0, s3
+; GCN1-NEXT: v_mov_b32_e32 v6, s2
+; GCN1-NEXT: v_mov_b32_e32 v5, s1
+; GCN1-NEXT: v_mov_b32_e32 v4, s0
+; GCN1-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v3, v1
+; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT: v_mov_b32_e32 v2, v0
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT: s_cbranch_execnz .LBB2_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_max_i64_addr64_offset:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x34
+; GCN2-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: s_lshl_b64 s[4:5], s[4:5], 3
+; GCN2-NEXT: s_add_u32 s0, s0, s4
+; GCN2-NEXT: s_addc_u32 s1, s1, s5
+; GCN2-NEXT: s_add_u32 s0, s0, 32
+; GCN2-NEXT: s_addc_u32 s1, s1, 0
+; GCN2-NEXT: v_mov_b32_e32 v0, s0
+; GCN2-NEXT: v_mov_b32_e32 v1, s1
+; GCN2-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GCN2-NEXT: s_mov_b64 s[4:5], 0
+; GCN2-NEXT: .LBB2_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_cmp_lt_i64_e32 vcc, s[2:3], v[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v0, s3
+; GCN2-NEXT: v_mov_b32_e32 v6, s2
+; GCN2-NEXT: v_mov_b32_e32 v5, s1
+; GCN2-NEXT: v_mov_b32_e32 v4, s0
+; GCN2-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN2-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v3, v1
+; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT: v_mov_b32_e32 v2, v0
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT: s_cbranch_execnz .LBB2_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_endpgm
+entry:
+  %ptr = getelementptr i64, i64* %out, i64 %index
+  %gep = getelementptr i64, i64* %ptr, i64 4
+  %tmp0 = atomicrmw volatile max i64* %gep, i64 %in seq_cst
+  ret void
+}
+
+define amdgpu_kernel void @atomic_max_i64_ret_addr64_offset(i64* %out, i64* %out2, i64 %in, i64 %index) {
+; GCN1-LABEL: atomic_max_i64_ret_addr64_offset:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x9
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; GCN1-NEXT: s_add_u32 s0, s0, s6
+; GCN1-NEXT: s_addc_u32 s1, s1, s7
+; GCN1-NEXT: s_add_u32 s0, s0, 32
+; GCN1-NEXT: s_addc_u32 s1, s1, 0
+; GCN1-NEXT: v_mov_b32_e32 v0, s0
+; GCN1-NEXT: v_mov_b32_e32 v1, s1
+; GCN1-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
+; GCN1-NEXT: s_mov_b64 s[6:7], 0
+; GCN1-NEXT: .LBB3_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_mov_b32_e32 v3, v1
+; GCN1-NEXT: v_mov_b32_e32 v2, v0
+; GCN1-NEXT: v_cmp_lt_i64_e32 vcc, s[4:5], v[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v0, s5
+; GCN1-NEXT: v_mov_b32_e32 v6, s4
+; GCN1-NEXT: v_mov_b32_e32 v5, s1
+; GCN1-NEXT: v_mov_b32_e32 v4, s0
+; GCN1-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[6:7]
+; GCN1-NEXT: s_cbranch_execnz .LBB3_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_or_b64 exec, exec, s[6:7]
+; GCN1-NEXT: v_mov_b32_e32 v2, s2
+; GCN1-NEXT: v_mov_b32_e32 v3, s3
+; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_max_i64_ret_addr64_offset:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; GCN2-NEXT: s_add_u32 s0, s0, s6
+; GCN2-NEXT: s_addc_u32 s1, s1, s7
+; GCN2-NEXT: s_add_u32 s0, s0, 32
+; GCN2-NEXT: s_addc_u32 s1, s1, 0
+; GCN2-NEXT: v_mov_b32_e32 v0, s0
+; GCN2-NEXT: v_mov_b32_e32 v1, s1
+; GCN2-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
+; GCN2-NEXT: s_mov_b64 s[6:7], 0
+; GCN2-NEXT: .LBB3_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_mov_b32_e32 v3, v1
+; GCN2-NEXT: v_mov_b32_e32 v2, v0
+; GCN2-NEXT: v_cmp_lt_i64_e32 vcc, s[4:5], v[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v0, s5
+; GCN2-NEXT: v_mov_b32_e32 v6, s4
+; GCN2-NEXT: v_mov_b32_e32 v5, s1
+; GCN2-NEXT: v_mov_b32_e32 v4, s0
+; GCN2-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN2-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[6:7]
+; GCN2-NEXT: s_cbranch_execnz .LBB3_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_or_b64 exec, exec, s[6:7]
+; GCN2-NEXT: v_mov_b32_e32 v2, s2
+; GCN2-NEXT: v_mov_b32_e32 v3, s3
+; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GCN2-NEXT: s_endpgm
+entry:
+  %ptr = getelementptr i64, i64* %out, i64 %index
+  %gep = getelementptr i64, i64* %ptr, i64 4
+  %tmp0 = atomicrmw volatile max i64* %gep, i64 %in seq_cst
+  store i64 %tmp0, i64* %out2
+  ret void
+}
+
+define amdgpu_kernel void @atomic_max_i64(i64* %out, i64 %in) {
+; GCN1-LABEL: atomic_max_i64:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN1-NEXT: s_mov_b64 s[4:5], 0
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: v_mov_b32_e32 v0, s0
+; GCN1-NEXT: v_mov_b32_e32 v1, s1
+; GCN1-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GCN1-NEXT: .LBB4_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_cmp_lt_i64_e32 vcc, s[2:3], v[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v0, s3
+; GCN1-NEXT: v_mov_b32_e32 v6, s2
+; GCN1-NEXT: v_mov_b32_e32 v5, s1
+; GCN1-NEXT: v_mov_b32_e32 v4, s0
+; GCN1-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v3, v1
+; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT: v_mov_b32_e32 v2, v0
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT: s_cbranch_execnz .LBB4_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_max_i64:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GCN2-NEXT: s_mov_b64 s[4:5], 0
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: v_mov_b32_e32 v0, s0
+; GCN2-NEXT: v_mov_b32_e32 v1, s1
+; GCN2-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GCN2-NEXT: .LBB4_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_cmp_lt_i64_e32 vcc, s[2:3], v[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v0, s3
+; GCN2-NEXT: v_mov_b32_e32 v6, s2
+; GCN2-NEXT: v_mov_b32_e32 v5, s1
+; GCN2-NEXT: v_mov_b32_e32 v4, s0
+; GCN2-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN2-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v3, v1
+; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT: v_mov_b32_e32 v2, v0
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT: s_cbranch_execnz .LBB4_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_endpgm
+entry:
+  %tmp0 = atomicrmw volatile max i64* %out, i64 %in seq_cst
+  ret void
+}
+
+define amdgpu_kernel void @atomic_max_i64_ret(i64* %out, i64* %out2, i64 %in) {
+; GCN1-LABEL: atomic_max_i64_ret:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
+; GCN1-NEXT: s_mov_b64 s[2:3], 0
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: v_mov_b32_e32 v0, s4
+; GCN1-NEXT: v_mov_b32_e32 v1, s5
+; GCN1-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
+; GCN1-NEXT: .LBB5_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_mov_b32_e32 v3, v1
+; GCN1-NEXT: v_mov_b32_e32 v2, v0
+; GCN1-NEXT: v_cmp_lt_i64_e32 vcc, s[0:1], v[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v0, s1
+; GCN1-NEXT: v_mov_b32_e32 v6, s0
+; GCN1-NEXT: v_mov_b32_e32 v4, s4
+; GCN1-NEXT: v_mov_b32_e32 v5, s5
+; GCN1-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN1-NEXT: s_cbranch_execnz .LBB5_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_or_b64 exec, exec, s[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v2, s6
+; GCN1-NEXT: v_mov_b32_e32 v3, s7
+; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_max_i64_ret:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN2-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
+; GCN2-NEXT: s_mov_b64 s[2:3], 0
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: v_mov_b32_e32 v0, s4
+; GCN2-NEXT: v_mov_b32_e32 v1, s5
+; GCN2-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
+; GCN2-NEXT: .LBB5_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_mov_b32_e32 v3, v1
+; GCN2-NEXT: v_mov_b32_e32 v2, v0
+; GCN2-NEXT: v_cmp_lt_i64_e32 vcc, s[0:1], v[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v0, s1
+; GCN2-NEXT: v_mov_b32_e32 v6, s0
+; GCN2-NEXT: v_mov_b32_e32 v4, s4
+; GCN2-NEXT: v_mov_b32_e32 v5, s5
+; GCN2-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN2-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN2-NEXT: s_cbranch_execnz .LBB5_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_or_b64 exec, exec, s[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v2, s6
+; GCN2-NEXT: v_mov_b32_e32 v3, s7
+; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GCN2-NEXT: s_endpgm
+entry:
+  %tmp0 = atomicrmw volatile max i64* %out, i64 %in seq_cst
+  store i64 %tmp0, i64* %out2
+  ret void
+}
+
+define amdgpu_kernel void @atomic_max_i64_addr64(i64* %out, i64 %in, i64 %index) {
+; GCN1-LABEL: atomic_max_i64_addr64:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0xd
+; GCN1-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: s_lshl_b64 s[4:5], s[4:5], 3
+; GCN1-NEXT: s_add_u32 s0, s0, s4
+; GCN1-NEXT: s_addc_u32 s1, s1, s5
+; GCN1-NEXT: v_mov_b32_e32 v0, s0
+; GCN1-NEXT: v_mov_b32_e32 v1, s1
+; GCN1-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GCN1-NEXT: s_mov_b64 s[4:5], 0
+; GCN1-NEXT: .LBB6_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_cmp_lt_i64_e32 vcc, s[2:3], v[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v0, s3
+; GCN1-NEXT: v_mov_b32_e32 v6, s2
+; GCN1-NEXT: v_mov_b32_e32 v5, s1
+; GCN1-NEXT: v_mov_b32_e32 v4, s0
+; GCN1-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v3, v1
+; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT: v_mov_b32_e32 v2, v0
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT: s_cbranch_execnz .LBB6_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_max_i64_addr64:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x34
+; GCN2-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: s_lshl_b64 s[4:5], s[4:5], 3
+; GCN2-NEXT: s_add_u32 s0, s0, s4
+; GCN2-NEXT: s_addc_u32 s1, s1, s5
+; GCN2-NEXT: v_mov_b32_e32 v0, s0
+; GCN2-NEXT: v_mov_b32_e32 v1, s1
+; GCN2-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GCN2-NEXT: s_mov_b64 s[4:5], 0
+; GCN2-NEXT: .LBB6_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_cmp_lt_i64_e32 vcc, s[2:3], v[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v0, s3
+; GCN2-NEXT: v_mov_b32_e32 v6, s2
+; GCN2-NEXT: v_mov_b32_e32 v5, s1
+; GCN2-NEXT: v_mov_b32_e32 v4, s0
+; GCN2-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN2-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v3, v1
+; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT: v_mov_b32_e32 v2, v0
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT: s_cbranch_execnz .LBB6_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_endpgm
+entry:
+  %ptr = getelementptr i64, i64* %out, i64 %index
+  %tmp0 = atomicrmw volatile max i64* %ptr, i64 %in seq_cst
+  ret void
+}
+
+define amdgpu_kernel void @atomic_max_i64_ret_addr64(i64* %out, i64* %out2, i64 %in, i64 %index) {
+; GCN1-LABEL: atomic_max_i64_ret_addr64:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x9
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; GCN1-NEXT: s_add_u32 s0, s0, s6
+; GCN1-NEXT: s_addc_u32 s1, s1, s7
+; GCN1-NEXT: v_mov_b32_e32 v0, s0
+; GCN1-NEXT: v_mov_b32_e32 v1, s1
+; GCN1-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
+; GCN1-NEXT: s_mov_b64 s[6:7], 0
+; GCN1-NEXT: .LBB7_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_mov_b32_e32 v3, v1
+; GCN1-NEXT: v_mov_b32_e32 v2, v0
+; GCN1-NEXT: v_cmp_lt_i64_e32 vcc, s[4:5], v[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v0, s5
+; GCN1-NEXT: v_mov_b32_e32 v6, s4
+; GCN1-NEXT: v_mov_b32_e32 v5, s1
+; GCN1-NEXT: v_mov_b32_e32 v4, s0
+; GCN1-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[6:7]
+; GCN1-NEXT: s_cbranch_execnz .LBB7_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_or_b64 exec, exec, s[6:7]
+; GCN1-NEXT: v_mov_b32_e32 v2, s2
+; GCN1-NEXT: v_mov_b32_e32 v3, s3
+; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_max_i64_ret_addr64:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; GCN2-NEXT: s_add_u32 s0, s0, s6
+; GCN2-NEXT: s_addc_u32 s1, s1, s7
+; GCN2-NEXT: v_mov_b32_e32 v0, s0
+; GCN2-NEXT: v_mov_b32_e32 v1, s1
+; GCN2-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
+; GCN2-NEXT: s_mov_b64 s[6:7], 0
+; GCN2-NEXT: .LBB7_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_mov_b32_e32 v3, v1
+; GCN2-NEXT: v_mov_b32_e32 v2, v0
+; GCN2-NEXT:
v_cmp_lt_i64_e32 vcc, s[4:5], v[2:3] +; GCN2-NEXT: v_mov_b32_e32 v0, s5 +; GCN2-NEXT: v_mov_b32_e32 v6, s4 +; GCN2-NEXT: v_mov_b32_e32 v5, s1 +; GCN2-NEXT: v_mov_b32_e32 v4, s0 +; GCN2-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc +; GCN2-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: buffer_wbinvl1_vol +; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3] +; GCN2-NEXT: s_or_b64 s[6:7], vcc, s[6:7] +; GCN2-NEXT: s_andn2_b64 exec, exec, s[6:7] +; GCN2-NEXT: s_cbranch_execnz .LBB7_1 +; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN2-NEXT: s_or_b64 exec, exec, s[6:7] +; GCN2-NEXT: v_mov_b32_e32 v2, s2 +; GCN2-NEXT: v_mov_b32_e32 v3, s3 +; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1] +; GCN2-NEXT: s_endpgm +entry: + %ptr = getelementptr i64, i64* %out, i64 %index + %tmp0 = atomicrmw volatile max i64* %ptr, i64 %in seq_cst + store i64 %tmp0, i64* %out2 + ret void +} + +define amdgpu_kernel void @atomic_umax_i64_offset(i64* %out, i64 %in) { +; GCN1-LABEL: atomic_umax_i64_offset: +; GCN1: ; %bb.0: ; %entry +; GCN1-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9 +; GCN1-NEXT: s_mov_b64 s[4:5], 0 +; GCN1-NEXT: s_waitcnt lgkmcnt(0) +; GCN1-NEXT: s_add_u32 s0, s0, 32 +; GCN1-NEXT: s_addc_u32 s1, s1, 0 +; GCN1-NEXT: v_mov_b32_e32 v0, s0 +; GCN1-NEXT: v_mov_b32_e32 v1, s1 +; GCN1-NEXT: flat_load_dwordx2 v[2:3], v[0:1] +; GCN1-NEXT: .LBB8_1: ; %atomicrmw.start +; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: v_cmp_lt_u64_e32 vcc, s[2:3], v[2:3] +; GCN1-NEXT: v_mov_b32_e32 v0, s3 +; GCN1-NEXT: v_mov_b32_e32 v6, s2 +; GCN1-NEXT: v_mov_b32_e32 v5, s1 +; GCN1-NEXT: v_mov_b32_e32 v4, s0 +; GCN1-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc +; GCN1-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: buffer_wbinvl1_vol +; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3] +; GCN1-NEXT: v_mov_b32_e32 v3, v1 +; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GCN1-NEXT: v_mov_b32_e32 v2, v0 +; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5] +; GCN1-NEXT: s_cbranch_execnz .LBB8_1 +; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN1-NEXT: s_endpgm +; +; GCN2-LABEL: atomic_umax_i64_offset: +; GCN2: ; %bb.0: ; %entry +; GCN2-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24 +; GCN2-NEXT: s_mov_b64 s[4:5], 0 +; GCN2-NEXT: s_waitcnt lgkmcnt(0) +; GCN2-NEXT: s_add_u32 s0, s0, 32 +; GCN2-NEXT: s_addc_u32 s1, s1, 0 +; GCN2-NEXT: v_mov_b32_e32 v0, s0 +; GCN2-NEXT: v_mov_b32_e32 v1, s1 +; GCN2-NEXT: flat_load_dwordx2 v[2:3], v[0:1] +; GCN2-NEXT: .LBB8_1: ; %atomicrmw.start +; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: v_cmp_lt_u64_e32 vcc, s[2:3], v[2:3] +; GCN2-NEXT: v_mov_b32_e32 v0, s3 +; GCN2-NEXT: v_mov_b32_e32 v6, s2 +; GCN2-NEXT: v_mov_b32_e32 v5, s1 +; GCN2-NEXT: v_mov_b32_e32 v4, s0 +; GCN2-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc +; GCN2-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: buffer_wbinvl1_vol +; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3] +; GCN2-NEXT: v_mov_b32_e32 v3, v1 +; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GCN2-NEXT: v_mov_b32_e32 v2, v0 +; GCN2-NEXT: 
s_andn2_b64 exec, exec, s[4:5] +; GCN2-NEXT: s_cbranch_execnz .LBB8_1 +; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN2-NEXT: s_endpgm +entry: + %gep = getelementptr i64, i64* %out, i64 4 + %tmp0 = atomicrmw volatile umax i64* %gep, i64 %in seq_cst + ret void +} + +define amdgpu_kernel void @atomic_umax_i64_ret_offset(i64* %out, i64* %out2, i64 %in) { +; GCN1-LABEL: atomic_umax_i64_ret_offset: +; GCN1: ; %bb.0: ; %entry +; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9 +; GCN1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd +; GCN1-NEXT: s_waitcnt lgkmcnt(0) +; GCN1-NEXT: s_add_u32 s2, s4, 32 +; GCN1-NEXT: s_addc_u32 s3, s5, 0 +; GCN1-NEXT: v_mov_b32_e32 v0, s2 +; GCN1-NEXT: v_mov_b32_e32 v1, s3 +; GCN1-NEXT: flat_load_dwordx2 v[0:1], v[0:1] +; GCN1-NEXT: s_mov_b64 s[4:5], 0 +; GCN1-NEXT: .LBB9_1: ; %atomicrmw.start +; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: v_mov_b32_e32 v3, v1 +; GCN1-NEXT: v_mov_b32_e32 v2, v0 +; GCN1-NEXT: v_cmp_lt_u64_e32 vcc, s[0:1], v[2:3] +; GCN1-NEXT: v_mov_b32_e32 v0, s1 +; GCN1-NEXT: v_mov_b32_e32 v6, s0 +; GCN1-NEXT: v_mov_b32_e32 v5, s3 +; GCN1-NEXT: v_mov_b32_e32 v4, s2 +; GCN1-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc +; GCN1-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: buffer_wbinvl1_vol +; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3] +; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5] +; GCN1-NEXT: s_cbranch_execnz .LBB9_1 +; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN1-NEXT: s_or_b64 exec, exec, s[4:5] +; GCN1-NEXT: v_mov_b32_e32 v2, s6 +; GCN1-NEXT: v_mov_b32_e32 v3, s7 +; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1] +; GCN1-NEXT: s_endpgm +; +; GCN2-LABEL: atomic_umax_i64_ret_offset: +; GCN2: ; %bb.0: ; %entry +; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24 +; GCN2-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34 +; GCN2-NEXT: s_waitcnt lgkmcnt(0) +; GCN2-NEXT: s_add_u32 s2, s4, 32 +; GCN2-NEXT: s_addc_u32 s3, s5, 0 +; GCN2-NEXT: v_mov_b32_e32 v0, s2 +; GCN2-NEXT: v_mov_b32_e32 v1, s3 +; GCN2-NEXT: flat_load_dwordx2 v[0:1], v[0:1] +; GCN2-NEXT: s_mov_b64 s[4:5], 0 +; GCN2-NEXT: .LBB9_1: ; %atomicrmw.start +; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: v_mov_b32_e32 v3, v1 +; GCN2-NEXT: v_mov_b32_e32 v2, v0 +; GCN2-NEXT: v_cmp_lt_u64_e32 vcc, s[0:1], v[2:3] +; GCN2-NEXT: v_mov_b32_e32 v0, s1 +; GCN2-NEXT: v_mov_b32_e32 v6, s0 +; GCN2-NEXT: v_mov_b32_e32 v5, s3 +; GCN2-NEXT: v_mov_b32_e32 v4, s2 +; GCN2-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc +; GCN2-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: buffer_wbinvl1_vol +; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3] +; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5] +; GCN2-NEXT: s_cbranch_execnz .LBB9_1 +; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN2-NEXT: s_or_b64 exec, exec, s[4:5] +; GCN2-NEXT: v_mov_b32_e32 v2, s6 +; GCN2-NEXT: v_mov_b32_e32 v3, s7 +; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1] +; GCN2-NEXT: s_endpgm +entry: + %gep = getelementptr i64, i64* %out, i64 4 + %tmp0 = atomicrmw volatile umax i64* %gep, i64 %in seq_cst + store i64 %tmp0, i64* %out2 + ret void +} + +define amdgpu_kernel void 
@atomic_umax_i64_addr64_offset(i64* %out, i64 %in, i64 %index) { +; GCN1-LABEL: atomic_umax_i64_addr64_offset: +; GCN1: ; %bb.0: ; %entry +; GCN1-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0xd +; GCN1-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9 +; GCN1-NEXT: s_waitcnt lgkmcnt(0) +; GCN1-NEXT: s_lshl_b64 s[4:5], s[4:5], 3 +; GCN1-NEXT: s_add_u32 s0, s0, s4 +; GCN1-NEXT: s_addc_u32 s1, s1, s5 +; GCN1-NEXT: s_add_u32 s0, s0, 32 +; GCN1-NEXT: s_addc_u32 s1, s1, 0 +; GCN1-NEXT: v_mov_b32_e32 v0, s0 +; GCN1-NEXT: v_mov_b32_e32 v1, s1 +; GCN1-NEXT: flat_load_dwordx2 v[2:3], v[0:1] +; GCN1-NEXT: s_mov_b64 s[4:5], 0 +; GCN1-NEXT: .LBB10_1: ; %atomicrmw.start +; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: v_cmp_lt_u64_e32 vcc, s[2:3], v[2:3] +; GCN1-NEXT: v_mov_b32_e32 v0, s3 +; GCN1-NEXT: v_mov_b32_e32 v6, s2 +; GCN1-NEXT: v_mov_b32_e32 v5, s1 +; GCN1-NEXT: v_mov_b32_e32 v4, s0 +; GCN1-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc +; GCN1-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: buffer_wbinvl1_vol +; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3] +; GCN1-NEXT: v_mov_b32_e32 v3, v1 +; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GCN1-NEXT: v_mov_b32_e32 v2, v0 +; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5] +; GCN1-NEXT: s_cbranch_execnz .LBB10_1 +; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN1-NEXT: s_endpgm +; +; GCN2-LABEL: atomic_umax_i64_addr64_offset: +; GCN2: ; %bb.0: ; %entry +; GCN2-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x34 +; GCN2-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24 +; GCN2-NEXT: s_waitcnt lgkmcnt(0) +; GCN2-NEXT: s_lshl_b64 s[4:5], s[4:5], 3 +; GCN2-NEXT: s_add_u32 s0, s0, s4 +; GCN2-NEXT: s_addc_u32 s1, s1, s5 +; GCN2-NEXT: s_add_u32 s0, s0, 32 +; GCN2-NEXT: s_addc_u32 s1, s1, 0 +; GCN2-NEXT: v_mov_b32_e32 v0, s0 +; GCN2-NEXT: v_mov_b32_e32 v1, s1 +; GCN2-NEXT: flat_load_dwordx2 v[2:3], v[0:1] +; GCN2-NEXT: s_mov_b64 s[4:5], 0 +; GCN2-NEXT: .LBB10_1: ; %atomicrmw.start +; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: v_cmp_lt_u64_e32 vcc, s[2:3], v[2:3] +; GCN2-NEXT: v_mov_b32_e32 v0, s3 +; GCN2-NEXT: v_mov_b32_e32 v6, s2 +; GCN2-NEXT: v_mov_b32_e32 v5, s1 +; GCN2-NEXT: v_mov_b32_e32 v4, s0 +; GCN2-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc +; GCN2-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: buffer_wbinvl1_vol +; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3] +; GCN2-NEXT: v_mov_b32_e32 v3, v1 +; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GCN2-NEXT: v_mov_b32_e32 v2, v0 +; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5] +; GCN2-NEXT: s_cbranch_execnz .LBB10_1 +; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN2-NEXT: s_endpgm +entry: + %ptr = getelementptr i64, i64* %out, i64 %index + %gep = getelementptr i64, i64* %ptr, i64 4 + %tmp0 = atomicrmw volatile umax i64* %gep, i64 %in seq_cst + ret void +} + +define amdgpu_kernel void @atomic_umax_i64_ret_addr64_offset(i64* %out, i64* %out2, i64 %in, i64 %index) { +; GCN1-LABEL: atomic_umax_i64_ret_addr64_offset: +; GCN1: ; %bb.0: ; %entry +; GCN1-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x9 +; GCN1-NEXT: s_waitcnt lgkmcnt(0) +; GCN1-NEXT: s_lshl_b64 s[6:7], s[6:7], 3 +; GCN1-NEXT: s_add_u32 s0, s0, s6 +; GCN1-NEXT: s_addc_u32 s1, s1, 
s7 +; GCN1-NEXT: s_add_u32 s0, s0, 32 +; GCN1-NEXT: s_addc_u32 s1, s1, 0 +; GCN1-NEXT: v_mov_b32_e32 v0, s0 +; GCN1-NEXT: v_mov_b32_e32 v1, s1 +; GCN1-NEXT: flat_load_dwordx2 v[0:1], v[0:1] +; GCN1-NEXT: s_mov_b64 s[6:7], 0 +; GCN1-NEXT: .LBB11_1: ; %atomicrmw.start +; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: v_mov_b32_e32 v3, v1 +; GCN1-NEXT: v_mov_b32_e32 v2, v0 +; GCN1-NEXT: v_cmp_lt_u64_e32 vcc, s[4:5], v[2:3] +; GCN1-NEXT: v_mov_b32_e32 v0, s5 +; GCN1-NEXT: v_mov_b32_e32 v6, s4 +; GCN1-NEXT: v_mov_b32_e32 v5, s1 +; GCN1-NEXT: v_mov_b32_e32 v4, s0 +; GCN1-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc +; GCN1-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: buffer_wbinvl1_vol +; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3] +; GCN1-NEXT: s_or_b64 s[6:7], vcc, s[6:7] +; GCN1-NEXT: s_andn2_b64 exec, exec, s[6:7] +; GCN1-NEXT: s_cbranch_execnz .LBB11_1 +; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN1-NEXT: s_or_b64 exec, exec, s[6:7] +; GCN1-NEXT: v_mov_b32_e32 v2, s2 +; GCN1-NEXT: v_mov_b32_e32 v3, s3 +; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1] +; GCN1-NEXT: s_endpgm +; +; GCN2-LABEL: atomic_umax_i64_ret_addr64_offset: +; GCN2: ; %bb.0: ; %entry +; GCN2-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24 +; GCN2-NEXT: s_waitcnt lgkmcnt(0) +; GCN2-NEXT: s_lshl_b64 s[6:7], s[6:7], 3 +; GCN2-NEXT: s_add_u32 s0, s0, s6 +; GCN2-NEXT: s_addc_u32 s1, s1, s7 +; GCN2-NEXT: s_add_u32 s0, s0, 32 +; GCN2-NEXT: s_addc_u32 s1, s1, 0 +; GCN2-NEXT: v_mov_b32_e32 v0, s0 +; GCN2-NEXT: v_mov_b32_e32 v1, s1 +; GCN2-NEXT: flat_load_dwordx2 v[0:1], v[0:1] +; GCN2-NEXT: s_mov_b64 s[6:7], 0 +; GCN2-NEXT: .LBB11_1: ; %atomicrmw.start +; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: v_mov_b32_e32 v3, v1 +; GCN2-NEXT: v_mov_b32_e32 v2, v0 +; GCN2-NEXT: v_cmp_lt_u64_e32 vcc, s[4:5], v[2:3] +; GCN2-NEXT: v_mov_b32_e32 v0, s5 +; GCN2-NEXT: v_mov_b32_e32 v6, s4 +; GCN2-NEXT: v_mov_b32_e32 v5, s1 +; GCN2-NEXT: v_mov_b32_e32 v4, s0 +; GCN2-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc +; GCN2-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: buffer_wbinvl1_vol +; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3] +; GCN2-NEXT: s_or_b64 s[6:7], vcc, s[6:7] +; GCN2-NEXT: s_andn2_b64 exec, exec, s[6:7] +; GCN2-NEXT: s_cbranch_execnz .LBB11_1 +; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN2-NEXT: s_or_b64 exec, exec, s[6:7] +; GCN2-NEXT: v_mov_b32_e32 v2, s2 +; GCN2-NEXT: v_mov_b32_e32 v3, s3 +; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1] +; GCN2-NEXT: s_endpgm +entry: + %ptr = getelementptr i64, i64* %out, i64 %index + %gep = getelementptr i64, i64* %ptr, i64 4 + %tmp0 = atomicrmw volatile umax i64* %gep, i64 %in seq_cst + store i64 %tmp0, i64* %out2 + ret void +} + +define amdgpu_kernel void @atomic_umax_i64(i64* %out, i64 %in) { +; GCN1-LABEL: atomic_umax_i64: +; GCN1: ; %bb.0: ; %entry +; GCN1-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9 +; GCN1-NEXT: s_mov_b64 s[4:5], 0 +; GCN1-NEXT: s_waitcnt lgkmcnt(0) +; GCN1-NEXT: v_mov_b32_e32 v0, s0 +; GCN1-NEXT: v_mov_b32_e32 v1, s1 +; GCN1-NEXT: flat_load_dwordx2 v[2:3], v[0:1] +; GCN1-NEXT: .LBB12_1: ; %atomicrmw.start +; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1 +; 
GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: v_cmp_lt_u64_e32 vcc, s[2:3], v[2:3] +; GCN1-NEXT: v_mov_b32_e32 v0, s3 +; GCN1-NEXT: v_mov_b32_e32 v6, s2 +; GCN1-NEXT: v_mov_b32_e32 v5, s1 +; GCN1-NEXT: v_mov_b32_e32 v4, s0 +; GCN1-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc +; GCN1-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: buffer_wbinvl1_vol +; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3] +; GCN1-NEXT: v_mov_b32_e32 v3, v1 +; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GCN1-NEXT: v_mov_b32_e32 v2, v0 +; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5] +; GCN1-NEXT: s_cbranch_execnz .LBB12_1 +; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN1-NEXT: s_endpgm +; +; GCN2-LABEL: atomic_umax_i64: +; GCN2: ; %bb.0: ; %entry +; GCN2-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24 +; GCN2-NEXT: s_mov_b64 s[4:5], 0 +; GCN2-NEXT: s_waitcnt lgkmcnt(0) +; GCN2-NEXT: v_mov_b32_e32 v0, s0 +; GCN2-NEXT: v_mov_b32_e32 v1, s1 +; GCN2-NEXT: flat_load_dwordx2 v[2:3], v[0:1] +; GCN2-NEXT: .LBB12_1: ; %atomicrmw.start +; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: v_cmp_lt_u64_e32 vcc, s[2:3], v[2:3] +; GCN2-NEXT: v_mov_b32_e32 v0, s3 +; GCN2-NEXT: v_mov_b32_e32 v6, s2 +; GCN2-NEXT: v_mov_b32_e32 v5, s1 +; GCN2-NEXT: v_mov_b32_e32 v4, s0 +; GCN2-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc +; GCN2-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: buffer_wbinvl1_vol +; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3] +; GCN2-NEXT: v_mov_b32_e32 v3, v1 +; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GCN2-NEXT: v_mov_b32_e32 v2, v0 +; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5] +; GCN2-NEXT: s_cbranch_execnz .LBB12_1 +; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN2-NEXT: s_endpgm +entry: + %tmp0 = atomicrmw volatile umax i64* %out, i64 %in seq_cst + ret void +} + +define amdgpu_kernel void @atomic_umax_i64_ret(i64* %out, i64* %out2, i64 %in) { +; GCN1-LABEL: atomic_umax_i64_ret: +; GCN1: ; %bb.0: ; %entry +; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9 +; GCN1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd +; GCN1-NEXT: s_mov_b64 s[2:3], 0 +; GCN1-NEXT: s_waitcnt lgkmcnt(0) +; GCN1-NEXT: v_mov_b32_e32 v0, s4 +; GCN1-NEXT: v_mov_b32_e32 v1, s5 +; GCN1-NEXT: flat_load_dwordx2 v[0:1], v[0:1] +; GCN1-NEXT: .LBB13_1: ; %atomicrmw.start +; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: v_mov_b32_e32 v3, v1 +; GCN1-NEXT: v_mov_b32_e32 v2, v0 +; GCN1-NEXT: v_cmp_lt_u64_e32 vcc, s[0:1], v[2:3] +; GCN1-NEXT: v_mov_b32_e32 v0, s1 +; GCN1-NEXT: v_mov_b32_e32 v6, s0 +; GCN1-NEXT: v_mov_b32_e32 v4, s4 +; GCN1-NEXT: v_mov_b32_e32 v5, s5 +; GCN1-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc +; GCN1-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: buffer_wbinvl1_vol +; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3] +; GCN1-NEXT: s_or_b64 s[2:3], vcc, s[2:3] +; GCN1-NEXT: s_andn2_b64 exec, exec, s[2:3] +; GCN1-NEXT: s_cbranch_execnz .LBB13_1 +; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN1-NEXT: s_or_b64 exec, exec, s[2:3] +; GCN1-NEXT: v_mov_b32_e32 v2, s6 +; GCN1-NEXT: 
v_mov_b32_e32 v3, s7 +; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1] +; GCN1-NEXT: s_endpgm +; +; GCN2-LABEL: atomic_umax_i64_ret: +; GCN2: ; %bb.0: ; %entry +; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24 +; GCN2-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34 +; GCN2-NEXT: s_mov_b64 s[2:3], 0 +; GCN2-NEXT: s_waitcnt lgkmcnt(0) +; GCN2-NEXT: v_mov_b32_e32 v0, s4 +; GCN2-NEXT: v_mov_b32_e32 v1, s5 +; GCN2-NEXT: flat_load_dwordx2 v[0:1], v[0:1] +; GCN2-NEXT: .LBB13_1: ; %atomicrmw.start +; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: v_mov_b32_e32 v3, v1 +; GCN2-NEXT: v_mov_b32_e32 v2, v0 +; GCN2-NEXT: v_cmp_lt_u64_e32 vcc, s[0:1], v[2:3] +; GCN2-NEXT: v_mov_b32_e32 v0, s1 +; GCN2-NEXT: v_mov_b32_e32 v6, s0 +; GCN2-NEXT: v_mov_b32_e32 v4, s4 +; GCN2-NEXT: v_mov_b32_e32 v5, s5 +; GCN2-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc +; GCN2-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: buffer_wbinvl1_vol +; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3] +; GCN2-NEXT: s_or_b64 s[2:3], vcc, s[2:3] +; GCN2-NEXT: s_andn2_b64 exec, exec, s[2:3] +; GCN2-NEXT: s_cbranch_execnz .LBB13_1 +; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN2-NEXT: s_or_b64 exec, exec, s[2:3] +; GCN2-NEXT: v_mov_b32_e32 v2, s6 +; GCN2-NEXT: v_mov_b32_e32 v3, s7 +; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1] +; GCN2-NEXT: s_endpgm +entry: + %tmp0 = atomicrmw volatile umax i64* %out, i64 %in seq_cst + store i64 %tmp0, i64* %out2 + ret void +} + +define amdgpu_kernel void @atomic_umax_i64_addr64(i64* %out, i64 %in, i64 %index) { +; GCN1-LABEL: atomic_umax_i64_addr64: +; GCN1: ; %bb.0: ; %entry +; GCN1-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0xd +; GCN1-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9 +; GCN1-NEXT: s_waitcnt lgkmcnt(0) +; GCN1-NEXT: s_lshl_b64 s[4:5], s[4:5], 3 +; GCN1-NEXT: s_add_u32 s0, s0, s4 +; GCN1-NEXT: s_addc_u32 s1, s1, s5 +; GCN1-NEXT: v_mov_b32_e32 v0, s0 +; GCN1-NEXT: v_mov_b32_e32 v1, s1 +; GCN1-NEXT: flat_load_dwordx2 v[2:3], v[0:1] +; GCN1-NEXT: s_mov_b64 s[4:5], 0 +; GCN1-NEXT: .LBB14_1: ; %atomicrmw.start +; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: v_cmp_lt_u64_e32 vcc, s[2:3], v[2:3] +; GCN1-NEXT: v_mov_b32_e32 v0, s3 +; GCN1-NEXT: v_mov_b32_e32 v6, s2 +; GCN1-NEXT: v_mov_b32_e32 v5, s1 +; GCN1-NEXT: v_mov_b32_e32 v4, s0 +; GCN1-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc +; GCN1-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: buffer_wbinvl1_vol +; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3] +; GCN1-NEXT: v_mov_b32_e32 v3, v1 +; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GCN1-NEXT: v_mov_b32_e32 v2, v0 +; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5] +; GCN1-NEXT: s_cbranch_execnz .LBB14_1 +; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN1-NEXT: s_endpgm +; +; GCN2-LABEL: atomic_umax_i64_addr64: +; GCN2: ; %bb.0: ; %entry +; GCN2-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x34 +; GCN2-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24 +; GCN2-NEXT: s_waitcnt lgkmcnt(0) +; GCN2-NEXT: s_lshl_b64 s[4:5], s[4:5], 3 +; GCN2-NEXT: s_add_u32 s0, s0, s4 +; GCN2-NEXT: s_addc_u32 s1, s1, s5 +; GCN2-NEXT: v_mov_b32_e32 v0, s0 +; GCN2-NEXT: v_mov_b32_e32 v1, s1 +; GCN2-NEXT: flat_load_dwordx2 v[2:3], v[0:1] +; 
GCN2-NEXT: s_mov_b64 s[4:5], 0 +; GCN2-NEXT: .LBB14_1: ; %atomicrmw.start +; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: v_cmp_lt_u64_e32 vcc, s[2:3], v[2:3] +; GCN2-NEXT: v_mov_b32_e32 v0, s3 +; GCN2-NEXT: v_mov_b32_e32 v6, s2 +; GCN2-NEXT: v_mov_b32_e32 v5, s1 +; GCN2-NEXT: v_mov_b32_e32 v4, s0 +; GCN2-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc +; GCN2-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: buffer_wbinvl1_vol +; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3] +; GCN2-NEXT: v_mov_b32_e32 v3, v1 +; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GCN2-NEXT: v_mov_b32_e32 v2, v0 +; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5] +; GCN2-NEXT: s_cbranch_execnz .LBB14_1 +; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN2-NEXT: s_endpgm +entry: + %ptr = getelementptr i64, i64* %out, i64 %index + %tmp0 = atomicrmw volatile umax i64* %ptr, i64 %in seq_cst + ret void +} + +define amdgpu_kernel void @atomic_umax_i64_ret_addr64(i64* %out, i64* %out2, i64 %in, i64 %index) { +; GCN1-LABEL: atomic_umax_i64_ret_addr64: +; GCN1: ; %bb.0: ; %entry +; GCN1-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x9 +; GCN1-NEXT: s_waitcnt lgkmcnt(0) +; GCN1-NEXT: s_lshl_b64 s[6:7], s[6:7], 3 +; GCN1-NEXT: s_add_u32 s0, s0, s6 +; GCN1-NEXT: s_addc_u32 s1, s1, s7 +; GCN1-NEXT: v_mov_b32_e32 v0, s0 +; GCN1-NEXT: v_mov_b32_e32 v1, s1 +; GCN1-NEXT: flat_load_dwordx2 v[0:1], v[0:1] +; GCN1-NEXT: s_mov_b64 s[6:7], 0 +; GCN1-NEXT: .LBB15_1: ; %atomicrmw.start +; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: v_mov_b32_e32 v3, v1 +; GCN1-NEXT: v_mov_b32_e32 v2, v0 +; GCN1-NEXT: v_cmp_lt_u64_e32 vcc, s[4:5], v[2:3] +; GCN1-NEXT: v_mov_b32_e32 v0, s5 +; GCN1-NEXT: v_mov_b32_e32 v6, s4 +; GCN1-NEXT: v_mov_b32_e32 v5, s1 +; GCN1-NEXT: v_mov_b32_e32 v4, s0 +; GCN1-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc +; GCN1-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: buffer_wbinvl1_vol +; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3] +; GCN1-NEXT: s_or_b64 s[6:7], vcc, s[6:7] +; GCN1-NEXT: s_andn2_b64 exec, exec, s[6:7] +; GCN1-NEXT: s_cbranch_execnz .LBB15_1 +; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN1-NEXT: s_or_b64 exec, exec, s[6:7] +; GCN1-NEXT: v_mov_b32_e32 v2, s2 +; GCN1-NEXT: v_mov_b32_e32 v3, s3 +; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1] +; GCN1-NEXT: s_endpgm +; +; GCN2-LABEL: atomic_umax_i64_ret_addr64: +; GCN2: ; %bb.0: ; %entry +; GCN2-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24 +; GCN2-NEXT: s_waitcnt lgkmcnt(0) +; GCN2-NEXT: s_lshl_b64 s[6:7], s[6:7], 3 +; GCN2-NEXT: s_add_u32 s0, s0, s6 +; GCN2-NEXT: s_addc_u32 s1, s1, s7 +; GCN2-NEXT: v_mov_b32_e32 v0, s0 +; GCN2-NEXT: v_mov_b32_e32 v1, s1 +; GCN2-NEXT: flat_load_dwordx2 v[0:1], v[0:1] +; GCN2-NEXT: s_mov_b64 s[6:7], 0 +; GCN2-NEXT: .LBB15_1: ; %atomicrmw.start +; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: v_mov_b32_e32 v3, v1 +; GCN2-NEXT: v_mov_b32_e32 v2, v0 +; GCN2-NEXT: v_cmp_lt_u64_e32 vcc, s[4:5], v[2:3] +; GCN2-NEXT: v_mov_b32_e32 v0, s5 +; GCN2-NEXT: v_mov_b32_e32 v6, s4 +; GCN2-NEXT: v_mov_b32_e32 v5, s1 +; GCN2-NEXT: v_mov_b32_e32 v4, s0 +; GCN2-NEXT: v_cndmask_b32_e32 v1, v0, v3, 
vcc +; GCN2-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: buffer_wbinvl1_vol +; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3] +; GCN2-NEXT: s_or_b64 s[6:7], vcc, s[6:7] +; GCN2-NEXT: s_andn2_b64 exec, exec, s[6:7] +; GCN2-NEXT: s_cbranch_execnz .LBB15_1 +; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN2-NEXT: s_or_b64 exec, exec, s[6:7] +; GCN2-NEXT: v_mov_b32_e32 v2, s2 +; GCN2-NEXT: v_mov_b32_e32 v3, s3 +; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1] +; GCN2-NEXT: s_endpgm +entry: + %ptr = getelementptr i64, i64* %out, i64 %index + %tmp0 = atomicrmw volatile umax i64* %ptr, i64 %in seq_cst + store i64 %tmp0, i64* %out2 + ret void +} + +define amdgpu_kernel void @atomic_min_i64_offset(i64* %out, i64 %in) { +; GCN1-LABEL: atomic_min_i64_offset: +; GCN1: ; %bb.0: ; %entry +; GCN1-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9 +; GCN1-NEXT: s_mov_b64 s[4:5], 0 +; GCN1-NEXT: s_waitcnt lgkmcnt(0) +; GCN1-NEXT: s_add_u32 s0, s0, 32 +; GCN1-NEXT: s_addc_u32 s1, s1, 0 +; GCN1-NEXT: v_mov_b32_e32 v0, s0 +; GCN1-NEXT: v_mov_b32_e32 v1, s1 +; GCN1-NEXT: flat_load_dwordx2 v[2:3], v[0:1] +; GCN1-NEXT: .LBB16_1: ; %atomicrmw.start +; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: v_cmp_ge_i64_e32 vcc, s[2:3], v[2:3] +; GCN1-NEXT: v_mov_b32_e32 v0, s3 +; GCN1-NEXT: v_mov_b32_e32 v6, s2 +; GCN1-NEXT: v_mov_b32_e32 v5, s1 +; GCN1-NEXT: v_mov_b32_e32 v4, s0 +; GCN1-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc +; GCN1-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: buffer_wbinvl1_vol +; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3] +; GCN1-NEXT: v_mov_b32_e32 v3, v1 +; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GCN1-NEXT: v_mov_b32_e32 v2, v0 +; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5] +; GCN1-NEXT: s_cbranch_execnz .LBB16_1 +; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN1-NEXT: s_endpgm +; +; GCN2-LABEL: atomic_min_i64_offset: +; GCN2: ; %bb.0: ; %entry +; GCN2-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24 +; GCN2-NEXT: s_mov_b64 s[4:5], 0 +; GCN2-NEXT: s_waitcnt lgkmcnt(0) +; GCN2-NEXT: s_add_u32 s0, s0, 32 +; GCN2-NEXT: s_addc_u32 s1, s1, 0 +; GCN2-NEXT: v_mov_b32_e32 v0, s0 +; GCN2-NEXT: v_mov_b32_e32 v1, s1 +; GCN2-NEXT: flat_load_dwordx2 v[2:3], v[0:1] +; GCN2-NEXT: .LBB16_1: ; %atomicrmw.start +; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: v_cmp_ge_i64_e32 vcc, s[2:3], v[2:3] +; GCN2-NEXT: v_mov_b32_e32 v0, s3 +; GCN2-NEXT: v_mov_b32_e32 v6, s2 +; GCN2-NEXT: v_mov_b32_e32 v5, s1 +; GCN2-NEXT: v_mov_b32_e32 v4, s0 +; GCN2-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc +; GCN2-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: buffer_wbinvl1_vol +; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3] +; GCN2-NEXT: v_mov_b32_e32 v3, v1 +; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GCN2-NEXT: v_mov_b32_e32 v2, v0 +; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5] +; GCN2-NEXT: s_cbranch_execnz .LBB16_1 +; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN2-NEXT: s_endpgm +entry: + %gep = getelementptr i64, i64* %out, i64 4 + %tmp0 = atomicrmw volatile min i64* 
%gep, i64 %in seq_cst + ret void +} + +define amdgpu_kernel void @atomic_min_i64_ret_offset(i64* %out, i64* %out2, i64 %in) { +; GCN1-LABEL: atomic_min_i64_ret_offset: +; GCN1: ; %bb.0: ; %entry +; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9 +; GCN1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd +; GCN1-NEXT: s_waitcnt lgkmcnt(0) +; GCN1-NEXT: s_add_u32 s2, s4, 32 +; GCN1-NEXT: s_addc_u32 s3, s5, 0 +; GCN1-NEXT: v_mov_b32_e32 v0, s2 +; GCN1-NEXT: v_mov_b32_e32 v1, s3 +; GCN1-NEXT: flat_load_dwordx2 v[0:1], v[0:1] +; GCN1-NEXT: s_mov_b64 s[4:5], 0 +; GCN1-NEXT: .LBB17_1: ; %atomicrmw.start +; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: v_mov_b32_e32 v3, v1 +; GCN1-NEXT: v_mov_b32_e32 v2, v0 +; GCN1-NEXT: v_cmp_ge_i64_e32 vcc, s[0:1], v[2:3] +; GCN1-NEXT: v_mov_b32_e32 v0, s1 +; GCN1-NEXT: v_mov_b32_e32 v6, s0 +; GCN1-NEXT: v_mov_b32_e32 v5, s3 +; GCN1-NEXT: v_mov_b32_e32 v4, s2 +; GCN1-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc +; GCN1-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: buffer_wbinvl1_vol +; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3] +; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5] +; GCN1-NEXT: s_cbranch_execnz .LBB17_1 +; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN1-NEXT: s_or_b64 exec, exec, s[4:5] +; GCN1-NEXT: v_mov_b32_e32 v2, s6 +; GCN1-NEXT: v_mov_b32_e32 v3, s7 +; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1] +; GCN1-NEXT: s_endpgm +; +; GCN2-LABEL: atomic_min_i64_ret_offset: +; GCN2: ; %bb.0: ; %entry +; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24 +; GCN2-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34 +; GCN2-NEXT: s_waitcnt lgkmcnt(0) +; GCN2-NEXT: s_add_u32 s2, s4, 32 +; GCN2-NEXT: s_addc_u32 s3, s5, 0 +; GCN2-NEXT: v_mov_b32_e32 v0, s2 +; GCN2-NEXT: v_mov_b32_e32 v1, s3 +; GCN2-NEXT: flat_load_dwordx2 v[0:1], v[0:1] +; GCN2-NEXT: s_mov_b64 s[4:5], 0 +; GCN2-NEXT: .LBB17_1: ; %atomicrmw.start +; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: v_mov_b32_e32 v3, v1 +; GCN2-NEXT: v_mov_b32_e32 v2, v0 +; GCN2-NEXT: v_cmp_ge_i64_e32 vcc, s[0:1], v[2:3] +; GCN2-NEXT: v_mov_b32_e32 v0, s1 +; GCN2-NEXT: v_mov_b32_e32 v6, s0 +; GCN2-NEXT: v_mov_b32_e32 v5, s3 +; GCN2-NEXT: v_mov_b32_e32 v4, s2 +; GCN2-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc +; GCN2-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: buffer_wbinvl1_vol +; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3] +; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5] +; GCN2-NEXT: s_cbranch_execnz .LBB17_1 +; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN2-NEXT: s_or_b64 exec, exec, s[4:5] +; GCN2-NEXT: v_mov_b32_e32 v2, s6 +; GCN2-NEXT: v_mov_b32_e32 v3, s7 +; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1] +; GCN2-NEXT: s_endpgm +entry: + %gep = getelementptr i64, i64* %out, i64 4 + %tmp0 = atomicrmw volatile min i64* %gep, i64 %in seq_cst + store i64 %tmp0, i64* %out2 + ret void +} + +define amdgpu_kernel void @atomic_min_i64_addr64_offset(i64* %out, i64 %in, i64 %index) { +; GCN1-LABEL: atomic_min_i64_addr64_offset: +; GCN1: ; %bb.0: ; %entry +; GCN1-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0xd +; GCN1-NEXT: s_load_dwordx4 s[0:3], s[0:1], 
0x9 +; GCN1-NEXT: s_waitcnt lgkmcnt(0) +; GCN1-NEXT: s_lshl_b64 s[4:5], s[4:5], 3 +; GCN1-NEXT: s_add_u32 s0, s0, s4 +; GCN1-NEXT: s_addc_u32 s1, s1, s5 +; GCN1-NEXT: s_add_u32 s0, s0, 32 +; GCN1-NEXT: s_addc_u32 s1, s1, 0 +; GCN1-NEXT: v_mov_b32_e32 v0, s0 +; GCN1-NEXT: v_mov_b32_e32 v1, s1 +; GCN1-NEXT: flat_load_dwordx2 v[2:3], v[0:1] +; GCN1-NEXT: s_mov_b64 s[4:5], 0 +; GCN1-NEXT: .LBB18_1: ; %atomicrmw.start +; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: v_cmp_ge_i64_e32 vcc, s[2:3], v[2:3] +; GCN1-NEXT: v_mov_b32_e32 v0, s3 +; GCN1-NEXT: v_mov_b32_e32 v6, s2 +; GCN1-NEXT: v_mov_b32_e32 v5, s1 +; GCN1-NEXT: v_mov_b32_e32 v4, s0 +; GCN1-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc +; GCN1-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: buffer_wbinvl1_vol +; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3] +; GCN1-NEXT: v_mov_b32_e32 v3, v1 +; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GCN1-NEXT: v_mov_b32_e32 v2, v0 +; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5] +; GCN1-NEXT: s_cbranch_execnz .LBB18_1 +; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN1-NEXT: s_endpgm +; +; GCN2-LABEL: atomic_min_i64_addr64_offset: +; GCN2: ; %bb.0: ; %entry +; GCN2-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x34 +; GCN2-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24 +; GCN2-NEXT: s_waitcnt lgkmcnt(0) +; GCN2-NEXT: s_lshl_b64 s[4:5], s[4:5], 3 +; GCN2-NEXT: s_add_u32 s0, s0, s4 +; GCN2-NEXT: s_addc_u32 s1, s1, s5 +; GCN2-NEXT: s_add_u32 s0, s0, 32 +; GCN2-NEXT: s_addc_u32 s1, s1, 0 +; GCN2-NEXT: v_mov_b32_e32 v0, s0 +; GCN2-NEXT: v_mov_b32_e32 v1, s1 +; GCN2-NEXT: flat_load_dwordx2 v[2:3], v[0:1] +; GCN2-NEXT: s_mov_b64 s[4:5], 0 +; GCN2-NEXT: .LBB18_1: ; %atomicrmw.start +; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: v_cmp_ge_i64_e32 vcc, s[2:3], v[2:3] +; GCN2-NEXT: v_mov_b32_e32 v0, s3 +; GCN2-NEXT: v_mov_b32_e32 v6, s2 +; GCN2-NEXT: v_mov_b32_e32 v5, s1 +; GCN2-NEXT: v_mov_b32_e32 v4, s0 +; GCN2-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc +; GCN2-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: buffer_wbinvl1_vol +; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3] +; GCN2-NEXT: v_mov_b32_e32 v3, v1 +; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GCN2-NEXT: v_mov_b32_e32 v2, v0 +; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5] +; GCN2-NEXT: s_cbranch_execnz .LBB18_1 +; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN2-NEXT: s_endpgm +entry: + %ptr = getelementptr i64, i64* %out, i64 %index + %gep = getelementptr i64, i64* %ptr, i64 4 + %tmp0 = atomicrmw volatile min i64* %gep, i64 %in seq_cst + ret void +} + +define amdgpu_kernel void @atomic_min_i64_ret_addr64_offset(i64* %out, i64* %out2, i64 %in, i64 %index) { +; GCN1-LABEL: atomic_min_i64_ret_addr64_offset: +; GCN1: ; %bb.0: ; %entry +; GCN1-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x9 +; GCN1-NEXT: s_waitcnt lgkmcnt(0) +; GCN1-NEXT: s_lshl_b64 s[6:7], s[6:7], 3 +; GCN1-NEXT: s_add_u32 s0, s0, s6 +; GCN1-NEXT: s_addc_u32 s1, s1, s7 +; GCN1-NEXT: s_add_u32 s0, s0, 32 +; GCN1-NEXT: s_addc_u32 s1, s1, 0 +; GCN1-NEXT: v_mov_b32_e32 v0, s0 +; GCN1-NEXT: v_mov_b32_e32 v1, s1 +; GCN1-NEXT: flat_load_dwordx2 v[0:1], v[0:1] +; GCN1-NEXT: s_mov_b64 s[6:7], 0 +; 
GCN1-NEXT: .LBB19_1: ; %atomicrmw.start +; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: v_mov_b32_e32 v3, v1 +; GCN1-NEXT: v_mov_b32_e32 v2, v0 +; GCN1-NEXT: v_cmp_ge_i64_e32 vcc, s[4:5], v[2:3] +; GCN1-NEXT: v_mov_b32_e32 v0, s5 +; GCN1-NEXT: v_mov_b32_e32 v6, s4 +; GCN1-NEXT: v_mov_b32_e32 v5, s1 +; GCN1-NEXT: v_mov_b32_e32 v4, s0 +; GCN1-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc +; GCN1-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: buffer_wbinvl1_vol +; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3] +; GCN1-NEXT: s_or_b64 s[6:7], vcc, s[6:7] +; GCN1-NEXT: s_andn2_b64 exec, exec, s[6:7] +; GCN1-NEXT: s_cbranch_execnz .LBB19_1 +; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN1-NEXT: s_or_b64 exec, exec, s[6:7] +; GCN1-NEXT: v_mov_b32_e32 v2, s2 +; GCN1-NEXT: v_mov_b32_e32 v3, s3 +; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1] +; GCN1-NEXT: s_endpgm +; +; GCN2-LABEL: atomic_min_i64_ret_addr64_offset: +; GCN2: ; %bb.0: ; %entry +; GCN2-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24 +; GCN2-NEXT: s_waitcnt lgkmcnt(0) +; GCN2-NEXT: s_lshl_b64 s[6:7], s[6:7], 3 +; GCN2-NEXT: s_add_u32 s0, s0, s6 +; GCN2-NEXT: s_addc_u32 s1, s1, s7 +; GCN2-NEXT: s_add_u32 s0, s0, 32 +; GCN2-NEXT: s_addc_u32 s1, s1, 0 +; GCN2-NEXT: v_mov_b32_e32 v0, s0 +; GCN2-NEXT: v_mov_b32_e32 v1, s1 +; GCN2-NEXT: flat_load_dwordx2 v[0:1], v[0:1] +; GCN2-NEXT: s_mov_b64 s[6:7], 0 +; GCN2-NEXT: .LBB19_1: ; %atomicrmw.start +; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: v_mov_b32_e32 v3, v1 +; GCN2-NEXT: v_mov_b32_e32 v2, v0 +; GCN2-NEXT: v_cmp_ge_i64_e32 vcc, s[4:5], v[2:3] +; GCN2-NEXT: v_mov_b32_e32 v0, s5 +; GCN2-NEXT: v_mov_b32_e32 v6, s4 +; GCN2-NEXT: v_mov_b32_e32 v5, s1 +; GCN2-NEXT: v_mov_b32_e32 v4, s0 +; GCN2-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc +; GCN2-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: buffer_wbinvl1_vol +; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3] +; GCN2-NEXT: s_or_b64 s[6:7], vcc, s[6:7] +; GCN2-NEXT: s_andn2_b64 exec, exec, s[6:7] +; GCN2-NEXT: s_cbranch_execnz .LBB19_1 +; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN2-NEXT: s_or_b64 exec, exec, s[6:7] +; GCN2-NEXT: v_mov_b32_e32 v2, s2 +; GCN2-NEXT: v_mov_b32_e32 v3, s3 +; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1] +; GCN2-NEXT: s_endpgm +entry: + %ptr = getelementptr i64, i64* %out, i64 %index + %gep = getelementptr i64, i64* %ptr, i64 4 + %tmp0 = atomicrmw volatile min i64* %gep, i64 %in seq_cst + store i64 %tmp0, i64* %out2 + ret void +} + +define amdgpu_kernel void @atomic_min_i64(i64* %out, i64 %in) { +; GCN1-LABEL: atomic_min_i64: +; GCN1: ; %bb.0: ; %entry +; GCN1-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9 +; GCN1-NEXT: s_mov_b64 s[4:5], 0 +; GCN1-NEXT: s_waitcnt lgkmcnt(0) +; GCN1-NEXT: v_mov_b32_e32 v0, s0 +; GCN1-NEXT: v_mov_b32_e32 v1, s1 +; GCN1-NEXT: flat_load_dwordx2 v[2:3], v[0:1] +; GCN1-NEXT: .LBB20_1: ; %atomicrmw.start +; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: v_cmp_ge_i64_e32 vcc, s[2:3], v[2:3] +; GCN1-NEXT: v_mov_b32_e32 v0, s3 +; GCN1-NEXT: v_mov_b32_e32 v6, s2 +; GCN1-NEXT: v_mov_b32_e32 v5, s1 +; GCN1-NEXT: v_mov_b32_e32 v4, s0 
+; GCN1-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc +; GCN1-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: buffer_wbinvl1_vol +; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3] +; GCN1-NEXT: v_mov_b32_e32 v3, v1 +; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GCN1-NEXT: v_mov_b32_e32 v2, v0 +; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5] +; GCN1-NEXT: s_cbranch_execnz .LBB20_1 +; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN1-NEXT: s_endpgm +; +; GCN2-LABEL: atomic_min_i64: +; GCN2: ; %bb.0: ; %entry +; GCN2-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24 +; GCN2-NEXT: s_mov_b64 s[4:5], 0 +; GCN2-NEXT: s_waitcnt lgkmcnt(0) +; GCN2-NEXT: v_mov_b32_e32 v0, s0 +; GCN2-NEXT: v_mov_b32_e32 v1, s1 +; GCN2-NEXT: flat_load_dwordx2 v[2:3], v[0:1] +; GCN2-NEXT: .LBB20_1: ; %atomicrmw.start +; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: v_cmp_ge_i64_e32 vcc, s[2:3], v[2:3] +; GCN2-NEXT: v_mov_b32_e32 v0, s3 +; GCN2-NEXT: v_mov_b32_e32 v6, s2 +; GCN2-NEXT: v_mov_b32_e32 v5, s1 +; GCN2-NEXT: v_mov_b32_e32 v4, s0 +; GCN2-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc +; GCN2-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: buffer_wbinvl1_vol +; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3] +; GCN2-NEXT: v_mov_b32_e32 v3, v1 +; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GCN2-NEXT: v_mov_b32_e32 v2, v0 +; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5] +; GCN2-NEXT: s_cbranch_execnz .LBB20_1 +; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN2-NEXT: s_endpgm +entry: + %tmp0 = atomicrmw volatile min i64* %out, i64 %in seq_cst + ret void +} + +define amdgpu_kernel void @atomic_min_i64_ret(i64* %out, i64* %out2, i64 %in) { +; GCN1-LABEL: atomic_min_i64_ret: +; GCN1: ; %bb.0: ; %entry +; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9 +; GCN1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd +; GCN1-NEXT: s_mov_b64 s[2:3], 0 +; GCN1-NEXT: s_waitcnt lgkmcnt(0) +; GCN1-NEXT: v_mov_b32_e32 v0, s4 +; GCN1-NEXT: v_mov_b32_e32 v1, s5 +; GCN1-NEXT: flat_load_dwordx2 v[0:1], v[0:1] +; GCN1-NEXT: .LBB21_1: ; %atomicrmw.start +; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: v_mov_b32_e32 v3, v1 +; GCN1-NEXT: v_mov_b32_e32 v2, v0 +; GCN1-NEXT: v_cmp_ge_i64_e32 vcc, s[0:1], v[2:3] +; GCN1-NEXT: v_mov_b32_e32 v0, s1 +; GCN1-NEXT: v_mov_b32_e32 v6, s0 +; GCN1-NEXT: v_mov_b32_e32 v4, s4 +; GCN1-NEXT: v_mov_b32_e32 v5, s5 +; GCN1-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc +; GCN1-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: buffer_wbinvl1_vol +; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3] +; GCN1-NEXT: s_or_b64 s[2:3], vcc, s[2:3] +; GCN1-NEXT: s_andn2_b64 exec, exec, s[2:3] +; GCN1-NEXT: s_cbranch_execnz .LBB21_1 +; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN1-NEXT: s_or_b64 exec, exec, s[2:3] +; GCN1-NEXT: v_mov_b32_e32 v2, s6 +; GCN1-NEXT: v_mov_b32_e32 v3, s7 +; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1] +; GCN1-NEXT: s_endpgm +; +; GCN2-LABEL: atomic_min_i64_ret: +; GCN2: ; %bb.0: ; %entry +; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24 +; GCN2-NEXT: s_load_dwordx2 s[0:1], 
s[0:1], 0x34 +; GCN2-NEXT: s_mov_b64 s[2:3], 0 +; GCN2-NEXT: s_waitcnt lgkmcnt(0) +; GCN2-NEXT: v_mov_b32_e32 v0, s4 +; GCN2-NEXT: v_mov_b32_e32 v1, s5 +; GCN2-NEXT: flat_load_dwordx2 v[0:1], v[0:1] +; GCN2-NEXT: .LBB21_1: ; %atomicrmw.start +; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: v_mov_b32_e32 v3, v1 +; GCN2-NEXT: v_mov_b32_e32 v2, v0 +; GCN2-NEXT: v_cmp_ge_i64_e32 vcc, s[0:1], v[2:3] +; GCN2-NEXT: v_mov_b32_e32 v0, s1 +; GCN2-NEXT: v_mov_b32_e32 v6, s0 +; GCN2-NEXT: v_mov_b32_e32 v4, s4 +; GCN2-NEXT: v_mov_b32_e32 v5, s5 +; GCN2-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc +; GCN2-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: buffer_wbinvl1_vol +; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3] +; GCN2-NEXT: s_or_b64 s[2:3], vcc, s[2:3] +; GCN2-NEXT: s_andn2_b64 exec, exec, s[2:3] +; GCN2-NEXT: s_cbranch_execnz .LBB21_1 +; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN2-NEXT: s_or_b64 exec, exec, s[2:3] +; GCN2-NEXT: v_mov_b32_e32 v2, s6 +; GCN2-NEXT: v_mov_b32_e32 v3, s7 +; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1] +; GCN2-NEXT: s_endpgm +entry: + %tmp0 = atomicrmw volatile min i64* %out, i64 %in seq_cst + store i64 %tmp0, i64* %out2 + ret void +} + +define amdgpu_kernel void @atomic_min_i64_addr64(i64* %out, i64 %in, i64 %index) { +; GCN1-LABEL: atomic_min_i64_addr64: +; GCN1: ; %bb.0: ; %entry +; GCN1-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0xd +; GCN1-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9 +; GCN1-NEXT: s_waitcnt lgkmcnt(0) +; GCN1-NEXT: s_lshl_b64 s[4:5], s[4:5], 3 +; GCN1-NEXT: s_add_u32 s0, s0, s4 +; GCN1-NEXT: s_addc_u32 s1, s1, s5 +; GCN1-NEXT: v_mov_b32_e32 v0, s0 +; GCN1-NEXT: v_mov_b32_e32 v1, s1 +; GCN1-NEXT: flat_load_dwordx2 v[2:3], v[0:1] +; GCN1-NEXT: s_mov_b64 s[4:5], 0 +; GCN1-NEXT: .LBB22_1: ; %atomicrmw.start +; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: v_cmp_ge_i64_e32 vcc, s[2:3], v[2:3] +; GCN1-NEXT: v_mov_b32_e32 v0, s3 +; GCN1-NEXT: v_mov_b32_e32 v6, s2 +; GCN1-NEXT: v_mov_b32_e32 v5, s1 +; GCN1-NEXT: v_mov_b32_e32 v4, s0 +; GCN1-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc +; GCN1-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: buffer_wbinvl1_vol +; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3] +; GCN1-NEXT: v_mov_b32_e32 v3, v1 +; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GCN1-NEXT: v_mov_b32_e32 v2, v0 +; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5] +; GCN1-NEXT: s_cbranch_execnz .LBB22_1 +; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN1-NEXT: s_endpgm +; +; GCN2-LABEL: atomic_min_i64_addr64: +; GCN2: ; %bb.0: ; %entry +; GCN2-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x34 +; GCN2-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24 +; GCN2-NEXT: s_waitcnt lgkmcnt(0) +; GCN2-NEXT: s_lshl_b64 s[4:5], s[4:5], 3 +; GCN2-NEXT: s_add_u32 s0, s0, s4 +; GCN2-NEXT: s_addc_u32 s1, s1, s5 +; GCN2-NEXT: v_mov_b32_e32 v0, s0 +; GCN2-NEXT: v_mov_b32_e32 v1, s1 +; GCN2-NEXT: flat_load_dwordx2 v[2:3], v[0:1] +; GCN2-NEXT: s_mov_b64 s[4:5], 0 +; GCN2-NEXT: .LBB22_1: ; %atomicrmw.start +; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: v_cmp_ge_i64_e32 vcc, s[2:3], v[2:3] +; GCN2-NEXT: v_mov_b32_e32 v0, s3 
+; GCN2-NEXT: v_mov_b32_e32 v6, s2 +; GCN2-NEXT: v_mov_b32_e32 v5, s1 +; GCN2-NEXT: v_mov_b32_e32 v4, s0 +; GCN2-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc +; GCN2-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: buffer_wbinvl1_vol +; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3] +; GCN2-NEXT: v_mov_b32_e32 v3, v1 +; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GCN2-NEXT: v_mov_b32_e32 v2, v0 +; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5] +; GCN2-NEXT: s_cbranch_execnz .LBB22_1 +; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN2-NEXT: s_endpgm +entry: + %ptr = getelementptr i64, i64* %out, i64 %index + %tmp0 = atomicrmw volatile min i64* %ptr, i64 %in seq_cst + ret void +} + +define amdgpu_kernel void @atomic_min_i64_ret_addr64(i64* %out, i64* %out2, i64 %in, i64 %index) { +; GCN1-LABEL: atomic_min_i64_ret_addr64: +; GCN1: ; %bb.0: ; %entry +; GCN1-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x9 +; GCN1-NEXT: s_waitcnt lgkmcnt(0) +; GCN1-NEXT: s_lshl_b64 s[6:7], s[6:7], 3 +; GCN1-NEXT: s_add_u32 s0, s0, s6 +; GCN1-NEXT: s_addc_u32 s1, s1, s7 +; GCN1-NEXT: v_mov_b32_e32 v0, s0 +; GCN1-NEXT: v_mov_b32_e32 v1, s1 +; GCN1-NEXT: flat_load_dwordx2 v[0:1], v[0:1] +; GCN1-NEXT: s_mov_b64 s[6:7], 0 +; GCN1-NEXT: .LBB23_1: ; %atomicrmw.start +; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: v_mov_b32_e32 v3, v1 +; GCN1-NEXT: v_mov_b32_e32 v2, v0 +; GCN1-NEXT: v_cmp_ge_i64_e32 vcc, s[4:5], v[2:3] +; GCN1-NEXT: v_mov_b32_e32 v0, s5 +; GCN1-NEXT: v_mov_b32_e32 v6, s4 +; GCN1-NEXT: v_mov_b32_e32 v5, s1 +; GCN1-NEXT: v_mov_b32_e32 v4, s0 +; GCN1-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc +; GCN1-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: buffer_wbinvl1_vol +; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3] +; GCN1-NEXT: s_or_b64 s[6:7], vcc, s[6:7] +; GCN1-NEXT: s_andn2_b64 exec, exec, s[6:7] +; GCN1-NEXT: s_cbranch_execnz .LBB23_1 +; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN1-NEXT: s_or_b64 exec, exec, s[6:7] +; GCN1-NEXT: v_mov_b32_e32 v2, s2 +; GCN1-NEXT: v_mov_b32_e32 v3, s3 +; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1] +; GCN1-NEXT: s_endpgm +; +; GCN2-LABEL: atomic_min_i64_ret_addr64: +; GCN2: ; %bb.0: ; %entry +; GCN2-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24 +; GCN2-NEXT: s_waitcnt lgkmcnt(0) +; GCN2-NEXT: s_lshl_b64 s[6:7], s[6:7], 3 +; GCN2-NEXT: s_add_u32 s0, s0, s6 +; GCN2-NEXT: s_addc_u32 s1, s1, s7 +; GCN2-NEXT: v_mov_b32_e32 v0, s0 +; GCN2-NEXT: v_mov_b32_e32 v1, s1 +; GCN2-NEXT: flat_load_dwordx2 v[0:1], v[0:1] +; GCN2-NEXT: s_mov_b64 s[6:7], 0 +; GCN2-NEXT: .LBB23_1: ; %atomicrmw.start +; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: v_mov_b32_e32 v3, v1 +; GCN2-NEXT: v_mov_b32_e32 v2, v0 +; GCN2-NEXT: v_cmp_ge_i64_e32 vcc, s[4:5], v[2:3] +; GCN2-NEXT: v_mov_b32_e32 v0, s5 +; GCN2-NEXT: v_mov_b32_e32 v6, s4 +; GCN2-NEXT: v_mov_b32_e32 v5, s1 +; GCN2-NEXT: v_mov_b32_e32 v4, s0 +; GCN2-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc +; GCN2-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: buffer_wbinvl1_vol +; GCN2-NEXT: 
v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[6:7]
+; GCN2-NEXT: s_cbranch_execnz .LBB23_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_or_b64 exec, exec, s[6:7]
+; GCN2-NEXT: v_mov_b32_e32 v2, s2
+; GCN2-NEXT: v_mov_b32_e32 v3, s3
+; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GCN2-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i64, i64* %out, i64 %index
+ %tmp0 = atomicrmw volatile min i64* %ptr, i64 %in seq_cst
+ store i64 %tmp0, i64* %out2
+ ret void
+}
+
+define amdgpu_kernel void @atomic_umin_i64_offset(i64* %out, i64 %in) {
+; GCN1-LABEL: atomic_umin_i64_offset:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN1-NEXT: s_mov_b64 s[4:5], 0
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: s_add_u32 s0, s0, 32
+; GCN1-NEXT: s_addc_u32 s1, s1, 0
+; GCN1-NEXT: v_mov_b32_e32 v0, s0
+; GCN1-NEXT: v_mov_b32_e32 v1, s1
+; GCN1-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GCN1-NEXT: .LBB24_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_cmp_ge_u64_e32 vcc, s[2:3], v[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v0, s3
+; GCN1-NEXT: v_mov_b32_e32 v6, s2
+; GCN1-NEXT: v_mov_b32_e32 v5, s1
+; GCN1-NEXT: v_mov_b32_e32 v4, s0
+; GCN1-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v3, v1
+; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT: v_mov_b32_e32 v2, v0
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT: s_cbranch_execnz .LBB24_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_umin_i64_offset:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GCN2-NEXT: s_mov_b64 s[4:5], 0
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: s_add_u32 s0, s0, 32
+; GCN2-NEXT: s_addc_u32 s1, s1, 0
+; GCN2-NEXT: v_mov_b32_e32 v0, s0
+; GCN2-NEXT: v_mov_b32_e32 v1, s1
+; GCN2-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GCN2-NEXT: .LBB24_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_cmp_ge_u64_e32 vcc, s[2:3], v[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v0, s3
+; GCN2-NEXT: v_mov_b32_e32 v6, s2
+; GCN2-NEXT: v_mov_b32_e32 v5, s1
+; GCN2-NEXT: v_mov_b32_e32 v4, s0
+; GCN2-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN2-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v3, v1
+; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT: v_mov_b32_e32 v2, v0
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT: s_cbranch_execnz .LBB24_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_endpgm
+entry:
+ %gep = getelementptr i64, i64* %out, i64 4
+ %tmp0 = atomicrmw volatile umin i64* %gep, i64 %in seq_cst
+ ret void
+}
+
+define amdgpu_kernel void @atomic_umin_i64_ret_offset(i64* %out, i64* %out2, i64 %in) {
+; GCN1-LABEL: atomic_umin_i64_ret_offset:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: s_add_u32 s2, s4, 32
+; GCN1-NEXT: s_addc_u32 s3, s5, 0
+; GCN1-NEXT: v_mov_b32_e32 v0, s2
+; GCN1-NEXT: v_mov_b32_e32 v1, s3
+; GCN1-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
+; GCN1-NEXT: s_mov_b64 s[4:5], 0
+; GCN1-NEXT: .LBB25_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_mov_b32_e32 v3, v1
+; GCN1-NEXT: v_mov_b32_e32 v2, v0
+; GCN1-NEXT: v_cmp_ge_u64_e32 vcc, s[0:1], v[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v0, s1
+; GCN1-NEXT: v_mov_b32_e32 v6, s0
+; GCN1-NEXT: v_mov_b32_e32 v5, s3
+; GCN1-NEXT: v_mov_b32_e32 v4, s2
+; GCN1-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT: s_cbranch_execnz .LBB25_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_or_b64 exec, exec, s[4:5]
+; GCN1-NEXT: v_mov_b32_e32 v2, s6
+; GCN1-NEXT: v_mov_b32_e32 v3, s7
+; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_umin_i64_ret_offset:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN2-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: s_add_u32 s2, s4, 32
+; GCN2-NEXT: s_addc_u32 s3, s5, 0
+; GCN2-NEXT: v_mov_b32_e32 v0, s2
+; GCN2-NEXT: v_mov_b32_e32 v1, s3
+; GCN2-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
+; GCN2-NEXT: s_mov_b64 s[4:5], 0
+; GCN2-NEXT: .LBB25_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_mov_b32_e32 v3, v1
+; GCN2-NEXT: v_mov_b32_e32 v2, v0
+; GCN2-NEXT: v_cmp_ge_u64_e32 vcc, s[0:1], v[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v0, s1
+; GCN2-NEXT: v_mov_b32_e32 v6, s0
+; GCN2-NEXT: v_mov_b32_e32 v5, s3
+; GCN2-NEXT: v_mov_b32_e32 v4, s2
+; GCN2-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN2-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT: s_cbranch_execnz .LBB25_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_or_b64 exec, exec, s[4:5]
+; GCN2-NEXT: v_mov_b32_e32 v2, s6
+; GCN2-NEXT: v_mov_b32_e32 v3, s7
+; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GCN2-NEXT: s_endpgm
+entry:
+ %gep = getelementptr i64, i64* %out, i64 4
+ %tmp0 = atomicrmw volatile umin i64* %gep, i64 %in seq_cst
+ store i64 %tmp0, i64* %out2
+ ret void
+}
+
+define amdgpu_kernel void @atomic_umin_i64_addr64_offset(i64* %out, i64 %in, i64 %index) {
+; GCN1-LABEL: atomic_umin_i64_addr64_offset:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0xd
+; GCN1-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: s_lshl_b64 s[4:5], s[4:5], 3
+; GCN1-NEXT: s_add_u32 s0, s0, s4
+; GCN1-NEXT: s_addc_u32 s1, s1, s5
+; GCN1-NEXT: s_add_u32 s0, s0, 32
+; GCN1-NEXT: s_addc_u32 s1, s1, 0
+; GCN1-NEXT: v_mov_b32_e32 v0, s0
+; GCN1-NEXT: v_mov_b32_e32 v1, s1
+; GCN1-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GCN1-NEXT: s_mov_b64 s[4:5], 0
+; GCN1-NEXT: .LBB26_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_cmp_ge_u64_e32 vcc, s[2:3], v[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v0, s3
+; GCN1-NEXT: v_mov_b32_e32 v6, s2
+; GCN1-NEXT: v_mov_b32_e32 v5, s1
+; GCN1-NEXT: v_mov_b32_e32 v4, s0
+; GCN1-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v3, v1
+; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT: v_mov_b32_e32 v2, v0
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT: s_cbranch_execnz .LBB26_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_umin_i64_addr64_offset:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x34
+; GCN2-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: s_lshl_b64 s[4:5], s[4:5], 3
+; GCN2-NEXT: s_add_u32 s0, s0, s4
+; GCN2-NEXT: s_addc_u32 s1, s1, s5
+; GCN2-NEXT: s_add_u32 s0, s0, 32
+; GCN2-NEXT: s_addc_u32 s1, s1, 0
+; GCN2-NEXT: v_mov_b32_e32 v0, s0
+; GCN2-NEXT: v_mov_b32_e32 v1, s1
+; GCN2-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GCN2-NEXT: s_mov_b64 s[4:5], 0
+; GCN2-NEXT: .LBB26_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_cmp_ge_u64_e32 vcc, s[2:3], v[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v0, s3
+; GCN2-NEXT: v_mov_b32_e32 v6, s2
+; GCN2-NEXT: v_mov_b32_e32 v5, s1
+; GCN2-NEXT: v_mov_b32_e32 v4, s0
+; GCN2-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN2-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v3, v1
+; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT: v_mov_b32_e32 v2, v0
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT: s_cbranch_execnz .LBB26_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i64, i64* %out, i64 %index
+ %gep = getelementptr i64, i64* %ptr, i64 4
+ %tmp0 = atomicrmw volatile umin i64* %gep, i64 %in seq_cst
+ ret void
+}
+
+define amdgpu_kernel void @atomic_umin_i64_ret_addr64_offset(i64* %out, i64* %out2, i64 %in, i64 %index) {
+; GCN1-LABEL: atomic_umin_i64_ret_addr64_offset:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x9
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; GCN1-NEXT: s_add_u32 s0, s0, s6
+; GCN1-NEXT: s_addc_u32 s1, s1, s7
+; GCN1-NEXT: s_add_u32 s0, s0, 32
+; GCN1-NEXT: s_addc_u32 s1, s1, 0
+; GCN1-NEXT: v_mov_b32_e32 v0, s0
+; GCN1-NEXT: v_mov_b32_e32 v1, s1
+; GCN1-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
+; GCN1-NEXT: s_mov_b64 s[6:7], 0
+; GCN1-NEXT: .LBB27_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_mov_b32_e32 v3, v1
+; GCN1-NEXT: v_mov_b32_e32 v2, v0
+; GCN1-NEXT: v_cmp_ge_u64_e32 vcc, s[4:5], v[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v0, s5
+; GCN1-NEXT: v_mov_b32_e32 v6, s4
+; GCN1-NEXT: v_mov_b32_e32 v5, s1
+; GCN1-NEXT: v_mov_b32_e32 v4, s0
+; GCN1-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[6:7]
+; GCN1-NEXT: s_cbranch_execnz .LBB27_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_or_b64 exec, exec, s[6:7]
+; GCN1-NEXT: v_mov_b32_e32 v2, s2
+; GCN1-NEXT: v_mov_b32_e32 v3, s3
+; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_umin_i64_ret_addr64_offset:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; GCN2-NEXT: s_add_u32 s0, s0, s6
+; GCN2-NEXT: s_addc_u32 s1, s1, s7
+; GCN2-NEXT: s_add_u32 s0, s0, 32
+; GCN2-NEXT: s_addc_u32 s1, s1, 0
+; GCN2-NEXT: v_mov_b32_e32 v0, s0
+; GCN2-NEXT: v_mov_b32_e32 v1, s1
+; GCN2-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
+; GCN2-NEXT: s_mov_b64 s[6:7], 0
+; GCN2-NEXT: .LBB27_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_mov_b32_e32 v3, v1
+; GCN2-NEXT: v_mov_b32_e32 v2, v0
+; GCN2-NEXT: v_cmp_ge_u64_e32 vcc, s[4:5], v[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v0, s5
+; GCN2-NEXT: v_mov_b32_e32 v6, s4
+; GCN2-NEXT: v_mov_b32_e32 v5, s1
+; GCN2-NEXT: v_mov_b32_e32 v4, s0
+; GCN2-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN2-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[6:7]
+; GCN2-NEXT: s_cbranch_execnz .LBB27_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_or_b64 exec, exec, s[6:7]
+; GCN2-NEXT: v_mov_b32_e32 v2, s2
+; GCN2-NEXT: v_mov_b32_e32 v3, s3
+; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GCN2-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i64, i64* %out, i64 %index
+ %gep = getelementptr i64, i64* %ptr, i64 4
+ %tmp0 = atomicrmw volatile umin i64* %gep, i64 %in seq_cst
+ store i64 %tmp0, i64* %out2
+ ret void
+}
+
+define amdgpu_kernel void @atomic_umin_i64(i64* %out, i64 %in) {
+; GCN1-LABEL: atomic_umin_i64:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN1-NEXT: s_mov_b64 s[4:5], 0
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: v_mov_b32_e32 v0, s0
+; GCN1-NEXT: v_mov_b32_e32 v1, s1
+; GCN1-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GCN1-NEXT: .LBB28_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_cmp_ge_u64_e32 vcc, s[2:3], v[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v0, s3
+; GCN1-NEXT: v_mov_b32_e32 v6, s2
+; GCN1-NEXT: v_mov_b32_e32 v5, s1
+; GCN1-NEXT: v_mov_b32_e32 v4, s0
+; GCN1-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v3, v1
+; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT: v_mov_b32_e32 v2, v0
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT: s_cbranch_execnz .LBB28_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_umin_i64:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GCN2-NEXT: s_mov_b64 s[4:5], 0
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: v_mov_b32_e32 v0, s0
+; GCN2-NEXT: v_mov_b32_e32 v1, s1
+; GCN2-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GCN2-NEXT: .LBB28_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_cmp_ge_u64_e32 vcc, s[2:3], v[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v0, s3
+; GCN2-NEXT: v_mov_b32_e32 v6, s2
+; GCN2-NEXT: v_mov_b32_e32 v5, s1
+; GCN2-NEXT: v_mov_b32_e32 v4, s0
+; GCN2-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN2-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v3, v1
+; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT: v_mov_b32_e32 v2, v0
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT: s_cbranch_execnz .LBB28_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_endpgm
+entry:
+ %tmp0 = atomicrmw volatile umin i64* %out, i64 %in seq_cst
+ ret void
+}
+
+define amdgpu_kernel void @atomic_umin_i64_ret(i64* %out, i64* %out2, i64 %in) {
+; GCN1-LABEL: atomic_umin_i64_ret:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
+; GCN1-NEXT: s_mov_b64 s[2:3], 0
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: v_mov_b32_e32 v0, s4
+; GCN1-NEXT: v_mov_b32_e32 v1, s5
+; GCN1-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
+; GCN1-NEXT: .LBB29_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_mov_b32_e32 v3, v1
+; GCN1-NEXT: v_mov_b32_e32 v2, v0
+; GCN1-NEXT: v_cmp_ge_u64_e32 vcc, s[0:1], v[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v0, s1
+; GCN1-NEXT: v_mov_b32_e32 v6, s0
+; GCN1-NEXT: v_mov_b32_e32 v4, s4
+; GCN1-NEXT: v_mov_b32_e32 v5, s5
+; GCN1-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN1-NEXT: s_cbranch_execnz .LBB29_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_or_b64 exec, exec, s[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v2, s6
+; GCN1-NEXT: v_mov_b32_e32 v3, s7
+; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_umin_i64_ret:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN2-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
+; GCN2-NEXT: s_mov_b64 s[2:3], 0
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: v_mov_b32_e32 v0, s4
+; GCN2-NEXT: v_mov_b32_e32 v1, s5
+; GCN2-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
+; GCN2-NEXT: .LBB29_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_mov_b32_e32 v3, v1
+; GCN2-NEXT: v_mov_b32_e32 v2, v0
+; GCN2-NEXT: v_cmp_ge_u64_e32 vcc, s[0:1], v[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v0, s1
+; GCN2-NEXT: v_mov_b32_e32 v6, s0
+; GCN2-NEXT: v_mov_b32_e32 v4, s4
+; GCN2-NEXT: v_mov_b32_e32 v5, s5
+; GCN2-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN2-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN2-NEXT: s_cbranch_execnz .LBB29_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_or_b64 exec, exec, s[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v2, s6
+; GCN2-NEXT: v_mov_b32_e32 v3, s7
+; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GCN2-NEXT: s_endpgm
+entry:
+ %tmp0 = atomicrmw volatile umin i64* %out, i64 %in seq_cst
+ store i64 %tmp0, i64* %out2
+ ret void
+}
+
+define amdgpu_kernel void @atomic_umin_i64_addr64(i64* %out, i64 %in, i64 %index) {
+; GCN1-LABEL: atomic_umin_i64_addr64:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0xd
+; GCN1-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: s_lshl_b64 s[4:5], s[4:5], 3
+; GCN1-NEXT: s_add_u32 s0, s0, s4
+; GCN1-NEXT: s_addc_u32 s1, s1, s5
+; GCN1-NEXT: v_mov_b32_e32 v0, s0
+; GCN1-NEXT: v_mov_b32_e32 v1, s1
+; GCN1-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GCN1-NEXT: s_mov_b64 s[4:5], 0
+; GCN1-NEXT: .LBB30_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_cmp_ge_u64_e32 vcc, s[2:3], v[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v0, s3
+; GCN1-NEXT: v_mov_b32_e32 v6, s2
+; GCN1-NEXT: v_mov_b32_e32 v5, s1
+; GCN1-NEXT: v_mov_b32_e32 v4, s0
+; GCN1-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v3, v1
+; GCN1-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GCN1-NEXT: v_mov_b32_e32 v2, v0
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GCN1-NEXT: s_cbranch_execnz .LBB30_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_umin_i64_addr64:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x34
+; GCN2-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: s_lshl_b64 s[4:5], s[4:5], 3
+; GCN2-NEXT: s_add_u32 s0, s0, s4
+; GCN2-NEXT: s_addc_u32 s1, s1, s5
+; GCN2-NEXT: v_mov_b32_e32 v0, s0
+; GCN2-NEXT: v_mov_b32_e32 v1, s1
+; GCN2-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
+; GCN2-NEXT: s_mov_b64 s[4:5], 0
+; GCN2-NEXT: .LBB30_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_cmp_ge_u64_e32 vcc, s[2:3], v[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v0, s3
+; GCN2-NEXT: v_mov_b32_e32 v6, s2
+; GCN2-NEXT: v_mov_b32_e32 v5, s1
+; GCN2-NEXT: v_mov_b32_e32 v4, s0
+; GCN2-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN2-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v3, v1
+; GCN2-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GCN2-NEXT: v_mov_b32_e32 v2, v0
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GCN2-NEXT: s_cbranch_execnz .LBB30_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i64, i64* %out, i64 %index
+ %tmp0 = atomicrmw volatile umin i64* %ptr, i64 %in seq_cst
+ ret void
+}
+
+define amdgpu_kernel void @atomic_umin_i64_ret_addr64(i64* %out, i64* %out2, i64 %in, i64 %index) {
+; GCN1-LABEL: atomic_umin_i64_ret_addr64:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x9
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; GCN1-NEXT: s_add_u32 s0, s0, s6
+; GCN1-NEXT: s_addc_u32 s1, s1, s7
+; GCN1-NEXT: v_mov_b32_e32 v0, s0
+; GCN1-NEXT: v_mov_b32_e32 v1, s1
+; GCN1-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
+; GCN1-NEXT: s_mov_b64 s[6:7], 0
+; GCN1-NEXT: .LBB31_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_mov_b32_e32 v3, v1
+; GCN1-NEXT: v_mov_b32_e32 v2, v0
+; GCN1-NEXT: v_cmp_ge_u64_e32 vcc, s[4:5], v[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v0, s5
+; GCN1-NEXT: v_mov_b32_e32 v6, s4
+; GCN1-NEXT: v_mov_b32_e32 v5, s1
+; GCN1-NEXT: v_mov_b32_e32 v4, s0
+; GCN1-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN1-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN1-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[6:7]
+; GCN1-NEXT: s_cbranch_execnz .LBB31_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_or_b64 exec, exec, s[6:7]
+; GCN1-NEXT: v_mov_b32_e32 v2, s2
+; GCN1-NEXT: v_mov_b32_e32 v3, s3
+; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_umin_i64_ret_addr64:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; GCN2-NEXT: s_add_u32 s0, s0, s6
+; GCN2-NEXT: s_addc_u32 s1, s1, s7
+; GCN2-NEXT: v_mov_b32_e32 v0, s0
+; GCN2-NEXT: v_mov_b32_e32 v1, s1
+; GCN2-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
+; GCN2-NEXT: s_mov_b64 s[6:7], 0
+; GCN2-NEXT: .LBB31_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_mov_b32_e32 v3, v1
+; GCN2-NEXT: v_mov_b32_e32 v2, v0
+; GCN2-NEXT: v_cmp_ge_u64_e32 vcc, s[4:5], v[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v0, s5
+; GCN2-NEXT: v_mov_b32_e32 v6, s4
+; GCN2-NEXT: v_mov_b32_e32 v5, s1
+; GCN2-NEXT: v_mov_b32_e32 v4, s0
+; GCN2-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GCN2-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GCN2-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[6:7]
+; GCN2-NEXT: s_cbranch_execnz .LBB31_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_or_b64 exec, exec, s[6:7]
+; GCN2-NEXT: v_mov_b32_e32 v2, s2
+; GCN2-NEXT: v_mov_b32_e32 v3, s3
+; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; GCN2-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i64, i64* %out, i64 %index
+ %tmp0 = atomicrmw volatile umin i64* %ptr, i64 %in seq_cst
+ store i64 %tmp0, i64* %out2
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/flat_atomics_min_max_system.ll b/llvm/test/CodeGen/AMDGPU/flat_atomics_min_max_system.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/flat_atomics_min_max_system.ll
@@ -0,0 +1,3252 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN1 %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN2 %s
+; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN3 %s
+
+define amdgpu_kernel void @atomic_max_i32_offset(i32* %out, i32 %in) {
+; GCN1-LABEL: atomic_max_i32_offset:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x9
+; GCN1-NEXT: s_load_dword s4, s[0:1], 0xb
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: s_add_u32 s0, s2, 16
+; GCN1-NEXT: s_addc_u32 s1, s3, 0
+; GCN1-NEXT: v_mov_b32_e32 v0, s0
+; GCN1-NEXT: v_mov_b32_e32 v1, s1
+; GCN1-NEXT: flat_load_dword v1, v[0:1]
+; GCN1-NEXT: s_mov_b64 s[2:3], 0
+; GCN1-NEXT: .LBB0_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: v_mov_b32_e32 v3, s1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_max_i32_e32 v0, s4, v1
+; GCN1-NEXT: v_mov_b32_e32 v2, s0
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v1, v0
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN1-NEXT: s_cbranch_execnz .LBB0_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_max_i32_offset:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
+; GCN2-NEXT: s_load_dword s4, s[0:1], 0x2c
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: s_add_u32 s0, s2, 16
+; GCN2-NEXT: s_addc_u32 s1, s3, 0
+; GCN2-NEXT: v_mov_b32_e32 v0, s0
+; GCN2-NEXT: v_mov_b32_e32 v1, s1
+; GCN2-NEXT: flat_load_dword v1, v[0:1]
+; GCN2-NEXT: s_mov_b64 s[2:3], 0
+; GCN2-NEXT: .LBB0_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: v_mov_b32_e32 v3, s1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_max_i32_e32 v0, s4, v1
+; GCN2-NEXT: v_mov_b32_e32 v2, s0
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v1, v0
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN2-NEXT: s_cbranch_execnz .LBB0_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_endpgm
+;
+; GCN3-LABEL: atomic_max_i32_offset:
+; GCN3: ; %bb.0: ; %entry
+; GCN3-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
+; GCN3-NEXT: s_load_dword s4, s[0:1], 0x2c
+; GCN3-NEXT: s_mov_b64 s[0:1], 0
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
+; GCN3-NEXT: v_mov_b32_e32 v0, s2
+; GCN3-NEXT: v_mov_b32_e32 v1, s3
+; GCN3-NEXT: flat_load_dword v1, v[0:1] offset:16
+; GCN3-NEXT: .LBB0_1: ; %atomicrmw.start
+; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT: v_mov_b32_e32 v2, s2
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: v_max_i32_e32 v0, s4, v1
+; GCN3-NEXT: v_mov_b32_e32 v3, s3
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] offset:16 glc
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN3-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GCN3-NEXT: v_mov_b32_e32 v1, v0
+; GCN3-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GCN3-NEXT: s_cbranch_execnz .LBB0_1
+; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT: s_endpgm
+entry:
+ %gep = getelementptr i32, i32* %out, i32 4
+ %val = atomicrmw volatile max i32* %gep, i32 %in seq_cst
+ ret void
+}
+
+define amdgpu_kernel void @atomic_max_i32_ret_offset(i32* %out, i32* %out2, i32 %in) {
+; GCN1-LABEL: atomic_max_i32_ret_offset:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN1-NEXT: s_load_dword s8, s[0:1], 0xd
+; GCN1-NEXT: s_mov_b64 s[2:3], 0
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: s_add_u32 s0, s4, 16
+; GCN1-NEXT: s_addc_u32 s1, s5, 0
+; GCN1-NEXT: v_mov_b32_e32 v0, s0
+; GCN1-NEXT: v_mov_b32_e32 v1, s1
+; GCN1-NEXT: flat_load_dword v0, v[0:1]
+; GCN1-NEXT: .LBB1_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_mov_b32_e32 v1, v0
+; GCN1-NEXT: v_mov_b32_e32 v3, s1
+; GCN1-NEXT: v_mov_b32_e32 v2, s0
+; GCN1-NEXT: v_max_i32_e32 v0, s8, v1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN1-NEXT: s_cbranch_execnz .LBB1_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_or_b64 exec, exec, s[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v1, s6
+; GCN1-NEXT: v_mov_b32_e32 v2, s7
+; GCN1-NEXT: flat_store_dword v[1:2], v0
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_max_i32_ret_offset:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN2-NEXT: s_load_dword s8, s[0:1], 0x34
+; GCN2-NEXT: s_mov_b64 s[2:3], 0
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: s_add_u32 s0, s4, 16
+; GCN2-NEXT: s_addc_u32 s1, s5, 0
+; GCN2-NEXT: v_mov_b32_e32 v0, s0
+; GCN2-NEXT: v_mov_b32_e32 v1, s1
+; GCN2-NEXT: flat_load_dword v0, v[0:1]
+; GCN2-NEXT: .LBB1_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_mov_b32_e32 v1, v0
+; GCN2-NEXT: v_mov_b32_e32 v3, s1
+; GCN2-NEXT: v_mov_b32_e32 v2, s0
+; GCN2-NEXT: v_max_i32_e32 v0, s8, v1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN2-NEXT: s_cbranch_execnz .LBB1_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_or_b64 exec, exec, s[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v1, s6
+; GCN2-NEXT: v_mov_b32_e32 v2, s7
+; GCN2-NEXT: flat_store_dword v[1:2], v0
+; GCN2-NEXT: s_endpgm
+;
+; GCN3-LABEL: atomic_max_i32_ret_offset:
+; GCN3: ; %bb.0: ; %entry
+; GCN3-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN3-NEXT: s_load_dword s2, s[0:1], 0x34
+; GCN3-NEXT: s_mov_b64 s[0:1], 0
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
+; GCN3-NEXT: v_mov_b32_e32 v0, s4
+; GCN3-NEXT: v_mov_b32_e32 v1, s5
+; GCN3-NEXT: flat_load_dword v0, v[0:1] offset:16
+; GCN3-NEXT: .LBB1_1: ; %atomicrmw.start
+; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: v_mov_b32_e32 v1, v0
+; GCN3-NEXT: v_mov_b32_e32 v2, s4
+; GCN3-NEXT: v_mov_b32_e32 v3, s5
+; GCN3-NEXT: v_max_i32_e32 v0, s2, v1
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] offset:16 glc
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN3-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GCN3-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GCN3-NEXT: s_cbranch_execnz .LBB1_1
+; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT: s_or_b64 exec, exec, s[0:1]
+; GCN3-NEXT: v_mov_b32_e32 v1, s6
+; GCN3-NEXT: v_mov_b32_e32 v2, s7
+; GCN3-NEXT: flat_store_dword v[1:2], v0
+; GCN3-NEXT: s_endpgm
+entry:
+ %gep = getelementptr i32, i32* %out, i32 4
+ %val = atomicrmw volatile max i32* %gep, i32 %in seq_cst
+ store i32 %val, i32* %out2
+ ret void
+}
+
+define amdgpu_kernel void @atomic_max_i32_addr64_offset(i32* %out, i32 %in, i64 %index) {
+; GCN1-LABEL: atomic_max_i32_addr64_offset:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xd
+; GCN1-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x9
+; GCN1-NEXT: s_load_dword s4, s[0:1], 0xb
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GCN1-NEXT: s_add_u32 s0, s6, s0
+; GCN1-NEXT: s_addc_u32 s1, s7, s1
+; GCN1-NEXT: s_add_u32 s0, s0, 16
+; GCN1-NEXT: s_addc_u32 s1, s1, 0
+; GCN1-NEXT: v_mov_b32_e32 v0, s0
+; GCN1-NEXT: v_mov_b32_e32 v1, s1
+; GCN1-NEXT: flat_load_dword v1, v[0:1]
+; GCN1-NEXT: s_mov_b64 s[2:3], 0
+; GCN1-NEXT: .LBB2_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: v_mov_b32_e32 v3, s1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_max_i32_e32 v0, s4, v1
+; GCN1-NEXT: v_mov_b32_e32 v2, s0
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v1, v0
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN1-NEXT: s_cbranch_execnz .LBB2_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_max_i32_addr64_offset:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GCN2-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x24
+; GCN2-NEXT: s_load_dword s4, s[0:1], 0x2c
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GCN2-NEXT: s_add_u32 s0, s6, s0
+; GCN2-NEXT: s_addc_u32 s1, s7, s1
+; GCN2-NEXT: s_add_u32 s0, s0, 16
+; GCN2-NEXT: s_addc_u32 s1, s1, 0
+; GCN2-NEXT: v_mov_b32_e32 v0, s0
+; GCN2-NEXT: v_mov_b32_e32 v1, s1
+; GCN2-NEXT: flat_load_dword v1, v[0:1]
+; GCN2-NEXT: s_mov_b64 s[2:3], 0
+; GCN2-NEXT: .LBB2_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: v_mov_b32_e32 v3, s1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_max_i32_e32 v0, s4, v1
+; GCN2-NEXT: v_mov_b32_e32 v2, s0
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v1, v0
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN2-NEXT: s_cbranch_execnz .LBB2_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_endpgm
+;
+; GCN3-LABEL: atomic_max_i32_addr64_offset:
+; GCN3: ; %bb.0: ; %entry
+; GCN3-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GCN3-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x24
+; GCN3-NEXT: s_load_dword s4, s[0:1], 0x2c
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
+; GCN3-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GCN3-NEXT: s_add_u32 s0, s6, s0
+; GCN3-NEXT: s_addc_u32 s1, s7, s1
+; GCN3-NEXT: v_mov_b32_e32 v0, s0
+; GCN3-NEXT: v_mov_b32_e32 v1, s1
+; GCN3-NEXT: flat_load_dword v1, v[0:1] offset:16
+; GCN3-NEXT: s_mov_b64 s[2:3], 0
+; GCN3-NEXT: .LBB2_1: ; %atomicrmw.start
+; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT: v_mov_b32_e32 v3, s1
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: v_max_i32_e32 v0, s4, v1
+; GCN3-NEXT: v_mov_b32_e32 v2, s0
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] offset:16 glc
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN3-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN3-NEXT: v_mov_b32_e32 v1, v0
+; GCN3-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN3-NEXT: s_cbranch_execnz .LBB2_1
+; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i32, i32* %out, i64 %index
+ %gep = getelementptr i32, i32* %ptr, i32 4
+ %val = atomicrmw volatile max i32* %gep, i32 %in seq_cst
+ ret void
+}
+
+define amdgpu_kernel void @atomic_max_i32_ret_addr64_offset(i32* %out, i32* %out2, i32 %in, i64 %index) {
+; GCN1-LABEL: atomic_max_i32_ret_addr64_offset:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xf
+; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN1-NEXT: s_load_dword s8, s[0:1], 0xd
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GCN1-NEXT: s_add_u32 s0, s4, s0
+; GCN1-NEXT: s_addc_u32 s1, s5, s1
+; GCN1-NEXT: s_add_u32 s0, s0, 16
+; GCN1-NEXT: s_addc_u32 s1, s1, 0
+; GCN1-NEXT: v_mov_b32_e32 v0, s0
+; GCN1-NEXT: v_mov_b32_e32 v1, s1
+; GCN1-NEXT: flat_load_dword v0, v[0:1]
+; GCN1-NEXT: s_mov_b64 s[2:3], 0
+; GCN1-NEXT: .LBB3_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_mov_b32_e32 v1, v0
+; GCN1-NEXT: v_mov_b32_e32 v3, s1
+; GCN1-NEXT: v_mov_b32_e32 v2, s0
+; GCN1-NEXT: v_max_i32_e32 v0, s8, v1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN1-NEXT: s_cbranch_execnz .LBB3_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_or_b64 exec, exec, s[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v1, s6
+; GCN1-NEXT: v_mov_b32_e32 v2, s7
+; GCN1-NEXT: flat_store_dword v[1:2], v0
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_max_i32_ret_addr64_offset:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x3c
+; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN2-NEXT: s_load_dword s8, s[0:1], 0x34
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GCN2-NEXT: s_add_u32 s0, s4, s0
+; GCN2-NEXT: s_addc_u32 s1, s5, s1
+; GCN2-NEXT: s_add_u32 s0, s0, 16
+; GCN2-NEXT: s_addc_u32 s1, s1, 0
+; GCN2-NEXT: v_mov_b32_e32 v0, s0
+; GCN2-NEXT: v_mov_b32_e32 v1, s1
+; GCN2-NEXT: flat_load_dword v0, v[0:1]
+; GCN2-NEXT: s_mov_b64 s[2:3], 0
+; GCN2-NEXT: .LBB3_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_mov_b32_e32 v1, v0
+; GCN2-NEXT: v_mov_b32_e32 v3, s1
+; GCN2-NEXT: v_mov_b32_e32 v2, s0
+; GCN2-NEXT: v_max_i32_e32 v0, s8, v1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN2-NEXT: s_cbranch_execnz .LBB3_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_or_b64 exec, exec, s[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v1, s6
+; GCN2-NEXT: v_mov_b32_e32 v2, s7
+; GCN2-NEXT: flat_store_dword v[1:2], v0
+; GCN2-NEXT: s_endpgm
+;
+; GCN3-LABEL: atomic_max_i32_ret_addr64_offset:
+; GCN3: ; %bb.0: ; %entry
+; GCN3-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x3c
+; GCN3-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN3-NEXT: s_load_dword s8, s[0:1], 0x34
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
+; GCN3-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GCN3-NEXT: s_add_u32 s0, s4, s0
+; GCN3-NEXT: s_addc_u32 s1, s5, s1
+; GCN3-NEXT: v_mov_b32_e32 v0, s0
+; GCN3-NEXT: v_mov_b32_e32 v1, s1
+; GCN3-NEXT: flat_load_dword v0, v[0:1] offset:16
+; GCN3-NEXT: s_mov_b64 s[2:3], 0
+; GCN3-NEXT: .LBB3_1: ; %atomicrmw.start
+; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: v_mov_b32_e32 v1, v0
+; GCN3-NEXT: v_mov_b32_e32 v3, s1
+; GCN3-NEXT: v_mov_b32_e32 v2, s0
+; GCN3-NEXT: v_max_i32_e32 v0, s8, v1
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] offset:16 glc
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN3-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN3-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN3-NEXT: s_cbranch_execnz .LBB3_1
+; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT: s_or_b64 exec, exec, s[2:3]
+; GCN3-NEXT: v_mov_b32_e32 v1, s6
+; GCN3-NEXT: v_mov_b32_e32 v2, s7
+; GCN3-NEXT: flat_store_dword v[1:2], v0
+; GCN3-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i32, i32* %out, i64 %index
+ %gep = getelementptr i32, i32* %ptr, i32 4
+ %val = atomicrmw volatile max i32* %gep, i32 %in seq_cst
+ store i32 %val, i32* %out2
+ ret void
+}
+
+define amdgpu_kernel void @atomic_max_i32(i32* %out, i32 %in) {
+; GCN1-LABEL: atomic_max_i32:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x9
+; GCN1-NEXT: s_load_dword s4, s[0:1], 0xb
+; GCN1-NEXT: s_mov_b64 s[0:1], 0
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: v_mov_b32_e32 v0, s2
+; GCN1-NEXT: v_mov_b32_e32 v1, s3
+; GCN1-NEXT: flat_load_dword v1, v[0:1]
+; GCN1-NEXT: .LBB4_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: v_mov_b32_e32 v2, s2
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_max_i32_e32 v0, s4, v1
+; GCN1-NEXT: v_mov_b32_e32 v3, s3
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GCN1-NEXT: v_mov_b32_e32 v1, v0
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GCN1-NEXT: s_cbranch_execnz .LBB4_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_max_i32:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
+; GCN2-NEXT: s_load_dword s4, s[0:1], 0x2c
+; GCN2-NEXT: s_mov_b64 s[0:1], 0
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: v_mov_b32_e32 v0, s2
+; GCN2-NEXT: v_mov_b32_e32 v1, s3
+; GCN2-NEXT: flat_load_dword v1, v[0:1]
+; GCN2-NEXT: .LBB4_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: v_mov_b32_e32 v2, s2
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_max_i32_e32 v0, s4, v1
+; GCN2-NEXT: v_mov_b32_e32 v3, s3
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GCN2-NEXT: v_mov_b32_e32 v1, v0
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GCN2-NEXT: s_cbranch_execnz .LBB4_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_endpgm
+;
+; GCN3-LABEL: atomic_max_i32:
+; GCN3: ; %bb.0: ; %entry
+; GCN3-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
+; GCN3-NEXT: s_load_dword s4, s[0:1], 0x2c
+; GCN3-NEXT: s_mov_b64 s[0:1], 0
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
+; GCN3-NEXT: v_mov_b32_e32 v0, s2
+; GCN3-NEXT: v_mov_b32_e32 v1, s3
+; GCN3-NEXT: flat_load_dword v1, v[0:1]
+; GCN3-NEXT: .LBB4_1: ; %atomicrmw.start
+; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT: v_mov_b32_e32 v2, s2
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: v_max_i32_e32 v0, s4, v1
+; GCN3-NEXT: v_mov_b32_e32 v3, s3
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN3-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GCN3-NEXT: v_mov_b32_e32 v1, v0
+; GCN3-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GCN3-NEXT: s_cbranch_execnz .LBB4_1
+; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT: s_endpgm
+entry:
+ %val = atomicrmw volatile max i32* %out, i32 %in seq_cst
+ ret void
+}
+
+define amdgpu_kernel void @atomic_max_i32_ret(i32* %out, i32* %out2, i32 %in) {
+; GCN1-LABEL: atomic_max_i32_ret:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN1-NEXT: s_load_dword s2, s[0:1], 0xd
+; GCN1-NEXT: s_mov_b64 s[0:1], 0
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: v_mov_b32_e32 v0, s4
+; GCN1-NEXT: v_mov_b32_e32 v1, s5
+; GCN1-NEXT: flat_load_dword v0, v[0:1]
+; GCN1-NEXT: .LBB5_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_mov_b32_e32 v1, v0
+; GCN1-NEXT: v_mov_b32_e32 v2, s4
+; GCN1-NEXT: v_mov_b32_e32 v3, s5
+; GCN1-NEXT: v_max_i32_e32 v0, s2, v1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GCN1-NEXT: s_cbranch_execnz .LBB5_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_or_b64 exec, exec, s[0:1]
+; GCN1-NEXT: v_mov_b32_e32 v1, s6
+; GCN1-NEXT: v_mov_b32_e32 v2, s7
+; GCN1-NEXT: flat_store_dword v[1:2], v0
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_max_i32_ret:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN2-NEXT: s_load_dword s2, s[0:1], 0x34
+; GCN2-NEXT: s_mov_b64 s[0:1], 0
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: v_mov_b32_e32 v0, s4
+; GCN2-NEXT: v_mov_b32_e32 v1, s5
+; GCN2-NEXT: flat_load_dword v0, v[0:1]
+; GCN2-NEXT: .LBB5_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_mov_b32_e32 v1, v0
+; GCN2-NEXT: v_mov_b32_e32 v2, s4
+; GCN2-NEXT: v_mov_b32_e32 v3, s5
+; GCN2-NEXT: v_max_i32_e32 v0, s2, v1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GCN2-NEXT: s_cbranch_execnz .LBB5_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_or_b64 exec, exec, s[0:1]
+; GCN2-NEXT: v_mov_b32_e32 v1, s6
+; GCN2-NEXT: v_mov_b32_e32 v2, s7
+; GCN2-NEXT: flat_store_dword v[1:2], v0
+; GCN2-NEXT: s_endpgm
+;
+; GCN3-LABEL: atomic_max_i32_ret:
+; GCN3: ; %bb.0: ; %entry
+; GCN3-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN3-NEXT: s_load_dword s2, s[0:1], 0x34
+; GCN3-NEXT: s_mov_b64 s[0:1], 0
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
+; GCN3-NEXT: v_mov_b32_e32 v0, s4
+; GCN3-NEXT: v_mov_b32_e32 v1, s5
+; GCN3-NEXT: flat_load_dword v0, v[0:1]
+; GCN3-NEXT: .LBB5_1: ; %atomicrmw.start
+; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: v_mov_b32_e32 v1, v0
+; GCN3-NEXT: v_mov_b32_e32 v2, s4
+; GCN3-NEXT: v_mov_b32_e32 v3, s5
+; GCN3-NEXT: v_max_i32_e32 v0, s2, v1
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN3-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GCN3-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GCN3-NEXT: s_cbranch_execnz .LBB5_1
+; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT: s_or_b64 exec, exec, s[0:1]
+; GCN3-NEXT: v_mov_b32_e32 v1, s6
+; GCN3-NEXT: v_mov_b32_e32 v2, s7
+; GCN3-NEXT: flat_store_dword v[1:2], v0
+; GCN3-NEXT: s_endpgm
+entry:
+ %val = atomicrmw volatile max i32* %out, i32 %in seq_cst
+ store i32 %val, i32* %out2
+ ret void
+}
+
+define amdgpu_kernel void @atomic_max_i32_addr64(i32* %out, i32 %in, i64 %index) {
+; GCN1-LABEL: atomic_max_i32_addr64:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xd
+; GCN1-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x9
+; GCN1-NEXT: s_load_dword s4, s[0:1], 0xb
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GCN1-NEXT: s_add_u32 s0, s6, s0
+; GCN1-NEXT: s_addc_u32 s1, s7, s1
+; GCN1-NEXT: v_mov_b32_e32 v0, s0
+; GCN1-NEXT: v_mov_b32_e32 v1, s1
+; GCN1-NEXT: flat_load_dword v1, v[0:1]
+; GCN1-NEXT: s_mov_b64 s[2:3], 0
+; GCN1-NEXT: .LBB6_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: v_mov_b32_e32 v3, s1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_max_i32_e32 v0, s4, v1
+; GCN1-NEXT: v_mov_b32_e32 v2, s0
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v1, v0
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN1-NEXT: s_cbranch_execnz .LBB6_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_max_i32_addr64:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GCN2-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x24
+; GCN2-NEXT: s_load_dword s4, s[0:1], 0x2c
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GCN2-NEXT: s_add_u32 s0, s6, s0
+; GCN2-NEXT: s_addc_u32 s1, s7, s1
+; GCN2-NEXT: v_mov_b32_e32 v0, s0
+; GCN2-NEXT: v_mov_b32_e32 v1, s1
+; GCN2-NEXT: flat_load_dword v1, v[0:1]
+; GCN2-NEXT: s_mov_b64 s[2:3], 0
+; GCN2-NEXT: .LBB6_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: v_mov_b32_e32 v3, s1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_max_i32_e32 v0, s4, v1
+; GCN2-NEXT: v_mov_b32_e32 v2, s0
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v1, v0
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN2-NEXT: s_cbranch_execnz .LBB6_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_endpgm
+;
+; GCN3-LABEL: atomic_max_i32_addr64:
+; GCN3: ; %bb.0: ; %entry
+; GCN3-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GCN3-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x24
+; GCN3-NEXT: s_load_dword s4, s[0:1], 0x2c
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
+; GCN3-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GCN3-NEXT: s_add_u32 s0, s6, s0
+; GCN3-NEXT: s_addc_u32 s1, s7, s1
+; GCN3-NEXT: v_mov_b32_e32 v0, s0
+; GCN3-NEXT: v_mov_b32_e32 v1, s1
+; GCN3-NEXT: flat_load_dword v1, v[0:1]
+; GCN3-NEXT: s_mov_b64 s[2:3], 0
+; GCN3-NEXT: .LBB6_1: ; %atomicrmw.start
+; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT: v_mov_b32_e32 v3, s1
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: v_max_i32_e32 v0, s4, v1
+; GCN3-NEXT: v_mov_b32_e32 v2, s0
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN3-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN3-NEXT: v_mov_b32_e32 v1, v0
+; GCN3-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN3-NEXT: s_cbranch_execnz .LBB6_1
+; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i32, i32* %out, i64 %index
+ %val = atomicrmw volatile max i32* %ptr, i32 %in seq_cst
+ ret void
+}
+
+define amdgpu_kernel void @atomic_max_i32_ret_addr64(i32* %out, i32* %out2, i32 %in, i64 %index) {
+; GCN1-LABEL: atomic_max_i32_ret_addr64:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xf
+; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN1-NEXT: s_load_dword s8, s[0:1], 0xd
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GCN1-NEXT: s_add_u32 s0, s4, s0
+; GCN1-NEXT: s_addc_u32 s1, s5, s1
+; GCN1-NEXT: v_mov_b32_e32 v0, s0
+; GCN1-NEXT: v_mov_b32_e32 v1, s1
+; GCN1-NEXT: flat_load_dword v0, v[0:1]
+; GCN1-NEXT: s_mov_b64 s[2:3], 0
+; GCN1-NEXT: .LBB7_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_mov_b32_e32 v1, v0
+; GCN1-NEXT: v_mov_b32_e32 v3, s1
+; GCN1-NEXT: v_mov_b32_e32 v2, s0
+; GCN1-NEXT: v_max_i32_e32 v0, s8, v1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN1-NEXT: s_cbranch_execnz .LBB7_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_or_b64 exec, exec, s[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v1, s6
+; GCN1-NEXT: v_mov_b32_e32 v2, s7
+; GCN1-NEXT: flat_store_dword v[1:2], v0
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_max_i32_ret_addr64:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x3c
+; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN2-NEXT: s_load_dword s8, s[0:1], 0x34
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GCN2-NEXT: s_add_u32 s0, s4, s0
+; GCN2-NEXT: s_addc_u32 s1, s5, s1
+; GCN2-NEXT: v_mov_b32_e32 v0, s0
+; GCN2-NEXT: v_mov_b32_e32 v1, s1
+; GCN2-NEXT: flat_load_dword v0, v[0:1]
+; GCN2-NEXT: s_mov_b64 s[2:3], 0
+; GCN2-NEXT: .LBB7_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_mov_b32_e32 v1, v0
+; GCN2-NEXT: v_mov_b32_e32 v3, s1
+; GCN2-NEXT: v_mov_b32_e32 v2, s0
+; GCN2-NEXT: v_max_i32_e32 v0, s8, v1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN2-NEXT: s_cbranch_execnz .LBB7_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_or_b64 exec, exec, s[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v1, s6
+; GCN2-NEXT: v_mov_b32_e32 v2, s7
+; GCN2-NEXT: flat_store_dword v[1:2], v0
+; GCN2-NEXT: s_endpgm
+;
+; GCN3-LABEL: atomic_max_i32_ret_addr64:
+; GCN3: ; %bb.0: ; %entry
+; GCN3-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x3c
+; GCN3-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN3-NEXT: s_load_dword s8, s[0:1], 0x34
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
+; GCN3-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GCN3-NEXT: s_add_u32 s0, s4, s0
+; GCN3-NEXT: s_addc_u32 s1, s5, s1
+; GCN3-NEXT: v_mov_b32_e32 v0, s0
+; GCN3-NEXT: v_mov_b32_e32 v1, s1
+; GCN3-NEXT: flat_load_dword v0, v[0:1]
+; GCN3-NEXT: s_mov_b64 s[2:3], 0
+; GCN3-NEXT: .LBB7_1: ; %atomicrmw.start
+; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: v_mov_b32_e32 v1, v0
+; GCN3-NEXT: v_mov_b32_e32 v3, s1
+; GCN3-NEXT: v_mov_b32_e32 v2, s0
+; GCN3-NEXT: v_max_i32_e32 v0, s8, v1
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN3-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN3-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN3-NEXT: s_cbranch_execnz .LBB7_1
+; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT: s_or_b64 exec, exec, s[2:3]
+; GCN3-NEXT: v_mov_b32_e32 v1, s6
+; GCN3-NEXT: v_mov_b32_e32 v2, s7
+; GCN3-NEXT: flat_store_dword v[1:2], v0
+; GCN3-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i32, i32* %out, i64 %index
+ %val = atomicrmw volatile max i32* %ptr, i32 %in seq_cst
+ store i32 %val, i32* %out2
+ ret void
+}
+
+define amdgpu_kernel void @atomic_umax_i32_offset(i32* %out, i32 %in) {
+; GCN1-LABEL: atomic_umax_i32_offset:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x9
+; GCN1-NEXT: s_load_dword s4, s[0:1], 0xb
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: s_add_u32 s0, s2, 16
+; GCN1-NEXT: s_addc_u32 s1, s3, 0
+; GCN1-NEXT: v_mov_b32_e32 v0, s0
+; GCN1-NEXT: v_mov_b32_e32 v1, s1
+; GCN1-NEXT: flat_load_dword v1, v[0:1]
+; GCN1-NEXT: s_mov_b64 s[2:3], 0
+; GCN1-NEXT: .LBB8_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: v_mov_b32_e32 v3, s1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_max_u32_e32 v0, s4, v1
+; GCN1-NEXT: v_mov_b32_e32 v2, s0
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v1, v0
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN1-NEXT: s_cbranch_execnz .LBB8_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_umax_i32_offset:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
+; GCN2-NEXT: s_load_dword s4, s[0:1], 0x2c
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: s_add_u32 s0, s2, 16
+; GCN2-NEXT: s_addc_u32 s1, s3, 0
+; GCN2-NEXT: v_mov_b32_e32 v0, s0
+; GCN2-NEXT: v_mov_b32_e32 v1, s1
+; GCN2-NEXT: flat_load_dword v1, v[0:1]
+; GCN2-NEXT: s_mov_b64 s[2:3], 0
+; GCN2-NEXT: .LBB8_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: v_mov_b32_e32 v3, s1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_max_u32_e32 v0, s4, v1
+; GCN2-NEXT: v_mov_b32_e32 v2, s0
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v1, v0
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN2-NEXT: s_cbranch_execnz .LBB8_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_endpgm
+;
+; GCN3-LABEL: atomic_umax_i32_offset:
+; GCN3: ; %bb.0: ; %entry
+; GCN3-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
+; GCN3-NEXT: s_load_dword s4, s[0:1], 0x2c
+; GCN3-NEXT: s_mov_b64 s[0:1], 0
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
+; GCN3-NEXT: v_mov_b32_e32 v0, s2
+; GCN3-NEXT: v_mov_b32_e32 v1, s3
+; GCN3-NEXT: flat_load_dword v1, v[0:1] offset:16
+; GCN3-NEXT: .LBB8_1: ; %atomicrmw.start
+; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT: v_mov_b32_e32 v2, s2
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: v_max_u32_e32 v0, s4, v1
+; GCN3-NEXT: v_mov_b32_e32 v3, s3
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] offset:16 glc
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN3-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GCN3-NEXT: v_mov_b32_e32 v1, v0
+; GCN3-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GCN3-NEXT: s_cbranch_execnz .LBB8_1
+; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT: s_endpgm
+entry:
+ %gep = getelementptr i32, i32* %out, i32 4
+ %val = atomicrmw volatile umax i32* %gep, i32 %in seq_cst
+ ret void
+}
+
+define amdgpu_kernel void @atomic_umax_i32_ret_offset(i32* %out, i32* %out2, i32 %in) {
+; GCN1-LABEL: atomic_umax_i32_ret_offset:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN1-NEXT: s_load_dword s8, s[0:1], 0xd
+; GCN1-NEXT: s_mov_b64 s[2:3], 0
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: s_add_u32 s0, s4, 16
+; GCN1-NEXT: s_addc_u32 s1, s5, 0
+; GCN1-NEXT: v_mov_b32_e32 v0, s0
+; GCN1-NEXT: v_mov_b32_e32 v1, s1
+; GCN1-NEXT: flat_load_dword v0, v[0:1]
+; GCN1-NEXT: .LBB9_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_mov_b32_e32 v1, v0
+; GCN1-NEXT: v_mov_b32_e32 v3, s1
+; GCN1-NEXT: v_mov_b32_e32 v2, s0
+; GCN1-NEXT: v_max_u32_e32 v0, s8, v1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN1-NEXT: s_cbranch_execnz .LBB9_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_or_b64 exec, exec, s[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v1, s6
+; GCN1-NEXT: v_mov_b32_e32 v2, s7
+; GCN1-NEXT: flat_store_dword v[1:2], v0
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_umax_i32_ret_offset:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN2-NEXT: s_load_dword s8, s[0:1], 0x34
+; GCN2-NEXT: s_mov_b64 s[2:3], 0
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: s_add_u32 s0, s4, 16
+; GCN2-NEXT: s_addc_u32 s1, s5, 0
+; GCN2-NEXT: v_mov_b32_e32 v0, s0
+; GCN2-NEXT: v_mov_b32_e32 v1, s1
+; GCN2-NEXT: flat_load_dword v0, v[0:1]
+; GCN2-NEXT: .LBB9_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_mov_b32_e32 v1, v0
+; GCN2-NEXT: v_mov_b32_e32 v3, s1
+; GCN2-NEXT: v_mov_b32_e32 v2, s0
+; GCN2-NEXT: v_max_u32_e32 v0, s8, v1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN2-NEXT: s_cbranch_execnz .LBB9_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_or_b64 exec, exec, s[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v1, s6
+; GCN2-NEXT: v_mov_b32_e32 v2, s7
+; GCN2-NEXT: flat_store_dword v[1:2], v0
+; GCN2-NEXT: s_endpgm
+;
+; GCN3-LABEL: atomic_umax_i32_ret_offset:
+; GCN3: ; %bb.0: ; %entry
+; GCN3-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN3-NEXT: s_load_dword s2, s[0:1], 0x34
+; GCN3-NEXT: s_mov_b64 s[0:1], 0
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
+; GCN3-NEXT: v_mov_b32_e32 v0, s4
+; GCN3-NEXT: v_mov_b32_e32 v1, s5
+; GCN3-NEXT: flat_load_dword v0, v[0:1] offset:16
+; GCN3-NEXT: .LBB9_1: ; %atomicrmw.start
+; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: v_mov_b32_e32 v1, v0
+; GCN3-NEXT: v_mov_b32_e32 v2, s4
+; GCN3-NEXT: v_mov_b32_e32 v3, s5
+; GCN3-NEXT: v_max_u32_e32 v0, s2, v1
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] offset:16 glc
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN3-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GCN3-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GCN3-NEXT: s_cbranch_execnz .LBB9_1
+; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT: s_or_b64 exec, exec, s[0:1]
+; GCN3-NEXT: v_mov_b32_e32 v1, s6
+; GCN3-NEXT: v_mov_b32_e32 v2, s7
+; GCN3-NEXT: flat_store_dword v[1:2], v0
+; GCN3-NEXT: s_endpgm
+entry:
+ %gep = getelementptr i32, i32* %out, i32 4
+ %val = atomicrmw volatile umax i32* %gep, i32 %in seq_cst
+ store i32 %val, i32* %out2
+ ret void
+}
+
+define amdgpu_kernel void @atomic_umax_i32_addr64_offset(i32* %out, i32 %in, i64 %index) {
+; GCN1-LABEL: atomic_umax_i32_addr64_offset:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xd
+; GCN1-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x9
+; GCN1-NEXT: s_load_dword s4, s[0:1], 0xb
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GCN1-NEXT: s_add_u32 s0, s6, s0
+; GCN1-NEXT: s_addc_u32 s1, s7, s1
+; GCN1-NEXT: s_add_u32 s0, s0, 16
+; GCN1-NEXT: s_addc_u32 s1, s1, 0
+; GCN1-NEXT: v_mov_b32_e32 v0, s0
+; GCN1-NEXT: v_mov_b32_e32 v1, s1
+; GCN1-NEXT: flat_load_dword v1, v[0:1]
+; GCN1-NEXT: s_mov_b64 s[2:3], 0
+; GCN1-NEXT: .LBB10_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: v_mov_b32_e32 v3, s1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_max_u32_e32 v0, s4, v1
+; GCN1-NEXT: v_mov_b32_e32 v2, s0
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v1, v0
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN1-NEXT: s_cbranch_execnz .LBB10_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_umax_i32_addr64_offset:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GCN2-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x24
+; GCN2-NEXT: s_load_dword s4, s[0:1], 0x2c
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GCN2-NEXT: s_add_u32 s0, s6, s0
+; GCN2-NEXT: s_addc_u32 s1, s7, s1
+; GCN2-NEXT: s_add_u32 s0, s0, 16
+; GCN2-NEXT: s_addc_u32 s1, s1, 0
+; GCN2-NEXT: v_mov_b32_e32 v0, s0
+; GCN2-NEXT: v_mov_b32_e32 v1, s1
+; GCN2-NEXT: flat_load_dword v1, v[0:1]
+; GCN2-NEXT: s_mov_b64 s[2:3], 0
+; GCN2-NEXT: .LBB10_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: v_mov_b32_e32 v3, s1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_max_u32_e32 v0, s4, v1
+; GCN2-NEXT: v_mov_b32_e32 v2, s0
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: buffer_wbinvl1_vol
+; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN2-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN2-NEXT: v_mov_b32_e32 v1, v0
+; GCN2-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN2-NEXT: s_cbranch_execnz .LBB10_1
+; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN2-NEXT: s_endpgm
+;
+; GCN3-LABEL: atomic_umax_i32_addr64_offset:
+; GCN3: ; %bb.0: ; %entry
+; GCN3-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GCN3-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x24
+; GCN3-NEXT: s_load_dword s4, s[0:1], 0x2c
+; GCN3-NEXT: s_waitcnt lgkmcnt(0)
+; GCN3-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GCN3-NEXT: s_add_u32 s0, s6, s0
+; GCN3-NEXT: s_addc_u32 s1, s7, s1
+; GCN3-NEXT: v_mov_b32_e32 v0, s0
+; GCN3-NEXT: v_mov_b32_e32 v1, s1
+; GCN3-NEXT: flat_load_dword v1, v[0:1] offset:16
+; GCN3-NEXT: s_mov_b64 s[2:3], 0
+; GCN3-NEXT: .LBB10_1: ; %atomicrmw.start
+; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN3-NEXT: v_mov_b32_e32 v3, s1
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: v_max_u32_e32 v0, s4, v1
+; GCN3-NEXT: v_mov_b32_e32 v2, s0
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] offset:16 glc
+; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN3-NEXT: buffer_wbinvl1_vol
+; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN3-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN3-NEXT: v_mov_b32_e32 v1, v0
+; GCN3-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN3-NEXT: s_cbranch_execnz .LBB10_1
+; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN3-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i32, i32* %out, i64 %index
+ %gep = getelementptr i32, i32* %ptr, i32 4
+ %val = atomicrmw volatile umax i32* %gep, i32 %in seq_cst
+ ret void
+}
+
+define amdgpu_kernel void @atomic_umax_i32_ret_addr64_offset(i32* %out, i32* %out2, i32 %in, i64 %index) {
+; GCN1-LABEL: atomic_umax_i32_ret_addr64_offset:
+; GCN1: ; %bb.0: ; %entry
+; GCN1-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xf
+; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN1-NEXT: s_load_dword s8, s[0:1], 0xd
+; GCN1-NEXT: s_waitcnt lgkmcnt(0)
+; GCN1-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GCN1-NEXT: s_add_u32 s0, s4, s0
+; GCN1-NEXT: s_addc_u32 s1, s5, s1
+; GCN1-NEXT: s_add_u32 s0, s0, 16
+; GCN1-NEXT: s_addc_u32 s1, s1, 0
+; GCN1-NEXT: v_mov_b32_e32 v0, s0
+; GCN1-NEXT: v_mov_b32_e32 v1, s1
+; GCN1-NEXT: flat_load_dword v0, v[0:1]
+; GCN1-NEXT: s_mov_b64 s[2:3], 0
+; GCN1-NEXT: .LBB11_1: ; %atomicrmw.start
+; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: v_mov_b32_e32 v1, v0
+; GCN1-NEXT: v_mov_b32_e32 v3, s1
+; GCN1-NEXT: v_mov_b32_e32 v2, s0
+; GCN1-NEXT: v_max_u32_e32 v0, s8, v1
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN1-NEXT: buffer_wbinvl1_vol
+; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GCN1-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GCN1-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GCN1-NEXT: s_cbranch_execnz .LBB11_1
+; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end
+; GCN1-NEXT: s_or_b64 exec, exec, s[2:3]
+; GCN1-NEXT: v_mov_b32_e32 v1, s6
+; GCN1-NEXT: v_mov_b32_e32 v2, s7
+; GCN1-NEXT: flat_store_dword v[1:2], v0
+; GCN1-NEXT: s_endpgm
+;
+; GCN2-LABEL: atomic_umax_i32_ret_addr64_offset:
+; GCN2: ; %bb.0: ; %entry
+; GCN2-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x3c
+; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GCN2-NEXT: s_load_dword s8, s[0:1], 0x34
+; GCN2-NEXT: s_waitcnt lgkmcnt(0)
+; GCN2-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GCN2-NEXT: s_add_u32 s0, s4, s0
+; GCN2-NEXT: s_addc_u32 s1, s5, s1
+; GCN2-NEXT: s_add_u32 s0, s0, 16
+; GCN2-NEXT: s_addc_u32 s1, s1, 0
+; GCN2-NEXT: v_mov_b32_e32 v0, s0
+; GCN2-NEXT: v_mov_b32_e32 v1, s1
+; GCN2-NEXT: flat_load_dword v0, v[0:1]
+; GCN2-NEXT: s_mov_b64 s[2:3], 0
+; GCN2-NEXT: .LBB11_1: ; %atomicrmw.start
+; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: v_mov_b32_e32 v1, v0
+; GCN2-NEXT: v_mov_b32_e32 v3, s1
+; GCN2-NEXT: v_mov_b32_e32 v2, s0
+; GCN2-NEXT: v_max_u32_e32 v0, s8, v1
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GCN2-NEXT:
buffer_wbinvl1_vol +; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 +; GCN2-NEXT: s_or_b64 s[2:3], vcc, s[2:3] +; GCN2-NEXT: s_andn2_b64 exec, exec, s[2:3] +; GCN2-NEXT: s_cbranch_execnz .LBB11_1 +; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN2-NEXT: s_or_b64 exec, exec, s[2:3] +; GCN2-NEXT: v_mov_b32_e32 v1, s6 +; GCN2-NEXT: v_mov_b32_e32 v2, s7 +; GCN2-NEXT: flat_store_dword v[1:2], v0 +; GCN2-NEXT: s_endpgm +; +; GCN3-LABEL: atomic_umax_i32_ret_addr64_offset: +; GCN3: ; %bb.0: ; %entry +; GCN3-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x3c +; GCN3-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24 +; GCN3-NEXT: s_load_dword s8, s[0:1], 0x34 +; GCN3-NEXT: s_waitcnt lgkmcnt(0) +; GCN3-NEXT: s_lshl_b64 s[0:1], s[2:3], 2 +; GCN3-NEXT: s_add_u32 s0, s4, s0 +; GCN3-NEXT: s_addc_u32 s1, s5, s1 +; GCN3-NEXT: v_mov_b32_e32 v0, s0 +; GCN3-NEXT: v_mov_b32_e32 v1, s1 +; GCN3-NEXT: flat_load_dword v0, v[0:1] offset:16 +; GCN3-NEXT: s_mov_b64 s[2:3], 0 +; GCN3-NEXT: .LBB11_1: ; %atomicrmw.start +; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN3-NEXT: v_mov_b32_e32 v1, v0 +; GCN3-NEXT: v_mov_b32_e32 v3, s1 +; GCN3-NEXT: v_mov_b32_e32 v2, s0 +; GCN3-NEXT: v_max_u32_e32 v0, s8, v1 +; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN3-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] offset:16 glc +; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN3-NEXT: buffer_wbinvl1_vol +; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 +; GCN3-NEXT: s_or_b64 s[2:3], vcc, s[2:3] +; GCN3-NEXT: s_andn2_b64 exec, exec, s[2:3] +; GCN3-NEXT: s_cbranch_execnz .LBB11_1 +; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN3-NEXT: s_or_b64 exec, exec, s[2:3] +; GCN3-NEXT: v_mov_b32_e32 v1, s6 +; GCN3-NEXT: v_mov_b32_e32 v2, s7 +; GCN3-NEXT: flat_store_dword v[1:2], v0 +; GCN3-NEXT: s_endpgm +entry: + %ptr = getelementptr i32, i32* %out, i64 %index + %gep = getelementptr i32, i32* %ptr, i32 4 + %val = atomicrmw volatile umax i32* %gep, i32 %in seq_cst + store i32 %val, i32* %out2 + ret void +} + +define amdgpu_kernel void @atomic_umax_i32(i32* %out, i32 %in) { +; GCN1-LABEL: atomic_umax_i32: +; GCN1: ; %bb.0: ; %entry +; GCN1-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x9 +; GCN1-NEXT: s_load_dword s4, s[0:1], 0xb +; GCN1-NEXT: s_mov_b64 s[0:1], 0 +; GCN1-NEXT: s_waitcnt lgkmcnt(0) +; GCN1-NEXT: v_mov_b32_e32 v0, s2 +; GCN1-NEXT: v_mov_b32_e32 v1, s3 +; GCN1-NEXT: flat_load_dword v1, v[0:1] +; GCN1-NEXT: .LBB12_1: ; %atomicrmw.start +; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN1-NEXT: v_mov_b32_e32 v2, s2 +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: v_max_u32_e32 v0, s4, v1 +; GCN1-NEXT: v_mov_b32_e32 v3, s3 +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: buffer_wbinvl1_vol +; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 +; GCN1-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GCN1-NEXT: v_mov_b32_e32 v1, v0 +; GCN1-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GCN1-NEXT: s_cbranch_execnz .LBB12_1 +; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN1-NEXT: s_endpgm +; +; GCN2-LABEL: atomic_umax_i32: +; GCN2: ; %bb.0: ; %entry +; GCN2-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24 +; GCN2-NEXT: s_load_dword s4, s[0:1], 0x2c +; GCN2-NEXT: s_mov_b64 s[0:1], 0 +; GCN2-NEXT: s_waitcnt lgkmcnt(0) +; GCN2-NEXT: v_mov_b32_e32 v0, s2 +; GCN2-NEXT: v_mov_b32_e32 v1, s3 +; GCN2-NEXT: flat_load_dword v1, v[0:1] +; GCN2-NEXT: .LBB12_1: ; %atomicrmw.start +; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN2-NEXT: v_mov_b32_e32 v2, s2 
+; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: v_max_u32_e32 v0, s4, v1 +; GCN2-NEXT: v_mov_b32_e32 v3, s3 +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: buffer_wbinvl1_vol +; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 +; GCN2-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GCN2-NEXT: v_mov_b32_e32 v1, v0 +; GCN2-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GCN2-NEXT: s_cbranch_execnz .LBB12_1 +; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN2-NEXT: s_endpgm +; +; GCN3-LABEL: atomic_umax_i32: +; GCN3: ; %bb.0: ; %entry +; GCN3-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24 +; GCN3-NEXT: s_load_dword s4, s[0:1], 0x2c +; GCN3-NEXT: s_mov_b64 s[0:1], 0 +; GCN3-NEXT: s_waitcnt lgkmcnt(0) +; GCN3-NEXT: v_mov_b32_e32 v0, s2 +; GCN3-NEXT: v_mov_b32_e32 v1, s3 +; GCN3-NEXT: flat_load_dword v1, v[0:1] +; GCN3-NEXT: .LBB12_1: ; %atomicrmw.start +; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN3-NEXT: v_mov_b32_e32 v2, s2 +; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN3-NEXT: v_max_u32_e32 v0, s4, v1 +; GCN3-NEXT: v_mov_b32_e32 v3, s3 +; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN3-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc +; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN3-NEXT: buffer_wbinvl1_vol +; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 +; GCN3-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GCN3-NEXT: v_mov_b32_e32 v1, v0 +; GCN3-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GCN3-NEXT: s_cbranch_execnz .LBB12_1 +; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN3-NEXT: s_endpgm +entry: + %val = atomicrmw volatile umax i32* %out, i32 %in seq_cst + ret void +} + +define amdgpu_kernel void @atomic_umax_i32_ret(i32* %out, i32* %out2, i32 %in) { +; GCN1-LABEL: atomic_umax_i32_ret: +; GCN1: ; %bb.0: ; %entry +; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9 +; GCN1-NEXT: s_load_dword s2, s[0:1], 0xd +; GCN1-NEXT: s_mov_b64 s[0:1], 0 +; GCN1-NEXT: s_waitcnt lgkmcnt(0) +; GCN1-NEXT: v_mov_b32_e32 v0, s4 +; GCN1-NEXT: v_mov_b32_e32 v1, s5 +; GCN1-NEXT: flat_load_dword v0, v[0:1] +; GCN1-NEXT: .LBB13_1: ; %atomicrmw.start +; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: v_mov_b32_e32 v1, v0 +; GCN1-NEXT: v_mov_b32_e32 v2, s4 +; GCN1-NEXT: v_mov_b32_e32 v3, s5 +; GCN1-NEXT: v_max_u32_e32 v0, s2, v1 +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: buffer_wbinvl1_vol +; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 +; GCN1-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GCN1-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GCN1-NEXT: s_cbranch_execnz .LBB13_1 +; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN1-NEXT: s_or_b64 exec, exec, s[0:1] +; GCN1-NEXT: v_mov_b32_e32 v1, s6 +; GCN1-NEXT: v_mov_b32_e32 v2, s7 +; GCN1-NEXT: flat_store_dword v[1:2], v0 +; GCN1-NEXT: s_endpgm +; +; GCN2-LABEL: atomic_umax_i32_ret: +; GCN2: ; %bb.0: ; %entry +; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24 +; GCN2-NEXT: s_load_dword s2, s[0:1], 0x34 +; GCN2-NEXT: s_mov_b64 s[0:1], 0 +; GCN2-NEXT: s_waitcnt lgkmcnt(0) +; GCN2-NEXT: v_mov_b32_e32 v0, s4 +; GCN2-NEXT: v_mov_b32_e32 v1, s5 +; GCN2-NEXT: flat_load_dword v0, v[0:1] +; GCN2-NEXT: .LBB13_1: ; %atomicrmw.start +; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: v_mov_b32_e32 v1, v0 +; GCN2-NEXT: v_mov_b32_e32 v2, s4 +; GCN2-NEXT: v_mov_b32_e32 v3, s5 +; GCN2-NEXT: v_max_u32_e32 
v0, s2, v1 +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: buffer_wbinvl1_vol +; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 +; GCN2-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GCN2-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GCN2-NEXT: s_cbranch_execnz .LBB13_1 +; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN2-NEXT: s_or_b64 exec, exec, s[0:1] +; GCN2-NEXT: v_mov_b32_e32 v1, s6 +; GCN2-NEXT: v_mov_b32_e32 v2, s7 +; GCN2-NEXT: flat_store_dword v[1:2], v0 +; GCN2-NEXT: s_endpgm +; +; GCN3-LABEL: atomic_umax_i32_ret: +; GCN3: ; %bb.0: ; %entry +; GCN3-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24 +; GCN3-NEXT: s_load_dword s2, s[0:1], 0x34 +; GCN3-NEXT: s_mov_b64 s[0:1], 0 +; GCN3-NEXT: s_waitcnt lgkmcnt(0) +; GCN3-NEXT: v_mov_b32_e32 v0, s4 +; GCN3-NEXT: v_mov_b32_e32 v1, s5 +; GCN3-NEXT: flat_load_dword v0, v[0:1] +; GCN3-NEXT: .LBB13_1: ; %atomicrmw.start +; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN3-NEXT: v_mov_b32_e32 v1, v0 +; GCN3-NEXT: v_mov_b32_e32 v2, s4 +; GCN3-NEXT: v_mov_b32_e32 v3, s5 +; GCN3-NEXT: v_max_u32_e32 v0, s2, v1 +; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN3-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc +; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN3-NEXT: buffer_wbinvl1_vol +; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 +; GCN3-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GCN3-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GCN3-NEXT: s_cbranch_execnz .LBB13_1 +; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN3-NEXT: s_or_b64 exec, exec, s[0:1] +; GCN3-NEXT: v_mov_b32_e32 v1, s6 +; GCN3-NEXT: v_mov_b32_e32 v2, s7 +; GCN3-NEXT: flat_store_dword v[1:2], v0 +; GCN3-NEXT: s_endpgm +entry: + %val = atomicrmw volatile umax i32* %out, i32 %in seq_cst + store i32 %val, i32* %out2 + ret void +} + +define amdgpu_kernel void @atomic_umax_i32_addr64(i32* %out, i32 %in, i64 %index) { +; GCN1-LABEL: atomic_umax_i32_addr64: +; GCN1: ; %bb.0: ; %entry +; GCN1-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xd +; GCN1-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x9 +; GCN1-NEXT: s_load_dword s4, s[0:1], 0xb +; GCN1-NEXT: s_waitcnt lgkmcnt(0) +; GCN1-NEXT: s_lshl_b64 s[0:1], s[2:3], 2 +; GCN1-NEXT: s_add_u32 s0, s6, s0 +; GCN1-NEXT: s_addc_u32 s1, s7, s1 +; GCN1-NEXT: v_mov_b32_e32 v0, s0 +; GCN1-NEXT: v_mov_b32_e32 v1, s1 +; GCN1-NEXT: flat_load_dword v1, v[0:1] +; GCN1-NEXT: s_mov_b64 s[2:3], 0 +; GCN1-NEXT: .LBB14_1: ; %atomicrmw.start +; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN1-NEXT: v_mov_b32_e32 v3, s1 +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: v_max_u32_e32 v0, s4, v1 +; GCN1-NEXT: v_mov_b32_e32 v2, s0 +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: buffer_wbinvl1_vol +; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 +; GCN1-NEXT: s_or_b64 s[2:3], vcc, s[2:3] +; GCN1-NEXT: v_mov_b32_e32 v1, v0 +; GCN1-NEXT: s_andn2_b64 exec, exec, s[2:3] +; GCN1-NEXT: s_cbranch_execnz .LBB14_1 +; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN1-NEXT: s_endpgm +; +; GCN2-LABEL: atomic_umax_i32_addr64: +; GCN2: ; %bb.0: ; %entry +; GCN2-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34 +; GCN2-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x24 +; GCN2-NEXT: s_load_dword s4, s[0:1], 0x2c +; GCN2-NEXT: s_waitcnt lgkmcnt(0) +; GCN2-NEXT: s_lshl_b64 s[0:1], s[2:3], 2 +; GCN2-NEXT: s_add_u32 s0, s6, s0 +; GCN2-NEXT: s_addc_u32 s1, s7, s1 +; GCN2-NEXT: v_mov_b32_e32 v0, s0 
+; GCN2-NEXT: v_mov_b32_e32 v1, s1 +; GCN2-NEXT: flat_load_dword v1, v[0:1] +; GCN2-NEXT: s_mov_b64 s[2:3], 0 +; GCN2-NEXT: .LBB14_1: ; %atomicrmw.start +; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN2-NEXT: v_mov_b32_e32 v3, s1 +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: v_max_u32_e32 v0, s4, v1 +; GCN2-NEXT: v_mov_b32_e32 v2, s0 +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: buffer_wbinvl1_vol +; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 +; GCN2-NEXT: s_or_b64 s[2:3], vcc, s[2:3] +; GCN2-NEXT: v_mov_b32_e32 v1, v0 +; GCN2-NEXT: s_andn2_b64 exec, exec, s[2:3] +; GCN2-NEXT: s_cbranch_execnz .LBB14_1 +; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN2-NEXT: s_endpgm +; +; GCN3-LABEL: atomic_umax_i32_addr64: +; GCN3: ; %bb.0: ; %entry +; GCN3-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34 +; GCN3-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x24 +; GCN3-NEXT: s_load_dword s4, s[0:1], 0x2c +; GCN3-NEXT: s_waitcnt lgkmcnt(0) +; GCN3-NEXT: s_lshl_b64 s[0:1], s[2:3], 2 +; GCN3-NEXT: s_add_u32 s0, s6, s0 +; GCN3-NEXT: s_addc_u32 s1, s7, s1 +; GCN3-NEXT: v_mov_b32_e32 v0, s0 +; GCN3-NEXT: v_mov_b32_e32 v1, s1 +; GCN3-NEXT: flat_load_dword v1, v[0:1] +; GCN3-NEXT: s_mov_b64 s[2:3], 0 +; GCN3-NEXT: .LBB14_1: ; %atomicrmw.start +; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN3-NEXT: v_mov_b32_e32 v3, s1 +; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN3-NEXT: v_max_u32_e32 v0, s4, v1 +; GCN3-NEXT: v_mov_b32_e32 v2, s0 +; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN3-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc +; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN3-NEXT: buffer_wbinvl1_vol +; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 +; GCN3-NEXT: s_or_b64 s[2:3], vcc, s[2:3] +; GCN3-NEXT: v_mov_b32_e32 v1, v0 +; GCN3-NEXT: s_andn2_b64 exec, exec, s[2:3] +; GCN3-NEXT: s_cbranch_execnz .LBB14_1 +; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN3-NEXT: s_endpgm +entry: + %ptr = getelementptr i32, i32* %out, i64 %index + %val = atomicrmw volatile umax i32* %ptr, i32 %in seq_cst + ret void +} + +define amdgpu_kernel void @atomic_umax_i32_ret_addr64(i32* %out, i32* %out2, i32 %in, i64 %index) { +; GCN1-LABEL: atomic_umax_i32_ret_addr64: +; GCN1: ; %bb.0: ; %entry +; GCN1-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xf +; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9 +; GCN1-NEXT: s_load_dword s8, s[0:1], 0xd +; GCN1-NEXT: s_waitcnt lgkmcnt(0) +; GCN1-NEXT: s_lshl_b64 s[0:1], s[2:3], 2 +; GCN1-NEXT: s_add_u32 s0, s4, s0 +; GCN1-NEXT: s_addc_u32 s1, s5, s1 +; GCN1-NEXT: v_mov_b32_e32 v0, s0 +; GCN1-NEXT: v_mov_b32_e32 v1, s1 +; GCN1-NEXT: flat_load_dword v0, v[0:1] +; GCN1-NEXT: s_mov_b64 s[2:3], 0 +; GCN1-NEXT: .LBB15_1: ; %atomicrmw.start +; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: v_mov_b32_e32 v1, v0 +; GCN1-NEXT: v_mov_b32_e32 v3, s1 +; GCN1-NEXT: v_mov_b32_e32 v2, s0 +; GCN1-NEXT: v_max_u32_e32 v0, s8, v1 +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: buffer_wbinvl1_vol +; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 +; GCN1-NEXT: s_or_b64 s[2:3], vcc, s[2:3] +; GCN1-NEXT: s_andn2_b64 exec, exec, s[2:3] +; GCN1-NEXT: s_cbranch_execnz .LBB15_1 +; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN1-NEXT: s_or_b64 exec, exec, s[2:3] +; GCN1-NEXT: v_mov_b32_e32 v1, s6 +; GCN1-NEXT: v_mov_b32_e32 v2, s7 +; GCN1-NEXT: 
flat_store_dword v[1:2], v0 +; GCN1-NEXT: s_endpgm +; +; GCN2-LABEL: atomic_umax_i32_ret_addr64: +; GCN2: ; %bb.0: ; %entry +; GCN2-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x3c +; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24 +; GCN2-NEXT: s_load_dword s8, s[0:1], 0x34 +; GCN2-NEXT: s_waitcnt lgkmcnt(0) +; GCN2-NEXT: s_lshl_b64 s[0:1], s[2:3], 2 +; GCN2-NEXT: s_add_u32 s0, s4, s0 +; GCN2-NEXT: s_addc_u32 s1, s5, s1 +; GCN2-NEXT: v_mov_b32_e32 v0, s0 +; GCN2-NEXT: v_mov_b32_e32 v1, s1 +; GCN2-NEXT: flat_load_dword v0, v[0:1] +; GCN2-NEXT: s_mov_b64 s[2:3], 0 +; GCN2-NEXT: .LBB15_1: ; %atomicrmw.start +; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: v_mov_b32_e32 v1, v0 +; GCN2-NEXT: v_mov_b32_e32 v3, s1 +; GCN2-NEXT: v_mov_b32_e32 v2, s0 +; GCN2-NEXT: v_max_u32_e32 v0, s8, v1 +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: buffer_wbinvl1_vol +; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 +; GCN2-NEXT: s_or_b64 s[2:3], vcc, s[2:3] +; GCN2-NEXT: s_andn2_b64 exec, exec, s[2:3] +; GCN2-NEXT: s_cbranch_execnz .LBB15_1 +; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN2-NEXT: s_or_b64 exec, exec, s[2:3] +; GCN2-NEXT: v_mov_b32_e32 v1, s6 +; GCN2-NEXT: v_mov_b32_e32 v2, s7 +; GCN2-NEXT: flat_store_dword v[1:2], v0 +; GCN2-NEXT: s_endpgm +; +; GCN3-LABEL: atomic_umax_i32_ret_addr64: +; GCN3: ; %bb.0: ; %entry +; GCN3-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x3c +; GCN3-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24 +; GCN3-NEXT: s_load_dword s8, s[0:1], 0x34 +; GCN3-NEXT: s_waitcnt lgkmcnt(0) +; GCN3-NEXT: s_lshl_b64 s[0:1], s[2:3], 2 +; GCN3-NEXT: s_add_u32 s0, s4, s0 +; GCN3-NEXT: s_addc_u32 s1, s5, s1 +; GCN3-NEXT: v_mov_b32_e32 v0, s0 +; GCN3-NEXT: v_mov_b32_e32 v1, s1 +; GCN3-NEXT: flat_load_dword v0, v[0:1] +; GCN3-NEXT: s_mov_b64 s[2:3], 0 +; GCN3-NEXT: .LBB15_1: ; %atomicrmw.start +; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN3-NEXT: v_mov_b32_e32 v1, v0 +; GCN3-NEXT: v_mov_b32_e32 v3, s1 +; GCN3-NEXT: v_mov_b32_e32 v2, s0 +; GCN3-NEXT: v_max_u32_e32 v0, s8, v1 +; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN3-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc +; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN3-NEXT: buffer_wbinvl1_vol +; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 +; GCN3-NEXT: s_or_b64 s[2:3], vcc, s[2:3] +; GCN3-NEXT: s_andn2_b64 exec, exec, s[2:3] +; GCN3-NEXT: s_cbranch_execnz .LBB15_1 +; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN3-NEXT: s_or_b64 exec, exec, s[2:3] +; GCN3-NEXT: v_mov_b32_e32 v1, s6 +; GCN3-NEXT: v_mov_b32_e32 v2, s7 +; GCN3-NEXT: flat_store_dword v[1:2], v0 +; GCN3-NEXT: s_endpgm +entry: + %ptr = getelementptr i32, i32* %out, i64 %index + %val = atomicrmw volatile umax i32* %ptr, i32 %in seq_cst + store i32 %val, i32* %out2 + ret void +} + +define amdgpu_kernel void @atomic_min_i32_offset(i32* %out, i32 %in) { +; GCN1-LABEL: atomic_min_i32_offset: +; GCN1: ; %bb.0: ; %entry +; GCN1-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x9 +; GCN1-NEXT: s_load_dword s4, s[0:1], 0xb +; GCN1-NEXT: s_waitcnt lgkmcnt(0) +; GCN1-NEXT: s_add_u32 s0, s2, 16 +; GCN1-NEXT: s_addc_u32 s1, s3, 0 +; GCN1-NEXT: v_mov_b32_e32 v0, s0 +; GCN1-NEXT: v_mov_b32_e32 v1, s1 +; GCN1-NEXT: flat_load_dword v1, v[0:1] +; GCN1-NEXT: s_mov_b64 s[2:3], 0 +; GCN1-NEXT: .LBB16_1: ; %atomicrmw.start +; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN1-NEXT: v_mov_b32_e32 v3, s1 +; GCN1-NEXT: 
s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: v_min_i32_e32 v0, s4, v1 +; GCN1-NEXT: v_mov_b32_e32 v2, s0 +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: buffer_wbinvl1_vol +; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 +; GCN1-NEXT: s_or_b64 s[2:3], vcc, s[2:3] +; GCN1-NEXT: v_mov_b32_e32 v1, v0 +; GCN1-NEXT: s_andn2_b64 exec, exec, s[2:3] +; GCN1-NEXT: s_cbranch_execnz .LBB16_1 +; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN1-NEXT: s_endpgm +; +; GCN2-LABEL: atomic_min_i32_offset: +; GCN2: ; %bb.0: ; %entry +; GCN2-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24 +; GCN2-NEXT: s_load_dword s4, s[0:1], 0x2c +; GCN2-NEXT: s_waitcnt lgkmcnt(0) +; GCN2-NEXT: s_add_u32 s0, s2, 16 +; GCN2-NEXT: s_addc_u32 s1, s3, 0 +; GCN2-NEXT: v_mov_b32_e32 v0, s0 +; GCN2-NEXT: v_mov_b32_e32 v1, s1 +; GCN2-NEXT: flat_load_dword v1, v[0:1] +; GCN2-NEXT: s_mov_b64 s[2:3], 0 +; GCN2-NEXT: .LBB16_1: ; %atomicrmw.start +; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN2-NEXT: v_mov_b32_e32 v3, s1 +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: v_min_i32_e32 v0, s4, v1 +; GCN2-NEXT: v_mov_b32_e32 v2, s0 +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: buffer_wbinvl1_vol +; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 +; GCN2-NEXT: s_or_b64 s[2:3], vcc, s[2:3] +; GCN2-NEXT: v_mov_b32_e32 v1, v0 +; GCN2-NEXT: s_andn2_b64 exec, exec, s[2:3] +; GCN2-NEXT: s_cbranch_execnz .LBB16_1 +; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN2-NEXT: s_endpgm +; +; GCN3-LABEL: atomic_min_i32_offset: +; GCN3: ; %bb.0: ; %entry +; GCN3-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24 +; GCN3-NEXT: s_load_dword s4, s[0:1], 0x2c +; GCN3-NEXT: s_mov_b64 s[0:1], 0 +; GCN3-NEXT: s_waitcnt lgkmcnt(0) +; GCN3-NEXT: v_mov_b32_e32 v0, s2 +; GCN3-NEXT: v_mov_b32_e32 v1, s3 +; GCN3-NEXT: flat_load_dword v1, v[0:1] offset:16 +; GCN3-NEXT: .LBB16_1: ; %atomicrmw.start +; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN3-NEXT: v_mov_b32_e32 v2, s2 +; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN3-NEXT: v_min_i32_e32 v0, s4, v1 +; GCN3-NEXT: v_mov_b32_e32 v3, s3 +; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN3-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] offset:16 glc +; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN3-NEXT: buffer_wbinvl1_vol +; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 +; GCN3-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GCN3-NEXT: v_mov_b32_e32 v1, v0 +; GCN3-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GCN3-NEXT: s_cbranch_execnz .LBB16_1 +; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN3-NEXT: s_endpgm +entry: + %gep = getelementptr i32, i32* %out, i32 4 + %val = atomicrmw volatile min i32* %gep, i32 %in seq_cst + ret void +} + +define amdgpu_kernel void @atomic_min_i32_ret_offset(i32* %out, i32* %out2, i32 %in) { +; GCN1-LABEL: atomic_min_i32_ret_offset: +; GCN1: ; %bb.0: ; %entry +; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9 +; GCN1-NEXT: s_load_dword s8, s[0:1], 0xd +; GCN1-NEXT: s_mov_b64 s[2:3], 0 +; GCN1-NEXT: s_waitcnt lgkmcnt(0) +; GCN1-NEXT: s_add_u32 s0, s4, 16 +; GCN1-NEXT: s_addc_u32 s1, s5, 0 +; GCN1-NEXT: v_mov_b32_e32 v0, s0 +; GCN1-NEXT: v_mov_b32_e32 v1, s1 +; GCN1-NEXT: flat_load_dword v0, v[0:1] +; GCN1-NEXT: .LBB17_1: ; %atomicrmw.start +; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: v_mov_b32_e32 v1, v0 +; GCN1-NEXT: v_mov_b32_e32 v3, s1 +; 
GCN1-NEXT: v_mov_b32_e32 v2, s0 +; GCN1-NEXT: v_min_i32_e32 v0, s8, v1 +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: buffer_wbinvl1_vol +; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 +; GCN1-NEXT: s_or_b64 s[2:3], vcc, s[2:3] +; GCN1-NEXT: s_andn2_b64 exec, exec, s[2:3] +; GCN1-NEXT: s_cbranch_execnz .LBB17_1 +; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN1-NEXT: s_or_b64 exec, exec, s[2:3] +; GCN1-NEXT: v_mov_b32_e32 v1, s6 +; GCN1-NEXT: v_mov_b32_e32 v2, s7 +; GCN1-NEXT: flat_store_dword v[1:2], v0 +; GCN1-NEXT: s_endpgm +; +; GCN2-LABEL: atomic_min_i32_ret_offset: +; GCN2: ; %bb.0: ; %entry +; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24 +; GCN2-NEXT: s_load_dword s8, s[0:1], 0x34 +; GCN2-NEXT: s_mov_b64 s[2:3], 0 +; GCN2-NEXT: s_waitcnt lgkmcnt(0) +; GCN2-NEXT: s_add_u32 s0, s4, 16 +; GCN2-NEXT: s_addc_u32 s1, s5, 0 +; GCN2-NEXT: v_mov_b32_e32 v0, s0 +; GCN2-NEXT: v_mov_b32_e32 v1, s1 +; GCN2-NEXT: flat_load_dword v0, v[0:1] +; GCN2-NEXT: .LBB17_1: ; %atomicrmw.start +; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: v_mov_b32_e32 v1, v0 +; GCN2-NEXT: v_mov_b32_e32 v3, s1 +; GCN2-NEXT: v_mov_b32_e32 v2, s0 +; GCN2-NEXT: v_min_i32_e32 v0, s8, v1 +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: buffer_wbinvl1_vol +; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 +; GCN2-NEXT: s_or_b64 s[2:3], vcc, s[2:3] +; GCN2-NEXT: s_andn2_b64 exec, exec, s[2:3] +; GCN2-NEXT: s_cbranch_execnz .LBB17_1 +; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN2-NEXT: s_or_b64 exec, exec, s[2:3] +; GCN2-NEXT: v_mov_b32_e32 v1, s6 +; GCN2-NEXT: v_mov_b32_e32 v2, s7 +; GCN2-NEXT: flat_store_dword v[1:2], v0 +; GCN2-NEXT: s_endpgm +; +; GCN3-LABEL: atomic_min_i32_ret_offset: +; GCN3: ; %bb.0: ; %entry +; GCN3-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24 +; GCN3-NEXT: s_load_dword s2, s[0:1], 0x34 +; GCN3-NEXT: s_mov_b64 s[0:1], 0 +; GCN3-NEXT: s_waitcnt lgkmcnt(0) +; GCN3-NEXT: v_mov_b32_e32 v0, s4 +; GCN3-NEXT: v_mov_b32_e32 v1, s5 +; GCN3-NEXT: flat_load_dword v0, v[0:1] offset:16 +; GCN3-NEXT: .LBB17_1: ; %atomicrmw.start +; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN3-NEXT: v_mov_b32_e32 v1, v0 +; GCN3-NEXT: v_mov_b32_e32 v2, s4 +; GCN3-NEXT: v_mov_b32_e32 v3, s5 +; GCN3-NEXT: v_min_i32_e32 v0, s2, v1 +; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN3-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] offset:16 glc +; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN3-NEXT: buffer_wbinvl1_vol +; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 +; GCN3-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GCN3-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GCN3-NEXT: s_cbranch_execnz .LBB17_1 +; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN3-NEXT: s_or_b64 exec, exec, s[0:1] +; GCN3-NEXT: v_mov_b32_e32 v1, s6 +; GCN3-NEXT: v_mov_b32_e32 v2, s7 +; GCN3-NEXT: flat_store_dword v[1:2], v0 +; GCN3-NEXT: s_endpgm +entry: + %gep = getelementptr i32, i32* %out, i32 4 + %val = atomicrmw volatile min i32* %gep, i32 %in seq_cst + store i32 %val, i32* %out2 + ret void +} + +define amdgpu_kernel void @atomic_min_i32_addr64_offset(i32* %out, i32 %in, i64 %index) { +; GCN1-LABEL: atomic_min_i32_addr64_offset: +; GCN1: ; %bb.0: ; %entry +; GCN1-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xd +; GCN1-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x9 +; GCN1-NEXT: 
s_load_dword s4, s[0:1], 0xb +; GCN1-NEXT: s_waitcnt lgkmcnt(0) +; GCN1-NEXT: s_lshl_b64 s[0:1], s[2:3], 2 +; GCN1-NEXT: s_add_u32 s0, s6, s0 +; GCN1-NEXT: s_addc_u32 s1, s7, s1 +; GCN1-NEXT: s_add_u32 s0, s0, 16 +; GCN1-NEXT: s_addc_u32 s1, s1, 0 +; GCN1-NEXT: v_mov_b32_e32 v0, s0 +; GCN1-NEXT: v_mov_b32_e32 v1, s1 +; GCN1-NEXT: flat_load_dword v1, v[0:1] +; GCN1-NEXT: s_mov_b64 s[2:3], 0 +; GCN1-NEXT: .LBB18_1: ; %atomicrmw.start +; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN1-NEXT: v_mov_b32_e32 v3, s1 +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: v_min_i32_e32 v0, s4, v1 +; GCN1-NEXT: v_mov_b32_e32 v2, s0 +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: buffer_wbinvl1_vol +; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 +; GCN1-NEXT: s_or_b64 s[2:3], vcc, s[2:3] +; GCN1-NEXT: v_mov_b32_e32 v1, v0 +; GCN1-NEXT: s_andn2_b64 exec, exec, s[2:3] +; GCN1-NEXT: s_cbranch_execnz .LBB18_1 +; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN1-NEXT: s_endpgm +; +; GCN2-LABEL: atomic_min_i32_addr64_offset: +; GCN2: ; %bb.0: ; %entry +; GCN2-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34 +; GCN2-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x24 +; GCN2-NEXT: s_load_dword s4, s[0:1], 0x2c +; GCN2-NEXT: s_waitcnt lgkmcnt(0) +; GCN2-NEXT: s_lshl_b64 s[0:1], s[2:3], 2 +; GCN2-NEXT: s_add_u32 s0, s6, s0 +; GCN2-NEXT: s_addc_u32 s1, s7, s1 +; GCN2-NEXT: s_add_u32 s0, s0, 16 +; GCN2-NEXT: s_addc_u32 s1, s1, 0 +; GCN2-NEXT: v_mov_b32_e32 v0, s0 +; GCN2-NEXT: v_mov_b32_e32 v1, s1 +; GCN2-NEXT: flat_load_dword v1, v[0:1] +; GCN2-NEXT: s_mov_b64 s[2:3], 0 +; GCN2-NEXT: .LBB18_1: ; %atomicrmw.start +; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN2-NEXT: v_mov_b32_e32 v3, s1 +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: v_min_i32_e32 v0, s4, v1 +; GCN2-NEXT: v_mov_b32_e32 v2, s0 +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: buffer_wbinvl1_vol +; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 +; GCN2-NEXT: s_or_b64 s[2:3], vcc, s[2:3] +; GCN2-NEXT: v_mov_b32_e32 v1, v0 +; GCN2-NEXT: s_andn2_b64 exec, exec, s[2:3] +; GCN2-NEXT: s_cbranch_execnz .LBB18_1 +; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN2-NEXT: s_endpgm +; +; GCN3-LABEL: atomic_min_i32_addr64_offset: +; GCN3: ; %bb.0: ; %entry +; GCN3-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34 +; GCN3-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x24 +; GCN3-NEXT: s_load_dword s4, s[0:1], 0x2c +; GCN3-NEXT: s_waitcnt lgkmcnt(0) +; GCN3-NEXT: s_lshl_b64 s[0:1], s[2:3], 2 +; GCN3-NEXT: s_add_u32 s0, s6, s0 +; GCN3-NEXT: s_addc_u32 s1, s7, s1 +; GCN3-NEXT: v_mov_b32_e32 v0, s0 +; GCN3-NEXT: v_mov_b32_e32 v1, s1 +; GCN3-NEXT: flat_load_dword v1, v[0:1] offset:16 +; GCN3-NEXT: s_mov_b64 s[2:3], 0 +; GCN3-NEXT: .LBB18_1: ; %atomicrmw.start +; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN3-NEXT: v_mov_b32_e32 v3, s1 +; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN3-NEXT: v_min_i32_e32 v0, s4, v1 +; GCN3-NEXT: v_mov_b32_e32 v2, s0 +; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN3-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] offset:16 glc +; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN3-NEXT: buffer_wbinvl1_vol +; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 +; GCN3-NEXT: s_or_b64 s[2:3], vcc, s[2:3] +; GCN3-NEXT: v_mov_b32_e32 v1, v0 +; GCN3-NEXT: s_andn2_b64 exec, exec, s[2:3] +; GCN3-NEXT: s_cbranch_execnz .LBB18_1 +; GCN3-NEXT: ; %bb.2: ; 
%atomicrmw.end +; GCN3-NEXT: s_endpgm +entry: + %ptr = getelementptr i32, i32* %out, i64 %index + %gep = getelementptr i32, i32* %ptr, i32 4 + %val = atomicrmw volatile min i32* %gep, i32 %in seq_cst + ret void +} + +define amdgpu_kernel void @atomic_min_i32_ret_addr64_offset(i32* %out, i32* %out2, i32 %in, i64 %index) { +; GCN1-LABEL: atomic_min_i32_ret_addr64_offset: +; GCN1: ; %bb.0: ; %entry +; GCN1-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xf +; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9 +; GCN1-NEXT: s_load_dword s8, s[0:1], 0xd +; GCN1-NEXT: s_waitcnt lgkmcnt(0) +; GCN1-NEXT: s_lshl_b64 s[0:1], s[2:3], 2 +; GCN1-NEXT: s_add_u32 s0, s4, s0 +; GCN1-NEXT: s_addc_u32 s1, s5, s1 +; GCN1-NEXT: s_add_u32 s0, s0, 16 +; GCN1-NEXT: s_addc_u32 s1, s1, 0 +; GCN1-NEXT: v_mov_b32_e32 v0, s0 +; GCN1-NEXT: v_mov_b32_e32 v1, s1 +; GCN1-NEXT: flat_load_dword v0, v[0:1] +; GCN1-NEXT: s_mov_b64 s[2:3], 0 +; GCN1-NEXT: .LBB19_1: ; %atomicrmw.start +; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: v_mov_b32_e32 v1, v0 +; GCN1-NEXT: v_mov_b32_e32 v3, s1 +; GCN1-NEXT: v_mov_b32_e32 v2, s0 +; GCN1-NEXT: v_min_i32_e32 v0, s8, v1 +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: buffer_wbinvl1_vol +; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 +; GCN1-NEXT: s_or_b64 s[2:3], vcc, s[2:3] +; GCN1-NEXT: s_andn2_b64 exec, exec, s[2:3] +; GCN1-NEXT: s_cbranch_execnz .LBB19_1 +; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN1-NEXT: s_or_b64 exec, exec, s[2:3] +; GCN1-NEXT: v_mov_b32_e32 v1, s6 +; GCN1-NEXT: v_mov_b32_e32 v2, s7 +; GCN1-NEXT: flat_store_dword v[1:2], v0 +; GCN1-NEXT: s_endpgm +; +; GCN2-LABEL: atomic_min_i32_ret_addr64_offset: +; GCN2: ; %bb.0: ; %entry +; GCN2-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x3c +; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24 +; GCN2-NEXT: s_load_dword s8, s[0:1], 0x34 +; GCN2-NEXT: s_waitcnt lgkmcnt(0) +; GCN2-NEXT: s_lshl_b64 s[0:1], s[2:3], 2 +; GCN2-NEXT: s_add_u32 s0, s4, s0 +; GCN2-NEXT: s_addc_u32 s1, s5, s1 +; GCN2-NEXT: s_add_u32 s0, s0, 16 +; GCN2-NEXT: s_addc_u32 s1, s1, 0 +; GCN2-NEXT: v_mov_b32_e32 v0, s0 +; GCN2-NEXT: v_mov_b32_e32 v1, s1 +; GCN2-NEXT: flat_load_dword v0, v[0:1] +; GCN2-NEXT: s_mov_b64 s[2:3], 0 +; GCN2-NEXT: .LBB19_1: ; %atomicrmw.start +; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: v_mov_b32_e32 v1, v0 +; GCN2-NEXT: v_mov_b32_e32 v3, s1 +; GCN2-NEXT: v_mov_b32_e32 v2, s0 +; GCN2-NEXT: v_min_i32_e32 v0, s8, v1 +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: buffer_wbinvl1_vol +; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 +; GCN2-NEXT: s_or_b64 s[2:3], vcc, s[2:3] +; GCN2-NEXT: s_andn2_b64 exec, exec, s[2:3] +; GCN2-NEXT: s_cbranch_execnz .LBB19_1 +; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN2-NEXT: s_or_b64 exec, exec, s[2:3] +; GCN2-NEXT: v_mov_b32_e32 v1, s6 +; GCN2-NEXT: v_mov_b32_e32 v2, s7 +; GCN2-NEXT: flat_store_dword v[1:2], v0 +; GCN2-NEXT: s_endpgm +; +; GCN3-LABEL: atomic_min_i32_ret_addr64_offset: +; GCN3: ; %bb.0: ; %entry +; GCN3-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x3c +; GCN3-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24 +; GCN3-NEXT: s_load_dword s8, s[0:1], 0x34 +; GCN3-NEXT: s_waitcnt lgkmcnt(0) +; GCN3-NEXT: s_lshl_b64 s[0:1], s[2:3], 2 +; GCN3-NEXT: s_add_u32 s0, s4, s0 +; GCN3-NEXT: s_addc_u32 s1, s5, 
s1 +; GCN3-NEXT: v_mov_b32_e32 v0, s0 +; GCN3-NEXT: v_mov_b32_e32 v1, s1 +; GCN3-NEXT: flat_load_dword v0, v[0:1] offset:16 +; GCN3-NEXT: s_mov_b64 s[2:3], 0 +; GCN3-NEXT: .LBB19_1: ; %atomicrmw.start +; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN3-NEXT: v_mov_b32_e32 v1, v0 +; GCN3-NEXT: v_mov_b32_e32 v3, s1 +; GCN3-NEXT: v_mov_b32_e32 v2, s0 +; GCN3-NEXT: v_min_i32_e32 v0, s8, v1 +; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN3-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] offset:16 glc +; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN3-NEXT: buffer_wbinvl1_vol +; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 +; GCN3-NEXT: s_or_b64 s[2:3], vcc, s[2:3] +; GCN3-NEXT: s_andn2_b64 exec, exec, s[2:3] +; GCN3-NEXT: s_cbranch_execnz .LBB19_1 +; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN3-NEXT: s_or_b64 exec, exec, s[2:3] +; GCN3-NEXT: v_mov_b32_e32 v1, s6 +; GCN3-NEXT: v_mov_b32_e32 v2, s7 +; GCN3-NEXT: flat_store_dword v[1:2], v0 +; GCN3-NEXT: s_endpgm +entry: + %ptr = getelementptr i32, i32* %out, i64 %index + %gep = getelementptr i32, i32* %ptr, i32 4 + %val = atomicrmw volatile min i32* %gep, i32 %in seq_cst + store i32 %val, i32* %out2 + ret void +} + +define amdgpu_kernel void @atomic_min_i32(i32* %out, i32 %in) { +; GCN1-LABEL: atomic_min_i32: +; GCN1: ; %bb.0: ; %entry +; GCN1-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x9 +; GCN1-NEXT: s_load_dword s4, s[0:1], 0xb +; GCN1-NEXT: s_mov_b64 s[0:1], 0 +; GCN1-NEXT: s_waitcnt lgkmcnt(0) +; GCN1-NEXT: v_mov_b32_e32 v0, s2 +; GCN1-NEXT: v_mov_b32_e32 v1, s3 +; GCN1-NEXT: flat_load_dword v1, v[0:1] +; GCN1-NEXT: .LBB20_1: ; %atomicrmw.start +; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN1-NEXT: v_mov_b32_e32 v2, s2 +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: v_min_i32_e32 v0, s4, v1 +; GCN1-NEXT: v_mov_b32_e32 v3, s3 +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: buffer_wbinvl1_vol +; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 +; GCN1-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GCN1-NEXT: v_mov_b32_e32 v1, v0 +; GCN1-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GCN1-NEXT: s_cbranch_execnz .LBB20_1 +; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN1-NEXT: s_endpgm +; +; GCN2-LABEL: atomic_min_i32: +; GCN2: ; %bb.0: ; %entry +; GCN2-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24 +; GCN2-NEXT: s_load_dword s4, s[0:1], 0x2c +; GCN2-NEXT: s_mov_b64 s[0:1], 0 +; GCN2-NEXT: s_waitcnt lgkmcnt(0) +; GCN2-NEXT: v_mov_b32_e32 v0, s2 +; GCN2-NEXT: v_mov_b32_e32 v1, s3 +; GCN2-NEXT: flat_load_dword v1, v[0:1] +; GCN2-NEXT: .LBB20_1: ; %atomicrmw.start +; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN2-NEXT: v_mov_b32_e32 v2, s2 +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: v_min_i32_e32 v0, s4, v1 +; GCN2-NEXT: v_mov_b32_e32 v3, s3 +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: buffer_wbinvl1_vol +; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 +; GCN2-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GCN2-NEXT: v_mov_b32_e32 v1, v0 +; GCN2-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GCN2-NEXT: s_cbranch_execnz .LBB20_1 +; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN2-NEXT: s_endpgm +; +; GCN3-LABEL: atomic_min_i32: +; GCN3: ; %bb.0: ; %entry +; GCN3-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24 +; GCN3-NEXT: s_load_dword s4, s[0:1], 0x2c +; GCN3-NEXT: s_mov_b64 s[0:1], 0 +; GCN3-NEXT: s_waitcnt 
lgkmcnt(0) +; GCN3-NEXT: v_mov_b32_e32 v0, s2 +; GCN3-NEXT: v_mov_b32_e32 v1, s3 +; GCN3-NEXT: flat_load_dword v1, v[0:1] +; GCN3-NEXT: .LBB20_1: ; %atomicrmw.start +; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN3-NEXT: v_mov_b32_e32 v2, s2 +; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN3-NEXT: v_min_i32_e32 v0, s4, v1 +; GCN3-NEXT: v_mov_b32_e32 v3, s3 +; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN3-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc +; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN3-NEXT: buffer_wbinvl1_vol +; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 +; GCN3-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GCN3-NEXT: v_mov_b32_e32 v1, v0 +; GCN3-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GCN3-NEXT: s_cbranch_execnz .LBB20_1 +; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN3-NEXT: s_endpgm +entry: + %val = atomicrmw volatile min i32* %out, i32 %in seq_cst + ret void +} + +define amdgpu_kernel void @atomic_min_i32_ret(i32* %out, i32* %out2, i32 %in) { +; GCN1-LABEL: atomic_min_i32_ret: +; GCN1: ; %bb.0: ; %entry +; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9 +; GCN1-NEXT: s_load_dword s2, s[0:1], 0xd +; GCN1-NEXT: s_mov_b64 s[0:1], 0 +; GCN1-NEXT: s_waitcnt lgkmcnt(0) +; GCN1-NEXT: v_mov_b32_e32 v0, s4 +; GCN1-NEXT: v_mov_b32_e32 v1, s5 +; GCN1-NEXT: flat_load_dword v0, v[0:1] +; GCN1-NEXT: .LBB21_1: ; %atomicrmw.start +; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: v_mov_b32_e32 v1, v0 +; GCN1-NEXT: v_mov_b32_e32 v2, s4 +; GCN1-NEXT: v_mov_b32_e32 v3, s5 +; GCN1-NEXT: v_min_i32_e32 v0, s2, v1 +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: buffer_wbinvl1_vol +; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 +; GCN1-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GCN1-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GCN1-NEXT: s_cbranch_execnz .LBB21_1 +; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN1-NEXT: s_or_b64 exec, exec, s[0:1] +; GCN1-NEXT: v_mov_b32_e32 v1, s6 +; GCN1-NEXT: v_mov_b32_e32 v2, s7 +; GCN1-NEXT: flat_store_dword v[1:2], v0 +; GCN1-NEXT: s_endpgm +; +; GCN2-LABEL: atomic_min_i32_ret: +; GCN2: ; %bb.0: ; %entry +; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24 +; GCN2-NEXT: s_load_dword s2, s[0:1], 0x34 +; GCN2-NEXT: s_mov_b64 s[0:1], 0 +; GCN2-NEXT: s_waitcnt lgkmcnt(0) +; GCN2-NEXT: v_mov_b32_e32 v0, s4 +; GCN2-NEXT: v_mov_b32_e32 v1, s5 +; GCN2-NEXT: flat_load_dword v0, v[0:1] +; GCN2-NEXT: .LBB21_1: ; %atomicrmw.start +; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: v_mov_b32_e32 v1, v0 +; GCN2-NEXT: v_mov_b32_e32 v2, s4 +; GCN2-NEXT: v_mov_b32_e32 v3, s5 +; GCN2-NEXT: v_min_i32_e32 v0, s2, v1 +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: buffer_wbinvl1_vol +; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 +; GCN2-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GCN2-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GCN2-NEXT: s_cbranch_execnz .LBB21_1 +; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN2-NEXT: s_or_b64 exec, exec, s[0:1] +; GCN2-NEXT: v_mov_b32_e32 v1, s6 +; GCN2-NEXT: v_mov_b32_e32 v2, s7 +; GCN2-NEXT: flat_store_dword v[1:2], v0 +; GCN2-NEXT: s_endpgm +; +; GCN3-LABEL: atomic_min_i32_ret: +; GCN3: ; %bb.0: ; %entry +; GCN3-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24 +; GCN3-NEXT: s_load_dword s2, s[0:1], 0x34 +; GCN3-NEXT: s_mov_b64 s[0:1], 0 +; GCN3-NEXT: 
s_waitcnt lgkmcnt(0) +; GCN3-NEXT: v_mov_b32_e32 v0, s4 +; GCN3-NEXT: v_mov_b32_e32 v1, s5 +; GCN3-NEXT: flat_load_dword v0, v[0:1] +; GCN3-NEXT: .LBB21_1: ; %atomicrmw.start +; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN3-NEXT: v_mov_b32_e32 v1, v0 +; GCN3-NEXT: v_mov_b32_e32 v2, s4 +; GCN3-NEXT: v_mov_b32_e32 v3, s5 +; GCN3-NEXT: v_min_i32_e32 v0, s2, v1 +; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN3-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc +; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN3-NEXT: buffer_wbinvl1_vol +; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 +; GCN3-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GCN3-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GCN3-NEXT: s_cbranch_execnz .LBB21_1 +; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN3-NEXT: s_or_b64 exec, exec, s[0:1] +; GCN3-NEXT: v_mov_b32_e32 v1, s6 +; GCN3-NEXT: v_mov_b32_e32 v2, s7 +; GCN3-NEXT: flat_store_dword v[1:2], v0 +; GCN3-NEXT: s_endpgm +entry: + %val = atomicrmw volatile min i32* %out, i32 %in seq_cst + store i32 %val, i32* %out2 + ret void +} + +define amdgpu_kernel void @atomic_min_i32_addr64(i32* %out, i32 %in, i64 %index) { +; GCN1-LABEL: atomic_min_i32_addr64: +; GCN1: ; %bb.0: ; %entry +; GCN1-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xd +; GCN1-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x9 +; GCN1-NEXT: s_load_dword s4, s[0:1], 0xb +; GCN1-NEXT: s_waitcnt lgkmcnt(0) +; GCN1-NEXT: s_lshl_b64 s[0:1], s[2:3], 2 +; GCN1-NEXT: s_add_u32 s0, s6, s0 +; GCN1-NEXT: s_addc_u32 s1, s7, s1 +; GCN1-NEXT: v_mov_b32_e32 v0, s0 +; GCN1-NEXT: v_mov_b32_e32 v1, s1 +; GCN1-NEXT: flat_load_dword v1, v[0:1] +; GCN1-NEXT: s_mov_b64 s[2:3], 0 +; GCN1-NEXT: .LBB22_1: ; %atomicrmw.start +; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN1-NEXT: v_mov_b32_e32 v3, s1 +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: v_min_i32_e32 v0, s4, v1 +; GCN1-NEXT: v_mov_b32_e32 v2, s0 +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: buffer_wbinvl1_vol +; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 +; GCN1-NEXT: s_or_b64 s[2:3], vcc, s[2:3] +; GCN1-NEXT: v_mov_b32_e32 v1, v0 +; GCN1-NEXT: s_andn2_b64 exec, exec, s[2:3] +; GCN1-NEXT: s_cbranch_execnz .LBB22_1 +; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN1-NEXT: s_endpgm +; +; GCN2-LABEL: atomic_min_i32_addr64: +; GCN2: ; %bb.0: ; %entry +; GCN2-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34 +; GCN2-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x24 +; GCN2-NEXT: s_load_dword s4, s[0:1], 0x2c +; GCN2-NEXT: s_waitcnt lgkmcnt(0) +; GCN2-NEXT: s_lshl_b64 s[0:1], s[2:3], 2 +; GCN2-NEXT: s_add_u32 s0, s6, s0 +; GCN2-NEXT: s_addc_u32 s1, s7, s1 +; GCN2-NEXT: v_mov_b32_e32 v0, s0 +; GCN2-NEXT: v_mov_b32_e32 v1, s1 +; GCN2-NEXT: flat_load_dword v1, v[0:1] +; GCN2-NEXT: s_mov_b64 s[2:3], 0 +; GCN2-NEXT: .LBB22_1: ; %atomicrmw.start +; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN2-NEXT: v_mov_b32_e32 v3, s1 +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: v_min_i32_e32 v0, s4, v1 +; GCN2-NEXT: v_mov_b32_e32 v2, s0 +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: buffer_wbinvl1_vol +; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 +; GCN2-NEXT: s_or_b64 s[2:3], vcc, s[2:3] +; GCN2-NEXT: v_mov_b32_e32 v1, v0 +; GCN2-NEXT: s_andn2_b64 exec, exec, s[2:3] +; GCN2-NEXT: s_cbranch_execnz .LBB22_1 +; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end +; 
GCN2-NEXT: s_endpgm +; +; GCN3-LABEL: atomic_min_i32_addr64: +; GCN3: ; %bb.0: ; %entry +; GCN3-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34 +; GCN3-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x24 +; GCN3-NEXT: s_load_dword s4, s[0:1], 0x2c +; GCN3-NEXT: s_waitcnt lgkmcnt(0) +; GCN3-NEXT: s_lshl_b64 s[0:1], s[2:3], 2 +; GCN3-NEXT: s_add_u32 s0, s6, s0 +; GCN3-NEXT: s_addc_u32 s1, s7, s1 +; GCN3-NEXT: v_mov_b32_e32 v0, s0 +; GCN3-NEXT: v_mov_b32_e32 v1, s1 +; GCN3-NEXT: flat_load_dword v1, v[0:1] +; GCN3-NEXT: s_mov_b64 s[2:3], 0 +; GCN3-NEXT: .LBB22_1: ; %atomicrmw.start +; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN3-NEXT: v_mov_b32_e32 v3, s1 +; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN3-NEXT: v_min_i32_e32 v0, s4, v1 +; GCN3-NEXT: v_mov_b32_e32 v2, s0 +; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN3-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc +; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN3-NEXT: buffer_wbinvl1_vol +; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 +; GCN3-NEXT: s_or_b64 s[2:3], vcc, s[2:3] +; GCN3-NEXT: v_mov_b32_e32 v1, v0 +; GCN3-NEXT: s_andn2_b64 exec, exec, s[2:3] +; GCN3-NEXT: s_cbranch_execnz .LBB22_1 +; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN3-NEXT: s_endpgm +entry: + %ptr = getelementptr i32, i32* %out, i64 %index + %val = atomicrmw volatile min i32* %ptr, i32 %in seq_cst + ret void +} + +define amdgpu_kernel void @atomic_min_i32_ret_addr64(i32* %out, i32* %out2, i32 %in, i64 %index) { +; GCN1-LABEL: atomic_min_i32_ret_addr64: +; GCN1: ; %bb.0: ; %entry +; GCN1-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xf +; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9 +; GCN1-NEXT: s_load_dword s8, s[0:1], 0xd +; GCN1-NEXT: s_waitcnt lgkmcnt(0) +; GCN1-NEXT: s_lshl_b64 s[0:1], s[2:3], 2 +; GCN1-NEXT: s_add_u32 s0, s4, s0 +; GCN1-NEXT: s_addc_u32 s1, s5, s1 +; GCN1-NEXT: v_mov_b32_e32 v0, s0 +; GCN1-NEXT: v_mov_b32_e32 v1, s1 +; GCN1-NEXT: flat_load_dword v0, v[0:1] +; GCN1-NEXT: s_mov_b64 s[2:3], 0 +; GCN1-NEXT: .LBB23_1: ; %atomicrmw.start +; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: v_mov_b32_e32 v1, v0 +; GCN1-NEXT: v_mov_b32_e32 v3, s1 +; GCN1-NEXT: v_mov_b32_e32 v2, s0 +; GCN1-NEXT: v_min_i32_e32 v0, s8, v1 +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: buffer_wbinvl1_vol +; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 +; GCN1-NEXT: s_or_b64 s[2:3], vcc, s[2:3] +; GCN1-NEXT: s_andn2_b64 exec, exec, s[2:3] +; GCN1-NEXT: s_cbranch_execnz .LBB23_1 +; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN1-NEXT: s_or_b64 exec, exec, s[2:3] +; GCN1-NEXT: v_mov_b32_e32 v1, s6 +; GCN1-NEXT: v_mov_b32_e32 v2, s7 +; GCN1-NEXT: flat_store_dword v[1:2], v0 +; GCN1-NEXT: s_endpgm +; +; GCN2-LABEL: atomic_min_i32_ret_addr64: +; GCN2: ; %bb.0: ; %entry +; GCN2-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x3c +; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24 +; GCN2-NEXT: s_load_dword s8, s[0:1], 0x34 +; GCN2-NEXT: s_waitcnt lgkmcnt(0) +; GCN2-NEXT: s_lshl_b64 s[0:1], s[2:3], 2 +; GCN2-NEXT: s_add_u32 s0, s4, s0 +; GCN2-NEXT: s_addc_u32 s1, s5, s1 +; GCN2-NEXT: v_mov_b32_e32 v0, s0 +; GCN2-NEXT: v_mov_b32_e32 v1, s1 +; GCN2-NEXT: flat_load_dword v0, v[0:1] +; GCN2-NEXT: s_mov_b64 s[2:3], 0 +; GCN2-NEXT: .LBB23_1: ; %atomicrmw.start +; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: v_mov_b32_e32 v1, v0 +; GCN2-NEXT: v_mov_b32_e32 v3, s1 +; GCN2-NEXT: v_mov_b32_e32 v2, s0 
+; GCN2-NEXT: v_min_i32_e32 v0, s8, v1 +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: buffer_wbinvl1_vol +; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 +; GCN2-NEXT: s_or_b64 s[2:3], vcc, s[2:3] +; GCN2-NEXT: s_andn2_b64 exec, exec, s[2:3] +; GCN2-NEXT: s_cbranch_execnz .LBB23_1 +; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN2-NEXT: s_or_b64 exec, exec, s[2:3] +; GCN2-NEXT: v_mov_b32_e32 v1, s6 +; GCN2-NEXT: v_mov_b32_e32 v2, s7 +; GCN2-NEXT: flat_store_dword v[1:2], v0 +; GCN2-NEXT: s_endpgm +; +; GCN3-LABEL: atomic_min_i32_ret_addr64: +; GCN3: ; %bb.0: ; %entry +; GCN3-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x3c +; GCN3-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24 +; GCN3-NEXT: s_load_dword s8, s[0:1], 0x34 +; GCN3-NEXT: s_waitcnt lgkmcnt(0) +; GCN3-NEXT: s_lshl_b64 s[0:1], s[2:3], 2 +; GCN3-NEXT: s_add_u32 s0, s4, s0 +; GCN3-NEXT: s_addc_u32 s1, s5, s1 +; GCN3-NEXT: v_mov_b32_e32 v0, s0 +; GCN3-NEXT: v_mov_b32_e32 v1, s1 +; GCN3-NEXT: flat_load_dword v0, v[0:1] +; GCN3-NEXT: s_mov_b64 s[2:3], 0 +; GCN3-NEXT: .LBB23_1: ; %atomicrmw.start +; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN3-NEXT: v_mov_b32_e32 v1, v0 +; GCN3-NEXT: v_mov_b32_e32 v3, s1 +; GCN3-NEXT: v_mov_b32_e32 v2, s0 +; GCN3-NEXT: v_min_i32_e32 v0, s8, v1 +; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN3-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc +; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN3-NEXT: buffer_wbinvl1_vol +; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 +; GCN3-NEXT: s_or_b64 s[2:3], vcc, s[2:3] +; GCN3-NEXT: s_andn2_b64 exec, exec, s[2:3] +; GCN3-NEXT: s_cbranch_execnz .LBB23_1 +; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN3-NEXT: s_or_b64 exec, exec, s[2:3] +; GCN3-NEXT: v_mov_b32_e32 v1, s6 +; GCN3-NEXT: v_mov_b32_e32 v2, s7 +; GCN3-NEXT: flat_store_dword v[1:2], v0 +; GCN3-NEXT: s_endpgm +entry: + %ptr = getelementptr i32, i32* %out, i64 %index + %val = atomicrmw volatile min i32* %ptr, i32 %in seq_cst + store i32 %val, i32* %out2 + ret void +} + +define amdgpu_kernel void @atomic_umin_i32_offset(i32* %out, i32 %in) { +; GCN1-LABEL: atomic_umin_i32_offset: +; GCN1: ; %bb.0: ; %entry +; GCN1-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x9 +; GCN1-NEXT: s_load_dword s4, s[0:1], 0xb +; GCN1-NEXT: s_waitcnt lgkmcnt(0) +; GCN1-NEXT: s_add_u32 s0, s2, 16 +; GCN1-NEXT: s_addc_u32 s1, s3, 0 +; GCN1-NEXT: v_mov_b32_e32 v0, s0 +; GCN1-NEXT: v_mov_b32_e32 v1, s1 +; GCN1-NEXT: flat_load_dword v1, v[0:1] +; GCN1-NEXT: s_mov_b64 s[2:3], 0 +; GCN1-NEXT: .LBB24_1: ; %atomicrmw.start +; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN1-NEXT: v_mov_b32_e32 v3, s1 +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: v_min_u32_e32 v0, s4, v1 +; GCN1-NEXT: v_mov_b32_e32 v2, s0 +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: buffer_wbinvl1_vol +; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 +; GCN1-NEXT: s_or_b64 s[2:3], vcc, s[2:3] +; GCN1-NEXT: v_mov_b32_e32 v1, v0 +; GCN1-NEXT: s_andn2_b64 exec, exec, s[2:3] +; GCN1-NEXT: s_cbranch_execnz .LBB24_1 +; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN1-NEXT: s_endpgm +; +; GCN2-LABEL: atomic_umin_i32_offset: +; GCN2: ; %bb.0: ; %entry +; GCN2-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24 +; GCN2-NEXT: s_load_dword s4, s[0:1], 0x2c +; GCN2-NEXT: s_waitcnt lgkmcnt(0) +; GCN2-NEXT: s_add_u32 s0, s2, 16 +; GCN2-NEXT: 
s_addc_u32 s1, s3, 0 +; GCN2-NEXT: v_mov_b32_e32 v0, s0 +; GCN2-NEXT: v_mov_b32_e32 v1, s1 +; GCN2-NEXT: flat_load_dword v1, v[0:1] +; GCN2-NEXT: s_mov_b64 s[2:3], 0 +; GCN2-NEXT: .LBB24_1: ; %atomicrmw.start +; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN2-NEXT: v_mov_b32_e32 v3, s1 +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: v_min_u32_e32 v0, s4, v1 +; GCN2-NEXT: v_mov_b32_e32 v2, s0 +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: buffer_wbinvl1_vol +; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 +; GCN2-NEXT: s_or_b64 s[2:3], vcc, s[2:3] +; GCN2-NEXT: v_mov_b32_e32 v1, v0 +; GCN2-NEXT: s_andn2_b64 exec, exec, s[2:3] +; GCN2-NEXT: s_cbranch_execnz .LBB24_1 +; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN2-NEXT: s_endpgm +; +; GCN3-LABEL: atomic_umin_i32_offset: +; GCN3: ; %bb.0: ; %entry +; GCN3-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24 +; GCN3-NEXT: s_load_dword s4, s[0:1], 0x2c +; GCN3-NEXT: s_mov_b64 s[0:1], 0 +; GCN3-NEXT: s_waitcnt lgkmcnt(0) +; GCN3-NEXT: v_mov_b32_e32 v0, s2 +; GCN3-NEXT: v_mov_b32_e32 v1, s3 +; GCN3-NEXT: flat_load_dword v1, v[0:1] offset:16 +; GCN3-NEXT: .LBB24_1: ; %atomicrmw.start +; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN3-NEXT: v_mov_b32_e32 v2, s2 +; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN3-NEXT: v_min_u32_e32 v0, s4, v1 +; GCN3-NEXT: v_mov_b32_e32 v3, s3 +; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN3-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] offset:16 glc +; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN3-NEXT: buffer_wbinvl1_vol +; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 +; GCN3-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GCN3-NEXT: v_mov_b32_e32 v1, v0 +; GCN3-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GCN3-NEXT: s_cbranch_execnz .LBB24_1 +; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN3-NEXT: s_endpgm +entry: + %gep = getelementptr i32, i32* %out, i32 4 + %val = atomicrmw volatile umin i32* %gep, i32 %in seq_cst + ret void +} + +define amdgpu_kernel void @atomic_umin_i32_ret_offset(i32* %out, i32* %out2, i32 %in) { +; GCN1-LABEL: atomic_umin_i32_ret_offset: +; GCN1: ; %bb.0: ; %entry +; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9 +; GCN1-NEXT: s_load_dword s8, s[0:1], 0xd +; GCN1-NEXT: s_mov_b64 s[2:3], 0 +; GCN1-NEXT: s_waitcnt lgkmcnt(0) +; GCN1-NEXT: s_add_u32 s0, s4, 16 +; GCN1-NEXT: s_addc_u32 s1, s5, 0 +; GCN1-NEXT: v_mov_b32_e32 v0, s0 +; GCN1-NEXT: v_mov_b32_e32 v1, s1 +; GCN1-NEXT: flat_load_dword v0, v[0:1] +; GCN1-NEXT: .LBB25_1: ; %atomicrmw.start +; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: v_mov_b32_e32 v1, v0 +; GCN1-NEXT: v_mov_b32_e32 v3, s1 +; GCN1-NEXT: v_mov_b32_e32 v2, s0 +; GCN1-NEXT: v_min_u32_e32 v0, s8, v1 +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: buffer_wbinvl1_vol +; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 +; GCN1-NEXT: s_or_b64 s[2:3], vcc, s[2:3] +; GCN1-NEXT: s_andn2_b64 exec, exec, s[2:3] +; GCN1-NEXT: s_cbranch_execnz .LBB25_1 +; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN1-NEXT: s_or_b64 exec, exec, s[2:3] +; GCN1-NEXT: v_mov_b32_e32 v1, s6 +; GCN1-NEXT: v_mov_b32_e32 v2, s7 +; GCN1-NEXT: flat_store_dword v[1:2], v0 +; GCN1-NEXT: s_endpgm +; +; GCN2-LABEL: atomic_umin_i32_ret_offset: +; GCN2: ; %bb.0: ; %entry +; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24 +; GCN2-NEXT: s_load_dword s8, s[0:1], 
0x34 +; GCN2-NEXT: s_mov_b64 s[2:3], 0 +; GCN2-NEXT: s_waitcnt lgkmcnt(0) +; GCN2-NEXT: s_add_u32 s0, s4, 16 +; GCN2-NEXT: s_addc_u32 s1, s5, 0 +; GCN2-NEXT: v_mov_b32_e32 v0, s0 +; GCN2-NEXT: v_mov_b32_e32 v1, s1 +; GCN2-NEXT: flat_load_dword v0, v[0:1] +; GCN2-NEXT: .LBB25_1: ; %atomicrmw.start +; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: v_mov_b32_e32 v1, v0 +; GCN2-NEXT: v_mov_b32_e32 v3, s1 +; GCN2-NEXT: v_mov_b32_e32 v2, s0 +; GCN2-NEXT: v_min_u32_e32 v0, s8, v1 +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: buffer_wbinvl1_vol +; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 +; GCN2-NEXT: s_or_b64 s[2:3], vcc, s[2:3] +; GCN2-NEXT: s_andn2_b64 exec, exec, s[2:3] +; GCN2-NEXT: s_cbranch_execnz .LBB25_1 +; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN2-NEXT: s_or_b64 exec, exec, s[2:3] +; GCN2-NEXT: v_mov_b32_e32 v1, s6 +; GCN2-NEXT: v_mov_b32_e32 v2, s7 +; GCN2-NEXT: flat_store_dword v[1:2], v0 +; GCN2-NEXT: s_endpgm +; +; GCN3-LABEL: atomic_umin_i32_ret_offset: +; GCN3: ; %bb.0: ; %entry +; GCN3-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24 +; GCN3-NEXT: s_load_dword s2, s[0:1], 0x34 +; GCN3-NEXT: s_mov_b64 s[0:1], 0 +; GCN3-NEXT: s_waitcnt lgkmcnt(0) +; GCN3-NEXT: v_mov_b32_e32 v0, s4 +; GCN3-NEXT: v_mov_b32_e32 v1, s5 +; GCN3-NEXT: flat_load_dword v0, v[0:1] offset:16 +; GCN3-NEXT: .LBB25_1: ; %atomicrmw.start +; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN3-NEXT: v_mov_b32_e32 v1, v0 +; GCN3-NEXT: v_mov_b32_e32 v2, s4 +; GCN3-NEXT: v_mov_b32_e32 v3, s5 +; GCN3-NEXT: v_min_u32_e32 v0, s2, v1 +; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN3-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] offset:16 glc +; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN3-NEXT: buffer_wbinvl1_vol +; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 +; GCN3-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GCN3-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GCN3-NEXT: s_cbranch_execnz .LBB25_1 +; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN3-NEXT: s_or_b64 exec, exec, s[0:1] +; GCN3-NEXT: v_mov_b32_e32 v1, s6 +; GCN3-NEXT: v_mov_b32_e32 v2, s7 +; GCN3-NEXT: flat_store_dword v[1:2], v0 +; GCN3-NEXT: s_endpgm +entry: + %gep = getelementptr i32, i32* %out, i32 4 + %val = atomicrmw volatile umin i32* %gep, i32 %in seq_cst + store i32 %val, i32* %out2 + ret void +} + +define amdgpu_kernel void @atomic_umin_i32_addr64_offset(i32* %out, i32 %in, i64 %index) { +; GCN1-LABEL: atomic_umin_i32_addr64_offset: +; GCN1: ; %bb.0: ; %entry +; GCN1-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xd +; GCN1-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x9 +; GCN1-NEXT: s_load_dword s4, s[0:1], 0xb +; GCN1-NEXT: s_waitcnt lgkmcnt(0) +; GCN1-NEXT: s_lshl_b64 s[0:1], s[2:3], 2 +; GCN1-NEXT: s_add_u32 s0, s6, s0 +; GCN1-NEXT: s_addc_u32 s1, s7, s1 +; GCN1-NEXT: s_add_u32 s0, s0, 16 +; GCN1-NEXT: s_addc_u32 s1, s1, 0 +; GCN1-NEXT: v_mov_b32_e32 v0, s0 +; GCN1-NEXT: v_mov_b32_e32 v1, s1 +; GCN1-NEXT: flat_load_dword v1, v[0:1] +; GCN1-NEXT: s_mov_b64 s[2:3], 0 +; GCN1-NEXT: .LBB26_1: ; %atomicrmw.start +; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN1-NEXT: v_mov_b32_e32 v3, s1 +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: v_min_u32_e32 v0, s4, v1 +; GCN1-NEXT: v_mov_b32_e32 v2, s0 +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: 
buffer_wbinvl1_vol +; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 +; GCN1-NEXT: s_or_b64 s[2:3], vcc, s[2:3] +; GCN1-NEXT: v_mov_b32_e32 v1, v0 +; GCN1-NEXT: s_andn2_b64 exec, exec, s[2:3] +; GCN1-NEXT: s_cbranch_execnz .LBB26_1 +; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN1-NEXT: s_endpgm +; +; GCN2-LABEL: atomic_umin_i32_addr64_offset: +; GCN2: ; %bb.0: ; %entry +; GCN2-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34 +; GCN2-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x24 +; GCN2-NEXT: s_load_dword s4, s[0:1], 0x2c +; GCN2-NEXT: s_waitcnt lgkmcnt(0) +; GCN2-NEXT: s_lshl_b64 s[0:1], s[2:3], 2 +; GCN2-NEXT: s_add_u32 s0, s6, s0 +; GCN2-NEXT: s_addc_u32 s1, s7, s1 +; GCN2-NEXT: s_add_u32 s0, s0, 16 +; GCN2-NEXT: s_addc_u32 s1, s1, 0 +; GCN2-NEXT: v_mov_b32_e32 v0, s0 +; GCN2-NEXT: v_mov_b32_e32 v1, s1 +; GCN2-NEXT: flat_load_dword v1, v[0:1] +; GCN2-NEXT: s_mov_b64 s[2:3], 0 +; GCN2-NEXT: .LBB26_1: ; %atomicrmw.start +; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN2-NEXT: v_mov_b32_e32 v3, s1 +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: v_min_u32_e32 v0, s4, v1 +; GCN2-NEXT: v_mov_b32_e32 v2, s0 +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: buffer_wbinvl1_vol +; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 +; GCN2-NEXT: s_or_b64 s[2:3], vcc, s[2:3] +; GCN2-NEXT: v_mov_b32_e32 v1, v0 +; GCN2-NEXT: s_andn2_b64 exec, exec, s[2:3] +; GCN2-NEXT: s_cbranch_execnz .LBB26_1 +; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN2-NEXT: s_endpgm +; +; GCN3-LABEL: atomic_umin_i32_addr64_offset: +; GCN3: ; %bb.0: ; %entry +; GCN3-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34 +; GCN3-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x24 +; GCN3-NEXT: s_load_dword s4, s[0:1], 0x2c +; GCN3-NEXT: s_waitcnt lgkmcnt(0) +; GCN3-NEXT: s_lshl_b64 s[0:1], s[2:3], 2 +; GCN3-NEXT: s_add_u32 s0, s6, s0 +; GCN3-NEXT: s_addc_u32 s1, s7, s1 +; GCN3-NEXT: v_mov_b32_e32 v0, s0 +; GCN3-NEXT: v_mov_b32_e32 v1, s1 +; GCN3-NEXT: flat_load_dword v1, v[0:1] offset:16 +; GCN3-NEXT: s_mov_b64 s[2:3], 0 +; GCN3-NEXT: .LBB26_1: ; %atomicrmw.start +; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN3-NEXT: v_mov_b32_e32 v3, s1 +; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN3-NEXT: v_min_u32_e32 v0, s4, v1 +; GCN3-NEXT: v_mov_b32_e32 v2, s0 +; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN3-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] offset:16 glc +; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN3-NEXT: buffer_wbinvl1_vol +; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 +; GCN3-NEXT: s_or_b64 s[2:3], vcc, s[2:3] +; GCN3-NEXT: v_mov_b32_e32 v1, v0 +; GCN3-NEXT: s_andn2_b64 exec, exec, s[2:3] +; GCN3-NEXT: s_cbranch_execnz .LBB26_1 +; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN3-NEXT: s_endpgm +entry: + %ptr = getelementptr i32, i32* %out, i64 %index + %gep = getelementptr i32, i32* %ptr, i32 4 + %val = atomicrmw volatile umin i32* %gep, i32 %in seq_cst + ret void +} + +define amdgpu_kernel void @atomic_umin_i32_ret_addr64_offset(i32* %out, i32* %out2, i32 %in, i64 %index) { +; GCN1-LABEL: atomic_umin_i32_ret_addr64_offset: +; GCN1: ; %bb.0: ; %entry +; GCN1-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xf +; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9 +; GCN1-NEXT: s_load_dword s8, s[0:1], 0xd +; GCN1-NEXT: s_waitcnt lgkmcnt(0) +; GCN1-NEXT: s_lshl_b64 s[0:1], s[2:3], 2 +; GCN1-NEXT: s_add_u32 s0, s4, s0 +; GCN1-NEXT: s_addc_u32 s1, s5, s1 +; GCN1-NEXT: s_add_u32 s0, s0, 16 +; GCN1-NEXT: s_addc_u32 s1, s1, 0 +; GCN1-NEXT: v_mov_b32_e32 v0, s0 +; 
GCN1-NEXT: v_mov_b32_e32 v1, s1 +; GCN1-NEXT: flat_load_dword v0, v[0:1] +; GCN1-NEXT: s_mov_b64 s[2:3], 0 +; GCN1-NEXT: .LBB27_1: ; %atomicrmw.start +; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: v_mov_b32_e32 v1, v0 +; GCN1-NEXT: v_mov_b32_e32 v3, s1 +; GCN1-NEXT: v_mov_b32_e32 v2, s0 +; GCN1-NEXT: v_min_u32_e32 v0, s8, v1 +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: buffer_wbinvl1_vol +; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 +; GCN1-NEXT: s_or_b64 s[2:3], vcc, s[2:3] +; GCN1-NEXT: s_andn2_b64 exec, exec, s[2:3] +; GCN1-NEXT: s_cbranch_execnz .LBB27_1 +; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN1-NEXT: s_or_b64 exec, exec, s[2:3] +; GCN1-NEXT: v_mov_b32_e32 v1, s6 +; GCN1-NEXT: v_mov_b32_e32 v2, s7 +; GCN1-NEXT: flat_store_dword v[1:2], v0 +; GCN1-NEXT: s_endpgm +; +; GCN2-LABEL: atomic_umin_i32_ret_addr64_offset: +; GCN2: ; %bb.0: ; %entry +; GCN2-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x3c +; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24 +; GCN2-NEXT: s_load_dword s8, s[0:1], 0x34 +; GCN2-NEXT: s_waitcnt lgkmcnt(0) +; GCN2-NEXT: s_lshl_b64 s[0:1], s[2:3], 2 +; GCN2-NEXT: s_add_u32 s0, s4, s0 +; GCN2-NEXT: s_addc_u32 s1, s5, s1 +; GCN2-NEXT: s_add_u32 s0, s0, 16 +; GCN2-NEXT: s_addc_u32 s1, s1, 0 +; GCN2-NEXT: v_mov_b32_e32 v0, s0 +; GCN2-NEXT: v_mov_b32_e32 v1, s1 +; GCN2-NEXT: flat_load_dword v0, v[0:1] +; GCN2-NEXT: s_mov_b64 s[2:3], 0 +; GCN2-NEXT: .LBB27_1: ; %atomicrmw.start +; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: v_mov_b32_e32 v1, v0 +; GCN2-NEXT: v_mov_b32_e32 v3, s1 +; GCN2-NEXT: v_mov_b32_e32 v2, s0 +; GCN2-NEXT: v_min_u32_e32 v0, s8, v1 +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: buffer_wbinvl1_vol +; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 +; GCN2-NEXT: s_or_b64 s[2:3], vcc, s[2:3] +; GCN2-NEXT: s_andn2_b64 exec, exec, s[2:3] +; GCN2-NEXT: s_cbranch_execnz .LBB27_1 +; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN2-NEXT: s_or_b64 exec, exec, s[2:3] +; GCN2-NEXT: v_mov_b32_e32 v1, s6 +; GCN2-NEXT: v_mov_b32_e32 v2, s7 +; GCN2-NEXT: flat_store_dword v[1:2], v0 +; GCN2-NEXT: s_endpgm +; +; GCN3-LABEL: atomic_umin_i32_ret_addr64_offset: +; GCN3: ; %bb.0: ; %entry +; GCN3-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x3c +; GCN3-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24 +; GCN3-NEXT: s_load_dword s8, s[0:1], 0x34 +; GCN3-NEXT: s_waitcnt lgkmcnt(0) +; GCN3-NEXT: s_lshl_b64 s[0:1], s[2:3], 2 +; GCN3-NEXT: s_add_u32 s0, s4, s0 +; GCN3-NEXT: s_addc_u32 s1, s5, s1 +; GCN3-NEXT: v_mov_b32_e32 v0, s0 +; GCN3-NEXT: v_mov_b32_e32 v1, s1 +; GCN3-NEXT: flat_load_dword v0, v[0:1] offset:16 +; GCN3-NEXT: s_mov_b64 s[2:3], 0 +; GCN3-NEXT: .LBB27_1: ; %atomicrmw.start +; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN3-NEXT: v_mov_b32_e32 v1, v0 +; GCN3-NEXT: v_mov_b32_e32 v3, s1 +; GCN3-NEXT: v_mov_b32_e32 v2, s0 +; GCN3-NEXT: v_min_u32_e32 v0, s8, v1 +; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN3-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] offset:16 glc +; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN3-NEXT: buffer_wbinvl1_vol +; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 +; GCN3-NEXT: s_or_b64 s[2:3], vcc, s[2:3] +; GCN3-NEXT: s_andn2_b64 exec, exec, s[2:3] +; GCN3-NEXT: s_cbranch_execnz 
.LBB27_1 +; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN3-NEXT: s_or_b64 exec, exec, s[2:3] +; GCN3-NEXT: v_mov_b32_e32 v1, s6 +; GCN3-NEXT: v_mov_b32_e32 v2, s7 +; GCN3-NEXT: flat_store_dword v[1:2], v0 +; GCN3-NEXT: s_endpgm +entry: + %ptr = getelementptr i32, i32* %out, i64 %index + %gep = getelementptr i32, i32* %ptr, i32 4 + %val = atomicrmw volatile umin i32* %gep, i32 %in seq_cst + store i32 %val, i32* %out2 + ret void +} + +define amdgpu_kernel void @atomic_umin_i32(i32* %out, i32 %in) { +; GCN1-LABEL: atomic_umin_i32: +; GCN1: ; %bb.0: ; %entry +; GCN1-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x9 +; GCN1-NEXT: s_load_dword s4, s[0:1], 0xb +; GCN1-NEXT: s_mov_b64 s[0:1], 0 +; GCN1-NEXT: s_waitcnt lgkmcnt(0) +; GCN1-NEXT: v_mov_b32_e32 v0, s2 +; GCN1-NEXT: v_mov_b32_e32 v1, s3 +; GCN1-NEXT: flat_load_dword v1, v[0:1] +; GCN1-NEXT: .LBB28_1: ; %atomicrmw.start +; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN1-NEXT: v_mov_b32_e32 v2, s2 +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: v_min_u32_e32 v0, s4, v1 +; GCN1-NEXT: v_mov_b32_e32 v3, s3 +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: buffer_wbinvl1_vol +; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 +; GCN1-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GCN1-NEXT: v_mov_b32_e32 v1, v0 +; GCN1-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GCN1-NEXT: s_cbranch_execnz .LBB28_1 +; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN1-NEXT: s_endpgm +; +; GCN2-LABEL: atomic_umin_i32: +; GCN2: ; %bb.0: ; %entry +; GCN2-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24 +; GCN2-NEXT: s_load_dword s4, s[0:1], 0x2c +; GCN2-NEXT: s_mov_b64 s[0:1], 0 +; GCN2-NEXT: s_waitcnt lgkmcnt(0) +; GCN2-NEXT: v_mov_b32_e32 v0, s2 +; GCN2-NEXT: v_mov_b32_e32 v1, s3 +; GCN2-NEXT: flat_load_dword v1, v[0:1] +; GCN2-NEXT: .LBB28_1: ; %atomicrmw.start +; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN2-NEXT: v_mov_b32_e32 v2, s2 +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: v_min_u32_e32 v0, s4, v1 +; GCN2-NEXT: v_mov_b32_e32 v3, s3 +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: buffer_wbinvl1_vol +; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 +; GCN2-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GCN2-NEXT: v_mov_b32_e32 v1, v0 +; GCN2-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GCN2-NEXT: s_cbranch_execnz .LBB28_1 +; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN2-NEXT: s_endpgm +; +; GCN3-LABEL: atomic_umin_i32: +; GCN3: ; %bb.0: ; %entry +; GCN3-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24 +; GCN3-NEXT: s_load_dword s4, s[0:1], 0x2c +; GCN3-NEXT: s_mov_b64 s[0:1], 0 +; GCN3-NEXT: s_waitcnt lgkmcnt(0) +; GCN3-NEXT: v_mov_b32_e32 v0, s2 +; GCN3-NEXT: v_mov_b32_e32 v1, s3 +; GCN3-NEXT: flat_load_dword v1, v[0:1] +; GCN3-NEXT: .LBB28_1: ; %atomicrmw.start +; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN3-NEXT: v_mov_b32_e32 v2, s2 +; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN3-NEXT: v_min_u32_e32 v0, s4, v1 +; GCN3-NEXT: v_mov_b32_e32 v3, s3 +; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN3-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc +; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN3-NEXT: buffer_wbinvl1_vol +; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 +; GCN3-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GCN3-NEXT: v_mov_b32_e32 v1, v0 +; GCN3-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GCN3-NEXT: s_cbranch_execnz .LBB28_1 +; GCN3-NEXT: ; %bb.2: ; 
%atomicrmw.end +; GCN3-NEXT: s_endpgm +entry: + %val = atomicrmw volatile umin i32* %out, i32 %in seq_cst + ret void +} + +define amdgpu_kernel void @atomic_umin_i32_ret(i32* %out, i32* %out2, i32 %in) { +; GCN1-LABEL: atomic_umin_i32_ret: +; GCN1: ; %bb.0: ; %entry +; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9 +; GCN1-NEXT: s_load_dword s2, s[0:1], 0xd +; GCN1-NEXT: s_mov_b64 s[0:1], 0 +; GCN1-NEXT: s_waitcnt lgkmcnt(0) +; GCN1-NEXT: v_mov_b32_e32 v0, s4 +; GCN1-NEXT: v_mov_b32_e32 v1, s5 +; GCN1-NEXT: flat_load_dword v0, v[0:1] +; GCN1-NEXT: .LBB29_1: ; %atomicrmw.start +; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: v_mov_b32_e32 v1, v0 +; GCN1-NEXT: v_mov_b32_e32 v2, s4 +; GCN1-NEXT: v_mov_b32_e32 v3, s5 +; GCN1-NEXT: v_min_u32_e32 v0, s2, v1 +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: buffer_wbinvl1_vol +; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 +; GCN1-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GCN1-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GCN1-NEXT: s_cbranch_execnz .LBB29_1 +; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN1-NEXT: s_or_b64 exec, exec, s[0:1] +; GCN1-NEXT: v_mov_b32_e32 v1, s6 +; GCN1-NEXT: v_mov_b32_e32 v2, s7 +; GCN1-NEXT: flat_store_dword v[1:2], v0 +; GCN1-NEXT: s_endpgm +; +; GCN2-LABEL: atomic_umin_i32_ret: +; GCN2: ; %bb.0: ; %entry +; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24 +; GCN2-NEXT: s_load_dword s2, s[0:1], 0x34 +; GCN2-NEXT: s_mov_b64 s[0:1], 0 +; GCN2-NEXT: s_waitcnt lgkmcnt(0) +; GCN2-NEXT: v_mov_b32_e32 v0, s4 +; GCN2-NEXT: v_mov_b32_e32 v1, s5 +; GCN2-NEXT: flat_load_dword v0, v[0:1] +; GCN2-NEXT: .LBB29_1: ; %atomicrmw.start +; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: v_mov_b32_e32 v1, v0 +; GCN2-NEXT: v_mov_b32_e32 v2, s4 +; GCN2-NEXT: v_mov_b32_e32 v3, s5 +; GCN2-NEXT: v_min_u32_e32 v0, s2, v1 +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: buffer_wbinvl1_vol +; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 +; GCN2-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GCN2-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GCN2-NEXT: s_cbranch_execnz .LBB29_1 +; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN2-NEXT: s_or_b64 exec, exec, s[0:1] +; GCN2-NEXT: v_mov_b32_e32 v1, s6 +; GCN2-NEXT: v_mov_b32_e32 v2, s7 +; GCN2-NEXT: flat_store_dword v[1:2], v0 +; GCN2-NEXT: s_endpgm +; +; GCN3-LABEL: atomic_umin_i32_ret: +; GCN3: ; %bb.0: ; %entry +; GCN3-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24 +; GCN3-NEXT: s_load_dword s2, s[0:1], 0x34 +; GCN3-NEXT: s_mov_b64 s[0:1], 0 +; GCN3-NEXT: s_waitcnt lgkmcnt(0) +; GCN3-NEXT: v_mov_b32_e32 v0, s4 +; GCN3-NEXT: v_mov_b32_e32 v1, s5 +; GCN3-NEXT: flat_load_dword v0, v[0:1] +; GCN3-NEXT: .LBB29_1: ; %atomicrmw.start +; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN3-NEXT: v_mov_b32_e32 v1, v0 +; GCN3-NEXT: v_mov_b32_e32 v2, s4 +; GCN3-NEXT: v_mov_b32_e32 v3, s5 +; GCN3-NEXT: v_min_u32_e32 v0, s2, v1 +; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN3-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc +; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN3-NEXT: buffer_wbinvl1_vol +; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 +; GCN3-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GCN3-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GCN3-NEXT: s_cbranch_execnz .LBB29_1 +; 
GCN3-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN3-NEXT: s_or_b64 exec, exec, s[0:1] +; GCN3-NEXT: v_mov_b32_e32 v1, s6 +; GCN3-NEXT: v_mov_b32_e32 v2, s7 +; GCN3-NEXT: flat_store_dword v[1:2], v0 +; GCN3-NEXT: s_endpgm +entry: + %val = atomicrmw volatile umin i32* %out, i32 %in seq_cst + store i32 %val, i32* %out2 + ret void +} + +define amdgpu_kernel void @atomic_umin_i32_addr64(i32* %out, i32 %in, i64 %index) { +; GCN1-LABEL: atomic_umin_i32_addr64: +; GCN1: ; %bb.0: ; %entry +; GCN1-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xd +; GCN1-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x9 +; GCN1-NEXT: s_load_dword s4, s[0:1], 0xb +; GCN1-NEXT: s_waitcnt lgkmcnt(0) +; GCN1-NEXT: s_lshl_b64 s[0:1], s[2:3], 2 +; GCN1-NEXT: s_add_u32 s0, s6, s0 +; GCN1-NEXT: s_addc_u32 s1, s7, s1 +; GCN1-NEXT: v_mov_b32_e32 v0, s0 +; GCN1-NEXT: v_mov_b32_e32 v1, s1 +; GCN1-NEXT: flat_load_dword v1, v[0:1] +; GCN1-NEXT: s_mov_b64 s[2:3], 0 +; GCN1-NEXT: .LBB30_1: ; %atomicrmw.start +; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN1-NEXT: v_mov_b32_e32 v3, s1 +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: v_min_u32_e32 v0, s4, v1 +; GCN1-NEXT: v_mov_b32_e32 v2, s0 +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: buffer_wbinvl1_vol +; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 +; GCN1-NEXT: s_or_b64 s[2:3], vcc, s[2:3] +; GCN1-NEXT: v_mov_b32_e32 v1, v0 +; GCN1-NEXT: s_andn2_b64 exec, exec, s[2:3] +; GCN1-NEXT: s_cbranch_execnz .LBB30_1 +; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN1-NEXT: s_endpgm +; +; GCN2-LABEL: atomic_umin_i32_addr64: +; GCN2: ; %bb.0: ; %entry +; GCN2-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34 +; GCN2-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x24 +; GCN2-NEXT: s_load_dword s4, s[0:1], 0x2c +; GCN2-NEXT: s_waitcnt lgkmcnt(0) +; GCN2-NEXT: s_lshl_b64 s[0:1], s[2:3], 2 +; GCN2-NEXT: s_add_u32 s0, s6, s0 +; GCN2-NEXT: s_addc_u32 s1, s7, s1 +; GCN2-NEXT: v_mov_b32_e32 v0, s0 +; GCN2-NEXT: v_mov_b32_e32 v1, s1 +; GCN2-NEXT: flat_load_dword v1, v[0:1] +; GCN2-NEXT: s_mov_b64 s[2:3], 0 +; GCN2-NEXT: .LBB30_1: ; %atomicrmw.start +; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN2-NEXT: v_mov_b32_e32 v3, s1 +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: v_min_u32_e32 v0, s4, v1 +; GCN2-NEXT: v_mov_b32_e32 v2, s0 +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: buffer_wbinvl1_vol +; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 +; GCN2-NEXT: s_or_b64 s[2:3], vcc, s[2:3] +; GCN2-NEXT: v_mov_b32_e32 v1, v0 +; GCN2-NEXT: s_andn2_b64 exec, exec, s[2:3] +; GCN2-NEXT: s_cbranch_execnz .LBB30_1 +; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN2-NEXT: s_endpgm +; +; GCN3-LABEL: atomic_umin_i32_addr64: +; GCN3: ; %bb.0: ; %entry +; GCN3-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34 +; GCN3-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x24 +; GCN3-NEXT: s_load_dword s4, s[0:1], 0x2c +; GCN3-NEXT: s_waitcnt lgkmcnt(0) +; GCN3-NEXT: s_lshl_b64 s[0:1], s[2:3], 2 +; GCN3-NEXT: s_add_u32 s0, s6, s0 +; GCN3-NEXT: s_addc_u32 s1, s7, s1 +; GCN3-NEXT: v_mov_b32_e32 v0, s0 +; GCN3-NEXT: v_mov_b32_e32 v1, s1 +; GCN3-NEXT: flat_load_dword v1, v[0:1] +; GCN3-NEXT: s_mov_b64 s[2:3], 0 +; GCN3-NEXT: .LBB30_1: ; %atomicrmw.start +; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN3-NEXT: v_mov_b32_e32 v3, s1 +; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN3-NEXT: v_min_u32_e32 v0, s4, v1 +; GCN3-NEXT: v_mov_b32_e32 
v2, s0 +; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN3-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc +; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN3-NEXT: buffer_wbinvl1_vol +; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 +; GCN3-NEXT: s_or_b64 s[2:3], vcc, s[2:3] +; GCN3-NEXT: v_mov_b32_e32 v1, v0 +; GCN3-NEXT: s_andn2_b64 exec, exec, s[2:3] +; GCN3-NEXT: s_cbranch_execnz .LBB30_1 +; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN3-NEXT: s_endpgm +entry: + %ptr = getelementptr i32, i32* %out, i64 %index + %val = atomicrmw volatile umin i32* %ptr, i32 %in seq_cst + ret void +} + +define amdgpu_kernel void @atomic_umin_i32_ret_addr64(i32* %out, i32* %out2, i32 %in, i64 %index) { +; GCN1-LABEL: atomic_umin_i32_ret_addr64: +; GCN1: ; %bb.0: ; %entry +; GCN1-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xf +; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9 +; GCN1-NEXT: s_load_dword s8, s[0:1], 0xd +; GCN1-NEXT: s_waitcnt lgkmcnt(0) +; GCN1-NEXT: s_lshl_b64 s[0:1], s[2:3], 2 +; GCN1-NEXT: s_add_u32 s0, s4, s0 +; GCN1-NEXT: s_addc_u32 s1, s5, s1 +; GCN1-NEXT: v_mov_b32_e32 v0, s0 +; GCN1-NEXT: v_mov_b32_e32 v1, s1 +; GCN1-NEXT: flat_load_dword v0, v[0:1] +; GCN1-NEXT: s_mov_b64 s[2:3], 0 +; GCN1-NEXT: .LBB31_1: ; %atomicrmw.start +; GCN1-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: v_mov_b32_e32 v1, v0 +; GCN1-NEXT: v_mov_b32_e32 v3, s1 +; GCN1-NEXT: v_mov_b32_e32 v2, s0 +; GCN1-NEXT: v_min_u32_e32 v0, s8, v1 +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc +; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN1-NEXT: buffer_wbinvl1_vol +; GCN1-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 +; GCN1-NEXT: s_or_b64 s[2:3], vcc, s[2:3] +; GCN1-NEXT: s_andn2_b64 exec, exec, s[2:3] +; GCN1-NEXT: s_cbranch_execnz .LBB31_1 +; GCN1-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN1-NEXT: s_or_b64 exec, exec, s[2:3] +; GCN1-NEXT: v_mov_b32_e32 v1, s6 +; GCN1-NEXT: v_mov_b32_e32 v2, s7 +; GCN1-NEXT: flat_store_dword v[1:2], v0 +; GCN1-NEXT: s_endpgm +; +; GCN2-LABEL: atomic_umin_i32_ret_addr64: +; GCN2: ; %bb.0: ; %entry +; GCN2-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x3c +; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24 +; GCN2-NEXT: s_load_dword s8, s[0:1], 0x34 +; GCN2-NEXT: s_waitcnt lgkmcnt(0) +; GCN2-NEXT: s_lshl_b64 s[0:1], s[2:3], 2 +; GCN2-NEXT: s_add_u32 s0, s4, s0 +; GCN2-NEXT: s_addc_u32 s1, s5, s1 +; GCN2-NEXT: v_mov_b32_e32 v0, s0 +; GCN2-NEXT: v_mov_b32_e32 v1, s1 +; GCN2-NEXT: flat_load_dword v0, v[0:1] +; GCN2-NEXT: s_mov_b64 s[2:3], 0 +; GCN2-NEXT: .LBB31_1: ; %atomicrmw.start +; GCN2-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: v_mov_b32_e32 v1, v0 +; GCN2-NEXT: v_mov_b32_e32 v3, s1 +; GCN2-NEXT: v_mov_b32_e32 v2, s0 +; GCN2-NEXT: v_min_u32_e32 v0, s8, v1 +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc +; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN2-NEXT: buffer_wbinvl1_vol +; GCN2-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 +; GCN2-NEXT: s_or_b64 s[2:3], vcc, s[2:3] +; GCN2-NEXT: s_andn2_b64 exec, exec, s[2:3] +; GCN2-NEXT: s_cbranch_execnz .LBB31_1 +; GCN2-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN2-NEXT: s_or_b64 exec, exec, s[2:3] +; GCN2-NEXT: v_mov_b32_e32 v1, s6 +; GCN2-NEXT: v_mov_b32_e32 v2, s7 +; GCN2-NEXT: flat_store_dword v[1:2], v0 +; GCN2-NEXT: s_endpgm +; +; GCN3-LABEL: atomic_umin_i32_ret_addr64: +; GCN3: ; %bb.0: ; %entry +; GCN3-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x3c +; GCN3-NEXT: s_load_dwordx4 
s[4:7], s[0:1], 0x24 +; GCN3-NEXT: s_load_dword s8, s[0:1], 0x34 +; GCN3-NEXT: s_waitcnt lgkmcnt(0) +; GCN3-NEXT: s_lshl_b64 s[0:1], s[2:3], 2 +; GCN3-NEXT: s_add_u32 s0, s4, s0 +; GCN3-NEXT: s_addc_u32 s1, s5, s1 +; GCN3-NEXT: v_mov_b32_e32 v0, s0 +; GCN3-NEXT: v_mov_b32_e32 v1, s1 +; GCN3-NEXT: flat_load_dword v0, v[0:1] +; GCN3-NEXT: s_mov_b64 s[2:3], 0 +; GCN3-NEXT: .LBB31_1: ; %atomicrmw.start +; GCN3-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN3-NEXT: v_mov_b32_e32 v1, v0 +; GCN3-NEXT: v_mov_b32_e32 v3, s1 +; GCN3-NEXT: v_mov_b32_e32 v2, s0 +; GCN3-NEXT: v_min_u32_e32 v0, s8, v1 +; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN3-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc +; GCN3-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN3-NEXT: buffer_wbinvl1_vol +; GCN3-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 +; GCN3-NEXT: s_or_b64 s[2:3], vcc, s[2:3] +; GCN3-NEXT: s_andn2_b64 exec, exec, s[2:3] +; GCN3-NEXT: s_cbranch_execnz .LBB31_1 +; GCN3-NEXT: ; %bb.2: ; %atomicrmw.end +; GCN3-NEXT: s_or_b64 exec, exec, s[2:3] +; GCN3-NEXT: v_mov_b32_e32 v1, s6 +; GCN3-NEXT: v_mov_b32_e32 v2, s7 +; GCN3-NEXT: flat_store_dword v[1:2], v0 +; GCN3-NEXT: s_endpgm +entry: + %ptr = getelementptr i32, i32* %out, i64 %index + %val = atomicrmw volatile umin i32* %ptr, i32 %in seq_cst + store i32 %val, i32* %out2 + ret void +} diff --git a/llvm/test/CodeGen/AMDGPU/global-saddr-atomics-min-max-system.ll b/llvm/test/CodeGen/AMDGPU/global-saddr-atomics-min-max-system.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/global-saddr-atomics-min-max-system.ll @@ -0,0 +1,2928 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 < %s | FileCheck -check-prefixes=GFX9 %s +; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1010 -mattr=+wavefrontsize64 < %s | FileCheck -check-prefixes=GFX10 %s +; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx1100 -mattr=+wavefrontsize64 < %s | FileCheck -check-prefixes=GFX11 %s + +; Test using saddr addressing mode of global_* flat atomic instructions. 
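+
+; The tests in this file all use system (default) scope, so the change to
+; SITargetLowering::shouldExpandAtomicRMWInIR returns
+; AtomicExpansionKind::CmpXChg for them and AtomicExpand lowers each
+; atomicrmw to a compare-exchange loop. A minimal hand-written sketch of
+; that loop (illustrative value names, signed max shown; not autogenerated):
+;
+;   entry:
+;     %start = load i32, i32 addrspace(1)* %ptr
+;     br label %loop
+;   loop:
+;     %old = phi i32 [ %start, %entry ], [ %loaded, %loop ]
+;     %cmp = icmp sgt i32 %old, %data            ; pick the larger value
+;     %new = select i1 %cmp, i32 %old, i32 %data
+;     %pair = cmpxchg i32 addrspace(1)* %ptr, i32 %old, i32 %new seq_cst seq_cst
+;     %loaded = extractvalue { i32, i1 } %pair, 0
+;     %ok = extractvalue { i32, i1 } %pair, 1    ; retry until the swap lands
+;     br i1 %ok, label %end, label %loop
+;   end:
+;
+; The .LBB*_1 %atomicrmw.start blocks checked below are the machine-code
+; form of this loop.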
+ +; -------------------------------------------------------------------------------- +; atomicrmw max +; -------------------------------------------------------------------------------- + +define amdgpu_ps float @global_max_saddr_i32_rtn(i8 addrspace(1)* inreg %sbase, i32 %voffset, i32 %data) { +; GFX9-LABEL: global_max_saddr_i32_rtn: +; GFX9: ; %bb.0: +; GFX9-NEXT: v_mov_b32_e32 v2, v0 +; GFX9-NEXT: global_load_dword v0, v0, s[2:3] +; GFX9-NEXT: v_mov_b32_e32 v3, s3 +; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, s2, v2 +; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc +; GFX9-NEXT: s_mov_b64 s[0:1], 0 +; GFX9-NEXT: .LBB0_1: ; %atomicrmw.start +; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v5, v0 +; GFX9-NEXT: v_max_i32_e32 v4, v5, v1 +; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off glc +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_wbinvl1 +; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 +; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX9-NEXT: s_cbranch_execnz .LBB0_1 +; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX9-NEXT: s_or_b64 exec, exec, s[0:1] +; GFX9-NEXT: ; return to shader part epilog +; +; GFX10-LABEL: global_max_saddr_i32_rtn: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_mov_b32_e32 v2, v0 +; GFX10-NEXT: global_load_dword v0, v0, s[2:3] +; GFX10-NEXT: v_add_co_u32 v2, s[0:1], s2, v2 +; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s[0:1], s3, 0, s[0:1] +; GFX10-NEXT: s_mov_b64 s[0:1], 0 +; GFX10-NEXT: .LBB0_1: ; %atomicrmw.start +; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: v_mov_b32_e32 v5, v0 +; GFX10-NEXT: v_max_i32_e32 v4, v5, v1 +; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off glc +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: buffer_gl0_inv +; GFX10-NEXT: buffer_gl1_inv +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 +; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX10-NEXT: s_cbranch_execnz .LBB0_1 +; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX10-NEXT: s_or_b64 exec, exec, s[0:1] +; GFX10-NEXT: ; return to shader part epilog +; +; GFX11-LABEL: global_max_saddr_i32_rtn: +; GFX11: ; %bb.0: +; GFX11-NEXT: v_mov_b32_e32 v2, v0 +; GFX11-NEXT: global_load_b32 v0, v0, s[2:3] +; GFX11-NEXT: v_add_co_u32 v2, s[0:1], s2, v2 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, s3, 0, s[0:1] +; GFX11-NEXT: s_mov_b64 s[0:1], 0 +; GFX11-NEXT: s_waitcnt_depctr 0xfffe +; GFX11-NEXT: .LBB0_1: ; %atomicrmw.start +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_mov_b32_e32 v5, v0 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_max_i32_e32 v4, v5, v1 +; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX11-NEXT: global_atomic_cmpswap_b32 v0, v[2:3], v[4:5], off glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: buffer_gl0_inv +; GFX11-NEXT: buffer_gl1_inv +; GFX11-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 +; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b64 exec, exec, s[0:1] +; GFX11-NEXT: s_cbranch_execnz .LBB0_1 +; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX11-NEXT: s_or_b64 exec, exec, s[0:1] +; GFX11-NEXT: ; return to shader part epilog + 
%zext.offset = zext i32 %voffset to i64 + %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset + %cast.gep0 = bitcast i8 addrspace(1)* %gep0 to i32 addrspace(1)* + %rtn = atomicrmw max i32 addrspace(1)* %cast.gep0, i32 %data seq_cst + %cast.rtn = bitcast i32 %rtn to float + ret float %cast.rtn +} + +define amdgpu_ps float @global_max_saddr_i32_rtn_neg128(i8 addrspace(1)* inreg %sbase, i32 %voffset, i32 %data) { +; GFX9-LABEL: global_max_saddr_i32_rtn_neg128: +; GFX9: ; %bb.0: +; GFX9-NEXT: v_mov_b32_e32 v2, v0 +; GFX9-NEXT: global_load_dword v0, v0, s[2:3] offset:-128 +; GFX9-NEXT: v_mov_b32_e32 v3, s3 +; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, s2, v2 +; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc +; GFX9-NEXT: s_mov_b64 s[0:1], 0 +; GFX9-NEXT: .LBB1_1: ; %atomicrmw.start +; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v5, v0 +; GFX9-NEXT: v_max_i32_e32 v4, v5, v1 +; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off offset:-128 glc +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_wbinvl1 +; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 +; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX9-NEXT: s_cbranch_execnz .LBB1_1 +; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX9-NEXT: s_or_b64 exec, exec, s[0:1] +; GFX9-NEXT: ; return to shader part epilog +; +; GFX10-LABEL: global_max_saddr_i32_rtn_neg128: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_mov_b32_e32 v2, v0 +; GFX10-NEXT: global_load_dword v0, v0, s[2:3] offset:-128 +; GFX10-NEXT: v_add_co_u32 v2, s[0:1], s2, v2 +; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s[0:1], s3, 0, s[0:1] +; GFX10-NEXT: s_mov_b64 s[0:1], 0 +; GFX10-NEXT: .LBB1_1: ; %atomicrmw.start +; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: v_mov_b32_e32 v5, v0 +; GFX10-NEXT: v_max_i32_e32 v4, v5, v1 +; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off offset:-128 glc +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: buffer_gl0_inv +; GFX10-NEXT: buffer_gl1_inv +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 +; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX10-NEXT: s_cbranch_execnz .LBB1_1 +; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX10-NEXT: s_or_b64 exec, exec, s[0:1] +; GFX10-NEXT: ; return to shader part epilog +; +; GFX11-LABEL: global_max_saddr_i32_rtn_neg128: +; GFX11: ; %bb.0: +; GFX11-NEXT: v_mov_b32_e32 v2, v0 +; GFX11-NEXT: global_load_b32 v0, v0, s[2:3] offset:-128 +; GFX11-NEXT: v_add_co_u32 v2, s[0:1], s2, v2 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, s3, 0, s[0:1] +; GFX11-NEXT: s_mov_b64 s[0:1], 0 +; GFX11-NEXT: s_waitcnt_depctr 0xfffe +; GFX11-NEXT: .LBB1_1: ; %atomicrmw.start +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_mov_b32_e32 v5, v0 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_max_i32_e32 v4, v5, v1 +; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX11-NEXT: global_atomic_cmpswap_b32 v0, v[2:3], v[4:5], off offset:-128 glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: buffer_gl0_inv +; GFX11-NEXT: buffer_gl1_inv +; GFX11-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 +; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX11-NEXT: s_delay_alu 
instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b64 exec, exec, s[0:1] +; GFX11-NEXT: s_cbranch_execnz .LBB1_1 +; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX11-NEXT: s_or_b64 exec, exec, s[0:1] +; GFX11-NEXT: ; return to shader part epilog + %zext.offset = zext i32 %voffset to i64 + %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset + %gep1 = getelementptr inbounds i8, i8 addrspace(1)* %gep0, i64 -128 + %cast.gep1 = bitcast i8 addrspace(1)* %gep1 to i32 addrspace(1)* + %rtn = atomicrmw max i32 addrspace(1)* %cast.gep1, i32 %data seq_cst + %cast.rtn = bitcast i32 %rtn to float + ret float %cast.rtn +} + +define amdgpu_ps void @global_max_saddr_i32_nortn(i8 addrspace(1)* inreg %sbase, i32 %voffset, i32 %data) { +; GFX9-LABEL: global_max_saddr_i32_nortn: +; GFX9: ; %bb.0: +; GFX9-NEXT: global_load_dword v5, v0, s[2:3] +; GFX9-NEXT: v_mov_b32_e32 v3, s3 +; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, s2, v0 +; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc +; GFX9-NEXT: s_mov_b64 s[0:1], 0 +; GFX9-NEXT: .LBB2_1: ; %atomicrmw.start +; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_max_i32_e32 v4, v5, v1 +; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off glc +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_wbinvl1 +; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 +; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX9-NEXT: v_mov_b32_e32 v5, v0 +; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX9-NEXT: s_cbranch_execnz .LBB2_1 +; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX9-NEXT: s_endpgm +; +; GFX10-LABEL: global_max_saddr_i32_nortn: +; GFX10: ; %bb.0: +; GFX10-NEXT: global_load_dword v5, v0, s[2:3] +; GFX10-NEXT: v_add_co_u32 v2, s[0:1], s2, v0 +; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s[0:1], s3, 0, s[0:1] +; GFX10-NEXT: s_mov_b64 s[0:1], 0 +; GFX10-NEXT: .LBB2_1: ; %atomicrmw.start +; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: v_max_i32_e32 v4, v5, v1 +; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off glc +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: buffer_gl0_inv +; GFX10-NEXT: buffer_gl1_inv +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 +; GFX10-NEXT: v_mov_b32_e32 v5, v0 +; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX10-NEXT: s_cbranch_execnz .LBB2_1 +; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: global_max_saddr_i32_nortn: +; GFX11: ; %bb.0: +; GFX11-NEXT: global_load_b32 v5, v0, s[2:3] +; GFX11-NEXT: v_add_co_u32 v2, s[0:1], s2, v0 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, s3, 0, s[0:1] +; GFX11-NEXT: s_mov_b64 s[0:1], 0 +; GFX11-NEXT: s_waitcnt_depctr 0xfffe +; GFX11-NEXT: .LBB2_1: ; %atomicrmw.start +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_max_i32_e32 v4, v5, v1 +; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX11-NEXT: global_atomic_cmpswap_b32 v0, v[2:3], v[4:5], off glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: buffer_gl0_inv +; GFX11-NEXT: buffer_gl1_inv +; GFX11-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 +; GFX11-NEXT: v_mov_b32_e32 v5, v0 +; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b64 exec, exec, s[0:1] +; 
GFX11-NEXT: s_cbranch_execnz .LBB2_1 +; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX11-NEXT: s_endpgm + %zext.offset = zext i32 %voffset to i64 + %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset + %cast.gep0 = bitcast i8 addrspace(1)* %gep0 to i32 addrspace(1)* + %unused = atomicrmw max i32 addrspace(1)* %cast.gep0, i32 %data seq_cst + ret void +} + +define amdgpu_ps void @global_max_saddr_i32_nortn_neg128(i8 addrspace(1)* inreg %sbase, i32 %voffset, i32 %data) { +; GFX9-LABEL: global_max_saddr_i32_nortn_neg128: +; GFX9: ; %bb.0: +; GFX9-NEXT: global_load_dword v5, v0, s[2:3] offset:-128 +; GFX9-NEXT: v_mov_b32_e32 v3, s3 +; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, s2, v0 +; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc +; GFX9-NEXT: s_mov_b64 s[0:1], 0 +; GFX9-NEXT: .LBB3_1: ; %atomicrmw.start +; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_max_i32_e32 v4, v5, v1 +; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off offset:-128 glc +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_wbinvl1 +; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 +; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX9-NEXT: v_mov_b32_e32 v5, v0 +; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX9-NEXT: s_cbranch_execnz .LBB3_1 +; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX9-NEXT: s_endpgm +; +; GFX10-LABEL: global_max_saddr_i32_nortn_neg128: +; GFX10: ; %bb.0: +; GFX10-NEXT: global_load_dword v5, v0, s[2:3] offset:-128 +; GFX10-NEXT: v_add_co_u32 v2, s[0:1], s2, v0 +; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s[0:1], s3, 0, s[0:1] +; GFX10-NEXT: s_mov_b64 s[0:1], 0 +; GFX10-NEXT: .LBB3_1: ; %atomicrmw.start +; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: v_max_i32_e32 v4, v5, v1 +; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off offset:-128 glc +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: buffer_gl0_inv +; GFX10-NEXT: buffer_gl1_inv +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 +; GFX10-NEXT: v_mov_b32_e32 v5, v0 +; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX10-NEXT: s_cbranch_execnz .LBB3_1 +; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: global_max_saddr_i32_nortn_neg128: +; GFX11: ; %bb.0: +; GFX11-NEXT: global_load_b32 v5, v0, s[2:3] offset:-128 +; GFX11-NEXT: v_add_co_u32 v2, s[0:1], s2, v0 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, s3, 0, s[0:1] +; GFX11-NEXT: s_mov_b64 s[0:1], 0 +; GFX11-NEXT: s_waitcnt_depctr 0xfffe +; GFX11-NEXT: .LBB3_1: ; %atomicrmw.start +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_max_i32_e32 v4, v5, v1 +; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX11-NEXT: global_atomic_cmpswap_b32 v0, v[2:3], v[4:5], off offset:-128 glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: buffer_gl0_inv +; GFX11-NEXT: buffer_gl1_inv +; GFX11-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 +; GFX11-NEXT: v_mov_b32_e32 v5, v0 +; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b64 exec, exec, s[0:1] +; GFX11-NEXT: s_cbranch_execnz .LBB3_1 +; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX11-NEXT: s_endpgm + %zext.offset = zext i32 %voffset to i64 + %gep0 = 
getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset + %gep1 = getelementptr inbounds i8, i8 addrspace(1)* %gep0, i64 -128 + %cast.gep1 = bitcast i8 addrspace(1)* %gep1 to i32 addrspace(1)* + %unused = atomicrmw max i32 addrspace(1)* %cast.gep1, i32 %data seq_cst + ret void +} + +define amdgpu_ps <2 x float> @global_max_saddr_i64_rtn(i8 addrspace(1)* inreg %sbase, i32 %voffset, i64 %data) { +; GFX9-LABEL: global_max_saddr_i64_rtn: +; GFX9: ; %bb.0: +; GFX9-NEXT: global_load_dwordx2 v[3:4], v0, s[2:3] +; GFX9-NEXT: v_mov_b32_e32 v6, s3 +; GFX9-NEXT: v_add_co_u32_e32 v5, vcc, s2, v0 +; GFX9-NEXT: v_addc_co_u32_e32 v6, vcc, 0, v6, vcc +; GFX9-NEXT: s_mov_b64 s[0:1], 0 +; GFX9-NEXT: .LBB4_1: ; %atomicrmw.start +; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v10, v4 +; GFX9-NEXT: v_mov_b32_e32 v9, v3 +; GFX9-NEXT: v_cmp_gt_i64_e32 vcc, v[9:10], v[1:2] +; GFX9-NEXT: v_cndmask_b32_e32 v8, v2, v10, vcc +; GFX9-NEXT: v_cndmask_b32_e32 v7, v1, v9, vcc +; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: global_atomic_cmpswap_x2 v[3:4], v[5:6], v[7:10], off glc +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_wbinvl1 +; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[9:10] +; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX9-NEXT: s_cbranch_execnz .LBB4_1 +; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX9-NEXT: s_or_b64 exec, exec, s[0:1] +; GFX9-NEXT: v_mov_b32_e32 v0, v3 +; GFX9-NEXT: v_mov_b32_e32 v1, v4 +; GFX9-NEXT: ; return to shader part epilog +; +; GFX10-LABEL: global_max_saddr_i64_rtn: +; GFX10: ; %bb.0: +; GFX10-NEXT: global_load_dwordx2 v[3:4], v0, s[2:3] +; GFX10-NEXT: v_add_co_u32 v5, s[0:1], s2, v0 +; GFX10-NEXT: v_add_co_ci_u32_e64 v6, s[0:1], s3, 0, s[0:1] +; GFX10-NEXT: s_mov_b64 s[0:1], 0 +; GFX10-NEXT: .LBB4_1: ; %atomicrmw.start +; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: v_mov_b32_e32 v10, v4 +; GFX10-NEXT: v_mov_b32_e32 v9, v3 +; GFX10-NEXT: v_cmp_gt_i64_e32 vcc, v[9:10], v[1:2] +; GFX10-NEXT: v_cndmask_b32_e32 v8, v2, v10, vcc +; GFX10-NEXT: v_cndmask_b32_e32 v7, v1, v9, vcc +; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: global_atomic_cmpswap_x2 v[3:4], v[5:6], v[7:10], off glc +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: buffer_gl0_inv +; GFX10-NEXT: buffer_gl1_inv +; GFX10-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[9:10] +; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX10-NEXT: s_cbranch_execnz .LBB4_1 +; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX10-NEXT: s_or_b64 exec, exec, s[0:1] +; GFX10-NEXT: v_mov_b32_e32 v0, v3 +; GFX10-NEXT: v_mov_b32_e32 v1, v4 +; GFX10-NEXT: ; return to shader part epilog +; +; GFX11-LABEL: global_max_saddr_i64_rtn: +; GFX11: ; %bb.0: +; GFX11-NEXT: global_load_b64 v[3:4], v0, s[2:3] +; GFX11-NEXT: v_add_co_u32 v5, s[0:1], s2, v0 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_add_co_ci_u32_e64 v6, null, s3, 0, s[0:1] +; GFX11-NEXT: s_mov_b64 s[0:1], 0 +; GFX11-NEXT: s_waitcnt_depctr 0xfffe +; GFX11-NEXT: .LBB4_1: ; %atomicrmw.start +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_mov_b32_e32 v10, v4 +; GFX11-NEXT: v_mov_b32_e32 v9, v3 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_cmp_gt_i64_e32 vcc, v[9:10], v[1:2] +; GFX11-NEXT: v_cndmask_b32_e32 v8, v2, v10, vcc +; GFX11-NEXT: 
v_cndmask_b32_e32 v7, v1, v9, vcc +; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX11-NEXT: global_atomic_cmpswap_b64 v[3:4], v[5:6], v[7:10], off glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: buffer_gl0_inv +; GFX11-NEXT: buffer_gl1_inv +; GFX11-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[9:10] +; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b64 exec, exec, s[0:1] +; GFX11-NEXT: s_cbranch_execnz .LBB4_1 +; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX11-NEXT: s_or_b64 exec, exec, s[0:1] +; GFX11-NEXT: v_mov_b32_e32 v0, v3 +; GFX11-NEXT: v_mov_b32_e32 v1, v4 +; GFX11-NEXT: ; return to shader part epilog + %zext.offset = zext i32 %voffset to i64 + %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset + %cast.gep0 = bitcast i8 addrspace(1)* %gep0 to i64 addrspace(1)* + %rtn = atomicrmw max i64 addrspace(1)* %cast.gep0, i64 %data seq_cst + %cast.rtn = bitcast i64 %rtn to <2 x float> + ret <2 x float> %cast.rtn +} + +define amdgpu_ps <2 x float> @global_max_saddr_i64_rtn_neg128(i8 addrspace(1)* inreg %sbase, i32 %voffset, i64 %data) { +; GFX9-LABEL: global_max_saddr_i64_rtn_neg128: +; GFX9: ; %bb.0: +; GFX9-NEXT: global_load_dwordx2 v[3:4], v0, s[2:3] offset:-128 +; GFX9-NEXT: v_mov_b32_e32 v6, s3 +; GFX9-NEXT: v_add_co_u32_e32 v5, vcc, s2, v0 +; GFX9-NEXT: v_addc_co_u32_e32 v6, vcc, 0, v6, vcc +; GFX9-NEXT: s_mov_b64 s[0:1], 0 +; GFX9-NEXT: .LBB5_1: ; %atomicrmw.start +; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v10, v4 +; GFX9-NEXT: v_mov_b32_e32 v9, v3 +; GFX9-NEXT: v_cmp_gt_i64_e32 vcc, v[9:10], v[1:2] +; GFX9-NEXT: v_cndmask_b32_e32 v8, v2, v10, vcc +; GFX9-NEXT: v_cndmask_b32_e32 v7, v1, v9, vcc +; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: global_atomic_cmpswap_x2 v[3:4], v[5:6], v[7:10], off offset:-128 glc +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_wbinvl1 +; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[9:10] +; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX9-NEXT: s_cbranch_execnz .LBB5_1 +; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX9-NEXT: s_or_b64 exec, exec, s[0:1] +; GFX9-NEXT: v_mov_b32_e32 v0, v3 +; GFX9-NEXT: v_mov_b32_e32 v1, v4 +; GFX9-NEXT: ; return to shader part epilog +; +; GFX10-LABEL: global_max_saddr_i64_rtn_neg128: +; GFX10: ; %bb.0: +; GFX10-NEXT: global_load_dwordx2 v[3:4], v0, s[2:3] offset:-128 +; GFX10-NEXT: v_add_co_u32 v5, s[0:1], s2, v0 +; GFX10-NEXT: v_add_co_ci_u32_e64 v6, s[0:1], s3, 0, s[0:1] +; GFX10-NEXT: s_mov_b64 s[0:1], 0 +; GFX10-NEXT: .LBB5_1: ; %atomicrmw.start +; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: v_mov_b32_e32 v10, v4 +; GFX10-NEXT: v_mov_b32_e32 v9, v3 +; GFX10-NEXT: v_cmp_gt_i64_e32 vcc, v[9:10], v[1:2] +; GFX10-NEXT: v_cndmask_b32_e32 v8, v2, v10, vcc +; GFX10-NEXT: v_cndmask_b32_e32 v7, v1, v9, vcc +; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: global_atomic_cmpswap_x2 v[3:4], v[5:6], v[7:10], off offset:-128 glc +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: buffer_gl0_inv +; GFX10-NEXT: buffer_gl1_inv +; GFX10-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[9:10] +; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX10-NEXT: s_cbranch_execnz .LBB5_1 +; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX10-NEXT: s_or_b64 exec, exec, 
s[0:1] +; GFX10-NEXT: v_mov_b32_e32 v0, v3 +; GFX10-NEXT: v_mov_b32_e32 v1, v4 +; GFX10-NEXT: ; return to shader part epilog +; +; GFX11-LABEL: global_max_saddr_i64_rtn_neg128: +; GFX11: ; %bb.0: +; GFX11-NEXT: global_load_b64 v[3:4], v0, s[2:3] offset:-128 +; GFX11-NEXT: v_add_co_u32 v5, s[0:1], s2, v0 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_add_co_ci_u32_e64 v6, null, s3, 0, s[0:1] +; GFX11-NEXT: s_mov_b64 s[0:1], 0 +; GFX11-NEXT: s_waitcnt_depctr 0xfffe +; GFX11-NEXT: .LBB5_1: ; %atomicrmw.start +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_mov_b32_e32 v10, v4 +; GFX11-NEXT: v_mov_b32_e32 v9, v3 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_cmp_gt_i64_e32 vcc, v[9:10], v[1:2] +; GFX11-NEXT: v_cndmask_b32_e32 v8, v2, v10, vcc +; GFX11-NEXT: v_cndmask_b32_e32 v7, v1, v9, vcc +; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX11-NEXT: global_atomic_cmpswap_b64 v[3:4], v[5:6], v[7:10], off offset:-128 glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: buffer_gl0_inv +; GFX11-NEXT: buffer_gl1_inv +; GFX11-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[9:10] +; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b64 exec, exec, s[0:1] +; GFX11-NEXT: s_cbranch_execnz .LBB5_1 +; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX11-NEXT: s_or_b64 exec, exec, s[0:1] +; GFX11-NEXT: v_mov_b32_e32 v0, v3 +; GFX11-NEXT: v_mov_b32_e32 v1, v4 +; GFX11-NEXT: ; return to shader part epilog + %zext.offset = zext i32 %voffset to i64 + %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset + %gep1 = getelementptr inbounds i8, i8 addrspace(1)* %gep0, i64 -128 + %cast.gep1 = bitcast i8 addrspace(1)* %gep1 to i64 addrspace(1)* + %rtn = atomicrmw max i64 addrspace(1)* %cast.gep1, i64 %data seq_cst + %cast.rtn = bitcast i64 %rtn to <2 x float> + ret <2 x float> %cast.rtn +} + +define amdgpu_ps void @global_max_saddr_i64_nortn(i8 addrspace(1)* inreg %sbase, i32 %voffset, i64 %data) { +; GFX9-LABEL: global_max_saddr_i64_nortn: +; GFX9: ; %bb.0: +; GFX9-NEXT: global_load_dwordx2 v[5:6], v0, s[2:3] +; GFX9-NEXT: v_mov_b32_e32 v3, s3 +; GFX9-NEXT: v_add_co_u32_e32 v7, vcc, s2, v0 +; GFX9-NEXT: v_addc_co_u32_e32 v8, vcc, 0, v3, vcc +; GFX9-NEXT: s_mov_b64 s[0:1], 0 +; GFX9-NEXT: .LBB6_1: ; %atomicrmw.start +; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_cmp_gt_i64_e32 vcc, v[5:6], v[1:2] +; GFX9-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc +; GFX9-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc +; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: global_atomic_cmpswap_x2 v[3:4], v[7:8], v[3:6], off glc +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_wbinvl1 +; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[5:6] +; GFX9-NEXT: v_mov_b32_e32 v6, v4 +; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX9-NEXT: v_mov_b32_e32 v5, v3 +; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX9-NEXT: s_cbranch_execnz .LBB6_1 +; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX9-NEXT: s_endpgm +; +; GFX10-LABEL: global_max_saddr_i64_nortn: +; GFX10: ; %bb.0: +; GFX10-NEXT: global_load_dwordx2 v[5:6], v0, s[2:3] +; GFX10-NEXT: v_add_co_u32 v7, s[0:1], s2, v0 +; GFX10-NEXT: v_add_co_ci_u32_e64 v8, s[0:1], s3, 0, s[0:1] +; GFX10-NEXT: s_mov_b64 s[0:1], 0 +; GFX10-NEXT: .LBB6_1: ; %atomicrmw.start +; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: 
v_cmp_gt_i64_e32 vcc, v[5:6], v[1:2] +; GFX10-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc +; GFX10-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc +; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: global_atomic_cmpswap_x2 v[3:4], v[7:8], v[3:6], off glc +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: buffer_gl0_inv +; GFX10-NEXT: buffer_gl1_inv +; GFX10-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[5:6] +; GFX10-NEXT: v_mov_b32_e32 v6, v4 +; GFX10-NEXT: v_mov_b32_e32 v5, v3 +; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX10-NEXT: s_cbranch_execnz .LBB6_1 +; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: global_max_saddr_i64_nortn: +; GFX11: ; %bb.0: +; GFX11-NEXT: global_load_b64 v[5:6], v0, s[2:3] +; GFX11-NEXT: v_add_co_u32 v7, s[0:1], s2, v0 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_add_co_ci_u32_e64 v8, null, s3, 0, s[0:1] +; GFX11-NEXT: s_mov_b64 s[0:1], 0 +; GFX11-NEXT: s_waitcnt_depctr 0xfffe +; GFX11-NEXT: .LBB6_1: ; %atomicrmw.start +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cmp_gt_i64_e32 vcc, v[5:6], v[1:2] +; GFX11-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc +; GFX11-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc +; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX11-NEXT: global_atomic_cmpswap_b64 v[3:4], v[7:8], v[3:6], off glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: buffer_gl0_inv +; GFX11-NEXT: buffer_gl1_inv +; GFX11-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[5:6] +; GFX11-NEXT: v_mov_b32_e32 v6, v4 +; GFX11-NEXT: v_mov_b32_e32 v5, v3 +; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b64 exec, exec, s[0:1] +; GFX11-NEXT: s_cbranch_execnz .LBB6_1 +; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX11-NEXT: s_endpgm + %zext.offset = zext i32 %voffset to i64 + %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset + %cast.gep0 = bitcast i8 addrspace(1)* %gep0 to i64 addrspace(1)* + %unused = atomicrmw max i64 addrspace(1)* %cast.gep0, i64 %data seq_cst + ret void +} + +define amdgpu_ps void @global_max_saddr_i64_nortn_neg128(i8 addrspace(1)* inreg %sbase, i32 %voffset, i64 %data) { +; GFX9-LABEL: global_max_saddr_i64_nortn_neg128: +; GFX9: ; %bb.0: +; GFX9-NEXT: global_load_dwordx2 v[5:6], v0, s[2:3] offset:-128 +; GFX9-NEXT: v_mov_b32_e32 v3, s3 +; GFX9-NEXT: v_add_co_u32_e32 v7, vcc, s2, v0 +; GFX9-NEXT: v_addc_co_u32_e32 v8, vcc, 0, v3, vcc +; GFX9-NEXT: s_mov_b64 s[0:1], 0 +; GFX9-NEXT: .LBB7_1: ; %atomicrmw.start +; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_cmp_gt_i64_e32 vcc, v[5:6], v[1:2] +; GFX9-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc +; GFX9-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc +; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: global_atomic_cmpswap_x2 v[3:4], v[7:8], v[3:6], off offset:-128 glc +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_wbinvl1 +; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[5:6] +; GFX9-NEXT: v_mov_b32_e32 v6, v4 +; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX9-NEXT: v_mov_b32_e32 v5, v3 +; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX9-NEXT: s_cbranch_execnz .LBB7_1 +; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX9-NEXT: s_endpgm +; +; GFX10-LABEL: global_max_saddr_i64_nortn_neg128: +; GFX10: ; %bb.0: +; GFX10-NEXT: global_load_dwordx2 v[5:6], v0, s[2:3] 
offset:-128 +; GFX10-NEXT: v_add_co_u32 v7, s[0:1], s2, v0 +; GFX10-NEXT: v_add_co_ci_u32_e64 v8, s[0:1], s3, 0, s[0:1] +; GFX10-NEXT: s_mov_b64 s[0:1], 0 +; GFX10-NEXT: .LBB7_1: ; %atomicrmw.start +; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: v_cmp_gt_i64_e32 vcc, v[5:6], v[1:2] +; GFX10-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc +; GFX10-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc +; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: global_atomic_cmpswap_x2 v[3:4], v[7:8], v[3:6], off offset:-128 glc +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: buffer_gl0_inv +; GFX10-NEXT: buffer_gl1_inv +; GFX10-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[5:6] +; GFX10-NEXT: v_mov_b32_e32 v6, v4 +; GFX10-NEXT: v_mov_b32_e32 v5, v3 +; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX10-NEXT: s_cbranch_execnz .LBB7_1 +; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: global_max_saddr_i64_nortn_neg128: +; GFX11: ; %bb.0: +; GFX11-NEXT: global_load_b64 v[5:6], v0, s[2:3] offset:-128 +; GFX11-NEXT: v_add_co_u32 v7, s[0:1], s2, v0 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_add_co_ci_u32_e64 v8, null, s3, 0, s[0:1] +; GFX11-NEXT: s_mov_b64 s[0:1], 0 +; GFX11-NEXT: s_waitcnt_depctr 0xfffe +; GFX11-NEXT: .LBB7_1: ; %atomicrmw.start +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cmp_gt_i64_e32 vcc, v[5:6], v[1:2] +; GFX11-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc +; GFX11-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc +; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX11-NEXT: global_atomic_cmpswap_b64 v[3:4], v[7:8], v[3:6], off offset:-128 glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: buffer_gl0_inv +; GFX11-NEXT: buffer_gl1_inv +; GFX11-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[5:6] +; GFX11-NEXT: v_mov_b32_e32 v6, v4 +; GFX11-NEXT: v_mov_b32_e32 v5, v3 +; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b64 exec, exec, s[0:1] +; GFX11-NEXT: s_cbranch_execnz .LBB7_1 +; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX11-NEXT: s_endpgm + %zext.offset = zext i32 %voffset to i64 + %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset + %gep1 = getelementptr inbounds i8, i8 addrspace(1)* %gep0, i64 -128 + %cast.gep1 = bitcast i8 addrspace(1)* %gep1 to i64 addrspace(1)* + %unused = atomicrmw max i64 addrspace(1)* %cast.gep1, i64 %data seq_cst + ret void +} + +; -------------------------------------------------------------------------------- +; atomicrmw min +; -------------------------------------------------------------------------------- + +define amdgpu_ps float @global_min_saddr_i32_rtn(i8 addrspace(1)* inreg %sbase, i32 %voffset, i32 %data) { +; GFX9-LABEL: global_min_saddr_i32_rtn: +; GFX9: ; %bb.0: +; GFX9-NEXT: v_mov_b32_e32 v2, v0 +; GFX9-NEXT: global_load_dword v0, v0, s[2:3] +; GFX9-NEXT: v_mov_b32_e32 v3, s3 +; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, s2, v2 +; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc +; GFX9-NEXT: s_mov_b64 s[0:1], 0 +; GFX9-NEXT: .LBB8_1: ; %atomicrmw.start +; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v5, v0 +; GFX9-NEXT: v_min_i32_e32 v4, v5, v1 +; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off glc +; 
GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_wbinvl1 +; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 +; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX9-NEXT: s_cbranch_execnz .LBB8_1 +; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX9-NEXT: s_or_b64 exec, exec, s[0:1] +; GFX9-NEXT: ; return to shader part epilog +; +; GFX10-LABEL: global_min_saddr_i32_rtn: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_mov_b32_e32 v2, v0 +; GFX10-NEXT: global_load_dword v0, v0, s[2:3] +; GFX10-NEXT: v_add_co_u32 v2, s[0:1], s2, v2 +; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s[0:1], s3, 0, s[0:1] +; GFX10-NEXT: s_mov_b64 s[0:1], 0 +; GFX10-NEXT: .LBB8_1: ; %atomicrmw.start +; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: v_mov_b32_e32 v5, v0 +; GFX10-NEXT: v_min_i32_e32 v4, v5, v1 +; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off glc +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: buffer_gl0_inv +; GFX10-NEXT: buffer_gl1_inv +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 +; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX10-NEXT: s_cbranch_execnz .LBB8_1 +; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX10-NEXT: s_or_b64 exec, exec, s[0:1] +; GFX10-NEXT: ; return to shader part epilog +; +; GFX11-LABEL: global_min_saddr_i32_rtn: +; GFX11: ; %bb.0: +; GFX11-NEXT: v_mov_b32_e32 v2, v0 +; GFX11-NEXT: global_load_b32 v0, v0, s[2:3] +; GFX11-NEXT: v_add_co_u32 v2, s[0:1], s2, v2 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, s3, 0, s[0:1] +; GFX11-NEXT: s_mov_b64 s[0:1], 0 +; GFX11-NEXT: s_waitcnt_depctr 0xfffe +; GFX11-NEXT: .LBB8_1: ; %atomicrmw.start +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_mov_b32_e32 v5, v0 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_min_i32_e32 v4, v5, v1 +; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX11-NEXT: global_atomic_cmpswap_b32 v0, v[2:3], v[4:5], off glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: buffer_gl0_inv +; GFX11-NEXT: buffer_gl1_inv +; GFX11-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 +; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b64 exec, exec, s[0:1] +; GFX11-NEXT: s_cbranch_execnz .LBB8_1 +; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX11-NEXT: s_or_b64 exec, exec, s[0:1] +; GFX11-NEXT: ; return to shader part epilog + %zext.offset = zext i32 %voffset to i64 + %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset + %cast.gep0 = bitcast i8 addrspace(1)* %gep0 to i32 addrspace(1)* + %rtn = atomicrmw min i32 addrspace(1)* %cast.gep0, i32 %data seq_cst + %cast.rtn = bitcast i32 %rtn to float + ret float %cast.rtn +} + +define amdgpu_ps float @global_min_saddr_i32_rtn_neg128(i8 addrspace(1)* inreg %sbase, i32 %voffset, i32 %data) { +; GFX9-LABEL: global_min_saddr_i32_rtn_neg128: +; GFX9: ; %bb.0: +; GFX9-NEXT: v_mov_b32_e32 v2, v0 +; GFX9-NEXT: global_load_dword v0, v0, s[2:3] offset:-128 +; GFX9-NEXT: v_mov_b32_e32 v3, s3 +; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, s2, v2 +; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc +; GFX9-NEXT: s_mov_b64 s[0:1], 0 +; GFX9-NEXT: .LBB9_1: ; %atomicrmw.start +; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v5, 
v0 +; GFX9-NEXT: v_min_i32_e32 v4, v5, v1 +; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off offset:-128 glc +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_wbinvl1 +; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 +; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX9-NEXT: s_cbranch_execnz .LBB9_1 +; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX9-NEXT: s_or_b64 exec, exec, s[0:1] +; GFX9-NEXT: ; return to shader part epilog +; +; GFX10-LABEL: global_min_saddr_i32_rtn_neg128: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_mov_b32_e32 v2, v0 +; GFX10-NEXT: global_load_dword v0, v0, s[2:3] offset:-128 +; GFX10-NEXT: v_add_co_u32 v2, s[0:1], s2, v2 +; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s[0:1], s3, 0, s[0:1] +; GFX10-NEXT: s_mov_b64 s[0:1], 0 +; GFX10-NEXT: .LBB9_1: ; %atomicrmw.start +; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: v_mov_b32_e32 v5, v0 +; GFX10-NEXT: v_min_i32_e32 v4, v5, v1 +; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off offset:-128 glc +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: buffer_gl0_inv +; GFX10-NEXT: buffer_gl1_inv +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 +; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX10-NEXT: s_cbranch_execnz .LBB9_1 +; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX10-NEXT: s_or_b64 exec, exec, s[0:1] +; GFX10-NEXT: ; return to shader part epilog +; +; GFX11-LABEL: global_min_saddr_i32_rtn_neg128: +; GFX11: ; %bb.0: +; GFX11-NEXT: v_mov_b32_e32 v2, v0 +; GFX11-NEXT: global_load_b32 v0, v0, s[2:3] offset:-128 +; GFX11-NEXT: v_add_co_u32 v2, s[0:1], s2, v2 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, s3, 0, s[0:1] +; GFX11-NEXT: s_mov_b64 s[0:1], 0 +; GFX11-NEXT: s_waitcnt_depctr 0xfffe +; GFX11-NEXT: .LBB9_1: ; %atomicrmw.start +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_mov_b32_e32 v5, v0 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_min_i32_e32 v4, v5, v1 +; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX11-NEXT: global_atomic_cmpswap_b32 v0, v[2:3], v[4:5], off offset:-128 glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: buffer_gl0_inv +; GFX11-NEXT: buffer_gl1_inv +; GFX11-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 +; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b64 exec, exec, s[0:1] +; GFX11-NEXT: s_cbranch_execnz .LBB9_1 +; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX11-NEXT: s_or_b64 exec, exec, s[0:1] +; GFX11-NEXT: ; return to shader part epilog + %zext.offset = zext i32 %voffset to i64 + %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset + %gep1 = getelementptr inbounds i8, i8 addrspace(1)* %gep0, i64 -128 + %cast.gep1 = bitcast i8 addrspace(1)* %gep1 to i32 addrspace(1)* + %rtn = atomicrmw min i32 addrspace(1)* %cast.gep1, i32 %data seq_cst + %cast.rtn = bitcast i32 %rtn to float + ret float %cast.rtn +} + +define amdgpu_ps void @global_min_saddr_i32_nortn(i8 addrspace(1)* inreg %sbase, i32 %voffset, i32 %data) { +; GFX9-LABEL: global_min_saddr_i32_nortn: +; GFX9: ; %bb.0: +; GFX9-NEXT: global_load_dword v5, v0, s[2:3] +; GFX9-NEXT: v_mov_b32_e32 v3, s3 +; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, s2, v0 +; 
GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc +; GFX9-NEXT: s_mov_b64 s[0:1], 0 +; GFX9-NEXT: .LBB10_1: ; %atomicrmw.start +; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_min_i32_e32 v4, v5, v1 +; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off glc +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_wbinvl1 +; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 +; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX9-NEXT: v_mov_b32_e32 v5, v0 +; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX9-NEXT: s_cbranch_execnz .LBB10_1 +; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX9-NEXT: s_endpgm +; +; GFX10-LABEL: global_min_saddr_i32_nortn: +; GFX10: ; %bb.0: +; GFX10-NEXT: global_load_dword v5, v0, s[2:3] +; GFX10-NEXT: v_add_co_u32 v2, s[0:1], s2, v0 +; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s[0:1], s3, 0, s[0:1] +; GFX10-NEXT: s_mov_b64 s[0:1], 0 +; GFX10-NEXT: .LBB10_1: ; %atomicrmw.start +; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: v_min_i32_e32 v4, v5, v1 +; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off glc +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: buffer_gl0_inv +; GFX10-NEXT: buffer_gl1_inv +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 +; GFX10-NEXT: v_mov_b32_e32 v5, v0 +; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX10-NEXT: s_cbranch_execnz .LBB10_1 +; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: global_min_saddr_i32_nortn: +; GFX11: ; %bb.0: +; GFX11-NEXT: global_load_b32 v5, v0, s[2:3] +; GFX11-NEXT: v_add_co_u32 v2, s[0:1], s2, v0 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, s3, 0, s[0:1] +; GFX11-NEXT: s_mov_b64 s[0:1], 0 +; GFX11-NEXT: s_waitcnt_depctr 0xfffe +; GFX11-NEXT: .LBB10_1: ; %atomicrmw.start +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_min_i32_e32 v4, v5, v1 +; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX11-NEXT: global_atomic_cmpswap_b32 v0, v[2:3], v[4:5], off glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: buffer_gl0_inv +; GFX11-NEXT: buffer_gl1_inv +; GFX11-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 +; GFX11-NEXT: v_mov_b32_e32 v5, v0 +; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b64 exec, exec, s[0:1] +; GFX11-NEXT: s_cbranch_execnz .LBB10_1 +; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX11-NEXT: s_endpgm + %zext.offset = zext i32 %voffset to i64 + %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset + %cast.gep0 = bitcast i8 addrspace(1)* %gep0 to i32 addrspace(1)* + %unused = atomicrmw min i32 addrspace(1)* %cast.gep0, i32 %data seq_cst + ret void +} + +define amdgpu_ps void @global_min_saddr_i32_nortn_neg128(i8 addrspace(1)* inreg %sbase, i32 %voffset, i32 %data) { +; GFX9-LABEL: global_min_saddr_i32_nortn_neg128: +; GFX9: ; %bb.0: +; GFX9-NEXT: global_load_dword v5, v0, s[2:3] offset:-128 +; GFX9-NEXT: v_mov_b32_e32 v3, s3 +; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, s2, v0 +; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc +; GFX9-NEXT: s_mov_b64 s[0:1], 0 +; GFX9-NEXT: .LBB11_1: ; %atomicrmw.start +; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: 
v_min_i32_e32 v4, v5, v1 +; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off offset:-128 glc +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_wbinvl1 +; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 +; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX9-NEXT: v_mov_b32_e32 v5, v0 +; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX9-NEXT: s_cbranch_execnz .LBB11_1 +; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX9-NEXT: s_endpgm +; +; GFX10-LABEL: global_min_saddr_i32_nortn_neg128: +; GFX10: ; %bb.0: +; GFX10-NEXT: global_load_dword v5, v0, s[2:3] offset:-128 +; GFX10-NEXT: v_add_co_u32 v2, s[0:1], s2, v0 +; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s[0:1], s3, 0, s[0:1] +; GFX10-NEXT: s_mov_b64 s[0:1], 0 +; GFX10-NEXT: .LBB11_1: ; %atomicrmw.start +; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: v_min_i32_e32 v4, v5, v1 +; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off offset:-128 glc +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: buffer_gl0_inv +; GFX10-NEXT: buffer_gl1_inv +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 +; GFX10-NEXT: v_mov_b32_e32 v5, v0 +; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX10-NEXT: s_cbranch_execnz .LBB11_1 +; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: global_min_saddr_i32_nortn_neg128: +; GFX11: ; %bb.0: +; GFX11-NEXT: global_load_b32 v5, v0, s[2:3] offset:-128 +; GFX11-NEXT: v_add_co_u32 v2, s[0:1], s2, v0 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, s3, 0, s[0:1] +; GFX11-NEXT: s_mov_b64 s[0:1], 0 +; GFX11-NEXT: s_waitcnt_depctr 0xfffe +; GFX11-NEXT: .LBB11_1: ; %atomicrmw.start +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_min_i32_e32 v4, v5, v1 +; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX11-NEXT: global_atomic_cmpswap_b32 v0, v[2:3], v[4:5], off offset:-128 glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: buffer_gl0_inv +; GFX11-NEXT: buffer_gl1_inv +; GFX11-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 +; GFX11-NEXT: v_mov_b32_e32 v5, v0 +; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b64 exec, exec, s[0:1] +; GFX11-NEXT: s_cbranch_execnz .LBB11_1 +; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX11-NEXT: s_endpgm + %zext.offset = zext i32 %voffset to i64 + %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset + %gep1 = getelementptr inbounds i8, i8 addrspace(1)* %gep0, i64 -128 + %cast.gep1 = bitcast i8 addrspace(1)* %gep1 to i32 addrspace(1)* + %unused = atomicrmw min i32 addrspace(1)* %cast.gep1, i32 %data seq_cst + ret void +} + +define amdgpu_ps <2 x float> @global_min_saddr_i64_rtn(i8 addrspace(1)* inreg %sbase, i32 %voffset, i64 %data) { +; GFX9-LABEL: global_min_saddr_i64_rtn: +; GFX9: ; %bb.0: +; GFX9-NEXT: global_load_dwordx2 v[3:4], v0, s[2:3] +; GFX9-NEXT: v_mov_b32_e32 v6, s3 +; GFX9-NEXT: v_add_co_u32_e32 v5, vcc, s2, v0 +; GFX9-NEXT: v_addc_co_u32_e32 v6, vcc, 0, v6, vcc +; GFX9-NEXT: s_mov_b64 s[0:1], 0 +; GFX9-NEXT: .LBB12_1: ; %atomicrmw.start +; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v10, v4 +; GFX9-NEXT: v_mov_b32_e32 v9, v3 +; GFX9-NEXT: v_cmp_le_i64_e32 vcc, v[9:10], 
v[1:2] +; GFX9-NEXT: v_cndmask_b32_e32 v8, v2, v10, vcc +; GFX9-NEXT: v_cndmask_b32_e32 v7, v1, v9, vcc +; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: global_atomic_cmpswap_x2 v[3:4], v[5:6], v[7:10], off glc +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_wbinvl1 +; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[9:10] +; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX9-NEXT: s_cbranch_execnz .LBB12_1 +; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX9-NEXT: s_or_b64 exec, exec, s[0:1] +; GFX9-NEXT: v_mov_b32_e32 v0, v3 +; GFX9-NEXT: v_mov_b32_e32 v1, v4 +; GFX9-NEXT: ; return to shader part epilog +; +; GFX10-LABEL: global_min_saddr_i64_rtn: +; GFX10: ; %bb.0: +; GFX10-NEXT: global_load_dwordx2 v[3:4], v0, s[2:3] +; GFX10-NEXT: v_add_co_u32 v5, s[0:1], s2, v0 +; GFX10-NEXT: v_add_co_ci_u32_e64 v6, s[0:1], s3, 0, s[0:1] +; GFX10-NEXT: s_mov_b64 s[0:1], 0 +; GFX10-NEXT: .LBB12_1: ; %atomicrmw.start +; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: v_mov_b32_e32 v10, v4 +; GFX10-NEXT: v_mov_b32_e32 v9, v3 +; GFX10-NEXT: v_cmp_le_i64_e32 vcc, v[9:10], v[1:2] +; GFX10-NEXT: v_cndmask_b32_e32 v8, v2, v10, vcc +; GFX10-NEXT: v_cndmask_b32_e32 v7, v1, v9, vcc +; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: global_atomic_cmpswap_x2 v[3:4], v[5:6], v[7:10], off glc +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: buffer_gl0_inv +; GFX10-NEXT: buffer_gl1_inv +; GFX10-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[9:10] +; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX10-NEXT: s_cbranch_execnz .LBB12_1 +; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX10-NEXT: s_or_b64 exec, exec, s[0:1] +; GFX10-NEXT: v_mov_b32_e32 v0, v3 +; GFX10-NEXT: v_mov_b32_e32 v1, v4 +; GFX10-NEXT: ; return to shader part epilog +; +; GFX11-LABEL: global_min_saddr_i64_rtn: +; GFX11: ; %bb.0: +; GFX11-NEXT: global_load_b64 v[3:4], v0, s[2:3] +; GFX11-NEXT: v_add_co_u32 v5, s[0:1], s2, v0 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_add_co_ci_u32_e64 v6, null, s3, 0, s[0:1] +; GFX11-NEXT: s_mov_b64 s[0:1], 0 +; GFX11-NEXT: s_waitcnt_depctr 0xfffe +; GFX11-NEXT: .LBB12_1: ; %atomicrmw.start +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_mov_b32_e32 v10, v4 +; GFX11-NEXT: v_mov_b32_e32 v9, v3 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_cmp_le_i64_e32 vcc, v[9:10], v[1:2] +; GFX11-NEXT: v_cndmask_b32_e32 v8, v2, v10, vcc +; GFX11-NEXT: v_cndmask_b32_e32 v7, v1, v9, vcc +; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX11-NEXT: global_atomic_cmpswap_b64 v[3:4], v[5:6], v[7:10], off glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: buffer_gl0_inv +; GFX11-NEXT: buffer_gl1_inv +; GFX11-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[9:10] +; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b64 exec, exec, s[0:1] +; GFX11-NEXT: s_cbranch_execnz .LBB12_1 +; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX11-NEXT: s_or_b64 exec, exec, s[0:1] +; GFX11-NEXT: v_mov_b32_e32 v0, v3 +; GFX11-NEXT: v_mov_b32_e32 v1, v4 +; GFX11-NEXT: ; return to shader part epilog + %zext.offset = zext i32 %voffset to i64 + %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset + %cast.gep0 = bitcast i8 addrspace(1)* %gep0 to i64 addrspace(1)* + %rtn = atomicrmw 
min i64 addrspace(1)* %cast.gep0, i64 %data seq_cst + %cast.rtn = bitcast i64 %rtn to <2 x float> + ret <2 x float> %cast.rtn +} + +define amdgpu_ps <2 x float> @global_min_saddr_i64_rtn_neg128(i8 addrspace(1)* inreg %sbase, i32 %voffset, i64 %data) { +; GFX9-LABEL: global_min_saddr_i64_rtn_neg128: +; GFX9: ; %bb.0: +; GFX9-NEXT: global_load_dwordx2 v[3:4], v0, s[2:3] offset:-128 +; GFX9-NEXT: v_mov_b32_e32 v6, s3 +; GFX9-NEXT: v_add_co_u32_e32 v5, vcc, s2, v0 +; GFX9-NEXT: v_addc_co_u32_e32 v6, vcc, 0, v6, vcc +; GFX9-NEXT: s_mov_b64 s[0:1], 0 +; GFX9-NEXT: .LBB13_1: ; %atomicrmw.start +; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v10, v4 +; GFX9-NEXT: v_mov_b32_e32 v9, v3 +; GFX9-NEXT: v_cmp_le_i64_e32 vcc, v[9:10], v[1:2] +; GFX9-NEXT: v_cndmask_b32_e32 v8, v2, v10, vcc +; GFX9-NEXT: v_cndmask_b32_e32 v7, v1, v9, vcc +; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: global_atomic_cmpswap_x2 v[3:4], v[5:6], v[7:10], off offset:-128 glc +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_wbinvl1 +; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[9:10] +; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX9-NEXT: s_cbranch_execnz .LBB13_1 +; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX9-NEXT: s_or_b64 exec, exec, s[0:1] +; GFX9-NEXT: v_mov_b32_e32 v0, v3 +; GFX9-NEXT: v_mov_b32_e32 v1, v4 +; GFX9-NEXT: ; return to shader part epilog +; +; GFX10-LABEL: global_min_saddr_i64_rtn_neg128: +; GFX10: ; %bb.0: +; GFX10-NEXT: global_load_dwordx2 v[3:4], v0, s[2:3] offset:-128 +; GFX10-NEXT: v_add_co_u32 v5, s[0:1], s2, v0 +; GFX10-NEXT: v_add_co_ci_u32_e64 v6, s[0:1], s3, 0, s[0:1] +; GFX10-NEXT: s_mov_b64 s[0:1], 0 +; GFX10-NEXT: .LBB13_1: ; %atomicrmw.start +; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: v_mov_b32_e32 v10, v4 +; GFX10-NEXT: v_mov_b32_e32 v9, v3 +; GFX10-NEXT: v_cmp_le_i64_e32 vcc, v[9:10], v[1:2] +; GFX10-NEXT: v_cndmask_b32_e32 v8, v2, v10, vcc +; GFX10-NEXT: v_cndmask_b32_e32 v7, v1, v9, vcc +; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: global_atomic_cmpswap_x2 v[3:4], v[5:6], v[7:10], off offset:-128 glc +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: buffer_gl0_inv +; GFX10-NEXT: buffer_gl1_inv +; GFX10-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[9:10] +; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX10-NEXT: s_cbranch_execnz .LBB13_1 +; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX10-NEXT: s_or_b64 exec, exec, s[0:1] +; GFX10-NEXT: v_mov_b32_e32 v0, v3 +; GFX10-NEXT: v_mov_b32_e32 v1, v4 +; GFX10-NEXT: ; return to shader part epilog +; +; GFX11-LABEL: global_min_saddr_i64_rtn_neg128: +; GFX11: ; %bb.0: +; GFX11-NEXT: global_load_b64 v[3:4], v0, s[2:3] offset:-128 +; GFX11-NEXT: v_add_co_u32 v5, s[0:1], s2, v0 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_add_co_ci_u32_e64 v6, null, s3, 0, s[0:1] +; GFX11-NEXT: s_mov_b64 s[0:1], 0 +; GFX11-NEXT: s_waitcnt_depctr 0xfffe +; GFX11-NEXT: .LBB13_1: ; %atomicrmw.start +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_mov_b32_e32 v10, v4 +; GFX11-NEXT: v_mov_b32_e32 v9, v3 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_cmp_le_i64_e32 vcc, v[9:10], v[1:2] +; GFX11-NEXT: v_cndmask_b32_e32 v8, v2, v10, vcc +; GFX11-NEXT: v_cndmask_b32_e32 v7, v1, v9, vcc +; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) 
+; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX11-NEXT: global_atomic_cmpswap_b64 v[3:4], v[5:6], v[7:10], off offset:-128 glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: buffer_gl0_inv +; GFX11-NEXT: buffer_gl1_inv +; GFX11-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[9:10] +; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b64 exec, exec, s[0:1] +; GFX11-NEXT: s_cbranch_execnz .LBB13_1 +; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX11-NEXT: s_or_b64 exec, exec, s[0:1] +; GFX11-NEXT: v_mov_b32_e32 v0, v3 +; GFX11-NEXT: v_mov_b32_e32 v1, v4 +; GFX11-NEXT: ; return to shader part epilog + %zext.offset = zext i32 %voffset to i64 + %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset + %gep1 = getelementptr inbounds i8, i8 addrspace(1)* %gep0, i64 -128 + %cast.gep1 = bitcast i8 addrspace(1)* %gep1 to i64 addrspace(1)* + %rtn = atomicrmw min i64 addrspace(1)* %cast.gep1, i64 %data seq_cst + %cast.rtn = bitcast i64 %rtn to <2 x float> + ret <2 x float> %cast.rtn +} + +define amdgpu_ps void @global_min_saddr_i64_nortn(i8 addrspace(1)* inreg %sbase, i32 %voffset, i64 %data) { +; GFX9-LABEL: global_min_saddr_i64_nortn: +; GFX9: ; %bb.0: +; GFX9-NEXT: global_load_dwordx2 v[5:6], v0, s[2:3] +; GFX9-NEXT: v_mov_b32_e32 v3, s3 +; GFX9-NEXT: v_add_co_u32_e32 v7, vcc, s2, v0 +; GFX9-NEXT: v_addc_co_u32_e32 v8, vcc, 0, v3, vcc +; GFX9-NEXT: s_mov_b64 s[0:1], 0 +; GFX9-NEXT: .LBB14_1: ; %atomicrmw.start +; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_cmp_le_i64_e32 vcc, v[5:6], v[1:2] +; GFX9-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc +; GFX9-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc +; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: global_atomic_cmpswap_x2 v[3:4], v[7:8], v[3:6], off glc +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_wbinvl1 +; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[5:6] +; GFX9-NEXT: v_mov_b32_e32 v6, v4 +; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX9-NEXT: v_mov_b32_e32 v5, v3 +; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX9-NEXT: s_cbranch_execnz .LBB14_1 +; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX9-NEXT: s_endpgm +; +; GFX10-LABEL: global_min_saddr_i64_nortn: +; GFX10: ; %bb.0: +; GFX10-NEXT: global_load_dwordx2 v[5:6], v0, s[2:3] +; GFX10-NEXT: v_add_co_u32 v7, s[0:1], s2, v0 +; GFX10-NEXT: v_add_co_ci_u32_e64 v8, s[0:1], s3, 0, s[0:1] +; GFX10-NEXT: s_mov_b64 s[0:1], 0 +; GFX10-NEXT: .LBB14_1: ; %atomicrmw.start +; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: v_cmp_le_i64_e32 vcc, v[5:6], v[1:2] +; GFX10-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc +; GFX10-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc +; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: global_atomic_cmpswap_x2 v[3:4], v[7:8], v[3:6], off glc +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: buffer_gl0_inv +; GFX10-NEXT: buffer_gl1_inv +; GFX10-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[5:6] +; GFX10-NEXT: v_mov_b32_e32 v6, v4 +; GFX10-NEXT: v_mov_b32_e32 v5, v3 +; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX10-NEXT: s_cbranch_execnz .LBB14_1 +; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: global_min_saddr_i64_nortn: +; GFX11: ; %bb.0: +; GFX11-NEXT: global_load_b64 v[5:6], v0, s[2:3] +; GFX11-NEXT: v_add_co_u32 v7, s[0:1], s2, v0 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; 
GFX11-NEXT: v_add_co_ci_u32_e64 v8, null, s3, 0, s[0:1] +; GFX11-NEXT: s_mov_b64 s[0:1], 0 +; GFX11-NEXT: s_waitcnt_depctr 0xfffe +; GFX11-NEXT: .LBB14_1: ; %atomicrmw.start +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cmp_le_i64_e32 vcc, v[5:6], v[1:2] +; GFX11-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc +; GFX11-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc +; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX11-NEXT: global_atomic_cmpswap_b64 v[3:4], v[7:8], v[3:6], off glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: buffer_gl0_inv +; GFX11-NEXT: buffer_gl1_inv +; GFX11-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[5:6] +; GFX11-NEXT: v_mov_b32_e32 v6, v4 +; GFX11-NEXT: v_mov_b32_e32 v5, v3 +; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b64 exec, exec, s[0:1] +; GFX11-NEXT: s_cbranch_execnz .LBB14_1 +; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX11-NEXT: s_endpgm + %zext.offset = zext i32 %voffset to i64 + %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset + %cast.gep0 = bitcast i8 addrspace(1)* %gep0 to i64 addrspace(1)* + %unused = atomicrmw min i64 addrspace(1)* %cast.gep0, i64 %data seq_cst + ret void +} + +define amdgpu_ps void @global_min_saddr_i64_nortn_neg128(i8 addrspace(1)* inreg %sbase, i32 %voffset, i64 %data) { +; GFX9-LABEL: global_min_saddr_i64_nortn_neg128: +; GFX9: ; %bb.0: +; GFX9-NEXT: global_load_dwordx2 v[5:6], v0, s[2:3] offset:-128 +; GFX9-NEXT: v_mov_b32_e32 v3, s3 +; GFX9-NEXT: v_add_co_u32_e32 v7, vcc, s2, v0 +; GFX9-NEXT: v_addc_co_u32_e32 v8, vcc, 0, v3, vcc +; GFX9-NEXT: s_mov_b64 s[0:1], 0 +; GFX9-NEXT: .LBB15_1: ; %atomicrmw.start +; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_cmp_le_i64_e32 vcc, v[5:6], v[1:2] +; GFX9-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc +; GFX9-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc +; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: global_atomic_cmpswap_x2 v[3:4], v[7:8], v[3:6], off offset:-128 glc +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_wbinvl1 +; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[5:6] +; GFX9-NEXT: v_mov_b32_e32 v6, v4 +; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX9-NEXT: v_mov_b32_e32 v5, v3 +; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX9-NEXT: s_cbranch_execnz .LBB15_1 +; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX9-NEXT: s_endpgm +; +; GFX10-LABEL: global_min_saddr_i64_nortn_neg128: +; GFX10: ; %bb.0: +; GFX10-NEXT: global_load_dwordx2 v[5:6], v0, s[2:3] offset:-128 +; GFX10-NEXT: v_add_co_u32 v7, s[0:1], s2, v0 +; GFX10-NEXT: v_add_co_ci_u32_e64 v8, s[0:1], s3, 0, s[0:1] +; GFX10-NEXT: s_mov_b64 s[0:1], 0 +; GFX10-NEXT: .LBB15_1: ; %atomicrmw.start +; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: v_cmp_le_i64_e32 vcc, v[5:6], v[1:2] +; GFX10-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc +; GFX10-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc +; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: global_atomic_cmpswap_x2 v[3:4], v[7:8], v[3:6], off offset:-128 glc +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: buffer_gl0_inv +; GFX10-NEXT: buffer_gl1_inv +; GFX10-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[5:6] +; GFX10-NEXT: v_mov_b32_e32 v6, v4 +; GFX10-NEXT: v_mov_b32_e32 v5, v3 +; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1] +; 
GFX10-NEXT: s_cbranch_execnz .LBB15_1 +; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: global_min_saddr_i64_nortn_neg128: +; GFX11: ; %bb.0: +; GFX11-NEXT: global_load_b64 v[5:6], v0, s[2:3] offset:-128 +; GFX11-NEXT: v_add_co_u32 v7, s[0:1], s2, v0 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_add_co_ci_u32_e64 v8, null, s3, 0, s[0:1] +; GFX11-NEXT: s_mov_b64 s[0:1], 0 +; GFX11-NEXT: s_waitcnt_depctr 0xfffe +; GFX11-NEXT: .LBB15_1: ; %atomicrmw.start +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cmp_le_i64_e32 vcc, v[5:6], v[1:2] +; GFX11-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc +; GFX11-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc +; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX11-NEXT: global_atomic_cmpswap_b64 v[3:4], v[7:8], v[3:6], off offset:-128 glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: buffer_gl0_inv +; GFX11-NEXT: buffer_gl1_inv +; GFX11-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[5:6] +; GFX11-NEXT: v_mov_b32_e32 v6, v4 +; GFX11-NEXT: v_mov_b32_e32 v5, v3 +; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b64 exec, exec, s[0:1] +; GFX11-NEXT: s_cbranch_execnz .LBB15_1 +; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX11-NEXT: s_endpgm + %zext.offset = zext i32 %voffset to i64 + %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset + %gep1 = getelementptr inbounds i8, i8 addrspace(1)* %gep0, i64 -128 + %cast.gep1 = bitcast i8 addrspace(1)* %gep1 to i64 addrspace(1)* + %unused = atomicrmw min i64 addrspace(1)* %cast.gep1, i64 %data seq_cst + ret void +} + +; -------------------------------------------------------------------------------- +; atomicrmw umax +; -------------------------------------------------------------------------------- + +define amdgpu_ps float @global_umax_saddr_i32_rtn(i8 addrspace(1)* inreg %sbase, i32 %voffset, i32 %data) { +; GFX9-LABEL: global_umax_saddr_i32_rtn: +; GFX9: ; %bb.0: +; GFX9-NEXT: v_mov_b32_e32 v2, v0 +; GFX9-NEXT: global_load_dword v0, v0, s[2:3] +; GFX9-NEXT: v_mov_b32_e32 v3, s3 +; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, s2, v2 +; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc +; GFX9-NEXT: s_mov_b64 s[0:1], 0 +; GFX9-NEXT: .LBB16_1: ; %atomicrmw.start +; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v5, v0 +; GFX9-NEXT: v_max_u32_e32 v4, v5, v1 +; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off glc +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_wbinvl1 +; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 +; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX9-NEXT: s_cbranch_execnz .LBB16_1 +; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX9-NEXT: s_or_b64 exec, exec, s[0:1] +; GFX9-NEXT: ; return to shader part epilog +; +; GFX10-LABEL: global_umax_saddr_i32_rtn: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_mov_b32_e32 v2, v0 +; GFX10-NEXT: global_load_dword v0, v0, s[2:3] +; GFX10-NEXT: v_add_co_u32 v2, s[0:1], s2, v2 +; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s[0:1], s3, 0, s[0:1] +; GFX10-NEXT: s_mov_b64 s[0:1], 0 +; GFX10-NEXT: .LBB16_1: ; %atomicrmw.start +; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: v_mov_b32_e32 v5, v0 +; GFX10-NEXT: v_max_u32_e32 v4, v5, v1 +; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; 
GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off glc +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: buffer_gl0_inv +; GFX10-NEXT: buffer_gl1_inv +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 +; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX10-NEXT: s_cbranch_execnz .LBB16_1 +; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX10-NEXT: s_or_b64 exec, exec, s[0:1] +; GFX10-NEXT: ; return to shader part epilog +; +; GFX11-LABEL: global_umax_saddr_i32_rtn: +; GFX11: ; %bb.0: +; GFX11-NEXT: v_mov_b32_e32 v2, v0 +; GFX11-NEXT: global_load_b32 v0, v0, s[2:3] +; GFX11-NEXT: v_add_co_u32 v2, s[0:1], s2, v2 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, s3, 0, s[0:1] +; GFX11-NEXT: s_mov_b64 s[0:1], 0 +; GFX11-NEXT: s_waitcnt_depctr 0xfffe +; GFX11-NEXT: .LBB16_1: ; %atomicrmw.start +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_mov_b32_e32 v5, v0 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_max_u32_e32 v4, v5, v1 +; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX11-NEXT: global_atomic_cmpswap_b32 v0, v[2:3], v[4:5], off glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: buffer_gl0_inv +; GFX11-NEXT: buffer_gl1_inv +; GFX11-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 +; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b64 exec, exec, s[0:1] +; GFX11-NEXT: s_cbranch_execnz .LBB16_1 +; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX11-NEXT: s_or_b64 exec, exec, s[0:1] +; GFX11-NEXT: ; return to shader part epilog + %zext.offset = zext i32 %voffset to i64 + %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset + %cast.gep0 = bitcast i8 addrspace(1)* %gep0 to i32 addrspace(1)* + %rtn = atomicrmw umax i32 addrspace(1)* %cast.gep0, i32 %data seq_cst + %cast.rtn = bitcast i32 %rtn to float + ret float %cast.rtn +} + +define amdgpu_ps float @global_umax_saddr_i32_rtn_neg128(i8 addrspace(1)* inreg %sbase, i32 %voffset, i32 %data) { +; GFX9-LABEL: global_umax_saddr_i32_rtn_neg128: +; GFX9: ; %bb.0: +; GFX9-NEXT: v_mov_b32_e32 v2, v0 +; GFX9-NEXT: global_load_dword v0, v0, s[2:3] offset:-128 +; GFX9-NEXT: v_mov_b32_e32 v3, s3 +; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, s2, v2 +; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc +; GFX9-NEXT: s_mov_b64 s[0:1], 0 +; GFX9-NEXT: .LBB17_1: ; %atomicrmw.start +; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v5, v0 +; GFX9-NEXT: v_max_u32_e32 v4, v5, v1 +; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off offset:-128 glc +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_wbinvl1 +; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 +; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX9-NEXT: s_cbranch_execnz .LBB17_1 +; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX9-NEXT: s_or_b64 exec, exec, s[0:1] +; GFX9-NEXT: ; return to shader part epilog +; +; GFX10-LABEL: global_umax_saddr_i32_rtn_neg128: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_mov_b32_e32 v2, v0 +; GFX10-NEXT: global_load_dword v0, v0, s[2:3] offset:-128 +; GFX10-NEXT: v_add_co_u32 v2, s[0:1], s2, v2 +; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s[0:1], s3, 0, s[0:1] +; GFX10-NEXT: s_mov_b64 s[0:1], 0 +; GFX10-NEXT: .LBB17_1: ; %atomicrmw.start +; 
GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: v_mov_b32_e32 v5, v0 +; GFX10-NEXT: v_max_u32_e32 v4, v5, v1 +; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off offset:-128 glc +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: buffer_gl0_inv +; GFX10-NEXT: buffer_gl1_inv +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 +; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX10-NEXT: s_cbranch_execnz .LBB17_1 +; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX10-NEXT: s_or_b64 exec, exec, s[0:1] +; GFX10-NEXT: ; return to shader part epilog +; +; GFX11-LABEL: global_umax_saddr_i32_rtn_neg128: +; GFX11: ; %bb.0: +; GFX11-NEXT: v_mov_b32_e32 v2, v0 +; GFX11-NEXT: global_load_b32 v0, v0, s[2:3] offset:-128 +; GFX11-NEXT: v_add_co_u32 v2, s[0:1], s2, v2 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, s3, 0, s[0:1] +; GFX11-NEXT: s_mov_b64 s[0:1], 0 +; GFX11-NEXT: s_waitcnt_depctr 0xfffe +; GFX11-NEXT: .LBB17_1: ; %atomicrmw.start +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_mov_b32_e32 v5, v0 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_max_u32_e32 v4, v5, v1 +; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX11-NEXT: global_atomic_cmpswap_b32 v0, v[2:3], v[4:5], off offset:-128 glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: buffer_gl0_inv +; GFX11-NEXT: buffer_gl1_inv +; GFX11-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 +; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b64 exec, exec, s[0:1] +; GFX11-NEXT: s_cbranch_execnz .LBB17_1 +; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX11-NEXT: s_or_b64 exec, exec, s[0:1] +; GFX11-NEXT: ; return to shader part epilog + %zext.offset = zext i32 %voffset to i64 + %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset + %gep1 = getelementptr inbounds i8, i8 addrspace(1)* %gep0, i64 -128 + %cast.gep1 = bitcast i8 addrspace(1)* %gep1 to i32 addrspace(1)* + %rtn = atomicrmw umax i32 addrspace(1)* %cast.gep1, i32 %data seq_cst + %cast.rtn = bitcast i32 %rtn to float + ret float %cast.rtn +} + +define amdgpu_ps void @global_umax_saddr_i32_nortn(i8 addrspace(1)* inreg %sbase, i32 %voffset, i32 %data) { +; GFX9-LABEL: global_umax_saddr_i32_nortn: +; GFX9: ; %bb.0: +; GFX9-NEXT: global_load_dword v5, v0, s[2:3] +; GFX9-NEXT: v_mov_b32_e32 v3, s3 +; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, s2, v0 +; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc +; GFX9-NEXT: s_mov_b64 s[0:1], 0 +; GFX9-NEXT: .LBB18_1: ; %atomicrmw.start +; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_max_u32_e32 v4, v5, v1 +; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off glc +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_wbinvl1 +; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 +; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX9-NEXT: v_mov_b32_e32 v5, v0 +; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX9-NEXT: s_cbranch_execnz .LBB18_1 +; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX9-NEXT: s_endpgm +; +; GFX10-LABEL: global_umax_saddr_i32_nortn: +; GFX10: ; %bb.0: +; GFX10-NEXT: global_load_dword v5, v0, s[2:3] +; GFX10-NEXT: v_add_co_u32 v2, s[0:1], s2, v0 +; GFX10-NEXT: 
v_add_co_ci_u32_e64 v3, s[0:1], s3, 0, s[0:1] +; GFX10-NEXT: s_mov_b64 s[0:1], 0 +; GFX10-NEXT: .LBB18_1: ; %atomicrmw.start +; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: v_max_u32_e32 v4, v5, v1 +; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off glc +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: buffer_gl0_inv +; GFX10-NEXT: buffer_gl1_inv +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 +; GFX10-NEXT: v_mov_b32_e32 v5, v0 +; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX10-NEXT: s_cbranch_execnz .LBB18_1 +; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: global_umax_saddr_i32_nortn: +; GFX11: ; %bb.0: +; GFX11-NEXT: global_load_b32 v5, v0, s[2:3] +; GFX11-NEXT: v_add_co_u32 v2, s[0:1], s2, v0 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, s3, 0, s[0:1] +; GFX11-NEXT: s_mov_b64 s[0:1], 0 +; GFX11-NEXT: s_waitcnt_depctr 0xfffe +; GFX11-NEXT: .LBB18_1: ; %atomicrmw.start +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_max_u32_e32 v4, v5, v1 +; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX11-NEXT: global_atomic_cmpswap_b32 v0, v[2:3], v[4:5], off glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: buffer_gl0_inv +; GFX11-NEXT: buffer_gl1_inv +; GFX11-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 +; GFX11-NEXT: v_mov_b32_e32 v5, v0 +; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b64 exec, exec, s[0:1] +; GFX11-NEXT: s_cbranch_execnz .LBB18_1 +; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX11-NEXT: s_endpgm + %zext.offset = zext i32 %voffset to i64 + %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset + %cast.gep0 = bitcast i8 addrspace(1)* %gep0 to i32 addrspace(1)* + %unused = atomicrmw umax i32 addrspace(1)* %cast.gep0, i32 %data seq_cst + ret void +} + +define amdgpu_ps void @global_umax_saddr_i32_nortn_neg128(i8 addrspace(1)* inreg %sbase, i32 %voffset, i32 %data) { +; GFX9-LABEL: global_umax_saddr_i32_nortn_neg128: +; GFX9: ; %bb.0: +; GFX9-NEXT: global_load_dword v5, v0, s[2:3] offset:-128 +; GFX9-NEXT: v_mov_b32_e32 v3, s3 +; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, s2, v0 +; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc +; GFX9-NEXT: s_mov_b64 s[0:1], 0 +; GFX9-NEXT: .LBB19_1: ; %atomicrmw.start +; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_max_u32_e32 v4, v5, v1 +; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off offset:-128 glc +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_wbinvl1 +; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 +; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX9-NEXT: v_mov_b32_e32 v5, v0 +; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX9-NEXT: s_cbranch_execnz .LBB19_1 +; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX9-NEXT: s_endpgm +; +; GFX10-LABEL: global_umax_saddr_i32_nortn_neg128: +; GFX10: ; %bb.0: +; GFX10-NEXT: global_load_dword v5, v0, s[2:3] offset:-128 +; GFX10-NEXT: v_add_co_u32 v2, s[0:1], s2, v0 +; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s[0:1], s3, 0, s[0:1] +; GFX10-NEXT: s_mov_b64 s[0:1], 0 +; GFX10-NEXT: .LBB19_1: ; %atomicrmw.start +; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX10-NEXT: 
s_waitcnt vmcnt(0) +; GFX10-NEXT: v_max_u32_e32 v4, v5, v1 +; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off offset:-128 glc +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: buffer_gl0_inv +; GFX10-NEXT: buffer_gl1_inv +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 +; GFX10-NEXT: v_mov_b32_e32 v5, v0 +; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX10-NEXT: s_cbranch_execnz .LBB19_1 +; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: global_umax_saddr_i32_nortn_neg128: +; GFX11: ; %bb.0: +; GFX11-NEXT: global_load_b32 v5, v0, s[2:3] offset:-128 +; GFX11-NEXT: v_add_co_u32 v2, s[0:1], s2, v0 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, s3, 0, s[0:1] +; GFX11-NEXT: s_mov_b64 s[0:1], 0 +; GFX11-NEXT: s_waitcnt_depctr 0xfffe +; GFX11-NEXT: .LBB19_1: ; %atomicrmw.start +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_max_u32_e32 v4, v5, v1 +; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX11-NEXT: global_atomic_cmpswap_b32 v0, v[2:3], v[4:5], off offset:-128 glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: buffer_gl0_inv +; GFX11-NEXT: buffer_gl1_inv +; GFX11-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 +; GFX11-NEXT: v_mov_b32_e32 v5, v0 +; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b64 exec, exec, s[0:1] +; GFX11-NEXT: s_cbranch_execnz .LBB19_1 +; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX11-NEXT: s_endpgm + %zext.offset = zext i32 %voffset to i64 + %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset + %gep1 = getelementptr inbounds i8, i8 addrspace(1)* %gep0, i64 -128 + %cast.gep1 = bitcast i8 addrspace(1)* %gep1 to i32 addrspace(1)* + %unused = atomicrmw umax i32 addrspace(1)* %cast.gep1, i32 %data seq_cst + ret void +} + +define amdgpu_ps <2 x float> @global_umax_saddr_i64_rtn(i8 addrspace(1)* inreg %sbase, i32 %voffset, i64 %data) { +; GFX9-LABEL: global_umax_saddr_i64_rtn: +; GFX9: ; %bb.0: +; GFX9-NEXT: global_load_dwordx2 v[3:4], v0, s[2:3] +; GFX9-NEXT: v_mov_b32_e32 v6, s3 +; GFX9-NEXT: v_add_co_u32_e32 v5, vcc, s2, v0 +; GFX9-NEXT: v_addc_co_u32_e32 v6, vcc, 0, v6, vcc +; GFX9-NEXT: s_mov_b64 s[0:1], 0 +; GFX9-NEXT: .LBB20_1: ; %atomicrmw.start +; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v10, v4 +; GFX9-NEXT: v_mov_b32_e32 v9, v3 +; GFX9-NEXT: v_cmp_gt_u64_e32 vcc, v[9:10], v[1:2] +; GFX9-NEXT: v_cndmask_b32_e32 v8, v2, v10, vcc +; GFX9-NEXT: v_cndmask_b32_e32 v7, v1, v9, vcc +; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: global_atomic_cmpswap_x2 v[3:4], v[5:6], v[7:10], off glc +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_wbinvl1 +; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[9:10] +; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX9-NEXT: s_cbranch_execnz .LBB20_1 +; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX9-NEXT: s_or_b64 exec, exec, s[0:1] +; GFX9-NEXT: v_mov_b32_e32 v0, v3 +; GFX9-NEXT: v_mov_b32_e32 v1, v4 +; GFX9-NEXT: ; return to shader part epilog +; +; GFX10-LABEL: global_umax_saddr_i64_rtn: +; GFX10: ; %bb.0: +; GFX10-NEXT: global_load_dwordx2 v[3:4], v0, s[2:3] +; GFX10-NEXT: v_add_co_u32 v5, s[0:1], s2, v0 +; GFX10-NEXT: 
v_add_co_ci_u32_e64 v6, s[0:1], s3, 0, s[0:1] +; GFX10-NEXT: s_mov_b64 s[0:1], 0 +; GFX10-NEXT: .LBB20_1: ; %atomicrmw.start +; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: v_mov_b32_e32 v10, v4 +; GFX10-NEXT: v_mov_b32_e32 v9, v3 +; GFX10-NEXT: v_cmp_gt_u64_e32 vcc, v[9:10], v[1:2] +; GFX10-NEXT: v_cndmask_b32_e32 v8, v2, v10, vcc +; GFX10-NEXT: v_cndmask_b32_e32 v7, v1, v9, vcc +; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: global_atomic_cmpswap_x2 v[3:4], v[5:6], v[7:10], off glc +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: buffer_gl0_inv +; GFX10-NEXT: buffer_gl1_inv +; GFX10-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[9:10] +; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX10-NEXT: s_cbranch_execnz .LBB20_1 +; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX10-NEXT: s_or_b64 exec, exec, s[0:1] +; GFX10-NEXT: v_mov_b32_e32 v0, v3 +; GFX10-NEXT: v_mov_b32_e32 v1, v4 +; GFX10-NEXT: ; return to shader part epilog +; +; GFX11-LABEL: global_umax_saddr_i64_rtn: +; GFX11: ; %bb.0: +; GFX11-NEXT: global_load_b64 v[3:4], v0, s[2:3] +; GFX11-NEXT: v_add_co_u32 v5, s[0:1], s2, v0 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_add_co_ci_u32_e64 v6, null, s3, 0, s[0:1] +; GFX11-NEXT: s_mov_b64 s[0:1], 0 +; GFX11-NEXT: s_waitcnt_depctr 0xfffe +; GFX11-NEXT: .LBB20_1: ; %atomicrmw.start +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_mov_b32_e32 v10, v4 +; GFX11-NEXT: v_mov_b32_e32 v9, v3 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_cmp_gt_u64_e32 vcc, v[9:10], v[1:2] +; GFX11-NEXT: v_cndmask_b32_e32 v8, v2, v10, vcc +; GFX11-NEXT: v_cndmask_b32_e32 v7, v1, v9, vcc +; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX11-NEXT: global_atomic_cmpswap_b64 v[3:4], v[5:6], v[7:10], off glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: buffer_gl0_inv +; GFX11-NEXT: buffer_gl1_inv +; GFX11-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[9:10] +; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b64 exec, exec, s[0:1] +; GFX11-NEXT: s_cbranch_execnz .LBB20_1 +; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX11-NEXT: s_or_b64 exec, exec, s[0:1] +; GFX11-NEXT: v_mov_b32_e32 v0, v3 +; GFX11-NEXT: v_mov_b32_e32 v1, v4 +; GFX11-NEXT: ; return to shader part epilog + %zext.offset = zext i32 %voffset to i64 + %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset + %cast.gep0 = bitcast i8 addrspace(1)* %gep0 to i64 addrspace(1)* + %rtn = atomicrmw umax i64 addrspace(1)* %cast.gep0, i64 %data seq_cst + %cast.rtn = bitcast i64 %rtn to <2 x float> + ret <2 x float> %cast.rtn +} + +define amdgpu_ps <2 x float> @global_umax_saddr_i64_rtn_neg128(i8 addrspace(1)* inreg %sbase, i32 %voffset, i64 %data) { +; GFX9-LABEL: global_umax_saddr_i64_rtn_neg128: +; GFX9: ; %bb.0: +; GFX9-NEXT: global_load_dwordx2 v[3:4], v0, s[2:3] offset:-128 +; GFX9-NEXT: v_mov_b32_e32 v6, s3 +; GFX9-NEXT: v_add_co_u32_e32 v5, vcc, s2, v0 +; GFX9-NEXT: v_addc_co_u32_e32 v6, vcc, 0, v6, vcc +; GFX9-NEXT: s_mov_b64 s[0:1], 0 +; GFX9-NEXT: .LBB21_1: ; %atomicrmw.start +; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v10, v4 +; GFX9-NEXT: v_mov_b32_e32 v9, v3 +; GFX9-NEXT: v_cmp_gt_u64_e32 vcc, v[9:10], v[1:2] +; GFX9-NEXT: v_cndmask_b32_e32 v8, 
v2, v10, vcc +; GFX9-NEXT: v_cndmask_b32_e32 v7, v1, v9, vcc +; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: global_atomic_cmpswap_x2 v[3:4], v[5:6], v[7:10], off offset:-128 glc +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_wbinvl1 +; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[9:10] +; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX9-NEXT: s_cbranch_execnz .LBB21_1 +; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX9-NEXT: s_or_b64 exec, exec, s[0:1] +; GFX9-NEXT: v_mov_b32_e32 v0, v3 +; GFX9-NEXT: v_mov_b32_e32 v1, v4 +; GFX9-NEXT: ; return to shader part epilog +; +; GFX10-LABEL: global_umax_saddr_i64_rtn_neg128: +; GFX10: ; %bb.0: +; GFX10-NEXT: global_load_dwordx2 v[3:4], v0, s[2:3] offset:-128 +; GFX10-NEXT: v_add_co_u32 v5, s[0:1], s2, v0 +; GFX10-NEXT: v_add_co_ci_u32_e64 v6, s[0:1], s3, 0, s[0:1] +; GFX10-NEXT: s_mov_b64 s[0:1], 0 +; GFX10-NEXT: .LBB21_1: ; %atomicrmw.start +; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: v_mov_b32_e32 v10, v4 +; GFX10-NEXT: v_mov_b32_e32 v9, v3 +; GFX10-NEXT: v_cmp_gt_u64_e32 vcc, v[9:10], v[1:2] +; GFX10-NEXT: v_cndmask_b32_e32 v8, v2, v10, vcc +; GFX10-NEXT: v_cndmask_b32_e32 v7, v1, v9, vcc +; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: global_atomic_cmpswap_x2 v[3:4], v[5:6], v[7:10], off offset:-128 glc +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: buffer_gl0_inv +; GFX10-NEXT: buffer_gl1_inv +; GFX10-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[9:10] +; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX10-NEXT: s_cbranch_execnz .LBB21_1 +; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX10-NEXT: s_or_b64 exec, exec, s[0:1] +; GFX10-NEXT: v_mov_b32_e32 v0, v3 +; GFX10-NEXT: v_mov_b32_e32 v1, v4 +; GFX10-NEXT: ; return to shader part epilog +; +; GFX11-LABEL: global_umax_saddr_i64_rtn_neg128: +; GFX11: ; %bb.0: +; GFX11-NEXT: global_load_b64 v[3:4], v0, s[2:3] offset:-128 +; GFX11-NEXT: v_add_co_u32 v5, s[0:1], s2, v0 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_add_co_ci_u32_e64 v6, null, s3, 0, s[0:1] +; GFX11-NEXT: s_mov_b64 s[0:1], 0 +; GFX11-NEXT: s_waitcnt_depctr 0xfffe +; GFX11-NEXT: .LBB21_1: ; %atomicrmw.start +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_mov_b32_e32 v10, v4 +; GFX11-NEXT: v_mov_b32_e32 v9, v3 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_cmp_gt_u64_e32 vcc, v[9:10], v[1:2] +; GFX11-NEXT: v_cndmask_b32_e32 v8, v2, v10, vcc +; GFX11-NEXT: v_cndmask_b32_e32 v7, v1, v9, vcc +; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX11-NEXT: global_atomic_cmpswap_b64 v[3:4], v[5:6], v[7:10], off offset:-128 glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: buffer_gl0_inv +; GFX11-NEXT: buffer_gl1_inv +; GFX11-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[9:10] +; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b64 exec, exec, s[0:1] +; GFX11-NEXT: s_cbranch_execnz .LBB21_1 +; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX11-NEXT: s_or_b64 exec, exec, s[0:1] +; GFX11-NEXT: v_mov_b32_e32 v0, v3 +; GFX11-NEXT: v_mov_b32_e32 v1, v4 +; GFX11-NEXT: ; return to shader part epilog + %zext.offset = zext i32 %voffset to i64 + %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset + %gep1 = getelementptr inbounds i8, i8 addrspace(1)* 
%gep0, i64 -128 + %cast.gep1 = bitcast i8 addrspace(1)* %gep1 to i64 addrspace(1)* + %rtn = atomicrmw umax i64 addrspace(1)* %cast.gep1, i64 %data seq_cst + %cast.rtn = bitcast i64 %rtn to <2 x float> + ret <2 x float> %cast.rtn +} + +define amdgpu_ps void @global_umax_saddr_i64_nortn(i8 addrspace(1)* inreg %sbase, i32 %voffset, i64 %data) { +; GFX9-LABEL: global_umax_saddr_i64_nortn: +; GFX9: ; %bb.0: +; GFX9-NEXT: global_load_dwordx2 v[5:6], v0, s[2:3] +; GFX9-NEXT: v_mov_b32_e32 v3, s3 +; GFX9-NEXT: v_add_co_u32_e32 v7, vcc, s2, v0 +; GFX9-NEXT: v_addc_co_u32_e32 v8, vcc, 0, v3, vcc +; GFX9-NEXT: s_mov_b64 s[0:1], 0 +; GFX9-NEXT: .LBB22_1: ; %atomicrmw.start +; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_cmp_gt_u64_e32 vcc, v[5:6], v[1:2] +; GFX9-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc +; GFX9-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc +; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: global_atomic_cmpswap_x2 v[3:4], v[7:8], v[3:6], off glc +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_wbinvl1 +; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[5:6] +; GFX9-NEXT: v_mov_b32_e32 v6, v4 +; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX9-NEXT: v_mov_b32_e32 v5, v3 +; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX9-NEXT: s_cbranch_execnz .LBB22_1 +; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX9-NEXT: s_endpgm +; +; GFX10-LABEL: global_umax_saddr_i64_nortn: +; GFX10: ; %bb.0: +; GFX10-NEXT: global_load_dwordx2 v[5:6], v0, s[2:3] +; GFX10-NEXT: v_add_co_u32 v7, s[0:1], s2, v0 +; GFX10-NEXT: v_add_co_ci_u32_e64 v8, s[0:1], s3, 0, s[0:1] +; GFX10-NEXT: s_mov_b64 s[0:1], 0 +; GFX10-NEXT: .LBB22_1: ; %atomicrmw.start +; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: v_cmp_gt_u64_e32 vcc, v[5:6], v[1:2] +; GFX10-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc +; GFX10-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc +; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: global_atomic_cmpswap_x2 v[3:4], v[7:8], v[3:6], off glc +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: buffer_gl0_inv +; GFX10-NEXT: buffer_gl1_inv +; GFX10-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[5:6] +; GFX10-NEXT: v_mov_b32_e32 v6, v4 +; GFX10-NEXT: v_mov_b32_e32 v5, v3 +; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX10-NEXT: s_cbranch_execnz .LBB22_1 +; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: global_umax_saddr_i64_nortn: +; GFX11: ; %bb.0: +; GFX11-NEXT: global_load_b64 v[5:6], v0, s[2:3] +; GFX11-NEXT: v_add_co_u32 v7, s[0:1], s2, v0 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_add_co_ci_u32_e64 v8, null, s3, 0, s[0:1] +; GFX11-NEXT: s_mov_b64 s[0:1], 0 +; GFX11-NEXT: s_waitcnt_depctr 0xfffe +; GFX11-NEXT: .LBB22_1: ; %atomicrmw.start +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cmp_gt_u64_e32 vcc, v[5:6], v[1:2] +; GFX11-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc +; GFX11-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc +; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX11-NEXT: global_atomic_cmpswap_b64 v[3:4], v[7:8], v[3:6], off glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: buffer_gl0_inv +; GFX11-NEXT: buffer_gl1_inv +; GFX11-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[5:6] +; GFX11-NEXT: v_mov_b32_e32 v6, v4 +; GFX11-NEXT: v_mov_b32_e32 v5, v3 +; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; 
GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b64 exec, exec, s[0:1] +; GFX11-NEXT: s_cbranch_execnz .LBB22_1 +; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX11-NEXT: s_endpgm + %zext.offset = zext i32 %voffset to i64 + %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset + %cast.gep0 = bitcast i8 addrspace(1)* %gep0 to i64 addrspace(1)* + %unused = atomicrmw umax i64 addrspace(1)* %cast.gep0, i64 %data seq_cst + ret void +} + +define amdgpu_ps void @global_umax_saddr_i64_nortn_neg128(i8 addrspace(1)* inreg %sbase, i32 %voffset, i64 %data) { +; GFX9-LABEL: global_umax_saddr_i64_nortn_neg128: +; GFX9: ; %bb.0: +; GFX9-NEXT: global_load_dwordx2 v[5:6], v0, s[2:3] offset:-128 +; GFX9-NEXT: v_mov_b32_e32 v3, s3 +; GFX9-NEXT: v_add_co_u32_e32 v7, vcc, s2, v0 +; GFX9-NEXT: v_addc_co_u32_e32 v8, vcc, 0, v3, vcc +; GFX9-NEXT: s_mov_b64 s[0:1], 0 +; GFX9-NEXT: .LBB23_1: ; %atomicrmw.start +; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_cmp_gt_u64_e32 vcc, v[5:6], v[1:2] +; GFX9-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc +; GFX9-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc +; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: global_atomic_cmpswap_x2 v[3:4], v[7:8], v[3:6], off offset:-128 glc +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_wbinvl1 +; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[5:6] +; GFX9-NEXT: v_mov_b32_e32 v6, v4 +; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX9-NEXT: v_mov_b32_e32 v5, v3 +; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX9-NEXT: s_cbranch_execnz .LBB23_1 +; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX9-NEXT: s_endpgm +; +; GFX10-LABEL: global_umax_saddr_i64_nortn_neg128: +; GFX10: ; %bb.0: +; GFX10-NEXT: global_load_dwordx2 v[5:6], v0, s[2:3] offset:-128 +; GFX10-NEXT: v_add_co_u32 v7, s[0:1], s2, v0 +; GFX10-NEXT: v_add_co_ci_u32_e64 v8, s[0:1], s3, 0, s[0:1] +; GFX10-NEXT: s_mov_b64 s[0:1], 0 +; GFX10-NEXT: .LBB23_1: ; %atomicrmw.start +; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: v_cmp_gt_u64_e32 vcc, v[5:6], v[1:2] +; GFX10-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc +; GFX10-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc +; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: global_atomic_cmpswap_x2 v[3:4], v[7:8], v[3:6], off offset:-128 glc +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: buffer_gl0_inv +; GFX10-NEXT: buffer_gl1_inv +; GFX10-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[5:6] +; GFX10-NEXT: v_mov_b32_e32 v6, v4 +; GFX10-NEXT: v_mov_b32_e32 v5, v3 +; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX10-NEXT: s_cbranch_execnz .LBB23_1 +; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: global_umax_saddr_i64_nortn_neg128: +; GFX11: ; %bb.0: +; GFX11-NEXT: global_load_b64 v[5:6], v0, s[2:3] offset:-128 +; GFX11-NEXT: v_add_co_u32 v7, s[0:1], s2, v0 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_add_co_ci_u32_e64 v8, null, s3, 0, s[0:1] +; GFX11-NEXT: s_mov_b64 s[0:1], 0 +; GFX11-NEXT: s_waitcnt_depctr 0xfffe +; GFX11-NEXT: .LBB23_1: ; %atomicrmw.start +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cmp_gt_u64_e32 vcc, v[5:6], v[1:2] +; GFX11-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc +; GFX11-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc +; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 +; 
GFX11-NEXT: global_atomic_cmpswap_b64 v[3:4], v[7:8], v[3:6], off offset:-128 glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: buffer_gl0_inv +; GFX11-NEXT: buffer_gl1_inv +; GFX11-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[5:6] +; GFX11-NEXT: v_mov_b32_e32 v6, v4 +; GFX11-NEXT: v_mov_b32_e32 v5, v3 +; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b64 exec, exec, s[0:1] +; GFX11-NEXT: s_cbranch_execnz .LBB23_1 +; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX11-NEXT: s_endpgm + %zext.offset = zext i32 %voffset to i64 + %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset + %gep1 = getelementptr inbounds i8, i8 addrspace(1)* %gep0, i64 -128 + %cast.gep1 = bitcast i8 addrspace(1)* %gep1 to i64 addrspace(1)* + %unused = atomicrmw umax i64 addrspace(1)* %cast.gep1, i64 %data seq_cst + ret void +} + +; -------------------------------------------------------------------------------- +; atomicrmw umin +; -------------------------------------------------------------------------------- + +define amdgpu_ps float @global_umin_saddr_i32_rtn(i8 addrspace(1)* inreg %sbase, i32 %voffset, i32 %data) { +; GFX9-LABEL: global_umin_saddr_i32_rtn: +; GFX9: ; %bb.0: +; GFX9-NEXT: v_mov_b32_e32 v2, v0 +; GFX9-NEXT: global_load_dword v0, v0, s[2:3] +; GFX9-NEXT: v_mov_b32_e32 v3, s3 +; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, s2, v2 +; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc +; GFX9-NEXT: s_mov_b64 s[0:1], 0 +; GFX9-NEXT: .LBB24_1: ; %atomicrmw.start +; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v5, v0 +; GFX9-NEXT: v_min_u32_e32 v4, v5, v1 +; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off glc +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_wbinvl1 +; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 +; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX9-NEXT: s_cbranch_execnz .LBB24_1 +; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX9-NEXT: s_or_b64 exec, exec, s[0:1] +; GFX9-NEXT: ; return to shader part epilog +; +; GFX10-LABEL: global_umin_saddr_i32_rtn: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_mov_b32_e32 v2, v0 +; GFX10-NEXT: global_load_dword v0, v0, s[2:3] +; GFX10-NEXT: v_add_co_u32 v2, s[0:1], s2, v2 +; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s[0:1], s3, 0, s[0:1] +; GFX10-NEXT: s_mov_b64 s[0:1], 0 +; GFX10-NEXT: .LBB24_1: ; %atomicrmw.start +; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: v_mov_b32_e32 v5, v0 +; GFX10-NEXT: v_min_u32_e32 v4, v5, v1 +; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off glc +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: buffer_gl0_inv +; GFX10-NEXT: buffer_gl1_inv +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 +; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX10-NEXT: s_cbranch_execnz .LBB24_1 +; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX10-NEXT: s_or_b64 exec, exec, s[0:1] +; GFX10-NEXT: ; return to shader part epilog +; +; GFX11-LABEL: global_umin_saddr_i32_rtn: +; GFX11: ; %bb.0: +; GFX11-NEXT: v_mov_b32_e32 v2, v0 +; GFX11-NEXT: global_load_b32 v0, v0, s[2:3] +; GFX11-NEXT: v_add_co_u32 v2, s[0:1], s2, v2 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, s3, 0, s[0:1] +; GFX11-NEXT: s_mov_b64 s[0:1], 0 
+; GFX11-NEXT: s_waitcnt_depctr 0xfffe +; GFX11-NEXT: .LBB24_1: ; %atomicrmw.start +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_mov_b32_e32 v5, v0 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_min_u32_e32 v4, v5, v1 +; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX11-NEXT: global_atomic_cmpswap_b32 v0, v[2:3], v[4:5], off glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: buffer_gl0_inv +; GFX11-NEXT: buffer_gl1_inv +; GFX11-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 +; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b64 exec, exec, s[0:1] +; GFX11-NEXT: s_cbranch_execnz .LBB24_1 +; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX11-NEXT: s_or_b64 exec, exec, s[0:1] +; GFX11-NEXT: ; return to shader part epilog + %zext.offset = zext i32 %voffset to i64 + %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset + %cast.gep0 = bitcast i8 addrspace(1)* %gep0 to i32 addrspace(1)* + %rtn = atomicrmw umin i32 addrspace(1)* %cast.gep0, i32 %data seq_cst + %cast.rtn = bitcast i32 %rtn to float + ret float %cast.rtn +} + +define amdgpu_ps float @global_umin_saddr_i32_rtn_neg128(i8 addrspace(1)* inreg %sbase, i32 %voffset, i32 %data) { +; GFX9-LABEL: global_umin_saddr_i32_rtn_neg128: +; GFX9: ; %bb.0: +; GFX9-NEXT: v_mov_b32_e32 v2, v0 +; GFX9-NEXT: global_load_dword v0, v0, s[2:3] offset:-128 +; GFX9-NEXT: v_mov_b32_e32 v3, s3 +; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, s2, v2 +; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc +; GFX9-NEXT: s_mov_b64 s[0:1], 0 +; GFX9-NEXT: .LBB25_1: ; %atomicrmw.start +; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v5, v0 +; GFX9-NEXT: v_min_u32_e32 v4, v5, v1 +; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off offset:-128 glc +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_wbinvl1 +; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 +; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX9-NEXT: s_cbranch_execnz .LBB25_1 +; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX9-NEXT: s_or_b64 exec, exec, s[0:1] +; GFX9-NEXT: ; return to shader part epilog +; +; GFX10-LABEL: global_umin_saddr_i32_rtn_neg128: +; GFX10: ; %bb.0: +; GFX10-NEXT: v_mov_b32_e32 v2, v0 +; GFX10-NEXT: global_load_dword v0, v0, s[2:3] offset:-128 +; GFX10-NEXT: v_add_co_u32 v2, s[0:1], s2, v2 +; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s[0:1], s3, 0, s[0:1] +; GFX10-NEXT: s_mov_b64 s[0:1], 0 +; GFX10-NEXT: .LBB25_1: ; %atomicrmw.start +; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: v_mov_b32_e32 v5, v0 +; GFX10-NEXT: v_min_u32_e32 v4, v5, v1 +; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off offset:-128 glc +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: buffer_gl0_inv +; GFX10-NEXT: buffer_gl1_inv +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 +; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX10-NEXT: s_cbranch_execnz .LBB25_1 +; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX10-NEXT: s_or_b64 exec, exec, s[0:1] +; GFX10-NEXT: ; return to shader part epilog +; +; GFX11-LABEL: global_umin_saddr_i32_rtn_neg128: +; GFX11: ; %bb.0: +; GFX11-NEXT: v_mov_b32_e32 v2, v0 +; GFX11-NEXT: 
global_load_b32 v0, v0, s[2:3] offset:-128 +; GFX11-NEXT: v_add_co_u32 v2, s[0:1], s2, v2 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, s3, 0, s[0:1] +; GFX11-NEXT: s_mov_b64 s[0:1], 0 +; GFX11-NEXT: s_waitcnt_depctr 0xfffe +; GFX11-NEXT: .LBB25_1: ; %atomicrmw.start +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_mov_b32_e32 v5, v0 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_min_u32_e32 v4, v5, v1 +; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX11-NEXT: global_atomic_cmpswap_b32 v0, v[2:3], v[4:5], off offset:-128 glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: buffer_gl0_inv +; GFX11-NEXT: buffer_gl1_inv +; GFX11-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 +; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b64 exec, exec, s[0:1] +; GFX11-NEXT: s_cbranch_execnz .LBB25_1 +; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX11-NEXT: s_or_b64 exec, exec, s[0:1] +; GFX11-NEXT: ; return to shader part epilog + %zext.offset = zext i32 %voffset to i64 + %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset + %gep1 = getelementptr inbounds i8, i8 addrspace(1)* %gep0, i64 -128 + %cast.gep1 = bitcast i8 addrspace(1)* %gep1 to i32 addrspace(1)* + %rtn = atomicrmw umin i32 addrspace(1)* %cast.gep1, i32 %data seq_cst + %cast.rtn = bitcast i32 %rtn to float + ret float %cast.rtn +} + +define amdgpu_ps void @global_umin_saddr_i32_nortn(i8 addrspace(1)* inreg %sbase, i32 %voffset, i32 %data) { +; GFX9-LABEL: global_umin_saddr_i32_nortn: +; GFX9: ; %bb.0: +; GFX9-NEXT: global_load_dword v5, v0, s[2:3] +; GFX9-NEXT: v_mov_b32_e32 v3, s3 +; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, s2, v0 +; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc +; GFX9-NEXT: s_mov_b64 s[0:1], 0 +; GFX9-NEXT: .LBB26_1: ; %atomicrmw.start +; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_min_u32_e32 v4, v5, v1 +; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off glc +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_wbinvl1 +; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 +; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX9-NEXT: v_mov_b32_e32 v5, v0 +; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX9-NEXT: s_cbranch_execnz .LBB26_1 +; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX9-NEXT: s_endpgm +; +; GFX10-LABEL: global_umin_saddr_i32_nortn: +; GFX10: ; %bb.0: +; GFX10-NEXT: global_load_dword v5, v0, s[2:3] +; GFX10-NEXT: v_add_co_u32 v2, s[0:1], s2, v0 +; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s[0:1], s3, 0, s[0:1] +; GFX10-NEXT: s_mov_b64 s[0:1], 0 +; GFX10-NEXT: .LBB26_1: ; %atomicrmw.start +; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: v_min_u32_e32 v4, v5, v1 +; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off glc +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: buffer_gl0_inv +; GFX10-NEXT: buffer_gl1_inv +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 +; GFX10-NEXT: v_mov_b32_e32 v5, v0 +; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX10-NEXT: s_cbranch_execnz .LBB26_1 +; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: global_umin_saddr_i32_nortn: +; GFX11: ; %bb.0: +; 
GFX11-NEXT: global_load_b32 v5, v0, s[2:3] +; GFX11-NEXT: v_add_co_u32 v2, s[0:1], s2, v0 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, s3, 0, s[0:1] +; GFX11-NEXT: s_mov_b64 s[0:1], 0 +; GFX11-NEXT: s_waitcnt_depctr 0xfffe +; GFX11-NEXT: .LBB26_1: ; %atomicrmw.start +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_min_u32_e32 v4, v5, v1 +; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX11-NEXT: global_atomic_cmpswap_b32 v0, v[2:3], v[4:5], off glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: buffer_gl0_inv +; GFX11-NEXT: buffer_gl1_inv +; GFX11-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 +; GFX11-NEXT: v_mov_b32_e32 v5, v0 +; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b64 exec, exec, s[0:1] +; GFX11-NEXT: s_cbranch_execnz .LBB26_1 +; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX11-NEXT: s_endpgm + %zext.offset = zext i32 %voffset to i64 + %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset + %cast.gep0 = bitcast i8 addrspace(1)* %gep0 to i32 addrspace(1)* + %unused = atomicrmw umin i32 addrspace(1)* %cast.gep0, i32 %data seq_cst + ret void +} + +define amdgpu_ps void @global_umin_saddr_i32_nortn_neg128(i8 addrspace(1)* inreg %sbase, i32 %voffset, i32 %data) { +; GFX9-LABEL: global_umin_saddr_i32_nortn_neg128: +; GFX9: ; %bb.0: +; GFX9-NEXT: global_load_dword v5, v0, s[2:3] offset:-128 +; GFX9-NEXT: v_mov_b32_e32 v3, s3 +; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, s2, v0 +; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc +; GFX9-NEXT: s_mov_b64 s[0:1], 0 +; GFX9-NEXT: .LBB27_1: ; %atomicrmw.start +; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_min_u32_e32 v4, v5, v1 +; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off offset:-128 glc +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_wbinvl1 +; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 +; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX9-NEXT: v_mov_b32_e32 v5, v0 +; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX9-NEXT: s_cbranch_execnz .LBB27_1 +; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX9-NEXT: s_endpgm +; +; GFX10-LABEL: global_umin_saddr_i32_nortn_neg128: +; GFX10: ; %bb.0: +; GFX10-NEXT: global_load_dword v5, v0, s[2:3] offset:-128 +; GFX10-NEXT: v_add_co_u32 v2, s[0:1], s2, v0 +; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s[0:1], s3, 0, s[0:1] +; GFX10-NEXT: s_mov_b64 s[0:1], 0 +; GFX10-NEXT: .LBB27_1: ; %atomicrmw.start +; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: v_min_u32_e32 v4, v5, v1 +; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: global_atomic_cmpswap v0, v[2:3], v[4:5], off offset:-128 glc +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: buffer_gl0_inv +; GFX10-NEXT: buffer_gl1_inv +; GFX10-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 +; GFX10-NEXT: v_mov_b32_e32 v5, v0 +; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX10-NEXT: s_cbranch_execnz .LBB27_1 +; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: global_umin_saddr_i32_nortn_neg128: +; GFX11: ; %bb.0: +; GFX11-NEXT: global_load_b32 v5, v0, s[2:3] offset:-128 +; GFX11-NEXT: v_add_co_u32 v2, s[0:1], s2, v0 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: 
v_add_co_ci_u32_e64 v3, null, s3, 0, s[0:1] +; GFX11-NEXT: s_mov_b64 s[0:1], 0 +; GFX11-NEXT: s_waitcnt_depctr 0xfffe +; GFX11-NEXT: .LBB27_1: ; %atomicrmw.start +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_min_u32_e32 v4, v5, v1 +; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX11-NEXT: global_atomic_cmpswap_b32 v0, v[2:3], v[4:5], off offset:-128 glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: buffer_gl0_inv +; GFX11-NEXT: buffer_gl1_inv +; GFX11-NEXT: v_cmp_eq_u32_e32 vcc, v0, v5 +; GFX11-NEXT: v_mov_b32_e32 v5, v0 +; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b64 exec, exec, s[0:1] +; GFX11-NEXT: s_cbranch_execnz .LBB27_1 +; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX11-NEXT: s_endpgm + %zext.offset = zext i32 %voffset to i64 + %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset + %gep1 = getelementptr inbounds i8, i8 addrspace(1)* %gep0, i64 -128 + %cast.gep1 = bitcast i8 addrspace(1)* %gep1 to i32 addrspace(1)* + %unused = atomicrmw umin i32 addrspace(1)* %cast.gep1, i32 %data seq_cst + ret void +} + +define amdgpu_ps <2 x float> @global_umin_saddr_i64_rtn(i8 addrspace(1)* inreg %sbase, i32 %voffset, i64 %data) { +; GFX9-LABEL: global_umin_saddr_i64_rtn: +; GFX9: ; %bb.0: +; GFX9-NEXT: global_load_dwordx2 v[3:4], v0, s[2:3] +; GFX9-NEXT: v_mov_b32_e32 v6, s3 +; GFX9-NEXT: v_add_co_u32_e32 v5, vcc, s2, v0 +; GFX9-NEXT: v_addc_co_u32_e32 v6, vcc, 0, v6, vcc +; GFX9-NEXT: s_mov_b64 s[0:1], 0 +; GFX9-NEXT: .LBB28_1: ; %atomicrmw.start +; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v10, v4 +; GFX9-NEXT: v_mov_b32_e32 v9, v3 +; GFX9-NEXT: v_cmp_le_u64_e32 vcc, v[9:10], v[1:2] +; GFX9-NEXT: v_cndmask_b32_e32 v8, v2, v10, vcc +; GFX9-NEXT: v_cndmask_b32_e32 v7, v1, v9, vcc +; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: global_atomic_cmpswap_x2 v[3:4], v[5:6], v[7:10], off glc +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_wbinvl1 +; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[9:10] +; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX9-NEXT: s_cbranch_execnz .LBB28_1 +; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX9-NEXT: s_or_b64 exec, exec, s[0:1] +; GFX9-NEXT: v_mov_b32_e32 v0, v3 +; GFX9-NEXT: v_mov_b32_e32 v1, v4 +; GFX9-NEXT: ; return to shader part epilog +; +; GFX10-LABEL: global_umin_saddr_i64_rtn: +; GFX10: ; %bb.0: +; GFX10-NEXT: global_load_dwordx2 v[3:4], v0, s[2:3] +; GFX10-NEXT: v_add_co_u32 v5, s[0:1], s2, v0 +; GFX10-NEXT: v_add_co_ci_u32_e64 v6, s[0:1], s3, 0, s[0:1] +; GFX10-NEXT: s_mov_b64 s[0:1], 0 +; GFX10-NEXT: .LBB28_1: ; %atomicrmw.start +; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: v_mov_b32_e32 v10, v4 +; GFX10-NEXT: v_mov_b32_e32 v9, v3 +; GFX10-NEXT: v_cmp_le_u64_e32 vcc, v[9:10], v[1:2] +; GFX10-NEXT: v_cndmask_b32_e32 v8, v2, v10, vcc +; GFX10-NEXT: v_cndmask_b32_e32 v7, v1, v9, vcc +; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: global_atomic_cmpswap_x2 v[3:4], v[5:6], v[7:10], off glc +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: buffer_gl0_inv +; GFX10-NEXT: buffer_gl1_inv +; GFX10-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[9:10] +; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX10-NEXT: 
s_cbranch_execnz .LBB28_1 +; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX10-NEXT: s_or_b64 exec, exec, s[0:1] +; GFX10-NEXT: v_mov_b32_e32 v0, v3 +; GFX10-NEXT: v_mov_b32_e32 v1, v4 +; GFX10-NEXT: ; return to shader part epilog +; +; GFX11-LABEL: global_umin_saddr_i64_rtn: +; GFX11: ; %bb.0: +; GFX11-NEXT: global_load_b64 v[3:4], v0, s[2:3] +; GFX11-NEXT: v_add_co_u32 v5, s[0:1], s2, v0 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_add_co_ci_u32_e64 v6, null, s3, 0, s[0:1] +; GFX11-NEXT: s_mov_b64 s[0:1], 0 +; GFX11-NEXT: s_waitcnt_depctr 0xfffe +; GFX11-NEXT: .LBB28_1: ; %atomicrmw.start +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_mov_b32_e32 v10, v4 +; GFX11-NEXT: v_mov_b32_e32 v9, v3 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_cmp_le_u64_e32 vcc, v[9:10], v[1:2] +; GFX11-NEXT: v_cndmask_b32_e32 v8, v2, v10, vcc +; GFX11-NEXT: v_cndmask_b32_e32 v7, v1, v9, vcc +; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX11-NEXT: global_atomic_cmpswap_b64 v[3:4], v[5:6], v[7:10], off glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: buffer_gl0_inv +; GFX11-NEXT: buffer_gl1_inv +; GFX11-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[9:10] +; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b64 exec, exec, s[0:1] +; GFX11-NEXT: s_cbranch_execnz .LBB28_1 +; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX11-NEXT: s_or_b64 exec, exec, s[0:1] +; GFX11-NEXT: v_mov_b32_e32 v0, v3 +; GFX11-NEXT: v_mov_b32_e32 v1, v4 +; GFX11-NEXT: ; return to shader part epilog + %zext.offset = zext i32 %voffset to i64 + %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset + %cast.gep0 = bitcast i8 addrspace(1)* %gep0 to i64 addrspace(1)* + %rtn = atomicrmw umin i64 addrspace(1)* %cast.gep0, i64 %data seq_cst + %cast.rtn = bitcast i64 %rtn to <2 x float> + ret <2 x float> %cast.rtn +} + +define amdgpu_ps <2 x float> @global_umin_saddr_i64_rtn_neg128(i8 addrspace(1)* inreg %sbase, i32 %voffset, i64 %data) { +; GFX9-LABEL: global_umin_saddr_i64_rtn_neg128: +; GFX9: ; %bb.0: +; GFX9-NEXT: global_load_dwordx2 v[3:4], v0, s[2:3] offset:-128 +; GFX9-NEXT: v_mov_b32_e32 v6, s3 +; GFX9-NEXT: v_add_co_u32_e32 v5, vcc, s2, v0 +; GFX9-NEXT: v_addc_co_u32_e32 v6, vcc, 0, v6, vcc +; GFX9-NEXT: s_mov_b64 s[0:1], 0 +; GFX9-NEXT: .LBB29_1: ; %atomicrmw.start +; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v10, v4 +; GFX9-NEXT: v_mov_b32_e32 v9, v3 +; GFX9-NEXT: v_cmp_le_u64_e32 vcc, v[9:10], v[1:2] +; GFX9-NEXT: v_cndmask_b32_e32 v8, v2, v10, vcc +; GFX9-NEXT: v_cndmask_b32_e32 v7, v1, v9, vcc +; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: global_atomic_cmpswap_x2 v[3:4], v[5:6], v[7:10], off offset:-128 glc +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_wbinvl1 +; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[9:10] +; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX9-NEXT: s_cbranch_execnz .LBB29_1 +; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX9-NEXT: s_or_b64 exec, exec, s[0:1] +; GFX9-NEXT: v_mov_b32_e32 v0, v3 +; GFX9-NEXT: v_mov_b32_e32 v1, v4 +; GFX9-NEXT: ; return to shader part epilog +; +; GFX10-LABEL: global_umin_saddr_i64_rtn_neg128: +; GFX10: ; %bb.0: +; GFX10-NEXT: global_load_dwordx2 v[3:4], v0, s[2:3] offset:-128 +; GFX10-NEXT: v_add_co_u32 v5, s[0:1], s2, v0 +; GFX10-NEXT: 
v_add_co_ci_u32_e64 v6, s[0:1], s3, 0, s[0:1] +; GFX10-NEXT: s_mov_b64 s[0:1], 0 +; GFX10-NEXT: .LBB29_1: ; %atomicrmw.start +; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: v_mov_b32_e32 v10, v4 +; GFX10-NEXT: v_mov_b32_e32 v9, v3 +; GFX10-NEXT: v_cmp_le_u64_e32 vcc, v[9:10], v[1:2] +; GFX10-NEXT: v_cndmask_b32_e32 v8, v2, v10, vcc +; GFX10-NEXT: v_cndmask_b32_e32 v7, v1, v9, vcc +; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: global_atomic_cmpswap_x2 v[3:4], v[5:6], v[7:10], off offset:-128 glc +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: buffer_gl0_inv +; GFX10-NEXT: buffer_gl1_inv +; GFX10-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[9:10] +; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX10-NEXT: s_cbranch_execnz .LBB29_1 +; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX10-NEXT: s_or_b64 exec, exec, s[0:1] +; GFX10-NEXT: v_mov_b32_e32 v0, v3 +; GFX10-NEXT: v_mov_b32_e32 v1, v4 +; GFX10-NEXT: ; return to shader part epilog +; +; GFX11-LABEL: global_umin_saddr_i64_rtn_neg128: +; GFX11: ; %bb.0: +; GFX11-NEXT: global_load_b64 v[3:4], v0, s[2:3] offset:-128 +; GFX11-NEXT: v_add_co_u32 v5, s[0:1], s2, v0 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_add_co_ci_u32_e64 v6, null, s3, 0, s[0:1] +; GFX11-NEXT: s_mov_b64 s[0:1], 0 +; GFX11-NEXT: s_waitcnt_depctr 0xfffe +; GFX11-NEXT: .LBB29_1: ; %atomicrmw.start +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_mov_b32_e32 v10, v4 +; GFX11-NEXT: v_mov_b32_e32 v9, v3 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_cmp_le_u64_e32 vcc, v[9:10], v[1:2] +; GFX11-NEXT: v_cndmask_b32_e32 v8, v2, v10, vcc +; GFX11-NEXT: v_cndmask_b32_e32 v7, v1, v9, vcc +; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX11-NEXT: global_atomic_cmpswap_b64 v[3:4], v[5:6], v[7:10], off offset:-128 glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: buffer_gl0_inv +; GFX11-NEXT: buffer_gl1_inv +; GFX11-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[9:10] +; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b64 exec, exec, s[0:1] +; GFX11-NEXT: s_cbranch_execnz .LBB29_1 +; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX11-NEXT: s_or_b64 exec, exec, s[0:1] +; GFX11-NEXT: v_mov_b32_e32 v0, v3 +; GFX11-NEXT: v_mov_b32_e32 v1, v4 +; GFX11-NEXT: ; return to shader part epilog + %zext.offset = zext i32 %voffset to i64 + %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset + %gep1 = getelementptr inbounds i8, i8 addrspace(1)* %gep0, i64 -128 + %cast.gep1 = bitcast i8 addrspace(1)* %gep1 to i64 addrspace(1)* + %rtn = atomicrmw umin i64 addrspace(1)* %cast.gep1, i64 %data seq_cst + %cast.rtn = bitcast i64 %rtn to <2 x float> + ret <2 x float> %cast.rtn +} + +define amdgpu_ps void @global_umin_saddr_i64_nortn(i8 addrspace(1)* inreg %sbase, i32 %voffset, i64 %data) { +; GFX9-LABEL: global_umin_saddr_i64_nortn: +; GFX9: ; %bb.0: +; GFX9-NEXT: global_load_dwordx2 v[5:6], v0, s[2:3] +; GFX9-NEXT: v_mov_b32_e32 v3, s3 +; GFX9-NEXT: v_add_co_u32_e32 v7, vcc, s2, v0 +; GFX9-NEXT: v_addc_co_u32_e32 v8, vcc, 0, v3, vcc +; GFX9-NEXT: s_mov_b64 s[0:1], 0 +; GFX9-NEXT: .LBB30_1: ; %atomicrmw.start +; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_cmp_le_u64_e32 vcc, v[5:6], v[1:2] +; GFX9-NEXT: 
v_cndmask_b32_e32 v4, v2, v6, vcc +; GFX9-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc +; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: global_atomic_cmpswap_x2 v[3:4], v[7:8], v[3:6], off glc +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_wbinvl1 +; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[5:6] +; GFX9-NEXT: v_mov_b32_e32 v6, v4 +; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX9-NEXT: v_mov_b32_e32 v5, v3 +; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX9-NEXT: s_cbranch_execnz .LBB30_1 +; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX9-NEXT: s_endpgm +; +; GFX10-LABEL: global_umin_saddr_i64_nortn: +; GFX10: ; %bb.0: +; GFX10-NEXT: global_load_dwordx2 v[5:6], v0, s[2:3] +; GFX10-NEXT: v_add_co_u32 v7, s[0:1], s2, v0 +; GFX10-NEXT: v_add_co_ci_u32_e64 v8, s[0:1], s3, 0, s[0:1] +; GFX10-NEXT: s_mov_b64 s[0:1], 0 +; GFX10-NEXT: .LBB30_1: ; %atomicrmw.start +; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: v_cmp_le_u64_e32 vcc, v[5:6], v[1:2] +; GFX10-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc +; GFX10-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc +; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: global_atomic_cmpswap_x2 v[3:4], v[7:8], v[3:6], off glc +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: buffer_gl0_inv +; GFX10-NEXT: buffer_gl1_inv +; GFX10-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[5:6] +; GFX10-NEXT: v_mov_b32_e32 v6, v4 +; GFX10-NEXT: v_mov_b32_e32 v5, v3 +; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX10-NEXT: s_cbranch_execnz .LBB30_1 +; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: global_umin_saddr_i64_nortn: +; GFX11: ; %bb.0: +; GFX11-NEXT: global_load_b64 v[5:6], v0, s[2:3] +; GFX11-NEXT: v_add_co_u32 v7, s[0:1], s2, v0 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_add_co_ci_u32_e64 v8, null, s3, 0, s[0:1] +; GFX11-NEXT: s_mov_b64 s[0:1], 0 +; GFX11-NEXT: s_waitcnt_depctr 0xfffe +; GFX11-NEXT: .LBB30_1: ; %atomicrmw.start +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cmp_le_u64_e32 vcc, v[5:6], v[1:2] +; GFX11-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc +; GFX11-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc +; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX11-NEXT: global_atomic_cmpswap_b64 v[3:4], v[7:8], v[3:6], off glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: buffer_gl0_inv +; GFX11-NEXT: buffer_gl1_inv +; GFX11-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[5:6] +; GFX11-NEXT: v_mov_b32_e32 v6, v4 +; GFX11-NEXT: v_mov_b32_e32 v5, v3 +; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b64 exec, exec, s[0:1] +; GFX11-NEXT: s_cbranch_execnz .LBB30_1 +; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX11-NEXT: s_endpgm + %zext.offset = zext i32 %voffset to i64 + %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset + %cast.gep0 = bitcast i8 addrspace(1)* %gep0 to i64 addrspace(1)* + %unused = atomicrmw umin i64 addrspace(1)* %cast.gep0, i64 %data seq_cst + ret void +} + +define amdgpu_ps void @global_umin_saddr_i64_nortn_neg128(i8 addrspace(1)* inreg %sbase, i32 %voffset, i64 %data) { +; GFX9-LABEL: global_umin_saddr_i64_nortn_neg128: +; GFX9: ; %bb.0: +; GFX9-NEXT: global_load_dwordx2 v[5:6], v0, s[2:3] offset:-128 +; GFX9-NEXT: v_mov_b32_e32 v3, s3 +; GFX9-NEXT: v_add_co_u32_e32 v7, vcc, s2, v0 +; 
GFX9-NEXT: v_addc_co_u32_e32 v8, vcc, 0, v3, vcc +; GFX9-NEXT: s_mov_b64 s[0:1], 0 +; GFX9-NEXT: .LBB31_1: ; %atomicrmw.start +; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: v_cmp_le_u64_e32 vcc, v[5:6], v[1:2] +; GFX9-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc +; GFX9-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc +; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: global_atomic_cmpswap_x2 v[3:4], v[7:8], v[3:6], off offset:-128 glc +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_wbinvl1 +; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[5:6] +; GFX9-NEXT: v_mov_b32_e32 v6, v4 +; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX9-NEXT: v_mov_b32_e32 v5, v3 +; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX9-NEXT: s_cbranch_execnz .LBB31_1 +; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX9-NEXT: s_endpgm +; +; GFX10-LABEL: global_umin_saddr_i64_nortn_neg128: +; GFX10: ; %bb.0: +; GFX10-NEXT: global_load_dwordx2 v[5:6], v0, s[2:3] offset:-128 +; GFX10-NEXT: v_add_co_u32 v7, s[0:1], s2, v0 +; GFX10-NEXT: v_add_co_ci_u32_e64 v8, s[0:1], s3, 0, s[0:1] +; GFX10-NEXT: s_mov_b64 s[0:1], 0 +; GFX10-NEXT: .LBB31_1: ; %atomicrmw.start +; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: v_cmp_le_u64_e32 vcc, v[5:6], v[1:2] +; GFX10-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc +; GFX10-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc +; GFX10-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX10-NEXT: global_atomic_cmpswap_x2 v[3:4], v[7:8], v[3:6], off offset:-128 glc +; GFX10-NEXT: s_waitcnt vmcnt(0) +; GFX10-NEXT: buffer_gl0_inv +; GFX10-NEXT: buffer_gl1_inv +; GFX10-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[5:6] +; GFX10-NEXT: v_mov_b32_e32 v6, v4 +; GFX10-NEXT: v_mov_b32_e32 v5, v3 +; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX10-NEXT: s_cbranch_execnz .LBB31_1 +; GFX10-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX10-NEXT: s_endpgm +; +; GFX11-LABEL: global_umin_saddr_i64_nortn_neg128: +; GFX11: ; %bb.0: +; GFX11-NEXT: global_load_b64 v[5:6], v0, s[2:3] offset:-128 +; GFX11-NEXT: v_add_co_u32 v7, s[0:1], s2, v0 +; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-NEXT: v_add_co_ci_u32_e64 v8, null, s3, 0, s[0:1] +; GFX11-NEXT: s_mov_b64 s[0:1], 0 +; GFX11-NEXT: s_waitcnt_depctr 0xfffe +; GFX11-NEXT: .LBB31_1: ; %atomicrmw.start +; GFX11-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: v_cmp_le_u64_e32 vcc, v[5:6], v[1:2] +; GFX11-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc +; GFX11-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc +; GFX11-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 +; GFX11-NEXT: global_atomic_cmpswap_b64 v[3:4], v[7:8], v[3:6], off offset:-128 glc +; GFX11-NEXT: s_waitcnt vmcnt(0) +; GFX11-NEXT: buffer_gl0_inv +; GFX11-NEXT: buffer_gl1_inv +; GFX11-NEXT: v_cmp_eq_u64_e32 vcc, v[3:4], v[5:6] +; GFX11-NEXT: v_mov_b32_e32 v6, v4 +; GFX11-NEXT: v_mov_b32_e32 v5, v3 +; GFX11-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) +; GFX11-NEXT: s_and_not1_b64 exec, exec, s[0:1] +; GFX11-NEXT: s_cbranch_execnz .LBB31_1 +; GFX11-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX11-NEXT: s_endpgm + %zext.offset = zext i32 %voffset to i64 + %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset + %gep1 = getelementptr inbounds i8, i8 addrspace(1)* %gep0, i64 -128 + %cast.gep1 = bitcast i8 addrspace(1)* %gep1 to i64 addrspace(1)* + %unused = 
atomicrmw umin i64 addrspace(1)* %cast.gep1, i64 %data seq_cst + ret void +} + +attributes #0 = { argmemonly nounwind willreturn } diff --git a/llvm/test/CodeGen/AMDGPU/global-saddr-atomics.ll b/llvm/test/CodeGen/AMDGPU/global-saddr-atomics.ll --- a/llvm/test/CodeGen/AMDGPU/global-saddr-atomics.ll +++ b/llvm/test/CodeGen/AMDGPU/global-saddr-atomics.ll @@ -2069,10 +2069,9 @@ define amdgpu_ps float @global_max_saddr_i32_rtn(i8 addrspace(1)* inreg %sbase, i32 %voffset, i32 %data) { ; GFX9-LABEL: global_max_saddr_i32_rtn: ; GFX9: ; %bb.0: -; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: s_waitcnt lgkmcnt(0) ; GFX9-NEXT: global_atomic_smax v0, v0, v1, s[2:3] glc ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: buffer_wbinvl1 ; GFX9-NEXT: ; return to shader part epilog ; ; GFX10-LABEL: global_max_saddr_i32_rtn: @@ -2082,7 +2081,6 @@ ; GFX10-NEXT: global_atomic_smax v0, v0, v1, s[2:3] glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl0_inv -; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: ; return to shader part epilog ; ; GFX11-LABEL: global_max_saddr_i32_rtn: @@ -2092,12 +2090,11 @@ ; GFX11-NEXT: global_atomic_max_i32 v0, v0, v1, s[2:3] glc ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: buffer_gl0_inv -; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: ; return to shader part epilog %zext.offset = zext i32 %voffset to i64 %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset %cast.gep0 = bitcast i8 addrspace(1)* %gep0 to i32 addrspace(1)* - %rtn = atomicrmw max i32 addrspace(1)* %cast.gep0, i32 %data seq_cst + %rtn = atomicrmw max i32 addrspace(1)* %cast.gep0, i32 %data syncscope("workgroup") seq_cst %cast.rtn = bitcast i32 %rtn to float ret float %cast.rtn } @@ -2105,10 +2102,9 @@ define amdgpu_ps float @global_max_saddr_i32_rtn_neg128(i8 addrspace(1)* inreg %sbase, i32 %voffset, i32 %data) { ; GFX9-LABEL: global_max_saddr_i32_rtn_neg128: ; GFX9: ; %bb.0: -; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: s_waitcnt lgkmcnt(0) ; GFX9-NEXT: global_atomic_smax v0, v0, v1, s[2:3] offset:-128 glc ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: buffer_wbinvl1 ; GFX9-NEXT: ; return to shader part epilog ; ; GFX10-LABEL: global_max_saddr_i32_rtn_neg128: @@ -2118,7 +2114,6 @@ ; GFX10-NEXT: global_atomic_smax v0, v0, v1, s[2:3] offset:-128 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl0_inv -; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: ; return to shader part epilog ; ; GFX11-LABEL: global_max_saddr_i32_rtn_neg128: @@ -2128,13 +2123,12 @@ ; GFX11-NEXT: global_atomic_max_i32 v0, v0, v1, s[2:3] offset:-128 glc ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: buffer_gl0_inv -; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: ; return to shader part epilog %zext.offset = zext i32 %voffset to i64 %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset %gep1 = getelementptr inbounds i8, i8 addrspace(1)* %gep0, i64 -128 %cast.gep1 = bitcast i8 addrspace(1)* %gep1 to i32 addrspace(1)* - %rtn = atomicrmw max i32 addrspace(1)* %cast.gep1, i32 %data seq_cst + %rtn = atomicrmw max i32 addrspace(1)* %cast.gep1, i32 %data syncscope("workgroup") seq_cst %cast.rtn = bitcast i32 %rtn to float ret float %cast.rtn } @@ -2142,10 +2136,8 @@ define amdgpu_ps void @global_max_saddr_i32_nortn(i8 addrspace(1)* inreg %sbase, i32 %voffset, i32 %data) { ; GFX9-LABEL: global_max_saddr_i32_nortn: ; GFX9: ; %bb.0: -; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: s_waitcnt lgkmcnt(0) ; GFX9-NEXT: global_atomic_smax v0, v1, s[2:3] -; GFX9-NEXT: s_waitcnt vmcnt(0) -; 
GFX9-NEXT: buffer_wbinvl1 ; GFX9-NEXT: s_endpgm ; ; GFX10-LABEL: global_max_saddr_i32_nortn: @@ -2155,7 +2147,6 @@ ; GFX10-NEXT: global_atomic_smax v0, v1, s[2:3] ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX10-NEXT: buffer_gl0_inv -; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: s_endpgm ; ; GFX11-LABEL: global_max_saddr_i32_nortn: @@ -2165,22 +2156,19 @@ ; GFX11-NEXT: global_atomic_max_i32 v0, v1, s[2:3] ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX11-NEXT: buffer_gl0_inv -; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: s_endpgm %zext.offset = zext i32 %voffset to i64 %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset %cast.gep0 = bitcast i8 addrspace(1)* %gep0 to i32 addrspace(1)* - %unused = atomicrmw max i32 addrspace(1)* %cast.gep0, i32 %data seq_cst + %unused = atomicrmw max i32 addrspace(1)* %cast.gep0, i32 %data syncscope("workgroup") seq_cst ret void } define amdgpu_ps void @global_max_saddr_i32_nortn_neg128(i8 addrspace(1)* inreg %sbase, i32 %voffset, i32 %data) { ; GFX9-LABEL: global_max_saddr_i32_nortn_neg128: ; GFX9: ; %bb.0: -; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: s_waitcnt lgkmcnt(0) ; GFX9-NEXT: global_atomic_smax v0, v1, s[2:3] offset:-128 -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: buffer_wbinvl1 ; GFX9-NEXT: s_endpgm ; ; GFX10-LABEL: global_max_saddr_i32_nortn_neg128: @@ -2190,7 +2178,6 @@ ; GFX10-NEXT: global_atomic_smax v0, v1, s[2:3] offset:-128 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX10-NEXT: buffer_gl0_inv -; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: s_endpgm ; ; GFX11-LABEL: global_max_saddr_i32_nortn_neg128: @@ -2200,23 +2187,21 @@ ; GFX11-NEXT: global_atomic_max_i32 v0, v1, s[2:3] offset:-128 ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX11-NEXT: buffer_gl0_inv -; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: s_endpgm %zext.offset = zext i32 %voffset to i64 %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset %gep1 = getelementptr inbounds i8, i8 addrspace(1)* %gep0, i64 -128 %cast.gep1 = bitcast i8 addrspace(1)* %gep1 to i32 addrspace(1)* - %unused = atomicrmw max i32 addrspace(1)* %cast.gep1, i32 %data seq_cst + %unused = atomicrmw max i32 addrspace(1)* %cast.gep1, i32 %data syncscope("workgroup") seq_cst ret void } define amdgpu_ps <2 x float> @global_max_saddr_i64_rtn(i8 addrspace(1)* inreg %sbase, i32 %voffset, i64 %data) { ; GFX9-LABEL: global_max_saddr_i64_rtn: ; GFX9: ; %bb.0: -; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: s_waitcnt lgkmcnt(0) ; GFX9-NEXT: global_atomic_smax_x2 v[0:1], v0, v[1:2], s[2:3] glc ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: buffer_wbinvl1 ; GFX9-NEXT: ; return to shader part epilog ; ; GFX10-LABEL: global_max_saddr_i64_rtn: @@ -2226,7 +2211,6 @@ ; GFX10-NEXT: global_atomic_smax_x2 v[0:1], v0, v[1:2], s[2:3] glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl0_inv -; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: ; return to shader part epilog ; ; GFX11-LABEL: global_max_saddr_i64_rtn: @@ -2236,12 +2220,11 @@ ; GFX11-NEXT: global_atomic_max_i64 v[0:1], v0, v[1:2], s[2:3] glc ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: buffer_gl0_inv -; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: ; return to shader part epilog %zext.offset = zext i32 %voffset to i64 %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset %cast.gep0 = bitcast i8 addrspace(1)* %gep0 to i64 addrspace(1)* - %rtn = atomicrmw max i64 addrspace(1)* %cast.gep0, i64 %data seq_cst + %rtn = atomicrmw max i64 addrspace(1)* %cast.gep0, i64 %data syncscope("workgroup") seq_cst 
%cast.rtn = bitcast i64 %rtn to <2 x float> ret <2 x float> %cast.rtn } @@ -2249,10 +2232,9 @@ define amdgpu_ps <2 x float> @global_max_saddr_i64_rtn_neg128(i8 addrspace(1)* inreg %sbase, i32 %voffset, i64 %data) { ; GFX9-LABEL: global_max_saddr_i64_rtn_neg128: ; GFX9: ; %bb.0: -; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: s_waitcnt lgkmcnt(0) ; GFX9-NEXT: global_atomic_smax_x2 v[0:1], v0, v[1:2], s[2:3] offset:-128 glc ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: buffer_wbinvl1 ; GFX9-NEXT: ; return to shader part epilog ; ; GFX10-LABEL: global_max_saddr_i64_rtn_neg128: @@ -2262,7 +2244,6 @@ ; GFX10-NEXT: global_atomic_smax_x2 v[0:1], v0, v[1:2], s[2:3] offset:-128 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl0_inv -; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: ; return to shader part epilog ; ; GFX11-LABEL: global_max_saddr_i64_rtn_neg128: @@ -2272,13 +2253,12 @@ ; GFX11-NEXT: global_atomic_max_i64 v[0:1], v0, v[1:2], s[2:3] offset:-128 glc ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: buffer_gl0_inv -; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: ; return to shader part epilog %zext.offset = zext i32 %voffset to i64 %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset %gep1 = getelementptr inbounds i8, i8 addrspace(1)* %gep0, i64 -128 %cast.gep1 = bitcast i8 addrspace(1)* %gep1 to i64 addrspace(1)* - %rtn = atomicrmw max i64 addrspace(1)* %cast.gep1, i64 %data seq_cst + %rtn = atomicrmw max i64 addrspace(1)* %cast.gep1, i64 %data syncscope("workgroup") seq_cst %cast.rtn = bitcast i64 %rtn to <2 x float> ret <2 x float> %cast.rtn } @@ -2286,10 +2266,8 @@ define amdgpu_ps void @global_max_saddr_i64_nortn(i8 addrspace(1)* inreg %sbase, i32 %voffset, i64 %data) { ; GFX9-LABEL: global_max_saddr_i64_nortn: ; GFX9: ; %bb.0: -; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: s_waitcnt lgkmcnt(0) ; GFX9-NEXT: global_atomic_smax_x2 v0, v[1:2], s[2:3] -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: buffer_wbinvl1 ; GFX9-NEXT: s_endpgm ; ; GFX10-LABEL: global_max_saddr_i64_nortn: @@ -2299,7 +2277,6 @@ ; GFX10-NEXT: global_atomic_smax_x2 v0, v[1:2], s[2:3] ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX10-NEXT: buffer_gl0_inv -; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: s_endpgm ; ; GFX11-LABEL: global_max_saddr_i64_nortn: @@ -2309,22 +2286,19 @@ ; GFX11-NEXT: global_atomic_max_i64 v0, v[1:2], s[2:3] ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX11-NEXT: buffer_gl0_inv -; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: s_endpgm %zext.offset = zext i32 %voffset to i64 %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset %cast.gep0 = bitcast i8 addrspace(1)* %gep0 to i64 addrspace(1)* - %unused = atomicrmw max i64 addrspace(1)* %cast.gep0, i64 %data seq_cst + %unused = atomicrmw max i64 addrspace(1)* %cast.gep0, i64 %data syncscope("workgroup") seq_cst ret void } define amdgpu_ps void @global_max_saddr_i64_nortn_neg128(i8 addrspace(1)* inreg %sbase, i32 %voffset, i64 %data) { ; GFX9-LABEL: global_max_saddr_i64_nortn_neg128: ; GFX9: ; %bb.0: -; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: s_waitcnt lgkmcnt(0) ; GFX9-NEXT: global_atomic_smax_x2 v0, v[1:2], s[2:3] offset:-128 -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: buffer_wbinvl1 ; GFX9-NEXT: s_endpgm ; ; GFX10-LABEL: global_max_saddr_i64_nortn_neg128: @@ -2334,7 +2308,6 @@ ; GFX10-NEXT: global_atomic_smax_x2 v0, v[1:2], s[2:3] offset:-128 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX10-NEXT: buffer_gl0_inv -; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: s_endpgm ; ; 
GFX11-LABEL: global_max_saddr_i64_nortn_neg128: @@ -2344,13 +2317,12 @@ ; GFX11-NEXT: global_atomic_max_i64 v0, v[1:2], s[2:3] offset:-128 ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX11-NEXT: buffer_gl0_inv -; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: s_endpgm %zext.offset = zext i32 %voffset to i64 %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset %gep1 = getelementptr inbounds i8, i8 addrspace(1)* %gep0, i64 -128 %cast.gep1 = bitcast i8 addrspace(1)* %gep1 to i64 addrspace(1)* - %unused = atomicrmw max i64 addrspace(1)* %cast.gep1, i64 %data seq_cst + %unused = atomicrmw max i64 addrspace(1)* %cast.gep1, i64 %data syncscope("workgroup") seq_cst ret void } @@ -2361,10 +2333,9 @@ define amdgpu_ps float @global_min_saddr_i32_rtn(i8 addrspace(1)* inreg %sbase, i32 %voffset, i32 %data) { ; GFX9-LABEL: global_min_saddr_i32_rtn: ; GFX9: ; %bb.0: -; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: s_waitcnt lgkmcnt(0) ; GFX9-NEXT: global_atomic_smin v0, v0, v1, s[2:3] glc ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: buffer_wbinvl1 ; GFX9-NEXT: ; return to shader part epilog ; ; GFX10-LABEL: global_min_saddr_i32_rtn: @@ -2374,7 +2345,6 @@ ; GFX10-NEXT: global_atomic_smin v0, v0, v1, s[2:3] glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl0_inv -; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: ; return to shader part epilog ; ; GFX11-LABEL: global_min_saddr_i32_rtn: @@ -2384,12 +2354,11 @@ ; GFX11-NEXT: global_atomic_min_i32 v0, v0, v1, s[2:3] glc ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: buffer_gl0_inv -; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: ; return to shader part epilog %zext.offset = zext i32 %voffset to i64 %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset %cast.gep0 = bitcast i8 addrspace(1)* %gep0 to i32 addrspace(1)* - %rtn = atomicrmw min i32 addrspace(1)* %cast.gep0, i32 %data seq_cst + %rtn = atomicrmw min i32 addrspace(1)* %cast.gep0, i32 %data syncscope("workgroup") seq_cst %cast.rtn = bitcast i32 %rtn to float ret float %cast.rtn } @@ -2397,10 +2366,9 @@ define amdgpu_ps float @global_min_saddr_i32_rtn_neg128(i8 addrspace(1)* inreg %sbase, i32 %voffset, i32 %data) { ; GFX9-LABEL: global_min_saddr_i32_rtn_neg128: ; GFX9: ; %bb.0: -; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: s_waitcnt lgkmcnt(0) ; GFX9-NEXT: global_atomic_smin v0, v0, v1, s[2:3] offset:-128 glc ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: buffer_wbinvl1 ; GFX9-NEXT: ; return to shader part epilog ; ; GFX10-LABEL: global_min_saddr_i32_rtn_neg128: @@ -2410,7 +2378,6 @@ ; GFX10-NEXT: global_atomic_smin v0, v0, v1, s[2:3] offset:-128 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl0_inv -; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: ; return to shader part epilog ; ; GFX11-LABEL: global_min_saddr_i32_rtn_neg128: @@ -2420,13 +2387,12 @@ ; GFX11-NEXT: global_atomic_min_i32 v0, v0, v1, s[2:3] offset:-128 glc ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: buffer_gl0_inv -; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: ; return to shader part epilog %zext.offset = zext i32 %voffset to i64 %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset %gep1 = getelementptr inbounds i8, i8 addrspace(1)* %gep0, i64 -128 %cast.gep1 = bitcast i8 addrspace(1)* %gep1 to i32 addrspace(1)* - %rtn = atomicrmw min i32 addrspace(1)* %cast.gep1, i32 %data seq_cst + %rtn = atomicrmw min i32 addrspace(1)* %cast.gep1, i32 %data syncscope("workgroup") seq_cst %cast.rtn = bitcast i32 %rtn to float ret float %cast.rtn } @@ -2434,10 +2400,8 
@@ define amdgpu_ps void @global_min_saddr_i32_nortn(i8 addrspace(1)* inreg %sbase, i32 %voffset, i32 %data) { ; GFX9-LABEL: global_min_saddr_i32_nortn: ; GFX9: ; %bb.0: -; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: s_waitcnt lgkmcnt(0) ; GFX9-NEXT: global_atomic_smin v0, v1, s[2:3] -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: buffer_wbinvl1 ; GFX9-NEXT: s_endpgm ; ; GFX10-LABEL: global_min_saddr_i32_nortn: @@ -2447,7 +2411,6 @@ ; GFX10-NEXT: global_atomic_smin v0, v1, s[2:3] ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX10-NEXT: buffer_gl0_inv -; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: s_endpgm ; ; GFX11-LABEL: global_min_saddr_i32_nortn: @@ -2457,22 +2420,19 @@ ; GFX11-NEXT: global_atomic_min_i32 v0, v1, s[2:3] ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX11-NEXT: buffer_gl0_inv -; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: s_endpgm %zext.offset = zext i32 %voffset to i64 %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset %cast.gep0 = bitcast i8 addrspace(1)* %gep0 to i32 addrspace(1)* - %unused = atomicrmw min i32 addrspace(1)* %cast.gep0, i32 %data seq_cst + %unused = atomicrmw min i32 addrspace(1)* %cast.gep0, i32 %data syncscope("workgroup") seq_cst ret void } define amdgpu_ps void @global_min_saddr_i32_nortn_neg128(i8 addrspace(1)* inreg %sbase, i32 %voffset, i32 %data) { ; GFX9-LABEL: global_min_saddr_i32_nortn_neg128: ; GFX9: ; %bb.0: -; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: s_waitcnt lgkmcnt(0) ; GFX9-NEXT: global_atomic_smin v0, v1, s[2:3] offset:-128 -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: buffer_wbinvl1 ; GFX9-NEXT: s_endpgm ; ; GFX10-LABEL: global_min_saddr_i32_nortn_neg128: @@ -2482,7 +2442,6 @@ ; GFX10-NEXT: global_atomic_smin v0, v1, s[2:3] offset:-128 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX10-NEXT: buffer_gl0_inv -; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: s_endpgm ; ; GFX11-LABEL: global_min_saddr_i32_nortn_neg128: @@ -2492,23 +2451,21 @@ ; GFX11-NEXT: global_atomic_min_i32 v0, v1, s[2:3] offset:-128 ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX11-NEXT: buffer_gl0_inv -; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: s_endpgm %zext.offset = zext i32 %voffset to i64 %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset %gep1 = getelementptr inbounds i8, i8 addrspace(1)* %gep0, i64 -128 %cast.gep1 = bitcast i8 addrspace(1)* %gep1 to i32 addrspace(1)* - %unused = atomicrmw min i32 addrspace(1)* %cast.gep1, i32 %data seq_cst + %unused = atomicrmw min i32 addrspace(1)* %cast.gep1, i32 %data syncscope("workgroup") seq_cst ret void } define amdgpu_ps <2 x float> @global_min_saddr_i64_rtn(i8 addrspace(1)* inreg %sbase, i32 %voffset, i64 %data) { ; GFX9-LABEL: global_min_saddr_i64_rtn: ; GFX9: ; %bb.0: -; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: s_waitcnt lgkmcnt(0) ; GFX9-NEXT: global_atomic_smin_x2 v[0:1], v0, v[1:2], s[2:3] glc ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: buffer_wbinvl1 ; GFX9-NEXT: ; return to shader part epilog ; ; GFX10-LABEL: global_min_saddr_i64_rtn: @@ -2518,7 +2475,6 @@ ; GFX10-NEXT: global_atomic_smin_x2 v[0:1], v0, v[1:2], s[2:3] glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl0_inv -; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: ; return to shader part epilog ; ; GFX11-LABEL: global_min_saddr_i64_rtn: @@ -2528,12 +2484,11 @@ ; GFX11-NEXT: global_atomic_min_i64 v[0:1], v0, v[1:2], s[2:3] glc ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: buffer_gl0_inv -; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: ; return to shader part epilog %zext.offset = zext 
i32 %voffset to i64 %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset %cast.gep0 = bitcast i8 addrspace(1)* %gep0 to i64 addrspace(1)* - %rtn = atomicrmw min i64 addrspace(1)* %cast.gep0, i64 %data seq_cst + %rtn = atomicrmw min i64 addrspace(1)* %cast.gep0, i64 %data syncscope("workgroup") seq_cst %cast.rtn = bitcast i64 %rtn to <2 x float> ret <2 x float> %cast.rtn } @@ -2541,10 +2496,9 @@ define amdgpu_ps <2 x float> @global_min_saddr_i64_rtn_neg128(i8 addrspace(1)* inreg %sbase, i32 %voffset, i64 %data) { ; GFX9-LABEL: global_min_saddr_i64_rtn_neg128: ; GFX9: ; %bb.0: -; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: s_waitcnt lgkmcnt(0) ; GFX9-NEXT: global_atomic_smin_x2 v[0:1], v0, v[1:2], s[2:3] offset:-128 glc ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: buffer_wbinvl1 ; GFX9-NEXT: ; return to shader part epilog ; ; GFX10-LABEL: global_min_saddr_i64_rtn_neg128: @@ -2554,7 +2508,6 @@ ; GFX10-NEXT: global_atomic_smin_x2 v[0:1], v0, v[1:2], s[2:3] offset:-128 glc ; GFX10-NEXT: s_waitcnt vmcnt(0) ; GFX10-NEXT: buffer_gl0_inv -; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: ; return to shader part epilog ; ; GFX11-LABEL: global_min_saddr_i64_rtn_neg128: @@ -2564,13 +2517,12 @@ ; GFX11-NEXT: global_atomic_min_i64 v[0:1], v0, v[1:2], s[2:3] offset:-128 glc ; GFX11-NEXT: s_waitcnt vmcnt(0) ; GFX11-NEXT: buffer_gl0_inv -; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: ; return to shader part epilog %zext.offset = zext i32 %voffset to i64 %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset %gep1 = getelementptr inbounds i8, i8 addrspace(1)* %gep0, i64 -128 %cast.gep1 = bitcast i8 addrspace(1)* %gep1 to i64 addrspace(1)* - %rtn = atomicrmw min i64 addrspace(1)* %cast.gep1, i64 %data seq_cst + %rtn = atomicrmw min i64 addrspace(1)* %cast.gep1, i64 %data syncscope("workgroup") seq_cst %cast.rtn = bitcast i64 %rtn to <2 x float> ret <2 x float> %cast.rtn } @@ -2578,10 +2530,8 @@ define amdgpu_ps void @global_min_saddr_i64_nortn(i8 addrspace(1)* inreg %sbase, i32 %voffset, i64 %data) { ; GFX9-LABEL: global_min_saddr_i64_nortn: ; GFX9: ; %bb.0: -; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: s_waitcnt lgkmcnt(0) ; GFX9-NEXT: global_atomic_smin_x2 v0, v[1:2], s[2:3] -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: buffer_wbinvl1 ; GFX9-NEXT: s_endpgm ; ; GFX10-LABEL: global_min_saddr_i64_nortn: @@ -2591,7 +2541,6 @@ ; GFX10-NEXT: global_atomic_smin_x2 v0, v[1:2], s[2:3] ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX10-NEXT: buffer_gl0_inv -; GFX10-NEXT: buffer_gl1_inv ; GFX10-NEXT: s_endpgm ; ; GFX11-LABEL: global_min_saddr_i64_nortn: @@ -2601,22 +2550,19 @@ ; GFX11-NEXT: global_atomic_min_i64 v0, v[1:2], s[2:3] ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0 ; GFX11-NEXT: buffer_gl0_inv -; GFX11-NEXT: buffer_gl1_inv ; GFX11-NEXT: s_endpgm %zext.offset = zext i32 %voffset to i64 %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset %cast.gep0 = bitcast i8 addrspace(1)* %gep0 to i64 addrspace(1)* - %unused = atomicrmw min i64 addrspace(1)* %cast.gep0, i64 %data seq_cst + %unused = atomicrmw min i64 addrspace(1)* %cast.gep0, i64 %data syncscope("workgroup") seq_cst ret void } define amdgpu_ps void @global_min_saddr_i64_nortn_neg128(i8 addrspace(1)* inreg %sbase, i32 %voffset, i64 %data) { ; GFX9-LABEL: global_min_saddr_i64_nortn_neg128: ; GFX9: ; %bb.0: -; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: s_waitcnt lgkmcnt(0) ; GFX9-NEXT: global_atomic_smin_x2 v0, v[1:2], s[2:3] offset:-128 -; GFX9-NEXT: s_waitcnt vmcnt(0) 
-; GFX9-NEXT: buffer_wbinvl1
 ; GFX9-NEXT: s_endpgm
 ;
 ; GFX10-LABEL: global_min_saddr_i64_nortn_neg128:
@@ -2626,7 +2572,6 @@
 ; GFX10-NEXT: global_atomic_smin_x2 v0, v[1:2], s[2:3] offset:-128
 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
 ; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: buffer_gl1_inv
 ; GFX10-NEXT: s_endpgm
 ;
 ; GFX11-LABEL: global_min_saddr_i64_nortn_neg128:
@@ -2636,13 +2581,12 @@
 ; GFX11-NEXT: global_atomic_min_i64 v0, v[1:2], s[2:3] offset:-128
 ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
 ; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: buffer_gl1_inv
 ; GFX11-NEXT: s_endpgm
 %zext.offset = zext i32 %voffset to i64
 %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
 %gep1 = getelementptr inbounds i8, i8 addrspace(1)* %gep0, i64 -128
 %cast.gep1 = bitcast i8 addrspace(1)* %gep1 to i64 addrspace(1)*
- %unused = atomicrmw min i64 addrspace(1)* %cast.gep1, i64 %data seq_cst
+ %unused = atomicrmw min i64 addrspace(1)* %cast.gep1, i64 %data syncscope("workgroup") seq_cst
 ret void
 }
@@ -2653,10 +2597,9 @@
 define amdgpu_ps float @global_umax_saddr_i32_rtn(i8 addrspace(1)* inreg %sbase, i32 %voffset, i32 %data) {
 ; GFX9-LABEL: global_umax_saddr_i32_rtn:
 ; GFX9: ; %bb.0:
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT: global_atomic_umax v0, v0, v1, s[2:3] glc
 ; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1
 ; GFX9-NEXT: ; return to shader part epilog
 ;
 ; GFX10-LABEL: global_umax_saddr_i32_rtn:
@@ -2666,7 +2609,6 @@
 ; GFX10-NEXT: global_atomic_umax v0, v0, v1, s[2:3] glc
 ; GFX10-NEXT: s_waitcnt vmcnt(0)
 ; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: buffer_gl1_inv
 ; GFX10-NEXT: ; return to shader part epilog
 ;
 ; GFX11-LABEL: global_umax_saddr_i32_rtn:
@@ -2676,12 +2618,11 @@
 ; GFX11-NEXT: global_atomic_max_u32 v0, v0, v1, s[2:3] glc
 ; GFX11-NEXT: s_waitcnt vmcnt(0)
 ; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: buffer_gl1_inv
 ; GFX11-NEXT: ; return to shader part epilog
 %zext.offset = zext i32 %voffset to i64
 %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
 %cast.gep0 = bitcast i8 addrspace(1)* %gep0 to i32 addrspace(1)*
- %rtn = atomicrmw umax i32 addrspace(1)* %cast.gep0, i32 %data seq_cst
+ %rtn = atomicrmw umax i32 addrspace(1)* %cast.gep0, i32 %data syncscope("workgroup") seq_cst
 %cast.rtn = bitcast i32 %rtn to float
 ret float %cast.rtn
 }
@@ -2689,10 +2630,9 @@
 define amdgpu_ps float @global_umax_saddr_i32_rtn_neg128(i8 addrspace(1)* inreg %sbase, i32 %voffset, i32 %data) {
 ; GFX9-LABEL: global_umax_saddr_i32_rtn_neg128:
 ; GFX9: ; %bb.0:
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT: global_atomic_umax v0, v0, v1, s[2:3] offset:-128 glc
 ; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1
 ; GFX9-NEXT: ; return to shader part epilog
 ;
 ; GFX10-LABEL: global_umax_saddr_i32_rtn_neg128:
@@ -2702,7 +2642,6 @@
 ; GFX10-NEXT: global_atomic_umax v0, v0, v1, s[2:3] offset:-128 glc
 ; GFX10-NEXT: s_waitcnt vmcnt(0)
 ; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: buffer_gl1_inv
 ; GFX10-NEXT: ; return to shader part epilog
 ;
 ; GFX11-LABEL: global_umax_saddr_i32_rtn_neg128:
@@ -2712,13 +2651,12 @@
 ; GFX11-NEXT: global_atomic_max_u32 v0, v0, v1, s[2:3] offset:-128 glc
 ; GFX11-NEXT: s_waitcnt vmcnt(0)
 ; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: buffer_gl1_inv
 ; GFX11-NEXT: ; return to shader part epilog
 %zext.offset = zext i32 %voffset to i64
 %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
 %gep1 = getelementptr inbounds i8, i8 addrspace(1)* %gep0, i64 -128
 %cast.gep1 = bitcast i8 addrspace(1)* %gep1 to i32 addrspace(1)*
- %rtn = atomicrmw umax i32 addrspace(1)* %cast.gep1, i32 %data seq_cst
+ %rtn = atomicrmw umax i32 addrspace(1)* %cast.gep1, i32 %data syncscope("workgroup") seq_cst
 %cast.rtn = bitcast i32 %rtn to float
 ret float %cast.rtn
 }
@@ -2726,10 +2664,8 @@
 define amdgpu_ps void @global_umax_saddr_i32_nortn(i8 addrspace(1)* inreg %sbase, i32 %voffset, i32 %data) {
 ; GFX9-LABEL: global_umax_saddr_i32_nortn:
 ; GFX9: ; %bb.0:
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT: global_atomic_umax v0, v1, s[2:3]
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1
 ; GFX9-NEXT: s_endpgm
 ;
 ; GFX10-LABEL: global_umax_saddr_i32_nortn:
@@ -2739,7 +2675,6 @@
 ; GFX10-NEXT: global_atomic_umax v0, v1, s[2:3]
 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
 ; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: buffer_gl1_inv
 ; GFX10-NEXT: s_endpgm
 ;
 ; GFX11-LABEL: global_umax_saddr_i32_nortn:
@@ -2749,22 +2684,19 @@
 ; GFX11-NEXT: global_atomic_max_u32 v0, v1, s[2:3]
 ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
 ; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: buffer_gl1_inv
 ; GFX11-NEXT: s_endpgm
 %zext.offset = zext i32 %voffset to i64
 %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
 %cast.gep0 = bitcast i8 addrspace(1)* %gep0 to i32 addrspace(1)*
- %unused = atomicrmw umax i32 addrspace(1)* %cast.gep0, i32 %data seq_cst
+ %unused = atomicrmw umax i32 addrspace(1)* %cast.gep0, i32 %data syncscope("workgroup") seq_cst
 ret void
 }

 define amdgpu_ps void @global_umax_saddr_i32_nortn_neg128(i8 addrspace(1)* inreg %sbase, i32 %voffset, i32 %data) {
 ; GFX9-LABEL: global_umax_saddr_i32_nortn_neg128:
 ; GFX9: ; %bb.0:
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT: global_atomic_umax v0, v1, s[2:3] offset:-128
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1
 ; GFX9-NEXT: s_endpgm
 ;
 ; GFX10-LABEL: global_umax_saddr_i32_nortn_neg128:
@@ -2774,7 +2706,6 @@
 ; GFX10-NEXT: global_atomic_umax v0, v1, s[2:3] offset:-128
 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
 ; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: buffer_gl1_inv
 ; GFX10-NEXT: s_endpgm
 ;
 ; GFX11-LABEL: global_umax_saddr_i32_nortn_neg128:
@@ -2784,23 +2715,21 @@
 ; GFX11-NEXT: global_atomic_max_u32 v0, v1, s[2:3] offset:-128
 ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
 ; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: buffer_gl1_inv
 ; GFX11-NEXT: s_endpgm
 %zext.offset = zext i32 %voffset to i64
 %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
 %gep1 = getelementptr inbounds i8, i8 addrspace(1)* %gep0, i64 -128
 %cast.gep1 = bitcast i8 addrspace(1)* %gep1 to i32 addrspace(1)*
- %unused = atomicrmw umax i32 addrspace(1)* %cast.gep1, i32 %data seq_cst
+ %unused = atomicrmw umax i32 addrspace(1)* %cast.gep1, i32 %data syncscope("workgroup") seq_cst
 ret void
 }

 define amdgpu_ps <2 x float> @global_umax_saddr_i64_rtn(i8 addrspace(1)* inreg %sbase, i32 %voffset, i64 %data) {
 ; GFX9-LABEL: global_umax_saddr_i64_rtn:
 ; GFX9: ; %bb.0:
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT: global_atomic_umax_x2 v[0:1], v0, v[1:2], s[2:3] glc
 ; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1
 ; GFX9-NEXT: ; return to shader part epilog
 ;
 ; GFX10-LABEL: global_umax_saddr_i64_rtn:
@@ -2810,7 +2739,6 @@
 ; GFX10-NEXT: global_atomic_umax_x2 v[0:1], v0, v[1:2], s[2:3] glc
 ; GFX10-NEXT: s_waitcnt vmcnt(0)
 ; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: buffer_gl1_inv
 ; GFX10-NEXT: ; return to shader part epilog
 ;
 ; GFX11-LABEL: global_umax_saddr_i64_rtn:
@@ -2820,12 +2748,11 @@
 ; GFX11-NEXT: global_atomic_max_u64 v[0:1], v0, v[1:2], s[2:3] glc
 ; GFX11-NEXT: s_waitcnt vmcnt(0)
 ; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: buffer_gl1_inv
 ; GFX11-NEXT: ; return to shader part epilog
 %zext.offset = zext i32 %voffset to i64
 %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
 %cast.gep0 = bitcast i8 addrspace(1)* %gep0 to i64 addrspace(1)*
- %rtn = atomicrmw umax i64 addrspace(1)* %cast.gep0, i64 %data seq_cst
+ %rtn = atomicrmw umax i64 addrspace(1)* %cast.gep0, i64 %data syncscope("workgroup") seq_cst
 %cast.rtn = bitcast i64 %rtn to <2 x float>
 ret <2 x float> %cast.rtn
 }
@@ -2833,10 +2760,9 @@
 define amdgpu_ps <2 x float> @global_umax_saddr_i64_rtn_neg128(i8 addrspace(1)* inreg %sbase, i32 %voffset, i64 %data) {
 ; GFX9-LABEL: global_umax_saddr_i64_rtn_neg128:
 ; GFX9: ; %bb.0:
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT: global_atomic_umax_x2 v[0:1], v0, v[1:2], s[2:3] offset:-128 glc
 ; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1
 ; GFX9-NEXT: ; return to shader part epilog
 ;
 ; GFX10-LABEL: global_umax_saddr_i64_rtn_neg128:
@@ -2846,7 +2772,6 @@
 ; GFX10-NEXT: global_atomic_umax_x2 v[0:1], v0, v[1:2], s[2:3] offset:-128 glc
 ; GFX10-NEXT: s_waitcnt vmcnt(0)
 ; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: buffer_gl1_inv
 ; GFX10-NEXT: ; return to shader part epilog
 ;
 ; GFX11-LABEL: global_umax_saddr_i64_rtn_neg128:
@@ -2856,13 +2781,12 @@
 ; GFX11-NEXT: global_atomic_max_u64 v[0:1], v0, v[1:2], s[2:3] offset:-128 glc
 ; GFX11-NEXT: s_waitcnt vmcnt(0)
 ; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: buffer_gl1_inv
 ; GFX11-NEXT: ; return to shader part epilog
 %zext.offset = zext i32 %voffset to i64
 %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
 %gep1 = getelementptr inbounds i8, i8 addrspace(1)* %gep0, i64 -128
 %cast.gep1 = bitcast i8 addrspace(1)* %gep1 to i64 addrspace(1)*
- %rtn = atomicrmw umax i64 addrspace(1)* %cast.gep1, i64 %data seq_cst
+ %rtn = atomicrmw umax i64 addrspace(1)* %cast.gep1, i64 %data syncscope("workgroup") seq_cst
 %cast.rtn = bitcast i64 %rtn to <2 x float>
 ret <2 x float> %cast.rtn
 }
@@ -2870,10 +2794,8 @@
 define amdgpu_ps void @global_umax_saddr_i64_nortn(i8 addrspace(1)* inreg %sbase, i32 %voffset, i64 %data) {
 ; GFX9-LABEL: global_umax_saddr_i64_nortn:
 ; GFX9: ; %bb.0:
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT: global_atomic_umax_x2 v0, v[1:2], s[2:3]
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1
 ; GFX9-NEXT: s_endpgm
 ;
 ; GFX10-LABEL: global_umax_saddr_i64_nortn:
@@ -2883,7 +2805,6 @@
 ; GFX10-NEXT: global_atomic_umax_x2 v0, v[1:2], s[2:3]
 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
 ; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: buffer_gl1_inv
 ; GFX10-NEXT: s_endpgm
 ;
 ; GFX11-LABEL: global_umax_saddr_i64_nortn:
@@ -2893,22 +2814,19 @@
 ; GFX11-NEXT: global_atomic_max_u64 v0, v[1:2], s[2:3]
 ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
 ; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: buffer_gl1_inv
 ; GFX11-NEXT: s_endpgm
 %zext.offset = zext i32 %voffset to i64
 %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
 %cast.gep0 = bitcast i8 addrspace(1)* %gep0 to i64 addrspace(1)*
- %unused = atomicrmw umax i64 addrspace(1)* %cast.gep0, i64 %data seq_cst
+ %unused = atomicrmw umax i64 addrspace(1)* %cast.gep0, i64 %data syncscope("workgroup") seq_cst
 ret void
 }

 define amdgpu_ps void @global_umax_saddr_i64_nortn_neg128(i8 addrspace(1)* inreg %sbase, i32 %voffset, i64 %data) {
 ; GFX9-LABEL: global_umax_saddr_i64_nortn_neg128:
 ; GFX9: ; %bb.0:
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT: global_atomic_umax_x2 v0, v[1:2], s[2:3] offset:-128
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1
 ; GFX9-NEXT: s_endpgm
 ;
 ; GFX10-LABEL: global_umax_saddr_i64_nortn_neg128:
@@ -2918,7 +2836,6 @@
 ; GFX10-NEXT: global_atomic_umax_x2 v0, v[1:2], s[2:3] offset:-128
 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
 ; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: buffer_gl1_inv
 ; GFX10-NEXT: s_endpgm
 ;
 ; GFX11-LABEL: global_umax_saddr_i64_nortn_neg128:
@@ -2928,13 +2845,12 @@
 ; GFX11-NEXT: global_atomic_max_u64 v0, v[1:2], s[2:3] offset:-128
 ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
 ; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: buffer_gl1_inv
 ; GFX11-NEXT: s_endpgm
 %zext.offset = zext i32 %voffset to i64
 %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
 %gep1 = getelementptr inbounds i8, i8 addrspace(1)* %gep0, i64 -128
 %cast.gep1 = bitcast i8 addrspace(1)* %gep1 to i64 addrspace(1)*
- %unused = atomicrmw umax i64 addrspace(1)* %cast.gep1, i64 %data seq_cst
+ %unused = atomicrmw umax i64 addrspace(1)* %cast.gep1, i64 %data syncscope("workgroup") seq_cst
 ret void
 }
@@ -2945,10 +2861,9 @@
 define amdgpu_ps float @global_umin_saddr_i32_rtn(i8 addrspace(1)* inreg %sbase, i32 %voffset, i32 %data) {
 ; GFX9-LABEL: global_umin_saddr_i32_rtn:
 ; GFX9: ; %bb.0:
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT: global_atomic_umin v0, v0, v1, s[2:3] glc
 ; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1
 ; GFX9-NEXT: ; return to shader part epilog
 ;
 ; GFX10-LABEL: global_umin_saddr_i32_rtn:
@@ -2958,7 +2873,6 @@
 ; GFX10-NEXT: global_atomic_umin v0, v0, v1, s[2:3] glc
 ; GFX10-NEXT: s_waitcnt vmcnt(0)
 ; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: buffer_gl1_inv
 ; GFX10-NEXT: ; return to shader part epilog
 ;
 ; GFX11-LABEL: global_umin_saddr_i32_rtn:
@@ -2968,12 +2882,11 @@
 ; GFX11-NEXT: global_atomic_min_u32 v0, v0, v1, s[2:3] glc
 ; GFX11-NEXT: s_waitcnt vmcnt(0)
 ; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: buffer_gl1_inv
 ; GFX11-NEXT: ; return to shader part epilog
 %zext.offset = zext i32 %voffset to i64
 %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
 %cast.gep0 = bitcast i8 addrspace(1)* %gep0 to i32 addrspace(1)*
- %rtn = atomicrmw umin i32 addrspace(1)* %cast.gep0, i32 %data seq_cst
+ %rtn = atomicrmw umin i32 addrspace(1)* %cast.gep0, i32 %data syncscope("workgroup") seq_cst
 %cast.rtn = bitcast i32 %rtn to float
 ret float %cast.rtn
 }
@@ -2981,10 +2894,9 @@
 define amdgpu_ps float @global_umin_saddr_i32_rtn_neg128(i8 addrspace(1)* inreg %sbase, i32 %voffset, i32 %data) {
 ; GFX9-LABEL: global_umin_saddr_i32_rtn_neg128:
 ; GFX9: ; %bb.0:
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT: global_atomic_umin v0, v0, v1, s[2:3] offset:-128 glc
 ; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1
 ; GFX9-NEXT: ; return to shader part epilog
 ;
 ; GFX10-LABEL: global_umin_saddr_i32_rtn_neg128:
@@ -2994,7 +2906,6 @@
 ; GFX10-NEXT: global_atomic_umin v0, v0, v1, s[2:3] offset:-128 glc
 ; GFX10-NEXT: s_waitcnt vmcnt(0)
 ; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: buffer_gl1_inv
 ; GFX10-NEXT: ; return to shader part epilog
 ;
 ; GFX11-LABEL: global_umin_saddr_i32_rtn_neg128:
@@ -3004,13 +2915,12 @@
 ; GFX11-NEXT: global_atomic_min_u32 v0, v0, v1, s[2:3] offset:-128 glc
 ; GFX11-NEXT: s_waitcnt vmcnt(0)
 ; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: buffer_gl1_inv
 ; GFX11-NEXT: ; return to shader part epilog
 %zext.offset = zext i32 %voffset to i64
 %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
 %gep1 = getelementptr inbounds i8, i8 addrspace(1)* %gep0, i64 -128
 %cast.gep1 = bitcast i8 addrspace(1)* %gep1 to i32 addrspace(1)*
- %rtn = atomicrmw umin i32 addrspace(1)* %cast.gep1, i32 %data seq_cst
+ %rtn = atomicrmw umin i32 addrspace(1)* %cast.gep1, i32 %data syncscope("workgroup") seq_cst
 %cast.rtn = bitcast i32 %rtn to float
 ret float %cast.rtn
 }
@@ -3018,10 +2928,8 @@
 define amdgpu_ps void @global_umin_saddr_i32_nortn(i8 addrspace(1)* inreg %sbase, i32 %voffset, i32 %data) {
 ; GFX9-LABEL: global_umin_saddr_i32_nortn:
 ; GFX9: ; %bb.0:
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT: global_atomic_umin v0, v1, s[2:3]
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1
 ; GFX9-NEXT: s_endpgm
 ;
 ; GFX10-LABEL: global_umin_saddr_i32_nortn:
@@ -3031,7 +2939,6 @@
 ; GFX10-NEXT: global_atomic_umin v0, v1, s[2:3]
 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
 ; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: buffer_gl1_inv
 ; GFX10-NEXT: s_endpgm
 ;
 ; GFX11-LABEL: global_umin_saddr_i32_nortn:
@@ -3041,22 +2948,19 @@
 ; GFX11-NEXT: global_atomic_min_u32 v0, v1, s[2:3]
 ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
 ; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: buffer_gl1_inv
 ; GFX11-NEXT: s_endpgm
 %zext.offset = zext i32 %voffset to i64
 %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
 %cast.gep0 = bitcast i8 addrspace(1)* %gep0 to i32 addrspace(1)*
- %unused = atomicrmw umin i32 addrspace(1)* %cast.gep0, i32 %data seq_cst
+ %unused = atomicrmw umin i32 addrspace(1)* %cast.gep0, i32 %data syncscope("workgroup") seq_cst
 ret void
 }

 define amdgpu_ps void @global_umin_saddr_i32_nortn_neg128(i8 addrspace(1)* inreg %sbase, i32 %voffset, i32 %data) {
 ; GFX9-LABEL: global_umin_saddr_i32_nortn_neg128:
 ; GFX9: ; %bb.0:
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT: global_atomic_umin v0, v1, s[2:3] offset:-128
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1
 ; GFX9-NEXT: s_endpgm
 ;
 ; GFX10-LABEL: global_umin_saddr_i32_nortn_neg128:
@@ -3066,7 +2970,6 @@
 ; GFX10-NEXT: global_atomic_umin v0, v1, s[2:3] offset:-128
 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
 ; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: buffer_gl1_inv
 ; GFX10-NEXT: s_endpgm
 ;
 ; GFX11-LABEL: global_umin_saddr_i32_nortn_neg128:
@@ -3076,23 +2979,21 @@
 ; GFX11-NEXT: global_atomic_min_u32 v0, v1, s[2:3] offset:-128
 ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
 ; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: buffer_gl1_inv
 ; GFX11-NEXT: s_endpgm
 %zext.offset = zext i32 %voffset to i64
 %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
 %gep1 = getelementptr inbounds i8, i8 addrspace(1)* %gep0, i64 -128
 %cast.gep1 = bitcast i8 addrspace(1)* %gep1 to i32 addrspace(1)*
- %unused = atomicrmw umin i32 addrspace(1)* %cast.gep1, i32 %data seq_cst
+ %unused = atomicrmw umin i32 addrspace(1)* %cast.gep1, i32 %data syncscope("workgroup") seq_cst
 ret void
 }

 define amdgpu_ps <2 x float> @global_umin_saddr_i64_rtn(i8 addrspace(1)* inreg %sbase, i32 %voffset, i64 %data) {
 ; GFX9-LABEL: global_umin_saddr_i64_rtn:
 ; GFX9: ; %bb.0:
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT: global_atomic_umin_x2 v[0:1], v0, v[1:2], s[2:3] glc
 ; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1
 ; GFX9-NEXT: ; return to shader part epilog
 ;
 ; GFX10-LABEL: global_umin_saddr_i64_rtn:
@@ -3102,7 +3003,6 @@
 ; GFX10-NEXT: global_atomic_umin_x2 v[0:1], v0, v[1:2], s[2:3] glc
 ; GFX10-NEXT: s_waitcnt vmcnt(0)
 ; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: buffer_gl1_inv
 ; GFX10-NEXT: ; return to shader part epilog
 ;
 ; GFX11-LABEL: global_umin_saddr_i64_rtn:
@@ -3112,12 +3012,11 @@
 ; GFX11-NEXT: global_atomic_min_u64 v[0:1], v0, v[1:2], s[2:3] glc
 ; GFX11-NEXT: s_waitcnt vmcnt(0)
 ; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: buffer_gl1_inv
 ; GFX11-NEXT: ; return to shader part epilog
 %zext.offset = zext i32 %voffset to i64
 %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
 %cast.gep0 = bitcast i8 addrspace(1)* %gep0 to i64 addrspace(1)*
- %rtn = atomicrmw umin i64 addrspace(1)* %cast.gep0, i64 %data seq_cst
+ %rtn = atomicrmw umin i64 addrspace(1)* %cast.gep0, i64 %data syncscope("workgroup") seq_cst
 %cast.rtn = bitcast i64 %rtn to <2 x float>
 ret <2 x float> %cast.rtn
 }
@@ -3125,10 +3024,9 @@
 define amdgpu_ps <2 x float> @global_umin_saddr_i64_rtn_neg128(i8 addrspace(1)* inreg %sbase, i32 %voffset, i64 %data) {
 ; GFX9-LABEL: global_umin_saddr_i64_rtn_neg128:
 ; GFX9: ; %bb.0:
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT: global_atomic_umin_x2 v[0:1], v0, v[1:2], s[2:3] offset:-128 glc
 ; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1
 ; GFX9-NEXT: ; return to shader part epilog
 ;
 ; GFX10-LABEL: global_umin_saddr_i64_rtn_neg128:
@@ -3138,7 +3036,6 @@
 ; GFX10-NEXT: global_atomic_umin_x2 v[0:1], v0, v[1:2], s[2:3] offset:-128 glc
 ; GFX10-NEXT: s_waitcnt vmcnt(0)
 ; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: buffer_gl1_inv
 ; GFX10-NEXT: ; return to shader part epilog
 ;
 ; GFX11-LABEL: global_umin_saddr_i64_rtn_neg128:
@@ -3148,13 +3045,12 @@
 ; GFX11-NEXT: global_atomic_min_u64 v[0:1], v0, v[1:2], s[2:3] offset:-128 glc
 ; GFX11-NEXT: s_waitcnt vmcnt(0)
 ; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: buffer_gl1_inv
 ; GFX11-NEXT: ; return to shader part epilog
 %zext.offset = zext i32 %voffset to i64
 %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
 %gep1 = getelementptr inbounds i8, i8 addrspace(1)* %gep0, i64 -128
 %cast.gep1 = bitcast i8 addrspace(1)* %gep1 to i64 addrspace(1)*
- %rtn = atomicrmw umin i64 addrspace(1)* %cast.gep1, i64 %data seq_cst
+ %rtn = atomicrmw umin i64 addrspace(1)* %cast.gep1, i64 %data syncscope("workgroup") seq_cst
 %cast.rtn = bitcast i64 %rtn to <2 x float>
 ret <2 x float> %cast.rtn
 }
@@ -3162,10 +3058,8 @@
 define amdgpu_ps void @global_umin_saddr_i64_nortn(i8 addrspace(1)* inreg %sbase, i32 %voffset, i64 %data) {
 ; GFX9-LABEL: global_umin_saddr_i64_nortn:
 ; GFX9: ; %bb.0:
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT: global_atomic_umin_x2 v0, v[1:2], s[2:3]
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1
 ; GFX9-NEXT: s_endpgm
 ;
 ; GFX10-LABEL: global_umin_saddr_i64_nortn:
@@ -3175,7 +3069,6 @@
 ; GFX10-NEXT: global_atomic_umin_x2 v0, v[1:2], s[2:3]
 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
 ; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: buffer_gl1_inv
 ; GFX10-NEXT: s_endpgm
 ;
 ; GFX11-LABEL: global_umin_saddr_i64_nortn:
@@ -3185,22 +3078,19 @@
 ; GFX11-NEXT: global_atomic_min_u64 v0, v[1:2], s[2:3]
 ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
 ; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: buffer_gl1_inv
 ; GFX11-NEXT: s_endpgm
 %zext.offset = zext i32 %voffset to i64
 %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
 %cast.gep0 = bitcast i8 addrspace(1)* %gep0 to i64 addrspace(1)*
- %unused = atomicrmw umin i64 addrspace(1)* %cast.gep0, i64 %data seq_cst
+ %unused = atomicrmw umin i64 addrspace(1)* %cast.gep0, i64 %data syncscope("workgroup") seq_cst
 ret void
 }

 define amdgpu_ps void @global_umin_saddr_i64_nortn_neg128(i8 addrspace(1)* inreg %sbase, i32 %voffset, i64 %data) {
 ; GFX9-LABEL: global_umin_saddr_i64_nortn_neg128:
 ; GFX9: ; %bb.0:
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT: global_atomic_umin_x2 v0, v[1:2], s[2:3] offset:-128
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1
 ; GFX9-NEXT: s_endpgm
 ;
 ; GFX10-LABEL: global_umin_saddr_i64_nortn_neg128:
@@ -3210,7 +3100,6 @@
 ; GFX10-NEXT: global_atomic_umin_x2 v0, v[1:2], s[2:3] offset:-128
 ; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
 ; GFX10-NEXT: buffer_gl0_inv
-; GFX10-NEXT: buffer_gl1_inv
 ; GFX10-NEXT: s_endpgm
 ;
 ; GFX11-LABEL: global_umin_saddr_i64_nortn_neg128:
@@ -3220,13 +3109,12 @@
 ; GFX11-NEXT: global_atomic_min_u64 v0, v[1:2], s[2:3] offset:-128
 ; GFX11-NEXT: s_waitcnt_vscnt null, 0x0
 ; GFX11-NEXT: buffer_gl0_inv
-; GFX11-NEXT: buffer_gl1_inv
 ; GFX11-NEXT: s_endpgm
 %zext.offset = zext i32 %voffset to i64
 %gep0 = getelementptr inbounds i8, i8 addrspace(1)* %sbase, i64 %zext.offset
 %gep1 = getelementptr inbounds i8, i8 addrspace(1)* %gep0, i64 -128
 %cast.gep1 = bitcast i8 addrspace(1)* %gep1 to i64 addrspace(1)*
- %unused = atomicrmw umin i64 addrspace(1)* %cast.gep1, i64 %data seq_cst
+ %unused = atomicrmw umin i64 addrspace(1)* %cast.gep1, i64 %data syncscope("workgroup") seq_cst
 ret void
 }
diff --git a/llvm/test/CodeGen/AMDGPU/global_atomics.ll b/llvm/test/CodeGen/AMDGPU/global_atomics.ll
--- a/llvm/test/CodeGen/AMDGPU/global_atomics.ll
+++ b/llvm/test/CodeGen/AMDGPU/global_atomics.ll
@@ -1588,43 +1588,86 @@
 define amdgpu_kernel void @atomic_max_i32_offset(i32 addrspace(1)* %out, i32 %in) {
 ; SI-LABEL: atomic_max_i32_offset:
 ; SI: ; %bb.0: ; %entry
-; SI-NEXT: s_load_dword s4, s[0:1], 0xb
-; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
-; SI-NEXT: s_mov_b32 s3, 0xf000
-; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9
+; SI-NEXT: s_load_dword s2, s[0:1], 0xb
 ; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v0, s4
+; SI-NEXT: s_load_dword s3, s[4:5], 0x4
+; SI-NEXT: s_mov_b64 s[0:1], 0
+; SI-NEXT: s_mov_b32 s7, 0xf000
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: s_mov_b32 s6, -1
+; SI-NEXT: .LBB27_1: ; %atomicrmw.start
+; SI-NEXT: ; =>This Inner Loop Header: Depth=1
+; SI-NEXT: v_max_i32_e32 v0, s2, v1
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mov_b32_e32 v3, v1
+; SI-NEXT: v_mov_b32_e32 v2, v0
 ; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; SI-NEXT: buffer_atomic_smax v0, off, s[0:3], 0 offset:16
+; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 offset:16 glc
 ; SI-NEXT: s_waitcnt vmcnt(0)
 ; SI-NEXT: buffer_wbinvl1
+; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; SI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; SI-NEXT: v_mov_b32_e32 v1, v2
+; SI-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; SI-NEXT: s_cbranch_execnz .LBB27_1
+; SI-NEXT: ; %bb.2: ; %atomicrmw.end
 ; SI-NEXT: s_endpgm
 ;
 ; VI-LABEL: atomic_max_i32_offset:
 ; VI: ; %bb.0: ; %entry
-; VI-NEXT: s_load_dword s4, s[0:1], 0x2c
-; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; VI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x24
+; VI-NEXT: s_load_dword s6, s[0:1], 0x2c
+; VI-NEXT: s_mov_b64 s[4:5], 0
 ; VI-NEXT: s_mov_b32 s3, 0xf000
-; VI-NEXT: s_mov_b32 s2, -1
 ; VI-NEXT: s_waitcnt lgkmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v0, s4
+; VI-NEXT: s_load_dword s2, s[8:9], 0x10
+; VI-NEXT: s_add_u32 s0, s8, 16
+; VI-NEXT: s_addc_u32 s1, s9, 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s2
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: .LBB27_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_max_i32_e32 v0, s6, v1
+; VI-NEXT: v_mov_b32_e32 v3, v1
+; VI-NEXT: v_mov_b32_e32 v2, v0
 ; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; VI-NEXT: buffer_atomic_smax v0, off, s[0:3], 0 offset:16
+; VI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
 ; VI-NEXT: s_waitcnt vmcnt(0)
 ; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT: v_mov_b32_e32 v1, v2
+; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT: s_cbranch_execnz .LBB27_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
 ; VI-NEXT: s_endpgm
 ;
 ; GFX9-LABEL: atomic_max_i32_offset:
 ; GFX9: ; %bb.0: ; %entry
-; GFX9-NEXT: s_load_dword s4, s[0:1], 0x2c
 ; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
-; GFX9-NEXT: v_mov_b32_e32 v0, 0
+; GFX9-NEXT: s_load_dword s4, s[0:1], 0x2c
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
 ; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v1, s4
+; GFX9-NEXT: s_load_dword s5, s[2:3], 0x10
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v1, s5
+; GFX9-NEXT: .LBB27_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_max_i32_e32 v0, s4, v1
 ; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
-; GFX9-NEXT: global_atomic_smax v0, v1, s[2:3] offset:16
+; GFX9-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[2:3] offset:16 glc
 ; GFX9-NEXT: s_waitcnt vmcnt(0)
 ; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v1, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB27_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
 ; GFX9-NEXT: s_endpgm
 entry:
 %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
@@ -1645,10 +1688,9 @@
 ; SI-NEXT: s_mov_b32 s6, s2
 ; SI-NEXT: s_mov_b32 s7, s3
 ; SI-NEXT: v_mov_b32_e32 v0, s8
-; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: s_waitcnt lgkmcnt(0)
 ; SI-NEXT: buffer_atomic_smax v0, off, s[4:7], 0 offset:16 glc
 ; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_wbinvl1
 ; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
 ; SI-NEXT: s_endpgm
 ;
@@ -1664,10 +1706,9 @@
 ; VI-NEXT: s_mov_b32 s6, s2
 ; VI-NEXT: s_mov_b32 s7, s3
 ; VI-NEXT: v_mov_b32_e32 v0, s8
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
 ; VI-NEXT: buffer_atomic_smax v0, off, s[4:7], 0 offset:16 glc
 ; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
 ; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
 ; VI-NEXT: s_endpgm
 ;
@@ -1678,15 +1719,14 @@
 ; GFX9-NEXT: v_mov_b32_e32 v0, 0
 ; GFX9-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT: v_mov_b32_e32 v1, s2
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT: global_atomic_smax v1, v0, v1, s[4:5] offset:16 glc
 ; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
 ; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
 ; GFX9-NEXT: s_endpgm
 entry:
 %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
- %val = atomicrmw volatile max i32 addrspace(1)* %gep, i32 %in seq_cst
+ %val = atomicrmw volatile max i32 addrspace(1)* %gep, i32 %in syncscope("workgroup") seq_cst
 store i32 %val, i32 addrspace(1)* %out2
 ret void
 }
@@ -1704,10 +1744,8 @@
 ; SI-NEXT: v_mov_b32_e32 v2, s6
 ; SI-NEXT: v_mov_b32_e32 v0, s4
 ; SI-NEXT: v_mov_b32_e32 v1, s5
-; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: s_waitcnt lgkmcnt(0)
 ; SI-NEXT: buffer_atomic_smax v2, v[0:1], s[0:3], 0 addr64 offset:16
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_wbinvl1
 ; SI-NEXT: s_endpgm
 ;
 ; VI-LABEL: atomic_max_i32_addr64_offset:
@@ -1724,10 +1762,8 @@
 ; VI-NEXT: v_mov_b32_e32 v0, s0
 ; VI-NEXT: v_mov_b32_e32 v1, s1
 ; VI-NEXT: v_mov_b32_e32 v2, s6
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
 ; VI-NEXT: flat_atomic_smax v[0:1], v2
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
 ; VI-NEXT: s_endpgm
 ;
 ; GFX9-LABEL: atomic_max_i32_addr64_offset:
@@ -1741,15 +1777,13 @@
 ; GFX9-NEXT: s_add_u32 s0, s4, s0
 ; GFX9-NEXT: s_addc_u32 s1, s5, s1
 ; GFX9-NEXT: v_mov_b32_e32 v1, s6
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT: global_atomic_smax v0, v1, s[0:1] offset:16
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
 ; GFX9-NEXT: s_endpgm
 entry:
 %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
 %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
- %val = atomicrmw volatile max i32 addrspace(1)* %gep, i32 %in seq_cst
+ %val = atomicrmw volatile max i32 addrspace(1)* %gep, i32 %in syncscope("workgroup") seq_cst
 ret void
 }
@@ -1769,11 +1803,10 @@
 ; SI-NEXT: v_mov_b32_e32 v2, s2
 ; SI-NEXT: v_mov_b32_e32 v0, s8
 ; SI-NEXT: v_mov_b32_e32 v1, s9
-; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: s_waitcnt lgkmcnt(0)
 ; SI-NEXT: buffer_atomic_smax v2, v[0:1], s[4:7], 0 addr64 offset:16 glc
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_wbinvl1
 ; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_waitcnt vmcnt(0)
 ; SI-NEXT: buffer_store_dword v2, off, s[0:3], 0
 ; SI-NEXT: s_endpgm
 ;
@@ -1791,14 +1824,13 @@
 ; VI-NEXT: v_mov_b32_e32 v0, s0
 ; VI-NEXT: v_mov_b32_e32 v1, s1
 ; VI-NEXT: v_mov_b32_e32 v2, s8
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
 ; VI-NEXT: flat_atomic_smax v0, v[0:1], v2 glc
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
 ; VI-NEXT: s_mov_b32 s3, 0xf000
 ; VI-NEXT: s_mov_b32 s2, -1
 ; VI-NEXT: s_mov_b32 s0, s6
 ; VI-NEXT: s_mov_b32 s1, s7
+; VI-NEXT: s_waitcnt vmcnt(0)
 ; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
 ; VI-NEXT: s_endpgm
 ;
@@ -1813,16 +1845,15 @@
 ; GFX9-NEXT: s_add_u32 s0, s4, s0
 ; GFX9-NEXT: s_addc_u32 s1, s5, s1
 ; GFX9-NEXT: v_mov_b32_e32 v1, s8
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT: global_atomic_smax v1, v0, v1, s[0:1] offset:16 glc
 ; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
 ; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
 ; GFX9-NEXT: s_endpgm
 entry:
 %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
 %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
- %val = atomicrmw volatile max i32 addrspace(1)* %gep, i32 %in seq_cst
+ %val = atomicrmw volatile max i32 addrspace(1)* %gep, i32 %in syncscope("workgroup") seq_cst
 store i32 %val, i32 addrspace(1)* %out2
 ret void
 }
@@ -1836,10 +1867,8 @@
 ; SI-NEXT: s_mov_b32 s2, -1
 ; SI-NEXT: s_waitcnt lgkmcnt(0)
 ; SI-NEXT: v_mov_b32_e32 v0, s4
-; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: s_waitcnt lgkmcnt(0)
 ; SI-NEXT: buffer_atomic_smax v0, off, s[0:3], 0
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_wbinvl1
 ; SI-NEXT: s_endpgm
 ;
 ; VI-LABEL: atomic_max_i32:
@@ -1850,10 +1879,8 @@
 ; VI-NEXT: s_mov_b32 s2, -1
 ; VI-NEXT: s_waitcnt lgkmcnt(0)
 ; VI-NEXT: v_mov_b32_e32 v0, s4
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
 ; VI-NEXT: buffer_atomic_smax v0, off, s[0:3], 0
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
 ; VI-NEXT: s_endpgm
 ;
 ; GFX9-LABEL: atomic_max_i32:
@@ -1863,13 +1890,11 @@
 ; GFX9-NEXT: v_mov_b32_e32 v0, 0
 ; GFX9-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT: v_mov_b32_e32 v1, s4
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT: global_atomic_smax v0, v1, s[2:3]
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
 ; GFX9-NEXT: s_endpgm
 entry:
- %val = atomicrmw volatile max i32 addrspace(1)* %out, i32 %in seq_cst
+ %val = atomicrmw volatile max i32 addrspace(1)* %out, i32 %in syncscope("workgroup") seq_cst
 ret void
 }
@@ -1884,12 +1909,11 @@
 ; SI-NEXT: s_mov_b32 s0, s4
 ; SI-NEXT: s_mov_b32 s1, s5
 ; SI-NEXT: v_mov_b32_e32 v0, s8
-; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: s_waitcnt lgkmcnt(0)
 ; SI-NEXT: buffer_atomic_smax v0, off, s[0:3], 0 glc
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_wbinvl1
 ; SI-NEXT: s_mov_b32 s0, s6
 ; SI-NEXT: s_mov_b32 s1, s7
+; SI-NEXT: s_waitcnt vmcnt(0)
 ; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
 ; SI-NEXT: s_endpgm
 ;
@@ -1903,12 +1927,11 @@
 ; VI-NEXT: s_mov_b32 s0, s4
 ; VI-NEXT: s_mov_b32 s1, s5
 ; VI-NEXT: v_mov_b32_e32 v0, s8
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
 ; VI-NEXT: buffer_atomic_smax v0, off, s[0:3], 0 glc
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
 ; VI-NEXT: s_mov_b32 s0, s6
 ; VI-NEXT: s_mov_b32 s1, s7
+; VI-NEXT: s_waitcnt vmcnt(0)
 ; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
 ; VI-NEXT: s_endpgm
 ;
@@ -1919,14 +1942,13 @@
 ; GFX9-NEXT: v_mov_b32_e32 v0, 0
 ; GFX9-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT: v_mov_b32_e32 v1, s2
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT: global_atomic_smax v1, v0, v1, s[4:5] glc
 ; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
 ; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
 ; GFX9-NEXT: s_endpgm
 entry:
- %val = atomicrmw volatile max i32 addrspace(1)* %out, i32 %in seq_cst
+ %val = atomicrmw volatile max i32 addrspace(1)* %out, i32 %in syncscope("workgroup") seq_cst
 store i32 %val, i32 addrspace(1)* %out2
 ret void
 }
@@ -1944,10 +1966,8 @@
 ; SI-NEXT: v_mov_b32_e32 v2, s6
 ; SI-NEXT: v_mov_b32_e32 v0, s4
 ; SI-NEXT: v_mov_b32_e32 v1, s5
-; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: s_waitcnt lgkmcnt(0)
 ; SI-NEXT: buffer_atomic_smax v2, v[0:1], s[0:3], 0 addr64
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_wbinvl1
 ; SI-NEXT: s_endpgm
 ;
 ; VI-LABEL: atomic_max_i32_addr64:
@@ -1962,10 +1982,8 @@
 ; VI-NEXT: v_mov_b32_e32 v0, s0
 ; VI-NEXT: v_mov_b32_e32 v1, s1
 ; VI-NEXT: v_mov_b32_e32 v2, s6
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
 ; VI-NEXT: flat_atomic_smax v[0:1], v2
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
 ; VI-NEXT: s_endpgm
 ;
 ; GFX9-LABEL: atomic_max_i32_addr64:
@@ -1979,14 +1997,12 @@
 ; GFX9-NEXT: s_add_u32 s0, s4, s0
 ; GFX9-NEXT: s_addc_u32 s1, s5, s1
 ; GFX9-NEXT: v_mov_b32_e32 v1, s6
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT: global_atomic_smax v0, v1, s[0:1]
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
 ; GFX9-NEXT: s_endpgm
 entry:
 %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
- %val = atomicrmw volatile max i32 addrspace(1)* %ptr, i32 %in seq_cst
+ %val = atomicrmw volatile max i32 addrspace(1)* %ptr, i32 %in syncscope("workgroup") seq_cst
 ret void
 }
@@ -2006,11 +2022,10 @@
 ; SI-NEXT: v_mov_b32_e32 v2, s2
 ; SI-NEXT: v_mov_b32_e32 v0, s8
 ; SI-NEXT: v_mov_b32_e32 v1, s9
-; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: s_waitcnt lgkmcnt(0)
 ; SI-NEXT: buffer_atomic_smax v2, v[0:1], s[4:7], 0 addr64 glc
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_wbinvl1
 ; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_waitcnt vmcnt(0)
 ; SI-NEXT: buffer_store_dword v2, off, s[0:3], 0
 ; SI-NEXT: s_endpgm
 ;
@@ -2026,14 +2041,13 @@
 ; VI-NEXT: v_mov_b32_e32 v0, s0
 ; VI-NEXT: v_mov_b32_e32 v1, s1
 ; VI-NEXT: v_mov_b32_e32 v2, s8
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
 ; VI-NEXT: flat_atomic_smax v0, v[0:1], v2 glc
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
 ; VI-NEXT: s_mov_b32 s3, 0xf000
 ; VI-NEXT: s_mov_b32 s2, -1
 ; VI-NEXT: s_mov_b32 s0, s6
 ; VI-NEXT: s_mov_b32 s1, s7
+; VI-NEXT: s_waitcnt vmcnt(0)
 ; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
 ; VI-NEXT: s_endpgm
 ;
@@ -2048,15 +2062,14 @@
 ; GFX9-NEXT: s_add_u32 s0, s4, s0
 ; GFX9-NEXT: s_addc_u32 s1, s5, s1
 ; GFX9-NEXT: v_mov_b32_e32 v1, s8
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT: global_atomic_smax v1, v0, v1, s[0:1] glc
 ; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
 ; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
 ; GFX9-NEXT: s_endpgm
 entry:
 %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
- %val = atomicrmw volatile max i32 addrspace(1)* %ptr, i32 %in seq_cst
+ %val = atomicrmw volatile max i32 addrspace(1)* %ptr, i32 %in syncscope("workgroup") seq_cst
 store i32 %val, i32 addrspace(1)* %out2
 ret void
 }
@@ -2070,10 +2083,8 @@
 ; SI-NEXT: s_mov_b32 s2, -1
 ; SI-NEXT: s_waitcnt lgkmcnt(0)
 ; SI-NEXT: v_mov_b32_e32 v0, s4
-; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: s_waitcnt lgkmcnt(0)
 ; SI-NEXT: buffer_atomic_umax v0, off, s[0:3], 0 offset:16
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_wbinvl1
 ; SI-NEXT: s_endpgm
 ;
 ; VI-LABEL: atomic_umax_i32_offset:
@@ -2084,10 +2095,8 @@
 ; VI-NEXT: s_mov_b32 s2, -1
 ; VI-NEXT: s_waitcnt lgkmcnt(0)
 ; VI-NEXT: v_mov_b32_e32 v0, s4
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
 ; VI-NEXT: buffer_atomic_umax v0, off, s[0:3], 0 offset:16
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
 ; VI-NEXT: s_endpgm
 ;
 ; GFX9-LABEL: atomic_umax_i32_offset:
@@ -2097,14 +2106,12 @@
 ; GFX9-NEXT: v_mov_b32_e32 v0, 0
 ; GFX9-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT: v_mov_b32_e32 v1, s4
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT: global_atomic_umax v0, v1, s[2:3] offset:16
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
 ; GFX9-NEXT: s_endpgm
 entry:
 %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
- %val = atomicrmw volatile umax i32 addrspace(1)* %gep, i32 %in seq_cst
+ %val = atomicrmw volatile umax i32 addrspace(1)* %gep, i32 %in syncscope("workgroup") seq_cst
 ret void
 }
@@ -2121,10 +2128,9 @@
 ; SI-NEXT: s_mov_b32 s6, s2
 ; SI-NEXT: s_mov_b32 s7, s3
 ; SI-NEXT: v_mov_b32_e32 v0, s8
-; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: s_waitcnt lgkmcnt(0)
 ; SI-NEXT: buffer_atomic_umax v0, off, s[4:7], 0 offset:16 glc
 ; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_wbinvl1
 ; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
 ; SI-NEXT: s_endpgm
 ;
@@ -2140,10 +2146,9 @@
 ; VI-NEXT: s_mov_b32 s6, s2
 ; VI-NEXT: s_mov_b32 s7, s3
 ; VI-NEXT: v_mov_b32_e32 v0, s8
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
 ; VI-NEXT: buffer_atomic_umax v0, off, s[4:7], 0 offset:16 glc
 ; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
 ; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
 ; VI-NEXT: s_endpgm
 ;
@@ -2154,15 +2159,14 @@
 ; GFX9-NEXT: v_mov_b32_e32 v0, 0
 ; GFX9-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT: v_mov_b32_e32 v1, s2
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT: global_atomic_umax v1, v0, v1, s[4:5] offset:16 glc
 ; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
 ; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
 ; GFX9-NEXT: s_endpgm
 entry:
 %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
- %val = atomicrmw volatile umax i32 addrspace(1)* %gep, i32 %in seq_cst
+ %val = atomicrmw volatile umax i32 addrspace(1)* %gep, i32 %in syncscope("workgroup") seq_cst
 store i32 %val, i32 addrspace(1)* %out2
 ret void
 }
@@ -2180,10 +2184,8 @@
 ; SI-NEXT: v_mov_b32_e32 v2, s6
 ; SI-NEXT: v_mov_b32_e32 v0, s4
 ; SI-NEXT: v_mov_b32_e32 v1, s5
-; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: s_waitcnt lgkmcnt(0)
 ; SI-NEXT: buffer_atomic_umax v2, v[0:1], s[0:3], 0 addr64 offset:16
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_wbinvl1
 ; SI-NEXT: s_endpgm
 ;
 ; VI-LABEL: atomic_umax_i32_addr64_offset:
@@ -2200,10 +2202,8 @@
 ; VI-NEXT: v_mov_b32_e32 v0, s0
 ; VI-NEXT: v_mov_b32_e32 v1, s1
 ; VI-NEXT: v_mov_b32_e32 v2, s6
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
 ; VI-NEXT: flat_atomic_umax v[0:1], v2
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
 ; VI-NEXT: s_endpgm
 ;
 ; GFX9-LABEL: atomic_umax_i32_addr64_offset:
@@ -2217,15 +2217,13 @@
 ; GFX9-NEXT: s_add_u32 s0, s4, s0
 ; GFX9-NEXT: s_addc_u32 s1, s5, s1
 ; GFX9-NEXT: v_mov_b32_e32 v1, s6
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT: global_atomic_umax v0, v1, s[0:1] offset:16
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
 ; GFX9-NEXT: s_endpgm
 entry:
 %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
 %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
- %val = atomicrmw volatile umax i32 addrspace(1)* %gep, i32 %in seq_cst
+ %val = atomicrmw volatile umax i32 addrspace(1)* %gep, i32 %in syncscope("workgroup") seq_cst
 ret void
 }
@@ -2245,11 +2243,10 @@
 ; SI-NEXT: v_mov_b32_e32 v2, s2
 ; SI-NEXT: v_mov_b32_e32 v0, s8
 ; SI-NEXT: v_mov_b32_e32 v1, s9
-; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: s_waitcnt lgkmcnt(0)
 ; SI-NEXT: buffer_atomic_umax v2, v[0:1], s[4:7], 0 addr64 offset:16 glc
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_wbinvl1
 ; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_waitcnt vmcnt(0)
 ; SI-NEXT: buffer_store_dword v2, off, s[0:3], 0
 ; SI-NEXT: s_endpgm
 ;
@@ -2267,14 +2264,13 @@
 ; VI-NEXT: v_mov_b32_e32 v0, s0
 ; VI-NEXT: v_mov_b32_e32 v1, s1
 ; VI-NEXT: v_mov_b32_e32 v2, s8
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
 ; VI-NEXT: flat_atomic_umax v0, v[0:1], v2 glc
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
 ; VI-NEXT: s_mov_b32 s3, 0xf000
 ; VI-NEXT: s_mov_b32 s2, -1
 ; VI-NEXT: s_mov_b32 s0, s6
 ; VI-NEXT: s_mov_b32 s1, s7
+; VI-NEXT: s_waitcnt vmcnt(0)
 ; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
 ; VI-NEXT: s_endpgm
 ;
@@ -2289,16 +2285,15 @@
 ; GFX9-NEXT: s_add_u32 s0, s4, s0
 ; GFX9-NEXT: s_addc_u32 s1, s5, s1
 ; GFX9-NEXT: v_mov_b32_e32 v1, s8
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT: global_atomic_umax v1, v0, v1, s[0:1] offset:16 glc
 ; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
 ; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
 ; GFX9-NEXT: s_endpgm
 entry:
 %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
 %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
- %val = atomicrmw volatile umax i32 addrspace(1)* %gep, i32 %in seq_cst
+ %val = atomicrmw volatile umax i32 addrspace(1)* %gep, i32 %in syncscope("workgroup") seq_cst
 store i32 %val, i32 addrspace(1)* %out2
 ret void
 }
@@ -2312,10 +2307,8 @@
 ; SI-NEXT: s_mov_b32 s2, -1
 ; SI-NEXT: s_waitcnt lgkmcnt(0)
 ; SI-NEXT: v_mov_b32_e32 v0, s4
-; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: s_waitcnt lgkmcnt(0)
 ; SI-NEXT: buffer_atomic_umax v0, off, s[0:3], 0
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_wbinvl1
 ; SI-NEXT: s_endpgm
 ;
 ; VI-LABEL: atomic_umax_i32:
@@ -2326,10 +2319,8 @@
 ; VI-NEXT: s_mov_b32 s2, -1
 ; VI-NEXT: s_waitcnt lgkmcnt(0)
 ; VI-NEXT: v_mov_b32_e32 v0, s4
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
 ; VI-NEXT: buffer_atomic_umax v0, off, s[0:3], 0
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
 ; VI-NEXT: s_endpgm
 ;
 ; GFX9-LABEL: atomic_umax_i32:
@@ -2339,13 +2330,11 @@
 ; GFX9-NEXT: v_mov_b32_e32 v0, 0
 ; GFX9-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT: v_mov_b32_e32 v1, s4
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT: global_atomic_umax v0, v1, s[2:3]
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
 ; GFX9-NEXT: s_endpgm
 entry:
- %val = atomicrmw volatile umax i32 addrspace(1)* %out, i32 %in seq_cst
+ %val = atomicrmw volatile umax i32 addrspace(1)* %out, i32 %in syncscope("workgroup") seq_cst
 ret void
 }
@@ -2360,12 +2349,11 @@
 ; SI-NEXT: s_mov_b32 s0, s4
 ; SI-NEXT: s_mov_b32 s1, s5
 ; SI-NEXT: v_mov_b32_e32 v0, s8
-; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: s_waitcnt lgkmcnt(0)
 ; SI-NEXT: buffer_atomic_umax v0, off, s[0:3], 0 glc
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_wbinvl1
 ; SI-NEXT: s_mov_b32 s0, s6
 ; SI-NEXT: s_mov_b32 s1, s7
+; SI-NEXT: s_waitcnt vmcnt(0)
 ; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
 ; SI-NEXT: s_endpgm
 ;
@@ -2379,12 +2367,11 @@
 ; VI-NEXT: s_mov_b32 s0, s4
 ; VI-NEXT: s_mov_b32 s1, s5
 ; VI-NEXT: v_mov_b32_e32 v0, s8
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
 ; VI-NEXT: buffer_atomic_umax v0, off, s[0:3], 0 glc
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
 ; VI-NEXT: s_mov_b32 s0, s6
 ; VI-NEXT: s_mov_b32 s1, s7
+; VI-NEXT: s_waitcnt vmcnt(0)
 ; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
 ; VI-NEXT: s_endpgm
 ;
@@ -2395,14 +2382,13 @@
 ; GFX9-NEXT: v_mov_b32_e32 v0, 0
 ; GFX9-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT: v_mov_b32_e32 v1, s2
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT: global_atomic_umax v1, v0, v1, s[4:5] glc
 ; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
 ; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
 ; GFX9-NEXT: s_endpgm
 entry:
- %val = atomicrmw volatile umax i32 addrspace(1)* %out, i32 %in seq_cst
+ %val = atomicrmw volatile umax i32 addrspace(1)* %out, i32 %in syncscope("workgroup") seq_cst
 store i32 %val, i32 addrspace(1)* %out2
 ret void
 }
@@ -2420,10 +2406,8 @@
 ; SI-NEXT: v_mov_b32_e32 v2, s6
 ; SI-NEXT: v_mov_b32_e32 v0, s4
 ; SI-NEXT: v_mov_b32_e32 v1, s5
-; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: s_waitcnt lgkmcnt(0)
 ; SI-NEXT: buffer_atomic_umax v2, v[0:1], s[0:3], 0 addr64
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_wbinvl1
 ; SI-NEXT: s_endpgm
 ;
 ; VI-LABEL: atomic_umax_i32_addr64:
@@ -2438,10 +2422,8 @@
 ; VI-NEXT: v_mov_b32_e32 v0, s0
 ; VI-NEXT: v_mov_b32_e32 v1, s1
 ; VI-NEXT: v_mov_b32_e32 v2, s6
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
 ; VI-NEXT: flat_atomic_umax v[0:1], v2
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
 ; VI-NEXT: s_endpgm
 ;
 ; GFX9-LABEL: atomic_umax_i32_addr64:
@@ -2455,14 +2437,12 @@
 ; GFX9-NEXT: s_add_u32 s0, s4, s0
 ; GFX9-NEXT: s_addc_u32 s1, s5, s1
 ; GFX9-NEXT: v_mov_b32_e32 v1, s6
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT: global_atomic_umax v0, v1, s[0:1]
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
 ; GFX9-NEXT: s_endpgm
 entry:
 %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
- %val = atomicrmw volatile umax i32 addrspace(1)* %ptr, i32 %in seq_cst
+ %val = atomicrmw volatile umax i32 addrspace(1)* %ptr, i32 %in syncscope("workgroup") seq_cst
 ret void
 }
@@ -2482,11 +2462,10 @@
 ; SI-NEXT: v_mov_b32_e32 v2, s2
 ; SI-NEXT: v_mov_b32_e32 v0, s8
 ; SI-NEXT: v_mov_b32_e32 v1, s9
-; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: s_waitcnt lgkmcnt(0)
 ; SI-NEXT: buffer_atomic_umax v2, v[0:1], s[4:7], 0 addr64 glc
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_wbinvl1
 ; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_waitcnt vmcnt(0)
 ; SI-NEXT: buffer_store_dword v2, off, s[0:3], 0
 ; SI-NEXT: s_endpgm
 ;
@@ -2502,14 +2481,13 @@
 ; VI-NEXT: v_mov_b32_e32 v0, s0
 ; VI-NEXT: v_mov_b32_e32 v1, s1
 ; VI-NEXT: v_mov_b32_e32 v2, s8
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
 ; VI-NEXT: flat_atomic_umax v0, v[0:1], v2 glc
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
 ; VI-NEXT: s_mov_b32 s3, 0xf000
 ; VI-NEXT: s_mov_b32 s2, -1
 ; VI-NEXT: s_mov_b32 s0, s6
 ; VI-NEXT: s_mov_b32 s1, s7
+; VI-NEXT: s_waitcnt vmcnt(0)
 ; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
 ; VI-NEXT: s_endpgm
 ;
@@ -2524,15 +2502,14 @@
 ; GFX9-NEXT: s_add_u32 s0, s4, s0
 ; GFX9-NEXT: s_addc_u32 s1, s5, s1
 ; GFX9-NEXT: v_mov_b32_e32 v1, s8
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT: global_atomic_umax v1, v0, v1, s[0:1] glc
 ; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
 ; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
 ; GFX9-NEXT: s_endpgm
 entry:
 %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
- %val = atomicrmw volatile umax i32 addrspace(1)* %ptr, i32 %in seq_cst
+ %val = atomicrmw volatile umax i32 addrspace(1)* %ptr, i32 %in syncscope("workgroup") seq_cst
 store i32 %val, i32 addrspace(1)* %out2
 ret void
 }
@@ -2546,10 +2523,8 @@
 ; SI-NEXT: s_mov_b32 s2, -1
 ; SI-NEXT: s_waitcnt lgkmcnt(0)
 ; SI-NEXT: v_mov_b32_e32 v0, s4
-; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: s_waitcnt lgkmcnt(0)
 ; SI-NEXT: buffer_atomic_smin v0, off, s[0:3], 0 offset:16
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_wbinvl1
 ; SI-NEXT: s_endpgm
 ;
 ; VI-LABEL: atomic_min_i32_offset:
@@ -2560,10 +2535,8 @@
 ; VI-NEXT: s_mov_b32 s2, -1
 ; VI-NEXT: s_waitcnt lgkmcnt(0)
 ; VI-NEXT: v_mov_b32_e32 v0, s4
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
 ; VI-NEXT: buffer_atomic_smin v0, off, s[0:3], 0 offset:16
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
 ; VI-NEXT: s_endpgm
 ;
 ; GFX9-LABEL: atomic_min_i32_offset:
@@ -2573,14 +2546,12 @@
 ; GFX9-NEXT: v_mov_b32_e32 v0, 0
 ; GFX9-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT: v_mov_b32_e32 v1, s4
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT: global_atomic_smin v0, v1, s[2:3] offset:16
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
 ; GFX9-NEXT: s_endpgm
 entry:
 %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
- %val = atomicrmw volatile min i32 addrspace(1)* %gep, i32 %in seq_cst
+ %val = atomicrmw volatile min i32 addrspace(1)* %gep, i32 %in syncscope("workgroup") seq_cst
 ret void
 }
@@ -2597,10 +2568,9 @@
 ; SI-NEXT: s_mov_b32 s6, s2
 ; SI-NEXT: s_mov_b32 s7, s3
 ; SI-NEXT: v_mov_b32_e32 v0, s8
-; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: s_waitcnt lgkmcnt(0)
 ; SI-NEXT: buffer_atomic_smin v0, off, s[4:7], 0 offset:16 glc
 ; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_wbinvl1
 ; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
 ; SI-NEXT: s_endpgm
 ;
@@ -2616,10 +2586,9 @@
 ; VI-NEXT: s_mov_b32 s6, s2
 ; VI-NEXT: s_mov_b32 s7, s3
 ; VI-NEXT: v_mov_b32_e32 v0, s8
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
 ; VI-NEXT: buffer_atomic_smin v0, off, s[4:7], 0 offset:16 glc
 ; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
 ; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
 ; VI-NEXT: s_endpgm
 ;
@@ -2630,15 +2599,14 @@
 ; GFX9-NEXT: v_mov_b32_e32 v0, 0
 ; GFX9-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT: v_mov_b32_e32 v1, s2
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT: global_atomic_smin v1, v0, v1, s[4:5] offset:16 glc
 ; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
 ; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
 ; GFX9-NEXT: s_endpgm
 entry:
 %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
- %val = atomicrmw volatile min i32 addrspace(1)* %gep, i32 %in seq_cst
+ %val = atomicrmw volatile min i32 addrspace(1)* %gep, i32 %in syncscope("workgroup") seq_cst
 store i32 %val, i32 addrspace(1)* %out2
 ret void
 }
@@ -2656,10 +2624,8 @@
 ; SI-NEXT: v_mov_b32_e32 v2, s6
 ; SI-NEXT: v_mov_b32_e32 v0, s4
 ; SI-NEXT: v_mov_b32_e32 v1, s5
-; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: s_waitcnt lgkmcnt(0)
 ; SI-NEXT: buffer_atomic_smin v2, v[0:1], s[0:3], 0 addr64 offset:16
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_wbinvl1
 ; SI-NEXT: s_endpgm
 ;
 ; VI-LABEL: atomic_min_i32_addr64_offset:
@@ -2676,10 +2642,8 @@
 ; VI-NEXT: v_mov_b32_e32 v0, s0
 ; VI-NEXT: v_mov_b32_e32 v1, s1
 ; VI-NEXT: v_mov_b32_e32 v2, s6
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
 ; VI-NEXT: flat_atomic_smin v[0:1], v2
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
 ; VI-NEXT: s_endpgm
 ;
 ; GFX9-LABEL: atomic_min_i32_addr64_offset:
@@ -2693,15 +2657,13 @@
 ; GFX9-NEXT: s_add_u32 s0, s4, s0
 ; GFX9-NEXT: s_addc_u32 s1, s5, s1
 ; GFX9-NEXT: v_mov_b32_e32 v1, s6
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT: global_atomic_smin v0, v1, s[0:1] offset:16
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
 ; GFX9-NEXT: s_endpgm
 entry:
 %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
 %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
- %val = atomicrmw volatile min i32 addrspace(1)* %gep, i32 %in seq_cst
+ %val = atomicrmw volatile min i32 addrspace(1)* %gep, i32 %in syncscope("workgroup") seq_cst
 ret void
 }
@@ -2721,11 +2683,10 @@
 ; SI-NEXT: v_mov_b32_e32 v2, s2
 ; SI-NEXT: v_mov_b32_e32 v0, s8
 ; SI-NEXT: v_mov_b32_e32 v1, s9
-; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: s_waitcnt lgkmcnt(0)
 ; SI-NEXT: buffer_atomic_smin v2, v[0:1], s[4:7], 0 addr64 offset:16 glc
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_wbinvl1
 ; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_waitcnt vmcnt(0)
 ; SI-NEXT: buffer_store_dword v2, off, s[0:3], 0
 ; SI-NEXT: s_endpgm
 ;
@@ -2743,14 +2704,13 @@
 ; VI-NEXT: v_mov_b32_e32 v0, s0
 ; VI-NEXT: v_mov_b32_e32 v1, s1
 ; VI-NEXT: v_mov_b32_e32 v2, s8
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
 ; VI-NEXT: flat_atomic_smin v0, v[0:1], v2 glc
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
 ; VI-NEXT: s_mov_b32 s3, 0xf000
 ; VI-NEXT: s_mov_b32 s2, -1
 ; VI-NEXT: s_mov_b32 s0, s6
 ; VI-NEXT: s_mov_b32 s1, s7
+; VI-NEXT: s_waitcnt vmcnt(0)
 ; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
 ; VI-NEXT: s_endpgm
 ;
@@ -2765,16 +2725,15 @@
 ; GFX9-NEXT: s_add_u32 s0, s4, s0
 ; GFX9-NEXT: s_addc_u32 s1, s5, s1
 ; GFX9-NEXT: v_mov_b32_e32 v1, s8
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT: global_atomic_smin v1, v0, v1, s[0:1] offset:16 glc
 ; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
 ; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
 ; GFX9-NEXT: s_endpgm
 entry:
 %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
 %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
- %val = atomicrmw volatile min i32 addrspace(1)* %gep, i32 %in seq_cst
+ %val = atomicrmw volatile min i32 addrspace(1)* %gep, i32 %in syncscope("workgroup") seq_cst
 store i32 %val, i32 addrspace(1)* %out2
 ret void
 }
@@ -2788,10 +2747,8 @@
 ; SI-NEXT: s_mov_b32 s2, -1
 ; SI-NEXT: s_waitcnt lgkmcnt(0)
 ; SI-NEXT: v_mov_b32_e32 v0, s4
-; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: s_waitcnt lgkmcnt(0)
 ; SI-NEXT: buffer_atomic_smin v0, off, s[0:3], 0
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_wbinvl1
 ; SI-NEXT: s_endpgm
 ;
 ; VI-LABEL: atomic_min_i32:
@@ -2802,10 +2759,8 @@
 ; VI-NEXT: s_mov_b32 s2, -1
 ; VI-NEXT: s_waitcnt lgkmcnt(0)
 ; VI-NEXT: v_mov_b32_e32 v0, s4
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
 ; VI-NEXT: buffer_atomic_smin v0, off, s[0:3], 0
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
 ; VI-NEXT: s_endpgm
 ;
 ; GFX9-LABEL: atomic_min_i32:
@@ -2815,13 +2770,11 @@
 ; GFX9-NEXT: v_mov_b32_e32 v0, 0
 ; GFX9-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT: v_mov_b32_e32 v1, s4
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT: global_atomic_smin v0, v1, s[2:3]
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
 ; GFX9-NEXT: s_endpgm
 entry:
- %val = atomicrmw volatile min i32 addrspace(1)* %out, i32 %in seq_cst
+ %val = atomicrmw volatile min i32 addrspace(1)* %out, i32 %in syncscope("workgroup") seq_cst
 ret void
 }
@@ -2836,12 +2789,11 @@
 ; SI-NEXT: s_mov_b32 s0, s4
 ; SI-NEXT: s_mov_b32 s1, s5
 ; SI-NEXT: v_mov_b32_e32 v0, s8
-; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: s_waitcnt lgkmcnt(0)
 ; SI-NEXT: buffer_atomic_smin v0, off, s[0:3], 0 glc
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_wbinvl1
 ; SI-NEXT: s_mov_b32 s0, s6
 ; SI-NEXT: s_mov_b32 s1, s7
+; SI-NEXT: s_waitcnt vmcnt(0)
 ; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
 ; SI-NEXT: s_endpgm
 ;
@@ -2855,12 +2807,11 @@
 ; VI-NEXT: s_mov_b32 s0, s4
 ; VI-NEXT: s_mov_b32 s1, s5
 ; VI-NEXT: v_mov_b32_e32 v0, s8
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
 ; VI-NEXT: buffer_atomic_smin v0, off, s[0:3], 0 glc
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
 ; VI-NEXT: s_mov_b32 s0, s6
 ; VI-NEXT: s_mov_b32 s1, s7
+; VI-NEXT: s_waitcnt vmcnt(0)
 ; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
 ; VI-NEXT: s_endpgm
 ;
@@ -2871,14 +2822,13 @@
 ; GFX9-NEXT: v_mov_b32_e32 v0, 0
 ; GFX9-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT: v_mov_b32_e32 v1, s2
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT: global_atomic_smin v1, v0, v1, s[4:5] glc
 ; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
 ; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
 ; GFX9-NEXT: s_endpgm
 entry:
- %val = atomicrmw volatile min i32 addrspace(1)* %out, i32 %in seq_cst
+ %val = atomicrmw volatile min i32 addrspace(1)* %out, i32 %in syncscope("workgroup") seq_cst
 store i32 %val, i32 addrspace(1)* %out2
 ret void
 }
@@ -2896,10 +2846,8 @@
 ; SI-NEXT: v_mov_b32_e32 v2, s6
 ; SI-NEXT: v_mov_b32_e32 v0, s4
 ; SI-NEXT: v_mov_b32_e32 v1, s5
-; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: s_waitcnt lgkmcnt(0)
 ; SI-NEXT: buffer_atomic_smin v2, v[0:1], s[0:3], 0 addr64
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_wbinvl1
 ; SI-NEXT: s_endpgm
 ;
 ; VI-LABEL: atomic_min_i32_addr64:
@@ -2914,10 +2862,8 @@
 ; VI-NEXT: v_mov_b32_e32 v0, s0
 ; VI-NEXT: v_mov_b32_e32 v1, s1
 ; VI-NEXT: v_mov_b32_e32 v2, s6
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
 ; VI-NEXT: flat_atomic_smin v[0:1], v2
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
 ; VI-NEXT: s_endpgm
 ;
 ; GFX9-LABEL: atomic_min_i32_addr64:
@@ -2931,14 +2877,12 @@
 ; GFX9-NEXT: s_add_u32 s0, s4, s0
 ; GFX9-NEXT: s_addc_u32 s1, s5, s1
 ; GFX9-NEXT: v_mov_b32_e32 v1, s6
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT: global_atomic_smin v0, v1, s[0:1]
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
 ; GFX9-NEXT: s_endpgm
 entry:
 %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
- %val = atomicrmw volatile min i32 addrspace(1)* %ptr, i32 %in seq_cst
+ %val = atomicrmw volatile min i32 addrspace(1)* %ptr, i32 %in syncscope("workgroup") seq_cst
 ret void
 }
@@ -2958,11 +2902,10 @@
 ; SI-NEXT: v_mov_b32_e32 v2, s2
 ; SI-NEXT: v_mov_b32_e32 v0, s8
 ; SI-NEXT: v_mov_b32_e32 v1, s9
-; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: s_waitcnt lgkmcnt(0)
 ; SI-NEXT: buffer_atomic_smin v2, v[0:1], s[4:7], 0 addr64 glc
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_wbinvl1
 ; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_waitcnt vmcnt(0)
 ; SI-NEXT: buffer_store_dword v2, off, s[0:3], 0
 ; SI-NEXT: s_endpgm
 ;
@@ -2978,14 +2921,13 @@
 ; VI-NEXT: v_mov_b32_e32 v0, s0
 ; VI-NEXT: v_mov_b32_e32 v1, s1
 ; VI-NEXT: v_mov_b32_e32 v2, s8
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
 ; VI-NEXT: flat_atomic_smin v0, v[0:1], v2 glc
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
 ; VI-NEXT: s_mov_b32 s3, 0xf000
 ; VI-NEXT: s_mov_b32 s2, -1
 ; VI-NEXT: s_mov_b32 s0, s6
 ; VI-NEXT: s_mov_b32 s1, s7
+; VI-NEXT: s_waitcnt vmcnt(0)
 ; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
 ; VI-NEXT: s_endpgm
 ;
@@ -3000,15 +2942,14 @@
 ; GFX9-NEXT: s_add_u32 s0, s4, s0
 ; GFX9-NEXT: s_addc_u32 s1, s5, s1
 ; GFX9-NEXT: v_mov_b32_e32 v1, s8
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT: global_atomic_smin v1, v0, v1, s[0:1] glc
 ; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
 ; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
 ; GFX9-NEXT: s_endpgm
 entry:
 %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
- %val = atomicrmw volatile min i32 addrspace(1)* %ptr, i32 %in seq_cst
+ %val = atomicrmw volatile min i32 addrspace(1)* %ptr, i32 %in syncscope("workgroup") seq_cst
 store i32 %val, i32 addrspace(1)* %out2
 ret void
 }
@@ -3022,10 +2963,8 @@
 ; SI-NEXT: s_mov_b32 s2, -1
 ; SI-NEXT: s_waitcnt lgkmcnt(0)
 ; SI-NEXT: v_mov_b32_e32 v0, s4
-; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: s_waitcnt lgkmcnt(0)
 ; SI-NEXT: buffer_atomic_umin v0, off, s[0:3], 0 offset:16
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_wbinvl1
 ; SI-NEXT: s_endpgm
 ;
 ; VI-LABEL: atomic_umin_i32_offset:
@@ -3036,10 +2975,8 @@
 ; VI-NEXT: s_mov_b32 s2, -1
 ; VI-NEXT: s_waitcnt lgkmcnt(0)
 ; VI-NEXT: v_mov_b32_e32 v0, s4
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
 ; VI-NEXT: buffer_atomic_umin v0, off, s[0:3], 0 offset:16
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
 ; VI-NEXT: s_endpgm
 ;
 ; GFX9-LABEL: atomic_umin_i32_offset:
@@ -3049,14 +2986,12 @@
 ; GFX9-NEXT: v_mov_b32_e32 v0, 0
 ; GFX9-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT: v_mov_b32_e32 v1, s4
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT: global_atomic_umin v0, v1, s[2:3] offset:16
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
 ; GFX9-NEXT: s_endpgm
 entry:
 %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
- %val = atomicrmw volatile umin i32 addrspace(1)* %gep, i32 %in seq_cst
+ %val = atomicrmw volatile umin i32 addrspace(1)* %gep, i32 %in syncscope("workgroup") seq_cst
 ret void
 }
@@ -3073,10 +3008,9 @@
 ; SI-NEXT: s_mov_b32 s6, s2
 ; SI-NEXT: s_mov_b32 s7, s3
 ; SI-NEXT: v_mov_b32_e32 v0, s8
-; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: s_waitcnt lgkmcnt(0)
 ; SI-NEXT: buffer_atomic_umin v0, off, s[4:7], 0 offset:16 glc
 ; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_wbinvl1
 ; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
 ; SI-NEXT: s_endpgm
 ;
@@ -3092,10 +3026,9 @@
 ; VI-NEXT: s_mov_b32 s6, s2
 ; VI-NEXT: s_mov_b32 s7, s3
 ; VI-NEXT: v_mov_b32_e32 v0, s8
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
 ; VI-NEXT: buffer_atomic_umin v0, off, s[4:7], 0 offset:16 glc
 ; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
 ; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
 ; VI-NEXT: s_endpgm
 ;
@@ -3106,15 +3039,14 @@
 ; GFX9-NEXT: v_mov_b32_e32 v0, 0
 ; GFX9-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT: v_mov_b32_e32 v1, s2
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT: global_atomic_umin v1, v0, v1, s[4:5] offset:16 glc
 ; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
 ; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
 ; GFX9-NEXT: s_endpgm
 entry:
 %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
- %val = atomicrmw volatile umin i32 addrspace(1)* %gep, i32 %in seq_cst
+ %val = atomicrmw volatile umin i32 addrspace(1)* %gep, i32 %in syncscope("workgroup") seq_cst
 store i32 %val, i32 addrspace(1)* %out2
 ret void
 }
@@ -3132,10 +3064,8 @@
 ; SI-NEXT: v_mov_b32_e32 v2, s6
 ; SI-NEXT: v_mov_b32_e32 v0, s4
 ; SI-NEXT: v_mov_b32_e32 v1, s5
-; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: s_waitcnt lgkmcnt(0)
 ; SI-NEXT: buffer_atomic_umin v2, v[0:1], s[0:3], 0 addr64 offset:16
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_wbinvl1
 ; SI-NEXT: s_endpgm
 ;
 ; VI-LABEL: atomic_umin_i32_addr64_offset:
@@ -3152,10 +3082,8 @@
 ; VI-NEXT: v_mov_b32_e32 v0, s0
 ; VI-NEXT: v_mov_b32_e32 v1, s1
 ; VI-NEXT: v_mov_b32_e32 v2, s6
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
 ; VI-NEXT: flat_atomic_umin v[0:1], v2
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
 ; VI-NEXT: s_endpgm
 ;
 ; GFX9-LABEL: atomic_umin_i32_addr64_offset:
@@ -3169,15 +3097,13 @@
 ; GFX9-NEXT: s_add_u32 s0, s4, s0
 ; GFX9-NEXT: s_addc_u32 s1, s5, s1
 ; GFX9-NEXT: v_mov_b32_e32 v1, s6
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT: global_atomic_umin v0, v1, s[0:1] offset:16
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
 ; GFX9-NEXT: s_endpgm
 entry:
 %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
 %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
- %val = atomicrmw volatile umin i32 addrspace(1)* %gep, i32 %in seq_cst
+ %val = atomicrmw volatile umin i32 addrspace(1)* %gep, i32 %in syncscope("workgroup") seq_cst
 ret void
 }
@@ -3197,11 +3123,10 @@
 ; SI-NEXT: v_mov_b32_e32 v2, s2
 ; SI-NEXT: v_mov_b32_e32 v0, s8
 ; SI-NEXT: v_mov_b32_e32 v1, s9
-; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: s_waitcnt lgkmcnt(0)
 ; SI-NEXT: buffer_atomic_umin v2, v[0:1], s[4:7], 0 addr64 offset:16 glc
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_wbinvl1
 ; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_waitcnt vmcnt(0)
 ; SI-NEXT: buffer_store_dword v2, off, s[0:3], 0
 ; SI-NEXT: s_endpgm
 ;
@@ -3219,14 +3144,13 @@
 ; VI-NEXT: v_mov_b32_e32 v0, s0
 ; VI-NEXT: v_mov_b32_e32 v1, s1
 ; VI-NEXT: v_mov_b32_e32 v2, s8
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
 ; VI-NEXT: flat_atomic_umin v0, v[0:1], v2 glc
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
 ; VI-NEXT: s_mov_b32 s3, 0xf000
 ; VI-NEXT: s_mov_b32 s2, -1
 ; VI-NEXT: s_mov_b32 s0, s6
 ; VI-NEXT: s_mov_b32 s1, s7
+; VI-NEXT: s_waitcnt vmcnt(0)
 ; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
 ; VI-NEXT: s_endpgm
 ;
@@ -3241,16 +3165,15 @@
 ; GFX9-NEXT: s_add_u32 s0, s4, s0
 ; GFX9-NEXT: s_addc_u32 s1, s5, s1
 ; GFX9-NEXT: v_mov_b32_e32 v1, s8
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT: global_atomic_umin v1, v0, v1, s[0:1] offset:16 glc
 ; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
 ; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
 ; GFX9-NEXT: s_endpgm
 entry:
 %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
 %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
- %val = atomicrmw volatile umin i32 addrspace(1)* %gep, i32 %in seq_cst
+ %val = atomicrmw volatile umin i32 addrspace(1)* %gep, i32 %in syncscope("workgroup") seq_cst
 store i32 %val, i32 addrspace(1)* %out2
 ret void
 }
@@ -3264,10 +3187,8 @@
 ; SI-NEXT: s_mov_b32 s2, -1
 ; SI-NEXT: s_waitcnt lgkmcnt(0)
 ; SI-NEXT: v_mov_b32_e32 v0, s4
-; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: s_waitcnt lgkmcnt(0)
 ; SI-NEXT: buffer_atomic_umin v0, off, s[0:3], 0
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_wbinvl1
 ; SI-NEXT: s_endpgm
 ;
 ; VI-LABEL: atomic_umin_i32:
@@ -3278,10 +3199,8 @@
 ; VI-NEXT: s_mov_b32 s2, -1
 ; VI-NEXT: s_waitcnt lgkmcnt(0)
 ; VI-NEXT: v_mov_b32_e32 v0, s4
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
 ; VI-NEXT: buffer_atomic_umin v0, off, s[0:3], 0
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
 ; VI-NEXT: s_endpgm
 ;
 ; GFX9-LABEL: atomic_umin_i32:
@@ -3291,13 +3210,11 @@
 ; GFX9-NEXT: v_mov_b32_e32 v0, 0
 ; GFX9-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT: v_mov_b32_e32 v1, s4
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT: global_atomic_umin v0, v1, s[2:3]
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
 ; GFX9-NEXT: s_endpgm
 entry:
- %val = atomicrmw volatile umin i32 addrspace(1)* %out, i32 %in seq_cst
+ %val = atomicrmw volatile umin i32 addrspace(1)* %out, i32 %in syncscope("workgroup") seq_cst
 ret void
 }
@@ -3312,12 +3229,11 @@
 ; SI-NEXT: s_mov_b32 s0, s4
 ; SI-NEXT: s_mov_b32 s1, s5
 ; SI-NEXT: v_mov_b32_e32 v0, s8
-; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: s_waitcnt lgkmcnt(0)
 ; SI-NEXT: buffer_atomic_umin v0, off, s[0:3], 0 glc
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_wbinvl1
 ; SI-NEXT: s_mov_b32 s0, s6
 ; SI-NEXT: s_mov_b32 s1, s7
+; SI-NEXT: s_waitcnt vmcnt(0)
 ; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
 ; SI-NEXT: s_endpgm
 ;
@@ -3331,12 +3247,11 @@
 ; VI-NEXT: s_mov_b32 s0, s4
 ; VI-NEXT: s_mov_b32 s1, s5
 ; VI-NEXT: v_mov_b32_e32 v0, s8
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
 ; VI-NEXT: buffer_atomic_umin v0, off, s[0:3], 0 glc
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
 ; VI-NEXT: s_mov_b32 s0, s6
 ; VI-NEXT: s_mov_b32 s1, s7
+; VI-NEXT: s_waitcnt vmcnt(0)
 ; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
 ; VI-NEXT: s_endpgm
 ;
@@ -3347,14 +3262,13 @@
 ; GFX9-NEXT: v_mov_b32_e32 v0, 0
 ; GFX9-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT: v_mov_b32_e32 v1, s2
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT: global_atomic_umin v1, v0, v1, s[4:5] glc
 ; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
 ; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
 ; GFX9-NEXT: s_endpgm
 entry:
- %val = atomicrmw volatile umin i32 addrspace(1)* %out, i32 %in seq_cst
+ %val = atomicrmw volatile umin i32 addrspace(1)* %out, i32 %in syncscope("workgroup") seq_cst
 store i32 %val, i32 addrspace(1)* %out2
 ret void
 }
@@ -3372,10 +3286,8 @@
 ; SI-NEXT: v_mov_b32_e32 v2, s6
 ; SI-NEXT: v_mov_b32_e32 v0, s4
 ; SI-NEXT: v_mov_b32_e32 v1, s5
-; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: s_waitcnt lgkmcnt(0)
 ; SI-NEXT: buffer_atomic_umin v2, v[0:1], s[0:3], 0 addr64
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_wbinvl1
 ; SI-NEXT: s_endpgm
 ;
 ; VI-LABEL: atomic_umin_i32_addr64:
@@ -3390,10 +3302,8 @@
 ; VI-NEXT: v_mov_b32_e32 v0, s0
 ; VI-NEXT: v_mov_b32_e32 v1, s1
 ; VI-NEXT: v_mov_b32_e32 v2, s6
-; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: s_waitcnt lgkmcnt(0)
 ; VI-NEXT: flat_atomic_umin v[0:1], v2
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_wbinvl1_vol
 ; VI-NEXT: s_endpgm
 ;
 ; GFX9-LABEL: atomic_umin_i32_addr64:
@@ -3407,14 +3317,12 @@
 ; GFX9-NEXT: s_add_u32 s0, s4, s0
 ; GFX9-NEXT: s_addc_u32 s1, s5, s1
 ; GFX9-NEXT: v_mov_b32_e32 v1, s6
-; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT: global_atomic_umin v0, v1, s[0:1]
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_wbinvl1_vol
 ; GFX9-NEXT: s_endpgm
 entry:
 %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
- %val = atomicrmw volatile umin i32 addrspace(1)* %ptr, i32 %in seq_cst
+ %val = atomicrmw volatile umin i32 addrspace(1)* %ptr, i32 %in syncscope("workgroup") seq_cst
 ret void
 }
@@ -3434,11 +3342,10 @@
 ; SI-NEXT: v_mov_b32_e32 v2, s2
 ; SI-NEXT: v_mov_b32_e32 v0, s8
 ; SI-NEXT: v_mov_b32_e32 v1, s9
-; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: s_waitcnt lgkmcnt(0)
 ; SI-NEXT: buffer_atomic_umin v2, v[0:1], s[4:7], 0 addr64 glc
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: buffer_wbinvl1
 ; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_waitcnt vmcnt(0)
 ; SI-NEXT: buffer_store_dword v2, off, s[0:3], 0
; SI-NEXT: s_endpgm ; @@ -3454,14 +3361,13 @@ ; VI-NEXT: v_mov_b32_e32 v0, s0 ; VI-NEXT: v_mov_b32_e32 v1, s1 ; VI-NEXT: v_mov_b32_e32 v2, s8 -; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; VI-NEXT: s_waitcnt lgkmcnt(0) ; VI-NEXT: flat_atomic_umin v0, v[0:1], v2 glc -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: buffer_wbinvl1_vol ; VI-NEXT: s_mov_b32 s3, 0xf000 ; VI-NEXT: s_mov_b32 s2, -1 ; VI-NEXT: s_mov_b32 s0, s6 ; VI-NEXT: s_mov_b32 s1, s7 +; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0 ; VI-NEXT: s_endpgm ; @@ -3476,15 +3382,14 @@ ; GFX9-NEXT: s_add_u32 s0, s4, s0 ; GFX9-NEXT: s_addc_u32 s1, s5, s1 ; GFX9-NEXT: v_mov_b32_e32 v1, s8 -; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: s_waitcnt lgkmcnt(0) ; GFX9-NEXT: global_atomic_umin v1, v0, v1, s[0:1] glc ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: global_store_dword v0, v1, s[6:7] ; GFX9-NEXT: s_endpgm entry: %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index - %val = atomicrmw volatile umin i32 addrspace(1)* %ptr, i32 %in seq_cst + %val = atomicrmw volatile umin i32 addrspace(1)* %ptr, i32 %in syncscope("workgroup") seq_cst store i32 %val, i32 addrspace(1)* %out2 ret void } diff --git a/llvm/test/CodeGen/AMDGPU/global_atomics_i64.ll b/llvm/test/CodeGen/AMDGPU/global_atomics_i64.ll --- a/llvm/test/CodeGen/AMDGPU/global_atomics_i64.ll +++ b/llvm/test/CodeGen/AMDGPU/global_atomics_i64.ll @@ -1452,10 +1452,8 @@ ; CI-NEXT: v_mov_b32_e32 v1, s3 ; CI-NEXT: s_mov_b32 s3, 0xf000 ; CI-NEXT: s_mov_b32 s2, -1 -; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; CI-NEXT: s_waitcnt lgkmcnt(0) ; CI-NEXT: buffer_atomic_smax_x2 v[0:1], off, s[0:3], 0 offset:32 -; CI-NEXT: s_waitcnt vmcnt(0) -; CI-NEXT: buffer_wbinvl1_vol ; CI-NEXT: s_endpgm ; ; VI-LABEL: atomic_max_i64_offset: @@ -1466,10 +1464,8 @@ ; VI-NEXT: v_mov_b32_e32 v1, s3 ; VI-NEXT: s_mov_b32 s3, 0xf000 ; VI-NEXT: s_mov_b32 s2, -1 -; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; VI-NEXT: s_waitcnt lgkmcnt(0) ; VI-NEXT: buffer_atomic_smax_x2 v[0:1], off, s[0:3], 0 offset:32 -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: buffer_wbinvl1_vol ; VI-NEXT: s_endpgm ; ; GFX9-LABEL: atomic_max_i64_offset: @@ -1479,14 +1475,12 @@ ; GFX9-NEXT: s_waitcnt lgkmcnt(0) ; GFX9-NEXT: v_mov_b32_e32 v0, s2 ; GFX9-NEXT: v_mov_b32_e32 v1, s3 -; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: s_waitcnt lgkmcnt(0) ; GFX9-NEXT: global_atomic_smax_x2 v2, v[0:1], s[0:1] offset:32 -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: s_endpgm entry: %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4 - %tmp0 = atomicrmw volatile max i64 addrspace(1)* %gep, i64 %in seq_cst + %tmp0 = atomicrmw volatile max i64 addrspace(1)* %gep, i64 %in syncscope("workgroup") seq_cst ret void } @@ -1504,10 +1498,9 @@ ; CI-NEXT: v_mov_b32_e32 v1, s9 ; CI-NEXT: s_mov_b32 s6, s2 ; CI-NEXT: s_mov_b32 s7, s3 -; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; CI-NEXT: s_waitcnt lgkmcnt(0) ; CI-NEXT: buffer_atomic_smax_x2 v[0:1], off, s[4:7], 0 offset:32 glc ; CI-NEXT: s_waitcnt vmcnt(0) -; CI-NEXT: buffer_wbinvl1_vol ; CI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; CI-NEXT: s_endpgm ; @@ -1524,10 +1517,9 @@ ; VI-NEXT: v_mov_b32_e32 v1, s9 ; VI-NEXT: s_mov_b32 s6, s2 ; VI-NEXT: s_mov_b32 s7, s3 -; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; VI-NEXT: s_waitcnt lgkmcnt(0) ; VI-NEXT: buffer_atomic_smax_x2 v[0:1], off, s[4:7], 0 offset:32 glc ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: buffer_wbinvl1_vol ; VI-NEXT: buffer_store_dwordx2 v[0:1], off, 
s[0:3], 0 ; VI-NEXT: s_endpgm ; @@ -1539,15 +1531,14 @@ ; GFX9-NEXT: s_waitcnt lgkmcnt(0) ; GFX9-NEXT: v_mov_b32_e32 v0, s2 ; GFX9-NEXT: v_mov_b32_e32 v1, s3 -; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: s_waitcnt lgkmcnt(0) ; GFX9-NEXT: global_atomic_smax_x2 v[0:1], v2, v[0:1], s[4:5] offset:32 glc ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[6:7] ; GFX9-NEXT: s_endpgm entry: %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4 - %tmp0 = atomicrmw volatile max i64 addrspace(1)* %gep, i64 %in seq_cst + %tmp0 = atomicrmw volatile max i64 addrspace(1)* %gep, i64 %in syncscope("workgroup") seq_cst store i64 %tmp0, i64 addrspace(1)* %out2 ret void } @@ -1565,10 +1556,8 @@ ; CI-NEXT: s_mov_b32 s7, 0xf000 ; CI-NEXT: s_mov_b32 s6, 0 ; CI-NEXT: v_mov_b32_e32 v2, s0 -; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; CI-NEXT: s_waitcnt lgkmcnt(0) ; CI-NEXT: buffer_atomic_smax_x2 v[0:1], v[2:3], s[4:7], 0 addr64 offset:32 -; CI-NEXT: s_waitcnt vmcnt(0) -; CI-NEXT: buffer_wbinvl1_vol ; CI-NEXT: s_endpgm ; ; VI-LABEL: atomic_max_i64_addr64_offset: @@ -1585,10 +1574,8 @@ ; VI-NEXT: v_mov_b32_e32 v3, s1 ; VI-NEXT: v_mov_b32_e32 v1, s7 ; VI-NEXT: v_mov_b32_e32 v2, s0 -; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; VI-NEXT: s_waitcnt lgkmcnt(0) ; VI-NEXT: flat_atomic_smax_x2 v[2:3], v[0:1] -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: buffer_wbinvl1_vol ; VI-NEXT: s_endpgm ; ; GFX9-LABEL: atomic_max_i64_addr64_offset: @@ -1602,15 +1589,13 @@ ; GFX9-NEXT: s_add_u32 s0, s4, s0 ; GFX9-NEXT: v_mov_b32_e32 v1, s7 ; GFX9-NEXT: s_addc_u32 s1, s5, s1 -; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: s_waitcnt lgkmcnt(0) ; GFX9-NEXT: global_atomic_smax_x2 v2, v[0:1], s[0:1] offset:32 -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: s_endpgm entry: %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4 - %tmp0 = atomicrmw volatile max i64 addrspace(1)* %gep, i64 %in seq_cst + %tmp0 = atomicrmw volatile max i64 addrspace(1)* %gep, i64 %in syncscope("workgroup") seq_cst ret void } @@ -1630,10 +1615,9 @@ ; CI-NEXT: s_mov_b32 s2, 0 ; CI-NEXT: s_mov_b32 s3, s11 ; CI-NEXT: v_mov_b32_e32 v3, s5 -; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; CI-NEXT: s_waitcnt lgkmcnt(0) ; CI-NEXT: buffer_atomic_smax_x2 v[0:1], v[2:3], s[0:3], 0 addr64 offset:32 glc ; CI-NEXT: s_waitcnt vmcnt(0) -; CI-NEXT: buffer_wbinvl1_vol ; CI-NEXT: buffer_store_dwordx2 v[0:1], off, s[8:11], 0 ; CI-NEXT: s_endpgm ; @@ -1650,14 +1634,13 @@ ; VI-NEXT: s_addc_u32 s1, s1, 0 ; VI-NEXT: v_mov_b32_e32 v3, s1 ; VI-NEXT: v_mov_b32_e32 v2, s0 -; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; VI-NEXT: s_waitcnt lgkmcnt(0) ; VI-NEXT: flat_atomic_smax_x2 v[0:1], v[2:3], v[0:1] glc -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: buffer_wbinvl1_vol ; VI-NEXT: s_mov_b32 s7, 0xf000 ; VI-NEXT: s_mov_b32 s6, -1 ; VI-NEXT: s_mov_b32 s4, s2 ; VI-NEXT: s_mov_b32 s5, s3 +; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 ; VI-NEXT: s_endpgm ; @@ -1671,16 +1654,15 @@ ; GFX9-NEXT: s_lshl_b64 s[4:5], s[6:7], 3 ; GFX9-NEXT: s_add_u32 s0, s0, s4 ; GFX9-NEXT: s_addc_u32 s1, s1, s5 -; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: s_waitcnt lgkmcnt(0) ; GFX9-NEXT: global_atomic_smax_x2 v[0:1], v2, v[0:1], s[0:1] offset:32 glc ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3] ; GFX9-NEXT: s_endpgm entry: %ptr = 
getelementptr i64, i64 addrspace(1)* %out, i64 %index %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4 - %tmp0 = atomicrmw volatile max i64 addrspace(1)* %gep, i64 %in seq_cst + %tmp0 = atomicrmw volatile max i64 addrspace(1)* %gep, i64 %in syncscope("workgroup") seq_cst store i64 %tmp0, i64 addrspace(1)* %out2 ret void } @@ -1696,10 +1678,8 @@ ; CI-NEXT: s_mov_b32 s5, s1 ; CI-NEXT: v_mov_b32_e32 v0, s2 ; CI-NEXT: v_mov_b32_e32 v1, s3 -; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; CI-NEXT: s_waitcnt lgkmcnt(0) ; CI-NEXT: buffer_atomic_smax_x2 v[0:1], off, s[4:7], 0 -; CI-NEXT: s_waitcnt vmcnt(0) -; CI-NEXT: buffer_wbinvl1_vol ; CI-NEXT: s_endpgm ; ; VI-LABEL: atomic_max_i64: @@ -1712,10 +1692,8 @@ ; VI-NEXT: s_mov_b32 s5, s1 ; VI-NEXT: v_mov_b32_e32 v0, s2 ; VI-NEXT: v_mov_b32_e32 v1, s3 -; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; VI-NEXT: s_waitcnt lgkmcnt(0) ; VI-NEXT: buffer_atomic_smax_x2 v[0:1], off, s[4:7], 0 -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: buffer_wbinvl1_vol ; VI-NEXT: s_endpgm ; ; GFX9-LABEL: atomic_max_i64: @@ -1725,13 +1703,11 @@ ; GFX9-NEXT: s_waitcnt lgkmcnt(0) ; GFX9-NEXT: v_mov_b32_e32 v0, s2 ; GFX9-NEXT: v_mov_b32_e32 v1, s3 -; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: s_waitcnt lgkmcnt(0) ; GFX9-NEXT: global_atomic_smax_x2 v2, v[0:1], s[0:1] -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: s_endpgm entry: - %tmp0 = atomicrmw volatile max i64 addrspace(1)* %out, i64 %in seq_cst + %tmp0 = atomicrmw volatile max i64 addrspace(1)* %out, i64 %in syncscope("workgroup") seq_cst ret void } @@ -1747,12 +1723,11 @@ ; CI-NEXT: s_mov_b32 s1, s5 ; CI-NEXT: v_mov_b32_e32 v0, s8 ; CI-NEXT: v_mov_b32_e32 v1, s9 -; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; CI-NEXT: s_waitcnt lgkmcnt(0) ; CI-NEXT: buffer_atomic_smax_x2 v[0:1], off, s[0:3], 0 glc -; CI-NEXT: s_waitcnt vmcnt(0) -; CI-NEXT: buffer_wbinvl1_vol ; CI-NEXT: s_mov_b32 s0, s6 ; CI-NEXT: s_mov_b32 s1, s7 +; CI-NEXT: s_waitcnt vmcnt(0) ; CI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; CI-NEXT: s_endpgm ; @@ -1767,12 +1742,11 @@ ; VI-NEXT: s_mov_b32 s1, s5 ; VI-NEXT: v_mov_b32_e32 v0, s8 ; VI-NEXT: v_mov_b32_e32 v1, s9 -; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; VI-NEXT: s_waitcnt lgkmcnt(0) ; VI-NEXT: buffer_atomic_smax_x2 v[0:1], off, s[0:3], 0 glc -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: buffer_wbinvl1_vol ; VI-NEXT: s_mov_b32 s0, s6 ; VI-NEXT: s_mov_b32 s1, s7 +; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; VI-NEXT: s_endpgm ; @@ -1784,14 +1758,13 @@ ; GFX9-NEXT: s_waitcnt lgkmcnt(0) ; GFX9-NEXT: v_mov_b32_e32 v0, s2 ; GFX9-NEXT: v_mov_b32_e32 v1, s3 -; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: s_waitcnt lgkmcnt(0) ; GFX9-NEXT: global_atomic_smax_x2 v[0:1], v2, v[0:1], s[4:5] glc ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[6:7] ; GFX9-NEXT: s_endpgm entry: - %tmp0 = atomicrmw volatile max i64 addrspace(1)* %out, i64 %in seq_cst + %tmp0 = atomicrmw volatile max i64 addrspace(1)* %out, i64 %in syncscope("workgroup") seq_cst store i64 %tmp0, i64 addrspace(1)* %out2 ret void } @@ -1809,10 +1782,8 @@ ; CI-NEXT: s_mov_b32 s7, 0xf000 ; CI-NEXT: s_mov_b32 s6, 0 ; CI-NEXT: v_mov_b32_e32 v2, s0 -; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; CI-NEXT: s_waitcnt lgkmcnt(0) ; CI-NEXT: buffer_atomic_smax_x2 v[0:1], v[2:3], s[4:7], 0 addr64 -; CI-NEXT: s_waitcnt vmcnt(0) -; CI-NEXT: buffer_wbinvl1_vol ; CI-NEXT: s_endpgm ; ; VI-LABEL: atomic_max_i64_addr64: @@ 
-1827,10 +1798,8 @@ ; VI-NEXT: v_mov_b32_e32 v3, s1 ; VI-NEXT: v_mov_b32_e32 v1, s7 ; VI-NEXT: v_mov_b32_e32 v2, s0 -; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; VI-NEXT: s_waitcnt lgkmcnt(0) ; VI-NEXT: flat_atomic_smax_x2 v[2:3], v[0:1] -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: buffer_wbinvl1_vol ; VI-NEXT: s_endpgm ; ; GFX9-LABEL: atomic_max_i64_addr64: @@ -1844,14 +1813,12 @@ ; GFX9-NEXT: s_add_u32 s0, s4, s0 ; GFX9-NEXT: v_mov_b32_e32 v1, s7 ; GFX9-NEXT: s_addc_u32 s1, s5, s1 -; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: s_waitcnt lgkmcnt(0) ; GFX9-NEXT: global_atomic_smax_x2 v2, v[0:1], s[0:1] -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: s_endpgm entry: %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index - %tmp0 = atomicrmw volatile max i64 addrspace(1)* %ptr, i64 %in seq_cst + %tmp0 = atomicrmw volatile max i64 addrspace(1)* %ptr, i64 %in syncscope("workgroup") seq_cst ret void } @@ -1871,10 +1838,9 @@ ; CI-NEXT: s_mov_b32 s2, 0 ; CI-NEXT: s_mov_b32 s3, s11 ; CI-NEXT: v_mov_b32_e32 v3, s5 -; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; CI-NEXT: s_waitcnt lgkmcnt(0) ; CI-NEXT: buffer_atomic_smax_x2 v[0:1], v[2:3], s[0:3], 0 addr64 glc ; CI-NEXT: s_waitcnt vmcnt(0) -; CI-NEXT: buffer_wbinvl1_vol ; CI-NEXT: buffer_store_dwordx2 v[0:1], off, s[8:11], 0 ; CI-NEXT: s_endpgm ; @@ -1889,14 +1855,13 @@ ; VI-NEXT: s_addc_u32 s1, s1, s5 ; VI-NEXT: v_mov_b32_e32 v3, s1 ; VI-NEXT: v_mov_b32_e32 v2, s0 -; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; VI-NEXT: s_waitcnt lgkmcnt(0) ; VI-NEXT: flat_atomic_smax_x2 v[0:1], v[2:3], v[0:1] glc -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: buffer_wbinvl1_vol ; VI-NEXT: s_mov_b32 s7, 0xf000 ; VI-NEXT: s_mov_b32 s6, -1 ; VI-NEXT: s_mov_b32 s4, s2 ; VI-NEXT: s_mov_b32 s5, s3 +; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 ; VI-NEXT: s_endpgm ; @@ -1910,15 +1875,14 @@ ; GFX9-NEXT: s_lshl_b64 s[4:5], s[6:7], 3 ; GFX9-NEXT: s_add_u32 s0, s0, s4 ; GFX9-NEXT: s_addc_u32 s1, s1, s5 -; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: s_waitcnt lgkmcnt(0) ; GFX9-NEXT: global_atomic_smax_x2 v[0:1], v2, v[0:1], s[0:1] glc ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3] ; GFX9-NEXT: s_endpgm entry: %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index - %tmp0 = atomicrmw volatile max i64 addrspace(1)* %ptr, i64 %in seq_cst + %tmp0 = atomicrmw volatile max i64 addrspace(1)* %ptr, i64 %in syncscope("workgroup") seq_cst store i64 %tmp0, i64 addrspace(1)* %out2 ret void } @@ -1932,10 +1896,8 @@ ; CI-NEXT: v_mov_b32_e32 v1, s3 ; CI-NEXT: s_mov_b32 s3, 0xf000 ; CI-NEXT: s_mov_b32 s2, -1 -; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; CI-NEXT: s_waitcnt lgkmcnt(0) ; CI-NEXT: buffer_atomic_umax_x2 v[0:1], off, s[0:3], 0 offset:32 -; CI-NEXT: s_waitcnt vmcnt(0) -; CI-NEXT: buffer_wbinvl1_vol ; CI-NEXT: s_endpgm ; ; VI-LABEL: atomic_umax_i64_offset: @@ -1946,10 +1908,8 @@ ; VI-NEXT: v_mov_b32_e32 v1, s3 ; VI-NEXT: s_mov_b32 s3, 0xf000 ; VI-NEXT: s_mov_b32 s2, -1 -; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; VI-NEXT: s_waitcnt lgkmcnt(0) ; VI-NEXT: buffer_atomic_umax_x2 v[0:1], off, s[0:3], 0 offset:32 -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: buffer_wbinvl1_vol ; VI-NEXT: s_endpgm ; ; GFX9-LABEL: atomic_umax_i64_offset: @@ -1959,14 +1919,12 @@ ; GFX9-NEXT: s_waitcnt lgkmcnt(0) ; GFX9-NEXT: v_mov_b32_e32 v0, s2 ; GFX9-NEXT: v_mov_b32_e32 v1, s3 -; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: s_waitcnt 
lgkmcnt(0) ; GFX9-NEXT: global_atomic_umax_x2 v2, v[0:1], s[0:1] offset:32 -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: s_endpgm entry: %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4 - %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %gep, i64 %in seq_cst + %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %gep, i64 %in syncscope("workgroup") seq_cst ret void } @@ -1984,10 +1942,9 @@ ; CI-NEXT: v_mov_b32_e32 v1, s9 ; CI-NEXT: s_mov_b32 s6, s2 ; CI-NEXT: s_mov_b32 s7, s3 -; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; CI-NEXT: s_waitcnt lgkmcnt(0) ; CI-NEXT: buffer_atomic_umax_x2 v[0:1], off, s[4:7], 0 offset:32 glc ; CI-NEXT: s_waitcnt vmcnt(0) -; CI-NEXT: buffer_wbinvl1_vol ; CI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; CI-NEXT: s_endpgm ; @@ -2004,10 +1961,9 @@ ; VI-NEXT: v_mov_b32_e32 v1, s9 ; VI-NEXT: s_mov_b32 s6, s2 ; VI-NEXT: s_mov_b32 s7, s3 -; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; VI-NEXT: s_waitcnt lgkmcnt(0) ; VI-NEXT: buffer_atomic_umax_x2 v[0:1], off, s[4:7], 0 offset:32 glc ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: buffer_wbinvl1_vol ; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; VI-NEXT: s_endpgm ; @@ -2019,15 +1975,14 @@ ; GFX9-NEXT: s_waitcnt lgkmcnt(0) ; GFX9-NEXT: v_mov_b32_e32 v0, s2 ; GFX9-NEXT: v_mov_b32_e32 v1, s3 -; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: s_waitcnt lgkmcnt(0) ; GFX9-NEXT: global_atomic_umax_x2 v[0:1], v2, v[0:1], s[4:5] offset:32 glc ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[6:7] ; GFX9-NEXT: s_endpgm entry: %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4 - %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %gep, i64 %in seq_cst + %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %gep, i64 %in syncscope("workgroup") seq_cst store i64 %tmp0, i64 addrspace(1)* %out2 ret void } @@ -2045,10 +2000,8 @@ ; CI-NEXT: s_mov_b32 s7, 0xf000 ; CI-NEXT: s_mov_b32 s6, 0 ; CI-NEXT: v_mov_b32_e32 v2, s0 -; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; CI-NEXT: s_waitcnt lgkmcnt(0) ; CI-NEXT: buffer_atomic_umax_x2 v[0:1], v[2:3], s[4:7], 0 addr64 offset:32 -; CI-NEXT: s_waitcnt vmcnt(0) -; CI-NEXT: buffer_wbinvl1_vol ; CI-NEXT: s_endpgm ; ; VI-LABEL: atomic_umax_i64_addr64_offset: @@ -2065,10 +2018,8 @@ ; VI-NEXT: v_mov_b32_e32 v3, s1 ; VI-NEXT: v_mov_b32_e32 v1, s7 ; VI-NEXT: v_mov_b32_e32 v2, s0 -; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; VI-NEXT: s_waitcnt lgkmcnt(0) ; VI-NEXT: flat_atomic_umax_x2 v[2:3], v[0:1] -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: buffer_wbinvl1_vol ; VI-NEXT: s_endpgm ; ; GFX9-LABEL: atomic_umax_i64_addr64_offset: @@ -2082,15 +2033,13 @@ ; GFX9-NEXT: s_add_u32 s0, s4, s0 ; GFX9-NEXT: v_mov_b32_e32 v1, s7 ; GFX9-NEXT: s_addc_u32 s1, s5, s1 -; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: s_waitcnt lgkmcnt(0) ; GFX9-NEXT: global_atomic_umax_x2 v2, v[0:1], s[0:1] offset:32 -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: s_endpgm entry: %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4 - %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %gep, i64 %in seq_cst + %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %gep, i64 %in syncscope("workgroup") seq_cst ret void } @@ -2110,10 +2059,9 @@ ; CI-NEXT: s_mov_b32 s2, 0 ; CI-NEXT: s_mov_b32 s3, s11 ; CI-NEXT: v_mov_b32_e32 v3, s5 -; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; CI-NEXT: s_waitcnt lgkmcnt(0) ; CI-NEXT: buffer_atomic_umax_x2 v[0:1], 
v[2:3], s[0:3], 0 addr64 offset:32 glc ; CI-NEXT: s_waitcnt vmcnt(0) -; CI-NEXT: buffer_wbinvl1_vol ; CI-NEXT: buffer_store_dwordx2 v[0:1], off, s[8:11], 0 ; CI-NEXT: s_endpgm ; @@ -2130,14 +2078,13 @@ ; VI-NEXT: s_addc_u32 s1, s1, 0 ; VI-NEXT: v_mov_b32_e32 v3, s1 ; VI-NEXT: v_mov_b32_e32 v2, s0 -; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; VI-NEXT: s_waitcnt lgkmcnt(0) ; VI-NEXT: flat_atomic_umax_x2 v[0:1], v[2:3], v[0:1] glc -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: buffer_wbinvl1_vol ; VI-NEXT: s_mov_b32 s7, 0xf000 ; VI-NEXT: s_mov_b32 s6, -1 ; VI-NEXT: s_mov_b32 s4, s2 ; VI-NEXT: s_mov_b32 s5, s3 +; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 ; VI-NEXT: s_endpgm ; @@ -2151,16 +2098,15 @@ ; GFX9-NEXT: s_lshl_b64 s[4:5], s[6:7], 3 ; GFX9-NEXT: s_add_u32 s0, s0, s4 ; GFX9-NEXT: s_addc_u32 s1, s1, s5 -; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: s_waitcnt lgkmcnt(0) ; GFX9-NEXT: global_atomic_umax_x2 v[0:1], v2, v[0:1], s[0:1] offset:32 glc ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3] ; GFX9-NEXT: s_endpgm entry: %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4 - %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %gep, i64 %in seq_cst + %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %gep, i64 %in syncscope("workgroup") seq_cst store i64 %tmp0, i64 addrspace(1)* %out2 ret void } @@ -2176,10 +2122,8 @@ ; CI-NEXT: s_mov_b32 s5, s1 ; CI-NEXT: v_mov_b32_e32 v0, s2 ; CI-NEXT: v_mov_b32_e32 v1, s3 -; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; CI-NEXT: s_waitcnt lgkmcnt(0) ; CI-NEXT: buffer_atomic_umax_x2 v[0:1], off, s[4:7], 0 -; CI-NEXT: s_waitcnt vmcnt(0) -; CI-NEXT: buffer_wbinvl1_vol ; CI-NEXT: s_endpgm ; ; VI-LABEL: atomic_umax_i64: @@ -2192,10 +2136,8 @@ ; VI-NEXT: s_mov_b32 s5, s1 ; VI-NEXT: v_mov_b32_e32 v0, s2 ; VI-NEXT: v_mov_b32_e32 v1, s3 -; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; VI-NEXT: s_waitcnt lgkmcnt(0) ; VI-NEXT: buffer_atomic_umax_x2 v[0:1], off, s[4:7], 0 -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: buffer_wbinvl1_vol ; VI-NEXT: s_endpgm ; ; GFX9-LABEL: atomic_umax_i64: @@ -2205,13 +2147,11 @@ ; GFX9-NEXT: s_waitcnt lgkmcnt(0) ; GFX9-NEXT: v_mov_b32_e32 v0, s2 ; GFX9-NEXT: v_mov_b32_e32 v1, s3 -; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: s_waitcnt lgkmcnt(0) ; GFX9-NEXT: global_atomic_umax_x2 v2, v[0:1], s[0:1] -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: s_endpgm entry: - %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %out, i64 %in seq_cst + %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %out, i64 %in syncscope("workgroup") seq_cst ret void } @@ -2227,12 +2167,11 @@ ; CI-NEXT: s_mov_b32 s1, s5 ; CI-NEXT: v_mov_b32_e32 v0, s8 ; CI-NEXT: v_mov_b32_e32 v1, s9 -; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; CI-NEXT: s_waitcnt lgkmcnt(0) ; CI-NEXT: buffer_atomic_umax_x2 v[0:1], off, s[0:3], 0 glc -; CI-NEXT: s_waitcnt vmcnt(0) -; CI-NEXT: buffer_wbinvl1_vol ; CI-NEXT: s_mov_b32 s0, s6 ; CI-NEXT: s_mov_b32 s1, s7 +; CI-NEXT: s_waitcnt vmcnt(0) ; CI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; CI-NEXT: s_endpgm ; @@ -2247,12 +2186,11 @@ ; VI-NEXT: s_mov_b32 s1, s5 ; VI-NEXT: v_mov_b32_e32 v0, s8 ; VI-NEXT: v_mov_b32_e32 v1, s9 -; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; VI-NEXT: s_waitcnt lgkmcnt(0) ; VI-NEXT: buffer_atomic_umax_x2 v[0:1], off, s[0:3], 0 glc -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: buffer_wbinvl1_vol ; VI-NEXT: 
s_mov_b32 s0, s6 ; VI-NEXT: s_mov_b32 s1, s7 +; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; VI-NEXT: s_endpgm ; @@ -2264,14 +2202,13 @@ ; GFX9-NEXT: s_waitcnt lgkmcnt(0) ; GFX9-NEXT: v_mov_b32_e32 v0, s2 ; GFX9-NEXT: v_mov_b32_e32 v1, s3 -; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: s_waitcnt lgkmcnt(0) ; GFX9-NEXT: global_atomic_umax_x2 v[0:1], v2, v[0:1], s[4:5] glc ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[6:7] ; GFX9-NEXT: s_endpgm entry: - %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %out, i64 %in seq_cst + %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %out, i64 %in syncscope("workgroup") seq_cst store i64 %tmp0, i64 addrspace(1)* %out2 ret void } @@ -2289,10 +2226,8 @@ ; CI-NEXT: s_mov_b32 s7, 0xf000 ; CI-NEXT: s_mov_b32 s6, 0 ; CI-NEXT: v_mov_b32_e32 v2, s0 -; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; CI-NEXT: s_waitcnt lgkmcnt(0) ; CI-NEXT: buffer_atomic_umax_x2 v[0:1], v[2:3], s[4:7], 0 addr64 -; CI-NEXT: s_waitcnt vmcnt(0) -; CI-NEXT: buffer_wbinvl1_vol ; CI-NEXT: s_endpgm ; ; VI-LABEL: atomic_umax_i64_addr64: @@ -2307,10 +2242,8 @@ ; VI-NEXT: v_mov_b32_e32 v3, s1 ; VI-NEXT: v_mov_b32_e32 v1, s7 ; VI-NEXT: v_mov_b32_e32 v2, s0 -; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; VI-NEXT: s_waitcnt lgkmcnt(0) ; VI-NEXT: flat_atomic_umax_x2 v[2:3], v[0:1] -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: buffer_wbinvl1_vol ; VI-NEXT: s_endpgm ; ; GFX9-LABEL: atomic_umax_i64_addr64: @@ -2324,14 +2257,12 @@ ; GFX9-NEXT: s_add_u32 s0, s4, s0 ; GFX9-NEXT: v_mov_b32_e32 v1, s7 ; GFX9-NEXT: s_addc_u32 s1, s5, s1 -; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: s_waitcnt lgkmcnt(0) ; GFX9-NEXT: global_atomic_umax_x2 v2, v[0:1], s[0:1] -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: s_endpgm entry: %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index - %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %ptr, i64 %in seq_cst + %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %ptr, i64 %in syncscope("workgroup") seq_cst ret void } @@ -2351,10 +2282,9 @@ ; CI-NEXT: s_mov_b32 s2, 0 ; CI-NEXT: s_mov_b32 s3, s11 ; CI-NEXT: v_mov_b32_e32 v3, s5 -; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; CI-NEXT: s_waitcnt lgkmcnt(0) ; CI-NEXT: buffer_atomic_umax_x2 v[0:1], v[2:3], s[0:3], 0 addr64 glc ; CI-NEXT: s_waitcnt vmcnt(0) -; CI-NEXT: buffer_wbinvl1_vol ; CI-NEXT: buffer_store_dwordx2 v[0:1], off, s[8:11], 0 ; CI-NEXT: s_endpgm ; @@ -2369,14 +2299,13 @@ ; VI-NEXT: s_addc_u32 s1, s1, s5 ; VI-NEXT: v_mov_b32_e32 v3, s1 ; VI-NEXT: v_mov_b32_e32 v2, s0 -; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; VI-NEXT: s_waitcnt lgkmcnt(0) ; VI-NEXT: flat_atomic_umax_x2 v[0:1], v[2:3], v[0:1] glc -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: buffer_wbinvl1_vol ; VI-NEXT: s_mov_b32 s7, 0xf000 ; VI-NEXT: s_mov_b32 s6, -1 ; VI-NEXT: s_mov_b32 s4, s2 ; VI-NEXT: s_mov_b32 s5, s3 +; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 ; VI-NEXT: s_endpgm ; @@ -2390,15 +2319,14 @@ ; GFX9-NEXT: s_lshl_b64 s[4:5], s[6:7], 3 ; GFX9-NEXT: s_add_u32 s0, s0, s4 ; GFX9-NEXT: s_addc_u32 s1, s1, s5 -; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: s_waitcnt lgkmcnt(0) ; GFX9-NEXT: global_atomic_umax_x2 v[0:1], v2, v[0:1], s[0:1] glc ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3] ; GFX9-NEXT: s_endpgm entry: %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index - 
%tmp0 = atomicrmw volatile umax i64 addrspace(1)* %ptr, i64 %in seq_cst + %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %ptr, i64 %in syncscope("workgroup") seq_cst store i64 %tmp0, i64 addrspace(1)* %out2 ret void } @@ -2412,10 +2340,8 @@ ; CI-NEXT: v_mov_b32_e32 v1, s3 ; CI-NEXT: s_mov_b32 s3, 0xf000 ; CI-NEXT: s_mov_b32 s2, -1 -; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; CI-NEXT: s_waitcnt lgkmcnt(0) ; CI-NEXT: buffer_atomic_smin_x2 v[0:1], off, s[0:3], 0 offset:32 -; CI-NEXT: s_waitcnt vmcnt(0) -; CI-NEXT: buffer_wbinvl1_vol ; CI-NEXT: s_endpgm ; ; VI-LABEL: atomic_min_i64_offset: @@ -2426,10 +2352,8 @@ ; VI-NEXT: v_mov_b32_e32 v1, s3 ; VI-NEXT: s_mov_b32 s3, 0xf000 ; VI-NEXT: s_mov_b32 s2, -1 -; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; VI-NEXT: s_waitcnt lgkmcnt(0) ; VI-NEXT: buffer_atomic_smin_x2 v[0:1], off, s[0:3], 0 offset:32 -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: buffer_wbinvl1_vol ; VI-NEXT: s_endpgm ; ; GFX9-LABEL: atomic_min_i64_offset: @@ -2439,14 +2363,12 @@ ; GFX9-NEXT: s_waitcnt lgkmcnt(0) ; GFX9-NEXT: v_mov_b32_e32 v0, s2 ; GFX9-NEXT: v_mov_b32_e32 v1, s3 -; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: s_waitcnt lgkmcnt(0) ; GFX9-NEXT: global_atomic_smin_x2 v2, v[0:1], s[0:1] offset:32 -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: s_endpgm entry: %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4 - %tmp0 = atomicrmw volatile min i64 addrspace(1)* %gep, i64 %in seq_cst + %tmp0 = atomicrmw volatile min i64 addrspace(1)* %gep, i64 %in syncscope("workgroup") seq_cst ret void } @@ -2464,10 +2386,9 @@ ; CI-NEXT: v_mov_b32_e32 v1, s9 ; CI-NEXT: s_mov_b32 s6, s2 ; CI-NEXT: s_mov_b32 s7, s3 -; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; CI-NEXT: s_waitcnt lgkmcnt(0) ; CI-NEXT: buffer_atomic_smin_x2 v[0:1], off, s[4:7], 0 offset:32 glc ; CI-NEXT: s_waitcnt vmcnt(0) -; CI-NEXT: buffer_wbinvl1_vol ; CI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; CI-NEXT: s_endpgm ; @@ -2484,10 +2405,9 @@ ; VI-NEXT: v_mov_b32_e32 v1, s9 ; VI-NEXT: s_mov_b32 s6, s2 ; VI-NEXT: s_mov_b32 s7, s3 -; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; VI-NEXT: s_waitcnt lgkmcnt(0) ; VI-NEXT: buffer_atomic_smin_x2 v[0:1], off, s[4:7], 0 offset:32 glc ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: buffer_wbinvl1_vol ; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; VI-NEXT: s_endpgm ; @@ -2499,15 +2419,14 @@ ; GFX9-NEXT: s_waitcnt lgkmcnt(0) ; GFX9-NEXT: v_mov_b32_e32 v0, s2 ; GFX9-NEXT: v_mov_b32_e32 v1, s3 -; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: s_waitcnt lgkmcnt(0) ; GFX9-NEXT: global_atomic_smin_x2 v[0:1], v2, v[0:1], s[4:5] offset:32 glc ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[6:7] ; GFX9-NEXT: s_endpgm entry: %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4 - %tmp0 = atomicrmw volatile min i64 addrspace(1)* %gep, i64 %in seq_cst + %tmp0 = atomicrmw volatile min i64 addrspace(1)* %gep, i64 %in syncscope("workgroup") seq_cst store i64 %tmp0, i64 addrspace(1)* %out2 ret void } @@ -2525,10 +2444,8 @@ ; CI-NEXT: s_mov_b32 s7, 0xf000 ; CI-NEXT: s_mov_b32 s6, 0 ; CI-NEXT: v_mov_b32_e32 v2, s0 -; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; CI-NEXT: s_waitcnt lgkmcnt(0) ; CI-NEXT: buffer_atomic_smin_x2 v[0:1], v[2:3], s[4:7], 0 addr64 offset:32 -; CI-NEXT: s_waitcnt vmcnt(0) -; CI-NEXT: buffer_wbinvl1_vol ; CI-NEXT: s_endpgm ; ; VI-LABEL: atomic_min_i64_addr64_offset: @@ -2545,10 +2462,8 @@ ; VI-NEXT: v_mov_b32_e32 v3, s1 ; VI-NEXT: v_mov_b32_e32 v1, s7 ; VI-NEXT: 
v_mov_b32_e32 v2, s0 -; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; VI-NEXT: s_waitcnt lgkmcnt(0) ; VI-NEXT: flat_atomic_smin_x2 v[2:3], v[0:1] -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: buffer_wbinvl1_vol ; VI-NEXT: s_endpgm ; ; GFX9-LABEL: atomic_min_i64_addr64_offset: @@ -2562,15 +2477,13 @@ ; GFX9-NEXT: s_add_u32 s0, s4, s0 ; GFX9-NEXT: v_mov_b32_e32 v1, s7 ; GFX9-NEXT: s_addc_u32 s1, s5, s1 -; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: s_waitcnt lgkmcnt(0) ; GFX9-NEXT: global_atomic_smin_x2 v2, v[0:1], s[0:1] offset:32 -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: s_endpgm entry: %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4 - %tmp0 = atomicrmw volatile min i64 addrspace(1)* %gep, i64 %in seq_cst + %tmp0 = atomicrmw volatile min i64 addrspace(1)* %gep, i64 %in syncscope("workgroup") seq_cst ret void } @@ -2590,10 +2503,9 @@ ; CI-NEXT: s_mov_b32 s2, 0 ; CI-NEXT: s_mov_b32 s3, s11 ; CI-NEXT: v_mov_b32_e32 v3, s5 -; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; CI-NEXT: s_waitcnt lgkmcnt(0) ; CI-NEXT: buffer_atomic_smin_x2 v[0:1], v[2:3], s[0:3], 0 addr64 offset:32 glc ; CI-NEXT: s_waitcnt vmcnt(0) -; CI-NEXT: buffer_wbinvl1_vol ; CI-NEXT: buffer_store_dwordx2 v[0:1], off, s[8:11], 0 ; CI-NEXT: s_endpgm ; @@ -2610,14 +2522,13 @@ ; VI-NEXT: s_addc_u32 s1, s1, 0 ; VI-NEXT: v_mov_b32_e32 v3, s1 ; VI-NEXT: v_mov_b32_e32 v2, s0 -; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; VI-NEXT: s_waitcnt lgkmcnt(0) ; VI-NEXT: flat_atomic_smin_x2 v[0:1], v[2:3], v[0:1] glc -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: buffer_wbinvl1_vol ; VI-NEXT: s_mov_b32 s7, 0xf000 ; VI-NEXT: s_mov_b32 s6, -1 ; VI-NEXT: s_mov_b32 s4, s2 ; VI-NEXT: s_mov_b32 s5, s3 +; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 ; VI-NEXT: s_endpgm ; @@ -2631,16 +2542,15 @@ ; GFX9-NEXT: s_lshl_b64 s[4:5], s[6:7], 3 ; GFX9-NEXT: s_add_u32 s0, s0, s4 ; GFX9-NEXT: s_addc_u32 s1, s1, s5 -; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: s_waitcnt lgkmcnt(0) ; GFX9-NEXT: global_atomic_smin_x2 v[0:1], v2, v[0:1], s[0:1] offset:32 glc ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3] ; GFX9-NEXT: s_endpgm entry: %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4 - %tmp0 = atomicrmw volatile min i64 addrspace(1)* %gep, i64 %in seq_cst + %tmp0 = atomicrmw volatile min i64 addrspace(1)* %gep, i64 %in syncscope("workgroup") seq_cst store i64 %tmp0, i64 addrspace(1)* %out2 ret void } @@ -2656,10 +2566,8 @@ ; CI-NEXT: s_mov_b32 s5, s1 ; CI-NEXT: v_mov_b32_e32 v0, s2 ; CI-NEXT: v_mov_b32_e32 v1, s3 -; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; CI-NEXT: s_waitcnt lgkmcnt(0) ; CI-NEXT: buffer_atomic_smin_x2 v[0:1], off, s[4:7], 0 -; CI-NEXT: s_waitcnt vmcnt(0) -; CI-NEXT: buffer_wbinvl1_vol ; CI-NEXT: s_endpgm ; ; VI-LABEL: atomic_min_i64: @@ -2672,10 +2580,8 @@ ; VI-NEXT: s_mov_b32 s5, s1 ; VI-NEXT: v_mov_b32_e32 v0, s2 ; VI-NEXT: v_mov_b32_e32 v1, s3 -; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; VI-NEXT: s_waitcnt lgkmcnt(0) ; VI-NEXT: buffer_atomic_smin_x2 v[0:1], off, s[4:7], 0 -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: buffer_wbinvl1_vol ; VI-NEXT: s_endpgm ; ; GFX9-LABEL: atomic_min_i64: @@ -2685,13 +2591,11 @@ ; GFX9-NEXT: s_waitcnt lgkmcnt(0) ; GFX9-NEXT: v_mov_b32_e32 v0, s2 ; GFX9-NEXT: v_mov_b32_e32 v1, s3 -; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: 
s_waitcnt lgkmcnt(0) ; GFX9-NEXT: global_atomic_smin_x2 v2, v[0:1], s[0:1] -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: s_endpgm entry: - %tmp0 = atomicrmw volatile min i64 addrspace(1)* %out, i64 %in seq_cst + %tmp0 = atomicrmw volatile min i64 addrspace(1)* %out, i64 %in syncscope("workgroup") seq_cst ret void } @@ -2707,12 +2611,11 @@ ; CI-NEXT: s_mov_b32 s1, s5 ; CI-NEXT: v_mov_b32_e32 v0, s8 ; CI-NEXT: v_mov_b32_e32 v1, s9 -; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; CI-NEXT: s_waitcnt lgkmcnt(0) ; CI-NEXT: buffer_atomic_smin_x2 v[0:1], off, s[0:3], 0 glc -; CI-NEXT: s_waitcnt vmcnt(0) -; CI-NEXT: buffer_wbinvl1_vol ; CI-NEXT: s_mov_b32 s0, s6 ; CI-NEXT: s_mov_b32 s1, s7 +; CI-NEXT: s_waitcnt vmcnt(0) ; CI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; CI-NEXT: s_endpgm ; @@ -2727,12 +2630,11 @@ ; VI-NEXT: s_mov_b32 s1, s5 ; VI-NEXT: v_mov_b32_e32 v0, s8 ; VI-NEXT: v_mov_b32_e32 v1, s9 -; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; VI-NEXT: s_waitcnt lgkmcnt(0) ; VI-NEXT: buffer_atomic_smin_x2 v[0:1], off, s[0:3], 0 glc -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: buffer_wbinvl1_vol ; VI-NEXT: s_mov_b32 s0, s6 ; VI-NEXT: s_mov_b32 s1, s7 +; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; VI-NEXT: s_endpgm ; @@ -2744,14 +2646,13 @@ ; GFX9-NEXT: s_waitcnt lgkmcnt(0) ; GFX9-NEXT: v_mov_b32_e32 v0, s2 ; GFX9-NEXT: v_mov_b32_e32 v1, s3 -; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: s_waitcnt lgkmcnt(0) ; GFX9-NEXT: global_atomic_smin_x2 v[0:1], v2, v[0:1], s[4:5] glc ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[6:7] ; GFX9-NEXT: s_endpgm entry: - %tmp0 = atomicrmw volatile min i64 addrspace(1)* %out, i64 %in seq_cst + %tmp0 = atomicrmw volatile min i64 addrspace(1)* %out, i64 %in syncscope("workgroup") seq_cst store i64 %tmp0, i64 addrspace(1)* %out2 ret void } @@ -2769,10 +2670,8 @@ ; CI-NEXT: s_mov_b32 s7, 0xf000 ; CI-NEXT: s_mov_b32 s6, 0 ; CI-NEXT: v_mov_b32_e32 v2, s0 -; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; CI-NEXT: s_waitcnt lgkmcnt(0) ; CI-NEXT: buffer_atomic_smin_x2 v[0:1], v[2:3], s[4:7], 0 addr64 -; CI-NEXT: s_waitcnt vmcnt(0) -; CI-NEXT: buffer_wbinvl1_vol ; CI-NEXT: s_endpgm ; ; VI-LABEL: atomic_min_i64_addr64: @@ -2787,10 +2686,8 @@ ; VI-NEXT: v_mov_b32_e32 v3, s1 ; VI-NEXT: v_mov_b32_e32 v1, s7 ; VI-NEXT: v_mov_b32_e32 v2, s0 -; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; VI-NEXT: s_waitcnt lgkmcnt(0) ; VI-NEXT: flat_atomic_smin_x2 v[2:3], v[0:1] -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: buffer_wbinvl1_vol ; VI-NEXT: s_endpgm ; ; GFX9-LABEL: atomic_min_i64_addr64: @@ -2804,14 +2701,12 @@ ; GFX9-NEXT: s_add_u32 s0, s4, s0 ; GFX9-NEXT: v_mov_b32_e32 v1, s7 ; GFX9-NEXT: s_addc_u32 s1, s5, s1 -; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: s_waitcnt lgkmcnt(0) ; GFX9-NEXT: global_atomic_smin_x2 v2, v[0:1], s[0:1] -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: s_endpgm entry: %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index - %tmp0 = atomicrmw volatile min i64 addrspace(1)* %ptr, i64 %in seq_cst + %tmp0 = atomicrmw volatile min i64 addrspace(1)* %ptr, i64 %in syncscope("workgroup") seq_cst ret void } @@ -2831,10 +2726,9 @@ ; CI-NEXT: s_mov_b32 s2, 0 ; CI-NEXT: s_mov_b32 s3, s11 ; CI-NEXT: v_mov_b32_e32 v3, s5 -; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; CI-NEXT: s_waitcnt lgkmcnt(0) ; CI-NEXT: buffer_atomic_smin_x2 v[0:1], v[2:3], s[0:3], 0 addr64 glc ; CI-NEXT: s_waitcnt 
vmcnt(0) -; CI-NEXT: buffer_wbinvl1_vol ; CI-NEXT: buffer_store_dwordx2 v[0:1], off, s[8:11], 0 ; CI-NEXT: s_endpgm ; @@ -2849,14 +2743,13 @@ ; VI-NEXT: s_addc_u32 s1, s1, s5 ; VI-NEXT: v_mov_b32_e32 v3, s1 ; VI-NEXT: v_mov_b32_e32 v2, s0 -; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; VI-NEXT: s_waitcnt lgkmcnt(0) ; VI-NEXT: flat_atomic_smin_x2 v[0:1], v[2:3], v[0:1] glc -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: buffer_wbinvl1_vol ; VI-NEXT: s_mov_b32 s7, 0xf000 ; VI-NEXT: s_mov_b32 s6, -1 ; VI-NEXT: s_mov_b32 s4, s2 ; VI-NEXT: s_mov_b32 s5, s3 +; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 ; VI-NEXT: s_endpgm ; @@ -2870,15 +2763,14 @@ ; GFX9-NEXT: s_lshl_b64 s[4:5], s[6:7], 3 ; GFX9-NEXT: s_add_u32 s0, s0, s4 ; GFX9-NEXT: s_addc_u32 s1, s1, s5 -; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: s_waitcnt lgkmcnt(0) ; GFX9-NEXT: global_atomic_smin_x2 v[0:1], v2, v[0:1], s[0:1] glc ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3] ; GFX9-NEXT: s_endpgm entry: %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index - %tmp0 = atomicrmw volatile min i64 addrspace(1)* %ptr, i64 %in seq_cst + %tmp0 = atomicrmw volatile min i64 addrspace(1)* %ptr, i64 %in syncscope("workgroup") seq_cst store i64 %tmp0, i64 addrspace(1)* %out2 ret void } @@ -2892,10 +2784,8 @@ ; CI-NEXT: v_mov_b32_e32 v1, s3 ; CI-NEXT: s_mov_b32 s3, 0xf000 ; CI-NEXT: s_mov_b32 s2, -1 -; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; CI-NEXT: s_waitcnt lgkmcnt(0) ; CI-NEXT: buffer_atomic_umin_x2 v[0:1], off, s[0:3], 0 offset:32 -; CI-NEXT: s_waitcnt vmcnt(0) -; CI-NEXT: buffer_wbinvl1_vol ; CI-NEXT: s_endpgm ; ; VI-LABEL: atomic_umin_i64_offset: @@ -2906,10 +2796,8 @@ ; VI-NEXT: v_mov_b32_e32 v1, s3 ; VI-NEXT: s_mov_b32 s3, 0xf000 ; VI-NEXT: s_mov_b32 s2, -1 -; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; VI-NEXT: s_waitcnt lgkmcnt(0) ; VI-NEXT: buffer_atomic_umin_x2 v[0:1], off, s[0:3], 0 offset:32 -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: buffer_wbinvl1_vol ; VI-NEXT: s_endpgm ; ; GFX9-LABEL: atomic_umin_i64_offset: @@ -2919,14 +2807,12 @@ ; GFX9-NEXT: s_waitcnt lgkmcnt(0) ; GFX9-NEXT: v_mov_b32_e32 v0, s2 ; GFX9-NEXT: v_mov_b32_e32 v1, s3 -; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: s_waitcnt lgkmcnt(0) ; GFX9-NEXT: global_atomic_umin_x2 v2, v[0:1], s[0:1] offset:32 -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: s_endpgm entry: %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4 - %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %gep, i64 %in seq_cst + %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %gep, i64 %in syncscope("workgroup") seq_cst ret void } @@ -2944,10 +2830,9 @@ ; CI-NEXT: v_mov_b32_e32 v1, s9 ; CI-NEXT: s_mov_b32 s6, s2 ; CI-NEXT: s_mov_b32 s7, s3 -; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; CI-NEXT: s_waitcnt lgkmcnt(0) ; CI-NEXT: buffer_atomic_umin_x2 v[0:1], off, s[4:7], 0 offset:32 glc ; CI-NEXT: s_waitcnt vmcnt(0) -; CI-NEXT: buffer_wbinvl1_vol ; CI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; CI-NEXT: s_endpgm ; @@ -2964,10 +2849,9 @@ ; VI-NEXT: v_mov_b32_e32 v1, s9 ; VI-NEXT: s_mov_b32 s6, s2 ; VI-NEXT: s_mov_b32 s7, s3 -; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; VI-NEXT: s_waitcnt lgkmcnt(0) ; VI-NEXT: buffer_atomic_umin_x2 v[0:1], off, s[4:7], 0 offset:32 glc ; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: buffer_wbinvl1_vol ; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; VI-NEXT: s_endpgm ; @@ -2979,15 +2863,14 @@ ; GFX9-NEXT: 
s_waitcnt lgkmcnt(0) ; GFX9-NEXT: v_mov_b32_e32 v0, s2 ; GFX9-NEXT: v_mov_b32_e32 v1, s3 -; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: s_waitcnt lgkmcnt(0) ; GFX9-NEXT: global_atomic_umin_x2 v[0:1], v2, v[0:1], s[4:5] offset:32 glc ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[6:7] ; GFX9-NEXT: s_endpgm entry: %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4 - %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %gep, i64 %in seq_cst + %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %gep, i64 %in syncscope("workgroup") seq_cst store i64 %tmp0, i64 addrspace(1)* %out2 ret void } @@ -3005,10 +2888,8 @@ ; CI-NEXT: s_mov_b32 s7, 0xf000 ; CI-NEXT: s_mov_b32 s6, 0 ; CI-NEXT: v_mov_b32_e32 v2, s0 -; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; CI-NEXT: s_waitcnt lgkmcnt(0) ; CI-NEXT: buffer_atomic_umin_x2 v[0:1], v[2:3], s[4:7], 0 addr64 offset:32 -; CI-NEXT: s_waitcnt vmcnt(0) -; CI-NEXT: buffer_wbinvl1_vol ; CI-NEXT: s_endpgm ; ; VI-LABEL: atomic_umin_i64_addr64_offset: @@ -3025,10 +2906,8 @@ ; VI-NEXT: v_mov_b32_e32 v3, s1 ; VI-NEXT: v_mov_b32_e32 v1, s7 ; VI-NEXT: v_mov_b32_e32 v2, s0 -; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; VI-NEXT: s_waitcnt lgkmcnt(0) ; VI-NEXT: flat_atomic_umin_x2 v[2:3], v[0:1] -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: buffer_wbinvl1_vol ; VI-NEXT: s_endpgm ; ; GFX9-LABEL: atomic_umin_i64_addr64_offset: @@ -3042,15 +2921,13 @@ ; GFX9-NEXT: s_add_u32 s0, s4, s0 ; GFX9-NEXT: v_mov_b32_e32 v1, s7 ; GFX9-NEXT: s_addc_u32 s1, s5, s1 -; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: s_waitcnt lgkmcnt(0) ; GFX9-NEXT: global_atomic_umin_x2 v2, v[0:1], s[0:1] offset:32 -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: s_endpgm entry: %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4 - %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %gep, i64 %in seq_cst + %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %gep, i64 %in syncscope("workgroup") seq_cst ret void } @@ -3070,10 +2947,9 @@ ; CI-NEXT: s_mov_b32 s2, 0 ; CI-NEXT: s_mov_b32 s3, s11 ; CI-NEXT: v_mov_b32_e32 v3, s5 -; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; CI-NEXT: s_waitcnt lgkmcnt(0) ; CI-NEXT: buffer_atomic_umin_x2 v[0:1], v[2:3], s[0:3], 0 addr64 offset:32 glc ; CI-NEXT: s_waitcnt vmcnt(0) -; CI-NEXT: buffer_wbinvl1_vol ; CI-NEXT: buffer_store_dwordx2 v[0:1], off, s[8:11], 0 ; CI-NEXT: s_endpgm ; @@ -3090,14 +2966,13 @@ ; VI-NEXT: s_addc_u32 s1, s1, 0 ; VI-NEXT: v_mov_b32_e32 v3, s1 ; VI-NEXT: v_mov_b32_e32 v2, s0 -; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; VI-NEXT: s_waitcnt lgkmcnt(0) ; VI-NEXT: flat_atomic_umin_x2 v[0:1], v[2:3], v[0:1] glc -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: buffer_wbinvl1_vol ; VI-NEXT: s_mov_b32 s7, 0xf000 ; VI-NEXT: s_mov_b32 s6, -1 ; VI-NEXT: s_mov_b32 s4, s2 ; VI-NEXT: s_mov_b32 s5, s3 +; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 ; VI-NEXT: s_endpgm ; @@ -3111,16 +2986,15 @@ ; GFX9-NEXT: s_lshl_b64 s[4:5], s[6:7], 3 ; GFX9-NEXT: s_add_u32 s0, s0, s4 ; GFX9-NEXT: s_addc_u32 s1, s1, s5 -; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: s_waitcnt lgkmcnt(0) ; GFX9-NEXT: global_atomic_umin_x2 v[0:1], v2, v[0:1], s[0:1] offset:32 glc ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3] ; GFX9-NEXT: s_endpgm entry: %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index %gep = 
getelementptr i64, i64 addrspace(1)* %ptr, i64 4 - %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %gep, i64 %in seq_cst + %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %gep, i64 %in syncscope("workgroup") seq_cst store i64 %tmp0, i64 addrspace(1)* %out2 ret void } @@ -3136,10 +3010,8 @@ ; CI-NEXT: s_mov_b32 s5, s1 ; CI-NEXT: v_mov_b32_e32 v0, s2 ; CI-NEXT: v_mov_b32_e32 v1, s3 -; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; CI-NEXT: s_waitcnt lgkmcnt(0) ; CI-NEXT: buffer_atomic_umin_x2 v[0:1], off, s[4:7], 0 -; CI-NEXT: s_waitcnt vmcnt(0) -; CI-NEXT: buffer_wbinvl1_vol ; CI-NEXT: s_endpgm ; ; VI-LABEL: atomic_umin_i64: @@ -3152,10 +3024,8 @@ ; VI-NEXT: s_mov_b32 s5, s1 ; VI-NEXT: v_mov_b32_e32 v0, s2 ; VI-NEXT: v_mov_b32_e32 v1, s3 -; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; VI-NEXT: s_waitcnt lgkmcnt(0) ; VI-NEXT: buffer_atomic_umin_x2 v[0:1], off, s[4:7], 0 -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: buffer_wbinvl1_vol ; VI-NEXT: s_endpgm ; ; GFX9-LABEL: atomic_umin_i64: @@ -3165,13 +3035,11 @@ ; GFX9-NEXT: s_waitcnt lgkmcnt(0) ; GFX9-NEXT: v_mov_b32_e32 v0, s2 ; GFX9-NEXT: v_mov_b32_e32 v1, s3 -; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: s_waitcnt lgkmcnt(0) ; GFX9-NEXT: global_atomic_umin_x2 v2, v[0:1], s[0:1] -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: s_endpgm entry: - %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %out, i64 %in seq_cst + %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %out, i64 %in syncscope("workgroup") seq_cst ret void } @@ -3187,12 +3055,11 @@ ; CI-NEXT: s_mov_b32 s1, s5 ; CI-NEXT: v_mov_b32_e32 v0, s8 ; CI-NEXT: v_mov_b32_e32 v1, s9 -; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; CI-NEXT: s_waitcnt lgkmcnt(0) ; CI-NEXT: buffer_atomic_umin_x2 v[0:1], off, s[0:3], 0 glc -; CI-NEXT: s_waitcnt vmcnt(0) -; CI-NEXT: buffer_wbinvl1_vol ; CI-NEXT: s_mov_b32 s0, s6 ; CI-NEXT: s_mov_b32 s1, s7 +; CI-NEXT: s_waitcnt vmcnt(0) ; CI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; CI-NEXT: s_endpgm ; @@ -3207,12 +3074,11 @@ ; VI-NEXT: s_mov_b32 s1, s5 ; VI-NEXT: v_mov_b32_e32 v0, s8 ; VI-NEXT: v_mov_b32_e32 v1, s9 -; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; VI-NEXT: s_waitcnt lgkmcnt(0) ; VI-NEXT: buffer_atomic_umin_x2 v[0:1], off, s[0:3], 0 glc -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: buffer_wbinvl1_vol ; VI-NEXT: s_mov_b32 s0, s6 ; VI-NEXT: s_mov_b32 s1, s7 +; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; VI-NEXT: s_endpgm ; @@ -3224,14 +3090,13 @@ ; GFX9-NEXT: s_waitcnt lgkmcnt(0) ; GFX9-NEXT: v_mov_b32_e32 v0, s2 ; GFX9-NEXT: v_mov_b32_e32 v1, s3 -; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: s_waitcnt lgkmcnt(0) ; GFX9-NEXT: global_atomic_umin_x2 v[0:1], v2, v[0:1], s[4:5] glc ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[6:7] ; GFX9-NEXT: s_endpgm entry: - %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %out, i64 %in seq_cst + %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %out, i64 %in syncscope("workgroup") seq_cst store i64 %tmp0, i64 addrspace(1)* %out2 ret void } @@ -3249,10 +3114,8 @@ ; CI-NEXT: s_mov_b32 s7, 0xf000 ; CI-NEXT: s_mov_b32 s6, 0 ; CI-NEXT: v_mov_b32_e32 v2, s0 -; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; CI-NEXT: s_waitcnt lgkmcnt(0) ; CI-NEXT: buffer_atomic_umin_x2 v[0:1], v[2:3], s[4:7], 0 addr64 -; CI-NEXT: s_waitcnt vmcnt(0) -; CI-NEXT: buffer_wbinvl1_vol ; CI-NEXT: s_endpgm ; ; VI-LABEL: atomic_umin_i64_addr64: @@ -3267,10 +3130,8 @@ ; VI-NEXT: v_mov_b32_e32 v3, s1 ; 
VI-NEXT: v_mov_b32_e32 v1, s7 ; VI-NEXT: v_mov_b32_e32 v2, s0 -; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; VI-NEXT: s_waitcnt lgkmcnt(0) ; VI-NEXT: flat_atomic_umin_x2 v[2:3], v[0:1] -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: buffer_wbinvl1_vol ; VI-NEXT: s_endpgm ; ; GFX9-LABEL: atomic_umin_i64_addr64: @@ -3284,14 +3145,12 @@ ; GFX9-NEXT: s_add_u32 s0, s4, s0 ; GFX9-NEXT: v_mov_b32_e32 v1, s7 ; GFX9-NEXT: s_addc_u32 s1, s5, s1 -; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: s_waitcnt lgkmcnt(0) ; GFX9-NEXT: global_atomic_umin_x2 v2, v[0:1], s[0:1] -; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: s_endpgm entry: %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index - %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %ptr, i64 %in seq_cst + %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %ptr, i64 %in syncscope("workgroup") seq_cst ret void } @@ -3311,10 +3170,9 @@ ; CI-NEXT: s_mov_b32 s2, 0 ; CI-NEXT: s_mov_b32 s3, s11 ; CI-NEXT: v_mov_b32_e32 v3, s5 -; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; CI-NEXT: s_waitcnt lgkmcnt(0) ; CI-NEXT: buffer_atomic_umin_x2 v[0:1], v[2:3], s[0:3], 0 addr64 glc ; CI-NEXT: s_waitcnt vmcnt(0) -; CI-NEXT: buffer_wbinvl1_vol ; CI-NEXT: buffer_store_dwordx2 v[0:1], off, s[8:11], 0 ; CI-NEXT: s_endpgm ; @@ -3329,14 +3187,13 @@ ; VI-NEXT: s_addc_u32 s1, s1, s5 ; VI-NEXT: v_mov_b32_e32 v3, s1 ; VI-NEXT: v_mov_b32_e32 v2, s0 -; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; VI-NEXT: s_waitcnt lgkmcnt(0) ; VI-NEXT: flat_atomic_umin_x2 v[0:1], v[2:3], v[0:1] glc -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: buffer_wbinvl1_vol ; VI-NEXT: s_mov_b32 s7, 0xf000 ; VI-NEXT: s_mov_b32 s6, -1 ; VI-NEXT: s_mov_b32 s4, s2 ; VI-NEXT: s_mov_b32 s5, s3 +; VI-NEXT: s_waitcnt vmcnt(0) ; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 ; VI-NEXT: s_endpgm ; @@ -3350,15 +3207,14 @@ ; GFX9-NEXT: s_lshl_b64 s[4:5], s[6:7], 3 ; GFX9-NEXT: s_add_u32 s0, s0, s4 ; GFX9-NEXT: s_addc_u32 s1, s1, s5 -; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: s_waitcnt lgkmcnt(0) ; GFX9-NEXT: global_atomic_umin_x2 v[0:1], v2, v[0:1], s[0:1] glc ; GFX9-NEXT: s_waitcnt vmcnt(0) -; GFX9-NEXT: buffer_wbinvl1_vol ; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3] ; GFX9-NEXT: s_endpgm entry: %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index - %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %ptr, i64 %in seq_cst + %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %ptr, i64 %in syncscope("workgroup") seq_cst store i64 %tmp0, i64 addrspace(1)* %out2 ret void } diff --git a/llvm/test/CodeGen/AMDGPU/global_atomics_i64_min_max_system.ll b/llvm/test/CodeGen/AMDGPU/global_atomics_i64_min_max_system.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/global_atomics_i64_min_max_system.ll @@ -0,0 +1,3912 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -march=amdgcn -mcpu=bonaire -amdgpu-atomic-optimizations=false -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=CI %s +; RUN: llc -march=amdgcn -mcpu=tonga -amdgpu-atomic-optimizations=false -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=VI %s +; RUN: llc -march=amdgcn -mcpu=gfx900 -amdgpu-atomic-optimizations=false -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GFX9 %s + +define amdgpu_kernel void @atomic_max_i64_offset(i64 addrspace(1)* %out, i64 %in) { +; CI-LABEL: atomic_max_i64_offset: +; CI: ; %bb.0: ; %entry +; CI-NEXT: s_load_dwordx4 
s[0:3], s[0:1], 0x9
+; CI-NEXT: s_mov_b64 s[4:5], 0
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x8
+; CI-NEXT: s_mov_b32 s6, s2
+; CI-NEXT: s_mov_b32 s7, s3
+; CI-NEXT: s_mov_b32 s3, 0xf000
+; CI-NEXT: s_mov_b32 s2, -1
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: v_mov_b32_e32 v2, s8
+; CI-NEXT: v_mov_b32_e32 v3, s9
+; CI-NEXT: .LBB0_1: ; %atomicrmw.start
+; CI-NEXT: ; =>This Inner Loop Header: Depth=1
+; CI-NEXT: v_cmp_lt_i64_e32 vcc, s[6:7], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v0, s7
+; CI-NEXT: v_mov_b32_e32 v4, s6
+; CI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; CI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; CI-NEXT: v_mov_b32_e32 v7, v3
+; CI-NEXT: v_mov_b32_e32 v6, v2
+; CI-NEXT: v_mov_b32_e32 v5, v1
+; CI-NEXT: v_mov_b32_e32 v4, v0
+; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 offset:32 glc
+; CI-NEXT: s_waitcnt vmcnt(0)
+; CI-NEXT: buffer_wbinvl1_vol
+; CI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v2, v4
+; CI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; CI-NEXT: v_mov_b32_e32 v3, v5
+; CI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; CI-NEXT: s_cbranch_execnz .LBB0_1
+; CI-NEXT: ; %bb.2: ; %atomicrmw.end
+; CI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_max_i64_offset:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; VI-NEXT: s_mov_b64 s[8:9], 0
+; VI-NEXT: s_mov_b32 s7, 0xf000
+; VI-NEXT: s_mov_b32 s6, -1
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dwordx2 s[10:11], s[0:1], 0x20
+; VI-NEXT: s_add_u32 s4, s0, 32
+; VI-NEXT: s_addc_u32 s5, s1, 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s10
+; VI-NEXT: v_mov_b32_e32 v3, s11
+; VI-NEXT: .LBB0_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_cmp_lt_i64_e32 vcc, s[2:3], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v0, s3
+; VI-NEXT: v_mov_b32_e32 v4, s2
+; VI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; VI-NEXT: v_mov_b32_e32 v7, v3
+; VI-NEXT: v_mov_b32_e32 v6, v2
+; VI-NEXT: v_mov_b32_e32 v5, v1
+; VI-NEXT: v_mov_b32_e32 v4, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[4:7], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v2, v4
+; VI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; VI-NEXT: v_mov_b32_e32 v3, v5
+; VI-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; VI-NEXT: s_cbranch_execnz .LBB0_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_max_i64_offset:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x20
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, s6
+; GFX9-NEXT: v_mov_b32_e32 v3, s7
+; GFX9-NEXT: .LBB0_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, s[2:3], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v0, s3
+; GFX9-NEXT: v_mov_b32_e32 v5, s2
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v5, v2, vcc
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] offset:32 glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_execnz .LBB0_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+entry:
+ %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
+ %tmp0 = atomicrmw volatile max i64 addrspace(1)* %gep, i64 %in seq_cst
+ ret void
+}
+
+define amdgpu_kernel void @atomic_max_i64_ret_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
+; CI-LABEL: atomic_max_i64_ret_offset:
+; CI: ; %bb.0: ; %entry
+; CI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; CI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0xd
+; CI-NEXT: s_mov_b64 s[10:11], 0
+; CI-NEXT: s_mov_b32 s3, 0xf000
+; CI-NEXT: s_mov_b32 s2, -1
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: s_load_dwordx2 s[12:13], s[4:5], 0x8
+; CI-NEXT: s_mov_b64 s[0:1], s[4:5]
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: v_mov_b32_e32 v2, s12
+; CI-NEXT: v_mov_b32_e32 v3, s13
+; CI-NEXT: .LBB1_1: ; %atomicrmw.start
+; CI-NEXT: ; =>This Inner Loop Header: Depth=1
+; CI-NEXT: v_cmp_lt_i64_e32 vcc, s[8:9], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v0, s9
+; CI-NEXT: v_mov_b32_e32 v4, s8
+; CI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; CI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; CI-NEXT: v_mov_b32_e32 v7, v3
+; CI-NEXT: v_mov_b32_e32 v6, v2
+; CI-NEXT: v_mov_b32_e32 v5, v1
+; CI-NEXT: v_mov_b32_e32 v4, v0
+; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 offset:32 glc
+; CI-NEXT: s_waitcnt vmcnt(0)
+; CI-NEXT: buffer_wbinvl1_vol
+; CI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v2, v4
+; CI-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
+; CI-NEXT: v_mov_b32_e32 v3, v5
+; CI-NEXT: s_andn2_b64 exec, exec, s[10:11]
+; CI-NEXT: s_cbranch_execnz .LBB1_1
+; CI-NEXT: ; %bb.2: ; %atomicrmw.end
+; CI-NEXT: s_or_b64 exec, exec, s[10:11]
+; CI-NEXT: s_mov_b32 s3, 0xf000
+; CI-NEXT: s_mov_b32 s2, -1
+; CI-NEXT: s_mov_b32 s0, s6
+; CI-NEXT: s_mov_b32 s1, s7
+; CI-NEXT: buffer_store_dwordx2 v[4:5], off, s[0:3], 0
+; CI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_max_i64_ret_offset:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; VI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x34
+; VI-NEXT: s_mov_b64 s[10:11], 0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dwordx2 s[12:13], s[4:5], 0x20
+; VI-NEXT: s_add_u32 s0, s4, 32
+; VI-NEXT: s_addc_u32 s1, s5, 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s12
+; VI-NEXT: v_mov_b32_e32 v3, s13
+; VI-NEXT: .LBB1_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_cmp_lt_i64_e32 vcc, s[8:9], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v0, s9
+; VI-NEXT: v_mov_b32_e32 v4, s8
+; VI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; VI-NEXT: v_mov_b32_e32 v7, v3
+; VI-NEXT: v_mov_b32_e32 v6, v2
+; VI-NEXT: v_mov_b32_e32 v5, v1
+; VI-NEXT: v_mov_b32_e32 v4, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v2, v4
+; VI-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
+; VI-NEXT: v_mov_b32_e32 v3, v5
+; VI-NEXT: s_andn2_b64 exec, exec, s[10:11]
+; VI-NEXT: s_cbranch_execnz .LBB1_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_or_b64 exec, exec, s[10:11]
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: s_mov_b32 s0, s6
+; VI-NEXT: s_mov_b32 s1, s7
+; VI-NEXT: buffer_store_dwordx2 v[4:5], off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_max_i64_ret_offset:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x20
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v0, s8
+; GFX9-NEXT: v_mov_b32_e32 v1, s9
+; GFX9-NEXT: .LBB1_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_mov_b32_e32 v6, v1
+; GFX9-NEXT: v_mov_b32_e32 v5, v0
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, s[2:3], v[5:6]
+; GFX9-NEXT: v_mov_b32_e32 v0, s3
+; GFX9-NEXT: v_mov_b32_e32 v1, s2
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v0, v6, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v2, v[3:6], s[4:5] offset:32 glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[5:6]
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB1_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[6:7]
+; GFX9-NEXT: s_endpgm
+entry:
+ %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
+ %tmp0 = atomicrmw volatile max i64 addrspace(1)* %gep, i64 %in seq_cst
+ store i64 %tmp0, i64 addrspace(1)* %out2
+ ret void
+}
+
+define amdgpu_kernel void @atomic_max_i64_addr64_offset(i64 addrspace(1)* %out, i64 %in, i64 %index) {
+; CI-LABEL: atomic_max_i64_addr64_offset:
+; CI: ; %bb.0: ; %entry
+; CI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0xd
+; CI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
+; CI-NEXT: s_mov_b32 s7, 0xf000
+; CI-NEXT: s_mov_b32 s6, -1
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: s_lshl_b64 s[4:5], s[4:5], 3
+; CI-NEXT: s_add_u32 s4, s0, s4
+; CI-NEXT: s_addc_u32 s5, s1, s5
+; CI-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x8
+; CI-NEXT: s_mov_b64 s[0:1], 0
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: v_mov_b32_e32 v2, s8
+; CI-NEXT: v_mov_b32_e32 v3, s9
+; CI-NEXT: .LBB2_1: ; %atomicrmw.start
+; CI-NEXT: ; =>This Inner Loop Header: Depth=1
+; CI-NEXT: v_cmp_lt_i64_e32 vcc, s[2:3], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v0, s3
+; CI-NEXT: v_mov_b32_e32 v4, s2
+; CI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; CI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; CI-NEXT: v_mov_b32_e32 v7, v3
+; CI-NEXT: v_mov_b32_e32 v6, v2
+; CI-NEXT: v_mov_b32_e32 v5, v1
+; CI-NEXT: v_mov_b32_e32 v4, v0
+; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[4:7], 0 offset:32 glc
+; CI-NEXT: s_waitcnt vmcnt(0)
+; CI-NEXT: buffer_wbinvl1_vol
+; CI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v2, v4
+; CI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; CI-NEXT: v_mov_b32_e32 v3, v5
+; CI-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; CI-NEXT: s_cbranch_execnz .LBB2_1
+; CI-NEXT: ; %bb.2: ; %atomicrmw.end
+; CI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_max_i64_addr64_offset:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x34
+; VI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; VI-NEXT: s_mov_b32 s7, 0xf000
+; VI-NEXT: s_mov_b32 s6, -1
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_lshl_b64 s[4:5], s[4:5], 3
+; VI-NEXT: s_add_u32 s4, s0, s4
+; VI-NEXT: s_addc_u32 s5, s1, s5
+; VI-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x20
+; VI-NEXT: s_add_u32 s4, s4, 32
+; VI-NEXT: s_mov_b64 s[0:1], 0
+; VI-NEXT: s_addc_u32 s5, s5, 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s8
+; VI-NEXT: v_mov_b32_e32 v3, s9
+; VI-NEXT: .LBB2_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_cmp_lt_i64_e32 vcc, s[2:3], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v0, s3
+; VI-NEXT: v_mov_b32_e32 v4, s2
+; VI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; VI-NEXT: v_mov_b32_e32 v7, v3
+; VI-NEXT: v_mov_b32_e32 v6, v2
+; VI-NEXT: v_mov_b32_e32 v5, v1
+; VI-NEXT: v_mov_b32_e32 v4, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[4:7], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v2, v4
+; VI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; VI-NEXT: v_mov_b32_e32 v3, v5
+; VI-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; VI-NEXT: s_cbranch_execnz .LBB2_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_max_i64_addr64_offset:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_lshl_b64 s[0:1], s[2:3], 3
+; GFX9-NEXT: s_add_u32 s0, s4, s0
+; GFX9-NEXT: s_addc_u32 s1, s5, s1
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x20
+; GFX9-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-NEXT: v_mov_b32_e32 v3, s5
+; GFX9-NEXT: .LBB2_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, s[6:7], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v0, s7
+; GFX9-NEXT: v_mov_b32_e32 v5, s6
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v5, v2, vcc
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] offset:32 glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-NEXT: s_cbranch_execnz .LBB2_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
+ %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
+ %tmp0 = atomicrmw volatile max i64 addrspace(1)* %gep, i64 %in seq_cst
+ ret void
+}
+
+define amdgpu_kernel void @atomic_max_i64_ret_addr64_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
+; CI-LABEL: atomic_max_i64_ret_addr64_offset:
+; CI: ; %bb.0: ; %entry
+; CI-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x9
+; CI-NEXT: s_mov_b32 s11, 0xf000
+; CI-NEXT: s_mov_b32 s10, -1
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; CI-NEXT: s_add_u32 s8, s0, s6
+; CI-NEXT: s_addc_u32 s9, s1, s7
+; CI-NEXT: s_load_dwordx2 s[6:7], s[8:9], 0x8
+; CI-NEXT: s_mov_b64 s[0:1], 0
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: v_mov_b32_e32 v2, s6
+; CI-NEXT: v_mov_b32_e32 v3, s7
+; CI-NEXT: .LBB3_1: ; %atomicrmw.start
+; CI-NEXT: ; =>This Inner Loop Header: Depth=1
+; CI-NEXT: v_cmp_lt_i64_e32 vcc, s[4:5], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v0, s5
+; CI-NEXT: v_mov_b32_e32 v4, s4
+; CI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; CI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; CI-NEXT: v_mov_b32_e32 v7, v3
+; CI-NEXT: v_mov_b32_e32 v6, v2
+; CI-NEXT: v_mov_b32_e32 v5, v1
+; CI-NEXT: v_mov_b32_e32 v4, v0
+; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[8:11], 0 offset:32 glc
+; CI-NEXT: s_waitcnt vmcnt(0)
+; CI-NEXT: buffer_wbinvl1_vol
+; CI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v2, v4
+; CI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; CI-NEXT: v_mov_b32_e32 v3, v5
+; CI-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; CI-NEXT: s_cbranch_execnz .LBB3_1
+; CI-NEXT: ; %bb.2: ; %atomicrmw.end
+; CI-NEXT: s_or_b64 exec, exec, s[0:1]
+; CI-NEXT: s_mov_b32 s7, 0xf000
+; CI-NEXT: s_mov_b32 s6, -1
+; CI-NEXT: s_mov_b32 s4, s2
+; CI-NEXT: s_mov_b32 s5, s3
+; CI-NEXT: buffer_store_dwordx2 v[4:5], off, s[4:7], 0
+; CI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_max_i64_ret_addr64_offset:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
+; VI-NEXT: s_mov_b32 s11, 0xf000
+; VI-NEXT: s_mov_b32 s10, -1
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; VI-NEXT: s_add_u32 s6, s0, s6
+; VI-NEXT: s_addc_u32 s7, s1, s7
+; VI-NEXT: s_load_dwordx2 s[12:13], s[6:7], 0x20
+; VI-NEXT: s_add_u32 s8, s6, 32
+; VI-NEXT: s_mov_b64 s[0:1], 0
+; VI-NEXT: s_addc_u32 s9, s7, 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s12
+; VI-NEXT: v_mov_b32_e32 v3, s13
+; VI-NEXT: .LBB3_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_cmp_lt_i64_e32 vcc, s[4:5], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v0, s5
+; VI-NEXT: v_mov_b32_e32 v4, s4
+; VI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; VI-NEXT: v_mov_b32_e32 v7, v3
+; VI-NEXT: v_mov_b32_e32 v6, v2
+; VI-NEXT: v_mov_b32_e32 v5, v1
+; VI-NEXT: v_mov_b32_e32 v4, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[8:11], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v2, v4
+; VI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; VI-NEXT: v_mov_b32_e32 v3, v5
+; VI-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; VI-NEXT: s_cbranch_execnz .LBB3_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_or_b64 exec, exec, s[0:1]
+; VI-NEXT: s_mov_b32 s7, 0xf000
+; VI-NEXT: s_mov_b32 s6, -1
+; VI-NEXT: s_mov_b32 s4, s2
+; VI-NEXT: s_mov_b32 s5, s3
+; VI-NEXT: buffer_store_dwordx2 v[4:5], off, s[4:7], 0
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_max_i64_ret_addr64_offset:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; GFX9-NEXT: s_add_u32 s0, s0, s6
+; GFX9-NEXT: s_addc_u32 s1, s1, s7
+; GFX9-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x20
+; GFX9-NEXT: s_mov_b64 s[6:7], 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v0, s8
+; GFX9-NEXT: v_mov_b32_e32 v1, s9
+; GFX9-NEXT: .LBB3_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_mov_b32_e32 v6, v1
+; GFX9-NEXT: v_mov_b32_e32 v5, v0
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, s[4:5], v[5:6]
+; GFX9-NEXT: v_mov_b32_e32 v0, s5
+; GFX9-NEXT: v_mov_b32_e32 v1, s4
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v0, v6, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v2, v[3:6], s[0:1] offset:32 glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[5:6]
+; GFX9-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[6:7]
+; GFX9-NEXT: s_cbranch_execnz .LBB3_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_or_b64 exec, exec, s[6:7]
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3]
+; GFX9-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
+ %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
+ %tmp0 = atomicrmw volatile max i64 addrspace(1)* %gep, i64 %in seq_cst
+ store i64 %tmp0, i64 addrspace(1)* %out2
+ ret void
+}
+
+define amdgpu_kernel void @atomic_max_i64(i64 addrspace(1)* %out, i64 %in) {
+; CI-LABEL: atomic_max_i64:
+; CI: ; %bb.0: ; %entry
+; CI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
+; CI-NEXT: s_mov_b64 s[4:5], 0
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x0
+; CI-NEXT: s_mov_b32 s6, s2
+; CI-NEXT: s_mov_b32 s7, s3
+; CI-NEXT: s_mov_b32 s3, 0xf000
+; CI-NEXT: s_mov_b32 s2, -1
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: v_mov_b32_e32 v2, s8
+; CI-NEXT: v_mov_b32_e32 v3, s9
+; CI-NEXT: .LBB4_1: ; %atomicrmw.start
+; CI-NEXT: ; =>This Inner Loop Header: Depth=1
+; CI-NEXT: v_cmp_lt_i64_e32 vcc, s[6:7], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v0, s7
+; CI-NEXT: v_mov_b32_e32 v4, s6
+; CI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; CI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; CI-NEXT: v_mov_b32_e32 v7, v3
+; CI-NEXT: v_mov_b32_e32 v6, v2
+; CI-NEXT: v_mov_b32_e32 v5, v1
+; CI-NEXT: v_mov_b32_e32 v4, v0
+; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc
+; CI-NEXT: s_waitcnt vmcnt(0)
+; CI-NEXT: buffer_wbinvl1_vol
+; CI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v2, v4
+; CI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; CI-NEXT: v_mov_b32_e32 v3, v5
+; CI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; CI-NEXT: s_cbranch_execnz .LBB4_1
+; CI-NEXT: ; %bb.2: ; %atomicrmw.end
+; CI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_max_i64:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x0
+; VI-NEXT: s_mov_b32 s6, s2
+; VI-NEXT: s_mov_b32 s7, s3
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s8
+; VI-NEXT: v_mov_b32_e32 v3, s9
+; VI-NEXT: .LBB4_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_cmp_lt_i64_e32 vcc, s[6:7], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v0, s7
+; VI-NEXT: v_mov_b32_e32 v4, s6
+; VI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; VI-NEXT: v_mov_b32_e32 v7, v3
+; VI-NEXT: v_mov_b32_e32 v6, v2
+; VI-NEXT: v_mov_b32_e32 v5, v1
+; VI-NEXT: v_mov_b32_e32 v4, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v2, v4
+; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT: v_mov_b32_e32 v3, v5
+; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT: s_cbranch_execnz .LBB4_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_max_i64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, s6
+; GFX9-NEXT: v_mov_b32_e32 v3, s7
+; GFX9-NEXT: .LBB4_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, s[2:3], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v0, s3
+; GFX9-NEXT: v_mov_b32_e32 v5, s2
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v5, v2, vcc
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_execnz .LBB4_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+entry:
+ %tmp0 = atomicrmw volatile max i64 addrspace(1)* %out, i64 %in seq_cst
+ ret void
+}
+
+define amdgpu_kernel void @atomic_max_i64_ret(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
+; CI-LABEL: atomic_max_i64_ret:
+; CI: ; %bb.0: ; %entry
+; CI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; CI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0xd
+; CI-NEXT: s_mov_b64 s[10:11], 0
+; CI-NEXT: s_mov_b32 s3, 0xf000
+; CI-NEXT: s_mov_b32 s2, -1
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: s_load_dwordx2 s[12:13], s[4:5], 0x0
+; CI-NEXT: s_mov_b64 s[0:1], s[4:5]
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: v_mov_b32_e32 v2, s12
+; CI-NEXT: v_mov_b32_e32 v3, s13
+; CI-NEXT: .LBB5_1: ; %atomicrmw.start
+; CI-NEXT: ; =>This Inner Loop Header: Depth=1
+; CI-NEXT: v_cmp_lt_i64_e32 vcc, s[8:9], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v0, s9
+; CI-NEXT: v_mov_b32_e32 v4, s8
+; CI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; CI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; CI-NEXT: v_mov_b32_e32 v7, v3
+; CI-NEXT: v_mov_b32_e32 v6, v2
+; CI-NEXT: v_mov_b32_e32 v5, v1
+; CI-NEXT: v_mov_b32_e32 v4, v0
+; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc
+; CI-NEXT: s_waitcnt vmcnt(0)
+; CI-NEXT: buffer_wbinvl1_vol
+; CI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v2, v4
+; CI-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
+; CI-NEXT: v_mov_b32_e32 v3, v5
+; CI-NEXT: s_andn2_b64 exec, exec, s[10:11]
+; CI-NEXT: s_cbranch_execnz .LBB5_1
+; CI-NEXT: ; %bb.2: ; %atomicrmw.end
+; CI-NEXT: s_or_b64 exec, exec, s[10:11]
+; CI-NEXT: s_mov_b32 s3, 0xf000
+; CI-NEXT: s_mov_b32 s2, -1
+; CI-NEXT: s_mov_b32 s0, s6
+; CI-NEXT: s_mov_b32 s1, s7
+; CI-NEXT: buffer_store_dwordx2 v[4:5], off, s[0:3], 0
+; CI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_max_i64_ret:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; VI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x34
+; VI-NEXT: s_mov_b64 s[10:11], 0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dwordx2 s[12:13], s[4:5], 0x0
+; VI-NEXT: s_mov_b64 s[0:1], s[4:5]
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s12
+; VI-NEXT: v_mov_b32_e32 v3, s13
+; VI-NEXT: .LBB5_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_cmp_lt_i64_e32 vcc, s[8:9], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v0, s9
+; VI-NEXT: v_mov_b32_e32 v4, s8
+; VI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; VI-NEXT: v_mov_b32_e32 v7, v3
+; VI-NEXT: v_mov_b32_e32 v6, v2
+; VI-NEXT: v_mov_b32_e32 v5, v1
+; VI-NEXT: v_mov_b32_e32 v4, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v2, v4
+; VI-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
+; VI-NEXT: v_mov_b32_e32 v3, v5
+; VI-NEXT: s_andn2_b64 exec, exec, s[10:11]
+; VI-NEXT: s_cbranch_execnz .LBB5_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_or_b64 exec, exec, s[10:11]
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: s_mov_b32 s0, s6
+; VI-NEXT: s_mov_b32 s1, s7
+; VI-NEXT: buffer_store_dwordx2 v[4:5], off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_max_i64_ret:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v0, s8
+; GFX9-NEXT: v_mov_b32_e32 v1, s9
+; GFX9-NEXT: .LBB5_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_mov_b32_e32 v6, v1
+; GFX9-NEXT: v_mov_b32_e32 v5, v0
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, s[2:3], v[5:6]
+; GFX9-NEXT: v_mov_b32_e32 v0, s3
+; GFX9-NEXT: v_mov_b32_e32 v1, s2
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v0, v6, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v2, v[3:6], s[4:5] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[5:6]
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB5_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[6:7]
+; GFX9-NEXT: s_endpgm
+entry:
+ %tmp0 = atomicrmw volatile max i64 addrspace(1)* %out, i64 %in seq_cst
+ store i64 %tmp0, i64 addrspace(1)* %out2
+ ret void
+}
+
+define amdgpu_kernel void @atomic_max_i64_addr64(i64 addrspace(1)* %out, i64 %in, i64 %index) {
+; CI-LABEL: atomic_max_i64_addr64:
+; CI: ; %bb.0: ; %entry
+; CI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0xd
+; CI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
+; CI-NEXT: s_mov_b32 s7, 0xf000
+; CI-NEXT: s_mov_b32 s6, -1
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: s_lshl_b64 s[4:5], s[4:5], 3
+; CI-NEXT: s_add_u32 s4, s0, s4
+; CI-NEXT: s_addc_u32 s5, s1, s5
+; CI-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x0
+; CI-NEXT: s_mov_b64 s[0:1], 0
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: v_mov_b32_e32 v2, s8
+; CI-NEXT: v_mov_b32_e32 v3, s9
+; CI-NEXT: .LBB6_1: ; %atomicrmw.start
+; CI-NEXT: ; =>This Inner Loop Header: Depth=1
+; CI-NEXT: v_cmp_lt_i64_e32 vcc, s[2:3], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v0, s3
+; CI-NEXT: v_mov_b32_e32 v4, s2
+; CI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; CI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; CI-NEXT: v_mov_b32_e32 v7, v3
+; CI-NEXT: v_mov_b32_e32 v6, v2
+; CI-NEXT: v_mov_b32_e32 v5, v1
+; CI-NEXT: v_mov_b32_e32 v4, v0
+; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[4:7], 0 glc
+; CI-NEXT: s_waitcnt vmcnt(0)
+; CI-NEXT: buffer_wbinvl1_vol
+; CI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v2, v4
+; CI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; CI-NEXT: v_mov_b32_e32 v3, v5
+; CI-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; CI-NEXT: s_cbranch_execnz .LBB6_1
+; CI-NEXT: ; %bb.2: ; %atomicrmw.end
+; CI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_max_i64_addr64:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x34
+; VI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; VI-NEXT: s_mov_b32 s7, 0xf000
+; VI-NEXT: s_mov_b32 s6, -1
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_lshl_b64 s[4:5], s[4:5], 3
+; VI-NEXT: s_add_u32 s4, s0, s4
+; VI-NEXT: s_addc_u32 s5, s1, s5
+; VI-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x0
+; VI-NEXT: s_mov_b64 s[0:1], 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s8
+; VI-NEXT: v_mov_b32_e32 v3, s9
+; VI-NEXT: .LBB6_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_cmp_lt_i64_e32 vcc, s[2:3], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v0, s3
+; VI-NEXT: v_mov_b32_e32 v4, s2
+; VI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; VI-NEXT: v_mov_b32_e32 v7, v3
+; VI-NEXT: v_mov_b32_e32 v6, v2
+; VI-NEXT: v_mov_b32_e32 v5, v1
+; VI-NEXT: v_mov_b32_e32 v4, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[4:7], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v2, v4
+; VI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; VI-NEXT: v_mov_b32_e32 v3, v5
+; VI-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; VI-NEXT: s_cbranch_execnz .LBB6_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_max_i64_addr64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_lshl_b64 s[0:1], s[2:3], 3
+; GFX9-NEXT: s_add_u32 s0, s4, s0
+; GFX9-NEXT: s_addc_u32 s1, s5, s1
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-NEXT: v_mov_b32_e32 v3, s5
+; GFX9-NEXT: .LBB6_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, s[6:7], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v0, s7
+; GFX9-NEXT: v_mov_b32_e32 v5, s6
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v5, v2, vcc
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-NEXT: s_cbranch_execnz .LBB6_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
+ %tmp0 = atomicrmw volatile max i64 addrspace(1)* %ptr, i64 %in seq_cst
+ ret void
+}
+
+define amdgpu_kernel void @atomic_max_i64_ret_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
+; CI-LABEL: atomic_max_i64_ret_addr64:
+; CI: ; %bb.0: ; %entry
+; CI-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x9
+; CI-NEXT: s_mov_b32 s11, 0xf000
+; CI-NEXT: s_mov_b32 s10, -1
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; CI-NEXT: s_add_u32 s8, s0, s6
+; CI-NEXT: s_addc_u32 s9, s1, s7
+; CI-NEXT: s_load_dwordx2 s[6:7], s[8:9], 0x0
+; CI-NEXT: s_mov_b64 s[0:1], 0
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: v_mov_b32_e32 v2, s6
+; CI-NEXT: v_mov_b32_e32 v3, s7
+; CI-NEXT: .LBB7_1: ; %atomicrmw.start
+; CI-NEXT: ; =>This Inner Loop Header: Depth=1
+; CI-NEXT: v_cmp_lt_i64_e32 vcc, s[4:5], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v0, s5
+; CI-NEXT: v_mov_b32_e32 v4, s4
+; CI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; CI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; CI-NEXT: v_mov_b32_e32 v7, v3
+; CI-NEXT: v_mov_b32_e32 v6, v2
+; CI-NEXT: v_mov_b32_e32 v5, v1
+; CI-NEXT: v_mov_b32_e32 v4, v0
+; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[8:11], 0 glc
+; CI-NEXT: s_waitcnt vmcnt(0)
+; CI-NEXT: buffer_wbinvl1_vol
+; CI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v2, v4
+; CI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; CI-NEXT: v_mov_b32_e32 v3, v5
+; CI-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; CI-NEXT: s_cbranch_execnz .LBB7_1
+; CI-NEXT: ; %bb.2: ; %atomicrmw.end
+; CI-NEXT: s_or_b64 exec, exec, s[0:1]
+; CI-NEXT: s_mov_b32 s7, 0xf000
+; CI-NEXT: s_mov_b32 s6, -1
+; CI-NEXT: s_mov_b32 s4, s2
+; CI-NEXT: s_mov_b32 s5, s3
+; CI-NEXT: buffer_store_dwordx2 v[4:5], off, s[4:7], 0
+; CI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_max_i64_ret_addr64:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
+; VI-NEXT: s_mov_b32 s11, 0xf000
+; VI-NEXT: s_mov_b32 s10, -1
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; VI-NEXT: s_add_u32 s8, s0, s6
+; VI-NEXT: s_addc_u32 s9, s1, s7
+; VI-NEXT: s_load_dwordx2 s[6:7], s[8:9], 0x0
+; VI-NEXT: s_mov_b64 s[0:1], 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s6
+; VI-NEXT: v_mov_b32_e32 v3, s7
+; VI-NEXT: .LBB7_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_cmp_lt_i64_e32 vcc, s[4:5], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v0, s5
+; VI-NEXT: v_mov_b32_e32 v4, s4
+; VI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; VI-NEXT: v_mov_b32_e32 v7, v3
+; VI-NEXT: v_mov_b32_e32 v6, v2
+; VI-NEXT: v_mov_b32_e32 v5, v1
+; VI-NEXT: v_mov_b32_e32 v4, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[8:11], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v2, v4
+; VI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; VI-NEXT: v_mov_b32_e32 v3, v5
+; VI-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; VI-NEXT: s_cbranch_execnz .LBB7_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_or_b64 exec, exec, s[0:1]
+; VI-NEXT: s_mov_b32 s7, 0xf000
+; VI-NEXT: s_mov_b32 s6, -1
+; VI-NEXT: s_mov_b32 s4, s2
+; VI-NEXT: s_mov_b32 s5, s3
+; VI-NEXT: buffer_store_dwordx2 v[4:5], off, s[4:7], 0
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_max_i64_ret_addr64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; GFX9-NEXT: s_add_u32 s0, s0, s6
+; GFX9-NEXT: s_addc_u32 s1, s1, s7
+; GFX9-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x0
+; GFX9-NEXT: s_mov_b64 s[6:7], 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v0, s8
+; GFX9-NEXT: v_mov_b32_e32 v1, s9
+; GFX9-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_mov_b32_e32 v6, v1
+; GFX9-NEXT: v_mov_b32_e32 v5, v0
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, s[4:5], v[5:6]
+; GFX9-NEXT: v_mov_b32_e32 v0, s5
+; GFX9-NEXT: v_mov_b32_e32 v1, s4
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v0, v6, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v2, v[3:6], s[0:1] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[5:6]
+; GFX9-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[6:7]
+; GFX9-NEXT: s_cbranch_execnz .LBB7_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_or_b64 exec, exec, s[6:7]
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3]
+; GFX9-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
+ %tmp0 = atomicrmw volatile max i64 addrspace(1)* %ptr, i64 %in seq_cst
+ store i64 %tmp0, i64 addrspace(1)* %out2
+ ret void
+}
+
+define amdgpu_kernel void @atomic_umax_i64_offset(i64 addrspace(1)* %out, i64 %in) {
+; CI-LABEL: atomic_umax_i64_offset:
+; CI: ; %bb.0: ; %entry
+; CI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
+; CI-NEXT: s_mov_b64 s[4:5], 0
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x8
+; CI-NEXT: s_mov_b32 s6, s2
+; CI-NEXT: s_mov_b32 s7, s3
+; CI-NEXT: s_mov_b32 s3, 0xf000
+; CI-NEXT: s_mov_b32 s2, -1
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: v_mov_b32_e32 v2, s8
+; CI-NEXT: v_mov_b32_e32 v3, s9
+; CI-NEXT: .LBB8_1: ; %atomicrmw.start
+; CI-NEXT: ; =>This Inner Loop Header: Depth=1
+; CI-NEXT: v_cmp_lt_u64_e32 vcc, s[6:7], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v0, s7
+; CI-NEXT: v_mov_b32_e32 v4, s6
+; CI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; CI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; CI-NEXT: v_mov_b32_e32 v7, v3
+; CI-NEXT: v_mov_b32_e32 v6, v2
+; CI-NEXT: v_mov_b32_e32 v5, v1
+; CI-NEXT: v_mov_b32_e32 v4, v0
+; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 offset:32 glc
+; CI-NEXT: s_waitcnt vmcnt(0)
+; CI-NEXT: buffer_wbinvl1_vol
+; CI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v2, v4
+; CI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; CI-NEXT: v_mov_b32_e32 v3, v5
+; CI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; CI-NEXT: s_cbranch_execnz .LBB8_1
+; CI-NEXT: ; %bb.2: ; %atomicrmw.end
+; CI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_umax_i64_offset:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; VI-NEXT: s_mov_b64 s[8:9], 0
+; VI-NEXT: s_mov_b32 s7, 0xf000
+; VI-NEXT: s_mov_b32 s6, -1
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dwordx2 s[10:11], s[0:1], 0x20
+; VI-NEXT: s_add_u32 s4, s0, 32
+; VI-NEXT: s_addc_u32 s5, s1, 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s10
+; VI-NEXT: v_mov_b32_e32 v3, s11
+; VI-NEXT: .LBB8_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_cmp_lt_u64_e32 vcc, s[2:3], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v0, s3
+; VI-NEXT: v_mov_b32_e32 v4, s2
+; VI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; VI-NEXT: v_mov_b32_e32 v7, v3
+; VI-NEXT: v_mov_b32_e32 v6, v2
+; VI-NEXT: v_mov_b32_e32 v5, v1
+; VI-NEXT: v_mov_b32_e32 v4, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[4:7], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v2, v4
+; VI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; VI-NEXT: v_mov_b32_e32 v3, v5
+; VI-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; VI-NEXT: s_cbranch_execnz .LBB8_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_umax_i64_offset:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x20
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, s6
+; GFX9-NEXT: v_mov_b32_e32 v3, s7
+; GFX9-NEXT: .LBB8_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, s[2:3], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v0, s3
+; GFX9-NEXT: v_mov_b32_e32 v5, s2
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v5, v2, vcc
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] offset:32 glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_execnz .LBB8_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+entry:
+ %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
+ %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %gep, i64 %in seq_cst
+ ret void
+}
+
+define amdgpu_kernel void @atomic_umax_i64_ret_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
+; CI-LABEL: atomic_umax_i64_ret_offset:
+; CI: ; %bb.0: ; %entry
+; CI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; CI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0xd
+; CI-NEXT: s_mov_b64 s[10:11], 0
+; CI-NEXT: s_mov_b32 s3, 0xf000
+; CI-NEXT: s_mov_b32 s2, -1
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: s_load_dwordx2 s[12:13], s[4:5], 0x8
+; CI-NEXT: s_mov_b64 s[0:1], s[4:5]
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: v_mov_b32_e32 v2, s12
+; CI-NEXT: v_mov_b32_e32 v3, s13
+; CI-NEXT: .LBB9_1: ; %atomicrmw.start
+; CI-NEXT: ; =>This Inner Loop Header: Depth=1
+; CI-NEXT: v_cmp_lt_u64_e32 vcc, s[8:9], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v0, s9
+; CI-NEXT: v_mov_b32_e32 v4, s8
+; CI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; CI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; CI-NEXT: v_mov_b32_e32 v7, v3
+; CI-NEXT: v_mov_b32_e32 v6, v2
+; CI-NEXT: v_mov_b32_e32 v5, v1
+; CI-NEXT: v_mov_b32_e32 v4, v0
+; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 offset:32 glc
+; CI-NEXT: s_waitcnt vmcnt(0)
+; CI-NEXT: buffer_wbinvl1_vol
+; CI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v2, v4
+; CI-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
+; CI-NEXT: v_mov_b32_e32 v3, v5
+; CI-NEXT: s_andn2_b64 exec, exec, s[10:11]
+; CI-NEXT: s_cbranch_execnz .LBB9_1
+; CI-NEXT: ; %bb.2: ; %atomicrmw.end
+; CI-NEXT: s_or_b64 exec, exec, s[10:11]
+; CI-NEXT: s_mov_b32 s3, 0xf000
+; CI-NEXT: s_mov_b32 s2, -1
+; CI-NEXT: s_mov_b32 s0, s6
+; CI-NEXT: s_mov_b32 s1, s7
+; CI-NEXT: buffer_store_dwordx2 v[4:5], off, s[0:3], 0
+; CI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_umax_i64_ret_offset:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; VI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x34
+; VI-NEXT: s_mov_b64 s[10:11], 0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dwordx2 s[12:13], s[4:5], 0x20
+; VI-NEXT: s_add_u32 s0, s4, 32
+; VI-NEXT: s_addc_u32 s1, s5, 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s12
+; VI-NEXT: v_mov_b32_e32 v3, s13
+; VI-NEXT: .LBB9_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_cmp_lt_u64_e32 vcc, s[8:9], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v0, s9
+; VI-NEXT: v_mov_b32_e32 v4, s8
+; VI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; VI-NEXT: v_mov_b32_e32 v7, v3
+; VI-NEXT: v_mov_b32_e32 v6, v2
+; VI-NEXT: v_mov_b32_e32 v5, v1
+; VI-NEXT: v_mov_b32_e32 v4, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v2, v4
+; VI-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
+; VI-NEXT: v_mov_b32_e32 v3, v5
+; VI-NEXT: s_andn2_b64 exec, exec, s[10:11]
+; VI-NEXT: s_cbranch_execnz .LBB9_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_or_b64 exec, exec, s[10:11]
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: s_mov_b32 s0, s6
+; VI-NEXT: s_mov_b32 s1, s7
+; VI-NEXT: buffer_store_dwordx2 v[4:5], off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_umax_i64_ret_offset:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x20
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v0, s8
+; GFX9-NEXT: v_mov_b32_e32 v1, s9
+; GFX9-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_mov_b32_e32 v6, v1
+; GFX9-NEXT: v_mov_b32_e32 v5, v0
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, s[2:3], v[5:6]
+; GFX9-NEXT: v_mov_b32_e32 v0, s3
+; GFX9-NEXT: v_mov_b32_e32 v1, s2
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v0, v6, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v2, v[3:6], s[4:5] offset:32 glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[5:6]
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB9_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[6:7]
+; GFX9-NEXT: s_endpgm
+entry:
+ %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4
+ %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %gep, i64 %in seq_cst
+ store i64 %tmp0, i64 addrspace(1)* %out2
+ ret void
+}
+
+define amdgpu_kernel void @atomic_umax_i64_addr64_offset(i64 addrspace(1)* %out, i64 %in, i64 %index) {
+; CI-LABEL: atomic_umax_i64_addr64_offset:
+; CI: ; %bb.0: ; %entry
+; CI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0xd
+; CI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
+; CI-NEXT: s_mov_b32 s7, 0xf000
+; CI-NEXT: s_mov_b32 s6, -1
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: s_lshl_b64 s[4:5], s[4:5], 3
+; CI-NEXT: s_add_u32 s4, s0, s4
+; CI-NEXT: s_addc_u32 s5, s1, s5
+; CI-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x8
+; CI-NEXT: s_mov_b64 s[0:1], 0
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: v_mov_b32_e32 v2, s8
+; CI-NEXT: v_mov_b32_e32 v3, s9
+; CI-NEXT: .LBB10_1: ; %atomicrmw.start
+; CI-NEXT: ; =>This Inner Loop Header: Depth=1
+; CI-NEXT: v_cmp_lt_u64_e32 vcc, s[2:3], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v0, s3
+; CI-NEXT: v_mov_b32_e32 v4, s2
+; CI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; CI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; CI-NEXT: v_mov_b32_e32 v7, v3
+; CI-NEXT: v_mov_b32_e32 v6, v2
+; CI-NEXT: v_mov_b32_e32 v5, v1
+; CI-NEXT: v_mov_b32_e32 v4, v0
+; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[4:7], 0 offset:32 glc
+; CI-NEXT: s_waitcnt vmcnt(0)
+; CI-NEXT: buffer_wbinvl1_vol
+; CI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v2, v4
+; CI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; CI-NEXT: v_mov_b32_e32 v3, v5
+; CI-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; CI-NEXT: s_cbranch_execnz .LBB10_1
+; CI-NEXT: ; %bb.2: ; %atomicrmw.end
+; CI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_umax_i64_addr64_offset:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x34
+; VI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; VI-NEXT: s_mov_b32 s7, 0xf000
+; VI-NEXT: s_mov_b32 s6, -1
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_lshl_b64 s[4:5], s[4:5], 3
+; VI-NEXT: s_add_u32 s4, s0, s4
+; VI-NEXT: s_addc_u32 s5, s1, s5
+; VI-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x20
+; VI-NEXT: s_add_u32 s4, s4, 32
+; VI-NEXT: s_mov_b64 s[0:1], 0
+; VI-NEXT: s_addc_u32 s5, s5, 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s8
+; VI-NEXT: v_mov_b32_e32 v3, s9
+; VI-NEXT: .LBB10_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_cmp_lt_u64_e32 vcc, s[2:3], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v0, s3
+; VI-NEXT: v_mov_b32_e32 v4, s2
+; VI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; VI-NEXT: v_mov_b32_e32 v7, v3
+; VI-NEXT: v_mov_b32_e32 v6, v2
+; VI-NEXT: v_mov_b32_e32 v5, v1
+; VI-NEXT: v_mov_b32_e32 v4, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[4:7], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v2, v4
+; VI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; VI-NEXT: v_mov_b32_e32 v3, v5
+; VI-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; VI-NEXT: s_cbranch_execnz .LBB10_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_umax_i64_addr64_offset:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_lshl_b64 s[0:1], s[2:3], 3
+; GFX9-NEXT: s_add_u32 s0, s4, s0
+; GFX9-NEXT: s_addc_u32 s1, s5, s1
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x20
+; GFX9-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-NEXT: v_mov_b32_e32 v3, s5
+; GFX9-NEXT: .LBB10_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, s[6:7], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v0, s7
+; GFX9-NEXT: v_mov_b32_e32 v5, s6
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v5, v2, vcc
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] offset:32 glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-NEXT: s_cbranch_execnz .LBB10_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
+ %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
+ %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %gep, i64 %in seq_cst
+ ret void
+}
+
+define amdgpu_kernel void @atomic_umax_i64_ret_addr64_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
+; CI-LABEL: atomic_umax_i64_ret_addr64_offset:
+; CI: ; %bb.0: ; %entry
+; CI-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x9
+; CI-NEXT: s_mov_b32 s11, 0xf000
+; CI-NEXT: s_mov_b32 s10, -1
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; CI-NEXT: s_add_u32 s8, s0, s6
+; CI-NEXT: s_addc_u32 s9, s1, s7
+; CI-NEXT: s_load_dwordx2 s[6:7], s[8:9], 0x8
+; CI-NEXT: s_mov_b64 s[0:1], 0
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: v_mov_b32_e32 v2, s6
+; CI-NEXT: v_mov_b32_e32 v3, s7
+; CI-NEXT: .LBB11_1: ; %atomicrmw.start
+; CI-NEXT: ; =>This Inner Loop Header: Depth=1
+; CI-NEXT: v_cmp_lt_u64_e32 vcc, s[4:5], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v0, s5
+; CI-NEXT: v_mov_b32_e32 v4, s4
+; CI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; CI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; CI-NEXT: v_mov_b32_e32 v7, v3
+; CI-NEXT: v_mov_b32_e32 v6, v2
+; CI-NEXT: v_mov_b32_e32 v5, v1
+; CI-NEXT: v_mov_b32_e32 v4, v0
+; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[8:11], 0 offset:32 glc
+; CI-NEXT: s_waitcnt vmcnt(0)
+; CI-NEXT: buffer_wbinvl1_vol
+; CI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v2, v4
+; CI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; CI-NEXT: v_mov_b32_e32 v3, v5
+; CI-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; CI-NEXT: s_cbranch_execnz .LBB11_1
+; CI-NEXT: ; %bb.2: ; %atomicrmw.end
+; CI-NEXT: s_or_b64 exec, exec, s[0:1]
+; CI-NEXT: s_mov_b32 s7, 0xf000
+; CI-NEXT: s_mov_b32 s6, -1
+; CI-NEXT: s_mov_b32 s4, s2
+; CI-NEXT: s_mov_b32 s5, s3
+; CI-NEXT: buffer_store_dwordx2 v[4:5], off, s[4:7], 0
+; CI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_umax_i64_ret_addr64_offset:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
+; VI-NEXT: s_mov_b32 s11, 0xf000
+; VI-NEXT: s_mov_b32 s10, -1
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; VI-NEXT: s_add_u32 s6, s0, s6
+; VI-NEXT: s_addc_u32 s7, s1, s7
+; VI-NEXT: s_load_dwordx2 s[12:13], s[6:7], 0x20
+; VI-NEXT: s_add_u32 s8, s6, 32
+; VI-NEXT: s_mov_b64 s[0:1], 0
+; VI-NEXT: s_addc_u32 s9, s7, 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s12
+; VI-NEXT: v_mov_b32_e32 v3, s13
+; VI-NEXT: .LBB11_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_cmp_lt_u64_e32 vcc, s[4:5], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v0, s5
+; VI-NEXT: v_mov_b32_e32 v4, s4
+; VI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; VI-NEXT: v_mov_b32_e32 v7, v3
+; VI-NEXT: v_mov_b32_e32 v6, v2
+; VI-NEXT: v_mov_b32_e32 v5, v1
+; VI-NEXT: v_mov_b32_e32 v4, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[8:11], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v2, v4
+; VI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; VI-NEXT: v_mov_b32_e32 v3, v5
+; VI-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; VI-NEXT: s_cbranch_execnz .LBB11_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_or_b64 exec, exec, s[0:1]
+; VI-NEXT: s_mov_b32 s7, 0xf000
+; VI-NEXT: s_mov_b32 s6, -1
+; VI-NEXT: s_mov_b32 s4, s2
+; VI-NEXT: s_mov_b32 s5, s3
+; VI-NEXT: buffer_store_dwordx2 v[4:5], off, s[4:7], 0
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_umax_i64_ret_addr64_offset:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; GFX9-NEXT: s_add_u32 s0, s0, s6
+; GFX9-NEXT: s_addc_u32 s1, s1, s7
+; GFX9-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x20
+; GFX9-NEXT: s_mov_b64 s[6:7], 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v0, s8
+; GFX9-NEXT: v_mov_b32_e32 v1, s9
+; GFX9-NEXT: .LBB11_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_mov_b32_e32 v6, v1
+; GFX9-NEXT: v_mov_b32_e32 v5, v0
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, s[4:5], v[5:6]
+; GFX9-NEXT: v_mov_b32_e32 v0, s5
+; GFX9-NEXT: v_mov_b32_e32 v1, s4
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v0, v6, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v2, v[3:6], s[0:1] offset:32 glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[5:6]
+; GFX9-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[6:7]
+; GFX9-NEXT: s_cbranch_execnz .LBB11_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_or_b64 exec, exec, s[6:7]
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3]
+; GFX9-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
+ %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
+ %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %gep, i64 %in seq_cst
+ store i64 %tmp0, i64 addrspace(1)* %out2
+ ret void
+}
+
+define amdgpu_kernel void @atomic_umax_i64(i64 addrspace(1)* %out, i64 %in) {
+; CI-LABEL: atomic_umax_i64:
+; CI: ; %bb.0: ; %entry
+; CI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
+; CI-NEXT: s_mov_b64 s[4:5], 0
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x0
+; CI-NEXT: s_mov_b32 s6, s2
+; CI-NEXT: s_mov_b32 s7, s3
+; CI-NEXT: s_mov_b32 s3, 0xf000
+; CI-NEXT: s_mov_b32 s2, -1
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: v_mov_b32_e32 v2, s8
+; CI-NEXT: v_mov_b32_e32 v3, s9
+; CI-NEXT: .LBB12_1: ; %atomicrmw.start
+; CI-NEXT: ; =>This Inner Loop Header: Depth=1
+; CI-NEXT: v_cmp_lt_u64_e32 vcc, s[6:7], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v0, s7
+; CI-NEXT: v_mov_b32_e32 v4, s6
+; CI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; CI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; CI-NEXT: v_mov_b32_e32 v7, v3
+; CI-NEXT: v_mov_b32_e32 v6, v2
+; CI-NEXT: v_mov_b32_e32 v5, v1
+; CI-NEXT: v_mov_b32_e32 v4, v0
+; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc
+; CI-NEXT: s_waitcnt vmcnt(0)
+; CI-NEXT: buffer_wbinvl1_vol
+; CI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v2, v4
+; CI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; CI-NEXT: v_mov_b32_e32 v3, v5
+; CI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; CI-NEXT: s_cbranch_execnz .LBB12_1
+; CI-NEXT: ; %bb.2: ; %atomicrmw.end
+; CI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_umax_i64:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x0
+; VI-NEXT: s_mov_b32 s6, s2
+; VI-NEXT: s_mov_b32 s7, s3
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s8
+; VI-NEXT: v_mov_b32_e32 v3, s9
+; VI-NEXT: .LBB12_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_cmp_lt_u64_e32 vcc, s[6:7], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v0, s7
+; VI-NEXT: v_mov_b32_e32 v4, s6
+; VI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; VI-NEXT: v_mov_b32_e32 v7, v3
+; VI-NEXT: v_mov_b32_e32 v6, v2
+; VI-NEXT: v_mov_b32_e32 v5, v1
+; VI-NEXT: v_mov_b32_e32 v4, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v2, v4
+; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT: v_mov_b32_e32 v3, v5
+; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT: s_cbranch_execnz .LBB12_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_umax_i64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, s6
+; GFX9-NEXT: v_mov_b32_e32 v3, s7
+; GFX9-NEXT: .LBB12_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, s[2:3], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v0, s3
+; GFX9-NEXT: v_mov_b32_e32 v5, s2
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v5, v2, vcc
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_execnz .LBB12_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+entry:
+ %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %out, i64 %in seq_cst
+ ret void
+}
+
+define amdgpu_kernel void @atomic_umax_i64_ret(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
+; CI-LABEL: atomic_umax_i64_ret:
+; CI: ; %bb.0: ; %entry
+; CI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; CI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0xd
+; CI-NEXT: s_mov_b64 s[10:11], 0
+; CI-NEXT: s_mov_b32 s3, 0xf000
+; CI-NEXT: s_mov_b32 s2, -1
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: s_load_dwordx2 s[12:13], s[4:5], 0x0
+; CI-NEXT: s_mov_b64 s[0:1], s[4:5]
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: v_mov_b32_e32 v2, s12
+; CI-NEXT: v_mov_b32_e32 v3, s13
+; CI-NEXT: .LBB13_1: ; %atomicrmw.start
+; CI-NEXT: ; =>This Inner Loop Header: Depth=1
+; CI-NEXT: v_cmp_lt_u64_e32 vcc, s[8:9], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v0, s9
+; CI-NEXT: v_mov_b32_e32 v4, s8
+; CI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; CI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; CI-NEXT: v_mov_b32_e32 v7, v3
+; CI-NEXT: v_mov_b32_e32 v6, v2
+; CI-NEXT: v_mov_b32_e32 v5, v1
+; CI-NEXT: v_mov_b32_e32 v4, v0
+; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc
+; CI-NEXT: s_waitcnt vmcnt(0)
+; CI-NEXT: buffer_wbinvl1_vol
+; CI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v2, v4
+; CI-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
+; CI-NEXT: v_mov_b32_e32 v3, v5
+; CI-NEXT: s_andn2_b64 exec, exec, s[10:11]
+; CI-NEXT: s_cbranch_execnz .LBB13_1
+; CI-NEXT: ; %bb.2: ; %atomicrmw.end
+; CI-NEXT: s_or_b64 exec, exec, s[10:11]
+; CI-NEXT: s_mov_b32 s3, 0xf000
+; CI-NEXT: s_mov_b32 s2, -1
+; CI-NEXT: s_mov_b32 s0, s6
+; CI-NEXT: s_mov_b32 s1, s7
+; CI-NEXT: buffer_store_dwordx2 v[4:5], off, s[0:3], 0
+; CI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_umax_i64_ret:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; VI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x34
+; VI-NEXT: s_mov_b64 s[10:11], 0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dwordx2 s[12:13], s[4:5], 0x0
+; VI-NEXT: s_mov_b64 s[0:1], s[4:5]
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s12
+; VI-NEXT: v_mov_b32_e32 v3, s13
+; VI-NEXT: .LBB13_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_cmp_lt_u64_e32 vcc, s[8:9], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v0, s9
+; VI-NEXT: v_mov_b32_e32 v4, s8
+; VI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; VI-NEXT: v_mov_b32_e32 v7, v3
+; VI-NEXT: v_mov_b32_e32 v6, v2
+; VI-NEXT: v_mov_b32_e32 v5, v1
+; VI-NEXT: v_mov_b32_e32 v4, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v2, v4
+; VI-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
+; VI-NEXT: v_mov_b32_e32 v3, v5
+; VI-NEXT: s_andn2_b64 exec, exec, s[10:11]
+; VI-NEXT: s_cbranch_execnz .LBB13_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_or_b64 exec, exec, s[10:11]
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: s_mov_b32 s0, s6
+; VI-NEXT: s_mov_b32 s1, s7
+; VI-NEXT: buffer_store_dwordx2 v[4:5], off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_umax_i64_ret:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v0, s8
+; GFX9-NEXT: v_mov_b32_e32 v1, s9
+; GFX9-NEXT: .LBB13_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_mov_b32_e32 v6, v1
+; GFX9-NEXT: v_mov_b32_e32 v5, v0
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, s[2:3], v[5:6]
+; GFX9-NEXT: v_mov_b32_e32 v0, s3
+; GFX9-NEXT: v_mov_b32_e32 v1, s2
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v0, v6, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v2, v[3:6], s[4:5] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[5:6]
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB13_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[6:7]
+; GFX9-NEXT: s_endpgm
+entry:
+ %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %out, i64 %in seq_cst
+ store i64 %tmp0, i64 addrspace(1)* %out2
+ ret void
+}
+
+define amdgpu_kernel void @atomic_umax_i64_addr64(i64 addrspace(1)* %out, i64 %in, i64 %index) {
+; CI-LABEL: atomic_umax_i64_addr64:
+; CI: ; %bb.0: ; %entry
+; CI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0xd
+; CI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
+; CI-NEXT: s_mov_b32 s7, 0xf000
+; CI-NEXT: s_mov_b32 s6, -1
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: s_lshl_b64 s[4:5], s[4:5], 3
+; CI-NEXT: s_add_u32 s4, s0, s4
+; CI-NEXT: s_addc_u32 s5, s1, s5
+; CI-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x0
+; CI-NEXT: s_mov_b64 s[0:1], 0
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: v_mov_b32_e32 v2, s8
+; CI-NEXT: v_mov_b32_e32 v3, s9
+; CI-NEXT: .LBB14_1: ; %atomicrmw.start
+; CI-NEXT: ; =>This Inner Loop Header: Depth=1
+; CI-NEXT: v_cmp_lt_u64_e32 vcc, s[2:3], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v0, s3
+; CI-NEXT: v_mov_b32_e32 v4, s2
+; CI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; CI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; CI-NEXT: v_mov_b32_e32 v7, v3
+; CI-NEXT: v_mov_b32_e32 v6, v2
+; CI-NEXT: v_mov_b32_e32 v5, v1
+; CI-NEXT: v_mov_b32_e32 v4, v0
+; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[4:7], 0 glc
+; CI-NEXT: s_waitcnt vmcnt(0)
+; CI-NEXT: buffer_wbinvl1_vol
+; CI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v2, v4
+; CI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; CI-NEXT: v_mov_b32_e32 v3, v5
+; CI-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; CI-NEXT: s_cbranch_execnz .LBB14_1
+; CI-NEXT: ; %bb.2: ; %atomicrmw.end
+; CI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_umax_i64_addr64:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x34
+; VI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; VI-NEXT: s_mov_b32 s7, 0xf000
+; VI-NEXT: s_mov_b32 s6, -1
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_lshl_b64 s[4:5], s[4:5], 3
+; VI-NEXT: s_add_u32 s4, s0, s4
+; VI-NEXT: s_addc_u32 s5, s1, s5
+; VI-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x0
+; VI-NEXT: s_mov_b64 s[0:1], 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s8
+; VI-NEXT: v_mov_b32_e32 v3, s9
+; VI-NEXT: .LBB14_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_cmp_lt_u64_e32 vcc, s[2:3], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v0, s3
+; VI-NEXT: v_mov_b32_e32 v4, s2
+; VI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; VI-NEXT: v_mov_b32_e32 v7, v3
+; VI-NEXT: v_mov_b32_e32 v6, v2
+; VI-NEXT: v_mov_b32_e32 v5, v1
+; VI-NEXT: v_mov_b32_e32 v4, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[4:7], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v2, v4
+; VI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; VI-NEXT: v_mov_b32_e32 v3, v5
+; VI-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; VI-NEXT: s_cbranch_execnz .LBB14_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_umax_i64_addr64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_lshl_b64 s[0:1], s[2:3], 3
+; GFX9-NEXT: s_add_u32 s0, s4, s0
+; GFX9-NEXT: s_addc_u32 s1, s5, s1
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-NEXT: v_mov_b32_e32 v3, s5
+; GFX9-NEXT: .LBB14_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, s[6:7], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v0, s7
+; GFX9-NEXT: v_mov_b32_e32 v5, s6
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v5, v2, vcc
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-NEXT: s_cbranch_execnz .LBB14_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
+ %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %ptr, i64 %in seq_cst
+ ret void
+}
+
+define amdgpu_kernel void @atomic_umax_i64_ret_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
+; CI-LABEL: atomic_umax_i64_ret_addr64:
+; CI: ; %bb.0: ; %entry
+; CI-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x9
+; CI-NEXT: s_mov_b32 s11, 0xf000
+; CI-NEXT: s_mov_b32 s10, -1
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; CI-NEXT: s_add_u32 s8, s0, s6
+; CI-NEXT: s_addc_u32 s9, s1, s7
+; CI-NEXT: s_load_dwordx2 s[6:7], s[8:9], 0x0
+; CI-NEXT: s_mov_b64 s[0:1], 0
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: v_mov_b32_e32 v2, s6
+; CI-NEXT: v_mov_b32_e32 v3, s7
+; CI-NEXT: .LBB15_1: ; %atomicrmw.start
+; CI-NEXT: ; =>This Inner Loop Header: Depth=1
+; CI-NEXT: v_cmp_lt_u64_e32 vcc, s[4:5], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v0, s5
+; CI-NEXT: v_mov_b32_e32 v4, s4
+; CI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; CI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; CI-NEXT: v_mov_b32_e32 v7, v3
+; CI-NEXT: v_mov_b32_e32 v6, v2
+; CI-NEXT: v_mov_b32_e32 v5, v1
+; CI-NEXT: v_mov_b32_e32 v4, v0
+; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[8:11], 0 glc
+; CI-NEXT: s_waitcnt vmcnt(0)
+; CI-NEXT: buffer_wbinvl1_vol
+; CI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v2, v4
+; CI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; CI-NEXT: v_mov_b32_e32 v3, v5
+; CI-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; CI-NEXT: s_cbranch_execnz .LBB15_1
+; CI-NEXT: ; %bb.2: ; %atomicrmw.end
+; CI-NEXT: s_or_b64 exec, exec, s[0:1]
+; CI-NEXT: s_mov_b32 s7, 0xf000
+; CI-NEXT: s_mov_b32 s6, -1
+; CI-NEXT: s_mov_b32 s4, s2
+; CI-NEXT: s_mov_b32 s5, s3
+; CI-NEXT: buffer_store_dwordx2 v[4:5], off, s[4:7], 0
+; CI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_umax_i64_ret_addr64:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
+; VI-NEXT: s_mov_b32 s11, 0xf000
+; VI-NEXT: s_mov_b32 s10, -1
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; VI-NEXT: s_add_u32 s8, s0, s6
+; VI-NEXT: s_addc_u32 s9, s1, s7
+; VI-NEXT: s_load_dwordx2 s[6:7], s[8:9], 0x0
+; VI-NEXT: s_mov_b64 s[0:1], 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s6
+; VI-NEXT: v_mov_b32_e32 v3, s7
+; VI-NEXT: .LBB15_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_cmp_lt_u64_e32 vcc, s[4:5], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v0, s5
+; VI-NEXT: v_mov_b32_e32 v4, s4
+; VI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; VI-NEXT: v_mov_b32_e32 v7, v3
+; VI-NEXT: v_mov_b32_e32 v6, v2
+; VI-NEXT: v_mov_b32_e32 v5, v1
+; VI-NEXT: v_mov_b32_e32 v4, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[8:11], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v2, v4
+; VI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; VI-NEXT: v_mov_b32_e32 v3, v5
+; VI-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; VI-NEXT: s_cbranch_execnz .LBB15_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_or_b64 exec, exec, s[0:1]
+; VI-NEXT: s_mov_b32 s7, 0xf000
+; VI-NEXT: s_mov_b32 s6, -1
+; VI-NEXT: s_mov_b32 s4, s2
+; VI-NEXT: s_mov_b32 s5, s3
+; VI-NEXT: buffer_store_dwordx2 v[4:5], off, s[4:7], 0
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_umax_i64_ret_addr64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; GFX9-NEXT: s_add_u32 s0, s0, s6
+; GFX9-NEXT: s_addc_u32 s1, s1, s7
+; GFX9-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x0
+; GFX9-NEXT: s_mov_b64 s[6:7], 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v0, s8
+; GFX9-NEXT: v_mov_b32_e32 v1, s9
+; GFX9-NEXT: .LBB15_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_mov_b32_e32 v6, v1
+; GFX9-NEXT: v_mov_b32_e32 v5, v0
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, s[4:5], v[5:6]
+; GFX9-NEXT: v_mov_b32_e32 v0, s5
+; GFX9-NEXT: v_mov_b32_e32 v1, s4
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v0, v6, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v2, v[3:6], s[0:1] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[5:6]
+; GFX9-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[6:7]
+; GFX9-NEXT: s_cbranch_execnz .LBB15_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_or_b64 exec, exec, s[6:7]
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3]
+; GFX9-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
+ %tmp0 = atomicrmw volatile umax i64 addrspace(1)* %ptr, i64 %in seq_cst
+ store i64 %tmp0, i64 addrspace(1)* %out2
+ ret void
+}
+
+define amdgpu_kernel void @atomic_min_i64_offset(i64 addrspace(1)* %out, i64 %in) {
+; CI-LABEL: atomic_min_i64_offset:
+; CI: ; %bb.0: ; %entry
+; CI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
+; CI-NEXT: s_mov_b64 s[4:5], 0
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x8
+; CI-NEXT: s_mov_b32 s6, s2
+; CI-NEXT: s_mov_b32 s7, s3
+; CI-NEXT: s_mov_b32 s3, 0xf000
+; CI-NEXT: s_mov_b32 s2, -1
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: v_mov_b32_e32 v2, s8
+; CI-NEXT: v_mov_b32_e32 v3, s9
+; CI-NEXT: .LBB16_1: ; %atomicrmw.start
+; CI-NEXT: ; =>This Inner Loop Header: Depth=1
+; CI-NEXT: v_cmp_ge_i64_e32 vcc, s[6:7], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v0, s7
+; CI-NEXT: v_mov_b32_e32 v4, s6
+; CI-NEXT:
v_cndmask_b32_e32 v1, v0, v3, vcc +; CI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc +; CI-NEXT: v_mov_b32_e32 v7, v3 +; CI-NEXT: v_mov_b32_e32 v6, v2 +; CI-NEXT: v_mov_b32_e32 v5, v1 +; CI-NEXT: v_mov_b32_e32 v4, v0 +; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; CI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 offset:32 glc +; CI-NEXT: s_waitcnt vmcnt(0) +; CI-NEXT: buffer_wbinvl1_vol +; CI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3] +; CI-NEXT: v_mov_b32_e32 v2, v4 +; CI-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; CI-NEXT: v_mov_b32_e32 v3, v5 +; CI-NEXT: s_andn2_b64 exec, exec, s[4:5] +; CI-NEXT: s_cbranch_execnz .LBB16_1 +; CI-NEXT: ; %bb.2: ; %atomicrmw.end +; CI-NEXT: s_endpgm +; +; VI-LABEL: atomic_min_i64_offset: +; VI: ; %bb.0: ; %entry +; VI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24 +; VI-NEXT: s_mov_b64 s[8:9], 0 +; VI-NEXT: s_mov_b32 s7, 0xf000 +; VI-NEXT: s_mov_b32 s6, -1 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: s_load_dwordx2 s[10:11], s[0:1], 0x20 +; VI-NEXT: s_add_u32 s4, s0, 32 +; VI-NEXT: s_addc_u32 s5, s1, 0 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: v_mov_b32_e32 v2, s10 +; VI-NEXT: v_mov_b32_e32 v3, s11 +; VI-NEXT: .LBB16_1: ; %atomicrmw.start +; VI-NEXT: ; =>This Inner Loop Header: Depth=1 +; VI-NEXT: v_cmp_ge_i64_e32 vcc, s[2:3], v[2:3] +; VI-NEXT: v_mov_b32_e32 v0, s3 +; VI-NEXT: v_mov_b32_e32 v4, s2 +; VI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc +; VI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc +; VI-NEXT: v_mov_b32_e32 v7, v3 +; VI-NEXT: v_mov_b32_e32 v6, v2 +; VI-NEXT: v_mov_b32_e32 v5, v1 +; VI-NEXT: v_mov_b32_e32 v4, v0 +; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; VI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[4:7], 0 glc +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: buffer_wbinvl1_vol +; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3] +; VI-NEXT: v_mov_b32_e32 v2, v4 +; VI-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; VI-NEXT: v_mov_b32_e32 v3, v5 +; VI-NEXT: s_andn2_b64 exec, exec, s[8:9] +; VI-NEXT: s_cbranch_execnz .LBB16_1 +; VI-NEXT: ; %bb.2: ; %atomicrmw.end +; VI-NEXT: s_endpgm +; +; GFX9-LABEL: atomic_min_i64_offset: +; GFX9: ; %bb.0: ; %entry +; GFX9-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24 +; GFX9-NEXT: s_mov_b64 s[4:5], 0 +; GFX9-NEXT: v_mov_b32_e32 v4, 0 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x20 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v2, s6 +; GFX9-NEXT: v_mov_b32_e32 v3, s7 +; GFX9-NEXT: .LBB16_1: ; %atomicrmw.start +; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX9-NEXT: v_cmp_ge_i64_e32 vcc, s[2:3], v[2:3] +; GFX9-NEXT: v_mov_b32_e32 v0, s3 +; GFX9-NEXT: v_mov_b32_e32 v5, s2 +; GFX9-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc +; GFX9-NEXT: v_cndmask_b32_e32 v0, v5, v2, vcc +; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] offset:32 glc +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_wbinvl1_vol +; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3] +; GFX9-NEXT: v_mov_b32_e32 v3, v1 +; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX9-NEXT: v_mov_b32_e32 v2, v0 +; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5] +; GFX9-NEXT: s_cbranch_execnz .LBB16_1 +; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX9-NEXT: s_endpgm +entry: + %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4 + %tmp0 = atomicrmw volatile min i64 addrspace(1)* %gep, i64 %in seq_cst + ret void +} + +define amdgpu_kernel void @atomic_min_i64_ret_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) { +; CI-LABEL: atomic_min_i64_ret_offset: +; CI: ; %bb.0: ; 
%entry +; CI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9 +; CI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0xd +; CI-NEXT: s_mov_b64 s[10:11], 0 +; CI-NEXT: s_mov_b32 s3, 0xf000 +; CI-NEXT: s_mov_b32 s2, -1 +; CI-NEXT: s_waitcnt lgkmcnt(0) +; CI-NEXT: s_load_dwordx2 s[12:13], s[4:5], 0x8 +; CI-NEXT: s_mov_b64 s[0:1], s[4:5] +; CI-NEXT: s_waitcnt lgkmcnt(0) +; CI-NEXT: v_mov_b32_e32 v2, s12 +; CI-NEXT: v_mov_b32_e32 v3, s13 +; CI-NEXT: .LBB17_1: ; %atomicrmw.start +; CI-NEXT: ; =>This Inner Loop Header: Depth=1 +; CI-NEXT: v_cmp_ge_i64_e32 vcc, s[8:9], v[2:3] +; CI-NEXT: v_mov_b32_e32 v0, s9 +; CI-NEXT: v_mov_b32_e32 v4, s8 +; CI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc +; CI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc +; CI-NEXT: v_mov_b32_e32 v7, v3 +; CI-NEXT: v_mov_b32_e32 v6, v2 +; CI-NEXT: v_mov_b32_e32 v5, v1 +; CI-NEXT: v_mov_b32_e32 v4, v0 +; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; CI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 offset:32 glc +; CI-NEXT: s_waitcnt vmcnt(0) +; CI-NEXT: buffer_wbinvl1_vol +; CI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3] +; CI-NEXT: v_mov_b32_e32 v2, v4 +; CI-NEXT: s_or_b64 s[10:11], vcc, s[10:11] +; CI-NEXT: v_mov_b32_e32 v3, v5 +; CI-NEXT: s_andn2_b64 exec, exec, s[10:11] +; CI-NEXT: s_cbranch_execnz .LBB17_1 +; CI-NEXT: ; %bb.2: ; %atomicrmw.end +; CI-NEXT: s_or_b64 exec, exec, s[10:11] +; CI-NEXT: s_mov_b32 s3, 0xf000 +; CI-NEXT: s_mov_b32 s2, -1 +; CI-NEXT: s_mov_b32 s0, s6 +; CI-NEXT: s_mov_b32 s1, s7 +; CI-NEXT: buffer_store_dwordx2 v[4:5], off, s[0:3], 0 +; CI-NEXT: s_endpgm +; +; VI-LABEL: atomic_min_i64_ret_offset: +; VI: ; %bb.0: ; %entry +; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24 +; VI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x34 +; VI-NEXT: s_mov_b64 s[10:11], 0 +; VI-NEXT: s_mov_b32 s3, 0xf000 +; VI-NEXT: s_mov_b32 s2, -1 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: s_load_dwordx2 s[12:13], s[4:5], 0x20 +; VI-NEXT: s_add_u32 s0, s4, 32 +; VI-NEXT: s_addc_u32 s1, s5, 0 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: v_mov_b32_e32 v2, s12 +; VI-NEXT: v_mov_b32_e32 v3, s13 +; VI-NEXT: .LBB17_1: ; %atomicrmw.start +; VI-NEXT: ; =>This Inner Loop Header: Depth=1 +; VI-NEXT: v_cmp_ge_i64_e32 vcc, s[8:9], v[2:3] +; VI-NEXT: v_mov_b32_e32 v0, s9 +; VI-NEXT: v_mov_b32_e32 v4, s8 +; VI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc +; VI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc +; VI-NEXT: v_mov_b32_e32 v7, v3 +; VI-NEXT: v_mov_b32_e32 v6, v2 +; VI-NEXT: v_mov_b32_e32 v5, v1 +; VI-NEXT: v_mov_b32_e32 v4, v0 +; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; VI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: buffer_wbinvl1_vol +; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3] +; VI-NEXT: v_mov_b32_e32 v2, v4 +; VI-NEXT: s_or_b64 s[10:11], vcc, s[10:11] +; VI-NEXT: v_mov_b32_e32 v3, v5 +; VI-NEXT: s_andn2_b64 exec, exec, s[10:11] +; VI-NEXT: s_cbranch_execnz .LBB17_1 +; VI-NEXT: ; %bb.2: ; %atomicrmw.end +; VI-NEXT: s_or_b64 exec, exec, s[10:11] +; VI-NEXT: s_mov_b32 s3, 0xf000 +; VI-NEXT: s_mov_b32 s2, -1 +; VI-NEXT: s_mov_b32 s0, s6 +; VI-NEXT: s_mov_b32 s1, s7 +; VI-NEXT: buffer_store_dwordx2 v[4:5], off, s[0:3], 0 +; VI-NEXT: s_endpgm +; +; GFX9-LABEL: atomic_min_i64_ret_offset: +; GFX9: ; %bb.0: ; %entry +; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24 +; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34 +; GFX9-NEXT: s_mov_b64 s[0:1], 0 +; GFX9-NEXT: v_mov_b32_e32 v2, 0 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x20 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: 
v_mov_b32_e32 v0, s8 +; GFX9-NEXT: v_mov_b32_e32 v1, s9 +; GFX9-NEXT: .LBB17_1: ; %atomicrmw.start +; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX9-NEXT: v_mov_b32_e32 v6, v1 +; GFX9-NEXT: v_mov_b32_e32 v5, v0 +; GFX9-NEXT: v_cmp_ge_i64_e32 vcc, s[2:3], v[5:6] +; GFX9-NEXT: v_mov_b32_e32 v0, s3 +; GFX9-NEXT: v_mov_b32_e32 v1, s2 +; GFX9-NEXT: v_cndmask_b32_e32 v4, v0, v6, vcc +; GFX9-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc +; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v2, v[3:6], s[4:5] offset:32 glc +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_wbinvl1_vol +; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[5:6] +; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX9-NEXT: s_cbranch_execnz .LBB17_1 +; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX9-NEXT: s_or_b64 exec, exec, s[0:1] +; GFX9-NEXT: v_mov_b32_e32 v2, 0 +; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[6:7] +; GFX9-NEXT: s_endpgm +entry: + %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4 + %tmp0 = atomicrmw volatile min i64 addrspace(1)* %gep, i64 %in seq_cst + store i64 %tmp0, i64 addrspace(1)* %out2 + ret void +} + +define amdgpu_kernel void @atomic_min_i64_addr64_offset(i64 addrspace(1)* %out, i64 %in, i64 %index) { +; CI-LABEL: atomic_min_i64_addr64_offset: +; CI: ; %bb.0: ; %entry +; CI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0xd +; CI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9 +; CI-NEXT: s_mov_b32 s7, 0xf000 +; CI-NEXT: s_mov_b32 s6, -1 +; CI-NEXT: s_waitcnt lgkmcnt(0) +; CI-NEXT: s_lshl_b64 s[4:5], s[4:5], 3 +; CI-NEXT: s_add_u32 s4, s0, s4 +; CI-NEXT: s_addc_u32 s5, s1, s5 +; CI-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x8 +; CI-NEXT: s_mov_b64 s[0:1], 0 +; CI-NEXT: s_waitcnt lgkmcnt(0) +; CI-NEXT: v_mov_b32_e32 v2, s8 +; CI-NEXT: v_mov_b32_e32 v3, s9 +; CI-NEXT: .LBB18_1: ; %atomicrmw.start +; CI-NEXT: ; =>This Inner Loop Header: Depth=1 +; CI-NEXT: v_cmp_ge_i64_e32 vcc, s[2:3], v[2:3] +; CI-NEXT: v_mov_b32_e32 v0, s3 +; CI-NEXT: v_mov_b32_e32 v4, s2 +; CI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc +; CI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc +; CI-NEXT: v_mov_b32_e32 v7, v3 +; CI-NEXT: v_mov_b32_e32 v6, v2 +; CI-NEXT: v_mov_b32_e32 v5, v1 +; CI-NEXT: v_mov_b32_e32 v4, v0 +; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; CI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[4:7], 0 offset:32 glc +; CI-NEXT: s_waitcnt vmcnt(0) +; CI-NEXT: buffer_wbinvl1_vol +; CI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3] +; CI-NEXT: v_mov_b32_e32 v2, v4 +; CI-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; CI-NEXT: v_mov_b32_e32 v3, v5 +; CI-NEXT: s_andn2_b64 exec, exec, s[0:1] +; CI-NEXT: s_cbranch_execnz .LBB18_1 +; CI-NEXT: ; %bb.2: ; %atomicrmw.end +; CI-NEXT: s_endpgm +; +; VI-LABEL: atomic_min_i64_addr64_offset: +; VI: ; %bb.0: ; %entry +; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x34 +; VI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24 +; VI-NEXT: s_mov_b32 s7, 0xf000 +; VI-NEXT: s_mov_b32 s6, -1 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: s_lshl_b64 s[4:5], s[4:5], 3 +; VI-NEXT: s_add_u32 s4, s0, s4 +; VI-NEXT: s_addc_u32 s5, s1, s5 +; VI-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x20 +; VI-NEXT: s_add_u32 s4, s4, 32 +; VI-NEXT: s_mov_b64 s[0:1], 0 +; VI-NEXT: s_addc_u32 s5, s5, 0 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: v_mov_b32_e32 v2, s8 +; VI-NEXT: v_mov_b32_e32 v3, s9 +; VI-NEXT: .LBB18_1: ; %atomicrmw.start +; VI-NEXT: ; =>This Inner Loop Header: Depth=1 +; VI-NEXT: v_cmp_ge_i64_e32 vcc, s[2:3], v[2:3] +; VI-NEXT: v_mov_b32_e32 v0, s3 +; VI-NEXT: 
v_mov_b32_e32 v4, s2 +; VI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc +; VI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc +; VI-NEXT: v_mov_b32_e32 v7, v3 +; VI-NEXT: v_mov_b32_e32 v6, v2 +; VI-NEXT: v_mov_b32_e32 v5, v1 +; VI-NEXT: v_mov_b32_e32 v4, v0 +; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; VI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[4:7], 0 glc +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: buffer_wbinvl1_vol +; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3] +; VI-NEXT: v_mov_b32_e32 v2, v4 +; VI-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; VI-NEXT: v_mov_b32_e32 v3, v5 +; VI-NEXT: s_andn2_b64 exec, exec, s[0:1] +; VI-NEXT: s_cbranch_execnz .LBB18_1 +; VI-NEXT: ; %bb.2: ; %atomicrmw.end +; VI-NEXT: s_endpgm +; +; GFX9-LABEL: atomic_min_i64_addr64_offset: +; GFX9: ; %bb.0: ; %entry +; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34 +; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24 +; GFX9-NEXT: v_mov_b32_e32 v4, 0 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: s_lshl_b64 s[0:1], s[2:3], 3 +; GFX9-NEXT: s_add_u32 s0, s4, s0 +; GFX9-NEXT: s_addc_u32 s1, s5, s1 +; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x20 +; GFX9-NEXT: s_mov_b64 s[2:3], 0 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v2, s4 +; GFX9-NEXT: v_mov_b32_e32 v3, s5 +; GFX9-NEXT: .LBB18_1: ; %atomicrmw.start +; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX9-NEXT: v_cmp_ge_i64_e32 vcc, s[6:7], v[2:3] +; GFX9-NEXT: v_mov_b32_e32 v0, s7 +; GFX9-NEXT: v_mov_b32_e32 v5, s6 +; GFX9-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc +; GFX9-NEXT: v_cndmask_b32_e32 v0, v5, v2, vcc +; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] offset:32 glc +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_wbinvl1_vol +; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3] +; GFX9-NEXT: v_mov_b32_e32 v3, v1 +; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3] +; GFX9-NEXT: v_mov_b32_e32 v2, v0 +; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3] +; GFX9-NEXT: s_cbranch_execnz .LBB18_1 +; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX9-NEXT: s_endpgm +entry: + %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index + %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4 + %tmp0 = atomicrmw volatile min i64 addrspace(1)* %gep, i64 %in seq_cst + ret void +} + +define amdgpu_kernel void @atomic_min_i64_ret_addr64_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) { +; CI-LABEL: atomic_min_i64_ret_addr64_offset: +; CI: ; %bb.0: ; %entry +; CI-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x9 +; CI-NEXT: s_mov_b32 s11, 0xf000 +; CI-NEXT: s_mov_b32 s10, -1 +; CI-NEXT: s_waitcnt lgkmcnt(0) +; CI-NEXT: s_lshl_b64 s[6:7], s[6:7], 3 +; CI-NEXT: s_add_u32 s8, s0, s6 +; CI-NEXT: s_addc_u32 s9, s1, s7 +; CI-NEXT: s_load_dwordx2 s[6:7], s[8:9], 0x8 +; CI-NEXT: s_mov_b64 s[0:1], 0 +; CI-NEXT: s_waitcnt lgkmcnt(0) +; CI-NEXT: v_mov_b32_e32 v2, s6 +; CI-NEXT: v_mov_b32_e32 v3, s7 +; CI-NEXT: .LBB19_1: ; %atomicrmw.start +; CI-NEXT: ; =>This Inner Loop Header: Depth=1 +; CI-NEXT: v_cmp_ge_i64_e32 vcc, s[4:5], v[2:3] +; CI-NEXT: v_mov_b32_e32 v0, s5 +; CI-NEXT: v_mov_b32_e32 v4, s4 +; CI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc +; CI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc +; CI-NEXT: v_mov_b32_e32 v7, v3 +; CI-NEXT: v_mov_b32_e32 v6, v2 +; CI-NEXT: v_mov_b32_e32 v5, v1 +; CI-NEXT: v_mov_b32_e32 v4, v0 +; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; CI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[8:11], 0 offset:32 glc +; CI-NEXT: s_waitcnt vmcnt(0) +; CI-NEXT: buffer_wbinvl1_vol +; CI-NEXT: 
v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3] +; CI-NEXT: v_mov_b32_e32 v2, v4 +; CI-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; CI-NEXT: v_mov_b32_e32 v3, v5 +; CI-NEXT: s_andn2_b64 exec, exec, s[0:1] +; CI-NEXT: s_cbranch_execnz .LBB19_1 +; CI-NEXT: ; %bb.2: ; %atomicrmw.end +; CI-NEXT: s_or_b64 exec, exec, s[0:1] +; CI-NEXT: s_mov_b32 s7, 0xf000 +; CI-NEXT: s_mov_b32 s6, -1 +; CI-NEXT: s_mov_b32 s4, s2 +; CI-NEXT: s_mov_b32 s5, s3 +; CI-NEXT: buffer_store_dwordx2 v[4:5], off, s[4:7], 0 +; CI-NEXT: s_endpgm +; +; VI-LABEL: atomic_min_i64_ret_addr64_offset: +; VI: ; %bb.0: ; %entry +; VI-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24 +; VI-NEXT: s_mov_b32 s11, 0xf000 +; VI-NEXT: s_mov_b32 s10, -1 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: s_lshl_b64 s[6:7], s[6:7], 3 +; VI-NEXT: s_add_u32 s6, s0, s6 +; VI-NEXT: s_addc_u32 s7, s1, s7 +; VI-NEXT: s_load_dwordx2 s[12:13], s[6:7], 0x20 +; VI-NEXT: s_add_u32 s8, s6, 32 +; VI-NEXT: s_mov_b64 s[0:1], 0 +; VI-NEXT: s_addc_u32 s9, s7, 0 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: v_mov_b32_e32 v2, s12 +; VI-NEXT: v_mov_b32_e32 v3, s13 +; VI-NEXT: .LBB19_1: ; %atomicrmw.start +; VI-NEXT: ; =>This Inner Loop Header: Depth=1 +; VI-NEXT: v_cmp_ge_i64_e32 vcc, s[4:5], v[2:3] +; VI-NEXT: v_mov_b32_e32 v0, s5 +; VI-NEXT: v_mov_b32_e32 v4, s4 +; VI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc +; VI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc +; VI-NEXT: v_mov_b32_e32 v7, v3 +; VI-NEXT: v_mov_b32_e32 v6, v2 +; VI-NEXT: v_mov_b32_e32 v5, v1 +; VI-NEXT: v_mov_b32_e32 v4, v0 +; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; VI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[8:11], 0 glc +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: buffer_wbinvl1_vol +; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3] +; VI-NEXT: v_mov_b32_e32 v2, v4 +; VI-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; VI-NEXT: v_mov_b32_e32 v3, v5 +; VI-NEXT: s_andn2_b64 exec, exec, s[0:1] +; VI-NEXT: s_cbranch_execnz .LBB19_1 +; VI-NEXT: ; %bb.2: ; %atomicrmw.end +; VI-NEXT: s_or_b64 exec, exec, s[0:1] +; VI-NEXT: s_mov_b32 s7, 0xf000 +; VI-NEXT: s_mov_b32 s6, -1 +; VI-NEXT: s_mov_b32 s4, s2 +; VI-NEXT: s_mov_b32 s5, s3 +; VI-NEXT: buffer_store_dwordx2 v[4:5], off, s[4:7], 0 +; VI-NEXT: s_endpgm +; +; GFX9-LABEL: atomic_min_i64_ret_addr64_offset: +; GFX9: ; %bb.0: ; %entry +; GFX9-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24 +; GFX9-NEXT: v_mov_b32_e32 v2, 0 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: s_lshl_b64 s[6:7], s[6:7], 3 +; GFX9-NEXT: s_add_u32 s0, s0, s6 +; GFX9-NEXT: s_addc_u32 s1, s1, s7 +; GFX9-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x20 +; GFX9-NEXT: s_mov_b64 s[6:7], 0 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v0, s8 +; GFX9-NEXT: v_mov_b32_e32 v1, s9 +; GFX9-NEXT: .LBB19_1: ; %atomicrmw.start +; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX9-NEXT: v_mov_b32_e32 v6, v1 +; GFX9-NEXT: v_mov_b32_e32 v5, v0 +; GFX9-NEXT: v_cmp_ge_i64_e32 vcc, s[4:5], v[5:6] +; GFX9-NEXT: v_mov_b32_e32 v0, s5 +; GFX9-NEXT: v_mov_b32_e32 v1, s4 +; GFX9-NEXT: v_cndmask_b32_e32 v4, v0, v6, vcc +; GFX9-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc +; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v2, v[3:6], s[0:1] offset:32 glc +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_wbinvl1_vol +; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[5:6] +; GFX9-NEXT: s_or_b64 s[6:7], vcc, s[6:7] +; GFX9-NEXT: s_andn2_b64 exec, exec, s[6:7] +; GFX9-NEXT: s_cbranch_execnz .LBB19_1 +; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX9-NEXT: s_or_b64 exec, exec, s[6:7] +; GFX9-NEXT: v_mov_b32_e32 v2, 0 +; 
GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3] +; GFX9-NEXT: s_endpgm +entry: + %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index + %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4 + %tmp0 = atomicrmw volatile min i64 addrspace(1)* %gep, i64 %in seq_cst + store i64 %tmp0, i64 addrspace(1)* %out2 + ret void +} + +define amdgpu_kernel void @atomic_min_i64(i64 addrspace(1)* %out, i64 %in) { +; CI-LABEL: atomic_min_i64: +; CI: ; %bb.0: ; %entry +; CI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9 +; CI-NEXT: s_mov_b64 s[4:5], 0 +; CI-NEXT: s_waitcnt lgkmcnt(0) +; CI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x0 +; CI-NEXT: s_mov_b32 s6, s2 +; CI-NEXT: s_mov_b32 s7, s3 +; CI-NEXT: s_mov_b32 s3, 0xf000 +; CI-NEXT: s_mov_b32 s2, -1 +; CI-NEXT: s_waitcnt lgkmcnt(0) +; CI-NEXT: v_mov_b32_e32 v2, s8 +; CI-NEXT: v_mov_b32_e32 v3, s9 +; CI-NEXT: .LBB20_1: ; %atomicrmw.start +; CI-NEXT: ; =>This Inner Loop Header: Depth=1 +; CI-NEXT: v_cmp_ge_i64_e32 vcc, s[6:7], v[2:3] +; CI-NEXT: v_mov_b32_e32 v0, s7 +; CI-NEXT: v_mov_b32_e32 v4, s6 +; CI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc +; CI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc +; CI-NEXT: v_mov_b32_e32 v7, v3 +; CI-NEXT: v_mov_b32_e32 v6, v2 +; CI-NEXT: v_mov_b32_e32 v5, v1 +; CI-NEXT: v_mov_b32_e32 v4, v0 +; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; CI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc +; CI-NEXT: s_waitcnt vmcnt(0) +; CI-NEXT: buffer_wbinvl1_vol +; CI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3] +; CI-NEXT: v_mov_b32_e32 v2, v4 +; CI-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; CI-NEXT: v_mov_b32_e32 v3, v5 +; CI-NEXT: s_andn2_b64 exec, exec, s[4:5] +; CI-NEXT: s_cbranch_execnz .LBB20_1 +; CI-NEXT: ; %bb.2: ; %atomicrmw.end +; CI-NEXT: s_endpgm +; +; VI-LABEL: atomic_min_i64: +; VI: ; %bb.0: ; %entry +; VI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24 +; VI-NEXT: s_mov_b64 s[4:5], 0 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x0 +; VI-NEXT: s_mov_b32 s6, s2 +; VI-NEXT: s_mov_b32 s7, s3 +; VI-NEXT: s_mov_b32 s3, 0xf000 +; VI-NEXT: s_mov_b32 s2, -1 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: v_mov_b32_e32 v2, s8 +; VI-NEXT: v_mov_b32_e32 v3, s9 +; VI-NEXT: .LBB20_1: ; %atomicrmw.start +; VI-NEXT: ; =>This Inner Loop Header: Depth=1 +; VI-NEXT: v_cmp_ge_i64_e32 vcc, s[6:7], v[2:3] +; VI-NEXT: v_mov_b32_e32 v0, s7 +; VI-NEXT: v_mov_b32_e32 v4, s6 +; VI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc +; VI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc +; VI-NEXT: v_mov_b32_e32 v7, v3 +; VI-NEXT: v_mov_b32_e32 v6, v2 +; VI-NEXT: v_mov_b32_e32 v5, v1 +; VI-NEXT: v_mov_b32_e32 v4, v0 +; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; VI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: buffer_wbinvl1_vol +; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3] +; VI-NEXT: v_mov_b32_e32 v2, v4 +; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; VI-NEXT: v_mov_b32_e32 v3, v5 +; VI-NEXT: s_andn2_b64 exec, exec, s[4:5] +; VI-NEXT: s_cbranch_execnz .LBB20_1 +; VI-NEXT: ; %bb.2: ; %atomicrmw.end +; VI-NEXT: s_endpgm +; +; GFX9-LABEL: atomic_min_i64: +; GFX9: ; %bb.0: ; %entry +; GFX9-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24 +; GFX9-NEXT: s_mov_b64 s[4:5], 0 +; GFX9-NEXT: v_mov_b32_e32 v4, 0 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v2, s6 +; GFX9-NEXT: v_mov_b32_e32 v3, s7 +; GFX9-NEXT: .LBB20_1: ; %atomicrmw.start +; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX9-NEXT: 
v_cmp_ge_i64_e32 vcc, s[2:3], v[2:3] +; GFX9-NEXT: v_mov_b32_e32 v0, s3 +; GFX9-NEXT: v_mov_b32_e32 v5, s2 +; GFX9-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc +; GFX9-NEXT: v_cndmask_b32_e32 v0, v5, v2, vcc +; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_wbinvl1_vol +; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3] +; GFX9-NEXT: v_mov_b32_e32 v3, v1 +; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX9-NEXT: v_mov_b32_e32 v2, v0 +; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5] +; GFX9-NEXT: s_cbranch_execnz .LBB20_1 +; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX9-NEXT: s_endpgm +entry: + %tmp0 = atomicrmw volatile min i64 addrspace(1)* %out, i64 %in seq_cst + ret void +} + +define amdgpu_kernel void @atomic_min_i64_ret(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) { +; CI-LABEL: atomic_min_i64_ret: +; CI: ; %bb.0: ; %entry +; CI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9 +; CI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0xd +; CI-NEXT: s_mov_b64 s[10:11], 0 +; CI-NEXT: s_mov_b32 s3, 0xf000 +; CI-NEXT: s_mov_b32 s2, -1 +; CI-NEXT: s_waitcnt lgkmcnt(0) +; CI-NEXT: s_load_dwordx2 s[12:13], s[4:5], 0x0 +; CI-NEXT: s_mov_b64 s[0:1], s[4:5] +; CI-NEXT: s_waitcnt lgkmcnt(0) +; CI-NEXT: v_mov_b32_e32 v2, s12 +; CI-NEXT: v_mov_b32_e32 v3, s13 +; CI-NEXT: .LBB21_1: ; %atomicrmw.start +; CI-NEXT: ; =>This Inner Loop Header: Depth=1 +; CI-NEXT: v_cmp_ge_i64_e32 vcc, s[8:9], v[2:3] +; CI-NEXT: v_mov_b32_e32 v0, s9 +; CI-NEXT: v_mov_b32_e32 v4, s8 +; CI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc +; CI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc +; CI-NEXT: v_mov_b32_e32 v7, v3 +; CI-NEXT: v_mov_b32_e32 v6, v2 +; CI-NEXT: v_mov_b32_e32 v5, v1 +; CI-NEXT: v_mov_b32_e32 v4, v0 +; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; CI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc +; CI-NEXT: s_waitcnt vmcnt(0) +; CI-NEXT: buffer_wbinvl1_vol +; CI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3] +; CI-NEXT: v_mov_b32_e32 v2, v4 +; CI-NEXT: s_or_b64 s[10:11], vcc, s[10:11] +; CI-NEXT: v_mov_b32_e32 v3, v5 +; CI-NEXT: s_andn2_b64 exec, exec, s[10:11] +; CI-NEXT: s_cbranch_execnz .LBB21_1 +; CI-NEXT: ; %bb.2: ; %atomicrmw.end +; CI-NEXT: s_or_b64 exec, exec, s[10:11] +; CI-NEXT: s_mov_b32 s3, 0xf000 +; CI-NEXT: s_mov_b32 s2, -1 +; CI-NEXT: s_mov_b32 s0, s6 +; CI-NEXT: s_mov_b32 s1, s7 +; CI-NEXT: buffer_store_dwordx2 v[4:5], off, s[0:3], 0 +; CI-NEXT: s_endpgm +; +; VI-LABEL: atomic_min_i64_ret: +; VI: ; %bb.0: ; %entry +; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24 +; VI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x34 +; VI-NEXT: s_mov_b64 s[10:11], 0 +; VI-NEXT: s_mov_b32 s3, 0xf000 +; VI-NEXT: s_mov_b32 s2, -1 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: s_load_dwordx2 s[12:13], s[4:5], 0x0 +; VI-NEXT: s_mov_b64 s[0:1], s[4:5] +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: v_mov_b32_e32 v2, s12 +; VI-NEXT: v_mov_b32_e32 v3, s13 +; VI-NEXT: .LBB21_1: ; %atomicrmw.start +; VI-NEXT: ; =>This Inner Loop Header: Depth=1 +; VI-NEXT: v_cmp_ge_i64_e32 vcc, s[8:9], v[2:3] +; VI-NEXT: v_mov_b32_e32 v0, s9 +; VI-NEXT: v_mov_b32_e32 v4, s8 +; VI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc +; VI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc +; VI-NEXT: v_mov_b32_e32 v7, v3 +; VI-NEXT: v_mov_b32_e32 v6, v2 +; VI-NEXT: v_mov_b32_e32 v5, v1 +; VI-NEXT: v_mov_b32_e32 v4, v0 +; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; VI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: buffer_wbinvl1_vol +; 
VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3] +; VI-NEXT: v_mov_b32_e32 v2, v4 +; VI-NEXT: s_or_b64 s[10:11], vcc, s[10:11] +; VI-NEXT: v_mov_b32_e32 v3, v5 +; VI-NEXT: s_andn2_b64 exec, exec, s[10:11] +; VI-NEXT: s_cbranch_execnz .LBB21_1 +; VI-NEXT: ; %bb.2: ; %atomicrmw.end +; VI-NEXT: s_or_b64 exec, exec, s[10:11] +; VI-NEXT: s_mov_b32 s3, 0xf000 +; VI-NEXT: s_mov_b32 s2, -1 +; VI-NEXT: s_mov_b32 s0, s6 +; VI-NEXT: s_mov_b32 s1, s7 +; VI-NEXT: buffer_store_dwordx2 v[4:5], off, s[0:3], 0 +; VI-NEXT: s_endpgm +; +; GFX9-LABEL: atomic_min_i64_ret: +; GFX9: ; %bb.0: ; %entry +; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24 +; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34 +; GFX9-NEXT: s_mov_b64 s[0:1], 0 +; GFX9-NEXT: v_mov_b32_e32 v2, 0 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x0 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v0, s8 +; GFX9-NEXT: v_mov_b32_e32 v1, s9 +; GFX9-NEXT: .LBB21_1: ; %atomicrmw.start +; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX9-NEXT: v_mov_b32_e32 v6, v1 +; GFX9-NEXT: v_mov_b32_e32 v5, v0 +; GFX9-NEXT: v_cmp_ge_i64_e32 vcc, s[2:3], v[5:6] +; GFX9-NEXT: v_mov_b32_e32 v0, s3 +; GFX9-NEXT: v_mov_b32_e32 v1, s2 +; GFX9-NEXT: v_cndmask_b32_e32 v4, v0, v6, vcc +; GFX9-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc +; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v2, v[3:6], s[4:5] glc +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_wbinvl1_vol +; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[5:6] +; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX9-NEXT: s_cbranch_execnz .LBB21_1 +; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX9-NEXT: s_or_b64 exec, exec, s[0:1] +; GFX9-NEXT: v_mov_b32_e32 v2, 0 +; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[6:7] +; GFX9-NEXT: s_endpgm +entry: + %tmp0 = atomicrmw volatile min i64 addrspace(1)* %out, i64 %in seq_cst + store i64 %tmp0, i64 addrspace(1)* %out2 + ret void +} + +define amdgpu_kernel void @atomic_min_i64_addr64(i64 addrspace(1)* %out, i64 %in, i64 %index) { +; CI-LABEL: atomic_min_i64_addr64: +; CI: ; %bb.0: ; %entry +; CI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0xd +; CI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9 +; CI-NEXT: s_mov_b32 s7, 0xf000 +; CI-NEXT: s_mov_b32 s6, -1 +; CI-NEXT: s_waitcnt lgkmcnt(0) +; CI-NEXT: s_lshl_b64 s[4:5], s[4:5], 3 +; CI-NEXT: s_add_u32 s4, s0, s4 +; CI-NEXT: s_addc_u32 s5, s1, s5 +; CI-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x0 +; CI-NEXT: s_mov_b64 s[0:1], 0 +; CI-NEXT: s_waitcnt lgkmcnt(0) +; CI-NEXT: v_mov_b32_e32 v2, s8 +; CI-NEXT: v_mov_b32_e32 v3, s9 +; CI-NEXT: .LBB22_1: ; %atomicrmw.start +; CI-NEXT: ; =>This Inner Loop Header: Depth=1 +; CI-NEXT: v_cmp_ge_i64_e32 vcc, s[2:3], v[2:3] +; CI-NEXT: v_mov_b32_e32 v0, s3 +; CI-NEXT: v_mov_b32_e32 v4, s2 +; CI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc +; CI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc +; CI-NEXT: v_mov_b32_e32 v7, v3 +; CI-NEXT: v_mov_b32_e32 v6, v2 +; CI-NEXT: v_mov_b32_e32 v5, v1 +; CI-NEXT: v_mov_b32_e32 v4, v0 +; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; CI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[4:7], 0 glc +; CI-NEXT: s_waitcnt vmcnt(0) +; CI-NEXT: buffer_wbinvl1_vol +; CI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3] +; CI-NEXT: v_mov_b32_e32 v2, v4 +; CI-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; CI-NEXT: v_mov_b32_e32 v3, v5 +; CI-NEXT: s_andn2_b64 exec, exec, s[0:1] +; CI-NEXT: s_cbranch_execnz .LBB22_1 +; CI-NEXT: ; %bb.2: ; %atomicrmw.end +; CI-NEXT: s_endpgm +; +; VI-LABEL: 
atomic_min_i64_addr64: +; VI: ; %bb.0: ; %entry +; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x34 +; VI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24 +; VI-NEXT: s_mov_b32 s7, 0xf000 +; VI-NEXT: s_mov_b32 s6, -1 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: s_lshl_b64 s[4:5], s[4:5], 3 +; VI-NEXT: s_add_u32 s4, s0, s4 +; VI-NEXT: s_addc_u32 s5, s1, s5 +; VI-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x0 +; VI-NEXT: s_mov_b64 s[0:1], 0 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: v_mov_b32_e32 v2, s8 +; VI-NEXT: v_mov_b32_e32 v3, s9 +; VI-NEXT: .LBB22_1: ; %atomicrmw.start +; VI-NEXT: ; =>This Inner Loop Header: Depth=1 +; VI-NEXT: v_cmp_ge_i64_e32 vcc, s[2:3], v[2:3] +; VI-NEXT: v_mov_b32_e32 v0, s3 +; VI-NEXT: v_mov_b32_e32 v4, s2 +; VI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc +; VI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc +; VI-NEXT: v_mov_b32_e32 v7, v3 +; VI-NEXT: v_mov_b32_e32 v6, v2 +; VI-NEXT: v_mov_b32_e32 v5, v1 +; VI-NEXT: v_mov_b32_e32 v4, v0 +; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; VI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[4:7], 0 glc +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: buffer_wbinvl1_vol +; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3] +; VI-NEXT: v_mov_b32_e32 v2, v4 +; VI-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; VI-NEXT: v_mov_b32_e32 v3, v5 +; VI-NEXT: s_andn2_b64 exec, exec, s[0:1] +; VI-NEXT: s_cbranch_execnz .LBB22_1 +; VI-NEXT: ; %bb.2: ; %atomicrmw.end +; VI-NEXT: s_endpgm +; +; GFX9-LABEL: atomic_min_i64_addr64: +; GFX9: ; %bb.0: ; %entry +; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34 +; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24 +; GFX9-NEXT: v_mov_b32_e32 v4, 0 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: s_lshl_b64 s[0:1], s[2:3], 3 +; GFX9-NEXT: s_add_u32 s0, s4, s0 +; GFX9-NEXT: s_addc_u32 s1, s5, s1 +; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0 +; GFX9-NEXT: s_mov_b64 s[2:3], 0 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v2, s4 +; GFX9-NEXT: v_mov_b32_e32 v3, s5 +; GFX9-NEXT: .LBB22_1: ; %atomicrmw.start +; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX9-NEXT: v_cmp_ge_i64_e32 vcc, s[6:7], v[2:3] +; GFX9-NEXT: v_mov_b32_e32 v0, s7 +; GFX9-NEXT: v_mov_b32_e32 v5, s6 +; GFX9-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc +; GFX9-NEXT: v_cndmask_b32_e32 v0, v5, v2, vcc +; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_wbinvl1_vol +; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3] +; GFX9-NEXT: v_mov_b32_e32 v3, v1 +; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3] +; GFX9-NEXT: v_mov_b32_e32 v2, v0 +; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3] +; GFX9-NEXT: s_cbranch_execnz .LBB22_1 +; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX9-NEXT: s_endpgm +entry: + %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index + %tmp0 = atomicrmw volatile min i64 addrspace(1)* %ptr, i64 %in seq_cst + ret void +} + +define amdgpu_kernel void @atomic_min_i64_ret_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) { +; CI-LABEL: atomic_min_i64_ret_addr64: +; CI: ; %bb.0: ; %entry +; CI-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x9 +; CI-NEXT: s_mov_b32 s11, 0xf000 +; CI-NEXT: s_mov_b32 s10, -1 +; CI-NEXT: s_waitcnt lgkmcnt(0) +; CI-NEXT: s_lshl_b64 s[6:7], s[6:7], 3 +; CI-NEXT: s_add_u32 s8, s0, s6 +; CI-NEXT: s_addc_u32 s9, s1, s7 +; CI-NEXT: s_load_dwordx2 s[6:7], s[8:9], 0x0 +; CI-NEXT: s_mov_b64 s[0:1], 0 +; CI-NEXT: s_waitcnt lgkmcnt(0) +; CI-NEXT: v_mov_b32_e32 v2, s6 +; CI-NEXT: v_mov_b32_e32 v3, s7 +; CI-NEXT: 
.LBB23_1: ; %atomicrmw.start +; CI-NEXT: ; =>This Inner Loop Header: Depth=1 +; CI-NEXT: v_cmp_ge_i64_e32 vcc, s[4:5], v[2:3] +; CI-NEXT: v_mov_b32_e32 v0, s5 +; CI-NEXT: v_mov_b32_e32 v4, s4 +; CI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc +; CI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc +; CI-NEXT: v_mov_b32_e32 v7, v3 +; CI-NEXT: v_mov_b32_e32 v6, v2 +; CI-NEXT: v_mov_b32_e32 v5, v1 +; CI-NEXT: v_mov_b32_e32 v4, v0 +; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; CI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[8:11], 0 glc +; CI-NEXT: s_waitcnt vmcnt(0) +; CI-NEXT: buffer_wbinvl1_vol +; CI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3] +; CI-NEXT: v_mov_b32_e32 v2, v4 +; CI-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; CI-NEXT: v_mov_b32_e32 v3, v5 +; CI-NEXT: s_andn2_b64 exec, exec, s[0:1] +; CI-NEXT: s_cbranch_execnz .LBB23_1 +; CI-NEXT: ; %bb.2: ; %atomicrmw.end +; CI-NEXT: s_or_b64 exec, exec, s[0:1] +; CI-NEXT: s_mov_b32 s7, 0xf000 +; CI-NEXT: s_mov_b32 s6, -1 +; CI-NEXT: s_mov_b32 s4, s2 +; CI-NEXT: s_mov_b32 s5, s3 +; CI-NEXT: buffer_store_dwordx2 v[4:5], off, s[4:7], 0 +; CI-NEXT: s_endpgm +; +; VI-LABEL: atomic_min_i64_ret_addr64: +; VI: ; %bb.0: ; %entry +; VI-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24 +; VI-NEXT: s_mov_b32 s11, 0xf000 +; VI-NEXT: s_mov_b32 s10, -1 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: s_lshl_b64 s[6:7], s[6:7], 3 +; VI-NEXT: s_add_u32 s8, s0, s6 +; VI-NEXT: s_addc_u32 s9, s1, s7 +; VI-NEXT: s_load_dwordx2 s[6:7], s[8:9], 0x0 +; VI-NEXT: s_mov_b64 s[0:1], 0 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: v_mov_b32_e32 v2, s6 +; VI-NEXT: v_mov_b32_e32 v3, s7 +; VI-NEXT: .LBB23_1: ; %atomicrmw.start +; VI-NEXT: ; =>This Inner Loop Header: Depth=1 +; VI-NEXT: v_cmp_ge_i64_e32 vcc, s[4:5], v[2:3] +; VI-NEXT: v_mov_b32_e32 v0, s5 +; VI-NEXT: v_mov_b32_e32 v4, s4 +; VI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc +; VI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc +; VI-NEXT: v_mov_b32_e32 v7, v3 +; VI-NEXT: v_mov_b32_e32 v6, v2 +; VI-NEXT: v_mov_b32_e32 v5, v1 +; VI-NEXT: v_mov_b32_e32 v4, v0 +; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; VI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[8:11], 0 glc +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: buffer_wbinvl1_vol +; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3] +; VI-NEXT: v_mov_b32_e32 v2, v4 +; VI-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; VI-NEXT: v_mov_b32_e32 v3, v5 +; VI-NEXT: s_andn2_b64 exec, exec, s[0:1] +; VI-NEXT: s_cbranch_execnz .LBB23_1 +; VI-NEXT: ; %bb.2: ; %atomicrmw.end +; VI-NEXT: s_or_b64 exec, exec, s[0:1] +; VI-NEXT: s_mov_b32 s7, 0xf000 +; VI-NEXT: s_mov_b32 s6, -1 +; VI-NEXT: s_mov_b32 s4, s2 +; VI-NEXT: s_mov_b32 s5, s3 +; VI-NEXT: buffer_store_dwordx2 v[4:5], off, s[4:7], 0 +; VI-NEXT: s_endpgm +; +; GFX9-LABEL: atomic_min_i64_ret_addr64: +; GFX9: ; %bb.0: ; %entry +; GFX9-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24 +; GFX9-NEXT: v_mov_b32_e32 v2, 0 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: s_lshl_b64 s[6:7], s[6:7], 3 +; GFX9-NEXT: s_add_u32 s0, s0, s6 +; GFX9-NEXT: s_addc_u32 s1, s1, s7 +; GFX9-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x0 +; GFX9-NEXT: s_mov_b64 s[6:7], 0 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v0, s8 +; GFX9-NEXT: v_mov_b32_e32 v1, s9 +; GFX9-NEXT: .LBB23_1: ; %atomicrmw.start +; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX9-NEXT: v_mov_b32_e32 v6, v1 +; GFX9-NEXT: v_mov_b32_e32 v5, v0 +; GFX9-NEXT: v_cmp_ge_i64_e32 vcc, s[4:5], v[5:6] +; GFX9-NEXT: v_mov_b32_e32 v0, s5 +; GFX9-NEXT: v_mov_b32_e32 v1, s4 +; GFX9-NEXT: v_cndmask_b32_e32 v4, v0, v6, vcc +; GFX9-NEXT: 
v_cndmask_b32_e32 v3, v1, v5, vcc +; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v2, v[3:6], s[0:1] glc +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_wbinvl1_vol +; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[5:6] +; GFX9-NEXT: s_or_b64 s[6:7], vcc, s[6:7] +; GFX9-NEXT: s_andn2_b64 exec, exec, s[6:7] +; GFX9-NEXT: s_cbranch_execnz .LBB23_1 +; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX9-NEXT: s_or_b64 exec, exec, s[6:7] +; GFX9-NEXT: v_mov_b32_e32 v2, 0 +; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3] +; GFX9-NEXT: s_endpgm +entry: + %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index + %tmp0 = atomicrmw volatile min i64 addrspace(1)* %ptr, i64 %in seq_cst + store i64 %tmp0, i64 addrspace(1)* %out2 + ret void +} + +define amdgpu_kernel void @atomic_umin_i64_offset(i64 addrspace(1)* %out, i64 %in) { +; CI-LABEL: atomic_umin_i64_offset: +; CI: ; %bb.0: ; %entry +; CI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9 +; CI-NEXT: s_mov_b64 s[4:5], 0 +; CI-NEXT: s_waitcnt lgkmcnt(0) +; CI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x8 +; CI-NEXT: s_mov_b32 s6, s2 +; CI-NEXT: s_mov_b32 s7, s3 +; CI-NEXT: s_mov_b32 s3, 0xf000 +; CI-NEXT: s_mov_b32 s2, -1 +; CI-NEXT: s_waitcnt lgkmcnt(0) +; CI-NEXT: v_mov_b32_e32 v2, s8 +; CI-NEXT: v_mov_b32_e32 v3, s9 +; CI-NEXT: .LBB24_1: ; %atomicrmw.start +; CI-NEXT: ; =>This Inner Loop Header: Depth=1 +; CI-NEXT: v_cmp_ge_u64_e32 vcc, s[6:7], v[2:3] +; CI-NEXT: v_mov_b32_e32 v0, s7 +; CI-NEXT: v_mov_b32_e32 v4, s6 +; CI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc +; CI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc +; CI-NEXT: v_mov_b32_e32 v7, v3 +; CI-NEXT: v_mov_b32_e32 v6, v2 +; CI-NEXT: v_mov_b32_e32 v5, v1 +; CI-NEXT: v_mov_b32_e32 v4, v0 +; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; CI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 offset:32 glc +; CI-NEXT: s_waitcnt vmcnt(0) +; CI-NEXT: buffer_wbinvl1_vol +; CI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3] +; CI-NEXT: v_mov_b32_e32 v2, v4 +; CI-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; CI-NEXT: v_mov_b32_e32 v3, v5 +; CI-NEXT: s_andn2_b64 exec, exec, s[4:5] +; CI-NEXT: s_cbranch_execnz .LBB24_1 +; CI-NEXT: ; %bb.2: ; %atomicrmw.end +; CI-NEXT: s_endpgm +; +; VI-LABEL: atomic_umin_i64_offset: +; VI: ; %bb.0: ; %entry +; VI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24 +; VI-NEXT: s_mov_b64 s[8:9], 0 +; VI-NEXT: s_mov_b32 s7, 0xf000 +; VI-NEXT: s_mov_b32 s6, -1 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: s_load_dwordx2 s[10:11], s[0:1], 0x20 +; VI-NEXT: s_add_u32 s4, s0, 32 +; VI-NEXT: s_addc_u32 s5, s1, 0 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: v_mov_b32_e32 v2, s10 +; VI-NEXT: v_mov_b32_e32 v3, s11 +; VI-NEXT: .LBB24_1: ; %atomicrmw.start +; VI-NEXT: ; =>This Inner Loop Header: Depth=1 +; VI-NEXT: v_cmp_ge_u64_e32 vcc, s[2:3], v[2:3] +; VI-NEXT: v_mov_b32_e32 v0, s3 +; VI-NEXT: v_mov_b32_e32 v4, s2 +; VI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc +; VI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc +; VI-NEXT: v_mov_b32_e32 v7, v3 +; VI-NEXT: v_mov_b32_e32 v6, v2 +; VI-NEXT: v_mov_b32_e32 v5, v1 +; VI-NEXT: v_mov_b32_e32 v4, v0 +; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; VI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[4:7], 0 glc +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: buffer_wbinvl1_vol +; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3] +; VI-NEXT: v_mov_b32_e32 v2, v4 +; VI-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; VI-NEXT: v_mov_b32_e32 v3, v5 +; VI-NEXT: s_andn2_b64 exec, exec, s[8:9] +; VI-NEXT: s_cbranch_execnz .LBB24_1 +; VI-NEXT: ; %bb.2: ; %atomicrmw.end +; 
VI-NEXT: s_endpgm +; +; GFX9-LABEL: atomic_umin_i64_offset: +; GFX9: ; %bb.0: ; %entry +; GFX9-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24 +; GFX9-NEXT: s_mov_b64 s[4:5], 0 +; GFX9-NEXT: v_mov_b32_e32 v4, 0 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x20 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v2, s6 +; GFX9-NEXT: v_mov_b32_e32 v3, s7 +; GFX9-NEXT: .LBB24_1: ; %atomicrmw.start +; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX9-NEXT: v_cmp_ge_u64_e32 vcc, s[2:3], v[2:3] +; GFX9-NEXT: v_mov_b32_e32 v0, s3 +; GFX9-NEXT: v_mov_b32_e32 v5, s2 +; GFX9-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc +; GFX9-NEXT: v_cndmask_b32_e32 v0, v5, v2, vcc +; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] offset:32 glc +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_wbinvl1_vol +; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3] +; GFX9-NEXT: v_mov_b32_e32 v3, v1 +; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; GFX9-NEXT: v_mov_b32_e32 v2, v0 +; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5] +; GFX9-NEXT: s_cbranch_execnz .LBB24_1 +; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX9-NEXT: s_endpgm +entry: + %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4 + %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %gep, i64 %in seq_cst + ret void +} + +define amdgpu_kernel void @atomic_umin_i64_ret_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) { +; CI-LABEL: atomic_umin_i64_ret_offset: +; CI: ; %bb.0: ; %entry +; CI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9 +; CI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0xd +; CI-NEXT: s_mov_b64 s[10:11], 0 +; CI-NEXT: s_mov_b32 s3, 0xf000 +; CI-NEXT: s_mov_b32 s2, -1 +; CI-NEXT: s_waitcnt lgkmcnt(0) +; CI-NEXT: s_load_dwordx2 s[12:13], s[4:5], 0x8 +; CI-NEXT: s_mov_b64 s[0:1], s[4:5] +; CI-NEXT: s_waitcnt lgkmcnt(0) +; CI-NEXT: v_mov_b32_e32 v2, s12 +; CI-NEXT: v_mov_b32_e32 v3, s13 +; CI-NEXT: .LBB25_1: ; %atomicrmw.start +; CI-NEXT: ; =>This Inner Loop Header: Depth=1 +; CI-NEXT: v_cmp_ge_u64_e32 vcc, s[8:9], v[2:3] +; CI-NEXT: v_mov_b32_e32 v0, s9 +; CI-NEXT: v_mov_b32_e32 v4, s8 +; CI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc +; CI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc +; CI-NEXT: v_mov_b32_e32 v7, v3 +; CI-NEXT: v_mov_b32_e32 v6, v2 +; CI-NEXT: v_mov_b32_e32 v5, v1 +; CI-NEXT: v_mov_b32_e32 v4, v0 +; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; CI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 offset:32 glc +; CI-NEXT: s_waitcnt vmcnt(0) +; CI-NEXT: buffer_wbinvl1_vol +; CI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3] +; CI-NEXT: v_mov_b32_e32 v2, v4 +; CI-NEXT: s_or_b64 s[10:11], vcc, s[10:11] +; CI-NEXT: v_mov_b32_e32 v3, v5 +; CI-NEXT: s_andn2_b64 exec, exec, s[10:11] +; CI-NEXT: s_cbranch_execnz .LBB25_1 +; CI-NEXT: ; %bb.2: ; %atomicrmw.end +; CI-NEXT: s_or_b64 exec, exec, s[10:11] +; CI-NEXT: s_mov_b32 s3, 0xf000 +; CI-NEXT: s_mov_b32 s2, -1 +; CI-NEXT: s_mov_b32 s0, s6 +; CI-NEXT: s_mov_b32 s1, s7 +; CI-NEXT: buffer_store_dwordx2 v[4:5], off, s[0:3], 0 +; CI-NEXT: s_endpgm +; +; VI-LABEL: atomic_umin_i64_ret_offset: +; VI: ; %bb.0: ; %entry +; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24 +; VI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x34 +; VI-NEXT: s_mov_b64 s[10:11], 0 +; VI-NEXT: s_mov_b32 s3, 0xf000 +; VI-NEXT: s_mov_b32 s2, -1 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: s_load_dwordx2 s[12:13], s[4:5], 0x20 +; VI-NEXT: s_add_u32 s0, s4, 32 +; VI-NEXT: s_addc_u32 s1, s5, 0 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: v_mov_b32_e32 v2, 
s12 +; VI-NEXT: v_mov_b32_e32 v3, s13 +; VI-NEXT: .LBB25_1: ; %atomicrmw.start +; VI-NEXT: ; =>This Inner Loop Header: Depth=1 +; VI-NEXT: v_cmp_ge_u64_e32 vcc, s[8:9], v[2:3] +; VI-NEXT: v_mov_b32_e32 v0, s9 +; VI-NEXT: v_mov_b32_e32 v4, s8 +; VI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc +; VI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc +; VI-NEXT: v_mov_b32_e32 v7, v3 +; VI-NEXT: v_mov_b32_e32 v6, v2 +; VI-NEXT: v_mov_b32_e32 v5, v1 +; VI-NEXT: v_mov_b32_e32 v4, v0 +; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; VI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: buffer_wbinvl1_vol +; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3] +; VI-NEXT: v_mov_b32_e32 v2, v4 +; VI-NEXT: s_or_b64 s[10:11], vcc, s[10:11] +; VI-NEXT: v_mov_b32_e32 v3, v5 +; VI-NEXT: s_andn2_b64 exec, exec, s[10:11] +; VI-NEXT: s_cbranch_execnz .LBB25_1 +; VI-NEXT: ; %bb.2: ; %atomicrmw.end +; VI-NEXT: s_or_b64 exec, exec, s[10:11] +; VI-NEXT: s_mov_b32 s3, 0xf000 +; VI-NEXT: s_mov_b32 s2, -1 +; VI-NEXT: s_mov_b32 s0, s6 +; VI-NEXT: s_mov_b32 s1, s7 +; VI-NEXT: buffer_store_dwordx2 v[4:5], off, s[0:3], 0 +; VI-NEXT: s_endpgm +; +; GFX9-LABEL: atomic_umin_i64_ret_offset: +; GFX9: ; %bb.0: ; %entry +; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24 +; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34 +; GFX9-NEXT: s_mov_b64 s[0:1], 0 +; GFX9-NEXT: v_mov_b32_e32 v2, 0 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x20 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v0, s8 +; GFX9-NEXT: v_mov_b32_e32 v1, s9 +; GFX9-NEXT: .LBB25_1: ; %atomicrmw.start +; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX9-NEXT: v_mov_b32_e32 v6, v1 +; GFX9-NEXT: v_mov_b32_e32 v5, v0 +; GFX9-NEXT: v_cmp_ge_u64_e32 vcc, s[2:3], v[5:6] +; GFX9-NEXT: v_mov_b32_e32 v0, s3 +; GFX9-NEXT: v_mov_b32_e32 v1, s2 +; GFX9-NEXT: v_cndmask_b32_e32 v4, v0, v6, vcc +; GFX9-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc +; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v2, v[3:6], s[4:5] offset:32 glc +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_wbinvl1_vol +; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[5:6] +; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX9-NEXT: s_cbranch_execnz .LBB25_1 +; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX9-NEXT: s_or_b64 exec, exec, s[0:1] +; GFX9-NEXT: v_mov_b32_e32 v2, 0 +; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[6:7] +; GFX9-NEXT: s_endpgm +entry: + %gep = getelementptr i64, i64 addrspace(1)* %out, i64 4 + %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %gep, i64 %in seq_cst + store i64 %tmp0, i64 addrspace(1)* %out2 + ret void +} + +define amdgpu_kernel void @atomic_umin_i64_addr64_offset(i64 addrspace(1)* %out, i64 %in, i64 %index) { +; CI-LABEL: atomic_umin_i64_addr64_offset: +; CI: ; %bb.0: ; %entry +; CI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0xd +; CI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9 +; CI-NEXT: s_mov_b32 s7, 0xf000 +; CI-NEXT: s_mov_b32 s6, -1 +; CI-NEXT: s_waitcnt lgkmcnt(0) +; CI-NEXT: s_lshl_b64 s[4:5], s[4:5], 3 +; CI-NEXT: s_add_u32 s4, s0, s4 +; CI-NEXT: s_addc_u32 s5, s1, s5 +; CI-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x8 +; CI-NEXT: s_mov_b64 s[0:1], 0 +; CI-NEXT: s_waitcnt lgkmcnt(0) +; CI-NEXT: v_mov_b32_e32 v2, s8 +; CI-NEXT: v_mov_b32_e32 v3, s9 +; CI-NEXT: .LBB26_1: ; %atomicrmw.start +; CI-NEXT: ; =>This Inner Loop Header: Depth=1 +; CI-NEXT: v_cmp_ge_u64_e32 vcc, s[2:3], v[2:3] +; CI-NEXT: v_mov_b32_e32 v0, s3 +; 
CI-NEXT: v_mov_b32_e32 v4, s2 +; CI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc +; CI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc +; CI-NEXT: v_mov_b32_e32 v7, v3 +; CI-NEXT: v_mov_b32_e32 v6, v2 +; CI-NEXT: v_mov_b32_e32 v5, v1 +; CI-NEXT: v_mov_b32_e32 v4, v0 +; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; CI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[4:7], 0 offset:32 glc +; CI-NEXT: s_waitcnt vmcnt(0) +; CI-NEXT: buffer_wbinvl1_vol +; CI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3] +; CI-NEXT: v_mov_b32_e32 v2, v4 +; CI-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; CI-NEXT: v_mov_b32_e32 v3, v5 +; CI-NEXT: s_andn2_b64 exec, exec, s[0:1] +; CI-NEXT: s_cbranch_execnz .LBB26_1 +; CI-NEXT: ; %bb.2: ; %atomicrmw.end +; CI-NEXT: s_endpgm +; +; VI-LABEL: atomic_umin_i64_addr64_offset: +; VI: ; %bb.0: ; %entry +; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x34 +; VI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24 +; VI-NEXT: s_mov_b32 s7, 0xf000 +; VI-NEXT: s_mov_b32 s6, -1 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: s_lshl_b64 s[4:5], s[4:5], 3 +; VI-NEXT: s_add_u32 s4, s0, s4 +; VI-NEXT: s_addc_u32 s5, s1, s5 +; VI-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x20 +; VI-NEXT: s_add_u32 s4, s4, 32 +; VI-NEXT: s_mov_b64 s[0:1], 0 +; VI-NEXT: s_addc_u32 s5, s5, 0 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: v_mov_b32_e32 v2, s8 +; VI-NEXT: v_mov_b32_e32 v3, s9 +; VI-NEXT: .LBB26_1: ; %atomicrmw.start +; VI-NEXT: ; =>This Inner Loop Header: Depth=1 +; VI-NEXT: v_cmp_ge_u64_e32 vcc, s[2:3], v[2:3] +; VI-NEXT: v_mov_b32_e32 v0, s3 +; VI-NEXT: v_mov_b32_e32 v4, s2 +; VI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc +; VI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc +; VI-NEXT: v_mov_b32_e32 v7, v3 +; VI-NEXT: v_mov_b32_e32 v6, v2 +; VI-NEXT: v_mov_b32_e32 v5, v1 +; VI-NEXT: v_mov_b32_e32 v4, v0 +; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; VI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[4:7], 0 glc +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: buffer_wbinvl1_vol +; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3] +; VI-NEXT: v_mov_b32_e32 v2, v4 +; VI-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; VI-NEXT: v_mov_b32_e32 v3, v5 +; VI-NEXT: s_andn2_b64 exec, exec, s[0:1] +; VI-NEXT: s_cbranch_execnz .LBB26_1 +; VI-NEXT: ; %bb.2: ; %atomicrmw.end +; VI-NEXT: s_endpgm +; +; GFX9-LABEL: atomic_umin_i64_addr64_offset: +; GFX9: ; %bb.0: ; %entry +; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34 +; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24 +; GFX9-NEXT: v_mov_b32_e32 v4, 0 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: s_lshl_b64 s[0:1], s[2:3], 3 +; GFX9-NEXT: s_add_u32 s0, s4, s0 +; GFX9-NEXT: s_addc_u32 s1, s5, s1 +; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x20 +; GFX9-NEXT: s_mov_b64 s[2:3], 0 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v2, s4 +; GFX9-NEXT: v_mov_b32_e32 v3, s5 +; GFX9-NEXT: .LBB26_1: ; %atomicrmw.start +; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX9-NEXT: v_cmp_ge_u64_e32 vcc, s[6:7], v[2:3] +; GFX9-NEXT: v_mov_b32_e32 v0, s7 +; GFX9-NEXT: v_mov_b32_e32 v5, s6 +; GFX9-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc +; GFX9-NEXT: v_cndmask_b32_e32 v0, v5, v2, vcc +; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] offset:32 glc +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_wbinvl1_vol +; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3] +; GFX9-NEXT: v_mov_b32_e32 v3, v1 +; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3] +; GFX9-NEXT: v_mov_b32_e32 v2, v0 +; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3] +; GFX9-NEXT: s_cbranch_execnz .LBB26_1 +; GFX9-NEXT: ; 
%bb.2: ; %atomicrmw.end +; GFX9-NEXT: s_endpgm +entry: + %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index + %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4 + %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %gep, i64 %in seq_cst + ret void +} + +define amdgpu_kernel void @atomic_umin_i64_ret_addr64_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) { +; CI-LABEL: atomic_umin_i64_ret_addr64_offset: +; CI: ; %bb.0: ; %entry +; CI-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x9 +; CI-NEXT: s_mov_b32 s11, 0xf000 +; CI-NEXT: s_mov_b32 s10, -1 +; CI-NEXT: s_waitcnt lgkmcnt(0) +; CI-NEXT: s_lshl_b64 s[6:7], s[6:7], 3 +; CI-NEXT: s_add_u32 s8, s0, s6 +; CI-NEXT: s_addc_u32 s9, s1, s7 +; CI-NEXT: s_load_dwordx2 s[6:7], s[8:9], 0x8 +; CI-NEXT: s_mov_b64 s[0:1], 0 +; CI-NEXT: s_waitcnt lgkmcnt(0) +; CI-NEXT: v_mov_b32_e32 v2, s6 +; CI-NEXT: v_mov_b32_e32 v3, s7 +; CI-NEXT: .LBB27_1: ; %atomicrmw.start +; CI-NEXT: ; =>This Inner Loop Header: Depth=1 +; CI-NEXT: v_cmp_ge_u64_e32 vcc, s[4:5], v[2:3] +; CI-NEXT: v_mov_b32_e32 v0, s5 +; CI-NEXT: v_mov_b32_e32 v4, s4 +; CI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc +; CI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc +; CI-NEXT: v_mov_b32_e32 v7, v3 +; CI-NEXT: v_mov_b32_e32 v6, v2 +; CI-NEXT: v_mov_b32_e32 v5, v1 +; CI-NEXT: v_mov_b32_e32 v4, v0 +; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; CI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[8:11], 0 offset:32 glc +; CI-NEXT: s_waitcnt vmcnt(0) +; CI-NEXT: buffer_wbinvl1_vol +; CI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3] +; CI-NEXT: v_mov_b32_e32 v2, v4 +; CI-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; CI-NEXT: v_mov_b32_e32 v3, v5 +; CI-NEXT: s_andn2_b64 exec, exec, s[0:1] +; CI-NEXT: s_cbranch_execnz .LBB27_1 +; CI-NEXT: ; %bb.2: ; %atomicrmw.end +; CI-NEXT: s_or_b64 exec, exec, s[0:1] +; CI-NEXT: s_mov_b32 s7, 0xf000 +; CI-NEXT: s_mov_b32 s6, -1 +; CI-NEXT: s_mov_b32 s4, s2 +; CI-NEXT: s_mov_b32 s5, s3 +; CI-NEXT: buffer_store_dwordx2 v[4:5], off, s[4:7], 0 +; CI-NEXT: s_endpgm +; +; VI-LABEL: atomic_umin_i64_ret_addr64_offset: +; VI: ; %bb.0: ; %entry +; VI-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24 +; VI-NEXT: s_mov_b32 s11, 0xf000 +; VI-NEXT: s_mov_b32 s10, -1 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: s_lshl_b64 s[6:7], s[6:7], 3 +; VI-NEXT: s_add_u32 s6, s0, s6 +; VI-NEXT: s_addc_u32 s7, s1, s7 +; VI-NEXT: s_load_dwordx2 s[12:13], s[6:7], 0x20 +; VI-NEXT: s_add_u32 s8, s6, 32 +; VI-NEXT: s_mov_b64 s[0:1], 0 +; VI-NEXT: s_addc_u32 s9, s7, 0 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: v_mov_b32_e32 v2, s12 +; VI-NEXT: v_mov_b32_e32 v3, s13 +; VI-NEXT: .LBB27_1: ; %atomicrmw.start +; VI-NEXT: ; =>This Inner Loop Header: Depth=1 +; VI-NEXT: v_cmp_ge_u64_e32 vcc, s[4:5], v[2:3] +; VI-NEXT: v_mov_b32_e32 v0, s5 +; VI-NEXT: v_mov_b32_e32 v4, s4 +; VI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc +; VI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc +; VI-NEXT: v_mov_b32_e32 v7, v3 +; VI-NEXT: v_mov_b32_e32 v6, v2 +; VI-NEXT: v_mov_b32_e32 v5, v1 +; VI-NEXT: v_mov_b32_e32 v4, v0 +; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; VI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[8:11], 0 glc +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: buffer_wbinvl1_vol +; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3] +; VI-NEXT: v_mov_b32_e32 v2, v4 +; VI-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; VI-NEXT: v_mov_b32_e32 v3, v5 +; VI-NEXT: s_andn2_b64 exec, exec, s[0:1] +; VI-NEXT: s_cbranch_execnz .LBB27_1 +; VI-NEXT: ; %bb.2: ; %atomicrmw.end +; VI-NEXT: s_or_b64 exec, exec, s[0:1] +; VI-NEXT: s_mov_b32 s7, 0xf000 +; 
+; VI-NEXT: s_mov_b32 s4, s2
+; VI-NEXT: s_mov_b32 s5, s3
+; VI-NEXT: buffer_store_dwordx2 v[4:5], off, s[4:7], 0
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_umin_i64_ret_addr64_offset:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; GFX9-NEXT: s_add_u32 s0, s0, s6
+; GFX9-NEXT: s_addc_u32 s1, s1, s7
+; GFX9-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x20
+; GFX9-NEXT: s_mov_b64 s[6:7], 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v0, s8
+; GFX9-NEXT: v_mov_b32_e32 v1, s9
+; GFX9-NEXT: .LBB27_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_mov_b32_e32 v6, v1
+; GFX9-NEXT: v_mov_b32_e32 v5, v0
+; GFX9-NEXT: v_cmp_ge_u64_e32 vcc, s[4:5], v[5:6]
+; GFX9-NEXT: v_mov_b32_e32 v0, s5
+; GFX9-NEXT: v_mov_b32_e32 v1, s4
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v0, v6, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v2, v[3:6], s[0:1] offset:32 glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[5:6]
+; GFX9-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[6:7]
+; GFX9-NEXT: s_cbranch_execnz .LBB27_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_or_b64 exec, exec, s[6:7]
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3]
+; GFX9-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
+ %gep = getelementptr i64, i64 addrspace(1)* %ptr, i64 4
+ %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %gep, i64 %in seq_cst
+ store i64 %tmp0, i64 addrspace(1)* %out2
+ ret void
+}
+
+define amdgpu_kernel void @atomic_umin_i64(i64 addrspace(1)* %out, i64 %in) {
+; CI-LABEL: atomic_umin_i64:
+; CI: ; %bb.0: ; %entry
+; CI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
+; CI-NEXT: s_mov_b64 s[4:5], 0
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x0
+; CI-NEXT: s_mov_b32 s6, s2
+; CI-NEXT: s_mov_b32 s7, s3
+; CI-NEXT: s_mov_b32 s3, 0xf000
+; CI-NEXT: s_mov_b32 s2, -1
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: v_mov_b32_e32 v2, s8
+; CI-NEXT: v_mov_b32_e32 v3, s9
+; CI-NEXT: .LBB28_1: ; %atomicrmw.start
+; CI-NEXT: ; =>This Inner Loop Header: Depth=1
+; CI-NEXT: v_cmp_ge_u64_e32 vcc, s[6:7], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v0, s7
+; CI-NEXT: v_mov_b32_e32 v4, s6
+; CI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; CI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; CI-NEXT: v_mov_b32_e32 v7, v3
+; CI-NEXT: v_mov_b32_e32 v6, v2
+; CI-NEXT: v_mov_b32_e32 v5, v1
+; CI-NEXT: v_mov_b32_e32 v4, v0
+; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc
+; CI-NEXT: s_waitcnt vmcnt(0)
+; CI-NEXT: buffer_wbinvl1_vol
+; CI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v2, v4
+; CI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; CI-NEXT: v_mov_b32_e32 v3, v5
+; CI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; CI-NEXT: s_cbranch_execnz .LBB28_1
+; CI-NEXT: ; %bb.2: ; %atomicrmw.end
+; CI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_umin_i64:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x0
+; VI-NEXT: s_mov_b32 s6, s2
+; VI-NEXT: s_mov_b32 s7, s3
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s8
+; VI-NEXT: v_mov_b32_e32 v3, s9
+; VI-NEXT: .LBB28_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_cmp_ge_u64_e32 vcc, s[6:7], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v0, s7
+; VI-NEXT: v_mov_b32_e32 v4, s6
+; VI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; VI-NEXT: v_mov_b32_e32 v7, v3
+; VI-NEXT: v_mov_b32_e32 v6, v2
+; VI-NEXT: v_mov_b32_e32 v5, v1
+; VI-NEXT: v_mov_b32_e32 v4, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v2, v4
+; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT: v_mov_b32_e32 v3, v5
+; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT: s_cbranch_execnz .LBB28_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_umin_i64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, s6
+; GFX9-NEXT: v_mov_b32_e32 v3, s7
+; GFX9-NEXT: .LBB28_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_cmp_ge_u64_e32 vcc, s[2:3], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v0, s3
+; GFX9-NEXT: v_mov_b32_e32 v5, s2
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v5, v2, vcc
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_execnz .LBB28_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+entry:
+ %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %out, i64 %in seq_cst
+ ret void
+}
+
+define amdgpu_kernel void @atomic_umin_i64_ret(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in) {
+; CI-LABEL: atomic_umin_i64_ret:
+; CI: ; %bb.0: ; %entry
+; CI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; CI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0xd
+; CI-NEXT: s_mov_b64 s[10:11], 0
+; CI-NEXT: s_mov_b32 s3, 0xf000
+; CI-NEXT: s_mov_b32 s2, -1
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: s_load_dwordx2 s[12:13], s[4:5], 0x0
+; CI-NEXT: s_mov_b64 s[0:1], s[4:5]
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: v_mov_b32_e32 v2, s12
+; CI-NEXT: v_mov_b32_e32 v3, s13
+; CI-NEXT: .LBB29_1: ; %atomicrmw.start
+; CI-NEXT: ; =>This Inner Loop Header: Depth=1
+; CI-NEXT: v_cmp_ge_u64_e32 vcc, s[8:9], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v0, s9
+; CI-NEXT: v_mov_b32_e32 v4, s8
+; CI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; CI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; CI-NEXT: v_mov_b32_e32 v7, v3
+; CI-NEXT: v_mov_b32_e32 v6, v2
+; CI-NEXT: v_mov_b32_e32 v5, v1
+; CI-NEXT: v_mov_b32_e32 v4, v0
+; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc
+; CI-NEXT: s_waitcnt vmcnt(0)
+; CI-NEXT: buffer_wbinvl1_vol
+; CI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v2, v4
+; CI-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
+; CI-NEXT: v_mov_b32_e32 v3, v5
+; CI-NEXT: s_andn2_b64 exec, exec, s[10:11]
+; CI-NEXT: s_cbranch_execnz .LBB29_1
+; CI-NEXT: ; %bb.2: ; %atomicrmw.end
+; CI-NEXT: s_or_b64 exec, exec, s[10:11]
+; CI-NEXT: s_mov_b32 s3, 0xf000
+; CI-NEXT: s_mov_b32 s2, -1
+; CI-NEXT: s_mov_b32 s0, s6
+; CI-NEXT: s_mov_b32 s1, s7
+; CI-NEXT: buffer_store_dwordx2 v[4:5], off, s[0:3], 0
+; CI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_umin_i64_ret:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; VI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x34
+; VI-NEXT: s_mov_b64 s[10:11], 0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dwordx2 s[12:13], s[4:5], 0x0
+; VI-NEXT: s_mov_b64 s[0:1], s[4:5]
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s12
+; VI-NEXT: v_mov_b32_e32 v3, s13
+; VI-NEXT: .LBB29_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_cmp_ge_u64_e32 vcc, s[8:9], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v0, s9
+; VI-NEXT: v_mov_b32_e32 v4, s8
+; VI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; VI-NEXT: v_mov_b32_e32 v7, v3
+; VI-NEXT: v_mov_b32_e32 v6, v2
+; VI-NEXT: v_mov_b32_e32 v5, v1
+; VI-NEXT: v_mov_b32_e32 v4, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v2, v4
+; VI-NEXT: s_or_b64 s[10:11], vcc, s[10:11]
+; VI-NEXT: v_mov_b32_e32 v3, v5
+; VI-NEXT: s_andn2_b64 exec, exec, s[10:11]
+; VI-NEXT: s_cbranch_execnz .LBB29_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_or_b64 exec, exec, s[10:11]
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: s_mov_b32 s0, s6
+; VI-NEXT: s_mov_b32 s1, s7
+; VI-NEXT: buffer_store_dwordx2 v[4:5], off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_umin_i64_ret:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v0, s8
+; GFX9-NEXT: v_mov_b32_e32 v1, s9
+; GFX9-NEXT: .LBB29_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_mov_b32_e32 v6, v1
+; GFX9-NEXT: v_mov_b32_e32 v5, v0
+; GFX9-NEXT: v_cmp_ge_u64_e32 vcc, s[2:3], v[5:6]
+; GFX9-NEXT: v_mov_b32_e32 v0, s3
+; GFX9-NEXT: v_mov_b32_e32 v1, s2
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v0, v6, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v2, v[3:6], s[4:5] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[5:6]
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB29_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[6:7]
+; GFX9-NEXT: s_endpgm
+entry:
+ %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %out, i64 %in seq_cst
+ store i64 %tmp0, i64 addrspace(1)* %out2
+ ret void
+}
+
+define amdgpu_kernel void @atomic_umin_i64_addr64(i64 addrspace(1)* %out, i64 %in, i64 %index) {
+; CI-LABEL: atomic_umin_i64_addr64:
+; CI: ; %bb.0: ; %entry
+; CI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0xd
+; CI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
+; CI-NEXT: s_mov_b32 s7, 0xf000
+; CI-NEXT: s_mov_b32 s6, -1
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: s_lshl_b64 s[4:5], s[4:5], 3
+; CI-NEXT: s_add_u32 s4, s0, s4
+; CI-NEXT: s_addc_u32 s5, s1, s5
+; CI-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x0
+; CI-NEXT: s_mov_b64 s[0:1], 0
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: v_mov_b32_e32 v2, s8
+; CI-NEXT: v_mov_b32_e32 v3, s9
+; CI-NEXT: .LBB30_1: ; %atomicrmw.start
+; CI-NEXT: ; =>This Inner Loop Header: Depth=1
+; CI-NEXT: v_cmp_ge_u64_e32 vcc, s[2:3], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v0, s3
+; CI-NEXT: v_mov_b32_e32 v4, s2
+; CI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; CI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; CI-NEXT: v_mov_b32_e32 v7, v3
+; CI-NEXT: v_mov_b32_e32 v6, v2
+; CI-NEXT: v_mov_b32_e32 v5, v1
+; CI-NEXT: v_mov_b32_e32 v4, v0
+; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[4:7], 0 glc
+; CI-NEXT: s_waitcnt vmcnt(0)
+; CI-NEXT: buffer_wbinvl1_vol
+; CI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v2, v4
+; CI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; CI-NEXT: v_mov_b32_e32 v3, v5
+; CI-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; CI-NEXT: s_cbranch_execnz .LBB30_1
+; CI-NEXT: ; %bb.2: ; %atomicrmw.end
+; CI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_umin_i64_addr64:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x34
+; VI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
+; VI-NEXT: s_mov_b32 s7, 0xf000
+; VI-NEXT: s_mov_b32 s6, -1
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_lshl_b64 s[4:5], s[4:5], 3
+; VI-NEXT: s_add_u32 s4, s0, s4
+; VI-NEXT: s_addc_u32 s5, s1, s5
+; VI-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x0
+; VI-NEXT: s_mov_b64 s[0:1], 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s8
+; VI-NEXT: v_mov_b32_e32 v3, s9
+; VI-NEXT: .LBB30_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_cmp_ge_u64_e32 vcc, s[2:3], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v0, s3
+; VI-NEXT: v_mov_b32_e32 v4, s2
+; VI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; VI-NEXT: v_mov_b32_e32 v7, v3
+; VI-NEXT: v_mov_b32_e32 v6, v2
+; VI-NEXT: v_mov_b32_e32 v5, v1
+; VI-NEXT: v_mov_b32_e32 v4, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[4:7], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v2, v4
+; VI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; VI-NEXT: v_mov_b32_e32 v3, v5
+; VI-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; VI-NEXT: s_cbranch_execnz .LBB30_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_umin_i64_addr64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_lshl_b64 s[0:1], s[2:3], 3
+; GFX9-NEXT: s_add_u32 s0, s4, s0
+; GFX9-NEXT: s_addc_u32 s1, s5, s1
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-NEXT: v_mov_b32_e32 v3, s5
+; GFX9-NEXT: .LBB30_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_cmp_ge_u64_e32 vcc, s[6:7], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v0, s7
+; GFX9-NEXT: v_mov_b32_e32 v5, s6
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v5, v2, vcc
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-NEXT: s_cbranch_execnz .LBB30_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
+ %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %ptr, i64 %in seq_cst
+ ret void
+}
+
+define amdgpu_kernel void @atomic_umin_i64_ret_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %out2, i64 %in, i64 %index) {
+; CI-LABEL: atomic_umin_i64_ret_addr64:
+; CI: ; %bb.0: ; %entry
+; CI-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x9
+; CI-NEXT: s_mov_b32 s11, 0xf000
+; CI-NEXT: s_mov_b32 s10, -1
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; CI-NEXT: s_add_u32 s8, s0, s6
+; CI-NEXT: s_addc_u32 s9, s1, s7
+; CI-NEXT: s_load_dwordx2 s[6:7], s[8:9], 0x0
+; CI-NEXT: s_mov_b64 s[0:1], 0
+; CI-NEXT: s_waitcnt lgkmcnt(0)
+; CI-NEXT: v_mov_b32_e32 v2, s6
+; CI-NEXT: v_mov_b32_e32 v3, s7
+; CI-NEXT: .LBB31_1: ; %atomicrmw.start
+; CI-NEXT: ; =>This Inner Loop Header: Depth=1
+; CI-NEXT: v_cmp_ge_u64_e32 vcc, s[4:5], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v0, s5
+; CI-NEXT: v_mov_b32_e32 v4, s4
+; CI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; CI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; CI-NEXT: v_mov_b32_e32 v7, v3
+; CI-NEXT: v_mov_b32_e32 v6, v2
+; CI-NEXT: v_mov_b32_e32 v5, v1
+; CI-NEXT: v_mov_b32_e32 v4, v0
+; CI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[8:11], 0 glc
+; CI-NEXT: s_waitcnt vmcnt(0)
+; CI-NEXT: buffer_wbinvl1_vol
+; CI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; CI-NEXT: v_mov_b32_e32 v2, v4
+; CI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; CI-NEXT: v_mov_b32_e32 v3, v5
+; CI-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; CI-NEXT: s_cbranch_execnz .LBB31_1
+; CI-NEXT: ; %bb.2: ; %atomicrmw.end
+; CI-NEXT: s_or_b64 exec, exec, s[0:1]
+; CI-NEXT: s_mov_b32 s7, 0xf000
+; CI-NEXT: s_mov_b32 s6, -1
+; CI-NEXT: s_mov_b32 s4, s2
+; CI-NEXT: s_mov_b32 s5, s3
+; CI-NEXT: buffer_store_dwordx2 v[4:5], off, s[4:7], 0
+; CI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_umin_i64_ret_addr64:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
+; VI-NEXT: s_mov_b32 s11, 0xf000
+; VI-NEXT: s_mov_b32 s10, -1
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; VI-NEXT: s_add_u32 s8, s0, s6
+; VI-NEXT: s_addc_u32 s9, s1, s7
+; VI-NEXT: s_load_dwordx2 s[6:7], s[8:9], 0x0
+; VI-NEXT: s_mov_b64 s[0:1], 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s6
+; VI-NEXT: v_mov_b32_e32 v3, s7
+; VI-NEXT: .LBB31_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_cmp_ge_u64_e32 vcc, s[4:5], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v0, s5
+; VI-NEXT: v_mov_b32_e32 v4, s4
+; VI-NEXT: v_cndmask_b32_e32 v1, v0, v3, vcc
+; VI-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; VI-NEXT: v_mov_b32_e32 v7, v3
+; VI-NEXT: v_mov_b32_e32 v6, v2
+; VI-NEXT: v_mov_b32_e32 v5, v1
+; VI-NEXT: v_mov_b32_e32 v4, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[8:11], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; VI-NEXT: v_mov_b32_e32 v2, v4
+; VI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; VI-NEXT: v_mov_b32_e32 v3, v5
+; VI-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; VI-NEXT: s_cbranch_execnz .LBB31_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_or_b64 exec, exec, s[0:1]
+; VI-NEXT: s_mov_b32 s7, 0xf000
+; VI-NEXT: s_mov_b32 s6, -1
+; VI-NEXT: s_mov_b32 s4, s2
+; VI-NEXT: s_mov_b32 s5, s3
+; VI-NEXT: buffer_store_dwordx2 v[4:5], off, s[4:7], 0
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_umin_i64_ret_addr64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
+; GFX9-NEXT: s_add_u32 s0, s0, s6
+; GFX9-NEXT: s_addc_u32 s1, s1, s7
+; GFX9-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x0
+; GFX9-NEXT: s_mov_b64 s[6:7], 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v0, s8
+; GFX9-NEXT: v_mov_b32_e32 v1, s9
+; GFX9-NEXT: .LBB31_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_mov_b32_e32 v6, v1
+; GFX9-NEXT: v_mov_b32_e32 v5, v0
+; GFX9-NEXT: v_cmp_ge_u64_e32 vcc, s[4:5], v[5:6]
+; GFX9-NEXT: v_mov_b32_e32 v0, s5
+; GFX9-NEXT: v_mov_b32_e32 v1, s4
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v0, v6, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v1, v5, vcc
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v2, v[3:6], s[0:1] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[5:6]
+; GFX9-NEXT: s_or_b64 s[6:7], vcc, s[6:7]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[6:7]
+; GFX9-NEXT: s_cbranch_execnz .LBB31_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_or_b64 exec, exec, s[6:7]
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3]
+; GFX9-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i64, i64 addrspace(1)* %out, i64 %index
+ %tmp0 = atomicrmw volatile umin i64 addrspace(1)* %ptr, i64 %in seq_cst
+ store i64 %tmp0, i64 addrspace(1)* %out2
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/global_atomics_min_max_system.ll b/llvm/test/CodeGen/AMDGPU/global_atomics_min_max_system.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/global_atomics_min_max_system.ll
@@ -0,0 +1,3312 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -march=amdgcn -amdgpu-atomic-optimizations=false -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=SI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -amdgpu-atomic-optimizations=false -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=VI %s
+; RUN: llc -march=amdgcn -mcpu=gfx900 -amdgpu-atomic-optimizations=false -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GFX9 %s
+
+define amdgpu_kernel void @atomic_max_i32_offset(i32 addrspace(1)* %out, i32 %in) {
+; SI-LABEL: atomic_max_i32_offset:
+; SI: ; %bb.0: ; %entry
+; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9
+; SI-NEXT: s_load_dword s2, s[0:1], 0xb
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_load_dword s3, s[4:5], 0x4
+; SI-NEXT: s_mov_b64 s[0:1], 0
+; SI-NEXT: s_mov_b32 s7, 0xf000
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: s_mov_b32 s6, -1
+; SI-NEXT: .LBB0_1: ; %atomicrmw.start
+; SI-NEXT: ; =>This Inner Loop Header: Depth=1
+; SI-NEXT: v_max_i32_e32 v0, s2, v1
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mov_b32_e32 v3, v1
+; SI-NEXT: v_mov_b32_e32 v2, v0
+; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 offset:16 glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: buffer_wbinvl1
+; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; SI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; SI-NEXT: v_mov_b32_e32 v1, v2
+; SI-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; SI-NEXT: s_cbranch_execnz .LBB0_1
+; SI-NEXT: ; %bb.2: ; %atomicrmw.end
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_max_i32_offset:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x24
+; VI-NEXT: s_load_dword s6, s[0:1], 0x2c
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dword s2, s[8:9], 0x10
+; VI-NEXT: s_add_u32 s0, s8, 16
+; VI-NEXT: s_addc_u32 s1, s9, 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s2
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: .LBB0_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_max_i32_e32 v0, s6, v1
+; VI-NEXT: v_mov_b32_e32 v3, v1
+; VI-NEXT: v_mov_b32_e32 v2, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT: v_mov_b32_e32 v1, v2
+; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT: s_cbranch_execnz .LBB0_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_max_i32_offset:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
+; GFX9-NEXT: s_load_dword s4, s[0:1], 0x2c
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dword s5, s[2:3], 0x10
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v1, s5
+; GFX9-NEXT: .LBB0_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_max_i32_e32 v0, s4, v1
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[2:3] offset:16 glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v1, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB0_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+entry:
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
+ %val = atomicrmw volatile max i32 addrspace(1)* %gep, i32 %in seq_cst
+ ret void
+}
+
+define amdgpu_kernel void @atomic_max_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
+; SI-LABEL: atomic_max_i32_ret_offset:
+; SI: ; %bb.0: ; %entry
+; SI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; SI-NEXT: s_load_dword s10, s[0:1], 0xd
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_load_dword s2, s[4:5], 0x4
+; SI-NEXT: s_mov_b64 s[8:9], 0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b64 s[0:1], s[4:5]
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s2
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: .LBB1_1: ; %atomicrmw.start
+; SI-NEXT: ; =>This Inner Loop Header: Depth=1
+; SI-NEXT: v_max_i32_e32 v0, s10, v1
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mov_b32_e32 v3, v1
+; SI-NEXT: v_mov_b32_e32 v2, v0
+; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 offset:16 glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: buffer_wbinvl1
+; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT: v_mov_b32_e32 v1, v2
+; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; SI-NEXT: s_cbranch_execnz .LBB1_1
+; SI-NEXT: ; %bb.2: ; %atomicrmw.end
+; SI-NEXT: s_or_b64 exec, exec, s[8:9]
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_mov_b32 s0, s6
+; SI-NEXT: s_mov_b32 s1, s7
+; SI-NEXT: buffer_store_dword v2, off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_max_i32_ret_offset:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; VI-NEXT: s_load_dword s10, s[0:1], 0x34
+; VI-NEXT: s_mov_b64 s[8:9], 0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dword s2, s[4:5], 0x10
+; VI-NEXT: s_add_u32 s0, s4, 16
+; VI-NEXT: s_addc_u32 s1, s5, 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s2
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: .LBB1_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_max_i32_e32 v0, s10, v1
+; VI-NEXT: v_mov_b32_e32 v3, v1
+; VI-NEXT: v_mov_b32_e32 v2, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; VI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; VI-NEXT: v_mov_b32_e32 v1, v2
+; VI-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; VI-NEXT: s_cbranch_execnz .LBB1_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_or_b64 exec, exec, s[8:9]
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: s_mov_b32 s0, s6
+; VI-NEXT: s_mov_b32 s1, s7
+; VI-NEXT: buffer_store_dword v2, off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_max_i32_ret_offset:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX9-NEXT: s_load_dword s2, s[0:1], 0x34
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dword s3, s[4:5], 0x10
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v0, s3
+; GFX9-NEXT: .LBB1_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_mov_b32_e32 v3, v0
+; GFX9-NEXT: v_max_i32_e32 v2, s2, v3
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap v0, v1, v[2:3], s[4:5] offset:16 glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB1_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: global_store_dword v1, v0, s[6:7]
+; GFX9-NEXT: s_endpgm
+entry:
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
+ %val = atomicrmw volatile max i32 addrspace(1)* %gep, i32 %in seq_cst
+ store i32 %val, i32 addrspace(1)* %out2
+ ret void
+}
+
+define amdgpu_kernel void @atomic_max_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
+; SI-LABEL: atomic_max_i32_addr64_offset:
+; SI: ; %bb.0: ; %entry
+; SI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xd
+; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9
+; SI-NEXT: s_load_dword s6, s[0:1], 0xb
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; SI-NEXT: s_add_u32 s0, s4, s0
+; SI-NEXT: s_addc_u32 s1, s5, s1
+; SI-NEXT: s_load_dword s2, s[0:1], 0x4
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s2
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: .LBB2_1: ; %atomicrmw.start
+; SI-NEXT: ; =>This Inner Loop Header: Depth=1
+; SI-NEXT: v_max_i32_e32 v0, s6, v1
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mov_b32_e32 v3, v1
+; SI-NEXT: v_mov_b32_e32 v2, v0
+; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 offset:16 glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: buffer_wbinvl1
+; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; SI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; SI-NEXT: v_mov_b32_e32 v1, v2
+; SI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; SI-NEXT: s_cbranch_execnz .LBB2_1
+; SI-NEXT: ; %bb.2: ; %atomicrmw.end
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_max_i32_addr64_offset:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24
+; VI-NEXT: s_load_dword s6, s[0:1], 0x2c
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; VI-NEXT: s_add_u32 s0, s4, s0
+; VI-NEXT: s_addc_u32 s1, s5, s1
+; VI-NEXT: s_load_dword s2, s[0:1], 0x10
+; VI-NEXT: s_add_u32 s0, s0, 16
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_addc_u32 s1, s1, 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s2
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: .LBB2_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_max_i32_e32 v0, s6, v1
+; VI-NEXT: v_mov_b32_e32 v3, v1
+; VI-NEXT: v_mov_b32_e32 v2, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT: v_mov_b32_e32 v1, v2
+; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT: s_cbranch_execnz .LBB2_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_max_i32_addr64_offset:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x24
+; GFX9-NEXT: s_load_dword s4, s[0:1], 0x2c
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GFX9-NEXT: s_add_u32 s0, s6, s0
+; GFX9-NEXT: s_addc_u32 s1, s7, s1
+; GFX9-NEXT: s_load_dword s5, s[0:1], 0x10
+; GFX9-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v1, s5
+; GFX9-NEXT: .LBB2_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_max_i32_e32 v0, s4, v1
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v1, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-NEXT: s_cbranch_execnz .LBB2_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+ %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
+ %val = atomicrmw volatile max i32 addrspace(1)* %gep, i32 %in seq_cst
+ ret void
+}
+
+define amdgpu_kernel void @atomic_max_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
+; SI-LABEL: atomic_max_i32_ret_addr64_offset:
+; SI: ; %bb.0: ; %entry
+; SI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xf
+; SI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; SI-NEXT: s_load_dword s8, s[0:1], 0xd
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; SI-NEXT: s_add_u32 s0, s4, s0
+; SI-NEXT: s_addc_u32 s1, s5, s1
+; SI-NEXT: s_load_dword s2, s[0:1], 0x4
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s2
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: .LBB3_1: ; %atomicrmw.start
+; SI-NEXT: ; =>This Inner Loop Header: Depth=1
+; SI-NEXT: v_max_i32_e32 v0, s8, v1
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mov_b32_e32 v3, v1
+; SI-NEXT: v_mov_b32_e32 v2, v0
+; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 offset:16 glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: buffer_wbinvl1
+; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; SI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; SI-NEXT: v_mov_b32_e32 v1, v2
+; SI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; SI-NEXT: s_cbranch_execnz .LBB3_1
+; SI-NEXT: ; %bb.2: ; %atomicrmw.end
+; SI-NEXT: s_or_b64 exec, exec, s[4:5]
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_mov_b32 s0, s6
+; SI-NEXT: s_mov_b32 s1, s7
+; SI-NEXT: buffer_store_dword v2, off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_max_i32_ret_addr64_offset:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x3c
+; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; VI-NEXT: s_load_dword s8, s[0:1], 0x34
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; VI-NEXT: s_add_u32 s0, s4, s0
+; VI-NEXT: s_addc_u32 s1, s5, s1
+; VI-NEXT: s_load_dword s2, s[0:1], 0x10
+; VI-NEXT: s_add_u32 s0, s0, 16
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_addc_u32 s1, s1, 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s2
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: .LBB3_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_max_i32_e32 v0, s8, v1
+; VI-NEXT: v_mov_b32_e32 v3, v1
+; VI-NEXT: v_mov_b32_e32 v2, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT: v_mov_b32_e32 v1, v2
+; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT: s_cbranch_execnz .LBB3_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_or_b64 exec, exec, s[4:5]
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: s_mov_b32 s0, s6
+; VI-NEXT: s_mov_b32 s1, s7
+; VI-NEXT: buffer_store_dword v2, off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_max_i32_ret_addr64_offset:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x3c
+; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX9-NEXT: s_load_dword s8, s[0:1], 0x34
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GFX9-NEXT: s_add_u32 s0, s4, s0
+; GFX9-NEXT: s_addc_u32 s1, s5, s1
+; GFX9-NEXT: s_load_dword s4, s[0:1], 0x10
+; GFX9-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v0, s4
+; GFX9-NEXT: .LBB3_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_mov_b32_e32 v3, v0
+; GFX9-NEXT: v_max_i32_e32 v2, s8, v3
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap v0, v1, v[2:3], s[0:1] offset:16 glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3
+; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-NEXT: s_cbranch_execnz .LBB3_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: global_store_dword v1, v0, s[6:7]
+; GFX9-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+ %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4
+ %val = atomicrmw volatile max i32 addrspace(1)* %gep, i32 %in seq_cst
+ store i32 %val, i32 addrspace(1)* %out2
+ ret void
+}
+
+define amdgpu_kernel void @atomic_max_i32(i32 addrspace(1)* %out, i32 %in) {
+; SI-LABEL: atomic_max_i32:
+; SI: ; %bb.0: ; %entry
+; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9
+; SI-NEXT: s_load_dword s2, s[0:1], 0xb
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_load_dword s3, s[4:5], 0x0
+; SI-NEXT: s_mov_b64 s[0:1], 0
+; SI-NEXT: s_mov_b32 s7, 0xf000
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: s_mov_b32 s6, -1
+; SI-NEXT: .LBB4_1: ; %atomicrmw.start
+; SI-NEXT: ; =>This Inner Loop Header: Depth=1
+; SI-NEXT: v_max_i32_e32 v0, s2, v1
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mov_b32_e32 v3, v1
+; SI-NEXT: v_mov_b32_e32 v2, v0
+; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: buffer_wbinvl1
+; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; SI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; SI-NEXT: v_mov_b32_e32 v1, v2
+; SI-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; SI-NEXT: s_cbranch_execnz .LBB4_1
+; SI-NEXT: ; %bb.2: ; %atomicrmw.end
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_max_i32:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24
+; VI-NEXT: s_load_dword s2, s[0:1], 0x2c
+; VI-NEXT: s_mov_b64 s[0:1], 0
+; VI-NEXT: s_mov_b32 s7, 0xf000
+; VI-NEXT: s_mov_b32 s6, -1
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dword s3, s[4:5], 0x0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: .LBB4_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_max_i32_e32 v0, s2, v1
+; VI-NEXT: v_mov_b32_e32 v3, v1
+; VI-NEXT: v_mov_b32_e32 v2, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; VI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; VI-NEXT: v_mov_b32_e32 v1, v2
+; VI-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; VI-NEXT: s_cbranch_execnz .LBB4_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_max_i32:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
+; GFX9-NEXT: s_load_dword s4, s[0:1], 0x2c
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dword s5, s[2:3], 0x0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v1, s5
+; GFX9-NEXT: .LBB4_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_max_i32_e32 v0, s4, v1
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[2:3] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v1, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB4_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+entry:
+ %val = atomicrmw volatile max i32 addrspace(1)* %out, i32 %in seq_cst
+ ret void
+}
+
+define amdgpu_kernel void @atomic_max_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
+; SI-LABEL: atomic_max_i32_ret:
+; SI: ; %bb.0: ; %entry
+; SI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; SI-NEXT: s_load_dword s10, s[0:1], 0xd
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_load_dword s2, s[4:5], 0x0
+; SI-NEXT: s_mov_b64 s[8:9], 0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b64 s[0:1], s[4:5]
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s2
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: .LBB5_1: ; %atomicrmw.start
+; SI-NEXT: ; =>This Inner Loop Header: Depth=1
+; SI-NEXT: v_max_i32_e32 v0, s10, v1
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mov_b32_e32 v3, v1
+; SI-NEXT: v_mov_b32_e32 v2, v0
+; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: buffer_wbinvl1
+; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT: v_mov_b32_e32 v1, v2
+; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; SI-NEXT: s_cbranch_execnz .LBB5_1
+; SI-NEXT: ; %bb.2: ; %atomicrmw.end
+; SI-NEXT: s_or_b64 exec, exec, s[8:9]
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_mov_b32 s0, s6
+; SI-NEXT: s_mov_b32 s1, s7
+; SI-NEXT: buffer_store_dword v2, off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_max_i32_ret:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; VI-NEXT: s_load_dword s10, s[0:1], 0x34
+; VI-NEXT: s_mov_b64 s[8:9], 0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dword s2, s[4:5], 0x0
+; VI-NEXT: s_mov_b64 s[0:1], s[4:5]
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s2
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: .LBB5_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_max_i32_e32 v0, s10, v1
+; VI-NEXT: v_mov_b32_e32 v3, v1
+; VI-NEXT: v_mov_b32_e32 v2, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; VI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; VI-NEXT: v_mov_b32_e32 v1, v2
+; VI-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; VI-NEXT: s_cbranch_execnz .LBB5_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_or_b64 exec, exec, s[8:9]
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: s_mov_b32 s0, s6
+; VI-NEXT: s_mov_b32 s1, s7
+; VI-NEXT: buffer_store_dword v2, off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_max_i32_ret:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX9-NEXT: s_load_dword s2, s[0:1], 0x34
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dword s3, s[4:5], 0x0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v0, s3
+; GFX9-NEXT: .LBB5_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_mov_b32_e32 v3, v0
+; GFX9-NEXT: v_max_i32_e32 v2, s2, v3
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap v0, v1, v[2:3], s[4:5] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB5_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: global_store_dword v1, v0, s[6:7]
+; GFX9-NEXT: s_endpgm
+entry:
+ %val = atomicrmw volatile max i32 addrspace(1)* %out, i32 %in seq_cst
+ store i32 %val, i32 addrspace(1)* %out2
+ ret void
+}
+
+define amdgpu_kernel void @atomic_max_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) {
+; SI-LABEL: atomic_max_i32_addr64:
+; SI: ; %bb.0: ; %entry
+; SI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xd
+; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9
+; SI-NEXT: s_load_dword s6, s[0:1], 0xb
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; SI-NEXT: s_add_u32 s0, s4, s0
+; SI-NEXT: s_addc_u32 s1, s5, s1
+; SI-NEXT: s_load_dword s2, s[0:1], 0x0
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s2
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: .LBB6_1: ; %atomicrmw.start
+; SI-NEXT: ; =>This Inner Loop Header: Depth=1
+; SI-NEXT: v_max_i32_e32 v0, s6, v1
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mov_b32_e32 v3, v1
+; SI-NEXT: v_mov_b32_e32 v2, v0
+; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: buffer_wbinvl1
+; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; SI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; SI-NEXT: v_mov_b32_e32 v1, v2
+; SI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; SI-NEXT: s_cbranch_execnz .LBB6_1
+; SI-NEXT: ; %bb.2: ; %atomicrmw.end
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_max_i32_addr64:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24
+; VI-NEXT: s_load_dword s6, s[0:1], 0x2c
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; VI-NEXT: s_add_u32 s0, s4, s0
+; VI-NEXT: s_addc_u32 s1, s5, s1
+; VI-NEXT: s_load_dword s2, s[0:1], 0x0
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s2
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: .LBB6_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_max_i32_e32 v0, s6, v1
+; VI-NEXT: v_mov_b32_e32 v3, v1
+; VI-NEXT: v_mov_b32_e32 v2, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT: v_mov_b32_e32 v1, v2
+; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT: s_cbranch_execnz .LBB6_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_max_i32_addr64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x24
+; GFX9-NEXT: s_load_dword s4, s[0:1], 0x2c
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GFX9-NEXT: s_add_u32 s0, s6, s0
+; GFX9-NEXT: s_addc_u32 s1, s7, s1
+; GFX9-NEXT: s_load_dword s5, s[0:1], 0x0
+; GFX9-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v1, s5
+; GFX9-NEXT: .LBB6_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_max_i32_e32 v0, s4, v1
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v1, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-NEXT: s_cbranch_execnz .LBB6_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+ %val = atomicrmw volatile max i32 addrspace(1)* %ptr, i32 %in seq_cst
+ ret void
+}
+
+define amdgpu_kernel void @atomic_max_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) {
+; SI-LABEL: atomic_max_i32_ret_addr64:
+; SI: ; %bb.0: ; %entry
+; SI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xf
+; SI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; SI-NEXT: s_load_dword s8, s[0:1], 0xd
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; SI-NEXT: s_add_u32 s0, s4, s0
+; SI-NEXT: s_addc_u32 s1, s5, s1
+; SI-NEXT: s_load_dword s2, s[0:1], 0x0
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s2
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: .LBB7_1: ; %atomicrmw.start
+; SI-NEXT: ; =>This Inner Loop Header: Depth=1
+; SI-NEXT: v_max_i32_e32 v0, s8, v1
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mov_b32_e32 v3, v1
+; SI-NEXT: v_mov_b32_e32 v2, v0
+; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: buffer_wbinvl1
+; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; SI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; SI-NEXT: v_mov_b32_e32 v1, v2
+; SI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; SI-NEXT: s_cbranch_execnz .LBB7_1
+; SI-NEXT: ; %bb.2: ; %atomicrmw.end
+; SI-NEXT: s_or_b64 exec, exec, s[4:5]
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_mov_b32 s0, s6
+; SI-NEXT: s_mov_b32 s1, s7
+; SI-NEXT: buffer_store_dword v2, off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_max_i32_ret_addr64:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x3c
+; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; VI-NEXT: s_load_dword s8, s[0:1], 0x34
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; VI-NEXT: s_add_u32 s0, s4, s0
+; VI-NEXT: s_addc_u32 s1, s5, s1
+; VI-NEXT: s_load_dword s2, s[0:1], 0x0
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s2
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: .LBB7_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_max_i32_e32 v0, s8, v1
+; VI-NEXT: v_mov_b32_e32 v3, v1
+; VI-NEXT: v_mov_b32_e32 v2, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT: v_mov_b32_e32 v1, v2
+; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT: s_cbranch_execnz .LBB7_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_or_b64 exec, exec, s[4:5]
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: s_mov_b32 s0, s6
+; VI-NEXT: s_mov_b32 s1, s7
+; VI-NEXT: buffer_store_dword v2, off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_max_i32_ret_addr64:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x3c
+; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX9-NEXT: s_load_dword s8, s[0:1], 0x34
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; GFX9-NEXT: s_add_u32 s0, s4, s0
+; GFX9-NEXT: s_addc_u32 s1, s5, s1
+; GFX9-NEXT: s_load_dword s4, s[0:1], 0x0
+; GFX9-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v0, s4
+; GFX9-NEXT: .LBB7_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_mov_b32_e32 v3, v0
+; GFX9-NEXT: v_max_i32_e32 v2, s8, v3
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap v0, v1, v[2:3], s[0:1] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3
+; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; GFX9-NEXT: s_cbranch_execnz .LBB7_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_or_b64 exec, exec, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: global_store_dword v1, v0, s[6:7]
+; GFX9-NEXT: s_endpgm
+entry:
+ %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index
+ %val = atomicrmw volatile max i32 addrspace(1)* %ptr, i32 %in seq_cst
+ store i32 %val, i32 addrspace(1)* %out2
+ ret void
+}
+
+define amdgpu_kernel void @atomic_umax_i32_offset(i32 addrspace(1)* %out, i32 %in) {
+; SI-LABEL: atomic_umax_i32_offset:
+; SI: ; %bb.0: ; %entry
+; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9
+; SI-NEXT: s_load_dword s2, s[0:1], 0xb
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_load_dword s3, s[4:5], 0x4
+; SI-NEXT: s_mov_b64 s[0:1], 0
+; SI-NEXT: s_mov_b32 s7, 0xf000
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: s_mov_b32 s6, -1
+; SI-NEXT: .LBB8_1: ; %atomicrmw.start
+; SI-NEXT: ; =>This Inner Loop Header: Depth=1
+; SI-NEXT: v_max_u32_e32 v0, s2, v1
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mov_b32_e32 v3, v1
+; SI-NEXT: v_mov_b32_e32 v2, v0
+; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 offset:16 glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: buffer_wbinvl1
+; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; SI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; SI-NEXT: v_mov_b32_e32 v1, v2
+; SI-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; SI-NEXT: s_cbranch_execnz .LBB8_1
+; SI-NEXT: ; %bb.2: ; %atomicrmw.end
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_umax_i32_offset:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x24
+; VI-NEXT: s_load_dword s6, s[0:1], 0x2c
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dword s2, s[8:9], 0x10
+; VI-NEXT: s_add_u32 s0, s8, 16
+; VI-NEXT: s_addc_u32 s1, s9, 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s2
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: .LBB8_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_max_u32_e32 v0, s6, v1
+; VI-NEXT: v_mov_b32_e32 v3, v1
+; VI-NEXT: v_mov_b32_e32 v2, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT: v_mov_b32_e32 v1, v2
+; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT: s_cbranch_execnz .LBB8_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_umax_i32_offset:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
+; GFX9-NEXT: s_load_dword s4, s[0:1], 0x2c
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: v_mov_b32_e32 v2, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dword s5, s[2:3], 0x10
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v1, s5
+; GFX9-NEXT: .LBB8_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_max_u32_e32 v0, s4, v1
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[2:3] offset:16 glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v1, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB8_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_endpgm
+entry:
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
+ %val = atomicrmw volatile umax i32 addrspace(1)* %gep, i32 %in seq_cst
+ ret void
+}
+
+define amdgpu_kernel void @atomic_umax_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) {
+; SI-LABEL: atomic_umax_i32_ret_offset:
+; SI: ; %bb.0: ; %entry
+; SI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
+; SI-NEXT: s_load_dword s10, s[0:1], 0xd
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_load_dword s2, s[4:5], 0x4
+; SI-NEXT: s_mov_b64 s[8:9], 0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b64 s[0:1], s[4:5]
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s2
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: .LBB9_1: ; %atomicrmw.start
+; SI-NEXT: ; =>This Inner Loop Header: Depth=1
+; SI-NEXT: v_max_u32_e32 v0, s10, v1
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mov_b32_e32 v3, v1
+; SI-NEXT: v_mov_b32_e32 v2, v0
+; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 offset:16 glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: buffer_wbinvl1
+; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; SI-NEXT: v_mov_b32_e32 v1, v2
+; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; SI-NEXT: s_cbranch_execnz .LBB9_1
+; SI-NEXT: ; %bb.2: ; %atomicrmw.end
+; SI-NEXT: s_or_b64 exec, exec, s[8:9]
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_mov_b32 s0, s6
+; SI-NEXT: s_mov_b32 s1, s7
+; SI-NEXT: buffer_store_dword v2, off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_umax_i32_ret_offset:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; VI-NEXT: s_load_dword s10, s[0:1], 0x34
+; VI-NEXT: s_mov_b64 s[8:9], 0
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_load_dword s2, s[4:5], 0x10
+; VI-NEXT: s_add_u32 s0, s4, 16
+; VI-NEXT: s_addc_u32 s1, s5, 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s2
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: .LBB9_1: ; %atomicrmw.start
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_max_u32_e32 v0, s10, v1
+; VI-NEXT: v_mov_b32_e32 v3, v1
+; VI-NEXT: v_mov_b32_e32 v2, v0
+; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_wbinvl1_vol
+; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; VI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; VI-NEXT: v_mov_b32_e32 v1, v2
+; VI-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; VI-NEXT: s_cbranch_execnz .LBB9_1
+; VI-NEXT: ; %bb.2: ; %atomicrmw.end
+; VI-NEXT: s_or_b64 exec, exec, s[8:9]
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: s_mov_b32 s0, s6
+; VI-NEXT: s_mov_b32 s1, s7
+; VI-NEXT: buffer_store_dword v2, off, s[0:3], 0
+; VI-NEXT: s_endpgm
+;
+; GFX9-LABEL: atomic_umax_i32_ret_offset:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX9-NEXT: s_load_dword s2, s[0:1], 0x34
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_load_dword s3, s[4:5], 0x10
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v0, s3
+; GFX9-NEXT: .LBB9_1: ; %atomicrmw.start
+; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_mov_b32_e32 v3, v0
+; GFX9-NEXT: v_max_u32_e32 v2, s2, v3
+; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; GFX9-NEXT: global_atomic_cmpswap v0, v1, v[2:3], s[4:5] offset:16 glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_wbinvl1_vol
+; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
+; GFX9-NEXT: s_cbranch_execnz .LBB9_1
+; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
+; GFX9-NEXT: s_or_b64 exec, exec, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v1, 0
+; GFX9-NEXT: global_store_dword v1, v0, s[6:7]
+; GFX9-NEXT: s_endpgm
+entry:
+ %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4
+ %val = atomicrmw volatile umax i32 addrspace(1)* %gep, i32 %in seq_cst
+ store i32 %val, i32 addrspace(1)* %out2
+ ret void
+}
+
+define amdgpu_kernel void @atomic_umax_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) {
+; SI-LABEL: atomic_umax_i32_addr64_offset:
+; SI: ; %bb.0: ; %entry
+; SI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xd
+; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9
+; SI-NEXT: s_load_dword s6, s[0:1], 0xb
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; SI-NEXT: s_add_u32 s0, s4, s0
+; SI-NEXT: s_addc_u32 s1, s5, s1
+; SI-NEXT: s_load_dword s2, s[0:1], 0x4
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s2
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: .LBB10_1: ; %atomicrmw.start
+; SI-NEXT: ; =>This Inner Loop Header: Depth=1
+; SI-NEXT: v_max_u32_e32 v0, s6, v1
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mov_b32_e32 v3, v1
+; SI-NEXT: v_mov_b32_e32 v2, v0
+; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
+; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 offset:16 glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: buffer_wbinvl1
+; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1
+; SI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; SI-NEXT: v_mov_b32_e32 v1, v2
+; SI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; SI-NEXT: s_cbranch_execnz .LBB10_1
+; SI-NEXT: ; %bb.2: ; %atomicrmw.end
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: atomic_umax_i32_addr64_offset:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
+; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24
+; VI-NEXT: s_load_dword s6, s[0:1], 0x2c
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_lshl_b64 s[0:1], s[2:3], 2
+; VI-NEXT: s_add_u32 s0, s4, s0
s_add_u32 s0, s4, s0 +; VI-NEXT: s_addc_u32 s1, s5, s1 +; VI-NEXT: s_load_dword s2, s[0:1], 0x10 +; VI-NEXT: s_add_u32 s0, s0, 16 +; VI-NEXT: s_mov_b64 s[4:5], 0 +; VI-NEXT: s_mov_b32 s3, 0xf000 +; VI-NEXT: s_addc_u32 s1, s1, 0 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: v_mov_b32_e32 v1, s2 +; VI-NEXT: s_mov_b32 s2, -1 +; VI-NEXT: .LBB10_1: ; %atomicrmw.start +; VI-NEXT: ; =>This Inner Loop Header: Depth=1 +; VI-NEXT: v_max_u32_e32 v0, s6, v1 +; VI-NEXT: v_mov_b32_e32 v3, v1 +; VI-NEXT: v_mov_b32_e32 v2, v0 +; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; VI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: buffer_wbinvl1_vol +; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; VI-NEXT: v_mov_b32_e32 v1, v2 +; VI-NEXT: s_andn2_b64 exec, exec, s[4:5] +; VI-NEXT: s_cbranch_execnz .LBB10_1 +; VI-NEXT: ; %bb.2: ; %atomicrmw.end +; VI-NEXT: s_endpgm +; +; GFX9-LABEL: atomic_umax_i32_addr64_offset: +; GFX9: ; %bb.0: ; %entry +; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34 +; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x24 +; GFX9-NEXT: s_load_dword s4, s[0:1], 0x2c +; GFX9-NEXT: v_mov_b32_e32 v2, 0 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: s_lshl_b64 s[0:1], s[2:3], 2 +; GFX9-NEXT: s_add_u32 s0, s6, s0 +; GFX9-NEXT: s_addc_u32 s1, s7, s1 +; GFX9-NEXT: s_load_dword s5, s[0:1], 0x10 +; GFX9-NEXT: s_mov_b64 s[2:3], 0 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v1, s5 +; GFX9-NEXT: .LBB10_1: ; %atomicrmw.start +; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX9-NEXT: v_max_u32_e32 v0, s4, v1 +; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_wbinvl1_vol +; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 +; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3] +; GFX9-NEXT: v_mov_b32_e32 v1, v0 +; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3] +; GFX9-NEXT: s_cbranch_execnz .LBB10_1 +; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX9-NEXT: s_endpgm +entry: + %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index + %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4 + %val = atomicrmw volatile umax i32 addrspace(1)* %gep, i32 %in seq_cst + ret void +} + +define amdgpu_kernel void @atomic_umax_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) { +; SI-LABEL: atomic_umax_i32_ret_addr64_offset: +; SI: ; %bb.0: ; %entry +; SI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xf +; SI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9 +; SI-NEXT: s_load_dword s8, s[0:1], 0xd +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: s_lshl_b64 s[0:1], s[2:3], 2 +; SI-NEXT: s_add_u32 s0, s4, s0 +; SI-NEXT: s_addc_u32 s1, s5, s1 +; SI-NEXT: s_load_dword s2, s[0:1], 0x4 +; SI-NEXT: s_mov_b64 s[4:5], 0 +; SI-NEXT: s_mov_b32 s3, 0xf000 +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: v_mov_b32_e32 v1, s2 +; SI-NEXT: s_mov_b32 s2, -1 +; SI-NEXT: .LBB11_1: ; %atomicrmw.start +; SI-NEXT: ; =>This Inner Loop Header: Depth=1 +; SI-NEXT: v_max_u32_e32 v0, s8, v1 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v3, v1 +; SI-NEXT: v_mov_b32_e32 v2, v0 +; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 offset:16 glc +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_wbinvl1 +; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; SI-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; SI-NEXT: v_mov_b32_e32 v1, v2 +; SI-NEXT: s_andn2_b64 exec, exec, s[4:5] +; SI-NEXT: s_cbranch_execnz 
.LBB11_1 +; SI-NEXT: ; %bb.2: ; %atomicrmw.end +; SI-NEXT: s_or_b64 exec, exec, s[4:5] +; SI-NEXT: s_mov_b32 s3, 0xf000 +; SI-NEXT: s_mov_b32 s2, -1 +; SI-NEXT: s_mov_b32 s0, s6 +; SI-NEXT: s_mov_b32 s1, s7 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], 0 +; SI-NEXT: s_endpgm +; +; VI-LABEL: atomic_umax_i32_ret_addr64_offset: +; VI: ; %bb.0: ; %entry +; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x3c +; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24 +; VI-NEXT: s_load_dword s8, s[0:1], 0x34 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: s_lshl_b64 s[0:1], s[2:3], 2 +; VI-NEXT: s_add_u32 s0, s4, s0 +; VI-NEXT: s_addc_u32 s1, s5, s1 +; VI-NEXT: s_load_dword s2, s[0:1], 0x10 +; VI-NEXT: s_add_u32 s0, s0, 16 +; VI-NEXT: s_mov_b64 s[4:5], 0 +; VI-NEXT: s_mov_b32 s3, 0xf000 +; VI-NEXT: s_addc_u32 s1, s1, 0 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: v_mov_b32_e32 v1, s2 +; VI-NEXT: s_mov_b32 s2, -1 +; VI-NEXT: .LBB11_1: ; %atomicrmw.start +; VI-NEXT: ; =>This Inner Loop Header: Depth=1 +; VI-NEXT: v_max_u32_e32 v0, s8, v1 +; VI-NEXT: v_mov_b32_e32 v3, v1 +; VI-NEXT: v_mov_b32_e32 v2, v0 +; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; VI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: buffer_wbinvl1_vol +; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; VI-NEXT: v_mov_b32_e32 v1, v2 +; VI-NEXT: s_andn2_b64 exec, exec, s[4:5] +; VI-NEXT: s_cbranch_execnz .LBB11_1 +; VI-NEXT: ; %bb.2: ; %atomicrmw.end +; VI-NEXT: s_or_b64 exec, exec, s[4:5] +; VI-NEXT: s_mov_b32 s3, 0xf000 +; VI-NEXT: s_mov_b32 s2, -1 +; VI-NEXT: s_mov_b32 s0, s6 +; VI-NEXT: s_mov_b32 s1, s7 +; VI-NEXT: buffer_store_dword v2, off, s[0:3], 0 +; VI-NEXT: s_endpgm +; +; GFX9-LABEL: atomic_umax_i32_ret_addr64_offset: +; GFX9: ; %bb.0: ; %entry +; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x3c +; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24 +; GFX9-NEXT: s_load_dword s8, s[0:1], 0x34 +; GFX9-NEXT: v_mov_b32_e32 v1, 0 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: s_lshl_b64 s[0:1], s[2:3], 2 +; GFX9-NEXT: s_add_u32 s0, s4, s0 +; GFX9-NEXT: s_addc_u32 s1, s5, s1 +; GFX9-NEXT: s_load_dword s4, s[0:1], 0x10 +; GFX9-NEXT: s_mov_b64 s[2:3], 0 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v0, s4 +; GFX9-NEXT: .LBB11_1: ; %atomicrmw.start +; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX9-NEXT: v_mov_b32_e32 v3, v0 +; GFX9-NEXT: v_max_u32_e32 v2, s8, v3 +; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: global_atomic_cmpswap v0, v1, v[2:3], s[0:1] offset:16 glc +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_wbinvl1_vol +; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3 +; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3] +; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3] +; GFX9-NEXT: s_cbranch_execnz .LBB11_1 +; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX9-NEXT: s_or_b64 exec, exec, s[2:3] +; GFX9-NEXT: v_mov_b32_e32 v1, 0 +; GFX9-NEXT: global_store_dword v1, v0, s[6:7] +; GFX9-NEXT: s_endpgm +entry: + %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index + %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4 + %val = atomicrmw volatile umax i32 addrspace(1)* %gep, i32 %in seq_cst + store i32 %val, i32 addrspace(1)* %out2 + ret void +} + +define amdgpu_kernel void @atomic_umax_i32(i32 addrspace(1)* %out, i32 %in) { +; SI-LABEL: atomic_umax_i32: +; SI: ; %bb.0: ; %entry +; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9 +; SI-NEXT: s_load_dword s2, s[0:1], 0xb +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: s_load_dword s3, s[4:5], 0x0 +; SI-NEXT: 
s_mov_b64 s[0:1], 0 +; SI-NEXT: s_mov_b32 s7, 0xf000 +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: v_mov_b32_e32 v1, s3 +; SI-NEXT: s_mov_b32 s6, -1 +; SI-NEXT: .LBB12_1: ; %atomicrmw.start +; SI-NEXT: ; =>This Inner Loop Header: Depth=1 +; SI-NEXT: v_max_u32_e32 v0, s2, v1 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v3, v1 +; SI-NEXT: v_mov_b32_e32 v2, v0 +; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 glc +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_wbinvl1 +; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; SI-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; SI-NEXT: v_mov_b32_e32 v1, v2 +; SI-NEXT: s_andn2_b64 exec, exec, s[0:1] +; SI-NEXT: s_cbranch_execnz .LBB12_1 +; SI-NEXT: ; %bb.2: ; %atomicrmw.end +; SI-NEXT: s_endpgm +; +; VI-LABEL: atomic_umax_i32: +; VI: ; %bb.0: ; %entry +; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24 +; VI-NEXT: s_load_dword s2, s[0:1], 0x2c +; VI-NEXT: s_mov_b64 s[0:1], 0 +; VI-NEXT: s_mov_b32 s7, 0xf000 +; VI-NEXT: s_mov_b32 s6, -1 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: s_load_dword s3, s[4:5], 0x0 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: v_mov_b32_e32 v1, s3 +; VI-NEXT: .LBB12_1: ; %atomicrmw.start +; VI-NEXT: ; =>This Inner Loop Header: Depth=1 +; VI-NEXT: v_max_u32_e32 v0, s2, v1 +; VI-NEXT: v_mov_b32_e32 v3, v1 +; VI-NEXT: v_mov_b32_e32 v2, v0 +; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; VI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 glc +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: buffer_wbinvl1_vol +; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; VI-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; VI-NEXT: v_mov_b32_e32 v1, v2 +; VI-NEXT: s_andn2_b64 exec, exec, s[0:1] +; VI-NEXT: s_cbranch_execnz .LBB12_1 +; VI-NEXT: ; %bb.2: ; %atomicrmw.end +; VI-NEXT: s_endpgm +; +; GFX9-LABEL: atomic_umax_i32: +; GFX9: ; %bb.0: ; %entry +; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24 +; GFX9-NEXT: s_load_dword s4, s[0:1], 0x2c +; GFX9-NEXT: s_mov_b64 s[0:1], 0 +; GFX9-NEXT: v_mov_b32_e32 v2, 0 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: s_load_dword s5, s[2:3], 0x0 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v1, s5 +; GFX9-NEXT: .LBB12_1: ; %atomicrmw.start +; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX9-NEXT: v_max_u32_e32 v0, s4, v1 +; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[2:3] glc +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_wbinvl1_vol +; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 +; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX9-NEXT: v_mov_b32_e32 v1, v0 +; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX9-NEXT: s_cbranch_execnz .LBB12_1 +; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX9-NEXT: s_endpgm +entry: + %val = atomicrmw volatile umax i32 addrspace(1)* %out, i32 %in seq_cst + ret void +} + +define amdgpu_kernel void @atomic_umax_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) { +; SI-LABEL: atomic_umax_i32_ret: +; SI: ; %bb.0: ; %entry +; SI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9 +; SI-NEXT: s_load_dword s10, s[0:1], 0xd +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: s_load_dword s2, s[4:5], 0x0 +; SI-NEXT: s_mov_b64 s[8:9], 0 +; SI-NEXT: s_mov_b32 s3, 0xf000 +; SI-NEXT: s_mov_b64 s[0:1], s[4:5] +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: v_mov_b32_e32 v1, s2 +; SI-NEXT: s_mov_b32 s2, -1 +; SI-NEXT: .LBB13_1: ; %atomicrmw.start +; SI-NEXT: ; =>This Inner Loop Header: Depth=1 +; SI-NEXT: v_max_u32_e32 v0, s10, v1 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v3, v1 +; 
SI-NEXT: v_mov_b32_e32 v2, v0 +; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_wbinvl1 +; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; SI-NEXT: v_mov_b32_e32 v1, v2 +; SI-NEXT: s_andn2_b64 exec, exec, s[8:9] +; SI-NEXT: s_cbranch_execnz .LBB13_1 +; SI-NEXT: ; %bb.2: ; %atomicrmw.end +; SI-NEXT: s_or_b64 exec, exec, s[8:9] +; SI-NEXT: s_mov_b32 s3, 0xf000 +; SI-NEXT: s_mov_b32 s2, -1 +; SI-NEXT: s_mov_b32 s0, s6 +; SI-NEXT: s_mov_b32 s1, s7 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], 0 +; SI-NEXT: s_endpgm +; +; VI-LABEL: atomic_umax_i32_ret: +; VI: ; %bb.0: ; %entry +; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24 +; VI-NEXT: s_load_dword s10, s[0:1], 0x34 +; VI-NEXT: s_mov_b64 s[8:9], 0 +; VI-NEXT: s_mov_b32 s3, 0xf000 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: s_load_dword s2, s[4:5], 0x0 +; VI-NEXT: s_mov_b64 s[0:1], s[4:5] +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: v_mov_b32_e32 v1, s2 +; VI-NEXT: s_mov_b32 s2, -1 +; VI-NEXT: .LBB13_1: ; %atomicrmw.start +; VI-NEXT: ; =>This Inner Loop Header: Depth=1 +; VI-NEXT: v_max_u32_e32 v0, s10, v1 +; VI-NEXT: v_mov_b32_e32 v3, v1 +; VI-NEXT: v_mov_b32_e32 v2, v0 +; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; VI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: buffer_wbinvl1_vol +; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; VI-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; VI-NEXT: v_mov_b32_e32 v1, v2 +; VI-NEXT: s_andn2_b64 exec, exec, s[8:9] +; VI-NEXT: s_cbranch_execnz .LBB13_1 +; VI-NEXT: ; %bb.2: ; %atomicrmw.end +; VI-NEXT: s_or_b64 exec, exec, s[8:9] +; VI-NEXT: s_mov_b32 s3, 0xf000 +; VI-NEXT: s_mov_b32 s2, -1 +; VI-NEXT: s_mov_b32 s0, s6 +; VI-NEXT: s_mov_b32 s1, s7 +; VI-NEXT: buffer_store_dword v2, off, s[0:3], 0 +; VI-NEXT: s_endpgm +; +; GFX9-LABEL: atomic_umax_i32_ret: +; GFX9: ; %bb.0: ; %entry +; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24 +; GFX9-NEXT: s_load_dword s2, s[0:1], 0x34 +; GFX9-NEXT: s_mov_b64 s[0:1], 0 +; GFX9-NEXT: v_mov_b32_e32 v1, 0 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: s_load_dword s3, s[4:5], 0x0 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v0, s3 +; GFX9-NEXT: .LBB13_1: ; %atomicrmw.start +; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX9-NEXT: v_mov_b32_e32 v3, v0 +; GFX9-NEXT: v_max_u32_e32 v2, s2, v3 +; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: global_atomic_cmpswap v0, v1, v[2:3], s[4:5] glc +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_wbinvl1_vol +; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3 +; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX9-NEXT: s_cbranch_execnz .LBB13_1 +; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX9-NEXT: s_or_b64 exec, exec, s[0:1] +; GFX9-NEXT: v_mov_b32_e32 v1, 0 +; GFX9-NEXT: global_store_dword v1, v0, s[6:7] +; GFX9-NEXT: s_endpgm +entry: + %val = atomicrmw volatile umax i32 addrspace(1)* %out, i32 %in seq_cst + store i32 %val, i32 addrspace(1)* %out2 + ret void +} + +define amdgpu_kernel void @atomic_umax_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) { +; SI-LABEL: atomic_umax_i32_addr64: +; SI: ; %bb.0: ; %entry +; SI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xd +; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9 +; SI-NEXT: s_load_dword s6, s[0:1], 0xb +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: s_lshl_b64 s[0:1], s[2:3], 2 +; SI-NEXT: s_add_u32 s0, s4, s0 +; SI-NEXT: s_addc_u32 s1, s5, s1 +; 
SI-NEXT: s_load_dword s2, s[0:1], 0x0 +; SI-NEXT: s_mov_b64 s[4:5], 0 +; SI-NEXT: s_mov_b32 s3, 0xf000 +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: v_mov_b32_e32 v1, s2 +; SI-NEXT: s_mov_b32 s2, -1 +; SI-NEXT: .LBB14_1: ; %atomicrmw.start +; SI-NEXT: ; =>This Inner Loop Header: Depth=1 +; SI-NEXT: v_max_u32_e32 v0, s6, v1 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v3, v1 +; SI-NEXT: v_mov_b32_e32 v2, v0 +; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_wbinvl1 +; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; SI-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; SI-NEXT: v_mov_b32_e32 v1, v2 +; SI-NEXT: s_andn2_b64 exec, exec, s[4:5] +; SI-NEXT: s_cbranch_execnz .LBB14_1 +; SI-NEXT: ; %bb.2: ; %atomicrmw.end +; SI-NEXT: s_endpgm +; +; VI-LABEL: atomic_umax_i32_addr64: +; VI: ; %bb.0: ; %entry +; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34 +; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24 +; VI-NEXT: s_load_dword s6, s[0:1], 0x2c +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: s_lshl_b64 s[0:1], s[2:3], 2 +; VI-NEXT: s_add_u32 s0, s4, s0 +; VI-NEXT: s_addc_u32 s1, s5, s1 +; VI-NEXT: s_load_dword s2, s[0:1], 0x0 +; VI-NEXT: s_mov_b64 s[4:5], 0 +; VI-NEXT: s_mov_b32 s3, 0xf000 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: v_mov_b32_e32 v1, s2 +; VI-NEXT: s_mov_b32 s2, -1 +; VI-NEXT: .LBB14_1: ; %atomicrmw.start +; VI-NEXT: ; =>This Inner Loop Header: Depth=1 +; VI-NEXT: v_max_u32_e32 v0, s6, v1 +; VI-NEXT: v_mov_b32_e32 v3, v1 +; VI-NEXT: v_mov_b32_e32 v2, v0 +; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; VI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: buffer_wbinvl1_vol +; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; VI-NEXT: v_mov_b32_e32 v1, v2 +; VI-NEXT: s_andn2_b64 exec, exec, s[4:5] +; VI-NEXT: s_cbranch_execnz .LBB14_1 +; VI-NEXT: ; %bb.2: ; %atomicrmw.end +; VI-NEXT: s_endpgm +; +; GFX9-LABEL: atomic_umax_i32_addr64: +; GFX9: ; %bb.0: ; %entry +; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34 +; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x24 +; GFX9-NEXT: s_load_dword s4, s[0:1], 0x2c +; GFX9-NEXT: v_mov_b32_e32 v2, 0 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: s_lshl_b64 s[0:1], s[2:3], 2 +; GFX9-NEXT: s_add_u32 s0, s6, s0 +; GFX9-NEXT: s_addc_u32 s1, s7, s1 +; GFX9-NEXT: s_load_dword s5, s[0:1], 0x0 +; GFX9-NEXT: s_mov_b64 s[2:3], 0 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v1, s5 +; GFX9-NEXT: .LBB14_1: ; %atomicrmw.start +; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX9-NEXT: v_max_u32_e32 v0, s4, v1 +; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] glc +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_wbinvl1_vol +; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 +; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3] +; GFX9-NEXT: v_mov_b32_e32 v1, v0 +; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3] +; GFX9-NEXT: s_cbranch_execnz .LBB14_1 +; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX9-NEXT: s_endpgm +entry: + %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index + %val = atomicrmw volatile umax i32 addrspace(1)* %ptr, i32 %in seq_cst + ret void +} + +define amdgpu_kernel void @atomic_umax_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) { +; SI-LABEL: atomic_umax_i32_ret_addr64: +; SI: ; %bb.0: ; %entry +; SI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xf +; SI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 
0x9 +; SI-NEXT: s_load_dword s8, s[0:1], 0xd +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: s_lshl_b64 s[0:1], s[2:3], 2 +; SI-NEXT: s_add_u32 s0, s4, s0 +; SI-NEXT: s_addc_u32 s1, s5, s1 +; SI-NEXT: s_load_dword s2, s[0:1], 0x0 +; SI-NEXT: s_mov_b64 s[4:5], 0 +; SI-NEXT: s_mov_b32 s3, 0xf000 +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: v_mov_b32_e32 v1, s2 +; SI-NEXT: s_mov_b32 s2, -1 +; SI-NEXT: .LBB15_1: ; %atomicrmw.start +; SI-NEXT: ; =>This Inner Loop Header: Depth=1 +; SI-NEXT: v_max_u32_e32 v0, s8, v1 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v3, v1 +; SI-NEXT: v_mov_b32_e32 v2, v0 +; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_wbinvl1 +; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; SI-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; SI-NEXT: v_mov_b32_e32 v1, v2 +; SI-NEXT: s_andn2_b64 exec, exec, s[4:5] +; SI-NEXT: s_cbranch_execnz .LBB15_1 +; SI-NEXT: ; %bb.2: ; %atomicrmw.end +; SI-NEXT: s_or_b64 exec, exec, s[4:5] +; SI-NEXT: s_mov_b32 s3, 0xf000 +; SI-NEXT: s_mov_b32 s2, -1 +; SI-NEXT: s_mov_b32 s0, s6 +; SI-NEXT: s_mov_b32 s1, s7 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], 0 +; SI-NEXT: s_endpgm +; +; VI-LABEL: atomic_umax_i32_ret_addr64: +; VI: ; %bb.0: ; %entry +; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x3c +; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24 +; VI-NEXT: s_load_dword s8, s[0:1], 0x34 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: s_lshl_b64 s[0:1], s[2:3], 2 +; VI-NEXT: s_add_u32 s0, s4, s0 +; VI-NEXT: s_addc_u32 s1, s5, s1 +; VI-NEXT: s_load_dword s2, s[0:1], 0x0 +; VI-NEXT: s_mov_b64 s[4:5], 0 +; VI-NEXT: s_mov_b32 s3, 0xf000 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: v_mov_b32_e32 v1, s2 +; VI-NEXT: s_mov_b32 s2, -1 +; VI-NEXT: .LBB15_1: ; %atomicrmw.start +; VI-NEXT: ; =>This Inner Loop Header: Depth=1 +; VI-NEXT: v_max_u32_e32 v0, s8, v1 +; VI-NEXT: v_mov_b32_e32 v3, v1 +; VI-NEXT: v_mov_b32_e32 v2, v0 +; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; VI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: buffer_wbinvl1_vol +; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; VI-NEXT: v_mov_b32_e32 v1, v2 +; VI-NEXT: s_andn2_b64 exec, exec, s[4:5] +; VI-NEXT: s_cbranch_execnz .LBB15_1 +; VI-NEXT: ; %bb.2: ; %atomicrmw.end +; VI-NEXT: s_or_b64 exec, exec, s[4:5] +; VI-NEXT: s_mov_b32 s3, 0xf000 +; VI-NEXT: s_mov_b32 s2, -1 +; VI-NEXT: s_mov_b32 s0, s6 +; VI-NEXT: s_mov_b32 s1, s7 +; VI-NEXT: buffer_store_dword v2, off, s[0:3], 0 +; VI-NEXT: s_endpgm +; +; GFX9-LABEL: atomic_umax_i32_ret_addr64: +; GFX9: ; %bb.0: ; %entry +; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x3c +; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24 +; GFX9-NEXT: s_load_dword s8, s[0:1], 0x34 +; GFX9-NEXT: v_mov_b32_e32 v1, 0 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: s_lshl_b64 s[0:1], s[2:3], 2 +; GFX9-NEXT: s_add_u32 s0, s4, s0 +; GFX9-NEXT: s_addc_u32 s1, s5, s1 +; GFX9-NEXT: s_load_dword s4, s[0:1], 0x0 +; GFX9-NEXT: s_mov_b64 s[2:3], 0 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v0, s4 +; GFX9-NEXT: .LBB15_1: ; %atomicrmw.start +; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX9-NEXT: v_mov_b32_e32 v3, v0 +; GFX9-NEXT: v_max_u32_e32 v2, s8, v3 +; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: global_atomic_cmpswap v0, v1, v[2:3], s[0:1] glc +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_wbinvl1_vol +; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3 +; GFX9-NEXT: 
s_or_b64 s[2:3], vcc, s[2:3] +; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3] +; GFX9-NEXT: s_cbranch_execnz .LBB15_1 +; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX9-NEXT: s_or_b64 exec, exec, s[2:3] +; GFX9-NEXT: v_mov_b32_e32 v1, 0 +; GFX9-NEXT: global_store_dword v1, v0, s[6:7] +; GFX9-NEXT: s_endpgm +entry: + %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index + %val = atomicrmw volatile umax i32 addrspace(1)* %ptr, i32 %in seq_cst + store i32 %val, i32 addrspace(1)* %out2 + ret void +} + +define amdgpu_kernel void @atomic_min_i32_offset(i32 addrspace(1)* %out, i32 %in) { +; SI-LABEL: atomic_min_i32_offset: +; SI: ; %bb.0: ; %entry +; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9 +; SI-NEXT: s_load_dword s2, s[0:1], 0xb +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: s_load_dword s3, s[4:5], 0x4 +; SI-NEXT: s_mov_b64 s[0:1], 0 +; SI-NEXT: s_mov_b32 s7, 0xf000 +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: v_mov_b32_e32 v1, s3 +; SI-NEXT: s_mov_b32 s6, -1 +; SI-NEXT: .LBB16_1: ; %atomicrmw.start +; SI-NEXT: ; =>This Inner Loop Header: Depth=1 +; SI-NEXT: v_min_i32_e32 v0, s2, v1 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v3, v1 +; SI-NEXT: v_mov_b32_e32 v2, v0 +; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 offset:16 glc +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_wbinvl1 +; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; SI-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; SI-NEXT: v_mov_b32_e32 v1, v2 +; SI-NEXT: s_andn2_b64 exec, exec, s[0:1] +; SI-NEXT: s_cbranch_execnz .LBB16_1 +; SI-NEXT: ; %bb.2: ; %atomicrmw.end +; SI-NEXT: s_endpgm +; +; VI-LABEL: atomic_min_i32_offset: +; VI: ; %bb.0: ; %entry +; VI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x24 +; VI-NEXT: s_load_dword s6, s[0:1], 0x2c +; VI-NEXT: s_mov_b64 s[4:5], 0 +; VI-NEXT: s_mov_b32 s3, 0xf000 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: s_load_dword s2, s[8:9], 0x10 +; VI-NEXT: s_add_u32 s0, s8, 16 +; VI-NEXT: s_addc_u32 s1, s9, 0 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: v_mov_b32_e32 v1, s2 +; VI-NEXT: s_mov_b32 s2, -1 +; VI-NEXT: .LBB16_1: ; %atomicrmw.start +; VI-NEXT: ; =>This Inner Loop Header: Depth=1 +; VI-NEXT: v_min_i32_e32 v0, s6, v1 +; VI-NEXT: v_mov_b32_e32 v3, v1 +; VI-NEXT: v_mov_b32_e32 v2, v0 +; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; VI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: buffer_wbinvl1_vol +; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; VI-NEXT: v_mov_b32_e32 v1, v2 +; VI-NEXT: s_andn2_b64 exec, exec, s[4:5] +; VI-NEXT: s_cbranch_execnz .LBB16_1 +; VI-NEXT: ; %bb.2: ; %atomicrmw.end +; VI-NEXT: s_endpgm +; +; GFX9-LABEL: atomic_min_i32_offset: +; GFX9: ; %bb.0: ; %entry +; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24 +; GFX9-NEXT: s_load_dword s4, s[0:1], 0x2c +; GFX9-NEXT: s_mov_b64 s[0:1], 0 +; GFX9-NEXT: v_mov_b32_e32 v2, 0 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: s_load_dword s5, s[2:3], 0x10 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v1, s5 +; GFX9-NEXT: .LBB16_1: ; %atomicrmw.start +; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX9-NEXT: v_min_i32_e32 v0, s4, v1 +; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[2:3] offset:16 glc +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_wbinvl1_vol +; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 +; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX9-NEXT: v_mov_b32_e32 v1, v0 +; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1] +; 
GFX9-NEXT: s_cbranch_execnz .LBB16_1 +; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX9-NEXT: s_endpgm +entry: + %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4 + %val = atomicrmw volatile min i32 addrspace(1)* %gep, i32 %in seq_cst + ret void +} + +define amdgpu_kernel void @atomic_min_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) { +; SI-LABEL: atomic_min_i32_ret_offset: +; SI: ; %bb.0: ; %entry +; SI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9 +; SI-NEXT: s_load_dword s10, s[0:1], 0xd +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: s_load_dword s2, s[4:5], 0x4 +; SI-NEXT: s_mov_b64 s[8:9], 0 +; SI-NEXT: s_mov_b32 s3, 0xf000 +; SI-NEXT: s_mov_b64 s[0:1], s[4:5] +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: v_mov_b32_e32 v1, s2 +; SI-NEXT: s_mov_b32 s2, -1 +; SI-NEXT: .LBB17_1: ; %atomicrmw.start +; SI-NEXT: ; =>This Inner Loop Header: Depth=1 +; SI-NEXT: v_min_i32_e32 v0, s10, v1 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v3, v1 +; SI-NEXT: v_mov_b32_e32 v2, v0 +; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 offset:16 glc +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_wbinvl1 +; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; SI-NEXT: v_mov_b32_e32 v1, v2 +; SI-NEXT: s_andn2_b64 exec, exec, s[8:9] +; SI-NEXT: s_cbranch_execnz .LBB17_1 +; SI-NEXT: ; %bb.2: ; %atomicrmw.end +; SI-NEXT: s_or_b64 exec, exec, s[8:9] +; SI-NEXT: s_mov_b32 s3, 0xf000 +; SI-NEXT: s_mov_b32 s2, -1 +; SI-NEXT: s_mov_b32 s0, s6 +; SI-NEXT: s_mov_b32 s1, s7 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], 0 +; SI-NEXT: s_endpgm +; +; VI-LABEL: atomic_min_i32_ret_offset: +; VI: ; %bb.0: ; %entry +; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24 +; VI-NEXT: s_load_dword s10, s[0:1], 0x34 +; VI-NEXT: s_mov_b64 s[8:9], 0 +; VI-NEXT: s_mov_b32 s3, 0xf000 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: s_load_dword s2, s[4:5], 0x10 +; VI-NEXT: s_add_u32 s0, s4, 16 +; VI-NEXT: s_addc_u32 s1, s5, 0 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: v_mov_b32_e32 v1, s2 +; VI-NEXT: s_mov_b32 s2, -1 +; VI-NEXT: .LBB17_1: ; %atomicrmw.start +; VI-NEXT: ; =>This Inner Loop Header: Depth=1 +; VI-NEXT: v_min_i32_e32 v0, s10, v1 +; VI-NEXT: v_mov_b32_e32 v3, v1 +; VI-NEXT: v_mov_b32_e32 v2, v0 +; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; VI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: buffer_wbinvl1_vol +; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; VI-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; VI-NEXT: v_mov_b32_e32 v1, v2 +; VI-NEXT: s_andn2_b64 exec, exec, s[8:9] +; VI-NEXT: s_cbranch_execnz .LBB17_1 +; VI-NEXT: ; %bb.2: ; %atomicrmw.end +; VI-NEXT: s_or_b64 exec, exec, s[8:9] +; VI-NEXT: s_mov_b32 s3, 0xf000 +; VI-NEXT: s_mov_b32 s2, -1 +; VI-NEXT: s_mov_b32 s0, s6 +; VI-NEXT: s_mov_b32 s1, s7 +; VI-NEXT: buffer_store_dword v2, off, s[0:3], 0 +; VI-NEXT: s_endpgm +; +; GFX9-LABEL: atomic_min_i32_ret_offset: +; GFX9: ; %bb.0: ; %entry +; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24 +; GFX9-NEXT: s_load_dword s2, s[0:1], 0x34 +; GFX9-NEXT: s_mov_b64 s[0:1], 0 +; GFX9-NEXT: v_mov_b32_e32 v1, 0 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: s_load_dword s3, s[4:5], 0x10 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v0, s3 +; GFX9-NEXT: .LBB17_1: ; %atomicrmw.start +; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX9-NEXT: v_mov_b32_e32 v3, v0 +; GFX9-NEXT: v_min_i32_e32 v2, s2, v3 +; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: 
global_atomic_cmpswap v0, v1, v[2:3], s[4:5] offset:16 glc +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_wbinvl1_vol +; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3 +; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX9-NEXT: s_cbranch_execnz .LBB17_1 +; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX9-NEXT: s_or_b64 exec, exec, s[0:1] +; GFX9-NEXT: v_mov_b32_e32 v1, 0 +; GFX9-NEXT: global_store_dword v1, v0, s[6:7] +; GFX9-NEXT: s_endpgm +entry: + %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4 + %val = atomicrmw volatile min i32 addrspace(1)* %gep, i32 %in seq_cst + store i32 %val, i32 addrspace(1)* %out2 + ret void +} + +define amdgpu_kernel void @atomic_min_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) { +; SI-LABEL: atomic_min_i32_addr64_offset: +; SI: ; %bb.0: ; %entry +; SI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xd +; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9 +; SI-NEXT: s_load_dword s6, s[0:1], 0xb +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: s_lshl_b64 s[0:1], s[2:3], 2 +; SI-NEXT: s_add_u32 s0, s4, s0 +; SI-NEXT: s_addc_u32 s1, s5, s1 +; SI-NEXT: s_load_dword s2, s[0:1], 0x4 +; SI-NEXT: s_mov_b64 s[4:5], 0 +; SI-NEXT: s_mov_b32 s3, 0xf000 +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: v_mov_b32_e32 v1, s2 +; SI-NEXT: s_mov_b32 s2, -1 +; SI-NEXT: .LBB18_1: ; %atomicrmw.start +; SI-NEXT: ; =>This Inner Loop Header: Depth=1 +; SI-NEXT: v_min_i32_e32 v0, s6, v1 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v3, v1 +; SI-NEXT: v_mov_b32_e32 v2, v0 +; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 offset:16 glc +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_wbinvl1 +; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; SI-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; SI-NEXT: v_mov_b32_e32 v1, v2 +; SI-NEXT: s_andn2_b64 exec, exec, s[4:5] +; SI-NEXT: s_cbranch_execnz .LBB18_1 +; SI-NEXT: ; %bb.2: ; %atomicrmw.end +; SI-NEXT: s_endpgm +; +; VI-LABEL: atomic_min_i32_addr64_offset: +; VI: ; %bb.0: ; %entry +; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34 +; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24 +; VI-NEXT: s_load_dword s6, s[0:1], 0x2c +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: s_lshl_b64 s[0:1], s[2:3], 2 +; VI-NEXT: s_add_u32 s0, s4, s0 +; VI-NEXT: s_addc_u32 s1, s5, s1 +; VI-NEXT: s_load_dword s2, s[0:1], 0x10 +; VI-NEXT: s_add_u32 s0, s0, 16 +; VI-NEXT: s_mov_b64 s[4:5], 0 +; VI-NEXT: s_mov_b32 s3, 0xf000 +; VI-NEXT: s_addc_u32 s1, s1, 0 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: v_mov_b32_e32 v1, s2 +; VI-NEXT: s_mov_b32 s2, -1 +; VI-NEXT: .LBB18_1: ; %atomicrmw.start +; VI-NEXT: ; =>This Inner Loop Header: Depth=1 +; VI-NEXT: v_min_i32_e32 v0, s6, v1 +; VI-NEXT: v_mov_b32_e32 v3, v1 +; VI-NEXT: v_mov_b32_e32 v2, v0 +; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; VI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: buffer_wbinvl1_vol +; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; VI-NEXT: v_mov_b32_e32 v1, v2 +; VI-NEXT: s_andn2_b64 exec, exec, s[4:5] +; VI-NEXT: s_cbranch_execnz .LBB18_1 +; VI-NEXT: ; %bb.2: ; %atomicrmw.end +; VI-NEXT: s_endpgm +; +; GFX9-LABEL: atomic_min_i32_addr64_offset: +; GFX9: ; %bb.0: ; %entry +; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34 +; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x24 +; GFX9-NEXT: s_load_dword s4, s[0:1], 0x2c +; GFX9-NEXT: v_mov_b32_e32 v2, 0 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: s_lshl_b64 s[0:1], s[2:3], 2 +; GFX9-NEXT: 
s_add_u32 s0, s6, s0 +; GFX9-NEXT: s_addc_u32 s1, s7, s1 +; GFX9-NEXT: s_load_dword s5, s[0:1], 0x10 +; GFX9-NEXT: s_mov_b64 s[2:3], 0 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v1, s5 +; GFX9-NEXT: .LBB18_1: ; %atomicrmw.start +; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX9-NEXT: v_min_i32_e32 v0, s4, v1 +; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_wbinvl1_vol +; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 +; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3] +; GFX9-NEXT: v_mov_b32_e32 v1, v0 +; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3] +; GFX9-NEXT: s_cbranch_execnz .LBB18_1 +; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX9-NEXT: s_endpgm +entry: + %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index + %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4 + %val = atomicrmw volatile min i32 addrspace(1)* %gep, i32 %in seq_cst + ret void +} + +define amdgpu_kernel void @atomic_min_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) { +; SI-LABEL: atomic_min_i32_ret_addr64_offset: +; SI: ; %bb.0: ; %entry +; SI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xf +; SI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9 +; SI-NEXT: s_load_dword s8, s[0:1], 0xd +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: s_lshl_b64 s[0:1], s[2:3], 2 +; SI-NEXT: s_add_u32 s0, s4, s0 +; SI-NEXT: s_addc_u32 s1, s5, s1 +; SI-NEXT: s_load_dword s2, s[0:1], 0x4 +; SI-NEXT: s_mov_b64 s[4:5], 0 +; SI-NEXT: s_mov_b32 s3, 0xf000 +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: v_mov_b32_e32 v1, s2 +; SI-NEXT: s_mov_b32 s2, -1 +; SI-NEXT: .LBB19_1: ; %atomicrmw.start +; SI-NEXT: ; =>This Inner Loop Header: Depth=1 +; SI-NEXT: v_min_i32_e32 v0, s8, v1 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v3, v1 +; SI-NEXT: v_mov_b32_e32 v2, v0 +; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 offset:16 glc +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_wbinvl1 +; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; SI-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; SI-NEXT: v_mov_b32_e32 v1, v2 +; SI-NEXT: s_andn2_b64 exec, exec, s[4:5] +; SI-NEXT: s_cbranch_execnz .LBB19_1 +; SI-NEXT: ; %bb.2: ; %atomicrmw.end +; SI-NEXT: s_or_b64 exec, exec, s[4:5] +; SI-NEXT: s_mov_b32 s3, 0xf000 +; SI-NEXT: s_mov_b32 s2, -1 +; SI-NEXT: s_mov_b32 s0, s6 +; SI-NEXT: s_mov_b32 s1, s7 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], 0 +; SI-NEXT: s_endpgm +; +; VI-LABEL: atomic_min_i32_ret_addr64_offset: +; VI: ; %bb.0: ; %entry +; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x3c +; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24 +; VI-NEXT: s_load_dword s8, s[0:1], 0x34 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: s_lshl_b64 s[0:1], s[2:3], 2 +; VI-NEXT: s_add_u32 s0, s4, s0 +; VI-NEXT: s_addc_u32 s1, s5, s1 +; VI-NEXT: s_load_dword s2, s[0:1], 0x10 +; VI-NEXT: s_add_u32 s0, s0, 16 +; VI-NEXT: s_mov_b64 s[4:5], 0 +; VI-NEXT: s_mov_b32 s3, 0xf000 +; VI-NEXT: s_addc_u32 s1, s1, 0 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: v_mov_b32_e32 v1, s2 +; VI-NEXT: s_mov_b32 s2, -1 +; VI-NEXT: .LBB19_1: ; %atomicrmw.start +; VI-NEXT: ; =>This Inner Loop Header: Depth=1 +; VI-NEXT: v_min_i32_e32 v0, s8, v1 +; VI-NEXT: v_mov_b32_e32 v3, v1 +; VI-NEXT: v_mov_b32_e32 v2, v0 +; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; VI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: buffer_wbinvl1_vol +; VI-NEXT: 
v_cmp_eq_u32_e32 vcc, v2, v1 +; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; VI-NEXT: v_mov_b32_e32 v1, v2 +; VI-NEXT: s_andn2_b64 exec, exec, s[4:5] +; VI-NEXT: s_cbranch_execnz .LBB19_1 +; VI-NEXT: ; %bb.2: ; %atomicrmw.end +; VI-NEXT: s_or_b64 exec, exec, s[4:5] +; VI-NEXT: s_mov_b32 s3, 0xf000 +; VI-NEXT: s_mov_b32 s2, -1 +; VI-NEXT: s_mov_b32 s0, s6 +; VI-NEXT: s_mov_b32 s1, s7 +; VI-NEXT: buffer_store_dword v2, off, s[0:3], 0 +; VI-NEXT: s_endpgm +; +; GFX9-LABEL: atomic_min_i32_ret_addr64_offset: +; GFX9: ; %bb.0: ; %entry +; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x3c +; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24 +; GFX9-NEXT: s_load_dword s8, s[0:1], 0x34 +; GFX9-NEXT: v_mov_b32_e32 v1, 0 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: s_lshl_b64 s[0:1], s[2:3], 2 +; GFX9-NEXT: s_add_u32 s0, s4, s0 +; GFX9-NEXT: s_addc_u32 s1, s5, s1 +; GFX9-NEXT: s_load_dword s4, s[0:1], 0x10 +; GFX9-NEXT: s_mov_b64 s[2:3], 0 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v0, s4 +; GFX9-NEXT: .LBB19_1: ; %atomicrmw.start +; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX9-NEXT: v_mov_b32_e32 v3, v0 +; GFX9-NEXT: v_min_i32_e32 v2, s8, v3 +; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: global_atomic_cmpswap v0, v1, v[2:3], s[0:1] offset:16 glc +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_wbinvl1_vol +; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3 +; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3] +; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3] +; GFX9-NEXT: s_cbranch_execnz .LBB19_1 +; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX9-NEXT: s_or_b64 exec, exec, s[2:3] +; GFX9-NEXT: v_mov_b32_e32 v1, 0 +; GFX9-NEXT: global_store_dword v1, v0, s[6:7] +; GFX9-NEXT: s_endpgm +entry: + %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index + %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4 + %val = atomicrmw volatile min i32 addrspace(1)* %gep, i32 %in seq_cst + store i32 %val, i32 addrspace(1)* %out2 + ret void +} + +define amdgpu_kernel void @atomic_min_i32(i32 addrspace(1)* %out, i32 %in) { +; SI-LABEL: atomic_min_i32: +; SI: ; %bb.0: ; %entry +; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9 +; SI-NEXT: s_load_dword s2, s[0:1], 0xb +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: s_load_dword s3, s[4:5], 0x0 +; SI-NEXT: s_mov_b64 s[0:1], 0 +; SI-NEXT: s_mov_b32 s7, 0xf000 +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: v_mov_b32_e32 v1, s3 +; SI-NEXT: s_mov_b32 s6, -1 +; SI-NEXT: .LBB20_1: ; %atomicrmw.start +; SI-NEXT: ; =>This Inner Loop Header: Depth=1 +; SI-NEXT: v_min_i32_e32 v0, s2, v1 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v3, v1 +; SI-NEXT: v_mov_b32_e32 v2, v0 +; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 glc +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_wbinvl1 +; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; SI-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; SI-NEXT: v_mov_b32_e32 v1, v2 +; SI-NEXT: s_andn2_b64 exec, exec, s[0:1] +; SI-NEXT: s_cbranch_execnz .LBB20_1 +; SI-NEXT: ; %bb.2: ; %atomicrmw.end +; SI-NEXT: s_endpgm +; +; VI-LABEL: atomic_min_i32: +; VI: ; %bb.0: ; %entry +; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24 +; VI-NEXT: s_load_dword s2, s[0:1], 0x2c +; VI-NEXT: s_mov_b64 s[0:1], 0 +; VI-NEXT: s_mov_b32 s7, 0xf000 +; VI-NEXT: s_mov_b32 s6, -1 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: s_load_dword s3, s[4:5], 0x0 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: v_mov_b32_e32 v1, s3 +; VI-NEXT: .LBB20_1: ; %atomicrmw.start +; VI-NEXT: ; =>This Inner Loop Header: Depth=1 +; VI-NEXT: 
v_min_i32_e32 v0, s2, v1 +; VI-NEXT: v_mov_b32_e32 v3, v1 +; VI-NEXT: v_mov_b32_e32 v2, v0 +; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; VI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 glc +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: buffer_wbinvl1_vol +; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; VI-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; VI-NEXT: v_mov_b32_e32 v1, v2 +; VI-NEXT: s_andn2_b64 exec, exec, s[0:1] +; VI-NEXT: s_cbranch_execnz .LBB20_1 +; VI-NEXT: ; %bb.2: ; %atomicrmw.end +; VI-NEXT: s_endpgm +; +; GFX9-LABEL: atomic_min_i32: +; GFX9: ; %bb.0: ; %entry +; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24 +; GFX9-NEXT: s_load_dword s4, s[0:1], 0x2c +; GFX9-NEXT: s_mov_b64 s[0:1], 0 +; GFX9-NEXT: v_mov_b32_e32 v2, 0 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: s_load_dword s5, s[2:3], 0x0 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v1, s5 +; GFX9-NEXT: .LBB20_1: ; %atomicrmw.start +; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX9-NEXT: v_min_i32_e32 v0, s4, v1 +; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[2:3] glc +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_wbinvl1_vol +; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 +; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX9-NEXT: v_mov_b32_e32 v1, v0 +; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX9-NEXT: s_cbranch_execnz .LBB20_1 +; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX9-NEXT: s_endpgm +entry: + %val = atomicrmw volatile min i32 addrspace(1)* %out, i32 %in seq_cst + ret void +} + +define amdgpu_kernel void @atomic_min_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) { +; SI-LABEL: atomic_min_i32_ret: +; SI: ; %bb.0: ; %entry +; SI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9 +; SI-NEXT: s_load_dword s10, s[0:1], 0xd +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: s_load_dword s2, s[4:5], 0x0 +; SI-NEXT: s_mov_b64 s[8:9], 0 +; SI-NEXT: s_mov_b32 s3, 0xf000 +; SI-NEXT: s_mov_b64 s[0:1], s[4:5] +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: v_mov_b32_e32 v1, s2 +; SI-NEXT: s_mov_b32 s2, -1 +; SI-NEXT: .LBB21_1: ; %atomicrmw.start +; SI-NEXT: ; =>This Inner Loop Header: Depth=1 +; SI-NEXT: v_min_i32_e32 v0, s10, v1 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v3, v1 +; SI-NEXT: v_mov_b32_e32 v2, v0 +; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_wbinvl1 +; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; SI-NEXT: v_mov_b32_e32 v1, v2 +; SI-NEXT: s_andn2_b64 exec, exec, s[8:9] +; SI-NEXT: s_cbranch_execnz .LBB21_1 +; SI-NEXT: ; %bb.2: ; %atomicrmw.end +; SI-NEXT: s_or_b64 exec, exec, s[8:9] +; SI-NEXT: s_mov_b32 s3, 0xf000 +; SI-NEXT: s_mov_b32 s2, -1 +; SI-NEXT: s_mov_b32 s0, s6 +; SI-NEXT: s_mov_b32 s1, s7 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], 0 +; SI-NEXT: s_endpgm +; +; VI-LABEL: atomic_min_i32_ret: +; VI: ; %bb.0: ; %entry +; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24 +; VI-NEXT: s_load_dword s10, s[0:1], 0x34 +; VI-NEXT: s_mov_b64 s[8:9], 0 +; VI-NEXT: s_mov_b32 s3, 0xf000 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: s_load_dword s2, s[4:5], 0x0 +; VI-NEXT: s_mov_b64 s[0:1], s[4:5] +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: v_mov_b32_e32 v1, s2 +; VI-NEXT: s_mov_b32 s2, -1 +; VI-NEXT: .LBB21_1: ; %atomicrmw.start +; VI-NEXT: ; =>This Inner Loop Header: Depth=1 +; VI-NEXT: v_min_i32_e32 v0, s10, v1 +; VI-NEXT: v_mov_b32_e32 v3, v1 +; VI-NEXT: v_mov_b32_e32 v2, v0 +; 
VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; VI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: buffer_wbinvl1_vol +; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; VI-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; VI-NEXT: v_mov_b32_e32 v1, v2 +; VI-NEXT: s_andn2_b64 exec, exec, s[8:9] +; VI-NEXT: s_cbranch_execnz .LBB21_1 +; VI-NEXT: ; %bb.2: ; %atomicrmw.end +; VI-NEXT: s_or_b64 exec, exec, s[8:9] +; VI-NEXT: s_mov_b32 s3, 0xf000 +; VI-NEXT: s_mov_b32 s2, -1 +; VI-NEXT: s_mov_b32 s0, s6 +; VI-NEXT: s_mov_b32 s1, s7 +; VI-NEXT: buffer_store_dword v2, off, s[0:3], 0 +; VI-NEXT: s_endpgm +; +; GFX9-LABEL: atomic_min_i32_ret: +; GFX9: ; %bb.0: ; %entry +; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24 +; GFX9-NEXT: s_load_dword s2, s[0:1], 0x34 +; GFX9-NEXT: s_mov_b64 s[0:1], 0 +; GFX9-NEXT: v_mov_b32_e32 v1, 0 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: s_load_dword s3, s[4:5], 0x0 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v0, s3 +; GFX9-NEXT: .LBB21_1: ; %atomicrmw.start +; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX9-NEXT: v_mov_b32_e32 v3, v0 +; GFX9-NEXT: v_min_i32_e32 v2, s2, v3 +; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: global_atomic_cmpswap v0, v1, v[2:3], s[4:5] glc +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_wbinvl1_vol +; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3 +; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX9-NEXT: s_cbranch_execnz .LBB21_1 +; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX9-NEXT: s_or_b64 exec, exec, s[0:1] +; GFX9-NEXT: v_mov_b32_e32 v1, 0 +; GFX9-NEXT: global_store_dword v1, v0, s[6:7] +; GFX9-NEXT: s_endpgm +entry: + %val = atomicrmw volatile min i32 addrspace(1)* %out, i32 %in seq_cst + store i32 %val, i32 addrspace(1)* %out2 + ret void +} + +define amdgpu_kernel void @atomic_min_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) { +; SI-LABEL: atomic_min_i32_addr64: +; SI: ; %bb.0: ; %entry +; SI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xd +; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9 +; SI-NEXT: s_load_dword s6, s[0:1], 0xb +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: s_lshl_b64 s[0:1], s[2:3], 2 +; SI-NEXT: s_add_u32 s0, s4, s0 +; SI-NEXT: s_addc_u32 s1, s5, s1 +; SI-NEXT: s_load_dword s2, s[0:1], 0x0 +; SI-NEXT: s_mov_b64 s[4:5], 0 +; SI-NEXT: s_mov_b32 s3, 0xf000 +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: v_mov_b32_e32 v1, s2 +; SI-NEXT: s_mov_b32 s2, -1 +; SI-NEXT: .LBB22_1: ; %atomicrmw.start +; SI-NEXT: ; =>This Inner Loop Header: Depth=1 +; SI-NEXT: v_min_i32_e32 v0, s6, v1 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v3, v1 +; SI-NEXT: v_mov_b32_e32 v2, v0 +; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_wbinvl1 +; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; SI-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; SI-NEXT: v_mov_b32_e32 v1, v2 +; SI-NEXT: s_andn2_b64 exec, exec, s[4:5] +; SI-NEXT: s_cbranch_execnz .LBB22_1 +; SI-NEXT: ; %bb.2: ; %atomicrmw.end +; SI-NEXT: s_endpgm +; +; VI-LABEL: atomic_min_i32_addr64: +; VI: ; %bb.0: ; %entry +; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34 +; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24 +; VI-NEXT: s_load_dword s6, s[0:1], 0x2c +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: s_lshl_b64 s[0:1], s[2:3], 2 +; VI-NEXT: s_add_u32 s0, s4, s0 +; VI-NEXT: s_addc_u32 s1, s5, s1 +; VI-NEXT: s_load_dword s2, s[0:1], 0x0 +; VI-NEXT: s_mov_b64 s[4:5], 0 +; VI-NEXT: s_mov_b32 s3, 
0xf000 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: v_mov_b32_e32 v1, s2 +; VI-NEXT: s_mov_b32 s2, -1 +; VI-NEXT: .LBB22_1: ; %atomicrmw.start +; VI-NEXT: ; =>This Inner Loop Header: Depth=1 +; VI-NEXT: v_min_i32_e32 v0, s6, v1 +; VI-NEXT: v_mov_b32_e32 v3, v1 +; VI-NEXT: v_mov_b32_e32 v2, v0 +; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; VI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: buffer_wbinvl1_vol +; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; VI-NEXT: v_mov_b32_e32 v1, v2 +; VI-NEXT: s_andn2_b64 exec, exec, s[4:5] +; VI-NEXT: s_cbranch_execnz .LBB22_1 +; VI-NEXT: ; %bb.2: ; %atomicrmw.end +; VI-NEXT: s_endpgm +; +; GFX9-LABEL: atomic_min_i32_addr64: +; GFX9: ; %bb.0: ; %entry +; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34 +; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x24 +; GFX9-NEXT: s_load_dword s4, s[0:1], 0x2c +; GFX9-NEXT: v_mov_b32_e32 v2, 0 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: s_lshl_b64 s[0:1], s[2:3], 2 +; GFX9-NEXT: s_add_u32 s0, s6, s0 +; GFX9-NEXT: s_addc_u32 s1, s7, s1 +; GFX9-NEXT: s_load_dword s5, s[0:1], 0x0 +; GFX9-NEXT: s_mov_b64 s[2:3], 0 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v1, s5 +; GFX9-NEXT: .LBB22_1: ; %atomicrmw.start +; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX9-NEXT: v_min_i32_e32 v0, s4, v1 +; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] glc +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_wbinvl1_vol +; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 +; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3] +; GFX9-NEXT: v_mov_b32_e32 v1, v0 +; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3] +; GFX9-NEXT: s_cbranch_execnz .LBB22_1 +; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX9-NEXT: s_endpgm +entry: + %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index + %val = atomicrmw volatile min i32 addrspace(1)* %ptr, i32 %in seq_cst + ret void +} + +define amdgpu_kernel void @atomic_min_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) { +; SI-LABEL: atomic_min_i32_ret_addr64: +; SI: ; %bb.0: ; %entry +; SI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xf +; SI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9 +; SI-NEXT: s_load_dword s8, s[0:1], 0xd +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: s_lshl_b64 s[0:1], s[2:3], 2 +; SI-NEXT: s_add_u32 s0, s4, s0 +; SI-NEXT: s_addc_u32 s1, s5, s1 +; SI-NEXT: s_load_dword s2, s[0:1], 0x0 +; SI-NEXT: s_mov_b64 s[4:5], 0 +; SI-NEXT: s_mov_b32 s3, 0xf000 +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: v_mov_b32_e32 v1, s2 +; SI-NEXT: s_mov_b32 s2, -1 +; SI-NEXT: .LBB23_1: ; %atomicrmw.start +; SI-NEXT: ; =>This Inner Loop Header: Depth=1 +; SI-NEXT: v_min_i32_e32 v0, s8, v1 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v3, v1 +; SI-NEXT: v_mov_b32_e32 v2, v0 +; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_wbinvl1 +; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; SI-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; SI-NEXT: v_mov_b32_e32 v1, v2 +; SI-NEXT: s_andn2_b64 exec, exec, s[4:5] +; SI-NEXT: s_cbranch_execnz .LBB23_1 +; SI-NEXT: ; %bb.2: ; %atomicrmw.end +; SI-NEXT: s_or_b64 exec, exec, s[4:5] +; SI-NEXT: s_mov_b32 s3, 0xf000 +; SI-NEXT: s_mov_b32 s2, -1 +; SI-NEXT: s_mov_b32 s0, s6 +; SI-NEXT: s_mov_b32 s1, s7 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], 0 +; SI-NEXT: s_endpgm +; +; VI-LABEL: atomic_min_i32_ret_addr64: +; 
VI: ; %bb.0: ; %entry +; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x3c +; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24 +; VI-NEXT: s_load_dword s8, s[0:1], 0x34 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: s_lshl_b64 s[0:1], s[2:3], 2 +; VI-NEXT: s_add_u32 s0, s4, s0 +; VI-NEXT: s_addc_u32 s1, s5, s1 +; VI-NEXT: s_load_dword s2, s[0:1], 0x0 +; VI-NEXT: s_mov_b64 s[4:5], 0 +; VI-NEXT: s_mov_b32 s3, 0xf000 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: v_mov_b32_e32 v1, s2 +; VI-NEXT: s_mov_b32 s2, -1 +; VI-NEXT: .LBB23_1: ; %atomicrmw.start +; VI-NEXT: ; =>This Inner Loop Header: Depth=1 +; VI-NEXT: v_min_i32_e32 v0, s8, v1 +; VI-NEXT: v_mov_b32_e32 v3, v1 +; VI-NEXT: v_mov_b32_e32 v2, v0 +; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; VI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: buffer_wbinvl1_vol +; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; VI-NEXT: v_mov_b32_e32 v1, v2 +; VI-NEXT: s_andn2_b64 exec, exec, s[4:5] +; VI-NEXT: s_cbranch_execnz .LBB23_1 +; VI-NEXT: ; %bb.2: ; %atomicrmw.end +; VI-NEXT: s_or_b64 exec, exec, s[4:5] +; VI-NEXT: s_mov_b32 s3, 0xf000 +; VI-NEXT: s_mov_b32 s2, -1 +; VI-NEXT: s_mov_b32 s0, s6 +; VI-NEXT: s_mov_b32 s1, s7 +; VI-NEXT: buffer_store_dword v2, off, s[0:3], 0 +; VI-NEXT: s_endpgm +; +; GFX9-LABEL: atomic_min_i32_ret_addr64: +; GFX9: ; %bb.0: ; %entry +; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x3c +; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24 +; GFX9-NEXT: s_load_dword s8, s[0:1], 0x34 +; GFX9-NEXT: v_mov_b32_e32 v1, 0 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: s_lshl_b64 s[0:1], s[2:3], 2 +; GFX9-NEXT: s_add_u32 s0, s4, s0 +; GFX9-NEXT: s_addc_u32 s1, s5, s1 +; GFX9-NEXT: s_load_dword s4, s[0:1], 0x0 +; GFX9-NEXT: s_mov_b64 s[2:3], 0 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v0, s4 +; GFX9-NEXT: .LBB23_1: ; %atomicrmw.start +; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX9-NEXT: v_mov_b32_e32 v3, v0 +; GFX9-NEXT: v_min_i32_e32 v2, s8, v3 +; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: global_atomic_cmpswap v0, v1, v[2:3], s[0:1] glc +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_wbinvl1_vol +; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3 +; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3] +; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3] +; GFX9-NEXT: s_cbranch_execnz .LBB23_1 +; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX9-NEXT: s_or_b64 exec, exec, s[2:3] +; GFX9-NEXT: v_mov_b32_e32 v1, 0 +; GFX9-NEXT: global_store_dword v1, v0, s[6:7] +; GFX9-NEXT: s_endpgm +entry: + %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index + %val = atomicrmw volatile min i32 addrspace(1)* %ptr, i32 %in seq_cst + store i32 %val, i32 addrspace(1)* %out2 + ret void +} + +define amdgpu_kernel void @atomic_umin_i32_offset(i32 addrspace(1)* %out, i32 %in) { +; SI-LABEL: atomic_umin_i32_offset: +; SI: ; %bb.0: ; %entry +; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9 +; SI-NEXT: s_load_dword s2, s[0:1], 0xb +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: s_load_dword s3, s[4:5], 0x4 +; SI-NEXT: s_mov_b64 s[0:1], 0 +; SI-NEXT: s_mov_b32 s7, 0xf000 +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: v_mov_b32_e32 v1, s3 +; SI-NEXT: s_mov_b32 s6, -1 +; SI-NEXT: .LBB24_1: ; %atomicrmw.start +; SI-NEXT: ; =>This Inner Loop Header: Depth=1 +; SI-NEXT: v_min_u32_e32 v0, s2, v1 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v3, v1 +; SI-NEXT: v_mov_b32_e32 v2, v0 +; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, 
s[4:7], 0 offset:16 glc +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_wbinvl1 +; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; SI-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; SI-NEXT: v_mov_b32_e32 v1, v2 +; SI-NEXT: s_andn2_b64 exec, exec, s[0:1] +; SI-NEXT: s_cbranch_execnz .LBB24_1 +; SI-NEXT: ; %bb.2: ; %atomicrmw.end +; SI-NEXT: s_endpgm +; +; VI-LABEL: atomic_umin_i32_offset: +; VI: ; %bb.0: ; %entry +; VI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x24 +; VI-NEXT: s_load_dword s6, s[0:1], 0x2c +; VI-NEXT: s_mov_b64 s[4:5], 0 +; VI-NEXT: s_mov_b32 s3, 0xf000 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: s_load_dword s2, s[8:9], 0x10 +; VI-NEXT: s_add_u32 s0, s8, 16 +; VI-NEXT: s_addc_u32 s1, s9, 0 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: v_mov_b32_e32 v1, s2 +; VI-NEXT: s_mov_b32 s2, -1 +; VI-NEXT: .LBB24_1: ; %atomicrmw.start +; VI-NEXT: ; =>This Inner Loop Header: Depth=1 +; VI-NEXT: v_min_u32_e32 v0, s6, v1 +; VI-NEXT: v_mov_b32_e32 v3, v1 +; VI-NEXT: v_mov_b32_e32 v2, v0 +; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; VI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: buffer_wbinvl1_vol +; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; VI-NEXT: v_mov_b32_e32 v1, v2 +; VI-NEXT: s_andn2_b64 exec, exec, s[4:5] +; VI-NEXT: s_cbranch_execnz .LBB24_1 +; VI-NEXT: ; %bb.2: ; %atomicrmw.end +; VI-NEXT: s_endpgm +; +; GFX9-LABEL: atomic_umin_i32_offset: +; GFX9: ; %bb.0: ; %entry +; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24 +; GFX9-NEXT: s_load_dword s4, s[0:1], 0x2c +; GFX9-NEXT: s_mov_b64 s[0:1], 0 +; GFX9-NEXT: v_mov_b32_e32 v2, 0 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: s_load_dword s5, s[2:3], 0x10 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v1, s5 +; GFX9-NEXT: .LBB24_1: ; %atomicrmw.start +; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX9-NEXT: v_min_u32_e32 v0, s4, v1 +; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[2:3] offset:16 glc +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_wbinvl1_vol +; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 +; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX9-NEXT: v_mov_b32_e32 v1, v0 +; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX9-NEXT: s_cbranch_execnz .LBB24_1 +; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX9-NEXT: s_endpgm +entry: + %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4 + %val = atomicrmw volatile umin i32 addrspace(1)* %gep, i32 %in seq_cst + ret void +} + +define amdgpu_kernel void @atomic_umin_i32_ret_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) { +; SI-LABEL: atomic_umin_i32_ret_offset: +; SI: ; %bb.0: ; %entry +; SI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9 +; SI-NEXT: s_load_dword s10, s[0:1], 0xd +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: s_load_dword s2, s[4:5], 0x4 +; SI-NEXT: s_mov_b64 s[8:9], 0 +; SI-NEXT: s_mov_b32 s3, 0xf000 +; SI-NEXT: s_mov_b64 s[0:1], s[4:5] +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: v_mov_b32_e32 v1, s2 +; SI-NEXT: s_mov_b32 s2, -1 +; SI-NEXT: .LBB25_1: ; %atomicrmw.start +; SI-NEXT: ; =>This Inner Loop Header: Depth=1 +; SI-NEXT: v_min_u32_e32 v0, s10, v1 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v3, v1 +; SI-NEXT: v_mov_b32_e32 v2, v0 +; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 offset:16 glc +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_wbinvl1 +; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; SI-NEXT: 
v_mov_b32_e32 v1, v2 +; SI-NEXT: s_andn2_b64 exec, exec, s[8:9] +; SI-NEXT: s_cbranch_execnz .LBB25_1 +; SI-NEXT: ; %bb.2: ; %atomicrmw.end +; SI-NEXT: s_or_b64 exec, exec, s[8:9] +; SI-NEXT: s_mov_b32 s3, 0xf000 +; SI-NEXT: s_mov_b32 s2, -1 +; SI-NEXT: s_mov_b32 s0, s6 +; SI-NEXT: s_mov_b32 s1, s7 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], 0 +; SI-NEXT: s_endpgm +; +; VI-LABEL: atomic_umin_i32_ret_offset: +; VI: ; %bb.0: ; %entry +; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24 +; VI-NEXT: s_load_dword s10, s[0:1], 0x34 +; VI-NEXT: s_mov_b64 s[8:9], 0 +; VI-NEXT: s_mov_b32 s3, 0xf000 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: s_load_dword s2, s[4:5], 0x10 +; VI-NEXT: s_add_u32 s0, s4, 16 +; VI-NEXT: s_addc_u32 s1, s5, 0 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: v_mov_b32_e32 v1, s2 +; VI-NEXT: s_mov_b32 s2, -1 +; VI-NEXT: .LBB25_1: ; %atomicrmw.start +; VI-NEXT: ; =>This Inner Loop Header: Depth=1 +; VI-NEXT: v_min_u32_e32 v0, s10, v1 +; VI-NEXT: v_mov_b32_e32 v3, v1 +; VI-NEXT: v_mov_b32_e32 v2, v0 +; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; VI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: buffer_wbinvl1_vol +; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; VI-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; VI-NEXT: v_mov_b32_e32 v1, v2 +; VI-NEXT: s_andn2_b64 exec, exec, s[8:9] +; VI-NEXT: s_cbranch_execnz .LBB25_1 +; VI-NEXT: ; %bb.2: ; %atomicrmw.end +; VI-NEXT: s_or_b64 exec, exec, s[8:9] +; VI-NEXT: s_mov_b32 s3, 0xf000 +; VI-NEXT: s_mov_b32 s2, -1 +; VI-NEXT: s_mov_b32 s0, s6 +; VI-NEXT: s_mov_b32 s1, s7 +; VI-NEXT: buffer_store_dword v2, off, s[0:3], 0 +; VI-NEXT: s_endpgm +; +; GFX9-LABEL: atomic_umin_i32_ret_offset: +; GFX9: ; %bb.0: ; %entry +; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24 +; GFX9-NEXT: s_load_dword s2, s[0:1], 0x34 +; GFX9-NEXT: s_mov_b64 s[0:1], 0 +; GFX9-NEXT: v_mov_b32_e32 v1, 0 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: s_load_dword s3, s[4:5], 0x10 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v0, s3 +; GFX9-NEXT: .LBB25_1: ; %atomicrmw.start +; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX9-NEXT: v_mov_b32_e32 v3, v0 +; GFX9-NEXT: v_min_u32_e32 v2, s2, v3 +; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: global_atomic_cmpswap v0, v1, v[2:3], s[4:5] offset:16 glc +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_wbinvl1_vol +; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3 +; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX9-NEXT: s_cbranch_execnz .LBB25_1 +; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX9-NEXT: s_or_b64 exec, exec, s[0:1] +; GFX9-NEXT: v_mov_b32_e32 v1, 0 +; GFX9-NEXT: global_store_dword v1, v0, s[6:7] +; GFX9-NEXT: s_endpgm +entry: + %gep = getelementptr i32, i32 addrspace(1)* %out, i64 4 + %val = atomicrmw volatile umin i32 addrspace(1)* %gep, i32 %in seq_cst + store i32 %val, i32 addrspace(1)* %out2 + ret void +} + +define amdgpu_kernel void @atomic_umin_i32_addr64_offset(i32 addrspace(1)* %out, i32 %in, i64 %index) { +; SI-LABEL: atomic_umin_i32_addr64_offset: +; SI: ; %bb.0: ; %entry +; SI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xd +; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9 +; SI-NEXT: s_load_dword s6, s[0:1], 0xb +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: s_lshl_b64 s[0:1], s[2:3], 2 +; SI-NEXT: s_add_u32 s0, s4, s0 +; SI-NEXT: s_addc_u32 s1, s5, s1 +; SI-NEXT: s_load_dword s2, s[0:1], 0x4 +; SI-NEXT: s_mov_b64 s[4:5], 0 +; SI-NEXT: s_mov_b32 s3, 0xf000 +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: 
v_mov_b32_e32 v1, s2 +; SI-NEXT: s_mov_b32 s2, -1 +; SI-NEXT: .LBB26_1: ; %atomicrmw.start +; SI-NEXT: ; =>This Inner Loop Header: Depth=1 +; SI-NEXT: v_min_u32_e32 v0, s6, v1 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v3, v1 +; SI-NEXT: v_mov_b32_e32 v2, v0 +; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 offset:16 glc +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_wbinvl1 +; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; SI-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; SI-NEXT: v_mov_b32_e32 v1, v2 +; SI-NEXT: s_andn2_b64 exec, exec, s[4:5] +; SI-NEXT: s_cbranch_execnz .LBB26_1 +; SI-NEXT: ; %bb.2: ; %atomicrmw.end +; SI-NEXT: s_endpgm +; +; VI-LABEL: atomic_umin_i32_addr64_offset: +; VI: ; %bb.0: ; %entry +; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34 +; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24 +; VI-NEXT: s_load_dword s6, s[0:1], 0x2c +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: s_lshl_b64 s[0:1], s[2:3], 2 +; VI-NEXT: s_add_u32 s0, s4, s0 +; VI-NEXT: s_addc_u32 s1, s5, s1 +; VI-NEXT: s_load_dword s2, s[0:1], 0x10 +; VI-NEXT: s_add_u32 s0, s0, 16 +; VI-NEXT: s_mov_b64 s[4:5], 0 +; VI-NEXT: s_mov_b32 s3, 0xf000 +; VI-NEXT: s_addc_u32 s1, s1, 0 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: v_mov_b32_e32 v1, s2 +; VI-NEXT: s_mov_b32 s2, -1 +; VI-NEXT: .LBB26_1: ; %atomicrmw.start +; VI-NEXT: ; =>This Inner Loop Header: Depth=1 +; VI-NEXT: v_min_u32_e32 v0, s6, v1 +; VI-NEXT: v_mov_b32_e32 v3, v1 +; VI-NEXT: v_mov_b32_e32 v2, v0 +; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; VI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: buffer_wbinvl1_vol +; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; VI-NEXT: v_mov_b32_e32 v1, v2 +; VI-NEXT: s_andn2_b64 exec, exec, s[4:5] +; VI-NEXT: s_cbranch_execnz .LBB26_1 +; VI-NEXT: ; %bb.2: ; %atomicrmw.end +; VI-NEXT: s_endpgm +; +; GFX9-LABEL: atomic_umin_i32_addr64_offset: +; GFX9: ; %bb.0: ; %entry +; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34 +; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x24 +; GFX9-NEXT: s_load_dword s4, s[0:1], 0x2c +; GFX9-NEXT: v_mov_b32_e32 v2, 0 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: s_lshl_b64 s[0:1], s[2:3], 2 +; GFX9-NEXT: s_add_u32 s0, s6, s0 +; GFX9-NEXT: s_addc_u32 s1, s7, s1 +; GFX9-NEXT: s_load_dword s5, s[0:1], 0x10 +; GFX9-NEXT: s_mov_b64 s[2:3], 0 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v1, s5 +; GFX9-NEXT: .LBB26_1: ; %atomicrmw.start +; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX9-NEXT: v_min_u32_e32 v0, s4, v1 +; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_wbinvl1_vol +; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 +; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3] +; GFX9-NEXT: v_mov_b32_e32 v1, v0 +; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3] +; GFX9-NEXT: s_cbranch_execnz .LBB26_1 +; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX9-NEXT: s_endpgm +entry: + %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index + %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4 + %val = atomicrmw volatile umin i32 addrspace(1)* %gep, i32 %in seq_cst + ret void +} + +define amdgpu_kernel void @atomic_umin_i32_ret_addr64_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) { +; SI-LABEL: atomic_umin_i32_ret_addr64_offset: +; SI: ; %bb.0: ; %entry +; SI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xf +; SI-NEXT: 
s_load_dwordx4 s[4:7], s[0:1], 0x9 +; SI-NEXT: s_load_dword s8, s[0:1], 0xd +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: s_lshl_b64 s[0:1], s[2:3], 2 +; SI-NEXT: s_add_u32 s0, s4, s0 +; SI-NEXT: s_addc_u32 s1, s5, s1 +; SI-NEXT: s_load_dword s2, s[0:1], 0x4 +; SI-NEXT: s_mov_b64 s[4:5], 0 +; SI-NEXT: s_mov_b32 s3, 0xf000 +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: v_mov_b32_e32 v1, s2 +; SI-NEXT: s_mov_b32 s2, -1 +; SI-NEXT: .LBB27_1: ; %atomicrmw.start +; SI-NEXT: ; =>This Inner Loop Header: Depth=1 +; SI-NEXT: v_min_u32_e32 v0, s8, v1 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v3, v1 +; SI-NEXT: v_mov_b32_e32 v2, v0 +; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 offset:16 glc +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_wbinvl1 +; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; SI-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; SI-NEXT: v_mov_b32_e32 v1, v2 +; SI-NEXT: s_andn2_b64 exec, exec, s[4:5] +; SI-NEXT: s_cbranch_execnz .LBB27_1 +; SI-NEXT: ; %bb.2: ; %atomicrmw.end +; SI-NEXT: s_or_b64 exec, exec, s[4:5] +; SI-NEXT: s_mov_b32 s3, 0xf000 +; SI-NEXT: s_mov_b32 s2, -1 +; SI-NEXT: s_mov_b32 s0, s6 +; SI-NEXT: s_mov_b32 s1, s7 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], 0 +; SI-NEXT: s_endpgm +; +; VI-LABEL: atomic_umin_i32_ret_addr64_offset: +; VI: ; %bb.0: ; %entry +; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x3c +; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24 +; VI-NEXT: s_load_dword s8, s[0:1], 0x34 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: s_lshl_b64 s[0:1], s[2:3], 2 +; VI-NEXT: s_add_u32 s0, s4, s0 +; VI-NEXT: s_addc_u32 s1, s5, s1 +; VI-NEXT: s_load_dword s2, s[0:1], 0x10 +; VI-NEXT: s_add_u32 s0, s0, 16 +; VI-NEXT: s_mov_b64 s[4:5], 0 +; VI-NEXT: s_mov_b32 s3, 0xf000 +; VI-NEXT: s_addc_u32 s1, s1, 0 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: v_mov_b32_e32 v1, s2 +; VI-NEXT: s_mov_b32 s2, -1 +; VI-NEXT: .LBB27_1: ; %atomicrmw.start +; VI-NEXT: ; =>This Inner Loop Header: Depth=1 +; VI-NEXT: v_min_u32_e32 v0, s8, v1 +; VI-NEXT: v_mov_b32_e32 v3, v1 +; VI-NEXT: v_mov_b32_e32 v2, v0 +; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; VI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: buffer_wbinvl1_vol +; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; VI-NEXT: v_mov_b32_e32 v1, v2 +; VI-NEXT: s_andn2_b64 exec, exec, s[4:5] +; VI-NEXT: s_cbranch_execnz .LBB27_1 +; VI-NEXT: ; %bb.2: ; %atomicrmw.end +; VI-NEXT: s_or_b64 exec, exec, s[4:5] +; VI-NEXT: s_mov_b32 s3, 0xf000 +; VI-NEXT: s_mov_b32 s2, -1 +; VI-NEXT: s_mov_b32 s0, s6 +; VI-NEXT: s_mov_b32 s1, s7 +; VI-NEXT: buffer_store_dword v2, off, s[0:3], 0 +; VI-NEXT: s_endpgm +; +; GFX9-LABEL: atomic_umin_i32_ret_addr64_offset: +; GFX9: ; %bb.0: ; %entry +; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x3c +; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24 +; GFX9-NEXT: s_load_dword s8, s[0:1], 0x34 +; GFX9-NEXT: v_mov_b32_e32 v1, 0 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: s_lshl_b64 s[0:1], s[2:3], 2 +; GFX9-NEXT: s_add_u32 s0, s4, s0 +; GFX9-NEXT: s_addc_u32 s1, s5, s1 +; GFX9-NEXT: s_load_dword s4, s[0:1], 0x10 +; GFX9-NEXT: s_mov_b64 s[2:3], 0 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v0, s4 +; GFX9-NEXT: .LBB27_1: ; %atomicrmw.start +; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX9-NEXT: v_mov_b32_e32 v3, v0 +; GFX9-NEXT: v_min_u32_e32 v2, s8, v3 +; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: global_atomic_cmpswap v0, v1, v[2:3], s[0:1] 
offset:16 glc +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_wbinvl1_vol +; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3 +; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3] +; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3] +; GFX9-NEXT: s_cbranch_execnz .LBB27_1 +; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX9-NEXT: s_or_b64 exec, exec, s[2:3] +; GFX9-NEXT: v_mov_b32_e32 v1, 0 +; GFX9-NEXT: global_store_dword v1, v0, s[6:7] +; GFX9-NEXT: s_endpgm +entry: + %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index + %gep = getelementptr i32, i32 addrspace(1)* %ptr, i64 4 + %val = atomicrmw volatile umin i32 addrspace(1)* %gep, i32 %in seq_cst + store i32 %val, i32 addrspace(1)* %out2 + ret void +} + +define amdgpu_kernel void @atomic_umin_i32(i32 addrspace(1)* %out, i32 %in) { +; SI-LABEL: atomic_umin_i32: +; SI: ; %bb.0: ; %entry +; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9 +; SI-NEXT: s_load_dword s2, s[0:1], 0xb +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: s_load_dword s3, s[4:5], 0x0 +; SI-NEXT: s_mov_b64 s[0:1], 0 +; SI-NEXT: s_mov_b32 s7, 0xf000 +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: v_mov_b32_e32 v1, s3 +; SI-NEXT: s_mov_b32 s6, -1 +; SI-NEXT: .LBB28_1: ; %atomicrmw.start +; SI-NEXT: ; =>This Inner Loop Header: Depth=1 +; SI-NEXT: v_min_u32_e32 v0, s2, v1 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v3, v1 +; SI-NEXT: v_mov_b32_e32 v2, v0 +; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 glc +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_wbinvl1 +; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; SI-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; SI-NEXT: v_mov_b32_e32 v1, v2 +; SI-NEXT: s_andn2_b64 exec, exec, s[0:1] +; SI-NEXT: s_cbranch_execnz .LBB28_1 +; SI-NEXT: ; %bb.2: ; %atomicrmw.end +; SI-NEXT: s_endpgm +; +; VI-LABEL: atomic_umin_i32: +; VI: ; %bb.0: ; %entry +; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24 +; VI-NEXT: s_load_dword s2, s[0:1], 0x2c +; VI-NEXT: s_mov_b64 s[0:1], 0 +; VI-NEXT: s_mov_b32 s7, 0xf000 +; VI-NEXT: s_mov_b32 s6, -1 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: s_load_dword s3, s[4:5], 0x0 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: v_mov_b32_e32 v1, s3 +; VI-NEXT: .LBB28_1: ; %atomicrmw.start +; VI-NEXT: ; =>This Inner Loop Header: Depth=1 +; VI-NEXT: v_min_u32_e32 v0, s2, v1 +; VI-NEXT: v_mov_b32_e32 v3, v1 +; VI-NEXT: v_mov_b32_e32 v2, v0 +; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; VI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[4:7], 0 glc +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: buffer_wbinvl1_vol +; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; VI-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; VI-NEXT: v_mov_b32_e32 v1, v2 +; VI-NEXT: s_andn2_b64 exec, exec, s[0:1] +; VI-NEXT: s_cbranch_execnz .LBB28_1 +; VI-NEXT: ; %bb.2: ; %atomicrmw.end +; VI-NEXT: s_endpgm +; +; GFX9-LABEL: atomic_umin_i32: +; GFX9: ; %bb.0: ; %entry +; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24 +; GFX9-NEXT: s_load_dword s4, s[0:1], 0x2c +; GFX9-NEXT: s_mov_b64 s[0:1], 0 +; GFX9-NEXT: v_mov_b32_e32 v2, 0 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: s_load_dword s5, s[2:3], 0x0 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v1, s5 +; GFX9-NEXT: .LBB28_1: ; %atomicrmw.start +; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX9-NEXT: v_min_u32_e32 v0, s4, v1 +; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[2:3] glc +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_wbinvl1_vol +; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 +; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; 
GFX9-NEXT: v_mov_b32_e32 v1, v0 +; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX9-NEXT: s_cbranch_execnz .LBB28_1 +; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX9-NEXT: s_endpgm +entry: + %val = atomicrmw volatile umin i32 addrspace(1)* %out, i32 %in seq_cst + ret void +} + +define amdgpu_kernel void @atomic_umin_i32_ret(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in) { +; SI-LABEL: atomic_umin_i32_ret: +; SI: ; %bb.0: ; %entry +; SI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9 +; SI-NEXT: s_load_dword s10, s[0:1], 0xd +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: s_load_dword s2, s[4:5], 0x0 +; SI-NEXT: s_mov_b64 s[8:9], 0 +; SI-NEXT: s_mov_b32 s3, 0xf000 +; SI-NEXT: s_mov_b64 s[0:1], s[4:5] +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: v_mov_b32_e32 v1, s2 +; SI-NEXT: s_mov_b32 s2, -1 +; SI-NEXT: .LBB29_1: ; %atomicrmw.start +; SI-NEXT: ; =>This Inner Loop Header: Depth=1 +; SI-NEXT: v_min_u32_e32 v0, s10, v1 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v3, v1 +; SI-NEXT: v_mov_b32_e32 v2, v0 +; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_wbinvl1 +; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; SI-NEXT: v_mov_b32_e32 v1, v2 +; SI-NEXT: s_andn2_b64 exec, exec, s[8:9] +; SI-NEXT: s_cbranch_execnz .LBB29_1 +; SI-NEXT: ; %bb.2: ; %atomicrmw.end +; SI-NEXT: s_or_b64 exec, exec, s[8:9] +; SI-NEXT: s_mov_b32 s3, 0xf000 +; SI-NEXT: s_mov_b32 s2, -1 +; SI-NEXT: s_mov_b32 s0, s6 +; SI-NEXT: s_mov_b32 s1, s7 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], 0 +; SI-NEXT: s_endpgm +; +; VI-LABEL: atomic_umin_i32_ret: +; VI: ; %bb.0: ; %entry +; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24 +; VI-NEXT: s_load_dword s10, s[0:1], 0x34 +; VI-NEXT: s_mov_b64 s[8:9], 0 +; VI-NEXT: s_mov_b32 s3, 0xf000 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: s_load_dword s2, s[4:5], 0x0 +; VI-NEXT: s_mov_b64 s[0:1], s[4:5] +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: v_mov_b32_e32 v1, s2 +; VI-NEXT: s_mov_b32 s2, -1 +; VI-NEXT: .LBB29_1: ; %atomicrmw.start +; VI-NEXT: ; =>This Inner Loop Header: Depth=1 +; VI-NEXT: v_min_u32_e32 v0, s10, v1 +; VI-NEXT: v_mov_b32_e32 v3, v1 +; VI-NEXT: v_mov_b32_e32 v2, v0 +; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; VI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: buffer_wbinvl1_vol +; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; VI-NEXT: s_or_b64 s[8:9], vcc, s[8:9] +; VI-NEXT: v_mov_b32_e32 v1, v2 +; VI-NEXT: s_andn2_b64 exec, exec, s[8:9] +; VI-NEXT: s_cbranch_execnz .LBB29_1 +; VI-NEXT: ; %bb.2: ; %atomicrmw.end +; VI-NEXT: s_or_b64 exec, exec, s[8:9] +; VI-NEXT: s_mov_b32 s3, 0xf000 +; VI-NEXT: s_mov_b32 s2, -1 +; VI-NEXT: s_mov_b32 s0, s6 +; VI-NEXT: s_mov_b32 s1, s7 +; VI-NEXT: buffer_store_dword v2, off, s[0:3], 0 +; VI-NEXT: s_endpgm +; +; GFX9-LABEL: atomic_umin_i32_ret: +; GFX9: ; %bb.0: ; %entry +; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24 +; GFX9-NEXT: s_load_dword s2, s[0:1], 0x34 +; GFX9-NEXT: s_mov_b64 s[0:1], 0 +; GFX9-NEXT: v_mov_b32_e32 v1, 0 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: s_load_dword s3, s[4:5], 0x0 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v0, s3 +; GFX9-NEXT: .LBB29_1: ; %atomicrmw.start +; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX9-NEXT: v_mov_b32_e32 v3, v0 +; GFX9-NEXT: v_min_u32_e32 v2, s2, v3 +; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: global_atomic_cmpswap v0, v1, v[2:3], s[4:5] glc +; 
GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_wbinvl1_vol +; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3 +; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GFX9-NEXT: s_cbranch_execnz .LBB29_1 +; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX9-NEXT: s_or_b64 exec, exec, s[0:1] +; GFX9-NEXT: v_mov_b32_e32 v1, 0 +; GFX9-NEXT: global_store_dword v1, v0, s[6:7] +; GFX9-NEXT: s_endpgm +entry: + %val = atomicrmw volatile umin i32 addrspace(1)* %out, i32 %in seq_cst + store i32 %val, i32 addrspace(1)* %out2 + ret void +} + +define amdgpu_kernel void @atomic_umin_i32_addr64(i32 addrspace(1)* %out, i32 %in, i64 %index) { +; SI-LABEL: atomic_umin_i32_addr64: +; SI: ; %bb.0: ; %entry +; SI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xd +; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9 +; SI-NEXT: s_load_dword s6, s[0:1], 0xb +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: s_lshl_b64 s[0:1], s[2:3], 2 +; SI-NEXT: s_add_u32 s0, s4, s0 +; SI-NEXT: s_addc_u32 s1, s5, s1 +; SI-NEXT: s_load_dword s2, s[0:1], 0x0 +; SI-NEXT: s_mov_b64 s[4:5], 0 +; SI-NEXT: s_mov_b32 s3, 0xf000 +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: v_mov_b32_e32 v1, s2 +; SI-NEXT: s_mov_b32 s2, -1 +; SI-NEXT: .LBB30_1: ; %atomicrmw.start +; SI-NEXT: ; =>This Inner Loop Header: Depth=1 +; SI-NEXT: v_min_u32_e32 v0, s6, v1 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v3, v1 +; SI-NEXT: v_mov_b32_e32 v2, v0 +; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_wbinvl1 +; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; SI-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; SI-NEXT: v_mov_b32_e32 v1, v2 +; SI-NEXT: s_andn2_b64 exec, exec, s[4:5] +; SI-NEXT: s_cbranch_execnz .LBB30_1 +; SI-NEXT: ; %bb.2: ; %atomicrmw.end +; SI-NEXT: s_endpgm +; +; VI-LABEL: atomic_umin_i32_addr64: +; VI: ; %bb.0: ; %entry +; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34 +; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24 +; VI-NEXT: s_load_dword s6, s[0:1], 0x2c +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: s_lshl_b64 s[0:1], s[2:3], 2 +; VI-NEXT: s_add_u32 s0, s4, s0 +; VI-NEXT: s_addc_u32 s1, s5, s1 +; VI-NEXT: s_load_dword s2, s[0:1], 0x0 +; VI-NEXT: s_mov_b64 s[4:5], 0 +; VI-NEXT: s_mov_b32 s3, 0xf000 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: v_mov_b32_e32 v1, s2 +; VI-NEXT: s_mov_b32 s2, -1 +; VI-NEXT: .LBB30_1: ; %atomicrmw.start +; VI-NEXT: ; =>This Inner Loop Header: Depth=1 +; VI-NEXT: v_min_u32_e32 v0, s6, v1 +; VI-NEXT: v_mov_b32_e32 v3, v1 +; VI-NEXT: v_mov_b32_e32 v2, v0 +; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; VI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: buffer_wbinvl1_vol +; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; VI-NEXT: v_mov_b32_e32 v1, v2 +; VI-NEXT: s_andn2_b64 exec, exec, s[4:5] +; VI-NEXT: s_cbranch_execnz .LBB30_1 +; VI-NEXT: ; %bb.2: ; %atomicrmw.end +; VI-NEXT: s_endpgm +; +; GFX9-LABEL: atomic_umin_i32_addr64: +; GFX9: ; %bb.0: ; %entry +; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34 +; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x24 +; GFX9-NEXT: s_load_dword s4, s[0:1], 0x2c +; GFX9-NEXT: v_mov_b32_e32 v2, 0 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: s_lshl_b64 s[0:1], s[2:3], 2 +; GFX9-NEXT: s_add_u32 s0, s6, s0 +; GFX9-NEXT: s_addc_u32 s1, s7, s1 +; GFX9-NEXT: s_load_dword s5, s[0:1], 0x0 +; GFX9-NEXT: s_mov_b64 s[2:3], 0 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v1, s5 +; GFX9-NEXT: 
.LBB30_1: ; %atomicrmw.start +; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX9-NEXT: v_min_u32_e32 v0, s4, v1 +; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] glc +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_wbinvl1_vol +; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v1 +; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3] +; GFX9-NEXT: v_mov_b32_e32 v1, v0 +; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3] +; GFX9-NEXT: s_cbranch_execnz .LBB30_1 +; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX9-NEXT: s_endpgm +entry: + %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index + %val = atomicrmw volatile umin i32 addrspace(1)* %ptr, i32 %in seq_cst + ret void +} + +define amdgpu_kernel void @atomic_umin_i32_ret_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %out2, i32 %in, i64 %index) { +; SI-LABEL: atomic_umin_i32_ret_addr64: +; SI: ; %bb.0: ; %entry +; SI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xf +; SI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9 +; SI-NEXT: s_load_dword s8, s[0:1], 0xd +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: s_lshl_b64 s[0:1], s[2:3], 2 +; SI-NEXT: s_add_u32 s0, s4, s0 +; SI-NEXT: s_addc_u32 s1, s5, s1 +; SI-NEXT: s_load_dword s2, s[0:1], 0x0 +; SI-NEXT: s_mov_b64 s[4:5], 0 +; SI-NEXT: s_mov_b32 s3, 0xf000 +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: v_mov_b32_e32 v1, s2 +; SI-NEXT: s_mov_b32 s2, -1 +; SI-NEXT: .LBB31_1: ; %atomicrmw.start +; SI-NEXT: ; =>This Inner Loop Header: Depth=1 +; SI-NEXT: v_min_u32_e32 v0, s8, v1 +; SI-NEXT: s_waitcnt expcnt(0) +; SI-NEXT: v_mov_b32_e32 v3, v1 +; SI-NEXT: v_mov_b32_e32 v2, v0 +; SI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; SI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: buffer_wbinvl1 +; SI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; SI-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; SI-NEXT: v_mov_b32_e32 v1, v2 +; SI-NEXT: s_andn2_b64 exec, exec, s[4:5] +; SI-NEXT: s_cbranch_execnz .LBB31_1 +; SI-NEXT: ; %bb.2: ; %atomicrmw.end +; SI-NEXT: s_or_b64 exec, exec, s[4:5] +; SI-NEXT: s_mov_b32 s3, 0xf000 +; SI-NEXT: s_mov_b32 s2, -1 +; SI-NEXT: s_mov_b32 s0, s6 +; SI-NEXT: s_mov_b32 s1, s7 +; SI-NEXT: buffer_store_dword v2, off, s[0:3], 0 +; SI-NEXT: s_endpgm +; +; VI-LABEL: atomic_umin_i32_ret_addr64: +; VI: ; %bb.0: ; %entry +; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x3c +; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24 +; VI-NEXT: s_load_dword s8, s[0:1], 0x34 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: s_lshl_b64 s[0:1], s[2:3], 2 +; VI-NEXT: s_add_u32 s0, s4, s0 +; VI-NEXT: s_addc_u32 s1, s5, s1 +; VI-NEXT: s_load_dword s2, s[0:1], 0x0 +; VI-NEXT: s_mov_b64 s[4:5], 0 +; VI-NEXT: s_mov_b32 s3, 0xf000 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: v_mov_b32_e32 v1, s2 +; VI-NEXT: s_mov_b32 s2, -1 +; VI-NEXT: .LBB31_1: ; %atomicrmw.start +; VI-NEXT: ; =>This Inner Loop Header: Depth=1 +; VI-NEXT: v_min_u32_e32 v0, s8, v1 +; VI-NEXT: v_mov_b32_e32 v3, v1 +; VI-NEXT: v_mov_b32_e32 v2, v0 +; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; VI-NEXT: buffer_atomic_cmpswap v[2:3], off, s[0:3], 0 glc +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: buffer_wbinvl1_vol +; VI-NEXT: v_cmp_eq_u32_e32 vcc, v2, v1 +; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5] +; VI-NEXT: v_mov_b32_e32 v1, v2 +; VI-NEXT: s_andn2_b64 exec, exec, s[4:5] +; VI-NEXT: s_cbranch_execnz .LBB31_1 +; VI-NEXT: ; %bb.2: ; %atomicrmw.end +; VI-NEXT: s_or_b64 exec, exec, s[4:5] +; VI-NEXT: s_mov_b32 s3, 0xf000 +; VI-NEXT: s_mov_b32 s2, -1 +; VI-NEXT: s_mov_b32 s0, s6 +; VI-NEXT: s_mov_b32 s1, s7 +; VI-NEXT: 
buffer_store_dword v2, off, s[0:3], 0 +; VI-NEXT: s_endpgm +; +; GFX9-LABEL: atomic_umin_i32_ret_addr64: +; GFX9: ; %bb.0: ; %entry +; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x3c +; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24 +; GFX9-NEXT: s_load_dword s8, s[0:1], 0x34 +; GFX9-NEXT: v_mov_b32_e32 v1, 0 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: s_lshl_b64 s[0:1], s[2:3], 2 +; GFX9-NEXT: s_add_u32 s0, s4, s0 +; GFX9-NEXT: s_addc_u32 s1, s5, s1 +; GFX9-NEXT: s_load_dword s4, s[0:1], 0x0 +; GFX9-NEXT: s_mov_b64 s[2:3], 0 +; GFX9-NEXT: s_waitcnt lgkmcnt(0) +; GFX9-NEXT: v_mov_b32_e32 v0, s4 +; GFX9-NEXT: .LBB31_1: ; %atomicrmw.start +; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1 +; GFX9-NEXT: v_mov_b32_e32 v3, v0 +; GFX9-NEXT: v_min_u32_e32 v2, s8, v3 +; GFX9-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GFX9-NEXT: global_atomic_cmpswap v0, v1, v[2:3], s[0:1] glc +; GFX9-NEXT: s_waitcnt vmcnt(0) +; GFX9-NEXT: buffer_wbinvl1_vol +; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, v0, v3 +; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3] +; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3] +; GFX9-NEXT: s_cbranch_execnz .LBB31_1 +; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end +; GFX9-NEXT: s_or_b64 exec, exec, s[2:3] +; GFX9-NEXT: v_mov_b32_e32 v1, 0 +; GFX9-NEXT: global_store_dword v1, v0, s[6:7] +; GFX9-NEXT: s_endpgm +entry: + %ptr = getelementptr i32, i32 addrspace(1)* %out, i64 %index + %val = atomicrmw volatile umin i32 addrspace(1)* %ptr, i32 %in seq_cst + store i32 %val, i32 addrspace(1)* %out2 + ret void +} diff --git a/llvm/test/CodeGen/AMDGPU/move-to-valu-atomicrmw-system.ll b/llvm/test/CodeGen/AMDGPU/move-to-valu-atomicrmw-system.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/move-to-valu-atomicrmw-system.ll @@ -0,0 +1,129 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -march=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s +; XUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck --check-prefixes=GCN,VI %s + +; FIXME: broken on VI because flat instructions need to be emitted +; instead of the addr64 equivalents of the _OFFSET variants. + +; Check that moving the pointer out of the resource descriptor to +; vaddr works for atomics.
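For context, here is a minimal sketch, in LLVM IR, of the compare-and-swap loop that AtomicExpand emits when shouldExpandAtomicRMWInIR returns AtomicExpansionKind::CmpXChg for an atomicrmw max; the %atomicrmw.start/%atomicrmw.end blocks (and the .LBB*_1 retry loops) in the checks above and below correspond to this form. The function name @atomic_max_expanded is illustrative only and not part of the test.

; Hypothetical, hand-written equivalent of expanding:
;   %ret = atomicrmw max ptr addrspace(1) %p, i32 %y seq_cst
define i32 @atomic_max_expanded(ptr addrspace(1) %p, i32 %y) {
entry:
  %init = load i32, ptr addrspace(1) %p, align 4
  br label %atomicrmw.start

atomicrmw.start:
  ; Retry until the cmpxchg succeeds, i.e. no other thread wrote the
  ; location between the previous observation and the compare-and-swap.
  %loaded = phi i32 [ %init, %entry ], [ %newloaded, %atomicrmw.start ]
  %cmp = icmp sgt i32 %loaded, %y
  %new = select i1 %cmp, i32 %loaded, i32 %y
  %pair = cmpxchg ptr addrspace(1) %p, i32 %loaded, i32 %new seq_cst seq_cst
  %newloaded = extractvalue { i32, i1 } %pair, 0
  %success = extractvalue { i32, i1 } %pair, 1
  br i1 %success, label %atomicrmw.end, label %atomicrmw.start

atomicrmw.end:
  ret i32 %newloaded
}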
+ +declare i32 @llvm.amdgcn.workitem.id.x() #1 + +define amdgpu_kernel void @atomic_max_i32(ptr addrspace(1) %out, ptr addrspace(1) %in, ptr addrspace(1) %x, i32 %y) #0 { +; GCN-LABEL: atomic_max_i32: +; GCN: ; %bb.0: +; GCN-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9 +; GCN-NEXT: s_mov_b32 s11, 0xf000 +; GCN-NEXT: s_mov_b32 s10, 0 +; GCN-NEXT: v_lshlrev_b32_e32 v1, 3, v0 +; GCN-NEXT: v_mov_b32_e32 v2, 0 +; GCN-NEXT: s_waitcnt lgkmcnt(0) +; GCN-NEXT: s_mov_b64 s[8:9], s[6:7] +; GCN-NEXT: buffer_load_dwordx2 v[1:2], v[1:2], s[8:11], 0 addr64 glc +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_cmp_ne_u32_e32 vcc, 1, v0 +; GCN-NEXT: s_and_saveexec_b64 s[2:3], vcc +; GCN-NEXT: s_cbranch_execz .LBB0_4 +; GCN-NEXT: ; %bb.1: ; %atomic +; GCN-NEXT: s_mov_b32 s8, s10 +; GCN-NEXT: s_mov_b32 s9, s10 +; GCN-NEXT: buffer_load_dword v4, v[1:2], s[8:11], 0 addr64 offset:400 +; GCN-NEXT: s_load_dword s2, s[0:1], 0xf +; GCN-NEXT: s_mov_b64 s[0:1], 0 +; GCN-NEXT: .LBB0_2: ; %atomicrmw.start +; GCN-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN-NEXT: v_max_i32_e32 v3, s2, v4 +; GCN-NEXT: s_waitcnt expcnt(0) +; GCN-NEXT: v_mov_b32_e32 v6, v4 +; GCN-NEXT: v_mov_b32_e32 v5, v3 +; GCN-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN-NEXT: buffer_atomic_cmpswap v[5:6], v[1:2], s[8:11], 0 addr64 offset:400 glc +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: buffer_wbinvl1 +; GCN-NEXT: v_cmp_eq_u32_e32 vcc, v5, v4 +; GCN-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GCN-NEXT: v_mov_b32_e32 v4, v5 +; GCN-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GCN-NEXT: s_cbranch_execnz .LBB0_2 +; GCN-NEXT: ; %bb.3: ; %atomicrmw.end +; GCN-NEXT: s_or_b64 exec, exec, s[0:1] +; GCN-NEXT: s_mov_b32 s7, 0xf000 +; GCN-NEXT: s_mov_b32 s6, -1 +; GCN-NEXT: buffer_store_dword v5, off, s[4:7], 0 +; GCN-NEXT: .LBB0_4: ; %exit +; GCN-NEXT: s_endpgm + %tid = call i32 @llvm.amdgcn.workitem.id.x() + %tid.gep = getelementptr ptr addrspace(1), ptr addrspace(1) %in, i32 %tid + %ptr = load volatile ptr addrspace(1), ptr addrspace(1) %tid.gep + %xor = xor i32 %tid, 1 + %cmp = icmp ne i32 %xor, 0 + br i1 %cmp, label %atomic, label %exit + +atomic: + %gep = getelementptr i32, ptr addrspace(1) %ptr, i32 100 + %ret = atomicrmw max ptr addrspace(1) %gep, i32 %y seq_cst + store i32 %ret, ptr addrspace(1) %out + br label %exit + +exit: + ret void +} + +define amdgpu_kernel void @atomic_max_i32_noret(ptr addrspace(1) %out, ptr addrspace(1) %in, ptr addrspace(1) %x, i32 %y) #0 { +; GCN-LABEL: atomic_max_i32_noret: +; GCN: ; %bb.0: +; GCN-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0xb +; GCN-NEXT: s_mov_b32 s7, 0xf000 +; GCN-NEXT: s_mov_b32 s6, 0 +; GCN-NEXT: v_lshlrev_b32_e32 v1, 3, v0 +; GCN-NEXT: v_mov_b32_e32 v2, 0 +; GCN-NEXT: s_waitcnt lgkmcnt(0) +; GCN-NEXT: buffer_load_dwordx2 v[1:2], v[1:2], s[4:7], 0 addr64 glc +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: v_cmp_ne_u32_e32 vcc, 1, v0 +; GCN-NEXT: s_and_saveexec_b64 s[2:3], vcc +; GCN-NEXT: s_cbranch_execz .LBB1_3 +; GCN-NEXT: ; %bb.1: ; %atomic +; GCN-NEXT: s_mov_b32 s4, s6 +; GCN-NEXT: s_mov_b32 s5, s6 +; GCN-NEXT: buffer_load_dword v4, v[1:2], s[4:7], 0 addr64 offset:400 +; GCN-NEXT: s_load_dword s2, s[0:1], 0xf +; GCN-NEXT: s_mov_b64 s[0:1], 0 +; GCN-NEXT: .LBB1_2: ; %atomicrmw.start +; GCN-NEXT: ; =>This Inner Loop Header: Depth=1 +; GCN-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN-NEXT: v_max_i32_e32 v3, s2, v4 +; GCN-NEXT: s_waitcnt expcnt(0) +; GCN-NEXT: v_mov_b32_e32 v6, v4 +; GCN-NEXT: v_mov_b32_e32 v5, v3 +; GCN-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN-NEXT: 
buffer_atomic_cmpswap v[5:6], v[1:2], s[4:7], 0 addr64 offset:400 glc +; GCN-NEXT: s_waitcnt vmcnt(0) +; GCN-NEXT: buffer_wbinvl1 +; GCN-NEXT: v_cmp_eq_u32_e32 vcc, v5, v4 +; GCN-NEXT: s_or_b64 s[0:1], vcc, s[0:1] +; GCN-NEXT: v_mov_b32_e32 v4, v5 +; GCN-NEXT: s_andn2_b64 exec, exec, s[0:1] +; GCN-NEXT: s_cbranch_execnz .LBB1_2 +; GCN-NEXT: .LBB1_3: ; %exit +; GCN-NEXT: s_endpgm + %tid = call i32 @llvm.amdgcn.workitem.id.x() + %tid.gep = getelementptr ptr addrspace(1), ptr addrspace(1) %in, i32 %tid + %ptr = load volatile ptr addrspace(1), ptr addrspace(1) %tid.gep + %xor = xor i32 %tid, 1 + %cmp = icmp ne i32 %xor, 0 + br i1 %cmp, label %atomic, label %exit + +atomic: + %gep = getelementptr i32, ptr addrspace(1) %ptr, i32 100 + %ret = atomicrmw max ptr addrspace(1) %gep, i32 %y seq_cst + br label %exit + +exit: + ret void +} + +attributes #0 = { nounwind } +attributes #1 = { nounwind readnone } diff --git a/llvm/test/CodeGen/AMDGPU/move-to-valu-atomicrmw.ll b/llvm/test/CodeGen/AMDGPU/move-to-valu-atomicrmw.ll --- a/llvm/test/CodeGen/AMDGPU/move-to-valu-atomicrmw.ll +++ b/llvm/test/CodeGen/AMDGPU/move-to-valu-atomicrmw.ll @@ -33,10 +33,9 @@ ; GCN-NEXT: s_mov_b32 s7, s11 ; GCN-NEXT: s_waitcnt lgkmcnt(0) ; GCN-NEXT: v_mov_b32_e32 v0, s0 -; GCN-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN-NEXT: s_waitcnt lgkmcnt(0) ; GCN-NEXT: buffer_atomic_smax v0, v[1:2], s[8:11], 0 addr64 offset:400 glc ; GCN-NEXT: s_waitcnt vmcnt(0) -; GCN-NEXT: buffer_wbinvl1 ; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0 ; GCN-NEXT: .LBB0_2: ; %exit ; GCN-NEXT: s_endpgm @@ -49,7 +48,7 @@ atomic: %gep = getelementptr i32, ptr addrspace(1) %ptr, i32 100 - %ret = atomicrmw max ptr addrspace(1) %gep, i32 %y seq_cst + %ret = atomicrmw max ptr addrspace(1) %gep, i32 %y syncscope("workgroup") seq_cst store i32 %ret, ptr addrspace(1) %out br label %exit @@ -77,10 +76,8 @@ ; GCN-NEXT: s_mov_b32 s5, s6 ; GCN-NEXT: s_waitcnt lgkmcnt(0) ; GCN-NEXT: v_mov_b32_e32 v0, s0 -; GCN-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) +; GCN-NEXT: s_waitcnt lgkmcnt(0) ; GCN-NEXT: buffer_atomic_smax v0, v[1:2], s[4:7], 0 addr64 offset:400 -; GCN-NEXT: s_waitcnt vmcnt(0) -; GCN-NEXT: buffer_wbinvl1 ; GCN-NEXT: .LBB1_2: ; %exit ; GCN-NEXT: s_endpgm %tid = call i32 @llvm.amdgcn.workitem.id.x() @@ -92,7 +89,7 @@ atomic: %gep = getelementptr i32, ptr addrspace(1) %ptr, i32 100 - %ret = atomicrmw max ptr addrspace(1) %gep, i32 %y seq_cst + %ret = atomicrmw max ptr addrspace(1) %gep, i32 %y syncscope("workgroup") seq_cst br label %exit exit: