diff --git a/llvm/docs/AMDGPUUsage.rst b/llvm/docs/AMDGPUUsage.rst --- a/llvm/docs/AMDGPUUsage.rst +++ b/llvm/docs/AMDGPUUsage.rst @@ -993,6 +993,26 @@ inputs. Backend will optimize out denormal scaling if marked with the :ref:`afn ` flag. + llvm.amdgcn.wave.reduce.umin Performs an arithmetic unsigned min reduction on the unsigned values + provided by each lane in the wavefront. + Intrinsic takes a hint for reduction strategy using second operand + 0: Target default preference, + 1: `Iterative strategy`, and + 2: `DPP`. + If target does not support the DPP operations (e.g. gfx6/7), + reduction will be performed using default iterative strategy. + Intrinsic is currently only implemented for i32. + + llvm.amdgcn.wave.reduce.umax Performs an arithmetic unsigned max reduction on the unsigned values + provided by each lane in the wavefront. + Intrinsic takes a hint for reduction strategy using second operand + 0: Target default preference, + 1: `Iterative strategy`, and + 2: `DPP`. + If target does not support the DPP operations (e.g. gfx6/7), + reduction will be performed using default iterative strategy. + Intrinsic is currently only implemented for i32. + ========================================= ========================================================== .. TODO:: diff --git a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td --- a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td +++ b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td @@ -1928,6 +1928,19 @@ Intrinsic<[llvm_i1_ty], [llvm_anyint_ty], [IntrNoMem, IntrConvergent, IntrWillReturn, IntrNoCallback, IntrNoFree]>; +class AMDGPUWaveReduce : Intrinsic< + [data_ty], + [ + LLVMMatchType<0>, // llvm value to reduce (SGPR/VGPR) + llvm_i32_ty // Reduction Strategy Switch for lowering ( 0: Default, + // 1: Iterative strategy, and + // 2. 
DPP) + ], + [IntrNoMem, IntrConvergent, IntrWillReturn, IntrNoCallback, IntrNoFree, ImmArg<ArgIndex<1>>]>; + +def int_amdgcn_wave_reduce_umin : AMDGPUWaveReduce; +def int_amdgcn_wave_reduce_umax : AMDGPUWaveReduce; + def int_amdgcn_readfirstlane : ClangBuiltin<"__builtin_amdgcn_readfirstlane">, Intrinsic<[llvm_i32_ty], [llvm_i32_ty], diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp --- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp @@ -4514,6 +4514,16 @@ OpdsMapping[2] = AMDGPU::getValueMapping(MaskBank, MaskSize); break; } + case Intrinsic::amdgcn_wave_reduce_umin: + case Intrinsic::amdgcn_wave_reduce_umax: { + unsigned DstSize = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits(); + OpdsMapping[0] = AMDGPU::getValueMapping(AMDGPU::SGPRRegBankID, DstSize); + unsigned OpSize = MRI.getType(MI.getOperand(2).getReg()).getSizeInBits(); + auto regBankID = + isSALUMapping(MI) ? AMDGPU::SGPRRegBankID : AMDGPU::VGPRRegBankID; + OpdsMapping[2] = AMDGPU::getValueMapping(regBankID, OpSize); + break; + } } break; } diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp --- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp +++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp @@ -4067,6 +4067,120 @@ return LoopBB; } +static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI, + MachineBasicBlock &BB, + const GCNSubtarget &ST, + unsigned Opc) { + MachineRegisterInfo &MRI = BB.getParent()->getRegInfo(); + const SIRegisterInfo *TRI = ST.getRegisterInfo(); + const DebugLoc &DL = MI.getDebugLoc(); + const SIInstrInfo *TII = ST.getInstrInfo(); + + // Reduction operations depend on whether the input operand is SGPR or VGPR. 
+ Register SrcReg = MI.getOperand(1).getReg(); + bool isSGPR = TRI->isSGPRClass(MRI.getRegClass(SrcReg)); + Register DstReg = MI.getOperand(0).getReg(); + MachineBasicBlock *RetBB = nullptr; + if (isSGPR) { + // These operations with a uniform value i.e. SGPR are idempotent. + // Reduced value will be same as given sgpr. + BuildMI(BB, MI, DL, TII->get(AMDGPU::S_MOV_B32), DstReg).addReg(SrcReg); + RetBB = &BB; + } else { + // TODO: Implement DPP Strategy and switch based on immediate strategy + // operand. For now, for all the cases (default, Iterative and DPP we use + // iterative approach by default.) + + // To reduce the VGPR using iterative approach, we need to iterate + // over all the active lanes. Lowering consists of ComputeLoop, + // which iterate over only active lanes. We use copy of EXEC register + // as induction variable and every active lane modifies it using bitset0 + // so that we will get the next active lane for next iteration. + MachineBasicBlock::iterator I = BB.end(); + Register SrcReg = MI.getOperand(1).getReg(); + + // Create Control flow for loop + // Split MI's Machine Basic block into For loop + auto [ComputeLoop, ComputeEnd] = splitBlockForLoop(MI, BB, true); + + // Create virtual registers required for lowering. + const TargetRegisterClass *WaveMaskRegClass = TRI->getWaveMaskRegClass(); + const TargetRegisterClass *DstRegClass = MRI.getRegClass(DstReg); + Register LoopIterator = MRI.createVirtualRegister(WaveMaskRegClass); + Register InitalValReg = MRI.createVirtualRegister(DstRegClass); + + Register AccumulatorReg = MRI.createVirtualRegister(DstRegClass); + Register ActiveBitsReg = MRI.createVirtualRegister(WaveMaskRegClass); + Register NewActiveBitsReg = MRI.createVirtualRegister(WaveMaskRegClass); + + Register FF1Reg = MRI.createVirtualRegister(DstRegClass); + Register LaneValueReg = MRI.createVirtualRegister(DstRegClass); + + bool IsWave32 = ST.isWave32(); + unsigned MovOpc = IsWave32 ? 
AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64; + unsigned ExecReg = IsWave32 ? AMDGPU::EXEC_LO : AMDGPU::EXEC; + + // Create initial values of induction variable from Exec, Accumulator and + // insert branch instr to newly created ComputeBlock + uint32_t InitalValue = + (Opc == AMDGPU::S_MIN_U32) ? std::numeric_limits<uint32_t>::max() : 0; + auto TmpSReg = + BuildMI(BB, I, DL, TII->get(MovOpc), LoopIterator).addReg(ExecReg); + BuildMI(BB, I, DL, TII->get(AMDGPU::S_MOV_B32), InitalValReg) + .addImm(InitalValue); + BuildMI(BB, I, DL, TII->get(AMDGPU::S_BRANCH)).addMBB(ComputeLoop); + + // Start constructing ComputeLoop + I = ComputeLoop->end(); + auto Accumulator = + BuildMI(*ComputeLoop, I, DL, TII->get(AMDGPU::PHI), AccumulatorReg) + .addReg(InitalValReg) + .addMBB(&BB); + auto ActiveBits = + BuildMI(*ComputeLoop, I, DL, TII->get(AMDGPU::PHI), ActiveBitsReg) + .addReg(TmpSReg->getOperand(0).getReg()) + .addMBB(&BB); + + // Perform the computations + unsigned SFFOpc = IsWave32 ? AMDGPU::S_FF1_I32_B32 : AMDGPU::S_FF1_I32_B64; + auto FF1 = BuildMI(*ComputeLoop, I, DL, TII->get(SFFOpc), FF1Reg) + .addReg(ActiveBits->getOperand(0).getReg()); + auto LaneValue = BuildMI(*ComputeLoop, I, DL, + TII->get(AMDGPU::V_READLANE_B32), LaneValueReg) + .addReg(SrcReg) + .addReg(FF1->getOperand(0).getReg()); + auto NewAccumulator = BuildMI(*ComputeLoop, I, DL, TII->get(Opc), DstReg) + .addReg(Accumulator->getOperand(0).getReg()) + .addReg(LaneValue->getOperand(0).getReg()); + + // Manipulate the iterator to get the next active lane + unsigned BITSETOpc = + IsWave32 ? 
AMDGPU::S_BITSET0_B32 : AMDGPU::S_BITSET0_B64; + auto NewActiveBits = + BuildMI(*ComputeLoop, I, DL, TII->get(BITSETOpc), NewActiveBitsReg) + .addReg(FF1->getOperand(0).getReg()) + .addReg(ActiveBits->getOperand(0).getReg()); + + // Add phi nodes + Accumulator.addReg(NewAccumulator->getOperand(0).getReg()) + .addMBB(ComputeLoop); + ActiveBits.addReg(NewActiveBits->getOperand(0).getReg()) + .addMBB(ComputeLoop); + + // Creating branching + unsigned CMPOpc = IsWave32 ? AMDGPU::S_CMP_LG_U32 : AMDGPU::S_CMP_LG_U64; + BuildMI(*ComputeLoop, I, DL, TII->get(CMPOpc)) + .addReg(NewActiveBits->getOperand(0).getReg()) + .addImm(0); + BuildMI(*ComputeLoop, I, DL, TII->get(AMDGPU::S_CBRANCH_SCC1)) + .addMBB(ComputeLoop); + + RetBB = ComputeEnd; + } + MI.eraseFromParent(); + return RetBB; +} + MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter( MachineInstr &MI, MachineBasicBlock *BB) const { @@ -4075,6 +4189,10 @@ SIMachineFunctionInfo *MFI = MF->getInfo(); switch (MI.getOpcode()) { + case AMDGPU::WAVE_REDUCE_UMIN_PSEUDO_U32: + return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::S_MIN_U32); + case AMDGPU::WAVE_REDUCE_UMAX_PSEUDO_U32: + return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::S_MAX_U32); case AMDGPU::S_UADDO_PSEUDO: case AMDGPU::S_USUBO_PSEUDO: { const DebugLoc &DL = MI.getDebugLoc(); diff --git a/llvm/lib/Target/AMDGPU/SIInstructions.td b/llvm/lib/Target/AMDGPU/SIInstructions.td --- a/llvm/lib/Target/AMDGPU/SIInstructions.td +++ b/llvm/lib/Target/AMDGPU/SIInstructions.td @@ -258,6 +258,18 @@ } } // End Defs = [SCC] +let usesCustomInserter = 1, hasSideEffects = 0, mayLoad = 0, mayStore = 0, Uses = [EXEC] in { + def WAVE_REDUCE_UMIN_PSEUDO_U32 : VPseudoInstSI <(outs SGPR_32:$sdst), + (ins VSrc_b32: $src, VSrc_b32:$strategy), + [(set i32:$sdst, (int_amdgcn_wave_reduce_umin i32:$src, i32:$strategy))]> { + } + + def WAVE_REDUCE_UMAX_PSEUDO_U32 : VPseudoInstSI <(outs SGPR_32:$sdst), + (ins VSrc_b32: $src, VSrc_b32:$strategy), + [(set i32:$sdst, 
(int_amdgcn_wave_reduce_umax i32:$src, i32:$strategy))]> { + } +} + let usesCustomInserter = 1, Defs = [VCC, EXEC] in { def V_ADD_U64_PSEUDO : VPseudoInstSI < (outs VReg_64:$vdst), (ins VSrc_b64:$src0, VSrc_b64:$src1), diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umax.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umax.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umax.ll @@ -0,0 +1,1016 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3 +; RUN: llc -march=amdgcn -mcpu=tonga -global-isel=0 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX8DAGISEL %s +; RUN: llc -march=amdgcn -mcpu=tonga -global-isel=1 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX8GISEL %s +; RUN: llc -march=amdgcn -mcpu=gfx900 -global-isel=0 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX9DAGISEL %s +; RUN: llc -march=amdgcn -mcpu=gfx900 -global-isel=1 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX9GISEL %s +; RUN: llc -march=amdgcn -mcpu=gfx1010 -global-isel=0 -mattr=+wavefrontsize64 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX10DAGISEL,GFX1064DAGISEL %s +; RUN: llc -march=amdgcn -mcpu=gfx1010 -global-isel=1 -mattr=+wavefrontsize64 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX10GISEL,GFX1064GISEL %s +; RUN: llc -march=amdgcn -mcpu=gfx1010 -global-isel=0 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX10DAGISEL,GFX1032DAGISEL %s +; RUN: llc -march=amdgcn -mcpu=gfx1010 -global-isel=1 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX10GISEL,GFX1032GISEL %s +; RUN: llc -march=amdgcn -mcpu=gfx1100 -global-isel=0 -mattr=+wavefrontsize64 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX11DAGISEL,GFX1164DAGISEL %s +; RUN: llc -march=amdgcn -mcpu=gfx1100 -global-isel=1 -mattr=+wavefrontsize64 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX11GISEL,GFX1164GISEL %s +; RUN: llc -march=amdgcn 
-mcpu=gfx1100 -global-isel=0 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX11DAGISEL,GFX1132DAGISEL %s +; RUN: llc -march=amdgcn -mcpu=gfx1100 -global-isel=1 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX11GISEL,GFX1132GISEL %s + +declare i32 @llvm.amdgcn.wave.reduce.umax.i32(i32, i32 immarg) +declare i32 @llvm.amdgcn.workitem.id.x() + +define amdgpu_kernel void @uniform_value(ptr addrspace(1) %out, i32 %in) { +; GFX8DAGISEL-LABEL: uniform_value: +; GFX8DAGISEL: ; %bb.0: ; %entry +; GFX8DAGISEL-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24 +; GFX8DAGISEL-NEXT: s_load_dword s0, s[0:1], 0x2c +; GFX8DAGISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX8DAGISEL-NEXT: v_mov_b32_e32 v0, s2 +; GFX8DAGISEL-NEXT: v_mov_b32_e32 v1, s3 +; GFX8DAGISEL-NEXT: v_mov_b32_e32 v2, s0 +; GFX8DAGISEL-NEXT: flat_store_dword v[0:1], v2 +; GFX8DAGISEL-NEXT: s_endpgm +; +; GFX8GISEL-LABEL: uniform_value: +; GFX8GISEL: ; %bb.0: ; %entry +; GFX8GISEL-NEXT: s_load_dword s2, s[0:1], 0x2c +; GFX8GISEL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX8GISEL-NEXT: v_mov_b32_e32 v2, s2 +; GFX8GISEL-NEXT: v_mov_b32_e32 v0, s0 +; GFX8GISEL-NEXT: v_mov_b32_e32 v1, s1 +; GFX8GISEL-NEXT: flat_store_dword v[0:1], v2 +; GFX8GISEL-NEXT: s_endpgm +; +; GFX9DAGISEL-LABEL: uniform_value: +; GFX9DAGISEL: ; %bb.0: ; %entry +; GFX9DAGISEL-NEXT: s_load_dword s4, s[0:1], 0x2c +; GFX9DAGISEL-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24 +; GFX9DAGISEL-NEXT: v_mov_b32_e32 v0, 0 +; GFX9DAGISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX9DAGISEL-NEXT: v_mov_b32_e32 v1, s4 +; GFX9DAGISEL-NEXT: global_store_dword v0, v1, s[2:3] +; GFX9DAGISEL-NEXT: s_endpgm +; +; GFX9GISEL-LABEL: uniform_value: +; GFX9GISEL: ; %bb.0: ; %entry +; GFX9GISEL-NEXT: s_load_dword s4, s[0:1], 0x2c +; GFX9GISEL-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24 +; GFX9GISEL-NEXT: v_mov_b32_e32 v1, 0 +; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX9GISEL-NEXT: v_mov_b32_e32 v0, s4 +; GFX9GISEL-NEXT: global_store_dword v1, 
v0, s[2:3] +; GFX9GISEL-NEXT: s_endpgm +; +; GFX10DAGISEL-LABEL: uniform_value: +; GFX10DAGISEL: ; %bb.0: ; %entry +; GFX10DAGISEL-NEXT: s_clause 0x1 +; GFX10DAGISEL-NEXT: s_load_dword s4, s[0:1], 0x2c +; GFX10DAGISEL-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24 +; GFX10DAGISEL-NEXT: v_mov_b32_e32 v0, 0 +; GFX10DAGISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX10DAGISEL-NEXT: v_mov_b32_e32 v1, s4 +; GFX10DAGISEL-NEXT: global_store_dword v0, v1, s[2:3] +; GFX10DAGISEL-NEXT: s_endpgm +; +; GFX10GISEL-LABEL: uniform_value: +; GFX10GISEL: ; %bb.0: ; %entry +; GFX10GISEL-NEXT: s_clause 0x1 +; GFX10GISEL-NEXT: s_load_dword s4, s[0:1], 0x2c +; GFX10GISEL-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24 +; GFX10GISEL-NEXT: v_mov_b32_e32 v1, 0 +; GFX10GISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX10GISEL-NEXT: v_mov_b32_e32 v0, s4 +; GFX10GISEL-NEXT: global_store_dword v1, v0, s[2:3] +; GFX10GISEL-NEXT: s_endpgm +; +; GFX1164DAGISEL-LABEL: uniform_value: +; GFX1164DAGISEL: ; %bb.0: ; %entry +; GFX1164DAGISEL-NEXT: s_clause 0x1 +; GFX1164DAGISEL-NEXT: s_load_b32 s2, s[0:1], 0x2c +; GFX1164DAGISEL-NEXT: s_load_b64 s[0:1], s[0:1], 0x24 +; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v0, 0 +; GFX1164DAGISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v1, s2 +; GFX1164DAGISEL-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX1164DAGISEL-NEXT: s_nop 0 +; GFX1164DAGISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX1164DAGISEL-NEXT: s_endpgm +; +; GFX1164GISEL-LABEL: uniform_value: +; GFX1164GISEL: ; %bb.0: ; %entry +; GFX1164GISEL-NEXT: s_clause 0x1 +; GFX1164GISEL-NEXT: s_load_b32 s2, s[0:1], 0x2c +; GFX1164GISEL-NEXT: s_load_b64 s[0:1], s[0:1], 0x24 +; GFX1164GISEL-NEXT: v_mov_b32_e32 v1, 0 +; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX1164GISEL-NEXT: v_mov_b32_e32 v0, s2 +; GFX1164GISEL-NEXT: global_store_b32 v1, v0, s[0:1] +; GFX1164GISEL-NEXT: s_nop 0 +; GFX1164GISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX1164GISEL-NEXT: s_endpgm +; +; GFX1132DAGISEL-LABEL: uniform_value: +; 
GFX1132DAGISEL: ; %bb.0: ; %entry +; GFX1132DAGISEL-NEXT: s_clause 0x1 +; GFX1132DAGISEL-NEXT: s_load_b32 s2, s[0:1], 0x2c +; GFX1132DAGISEL-NEXT: s_load_b64 s[0:1], s[0:1], 0x24 +; GFX1132DAGISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2 +; GFX1132DAGISEL-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX1132DAGISEL-NEXT: s_nop 0 +; GFX1132DAGISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX1132DAGISEL-NEXT: s_endpgm +; +; GFX1132GISEL-LABEL: uniform_value: +; GFX1132GISEL: ; %bb.0: ; %entry +; GFX1132GISEL-NEXT: s_clause 0x1 +; GFX1132GISEL-NEXT: s_load_b32 s2, s[0:1], 0x2c +; GFX1132GISEL-NEXT: s_load_b64 s[0:1], s[0:1], 0x24 +; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX1132GISEL-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v0, s2 +; GFX1132GISEL-NEXT: global_store_b32 v1, v0, s[0:1] +; GFX1132GISEL-NEXT: s_nop 0 +; GFX1132GISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX1132GISEL-NEXT: s_endpgm +entry: + %result = call i32 @llvm.amdgcn.wave.reduce.umax.i32(i32 %in, i32 1) + store i32 %result, ptr addrspace(1) %out + ret void +} + +define amdgpu_kernel void @const_value(ptr addrspace(1) %out) { +; GFX8DAGISEL-LABEL: const_value: +; GFX8DAGISEL: ; %bb.0: ; %entry +; GFX8DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX8DAGISEL-NEXT: v_mov_b32_e32 v2, 0x7b +; GFX8DAGISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX8DAGISEL-NEXT: v_mov_b32_e32 v0, s0 +; GFX8DAGISEL-NEXT: v_mov_b32_e32 v1, s1 +; GFX8DAGISEL-NEXT: flat_store_dword v[0:1], v2 +; GFX8DAGISEL-NEXT: s_endpgm +; +; GFX8GISEL-LABEL: const_value: +; GFX8GISEL: ; %bb.0: ; %entry +; GFX8GISEL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX8GISEL-NEXT: v_mov_b32_e32 v2, 0x7b +; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX8GISEL-NEXT: v_mov_b32_e32 v0, s0 +; GFX8GISEL-NEXT: v_mov_b32_e32 v1, s1 +; GFX8GISEL-NEXT: flat_store_dword v[0:1], v2 +; GFX8GISEL-NEXT: s_endpgm +; +; GFX9DAGISEL-LABEL: const_value: +; GFX9DAGISEL: ; %bb.0: ; %entry +; 
GFX9DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX9DAGISEL-NEXT: v_mov_b32_e32 v0, 0 +; GFX9DAGISEL-NEXT: v_mov_b32_e32 v1, 0x7b +; GFX9DAGISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX9DAGISEL-NEXT: global_store_dword v0, v1, s[0:1] +; GFX9DAGISEL-NEXT: s_endpgm +; +; GFX9GISEL-LABEL: const_value: +; GFX9GISEL: ; %bb.0: ; %entry +; GFX9GISEL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX9GISEL-NEXT: v_mov_b32_e32 v0, 0x7b +; GFX9GISEL-NEXT: v_mov_b32_e32 v1, 0 +; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX9GISEL-NEXT: global_store_dword v1, v0, s[0:1] +; GFX9GISEL-NEXT: s_endpgm +; +; GFX10DAGISEL-LABEL: const_value: +; GFX10DAGISEL: ; %bb.0: ; %entry +; GFX10DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX10DAGISEL-NEXT: v_mov_b32_e32 v0, 0 +; GFX10DAGISEL-NEXT: v_mov_b32_e32 v1, 0x7b +; GFX10DAGISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX10DAGISEL-NEXT: global_store_dword v0, v1, s[0:1] +; GFX10DAGISEL-NEXT: s_endpgm +; +; GFX10GISEL-LABEL: const_value: +; GFX10GISEL: ; %bb.0: ; %entry +; GFX10GISEL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX10GISEL-NEXT: v_mov_b32_e32 v0, 0x7b +; GFX10GISEL-NEXT: v_mov_b32_e32 v1, 0 +; GFX10GISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX10GISEL-NEXT: global_store_dword v1, v0, s[0:1] +; GFX10GISEL-NEXT: s_endpgm +; +; GFX1164DAGISEL-LABEL: const_value: +; GFX1164DAGISEL: ; %bb.0: ; %entry +; GFX1164DAGISEL-NEXT: s_load_b64 s[0:1], s[0:1], 0x24 +; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v0, 0 +; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v1, 0x7b +; GFX1164DAGISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX1164DAGISEL-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX1164DAGISEL-NEXT: s_nop 0 +; GFX1164DAGISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX1164DAGISEL-NEXT: s_endpgm +; +; GFX1164GISEL-LABEL: const_value: +; GFX1164GISEL: ; %bb.0: ; %entry +; GFX1164GISEL-NEXT: s_load_b64 s[0:1], s[0:1], 0x24 +; GFX1164GISEL-NEXT: v_mov_b32_e32 v0, 0x7b +; GFX1164GISEL-NEXT: v_mov_b32_e32 v1, 0 +; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0) +; 
GFX1164GISEL-NEXT: global_store_b32 v1, v0, s[0:1] +; GFX1164GISEL-NEXT: s_nop 0 +; GFX1164GISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX1164GISEL-NEXT: s_endpgm +; +; GFX1132DAGISEL-LABEL: const_value: +; GFX1132DAGISEL: ; %bb.0: ; %entry +; GFX1132DAGISEL-NEXT: s_load_b64 s[0:1], s[0:1], 0x24 +; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x7b +; GFX1132DAGISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX1132DAGISEL-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX1132DAGISEL-NEXT: s_nop 0 +; GFX1132DAGISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX1132DAGISEL-NEXT: s_endpgm +; +; GFX1132GISEL-LABEL: const_value: +; GFX1132GISEL: ; %bb.0: ; %entry +; GFX1132GISEL-NEXT: s_load_b64 s[0:1], s[0:1], 0x24 +; GFX1132GISEL-NEXT: v_dual_mov_b32 v0, 0x7b :: v_dual_mov_b32 v1, 0 +; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX1132GISEL-NEXT: global_store_b32 v1, v0, s[0:1] +; GFX1132GISEL-NEXT: s_nop 0 +; GFX1132GISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX1132GISEL-NEXT: s_endpgm +entry: + %result = call i32 @llvm.amdgcn.wave.reduce.umax.i32(i32 123, i32 1) + store i32 %result, ptr addrspace(1) %out + ret void +} + +define amdgpu_kernel void @poison_value(ptr addrspace(1) %out, i32 %in) { +; GFX8DAGISEL-LABEL: poison_value: +; GFX8DAGISEL: ; %bb.0: ; %entry +; GFX8DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX8DAGISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX8DAGISEL-NEXT: v_mov_b32_e32 v0, s0 +; GFX8DAGISEL-NEXT: v_mov_b32_e32 v1, s1 +; GFX8DAGISEL-NEXT: flat_store_dword v[0:1], v0 +; GFX8DAGISEL-NEXT: s_endpgm +; +; GFX8GISEL-LABEL: poison_value: +; GFX8GISEL: ; %bb.0: ; %entry +; GFX8GISEL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX8GISEL-NEXT: v_mov_b32_e32 v0, s0 +; GFX8GISEL-NEXT: v_mov_b32_e32 v1, s1 +; GFX8GISEL-NEXT: flat_store_dword v[0:1], v0 +; GFX8GISEL-NEXT: s_endpgm +; +; GFX9DAGISEL-LABEL: poison_value: +; GFX9DAGISEL: ; %bb.0: ; %entry +; GFX9DAGISEL-NEXT: s_load_dwordx2 s[0:1], 
s[0:1], 0x24 +; GFX9DAGISEL-NEXT: v_mov_b32_e32 v0, 0 +; GFX9DAGISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX9DAGISEL-NEXT: global_store_dword v0, v0, s[0:1] +; GFX9DAGISEL-NEXT: s_endpgm +; +; GFX9GISEL-LABEL: poison_value: +; GFX9GISEL: ; %bb.0: ; %entry +; GFX9GISEL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX9GISEL-NEXT: v_mov_b32_e32 v0, 0 +; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX9GISEL-NEXT: global_store_dword v0, v0, s[0:1] +; GFX9GISEL-NEXT: s_endpgm +; +; GFX10DAGISEL-LABEL: poison_value: +; GFX10DAGISEL: ; %bb.0: ; %entry +; GFX10DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX10DAGISEL-NEXT: v_mov_b32_e32 v0, 0 +; GFX10DAGISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX10DAGISEL-NEXT: global_store_dword v0, v0, s[0:1] +; GFX10DAGISEL-NEXT: s_endpgm +; +; GFX10GISEL-LABEL: poison_value: +; GFX10GISEL: ; %bb.0: ; %entry +; GFX10GISEL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX10GISEL-NEXT: v_mov_b32_e32 v0, 0 +; GFX10GISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX10GISEL-NEXT: global_store_dword v0, v0, s[0:1] +; GFX10GISEL-NEXT: s_endpgm +; +; GFX11DAGISEL-LABEL: poison_value: +; GFX11DAGISEL: ; %bb.0: ; %entry +; GFX11DAGISEL-NEXT: s_load_b64 s[0:1], s[0:1], 0x24 +; GFX11DAGISEL-NEXT: v_mov_b32_e32 v0, 0 +; GFX11DAGISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX11DAGISEL-NEXT: global_store_b32 v0, v0, s[0:1] +; GFX11DAGISEL-NEXT: s_nop 0 +; GFX11DAGISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11DAGISEL-NEXT: s_endpgm +; +; GFX11GISEL-LABEL: poison_value: +; GFX11GISEL: ; %bb.0: ; %entry +; GFX11GISEL-NEXT: s_load_b64 s[0:1], s[0:1], 0x24 +; GFX11GISEL-NEXT: v_mov_b32_e32 v0, 0 +; GFX11GISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX11GISEL-NEXT: global_store_b32 v0, v0, s[0:1] +; GFX11GISEL-NEXT: s_nop 0 +; GFX11GISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11GISEL-NEXT: s_endpgm +entry: + %result = call i32 @llvm.amdgcn.wave.reduce.umax.i32(i32 poison, i32 1) + store i32 %result, ptr addrspace(1) %out + ret void +} + +define amdgpu_kernel void 
@divergent_value(ptr addrspace(1) %out, i32 %in) { +; GFX8DAGISEL-LABEL: divergent_value: +; GFX8DAGISEL: ; %bb.0: ; %entry +; GFX8DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX8DAGISEL-NEXT: s_mov_b64 s[2:3], exec +; GFX8DAGISEL-NEXT: s_mov_b32 s4, 0 +; GFX8DAGISEL-NEXT: .LBB3_1: ; =>This Inner Loop Header: Depth=1 +; GFX8DAGISEL-NEXT: s_ff1_i32_b64 s5, s[2:3] +; GFX8DAGISEL-NEXT: v_readlane_b32 s6, v0, s5 +; GFX8DAGISEL-NEXT: s_bitset0_b64 s[2:3], s5 +; GFX8DAGISEL-NEXT: s_max_u32 s4, s4, s6 +; GFX8DAGISEL-NEXT: s_cmp_lg_u64 s[2:3], 0 +; GFX8DAGISEL-NEXT: s_cbranch_scc1 .LBB3_1 +; GFX8DAGISEL-NEXT: ; %bb.2: +; GFX8DAGISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX8DAGISEL-NEXT: v_mov_b32_e32 v0, s0 +; GFX8DAGISEL-NEXT: v_mov_b32_e32 v1, s1 +; GFX8DAGISEL-NEXT: v_mov_b32_e32 v2, s4 +; GFX8DAGISEL-NEXT: flat_store_dword v[0:1], v2 +; GFX8DAGISEL-NEXT: s_endpgm +; +; GFX8GISEL-LABEL: divergent_value: +; GFX8GISEL: ; %bb.0: ; %entry +; GFX8GISEL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX8GISEL-NEXT: s_mov_b64 s[2:3], exec +; GFX8GISEL-NEXT: s_mov_b32 s4, 0 +; GFX8GISEL-NEXT: .LBB3_1: ; =>This Inner Loop Header: Depth=1 +; GFX8GISEL-NEXT: s_ff1_i32_b64 s5, s[2:3] +; GFX8GISEL-NEXT: v_readlane_b32 s6, v0, s5 +; GFX8GISEL-NEXT: s_bitset0_b64 s[2:3], s5 +; GFX8GISEL-NEXT: s_max_u32 s4, s4, s6 +; GFX8GISEL-NEXT: s_cmp_lg_u64 s[2:3], 0 +; GFX8GISEL-NEXT: s_cbranch_scc1 .LBB3_1 +; GFX8GISEL-NEXT: ; %bb.2: +; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX8GISEL-NEXT: v_mov_b32_e32 v0, s0 +; GFX8GISEL-NEXT: v_mov_b32_e32 v2, s4 +; GFX8GISEL-NEXT: v_mov_b32_e32 v1, s1 +; GFX8GISEL-NEXT: flat_store_dword v[0:1], v2 +; GFX8GISEL-NEXT: s_endpgm +; +; GFX9DAGISEL-LABEL: divergent_value: +; GFX9DAGISEL: ; %bb.0: ; %entry +; GFX9DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX9DAGISEL-NEXT: v_mov_b32_e32 v1, 0 +; GFX9DAGISEL-NEXT: s_mov_b64 s[2:3], exec +; GFX9DAGISEL-NEXT: s_mov_b32 s4, 0 +; GFX9DAGISEL-NEXT: .LBB3_1: ; =>This Inner Loop Header: Depth=1 +; 
GFX9DAGISEL-NEXT: s_ff1_i32_b64 s5, s[2:3] +; GFX9DAGISEL-NEXT: v_readlane_b32 s6, v0, s5 +; GFX9DAGISEL-NEXT: s_bitset0_b64 s[2:3], s5 +; GFX9DAGISEL-NEXT: s_max_u32 s4, s4, s6 +; GFX9DAGISEL-NEXT: s_cmp_lg_u64 s[2:3], 0 +; GFX9DAGISEL-NEXT: s_cbranch_scc1 .LBB3_1 +; GFX9DAGISEL-NEXT: ; %bb.2: +; GFX9DAGISEL-NEXT: v_mov_b32_e32 v0, s4 +; GFX9DAGISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX9DAGISEL-NEXT: global_store_dword v1, v0, s[0:1] +; GFX9DAGISEL-NEXT: s_endpgm +; +; GFX9GISEL-LABEL: divergent_value: +; GFX9GISEL: ; %bb.0: ; %entry +; GFX9GISEL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX9GISEL-NEXT: s_mov_b64 s[2:3], exec +; GFX9GISEL-NEXT: s_mov_b32 s4, 0 +; GFX9GISEL-NEXT: .LBB3_1: ; =>This Inner Loop Header: Depth=1 +; GFX9GISEL-NEXT: s_ff1_i32_b64 s5, s[2:3] +; GFX9GISEL-NEXT: v_readlane_b32 s6, v0, s5 +; GFX9GISEL-NEXT: s_bitset0_b64 s[2:3], s5 +; GFX9GISEL-NEXT: s_max_u32 s4, s4, s6 +; GFX9GISEL-NEXT: s_cmp_lg_u64 s[2:3], 0 +; GFX9GISEL-NEXT: s_cbranch_scc1 .LBB3_1 +; GFX9GISEL-NEXT: ; %bb.2: +; GFX9GISEL-NEXT: v_mov_b32_e32 v0, s4 +; GFX9GISEL-NEXT: v_mov_b32_e32 v1, 0 +; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX9GISEL-NEXT: global_store_dword v1, v0, s[0:1] +; GFX9GISEL-NEXT: s_endpgm +; +; GFX1064DAGISEL-LABEL: divergent_value: +; GFX1064DAGISEL: ; %bb.0: ; %entry +; GFX1064DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v1, 0 +; GFX1064DAGISEL-NEXT: s_mov_b64 s[2:3], exec +; GFX1064DAGISEL-NEXT: s_mov_b32 s4, 0 +; GFX1064DAGISEL-NEXT: .LBB3_1: ; =>This Inner Loop Header: Depth=1 +; GFX1064DAGISEL-NEXT: s_ff1_i32_b64 s5, s[2:3] +; GFX1064DAGISEL-NEXT: v_readlane_b32 s6, v0, s5 +; GFX1064DAGISEL-NEXT: s_bitset0_b64 s[2:3], s5 +; GFX1064DAGISEL-NEXT: s_max_u32 s4, s4, s6 +; GFX1064DAGISEL-NEXT: s_cmp_lg_u64 s[2:3], 0 +; GFX1064DAGISEL-NEXT: s_cbranch_scc1 .LBB3_1 +; GFX1064DAGISEL-NEXT: ; %bb.2: +; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v0, s4 +; GFX1064DAGISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX1064DAGISEL-NEXT: 
global_store_dword v1, v0, s[0:1] +; GFX1064DAGISEL-NEXT: s_endpgm +; +; GFX1064GISEL-LABEL: divergent_value: +; GFX1064GISEL: ; %bb.0: ; %entry +; GFX1064GISEL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX1064GISEL-NEXT: s_mov_b64 s[2:3], exec +; GFX1064GISEL-NEXT: s_mov_b32 s4, 0 +; GFX1064GISEL-NEXT: .LBB3_1: ; =>This Inner Loop Header: Depth=1 +; GFX1064GISEL-NEXT: s_ff1_i32_b64 s5, s[2:3] +; GFX1064GISEL-NEXT: v_readlane_b32 s6, v0, s5 +; GFX1064GISEL-NEXT: s_bitset0_b64 s[2:3], s5 +; GFX1064GISEL-NEXT: s_max_u32 s4, s4, s6 +; GFX1064GISEL-NEXT: s_cmp_lg_u64 s[2:3], 0 +; GFX1064GISEL-NEXT: s_cbranch_scc1 .LBB3_1 +; GFX1064GISEL-NEXT: ; %bb.2: +; GFX1064GISEL-NEXT: v_mov_b32_e32 v0, s4 +; GFX1064GISEL-NEXT: v_mov_b32_e32 v1, 0 +; GFX1064GISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX1064GISEL-NEXT: global_store_dword v1, v0, s[0:1] +; GFX1064GISEL-NEXT: s_endpgm +; +; GFX1032DAGISEL-LABEL: divergent_value: +; GFX1032DAGISEL: ; %bb.0: ; %entry +; GFX1032DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v1, 0 +; GFX1032DAGISEL-NEXT: s_mov_b32 s3, exec_lo +; GFX1032DAGISEL-NEXT: s_mov_b32 s2, 0 +; GFX1032DAGISEL-NEXT: .LBB3_1: ; =>This Inner Loop Header: Depth=1 +; GFX1032DAGISEL-NEXT: s_ff1_i32_b32 s4, s3 +; GFX1032DAGISEL-NEXT: v_readlane_b32 s5, v0, s4 +; GFX1032DAGISEL-NEXT: s_bitset0_b32 s3, s4 +; GFX1032DAGISEL-NEXT: s_max_u32 s2, s2, s5 +; GFX1032DAGISEL-NEXT: s_cmp_lg_u32 s3, 0 +; GFX1032DAGISEL-NEXT: s_cbranch_scc1 .LBB3_1 +; GFX1032DAGISEL-NEXT: ; %bb.2: +; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v0, s2 +; GFX1032DAGISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX1032DAGISEL-NEXT: global_store_dword v1, v0, s[0:1] +; GFX1032DAGISEL-NEXT: s_endpgm +; +; GFX1032GISEL-LABEL: divergent_value: +; GFX1032GISEL: ; %bb.0: ; %entry +; GFX1032GISEL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX1032GISEL-NEXT: s_mov_b32 s3, exec_lo +; GFX1032GISEL-NEXT: s_mov_b32 s2, 0 +; GFX1032GISEL-NEXT: .LBB3_1: ; =>This Inner Loop Header: Depth=1 +; 
GFX1032GISEL-NEXT: s_ff1_i32_b32 s4, s3 +; GFX1032GISEL-NEXT: v_readlane_b32 s5, v0, s4 +; GFX1032GISEL-NEXT: s_bitset0_b32 s3, s4 +; GFX1032GISEL-NEXT: s_max_u32 s2, s2, s5 +; GFX1032GISEL-NEXT: s_cmp_lg_u32 s3, 0 +; GFX1032GISEL-NEXT: s_cbranch_scc1 .LBB3_1 +; GFX1032GISEL-NEXT: ; %bb.2: +; GFX1032GISEL-NEXT: v_mov_b32_e32 v0, s2 +; GFX1032GISEL-NEXT: v_mov_b32_e32 v1, 0 +; GFX1032GISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX1032GISEL-NEXT: global_store_dword v1, v0, s[0:1] +; GFX1032GISEL-NEXT: s_endpgm +; +; GFX1164DAGISEL-LABEL: divergent_value: +; GFX1164DAGISEL: ; %bb.0: ; %entry +; GFX1164DAGISEL-NEXT: s_load_b64 s[0:1], s[0:1], 0x24 +; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v1, 0 +; GFX1164DAGISEL-NEXT: s_mov_b64 s[2:3], exec +; GFX1164DAGISEL-NEXT: s_mov_b32 s4, 0 +; GFX1164DAGISEL-NEXT: .LBB3_1: ; =>This Inner Loop Header: Depth=1 +; GFX1164DAGISEL-NEXT: s_ctz_i32_b64 s5, s[2:3] +; GFX1164DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1164DAGISEL-NEXT: v_readlane_b32 s6, v0, s5 +; GFX1164DAGISEL-NEXT: s_bitset0_b64 s[2:3], s5 +; GFX1164DAGISEL-NEXT: s_max_u32 s4, s4, s6 +; GFX1164DAGISEL-NEXT: s_cmp_lg_u64 s[2:3], 0 +; GFX1164DAGISEL-NEXT: s_cbranch_scc1 .LBB3_1 +; GFX1164DAGISEL-NEXT: ; %bb.2: +; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v0, s4 +; GFX1164DAGISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX1164DAGISEL-NEXT: global_store_b32 v1, v0, s[0:1] +; GFX1164DAGISEL-NEXT: s_nop 0 +; GFX1164DAGISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX1164DAGISEL-NEXT: s_endpgm +; +; GFX1164GISEL-LABEL: divergent_value: +; GFX1164GISEL: ; %bb.0: ; %entry +; GFX1164GISEL-NEXT: s_load_b64 s[0:1], s[0:1], 0x24 +; GFX1164GISEL-NEXT: s_mov_b64 s[2:3], exec +; GFX1164GISEL-NEXT: s_mov_b32 s4, 0 +; GFX1164GISEL-NEXT: .LBB3_1: ; =>This Inner Loop Header: Depth=1 +; GFX1164GISEL-NEXT: s_ctz_i32_b64 s5, s[2:3] +; GFX1164GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1164GISEL-NEXT: v_readlane_b32 
s6, v0, s5 +; GFX1164GISEL-NEXT: s_bitset0_b64 s[2:3], s5 +; GFX1164GISEL-NEXT: s_max_u32 s4, s4, s6 +; GFX1164GISEL-NEXT: s_cmp_lg_u64 s[2:3], 0 +; GFX1164GISEL-NEXT: s_cbranch_scc1 .LBB3_1 +; GFX1164GISEL-NEXT: ; %bb.2: +; GFX1164GISEL-NEXT: v_mov_b32_e32 v0, s4 +; GFX1164GISEL-NEXT: v_mov_b32_e32 v1, 0 +; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX1164GISEL-NEXT: global_store_b32 v1, v0, s[0:1] +; GFX1164GISEL-NEXT: s_nop 0 +; GFX1164GISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX1164GISEL-NEXT: s_endpgm +; +; GFX1132DAGISEL-LABEL: divergent_value: +; GFX1132DAGISEL: ; %bb.0: ; %entry +; GFX1132DAGISEL-NEXT: s_load_b64 s[0:1], s[0:1], 0x24 +; GFX1132DAGISEL-NEXT: v_mov_b32_e32 v1, 0 +; GFX1132DAGISEL-NEXT: s_mov_b32 s3, exec_lo +; GFX1132DAGISEL-NEXT: s_mov_b32 s2, 0 +; GFX1132DAGISEL-NEXT: .LBB3_1: ; =>This Inner Loop Header: Depth=1 +; GFX1132DAGISEL-NEXT: s_ctz_i32_b32 s4, s3 +; GFX1132DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1132DAGISEL-NEXT: v_readlane_b32 s5, v0, s4 +; GFX1132DAGISEL-NEXT: s_bitset0_b32 s3, s4 +; GFX1132DAGISEL-NEXT: s_max_u32 s2, s2, s5 +; GFX1132DAGISEL-NEXT: s_cmp_lg_u32 s3, 0 +; GFX1132DAGISEL-NEXT: s_cbranch_scc1 .LBB3_1 +; GFX1132DAGISEL-NEXT: ; %bb.2: +; GFX1132DAGISEL-NEXT: v_mov_b32_e32 v0, s2 +; GFX1132DAGISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX1132DAGISEL-NEXT: global_store_b32 v1, v0, s[0:1] +; GFX1132DAGISEL-NEXT: s_nop 0 +; GFX1132DAGISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX1132DAGISEL-NEXT: s_endpgm +; +; GFX1132GISEL-LABEL: divergent_value: +; GFX1132GISEL: ; %bb.0: ; %entry +; GFX1132GISEL-NEXT: s_load_b64 s[0:1], s[0:1], 0x24 +; GFX1132GISEL-NEXT: s_mov_b32 s3, exec_lo +; GFX1132GISEL-NEXT: s_mov_b32 s2, 0 +; GFX1132GISEL-NEXT: .LBB3_1: ; =>This Inner Loop Header: Depth=1 +; GFX1132GISEL-NEXT: s_ctz_i32_b32 s4, s3 +; GFX1132GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1132GISEL-NEXT: v_readlane_b32 
s5, v0, s4 +; GFX1132GISEL-NEXT: s_bitset0_b32 s3, s4 +; GFX1132GISEL-NEXT: s_max_u32 s2, s2, s5 +; GFX1132GISEL-NEXT: s_cmp_lg_u32 s3, 0 +; GFX1132GISEL-NEXT: s_cbranch_scc1 .LBB3_1 +; GFX1132GISEL-NEXT: ; %bb.2: +; GFX1132GISEL-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, 0 +; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX1132GISEL-NEXT: global_store_b32 v1, v0, s[0:1] +; GFX1132GISEL-NEXT: s_nop 0 +; GFX1132GISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX1132GISEL-NEXT: s_endpgm +entry: + %id.x = call i32 @llvm.amdgcn.workitem.id.x() + %result = call i32 @llvm.amdgcn.wave.reduce.umax.i32(i32 %id.x, i32 1) + store i32 %result, ptr addrspace(1) %out + ret void +} + +define amdgpu_kernel void @divergent_cfg(ptr addrspace(1) %out, i32 %in) { +; GFX8DAGISEL-LABEL: divergent_cfg: +; GFX8DAGISEL: ; %bb.0: ; %entry +; GFX8DAGISEL-NEXT: v_cmp_lt_u32_e32 vcc, 15, v0 +; GFX8DAGISEL-NEXT: ; implicit-def: $sgpr4 +; GFX8DAGISEL-NEXT: s_and_saveexec_b64 s[2:3], vcc +; GFX8DAGISEL-NEXT: s_xor_b64 s[2:3], exec, s[2:3] +; GFX8DAGISEL-NEXT: s_cbranch_execz .LBB4_2 +; GFX8DAGISEL-NEXT: ; %bb.1: ; %else +; GFX8DAGISEL-NEXT: s_load_dword s4, s[0:1], 0x2c +; GFX8DAGISEL-NEXT: ; implicit-def: $vgpr0 +; GFX8DAGISEL-NEXT: .LBB4_2: ; %Flow +; GFX8DAGISEL-NEXT: s_or_saveexec_b64 s[2:3], s[2:3] +; GFX8DAGISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX8DAGISEL-NEXT: v_mov_b32_e32 v1, s4 +; GFX8DAGISEL-NEXT: s_xor_b64 exec, exec, s[2:3] +; GFX8DAGISEL-NEXT: s_cbranch_execz .LBB4_6 +; GFX8DAGISEL-NEXT: ; %bb.3: ; %if +; GFX8DAGISEL-NEXT: s_mov_b64 s[4:5], exec +; GFX8DAGISEL-NEXT: s_mov_b32 s6, 0 +; GFX8DAGISEL-NEXT: .LBB4_4: ; =>This Inner Loop Header: Depth=1 +; GFX8DAGISEL-NEXT: s_ff1_i32_b64 s7, s[4:5] +; GFX8DAGISEL-NEXT: v_readlane_b32 s8, v0, s7 +; GFX8DAGISEL-NEXT: s_bitset0_b64 s[4:5], s7 +; GFX8DAGISEL-NEXT: s_max_u32 s6, s6, s8 +; GFX8DAGISEL-NEXT: s_cmp_lg_u64 s[4:5], 0 +; GFX8DAGISEL-NEXT: s_cbranch_scc1 .LBB4_4 +; GFX8DAGISEL-NEXT: ; %bb.5: +; GFX8DAGISEL-NEXT: v_mov_b32_e32 
v1, s6 +; GFX8DAGISEL-NEXT: .LBB4_6: ; %endif +; GFX8DAGISEL-NEXT: s_or_b64 exec, exec, s[2:3] +; GFX8DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX8DAGISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX8DAGISEL-NEXT: v_mov_b32_e32 v3, s1 +; GFX8DAGISEL-NEXT: v_mov_b32_e32 v2, s0 +; GFX8DAGISEL-NEXT: flat_store_dword v[2:3], v1 +; GFX8DAGISEL-NEXT: s_endpgm +; +; GFX8GISEL-LABEL: divergent_cfg: +; GFX8GISEL: ; %bb.0: ; %entry +; GFX8GISEL-NEXT: v_cmp_le_u32_e32 vcc, 16, v0 +; GFX8GISEL-NEXT: ; implicit-def: $sgpr6 +; GFX8GISEL-NEXT: s_and_saveexec_b64 s[2:3], vcc +; GFX8GISEL-NEXT: s_xor_b64 s[2:3], exec, s[2:3] +; GFX8GISEL-NEXT: s_cbranch_execz .LBB4_2 +; GFX8GISEL-NEXT: ; %bb.1: ; %else +; GFX8GISEL-NEXT: s_load_dword s4, s[0:1], 0x2c +; GFX8GISEL-NEXT: ; implicit-def: $vgpr0 +; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX8GISEL-NEXT: s_mov_b32 s6, s4 +; GFX8GISEL-NEXT: .LBB4_2: ; %Flow +; GFX8GISEL-NEXT: s_andn2_saveexec_b64 s[2:3], s[2:3] +; GFX8GISEL-NEXT: s_cbranch_execz .LBB4_5 +; GFX8GISEL-NEXT: ; %bb.3: ; %if +; GFX8GISEL-NEXT: s_mov_b64 s[4:5], exec +; GFX8GISEL-NEXT: s_mov_b32 s6, 0 +; GFX8GISEL-NEXT: .LBB4_4: ; =>This Inner Loop Header: Depth=1 +; GFX8GISEL-NEXT: s_ff1_i32_b64 s7, s[4:5] +; GFX8GISEL-NEXT: v_readlane_b32 s8, v0, s7 +; GFX8GISEL-NEXT: s_bitset0_b64 s[4:5], s7 +; GFX8GISEL-NEXT: s_max_u32 s6, s6, s8 +; GFX8GISEL-NEXT: s_cmp_lg_u64 s[4:5], 0 +; GFX8GISEL-NEXT: s_cbranch_scc1 .LBB4_4 +; GFX8GISEL-NEXT: .LBB4_5: ; %endif +; GFX8GISEL-NEXT: s_or_b64 exec, exec, s[2:3] +; GFX8GISEL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX8GISEL-NEXT: v_mov_b32_e32 v2, s6 +; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX8GISEL-NEXT: v_mov_b32_e32 v0, s0 +; GFX8GISEL-NEXT: v_mov_b32_e32 v1, s1 +; GFX8GISEL-NEXT: flat_store_dword v[0:1], v2 +; GFX8GISEL-NEXT: s_endpgm +; +; GFX9DAGISEL-LABEL: divergent_cfg: +; GFX9DAGISEL: ; %bb.0: ; %entry +; GFX9DAGISEL-NEXT: v_cmp_lt_u32_e32 vcc, 15, v0 +; GFX9DAGISEL-NEXT: ; implicit-def: $sgpr4 +; GFX9DAGISEL-NEXT: 
s_and_saveexec_b64 s[2:3], vcc +; GFX9DAGISEL-NEXT: s_xor_b64 s[2:3], exec, s[2:3] +; GFX9DAGISEL-NEXT: s_cbranch_execz .LBB4_2 +; GFX9DAGISEL-NEXT: ; %bb.1: ; %else +; GFX9DAGISEL-NEXT: s_load_dword s4, s[0:1], 0x2c +; GFX9DAGISEL-NEXT: ; implicit-def: $vgpr0 +; GFX9DAGISEL-NEXT: .LBB4_2: ; %Flow +; GFX9DAGISEL-NEXT: s_or_saveexec_b64 s[2:3], s[2:3] +; GFX9DAGISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX9DAGISEL-NEXT: v_mov_b32_e32 v1, s4 +; GFX9DAGISEL-NEXT: s_xor_b64 exec, exec, s[2:3] +; GFX9DAGISEL-NEXT: s_cbranch_execz .LBB4_6 +; GFX9DAGISEL-NEXT: ; %bb.3: ; %if +; GFX9DAGISEL-NEXT: s_mov_b64 s[4:5], exec +; GFX9DAGISEL-NEXT: s_mov_b32 s6, 0 +; GFX9DAGISEL-NEXT: .LBB4_4: ; =>This Inner Loop Header: Depth=1 +; GFX9DAGISEL-NEXT: s_ff1_i32_b64 s7, s[4:5] +; GFX9DAGISEL-NEXT: v_readlane_b32 s8, v0, s7 +; GFX9DAGISEL-NEXT: s_bitset0_b64 s[4:5], s7 +; GFX9DAGISEL-NEXT: s_max_u32 s6, s6, s8 +; GFX9DAGISEL-NEXT: s_cmp_lg_u64 s[4:5], 0 +; GFX9DAGISEL-NEXT: s_cbranch_scc1 .LBB4_4 +; GFX9DAGISEL-NEXT: ; %bb.5: +; GFX9DAGISEL-NEXT: v_mov_b32_e32 v1, s6 +; GFX9DAGISEL-NEXT: .LBB4_6: ; %endif +; GFX9DAGISEL-NEXT: s_or_b64 exec, exec, s[2:3] +; GFX9DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX9DAGISEL-NEXT: v_mov_b32_e32 v0, 0 +; GFX9DAGISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX9DAGISEL-NEXT: global_store_dword v0, v1, s[0:1] +; GFX9DAGISEL-NEXT: s_endpgm +; +; GFX9GISEL-LABEL: divergent_cfg: +; GFX9GISEL: ; %bb.0: ; %entry +; GFX9GISEL-NEXT: v_cmp_le_u32_e32 vcc, 16, v0 +; GFX9GISEL-NEXT: ; implicit-def: $sgpr6 +; GFX9GISEL-NEXT: s_and_saveexec_b64 s[2:3], vcc +; GFX9GISEL-NEXT: s_xor_b64 s[2:3], exec, s[2:3] +; GFX9GISEL-NEXT: s_cbranch_execz .LBB4_2 +; GFX9GISEL-NEXT: ; %bb.1: ; %else +; GFX9GISEL-NEXT: s_load_dword s4, s[0:1], 0x2c +; GFX9GISEL-NEXT: ; implicit-def: $vgpr0 +; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX9GISEL-NEXT: s_mov_b32 s6, s4 +; GFX9GISEL-NEXT: .LBB4_2: ; %Flow +; GFX9GISEL-NEXT: s_andn2_saveexec_b64 s[2:3], s[2:3] +; GFX9GISEL-NEXT: 
s_cbranch_execz .LBB4_5 +; GFX9GISEL-NEXT: ; %bb.3: ; %if +; GFX9GISEL-NEXT: s_mov_b64 s[4:5], exec +; GFX9GISEL-NEXT: s_mov_b32 s6, 0 +; GFX9GISEL-NEXT: .LBB4_4: ; =>This Inner Loop Header: Depth=1 +; GFX9GISEL-NEXT: s_ff1_i32_b64 s7, s[4:5] +; GFX9GISEL-NEXT: v_readlane_b32 s8, v0, s7 +; GFX9GISEL-NEXT: s_bitset0_b64 s[4:5], s7 +; GFX9GISEL-NEXT: s_max_u32 s6, s6, s8 +; GFX9GISEL-NEXT: s_cmp_lg_u64 s[4:5], 0 +; GFX9GISEL-NEXT: s_cbranch_scc1 .LBB4_4 +; GFX9GISEL-NEXT: .LBB4_5: ; %endif +; GFX9GISEL-NEXT: s_or_b64 exec, exec, s[2:3] +; GFX9GISEL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX9GISEL-NEXT: v_mov_b32_e32 v0, s6 +; GFX9GISEL-NEXT: v_mov_b32_e32 v1, 0 +; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX9GISEL-NEXT: global_store_dword v1, v0, s[0:1] +; GFX9GISEL-NEXT: s_endpgm +; +; GFX1064DAGISEL-LABEL: divergent_cfg: +; GFX1064DAGISEL: ; %bb.0: ; %entry +; GFX1064DAGISEL-NEXT: v_cmp_lt_u32_e32 vcc, 15, v0 +; GFX1064DAGISEL-NEXT: ; implicit-def: $sgpr4 +; GFX1064DAGISEL-NEXT: s_and_saveexec_b64 s[2:3], vcc +; GFX1064DAGISEL-NEXT: s_xor_b64 s[2:3], exec, s[2:3] +; GFX1064DAGISEL-NEXT: s_cbranch_execz .LBB4_2 +; GFX1064DAGISEL-NEXT: ; %bb.1: ; %else +; GFX1064DAGISEL-NEXT: s_load_dword s4, s[0:1], 0x2c +; GFX1064DAGISEL-NEXT: ; implicit-def: $vgpr0 +; GFX1064DAGISEL-NEXT: .LBB4_2: ; %Flow +; GFX1064DAGISEL-NEXT: s_or_saveexec_b64 s[2:3], s[2:3] +; GFX1064DAGISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v1, s4 +; GFX1064DAGISEL-NEXT: s_xor_b64 exec, exec, s[2:3] +; GFX1064DAGISEL-NEXT: s_cbranch_execz .LBB4_6 +; GFX1064DAGISEL-NEXT: ; %bb.3: ; %if +; GFX1064DAGISEL-NEXT: s_mov_b64 s[4:5], exec +; GFX1064DAGISEL-NEXT: s_mov_b32 s6, 0 +; GFX1064DAGISEL-NEXT: .LBB4_4: ; =>This Inner Loop Header: Depth=1 +; GFX1064DAGISEL-NEXT: s_ff1_i32_b64 s7, s[4:5] +; GFX1064DAGISEL-NEXT: v_readlane_b32 s8, v0, s7 +; GFX1064DAGISEL-NEXT: s_bitset0_b64 s[4:5], s7 +; GFX1064DAGISEL-NEXT: s_max_u32 s6, s6, s8 +; GFX1064DAGISEL-NEXT: s_cmp_lg_u64 s[4:5], 0 
+; GFX1064DAGISEL-NEXT: s_cbranch_scc1 .LBB4_4 +; GFX1064DAGISEL-NEXT: ; %bb.5: +; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v1, s6 +; GFX1064DAGISEL-NEXT: .LBB4_6: ; %endif +; GFX1064DAGISEL-NEXT: s_or_b64 exec, exec, s[2:3] +; GFX1064DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v0, 0 +; GFX1064DAGISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX1064DAGISEL-NEXT: global_store_dword v0, v1, s[0:1] +; GFX1064DAGISEL-NEXT: s_endpgm +; +; GFX1064GISEL-LABEL: divergent_cfg: +; GFX1064GISEL: ; %bb.0: ; %entry +; GFX1064GISEL-NEXT: v_cmp_le_u32_e32 vcc, 16, v0 +; GFX1064GISEL-NEXT: ; implicit-def: $sgpr6 +; GFX1064GISEL-NEXT: s_and_saveexec_b64 s[2:3], vcc +; GFX1064GISEL-NEXT: s_xor_b64 s[2:3], exec, s[2:3] +; GFX1064GISEL-NEXT: s_cbranch_execz .LBB4_2 +; GFX1064GISEL-NEXT: ; %bb.1: ; %else +; GFX1064GISEL-NEXT: s_load_dword s4, s[0:1], 0x2c +; GFX1064GISEL-NEXT: ; implicit-def: $vgpr0 +; GFX1064GISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX1064GISEL-NEXT: s_mov_b32 s6, s4 +; GFX1064GISEL-NEXT: .LBB4_2: ; %Flow +; GFX1064GISEL-NEXT: s_andn2_saveexec_b64 s[2:3], s[2:3] +; GFX1064GISEL-NEXT: s_cbranch_execz .LBB4_5 +; GFX1064GISEL-NEXT: ; %bb.3: ; %if +; GFX1064GISEL-NEXT: s_mov_b64 s[4:5], exec +; GFX1064GISEL-NEXT: s_mov_b32 s6, 0 +; GFX1064GISEL-NEXT: .LBB4_4: ; =>This Inner Loop Header: Depth=1 +; GFX1064GISEL-NEXT: s_ff1_i32_b64 s7, s[4:5] +; GFX1064GISEL-NEXT: v_readlane_b32 s8, v0, s7 +; GFX1064GISEL-NEXT: s_bitset0_b64 s[4:5], s7 +; GFX1064GISEL-NEXT: s_max_u32 s6, s6, s8 +; GFX1064GISEL-NEXT: s_cmp_lg_u64 s[4:5], 0 +; GFX1064GISEL-NEXT: s_cbranch_scc1 .LBB4_4 +; GFX1064GISEL-NEXT: .LBB4_5: ; %endif +; GFX1064GISEL-NEXT: s_or_b64 exec, exec, s[2:3] +; GFX1064GISEL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX1064GISEL-NEXT: v_mov_b32_e32 v0, s6 +; GFX1064GISEL-NEXT: v_mov_b32_e32 v1, 0 +; GFX1064GISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX1064GISEL-NEXT: global_store_dword v1, v0, s[0:1] +; GFX1064GISEL-NEXT: s_endpgm +; +; GFX1032DAGISEL-LABEL: 
divergent_cfg: +; GFX1032DAGISEL: ; %bb.0: ; %entry +; GFX1032DAGISEL-NEXT: v_cmp_lt_u32_e32 vcc_lo, 15, v0 +; GFX1032DAGISEL-NEXT: ; implicit-def: $sgpr3 +; GFX1032DAGISEL-NEXT: s_and_saveexec_b32 s2, vcc_lo +; GFX1032DAGISEL-NEXT: s_xor_b32 s2, exec_lo, s2 +; GFX1032DAGISEL-NEXT: s_cbranch_execz .LBB4_2 +; GFX1032DAGISEL-NEXT: ; %bb.1: ; %else +; GFX1032DAGISEL-NEXT: s_load_dword s3, s[0:1], 0x2c +; GFX1032DAGISEL-NEXT: ; implicit-def: $vgpr0 +; GFX1032DAGISEL-NEXT: .LBB4_2: ; %Flow +; GFX1032DAGISEL-NEXT: s_or_saveexec_b32 s2, s2 +; GFX1032DAGISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v1, s3 +; GFX1032DAGISEL-NEXT: s_xor_b32 exec_lo, exec_lo, s2 +; GFX1032DAGISEL-NEXT: s_cbranch_execz .LBB4_6 +; GFX1032DAGISEL-NEXT: ; %bb.3: ; %if +; GFX1032DAGISEL-NEXT: s_mov_b32 s4, exec_lo +; GFX1032DAGISEL-NEXT: s_mov_b32 s3, 0 +; GFX1032DAGISEL-NEXT: .LBB4_4: ; =>This Inner Loop Header: Depth=1 +; GFX1032DAGISEL-NEXT: s_ff1_i32_b32 s5, s4 +; GFX1032DAGISEL-NEXT: v_readlane_b32 s6, v0, s5 +; GFX1032DAGISEL-NEXT: s_bitset0_b32 s4, s5 +; GFX1032DAGISEL-NEXT: s_max_u32 s3, s3, s6 +; GFX1032DAGISEL-NEXT: s_cmp_lg_u32 s4, 0 +; GFX1032DAGISEL-NEXT: s_cbranch_scc1 .LBB4_4 +; GFX1032DAGISEL-NEXT: ; %bb.5: +; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v1, s3 +; GFX1032DAGISEL-NEXT: .LBB4_6: ; %endif +; GFX1032DAGISEL-NEXT: s_or_b32 exec_lo, exec_lo, s2 +; GFX1032DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v0, 0 +; GFX1032DAGISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX1032DAGISEL-NEXT: global_store_dword v0, v1, s[0:1] +; GFX1032DAGISEL-NEXT: s_endpgm +; +; GFX1032GISEL-LABEL: divergent_cfg: +; GFX1032GISEL: ; %bb.0: ; %entry +; GFX1032GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 16, v0 +; GFX1032GISEL-NEXT: ; implicit-def: $sgpr2 +; GFX1032GISEL-NEXT: s_and_saveexec_b32 s3, vcc_lo +; GFX1032GISEL-NEXT: s_xor_b32 s3, exec_lo, s3 +; GFX1032GISEL-NEXT: s_cbranch_execz .LBB4_2 +; GFX1032GISEL-NEXT: ; %bb.1: ; %else +; GFX1032GISEL-NEXT: 
s_load_dword s2, s[0:1], 0x2c +; GFX1032GISEL-NEXT: ; implicit-def: $vgpr0 +; GFX1032GISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX1032GISEL-NEXT: s_mov_b32 s2, s2 +; GFX1032GISEL-NEXT: .LBB4_2: ; %Flow +; GFX1032GISEL-NEXT: s_andn2_saveexec_b32 s3, s3 +; GFX1032GISEL-NEXT: s_cbranch_execz .LBB4_5 +; GFX1032GISEL-NEXT: ; %bb.3: ; %if +; GFX1032GISEL-NEXT: s_mov_b32 s4, exec_lo +; GFX1032GISEL-NEXT: s_mov_b32 s2, 0 +; GFX1032GISEL-NEXT: .LBB4_4: ; =>This Inner Loop Header: Depth=1 +; GFX1032GISEL-NEXT: s_ff1_i32_b32 s5, s4 +; GFX1032GISEL-NEXT: v_readlane_b32 s6, v0, s5 +; GFX1032GISEL-NEXT: s_bitset0_b32 s4, s5 +; GFX1032GISEL-NEXT: s_max_u32 s2, s2, s6 +; GFX1032GISEL-NEXT: s_cmp_lg_u32 s4, 0 +; GFX1032GISEL-NEXT: s_cbranch_scc1 .LBB4_4 +; GFX1032GISEL-NEXT: .LBB4_5: ; %endif +; GFX1032GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s3 +; GFX1032GISEL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX1032GISEL-NEXT: v_mov_b32_e32 v0, s2 +; GFX1032GISEL-NEXT: v_mov_b32_e32 v1, 0 +; GFX1032GISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX1032GISEL-NEXT: global_store_dword v1, v0, s[0:1] +; GFX1032GISEL-NEXT: s_endpgm +; +; GFX1164DAGISEL-LABEL: divergent_cfg: +; GFX1164DAGISEL: ; %bb.0: ; %entry +; GFX1164DAGISEL-NEXT: s_mov_b64 s[2:3], exec +; GFX1164DAGISEL-NEXT: ; implicit-def: $sgpr4 +; GFX1164DAGISEL-NEXT: v_cmpx_lt_u32_e32 15, v0 +; GFX1164DAGISEL-NEXT: s_xor_b64 s[2:3], exec, s[2:3] +; GFX1164DAGISEL-NEXT: s_cbranch_execz .LBB4_2 +; GFX1164DAGISEL-NEXT: ; %bb.1: ; %else +; GFX1164DAGISEL-NEXT: s_load_b32 s4, s[0:1], 0x2c +; GFX1164DAGISEL-NEXT: ; implicit-def: $vgpr0 +; GFX1164DAGISEL-NEXT: .LBB4_2: ; %Flow +; GFX1164DAGISEL-NEXT: s_or_saveexec_b64 s[2:3], s[2:3] +; GFX1164DAGISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v1, s4 +; GFX1164DAGISEL-NEXT: s_xor_b64 exec, exec, s[2:3] +; GFX1164DAGISEL-NEXT: s_cbranch_execz .LBB4_6 +; GFX1164DAGISEL-NEXT: ; %bb.3: ; %if +; GFX1164DAGISEL-NEXT: s_mov_b64 s[4:5], exec +; GFX1164DAGISEL-NEXT: s_mov_b32 s6, 0 +; 
GFX1164DAGISEL-NEXT: .LBB4_4: ; =>This Inner Loop Header: Depth=1 +; GFX1164DAGISEL-NEXT: s_ctz_i32_b64 s7, s[4:5] +; GFX1164DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1164DAGISEL-NEXT: v_readlane_b32 s8, v0, s7 +; GFX1164DAGISEL-NEXT: s_bitset0_b64 s[4:5], s7 +; GFX1164DAGISEL-NEXT: s_max_u32 s6, s6, s8 +; GFX1164DAGISEL-NEXT: s_cmp_lg_u64 s[4:5], 0 +; GFX1164DAGISEL-NEXT: s_cbranch_scc1 .LBB4_4 +; GFX1164DAGISEL-NEXT: ; %bb.5: +; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v1, s6 +; GFX1164DAGISEL-NEXT: .LBB4_6: ; %endif +; GFX1164DAGISEL-NEXT: s_or_b64 exec, exec, s[2:3] +; GFX1164DAGISEL-NEXT: s_load_b64 s[0:1], s[0:1], 0x24 +; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v0, 0 +; GFX1164DAGISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX1164DAGISEL-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX1164DAGISEL-NEXT: s_nop 0 +; GFX1164DAGISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX1164DAGISEL-NEXT: s_endpgm +; +; GFX1164GISEL-LABEL: divergent_cfg: +; GFX1164GISEL: ; %bb.0: ; %entry +; GFX1164GISEL-NEXT: s_mov_b64 s[2:3], exec +; GFX1164GISEL-NEXT: ; implicit-def: $sgpr6 +; GFX1164GISEL-NEXT: v_cmpx_le_u32_e32 16, v0 +; GFX1164GISEL-NEXT: s_xor_b64 s[2:3], exec, s[2:3] +; GFX1164GISEL-NEXT: s_cbranch_execz .LBB4_2 +; GFX1164GISEL-NEXT: ; %bb.1: ; %else +; GFX1164GISEL-NEXT: s_load_b32 s4, s[0:1], 0x2c +; GFX1164GISEL-NEXT: ; implicit-def: $vgpr0 +; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX1164GISEL-NEXT: s_mov_b32 s6, s4 +; GFX1164GISEL-NEXT: .LBB4_2: ; %Flow +; GFX1164GISEL-NEXT: s_and_not1_saveexec_b64 s[2:3], s[2:3] +; GFX1164GISEL-NEXT: s_cbranch_execz .LBB4_5 +; GFX1164GISEL-NEXT: ; %bb.3: ; %if +; GFX1164GISEL-NEXT: s_mov_b64 s[4:5], exec +; GFX1164GISEL-NEXT: s_mov_b32 s6, 0 +; GFX1164GISEL-NEXT: .LBB4_4: ; =>This Inner Loop Header: Depth=1 +; GFX1164GISEL-NEXT: s_ctz_i32_b64 s7, s[4:5] +; GFX1164GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1164GISEL-NEXT: v_readlane_b32 s8, 
v0, s7 +; GFX1164GISEL-NEXT: s_bitset0_b64 s[4:5], s7 +; GFX1164GISEL-NEXT: s_max_u32 s6, s6, s8 +; GFX1164GISEL-NEXT: s_cmp_lg_u64 s[4:5], 0 +; GFX1164GISEL-NEXT: s_cbranch_scc1 .LBB4_4 +; GFX1164GISEL-NEXT: .LBB4_5: ; %endif +; GFX1164GISEL-NEXT: s_or_b64 exec, exec, s[2:3] +; GFX1164GISEL-NEXT: s_load_b64 s[0:1], s[0:1], 0x24 +; GFX1164GISEL-NEXT: v_mov_b32_e32 v0, s6 +; GFX1164GISEL-NEXT: v_mov_b32_e32 v1, 0 +; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX1164GISEL-NEXT: global_store_b32 v1, v0, s[0:1] +; GFX1164GISEL-NEXT: s_nop 0 +; GFX1164GISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX1164GISEL-NEXT: s_endpgm +; +; GFX1132DAGISEL-LABEL: divergent_cfg: +; GFX1132DAGISEL: ; %bb.0: ; %entry +; GFX1132DAGISEL-NEXT: s_mov_b32 s2, exec_lo +; GFX1132DAGISEL-NEXT: ; implicit-def: $sgpr3 +; GFX1132DAGISEL-NEXT: v_cmpx_lt_u32_e32 15, v0 +; GFX1132DAGISEL-NEXT: s_xor_b32 s2, exec_lo, s2 +; GFX1132DAGISEL-NEXT: s_cbranch_execz .LBB4_2 +; GFX1132DAGISEL-NEXT: ; %bb.1: ; %else +; GFX1132DAGISEL-NEXT: s_load_b32 s3, s[0:1], 0x2c +; GFX1132DAGISEL-NEXT: ; implicit-def: $vgpr0 +; GFX1132DAGISEL-NEXT: .LBB4_2: ; %Flow +; GFX1132DAGISEL-NEXT: s_or_saveexec_b32 s2, s2 +; GFX1132DAGISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX1132DAGISEL-NEXT: v_mov_b32_e32 v1, s3 +; GFX1132DAGISEL-NEXT: s_xor_b32 exec_lo, exec_lo, s2 +; GFX1132DAGISEL-NEXT: s_cbranch_execz .LBB4_6 +; GFX1132DAGISEL-NEXT: ; %bb.3: ; %if +; GFX1132DAGISEL-NEXT: s_mov_b32 s4, exec_lo +; GFX1132DAGISEL-NEXT: s_mov_b32 s3, 0 +; GFX1132DAGISEL-NEXT: .LBB4_4: ; =>This Inner Loop Header: Depth=1 +; GFX1132DAGISEL-NEXT: s_ctz_i32_b32 s5, s4 +; GFX1132DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1132DAGISEL-NEXT: v_readlane_b32 s6, v0, s5 +; GFX1132DAGISEL-NEXT: s_bitset0_b32 s4, s5 +; GFX1132DAGISEL-NEXT: s_max_u32 s3, s3, s6 +; GFX1132DAGISEL-NEXT: s_cmp_lg_u32 s4, 0 +; GFX1132DAGISEL-NEXT: s_cbranch_scc1 .LBB4_4 +; GFX1132DAGISEL-NEXT: ; %bb.5: +; 
GFX1132DAGISEL-NEXT: v_mov_b32_e32 v1, s3 +; GFX1132DAGISEL-NEXT: .LBB4_6: ; %endif +; GFX1132DAGISEL-NEXT: s_or_b32 exec_lo, exec_lo, s2 +; GFX1132DAGISEL-NEXT: s_load_b64 s[0:1], s[0:1], 0x24 +; GFX1132DAGISEL-NEXT: v_mov_b32_e32 v0, 0 +; GFX1132DAGISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX1132DAGISEL-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX1132DAGISEL-NEXT: s_nop 0 +; GFX1132DAGISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX1132DAGISEL-NEXT: s_endpgm +; +; GFX1132GISEL-LABEL: divergent_cfg: +; GFX1132GISEL: ; %bb.0: ; %entry +; GFX1132GISEL-NEXT: s_mov_b32 s3, exec_lo +; GFX1132GISEL-NEXT: ; implicit-def: $sgpr2 +; GFX1132GISEL-NEXT: v_cmpx_le_u32_e32 16, v0 +; GFX1132GISEL-NEXT: s_xor_b32 s3, exec_lo, s3 +; GFX1132GISEL-NEXT: s_cbranch_execz .LBB4_2 +; GFX1132GISEL-NEXT: ; %bb.1: ; %else +; GFX1132GISEL-NEXT: s_load_b32 s2, s[0:1], 0x2c +; GFX1132GISEL-NEXT: ; implicit-def: $vgpr0 +; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX1132GISEL-NEXT: s_mov_b32 s2, s2 +; GFX1132GISEL-NEXT: .LBB4_2: ; %Flow +; GFX1132GISEL-NEXT: s_and_not1_saveexec_b32 s3, s3 +; GFX1132GISEL-NEXT: s_cbranch_execz .LBB4_5 +; GFX1132GISEL-NEXT: ; %bb.3: ; %if +; GFX1132GISEL-NEXT: s_mov_b32 s4, exec_lo +; GFX1132GISEL-NEXT: s_mov_b32 s2, 0 +; GFX1132GISEL-NEXT: .LBB4_4: ; =>This Inner Loop Header: Depth=1 +; GFX1132GISEL-NEXT: s_ctz_i32_b32 s5, s4 +; GFX1132GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1132GISEL-NEXT: v_readlane_b32 s6, v0, s5 +; GFX1132GISEL-NEXT: s_bitset0_b32 s4, s5 +; GFX1132GISEL-NEXT: s_max_u32 s2, s2, s6 +; GFX1132GISEL-NEXT: s_cmp_lg_u32 s4, 0 +; GFX1132GISEL-NEXT: s_cbranch_scc1 .LBB4_4 +; GFX1132GISEL-NEXT: .LBB4_5: ; %endif +; GFX1132GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s3 +; GFX1132GISEL-NEXT: s_load_b64 s[0:1], s[0:1], 0x24 +; GFX1132GISEL-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, 0 +; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX1132GISEL-NEXT: global_store_b32 v1, v0, s[0:1] +; 
GFX1132GISEL-NEXT: s_nop 0 +; GFX1132GISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX1132GISEL-NEXT: s_endpgm +entry: + %tid = call i32 @llvm.amdgcn.workitem.id.x() + %d_cmp = icmp ult i32 %tid, 16 + br i1 %d_cmp, label %if, label %else + +if: + %reducedValTid = call i32 @llvm.amdgcn.wave.reduce.umax.i32(i32 %tid, i32 1) + br label %endif + +else: + %reducedValIn = call i32 @llvm.amdgcn.wave.reduce.umax.i32(i32 %in, i32 1) + br label %endif + +endif: + %combine = phi i32 [%reducedValTid, %if], [%reducedValIn, %else] + store i32 %combine, ptr addrspace(1) %out + ret void +} diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umin.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umin.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umin.ll @@ -0,0 +1,1017 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3 +; RUN: llc -march=amdgcn -mcpu=tonga -global-isel=0 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX8DAGISEL %s +; RUN: llc -march=amdgcn -mcpu=tonga -global-isel=1 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX8GISEL %s +; RUN: llc -march=amdgcn -mcpu=gfx900 -global-isel=0 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX9DAGISEL %s +; RUN: llc -march=amdgcn -mcpu=gfx900 -global-isel=1 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX9GISEL %s +; RUN: llc -march=amdgcn -mcpu=gfx1010 -global-isel=0 -mattr=+wavefrontsize64 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX10DAGISEL,GFX1064DAGISEL %s +; RUN: llc -march=amdgcn -mcpu=gfx1010 -global-isel=1 -mattr=+wavefrontsize64 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX10GISEL,GFX1064GISEL %s +; RUN: llc -march=amdgcn -mcpu=gfx1010 -global-isel=0 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX10DAGISEL,GFX1032DAGISEL %s +; RUN: llc -march=amdgcn -mcpu=gfx1010 -global-isel=1 -verify-machineinstrs < %s | FileCheck 
-check-prefixes=GFX10GISEL,GFX1032GISEL %s +; RUN: llc -march=amdgcn -mcpu=gfx1100 -global-isel=0 -mattr=+wavefrontsize64 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX11DAGISEL,GFX1164DAGISEL %s +; RUN: llc -march=amdgcn -mcpu=gfx1100 -global-isel=1 -mattr=+wavefrontsize64 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX11GISEL,GFX1164GISEL %s +; RUN: llc -march=amdgcn -mcpu=gfx1100 -global-isel=0 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX11DAGISEL,GFX1132DAGISEL %s +; RUN: llc -march=amdgcn -mcpu=gfx1100 -global-isel=1 -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX11GISEL,GFX1132GISEL %s + + +declare i32 @llvm.amdgcn.wave.reduce.umin.i32(i32, i32 immarg) +declare i32 @llvm.amdgcn.workitem.id.x() + +define amdgpu_kernel void @uniform_value(ptr addrspace(1) %out, i32 %in) { +; GFX8DAGISEL-LABEL: uniform_value: +; GFX8DAGISEL: ; %bb.0: ; %entry +; GFX8DAGISEL-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24 +; GFX8DAGISEL-NEXT: s_load_dword s0, s[0:1], 0x2c +; GFX8DAGISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX8DAGISEL-NEXT: v_mov_b32_e32 v0, s2 +; GFX8DAGISEL-NEXT: v_mov_b32_e32 v1, s3 +; GFX8DAGISEL-NEXT: v_mov_b32_e32 v2, s0 +; GFX8DAGISEL-NEXT: flat_store_dword v[0:1], v2 +; GFX8DAGISEL-NEXT: s_endpgm +; +; GFX8GISEL-LABEL: uniform_value: +; GFX8GISEL: ; %bb.0: ; %entry +; GFX8GISEL-NEXT: s_load_dword s2, s[0:1], 0x2c +; GFX8GISEL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX8GISEL-NEXT: v_mov_b32_e32 v2, s2 +; GFX8GISEL-NEXT: v_mov_b32_e32 v0, s0 +; GFX8GISEL-NEXT: v_mov_b32_e32 v1, s1 +; GFX8GISEL-NEXT: flat_store_dword v[0:1], v2 +; GFX8GISEL-NEXT: s_endpgm +; +; GFX9DAGISEL-LABEL: uniform_value: +; GFX9DAGISEL: ; %bb.0: ; %entry +; GFX9DAGISEL-NEXT: s_load_dword s4, s[0:1], 0x2c +; GFX9DAGISEL-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24 +; GFX9DAGISEL-NEXT: v_mov_b32_e32 v0, 0 +; GFX9DAGISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX9DAGISEL-NEXT: v_mov_b32_e32 v1, s4 +; GFX9DAGISEL-NEXT: 
global_store_dword v0, v1, s[2:3] +; GFX9DAGISEL-NEXT: s_endpgm +; +; GFX9GISEL-LABEL: uniform_value: +; GFX9GISEL: ; %bb.0: ; %entry +; GFX9GISEL-NEXT: s_load_dword s4, s[0:1], 0x2c +; GFX9GISEL-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24 +; GFX9GISEL-NEXT: v_mov_b32_e32 v1, 0 +; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX9GISEL-NEXT: v_mov_b32_e32 v0, s4 +; GFX9GISEL-NEXT: global_store_dword v1, v0, s[2:3] +; GFX9GISEL-NEXT: s_endpgm +; +; GFX10DAGISEL-LABEL: uniform_value: +; GFX10DAGISEL: ; %bb.0: ; %entry +; GFX10DAGISEL-NEXT: s_clause 0x1 +; GFX10DAGISEL-NEXT: s_load_dword s4, s[0:1], 0x2c +; GFX10DAGISEL-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24 +; GFX10DAGISEL-NEXT: v_mov_b32_e32 v0, 0 +; GFX10DAGISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX10DAGISEL-NEXT: v_mov_b32_e32 v1, s4 +; GFX10DAGISEL-NEXT: global_store_dword v0, v1, s[2:3] +; GFX10DAGISEL-NEXT: s_endpgm +; +; GFX10GISEL-LABEL: uniform_value: +; GFX10GISEL: ; %bb.0: ; %entry +; GFX10GISEL-NEXT: s_clause 0x1 +; GFX10GISEL-NEXT: s_load_dword s4, s[0:1], 0x2c +; GFX10GISEL-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24 +; GFX10GISEL-NEXT: v_mov_b32_e32 v1, 0 +; GFX10GISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX10GISEL-NEXT: v_mov_b32_e32 v0, s4 +; GFX10GISEL-NEXT: global_store_dword v1, v0, s[2:3] +; GFX10GISEL-NEXT: s_endpgm +; +; GFX1164DAGISEL-LABEL: uniform_value: +; GFX1164DAGISEL: ; %bb.0: ; %entry +; GFX1164DAGISEL-NEXT: s_clause 0x1 +; GFX1164DAGISEL-NEXT: s_load_b32 s2, s[0:1], 0x2c +; GFX1164DAGISEL-NEXT: s_load_b64 s[0:1], s[0:1], 0x24 +; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v0, 0 +; GFX1164DAGISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v1, s2 +; GFX1164DAGISEL-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX1164DAGISEL-NEXT: s_nop 0 +; GFX1164DAGISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX1164DAGISEL-NEXT: s_endpgm +; +; GFX1164GISEL-LABEL: uniform_value: +; GFX1164GISEL: ; %bb.0: ; %entry +; GFX1164GISEL-NEXT: s_clause 0x1 +; GFX1164GISEL-NEXT: s_load_b32 s2, s[0:1], 0x2c +; 
GFX1164GISEL-NEXT: s_load_b64 s[0:1], s[0:1], 0x24 +; GFX1164GISEL-NEXT: v_mov_b32_e32 v1, 0 +; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX1164GISEL-NEXT: v_mov_b32_e32 v0, s2 +; GFX1164GISEL-NEXT: global_store_b32 v1, v0, s[0:1] +; GFX1164GISEL-NEXT: s_nop 0 +; GFX1164GISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX1164GISEL-NEXT: s_endpgm +; +; GFX1132DAGISEL-LABEL: uniform_value: +; GFX1132DAGISEL: ; %bb.0: ; %entry +; GFX1132DAGISEL-NEXT: s_clause 0x1 +; GFX1132DAGISEL-NEXT: s_load_b32 s2, s[0:1], 0x2c +; GFX1132DAGISEL-NEXT: s_load_b64 s[0:1], s[0:1], 0x24 +; GFX1132DAGISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2 +; GFX1132DAGISEL-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX1132DAGISEL-NEXT: s_nop 0 +; GFX1132DAGISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX1132DAGISEL-NEXT: s_endpgm +; +; GFX1132GISEL-LABEL: uniform_value: +; GFX1132GISEL: ; %bb.0: ; %entry +; GFX1132GISEL-NEXT: s_clause 0x1 +; GFX1132GISEL-NEXT: s_load_b32 s2, s[0:1], 0x2c +; GFX1132GISEL-NEXT: s_load_b64 s[0:1], s[0:1], 0x24 +; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX1132GISEL-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v0, s2 +; GFX1132GISEL-NEXT: global_store_b32 v1, v0, s[0:1] +; GFX1132GISEL-NEXT: s_nop 0 +; GFX1132GISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX1132GISEL-NEXT: s_endpgm +entry: + %result = call i32 @llvm.amdgcn.wave.reduce.umin.i32(i32 %in, i32 1) + store i32 %result, ptr addrspace(1) %out + ret void +} + +define amdgpu_kernel void @const_value(ptr addrspace(1) %out) { +; GFX8DAGISEL-LABEL: const_value: +; GFX8DAGISEL: ; %bb.0: ; %entry +; GFX8DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX8DAGISEL-NEXT: v_mov_b32_e32 v2, 0x7b +; GFX8DAGISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX8DAGISEL-NEXT: v_mov_b32_e32 v0, s0 +; GFX8DAGISEL-NEXT: v_mov_b32_e32 v1, s1 +; GFX8DAGISEL-NEXT: flat_store_dword v[0:1], v2 +; GFX8DAGISEL-NEXT: s_endpgm +; +; GFX8GISEL-LABEL: const_value: +; 
GFX8GISEL: ; %bb.0: ; %entry +; GFX8GISEL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX8GISEL-NEXT: v_mov_b32_e32 v2, 0x7b +; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX8GISEL-NEXT: v_mov_b32_e32 v0, s0 +; GFX8GISEL-NEXT: v_mov_b32_e32 v1, s1 +; GFX8GISEL-NEXT: flat_store_dword v[0:1], v2 +; GFX8GISEL-NEXT: s_endpgm +; +; GFX9DAGISEL-LABEL: const_value: +; GFX9DAGISEL: ; %bb.0: ; %entry +; GFX9DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX9DAGISEL-NEXT: v_mov_b32_e32 v0, 0 +; GFX9DAGISEL-NEXT: v_mov_b32_e32 v1, 0x7b +; GFX9DAGISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX9DAGISEL-NEXT: global_store_dword v0, v1, s[0:1] +; GFX9DAGISEL-NEXT: s_endpgm +; +; GFX9GISEL-LABEL: const_value: +; GFX9GISEL: ; %bb.0: ; %entry +; GFX9GISEL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX9GISEL-NEXT: v_mov_b32_e32 v0, 0x7b +; GFX9GISEL-NEXT: v_mov_b32_e32 v1, 0 +; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX9GISEL-NEXT: global_store_dword v1, v0, s[0:1] +; GFX9GISEL-NEXT: s_endpgm +; +; GFX10DAGISEL-LABEL: const_value: +; GFX10DAGISEL: ; %bb.0: ; %entry +; GFX10DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX10DAGISEL-NEXT: v_mov_b32_e32 v0, 0 +; GFX10DAGISEL-NEXT: v_mov_b32_e32 v1, 0x7b +; GFX10DAGISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX10DAGISEL-NEXT: global_store_dword v0, v1, s[0:1] +; GFX10DAGISEL-NEXT: s_endpgm +; +; GFX10GISEL-LABEL: const_value: +; GFX10GISEL: ; %bb.0: ; %entry +; GFX10GISEL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX10GISEL-NEXT: v_mov_b32_e32 v0, 0x7b +; GFX10GISEL-NEXT: v_mov_b32_e32 v1, 0 +; GFX10GISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX10GISEL-NEXT: global_store_dword v1, v0, s[0:1] +; GFX10GISEL-NEXT: s_endpgm +; +; GFX1164DAGISEL-LABEL: const_value: +; GFX1164DAGISEL: ; %bb.0: ; %entry +; GFX1164DAGISEL-NEXT: s_load_b64 s[0:1], s[0:1], 0x24 +; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v0, 0 +; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v1, 0x7b +; GFX1164DAGISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX1164DAGISEL-NEXT: global_store_b32 v0, v1, s[0:1] +; 
GFX1164DAGISEL-NEXT: s_nop 0 +; GFX1164DAGISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX1164DAGISEL-NEXT: s_endpgm +; +; GFX1164GISEL-LABEL: const_value: +; GFX1164GISEL: ; %bb.0: ; %entry +; GFX1164GISEL-NEXT: s_load_b64 s[0:1], s[0:1], 0x24 +; GFX1164GISEL-NEXT: v_mov_b32_e32 v0, 0x7b +; GFX1164GISEL-NEXT: v_mov_b32_e32 v1, 0 +; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX1164GISEL-NEXT: global_store_b32 v1, v0, s[0:1] +; GFX1164GISEL-NEXT: s_nop 0 +; GFX1164GISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX1164GISEL-NEXT: s_endpgm +; +; GFX1132DAGISEL-LABEL: const_value: +; GFX1132DAGISEL: ; %bb.0: ; %entry +; GFX1132DAGISEL-NEXT: s_load_b64 s[0:1], s[0:1], 0x24 +; GFX1132DAGISEL-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x7b +; GFX1132DAGISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX1132DAGISEL-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX1132DAGISEL-NEXT: s_nop 0 +; GFX1132DAGISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX1132DAGISEL-NEXT: s_endpgm +; +; GFX1132GISEL-LABEL: const_value: +; GFX1132GISEL: ; %bb.0: ; %entry +; GFX1132GISEL-NEXT: s_load_b64 s[0:1], s[0:1], 0x24 +; GFX1132GISEL-NEXT: v_dual_mov_b32 v0, 0x7b :: v_dual_mov_b32 v1, 0 +; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX1132GISEL-NEXT: global_store_b32 v1, v0, s[0:1] +; GFX1132GISEL-NEXT: s_nop 0 +; GFX1132GISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX1132GISEL-NEXT: s_endpgm +entry: + %result = call i32 @llvm.amdgcn.wave.reduce.umin.i32(i32 123, i32 1) + store i32 %result, ptr addrspace(1) %out + ret void +} + +define amdgpu_kernel void @poison_value(ptr addrspace(1) %out, i32 %in) { +; GFX8DAGISEL-LABEL: poison_value: +; GFX8DAGISEL: ; %bb.0: ; %entry +; GFX8DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX8DAGISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX8DAGISEL-NEXT: v_mov_b32_e32 v0, s0 +; GFX8DAGISEL-NEXT: v_mov_b32_e32 v1, s1 +; GFX8DAGISEL-NEXT: flat_store_dword v[0:1], v0 +; GFX8DAGISEL-NEXT: s_endpgm +; +; GFX8GISEL-LABEL: poison_value: +; GFX8GISEL: 
; %bb.0: ; %entry +; GFX8GISEL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX8GISEL-NEXT: v_mov_b32_e32 v0, s0 +; GFX8GISEL-NEXT: v_mov_b32_e32 v1, s1 +; GFX8GISEL-NEXT: flat_store_dword v[0:1], v0 +; GFX8GISEL-NEXT: s_endpgm +; +; GFX9DAGISEL-LABEL: poison_value: +; GFX9DAGISEL: ; %bb.0: ; %entry +; GFX9DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX9DAGISEL-NEXT: v_mov_b32_e32 v0, 0 +; GFX9DAGISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX9DAGISEL-NEXT: global_store_dword v0, v0, s[0:1] +; GFX9DAGISEL-NEXT: s_endpgm +; +; GFX9GISEL-LABEL: poison_value: +; GFX9GISEL: ; %bb.0: ; %entry +; GFX9GISEL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX9GISEL-NEXT: v_mov_b32_e32 v0, 0 +; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX9GISEL-NEXT: global_store_dword v0, v0, s[0:1] +; GFX9GISEL-NEXT: s_endpgm +; +; GFX10DAGISEL-LABEL: poison_value: +; GFX10DAGISEL: ; %bb.0: ; %entry +; GFX10DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX10DAGISEL-NEXT: v_mov_b32_e32 v0, 0 +; GFX10DAGISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX10DAGISEL-NEXT: global_store_dword v0, v0, s[0:1] +; GFX10DAGISEL-NEXT: s_endpgm +; +; GFX10GISEL-LABEL: poison_value: +; GFX10GISEL: ; %bb.0: ; %entry +; GFX10GISEL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX10GISEL-NEXT: v_mov_b32_e32 v0, 0 +; GFX10GISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX10GISEL-NEXT: global_store_dword v0, v0, s[0:1] +; GFX10GISEL-NEXT: s_endpgm +; +; GFX11DAGISEL-LABEL: poison_value: +; GFX11DAGISEL: ; %bb.0: ; %entry +; GFX11DAGISEL-NEXT: s_load_b64 s[0:1], s[0:1], 0x24 +; GFX11DAGISEL-NEXT: v_mov_b32_e32 v0, 0 +; GFX11DAGISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX11DAGISEL-NEXT: global_store_b32 v0, v0, s[0:1] +; GFX11DAGISEL-NEXT: s_nop 0 +; GFX11DAGISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11DAGISEL-NEXT: s_endpgm +; +; GFX11GISEL-LABEL: poison_value: +; GFX11GISEL: ; %bb.0: ; %entry +; GFX11GISEL-NEXT: s_load_b64 s[0:1], s[0:1], 0x24 +; GFX11GISEL-NEXT: v_mov_b32_e32 v0, 0 +; 
GFX11GISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX11GISEL-NEXT: global_store_b32 v0, v0, s[0:1] +; GFX11GISEL-NEXT: s_nop 0 +; GFX11GISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX11GISEL-NEXT: s_endpgm +entry: + %result = call i32 @llvm.amdgcn.wave.reduce.umin.i32(i32 poison, i32 1) + store i32 %result, ptr addrspace(1) %out + ret void +} + +define amdgpu_kernel void @divergent_value(ptr addrspace(1) %out, i32 %in) { +; GFX8DAGISEL-LABEL: divergent_value: +; GFX8DAGISEL: ; %bb.0: ; %entry +; GFX8DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX8DAGISEL-NEXT: s_mov_b64 s[2:3], exec +; GFX8DAGISEL-NEXT: s_mov_b32 s4, -1 +; GFX8DAGISEL-NEXT: .LBB3_1: ; =>This Inner Loop Header: Depth=1 +; GFX8DAGISEL-NEXT: s_ff1_i32_b64 s5, s[2:3] +; GFX8DAGISEL-NEXT: v_readlane_b32 s6, v0, s5 +; GFX8DAGISEL-NEXT: s_bitset0_b64 s[2:3], s5 +; GFX8DAGISEL-NEXT: s_min_u32 s4, s4, s6 +; GFX8DAGISEL-NEXT: s_cmp_lg_u64 s[2:3], 0 +; GFX8DAGISEL-NEXT: s_cbranch_scc1 .LBB3_1 +; GFX8DAGISEL-NEXT: ; %bb.2: +; GFX8DAGISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX8DAGISEL-NEXT: v_mov_b32_e32 v0, s0 +; GFX8DAGISEL-NEXT: v_mov_b32_e32 v1, s1 +; GFX8DAGISEL-NEXT: v_mov_b32_e32 v2, s4 +; GFX8DAGISEL-NEXT: flat_store_dword v[0:1], v2 +; GFX8DAGISEL-NEXT: s_endpgm +; +; GFX8GISEL-LABEL: divergent_value: +; GFX8GISEL: ; %bb.0: ; %entry +; GFX8GISEL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX8GISEL-NEXT: s_mov_b64 s[2:3], exec +; GFX8GISEL-NEXT: s_mov_b32 s4, -1 +; GFX8GISEL-NEXT: .LBB3_1: ; =>This Inner Loop Header: Depth=1 +; GFX8GISEL-NEXT: s_ff1_i32_b64 s5, s[2:3] +; GFX8GISEL-NEXT: v_readlane_b32 s6, v0, s5 +; GFX8GISEL-NEXT: s_bitset0_b64 s[2:3], s5 +; GFX8GISEL-NEXT: s_min_u32 s4, s4, s6 +; GFX8GISEL-NEXT: s_cmp_lg_u64 s[2:3], 0 +; GFX8GISEL-NEXT: s_cbranch_scc1 .LBB3_1 +; GFX8GISEL-NEXT: ; %bb.2: +; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX8GISEL-NEXT: v_mov_b32_e32 v0, s0 +; GFX8GISEL-NEXT: v_mov_b32_e32 v2, s4 +; GFX8GISEL-NEXT: v_mov_b32_e32 v1, s1 +; GFX8GISEL-NEXT: flat_store_dword 
v[0:1], v2 +; GFX8GISEL-NEXT: s_endpgm +; +; GFX9DAGISEL-LABEL: divergent_value: +; GFX9DAGISEL: ; %bb.0: ; %entry +; GFX9DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX9DAGISEL-NEXT: v_mov_b32_e32 v1, 0 +; GFX9DAGISEL-NEXT: s_mov_b64 s[2:3], exec +; GFX9DAGISEL-NEXT: s_mov_b32 s4, -1 +; GFX9DAGISEL-NEXT: .LBB3_1: ; =>This Inner Loop Header: Depth=1 +; GFX9DAGISEL-NEXT: s_ff1_i32_b64 s5, s[2:3] +; GFX9DAGISEL-NEXT: v_readlane_b32 s6, v0, s5 +; GFX9DAGISEL-NEXT: s_bitset0_b64 s[2:3], s5 +; GFX9DAGISEL-NEXT: s_min_u32 s4, s4, s6 +; GFX9DAGISEL-NEXT: s_cmp_lg_u64 s[2:3], 0 +; GFX9DAGISEL-NEXT: s_cbranch_scc1 .LBB3_1 +; GFX9DAGISEL-NEXT: ; %bb.2: +; GFX9DAGISEL-NEXT: v_mov_b32_e32 v0, s4 +; GFX9DAGISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX9DAGISEL-NEXT: global_store_dword v1, v0, s[0:1] +; GFX9DAGISEL-NEXT: s_endpgm +; +; GFX9GISEL-LABEL: divergent_value: +; GFX9GISEL: ; %bb.0: ; %entry +; GFX9GISEL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX9GISEL-NEXT: s_mov_b64 s[2:3], exec +; GFX9GISEL-NEXT: s_mov_b32 s4, -1 +; GFX9GISEL-NEXT: .LBB3_1: ; =>This Inner Loop Header: Depth=1 +; GFX9GISEL-NEXT: s_ff1_i32_b64 s5, s[2:3] +; GFX9GISEL-NEXT: v_readlane_b32 s6, v0, s5 +; GFX9GISEL-NEXT: s_bitset0_b64 s[2:3], s5 +; GFX9GISEL-NEXT: s_min_u32 s4, s4, s6 +; GFX9GISEL-NEXT: s_cmp_lg_u64 s[2:3], 0 +; GFX9GISEL-NEXT: s_cbranch_scc1 .LBB3_1 +; GFX9GISEL-NEXT: ; %bb.2: +; GFX9GISEL-NEXT: v_mov_b32_e32 v0, s4 +; GFX9GISEL-NEXT: v_mov_b32_e32 v1, 0 +; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX9GISEL-NEXT: global_store_dword v1, v0, s[0:1] +; GFX9GISEL-NEXT: s_endpgm +; +; GFX1064DAGISEL-LABEL: divergent_value: +; GFX1064DAGISEL: ; %bb.0: ; %entry +; GFX1064DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v1, 0 +; GFX1064DAGISEL-NEXT: s_mov_b64 s[2:3], exec +; GFX1064DAGISEL-NEXT: s_mov_b32 s4, -1 +; GFX1064DAGISEL-NEXT: .LBB3_1: ; =>This Inner Loop Header: Depth=1 +; GFX1064DAGISEL-NEXT: s_ff1_i32_b64 s5, s[2:3] +; GFX1064DAGISEL-NEXT: 
v_readlane_b32 s6, v0, s5 +; GFX1064DAGISEL-NEXT: s_bitset0_b64 s[2:3], s5 +; GFX1064DAGISEL-NEXT: s_min_u32 s4, s4, s6 +; GFX1064DAGISEL-NEXT: s_cmp_lg_u64 s[2:3], 0 +; GFX1064DAGISEL-NEXT: s_cbranch_scc1 .LBB3_1 +; GFX1064DAGISEL-NEXT: ; %bb.2: +; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v0, s4 +; GFX1064DAGISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX1064DAGISEL-NEXT: global_store_dword v1, v0, s[0:1] +; GFX1064DAGISEL-NEXT: s_endpgm +; +; GFX1064GISEL-LABEL: divergent_value: +; GFX1064GISEL: ; %bb.0: ; %entry +; GFX1064GISEL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX1064GISEL-NEXT: s_mov_b64 s[2:3], exec +; GFX1064GISEL-NEXT: s_mov_b32 s4, -1 +; GFX1064GISEL-NEXT: .LBB3_1: ; =>This Inner Loop Header: Depth=1 +; GFX1064GISEL-NEXT: s_ff1_i32_b64 s5, s[2:3] +; GFX1064GISEL-NEXT: v_readlane_b32 s6, v0, s5 +; GFX1064GISEL-NEXT: s_bitset0_b64 s[2:3], s5 +; GFX1064GISEL-NEXT: s_min_u32 s4, s4, s6 +; GFX1064GISEL-NEXT: s_cmp_lg_u64 s[2:3], 0 +; GFX1064GISEL-NEXT: s_cbranch_scc1 .LBB3_1 +; GFX1064GISEL-NEXT: ; %bb.2: +; GFX1064GISEL-NEXT: v_mov_b32_e32 v0, s4 +; GFX1064GISEL-NEXT: v_mov_b32_e32 v1, 0 +; GFX1064GISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX1064GISEL-NEXT: global_store_dword v1, v0, s[0:1] +; GFX1064GISEL-NEXT: s_endpgm +; +; GFX1032DAGISEL-LABEL: divergent_value: +; GFX1032DAGISEL: ; %bb.0: ; %entry +; GFX1032DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v1, 0 +; GFX1032DAGISEL-NEXT: s_mov_b32 s3, exec_lo +; GFX1032DAGISEL-NEXT: s_mov_b32 s2, -1 +; GFX1032DAGISEL-NEXT: .LBB3_1: ; =>This Inner Loop Header: Depth=1 +; GFX1032DAGISEL-NEXT: s_ff1_i32_b32 s4, s3 +; GFX1032DAGISEL-NEXT: v_readlane_b32 s5, v0, s4 +; GFX1032DAGISEL-NEXT: s_bitset0_b32 s3, s4 +; GFX1032DAGISEL-NEXT: s_min_u32 s2, s2, s5 +; GFX1032DAGISEL-NEXT: s_cmp_lg_u32 s3, 0 +; GFX1032DAGISEL-NEXT: s_cbranch_scc1 .LBB3_1 +; GFX1032DAGISEL-NEXT: ; %bb.2: +; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v0, s2 +; GFX1032DAGISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX1032DAGISEL-NEXT: 
global_store_dword v1, v0, s[0:1] +; GFX1032DAGISEL-NEXT: s_endpgm +; +; GFX1032GISEL-LABEL: divergent_value: +; GFX1032GISEL: ; %bb.0: ; %entry +; GFX1032GISEL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX1032GISEL-NEXT: s_mov_b32 s3, exec_lo +; GFX1032GISEL-NEXT: s_mov_b32 s2, -1 +; GFX1032GISEL-NEXT: .LBB3_1: ; =>This Inner Loop Header: Depth=1 +; GFX1032GISEL-NEXT: s_ff1_i32_b32 s4, s3 +; GFX1032GISEL-NEXT: v_readlane_b32 s5, v0, s4 +; GFX1032GISEL-NEXT: s_bitset0_b32 s3, s4 +; GFX1032GISEL-NEXT: s_min_u32 s2, s2, s5 +; GFX1032GISEL-NEXT: s_cmp_lg_u32 s3, 0 +; GFX1032GISEL-NEXT: s_cbranch_scc1 .LBB3_1 +; GFX1032GISEL-NEXT: ; %bb.2: +; GFX1032GISEL-NEXT: v_mov_b32_e32 v0, s2 +; GFX1032GISEL-NEXT: v_mov_b32_e32 v1, 0 +; GFX1032GISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX1032GISEL-NEXT: global_store_dword v1, v0, s[0:1] +; GFX1032GISEL-NEXT: s_endpgm +; +; GFX1164DAGISEL-LABEL: divergent_value: +; GFX1164DAGISEL: ; %bb.0: ; %entry +; GFX1164DAGISEL-NEXT: s_load_b64 s[0:1], s[0:1], 0x24 +; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v1, 0 +; GFX1164DAGISEL-NEXT: s_mov_b64 s[2:3], exec +; GFX1164DAGISEL-NEXT: s_mov_b32 s4, -1 +; GFX1164DAGISEL-NEXT: .LBB3_1: ; =>This Inner Loop Header: Depth=1 +; GFX1164DAGISEL-NEXT: s_ctz_i32_b64 s5, s[2:3] +; GFX1164DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1164DAGISEL-NEXT: v_readlane_b32 s6, v0, s5 +; GFX1164DAGISEL-NEXT: s_bitset0_b64 s[2:3], s5 +; GFX1164DAGISEL-NEXT: s_min_u32 s4, s4, s6 +; GFX1164DAGISEL-NEXT: s_cmp_lg_u64 s[2:3], 0 +; GFX1164DAGISEL-NEXT: s_cbranch_scc1 .LBB3_1 +; GFX1164DAGISEL-NEXT: ; %bb.2: +; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v0, s4 +; GFX1164DAGISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX1164DAGISEL-NEXT: global_store_b32 v1, v0, s[0:1] +; GFX1164DAGISEL-NEXT: s_nop 0 +; GFX1164DAGISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX1164DAGISEL-NEXT: s_endpgm +; +; GFX1164GISEL-LABEL: divergent_value: +; GFX1164GISEL: ; %bb.0: ; %entry +; GFX1164GISEL-NEXT: 
s_load_b64 s[0:1], s[0:1], 0x24 +; GFX1164GISEL-NEXT: s_mov_b64 s[2:3], exec +; GFX1164GISEL-NEXT: s_mov_b32 s4, -1 +; GFX1164GISEL-NEXT: .LBB3_1: ; =>This Inner Loop Header: Depth=1 +; GFX1164GISEL-NEXT: s_ctz_i32_b64 s5, s[2:3] +; GFX1164GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1164GISEL-NEXT: v_readlane_b32 s6, v0, s5 +; GFX1164GISEL-NEXT: s_bitset0_b64 s[2:3], s5 +; GFX1164GISEL-NEXT: s_min_u32 s4, s4, s6 +; GFX1164GISEL-NEXT: s_cmp_lg_u64 s[2:3], 0 +; GFX1164GISEL-NEXT: s_cbranch_scc1 .LBB3_1 +; GFX1164GISEL-NEXT: ; %bb.2: +; GFX1164GISEL-NEXT: v_mov_b32_e32 v0, s4 +; GFX1164GISEL-NEXT: v_mov_b32_e32 v1, 0 +; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX1164GISEL-NEXT: global_store_b32 v1, v0, s[0:1] +; GFX1164GISEL-NEXT: s_nop 0 +; GFX1164GISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX1164GISEL-NEXT: s_endpgm +; +; GFX1132DAGISEL-LABEL: divergent_value: +; GFX1132DAGISEL: ; %bb.0: ; %entry +; GFX1132DAGISEL-NEXT: s_load_b64 s[0:1], s[0:1], 0x24 +; GFX1132DAGISEL-NEXT: v_mov_b32_e32 v1, 0 +; GFX1132DAGISEL-NEXT: s_mov_b32 s3, exec_lo +; GFX1132DAGISEL-NEXT: s_mov_b32 s2, -1 +; GFX1132DAGISEL-NEXT: .LBB3_1: ; =>This Inner Loop Header: Depth=1 +; GFX1132DAGISEL-NEXT: s_ctz_i32_b32 s4, s3 +; GFX1132DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1132DAGISEL-NEXT: v_readlane_b32 s5, v0, s4 +; GFX1132DAGISEL-NEXT: s_bitset0_b32 s3, s4 +; GFX1132DAGISEL-NEXT: s_min_u32 s2, s2, s5 +; GFX1132DAGISEL-NEXT: s_cmp_lg_u32 s3, 0 +; GFX1132DAGISEL-NEXT: s_cbranch_scc1 .LBB3_1 +; GFX1132DAGISEL-NEXT: ; %bb.2: +; GFX1132DAGISEL-NEXT: v_mov_b32_e32 v0, s2 +; GFX1132DAGISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX1132DAGISEL-NEXT: global_store_b32 v1, v0, s[0:1] +; GFX1132DAGISEL-NEXT: s_nop 0 +; GFX1132DAGISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX1132DAGISEL-NEXT: s_endpgm +; +; GFX1132GISEL-LABEL: divergent_value: +; GFX1132GISEL: ; %bb.0: ; %entry +; 
GFX1132GISEL-NEXT: s_load_b64 s[0:1], s[0:1], 0x24 +; GFX1132GISEL-NEXT: s_mov_b32 s3, exec_lo +; GFX1132GISEL-NEXT: s_mov_b32 s2, -1 +; GFX1132GISEL-NEXT: .LBB3_1: ; =>This Inner Loop Header: Depth=1 +; GFX1132GISEL-NEXT: s_ctz_i32_b32 s4, s3 +; GFX1132GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1132GISEL-NEXT: v_readlane_b32 s5, v0, s4 +; GFX1132GISEL-NEXT: s_bitset0_b32 s3, s4 +; GFX1132GISEL-NEXT: s_min_u32 s2, s2, s5 +; GFX1132GISEL-NEXT: s_cmp_lg_u32 s3, 0 +; GFX1132GISEL-NEXT: s_cbranch_scc1 .LBB3_1 +; GFX1132GISEL-NEXT: ; %bb.2: +; GFX1132GISEL-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, 0 +; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX1132GISEL-NEXT: global_store_b32 v1, v0, s[0:1] +; GFX1132GISEL-NEXT: s_nop 0 +; GFX1132GISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX1132GISEL-NEXT: s_endpgm +entry: + %id.x = call i32 @llvm.amdgcn.workitem.id.x() + %result = call i32 @llvm.amdgcn.wave.reduce.umin.i32(i32 %id.x, i32 1) + store i32 %result, ptr addrspace(1) %out + ret void +} + +define amdgpu_kernel void @divergent_cfg(ptr addrspace(1) %out, i32 %in) { +; GFX8DAGISEL-LABEL: divergent_cfg: +; GFX8DAGISEL: ; %bb.0: ; %entry +; GFX8DAGISEL-NEXT: v_cmp_lt_u32_e32 vcc, 15, v0 +; GFX8DAGISEL-NEXT: ; implicit-def: $sgpr4 +; GFX8DAGISEL-NEXT: s_and_saveexec_b64 s[2:3], vcc +; GFX8DAGISEL-NEXT: s_xor_b64 s[2:3], exec, s[2:3] +; GFX8DAGISEL-NEXT: s_cbranch_execz .LBB4_2 +; GFX8DAGISEL-NEXT: ; %bb.1: ; %else +; GFX8DAGISEL-NEXT: s_load_dword s4, s[0:1], 0x2c +; GFX8DAGISEL-NEXT: ; implicit-def: $vgpr0 +; GFX8DAGISEL-NEXT: .LBB4_2: ; %Flow +; GFX8DAGISEL-NEXT: s_or_saveexec_b64 s[2:3], s[2:3] +; GFX8DAGISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX8DAGISEL-NEXT: v_mov_b32_e32 v1, s4 +; GFX8DAGISEL-NEXT: s_xor_b64 exec, exec, s[2:3] +; GFX8DAGISEL-NEXT: s_cbranch_execz .LBB4_6 +; GFX8DAGISEL-NEXT: ; %bb.3: ; %if +; GFX8DAGISEL-NEXT: s_mov_b64 s[4:5], exec +; GFX8DAGISEL-NEXT: s_mov_b32 s6, -1 +; GFX8DAGISEL-NEXT: 
.LBB4_4: ; =>This Inner Loop Header: Depth=1 +; GFX8DAGISEL-NEXT: s_ff1_i32_b64 s7, s[4:5] +; GFX8DAGISEL-NEXT: v_readlane_b32 s8, v0, s7 +; GFX8DAGISEL-NEXT: s_bitset0_b64 s[4:5], s7 +; GFX8DAGISEL-NEXT: s_min_u32 s6, s6, s8 +; GFX8DAGISEL-NEXT: s_cmp_lg_u64 s[4:5], 0 +; GFX8DAGISEL-NEXT: s_cbranch_scc1 .LBB4_4 +; GFX8DAGISEL-NEXT: ; %bb.5: +; GFX8DAGISEL-NEXT: v_mov_b32_e32 v1, s6 +; GFX8DAGISEL-NEXT: .LBB4_6: ; %endif +; GFX8DAGISEL-NEXT: s_or_b64 exec, exec, s[2:3] +; GFX8DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX8DAGISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX8DAGISEL-NEXT: v_mov_b32_e32 v3, s1 +; GFX8DAGISEL-NEXT: v_mov_b32_e32 v2, s0 +; GFX8DAGISEL-NEXT: flat_store_dword v[2:3], v1 +; GFX8DAGISEL-NEXT: s_endpgm +; +; GFX8GISEL-LABEL: divergent_cfg: +; GFX8GISEL: ; %bb.0: ; %entry +; GFX8GISEL-NEXT: v_cmp_le_u32_e32 vcc, 16, v0 +; GFX8GISEL-NEXT: ; implicit-def: $sgpr6 +; GFX8GISEL-NEXT: s_and_saveexec_b64 s[2:3], vcc +; GFX8GISEL-NEXT: s_xor_b64 s[2:3], exec, s[2:3] +; GFX8GISEL-NEXT: s_cbranch_execz .LBB4_2 +; GFX8GISEL-NEXT: ; %bb.1: ; %else +; GFX8GISEL-NEXT: s_load_dword s4, s[0:1], 0x2c +; GFX8GISEL-NEXT: ; implicit-def: $vgpr0 +; GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX8GISEL-NEXT: s_mov_b32 s6, s4 +; GFX8GISEL-NEXT: .LBB4_2: ; %Flow +; GFX8GISEL-NEXT: s_andn2_saveexec_b64 s[2:3], s[2:3] +; GFX8GISEL-NEXT: s_cbranch_execz .LBB4_5 +; GFX8GISEL-NEXT: ; %bb.3: ; %if +; GFX8GISEL-NEXT: s_mov_b64 s[4:5], exec +; GFX8GISEL-NEXT: s_mov_b32 s6, -1 +; GFX8GISEL-NEXT: .LBB4_4: ; =>This Inner Loop Header: Depth=1 +; GFX8GISEL-NEXT: s_ff1_i32_b64 s7, s[4:5] +; GFX8GISEL-NEXT: v_readlane_b32 s8, v0, s7 +; GFX8GISEL-NEXT: s_bitset0_b64 s[4:5], s7 +; GFX8GISEL-NEXT: s_min_u32 s6, s6, s8 +; GFX8GISEL-NEXT: s_cmp_lg_u64 s[4:5], 0 +; GFX8GISEL-NEXT: s_cbranch_scc1 .LBB4_4 +; GFX8GISEL-NEXT: .LBB4_5: ; %endif +; GFX8GISEL-NEXT: s_or_b64 exec, exec, s[2:3] +; GFX8GISEL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX8GISEL-NEXT: v_mov_b32_e32 v2, s6 +; 
GFX8GISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX8GISEL-NEXT: v_mov_b32_e32 v0, s0 +; GFX8GISEL-NEXT: v_mov_b32_e32 v1, s1 +; GFX8GISEL-NEXT: flat_store_dword v[0:1], v2 +; GFX8GISEL-NEXT: s_endpgm +; +; GFX9DAGISEL-LABEL: divergent_cfg: +; GFX9DAGISEL: ; %bb.0: ; %entry +; GFX9DAGISEL-NEXT: v_cmp_lt_u32_e32 vcc, 15, v0 +; GFX9DAGISEL-NEXT: ; implicit-def: $sgpr4 +; GFX9DAGISEL-NEXT: s_and_saveexec_b64 s[2:3], vcc +; GFX9DAGISEL-NEXT: s_xor_b64 s[2:3], exec, s[2:3] +; GFX9DAGISEL-NEXT: s_cbranch_execz .LBB4_2 +; GFX9DAGISEL-NEXT: ; %bb.1: ; %else +; GFX9DAGISEL-NEXT: s_load_dword s4, s[0:1], 0x2c +; GFX9DAGISEL-NEXT: ; implicit-def: $vgpr0 +; GFX9DAGISEL-NEXT: .LBB4_2: ; %Flow +; GFX9DAGISEL-NEXT: s_or_saveexec_b64 s[2:3], s[2:3] +; GFX9DAGISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX9DAGISEL-NEXT: v_mov_b32_e32 v1, s4 +; GFX9DAGISEL-NEXT: s_xor_b64 exec, exec, s[2:3] +; GFX9DAGISEL-NEXT: s_cbranch_execz .LBB4_6 +; GFX9DAGISEL-NEXT: ; %bb.3: ; %if +; GFX9DAGISEL-NEXT: s_mov_b64 s[4:5], exec +; GFX9DAGISEL-NEXT: s_mov_b32 s6, -1 +; GFX9DAGISEL-NEXT: .LBB4_4: ; =>This Inner Loop Header: Depth=1 +; GFX9DAGISEL-NEXT: s_ff1_i32_b64 s7, s[4:5] +; GFX9DAGISEL-NEXT: v_readlane_b32 s8, v0, s7 +; GFX9DAGISEL-NEXT: s_bitset0_b64 s[4:5], s7 +; GFX9DAGISEL-NEXT: s_min_u32 s6, s6, s8 +; GFX9DAGISEL-NEXT: s_cmp_lg_u64 s[4:5], 0 +; GFX9DAGISEL-NEXT: s_cbranch_scc1 .LBB4_4 +; GFX9DAGISEL-NEXT: ; %bb.5: +; GFX9DAGISEL-NEXT: v_mov_b32_e32 v1, s6 +; GFX9DAGISEL-NEXT: .LBB4_6: ; %endif +; GFX9DAGISEL-NEXT: s_or_b64 exec, exec, s[2:3] +; GFX9DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX9DAGISEL-NEXT: v_mov_b32_e32 v0, 0 +; GFX9DAGISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX9DAGISEL-NEXT: global_store_dword v0, v1, s[0:1] +; GFX9DAGISEL-NEXT: s_endpgm +; +; GFX9GISEL-LABEL: divergent_cfg: +; GFX9GISEL: ; %bb.0: ; %entry +; GFX9GISEL-NEXT: v_cmp_le_u32_e32 vcc, 16, v0 +; GFX9GISEL-NEXT: ; implicit-def: $sgpr6 +; GFX9GISEL-NEXT: s_and_saveexec_b64 s[2:3], vcc +; GFX9GISEL-NEXT: s_xor_b64 s[2:3], 
exec, s[2:3] +; GFX9GISEL-NEXT: s_cbranch_execz .LBB4_2 +; GFX9GISEL-NEXT: ; %bb.1: ; %else +; GFX9GISEL-NEXT: s_load_dword s4, s[0:1], 0x2c +; GFX9GISEL-NEXT: ; implicit-def: $vgpr0 +; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX9GISEL-NEXT: s_mov_b32 s6, s4 +; GFX9GISEL-NEXT: .LBB4_2: ; %Flow +; GFX9GISEL-NEXT: s_andn2_saveexec_b64 s[2:3], s[2:3] +; GFX9GISEL-NEXT: s_cbranch_execz .LBB4_5 +; GFX9GISEL-NEXT: ; %bb.3: ; %if +; GFX9GISEL-NEXT: s_mov_b64 s[4:5], exec +; GFX9GISEL-NEXT: s_mov_b32 s6, -1 +; GFX9GISEL-NEXT: .LBB4_4: ; =>This Inner Loop Header: Depth=1 +; GFX9GISEL-NEXT: s_ff1_i32_b64 s7, s[4:5] +; GFX9GISEL-NEXT: v_readlane_b32 s8, v0, s7 +; GFX9GISEL-NEXT: s_bitset0_b64 s[4:5], s7 +; GFX9GISEL-NEXT: s_min_u32 s6, s6, s8 +; GFX9GISEL-NEXT: s_cmp_lg_u64 s[4:5], 0 +; GFX9GISEL-NEXT: s_cbranch_scc1 .LBB4_4 +; GFX9GISEL-NEXT: .LBB4_5: ; %endif +; GFX9GISEL-NEXT: s_or_b64 exec, exec, s[2:3] +; GFX9GISEL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX9GISEL-NEXT: v_mov_b32_e32 v0, s6 +; GFX9GISEL-NEXT: v_mov_b32_e32 v1, 0 +; GFX9GISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX9GISEL-NEXT: global_store_dword v1, v0, s[0:1] +; GFX9GISEL-NEXT: s_endpgm +; +; GFX1064DAGISEL-LABEL: divergent_cfg: +; GFX1064DAGISEL: ; %bb.0: ; %entry +; GFX1064DAGISEL-NEXT: v_cmp_lt_u32_e32 vcc, 15, v0 +; GFX1064DAGISEL-NEXT: ; implicit-def: $sgpr4 +; GFX1064DAGISEL-NEXT: s_and_saveexec_b64 s[2:3], vcc +; GFX1064DAGISEL-NEXT: s_xor_b64 s[2:3], exec, s[2:3] +; GFX1064DAGISEL-NEXT: s_cbranch_execz .LBB4_2 +; GFX1064DAGISEL-NEXT: ; %bb.1: ; %else +; GFX1064DAGISEL-NEXT: s_load_dword s4, s[0:1], 0x2c +; GFX1064DAGISEL-NEXT: ; implicit-def: $vgpr0 +; GFX1064DAGISEL-NEXT: .LBB4_2: ; %Flow +; GFX1064DAGISEL-NEXT: s_or_saveexec_b64 s[2:3], s[2:3] +; GFX1064DAGISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v1, s4 +; GFX1064DAGISEL-NEXT: s_xor_b64 exec, exec, s[2:3] +; GFX1064DAGISEL-NEXT: s_cbranch_execz .LBB4_6 +; GFX1064DAGISEL-NEXT: ; %bb.3: ; %if +; GFX1064DAGISEL-NEXT: 
s_mov_b64 s[4:5], exec +; GFX1064DAGISEL-NEXT: s_mov_b32 s6, -1 +; GFX1064DAGISEL-NEXT: .LBB4_4: ; =>This Inner Loop Header: Depth=1 +; GFX1064DAGISEL-NEXT: s_ff1_i32_b64 s7, s[4:5] +; GFX1064DAGISEL-NEXT: v_readlane_b32 s8, v0, s7 +; GFX1064DAGISEL-NEXT: s_bitset0_b64 s[4:5], s7 +; GFX1064DAGISEL-NEXT: s_min_u32 s6, s6, s8 +; GFX1064DAGISEL-NEXT: s_cmp_lg_u64 s[4:5], 0 +; GFX1064DAGISEL-NEXT: s_cbranch_scc1 .LBB4_4 +; GFX1064DAGISEL-NEXT: ; %bb.5: +; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v1, s6 +; GFX1064DAGISEL-NEXT: .LBB4_6: ; %endif +; GFX1064DAGISEL-NEXT: s_or_b64 exec, exec, s[2:3] +; GFX1064DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX1064DAGISEL-NEXT: v_mov_b32_e32 v0, 0 +; GFX1064DAGISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX1064DAGISEL-NEXT: global_store_dword v0, v1, s[0:1] +; GFX1064DAGISEL-NEXT: s_endpgm +; +; GFX1064GISEL-LABEL: divergent_cfg: +; GFX1064GISEL: ; %bb.0: ; %entry +; GFX1064GISEL-NEXT: v_cmp_le_u32_e32 vcc, 16, v0 +; GFX1064GISEL-NEXT: ; implicit-def: $sgpr6 +; GFX1064GISEL-NEXT: s_and_saveexec_b64 s[2:3], vcc +; GFX1064GISEL-NEXT: s_xor_b64 s[2:3], exec, s[2:3] +; GFX1064GISEL-NEXT: s_cbranch_execz .LBB4_2 +; GFX1064GISEL-NEXT: ; %bb.1: ; %else +; GFX1064GISEL-NEXT: s_load_dword s4, s[0:1], 0x2c +; GFX1064GISEL-NEXT: ; implicit-def: $vgpr0 +; GFX1064GISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX1064GISEL-NEXT: s_mov_b32 s6, s4 +; GFX1064GISEL-NEXT: .LBB4_2: ; %Flow +; GFX1064GISEL-NEXT: s_andn2_saveexec_b64 s[2:3], s[2:3] +; GFX1064GISEL-NEXT: s_cbranch_execz .LBB4_5 +; GFX1064GISEL-NEXT: ; %bb.3: ; %if +; GFX1064GISEL-NEXT: s_mov_b64 s[4:5], exec +; GFX1064GISEL-NEXT: s_mov_b32 s6, -1 +; GFX1064GISEL-NEXT: .LBB4_4: ; =>This Inner Loop Header: Depth=1 +; GFX1064GISEL-NEXT: s_ff1_i32_b64 s7, s[4:5] +; GFX1064GISEL-NEXT: v_readlane_b32 s8, v0, s7 +; GFX1064GISEL-NEXT: s_bitset0_b64 s[4:5], s7 +; GFX1064GISEL-NEXT: s_min_u32 s6, s6, s8 +; GFX1064GISEL-NEXT: s_cmp_lg_u64 s[4:5], 0 +; GFX1064GISEL-NEXT: s_cbranch_scc1 .LBB4_4 +; 
GFX1064GISEL-NEXT: .LBB4_5: ; %endif +; GFX1064GISEL-NEXT: s_or_b64 exec, exec, s[2:3] +; GFX1064GISEL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX1064GISEL-NEXT: v_mov_b32_e32 v0, s6 +; GFX1064GISEL-NEXT: v_mov_b32_e32 v1, 0 +; GFX1064GISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX1064GISEL-NEXT: global_store_dword v1, v0, s[0:1] +; GFX1064GISEL-NEXT: s_endpgm +; +; GFX1032DAGISEL-LABEL: divergent_cfg: +; GFX1032DAGISEL: ; %bb.0: ; %entry +; GFX1032DAGISEL-NEXT: v_cmp_lt_u32_e32 vcc_lo, 15, v0 +; GFX1032DAGISEL-NEXT: ; implicit-def: $sgpr3 +; GFX1032DAGISEL-NEXT: s_and_saveexec_b32 s2, vcc_lo +; GFX1032DAGISEL-NEXT: s_xor_b32 s2, exec_lo, s2 +; GFX1032DAGISEL-NEXT: s_cbranch_execz .LBB4_2 +; GFX1032DAGISEL-NEXT: ; %bb.1: ; %else +; GFX1032DAGISEL-NEXT: s_load_dword s3, s[0:1], 0x2c +; GFX1032DAGISEL-NEXT: ; implicit-def: $vgpr0 +; GFX1032DAGISEL-NEXT: .LBB4_2: ; %Flow +; GFX1032DAGISEL-NEXT: s_or_saveexec_b32 s2, s2 +; GFX1032DAGISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v1, s3 +; GFX1032DAGISEL-NEXT: s_xor_b32 exec_lo, exec_lo, s2 +; GFX1032DAGISEL-NEXT: s_cbranch_execz .LBB4_6 +; GFX1032DAGISEL-NEXT: ; %bb.3: ; %if +; GFX1032DAGISEL-NEXT: s_mov_b32 s4, exec_lo +; GFX1032DAGISEL-NEXT: s_mov_b32 s3, -1 +; GFX1032DAGISEL-NEXT: .LBB4_4: ; =>This Inner Loop Header: Depth=1 +; GFX1032DAGISEL-NEXT: s_ff1_i32_b32 s5, s4 +; GFX1032DAGISEL-NEXT: v_readlane_b32 s6, v0, s5 +; GFX1032DAGISEL-NEXT: s_bitset0_b32 s4, s5 +; GFX1032DAGISEL-NEXT: s_min_u32 s3, s3, s6 +; GFX1032DAGISEL-NEXT: s_cmp_lg_u32 s4, 0 +; GFX1032DAGISEL-NEXT: s_cbranch_scc1 .LBB4_4 +; GFX1032DAGISEL-NEXT: ; %bb.5: +; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v1, s3 +; GFX1032DAGISEL-NEXT: .LBB4_6: ; %endif +; GFX1032DAGISEL-NEXT: s_or_b32 exec_lo, exec_lo, s2 +; GFX1032DAGISEL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX1032DAGISEL-NEXT: v_mov_b32_e32 v0, 0 +; GFX1032DAGISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX1032DAGISEL-NEXT: global_store_dword v0, v1, s[0:1] +; GFX1032DAGISEL-NEXT: 
s_endpgm +; +; GFX1032GISEL-LABEL: divergent_cfg: +; GFX1032GISEL: ; %bb.0: ; %entry +; GFX1032GISEL-NEXT: v_cmp_le_u32_e32 vcc_lo, 16, v0 +; GFX1032GISEL-NEXT: ; implicit-def: $sgpr2 +; GFX1032GISEL-NEXT: s_and_saveexec_b32 s3, vcc_lo +; GFX1032GISEL-NEXT: s_xor_b32 s3, exec_lo, s3 +; GFX1032GISEL-NEXT: s_cbranch_execz .LBB4_2 +; GFX1032GISEL-NEXT: ; %bb.1: ; %else +; GFX1032GISEL-NEXT: s_load_dword s2, s[0:1], 0x2c +; GFX1032GISEL-NEXT: ; implicit-def: $vgpr0 +; GFX1032GISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX1032GISEL-NEXT: s_mov_b32 s2, s2 +; GFX1032GISEL-NEXT: .LBB4_2: ; %Flow +; GFX1032GISEL-NEXT: s_andn2_saveexec_b32 s3, s3 +; GFX1032GISEL-NEXT: s_cbranch_execz .LBB4_5 +; GFX1032GISEL-NEXT: ; %bb.3: ; %if +; GFX1032GISEL-NEXT: s_mov_b32 s4, exec_lo +; GFX1032GISEL-NEXT: s_mov_b32 s2, -1 +; GFX1032GISEL-NEXT: .LBB4_4: ; =>This Inner Loop Header: Depth=1 +; GFX1032GISEL-NEXT: s_ff1_i32_b32 s5, s4 +; GFX1032GISEL-NEXT: v_readlane_b32 s6, v0, s5 +; GFX1032GISEL-NEXT: s_bitset0_b32 s4, s5 +; GFX1032GISEL-NEXT: s_min_u32 s2, s2, s6 +; GFX1032GISEL-NEXT: s_cmp_lg_u32 s4, 0 +; GFX1032GISEL-NEXT: s_cbranch_scc1 .LBB4_4 +; GFX1032GISEL-NEXT: .LBB4_5: ; %endif +; GFX1032GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s3 +; GFX1032GISEL-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 +; GFX1032GISEL-NEXT: v_mov_b32_e32 v0, s2 +; GFX1032GISEL-NEXT: v_mov_b32_e32 v1, 0 +; GFX1032GISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX1032GISEL-NEXT: global_store_dword v1, v0, s[0:1] +; GFX1032GISEL-NEXT: s_endpgm +; +; GFX1164DAGISEL-LABEL: divergent_cfg: +; GFX1164DAGISEL: ; %bb.0: ; %entry +; GFX1164DAGISEL-NEXT: s_mov_b64 s[2:3], exec +; GFX1164DAGISEL-NEXT: ; implicit-def: $sgpr4 +; GFX1164DAGISEL-NEXT: v_cmpx_lt_u32_e32 15, v0 +; GFX1164DAGISEL-NEXT: s_xor_b64 s[2:3], exec, s[2:3] +; GFX1164DAGISEL-NEXT: s_cbranch_execz .LBB4_2 +; GFX1164DAGISEL-NEXT: ; %bb.1: ; %else +; GFX1164DAGISEL-NEXT: s_load_b32 s4, s[0:1], 0x2c +; GFX1164DAGISEL-NEXT: ; implicit-def: $vgpr0 +; GFX1164DAGISEL-NEXT: .LBB4_2: ; 
%Flow +; GFX1164DAGISEL-NEXT: s_or_saveexec_b64 s[2:3], s[2:3] +; GFX1164DAGISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v1, s4 +; GFX1164DAGISEL-NEXT: s_xor_b64 exec, exec, s[2:3] +; GFX1164DAGISEL-NEXT: s_cbranch_execz .LBB4_6 +; GFX1164DAGISEL-NEXT: ; %bb.3: ; %if +; GFX1164DAGISEL-NEXT: s_mov_b64 s[4:5], exec +; GFX1164DAGISEL-NEXT: s_mov_b32 s6, -1 +; GFX1164DAGISEL-NEXT: .LBB4_4: ; =>This Inner Loop Header: Depth=1 +; GFX1164DAGISEL-NEXT: s_ctz_i32_b64 s7, s[4:5] +; GFX1164DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1164DAGISEL-NEXT: v_readlane_b32 s8, v0, s7 +; GFX1164DAGISEL-NEXT: s_bitset0_b64 s[4:5], s7 +; GFX1164DAGISEL-NEXT: s_min_u32 s6, s6, s8 +; GFX1164DAGISEL-NEXT: s_cmp_lg_u64 s[4:5], 0 +; GFX1164DAGISEL-NEXT: s_cbranch_scc1 .LBB4_4 +; GFX1164DAGISEL-NEXT: ; %bb.5: +; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v1, s6 +; GFX1164DAGISEL-NEXT: .LBB4_6: ; %endif +; GFX1164DAGISEL-NEXT: s_or_b64 exec, exec, s[2:3] +; GFX1164DAGISEL-NEXT: s_load_b64 s[0:1], s[0:1], 0x24 +; GFX1164DAGISEL-NEXT: v_mov_b32_e32 v0, 0 +; GFX1164DAGISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX1164DAGISEL-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX1164DAGISEL-NEXT: s_nop 0 +; GFX1164DAGISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX1164DAGISEL-NEXT: s_endpgm +; +; GFX1164GISEL-LABEL: divergent_cfg: +; GFX1164GISEL: ; %bb.0: ; %entry +; GFX1164GISEL-NEXT: s_mov_b64 s[2:3], exec +; GFX1164GISEL-NEXT: ; implicit-def: $sgpr6 +; GFX1164GISEL-NEXT: v_cmpx_le_u32_e32 16, v0 +; GFX1164GISEL-NEXT: s_xor_b64 s[2:3], exec, s[2:3] +; GFX1164GISEL-NEXT: s_cbranch_execz .LBB4_2 +; GFX1164GISEL-NEXT: ; %bb.1: ; %else +; GFX1164GISEL-NEXT: s_load_b32 s4, s[0:1], 0x2c +; GFX1164GISEL-NEXT: ; implicit-def: $vgpr0 +; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX1164GISEL-NEXT: s_mov_b32 s6, s4 +; GFX1164GISEL-NEXT: .LBB4_2: ; %Flow +; GFX1164GISEL-NEXT: s_and_not1_saveexec_b64 s[2:3], s[2:3] +; GFX1164GISEL-NEXT: 
s_cbranch_execz .LBB4_5 +; GFX1164GISEL-NEXT: ; %bb.3: ; %if +; GFX1164GISEL-NEXT: s_mov_b64 s[4:5], exec +; GFX1164GISEL-NEXT: s_mov_b32 s6, -1 +; GFX1164GISEL-NEXT: .LBB4_4: ; =>This Inner Loop Header: Depth=1 +; GFX1164GISEL-NEXT: s_ctz_i32_b64 s7, s[4:5] +; GFX1164GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1164GISEL-NEXT: v_readlane_b32 s8, v0, s7 +; GFX1164GISEL-NEXT: s_bitset0_b64 s[4:5], s7 +; GFX1164GISEL-NEXT: s_min_u32 s6, s6, s8 +; GFX1164GISEL-NEXT: s_cmp_lg_u64 s[4:5], 0 +; GFX1164GISEL-NEXT: s_cbranch_scc1 .LBB4_4 +; GFX1164GISEL-NEXT: .LBB4_5: ; %endif +; GFX1164GISEL-NEXT: s_or_b64 exec, exec, s[2:3] +; GFX1164GISEL-NEXT: s_load_b64 s[0:1], s[0:1], 0x24 +; GFX1164GISEL-NEXT: v_mov_b32_e32 v0, s6 +; GFX1164GISEL-NEXT: v_mov_b32_e32 v1, 0 +; GFX1164GISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX1164GISEL-NEXT: global_store_b32 v1, v0, s[0:1] +; GFX1164GISEL-NEXT: s_nop 0 +; GFX1164GISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX1164GISEL-NEXT: s_endpgm +; +; GFX1132DAGISEL-LABEL: divergent_cfg: +; GFX1132DAGISEL: ; %bb.0: ; %entry +; GFX1132DAGISEL-NEXT: s_mov_b32 s2, exec_lo +; GFX1132DAGISEL-NEXT: ; implicit-def: $sgpr3 +; GFX1132DAGISEL-NEXT: v_cmpx_lt_u32_e32 15, v0 +; GFX1132DAGISEL-NEXT: s_xor_b32 s2, exec_lo, s2 +; GFX1132DAGISEL-NEXT: s_cbranch_execz .LBB4_2 +; GFX1132DAGISEL-NEXT: ; %bb.1: ; %else +; GFX1132DAGISEL-NEXT: s_load_b32 s3, s[0:1], 0x2c +; GFX1132DAGISEL-NEXT: ; implicit-def: $vgpr0 +; GFX1132DAGISEL-NEXT: .LBB4_2: ; %Flow +; GFX1132DAGISEL-NEXT: s_or_saveexec_b32 s2, s2 +; GFX1132DAGISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX1132DAGISEL-NEXT: v_mov_b32_e32 v1, s3 +; GFX1132DAGISEL-NEXT: s_xor_b32 exec_lo, exec_lo, s2 +; GFX1132DAGISEL-NEXT: s_cbranch_execz .LBB4_6 +; GFX1132DAGISEL-NEXT: ; %bb.3: ; %if +; GFX1132DAGISEL-NEXT: s_mov_b32 s4, exec_lo +; GFX1132DAGISEL-NEXT: s_mov_b32 s3, -1 +; GFX1132DAGISEL-NEXT: .LBB4_4: ; =>This Inner Loop Header: Depth=1 +; GFX1132DAGISEL-NEXT: 
s_ctz_i32_b32 s5, s4 +; GFX1132DAGISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1132DAGISEL-NEXT: v_readlane_b32 s6, v0, s5 +; GFX1132DAGISEL-NEXT: s_bitset0_b32 s4, s5 +; GFX1132DAGISEL-NEXT: s_min_u32 s3, s3, s6 +; GFX1132DAGISEL-NEXT: s_cmp_lg_u32 s4, 0 +; GFX1132DAGISEL-NEXT: s_cbranch_scc1 .LBB4_4 +; GFX1132DAGISEL-NEXT: ; %bb.5: +; GFX1132DAGISEL-NEXT: v_mov_b32_e32 v1, s3 +; GFX1132DAGISEL-NEXT: .LBB4_6: ; %endif +; GFX1132DAGISEL-NEXT: s_or_b32 exec_lo, exec_lo, s2 +; GFX1132DAGISEL-NEXT: s_load_b64 s[0:1], s[0:1], 0x24 +; GFX1132DAGISEL-NEXT: v_mov_b32_e32 v0, 0 +; GFX1132DAGISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX1132DAGISEL-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX1132DAGISEL-NEXT: s_nop 0 +; GFX1132DAGISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX1132DAGISEL-NEXT: s_endpgm +; +; GFX1132GISEL-LABEL: divergent_cfg: +; GFX1132GISEL: ; %bb.0: ; %entry +; GFX1132GISEL-NEXT: s_mov_b32 s3, exec_lo +; GFX1132GISEL-NEXT: ; implicit-def: $sgpr2 +; GFX1132GISEL-NEXT: v_cmpx_le_u32_e32 16, v0 +; GFX1132GISEL-NEXT: s_xor_b32 s3, exec_lo, s3 +; GFX1132GISEL-NEXT: s_cbranch_execz .LBB4_2 +; GFX1132GISEL-NEXT: ; %bb.1: ; %else +; GFX1132GISEL-NEXT: s_load_b32 s2, s[0:1], 0x2c +; GFX1132GISEL-NEXT: ; implicit-def: $vgpr0 +; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX1132GISEL-NEXT: s_mov_b32 s2, s2 +; GFX1132GISEL-NEXT: .LBB4_2: ; %Flow +; GFX1132GISEL-NEXT: s_and_not1_saveexec_b32 s3, s3 +; GFX1132GISEL-NEXT: s_cbranch_execz .LBB4_5 +; GFX1132GISEL-NEXT: ; %bb.3: ; %if +; GFX1132GISEL-NEXT: s_mov_b32 s4, exec_lo +; GFX1132GISEL-NEXT: s_mov_b32 s2, -1 +; GFX1132GISEL-NEXT: .LBB4_4: ; =>This Inner Loop Header: Depth=1 +; GFX1132GISEL-NEXT: s_ctz_i32_b32 s5, s4 +; GFX1132GISEL-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1) +; GFX1132GISEL-NEXT: v_readlane_b32 s6, v0, s5 +; GFX1132GISEL-NEXT: s_bitset0_b32 s4, s5 +; GFX1132GISEL-NEXT: s_min_u32 s2, s2, s6 +; GFX1132GISEL-NEXT: 
s_cmp_lg_u32 s4, 0 +; GFX1132GISEL-NEXT: s_cbranch_scc1 .LBB4_4 +; GFX1132GISEL-NEXT: .LBB4_5: ; %endif +; GFX1132GISEL-NEXT: s_or_b32 exec_lo, exec_lo, s3 +; GFX1132GISEL-NEXT: s_load_b64 s[0:1], s[0:1], 0x24 +; GFX1132GISEL-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v1, 0 +; GFX1132GISEL-NEXT: s_waitcnt lgkmcnt(0) +; GFX1132GISEL-NEXT: global_store_b32 v1, v0, s[0:1] +; GFX1132GISEL-NEXT: s_nop 0 +; GFX1132GISEL-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) +; GFX1132GISEL-NEXT: s_endpgm +entry: + %tid = call i32 @llvm.amdgcn.workitem.id.x() + %d_cmp = icmp ult i32 %tid, 16 + br i1 %d_cmp, label %if, label %else + +if: + %reducedValTid = call i32 @llvm.amdgcn.wave.reduce.umin.i32(i32 %tid, i32 1) + br label %endif + +else: + %reducedValIn = call i32 @llvm.amdgcn.wave.reduce.umin.i32(i32 %in, i32 1) + br label %endif + +endif: + %combine = phi i32 [%reducedValTid, %if], [%reducedValIn, %else] + store i32 %combine, ptr addrspace(1) %out + ret void +} diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.wave.reduce.umax.mir b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.wave.reduce.umax.mir new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.wave.reduce.umax.mir @@ -0,0 +1,80 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 3 +# RUN: llc -march=amdgcn -run-pass=finalize-isel %s -o - | FileCheck -check-prefix=GCN %s + +--- +name: uniform_value +tracksRegLiveness: true +machineFunctionInfo: + isEntryFunction: true +body: | + bb.0.entry: + liveins: $sgpr0_sgpr1 + + ; GCN-LABEL: name: uniform_value + ; GCN: liveins: $sgpr0_sgpr1 + ; GCN-NEXT: {{ $}} + ; GCN-NEXT: [[COPY:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr0_sgpr1 + ; GCN-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec + ; GCN-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM [[COPY]](p4), 36, 0 + ; GCN-NEXT: [[S_LOAD_DWORD_IMM:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM [[COPY]](p4), 44, 0 + ; GCN-NEXT: 
[[S_MOV_B32_:%[0-9]+]]:sgpr_32 = S_MOV_B32 [[S_LOAD_DWORD_IMM]] + ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GCN-NEXT: GLOBAL_STORE_DWORD_SADDR killed [[V_MOV_B32_e32_]], killed [[COPY1]], killed [[S_LOAD_DWORDX2_IMM]], 0, 0, implicit $exec + ; GCN-NEXT: S_ENDPGM 0 + %1:sgpr_64(p4) = COPY $sgpr0_sgpr1 + %4:vgpr_32 = V_MOV_B32_e32 0, implicit $exec + %5:sreg_64_xexec = S_LOAD_DWORDX2_IMM %1(p4), 36, 0 + %6:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM %1(p4), 44, 0 + %7:sgpr_32 = WAVE_REDUCE_UMAX_PSEUDO_U32 killed %6, 1, implicit $exec + %8:vgpr_32 = COPY %7 + GLOBAL_STORE_DWORD_SADDR killed %4, killed %8, killed %5, 0, 0, implicit $exec + S_ENDPGM 0 + +... + +--- +name: divergent_value +machineFunctionInfo: + isEntryFunction: true +body: | + bb.0.entry: + liveins: $vgpr0, $sgpr0_sgpr1 + + ; GCN-LABEL: name: divergent_value + ; GCN: successors: %bb.1(0x80000000) + ; GCN-NEXT: liveins: $vgpr0, $sgpr0_sgpr1 + ; GCN-NEXT: {{ $}} + ; GCN-NEXT: [[COPY:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr0_sgpr1 + ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GCN-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM [[COPY]](p4), 36, 0 + ; GCN-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec + ; GCN-NEXT: [[S_MOV_B64_:%[0-9]+]]:sreg_64_xexec = S_MOV_B64 $exec + ; GCN-NEXT: [[S_MOV_B32_:%[0-9]+]]:sgpr_32 = S_MOV_B32 0 + ; GCN-NEXT: S_BRANCH %bb.1 + ; GCN-NEXT: {{ $}} + ; GCN-NEXT: .1: + ; GCN-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000) + ; GCN-NEXT: {{ $}} + ; GCN-NEXT: [[PHI:%[0-9]+]]:sgpr_32 = PHI [[S_MOV_B32_]], %bb.0, %4, %bb.1 + ; GCN-NEXT: [[PHI1:%[0-9]+]]:sreg_64_xexec = PHI [[S_MOV_B64_]], %bb.0, %10, %bb.1 + ; GCN-NEXT: [[S_FF1_I32_B64_:%[0-9]+]]:sgpr_32 = S_FF1_I32_B64 [[PHI1]] + ; GCN-NEXT: [[V_READLANE_B32_:%[0-9]+]]:sgpr_32 = V_READLANE_B32 [[COPY1]], [[S_FF1_I32_B64_]] + ; GCN-NEXT: [[S_MAX_U32_:%[0-9]+]]:sgpr_32 = S_MAX_U32 [[PHI]], [[V_READLANE_B32_]], implicit-def $scc + ; GCN-NEXT: 
[[S_BITSET0_B64_:%[0-9]+]]:sreg_64_xexec = S_BITSET0_B64 [[S_FF1_I32_B64_]], [[PHI1]] + ; GCN-NEXT: S_CMP_LG_U64 [[S_BITSET0_B64_]], 0, implicit-def $scc + ; GCN-NEXT: S_CBRANCH_SCC1 %bb.1, implicit $scc + ; GCN-NEXT: {{ $}} + ; GCN-NEXT: .2: + ; GCN-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[S_MAX_U32_]] + ; GCN-NEXT: GLOBAL_STORE_DWORD_SADDR killed [[V_MOV_B32_e32_]], killed [[COPY2]], killed [[S_LOAD_DWORDX2_IMM]], 0, 0, implicit $exec + ; GCN-NEXT: S_ENDPGM 0 + %1:sgpr_64(p4) = COPY $sgpr0_sgpr1 + %0:vgpr_32 = COPY $vgpr0 + %4:sreg_64_xexec = S_LOAD_DWORDX2_IMM %1(p4), 36, 0 + %5:vgpr_32 = V_MOV_B32_e32 0, implicit $exec + %6:sgpr_32 = WAVE_REDUCE_UMAX_PSEUDO_U32 %0, 1, implicit $exec + %7:vgpr_32 = COPY %6 + GLOBAL_STORE_DWORD_SADDR killed %5, killed %7, killed %4, 0, 0, implicit $exec + S_ENDPGM 0 + +... diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.wave.reduce.umin.mir b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.wave.reduce.umin.mir new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.wave.reduce.umin.mir @@ -0,0 +1,80 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 3 +# RUN: llc -march=amdgcn -run-pass=finalize-isel %s -o - | FileCheck -check-prefix=GCN %s + +--- +name: uniform_value +tracksRegLiveness: true +machineFunctionInfo: + isEntryFunction: true +body: | + bb.0.entry: + liveins: $sgpr0_sgpr1 + + ; GCN-LABEL: name: uniform_value + ; GCN: liveins: $sgpr0_sgpr1 + ; GCN-NEXT: {{ $}} + ; GCN-NEXT: [[COPY:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr0_sgpr1 + ; GCN-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec + ; GCN-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM [[COPY]](p4), 36, 0 + ; GCN-NEXT: [[S_LOAD_DWORD_IMM:%[0-9]+]]:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM [[COPY]](p4), 44, 0 + ; GCN-NEXT: [[S_MOV_B32_:%[0-9]+]]:sgpr_32 = S_MOV_B32 [[S_LOAD_DWORD_IMM]] + ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]] + ; GCN-NEXT: 
GLOBAL_STORE_DWORD_SADDR killed [[V_MOV_B32_e32_]], killed [[COPY1]], killed [[S_LOAD_DWORDX2_IMM]], 0, 0, implicit $exec + ; GCN-NEXT: S_ENDPGM 0 + %1:sgpr_64(p4) = COPY $sgpr0_sgpr1 + %4:vgpr_32 = V_MOV_B32_e32 0, implicit $exec + %5:sreg_64_xexec = S_LOAD_DWORDX2_IMM %1(p4), 36, 0 + %6:sreg_32_xm0_xexec = S_LOAD_DWORD_IMM %1(p4), 44, 0 + %7:sgpr_32 = WAVE_REDUCE_UMIN_PSEUDO_U32 killed %6, 1, implicit $exec + %8:vgpr_32 = COPY %7 + GLOBAL_STORE_DWORD_SADDR killed %4, killed %8, killed %5, 0, 0, implicit $exec + S_ENDPGM 0 + +... + +--- +name: divergent_value +machineFunctionInfo: + isEntryFunction: true +body: | + bb.0.entry: + liveins: $vgpr0, $sgpr0_sgpr1 + + ; GCN-LABEL: name: divergent_value + ; GCN: successors: %bb.1(0x80000000) + ; GCN-NEXT: liveins: $vgpr0, $sgpr0_sgpr1 + ; GCN-NEXT: {{ $}} + ; GCN-NEXT: [[COPY:%[0-9]+]]:sgpr_64(p4) = COPY $sgpr0_sgpr1 + ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0 + ; GCN-NEXT: [[S_LOAD_DWORDX2_IMM:%[0-9]+]]:sreg_64_xexec = S_LOAD_DWORDX2_IMM [[COPY]](p4), 36, 0 + ; GCN-NEXT: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec + ; GCN-NEXT: [[S_MOV_B64_:%[0-9]+]]:sreg_64_xexec = S_MOV_B64 $exec + ; GCN-NEXT: [[S_MOV_B32_:%[0-9]+]]:sgpr_32 = S_MOV_B32 4294967295 + ; GCN-NEXT: S_BRANCH %bb.1 + ; GCN-NEXT: {{ $}} + ; GCN-NEXT: .1: + ; GCN-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000) + ; GCN-NEXT: {{ $}} + ; GCN-NEXT: [[PHI:%[0-9]+]]:sgpr_32 = PHI [[S_MOV_B32_]], %bb.0, %4, %bb.1 + ; GCN-NEXT: [[PHI1:%[0-9]+]]:sreg_64_xexec = PHI [[S_MOV_B64_]], %bb.0, %10, %bb.1 + ; GCN-NEXT: [[S_FF1_I32_B64_:%[0-9]+]]:sgpr_32 = S_FF1_I32_B64 [[PHI1]] + ; GCN-NEXT: [[V_READLANE_B32_:%[0-9]+]]:sgpr_32 = V_READLANE_B32 [[COPY1]], [[S_FF1_I32_B64_]] + ; GCN-NEXT: [[S_MIN_U32_:%[0-9]+]]:sgpr_32 = S_MIN_U32 [[PHI]], [[V_READLANE_B32_]], implicit-def $scc + ; GCN-NEXT: [[S_BITSET0_B64_:%[0-9]+]]:sreg_64_xexec = S_BITSET0_B64 [[S_FF1_I32_B64_]], [[PHI1]] + ; GCN-NEXT: S_CMP_LG_U64 [[S_BITSET0_B64_]], 0, 
implicit-def $scc + ; GCN-NEXT: S_CBRANCH_SCC1 %bb.1, implicit $scc + ; GCN-NEXT: {{ $}} + ; GCN-NEXT: .2: + ; GCN-NEXT: [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[S_MIN_U32_]] + ; GCN-NEXT: GLOBAL_STORE_DWORD_SADDR killed [[V_MOV_B32_e32_]], killed [[COPY2]], killed [[S_LOAD_DWORDX2_IMM]], 0, 0, implicit $exec + ; GCN-NEXT: S_ENDPGM 0 + %1:sgpr_64(p4) = COPY $sgpr0_sgpr1 + %0:vgpr_32 = COPY $vgpr0 + %4:sreg_64_xexec = S_LOAD_DWORDX2_IMM %1(p4), 36, 0 + %5:vgpr_32 = V_MOV_B32_e32 0, implicit $exec + %6:sgpr_32 = WAVE_REDUCE_UMIN_PSEUDO_U32 %0, 1, implicit $exec + %7:vgpr_32 = COPY %6 + GLOBAL_STORE_DWORD_SADDR killed %5, killed %7, killed %4, 0, 0, implicit $exec + S_ENDPGM 0 + +...