diff --git a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
--- a/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
+++ b/llvm/include/llvm/IR/IntrinsicsAMDGPU.td
@@ -1929,6 +1929,21 @@
   Intrinsic<[llvm_i1_ty], [llvm_anyint_ty],
             [IntrNoMem, IntrConvergent, IntrWillReturn, IntrNoCallback, IntrNoFree]>;
 
+def int_amdgcn_wave_reduce_umin :
+  Intrinsic<[llvm_i32_ty], [
+    llvm_i32_ty, // llvm value to reduce
+    llvm_i32_ty  // Strategy switch for DPP/Iterative lowering
+    ],
+  [IntrNoMem, IntrConvergent, IntrWillReturn, IntrNoCallback, IntrNoFree, ImmArg<ArgIndex<1>>]>;
+
+def int_amdgcn_wave_reduce_umax :
+  Intrinsic<[llvm_i32_ty], [
+    llvm_i32_ty, // llvm value to reduce
+    llvm_i32_ty  // Strategy switch for DPP/Iterative lowering
+    ],
+  [IntrNoMem, IntrConvergent, IntrWillReturn, IntrNoCallback, IntrNoFree, ImmArg<ArgIndex<1>>]>;
+
+
 def int_amdgcn_readfirstlane :
   ClangBuiltin<"__builtin_amdgcn_readfirstlane">,
   Intrinsic<[llvm_i32_ty], [llvm_i32_ty],
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -4065,6 +4065,133 @@
   return LoopBB;
 }
 
+static MachineBasicBlock *lowerReduce(MachineInstr &MI, MachineBasicBlock &BB,
+                                      const GCNSubtarget &ST, unsigned Opc) {
+  MachineRegisterInfo &MRI = BB.getParent()->getRegInfo();
+  const SIRegisterInfo *TRI = ST.getRegisterInfo();
+  const DebugLoc &DL = MI.getDebugLoc();
+  const SIInstrInfo *TII = ST.getInstrInfo();
+
+  // The reduction depends on whether the input operand is an SGPR or a VGPR.
+  Register SrcReg = MI.getOperand(1).getReg();
+  bool isSGPR = TRI->isSGPRClass(MRI.getRegClass(SrcReg));
+  Register DstReg = MI.getOperand(0).getReg();
+  MachineBasicBlock *RetBB = nullptr;
+  if (isSGPR) {
+    // These operations are idempotent on a uniform value, i.e. an SGPR:
+    // the reduced value is the same as the given SGPR.
+    BuildMI(BB, MI, DL, TII->get(AMDGPU::S_MOV_B32), DstReg).addReg(SrcReg);
+    RetBB = &BB;
+  } else {
+    unsigned ScanStrategyImm = MI.getOperand(2).getImm();
+    ScanOptions ScanStrategy =
+        ScanStrategyImm == 0 ? ScanOptions::DPP : ScanOptions::Iterative;
+    if (ScanStrategy == ScanOptions::Iterative) {
+      // To reduce the VGPR we need to iterate over all the active lanes.
+      // The lowering builds a ComputeLoop block that visits only the active
+      // lanes. A copy of the EXEC mask is used as the induction variable;
+      // every iteration clears the bit of the lane just processed with
+      // bitset0, so the next iteration picks up the next active lane.
+      MachineBasicBlock::iterator I = BB.end();
+      Register SrcReg = MI.getOperand(1).getReg();
+
+      // Create the control flow for the loop.
+      MachineBasicBlock *ComputeLoop;
+      MachineBasicBlock *ComputeEnd;
+
+      // Split MI's machine basic block to form the loop.
+      std::tie(ComputeLoop, ComputeEnd) = splitBlockForLoop(MI, BB, true);
+
+      bool IsWave32 = ST.isWave32();
+      const TargetRegisterClass *RegClass =
+          IsWave32 ? &AMDGPU::SReg_32RegClass : &AMDGPU::SReg_64RegClass;
+
+      // Create the registers required for the lowering.
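+      // The ComputeLoop below uses the following registers:
+      //   LoopIterator / ActiveBits: the mask of lanes still to be visited
+      //     (a copy of EXEC whose bits are cleared one at a time),
+      //   Accumulator / NewAccumulator: the partial reduction result,
+      //   FF1Reg: the index of the lowest set bit of ActiveBits, i.e. the
+      //     next lane to read,
+      //   LaneValueReg: the value read from that lane with V_READLANE_B32.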
+      Register LoopIterator = MRI.createVirtualRegister(RegClass);
+      Register InitialValReg =
+          MRI.createVirtualRegister(MRI.getRegClass(DstReg));
+
+      Register AccumulatorReg =
+          MRI.createVirtualRegister(MRI.getRegClass(DstReg));
+      Register NewAccumulatorReg =
+          MRI.createVirtualRegister(MRI.getRegClass(DstReg));
+
+      Register ActiveBitsReg = MRI.createVirtualRegister(RegClass);
+      Register NewActiveBitsReg = MRI.createVirtualRegister(RegClass);
+
+      Register FF1Reg = MRI.createVirtualRegister(MRI.getRegClass(DstReg));
+      Register LaneValueReg =
+          MRI.createVirtualRegister(MRI.getRegClass(DstReg));
+
+      unsigned MovOpc = IsWave32 ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
+      unsigned ExecOpc = IsWave32 ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
+
+      // Create the initial values of the induction variable (from EXEC) and
+      // the accumulator, then branch to ComputeLoop.
+      long InitialValue = (Opc == AMDGPU::S_MIN_U32) ? UINT_MAX : 0;
+      auto &TmpSReg =
+          BuildMI(BB, I, DL, TII->get(MovOpc), LoopIterator).addReg(ExecOpc);
+      BuildMI(BB, I, DL, TII->get(AMDGPU::S_MOV_B32), InitialValReg)
+          .addImm(InitialValue);
+      BuildMI(BB, I, DL, TII->get(AMDGPU::S_BRANCH)).addMBB(ComputeLoop);
+
+      // Start constructing ComputeLoop.
+      I = ComputeLoop->end();
+      auto Accumulator =
+          BuildMI(*ComputeLoop, I, DL, TII->get(AMDGPU::PHI), AccumulatorReg)
+              .addReg(InitialValReg)
+              .addMBB(&BB);
+      auto ActiveBits =
+          BuildMI(*ComputeLoop, I, DL, TII->get(AMDGPU::PHI), ActiveBitsReg)
+              .addReg(TmpSReg->getOperand(0).getReg())
+              .addMBB(&BB);
+
+      // Perform the computations.
+      unsigned SFFOpc =
+          IsWave32 ? AMDGPU::S_FF1_I32_B32 : AMDGPU::S_FF1_I32_B64;
+      auto &FF1 = BuildMI(*ComputeLoop, I, DL, TII->get(SFFOpc), FF1Reg)
+                      .addReg(ActiveBits->getOperand(0).getReg());
+      auto &LaneValue = BuildMI(*ComputeLoop, I, DL,
+                                TII->get(AMDGPU::V_READLANE_B32), LaneValueReg)
+                            .addReg(SrcReg)
+                            .addReg(FF1->getOperand(0).getReg());
+      auto &NewAccumulator =
+          BuildMI(*ComputeLoop, I, DL, TII->get(Opc), NewAccumulatorReg)
+              .addReg(Accumulator->getOperand(0).getReg())
+              .addReg(LaneValue->getOperand(0).getReg());
+
+      // Clear the processed lane's bit to get the next active lane.
+      unsigned BITSETOpc =
+          IsWave32 ? AMDGPU::S_BITSET0_B32 : AMDGPU::S_BITSET0_B64;
+      auto &NewActiveBits =
+          BuildMI(*ComputeLoop, I, DL, TII->get(BITSETOpc), NewActiveBitsReg)
+              .addReg(FF1->getOperand(0).getReg())
+              .addReg(ActiveBits->getOperand(0).getReg());
+
+      // Add the phi node inputs from the loop back edge.
+      Accumulator.addReg(NewAccumulator->getOperand(0).getReg())
+          .addMBB(ComputeLoop);
+      ActiveBits.addReg(NewActiveBits->getOperand(0).getReg())
+          .addMBB(ComputeLoop);
+
+      // Create the compare and the loop back branch.
+      unsigned CMPOpc =
+          IsWave32 ? AMDGPU::S_CMP_LG_U32 : AMDGPU::S_CMP_LG_U64;
+      BuildMI(*ComputeLoop, I, DL, TII->get(CMPOpc))
+          .addReg(NewActiveBits->getOperand(0).getReg())
+          .addImm(0);
+      BuildMI(*ComputeLoop, I, DL, TII->get(AMDGPU::S_CBRANCH_SCC1))
+          .addMBB(ComputeLoop);
+
+      MRI.replaceRegWith(DstReg, NewAccumulator->getOperand(0).getReg());
+      RetBB = ComputeEnd;
+    } else {
+      // TODO: DPP implementation.
+    }
+  }
+  MI.eraseFromParent();
+  return RetBB;
+}
+
 MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter(
     MachineInstr &MI, MachineBasicBlock *BB) const {
@@ -4073,6 +4200,10 @@
   SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
 
   switch (MI.getOpcode()) {
+  case AMDGPU::WAVE_REDUCE_UMIN_PSEUDO:
+    return lowerReduce(MI, *BB, *getSubtarget(), AMDGPU::S_MIN_U32);
+  case AMDGPU::WAVE_REDUCE_UMAX_PSEUDO:
+    return lowerReduce(MI, *BB, *getSubtarget(), AMDGPU::S_MAX_U32);
   case AMDGPU::S_UADDO_PSEUDO:
   case AMDGPU::S_USUBO_PSEUDO: {
     const DebugLoc &DL = MI.getDebugLoc();
diff --git a/llvm/lib/Target/AMDGPU/SIInstructions.td b/llvm/lib/Target/AMDGPU/SIInstructions.td
--- a/llvm/lib/Target/AMDGPU/SIInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SIInstructions.td
@@ -258,6 +258,18 @@
   }
 } // End Defs = [SCC]
 
+let usesCustomInserter = 1, hasSideEffects = 0, mayLoad = 0, mayStore = 0, Uses = [EXEC] in {
+  def WAVE_REDUCE_UMIN_PSEUDO : VPseudoInstSI <(outs SGPR_32:$sdst),
+    (ins VSrc_b32:$src, VSrc_b32:$strategy),
+    [(set i32:$sdst, (int_amdgcn_wave_reduce_umin i32:$src, i32:$strategy))]> {
+  }
+
+  def WAVE_REDUCE_UMAX_PSEUDO : VPseudoInstSI <(outs SGPR_32:$sdst),
+    (ins VSrc_b32:$src, VSrc_b32:$strategy),
+    [(set i32:$sdst, (int_amdgcn_wave_reduce_umax i32:$src, i32:$strategy))]> {
+  }
+}
+
 let usesCustomInserter = 1, Defs = [VCC, EXEC] in {
   def V_ADD_U64_PSEUDO : VPseudoInstSI <
     (outs VReg_64:$vdst), (ins VSrc_b64:$src0, VSrc_b64:$src1),
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umax.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umax.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umax.ll
@@ -0,0 +1,137 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
+; RUN: llc -march=amdgcn -mcpu=gfx1100 -global-isel=0 -mattr=+wavefrontsize32,-wavefrontsize64 < %s | FileCheck %s
+
+declare i32 @llvm.amdgcn.wave.reduce.umax(i32, i32)
+declare i32 @llvm.amdgcn.workitem.id.x()
+
+define amdgpu_kernel void @uniform_value(ptr addrspace(1) %out, i32 %in) {
+; CHECK-LABEL: uniform_value:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: s_load_b32 s2, s[0:1], 0x2c
+; CHECK-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
+; CHECK-NEXT: global_store_b32 v0, v1, s[0:1]
+; CHECK-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; CHECK-NEXT: s_endpgm
+entry:
+  %result = call i32 @llvm.amdgcn.wave.reduce.umax(i32 %in, i32 1)
+  store i32 %result, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_kernel void @const_value(ptr addrspace(1) %out) {
+; CHECK-LABEL: const_value:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; CHECK-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x7b
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: global_store_b32 v0, v1, s[0:1]
+; CHECK-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; CHECK-NEXT: s_endpgm
+entry:
+  %result = call i32 @llvm.amdgcn.wave.reduce.umax(i32 123, i32 1)
+  store i32 %result, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_kernel void @poison_value(ptr addrspace(1) %out, i32 %in) {
+; CHECK-LABEL: poison_value:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; CHECK-NEXT: v_mov_b32_e32 v0, 0
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: global_store_b32 v0, v0, s[0:1]
+; CHECK-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; CHECK-NEXT: s_endpgm
+entry:
+  %result = call i32 @llvm.amdgcn.wave.reduce.umax(i32 poison, i32 1)
+  store i32 %result, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_kernel void @divergent_value(ptr addrspace(1) %out, i32 %in) {
+; CHECK-LABEL: divergent_value:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; CHECK-NEXT: v_mov_b32_e32 v1, 0
+; CHECK-NEXT: s_mov_b32 s3, exec_lo
+; CHECK-NEXT: s_mov_b32 s2, 0
+; CHECK-NEXT: .LBB3_1: ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: s_ctz_i32_b32 s4, s3
+; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; CHECK-NEXT: v_readlane_b32 s5, v0, s4
+; CHECK-NEXT: s_bitset0_b32 s3, s4
+; CHECK-NEXT: s_max_u32 s2, s2, s5
+; CHECK-NEXT: s_cmp_lg_u32 s3, 0
+; CHECK-NEXT: s_cbranch_scc1 .LBB3_1
+; CHECK-NEXT: ; %bb.2:
+; CHECK-NEXT: v_mov_b32_e32 v0, s2
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: global_store_b32 v1, v0, s[0:1]
+; CHECK-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; CHECK-NEXT: s_endpgm
+entry:
+  %id.x = call i32 @llvm.amdgcn.workitem.id.x()
+  %result = call i32 @llvm.amdgcn.wave.reduce.umax(i32 %id.x, i32 1)
+  store i32 %result, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_kernel void @divergent_cfg(ptr addrspace(1) %out, i32 %in) {
+; CHECK-LABEL: divergent_cfg:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_mov_b32 s2, exec_lo
+; CHECK-NEXT: ; implicit-def: $sgpr3
+; CHECK-NEXT: v_cmpx_lt_u32_e32 15, v0
+; CHECK-NEXT: s_xor_b32 s2, exec_lo, s2
+; CHECK-NEXT: s_cbranch_execz .LBB4_2
+; CHECK-NEXT: ; %bb.1: ; %else
+; CHECK-NEXT: s_load_b32 s3, s[0:1], 0x2c
+; CHECK-NEXT: ; implicit-def: $vgpr0
+; CHECK-NEXT: .LBB4_2: ; %Flow
+; CHECK-NEXT: s_or_saveexec_b32 s2, s2
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: v_mov_b32_e32 v1, s3
+; CHECK-NEXT: s_xor_b32 exec_lo, exec_lo, s2
+; CHECK-NEXT: s_cbranch_execz .LBB4_6
+; CHECK-NEXT: ; %bb.3: ; %if
+; CHECK-NEXT: s_mov_b32 s4, exec_lo
+; CHECK-NEXT: s_mov_b32 s3, 0
+; CHECK-NEXT: .LBB4_4: ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: s_ctz_i32_b32 s5, s4
+; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; CHECK-NEXT: v_readlane_b32 s6, v0, s5
+; CHECK-NEXT: s_bitset0_b32 s4, s5
+; CHECK-NEXT: s_max_u32 s3, s3, s6
+; CHECK-NEXT: s_cmp_lg_u32 s4, 0
+; CHECK-NEXT: s_cbranch_scc1 .LBB4_4
+; CHECK-NEXT: ; %bb.5:
+; CHECK-NEXT: v_mov_b32_e32 v1, s3
+; CHECK-NEXT: .LBB4_6: ; %endif
+; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s2
+; CHECK-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; CHECK-NEXT: v_mov_b32_e32 v0, 0
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: global_store_b32 v0, v1, s[0:1]
+; CHECK-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; CHECK-NEXT: s_endpgm
+entry:
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %d_cmp = icmp ult i32 %tid, 16
+  br i1 %d_cmp, label %if, label %else
+
+if:
+  %0 = call i32 @llvm.amdgcn.wave.reduce.umax(i32 %tid, i32 1)
+  br label %endif
+
+else:
+  %1 = call i32 @llvm.amdgcn.wave.reduce.umax(i32 %in, i32 1)
+  br label %endif
+
+endif:
+  %2 = phi i32 [%0, %if], [%1, %else]
+  store i32 %2, ptr addrspace(1) %out
+  ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umin.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umin.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.reduce.umin.ll
@@ -0,0 +1,137 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
+; RUN: llc -march=amdgcn -mcpu=gfx1100 -global-isel=0 -mattr=+wavefrontsize32,-wavefrontsize64 < %s | FileCheck %s
+
+declare i32 @llvm.amdgcn.wave.reduce.umin(i32, i32)
+declare i32 @llvm.amdgcn.workitem.id.x()
+
+define amdgpu_kernel void @uniform_value(ptr addrspace(1) %out, i32 %in) {
+; CHECK-LABEL: uniform_value:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_clause 0x1
+; CHECK-NEXT: s_load_b32 s2, s[0:1], 0x2c
+; CHECK-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
+; CHECK-NEXT: global_store_b32 v0, v1, s[0:1]
+; CHECK-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; CHECK-NEXT: s_endpgm
+entry:
+  %result = call i32 @llvm.amdgcn.wave.reduce.umin(i32 %in, i32 1)
+  store i32 %result, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_kernel void @const_value(ptr addrspace(1) %out) {
+; CHECK-LABEL: const_value:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; CHECK-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, 0x7b
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: global_store_b32 v0, v1, s[0:1]
+; CHECK-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; CHECK-NEXT: s_endpgm
+entry:
+  %result = call i32 @llvm.amdgcn.wave.reduce.umin(i32 123, i32 1)
+  store i32 %result, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_kernel void @poison_value(ptr addrspace(1) %out, i32 %in) {
+; CHECK-LABEL: poison_value:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; CHECK-NEXT: v_mov_b32_e32 v0, 0
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: global_store_b32 v0, v0, s[0:1]
+; CHECK-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; CHECK-NEXT: s_endpgm
+entry:
+  %result = call i32 @llvm.amdgcn.wave.reduce.umin(i32 poison, i32 1)
+  store i32 %result, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_kernel void @divergent_value(ptr addrspace(1) %out, i32 %in) {
+; CHECK-LABEL: divergent_value:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; CHECK-NEXT: v_mov_b32_e32 v1, 0
+; CHECK-NEXT: s_mov_b32 s3, exec_lo
+; CHECK-NEXT: s_mov_b32 s2, -1
+; CHECK-NEXT: .LBB3_1: ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: s_ctz_i32_b32 s4, s3
+; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; CHECK-NEXT: v_readlane_b32 s5, v0, s4
+; CHECK-NEXT: s_bitset0_b32 s3, s4
+; CHECK-NEXT: s_min_u32 s2, s2, s5
+; CHECK-NEXT: s_cmp_lg_u32 s3, 0
+; CHECK-NEXT: s_cbranch_scc1 .LBB3_1
+; CHECK-NEXT: ; %bb.2:
+; CHECK-NEXT: v_mov_b32_e32 v0, s2
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: global_store_b32 v1, v0, s[0:1]
+; CHECK-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; CHECK-NEXT: s_endpgm
+entry:
+  %id.x = call i32 @llvm.amdgcn.workitem.id.x()
+  %result = call i32 @llvm.amdgcn.wave.reduce.umin(i32 %id.x, i32 1)
+  store i32 %result, ptr addrspace(1) %out
+  ret void
+}
+
+define amdgpu_kernel void @divergent_cfg(ptr addrspace(1) %out, i32 %in) {
+; CHECK-LABEL: divergent_cfg:
+; CHECK: ; %bb.0: ; %entry
+; CHECK-NEXT: s_mov_b32 s2, exec_lo
+; CHECK-NEXT: ; implicit-def: $sgpr3
+; CHECK-NEXT: v_cmpx_lt_u32_e32 15, v0
+; CHECK-NEXT: s_xor_b32 s2, exec_lo, s2
+; CHECK-NEXT: s_cbranch_execz .LBB4_2
+; CHECK-NEXT: ; %bb.1: ; %else
+; CHECK-NEXT: s_load_b32 s3, s[0:1], 0x2c
+; CHECK-NEXT: ; implicit-def: $vgpr0
+; CHECK-NEXT: .LBB4_2: ; %Flow
+; CHECK-NEXT: s_or_saveexec_b32 s2, s2
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: v_mov_b32_e32 v1, s3
+; CHECK-NEXT: s_xor_b32 exec_lo, exec_lo, s2
+; CHECK-NEXT: s_cbranch_execz .LBB4_6
+; CHECK-NEXT: ; %bb.3: ; %if
+; CHECK-NEXT: s_mov_b32 s4, exec_lo
+; CHECK-NEXT: s_mov_b32 s3, -1
+; CHECK-NEXT: .LBB4_4: ; =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: s_ctz_i32_b32 s5, s4
+; CHECK-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; CHECK-NEXT: v_readlane_b32 s6, v0, s5
+; CHECK-NEXT: s_bitset0_b32 s4, s5
+; CHECK-NEXT: s_min_u32 s3, s3, s6
+; CHECK-NEXT: s_cmp_lg_u32 s4, 0
+; CHECK-NEXT: s_cbranch_scc1 .LBB4_4
+; CHECK-NEXT: ; %bb.5:
+; CHECK-NEXT: v_mov_b32_e32 v1, s3
+; CHECK-NEXT: .LBB4_6: ; %endif
+; CHECK-NEXT: s_or_b32 exec_lo, exec_lo, s2
+; CHECK-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; CHECK-NEXT: v_mov_b32_e32 v0, 0
+; CHECK-NEXT: s_waitcnt lgkmcnt(0)
+; CHECK-NEXT: global_store_b32 v0, v1, s[0:1]
+; CHECK-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; CHECK-NEXT: s_endpgm
+entry:
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %d_cmp = icmp ult i32 %tid, 16
+  br i1 %d_cmp, label %if, label %else
+
+if:
+  %0 = call i32 @llvm.amdgcn.wave.reduce.umin(i32 %tid, i32 1)
+  br label %endif
+
+else:
+  %1 = call i32 @llvm.amdgcn.wave.reduce.umin(i32 %in, i32 1)
+  br label %endif
+
+endif:
+  %2 = phi i32 [%0, %if], [%1, %else]
+  store i32 %2, ptr addrspace(1) %out
+  ret void
+}
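
Reviewer note (not part of the patch): a rough scalar sketch of the control flow
the iterative lowering emits for the wave32 umax case. The function name
waveReduceUMax, the LaneValues array, and the use of __builtin_ctz are
illustrative assumptions only; it also assumes at least one active lane
(EXEC != 0), which holds whenever the emitted block runs. For umin, the
accumulator starts at UINT32_MAX and std::min replaces std::max.

  #include <algorithm>
  #include <cstdint>

  // Scalar model of the emitted ComputeLoop (wave32 case).
  uint32_t waveReduceUMax(const uint32_t LaneValues[32], uint32_t Exec) {
    uint32_t Accumulator = 0;   // identity value for umax, as in the lowering
    uint32_t ActiveBits = Exec; // copy of EXEC, used as the loop iterator
    do {
      unsigned Lane = __builtin_ctz(ActiveBits);      // S_FF1_I32_B32
      uint32_t LaneValue = LaneValues[Lane];          // V_READLANE_B32
      Accumulator = std::max(Accumulator, LaneValue); // S_MAX_U32
      ActiveBits &= ~(1u << Lane);                    // S_BITSET0_B32
    } while (ActiveBits != 0);       // S_CMP_LG_U32 + S_CBRANCH_SCC1
    return Accumulator;
  }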