Index: llvm/lib/Target/AMDGPU/SIISelLowering.cpp
===================================================================
--- llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -3879,6 +3879,7 @@
     MachineBasicBlock::iterator MII = MI;
     const DebugLoc &DL = MI.getDebugLoc();
     MachineOperand &Dest = MI.getOperand(0);
+    MachineOperand &CarryDest = MI.getOperand(1);
     MachineOperand &Src0 = MI.getOperand(2);
     MachineOperand &Src1 = MI.getOperand(3);
     MachineOperand &Src2 = MI.getOperand(4);
@@ -3915,6 +3916,9 @@
     }
 
     BuildMI(*BB, MII, DL, TII->get(Opc), Dest.getReg()).add(Src0).add(Src1);
+
+    BuildMI(*BB, MII, DL, TII->get(AMDGPU::COPY), CarryDest.getReg())
+        .addReg(AMDGPU::SCC);
     MI.eraseFromParent();
     return BB;
   }
Index: llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
===================================================================
--- llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -637,6 +637,13 @@
   }
 
   if (RC == &AMDGPU::SReg_64RegClass) {
+    if (SrcReg == AMDGPU::SCC) {
+      BuildMI(MBB, MI, DL, get(AMDGPU::S_CSELECT_B64), DestReg)
+          .addImm(1)
+          .addImm(0);
+      return;
+    }
+
     if (DestReg == AMDGPU::VCC) {
       if (AMDGPU::SReg_64RegClass.contains(SrcReg)) {
         BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), AMDGPU::VCC)
@@ -663,10 +670,18 @@
   }
 
   if (DestReg == AMDGPU::SCC) {
-    assert(AMDGPU::SReg_32RegClass.contains(SrcReg));
-    BuildMI(MBB, MI, DL, get(AMDGPU::S_CMP_LG_U32))
-        .addReg(SrcReg, getKillRegState(KillSrc))
-        .addImm(0);
+    unsigned Opcode;
+    if (AMDGPU::SReg_32RegClass.contains(SrcReg)) {
+      Opcode = AMDGPU::S_CMP_LG_U32;
+    } else {
+      assert(AMDGPU::SReg_64RegClass.contains(SrcReg));
+      Opcode = AMDGPU::S_CMP_LG_U64;
+    }
+
+    BuildMI(MBB, MI, DL, get(Opcode))
+        .addReg(SrcReg, getKillRegState(KillSrc))
+        .addImm(0);
+
     return;
   }
Index: llvm/test/CodeGen/AMDGPU/expand-scalar-carry-out-select-user.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AMDGPU/expand-scalar-carry-out-select-user.ll
@@ -0,0 +1,119 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 < %s | FileCheck -check-prefixes=GCN,GFX9 %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 < %s | FileCheck -check-prefixes=GCN,GFX10 %s
+
+define i32 @s_add_co_select_user() {
+; GFX9-LABEL: s_add_co_select_user:
+; GFX9:       ; %bb.0: ; %bb
+; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT:    s_mov_b64 s[4:5], 0
+; GFX9-NEXT:    s_load_dword s6, s[4:5], 0x0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_add_co_u32_e64 v0, s[4:5], s6, s6
+; GFX9-NEXT:    s_cmp_lg_u64 s[4:5], 0
+; GFX9-NEXT:    s_addc_u32 s4, s6, 0
+; GFX9-NEXT:    v_mov_b32_e32 v1, s4
+; GFX9-NEXT:    s_cselect_b64 vcc, 1, 0
+; GFX9-NEXT:    v_cndmask_b32_e32 v1, 0, v1, vcc
+; GFX9-NEXT:    v_cmp_gt_u32_e64 vcc, s6, 31
+; GFX9-NEXT:    v_cndmask_b32_e32 v0, v1, v0, vcc
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: s_add_co_select_user:
+; GFX10:       ; %bb.0: ; %bb
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    s_mov_b64 s[4:5], 0
+; GFX10-NEXT:    ; implicit-def: $vcc_hi
+; GFX10-NEXT:    s_load_dword s4, s[4:5], 0x0
+; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX10-NEXT:    v_add_co_u32_e64 v0, s5, s4, s4
+; GFX10-NEXT:    v_cmp_gt_u32_e64 vcc_lo, s4, 31
+; GFX10-NEXT:    s_cmpk_lg_u32 s5, 0x0
+; GFX10-NEXT:    s_addc_u32 s5, s4, 0
+; GFX10-NEXT:    s_cselect_b32 s6, 1, 0
+; GFX10-NEXT:    v_cndmask_b32_e64 v1, 0, s5, s6
+; GFX10-NEXT:    v_cndmask_b32_e32 v0, v1, v0, vcc_lo
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+bb:
+  %i = load volatile i32, i32 addrspace(4)* null, align 8
+  %i1 = add i32 %i, %i
+  %i2 = icmp ult i32 %i1, %i
+  %i3 = zext i1 %i2 to i32
+  %i4 = add nuw nsw i32 %i3, 0
+  %i5 = add i32 %i4, %i
+  %i6 = icmp ult i32 %i5, %i4
+  %i7 = select i1 %i6, i32 %i5, i32 0
+  %i8 = icmp ugt i32 %i, 31
+  %i9 = select i1 %i8, i32 %i1, i32 %i7
+  ret i32 %i9
+}
+
+define amdgpu_kernel void @s_add_co_br_user(i32 %i) {
+; GFX9-LABEL: s_add_co_br_user:
+; GFX9:       ; %bb.0: ; %bb
+; GFX9-NEXT:    s_load_dword s0, s[4:5], 0x0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    s_add_i32 s1, s0, s0
+; GFX9-NEXT:    v_mov_b32_e32 v0, s0
+; GFX9-NEXT:    v_cmp_lt_u32_e32 vcc, s1, v0
+; GFX9-NEXT:    s_cmp_lg_u64 vcc, 0
+; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX9-NEXT:    s_addc_u32 s0, s0, 0
+; GFX9-NEXT:    v_cmp_ge_u32_e32 vcc, s0, v0
+; GFX9-NEXT:    s_and_b64 vcc, exec, vcc
+; GFX9-NEXT:    s_cbranch_vccnz BB1_2
+; GFX9-NEXT:  ; %bb.1: ; %bb0
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9-NEXT:    v_mov_b32_e32 v2, 9
+; GFX9-NEXT:    v_mov_b32_e32 v1, 0
+; GFX9-NEXT:    global_store_dword v[0:1], v2, off
+; GFX9-NEXT:  BB1_2: ; %bb1
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9-NEXT:    v_mov_b32_e32 v2, 10
+; GFX9-NEXT:    v_mov_b32_e32 v1, 0
+; GFX9-NEXT:    global_store_dword v[0:1], v2, off
+; GFX9-NEXT:    s_endpgm
+;
+; GFX10-LABEL: s_add_co_br_user:
+; GFX10:       ; %bb.0: ; %bb
+; GFX10-NEXT:    s_load_dword s0, s[4:5], 0x0
+; GFX10-NEXT:    ; implicit-def: $vcc_hi
+; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX10-NEXT:    s_add_i32 s1, s0, s0
+; GFX10-NEXT:    v_cmp_lt_u32_e64 s1, s1, s0
+; GFX10-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s1
+; GFX10-NEXT:    s_cmpk_lg_u32 s1, 0x0
+; GFX10-NEXT:    s_addc_u32 s0, s0, 0
+; GFX10-NEXT:    v_cmp_ge_u32_e32 vcc_lo, s0, v0
+; GFX10-NEXT:    s_and_b32 vcc_lo, exec_lo, vcc_lo
+; GFX10-NEXT:    s_cbranch_vccnz BB1_2
+; GFX10-NEXT:  ; %bb.1: ; %bb0
+; GFX10-NEXT:    v_mov_b32_e32 v0, 0
+; GFX10-NEXT:    v_mov_b32_e32 v2, 9
+; GFX10-NEXT:    v_mov_b32_e32 v1, 0
+; GFX10-NEXT:    global_store_dword v[0:1], v2, off
+; GFX10-NEXT:  BB1_2: ; %bb1
+; GFX10-NEXT:    v_mov_b32_e32 v0, 0
+; GFX10-NEXT:    v_mov_b32_e32 v2, 10
+; GFX10-NEXT:    v_mov_b32_e32 v1, 0
+; GFX10-NEXT:    global_store_dword v[0:1], v2, off
+; GFX10-NEXT:    s_endpgm
+bb:
+  %i1 = add i32 %i, %i
+  %i2 = icmp ult i32 %i1, %i
+  %i3 = zext i1 %i2 to i32
+  %i4 = add nuw nsw i32 %i3, 0
+  %i5 = add i32 %i4, %i
+  %i6 = icmp ult i32 %i5, %i4
+  %i7 = select i1 %i6, i32 %i5, i32 0
+  br i1 %i6, label %bb0, label %bb1
+
+bb0:
+  store volatile i32 9, i32 addrspace(1)* null
+  br label %bb1
+
+bb1:
+  store volatile i32 10, i32 addrspace(1)* null
+  ret void
+}
+