Index: lib/Target/AMDGPU/AMDGPUISelLowering.cpp =================================================================== --- lib/Target/AMDGPU/AMDGPUISelLowering.cpp +++ lib/Target/AMDGPU/AMDGPUISelLowering.cpp @@ -440,6 +440,7 @@ setSchedulingPreference(Sched::RegPressure); setJumpIsExpensive(true); + setHasMultipleConditionRegisters(true); // SI at least has hardware support for floating point exceptions, but no way // of using or handling them is implemented. They are also optional in OpenCL Index: lib/Target/AMDGPU/SILowerI1Copies.cpp =================================================================== --- lib/Target/AMDGPU/SILowerI1Copies.cpp +++ lib/Target/AMDGPU/SILowerI1Copies.cpp @@ -100,12 +100,12 @@ const TargetRegisterClass *DstRC = MRI.getRegClass(Dst.getReg()); const TargetRegisterClass *SrcRC = MRI.getRegClass(Src.getReg()); + DebugLoc DL = MI.getDebugLoc(); + MachineInstr *DefInst = MRI.getUniqueVRegDef(Src.getReg()); if (DstRC == &AMDGPU::VReg_1RegClass && TRI->getCommonSubClass(SrcRC, &AMDGPU::SGPR_64RegClass)) { I1Defs.push_back(Dst.getReg()); - DebugLoc DL = MI.getDebugLoc(); - MachineInstr *DefInst = MRI.getUniqueVRegDef(Src.getReg()); if (DefInst->getOpcode() == AMDGPU::S_MOV_B64) { if (DefInst->getOperand(1).isImm()) { I1Defs.push_back(Dst.getReg()); @@ -129,10 +129,26 @@ MI.eraseFromParent(); } else if (TRI->getCommonSubClass(DstRC, &AMDGPU::SGPR_64RegClass) && SrcRC == &AMDGPU::VReg_1RegClass) { - BuildMI(MBB, &MI, MI.getDebugLoc(), TII->get(AMDGPU::V_CMP_NE_U32_e64)) - .addOperand(Dst) - .addOperand(Src) - .addImm(0); + if (DefInst->getOpcode() == AMDGPU::V_CNDMASK_B32_e64 && + DefInst->getOperand(1).isImm() && DefInst->getOperand(2).isImm() && + DefInst->getOperand(1).getImm() == 0 && + DefInst->getOperand(2).getImm() != 0 && + DefInst->getOperand(3).isReg() && + TargetRegisterInfo::isVirtualRegister( + DefInst->getOperand(3).getReg()) && + TRI->getCommonSubClass( + MRI.getRegClass(DefInst->getOperand(3).getReg()), + &AMDGPU::SGPR_64RegClass)) 
{ + BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_AND_B64)) + .addOperand(Dst) + .addReg(AMDGPU::EXEC) + .addOperand(DefInst->getOperand(3)); + } else { + BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_CMP_NE_U32_e64)) + .addOperand(Dst) + .addOperand(Src) + .addImm(0); + } MI.eraseFromParent(); } } Index: test/CodeGen/AMDGPU/branch-relaxation.ll =================================================================== --- test/CodeGen/AMDGPU/branch-relaxation.ll +++ test/CodeGen/AMDGPU/branch-relaxation.ll @@ -493,9 +493,9 @@ ; GCN: s_setpc_b64 ; GCN: [[LONG_BR_DEST0]] -; GCN: s_cmp_eq_u32 +; GCN: v_cmp_ne_u32_e32 ; GCN-NEXT: ; implicit-def -; GCN-NEXT: s_cbranch_scc0 +; GCN-NEXT: s_cbranch_vccz ; GCN: s_setpc_b64 ; GCN: s_endpgm Index: test/CodeGen/AMDGPU/hoist-cond.ll =================================================================== --- /dev/null +++ test/CodeGen/AMDGPU/hoist-cond.ll @@ -0,0 +1,47 @@ +; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck %s + +; Check that invariant compare is hoisted out of the loop. +; At the same time condition shall not be serialized into a VGPR and deserialized later +; using another v_cmp + v_cndmask, but used directly in s_and_saveexec_b64. 
+ +; CHECK: v_cmp_{{..}}_u32_e64 [[COND:s\[[0-9]+:[0-9]+\]]] +; CHECK: BB0_1: +; CHECK-NOT: v_cmp +; CHECK-NOT: v_cndmask +; CHECK: s_and_b64 [[RESTORED:s\[[0-9]+:[0-9]+\]]], exec, [[COND]] +; CHECK: s_and_saveexec_b64 s{{\[[0-9]+:[0-9]+\]}}, [[RESTORED]] +; CHECK: BB0_2: + +define amdgpu_kernel void @hoist_cond(float addrspace(1)* nocapture %arg, float addrspace(1)* noalias nocapture readonly %arg1, i32 %arg3, i32 %arg4) { +bb: + %tmp = tail call i32 @llvm.amdgcn.workitem.id.x() #0 + %tmp5 = icmp ult i32 %tmp, %arg3 + br label %bb1 + +bb1: ; preds = %bb3, %bb + %tmp7 = phi i32 [ %arg4, %bb ], [ %tmp16, %bb3 ] + %tmp8 = phi float [ 0.000000e+00, %bb ], [ %tmp15, %bb3 ] + br i1 %tmp5, label %bb2, label %bb3 + +bb2: ; preds = %bb1 + %tmp10 = zext i32 %tmp7 to i64 + %tmp11 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 %tmp10 + %tmp12 = load float, float addrspace(1)* %tmp11, align 4 + br label %bb3 + +bb3: ; preds = %bb2, %bb1 + %tmp14 = phi float [ %tmp12, %bb2 ], [ 0.000000e+00, %bb1 ] + %tmp15 = fadd float %tmp8, %tmp14 + %tmp16 = add i32 %tmp7, -1 + %tmp17 = icmp eq i32 %tmp16, 0 + br i1 %tmp17, label %bb4, label %bb1 + +bb4: ; preds = %bb3 + store float %tmp15, float addrspace(1)* %arg, align 4 + ret void +} + +; Function Attrs: nounwind readnone +declare i32 @llvm.amdgcn.workitem.id.x() #0 + +attributes #0 = { nounwind readnone }