Index: lib/Target/AMDGPU/AMDGPUISelLowering.cpp
===================================================================
--- lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -437,6 +437,7 @@
 
   setSchedulingPreference(Sched::RegPressure);
   setJumpIsExpensive(true);
+  setHasMultipleConditionRegisters(true);
 
   // SI at least has hardware support for floating point exceptions, but no way
   // of using or handling them is implemented. They are also optional in OpenCL
Index: lib/Target/AMDGPU/SILowerI1Copies.cpp
===================================================================
--- lib/Target/AMDGPU/SILowerI1Copies.cpp
+++ lib/Target/AMDGPU/SILowerI1Copies.cpp
@@ -121,11 +121,29 @@
         }
       }
 
-      BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64))
-        .addOperand(Dst)
-        .addImm(0)
-        .addImm(-1)
-        .addOperand(Src);
+      // If there are uses which are just a copy back from this new VReg_1
+      // to another SGPR_64, just forward-propagate the original SGPR_64.
+      SmallVector<MachineInstr *, 4> RegUses;
+      for (auto &Use : MRI.use_instructions(Dst.getReg()))
+        if (Use.isFullCopy())
+          RegUses.push_back(&Use);
+
+      while (!RegUses.empty()) {
+        MachineInstr *Use = RegUses.pop_back_val();
+        if (Use->getOperand(1).getReg() == Dst.getReg()) {
+          unsigned FromReg = Use->getOperand(0).getReg();
+          Use->eraseFromParent();
+          MRI.replaceRegWith(FromReg, Src.getReg());
+        }
+      }
+
+      if (!MRI.use_empty(Dst.getReg()))
+        BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64))
+          .addOperand(Dst)
+          .addImm(0)
+          .addImm(-1)
+          .addOperand(Src);
+
       MI.eraseFromParent();
     } else if (TRI->getCommonSubClass(DstRC, &AMDGPU::SGPR_64RegClass) &&
                SrcRC == &AMDGPU::VReg_1RegClass) {
Index: test/CodeGen/AMDGPU/hoist-cond.ll
===================================================================
--- /dev/null
+++ test/CodeGen/AMDGPU/hoist-cond.ll
@@ -0,0 +1,44 @@
+; RUN: llc -march=amdgcn < %s | FileCheck %s
+
+; Check that an invariant compare is hoisted out of the loop.
+; At the same time, the condition shall not be serialized into a VGPR and
+; deserialized later using another v_cmp + v_cndmask, but used directly in s_and_saveexec_b64.
+
+; CHECK: v_cmp_{{..}}_u32_e64 [[COND:s\[[0-9]+:[0-9]+\]]]
+; CHECK: BB0_1:
+; CHECK-NOT: v_cmp
+; CHECK-NOT: v_cndmask
+; CHECK: s_and_saveexec_b64 s{{\[[0-9]+:[0-9]+\]}}, [[COND]]
+; CHECK: BB0_2:
+
+define amdgpu_kernel void @hoist_cond(float addrspace(1)* nocapture %arg, float addrspace(1)* noalias nocapture readonly %arg1, i32 %arg3, i32 %arg4) {
+bb:
+  %tmp = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %tmp5 = icmp ult i32 %tmp, %arg3
+  br label %bb1
+
+bb1:                                              ; preds = %bb3, %bb
+  %tmp7 = phi i32 [ %arg4, %bb ], [ %tmp16, %bb3 ]
+  %tmp8 = phi float [ 0.000000e+00, %bb ], [ %tmp15, %bb3 ]
+  br i1 %tmp5, label %bb2, label %bb3
+
+bb2:                                              ; preds = %bb1
+  %tmp10 = zext i32 %tmp7 to i64
+  %tmp11 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 %tmp10
+  %tmp12 = load float, float addrspace(1)* %tmp11, align 4
+  br label %bb3
+
+bb3:                                              ; preds = %bb2, %bb1
+  %tmp14 = phi float [ %tmp12, %bb2 ], [ 0.000000e+00, %bb1 ]
+  %tmp15 = fadd float %tmp8, %tmp14
+  %tmp16 = add i32 %tmp7, -1
+  %tmp17 = icmp eq i32 %tmp16, 0
+  br i1 %tmp17, label %bb4, label %bb1
+
+bb4:                                              ; preds = %bb3
+  store float %tmp15, float addrspace(1)* %arg, align 4
+  ret void
+}
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.amdgcn.workitem.id.x()
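
Background note (not part of the patch): hasMultipleConditionRegisters() is queried by CodeGenPrepare when it decides whether to sink a compare into each block that uses its result. That transform helps targets with a single flags register, but on AMDGPU any SGPR pair can hold a condition, so sinking only forces a loop-invariant compare to be re-evaluated inside the loop. The SILowerI1Copies change complements this: once cross-block i1 values survive, a VReg_1 whose uses are all plain copies back to an SGPR pair no longer needs the v_cndmask/v_cmp round trip. Below is a minimal, hypothetical IR sketch of the shape the flag preserves; the function and value names are illustrative, not taken from the patch:

define amdgpu_kernel void @invariant_cmp(i32 %n, i32 %tid) {
entry:
  %cond = icmp ult i32 %tid, %n       ; loop-invariant: computed once, in an SGPR pair
  br label %loop

loop:                                 ; preds = %latch, %entry
  %i = phi i32 [ 0, %entry ], [ %i.next, %latch ]
  ; Without setHasMultipleConditionRegisters(true), CodeGenPrepare would sink a
  ; duplicate of %cond here, and it would be re-emitted as v_cmp on every iteration.
  br i1 %cond, label %then, label %latch

then:                                 ; preds = %loop
  br label %latch

latch:                                ; preds = %then, %loop
  %i.next = add i32 %i, 1
  %done = icmp eq i32 %i.next, 16
  br i1 %done, label %exit, label %loop

exit:                                 ; preds = %latch
  ret void
}

With the flag set, %cond stays in its defining block and the SGPR-pair condition feeds s_and_saveexec_b64 directly, which is what hoist-cond.ll checks.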