diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.h b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.h
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
@@ -120,9 +120,9 @@
   void addUsersToMoveToVALUWorklist(unsigned Reg, MachineRegisterInfo &MRI,
                                     SetVectorType &Worklist) const;
 
-  void
-  addSCCDefUsersToVALUWorklist(MachineInstr &SCCDefInst,
-                               SetVectorType &Worklist) const;
+  void addSCCDefUsersToVALUWorklist(MachineOperand &Op,
+                                    MachineInstr &SCCDefInst,
+                                    SetVectorType &Worklist) const;
 
   const TargetRegisterClass *
   getDestEquivalentVGPRClass(const MachineInstr &Inst) const;
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -4320,8 +4320,10 @@
   for (unsigned i = Inst.getNumOperands() - 1; i > 0; --i) {
     MachineOperand &Op = Inst.getOperand(i);
     if (Op.isReg() && Op.getReg() == AMDGPU::SCC) {
+      // Only propagate through a live def of SCC.
+      if (Op.isDef() && !Op.isDead())
+        addSCCDefUsersToVALUWorklist(Op, Inst, Worklist);
       Inst.RemoveOperand(i);
-      addSCCDefUsersToVALUWorklist(Inst, Worklist);
     }
   }
 
@@ -5014,19 +5016,23 @@
   addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
 }
 
-void SIInstrInfo::addSCCDefUsersToVALUWorklist(
-    MachineInstr &SCCDefInst, SetVectorType &Worklist) const {
+void SIInstrInfo::addSCCDefUsersToVALUWorklist(MachineOperand &Op,
+                                               MachineInstr &SCCDefInst,
+                                               SetVectorType &Worklist) const {
+  // Ensure that the def inst defines SCC and that the def is still live.
+  assert(Op.isReg() && Op.getReg() == AMDGPU::SCC && Op.isDef() &&
+         !Op.isDead() && Op.getParent() == &SCCDefInst);
   // This assumes that all the users of SCC are in the same block
   // as the SCC def.
-  for (MachineInstr &MI :
-       make_range(MachineBasicBlock::iterator(SCCDefInst),
-                  SCCDefInst.getParent()->end())) {
+  for (MachineInstr &MI : // Skip the def inst itself.
+       make_range(std::next(MachineBasicBlock::iterator(SCCDefInst)),
+                  SCCDefInst.getParent()->end())) {
+    // Check for SCC users first; an instruction may both use and redefine SCC.
+    if (MI.findRegisterUseOperandIdx(AMDGPU::SCC, false, &RI) != -1)
+      Worklist.insert(&MI);
     // Exit if we find another SCC def.
     if (MI.findRegisterDefOperandIdx(AMDGPU::SCC, false, false, &RI) != -1)
      return;
-
-    if (MI.findRegisterUseOperandIdx(AMDGPU::SCC, false, &RI) != -1)
-      Worklist.insert(&MI);
   }
 }
diff --git a/llvm/test/CodeGen/AMDGPU/udivrem64.ll b/llvm/test/CodeGen/AMDGPU/udivrem64.ll
--- a/llvm/test/CodeGen/AMDGPU/udivrem64.ll
+++ b/llvm/test/CodeGen/AMDGPU/udivrem64.ll
@@ -168,3 +168,17 @@
   store i64 %result, i64 addrspace(1)* %out
   ret void
 }
+
+;FUNC-LABEL: {{^}}test_udiv_k:
+;GCN: v_mul{{.+}} v{{[0-9]+}}, v{{[0-9]+}}, 24
+;GCN: v_mul{{.+}} v{{[0-9]+}}, v{{[0-9]+}}, 24
+;GCN: v_mul{{.+}} v{{[0-9]+}}, v{{[0-9]+}}, 24
+;GCN: v_add
+;GCN: v_addc
+;GCN: v_addc
+;GCN: s_endpgm
+define amdgpu_kernel void @test_udiv_k(i64 addrspace(1)* %out, i64 %x) {
+  %result = udiv i64 24, %x
+  store i64 %result, i64 addrspace(1)* %out
+  ret void
+}
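
Note on the reordered scan in addSCCDefUsersToVALUWorklist: a single SALU instruction can both read and redefine SCC (s_addc_u32, for instance, consumes the carry-in and produces a new carry-out), so the use-check must run before the def-check or such an instruction would terminate the scan without ever being added to the worklist. The sketch below is a minimal, self-contained model of that scan order; the Inst struct and collectSCCUsers helper are hypothetical stand-ins for illustration, not LLVM's MachineInstr API.

#include <cassert>
#include <cstddef>
#include <vector>

// Hypothetical, simplified stand-in for a machine instruction: just the
// two facts the scan cares about.
struct Inst {
  bool UsesSCC = false; // reads SCC (e.g. a carry-in)
  bool DefsSCC = false; // writes SCC (e.g. a carry-out)
};

// Collect the users of the SCC value produced by Block[DefIdx], stopping
// at the next SCC def. The use-check runs before the def-check so that an
// instruction which both reads and redefines SCC is still collected.
std::vector<size_t> collectSCCUsers(const std::vector<Inst> &Block,
                                    size_t DefIdx) {
  assert(Block[DefIdx].DefsSCC && "scan must start at an SCC def");
  std::vector<size_t> Users;
  for (size_t I = DefIdx + 1; I < Block.size(); ++I) { // skip the def itself
    if (Block[I].UsesSCC) // use-check first: this inst may also redefine SCC
      Users.push_back(I);
    if (Block[I].DefsSCC) // a new def ends the old value's live range
      break;
  }
  return Users;
}

int main() {
  // Models s_add_u32 (defs SCC) followed by two s_addc_u32, each of which
  // uses *and* redefines SCC, as in a multi-word add chain.
  std::vector<Inst> Block = {{false, true}, {true, true}, {true, true}};
  std::vector<size_t> Users = collectSCCUsers(Block, 0);
  // Only the first s_addc_u32 reads the value defined at index 0; the scan
  // stops at its redefinition of SCC. With the def-check first, Users would
  // come back empty and that carry consumer would be missed.
  assert(Users.size() == 1 && Users[0] == 1);
  return 0;
}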