diff --git a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
--- a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
@@ -1155,8 +1155,12 @@
   MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
   MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
-  if (!Src1->isIdenticalTo(*Src0))
-    return false;
+  if (!Src1->isIdenticalTo(*Src0)) {
+    auto *Src0Imm = getImmOrMaterializedImm(*MRI, *Src0);
+    auto *Src1Imm = getImmOrMaterializedImm(*MRI, *Src1);
+    if (!Src1Imm->isIdenticalTo(*Src0Imm))
+      return false;
+  }
 
   int Src1ModIdx =
       AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1_modifiers);
   int Src0ModIdx =
@@ -1276,11 +1280,8 @@
   for (MachineInstr *Copy : CopiesToReplace)
     Copy->addImplicitDefUseOperands(*MF);
 
-  SmallPtrSet<MachineInstr *, 4> Folded;
   for (FoldCandidate &Fold : FoldList) {
     assert(!Fold.isReg() || Fold.OpToFold);
-    if (Folded.count(Fold.UseMI))
-      continue;
     if (Fold.isReg() && Fold.OpToFold->getReg().isVirtual()) {
       Register Reg = Fold.OpToFold->getReg();
       MachineInstr *DefMI = Fold.OpToFold->getParent();
@@ -1300,8 +1301,6 @@
       LLVM_DEBUG(dbgs() << "Folded source from " << MI << " into OpNo "
                         << static_cast<int>(Fold.UseOpNo) << " of "
                         << *Fold.UseMI);
-      if (tryFoldCndMask(*Fold.UseMI))
-        Folded.insert(Fold.UseMI);
     } else if (Fold.isCommuted()) {
       // Restoring instruction's original operand order if fold has failed.
       TII->commuteInstruction(*Fold.UseMI, false);
diff --git a/llvm/test/CodeGen/AMDGPU/fold-cndmask-wave32.mir b/llvm/test/CodeGen/AMDGPU/fold-cndmask-wave32.mir
--- a/llvm/test/CodeGen/AMDGPU/fold-cndmask-wave32.mir
+++ b/llvm/test/CodeGen/AMDGPU/fold-cndmask-wave32.mir
@@ -11,7 +11,7 @@
     ; CHECK: [[DEF:%[0-9]+]]:sreg_32_xm0_xexec = IMPLICIT_DEF
     ; CHECK: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
     ; CHECK: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
-    ; CHECK: [[COPY:%[0-9]+]]:vgpr_32 = COPY [[S_MOV_B32_]]
+    ; CHECK: [[V_MOV_B32_e32_1:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
     %0:sreg_32_xm0_xexec = IMPLICIT_DEF
     %1:sreg_32 = S_MOV_B32 0
     %2:vgpr_32 = COPY %1:sreg_32