Index: lib/Target/AMDGPU/SIOptimizeExecMasking.cpp
===================================================================
--- lib/Target/AMDGPU/SIOptimizeExecMasking.cpp
+++ lib/Target/AMDGPU/SIOptimizeExecMasking.cpp
@@ -87,6 +87,30 @@
   return AMDGPU::NoRegister;
 }
 
+/// If \p MI is a logical operation on an exec value,
+/// return the register written to.
+static unsigned isLogicalOpOnExec(const MachineInstr &MI) {
+  switch (MI.getOpcode()) {
+  case AMDGPU::S_AND_B64:
+  case AMDGPU::S_OR_B64:
+  case AMDGPU::S_XOR_B64:
+  case AMDGPU::S_ANDN2_B64:
+  case AMDGPU::S_ORN2_B64:
+  case AMDGPU::S_NAND_B64:
+  case AMDGPU::S_NOR_B64:
+  case AMDGPU::S_XNOR_B64: {
+    const MachineOperand &Src1 = MI.getOperand(1);
+    if (Src1.isReg() && Src1.getReg() == AMDGPU::EXEC)
+      return MI.getOperand(0).getReg();
+    const MachineOperand &Src2 = MI.getOperand(2);
+    if (Src2.isReg() && Src2.getReg() == AMDGPU::EXEC)
+      return MI.getOperand(0).getReg();
+  }
+  }
+
+  return AMDGPU::NoRegister;
+}
+
 static unsigned getSaveExecOp(unsigned Opc) {
   switch (Opc) {
   case AMDGPU::S_AND_B64:
@@ -209,8 +233,24 @@
     // Scan backwards to find the def.
     auto CopyToExecInst = &*I;
     auto CopyFromExecInst = findExecCopy(*TII, MBB, I, CopyToExec);
-    if (CopyFromExecInst == E)
+    if (CopyFromExecInst == E) {
+      auto PrepareExecInst = std::next(I);
+      if (PrepareExecInst == E)
+        continue;
+      // Fold exec = COPY (S_AND_B64 reg, exec) -> exec = S_AND_B64 reg, exec
+      if (CopyToExecInst->getOperand(1).isKill() &&
+          isLogicalOpOnExec(*PrepareExecInst) == CopyToExec) {
+        DEBUG(dbgs() << "Fold exec copy: " << *PrepareExecInst);
+
+        PrepareExecInst->getOperand(0).setReg(AMDGPU::EXEC);
+
+        DEBUG(dbgs() << "into: " << *PrepareExecInst << '\n');
+
+        CopyToExecInst->eraseFromParent();
+      }
+
       continue;
+    }
 
     if (isLiveOut(MBB, CopyToExec)) {
       // The copied register is live out and has a second use in another block.
Index: lib/Target/AMDGPU/SIOptimizeExecMaskingPreRA.cpp
===================================================================
--- lib/Target/AMDGPU/SIOptimizeExecMaskingPreRA.cpp
+++ lib/Target/AMDGPU/SIOptimizeExecMaskingPreRA.cpp
@@ -154,6 +154,29 @@
     }
 
     Changed = true;
+
+    // If the only use of the saved exec in the removed instruction was
+    // S_AND_B64, fold the copy now.
+    auto SaveExec = getOrExecSource(*Lead, *TII, MRI);
+    if (!SaveExec || !SaveExec->isFullCopy())
+      continue;
+
+    unsigned SavedExec = SaveExec->getOperand(0).getReg();
+    bool SafeToReplace = true;
+    for (auto &U : MRI.use_nodbg_instructions(SavedExec)) {
+      if (U.getParent() != SaveExec->getParent()) {
+        SafeToReplace = false;
+        break;
+      }
+    }
+
+    if (SafeToReplace) {
+      DEBUG(dbgs() << "Redundant EXEC COPY: " << *SaveExec << '\n');
+      LIS->RemoveMachineInstrFromMaps(*SaveExec);
+      SaveExec->eraseFromParent();
+      MRI.replaceRegWith(SavedExec, AMDGPU::EXEC);
+      LIS->removeInterval(SavedExec);
+    }
   }
 
   if (Changed) {
Index: test/CodeGen/AMDGPU/collapse-endcf.ll
===================================================================
--- test/CodeGen/AMDGPU/collapse-endcf.ll
+++ test/CodeGen/AMDGPU/collapse-endcf.ll
@@ -4,7 +4,7 @@
 ; GCN: s_and_saveexec_b64 [[SAVEEXEC:s\[[0-9:]+\]]]
 ; GCN-NEXT: ; mask branch [[ENDIF:BB[0-9_]+]]
 ; GCN-NEXT: s_cbranch_execz [[ENDIF]]
-; GCN: s_and_saveexec_b64
+; GCN: s_and_b64 exec, exec, vcc
 ; GCN-NEXT: ; mask branch [[ENDIF]]
 ; GCN-NEXT: {{^BB[0-9_]+}}:
 ; GCN: store_dword
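
For reference, the fold implemented in the SIOptimizeExecMasking hunk rewrites
the pattern sketched below. This is a schematic, not output from the patch;
the register names %sgpr and %cond are illustrative only:

    ; before: a logical op produces an exec value, which is then copied into exec
    %sgpr = S_AND_B64 %cond, %exec
    %exec = COPY killed %sgpr        ; source operand must carry a kill flag

    ; after: the logical op is retargeted to write exec directly; the COPY is erased
    %exec = S_AND_B64 %cond, %exec

The isKill() check on the copy's source guarantees %sgpr has no readers after
the COPY, so redirecting the destination of the logical op to EXEC cannot
change any other use. The SIOptimizeExecMaskingPreRA hunk applies the same idea
one step further back: when every remaining use of a full copy of exec sits in
the copy's own block, the copy is deleted and its uses are rewritten to read
EXEC directly.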