diff --git a/llvm/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h b/llvm/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h
--- a/llvm/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h
@@ -540,7 +540,7 @@
   /// Return the operand index in \p MI that defines \p Def
   static unsigned getDefIndex(const MachineInstr &MI, Register SearchDef) {
     unsigned DefIdx = 0;
-    for (const MachineOperand &Def : MI.defs()) {
+    for (const MachineOperand &Def : MI.explicit_defs()) {
       if (Def.getReg() == SearchDef)
         break;
       ++DefIdx;
@@ -737,7 +737,7 @@
     case TargetOpcode::G_UNMERGE_VALUES: {
       unsigned DefStartBit = 0;
       unsigned DefSize = MRI.getType(DefReg).getSizeInBits();
-      for (const auto &MO : Def->defs()) {
+      for (const auto &MO : Def->explicit_defs()) {
         if (MO.getReg() == DefReg)
           break;
         DefStartBit += DefSize;
@@ -1350,7 +1350,7 @@
     if (PrevMI == &DefMI) {
       unsigned I = 0;
       bool IsDead = true;
-      for (MachineOperand &Def : DefMI.defs()) {
+      for (MachineOperand &Def : DefMI.explicit_defs()) {
         if (I != DefIdx) {
           if (!MRI.use_empty(Def.getReg())) {
             IsDead = false;
diff --git a/llvm/include/llvm/CodeGen/MachineInstr.h b/llvm/include/llvm/CodeGen/MachineInstr.h
--- a/llvm/include/llvm/CodeGen/MachineInstr.h
+++ b/llvm/include/llvm/CodeGen/MachineInstr.h
@@ -673,17 +673,6 @@
                ? make_range(operands_begin(), operands_begin() + 1)
                : make_range(operands_begin() + 2, operands_end());
   }
-  /// Returns a range over all explicit operands that are register definitions.
-  /// Implicit definition are not included!
-  iterator_range<mop_iterator> defs() {
-    return make_range(operands_begin(),
-                      operands_begin() + getNumExplicitDefs());
-  }
-  /// \copydoc defs()
-  iterator_range<const_mop_iterator> defs() const {
-    return make_range(operands_begin(),
-                      operands_begin() + getNumExplicitDefs());
-  }
   /// Returns a range that includes all operands that are register uses.
   /// This may include unrelated operands which are not register uses.
   iterator_range<mop_iterator> uses() {
     return make_range(operands_begin() + getNumExplicitDefs(), operands_end());
   }
@@ -693,10 +682,26 @@
   iterator_range<const_mop_iterator> uses() const {
     return make_range(operands_begin() + getNumExplicitDefs(), operands_end());
   }
+
+  /// Returns a range over all explicit operands that are register definitions.
+  iterator_range<mop_iterator> explicit_defs() {
+    return make_range(operands_begin(),
+                      operands_begin() + getNumExplicitDefs());
+  }
+
+  /// \copydoc explicit_defs()
+  iterator_range<const_mop_iterator> explicit_defs() const {
+    return make_range(operands_begin(),
+                      operands_begin() + getNumExplicitDefs());
+  }
+
+  /// Returns a range over all explicit operands that are register uses.
   iterator_range<mop_iterator> explicit_uses() {
     return make_range(operands_begin() + getNumExplicitDefs(),
                       operands_begin() + getNumExplicitOperands());
   }
+
+  /// \copydoc explicit_uses()
   iterator_range<const_mop_iterator> explicit_uses() const {
     return make_range(operands_begin() + getNumExplicitDefs(),
                       operands_begin() + getNumExplicitOperands());
   }
diff --git a/llvm/lib/CodeGen/CodeGenCommonISel.cpp b/llvm/lib/CodeGen/CodeGenCommonISel.cpp
--- a/llvm/lib/CodeGen/CodeGenCommonISel.cpp
+++ b/llvm/lib/CodeGen/CodeGenCommonISel.cpp
@@ -214,7 +214,7 @@
   assert(Trunc.getOpcode() == TargetOpcode::G_TRUNC && "Must be a G_TRUNC");
   const auto FromLLT = MRI.getType(Trunc.getOperand(1).getReg());
-  const auto ToLLT = MRI.getType(Trunc.defs().begin()->getReg());
+  const auto ToLLT = MRI.getType(Trunc.explicit_defs().begin()->getReg());
   // TODO: Support non-scalar types.
   if (!FromLLT.isScalar()) {
diff --git a/llvm/lib/CodeGen/DetectDeadLanes.cpp b/llvm/lib/CodeGen/DetectDeadLanes.cpp
--- a/llvm/lib/CodeGen/DetectDeadLanes.cpp
+++ b/llvm/lib/CodeGen/DetectDeadLanes.cpp
@@ -200,7 +200,7 @@
   // they really need to be modeled differently!
   if (MI.getOpcode() == TargetOpcode::PATCHPOINT)
     return;
-  const MachineOperand &Def = *MI.defs().begin();
+  const MachineOperand &Def = *MI.explicit_defs().begin();
   Register DefReg = Def.getReg();
   if (!DefReg.isVirtual())
     return;
@@ -343,7 +343,7 @@
   unsigned SubReg = MO.getSubReg();
   if (lowersToCopies(UseMI)) {
     assert(UseMI.getDesc().getNumDefs() == 1);
-    const MachineOperand &Def = *UseMI.defs().begin();
+    const MachineOperand &Def = *UseMI.explicit_defs().begin();
     Register DefReg = Def.getReg();
     // The used lanes of COPY-like instruction operands are determined by the
     // following dataflow analysis.
diff --git a/llvm/lib/CodeGen/FixupStatepointCallerSaved.cpp b/llvm/lib/CodeGen/FixupStatepointCallerSaved.cpp
--- a/llvm/lib/CodeGen/FixupStatepointCallerSaved.cpp
+++ b/llvm/lib/CodeGen/FixupStatepointCallerSaved.cpp
@@ -373,7 +373,7 @@
   SmallSet GCRegs;
   // All GC pointer operands assigned to registers produce new value.
   // Since they're tied to their defs, it is enough to collect def registers.
-  for (const auto &Def : MI.defs())
+  for (const auto &Def : MI.explicit_defs())
     GCRegs.insert(Def.getReg());
   SmallSet VisitedRegs;
diff --git a/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
--- a/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
@@ -246,7 +246,7 @@
     auto Unmerge = buildUnmerge(Op0Ty.getElementType(), Op0);
     SmallVector Regs;
-    for (auto Op : Unmerge.getInstr()->defs())
+    for (auto Op : Unmerge.getInstr()->explicit_defs())
       Regs.push_back(Op.getReg());
     Register Undef = buildUndef(Op0Ty.getElementType()).getReg(0);
     unsigned NumberOfPadElts = ResTy.getNumElements() - Regs.size();
diff --git a/llvm/lib/CodeGen/GlobalISel/Utils.cpp b/llvm/lib/CodeGen/GlobalISel/Utils.cpp
--- a/llvm/lib/CodeGen/GlobalISel/Utils.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/Utils.cpp
@@ -1365,7 +1365,7 @@
 }
 void llvm::salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI) {
-  for (auto &Def : MI.defs()) {
+  for (auto &Def : MI.explicit_defs()) {
     assert(Def.isReg() && "Must be a reg");
     SmallVector DbgUsers;
diff --git a/llvm/lib/CodeGen/MachineCopyPropagation.cpp b/llvm/lib/CodeGen/MachineCopyPropagation.cpp
--- a/llvm/lib/CodeGen/MachineCopyPropagation.cpp
+++ b/llvm/lib/CodeGen/MachineCopyPropagation.cpp
@@ -593,7 +593,7 @@
 /// The umull instruction is unpredictable unless RdHi and RdLo are different.
 bool MachineCopyPropagation::hasOverlappingMultipleDef(
     const MachineInstr &MI, const MachineOperand &MODef, Register Def) {
-  for (const MachineOperand &MIDef : MI.defs()) {
+  for (const MachineOperand &MIDef : MI.explicit_defs()) {
     if ((&MIDef != &MODef) && MIDef.isReg() &&
         TRI->regsOverlap(Def, MIDef.getReg()))
       return true;
diff --git a/llvm/lib/CodeGen/MachineLoopUtils.cpp b/llvm/lib/CodeGen/MachineLoopUtils.cpp
--- a/llvm/lib/CodeGen/MachineLoopUtils.cpp
+++ b/llvm/lib/CodeGen/MachineLoopUtils.cpp
@@ -46,7 +46,7 @@
   for (MachineInstr &MI : *Loop) {
     MachineInstr *NewMI = MF.CloneMachineInstr(&MI);
     NewBB->insert(InsertPt, NewMI);
-    for (MachineOperand &MO : NewMI->defs()) {
+    for (MachineOperand &MO : NewMI->explicit_defs()) {
      Register OrigR = MO.getReg();
       if (OrigR.isPhysical())
         continue;
diff --git a/llvm/lib/CodeGen/MachineScheduler.cpp b/llvm/lib/CodeGen/MachineScheduler.cpp
--- a/llvm/lib/CodeGen/MachineScheduler.cpp
+++ b/llvm/lib/CodeGen/MachineScheduler.cpp
@@ -3306,7 +3306,7 @@
     // towards scheduling this later. Make sure all register defs are to
     // physical registers.
     bool DoBias = true;
-    for (const MachineOperand &Op : MI->defs()) {
+    for (const MachineOperand &Op : MI->explicit_defs()) {
       if (Op.isReg() && !Op.getReg().isPhysical()) {
         DoBias = false;
         break;
diff --git a/llvm/lib/CodeGen/ModuloSchedule.cpp b/llvm/lib/CodeGen/ModuloSchedule.cpp
--- a/llvm/lib/CodeGen/ModuloSchedule.cpp
+++ b/llvm/lib/CodeGen/ModuloSchedule.cpp
@@ -1352,7 +1352,7 @@
       continue;
     }
-    for (MachineOperand &Def : MI->defs()) {
+    for (MachineOperand &Def : MI->explicit_defs()) {
       for (MachineInstr &MI : MRI.use_instructions(Def.getReg())) {
         if (MI.getParent() != BB) {
           phi(Def.getReg());
@@ -1618,7 +1618,7 @@
     if (Stage == -1 || Stage >= MinStage)
       continue;
-    for (MachineOperand &DefMO : MI->defs()) {
+    for (MachineOperand &DefMO : MI->explicit_defs()) {
       SmallVector, 4> Subs;
       for (MachineInstr &UseMI : MRI.use_instructions(DefMO.getReg())) {
         // Only PHIs can use values from this block by construction.
@@ -1932,7 +1932,7 @@
     // Instruction is live, no rewriting to do.
     return;
-  for (MachineOperand &DefMO : MI->defs()) {
+  for (MachineOperand &DefMO : MI->explicit_defs()) {
     SmallVector, 4> Subs;
     for (MachineInstr &UseMI : MRI.use_instructions(DefMO.getReg())) {
       // Only PHIs can use values from this block by construction.
diff --git a/llvm/lib/CodeGen/SplitKit.cpp b/llvm/lib/CodeGen/SplitKit.cpp
--- a/llvm/lib/CodeGen/SplitKit.cpp
+++ b/llvm/lib/CodeGen/SplitKit.cpp
@@ -441,7 +441,7 @@
     const MachineInstr *DefMI = LIS.getInstructionFromIndex(Def);
     assert(DefMI != nullptr);
     LaneBitmask LM;
-    for (const MachineOperand &DefOp : DefMI->defs()) {
+    for (const MachineOperand &DefOp : DefMI->explicit_defs()) {
       Register R = DefOp.getReg();
       if (R != LI.reg())
         continue;
@@ -803,7 +803,7 @@
 }
 static bool hasTiedUseOf(MachineInstr &MI, unsigned Reg) {
-  return any_of(MI.defs(), [Reg](const MachineOperand &MO) {
+  return any_of(MI.explicit_defs(), [Reg](const MachineOperand &MO) {
     return MO.isReg() && MO.isTied() && MO.getReg() == Reg;
   });
 }
diff --git a/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp b/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp
--- a/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp
+++ b/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp
@@ -697,8 +697,8 @@
     assert(NewMI->getNumExplicitDefs() == 1);
     // Find the old and new def location.
-    unsigned OldIdx = mi->defs().begin()->getOperandNo();
-    unsigned NewIdx = NewMI->defs().begin()->getOperandNo();
+    unsigned OldIdx = mi->explicit_defs().begin()->getOperandNo();
+    unsigned NewIdx = NewMI->explicit_defs().begin()->getOperandNo();
     // Record that one def has been replaced by the other.
     unsigned NewInstrNum = NewMI->getDebugInstrNum();
diff --git a/llvm/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp b/llvm/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp
--- a/llvm/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp
+++ b/llvm/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp
@@ -606,7 +606,7 @@
     for (auto &I : MI->uses())
       maybeKillChain(I, Idx, ActiveChains);
-    for (auto &I : MI->defs())
+    for (auto &I : MI->explicit_defs())
       maybeKillChain(I, Idx, ActiveChains);
     // Create a new chain. Multiplies don't require forwarding so can go on any
@@ -671,7 +671,7 @@
   // lists.
   for (auto &I : MI->uses())
     maybeKillChain(I, Idx, ActiveChains);
-  for (auto &I : MI->defs())
+  for (auto &I : MI->explicit_defs())
     maybeKillChain(I, Idx, ActiveChains);
 }
diff --git a/llvm/lib/Target/AArch64/AArch64SpeculationHardening.cpp b/llvm/lib/Target/AArch64/AArch64SpeculationHardening.cpp
--- a/llvm/lib/Target/AArch64/AArch64SpeculationHardening.cpp
+++ b/llvm/lib/Target/AArch64/AArch64SpeculationHardening.cpp
@@ -481,10 +481,12 @@
     // when the address loaded from gets masked. However, masking is only
     // easy to do efficiently on GPR registers, so for loads into non-GPR
     // registers (e.g. floating point loads), mask the address loaded from.
-    bool AllDefsAreGPR = llvm::all_of(MI.defs(), [&](MachineOperand &Op) {
-      return Op.isReg() && (AArch64::GPR32allRegClass.contains(Op.getReg()) ||
-                            AArch64::GPR64allRegClass.contains(Op.getReg()));
-    });
+    bool AllDefsAreGPR =
+        llvm::all_of(MI.explicit_defs(), [&](MachineOperand &Op) {
+          return Op.isReg() &&
+                 (AArch64::GPR32allRegClass.contains(Op.getReg()) ||
+                  AArch64::GPR64allRegClass.contains(Op.getReg()));
+        });
     // FIXME: it might be a worthwhile optimization to not mask loaded
     // values if all the registers involved in address calculation are already
     // hardened, leading to this load not able to execute on a miss-speculated
@@ -495,7 +497,7 @@
   // First remove registers from AlreadyMaskedRegisters if their value is
   // updated by this instruction - it makes them contain a new value that is
   // not guaranteed to already have been masked.
-  for (MachineOperand Op : MI.defs())
+  for (MachineOperand Op : MI.explicit_defs())
     for (MCRegAliasIterator AI(Op.getReg(), TRI, true); AI.isValid(); ++AI)
       RegsAlreadyMasked.reset(*AI);
@@ -506,7 +508,7 @@
   // https://llvm.org/docs/SpeculativeLoadHardening.html
   if (HardenLoadedData)
-    for (auto Def : MI.defs()) {
+    for (auto Def : MI.explicit_defs()) {
       if (Def.isDead())
         // Do not mask a register that is not used further.
         continue;
@@ -567,7 +569,7 @@
     // Mark this register and all its aliasing registers as needing to be
     // value speculation hardened before its next use, by using a CSDB
     // barrier instruction.
-    for (MachineOperand Op : MI.defs())
+    for (MachineOperand Op : MI.explicit_defs())
      for (MCRegAliasIterator AI(Op.getReg(), TRI, true); AI.isValid(); ++AI)
        RegsNeedingCSDBBeforeUse.set(*AI);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInsertDelayAlu.cpp b/llvm/lib/Target/AMDGPU/AMDGPUInsertDelayAlu.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPUInsertDelayAlu.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInsertDelayAlu.cpp
@@ -386,7 +386,7 @@
     if (Type != OTHER) {
       // TODO: Scan implicit defs too?
-      for (const auto &Op : MI.defs()) {
+      for (const auto &Op : MI.explicit_defs()) {
         unsigned Latency = SchedModel.computeOperandLatency(
             &MI, Op.getOperandNo(), nullptr, 0);
         for (MCRegUnitIterator UI(Op.getReg(), TRI); UI.isValid(); ++UI)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUMachineCFGStructurizer.cpp b/llvm/lib/Target/AMDGPU/AMDGPUMachineCFGStructurizer.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPUMachineCFGStructurizer.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUMachineCFGStructurizer.cpp
@@ -736,7 +736,7 @@
   LLVM_DEBUG(dbgs() << "-Store Live Outs Begin (" << printMBBReference(*MBB)
                     << ")-\n");
   for (auto &II : *MBB) {
-    for (auto &RI : II.defs()) {
+    for (auto &RI : II.explicit_defs()) {
       storeLiveOutReg(MBB, RI.getReg(), RI.getParent(), MRI, TRI, PHIInfo);
     }
     for (auto &IRI : II.implicit_operands()) {
@@ -776,7 +776,7 @@
                                    PHILinearize &PHIInfo,
                                    RegionMRT *TopRegion) {
   for (auto &II : *MBB) {
-    for (auto &RI : II.defs()) {
+    for (auto &RI : II.explicit_defs()) {
       storeLiveOutRegRegion(TopRegion, RI.getReg(), RI.getParent(), MRI, TRI,
                             PHIInfo);
     }
diff --git a/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp b/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp
--- a/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp
+++ b/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp
@@ -603,7 +603,7 @@
 void GCNHazardRecognizer::addClauseInst(const MachineInstr &MI) {
   // XXX: Do we need to worry about implicit operands
-  addRegsToSet(TRI, MI.defs(), ClauseDefs);
+  addRegsToSet(TRI, MI.explicit_defs(), ClauseDefs);
   addRegsToSet(TRI, MI.uses(), ClauseUses);
 }
@@ -1011,7 +1011,7 @@
   const MachineRegisterInfo &MRI = MF.getRegInfo();
-  for (const MachineOperand &Def : VALU->defs()) {
+  for (const MachineOperand &Def : VALU->explicit_defs()) {
     WaitStatesNeeded =
         std::max(WaitStatesNeeded, checkVALUHazardsHelper(Def, MRI));
   }
@@ -1157,7 +1157,7 @@
       !SIInstrInfo::isFLAT(I))
     return false;
-  for (const MachineOperand &Def : MI->defs()) {
+  for (const MachineOperand &Def : MI->explicit_defs()) {
     const MachineOperand *Op =
         I.findRegisterUseOperand(Def.getReg(), false, TRI);
     if (!Op)
@@ -2565,7 +2565,7 @@
   if (!IsVALU && !IsMemOrExport)
     return WaitStatesNeeded;
-  for (const MachineOperand &Def : MI->defs()) {
+  for (const MachineOperand &Def : MI->explicit_defs()) {
     const int SMFMA4x4WriteVgprVALUWawWaitStates = 5;
     const int SMFMA16x16WriteVgprVALUWawWaitStates = 11;
     const int SMFMA32x32WriteVgprVALUWawWaitStates = 19;
diff --git a/llvm/lib/Target/AMDGPU/SIFormMemoryClauses.cpp b/llvm/lib/Target/AMDGPU/SIFormMemoryClauses.cpp
--- a/llvm/lib/Target/AMDGPU/SIFormMemoryClauses.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFormMemoryClauses.cpp
@@ -117,7 +117,7 @@
   if (!IsVMEMClause && !isSMEMClauseInst(MI))
     return false;
   // If this is a load instruction where the result has been coalesced with an
   // operand, then we cannot clause it.
-  for (const MachineOperand &ResMO : MI.defs()) {
+  for (const MachineOperand &ResMO : MI.explicit_defs()) {
     Register ResReg = ResMO.getReg();
     for (const MachineOperand &MO : MI.uses()) {
       if (!MO.isReg() || MO.isDef())
diff --git a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
--- a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
@@ -293,7 +293,7 @@
   if (!DefInstr)
     return nullptr;
-  for (auto &DefMO : DefInstr->defs()) {
+  for (auto &DefMO : DefInstr->explicit_defs()) {
     if (DefMO.isReg() && DefMO.getReg() == Reg->getReg())
       return &DefMO;
   }
diff --git a/llvm/lib/Target/AMDGPU/SIPostRABundler.cpp b/llvm/lib/Target/AMDGPU/SIPostRABundler.cpp
--- a/llvm/lib/Target/AMDGPU/SIPostRABundler.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPostRABundler.cpp
@@ -152,7 +152,7 @@
       assert(Defs.empty());
       if (I->getNumExplicitDefs() != 0)
-        Defs.insert(I->defs().begin()->getReg());
+        Defs.insert(I->explicit_defs().begin()->getReg());
       MachineBasicBlock::instr_iterator BundleStart = I;
       MachineBasicBlock::instr_iterator BundleEnd = I;
@@ -164,7 +164,7 @@
         if (canBundle(*BundleEnd, *I)) {
           BundleEnd = I;
           if (I->getNumExplicitDefs() != 0)
-            Defs.insert(I->defs().begin()->getReg());
+            Defs.insert(I->explicit_defs().begin()->getReg());
           ++ClauseLength;
         } else if (!I->isMetaInstruction()) {
           // Allow meta instructions in between bundle candidates, but do not
diff --git a/llvm/lib/Target/AMDGPU/SIPreAllocateWWMRegs.cpp b/llvm/lib/Target/AMDGPU/SIPreAllocateWWMRegs.cpp
--- a/llvm/lib/Target/AMDGPU/SIPreAllocateWWMRegs.cpp
+++ b/llvm/lib/Target/AMDGPU/SIPreAllocateWWMRegs.cpp
@@ -237,7 +237,7 @@
       LLVM_DEBUG(dbgs() << "Processing " << MI);
-      for (MachineOperand &DefOpnd : MI.defs()) {
+      for (MachineOperand &DefOpnd : MI.explicit_defs()) {
         RegsAssigned |= processDef(DefOpnd);
       }
     }
diff --git a/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp b/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp
--- a/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp
+++ b/llvm/lib/Target/AMDGPU/SIShrinkInstructions.cpp
@@ -578,7 +578,7 @@
 bool SIShrinkInstructions::instModifiesReg(const MachineInstr *MI,
                                            unsigned Reg,
                                            unsigned SubReg) const {
-  return instAccessReg(MI->defs(), Reg, SubReg);
+  return instAccessReg(MI->explicit_defs(), Reg, SubReg);
 }
 TargetInstrInfo::RegSubRegPair
diff --git a/llvm/lib/Target/AMDGPU/SIWholeQuadMode.cpp b/llvm/lib/Target/AMDGPU/SIWholeQuadMode.cpp
--- a/llvm/lib/Target/AMDGPU/SIWholeQuadMode.cpp
+++ b/llvm/lib/Target/AMDGPU/SIWholeQuadMode.cpp
@@ -600,7 +600,7 @@
       // VGPRs correspond to shader inputs and outputs. Inputs are
       // only used, outputs are only defined.
      // FIXME: is this still valid?
-      for (const MachineOperand &MO : MI.defs()) {
+      for (const MachineOperand &MO : MI.explicit_defs()) {
         if (!MO.isReg())
           continue;
diff --git a/llvm/lib/Target/Mips/MipsInstrInfo.cpp b/llvm/lib/Target/Mips/MipsInstrInfo.cpp
--- a/llvm/lib/Target/Mips/MipsInstrInfo.cpp
+++ b/llvm/lib/Target/Mips/MipsInstrInfo.cpp
@@ -597,7 +597,7 @@
       return false;
   }
-  for (const MachineOperand &Op : FPUMI.defs()) {
+  for (const MachineOperand &Op : FPUMI.explicit_defs()) {
     if (!Op.isReg())
       continue;
@@ -618,7 +618,7 @@
   if (MIInSlot.isInlineAsm())
     return false;
-  return !llvm::any_of(LoadMI.defs(), [&](const MachineOperand &Op) {
+  return !llvm::any_of(LoadMI.explicit_defs(), [&](const MachineOperand &Op) {
     return Op.isReg() && MIInSlot.readsRegister(Op.getReg());
   });
 }
diff --git a/llvm/lib/Target/NVPTX/NVPTXProxyRegErasure.cpp b/llvm/lib/Target/NVPTX/NVPTXProxyRegErasure.cpp
--- a/llvm/lib/Target/NVPTX/NVPTXProxyRegErasure.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXProxyRegErasure.cpp
@@ -96,7 +96,7 @@
 void NVPTXProxyRegErasure::replaceMachineInstructionUsage(MachineFunction &MF,
                                                           MachineInstr &MI) {
   auto &InOp = *MI.uses().begin();
-  auto &OutOp = *MI.defs().begin();
+  auto &OutOp = *MI.explicit_defs().begin();
   assert(InOp.isReg() && "ProxyReg input operand should be a register.");
   assert(OutOp.isReg() && "ProxyReg output operand should be a register.");
diff --git a/llvm/lib/Target/PowerPC/PPCBranchCoalescing.cpp b/llvm/lib/Target/PowerPC/PPCBranchCoalescing.cpp
--- a/llvm/lib/Target/PowerPC/PPCBranchCoalescing.cpp
+++ b/llvm/lib/Target/PowerPC/PPCBranchCoalescing.cpp
@@ -429,7 +429,7 @@
   LLVM_DEBUG(dbgs() << "Checking if " << MI << " can move to beginning of "
                     << TargetMBB.getNumber() << "\n");
-  for (auto &Def : MI.defs()) { // Looking at Def
+  for (auto &Def : MI.explicit_defs()) { // Looking at Def
     for (auto &Use : MRI->use_instructions(Def.getReg())) {
       if (Use.isPHI() && Use.getParent() == &TargetMBB) {
         LLVM_DEBUG(dbgs() << " *** used in a PHI -- cannot move ***\n");
@@ -540,7 +540,7 @@
            I = SourceRegion.BranchBlock->instr_begin(),
            E = SourceRegion.BranchBlock->getFirstNonPHI();
        I != E; ++I) {
-    for (auto &Def : I->defs())
+    for (auto &Def : I->explicit_defs())
       for (auto &Use : MRI->use_instructions(Def.getReg())) {
         if (Use.isPHI() && Use.getParent() == SourceRegion.BranchTargetBlock) {
           LLVM_DEBUG(dbgs()
diff --git a/llvm/lib/Target/RISCV/RISCVRVVInitUndef.cpp b/llvm/lib/Target/RISCV/RISCVRVVInitUndef.cpp
--- a/llvm/lib/Target/RISCV/RISCVRVVInitUndef.cpp
+++ b/llvm/lib/Target/RISCV/RISCVRVVInitUndef.cpp
@@ -172,7 +172,7 @@
 }
 static bool isEarlyClobberMI(MachineInstr &MI) {
-  return llvm::any_of(MI.defs(), [](const MachineOperand &DefMO) {
+  return llvm::any_of(MI.explicit_defs(), [](const MachineOperand &DefMO) {
     return DefMO.isReg() && DefMO.isEarlyClobber();
   });
 }
diff --git a/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp b/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp
--- a/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVGlobalRegistry.cpp
@@ -648,7 +648,7 @@
   assert(SpirvType && "Attempting to get type id for nullptr type.");
   if (SpirvType->getOpcode() == SPIRV::OpTypeForwardPointer)
     return SpirvType->uses().begin()->getReg();
-  return SpirvType->defs().begin()->getReg();
+  return SpirvType->explicit_defs().begin()->getReg();
 }
 SPIRVType *SPIRVGlobalRegistry::createSPIRVType(
diff --git a/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp b/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp
--- a/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp
@@ -300,7 +300,7 @@
       }
     } else if (MI.getOpcode() == SPIRV::OpFunction) {
       // Record all internal OpFunction declarations.
-      Register Reg = MI.defs().begin()->getReg();
+      Register Reg = MI.explicit_defs().begin()->getReg();
       Register GlobalReg = MAI.getRegisterAlias(MI.getMF(), Reg);
       assert(GlobalReg.isValid());
       MAI.FuncMap[F] = GlobalReg;
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyDebugFixup.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyDebugFixup.cpp
--- a/llvm/lib/Target/WebAssembly/WebAssemblyDebugFixup.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyDebugFixup.cpp
@@ -144,7 +144,7 @@
           }
         }
       }
-      for (MachineOperand &MO : MI.defs()) {
+      for (MachineOperand &MO : MI.explicit_defs()) {
        if (MO.isReg() && MFI.isVRegStackified(MO.getReg())) {
           Stack.push_back({MO.getReg(), nullptr});
         }
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyExplicitLocals.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyExplicitLocals.cpp
--- a/llvm/lib/Target/WebAssembly/WebAssemblyExplicitLocals.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyExplicitLocals.cpp
@@ -189,7 +189,7 @@
     MachineInstr *Def = MRI.getVRegDef(Reg);
     // If this instruction has any non-stackified defs, it is the start
-    for (auto DefReg : Def->defs()) {
+    for (auto DefReg : Def->explicit_defs()) {
       if (!MFI.isVRegStackified(DefReg.getReg())) {
         return Def;
       }
@@ -328,7 +328,7 @@
       }
       // Insert local.sets for any defs that aren't stackified yet.
-      for (auto &Def : MI.defs()) {
+      for (auto &Def : MI.explicit_defs()) {
         Register OldReg = Def.getReg();
         if (!MFI.isVRegStackified(OldReg)) {
           const TargetRegisterClass *RC = MRI.getRegClass(OldReg);
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
--- a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
@@ -599,7 +599,7 @@
     CallParams.addOperand(FnPtr);
   }
-  for (auto Def : CallResults.defs())
+  for (auto Def : CallResults.explicit_defs())
     MIB.add(Def);
   if (IsIndirect) {
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyMCInstLower.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyMCInstLower.cpp
--- a/llvm/lib/Target/WebAssembly/WebAssemblyMCInstLower.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyMCInstLower.cpp
@@ -201,7 +201,7 @@
       const MachineRegisterInfo &MRI =
           MI->getParent()->getParent()->getRegInfo();
-      for (const MachineOperand &MO : MI->defs())
+      for (const MachineOperand &MO : MI->explicit_defs())
         Returns.push_back(
             WebAssembly::regClassToValType(MRI.getRegClass(MO.getReg())));
       for (const MachineOperand &MO : MI->explicit_uses())
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyRegStackify.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyRegStackify.cpp
--- a/llvm/lib/Target/WebAssembly/WebAssemblyRegStackify.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyRegStackify.cpp
@@ -330,7 +330,7 @@
   // have to be taken to ensure the drops of the initial dead defs can be
   // placed. This would require checking that no previous defs are used in the
   // same instruction as subsequent defs.
-  if (Def != DefI->defs().begin())
+  if (Def != DefI->explicit_defs().begin())
     return false;
   // If any subsequent def is used prior to the current value by the same
@@ -338,7 +338,7 @@
   // stackify. Stackifying in this case would require that def moving below the
   // current def in the stack, which cannot be achieved, even with locals.
   // Also ensure we don't sink the def past any other prior uses.
-  for (const auto &SubsequentDef : drop_begin(DefI->defs())) {
+  for (const auto &SubsequentDef : drop_begin(DefI->explicit_defs())) {
     auto I = std::next(MachineBasicBlock::const_iterator(DefI));
     auto E = std::next(MachineBasicBlock::const_iterator(UseI));
     for (; I != E; ++I) {
@@ -905,9 +905,9 @@
         // Stackifying a multivalue def may unlock in-place stackification of
         // subsequent defs. TODO: Handle the case where the consecutive uses are
         // not all in the same instruction.
-        auto *SubsequentDef = Insert->defs().begin();
+        auto *SubsequentDef = Insert->explicit_defs().begin();
         auto *SubsequentUse = &Use;
-        while (SubsequentDef != Insert->defs().end() &&
+        while (SubsequentDef != Insert->explicit_defs().end() &&
                SubsequentUse != Use.getParent()->uses().end()) {
           if (!SubsequentDef->isReg() || !SubsequentUse->isReg())
             break;
@@ -966,7 +966,7 @@
         assert(Stack.pop_back_val() == Reg &&
                "Register stack pop should be paired with a push");
       }
-      for (MachineOperand &MO : MI.defs()) {
+      for (MachineOperand &MO : MI.explicit_defs()) {
         if (!MO.isReg())
           continue;
         Register Reg = MO.getReg();
diff --git a/llvm/lib/Target/X86/X86CmovConversion.cpp b/llvm/lib/Target/X86/X86CmovConversion.cpp
--- a/llvm/lib/Target/X86/X86CmovConversion.cpp
+++ b/llvm/lib/Target/X86/X86CmovConversion.cpp
@@ -332,12 +332,12 @@
       SkipGroup = true;
     }
     // Check if we were relying on zero-extending behavior of the CMOV.
-    if (!SkipGroup &&
-        llvm::any_of(
-            MRI->use_nodbg_instructions(I.defs().begin()->getReg()),
-            [&](MachineInstr &UseI) {
-              return UseI.getOpcode() == X86::SUBREG_TO_REG;
-            }))
+    if (!SkipGroup && llvm::any_of(MRI->use_nodbg_instructions(
+                                       I.explicit_defs().begin()->getReg()),
+                                   [&](MachineInstr &UseI) {
+                                     return UseI.getOpcode() ==
+                                            X86::SUBREG_TO_REG;
+                                   }))
       // FIXME: We should model the cost of using an explicit MOV to handle
       // the zero-extension rather than just refusing to handle this.
       SkipGroup = true;
@@ -550,7 +550,7 @@
     // Avoid CMOV instruction which value is used as a pointer to load from.
     // This is another conservative check to avoid converting CMOV instruction
     // used with tree-search like algorithm, where the branch is unpredicted.
-    auto UIs = MRI->use_instructions(MI->defs().begin()->getReg());
+    auto UIs = MRI->use_instructions(MI->explicit_defs().begin()->getReg());
     if (!UIs.empty() && ++UIs.begin() == UIs.end()) {
       unsigned Op = UIs.begin()->getOpcode();
       if (Op == X86::MOV64rm || Op == X86::MOV32rm) {
diff --git a/llvm/lib/Target/X86/X86DomainReassignment.cpp b/llvm/lib/Target/X86/X86DomainReassignment.cpp
--- a/llvm/lib/Target/X86/X86DomainReassignment.cpp
+++ b/llvm/lib/Target/X86/X86DomainReassignment.cpp
@@ -586,7 +586,7 @@
     }
     encloseInstr(C, &UseMI);
-    for (auto &DefOp : UseMI.defs()) {
+    for (auto &DefOp : UseMI.explicit_defs()) {
       if (!DefOp.isReg())
         continue;
diff --git a/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp b/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
--- a/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
+++ b/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
@@ -1299,7 +1299,7 @@
       if (llvm::any_of(MI.uses(), [&](MachineOperand &Op) {
             return Op.isReg() && LoadDepRegs.test(Op.getReg());
           }))
-        for (MachineOperand &Def : MI.defs())
+        for (MachineOperand &Def : MI.explicit_defs())
           if (Def.isReg())
             LoadDepRegs.set(Def.getReg());
@@ -1383,7 +1383,7 @@
       if (IndexReg)
         HardenedAddrRegs.insert(IndexReg);
-      for (MachineOperand &Def : MI.defs())
+      for (MachineOperand &Def : MI.explicit_defs())
         if (Def.isReg())
           LoadDepRegs.set(Def.getReg());
     }
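
For context, a minimal usage sketch of the renamed accessor, assuming it behaves exactly like the old defs() (only the leading explicit def operands are visited). The helper name countExplicitVRegDefs is hypothetical and not part of this change:

  #include "llvm/CodeGen/MachineInstr.h"
  #include "llvm/CodeGen/MachineOperand.h"

  // Count how many explicit defs of an instruction are virtual registers.
  // explicit_defs() yields only the leading explicit def operands; implicit
  // defs (e.g. a clobbered status register) are not part of the range.
  static unsigned countExplicitVRegDefs(const llvm::MachineInstr &MI) {
    unsigned NumVRegDefs = 0;
    for (const llvm::MachineOperand &MO : MI.explicit_defs())
      if (MO.isReg() && MO.getReg().isVirtual())
        ++NumVRegDefs;
    return NumVRegDefs;
  }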