diff --git a/llvm/lib/Target/X86/X86CallFrameOptimization.cpp b/llvm/lib/Target/X86/X86CallFrameOptimization.cpp
--- a/llvm/lib/Target/X86/X86CallFrameOptimization.cpp
+++ b/llvm/lib/Target/X86/X86CallFrameOptimization.cpp
@@ -105,7 +105,7 @@
   void adjustCallSequence(MachineFunction &MF, const CallContext &Context);
 
   MachineInstr *canFoldIntoRegPush(MachineBasicBlock::iterator FrameSetup,
-                                   unsigned Reg);
+                                   Register Reg);
 
   enum InstClassification { Convert, Skip, Exit };
 
@@ -336,7 +336,7 @@
     if (!MO.isReg())
       continue;
     Register Reg = MO.getReg();
-    if (!Register::isPhysicalRegister(Reg))
+    if (!Reg.isPhysical())
       continue;
     if (RegInfo.regsOverlap(Reg, RegInfo.getStackRegister()))
       return Exit;
@@ -454,7 +454,7 @@
       if (!MO.isReg())
         continue;
       Register Reg = MO.getReg();
-      if (Register::isPhysicalRegister(Reg))
+      if (Reg.isPhysical())
        UsedRegs.insert(Reg);
     }
   }
@@ -599,7 +599,7 @@
 }
 
 MachineInstr *X86CallFrameOptimization::canFoldIntoRegPush(
-    MachineBasicBlock::iterator FrameSetup, unsigned Reg) {
+    MachineBasicBlock::iterator FrameSetup, Register Reg) {
   // Do an extremely restricted form of load folding.
   // ISel will often create patterns like:
   //   movl 4(%edi), %eax
@@ -610,7 +610,7 @@
   //   movl %eax, (%esp)
   //   call
   // Get rid of those with prejudice.
-  if (!Register::isVirtualRegister(Reg))
+  if (!Reg.isVirtual())
     return nullptr;
 
   // Make sure this is the only use of Reg.
diff --git a/llvm/lib/Target/X86/X86CmovConversion.cpp b/llvm/lib/Target/X86/X86CmovConversion.cpp
--- a/llvm/lib/Target/X86/X86CmovConversion.cpp
+++ b/llvm/lib/Target/X86/X86CmovConversion.cpp
@@ -439,7 +439,7 @@
         if (!MO.isReg() || !MO.isUse())
           continue;
         Register Reg = MO.getReg();
-        auto &RDM = RegDefMaps[Register::isVirtualRegister(Reg)];
+        auto &RDM = RegDefMaps[Reg.isVirtual()];
         if (MachineInstr *DefMI = RDM.lookup(Reg)) {
           OperandToDefMap[&MO] = DefMI;
           DepthInfo Info = DepthMap.lookup(DefMI);
@@ -459,7 +459,7 @@
         if (!MO.isReg() || !MO.isDef())
           continue;
         Register Reg = MO.getReg();
-        RegDefMaps[Register::isVirtualRegister(Reg)][Reg] = &MI;
+        RegDefMaps[Reg.isVirtual()][Reg] = &MI;
       }
 
       unsigned Latency = TSchedModel.computeInstrLatency(&MI);
diff --git a/llvm/lib/Target/X86/X86DomainReassignment.cpp b/llvm/lib/Target/X86/X86DomainReassignment.cpp
--- a/llvm/lib/Target/X86/X86DomainReassignment.cpp
+++ b/llvm/lib/Target/X86/X86DomainReassignment.cpp
@@ -220,14 +220,12 @@
     // Don't allow copies to/from GR8/GR16 physical registers.
     // FIXME: Is there some better way to support this?
     Register DstReg = MI->getOperand(0).getReg();
-    if (Register::isPhysicalRegister(DstReg) &&
-        (X86::GR8RegClass.contains(DstReg) ||
-         X86::GR16RegClass.contains(DstReg)))
+    if (DstReg.isPhysical() && (X86::GR8RegClass.contains(DstReg) ||
+                                X86::GR16RegClass.contains(DstReg)))
       return false;
     Register SrcReg = MI->getOperand(1).getReg();
-    if (Register::isPhysicalRegister(SrcReg) &&
-        (X86::GR8RegClass.contains(SrcReg) ||
-         X86::GR16RegClass.contains(SrcReg)))
+    if (SrcReg.isPhysical() && (X86::GR8RegClass.contains(SrcReg) ||
+                                X86::GR16RegClass.contains(SrcReg)))
       return false;
 
     return true;
@@ -300,7 +298,7 @@
 class Closure {
 private:
   /// Virtual registers in the closure.
-  DenseSet<unsigned> Edges;
+  DenseSet<Register> Edges;
 
   /// Instructions in the closure.
   SmallVector<MachineInstr *, 8> Instrs;
@@ -332,11 +330,9 @@
 
   bool empty() const { return Edges.empty(); }
 
-  bool insertEdge(unsigned Reg) {
-    return Edges.insert(Reg).second;
-  }
+  bool insertEdge(Register Reg) { return Edges.insert(Reg).second; }
 
-  using const_edge_iterator = DenseSet<unsigned>::const_iterator;
+  using const_edge_iterator = DenseSet<Register>::const_iterator;
   iterator_range<const_edge_iterator> edges() const {
     return iterator_range<const_edge_iterator>(Edges.begin(), Edges.end());
   }
@@ -352,7 +348,7 @@
   LLVM_DUMP_METHOD void dump(const MachineRegisterInfo *MRI) const {
     dbgs() << "Registers: ";
     bool First = true;
-    for (unsigned Reg : Edges) {
+    for (Register Reg : Edges) {
       if (!First)
         dbgs() << ", ";
       First = false;
@@ -407,10 +403,10 @@
   void initConverters();
 
   /// Starting from \Reg, expand the closure as much as possible.
-  void buildClosure(Closure &, unsigned Reg);
+  void buildClosure(Closure &, Register Reg);
 
   /// Enqueue \p Reg to be considered for addition to the closure.
-  void visitRegister(Closure &, unsigned Reg, RegDomain &Domain,
+  void visitRegister(Closure &, Register Reg, RegDomain &Domain,
                      SmallVectorImpl<unsigned> &Worklist);
 
   /// Reassign the closure to \p Domain.
@@ -430,13 +426,13 @@
 
 } // End anonymous namespace.
 
-void X86DomainReassignment::visitRegister(Closure &C, unsigned Reg,
+void X86DomainReassignment::visitRegister(Closure &C, Register Reg,
                                           RegDomain &Domain,
                                           SmallVectorImpl<unsigned> &Worklist) {
   if (EnclosedEdges.count(Reg))
     return;
 
-  if (!Register::isVirtualRegister(Reg))
+  if (!Reg.isVirtual())
     return;
 
   if (!MRI->hasOneDef(Reg))
@@ -507,7 +503,7 @@
 
   // Iterate all registers in the closure, replace them with registers in the
   // destination domain.
-  for (unsigned Reg : C.edges()) {
+  for (Register Reg : C.edges()) {
     MRI->setRegClass(Reg, getDstRC(MRI->getRegClass(Reg), Domain));
     for (auto &MO : MRI->use_operands(Reg)) {
       if (MO.isReg())
@@ -523,7 +519,7 @@
 
 /// \returns true when \p Reg is used as part of an address calculation in \p
 /// MI.
-static bool usedAsAddr(const MachineInstr &MI, unsigned Reg,
+static bool usedAsAddr(const MachineInstr &MI, Register Reg,
                        const TargetInstrInfo *TII) {
   if (!MI.mayLoadOrStore())
     return false;
@@ -544,7 +540,7 @@
   return false;
 }
 
-void X86DomainReassignment::buildClosure(Closure &C, unsigned Reg) {
+void X86DomainReassignment::buildClosure(Closure &C, Register Reg) {
   SmallVector<unsigned, 4> Worklist;
   RegDomain Domain = NoDomain;
   visitRegister(C, Reg, Domain, Worklist);
@@ -594,7 +590,7 @@
       continue;
 
     Register DefReg = DefOp.getReg();
-    if (!Register::isVirtualRegister(DefReg)) {
+    if (!DefReg.isVirtual()) {
      C.setAllIllegal();
       continue;
     }
@@ -753,7 +749,7 @@
   // Go over all virtual registers and calculate a closure.
   unsigned ClosureID = 0;
   for (unsigned Idx = 0; Idx < MRI->getNumVirtRegs(); ++Idx) {
-    unsigned Reg = Register::index2VirtReg(Idx);
+    Register Reg = Register::index2VirtReg(Idx);
 
     // GPR only current source domain supported.
     if (!isGPR(MRI->getRegClass(Reg)))
diff --git a/llvm/lib/Target/X86/X86FlagsCopyLowering.cpp b/llvm/lib/Target/X86/X86FlagsCopyLowering.cpp
--- a/llvm/lib/Target/X86/X86FlagsCopyLowering.cpp
+++ b/llvm/lib/Target/X86/X86FlagsCopyLowering.cpp
@@ -97,7 +97,7 @@
   CondRegArray collectCondsInRegs(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator CopyDefI);
 
-  unsigned promoteCondToReg(MachineBasicBlock &MBB,
+  Register promoteCondToReg(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator TestPos,
                             DebugLoc TestLoc, X86::CondCode Cond);
   std::pair<unsigned, bool>
@@ -739,8 +739,7 @@
        llvm::reverse(llvm::make_range(MBB.begin(), TestPos))) {
     X86::CondCode Cond = X86::getCondFromSETCC(MI);
     if (Cond != X86::COND_INVALID && !MI.mayStore() &&
-        MI.getOperand(0).isReg() &&
-        Register::isVirtualRegister(MI.getOperand(0).getReg())) {
+        MI.getOperand(0).isReg() && MI.getOperand(0).getReg().isVirtual()) {
       assert(MI.getOperand(0).isDef() &&
              "A non-storing SETcc should always define a register!");
       CondRegs[Cond] = MI.getOperand(0).getReg();
@@ -754,7 +753,7 @@
   return CondRegs;
 }
 
-unsigned X86FlagsCopyLoweringPass::promoteCondToReg(
+Register X86FlagsCopyLoweringPass::promoteCondToReg(
     MachineBasicBlock &TestMBB, MachineBasicBlock::iterator TestPos,
     DebugLoc TestLoc, X86::CondCode Cond) {
   Register Reg = MRI->createVirtualRegister(PromoteRC);
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -4480,7 +4480,7 @@
   int FI = INT_MAX;
   if (Arg.getOpcode() == ISD::CopyFromReg) {
     Register VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
-    if (!Register::isVirtualRegister(VR))
+    if (!VR.isVirtual())
       return false;
     MachineInstr *Def = MRI->getVRegDef(VR);
     if (!Def)
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -947,9 +947,9 @@
 }
 
 /// Return true if register is PIC base; i.e.g defined by X86::MOVPC32r.
-static bool regIsPICBase(unsigned BaseReg, const MachineRegisterInfo &MRI) {
+static bool regIsPICBase(Register BaseReg, const MachineRegisterInfo &MRI) {
   // Don't waste compile time scanning use-def chains of physregs.
-  if (!Register::isVirtualRegister(BaseReg))
+  if (!BaseReg.isVirtual())
     return false;
   bool isPICBase = false;
   for (MachineRegisterInfo::def_instr_iterator I = MRI.def_instr_begin(BaseReg),
@@ -1206,8 +1206,7 @@
     isKill = Src.isKill();
     assert(!Src.isUndef() && "Undef op doesn't need optimization");
 
-    if (Register::isVirtualRegister(NewSrc) &&
-        !MF.getRegInfo().constrainRegClass(NewSrc, RC))
+    if (NewSrc.isVirtual() && !MF.getRegInfo().constrainRegClass(NewSrc, RC))
       return false;
 
     return true;
@@ -1215,7 +1214,7 @@
 
   // This is for an LEA64_32r and incoming registers are 32-bit. One way or
   // another we need to add 64-bit registers to the final MI.
-  if (Register::isPhysicalRegister(SrcReg)) {
+  if (SrcReg.isPhysical()) {
     ImplicitOp = Src;
     ImplicitOp.setImplicit();
 
@@ -1410,9 +1409,8 @@
     if (!isTruncatedShiftCountForLEA(ShAmt))
       return nullptr;
 
     // LEA can't handle RSP.
-    if (Register::isVirtualRegister(Src.getReg()) &&
-        !MF.getRegInfo().constrainRegClass(Src.getReg(),
-                                           &X86::GR64_NOSPRegClass))
+    if (Src.getReg().isVirtual() && !MF.getRegInfo().constrainRegClass(
+                                        Src.getReg(), &X86::GR64_NOSPRegClass))
       return nullptr;
     NewMI = BuildMI(MF, MI.getDebugLoc(), get(X86::LEA64r))
@@ -3532,11 +3530,10 @@
   return None;
 }
 
-static unsigned getLoadStoreRegOpcode(unsigned Reg,
+static unsigned getLoadStoreRegOpcode(Register Reg,
                                       const TargetRegisterClass *RC,
-                                      bool isStackAligned,
-                                      const X86Subtarget &STI,
-                                      bool load) {
+                                      bool IsStackAligned,
+                                      const X86Subtarget &STI, bool load) {
   bool HasAVX = STI.hasAVX();
   bool HasAVX512 = STI.hasAVX512();
   bool HasVLX = STI.hasVLX();
@@ -3609,7 +3606,7 @@
   case 16: {
     if (X86::VR128XRegClass.hasSubClassEq(RC)) {
       // If stack is realigned we can use aligned stores.
-      if (isStackAligned)
+      if (IsStackAligned)
        return load ?
           (HasVLX    ? X86::VMOVAPSZ128rm :
            HasAVX512 ? X86::VMOVAPSZ128rm_NOVLX :
@@ -3641,7 +3638,7 @@
   case 32:
     assert(X86::VR256XRegClass.hasSubClassEq(RC) && "Unknown 32-byte regclass");
     // If stack is realigned we can use aligned stores.
-    if (isStackAligned)
+    if (IsStackAligned)
       return load ?
         (HasVLX    ? X86::VMOVAPSZ256rm :
          HasAVX512 ? X86::VMOVAPSZ256rm_NOVLX :
@@ -3660,7 +3657,7 @@
   case 64:
     assert(X86::VR512RegClass.hasSubClassEq(RC) && "Unknown 64-byte regclass");
     assert(STI.hasAVX512() && "Using 512-bit register requires AVX512");
-    if (isStackAligned)
+    if (IsStackAligned)
       return load ? X86::VMOVAPSZrm : X86::VMOVAPSZmr;
     else
       return load ? X86::VMOVUPSZrm : X86::VMOVUPSZmr;
@@ -3778,19 +3775,17 @@
   return true;
 }
 
-static unsigned getStoreRegOpcode(unsigned SrcReg,
+static unsigned getStoreRegOpcode(Register SrcReg,
                                   const TargetRegisterClass *RC,
-                                  bool isStackAligned,
+                                  bool IsStackAligned,
                                   const X86Subtarget &STI) {
-  return getLoadStoreRegOpcode(SrcReg, RC, isStackAligned, STI, false);
+  return getLoadStoreRegOpcode(SrcReg, RC, IsStackAligned, STI, false);
 }
 
-
-static unsigned getLoadRegOpcode(unsigned DestReg,
+static unsigned getLoadRegOpcode(Register DestReg,
                                  const TargetRegisterClass *RC,
-                                 bool isStackAligned,
-                                 const X86Subtarget &STI) {
-  return getLoadStoreRegOpcode(DestReg, RC, isStackAligned, STI, true);
+                                 bool IsStackAligned, const X86Subtarget &STI) {
+  return getLoadStoreRegOpcode(DestReg, RC, IsStackAligned, STI, true);
 }
 
 void X86InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
@@ -4447,8 +4442,8 @@
 ///   %k4 = K_SET1
 /// to:
 ///   %k4 = KXNORrr %k0, %k0
-static bool Expand2AddrKreg(MachineInstrBuilder &MIB,
-                            const MCInstrDesc &Desc, unsigned Reg) {
+static bool Expand2AddrKreg(MachineInstrBuilder &MIB, const MCInstrDesc &Desc,
+                            Register Reg) {
   assert(Desc.getNumOperands() == 3 && "Expected two-addr instruction.");
   MIB->setDesc(Desc);
   MIB.addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef);
@@ -4894,7 +4889,7 @@
   // If MI is marked as reading Reg, the partial register update is wanted.
   const MachineOperand &MO = MI.getOperand(0);
   Register Reg = MO.getReg();
-  if (Register::isVirtualRegister(Reg)) {
+  if (Reg.isVirtual()) {
     if (MO.readsReg() || MI.readsVirtualRegister(Reg))
       return 0;
   } else {
@@ -5279,7 +5274,7 @@
     if (!MO.isReg())
       continue;
     Register Reg = MO.getReg();
-    if (!Register::isVirtualRegister(Reg))
+    if (!Reg.isVirtual())
       continue;
 
     auto *NewRC = MRI.constrainRegClass(
@@ -5575,7 +5570,7 @@
       // value and zero-extend the top bits. Change the destination register
       // to a 32-bit one.
       Register DstReg = NewMI->getOperand(0).getReg();
-      if (Register::isPhysicalRegister(DstReg))
+      if (DstReg.isPhysical())
         NewMI->getOperand(0).setReg(RI.getSubReg(DstReg, X86::sub_32bit));
       else
         NewMI->getOperand(0).setSubReg(X86::sub_32bit);
diff --git a/llvm/lib/Target/X86/X86InstructionSelector.cpp b/llvm/lib/Target/X86/X86InstructionSelector.cpp
--- a/llvm/lib/Target/X86/X86InstructionSelector.cpp
+++ b/llvm/lib/Target/X86/X86InstructionSelector.cpp
@@ -214,8 +214,8 @@
   return SubIdx;
 }
 
-static const TargetRegisterClass *getRegClassFromGRPhysReg(unsigned Reg) {
-  assert(Register::isPhysicalRegister(Reg));
+static const TargetRegisterClass *getRegClassFromGRPhysReg(Register Reg) {
+  assert(Reg.isPhysical());
   if (X86::GR64RegClass.contains(Reg))
     return &X86::GR64RegClass;
   if (X86::GR32RegClass.contains(Reg))
@@ -239,7 +239,7 @@
   const unsigned SrcSize = RBI.getSizeInBits(SrcReg, MRI, TRI);
   const RegisterBank &SrcRegBank = *RBI.getRegBank(SrcReg, MRI, TRI);
 
-  if (Register::isPhysicalRegister(DstReg)) {
+  if (DstReg.isPhysical()) {
     assert(I.isCopy() && "Generic operators do not allow physical registers");
 
     if (DstSize > SrcSize && SrcRegBank.getID() == X86::GPRRegBankID &&
@@ -266,12 +266,12 @@
     return true;
   }
 
-  assert((!Register::isPhysicalRegister(SrcReg) || I.isCopy()) &&
+  assert((!SrcReg.isPhysical() || I.isCopy()) &&
          "No phys reg on generic operators");
   assert((DstSize == SrcSize ||
           // Copies are a mean to setup initial types, the number of
           // bits may not exactly match.
-          (Register::isPhysicalRegister(SrcReg) &&
+          (SrcReg.isPhysical() &&
            DstSize <= RBI.getSizeInBits(SrcReg, MRI, TRI))) &&
          "Copy with different width?!");
 
@@ -280,7 +280,7 @@
 
   if (SrcRegBank.getID() == X86::GPRRegBankID &&
       DstRegBank.getID() == X86::GPRRegBankID && SrcSize > DstSize &&
-      Register::isPhysicalRegister(SrcReg)) {
+      SrcReg.isPhysical()) {
     // Change the physical register to performe truncate.
 
     const TargetRegisterClass *SrcRC = getRegClassFromGRPhysReg(SrcReg);
diff --git a/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp b/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
--- a/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
+++ b/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
@@ -184,7 +184,7 @@
                      MachineBasicBlock::iterator InsertPt, DebugLoc Loc);
   void restoreEFLAGS(MachineBasicBlock &MBB,
                      MachineBasicBlock::iterator InsertPt, DebugLoc Loc,
-                     unsigned OFReg);
+                     Register Reg);
 
   void mergePredStateIntoSP(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator InsertPt, DebugLoc Loc,
@@ -200,8 +200,8 @@
   MachineInstr *
   sinkPostLoadHardenedInst(MachineInstr &MI,
                            SmallPtrSetImpl<MachineInstr *> &HardenedInstrs);
-  bool canHardenRegister(unsigned Reg);
-  unsigned hardenValueInRegister(unsigned Reg, MachineBasicBlock &MBB,
+  bool canHardenRegister(Register Reg);
+  unsigned hardenValueInRegister(Register Reg, MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator InsertPt,
                                  DebugLoc Loc);
   unsigned hardenPostLoad(MachineInstr &MI);
@@ -1520,7 +1520,7 @@
 /// reliably lower.
 void X86SpeculativeLoadHardeningPass::restoreEFLAGS(
     MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertPt, DebugLoc Loc,
-    unsigned Reg) {
+    Register Reg) {
   BuildMI(MBB, InsertPt, Loc, TII->get(X86::COPY), X86::EFLAGS).addReg(Reg);
   ++NumInstsInserted;
 }
@@ -1842,8 +1842,7 @@
     // just bail. Also check that its register class is one of the ones we
     // can harden.
     Register UseDefReg = UseMI.getOperand(0).getReg();
-    if (!Register::isVirtualRegister(UseDefReg) ||
-        !canHardenRegister(UseDefReg))
+    if (!UseDefReg.isVirtual() || !canHardenRegister(UseDefReg))
       return {};
 
     SingleUseMI = &UseMI;
@@ -1865,7 +1864,7 @@
   return MI;
 }
 
-bool X86SpeculativeLoadHardeningPass::canHardenRegister(unsigned Reg) {
+bool X86SpeculativeLoadHardeningPass::canHardenRegister(Register Reg) {
   auto *RC = MRI->getRegClass(Reg);
   int RegBytes = TRI->getRegSizeInBits(*RC) / 8;
   if (RegBytes > 8)
@@ -1909,10 +1908,10 @@
 /// The new, hardened virtual register is returned. It will have the same
 /// register class as `Reg`.
 unsigned X86SpeculativeLoadHardeningPass::hardenValueInRegister(
-    unsigned Reg, MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertPt,
+    Register Reg, MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertPt,
     DebugLoc Loc) {
   assert(canHardenRegister(Reg) && "Cannot harden this register!");
-  assert(Register::isVirtualRegister(Reg) && "Cannot harden a physical register!");
+  assert(Reg.isVirtual() && "Cannot harden a physical register!");
 
   auto *RC = MRI->getRegClass(Reg);
   int Bytes = TRI->getRegSizeInBits(*RC) / 8;
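
The hunks above all apply the same mechanical rewrite: take llvm::Register instead of a raw unsigned register number, and call the member predicates Reg.isVirtual() / Reg.isPhysical() instead of the static Register::isVirtualRegister() / Register::isPhysicalRegister() helpers. A minimal standalone sketch of the pattern, not part of the patch (the helper name isSingleDefVirtReg is made up for illustration; Register, its predicates, and MachineRegisterInfo::hasOneDef are the real APIs):

#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Register.h"

using namespace llvm;

// Hypothetical helper showing the post-patch style.
static bool isSingleDefVirtReg(Register Reg, const MachineRegisterInfo &MRI) {
  // Before: if (!Register::isVirtualRegister(Reg)) return false;
  // After: the predicate is a member of the Register value itself.
  if (!Reg.isVirtual())
    return false;
  // Register still converts implicitly to unsigned, so interfaces that keep
  // taking a plain register number continue to work unchanged.
  return MRI.hasOneDef(Reg);
}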