Index: include/llvm/CodeGen/GlobalISel/CallLowering.h =================================================================== --- include/llvm/CodeGen/GlobalISel/CallLowering.h +++ include/llvm/CodeGen/GlobalISel/CallLowering.h @@ -163,8 +163,8 @@ /// /// \return True if the lowering succeeds, false otherwise. virtual bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val, - ArrayRef VRegs, - unsigned SwiftErrorVReg) const { + ArrayRef VRegs, + Register SwiftErrorVReg) const { if (!supportSwiftError()) { assert(SwiftErrorVReg == 0 && "attempt to use unsupported swifterror"); return lowerReturn(MIRBuilder, Val, VRegs); @@ -175,7 +175,7 @@ /// This hook behaves as the extended lowerReturn function, but for targets /// that do not support swifterror value promotion. virtual bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val, - ArrayRef VRegs) const { + ArrayRef VRegs) const { return false; } @@ -191,7 +191,7 @@ /// \return True if the lowering succeeded, false otherwise. virtual bool lowerFormalArguments(MachineIRBuilder &MIRBuilder, const Function &F, - ArrayRef VRegs) const { + ArrayRef VRegs) const { return false; } @@ -216,7 +216,7 @@ virtual bool lowerCall(MachineIRBuilder &MIRBuilder, CallingConv::ID CallConv, const MachineOperand &Callee, const ArgInfo &OrigRet, ArrayRef OrigArgs, - unsigned SwiftErrorVReg) const { + Register SwiftErrorVReg) const { if (!supportSwiftError()) { assert(SwiftErrorVReg == 0 && "trying to use unsupported swifterror"); return lowerCall(MIRBuilder, CallConv, Callee, OrigRet, OrigArgs); @@ -254,8 +254,8 @@ /// /// \return true if the lowering succeeded, false otherwise. 
bool lowerCall(MachineIRBuilder &MIRBuilder, ImmutableCallSite CS, - unsigned ResReg, ArrayRef ArgRegs, - unsigned SwiftErrorVReg, + Register ResReg, ArrayRef ArgRegs, + Register SwiftErrorVReg, std::function GetCalleeReg) const; }; Index: include/llvm/CodeGen/GlobalISel/IRTranslator.h =================================================================== --- include/llvm/CodeGen/GlobalISel/IRTranslator.h +++ include/llvm/CodeGen/GlobalISel/IRTranslator.h @@ -69,7 +69,7 @@ public: ValueToVRegInfo() = default; - using VRegListT = SmallVector; + using VRegListT = SmallVector; using OffsetListT = SmallVector; using const_vreg_iterator = @@ -491,9 +491,9 @@ /// Non-aggregate types have just one corresponding VReg and the list can be /// used as a single "unsigned". Aggregates get flattened. If such VRegs do /// not exist, they are created. - ArrayRef getOrCreateVRegs(const Value &Val); + ArrayRef getOrCreateVRegs(const Value &Val); - unsigned getOrCreateVReg(const Value &Val) { + Register getOrCreateVReg(const Value &Val) { auto Regs = getOrCreateVRegs(Val); if (Regs.empty()) return 0; Index: include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h =================================================================== --- include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h +++ include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h @@ -39,11 +39,11 @@ return false; Builder.setInstr(MI); - unsigned DstReg = MI.getOperand(0).getReg(); - unsigned SrcReg = lookThroughCopyInstrs(MI.getOperand(1).getReg()); + Register DstReg = MI.getOperand(0).getReg(); + Register SrcReg = lookThroughCopyInstrs(MI.getOperand(1).getReg()); // aext(trunc x) - > aext/copy/trunc x - unsigned TruncSrc; + Register TruncSrc; if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc)))) { LLVM_DEBUG(dbgs() << ".. 
Combine MI: " << MI;); Builder.buildAnyExtOrTrunc(DstReg, TruncSrc); @@ -52,7 +52,7 @@ } // aext([asz]ext x) -> [asz]ext x - unsigned ExtSrc; + Register ExtSrc; MachineInstr *ExtMI; if (mi_match(SrcReg, MRI, m_all_of(m_MInstr(ExtMI), m_any_of(m_GAnyExt(m_Reg(ExtSrc)), @@ -72,11 +72,11 @@ return false; Builder.setInstr(MI); - unsigned DstReg = MI.getOperand(0).getReg(); - unsigned SrcReg = lookThroughCopyInstrs(MI.getOperand(1).getReg()); + Register DstReg = MI.getOperand(0).getReg(); + Register SrcReg = lookThroughCopyInstrs(MI.getOperand(1).getReg()); // zext(trunc x) - > and (aext/copy/trunc x), mask - unsigned TruncSrc; + Register TruncSrc; if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc)))) { LLT DstTy = MRI.getType(DstReg); if (isInstUnsupported({TargetOpcode::G_AND, {DstTy}}) || @@ -101,11 +101,11 @@ return false; Builder.setInstr(MI); - unsigned DstReg = MI.getOperand(0).getReg(); - unsigned SrcReg = lookThroughCopyInstrs(MI.getOperand(1).getReg()); + Register DstReg = MI.getOperand(0).getReg(); + Register SrcReg = lookThroughCopyInstrs(MI.getOperand(1).getReg()); // sext(trunc x) - > ashr (shl (aext/copy/trunc x), c), c - unsigned TruncSrc; + Register TruncSrc; if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc)))) { LLT DstTy = MRI.getType(DstReg); // Guess on the RHS shift amount type, which should be re-legalized if @@ -139,7 +139,7 @@ if (MachineInstr *DefMI = getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MI.getOperand(1).getReg(), MRI)) { Builder.setInstr(MI); - unsigned DstReg = MI.getOperand(0).getReg(); + Register DstReg = MI.getOperand(0).getReg(); LLT DstTy = MRI.getType(DstReg); if (Opcode == TargetOpcode::G_ANYEXT) { @@ -207,7 +207,7 @@ const unsigned NewNumDefs = NumDefs / NumMergeRegs; for (unsigned Idx = 0; Idx < NumMergeRegs; ++Idx) { - SmallVector DstRegs; + SmallVector DstRegs; for (unsigned j = 0, DefIdx = Idx * NewNumDefs; j < NewNumDefs; ++j, ++DefIdx) DstRegs.push_back(MI.getOperand(DefIdx).getReg()); @@ -229,7 +229,7 @@ const unsigned 
NumRegs = NumMergeRegs / NumDefs; for (unsigned DefIdx = 0; DefIdx < NumDefs; ++DefIdx) { - SmallVector Regs; + SmallVector Regs; for (unsigned j = 0, Idx = NumRegs * DefIdx + 1; j < NumRegs; ++j, ++Idx) Regs.push_back(MergeI->getOperand(Idx).getReg()); @@ -436,8 +436,8 @@ /// Looks through copy instructions and returns the actual /// source register. - unsigned lookThroughCopyInstrs(unsigned Reg) { - unsigned TmpReg; + unsigned lookThroughCopyInstrs(Register Reg) { + Register TmpReg; while (mi_match(Reg, MRI, m_Copy(m_Reg(TmpReg)))) { if (MRI.getType(TmpReg).isValid()) Reg = TmpReg; Index: include/llvm/CodeGen/GlobalISel/LegalizerHelper.h =================================================================== --- include/llvm/CodeGen/GlobalISel/LegalizerHelper.h +++ include/llvm/CodeGen/GlobalISel/LegalizerHelper.h @@ -141,14 +141,14 @@ /// Helper function to split a wide generic register into bitwise blocks with /// the given Type (which implies the number of blocks needed). The generic /// registers created are appended to Ops, starting at bit 0 of Reg. - void extractParts(unsigned Reg, LLT Ty, int NumParts, - SmallVectorImpl &VRegs); + void extractParts(Register Reg, LLT Ty, int NumParts, + SmallVectorImpl &VRegs); /// Version which handles irregular splits. - bool extractParts(unsigned Reg, LLT RegTy, LLT MainTy, + bool extractParts(Register Reg, LLT RegTy, LLT MainTy, LLT &LeftoverTy, - SmallVectorImpl &VRegs, - SmallVectorImpl &LeftoverVRegs); + SmallVectorImpl &VRegs, + SmallVectorImpl &LeftoverVRegs); /// Helper function to build a wide generic register \p DstReg of type \p /// RegTy from smaller parts. This will produce a G_MERGE_VALUES, @@ -159,16 +159,16 @@ /// /// If \p ResultTy does not evenly break into \p PartTy sized pieces, the /// remainder must be specified with \p LeftoverRegs of type \p LeftoverTy. 
- void insertParts(unsigned DstReg, LLT ResultTy, - LLT PartTy, ArrayRef PartRegs, - LLT LeftoverTy = LLT(), ArrayRef LeftoverRegs = {}); + void insertParts(Register DstReg, LLT ResultTy, + LLT PartTy, ArrayRef PartRegs, + LLT LeftoverTy = LLT(), ArrayRef LeftoverRegs = {}); /// Perform generic multiplication of values held in multiple registers. /// Generated instructions use only types NarrowTy and i1. /// Destination can be same or two times size of the source. - void multiplyRegisters(SmallVectorImpl &DstRegs, - ArrayRef Src1Regs, - ArrayRef Src2Regs, LLT NarrowTy); + void multiplyRegisters(SmallVectorImpl &DstRegs, + ArrayRef Src1Regs, + ArrayRef Src2Regs, LLT NarrowTy); LegalizeResult fewerElementsVectorImplicitDef(MachineInstr &MI, unsigned TypeIdx, LLT NarrowTy); Index: include/llvm/CodeGen/GlobalISel/MIPatternMatch.h =================================================================== --- include/llvm/CodeGen/GlobalISel/MIPatternMatch.h +++ include/llvm/CodeGen/GlobalISel/MIPatternMatch.h @@ -160,7 +160,7 @@ } }; -inline bind_ty m_Reg(unsigned &R) { return R; } +inline bind_ty m_Reg(Register &R) { return R; } inline bind_ty m_MInstr(MachineInstr *&MI) { return MI; } inline bind_ty m_Type(LLT &Ty) { return Ty; } Index: include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h =================================================================== --- include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h +++ include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h @@ -66,6 +66,7 @@ public: enum class DstType { Ty_LLT, Ty_Reg, Ty_RC }; DstOp(unsigned R) : Reg(R), Ty(DstType::Ty_Reg) {} + DstOp(Register R) : Reg(R), Ty(DstType::Ty_Reg) {} DstOp(const MachineOperand &Op) : Reg(Op.getReg()), Ty(DstType::Ty_Reg) {} DstOp(const LLT &T) : LLTTy(T), Ty(DstType::Ty_LLT) {} DstOp(const TargetRegisterClass *TRC) : RC(TRC), Ty(DstType::Ty_RC) {} @@ -126,6 +127,7 @@ public: enum class SrcType { Ty_Reg, Ty_MIB, Ty_Predicate }; SrcOp(unsigned R) : Reg(R), Ty(SrcType::Ty_Reg) {} + 
SrcOp(Register R) : Reg(R), Ty(SrcType::Ty_Reg) {} SrcOp(const MachineOperand &Op) : Reg(Op.getReg()), Ty(SrcType::Ty_Reg) {} SrcOp(const MachineInstrBuilder &MIB) : SrcMIB(MIB), Ty(SrcType::Ty_MIB) {} SrcOp(const CmpInst::Predicate P) : Pred(P), Ty(SrcType::Ty_Predicate) {} @@ -401,7 +403,7 @@ /// type as \p Op0 or \p Op0 itself. /// /// \return a MachineInstrBuilder for the newly created instruction. - Optional materializeGEP(unsigned &Res, unsigned Op0, + Optional materializeGEP(Register &Res, Register Op0, const LLT &ValueTy, uint64_t Value); @@ -717,7 +719,7 @@ /// \pre The bits defined by each Op (derived from index and scalar size) must /// not overlap. /// \pre \p Indices must be in ascending order of bit position. - void buildSequence(unsigned Res, ArrayRef Ops, + void buildSequence(Register Res, ArrayRef Ops, ArrayRef Indices); /// Build and insert \p Res = G_MERGE_VALUES \p Op0, ... @@ -731,7 +733,7 @@ /// \pre The type of all \p Ops registers must be identical. /// /// \return a MachineInstrBuilder for the newly created instruction. - MachineInstrBuilder buildMerge(const DstOp &Res, ArrayRef Ops); + MachineInstrBuilder buildMerge(const DstOp &Res, ArrayRef Ops); /// Build and insert \p Res0, ... = G_UNMERGE_VALUES \p Op /// @@ -744,7 +746,7 @@ /// /// \return a MachineInstrBuilder for the newly created instruction. MachineInstrBuilder buildUnmerge(ArrayRef Res, const SrcOp &Op); - MachineInstrBuilder buildUnmerge(ArrayRef Res, const SrcOp &Op); + MachineInstrBuilder buildUnmerge(ArrayRef Res, const SrcOp &Op); /// Build and insert an unmerge of \p Res sized pieces to cover \p Op MachineInstrBuilder buildUnmerge(LLT Res, const SrcOp &Op); @@ -759,7 +761,7 @@ /// /// \return a MachineInstrBuilder for the newly created instruction. 
MachineInstrBuilder buildBuildVector(const DstOp &Res, - ArrayRef Ops); + ArrayRef Ops); /// Build and insert \p Res = G_BUILD_VECTOR with \p Src replicated to fill /// the number of elements @@ -780,7 +782,7 @@ /// /// \return a MachineInstrBuilder for the newly created instruction. MachineInstrBuilder buildBuildVectorTrunc(const DstOp &Res, - ArrayRef Ops); + ArrayRef Ops); /// Build and insert \p Res = G_CONCAT_VECTORS \p Op0, ... /// @@ -794,10 +796,10 @@ /// /// \return a MachineInstrBuilder for the newly created instruction. MachineInstrBuilder buildConcatVectors(const DstOp &Res, - ArrayRef Ops); + ArrayRef Ops); - MachineInstrBuilder buildInsert(unsigned Res, unsigned Src, - unsigned Op, unsigned Index); + MachineInstrBuilder buildInsert(Register Res, Register Src, + Register Op, unsigned Index); /// Build and insert either a G_INTRINSIC (if \p HasSideEffects is false) or /// G_INTRINSIC_W_SIDE_EFFECTS instruction. Its first operand will be the @@ -809,7 +811,7 @@ /// \pre setBasicBlock or setMI must have been called. /// /// \return a MachineInstrBuilder for the newly created instruction. - MachineInstrBuilder buildIntrinsic(Intrinsic::ID ID, ArrayRef Res, + MachineInstrBuilder buildIntrinsic(Intrinsic::ID ID, ArrayRef Res, bool HasSideEffects); MachineInstrBuilder buildIntrinsic(Intrinsic::ID ID, ArrayRef Res, bool HasSideEffects); Index: include/llvm/CodeGen/MachineOperand.h =================================================================== --- include/llvm/CodeGen/MachineOperand.h +++ include/llvm/CodeGen/MachineOperand.h @@ -14,6 +14,7 @@ #define LLVM_CODEGEN_MACHINEOPERAND_H #include "llvm/ADT/DenseMap.h" +#include "llvm/CodeGen/Register.h" #include "llvm/IR/Intrinsics.h" #include "llvm/Support/DataTypes.h" #include "llvm/Support/LowLevelTypeImpl.h" @@ -345,9 +346,9 @@ //===--------------------------------------------------------------------===// /// getReg - Returns the register number. 
- unsigned getReg() const { + Register getReg() const { assert(isReg() && "This is not a register operand!"); - return SmallContents.RegNo; + return Register(SmallContents.RegNo); } unsigned getSubReg() const { Index: include/llvm/CodeGen/MachineRegisterInfo.h =================================================================== --- include/llvm/CodeGen/MachineRegisterInfo.h +++ include/llvm/CodeGen/MachineRegisterInfo.h @@ -712,12 +712,12 @@ /// createVirtualRegister - Create and return a new virtual register in the /// function with the specified register class. - unsigned createVirtualRegister(const TargetRegisterClass *RegClass, + Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name = ""); /// Create and return a new virtual register in the function with the same /// attributes as the given register. - unsigned cloneVirtualRegister(unsigned VReg, StringRef Name = ""); + Register cloneVirtualRegister(Register VReg, StringRef Name = ""); /// Get the low-level type of \p Reg or LLT{} if Reg is not a generic /// (target independent) virtual register. @@ -732,7 +732,7 @@ /// Create and return a new generic virtual register with low-level /// type \p Ty. - unsigned createGenericVirtualRegister(LLT Ty, StringRef Name = ""); + Register createGenericVirtualRegister(LLT Ty, StringRef Name = ""); /// Remove all types associated to virtual registers (after instruction /// selection and constraining of all generic virtual registers). Index: include/llvm/CodeGen/Register.h =================================================================== --- /dev/null +++ include/llvm/CodeGen/Register.h @@ -0,0 +1,71 @@ +//===-- llvm/CodeGen/Register.h ---------------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CODEGEN_REGISTER_H +#define LLVM_CODEGEN_REGISTER_H + +#include + +namespace llvm { + +/// Wrapper class representing virtual and physical registers. Should be passed +/// by value. +class Register { + unsigned Reg; +public: + Register(unsigned Val = 0): Reg(Val) {} + + /// Return true if the specified register number is in + /// the physical register namespace. + static bool isPhysicalRegister(unsigned Reg) { + //assert(!isStackSlot(Reg) && "Not a register! Check isStackSlot() first."); + return int(Reg) > 0; + } + + /// Return true if the specified register number is in + /// the virtual register namespace. + static bool isVirtualRegister(unsigned Reg) { + //assert(!isStackSlot(Reg) && "Not a register! Check isStackSlot() first."); + return int(Reg) < 0; + } + + static unsigned virtReg2Index(unsigned Reg) { + assert(isVirtualRegister(Reg) && "Not a virtual register"); + return Reg & ~(1u << 31); + } + + bool isVirtual() const { + return isVirtualRegister(Reg); + } + + bool isPhysical() const { + return isPhysicalRegister(Reg); + } + + unsigned virtRegIndex() const { + return virtReg2Index(Reg); + } + + operator unsigned() const { + return Reg; + } + +#if 0 + bool operator==(Register Other) const { + return Reg == Other.Reg; + } +#endif + + bool isValid() const { + return Reg != 0; + } +}; + +} + +#endif Index: include/llvm/CodeGen/SwiftErrorValueTracking.h =================================================================== --- include/llvm/CodeGen/SwiftErrorValueTracking.h +++ include/llvm/CodeGen/SwiftErrorValueTracking.h @@ -17,6 +17,7 @@ #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/SmallVector.h" +#include "llvm/CodeGen/Register.h" #include "llvm/IR/BasicBlock.h" #include "llvm/IR/DebugLoc.h" #include @@ -41,18 +42,18 @@ /// A map from swifterror value in a basic block to the virtual register it 
is /// currently represented by. - DenseMap, unsigned> + DenseMap, Register> VRegDefMap; /// A list of upward exposed vreg uses that need to be satisfied by either a /// copy def or a phi node at the beginning of the basic block representing /// the predecessor(s) swifterror value. - DenseMap, unsigned> + DenseMap, Register> VRegUpwardsUse; /// A map from instructions that define/use a swifterror value to the virtual /// register that represents that def/use. - llvm::DenseMap, unsigned> + llvm::DenseMap, Register> VRegDefUses; /// The swifterror argument of the current function. @@ -80,7 +81,7 @@ /// Set the swifterror virtual register in the VRegDefMap for this /// basic block. - void setCurrentVReg(const MachineBasicBlock *MBB, const Value *, unsigned); + void setCurrentVReg(const MachineBasicBlock *MBB, const Value *, Register); /// Get or create the swifterror value virtual register for a def of a /// swifterror by an instruction. Index: include/llvm/CodeGen/TargetRegisterInfo.h =================================================================== --- include/llvm/CodeGen/TargetRegisterInfo.h +++ include/llvm/CodeGen/TargetRegisterInfo.h @@ -990,7 +990,7 @@ /// getFrameRegister - This method should return the register used as a base /// for values allocated in the current stack frame. - virtual unsigned getFrameRegister(const MachineFunction &MF) const = 0; + virtual Register getFrameRegister(const MachineFunction &MF) const = 0; /// Mark a register and all its aliases as reserved in the given set. 
void markSuperRegs(BitVector &RegisterSet, unsigned Reg) const; Index: include/llvm/CodeGen/VirtRegMap.h =================================================================== --- include/llvm/CodeGen/VirtRegMap.h +++ include/llvm/CodeGen/VirtRegMap.h @@ -97,8 +97,8 @@ /// returns the physical register mapped to the specified /// virtual register - unsigned getPhys(unsigned virtReg) const { - assert(TargetRegisterInfo::isVirtualRegister(virtReg)); + Register getPhys(Register virtReg) const { + assert(virtReg.isVirtual()); return Virt2PhysMap[virtReg]; } Index: lib/CodeGen/AsmPrinter/DbgEntityHistoryCalculator.cpp =================================================================== --- lib/CodeGen/AsmPrinter/DbgEntityHistoryCalculator.cpp +++ lib/CodeGen/AsmPrinter/DbgEntityHistoryCalculator.cpp @@ -38,12 +38,12 @@ // If @MI is a DBG_VALUE with debug value described by a // defined register, returns the number of this register. // In the other case, returns 0. -static unsigned isDescribedByReg(const MachineInstr &MI) { +static Register isDescribedByReg(const MachineInstr &MI) { assert(MI.isDebugValue()); assert(MI.getNumOperands() == 4); // If location of variable is described using a register (directly or // indirectly), this register is always a first operand. - return MI.getOperand(0).isReg() ? MI.getOperand(0).getReg() : 0; + return MI.getOperand(0).isReg() ? 
MI.getOperand(0).getReg() : Register(); } bool DbgValueHistoryMap::startDbgValue(InlinedEntity Var, Index: lib/CodeGen/GlobalISel/CallLowering.cpp =================================================================== --- lib/CodeGen/GlobalISel/CallLowering.cpp +++ lib/CodeGen/GlobalISel/CallLowering.cpp @@ -27,8 +27,8 @@ void CallLowering::anchor() {} bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, ImmutableCallSite CS, - unsigned ResReg, ArrayRef ArgRegs, - unsigned SwiftErrorVReg, + Register ResReg, ArrayRef ArgRegs, + Register SwiftErrorVReg, std::function GetCalleeReg) const { auto &DL = CS.getParent()->getParent()->getParent()->getDataLayout(); @@ -131,7 +131,7 @@ if (Handler.assignArg(i, CurVT, CurVT, CCValAssign::Full, Args[i], CCInfo)) { // Try to use the register type if we couldn't assign the VT. if (!Handler.isArgumentHandler() || !CurVT.isValid()) - return false; + return false; CurVT = TLI->getRegisterTypeForCallingConv( F.getContext(), F.getCallingConv(), EVT(CurVT)); if (Handler.assignArg(i, CurVT, CurVT, CCValAssign::Full, Args[i], CCInfo)) Index: lib/CodeGen/GlobalISel/IRTranslator.cpp =================================================================== --- lib/CodeGen/GlobalISel/IRTranslator.cpp +++ lib/CodeGen/GlobalISel/IRTranslator.cpp @@ -167,7 +167,7 @@ return *Regs; } -ArrayRef IRTranslator::getOrCreateVRegs(const Value &Val) { +ArrayRef IRTranslator::getOrCreateVRegs(const Value &Val) { auto VRegsIt = VMap.findVRegs(Val); if (VRegsIt != VMap.vregs_end()) return *VRegsIt->second; @@ -361,11 +361,11 @@ if (Ret && DL->getTypeStoreSize(Ret->getType()) == 0) Ret = nullptr; - ArrayRef VRegs; + ArrayRef VRegs; if (Ret) VRegs = getOrCreateVRegs(*Ret); - unsigned SwiftErrorVReg = 0; + Register SwiftErrorVReg = 0; if (CLI->supportSwiftError() && SwiftError.getFunctionArg()) { SwiftErrorVReg = SwiftError.getOrCreateVRegUseAt( &RI, &MIRBuilder.getMBB(), SwiftError.getFunctionArg()); @@ -481,7 +481,7 @@ if (DL->getTypeStoreSize(LI.getType()) == 
0) return true; - ArrayRef Regs = getOrCreateVRegs(LI); + ArrayRef Regs = getOrCreateVRegs(LI); ArrayRef Offsets = *VMap.getOffsets(LI); unsigned Base = getOrCreateVReg(*LI.getPointerOperand()); @@ -498,7 +498,7 @@ for (unsigned i = 0; i < Regs.size(); ++i) { - unsigned Addr = 0; + Register Addr; MIRBuilder.materializeGEP(Addr, Base, OffsetTy, Offsets[i] / 8); MachinePointerInfo Ptr(LI.getPointerOperand(), Offsets[i] / 8); @@ -522,7 +522,7 @@ if (DL->getTypeStoreSize(SI.getValueOperand()->getType()) == 0) return true; - ArrayRef Vals = getOrCreateVRegs(*SI.getValueOperand()); + ArrayRef Vals = getOrCreateVRegs(*SI.getValueOperand()); ArrayRef Offsets = *VMap.getOffsets(*SI.getValueOperand()); unsigned Base = getOrCreateVReg(*SI.getPointerOperand()); @@ -539,7 +539,7 @@ } for (unsigned i = 0; i < Vals.size(); ++i) { - unsigned Addr = 0; + Register Addr; MIRBuilder.materializeGEP(Addr, Base, OffsetTy, Offsets[i] / 8); MachinePointerInfo Ptr(SI.getPointerOperand(), Offsets[i] / 8); @@ -581,7 +581,7 @@ MachineIRBuilder &MIRBuilder) { const Value *Src = U.getOperand(0); uint64_t Offset = getOffsetFromIndices(U, *DL); - ArrayRef SrcRegs = getOrCreateVRegs(*Src); + ArrayRef SrcRegs = getOrCreateVRegs(*Src); ArrayRef Offsets = *VMap.getOffsets(*Src); unsigned Idx = llvm::lower_bound(Offsets, Offset) - Offsets.begin(); auto &DstRegs = allocateVRegs(U); @@ -598,8 +598,8 @@ uint64_t Offset = getOffsetFromIndices(U, *DL); auto &DstRegs = allocateVRegs(U); ArrayRef DstOffsets = *VMap.getOffsets(U); - ArrayRef SrcRegs = getOrCreateVRegs(*Src); - ArrayRef InsertedRegs = getOrCreateVRegs(*U.getOperand(1)); + ArrayRef SrcRegs = getOrCreateVRegs(*Src); + ArrayRef InsertedRegs = getOrCreateVRegs(*U.getOperand(1)); auto InsertedIt = InsertedRegs.begin(); for (unsigned i = 0; i < DstRegs.size(); ++i) { @@ -615,9 +615,9 @@ bool IRTranslator::translateSelect(const User &U, MachineIRBuilder &MIRBuilder) { unsigned Tst = getOrCreateVReg(*U.getOperand(0)); - ArrayRef ResRegs = 
getOrCreateVRegs(U); - ArrayRef Op0Regs = getOrCreateVRegs(*U.getOperand(1)); - ArrayRef Op1Regs = getOrCreateVRegs(*U.getOperand(2)); + ArrayRef ResRegs = getOrCreateVRegs(U); + ArrayRef Op0Regs = getOrCreateVRegs(*U.getOperand(1)); + ArrayRef Op1Regs = getOrCreateVRegs(*U.getOperand(2)); const SelectInst &SI = cast(U); uint16_t Flags = 0; @@ -809,7 +809,7 @@ bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op, MachineIRBuilder &MIRBuilder) { - ArrayRef ResRegs = getOrCreateVRegs(CI); + ArrayRef ResRegs = getOrCreateVRegs(CI); MIRBuilder.buildInstr(Op) .addDef(ResRegs[0]) .addDef(ResRegs[1]) @@ -1162,7 +1162,7 @@ unsigned IRTranslator::packRegs(const Value &V, MachineIRBuilder &MIRBuilder) { - ArrayRef Regs = getOrCreateVRegs(V); + ArrayRef Regs = getOrCreateVRegs(V); ArrayRef Offsets = *VMap.getOffsets(V); LLT BigTy = getLLTForType(*V.getType(), *DL); @@ -1181,7 +1181,7 @@ void IRTranslator::unpackRegs(const Value &V, unsigned Src, MachineIRBuilder &MIRBuilder) { - ArrayRef Regs = getOrCreateVRegs(V); + ArrayRef Regs = getOrCreateVRegs(V); ArrayRef Offsets = *VMap.getOffsets(V); for (unsigned i = 0; i < Regs.size(); ++i) @@ -1209,12 +1209,12 @@ if (!F || !F->isIntrinsic() || ID == Intrinsic::not_intrinsic) { bool IsSplitType = valueIsSplit(CI); - unsigned Res = IsSplitType ? MRI->createGenericVirtualRegister( + Register Res = IsSplitType ? 
MRI->createGenericVirtualRegister( getLLTForType(*CI.getType(), *DL)) : getOrCreateVReg(CI); - SmallVector Args; - unsigned SwiftErrorVReg = 0; + SmallVector Args; + Register SwiftErrorVReg; for (auto &Arg: CI.arg_operands()) { if (CLI->supportSwiftError() && isSwiftError(Arg)) { LLT Ty = getLLTForType(*Arg->getType(), *DL); @@ -1245,7 +1245,7 @@ if (translateKnownIntrinsic(CI, ID, MIRBuilder)) return true; - ArrayRef ResultRegs; + ArrayRef ResultRegs; if (!CI.getType()->isVoidTy()) ResultRegs = getOrCreateVRegs(CI); @@ -1313,8 +1313,8 @@ unsigned Res = 0; if (!I.getType()->isVoidTy()) Res = MRI->createGenericVirtualRegister(getLLTForType(*I.getType(), *DL)); - SmallVector Args; - unsigned SwiftErrorVReg = 0; + SmallVector Args; + Register SwiftErrorVReg; for (auto &Arg : I.arg_operands()) { if (CLI->supportSwiftError() && isSwiftError(Arg)) { LLT Ty = getLLTForType(*Arg->getType(), *DL); @@ -1399,7 +1399,7 @@ return false; MBB.addLiveIn(ExceptionReg); - ArrayRef ResRegs = getOrCreateVRegs(LP); + ArrayRef ResRegs = getOrCreateVRegs(LP); MIRBuilder.buildCopy(ResRegs[0], ExceptionReg); unsigned SelectorReg = TLI.getExceptionSelectorRegister(PersonalityFn); @@ -1701,7 +1701,7 @@ continue; HandledPreds.insert(IRPred); - ArrayRef ValRegs = getOrCreateVRegs(*PI->getIncomingValue(i)); + ArrayRef ValRegs = getOrCreateVRegs(*PI->getIncomingValue(i)); for (auto Pred : getMachinePredBBs({IRPred, PI->getParent()})) { assert(Pred->isSuccessor(ComponentPHIs[0]->getParent()) && "incorrect CFG at MachineBasicBlock level"); @@ -1767,7 +1767,7 @@ // Return the scalar if it is a <1 x Ty> vector. if (CAZ->getNumElements() == 1) return translate(*CAZ->getElementValue(0u), Reg); - SmallVector Ops; + SmallVector Ops; for (unsigned i = 0; i < CAZ->getNumElements(); ++i) { Constant &Elt = *CAZ->getElementValue(i); Ops.push_back(getOrCreateVReg(Elt)); @@ -1777,7 +1777,7 @@ // Return the scalar if it is a <1 x Ty> vector. 
if (CV->getNumElements() == 1) return translate(*CV->getElementAsConstant(0), Reg); - SmallVector Ops; + SmallVector Ops; for (unsigned i = 0; i < CV->getNumElements(); ++i) { Constant &Elt = *CV->getElementAsConstant(i); Ops.push_back(getOrCreateVReg(Elt)); @@ -1795,7 +1795,7 @@ } else if (auto CV = dyn_cast(&C)) { if (CV->getNumOperands() == 1) return translate(*CV->getOperand(0), Reg); - SmallVector Ops; + SmallVector Ops; for (unsigned i = 0; i < CV->getNumOperands(); ++i) { Ops.push_back(getOrCreateVReg(*CV->getOperand(i))); } @@ -1890,7 +1890,7 @@ EntryBB->addSuccessor(&getMBB(F.front())); // Lower the actual args into this basic block. - SmallVector VRegArgs; + SmallVector VRegArgs; for (const Argument &Arg: F.args()) { if (DL->getTypeStoreSize(Arg.getType()) == 0) continue; // Don't handle zero sized types. Index: lib/CodeGen/GlobalISel/LegalizerHelper.cpp =================================================================== --- lib/CodeGen/GlobalISel/LegalizerHelper.cpp +++ lib/CodeGen/GlobalISel/LegalizerHelper.cpp @@ -115,17 +115,17 @@ } } -void LegalizerHelper::extractParts(unsigned Reg, LLT Ty, int NumParts, - SmallVectorImpl &VRegs) { +void LegalizerHelper::extractParts(Register Reg, LLT Ty, int NumParts, + SmallVectorImpl &VRegs) { for (int i = 0; i < NumParts; ++i) VRegs.push_back(MRI.createGenericVirtualRegister(Ty)); MIRBuilder.buildUnmerge(VRegs, Reg); } -bool LegalizerHelper::extractParts(unsigned Reg, LLT RegTy, +bool LegalizerHelper::extractParts(Register Reg, LLT RegTy, LLT MainTy, LLT &LeftoverTy, - SmallVectorImpl &VRegs, - SmallVectorImpl &LeftoverRegs) { + SmallVectorImpl &VRegs, + SmallVectorImpl &LeftoverRegs) { assert(!LeftoverTy.isValid() && "this is an out argument"); unsigned RegSize = RegTy.getSizeInBits(); @@ -152,14 +152,14 @@ // For irregular sizes, extract the individual parts. 
for (unsigned I = 0; I != NumParts; ++I) { - unsigned NewReg = MRI.createGenericVirtualRegister(MainTy); + Register NewReg = MRI.createGenericVirtualRegister(MainTy); VRegs.push_back(NewReg); MIRBuilder.buildExtract(NewReg, Reg, MainSize * I); } for (unsigned Offset = MainSize * NumParts; Offset < RegSize; Offset += LeftoverSize) { - unsigned NewReg = MRI.createGenericVirtualRegister(LeftoverTy); + Register NewReg = MRI.createGenericVirtualRegister(LeftoverTy); LeftoverRegs.push_back(NewReg); MIRBuilder.buildExtract(NewReg, Reg, Offset); } @@ -167,11 +167,11 @@ return true; } -void LegalizerHelper::insertParts(unsigned DstReg, +void LegalizerHelper::insertParts(Register DstReg, LLT ResultTy, LLT PartTy, - ArrayRef PartRegs, + ArrayRef PartRegs, LLT LeftoverTy, - ArrayRef LeftoverRegs) { + ArrayRef LeftoverRegs) { if (!LeftoverTy.isValid()) { assert(LeftoverRegs.empty()); @@ -468,7 +468,7 @@ return UnableToLegalize; int NumParts = SizeOp0 / NarrowSize; - SmallVector DstRegs; + SmallVector DstRegs; for (int i = 0; i < NumParts; ++i) DstRegs.push_back( MIRBuilder.buildUndef(NarrowTy)->getOperand(0).getReg()); @@ -488,7 +488,7 @@ unsigned NarrowSize = NarrowTy.getSizeInBits(); int NumParts = TotalSize / NarrowSize; - SmallVector PartRegs; + SmallVector PartRegs; for (int I = 0; I != NumParts; ++I) { unsigned Offset = I * NarrowSize; auto K = MIRBuilder.buildConstant(NarrowTy, @@ -498,7 +498,7 @@ LLT LeftoverTy; unsigned LeftoverBits = TotalSize - NumParts * NarrowSize; - SmallVector LeftoverRegs; + SmallVector LeftoverRegs; if (LeftoverBits != 0) { LeftoverTy = LLT::scalar(LeftoverBits); auto K = MIRBuilder.buildConstant( @@ -521,7 +521,7 @@ // Expand in terms of carry-setting/consuming G_ADDE instructions. 
int NumParts = SizeOp0 / NarrowTy.getSizeInBits(); - SmallVector Src1Regs, Src2Regs, DstRegs; + SmallVector Src1Regs, Src2Regs, DstRegs; extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, Src1Regs); extractParts(MI.getOperand(2).getReg(), NarrowTy, NumParts, Src2Regs); @@ -554,7 +554,7 @@ int NumParts = SizeOp0 / NarrowTy.getSizeInBits(); - SmallVector Src1Regs, Src2Regs, DstRegs; + SmallVector Src1Regs, Src2Regs, DstRegs; extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, Src1Regs); extractParts(MI.getOperand(2).getReg(), NarrowTy, NumParts, Src2Regs); @@ -760,7 +760,7 @@ // Use concat_vectors if the result is a multiple of the number of elements. if (NumParts * OldElts == NewElts) { - SmallVector Parts; + SmallVector Parts; Parts.push_back(MO.getReg()); unsigned ImpDef = MIRBuilder.buildUndef(OldTy).getReg(0); @@ -784,7 +784,7 @@ if (TypeIdx != 1) return UnableToLegalize; - unsigned DstReg = MI.getOperand(0).getReg(); + Register DstReg = MI.getOperand(0).getReg(); LLT DstTy = MRI.getType(DstReg); if (!DstTy.isScalar()) return UnableToLegalize; @@ -794,17 +794,17 @@ unsigned PartSize = DstTy.getSizeInBits() / NumSrc; unsigned Src1 = MI.getOperand(1).getReg(); - unsigned ResultReg = MIRBuilder.buildZExt(DstTy, Src1)->getOperand(0).getReg(); + Register ResultReg = MIRBuilder.buildZExt(DstTy, Src1)->getOperand(0).getReg(); for (unsigned I = 2; I != NumOps; ++I) { const unsigned Offset = (I - 1) * PartSize; - unsigned SrcReg = MI.getOperand(I).getReg(); + Register SrcReg = MI.getOperand(I).getReg(); assert(MRI.getType(SrcReg) == LLT::scalar(PartSize)); auto ZextInput = MIRBuilder.buildZExt(DstTy, SrcReg); - unsigned NextResult = I + 1 == NumOps ? DstReg : + Register NextResult = I + 1 == NumOps ? 
DstReg : MRI.createGenericVirtualRegister(DstTy); auto ShiftAmt = MIRBuilder.buildConstant(DstTy, Offset); @@ -824,12 +824,12 @@ return UnableToLegalize; unsigned NumDst = MI.getNumOperands() - 1; - unsigned SrcReg = MI.getOperand(NumDst).getReg(); + Register SrcReg = MI.getOperand(NumDst).getReg(); LLT SrcTy = MRI.getType(SrcReg); if (!SrcTy.isScalar()) return UnableToLegalize; - unsigned Dst0Reg = MI.getOperand(0).getReg(); + Register Dst0Reg = MI.getOperand(0).getReg(); LLT DstTy = MRI.getType(Dst0Reg); if (!DstTy.isScalar()) return UnableToLegalize; @@ -860,8 +860,8 @@ LegalizerHelper::LegalizeResult LegalizerHelper::widenScalarExtract(MachineInstr &MI, unsigned TypeIdx, LLT WideTy) { - unsigned DstReg = MI.getOperand(0).getReg(); - unsigned SrcReg = MI.getOperand(1).getReg(); + Register DstReg = MI.getOperand(0).getReg(); + Register SrcReg = MI.getOperand(1).getReg(); LLT SrcTy = MRI.getType(SrcReg); LLT DstTy = MRI.getType(DstReg); @@ -1616,7 +1616,7 @@ LegalizerHelper::LegalizeResult LegalizerHelper::fewerElementsVectorImplicitDef( MachineInstr &MI, unsigned TypeIdx, LLT NarrowTy) { - SmallVector DstRegs; + SmallVector DstRegs; unsigned NarrowSize = NarrowTy.getSizeInBits(); unsigned DstReg = MI.getOperand(0).getReg(); @@ -1701,7 +1701,7 @@ return Legalized; } - SmallVector DstRegs, Src0Regs, Src1Regs, Src2Regs; + SmallVector DstRegs, Src0Regs, Src1Regs, Src2Regs; extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, Src0Regs); @@ -1772,8 +1772,8 @@ SmallVector NewInsts; - SmallVector DstRegs, LeftoverDstRegs; - SmallVector PartRegs, LeftoverRegs; + SmallVector DstRegs, LeftoverDstRegs; + SmallVector PartRegs, LeftoverRegs; for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I) { LLT LeftoverTy; @@ -1860,7 +1860,7 @@ NarrowTy1 = SrcTy.getElementType(); } - SmallVector SrcRegs, DstRegs; + SmallVector SrcRegs, DstRegs; extractParts(SrcReg, NarrowTy1, NumParts, SrcRegs); for (unsigned I = 0; I < NumParts; ++I) { @@ -1923,7 +1923,7 @@ 
CmpInst::Predicate Pred = static_cast(MI.getOperand(1).getPredicate()); - SmallVector Src1Regs, Src2Regs, DstRegs; + SmallVector Src1Regs, Src2Regs, DstRegs; extractParts(MI.getOperand(2).getReg(), NarrowTy1, NumParts, Src1Regs); extractParts(MI.getOperand(3).getReg(), NarrowTy1, NumParts, Src2Regs); @@ -1952,8 +1952,8 @@ LegalizerHelper::LegalizeResult LegalizerHelper::fewerElementsVectorSelect(MachineInstr &MI, unsigned TypeIdx, LLT NarrowTy) { - unsigned DstReg = MI.getOperand(0).getReg(); - unsigned CondReg = MI.getOperand(1).getReg(); + Register DstReg = MI.getOperand(0).getReg(); + Register CondReg = MI.getOperand(1).getReg(); unsigned NumParts = 0; LLT NarrowTy0, NarrowTy1; @@ -1998,7 +1998,7 @@ } } - SmallVector DstRegs, Src0Regs, Src1Regs, Src2Regs; + SmallVector DstRegs, Src0Regs, Src1Regs, Src2Regs; if (CondTy.isVector()) extractParts(MI.getOperand(1).getReg(), NarrowTy1, NumParts, Src0Regs); @@ -2006,7 +2006,7 @@ extractParts(MI.getOperand(3).getReg(), NarrowTy0, NumParts, Src2Regs); for (unsigned i = 0; i < NumParts; ++i) { - unsigned DstReg = MRI.createGenericVirtualRegister(NarrowTy0); + Register DstReg = MRI.createGenericVirtualRegister(NarrowTy0); MIRBuilder.buildSelect(DstReg, CondTy.isVector() ? Src0Regs[i] : CondReg, Src1Regs[i], Src2Regs[i]); DstRegs.push_back(DstReg); @@ -2037,7 +2037,7 @@ if (NumParts < 0) return UnableToLegalize; - SmallVector DstRegs, LeftoverDstRegs; + SmallVector DstRegs, LeftoverDstRegs; SmallVector NewInsts; const int TotalNumParts = NumParts + NumLeftover; @@ -2045,7 +2045,7 @@ // Insert the new phis in the result block first. for (int I = 0; I != TotalNumParts; ++I) { LLT Ty = I < NumParts ? 
NarrowTy : LeftoverTy; - unsigned PartDstReg = MRI.createGenericVirtualRegister(Ty); + Register PartDstReg = MRI.createGenericVirtualRegister(Ty); NewInsts.push_back(MIRBuilder.buildInstr(TargetOpcode::G_PHI) .addDef(PartDstReg)); if (I < NumParts) @@ -2058,7 +2058,7 @@ MIRBuilder.setInsertPt(*MBB, MBB->getFirstNonPHI()); insertParts(DstReg, PhiTy, NarrowTy, DstRegs, LeftoverTy, LeftoverDstRegs); - SmallVector PartRegs, LeftoverRegs; + SmallVector PartRegs, LeftoverRegs; // Insert code to extract the incoming values in each predecessor block. for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) { @@ -2104,14 +2104,14 @@ return UnableToLegalize; bool IsLoad = MI.getOpcode() == TargetOpcode::G_LOAD; - unsigned ValReg = MI.getOperand(0).getReg(); - unsigned AddrReg = MI.getOperand(1).getReg(); + Register ValReg = MI.getOperand(0).getReg(); + Register AddrReg = MI.getOperand(1).getReg(); LLT ValTy = MRI.getType(ValReg); int NumParts = -1; int NumLeftover = -1; LLT LeftoverTy; - SmallVector NarrowRegs, NarrowLeftoverRegs; + SmallVector NarrowRegs, NarrowLeftoverRegs; if (IsLoad) { std::tie(NumParts, NumLeftover) = getNarrowTypeBreakDown(ValTy, NarrowTy, LeftoverTy); } else { @@ -2133,7 +2133,7 @@ // is a load, return the new registers in ValRegs. For a store, each elements // of ValRegs should be PartTy. Returns the next offset that needs to be // handled. 
- auto splitTypePieces = [=](LLT PartTy, SmallVectorImpl &ValRegs, + auto splitTypePieces = [=](LLT PartTy, SmallVectorImpl &ValRegs, unsigned Offset) -> unsigned { MachineFunction &MF = MIRBuilder.getMF(); unsigned PartSize = PartTy.getSizeInBits(); @@ -2141,7 +2141,7 @@ Offset += PartSize, ++Idx) { unsigned ByteSize = PartSize / 8; unsigned ByteOffset = Offset / 8; - unsigned NewAddrReg = 0; + Register NewAddrReg; MIRBuilder.materializeGEP(NewAddrReg, AddrReg, OffsetTy, ByteOffset); @@ -2149,7 +2149,7 @@ MF.getMachineMemOperand(MMO, ByteOffset, ByteSize); if (IsLoad) { - unsigned Dst = MRI.createGenericVirtualRegister(PartTy); + Register Dst = MRI.createGenericVirtualRegister(PartTy); ValRegs.push_back(Dst); MIRBuilder.buildLoad(Dst, NewAddrReg, *NewMMO); } else { @@ -2400,7 +2400,7 @@ auto IsShort = MIRBuilder.buildICmp(ICmpInst::ICMP_ULT, CondTy, Amt, NewBits); auto IsZero = MIRBuilder.buildICmp(ICmpInst::ICMP_EQ, CondTy, Amt, Zero); - unsigned ResultRegs[2]; + Register ResultRegs[2]; switch (MI.getOpcode()) { case TargetOpcode::G_SHL: { // Short: ShAmt < NewBitSize @@ -2555,9 +2555,9 @@ } } -void LegalizerHelper::multiplyRegisters(SmallVectorImpl &DstRegs, - ArrayRef Src1Regs, - ArrayRef Src2Regs, +void LegalizerHelper::multiplyRegisters(SmallVectorImpl &DstRegs, + ArrayRef Src1Regs, + ArrayRef Src2Regs, LLT NarrowTy) { MachineIRBuilder &B = MIRBuilder; unsigned SrcParts = Src1Regs.size(); @@ -2569,7 +2569,7 @@ DstRegs[DstIdx] = FactorSum; unsigned CarrySumPrevDstIdx; - SmallVector Factors; + SmallVector Factors; for (DstIdx = 1; DstIdx < DstParts; DstIdx++) { // Collect low parts of muls for DstIdx. 
@@ -2620,9 +2620,9 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalarMul(MachineInstr &MI, LLT NarrowTy) { - unsigned DstReg = MI.getOperand(0).getReg(); - unsigned Src1 = MI.getOperand(1).getReg(); - unsigned Src2 = MI.getOperand(2).getReg(); + Register DstReg = MI.getOperand(0).getReg(); + Register Src1 = MI.getOperand(1).getReg(); + Register Src2 = MI.getOperand(2).getReg(); LLT Ty = MRI.getType(DstReg); if (Ty.isVector()) @@ -2639,14 +2639,14 @@ bool IsMulHigh = MI.getOpcode() == TargetOpcode::G_UMULH; unsigned DstTmpParts = NumDstParts * (IsMulHigh ? 2 : 1); - SmallVector Src1Parts, Src2Parts, DstTmpRegs; + SmallVector Src1Parts, Src2Parts, DstTmpRegs; extractParts(Src1, NarrowTy, NumSrcParts, Src1Parts); extractParts(Src2, NarrowTy, NumSrcParts, Src2Parts); DstTmpRegs.resize(DstTmpParts); multiplyRegisters(DstTmpRegs, Src1Parts, Src2Parts, NarrowTy); // Take only high half of registers if this is high mul. - ArrayRef DstRegs( + ArrayRef DstRegs( IsMulHigh ? &DstTmpRegs[DstTmpParts / 2] : &DstTmpRegs[0], NumDstParts); MIRBuilder.buildMerge(DstReg, DstRegs); MI.eraseFromParent(); @@ -2668,7 +2668,7 @@ return UnableToLegalize; int NumParts = SizeOp1 / NarrowSize; - SmallVector SrcRegs, DstRegs; + SmallVector SrcRegs, DstRegs; SmallVector Indexes; extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, SrcRegs); @@ -2735,7 +2735,7 @@ int NumParts = SizeOp0 / NarrowSize; - SmallVector SrcRegs, DstRegs; + SmallVector SrcRegs, DstRegs; SmallVector Indexes; extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, SrcRegs); @@ -2801,9 +2801,9 @@ assert(MI.getNumOperands() == 3 && TypeIdx == 0); - SmallVector DstRegs, DstLeftoverRegs; - SmallVector Src0Regs, Src0LeftoverRegs; - SmallVector Src1Regs, Src1LeftoverRegs; + SmallVector DstRegs, DstLeftoverRegs; + SmallVector Src0Regs, Src0LeftoverRegs; + SmallVector Src1Regs, Src1LeftoverRegs; LLT LeftoverTy; if (!extractParts(MI.getOperand(1).getReg(), DstTy, NarrowTy, LeftoverTy, Src0Regs, 
Src0LeftoverRegs)) @@ -2848,9 +2848,9 @@ unsigned DstReg = MI.getOperand(0).getReg(); LLT DstTy = MRI.getType(DstReg); - SmallVector DstRegs, DstLeftoverRegs; - SmallVector Src1Regs, Src1LeftoverRegs; - SmallVector Src2Regs, Src2LeftoverRegs; + SmallVector DstRegs, DstLeftoverRegs; + SmallVector Src1Regs, Src1LeftoverRegs; + SmallVector Src2Regs, Src2LeftoverRegs; LLT LeftoverTy; if (!extractParts(MI.getOperand(2).getReg(), DstTy, NarrowTy, LeftoverTy, Src1Regs, Src1LeftoverRegs)) Index: lib/CodeGen/GlobalISel/MachineIRBuilder.cpp =================================================================== --- lib/CodeGen/GlobalISel/MachineIRBuilder.cpp +++ lib/CodeGen/GlobalISel/MachineIRBuilder.cpp @@ -210,7 +210,7 @@ } Optional -MachineIRBuilder::materializeGEP(unsigned &Res, unsigned Op0, +MachineIRBuilder::materializeGEP(Register &Res, Register Op0, const LLT &ValueTy, uint64_t Value) { assert(Res == 0 && "Res is a result argument"); assert(ValueTy.isScalar() && "invalid offset type"); @@ -506,7 +506,7 @@ return Extract; } -void MachineIRBuilder::buildSequence(unsigned Res, ArrayRef Ops, +void MachineIRBuilder::buildSequence(Register Res, ArrayRef Ops, ArrayRef Indices) { #ifndef NDEBUG assert(Ops.size() == Indices.size() && "incompatible args"); @@ -535,11 +535,11 @@ return; } - unsigned ResIn = getMRI()->createGenericVirtualRegister(ResTy); + Register ResIn = getMRI()->createGenericVirtualRegister(ResTy); buildUndef(ResIn); for (unsigned i = 0; i < Ops.size(); ++i) { - unsigned ResOut = i + 1 == Ops.size() + Register ResOut = i + 1 == Ops.size() ? Res : getMRI()->createGenericVirtualRegister(ResTy); buildInsert(ResOut, ResIn, Ops[i], Indices[i]); @@ -552,7 +552,7 @@ } MachineInstrBuilder MachineIRBuilder::buildMerge(const DstOp &Res, - ArrayRef Ops) { + ArrayRef Ops) { // Unfortunately to convert from ArrayRef to ArrayRef, // we need some temporary storage for the DstOp objects. Here we use a // sufficiently large SmallVector to not go through the heap. 
@@ -572,13 +572,13 @@ MachineInstrBuilder MachineIRBuilder::buildUnmerge(LLT Res, const SrcOp &Op) { unsigned NumReg = Op.getLLTTy(*getMRI()).getSizeInBits() / Res.getSizeInBits(); - SmallVector TmpVec; + SmallVector TmpVec; for (unsigned I = 0; I != NumReg; ++I) TmpVec.push_back(getMRI()->createGenericVirtualRegister(Res)); return buildUnmerge(TmpVec, Op); } -MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef Res, +MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef Res, const SrcOp &Op) { // Unfortunately to convert from ArrayRef to ArrayRef, // we need some temporary storage for the DstOp objects. Here we use a @@ -588,7 +588,7 @@ } MachineInstrBuilder MachineIRBuilder::buildBuildVector(const DstOp &Res, - ArrayRef Ops) { + ArrayRef Ops) { // Unfortunately to convert from ArrayRef to ArrayRef, // we need some temporary storage for the DstOp objects. Here we use a // sufficiently large SmallVector to not go through the heap. @@ -604,7 +604,7 @@ MachineInstrBuilder MachineIRBuilder::buildBuildVectorTrunc(const DstOp &Res, - ArrayRef Ops) { + ArrayRef Ops) { // Unfortunately to convert from ArrayRef to ArrayRef, // we need some temporary storage for the DstOp objects. Here we use a // sufficiently large SmallVector to not go through the heap. @@ -613,7 +613,7 @@ } MachineInstrBuilder -MachineIRBuilder::buildConcatVectors(const DstOp &Res, ArrayRef Ops) { +MachineIRBuilder::buildConcatVectors(const DstOp &Res, ArrayRef Ops) { // Unfortunately to convert from ArrayRef to ArrayRef, // we need some temporary storage for the DstOp objects. Here we use a // sufficiently large SmallVector to not go through the heap. 
@@ -621,8 +621,8 @@ return buildInstr(TargetOpcode::G_CONCAT_VECTORS, Res, TmpVec); } -MachineInstrBuilder MachineIRBuilder::buildInsert(unsigned Res, unsigned Src, - unsigned Op, unsigned Index) { +MachineInstrBuilder MachineIRBuilder::buildInsert(Register Res, Register Src, + Register Op, unsigned Index) { assert(Index + getMRI()->getType(Op).getSizeInBits() <= getMRI()->getType(Res).getSizeInBits() && "insertion past the end of a register"); @@ -640,7 +640,7 @@ } MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID, - ArrayRef ResultRegs, + ArrayRef ResultRegs, bool HasSideEffects) { auto MIB = buildInstr(HasSideEffects ? TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS Index: lib/CodeGen/LiveDebugValues.cpp =================================================================== --- lib/CodeGen/LiveDebugValues.cpp +++ lib/CodeGen/LiveDebugValues.cpp @@ -70,12 +70,12 @@ // If @MI is a DBG_VALUE with debug value described by a defined // register, returns the number of this register. In the other case, returns 0. -static unsigned isDbgValueDescribedByReg(const MachineInstr &MI) { +static Register isDbgValueDescribedByReg(const MachineInstr &MI) { assert(MI.isDebugValue() && "expected a DBG_VALUE"); assert(MI.getNumOperands() == 4 && "malformed DBG_VALUE"); // If location of variable is described using a register (directly // or indirectly), this register is always a first operand. - return MI.getOperand(0).isReg() ? MI.getOperand(0).getReg() : 0; + return MI.getOperand(0).isReg() ? MI.getOperand(0).getReg() : Register(); } namespace { Index: lib/CodeGen/MachineOperand.cpp =================================================================== --- lib/CodeGen/MachineOperand.cpp +++ lib/CodeGen/MachineOperand.cpp @@ -342,7 +342,7 @@ switch (MO.getType()) { case MachineOperand::MO_Register: // Register operands don't have target flags. 
- return hash_combine(MO.getType(), MO.getReg(), MO.getSubReg(), MO.isDef()); + return hash_combine(MO.getType(), (unsigned)MO.getReg(), MO.getSubReg(), MO.isDef()); case MachineOperand::MO_Immediate: return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getImm()); case MachineOperand::MO_CImmediate: Index: lib/CodeGen/MachineRegisterInfo.cpp =================================================================== --- lib/CodeGen/MachineRegisterInfo.cpp +++ lib/CodeGen/MachineRegisterInfo.cpp @@ -154,7 +154,7 @@ /// createVirtualRegister - Create and return a new virtual register in the /// function with the specified register class. /// -unsigned +Register MachineRegisterInfo::createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name) { assert(RegClass && "Cannot create register without RegClass!"); @@ -169,7 +169,7 @@ return Reg; } -unsigned MachineRegisterInfo::cloneVirtualRegister(unsigned VReg, +Register MachineRegisterInfo::cloneVirtualRegister(Register VReg, StringRef Name) { unsigned Reg = createIncompleteVirtualRegister(Name); VRegInfo[Reg].first = VRegInfo[VReg].first; @@ -184,7 +184,7 @@ VRegToType[VReg] = Ty; } -unsigned +Register MachineRegisterInfo::createGenericVirtualRegister(LLT Ty, StringRef Name) { // New virtual register number. unsigned Reg = createIncompleteVirtualRegister(Name); Index: lib/CodeGen/RegAllocGreedy.cpp =================================================================== --- lib/CodeGen/RegAllocGreedy.cpp +++ lib/CodeGen/RegAllocGreedy.cpp @@ -2874,14 +2874,14 @@ if (!Instr.isFullCopy()) continue; // Look for the other end of the copy. - unsigned OtherReg = Instr.getOperand(0).getReg(); + Register OtherReg = Instr.getOperand(0).getReg(); if (OtherReg == Reg) { OtherReg = Instr.getOperand(1).getReg(); if (OtherReg == Reg) continue; } // Get the current assignment. - unsigned OtherPhysReg = TargetRegisterInfo::isPhysicalRegister(OtherReg) + Register OtherPhysReg = TargetRegisterInfo::isPhysicalRegister(OtherReg) ? 
OtherReg : VRM->getPhys(OtherReg); // Push the collected information. Index: lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp =================================================================== --- lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp +++ lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp @@ -7879,7 +7879,7 @@ for (; NumRegs; --NumRegs, ++I) { assert(I != RC->end() && "Ran out of registers to allocate!"); - auto R = (AssignedReg) ? *I : RegInfo.createVirtualRegister(RC); + Register R = AssignedReg ? Register(*I) : RegInfo.createVirtualRegister(RC); Regs.push_back(R); } Index: lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp =================================================================== --- lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp +++ lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp @@ -570,7 +570,7 @@ for (unsigned i = 0, e = FuncInfo->ArgDbgValues.size(); i != e; ++i) { MachineInstr *MI = FuncInfo->ArgDbgValues[e-i-1]; bool hasFI = MI->getOperand(0).isFI(); - unsigned Reg = + Register Reg = hasFI ? TRI.getFrameRegister(*MF) : MI->getOperand(0).getReg(); if (TargetRegisterInfo::isPhysicalRegister(Reg)) EntryMBB->insert(EntryMBB->begin(), MI); Index: lib/CodeGen/SwiftErrorValueTracking.cpp =================================================================== --- lib/CodeGen/SwiftErrorValueTracking.cpp +++ lib/CodeGen/SwiftErrorValueTracking.cpp @@ -42,7 +42,7 @@ } void SwiftErrorValueTracking::setCurrentVReg(const MachineBasicBlock *MBB, - const Value *Val, unsigned VReg) { + const Value *Val, Register VReg) { VRegDefMap[std::make_pair(MBB, Val)] = VReg; } @@ -161,7 +161,7 @@ auto UUseIt = VRegUpwardsUse.find(Key); auto VRegDefIt = VRegDefMap.find(Key); bool UpwardsUse = UUseIt != VRegUpwardsUse.end(); - unsigned UUseVReg = UpwardsUse ? UUseIt->second : 0; + Register UUseVReg = UpwardsUse ? 
UUseIt->second : Register(); bool DownwardDef = VRegDefIt != VRegDefMap.end(); assert(!(UpwardsUse && !DownwardDef) && "We can't have an upwards use but no downwards def"); @@ -238,7 +238,7 @@ // destination virtual register number otherwise we generate a new one. auto &DL = MF->getDataLayout(); auto const *RC = TLI->getRegClassFor(TLI->getPointerTy(DL)); - unsigned PHIVReg = + Register PHIVReg = UpwardsUse ? UUseVReg : MF->getRegInfo().createVirtualRegister(RC); MachineInstrBuilder PHI = BuildMI(*MBB, MBB->getFirstNonPHI(), DLoc, Index: lib/CodeGen/TargetInstrInfo.cpp =================================================================== --- lib/CodeGen/TargetInstrInfo.cpp +++ lib/CodeGen/TargetInstrInfo.cpp @@ -163,9 +163,9 @@ assert(MI.getOperand(Idx1).isReg() && MI.getOperand(Idx2).isReg() && "This only knows how to commute register operands so far"); - unsigned Reg0 = HasDef ? MI.getOperand(0).getReg() : 0; - unsigned Reg1 = MI.getOperand(Idx1).getReg(); - unsigned Reg2 = MI.getOperand(Idx2).getReg(); + Register Reg0 = HasDef ? MI.getOperand(0).getReg() : Register(); + Register Reg1 = MI.getOperand(Idx1).getReg(); + Register Reg2 = MI.getOperand(Idx2).getReg(); unsigned SubReg0 = HasDef ? 
MI.getOperand(0).getSubReg() : 0; unsigned SubReg1 = MI.getOperand(Idx1).getSubReg(); unsigned SubReg2 = MI.getOperand(Idx2).getSubReg(); Index: lib/Target/AArch64/AArch64CallLowering.h =================================================================== --- lib/Target/AArch64/AArch64CallLowering.h +++ lib/Target/AArch64/AArch64CallLowering.h @@ -34,16 +34,16 @@ AArch64CallLowering(const AArch64TargetLowering &TLI); bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val, - ArrayRef VRegs, - unsigned SwiftErrorVReg) const override; + ArrayRef VRegs, + Register SwiftErrorVReg) const override; bool lowerFormalArguments(MachineIRBuilder &MIRBuilder, const Function &F, - ArrayRef VRegs) const override; + ArrayRef VRegs) const override; bool lowerCall(MachineIRBuilder &MIRBuilder, CallingConv::ID CallConv, const MachineOperand &Callee, const ArgInfo &OrigRet, ArrayRef OrigArgs, - unsigned SwiftErrorVReg) const override; + Register SwiftErrorVReg) const override; bool lowerCall(MachineIRBuilder &MIRBuilder, CallingConv::ID CallConv, const MachineOperand &Callee, const ArgInfo &OrigRet, Index: lib/Target/AArch64/AArch64CallLowering.cpp =================================================================== --- lib/Target/AArch64/AArch64CallLowering.cpp +++ lib/Target/AArch64/AArch64CallLowering.cpp @@ -232,8 +232,8 @@ bool AArch64CallLowering::lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val, - ArrayRef VRegs, - unsigned SwiftErrorVReg) const { + ArrayRef VRegs, + Register SwiftErrorVReg) const { auto MIB = MIRBuilder.buildInstrNoInsert(AArch64::RET_ReallyLR); assert(((Val && !VRegs.empty()) || (!Val && VRegs.empty())) && "Return value without a vreg"); @@ -352,7 +352,7 @@ bool AArch64CallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder, const Function &F, - ArrayRef VRegs) const { + ArrayRef VRegs) const { MachineFunction &MF = MIRBuilder.getMF(); MachineBasicBlock &MBB = MIRBuilder.getMBB(); MachineRegisterInfo &MRI = MF.getRegInfo(); @@ -427,7 
+427,7 @@ const MachineOperand &Callee, const ArgInfo &OrigRet, ArrayRef OrigArgs, - unsigned SwiftErrorVReg) const { + Register SwiftErrorVReg) const { MachineFunction &MF = MIRBuilder.getMF(); const Function &F = MF.getFunction(); MachineRegisterInfo &MRI = MF.getRegInfo(); @@ -495,7 +495,7 @@ SplitArgs.clear(); SmallVector RegOffsets; - SmallVector SplitRegs; + SmallVector SplitRegs; splitToValueTypes(OrigRet, SplitArgs, DL, MRI, F.getCallingConv(), [&](unsigned Reg, uint64_t Offset) { RegOffsets.push_back(Offset); Index: lib/Target/AArch64/AArch64FalkorHWPFFix.cpp =================================================================== --- lib/Target/AArch64/AArch64FalkorHWPFFix.cpp +++ lib/Target/AArch64/AArch64FalkorHWPFFix.cpp @@ -212,8 +212,8 @@ struct LoadInfo { LoadInfo() = default; - unsigned DestReg = 0; - unsigned BaseReg = 0; + Register DestReg; + Register BaseReg; int BaseRegIdx = -1; const MachineOperand *OffsetOpnd = nullptr; bool IsPrePost = false; @@ -647,7 +647,7 @@ return None; LoadInfo LI; - LI.DestReg = DestRegIdx == -1 ? 0 : MI.getOperand(DestRegIdx).getReg(); + LI.DestReg = DestRegIdx == -1 ? Register() : MI.getOperand(DestRegIdx).getReg(); LI.BaseReg = BaseReg; LI.BaseRegIdx = BaseRegIdx; LI.OffsetOpnd = OffsetIdx == -1 ? nullptr : &MI.getOperand(OffsetIdx); Index: lib/Target/AArch64/AArch64InstructionSelector.cpp =================================================================== --- lib/Target/AArch64/AArch64InstructionSelector.cpp +++ lib/Target/AArch64/AArch64InstructionSelector.cpp @@ -1015,9 +1015,9 @@ MovZ->addOperand(MF, MachineOperand::CreateImm(0)); constrainSelectedInstRegOperands(*MovZ, TII, TRI, RBI); - auto BuildMovK = [&](unsigned SrcReg, unsigned char Flags, unsigned Offset, - unsigned ForceDstReg) { - unsigned DstReg = ForceDstReg + auto BuildMovK = [&](Register SrcReg, unsigned char Flags, unsigned Offset, + Register ForceDstReg) { + Register DstReg = ForceDstReg ? 
ForceDstReg : MRI.createVirtualRegister(&AArch64::GPR64RegClass); auto MovI = MIB.buildInstr(AArch64::MOVKXi).addDef(DstReg).addUse(SrcReg); Index: lib/Target/AArch64/AArch64RedundantCopyElimination.cpp =================================================================== --- lib/Target/AArch64/AArch64RedundantCopyElimination.cpp +++ lib/Target/AArch64/AArch64RedundantCopyElimination.cpp @@ -379,8 +379,8 @@ bool IsCopy = MI->isCopy(); bool IsMoveImm = MI->isMoveImmediate(); if (IsCopy || IsMoveImm) { - MCPhysReg DefReg = MI->getOperand(0).getReg(); - MCPhysReg SrcReg = IsCopy ? MI->getOperand(1).getReg() : 0; + Register DefReg = MI->getOperand(0).getReg(); + Register SrcReg = IsCopy ? MI->getOperand(1).getReg() : Register(); int64_t SrcImm = IsMoveImm ? MI->getOperand(1).getImm() : 0; if (!MRI->isReserved(DefReg) && ((IsCopy && (SrcReg == AArch64::XZR || SrcReg == AArch64::WZR)) || Index: lib/Target/AArch64/AArch64RegisterInfo.h =================================================================== --- lib/Target/AArch64/AArch64RegisterInfo.h +++ lib/Target/AArch64/AArch64RegisterInfo.h @@ -113,7 +113,7 @@ unsigned getBaseRegister() const; // Debug information queries. - unsigned getFrameRegister(const MachineFunction &MF) const override; + Register getFrameRegister(const MachineFunction &MF) const override; unsigned getRegPressureLimit(const TargetRegisterClass *RC, MachineFunction &MF) const override; Index: lib/Target/AArch64/AArch64RegisterInfo.cpp =================================================================== --- lib/Target/AArch64/AArch64RegisterInfo.cpp +++ lib/Target/AArch64/AArch64RegisterInfo.cpp @@ -279,7 +279,7 @@ return false; } -unsigned +Register AArch64RegisterInfo::getFrameRegister(const MachineFunction &MF) const { const AArch64FrameLowering *TFI = getFrameLowering(MF); return TFI->hasFP(MF) ? 
AArch64::FP : AArch64::SP; Index: lib/Target/AMDGPU/AMDGPUCallLowering.h =================================================================== --- lib/Target/AMDGPU/AMDGPUCallLowering.h +++ lib/Target/AMDGPU/AMDGPUCallLowering.h @@ -27,15 +27,15 @@ void lowerParameter(MachineIRBuilder &MIRBuilder, Type *ParamTy, uint64_t Offset, unsigned Align, - unsigned DstReg) const; + Register DstReg) const; public: AMDGPUCallLowering(const AMDGPUTargetLowering &TLI); bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val, - ArrayRef VRegs) const override; + ArrayRef VRegs) const override; bool lowerFormalArguments(MachineIRBuilder &MIRBuilder, const Function &F, - ArrayRef VRegs) const override; + ArrayRef VRegs) const override; static CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg); static CCAssignFn *CCAssignFnForReturn(CallingConv::ID CC, bool IsVarArg); }; Index: lib/Target/AMDGPU/AMDGPUCallLowering.cpp =================================================================== --- lib/Target/AMDGPU/AMDGPUCallLowering.cpp +++ lib/Target/AMDGPU/AMDGPUCallLowering.cpp @@ -69,7 +69,7 @@ bool AMDGPUCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val, - ArrayRef VRegs) const { + ArrayRef VRegs) const { MachineFunction &MF = MIRBuilder.getMF(); MachineRegisterInfo &MRI = MF.getRegInfo(); @@ -81,7 +81,7 @@ return true; } - unsigned VReg = VRegs[0]; + Register VReg = VRegs[0]; const Function &F = MF.getFunction(); auto &DL = F.getParent()->getDataLayout(); @@ -138,14 +138,14 @@ void AMDGPUCallLowering::lowerParameter(MachineIRBuilder &MIRBuilder, Type *ParamTy, uint64_t Offset, unsigned Align, - unsigned DstReg) const { + Register DstReg) const { MachineFunction &MF = MIRBuilder.getMF(); const Function &F = MF.getFunction(); const DataLayout &DL = F.getParent()->getDataLayout(); PointerType *PtrTy = PointerType::get(ParamTy, AMDGPUAS::CONSTANT_ADDRESS); MachinePointerInfo PtrInfo(UndefValue::get(PtrTy)); unsigned TypeSize = 
DL.getTypeStoreSize(ParamTy); - unsigned PtrReg = lowerParameterPtr(MIRBuilder, ParamTy, Offset); + Register PtrReg = lowerParameterPtr(MIRBuilder, ParamTy, Offset); MachineMemOperand *MMO = MF.getMachineMemOperand(PtrInfo, MachineMemOperand::MOLoad | @@ -195,7 +195,7 @@ bool AMDGPUCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder, const Function &F, - ArrayRef VRegs) const { + ArrayRef VRegs) const { // AMDGPU_GS and AMDGP_HS are not supported yet. if (F.getCallingConv() == CallingConv::AMDGPU_GS || F.getCallingConv() == CallingConv::AMDGPU_HS) Index: lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp =================================================================== --- lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp +++ lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp @@ -792,8 +792,8 @@ 4, MinAlign(64, StructOffset)); - unsigned LoadResult = MRI.createGenericVirtualRegister(S32); - unsigned LoadAddr = AMDGPU::NoRegister; + Register LoadResult = MRI.createGenericVirtualRegister(S32); + Register LoadAddr; MIRBuilder.materializeGEP(LoadAddr, QueuePtr, LLT::scalar(64), StructOffset); MIRBuilder.buildLoad(LoadResult, LoadAddr, *MMO); @@ -807,8 +807,8 @@ MIRBuilder.setInstr(MI); - unsigned Dst = MI.getOperand(0).getReg(); - unsigned Src = MI.getOperand(1).getReg(); + Register Dst = MI.getOperand(0).getReg(); + Register Src = MI.getOperand(1).getReg(); LLT DstTy = MRI.getType(Dst); LLT SrcTy = MRI.getType(Src); Index: lib/Target/AMDGPU/AMDGPURegisterBankInfo.h =================================================================== --- lib/Target/AMDGPU/AMDGPURegisterBankInfo.h +++ lib/Target/AMDGPU/AMDGPURegisterBankInfo.h @@ -13,6 +13,7 @@ #ifndef LLVM_LIB_TARGET_AMDGPU_AMDGPUREGISTERBANKINFO_H #define LLVM_LIB_TARGET_AMDGPU_AMDGPUREGISTERBANKINFO_H +#include "llvm/CodeGen/Register.h" #include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h" #define GET_REGBANK_DECLARATIONS @@ -54,7 +55,7 @@ /// Split 64-bit value \p Reg into two 32-bit halves and populate them into \p /// Regs. 
This appropriately sets the regbank of the new registers. void split64BitValueForMapping(MachineIRBuilder &B, - SmallVector &Regs, + SmallVector &Regs, LLT HalfTy, unsigned Reg) const; Index: lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp =================================================================== --- lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp +++ lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp @@ -375,7 +375,7 @@ void AMDGPURegisterBankInfo::split64BitValueForMapping( MachineIRBuilder &B, - SmallVector &Regs, + SmallVector &Regs, LLT HalfTy, unsigned Reg) const { assert(HalfTy.getSizeInBits() == 32); @@ -396,7 +396,7 @@ } /// Replace the current type each register in \p Regs has with \p NewTy -static void setRegsToType(MachineRegisterInfo &MRI, ArrayRef Regs, +static void setRegsToType(MachineRegisterInfo &MRI, ArrayRef Regs, LLT NewTy) { for (unsigned Reg : Regs) { assert(MRI.getType(Reg).getSizeInBits() == NewTy.getSizeInBits()); @@ -445,7 +445,7 @@ // Use a set to avoid extra readfirstlanes in the case where multiple operands // are the same register. - SmallSet SGPROperandRegs; + SmallSet SGPROperandRegs; for (unsigned Op : OpIndices) { assert(MI.getOperand(Op).isUse()); unsigned Reg = MI.getOperand(Op).getReg(); @@ -459,9 +459,9 @@ return; MachineIRBuilder B(MI); - SmallVector ResultRegs; - SmallVector InitResultRegs; - SmallVector PhiRegs; + SmallVector ResultRegs; + SmallVector InitResultRegs; + SmallVector PhiRegs; for (MachineOperand &Def : MI.defs()) { LLT ResTy = MRI.getType(Def.getReg()); const RegisterBank *DefBank = getRegBank(Def.getReg(), MRI, *TRI); @@ -575,7 +575,7 @@ } } else { LLT S32 = LLT::scalar(32); - SmallVector ReadlanePieces; + SmallVector ReadlanePieces; // The compares can be done as 64-bit, but the extract needs to be done // in 32-bit pieces. 
@@ -732,10 +732,10 @@ LLT HalfTy = getHalfSizedType(DstTy); - SmallVector DefRegs(OpdMapper.getVRegs(0)); - SmallVector Src0Regs(OpdMapper.getVRegs(1)); - SmallVector Src1Regs(OpdMapper.getVRegs(2)); - SmallVector Src2Regs(OpdMapper.getVRegs(3)); + SmallVector DefRegs(OpdMapper.getVRegs(0)); + SmallVector Src0Regs(OpdMapper.getVRegs(1)); + SmallVector Src1Regs(OpdMapper.getVRegs(2)); + SmallVector Src2Regs(OpdMapper.getVRegs(3)); // All inputs are SGPRs, nothing special to do. if (DefRegs.empty()) { @@ -781,9 +781,9 @@ break; LLT HalfTy = getHalfSizedType(DstTy); - SmallVector DefRegs(OpdMapper.getVRegs(0)); - SmallVector Src0Regs(OpdMapper.getVRegs(1)); - SmallVector Src1Regs(OpdMapper.getVRegs(2)); + SmallVector DefRegs(OpdMapper.getVRegs(0)); + SmallVector Src0Regs(OpdMapper.getVRegs(1)); + SmallVector Src1Regs(OpdMapper.getVRegs(2)); // All inputs are SGPRs, nothing special to do. if (DefRegs.empty()) { Index: lib/Target/AMDGPU/AMDGPURegisterInfo.cpp =================================================================== --- lib/Target/AMDGPU/AMDGPURegisterInfo.cpp +++ lib/Target/AMDGPU/AMDGPURegisterInfo.cpp @@ -82,7 +82,7 @@ } } -unsigned SIRegisterInfo::getFrameRegister(const MachineFunction &MF) const { +Register SIRegisterInfo::getFrameRegister(const MachineFunction &MF) const { const SIFrameLowering *TFI = MF.getSubtarget().getFrameLowering(); const SIMachineFunctionInfo *FuncInfo = MF.getInfo(); Index: lib/Target/AMDGPU/R600Packetizer.cpp =================================================================== --- lib/Target/AMDGPU/R600Packetizer.cpp +++ lib/Target/AMDGPU/R600Packetizer.cpp @@ -186,8 +186,8 @@ // Does MII and MIJ share the same pred_sel ? 
int OpI = TII->getOperandIdx(MII->getOpcode(), R600::OpName::pred_sel), OpJ = TII->getOperandIdx(MIJ->getOpcode(), R600::OpName::pred_sel); - unsigned PredI = (OpI > -1)?MII->getOperand(OpI).getReg():0, - PredJ = (OpJ > -1)?MIJ->getOperand(OpJ).getReg():0; + Register PredI = (OpI > -1)?MII->getOperand(OpI).getReg() : Register(), + PredJ = (OpJ > -1)?MIJ->getOperand(OpJ).getReg() : Register(); if (PredI != PredJ) return false; if (SUJ->isSucc(SUI)) { Index: lib/Target/AMDGPU/R600RegisterInfo.h =================================================================== --- lib/Target/AMDGPU/R600RegisterInfo.h +++ lib/Target/AMDGPU/R600RegisterInfo.h @@ -26,7 +26,7 @@ BitVector getReservedRegs(const MachineFunction &MF) const override; const MCPhysReg *getCalleeSavedRegs(const MachineFunction *MF) const override; - unsigned getFrameRegister(const MachineFunction &MF) const override; + Register getFrameRegister(const MachineFunction &MF) const override; /// get the HW encoding for a register's channel. 
unsigned getHWRegChan(unsigned reg) const; Index: lib/Target/AMDGPU/R600RegisterInfo.cpp =================================================================== --- lib/Target/AMDGPU/R600RegisterInfo.cpp +++ lib/Target/AMDGPU/R600RegisterInfo.cpp @@ -67,7 +67,7 @@ return &CalleeSavedReg; } -unsigned R600RegisterInfo::getFrameRegister(const MachineFunction &MF) const { +Register R600RegisterInfo::getFrameRegister(const MachineFunction &MF) const { return R600::NoRegister; } Index: lib/Target/AMDGPU/SILowerControlFlow.cpp =================================================================== --- lib/Target/AMDGPU/SILowerControlFlow.cpp +++ lib/Target/AMDGPU/SILowerControlFlow.cpp @@ -185,7 +185,7 @@ assert(SaveExec.getSubReg() == AMDGPU::NoSubRegister && Cond.getSubReg() == AMDGPU::NoSubRegister); - unsigned SaveExecReg = SaveExec.getReg(); + Register SaveExecReg = SaveExec.getReg(); MachineOperand &ImpDefSCC = MI.getOperand(4); assert(ImpDefSCC.getReg() == AMDGPU::SCC && ImpDefSCC.isDef()); @@ -197,7 +197,7 @@ // Add an implicit def of exec to discourage scheduling VALU after this which // will interfere with trying to form s_and_saveexec_b64 later. - unsigned CopyReg = SimpleIf ? SaveExecReg + Register CopyReg = SimpleIf ? SaveExecReg : MRI->createVirtualRegister(BoolRC); MachineInstr *CopyExec = BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), CopyReg) @@ -266,7 +266,7 @@ MachineBasicBlock &MBB = *MI.getParent(); const DebugLoc &DL = MI.getDebugLoc(); - unsigned DstReg = MI.getOperand(0).getReg(); + Register DstReg = MI.getOperand(0).getReg(); assert(MI.getOperand(0).getSubReg() == AMDGPU::NoSubRegister); bool ExecModified = MI.getOperand(3).getImm() != 0; @@ -275,14 +275,14 @@ // We are running before TwoAddressInstructions, and si_else's operands are // tied. In order to correctly tie the registers, split this into a copy of // the src like it does. 
- unsigned CopyReg = MRI->createVirtualRegister(BoolRC); + Register CopyReg = MRI->createVirtualRegister(BoolRC); MachineInstr *CopyExec = BuildMI(MBB, Start, DL, TII->get(AMDGPU::COPY), CopyReg) .add(MI.getOperand(1)); // Saved EXEC // This must be inserted before phis and any spill code inserted before the // else. - unsigned SaveReg = ExecModified ? + Register SaveReg = ExecModified ? MRI->createVirtualRegister(BoolRC) : DstReg; MachineInstr *OrSaveExec = BuildMI(MBB, Start, DL, TII->get(OrSaveExecOpc), SaveReg) Index: lib/Target/AMDGPU/SIRegisterInfo.h =================================================================== --- lib/Target/AMDGPU/SIRegisterInfo.h +++ lib/Target/AMDGPU/SIRegisterInfo.h @@ -70,7 +70,7 @@ return 100; } - unsigned getFrameRegister(const MachineFunction &MF) const override; + Register getFrameRegister(const MachineFunction &MF) const override; bool canRealignStack(const MachineFunction &MF) const override; bool requiresRegisterScavenging(const MachineFunction &Fn) const override; Index: lib/Target/AMDGPU/SIRegisterInfo.cpp =================================================================== --- lib/Target/AMDGPU/SIRegisterInfo.cpp +++ lib/Target/AMDGPU/SIRegisterInfo.cpp @@ -721,7 +721,7 @@ if (SpillToSMEM && OnlyToVGPR) return false; - unsigned FrameReg = getFrameRegister(*MF); + Register FrameReg = getFrameRegister(*MF); assert(SpillToVGPR || (SuperReg != MFI->getStackPtrOffsetReg() && SuperReg != MFI->getFrameOffsetReg() && @@ -914,7 +914,7 @@ unsigned EltSize = 4; unsigned ScalarLoadOp; - unsigned FrameReg = getFrameRegister(*MF); + Register FrameReg = getFrameRegister(*MF); const TargetRegisterClass *RC = getPhysRegClass(SuperReg); if (SpillToSMEM && isSGPRClass(RC)) { @@ -1063,7 +1063,7 @@ MachineOperand &FIOp = MI->getOperand(FIOperandNum); int Index = MI->getOperand(FIOperandNum).getIndex(); - unsigned FrameReg = getFrameRegister(*MF); + Register FrameReg = getFrameRegister(*MF); switch (MI->getOpcode()) { // SGPR register spill @@ 
-1154,7 +1154,7 @@ = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); bool IsCopy = MI->getOpcode() == AMDGPU::V_MOV_B32_e32; - unsigned ResultReg = IsCopy ? + Register ResultReg = IsCopy ? MI->getOperand(0).getReg() : MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); Index: lib/Target/ARM/ARMBaseRegisterInfo.h =================================================================== --- lib/Target/ARM/ARMBaseRegisterInfo.h +++ lib/Target/ARM/ARMBaseRegisterInfo.h @@ -173,7 +173,7 @@ bool cannotEliminateFrame(const MachineFunction &MF) const; // Debug information queries. - unsigned getFrameRegister(const MachineFunction &MF) const override; + Register getFrameRegister(const MachineFunction &MF) const override; unsigned getBaseRegister() const { return BasePtr; } bool isLowRegister(unsigned Reg) const; Index: lib/Target/ARM/ARMBaseRegisterInfo.cpp =================================================================== --- lib/Target/ARM/ARMBaseRegisterInfo.cpp +++ lib/Target/ARM/ARMBaseRegisterInfo.cpp @@ -426,7 +426,7 @@ || needsStackRealignment(MF); } -unsigned +Register ARMBaseRegisterInfo::getFrameRegister(const MachineFunction &MF) const { const ARMSubtarget &STI = MF.getSubtarget(); const ARMFrameLowering *TFI = getFrameLowering(MF); @@ -786,7 +786,7 @@ int PIdx = MI.findFirstPredOperandIdx(); ARMCC::CondCodes Pred = (PIdx == -1) ? ARMCC::AL : (ARMCC::CondCodes)MI.getOperand(PIdx).getImm(); - unsigned PredReg = (PIdx == -1) ? 0 : MI.getOperand(PIdx+1).getReg(); + Register PredReg = (PIdx == -1) ? Register() : MI.getOperand(PIdx+1).getReg(); if (Offset == 0) // Must be addrmode4/6. 
MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, false, false, false); Index: lib/Target/ARM/ARMCallLowering.h =================================================================== --- lib/Target/ARM/ARMCallLowering.h +++ lib/Target/ARM/ARMCallLowering.h @@ -33,10 +33,10 @@ ARMCallLowering(const ARMTargetLowering &TLI); bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val, - ArrayRef VRegs) const override; + ArrayRef VRegs) const override; bool lowerFormalArguments(MachineIRBuilder &MIRBuilder, const Function &F, - ArrayRef VRegs) const override; + ArrayRef VRegs) const override; bool lowerCall(MachineIRBuilder &MIRBuilder, CallingConv::ID CallConv, const MachineOperand &Callee, const ArgInfo &OrigRet, @@ -44,7 +44,7 @@ private: bool lowerReturnVal(MachineIRBuilder &MIRBuilder, const Value *Val, - ArrayRef VRegs, + ArrayRef VRegs, MachineInstrBuilder &Ret) const; using SplitArgTy = std::function; Index: lib/Target/ARM/ARMCallLowering.cpp =================================================================== --- lib/Target/ARM/ARMCallLowering.cpp +++ lib/Target/ARM/ARMCallLowering.cpp @@ -151,7 +151,7 @@ assert(VA.isRegLoc() && "Value should be in reg"); assert(NextVA.isRegLoc() && "Value should be in reg"); - unsigned NewRegs[] = {MRI.createGenericVirtualRegister(LLT::scalar(32)), + Register NewRegs[] = {MRI.createGenericVirtualRegister(LLT::scalar(32)), MRI.createGenericVirtualRegister(LLT::scalar(32))}; MIRBuilder.buildUnmerge(NewRegs, Arg.Reg); @@ -232,7 +232,7 @@ /// Lower the return value for the already existing \p Ret. This assumes that /// \p MIRBuilder's insertion point is correct. bool ARMCallLowering::lowerReturnVal(MachineIRBuilder &MIRBuilder, - const Value *Val, ArrayRef VRegs, + const Value *Val, ArrayRef VRegs, MachineInstrBuilder &Ret) const { if (!Val) // Nothing to do here. 
@@ -257,9 +257,9 @@ ArgInfo CurArgInfo(VRegs[i], SplitEVTs[i].getTypeForEVT(Ctx)); setArgFlags(CurArgInfo, AttributeList::ReturnIndex, DL, F); - SmallVector Regs; + SmallVector Regs; splitToValueTypes(CurArgInfo, SplitVTs, MF, - [&](unsigned Reg) { Regs.push_back(Reg); }); + [&](Register Reg) { Regs.push_back(Reg); }); if (Regs.size() > 1) MIRBuilder.buildUnmerge(Regs, VRegs[i]); } @@ -273,7 +273,7 @@ bool ARMCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val, - ArrayRef VRegs) const { + ArrayRef VRegs) const { assert(!Val == VRegs.empty() && "Return value without a vreg"); auto const &ST = MIRBuilder.getMF().getSubtarget(); @@ -386,7 +386,7 @@ assert(VA.isRegLoc() && "Value should be in reg"); assert(NextVA.isRegLoc() && "Value should be in reg"); - unsigned NewRegs[] = {MRI.createGenericVirtualRegister(LLT::scalar(32)), + Register NewRegs[] = {MRI.createGenericVirtualRegister(LLT::scalar(32)), MRI.createGenericVirtualRegister(LLT::scalar(32))}; assignValueToReg(NewRegs[0], VA.getLocReg(), VA); @@ -421,7 +421,7 @@ bool ARMCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder, const Function &F, - ArrayRef VRegs) const { + ArrayRef VRegs) const { auto &TLI = *getTLI(); auto Subtarget = TLI.getSubtarget(); @@ -453,7 +453,7 @@ AssignFn); SmallVector ArgInfos; - SmallVector SplitRegs; + SmallVector SplitRegs; unsigned Idx = 0; for (auto &Arg : F.args()) { ArgInfo AInfo(VRegs[Idx], Arg.getType()); @@ -462,7 +462,7 @@ SplitRegs.clear(); splitToValueTypes(AInfo, ArgInfos, MF, - [&](unsigned Reg) { SplitRegs.push_back(Reg); }); + [&](Register Reg) { SplitRegs.push_back(Reg); }); if (!SplitRegs.empty()) MIRBuilder.buildMerge(VRegs[Idx], SplitRegs); @@ -568,7 +568,7 @@ if (Arg.Flags.isByVal()) return false; - SmallVector Regs; + SmallVector Regs; splitToValueTypes(Arg, ArgInfos, MF, [&](unsigned Reg) { Regs.push_back(Reg); }); @@ -589,9 +589,9 @@ return false; ArgInfos.clear(); - SmallVector SplitRegs; + SmallVector SplitRegs; 
splitToValueTypes(OrigRet, ArgInfos, MF, - [&](unsigned Reg) { SplitRegs.push_back(Reg); }); + [&](Register Reg) { SplitRegs.push_back(Reg); }); auto RetAssignFn = TLI.CCAssignFnForReturn(CallConv, IsVarArg); CallReturnHandler RetHandler(MIRBuilder, MRI, MIB, RetAssignFn); Index: lib/Target/BPF/BPFRegisterInfo.h =================================================================== --- lib/Target/BPF/BPFRegisterInfo.h +++ lib/Target/BPF/BPFRegisterInfo.h @@ -32,7 +32,7 @@ unsigned FIOperandNum, RegScavenger *RS = nullptr) const override; - unsigned getFrameRegister(const MachineFunction &MF) const override; + Register getFrameRegister(const MachineFunction &MF) const override; }; } Index: lib/Target/BPF/BPFRegisterInfo.cpp =================================================================== --- lib/Target/BPF/BPFRegisterInfo.cpp +++ lib/Target/BPF/BPFRegisterInfo.cpp @@ -121,6 +121,6 @@ } } -unsigned BPFRegisterInfo::getFrameRegister(const MachineFunction &MF) const { +Register BPFRegisterInfo::getFrameRegister(const MachineFunction &MF) const { return BPF::R10; } Index: lib/Target/Hexagon/HexagonCopyToCombine.cpp =================================================================== --- lib/Target/Hexagon/HexagonCopyToCombine.cpp +++ lib/Target/Hexagon/HexagonCopyToCombine.cpp @@ -254,8 +254,8 @@ MI.isMetaInstruction(); } -static unsigned UseReg(const MachineOperand& MO) { - return MO.isReg() ? MO.getReg() : 0; +static Register UseReg(const MachineOperand& MO) { + return MO.isReg() ? MO.getReg() : Register(); } /// isSafeToMoveTogether - Returns true if it is safe to move I1 next to I2 such Index: lib/Target/Hexagon/HexagonGenMux.cpp =================================================================== --- lib/Target/Hexagon/HexagonGenMux.cpp +++ lib/Target/Hexagon/HexagonGenMux.cpp @@ -303,8 +303,8 @@ std::advance(It2, MaxX); MachineInstr &Def1 = *It1, &Def2 = *It2; MachineOperand *Src1 = &Def1.getOperand(2), *Src2 = &Def2.getOperand(2); - unsigned SR1 = Src1->isReg() ? 
Src1->getReg() : 0; - unsigned SR2 = Src2->isReg() ? Src2->getReg() : 0; + Register SR1 = Src1->isReg() ? Src1->getReg() : Register(); + Register SR2 = Src2->isReg() ? Src2->getReg() : Register(); bool Failure = false, CanUp = true, CanDown = true; for (unsigned X = MinX+1; X < MaxX; X++) { const DefUseInfo &DU = DUM.lookup(X); Index: lib/Target/Hexagon/HexagonGenPredicate.cpp =================================================================== --- lib/Target/Hexagon/HexagonGenPredicate.cpp +++ lib/Target/Hexagon/HexagonGenPredicate.cpp @@ -45,17 +45,19 @@ namespace { - struct Register { + // FIXME: Use TargetInstrInfo::RegSubRegPair + struct RegisterSubReg { unsigned R, S; - Register(unsigned r = 0, unsigned s = 0) : R(r), S(s) {} - Register(const MachineOperand &MO) : R(MO.getReg()), S(MO.getSubReg()) {} + RegisterSubReg(unsigned r = 0, unsigned s = 0) : R(r), S(s) {} + RegisterSubReg(const MachineOperand &MO) : R(MO.getReg()), S(MO.getSubReg()) {} + RegisterSubReg(const Register &Reg) : R(Reg), S(0) {} - bool operator== (const Register &Reg) const { + bool operator== (const RegisterSubReg &Reg) const { return R == Reg.R && S == Reg.S; } - bool operator< (const Register &Reg) const { + bool operator< (const RegisterSubReg &Reg) const { return R < Reg.R || (R == Reg.R && S < Reg.S); } }; @@ -63,10 +65,10 @@ struct PrintRegister { friend raw_ostream &operator<< (raw_ostream &OS, const PrintRegister &PR); - PrintRegister(Register R, const TargetRegisterInfo &I) : Reg(R), TRI(I) {} + PrintRegister(RegisterSubReg R, const TargetRegisterInfo &I) : Reg(R), TRI(I) {} private: - Register Reg; + RegisterSubReg Reg; const TargetRegisterInfo &TRI; }; @@ -98,8 +100,8 @@ private: using VectOfInst = SetVector; - using SetOfReg = std::set; - using RegToRegMap = std::map; + using SetOfReg = std::set; + using RegToRegMap = std::map; const HexagonInstrInfo *TII = nullptr; const HexagonRegisterInfo *TRI = nullptr; @@ -110,12 +112,12 @@ bool isPredReg(unsigned R); void 
collectPredicateGPR(MachineFunction &MF); - void processPredicateGPR(const Register &Reg); + void processPredicateGPR(const RegisterSubReg &Reg); unsigned getPredForm(unsigned Opc); bool isConvertibleToPredForm(const MachineInstr *MI); bool isScalarCmp(unsigned Opc); - bool isScalarPred(Register PredReg); - Register getPredRegFor(const Register &Reg); + bool isScalarPred(RegisterSubReg PredReg); + RegisterSubReg getPredRegFor(const RegisterSubReg &Reg); bool convertToPredForm(MachineInstr *MI); bool eliminatePredCopies(MachineFunction &MF); }; @@ -210,7 +212,7 @@ case Hexagon::C2_tfrpr: case TargetOpcode::COPY: if (isPredReg(MI->getOperand(1).getReg())) { - Register RD = MI->getOperand(0); + RegisterSubReg RD = MI->getOperand(0); if (TargetRegisterInfo::isVirtualRegister(RD.R)) PredGPRs.insert(RD); } @@ -220,7 +222,7 @@ } } -void HexagonGenPredicate::processPredicateGPR(const Register &Reg) { +void HexagonGenPredicate::processPredicateGPR(const RegisterSubReg &Reg) { LLVM_DEBUG(dbgs() << __func__ << ": " << printReg(Reg.R, TRI, Reg.S) << "\n"); using use_iterator = MachineRegisterInfo::use_iterator; @@ -239,7 +241,7 @@ } } -Register HexagonGenPredicate::getPredRegFor(const Register &Reg) { +RegisterSubReg HexagonGenPredicate::getPredRegFor(const RegisterSubReg &Reg) { // Create a predicate register for a given Reg. The newly created register // will have its value copied from Reg, so that it can be later used as // an operand in other instructions. 
@@ -254,7 +256,7 @@ unsigned Opc = DefI->getOpcode(); if (Opc == Hexagon::C2_tfrpr || Opc == TargetOpcode::COPY) { assert(DefI->getOperand(0).isDef() && DefI->getOperand(1).isUse()); - Register PR = DefI->getOperand(1); + RegisterSubReg PR = DefI->getOperand(1); G2P.insert(std::make_pair(Reg, PR)); LLVM_DEBUG(dbgs() << " -> " << PrintRegister(PR, *TRI) << '\n'); return PR; @@ -271,10 +273,10 @@ MachineBasicBlock::iterator DefIt = DefI; BuildMI(B, std::next(DefIt), DL, TII->get(TargetOpcode::COPY), NewPR) .addReg(Reg.R, 0, Reg.S); - G2P.insert(std::make_pair(Reg, Register(NewPR))); - LLVM_DEBUG(dbgs() << " -> !" << PrintRegister(Register(NewPR), *TRI) + G2P.insert(std::make_pair(Reg, RegisterSubReg(NewPR))); + LLVM_DEBUG(dbgs() << " -> !" << PrintRegister(RegisterSubReg(NewPR), *TRI) << '\n'); - return Register(NewPR); + return RegisterSubReg(NewPR); } llvm_unreachable("Invalid argument"); @@ -316,12 +318,12 @@ return false; } -bool HexagonGenPredicate::isScalarPred(Register PredReg) { - std::queue WorkQ; +bool HexagonGenPredicate::isScalarPred(RegisterSubReg PredReg) { + std::queue WorkQ; WorkQ.push(PredReg); while (!WorkQ.empty()) { - Register PR = WorkQ.front(); + RegisterSubReg PR = WorkQ.front(); WorkQ.pop(); const MachineInstr *DefI = MRI->getVRegDef(PR.R); if (!DefI) @@ -350,7 +352,7 @@ // Add operands to the queue. for (const MachineOperand &MO : DefI->operands()) if (MO.isReg() && MO.isUse()) - WorkQ.push(Register(MO.getReg())); + WorkQ.push(RegisterSubReg(MO.getReg())); break; // All non-vector compares are ok, everything else is bad. @@ -372,7 +374,7 @@ MachineOperand &MO = MI->getOperand(i); if (!MO.isReg() || !MO.isUse()) continue; - Register Reg(MO); + RegisterSubReg Reg(MO); if (Reg.S && Reg.S != Hexagon::isub_lo) return false; if (!PredGPRs.count(Reg)) @@ -399,7 +401,7 @@ // If it's a scalar predicate register, then all bits in it are // the same. Otherwise, to determine whether all bits are 0 or not // we would need to use any8. 
- Register PR = getPredRegFor(MI->getOperand(1)); + RegisterSubReg PR = getPredRegFor(MI->getOperand(1)); if (!isScalarPred(PR)) return false; // This will skip the immediate argument when creating the predicate @@ -410,19 +412,19 @@ // Some sanity: check that def is in operand #0. MachineOperand &Op0 = MI->getOperand(0); assert(Op0.isDef()); - Register OutR(Op0); + RegisterSubReg OutR(Op0); // Don't use getPredRegFor, since it will create an association between // the argument and a created predicate register (i.e. it will insert a // copy if a new predicate register is created). const TargetRegisterClass *PredRC = &Hexagon::PredRegsRegClass; - Register NewPR = MRI->createVirtualRegister(PredRC); + RegisterSubReg NewPR = MRI->createVirtualRegister(PredRC); MachineInstrBuilder MIB = BuildMI(B, MI, DL, TII->get(NewOpc), NewPR.R); // Add predicate counterparts of the GPRs. for (unsigned i = 1; i < NumOps; ++i) { - Register GPR = MI->getOperand(i); - Register Pred = getPredRegFor(GPR); + RegisterSubReg GPR = MI->getOperand(i); + RegisterSubReg Pred = getPredRegFor(GPR); MIB.addReg(Pred.R, 0, Pred.S); } LLVM_DEBUG(dbgs() << "generated: " << *MIB); @@ -440,7 +442,7 @@ // then the output will be a predicate register. Do not visit the // users of it. 
if (!isPredReg(NewOutR)) { - Register R(NewOutR); + RegisterSubReg R(NewOutR); PredGPRs.insert(R); processPredicateGPR(R); } @@ -467,8 +469,8 @@ for (MachineInstr &MI : MBB) { if (MI.getOpcode() != TargetOpcode::COPY) continue; - Register DR = MI.getOperand(0); - Register SR = MI.getOperand(1); + RegisterSubReg DR = MI.getOperand(0); + RegisterSubReg SR = MI.getOperand(1); if (!TargetRegisterInfo::isVirtualRegister(DR.R)) continue; if (!TargetRegisterInfo::isVirtualRegister(SR.R)) Index: lib/Target/Hexagon/HexagonRegisterInfo.h =================================================================== --- lib/Target/Hexagon/HexagonRegisterInfo.h +++ lib/Target/Hexagon/HexagonRegisterInfo.h @@ -66,7 +66,7 @@ // Debug information queries. unsigned getRARegister() const; - unsigned getFrameRegister(const MachineFunction &MF) const override; + Register getFrameRegister(const MachineFunction &MF) const override; unsigned getFrameRegister() const; unsigned getStackRegister() const; Index: lib/Target/Hexagon/HexagonRegisterInfo.cpp =================================================================== --- lib/Target/Hexagon/HexagonRegisterInfo.cpp +++ lib/Target/Hexagon/HexagonRegisterInfo.cpp @@ -286,7 +286,7 @@ } -unsigned HexagonRegisterInfo::getFrameRegister(const MachineFunction +Register HexagonRegisterInfo::getFrameRegister(const MachineFunction &MF) const { const HexagonFrameLowering *TFI = getFrameLowering(MF); if (TFI->hasFP(MF)) Index: lib/Target/Lanai/LanaiRegisterInfo.h =================================================================== --- lib/Target/Lanai/LanaiRegisterInfo.h +++ lib/Target/Lanai/LanaiRegisterInfo.h @@ -42,8 +42,8 @@ // Debug information queries. 
unsigned getRARegister() const; - unsigned getFrameRegister(const MachineFunction &MF) const override; - unsigned getBaseRegister() const; + Register getFrameRegister(const MachineFunction &MF) const override; + Register getBaseRegister() const; bool hasBasePointer(const MachineFunction &MF) const; int getDwarfRegNum(unsigned RegNum, bool IsEH) const; Index: lib/Target/Lanai/LanaiRegisterInfo.cpp =================================================================== --- lib/Target/Lanai/LanaiRegisterInfo.cpp +++ lib/Target/Lanai/LanaiRegisterInfo.cpp @@ -258,12 +258,12 @@ unsigned LanaiRegisterInfo::getRARegister() const { return Lanai::RCA; } -unsigned +Register LanaiRegisterInfo::getFrameRegister(const MachineFunction & /*MF*/) const { return Lanai::FP; } -unsigned LanaiRegisterInfo::getBaseRegister() const { return Lanai::R14; } +Register LanaiRegisterInfo::getBaseRegister() const { return Lanai::R14; } const uint32_t * LanaiRegisterInfo::getCallPreservedMask(const MachineFunction & /*MF*/, Index: lib/Target/MSP430/MSP430RegisterInfo.h =================================================================== --- lib/Target/MSP430/MSP430RegisterInfo.h +++ lib/Target/MSP430/MSP430RegisterInfo.h @@ -37,7 +37,7 @@ RegScavenger *RS = nullptr) const override; // Debug information queries. - unsigned getFrameRegister(const MachineFunction &MF) const override; + Register getFrameRegister(const MachineFunction &MF) const override; }; } // end namespace llvm Index: lib/Target/MSP430/MSP430RegisterInfo.cpp =================================================================== --- lib/Target/MSP430/MSP430RegisterInfo.cpp +++ lib/Target/MSP430/MSP430RegisterInfo.cpp @@ -154,7 +154,7 @@ MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset); } -unsigned MSP430RegisterInfo::getFrameRegister(const MachineFunction &MF) const { +Register MSP430RegisterInfo::getFrameRegister(const MachineFunction &MF) const { const MSP430FrameLowering *TFI = getFrameLowering(MF); return TFI->hasFP(MF) ? 
MSP430::FP : MSP430::SP; } Index: lib/Target/Mips/MipsCallLowering.h =================================================================== --- lib/Target/Mips/MipsCallLowering.h +++ lib/Target/Mips/MipsCallLowering.h @@ -34,39 +34,39 @@ ArrayRef Args); protected: - bool assignVRegs(ArrayRef VRegs, ArrayRef ArgLocs, + bool assignVRegs(ArrayRef VRegs, ArrayRef ArgLocs, unsigned ArgLocsStartIndex, const EVT &VT); - void setLeastSignificantFirst(SmallVectorImpl &VRegs); + void setLeastSignificantFirst(SmallVectorImpl &VRegs); MachineIRBuilder &MIRBuilder; MachineRegisterInfo &MRI; private: - bool assign(unsigned VReg, const CCValAssign &VA, const EVT &VT); + bool assign(Register VReg, const CCValAssign &VA, const EVT &VT); virtual unsigned getStackAddress(const CCValAssign &VA, MachineMemOperand *&MMO) = 0; - virtual void assignValueToReg(unsigned ValVReg, const CCValAssign &VA, + virtual void assignValueToReg(Register ValVReg, const CCValAssign &VA, const EVT &VT) = 0; - virtual void assignValueToAddress(unsigned ValVReg, + virtual void assignValueToAddress(Register ValVReg, const CCValAssign &VA) = 0; - virtual bool handleSplit(SmallVectorImpl &VRegs, + virtual bool handleSplit(SmallVectorImpl &VRegs, ArrayRef ArgLocs, - unsigned ArgLocsStartIndex, unsigned ArgsReg, + unsigned ArgLocsStartIndex, Register ArgsReg, const EVT &VT) = 0; }; MipsCallLowering(const MipsTargetLowering &TLI); bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val, - ArrayRef VRegs) const override; + ArrayRef VRegs) const override; bool lowerFormalArguments(MachineIRBuilder &MIRBuilder, const Function &F, - ArrayRef VRegs) const override; + ArrayRef VRegs) const override; bool lowerCall(MachineIRBuilder &MIRBuilder, CallingConv::ID CallConv, const MachineOperand &Callee, const ArgInfo &OrigRet, Index: lib/Target/Mips/MipsCallLowering.cpp =================================================================== --- lib/Target/Mips/MipsCallLowering.cpp +++ lib/Target/Mips/MipsCallLowering.cpp 
@@ -24,7 +24,7 @@ MipsCallLowering::MipsCallLowering(const MipsTargetLowering &TLI) : CallLowering(&TLI) {} -bool MipsCallLowering::MipsHandler::assign(unsigned VReg, const CCValAssign &VA, +bool MipsCallLowering::MipsHandler::assign(Register VReg, const CCValAssign &VA, const EVT &VT) { if (VA.isRegLoc()) { assignValueToReg(VReg, VA, VT); @@ -36,7 +36,7 @@ return true; } -bool MipsCallLowering::MipsHandler::assignVRegs(ArrayRef VRegs, +bool MipsCallLowering::MipsHandler::assignVRegs(ArrayRef VRegs, ArrayRef ArgLocs, unsigned ArgLocsStartIndex, const EVT &VT) { @@ -47,14 +47,14 @@ } void MipsCallLowering::MipsHandler::setLeastSignificantFirst( - SmallVectorImpl &VRegs) { + SmallVectorImpl &VRegs) { if (!MIRBuilder.getMF().getDataLayout().isLittleEndian()) std::reverse(VRegs.begin(), VRegs.end()); } bool MipsCallLowering::MipsHandler::handle( ArrayRef ArgLocs, ArrayRef Args) { - SmallVector VRegs; + SmallVector VRegs; unsigned SplitLength; const Function &F = MIRBuilder.getMF().getFunction(); const DataLayout &DL = F.getParent()->getDataLayout(); @@ -90,17 +90,17 @@ : MipsHandler(MIRBuilder, MRI) {} private: - void assignValueToReg(unsigned ValVReg, const CCValAssign &VA, + void assignValueToReg(Register ValVReg, const CCValAssign &VA, const EVT &VT) override; unsigned getStackAddress(const CCValAssign &VA, MachineMemOperand *&MMO) override; - void assignValueToAddress(unsigned ValVReg, const CCValAssign &VA) override; + void assignValueToAddress(Register ValVReg, const CCValAssign &VA) override; - bool handleSplit(SmallVectorImpl &VRegs, + bool handleSplit(SmallVectorImpl &VRegs, ArrayRef ArgLocs, unsigned ArgLocsStartIndex, - unsigned ArgsReg, const EVT &VT) override; + Register ArgsReg, const EVT &VT) override; virtual void markPhysRegUsed(unsigned PhysReg) { MIRBuilder.getMBB().addLiveIn(PhysReg); @@ -129,7 +129,7 @@ } // end anonymous namespace -void IncomingValueHandler::assignValueToReg(unsigned ValVReg, +void IncomingValueHandler::assignValueToReg(Register 
ValVReg, const CCValAssign &VA, const EVT &VT) { const MipsSubtarget &STI = @@ -194,22 +194,22 @@ return AddrReg; } -void IncomingValueHandler::assignValueToAddress(unsigned ValVReg, +void IncomingValueHandler::assignValueToAddress(Register ValVReg, const CCValAssign &VA) { if (VA.getLocInfo() == CCValAssign::SExt || VA.getLocInfo() == CCValAssign::ZExt || VA.getLocInfo() == CCValAssign::AExt) { - unsigned LoadReg = MRI.createGenericVirtualRegister(LLT::scalar(32)); + Register LoadReg = MRI.createGenericVirtualRegister(LLT::scalar(32)); buildLoad(LoadReg, VA); MIRBuilder.buildTrunc(ValVReg, LoadReg); } else buildLoad(ValVReg, VA); } -bool IncomingValueHandler::handleSplit(SmallVectorImpl &VRegs, +bool IncomingValueHandler::handleSplit(SmallVectorImpl &VRegs, ArrayRef ArgLocs, unsigned ArgLocsStartIndex, - unsigned ArgsReg, const EVT &VT) { + Register ArgsReg, const EVT &VT) { if (!assignVRegs(VRegs, ArgLocs, ArgLocsStartIndex, VT)) return false; setLeastSignificantFirst(VRegs); @@ -225,28 +225,28 @@ : MipsHandler(MIRBuilder, MRI), MIB(MIB) {} private: - void assignValueToReg(unsigned ValVReg, const CCValAssign &VA, + void assignValueToReg(Register ValVReg, const CCValAssign &VA, const EVT &VT) override; unsigned getStackAddress(const CCValAssign &VA, MachineMemOperand *&MMO) override; - void assignValueToAddress(unsigned ValVReg, const CCValAssign &VA) override; + void assignValueToAddress(Register ValVReg, const CCValAssign &VA) override; - bool handleSplit(SmallVectorImpl &VRegs, + bool handleSplit(SmallVectorImpl &VRegs, ArrayRef ArgLocs, unsigned ArgLocsStartIndex, - unsigned ArgsReg, const EVT &VT) override; + Register ArgsReg, const EVT &VT) override; - unsigned extendRegister(unsigned ValReg, const CCValAssign &VA); + unsigned extendRegister(Register ValReg, const CCValAssign &VA); MachineInstrBuilder &MIB; }; } // end anonymous namespace -void OutgoingValueHandler::assignValueToReg(unsigned ValVReg, +void OutgoingValueHandler::assignValueToReg(Register 
ValVReg, const CCValAssign &VA, const EVT &VT) { - unsigned PhysReg = VA.getLocReg(); + Register PhysReg = VA.getLocReg(); const MipsSubtarget &STI = static_cast(MIRBuilder.getMF().getSubtarget()); @@ -287,14 +287,14 @@ LLT p0 = LLT::pointer(0, 32); LLT s32 = LLT::scalar(32); - unsigned SPReg = MRI.createGenericVirtualRegister(p0); + Register SPReg = MRI.createGenericVirtualRegister(p0); MIRBuilder.buildCopy(SPReg, Mips::SP); - unsigned OffsetReg = MRI.createGenericVirtualRegister(s32); + Register OffsetReg = MRI.createGenericVirtualRegister(s32); unsigned Offset = VA.getLocMemOffset(); MIRBuilder.buildConstant(OffsetReg, Offset); - unsigned AddrReg = MRI.createGenericVirtualRegister(p0); + Register AddrReg = MRI.createGenericVirtualRegister(p0); MIRBuilder.buildGEP(AddrReg, SPReg, OffsetReg); MachinePointerInfo MPO = @@ -306,30 +306,30 @@ return AddrReg; } -void OutgoingValueHandler::assignValueToAddress(unsigned ValVReg, +void OutgoingValueHandler::assignValueToAddress(Register ValVReg, const CCValAssign &VA) { MachineMemOperand *MMO; - unsigned Addr = getStackAddress(VA, MMO); + Register Addr = getStackAddress(VA, MMO); unsigned ExtReg = extendRegister(ValVReg, VA); MIRBuilder.buildStore(ExtReg, Addr, *MMO); } -unsigned OutgoingValueHandler::extendRegister(unsigned ValReg, +unsigned OutgoingValueHandler::extendRegister(Register ValReg, const CCValAssign &VA) { LLT LocTy{VA.getLocVT()}; switch (VA.getLocInfo()) { case CCValAssign::SExt: { - unsigned ExtReg = MRI.createGenericVirtualRegister(LocTy); + Register ExtReg = MRI.createGenericVirtualRegister(LocTy); MIRBuilder.buildSExt(ExtReg, ValReg); return ExtReg; } case CCValAssign::ZExt: { - unsigned ExtReg = MRI.createGenericVirtualRegister(LocTy); + Register ExtReg = MRI.createGenericVirtualRegister(LocTy); MIRBuilder.buildZExt(ExtReg, ValReg); return ExtReg; } case CCValAssign::AExt: { - unsigned ExtReg = MRI.createGenericVirtualRegister(LocTy); + Register ExtReg = MRI.createGenericVirtualRegister(LocTy); 
MIRBuilder.buildAnyExt(ExtReg, ValReg); return ExtReg; } @@ -342,10 +342,10 @@ llvm_unreachable("unable to extend register"); } -bool OutgoingValueHandler::handleSplit(SmallVectorImpl &VRegs, +bool OutgoingValueHandler::handleSplit(SmallVectorImpl &VRegs, ArrayRef ArgLocs, unsigned ArgLocsStartIndex, - unsigned ArgsReg, const EVT &VT) { + Register ArgsReg, const EVT &VT) { MIRBuilder.buildUnmerge(VRegs, ArgsReg); setLeastSignificantFirst(VRegs); if (!assignVRegs(VRegs, ArgLocs, ArgLocsStartIndex, VT)) @@ -396,7 +396,7 @@ bool MipsCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val, - ArrayRef VRegs) const { + ArrayRef VRegs) const { MachineInstrBuilder Ret = MIRBuilder.buildInstrNoInsert(Mips::RetRA); @@ -444,7 +444,7 @@ bool MipsCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder, const Function &F, - ArrayRef VRegs) const { + ArrayRef VRegs) const { // Quick exit if there aren't any args. if (F.arg_empty()) Index: lib/Target/Mips/MipsRegisterInfo.h =================================================================== --- lib/Target/Mips/MipsRegisterInfo.h +++ lib/Target/Mips/MipsRegisterInfo.h @@ -69,7 +69,7 @@ bool canRealignStack(const MachineFunction &MF) const override; /// Debug information queries. - unsigned getFrameRegister(const MachineFunction &MF) const override; + Register getFrameRegister(const MachineFunction &MF) const override; /// Return GPR register class. 
virtual const TargetRegisterClass *intRegClass(unsigned Size) const = 0; Index: lib/Target/Mips/MipsRegisterInfo.cpp =================================================================== --- lib/Target/Mips/MipsRegisterInfo.cpp +++ lib/Target/Mips/MipsRegisterInfo.cpp @@ -277,7 +277,7 @@ eliminateFI(MI, FIOperandNum, FrameIndex, stackSize, spOffset); } -unsigned MipsRegisterInfo:: +Register MipsRegisterInfo:: getFrameRegister(const MachineFunction &MF) const { const MipsSubtarget &Subtarget = MF.getSubtarget(); const TargetFrameLowering *TFI = Subtarget.getFrameLowering(); Index: lib/Target/Mips/MipsSEISelLowering.cpp =================================================================== --- lib/Target/Mips/MipsSEISelLowering.cpp +++ lib/Target/Mips/MipsSEISelLowering.cpp @@ -3763,8 +3763,8 @@ const TargetInstrInfo *TII = Subtarget.getInstrInfo(); DebugLoc DL = MI.getDebugLoc(); - unsigned Fd = MI.getOperand(0).getReg(); - unsigned Ws = MI.getOperand(1).getReg(); + Register Fd = MI.getOperand(0).getReg(); + Register Ws = MI.getOperand(1).getReg(); MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo(); const TargetRegisterClass *GPRRC = @@ -3772,10 +3772,10 @@ unsigned MTC1Opc = IsFGR64onMips64 ? Mips::DMTC1 : (IsFGR64onMips32 ? Mips::MTC1_D64 : Mips::MTC1); - unsigned COPYOpc = IsFGR64onMips64 ? Mips::COPY_S_D : Mips::COPY_S_W; + Register COPYOpc = IsFGR64onMips64 ? Mips::COPY_S_D : Mips::COPY_S_W; - unsigned Wtemp = RegInfo.createVirtualRegister(&Mips::MSA128WRegClass); - unsigned WPHI = Wtemp; + Register Wtemp = RegInfo.createVirtualRegister(&Mips::MSA128WRegClass); + Register WPHI = Wtemp; BuildMI(*BB, MI, DL, TII->get(Mips::FEXUPR_W), Wtemp).addReg(Ws); if (IsFGR64) { @@ -3784,15 +3784,15 @@ } // Perform the safety regclass copy mentioned above. - unsigned Rtemp = RegInfo.createVirtualRegister(GPRRC); - unsigned FPRPHI = IsFGR64onMips32 + Register Rtemp = RegInfo.createVirtualRegister(GPRRC); + Register FPRPHI = IsFGR64onMips32 ? 
RegInfo.createVirtualRegister(&Mips::FGR64RegClass) : Fd; BuildMI(*BB, MI, DL, TII->get(COPYOpc), Rtemp).addReg(WPHI).addImm(0); BuildMI(*BB, MI, DL, TII->get(MTC1Opc), FPRPHI).addReg(Rtemp); if (IsFGR64onMips32) { - unsigned Rtemp2 = RegInfo.createVirtualRegister(GPRRC); + Register Rtemp2 = RegInfo.createVirtualRegister(GPRRC); BuildMI(*BB, MI, DL, TII->get(Mips::COPY_S_W), Rtemp2) .addReg(WPHI) .addImm(1); Index: lib/Target/NVPTX/NVPTXRegisterInfo.h =================================================================== --- lib/Target/NVPTX/NVPTXRegisterInfo.h +++ lib/Target/NVPTX/NVPTXRegisterInfo.h @@ -42,7 +42,7 @@ unsigned FIOperandNum, RegScavenger *RS = nullptr) const override; - unsigned getFrameRegister(const MachineFunction &MF) const override; + Register getFrameRegister(const MachineFunction &MF) const override; ManagedStringPool *getStrPool() const { return const_cast(&ManagedStrPool); Index: lib/Target/NVPTX/NVPTXRegisterInfo.cpp =================================================================== --- lib/Target/NVPTX/NVPTXRegisterInfo.cpp +++ lib/Target/NVPTX/NVPTXRegisterInfo.cpp @@ -126,6 +126,6 @@ MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset); } -unsigned NVPTXRegisterInfo::getFrameRegister(const MachineFunction &MF) const { +Register NVPTXRegisterInfo::getFrameRegister(const MachineFunction &MF) const { return NVPTX::VRFrame; } Index: lib/Target/PowerPC/PPCISelLowering.cpp =================================================================== --- lib/Target/PowerPC/PPCISelLowering.cpp +++ lib/Target/PowerPC/PPCISelLowering.cpp @@ -2448,7 +2448,7 @@ /// Returns true if we should use a direct load into vector instruction /// (such as lxsd or lfd), instead of a load into gpr + direct move sequence. static bool usePartialVectorLoads(SDNode *N, const PPCSubtarget& ST) { - + // If there are any other uses other than scalar to vector, then we should // keep it as a scalar load -> direct move pattern to prevent multiple // loads. 
@@ -5109,7 +5109,7 @@ // We cannot add R2/X2 as an operand here for PATCHPOINT, because there is // no way to mark dependencies as implicit here. // We will add the R2/X2 dependency in EmitInstrWithCustomInserter. - if (!isPatchPoint) + if (!isPatchPoint) Ops.push_back(DAG.getRegister(isPPC64 ? PPC::X2 : PPC::R2, PtrVT)); } @@ -7087,7 +7087,7 @@ // undefined): // < MSB1|LSB1, MSB2|LSB2, uu, uu, uu, uu, uu, uu> to // < LSB1, LSB2, u, u, u, u, u, u, u, u, u, u, u, u, u, u> - // + // // The same operation in little-endian ordering will be: // to // @@ -9839,7 +9839,7 @@ BifID = Intrinsic::ppc_altivec_vmaxsh; else if (VT == MVT::v16i8) BifID = Intrinsic::ppc_altivec_vmaxsb; - + return BuildIntrinsicOp(BifID, X, Y, DAG, dl, VT); } @@ -10119,10 +10119,10 @@ MachineFunction *F = BB->getParent(); MachineFunction::iterator It = ++BB->getIterator(); - unsigned dest = MI.getOperand(0).getReg(); - unsigned ptrA = MI.getOperand(1).getReg(); - unsigned ptrB = MI.getOperand(2).getReg(); - unsigned incr = MI.getOperand(3).getReg(); + Register dest = MI.getOperand(0).getReg(); + Register ptrA = MI.getOperand(1).getReg(); + Register ptrB = MI.getOperand(2).getReg(); + Register incr = MI.getOperand(3).getReg(); DebugLoc dl = MI.getDebugLoc(); MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB); @@ -10138,7 +10138,7 @@ exitMBB->transferSuccessorsAndUpdatePHIs(BB); MachineRegisterInfo &RegInfo = F->getRegInfo(); - unsigned TmpReg = (!BinOpcode) ? incr : + Register TmpReg = (!BinOpcode) ? incr : RegInfo.createVirtualRegister( AtomicSize == 8 ? &PPC::G8RCRegClass : &PPC::GPRCRegClass); @@ -10246,20 +10246,20 @@ is64bit ? 
&PPC::G8RCRegClass : &PPC::GPRCRegClass; const TargetRegisterClass *GPRC = &PPC::GPRCRegClass; - unsigned PtrReg = RegInfo.createVirtualRegister(RC); - unsigned Shift1Reg = RegInfo.createVirtualRegister(GPRC); - unsigned ShiftReg = + Register PtrReg = RegInfo.createVirtualRegister(RC); + Register Shift1Reg = RegInfo.createVirtualRegister(GPRC); + Register ShiftReg = isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(GPRC); - unsigned Incr2Reg = RegInfo.createVirtualRegister(GPRC); - unsigned MaskReg = RegInfo.createVirtualRegister(GPRC); - unsigned Mask2Reg = RegInfo.createVirtualRegister(GPRC); - unsigned Mask3Reg = RegInfo.createVirtualRegister(GPRC); - unsigned Tmp2Reg = RegInfo.createVirtualRegister(GPRC); - unsigned Tmp3Reg = RegInfo.createVirtualRegister(GPRC); - unsigned Tmp4Reg = RegInfo.createVirtualRegister(GPRC); - unsigned TmpDestReg = RegInfo.createVirtualRegister(GPRC); - unsigned Ptr1Reg; - unsigned TmpReg = + Register Incr2Reg = RegInfo.createVirtualRegister(GPRC); + Register MaskReg = RegInfo.createVirtualRegister(GPRC); + Register Mask2Reg = RegInfo.createVirtualRegister(GPRC); + Register Mask3Reg = RegInfo.createVirtualRegister(GPRC); + Register Tmp2Reg = RegInfo.createVirtualRegister(GPRC); + Register Tmp3Reg = RegInfo.createVirtualRegister(GPRC); + Register Tmp4Reg = RegInfo.createVirtualRegister(GPRC); + Register TmpDestReg = RegInfo.createVirtualRegister(GPRC); + Register Ptr1Reg; + Register TmpReg = (!BinOpcode) ? Incr2Reg : RegInfo.createVirtualRegister(GPRC); // thisMBB: @@ -11061,23 +11061,23 @@ is64bit ? &PPC::G8RCRegClass : &PPC::GPRCRegClass; const TargetRegisterClass *GPRC = &PPC::GPRCRegClass; - unsigned PtrReg = RegInfo.createVirtualRegister(RC); - unsigned Shift1Reg = RegInfo.createVirtualRegister(GPRC); - unsigned ShiftReg = + Register PtrReg = RegInfo.createVirtualRegister(RC); + Register Shift1Reg = RegInfo.createVirtualRegister(GPRC); + Register ShiftReg = isLittleEndian ? 
Shift1Reg : RegInfo.createVirtualRegister(GPRC); - unsigned NewVal2Reg = RegInfo.createVirtualRegister(GPRC); - unsigned NewVal3Reg = RegInfo.createVirtualRegister(GPRC); - unsigned OldVal2Reg = RegInfo.createVirtualRegister(GPRC); - unsigned OldVal3Reg = RegInfo.createVirtualRegister(GPRC); - unsigned MaskReg = RegInfo.createVirtualRegister(GPRC); - unsigned Mask2Reg = RegInfo.createVirtualRegister(GPRC); - unsigned Mask3Reg = RegInfo.createVirtualRegister(GPRC); - unsigned Tmp2Reg = RegInfo.createVirtualRegister(GPRC); - unsigned Tmp4Reg = RegInfo.createVirtualRegister(GPRC); - unsigned TmpDestReg = RegInfo.createVirtualRegister(GPRC); - unsigned Ptr1Reg; - unsigned TmpReg = RegInfo.createVirtualRegister(GPRC); - unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO; + Register NewVal2Reg = RegInfo.createVirtualRegister(GPRC); + Register NewVal3Reg = RegInfo.createVirtualRegister(GPRC); + Register OldVal2Reg = RegInfo.createVirtualRegister(GPRC); + Register OldVal3Reg = RegInfo.createVirtualRegister(GPRC); + Register MaskReg = RegInfo.createVirtualRegister(GPRC); + Register Mask2Reg = RegInfo.createVirtualRegister(GPRC); + Register Mask3Reg = RegInfo.createVirtualRegister(GPRC); + Register Tmp2Reg = RegInfo.createVirtualRegister(GPRC); + Register Tmp4Reg = RegInfo.createVirtualRegister(GPRC); + Register TmpDestReg = RegInfo.createVirtualRegister(GPRC); + Register Ptr1Reg; + Register TmpReg = RegInfo.createVirtualRegister(GPRC); + Register ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO; // thisMBB: // ... // fallthrough --> loopMBB @@ -11273,7 +11273,7 @@ // Save FPSCR value. 
BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), OldFPSCRReg); - // The floating point rounding mode is in the bits 62:63 of FPCSR, and has + // The floating point rounding mode is in the bits 62:63 of FPCSR, and has // the following settings: // 00 Round to nearest // 01 Round to 0 @@ -11293,7 +11293,7 @@ // Copy register from F8RCRegClass::SrcReg to G8RCRegClass::DestReg // or copy register from G8RCRegClass::SrcReg to F8RCRegClass::DestReg. - // If the target doesn't have DirectMove, we should use stack to do the + // If the target doesn't have DirectMove, we should use stack to do the // conversion, because the target doesn't have the instructions like mtvsrd // or mfvsrd to do this conversion directly. auto copyRegFromG8RCOrF8RC = [&] (unsigned DestReg, unsigned SrcReg) { @@ -11339,8 +11339,8 @@ MachineMemOperand::MOLoad, MFI.getObjectSize(FrameIdx), MFI.getObjectAlignment(FrameIdx)); - // Load from the stack where SrcReg is stored, and save to DestReg, - // so we have done the RegClass conversion from RegClass::SrcReg to + // Load from the stack where SrcReg is stored, and save to DestReg, + // so we have done the RegClass conversion from RegClass::SrcReg to // RegClass::DestReg. BuildMI(*BB, MI, dl, TII->get(LoadOp), DestReg) .addImm(0) @@ -11350,14 +11350,14 @@ }; unsigned OldFPSCRReg = MI.getOperand(0).getReg(); - + // Save FPSCR value. BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), OldFPSCRReg); // When the operand is gprc register, use two least significant bits of the - // register and mtfsf instruction to set the bits 62:63 of FPSCR. - // - // copy OldFPSCRTmpReg, OldFPSCRReg + // register and mtfsf instruction to set the bits 62:63 of FPSCR. 
+ // + // copy OldFPSCRTmpReg, OldFPSCRReg // (INSERT_SUBREG ExtSrcReg, (IMPLICIT_DEF ImDefReg), SrcOp, 1) // rldimi NewFPSCRTmpReg, ExtSrcReg, OldFPSCRReg, 0, 62 // copy NewFPSCRReg, NewFPSCRTmpReg @@ -11367,7 +11367,7 @@ unsigned OldFPSCRTmpReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass); copyRegFromG8RCOrF8RC(OldFPSCRTmpReg, OldFPSCRReg); - + unsigned ImDefReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass); unsigned ExtSrcReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass); @@ -13791,9 +13791,9 @@ } case ISD::BUILD_VECTOR: return DAGCombineBuildVector(N, DCI); - case ISD::ABS: + case ISD::ABS: return combineABS(N, DCI); - case ISD::VSELECT: + case ISD::VSELECT: return combineVSelect(N, DCI); } @@ -13891,10 +13891,10 @@ if (!DisableInnermostLoopAlign32) { // If the nested loop is an innermost loop, prefer to a 32-byte alignment, - // so that we can decrease cache misses and branch-prediction misses. + // so that we can decrease cache misses and branch-prediction misses. // Actual alignment of the loop will depend on the hotness check and other // logic in alignBlocks. - if (ML->getLoopDepth() > 1 && ML->getSubLoops().empty()) + if (ML->getLoopDepth() > 1 && ML->getSubLoops().empty()) return 5; } @@ -14310,7 +14310,7 @@ if (CModel == CodeModel::Small || CModel == CodeModel::Large) return true; - // JumpTable and BlockAddress are accessed as got-indirect. + // JumpTable and BlockAddress are accessed as got-indirect. 
if (isa(GA) || isa(GA)) return true; Index: lib/Target/PowerPC/PPCInstrInfo.cpp =================================================================== --- lib/Target/PowerPC/PPCInstrInfo.cpp +++ lib/Target/PowerPC/PPCInstrInfo.cpp @@ -391,9 +391,9 @@ // Swap op1/op2 assert(((OpIdx1 == 1 && OpIdx2 == 2) || (OpIdx1 == 2 && OpIdx2 == 1)) && "Only the operands 1 and 2 can be swapped in RLSIMI/RLWIMIo."); - unsigned Reg0 = MI.getOperand(0).getReg(); - unsigned Reg1 = MI.getOperand(1).getReg(); - unsigned Reg2 = MI.getOperand(2).getReg(); + Register Reg0 = MI.getOperand(0).getReg(); + Register Reg1 = MI.getOperand(1).getReg(); + Register Reg2 = MI.getOperand(2).getReg(); unsigned SubReg1 = MI.getOperand(1).getSubReg(); unsigned SubReg2 = MI.getOperand(2).getSubReg(); bool Reg1IsKill = MI.getOperand(1).isKill(); @@ -421,7 +421,7 @@ if (NewMI) { // Create a new instruction. - unsigned Reg0 = ChangeReg0 ? Reg2 : MI.getOperand(0).getReg(); + Register Reg0 = ChangeReg0 ? Reg2 : MI.getOperand(0).getReg(); bool Reg0IsDead = MI.getOperand(0).isDead(); return BuildMI(MF, MI.getDebugLoc(), MI.getDesc()) .addReg(Reg0, RegState::Define | getDeadRegState(Reg0IsDead)) @@ -2400,7 +2400,7 @@ return &*It; } break; - } else if (It->readsRegister(Reg, &getRegisterInfo())) + } else if (It->readsRegister(Reg, &getRegisterInfo())) // If we see another use of this reg between the def and the MI, // we want to flat it so the def isn't deleted. SeenIntermediateUse = true; @@ -3218,7 +3218,7 @@ } } -// Check if the 'MI' that has the index OpNoForForwarding +// Check if the 'MI' that has the index OpNoForForwarding // meets the requirement described in the ImmInstrInfo. 
bool PPCInstrInfo::isUseMIElgibleForForwarding(MachineInstr &MI, const ImmInstrInfo &III, @@ -3264,7 +3264,7 @@ MachineOperand *&RegMO) const { unsigned Opc = DefMI.getOpcode(); if (Opc != PPC::ADDItocL && Opc != PPC::ADDI && Opc != PPC::ADDI8) - return false; + return false; assert(DefMI.getNumOperands() >= 3 && "Add inst must have at least three operands"); @@ -3436,7 +3436,7 @@ // Otherwise, it is Constant Pool Index(CPI) or Global, // which is relocation in fact. We need to replace the special zero // register with ImmMO. - // Before that, we need to fixup the target flags for imm. + // Before that, we need to fixup the target flags for imm. // For some reason, we miss to set the flag for the ImmMO if it is CPI. if (DefMI.getOpcode() == PPC::ADDItocL) ImmMO->setTargetFlags(PPCII::MO_TOC_LO); Index: lib/Target/PowerPC/PPCRegisterInfo.h =================================================================== --- lib/Target/PowerPC/PPCRegisterInfo.h +++ lib/Target/PowerPC/PPCRegisterInfo.h @@ -132,10 +132,10 @@ int64_t Offset) const override; // Debug information queries. - unsigned getFrameRegister(const MachineFunction &MF) const override; + Register getFrameRegister(const MachineFunction &MF) const override; // Base pointer (stack realignment) support. 
- unsigned getBaseRegister(const MachineFunction &MF) const; + Register getBaseRegister(const MachineFunction &MF) const; bool hasBasePointer(const MachineFunction &MF) const; /// stripRegisterPrefix - This method strips the character prefix from a Index: lib/Target/PowerPC/PPCRegisterInfo.cpp =================================================================== --- lib/Target/PowerPC/PPCRegisterInfo.cpp +++ lib/Target/PowerPC/PPCRegisterInfo.cpp @@ -1114,7 +1114,7 @@ MI.getOperand(OperandBase + 1).ChangeToRegister(SReg, false, false, true); } -unsigned PPCRegisterInfo::getFrameRegister(const MachineFunction &MF) const { +Register PPCRegisterInfo::getFrameRegister(const MachineFunction &MF) const { const PPCFrameLowering *TFI = getFrameLowering(MF); if (!TM.isPPC64()) @@ -1123,7 +1123,7 @@ return TFI->hasFP(MF) ? PPC::X31 : PPC::X1; } -unsigned PPCRegisterInfo::getBaseRegister(const MachineFunction &MF) const { +Register PPCRegisterInfo::getBaseRegister(const MachineFunction &MF) const { const PPCSubtarget &Subtarget = MF.getSubtarget(); if (!hasBasePointer(MF)) return getFrameRegister(MF); Index: lib/Target/Sparc/SparcRegisterInfo.h =================================================================== --- lib/Target/Sparc/SparcRegisterInfo.h +++ lib/Target/Sparc/SparcRegisterInfo.h @@ -38,7 +38,7 @@ int SPAdj, unsigned FIOperandNum, RegScavenger *RS = nullptr) const override; - unsigned getFrameRegister(const MachineFunction &MF) const override; + Register getFrameRegister(const MachineFunction &MF) const override; bool canRealignStack(const MachineFunction &MF) const override; Index: lib/Target/Sparc/SparcRegisterInfo.cpp =================================================================== --- lib/Target/Sparc/SparcRegisterInfo.cpp +++ lib/Target/Sparc/SparcRegisterInfo.cpp @@ -212,7 +212,7 @@ } -unsigned SparcRegisterInfo::getFrameRegister(const MachineFunction &MF) const { +Register SparcRegisterInfo::getFrameRegister(const MachineFunction &MF) const { return 
SP::I6; } Index: lib/Target/SystemZ/SystemZElimCompare.cpp =================================================================== --- lib/Target/SystemZ/SystemZElimCompare.cpp +++ lib/Target/SystemZ/SystemZElimCompare.cpp @@ -525,9 +525,9 @@ // SrcReg2 is the register if the source operand is a register, // 0 if the source operand is immediate, and the base register // if the source operand is memory (index is not supported). - unsigned SrcReg = Compare.getOperand(0).getReg(); - unsigned SrcReg2 = - Compare.getOperand(1).isReg() ? Compare.getOperand(1).getReg() : 0; + Register SrcReg = Compare.getOperand(0).getReg(); + Register SrcReg2 = + Compare.getOperand(1).isReg() ? Compare.getOperand(1).getReg() : Register(); MachineBasicBlock::iterator MBBI = Compare, MBBE = Branch; for (++MBBI; MBBI != MBBE; ++MBBI) if (MBBI->modifiesRegister(SrcReg, TRI) || Index: lib/Target/SystemZ/SystemZISelLowering.cpp =================================================================== --- lib/Target/SystemZ/SystemZISelLowering.cpp +++ lib/Target/SystemZ/SystemZISelLowering.cpp @@ -6249,7 +6249,7 @@ } // Force base value Base into a register before MI. Return the register. -static unsigned forceReg(MachineInstr &MI, MachineOperand &Base, +static Register forceReg(MachineInstr &MI, MachineOperand &Base, const SystemZInstrInfo *TII) { if (Base.isReg()) return Base.getReg(); @@ -6258,7 +6258,7 @@ MachineFunction &MF = *MBB->getParent(); MachineRegisterInfo &MRI = MF.getRegInfo(); - unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass); + Register Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass); BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(SystemZ::LA), Reg) .add(Base) .addImm(0) @@ -6542,8 +6542,8 @@ MachineOperand Base = earlyUseOperand(MI.getOperand(1)); int64_t Disp = MI.getOperand(2).getImm(); MachineOperand Src2 = earlyUseOperand(MI.getOperand(3)); - unsigned BitShift = (IsSubWord ? MI.getOperand(4).getReg() : 0); - unsigned NegBitShift = (IsSubWord ? 
MI.getOperand(5).getReg() : 0); + Register BitShift = IsSubWord ? MI.getOperand(4).getReg() : Register(); + Register NegBitShift = IsSubWord ? MI.getOperand(5).getReg() : Register(); DebugLoc DL = MI.getDebugLoc(); if (IsSubWord) BitSize = MI.getOperand(6).getImm(); @@ -6561,12 +6561,12 @@ assert(LOpcode && CSOpcode && "Displacement out of range"); // Create virtual registers for temporary results. - unsigned OrigVal = MRI.createVirtualRegister(RC); - unsigned OldVal = MRI.createVirtualRegister(RC); - unsigned NewVal = (BinOpcode || IsSubWord ? + Register OrigVal = MRI.createVirtualRegister(RC); + Register OldVal = MRI.createVirtualRegister(RC); + Register NewVal = (BinOpcode || IsSubWord ? MRI.createVirtualRegister(RC) : Src2.getReg()); - unsigned RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal); - unsigned RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal); + Register RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal); + Register RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal); // Insert a basic block for the main loop. MachineBasicBlock *StartMBB = MBB; @@ -6659,9 +6659,9 @@ unsigned Dest = MI.getOperand(0).getReg(); MachineOperand Base = earlyUseOperand(MI.getOperand(1)); int64_t Disp = MI.getOperand(2).getImm(); - unsigned Src2 = MI.getOperand(3).getReg(); - unsigned BitShift = (IsSubWord ? MI.getOperand(4).getReg() : 0); - unsigned NegBitShift = (IsSubWord ? MI.getOperand(5).getReg() : 0); + Register Src2 = MI.getOperand(3).getReg(); + Register BitShift = (IsSubWord ? MI.getOperand(4).getReg() : Register()); + Register NegBitShift = (IsSubWord ? MI.getOperand(5).getReg() : Register()); DebugLoc DL = MI.getDebugLoc(); if (IsSubWord) BitSize = MI.getOperand(6).getImm(); @@ -6679,12 +6679,12 @@ assert(LOpcode && CSOpcode && "Displacement out of range"); // Create virtual registers for temporary results. 
- unsigned OrigVal = MRI.createVirtualRegister(RC); - unsigned OldVal = MRI.createVirtualRegister(RC); - unsigned NewVal = MRI.createVirtualRegister(RC); - unsigned RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal); - unsigned RotatedAltVal = (IsSubWord ? MRI.createVirtualRegister(RC) : Src2); - unsigned RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal); + Register OrigVal = MRI.createVirtualRegister(RC); + Register OldVal = MRI.createVirtualRegister(RC); + Register NewVal = MRI.createVirtualRegister(RC); + Register RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal); + Register RotatedAltVal = (IsSubWord ? MRI.createVirtualRegister(RC) : Src2); + Register RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal); // Insert 3 basic blocks for the loop. MachineBasicBlock *StartMBB = MBB; @@ -6967,22 +6967,22 @@ if (MI.getNumExplicitOperands() > 5) { bool HaveSingleBase = DestBase.isIdenticalTo(SrcBase); - uint64_t StartCountReg = MI.getOperand(5).getReg(); - uint64_t StartSrcReg = forceReg(MI, SrcBase, TII); - uint64_t StartDestReg = (HaveSingleBase ? StartSrcReg : + Register StartCountReg = MI.getOperand(5).getReg(); + Register StartSrcReg = forceReg(MI, SrcBase, TII); + Register StartDestReg = (HaveSingleBase ? StartSrcReg : forceReg(MI, DestBase, TII)); const TargetRegisterClass *RC = &SystemZ::ADDR64BitRegClass; - uint64_t ThisSrcReg = MRI.createVirtualRegister(RC); - uint64_t ThisDestReg = (HaveSingleBase ? ThisSrcReg : + Register ThisSrcReg = MRI.createVirtualRegister(RC); + Register ThisDestReg = (HaveSingleBase ? ThisSrcReg : MRI.createVirtualRegister(RC)); - uint64_t NextSrcReg = MRI.createVirtualRegister(RC); - uint64_t NextDestReg = (HaveSingleBase ? NextSrcReg : + Register NextSrcReg = MRI.createVirtualRegister(RC); + Register NextDestReg = (HaveSingleBase ? 
NextSrcReg : MRI.createVirtualRegister(RC)); RC = &SystemZ::GR64BitRegClass; - uint64_t ThisCountReg = MRI.createVirtualRegister(RC); - uint64_t NextCountReg = MRI.createVirtualRegister(RC); + Register ThisCountReg = MRI.createVirtualRegister(RC); + Register NextCountReg = MRI.createVirtualRegister(RC); MachineBasicBlock *StartMBB = MBB; MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB); Index: lib/Target/SystemZ/SystemZInstrInfo.cpp =================================================================== --- lib/Target/SystemZ/SystemZInstrInfo.cpp +++ lib/Target/SystemZ/SystemZInstrInfo.cpp @@ -1177,13 +1177,13 @@ MemOpcode = -1; else { assert(NumOps == 3 && "Expected two source registers."); - unsigned DstReg = MI.getOperand(0).getReg(); - unsigned DstPhys = + Register DstReg = MI.getOperand(0).getReg(); + Register DstPhys = (TRI->isVirtualRegister(DstReg) ? VRM->getPhys(DstReg) : DstReg); - unsigned SrcReg = (OpNum == 2 ? MI.getOperand(1).getReg() + Register SrcReg = (OpNum == 2 ? MI.getOperand(1).getReg() : ((OpNum == 1 && MI.isCommutable()) ? 
MI.getOperand(2).getReg() - : 0)); + : Register())); if (DstPhys && !SystemZ::GRH32BitRegClass.contains(DstPhys) && SrcReg && TRI->isVirtualRegister(SrcReg) && DstPhys == VRM->getPhys(SrcReg)) NeedsCommute = (OpNum == 1); Index: lib/Target/SystemZ/SystemZRegisterInfo.h =================================================================== --- lib/Target/SystemZ/SystemZRegisterInfo.h +++ lib/Target/SystemZ/SystemZRegisterInfo.h @@ -83,7 +83,7 @@ const TargetRegisterClass *NewRC, LiveIntervals &LIS) const override; - unsigned getFrameRegister(const MachineFunction &MF) const override; + Register getFrameRegister(const MachineFunction &MF) const override; }; } // end namespace llvm Index: lib/Target/SystemZ/SystemZRegisterInfo.cpp =================================================================== --- lib/Target/SystemZ/SystemZRegisterInfo.cpp +++ lib/Target/SystemZ/SystemZRegisterInfo.cpp @@ -164,8 +164,8 @@ continue; auto tryAddHint = [&](const MachineOperand *MO) -> void { - unsigned Reg = MO->getReg(); - unsigned PhysReg = isPhysicalRegister(Reg) ? Reg : VRM->getPhys(Reg); + Register Reg = MO->getReg(); + Register PhysReg = isPhysicalRegister(Reg) ? Reg : VRM->getPhys(Reg); if (PhysReg) { if (MO->getSubReg()) PhysReg = getSubReg(PhysReg, MO->getSubReg()); @@ -399,7 +399,7 @@ return true; } -unsigned +Register SystemZRegisterInfo::getFrameRegister(const MachineFunction &MF) const { const SystemZFrameLowering *TFI = getFrameLowering(MF); return TFI->hasFP(MF) ? SystemZ::R11D : SystemZ::R15D; Index: lib/Target/WebAssembly/WebAssemblyRegisterInfo.h =================================================================== --- lib/Target/WebAssembly/WebAssemblyRegisterInfo.h +++ lib/Target/WebAssembly/WebAssemblyRegisterInfo.h @@ -39,7 +39,7 @@ RegScavenger *RS = nullptr) const override; // Debug information queries. 
- unsigned getFrameRegister(const MachineFunction &MF) const override; + Register getFrameRegister(const MachineFunction &MF) const override; const TargetRegisterClass * getPointerRegClass(const MachineFunction &MF, Index: lib/Target/WebAssembly/WebAssemblyRegisterInfo.cpp =================================================================== --- lib/Target/WebAssembly/WebAssemblyRegisterInfo.cpp +++ lib/Target/WebAssembly/WebAssemblyRegisterInfo.cpp @@ -66,7 +66,7 @@ assert(MFI.getObjectSize(FrameIndex) != 0 && "We assume that variable-sized objects have already been lowered, " "and don't use FrameIndex operands."); - unsigned FrameRegister = getFrameRegister(MF); + Register FrameRegister = getFrameRegister(MF); // If this is the address operand of a load or store, make it relative to SP // and fold the frame offset directly in. @@ -130,7 +130,7 @@ MI.getOperand(FIOperandNum).ChangeToRegister(FIRegOperand, /*IsDef=*/false); } -unsigned +Register WebAssemblyRegisterInfo::getFrameRegister(const MachineFunction &MF) const { static const unsigned Regs[2][2] = { /* !isArch64Bit isArch64Bit */ Index: lib/Target/X86/X86CallLowering.h =================================================================== --- lib/Target/X86/X86CallLowering.h +++ lib/Target/X86/X86CallLowering.h @@ -29,10 +29,10 @@ X86CallLowering(const X86TargetLowering &TLI); bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val, - ArrayRef<unsigned> VRegs) const override; + ArrayRef<Register> VRegs) const override; bool lowerFormalArguments(MachineIRBuilder &MIRBuilder, const Function &F, - ArrayRef<unsigned> VRegs) const override; + ArrayRef<Register> VRegs) const override; bool lowerCall(MachineIRBuilder &MIRBuilder, CallingConv::ID CallConv, const MachineOperand &Callee, const ArgInfo &OrigRet, @@ -40,7 +40,7 @@ private: /// A function of this type is used to perform value split action. 
- using SplitArgTy = std::function<void(ArrayRef<unsigned>)>; + using SplitArgTy = std::function<void(ArrayRef<Register>)>; bool splitToValueTypes(const ArgInfo &OrigArgInfo, SmallVectorImpl<ArgInfo> &SplitArgs, Index: lib/Target/X86/X86CallLowering.cpp =================================================================== --- lib/Target/X86/X86CallLowering.cpp +++ lib/Target/X86/X86CallLowering.cpp @@ -75,7 +75,7 @@ return true; } - SmallVector<unsigned, 8> SplitRegs; + SmallVector<Register, 8> SplitRegs; EVT PartVT = TLI.getRegisterType(Context, VT); Type *PartTy = PartVT.getTypeForEVT(Context); @@ -182,7 +182,7 @@ bool X86CallLowering::lowerReturn( MachineIRBuilder &MIRBuilder, const Value *Val, - ArrayRef<unsigned> VRegs) const { + ArrayRef<Register> VRegs) const { assert(((Val && !VRegs.empty()) || (!Val && VRegs.empty())) && "Return value without a vreg"); auto MIB = MIRBuilder.buildInstrNoInsert(X86::RET).addImm(0); @@ -205,7 +205,7 @@ ArgInfo CurArgInfo = ArgInfo{VRegs[i], SplitEVTs[i].getTypeForEVT(Ctx)}; setArgFlags(CurArgInfo, AttributeList::ReturnIndex, DL, F); if (!splitToValueTypes(CurArgInfo, SplitArgs, DL, MRI, - [&](ArrayRef<unsigned> Regs) { + [&](ArrayRef<Register> Regs) { MIRBuilder.buildUnmerge(Regs, VRegs[i]); })) return false; @@ -321,7 +321,7 @@ bool X86CallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder, const Function &F, - ArrayRef<unsigned> VRegs) const { + ArrayRef<Register> VRegs) const { if (F.arg_empty()) return true; @@ -349,7 +349,7 @@ ArgInfo OrigArg(VRegs[Idx], Arg.getType()); setArgFlags(OrigArg, Idx + AttributeList::FirstArgIndex, DL, F); if (!splitToValueTypes(OrigArg, SplitArgs, DL, MRI, - [&](ArrayRef<unsigned> Regs) { + [&](ArrayRef<Register> Regs) { MIRBuilder.buildMerge(VRegs[Idx], Regs); })) return false; @@ -409,7 +409,7 @@ return false; if (!splitToValueTypes(OrigArg, SplitArgs, DL, MRI, - [&](ArrayRef<unsigned> Regs) { + [&](ArrayRef<Register> Regs) { MIRBuilder.buildUnmerge(Regs, OrigArg.Reg); })) return false; @@ -452,10 +452,10 @@ if (OrigRet.Reg) { SplitArgs.clear(); - SmallVector<unsigned, 8> NewRegs; + SmallVector<Register, 8> NewRegs; if (!splitToValueTypes(OrigRet, SplitArgs, DL, MRI, - [&](ArrayRef<unsigned> Regs) 
{ + [&](ArrayRef<Register> Regs) { NewRegs.assign(Regs.begin(), Regs.end()); })) return false; Index: lib/Target/X86/X86FrameLowering.cpp =================================================================== --- lib/Target/X86/X86FrameLowering.cpp +++ lib/Target/X86/X86FrameLowering.cpp @@ -584,23 +584,23 @@ // registers. For the prolog expansion we use RAX, RCX and RDX. MachineRegisterInfo &MRI = MF.getRegInfo(); const TargetRegisterClass *RegClass = &X86::GR64RegClass; - const unsigned SizeReg = InProlog ? (unsigned)X86::RAX + const Register SizeReg = InProlog ? X86::RAX : MRI.createVirtualRegister(RegClass), - ZeroReg = InProlog ? (unsigned)X86::RCX + ZeroReg = InProlog ? X86::RCX : MRI.createVirtualRegister(RegClass), - CopyReg = InProlog ? (unsigned)X86::RDX + CopyReg = InProlog ? X86::RDX : MRI.createVirtualRegister(RegClass), - TestReg = InProlog ? (unsigned)X86::RDX + TestReg = InProlog ? X86::RDX : MRI.createVirtualRegister(RegClass), - FinalReg = InProlog ? (unsigned)X86::RDX + FinalReg = InProlog ? X86::RDX : MRI.createVirtualRegister(RegClass), - RoundedReg = InProlog ? (unsigned)X86::RDX + RoundedReg = InProlog ? X86::RDX : MRI.createVirtualRegister(RegClass), - LimitReg = InProlog ? (unsigned)X86::RCX + LimitReg = InProlog ? X86::RCX : MRI.createVirtualRegister(RegClass), - JoinReg = InProlog ? (unsigned)X86::RCX + JoinReg = InProlog ? X86::RCX : MRI.createVirtualRegister(RegClass), - ProbeReg = InProlog ? (unsigned)X86::RCX + ProbeReg = InProlog ? X86::RCX : MRI.createVirtualRegister(RegClass); // SP-relative offsets where we can save RCX and RDX. @@ -874,7 +874,7 @@ bool X86FrameLowering::has128ByteRedZone(const MachineFunction& MF) const { // x86-64 (non Win64) has a 128 byte red zone which is guaranteed not to be - // clobbered by any interrupt handler. + // clobbered by any interrupt handler. 
assert(&STI == &MF.getSubtarget<X86Subtarget>() && "MF used frame lowering for wrong subtarget"); const Function &Fn = MF.getFunction(); Index: lib/Target/X86/X86InstrInfo.cpp =================================================================== --- lib/Target/X86/X86InstrInfo.cpp +++ lib/Target/X86/X86InstrInfo.cpp @@ -4744,9 +4744,9 @@ unsigned CommuteOpIdx1 = OpNum, CommuteOpIdx2 = CommuteAnyOperandIndex; if (findCommutedOpIndices(MI, CommuteOpIdx1, CommuteOpIdx2)) { bool HasDef = MI.getDesc().getNumDefs(); - unsigned Reg0 = HasDef ? MI.getOperand(0).getReg() : 0; - unsigned Reg1 = MI.getOperand(CommuteOpIdx1).getReg(); - unsigned Reg2 = MI.getOperand(CommuteOpIdx2).getReg(); + Register Reg0 = HasDef ? MI.getOperand(0).getReg() : Register(); + Register Reg1 = MI.getOperand(CommuteOpIdx1).getReg(); + Register Reg2 = MI.getOperand(CommuteOpIdx2).getReg(); bool Tied1 = 0 == MI.getDesc().getOperandConstraint(CommuteOpIdx1, MCOI::TIED_TO); bool Tied2 = Index: lib/Target/X86/X86RegisterInfo.h =================================================================== --- lib/Target/X86/X86RegisterInfo.h +++ lib/Target/X86/X86RegisterInfo.h @@ -133,7 +133,7 @@ RegScavenger *RS = nullptr) const override; // Debug information queries. 
- unsigned getFrameRegister(const MachineFunction &MF) const override; + Register getFrameRegister(const MachineFunction &MF) const override; unsigned getPtrSizedFrameRegister(const MachineFunction &MF) const; unsigned getPtrSizedStackRegister(const MachineFunction &MF) const; unsigned getStackRegister() const { return StackPtr; } Index: lib/Target/X86/X86RegisterInfo.cpp =================================================================== --- lib/Target/X86/X86RegisterInfo.cpp +++ lib/Target/X86/X86RegisterInfo.cpp @@ -765,7 +765,7 @@ } } -unsigned X86RegisterInfo::getFrameRegister(const MachineFunction &MF) const { +Register X86RegisterInfo::getFrameRegister(const MachineFunction &MF) const { const X86FrameLowering *TFI = getFrameLowering(MF); return TFI->hasFP(MF) ? FramePtr : StackPtr; } Index: lib/Target/XCore/XCoreRegisterInfo.h =================================================================== --- lib/Target/XCore/XCoreRegisterInfo.h +++ lib/Target/XCore/XCoreRegisterInfo.h @@ -43,7 +43,7 @@ RegScavenger *RS = nullptr) const override; // Debug information queries. - unsigned getFrameRegister(const MachineFunction &MF) const override; + Register getFrameRegister(const MachineFunction &MF) const override; //! Return whether to emit frame moves static bool needsFrameMoves(const MachineFunction &MF); Index: lib/Target/XCore/XCoreRegisterInfo.cpp =================================================================== --- lib/Target/XCore/XCoreRegisterInfo.cpp +++ lib/Target/XCore/XCoreRegisterInfo.cpp @@ -283,7 +283,7 @@ Offset += StackSize; - unsigned FrameReg = getFrameRegister(MF); + Register FrameReg = getFrameRegister(MF); // Special handling of DBG_VALUE instructions. if (MI.isDebugValue()) { @@ -321,7 +321,7 @@ } -unsigned XCoreRegisterInfo::getFrameRegister(const MachineFunction &MF) const { +Register XCoreRegisterInfo::getFrameRegister(const MachineFunction &MF) const { const XCoreFrameLowering *TFI = getFrameLowering(MF); return TFI->hasFP(MF) ? 
XCore::R10 : XCore::SP; Index: unittests/CodeGen/GlobalISel/GISelMITest.h =================================================================== --- unittests/CodeGen/GlobalISel/GISelMITest.h +++ unittests/CodeGen/GlobalISel/GISelMITest.h @@ -124,7 +124,7 @@ return MF; } -static void collectCopies(SmallVectorImpl<unsigned> &Copies, +static void collectCopies(SmallVectorImpl<Register> &Copies, MachineFunction *MF) { for (auto &MBB : *MF) for (MachineInstr &MI : MBB) { @@ -152,7 +152,7 @@ MachineFunction *MF; std::pair<std::unique_ptr<Module>, std::unique_ptr<MachineModuleInfo>> ModuleMMIPair; - SmallVector<unsigned, 4> Copies; + SmallVector<Register, 4> Copies; MachineBasicBlock *EntryMBB; MachineIRBuilder B; MachineRegisterInfo *MRI; Index: unittests/CodeGen/GlobalISel/MachineIRBuilderTest.cpp =================================================================== --- unittests/CodeGen/GlobalISel/MachineIRBuilderTest.cpp +++ unittests/CodeGen/GlobalISel/MachineIRBuilderTest.cpp @@ -75,7 +75,7 @@ if (!TM) return; - SmallVector<unsigned, 4> Copies; + SmallVector<Register, 4> Copies; collectCopies(Copies, MF); LLT s64 = LLT::scalar(64); @@ -100,7 +100,7 @@ if (!TM) return; - SmallVector<unsigned, 4> Copies; + SmallVector<Register, 4> Copies; collectCopies(Copies, MF); B.buildUnmerge(LLT::scalar(32), Copies[0]); B.buildUnmerge(LLT::scalar(16), Copies[1]); @@ -120,7 +120,7 @@ if (!TM) return; - SmallVector<unsigned, 4> Copies; + SmallVector<Register, 4> Copies; collectCopies(Copies, MF); LLT S64 = LLT::scalar(64); @@ -152,7 +152,7 @@ return; LLT S64 = LLT::scalar(64); - SmallVector<unsigned, 4> Copies; + SmallVector<Register, 4> Copies; collectCopies(Copies, MF); // Make sure DstOp version works. sqrt is just a placeholder intrinsic. 
@@ -160,7 +160,7 @@ .addUse(Copies[0]); // Make sure register version works - SmallVector<unsigned, 1> Results; + SmallVector<Register, 1> Results; Results.push_back(MRI->createGenericVirtualRegister(S64)); B.buildIntrinsic(Intrinsic::sqrt, Results, false) .addUse(Copies[1]); @@ -181,7 +181,7 @@ LLT S64 = LLT::scalar(64); LLT S128 = LLT::scalar(128); - SmallVector<unsigned, 4> Copies; + SmallVector<Register, 4> Copies; collectCopies(Copies, MF); B.buildXor(S64, Copies[0], Copies[1]); B.buildNot(S64, Copies[0]); @@ -208,7 +208,7 @@ return; LLT S32 = LLT::scalar(32); - SmallVector<unsigned, 4> Copies; + SmallVector<Register, 4> Copies; collectCopies(Copies, MF); B.buildCTPOP(S32, Copies[0]); @@ -235,7 +235,7 @@ return; LLT S32 = LLT::scalar(32); - SmallVector<unsigned, 4> Copies; + SmallVector<Register, 4> Copies; collectCopies(Copies, MF); B.buildUITOFP(S32, Copies[0]); @@ -259,7 +259,7 @@ return; LLT S64 = LLT::scalar(64); - SmallVector<unsigned, 4> Copies; + SmallVector<Register, 4> Copies; collectCopies(Copies, MF); B.buildSMin(S64, Copies[0], Copies[1]); Index: unittests/CodeGen/GlobalISel/PatternMatchTest.cpp =================================================================== --- unittests/CodeGen/GlobalISel/PatternMatchTest.cpp +++ unittests/CodeGen/GlobalISel/PatternMatchTest.cpp @@ -161,7 +161,7 @@ bool match = mi_match(MIBAdd->getOperand(0).getReg(), MRI, m_GAdd(m_Reg(), m_Reg())); EXPECT_TRUE(match); - unsigned Src0, Src1, Src2; + Register Src0, Src1, Src2; match = mi_match(MIBAdd->getOperand(0).getReg(), MRI, m_GAdd(m_Reg(Src0), m_Reg(Src1))); EXPECT_TRUE(match); @@ -292,7 +292,7 @@ bool match = mi_match(MIBFabs->getOperand(0).getReg(), MRI, m_GFabs(m_Reg())); EXPECT_TRUE(match); - unsigned Src; + Register Src; auto MIBFNeg = B.buildInstr(TargetOpcode::G_FNEG, {s32}, {Copy0s32}); match = mi_match(MIBFNeg->getOperand(0).getReg(), MRI, m_GFNeg(m_Reg(Src))); EXPECT_TRUE(match); @@ -360,7 +360,7 @@ auto MIBAExt = B.buildAnyExt(s64, MIBTrunc); auto MIBZExt = B.buildZExt(s64, MIBTrunc); auto MIBSExt = B.buildSExt(s64, MIBTrunc); - unsigned Src0; + Register Src0; bool match = 
mi_match(MIBTrunc->getOperand(0).getReg(), MRI, m_GTrunc(m_Reg(Src0))); EXPECT_TRUE(match); @@ -433,7 +433,7 @@ LLT PtrTy = LLT::pointer(0, 64); auto MIBIntToPtr = B.buildCast(PtrTy, Copies[0]); auto MIBPtrToInt = B.buildCast(s64, MIBIntToPtr); - unsigned Src0; + Register Src0; // match the ptrtoint(inttoptr reg) bool match = mi_match(MIBPtrToInt->getOperand(0).getReg(), MRI, @@ -459,7 +459,7 @@ LLT s64 = LLT::scalar(64); LLT s32 = LLT::scalar(32); auto MIBAdd = B.buildAdd(s64, Copies[0], Copies[1]); - unsigned Src0, Src1; + Register Src0, Src1; bool match = mi_match(MIBAdd->getOperand(0).getReg(), MRI, m_all_of(m_SpecificType(s64), m_GAdd(m_Reg(Src0), m_Reg(Src1))));