Index: include/llvm/CodeGen/CallingConvLower.h
===================================================================
--- include/llvm/CodeGen/CallingConvLower.h
+++ include/llvm/CodeGen/CallingConvLower.h
@@ -145,7 +145,7 @@
   bool needsCustom() const { return isCustom; }

-  unsigned getLocReg() const { assert(isRegLoc()); return Loc; }
+  Register getLocReg() const { assert(isRegLoc()); return Loc; }
   unsigned getLocMemOffset() const { assert(isMemLoc()); return Loc; }
   unsigned getExtraInfo() const { return Loc; }
   MVT getLocVT() const { return LocVT; }
Index: include/llvm/CodeGen/GlobalISel/CallLowering.h
===================================================================
--- include/llvm/CodeGen/GlobalISel/CallLowering.h
+++ include/llvm/CodeGen/GlobalISel/CallLowering.h
@@ -42,7 +42,7 @@
   virtual void anchor();
 public:
   struct ArgInfo {
-    unsigned Reg;
+    Register Reg;
     Type *Ty;
     ISD::ArgFlagsTy Flags;
     bool IsFixed;
@@ -77,19 +77,19 @@
     /// direct SP manipulation, depending on the context. \p MPO
     /// should be initialized to an appropriate description of the
     /// address created.
-    virtual unsigned getStackAddress(uint64_t Size, int64_t Offset,
+    virtual Register getStackAddress(uint64_t Size, int64_t Offset,
                                      MachinePointerInfo &MPO) = 0;

     /// The specified value has been assigned to a physical register,
     /// handle the appropriate COPY (either to or from) and mark any
     /// relevant uses/defines as needed.
-    virtual void assignValueToReg(unsigned ValVReg, unsigned PhysReg,
+    virtual void assignValueToReg(Register ValVReg, Register PhysReg,
                                   CCValAssign &VA) = 0;

     /// The specified value has been assigned to a stack
     /// location. Load or store it there, with appropriate extension
     /// if necessary.
-    virtual void assignValueToAddress(unsigned ValVReg, unsigned Addr,
+    virtual void assignValueToAddress(Register ValVReg, Register Addr,
                                       uint64_t Size, MachinePointerInfo &MPO,
                                       CCValAssign &VA) = 0;

@@ -104,7 +104,7 @@
       llvm_unreachable("Custom values not supported");
     }

-    unsigned extendRegister(unsigned ValReg, CCValAssign &VA);
+    Register extendRegister(Register ValReg, CCValAssign &VA);

     virtual bool assignArg(unsigned ValNo, MVT ValVT, MVT LocVT,
                            CCValAssign::LocInfo LocInfo, const ArgInfo &Info,
Index: include/llvm/CodeGen/GlobalISel/CombinerHelper.h
===================================================================
--- include/llvm/CodeGen/GlobalISel/CombinerHelper.h
+++ include/llvm/CodeGen/GlobalISel/CombinerHelper.h
@@ -18,6 +18,7 @@
 #define LLVM_CODEGEN_GLOBALISEL_COMBINER_HELPER_H

 #include "llvm/CodeGen/LowLevelType.h"
+#include "llvm/CodeGen/Register.h"

 namespace llvm {

@@ -42,12 +43,12 @@
   CombinerHelper(GISelChangeObserver &Observer, MachineIRBuilder &B);

   /// MachineRegisterInfo::replaceRegWith() and inform the observer of the changes
-  void replaceRegWith(MachineRegisterInfo &MRI, unsigned FromReg, unsigned ToReg) const;
+  void replaceRegWith(MachineRegisterInfo &MRI, Register FromReg, Register ToReg) const;

   /// Replace a single register operand with a new register and inform the
   /// observer of the changes.
   void replaceRegOpWith(MachineRegisterInfo &MRI, MachineOperand &FromRegOp,
-                        unsigned ToReg) const;
+                        Register ToReg) const;

   /// If \p MI is COPY, try to combine it.
   /// Returns true if MI changed.
Index: include/llvm/CodeGen/GlobalISel/IRTranslator.h
===================================================================
--- include/llvm/CodeGen/GlobalISel/IRTranslator.h
+++ include/llvm/CodeGen/GlobalISel/IRTranslator.h
@@ -198,7 +198,7 @@
   /// the function.
   ///
   /// \return true if the materialization succeeded.
-  bool translate(const Constant &C, unsigned Reg);
+  bool translate(const Constant &C, Register Reg);

   /// Translate an LLVM bitcast into generic IR. Either a COPY or a G_BITCAST is
   /// emitted.
@@ -214,7 +214,7 @@
   bool translateMemfunc(const CallInst &CI, MachineIRBuilder &MIRBuilder,
                         unsigned ID);

-  void getStackGuard(unsigned DstReg, MachineIRBuilder &MIRBuilder);
+  void getStackGuard(Register DstReg, MachineIRBuilder &MIRBuilder);

   bool translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
                                   MachineIRBuilder &MIRBuilder);
@@ -239,9 +239,9 @@
   // until it is refactored.
   /// Combines all component registers of \p V into a single scalar with size
   /// "max(Offsets) + last size".
-  unsigned packRegs(const Value &V, MachineIRBuilder &MIRBuilder);
+  Register packRegs(const Value &V, MachineIRBuilder &MIRBuilder);

-  void unpackRegs(const Value &V, unsigned Src, MachineIRBuilder &MIRBuilder);
+  void unpackRegs(const Value &V, Register Src, MachineIRBuilder &MIRBuilder);

   /// Returns true if the value should be split into multiple LLTs.
   /// If \p Offsets is given then the split type's offsets will be stored in it.
Index: include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
===================================================================
--- include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
+++ include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
@@ -126,7 +126,6 @@
 public:
   enum class SrcType { Ty_Reg, Ty_MIB, Ty_Predicate };
-  SrcOp(unsigned R) : Reg(R), Ty(SrcType::Ty_Reg) {}
   SrcOp(Register R) : Reg(R), Ty(SrcType::Ty_Reg) {}
   SrcOp(const MachineOperand &Op) : Reg(Op.getReg()), Ty(SrcType::Ty_Reg) {}
   SrcOp(const MachineInstrBuilder &MIB) : SrcMIB(MIB), Ty(SrcType::Ty_MIB) {}
@@ -158,7 +157,7 @@
     llvm_unreachable("Unrecognised SrcOp::SrcType enum");
   }

-  unsigned getReg() const {
+  Register getReg() const {
     switch (Ty) {
     case SrcType::Ty_Predicate:
       llvm_unreachable("Not a register operand");
Index: include/llvm/CodeGen/MachineInstrBuilder.h
===================================================================
--- include/llvm/CodeGen/MachineInstrBuilder.h
+++ include/llvm/CodeGen/MachineInstrBuilder.h
@@ -82,7 +82,7 @@
   /// Get the register for the operand index.
   /// The operand at the index should be a register (asserted by
   /// MachineOperand).
-  unsigned getReg(unsigned Idx) const { return MI->getOperand(Idx).getReg(); }
+  Register getReg(unsigned Idx) const { return MI->getOperand(Idx).getReg(); }

   /// Add a new virtual register operand.
   const MachineInstrBuilder &addReg(unsigned RegNo, unsigned flags = 0,
Index: include/llvm/CodeGen/SwiftErrorValueTracking.h
===================================================================
--- include/llvm/CodeGen/SwiftErrorValueTracking.h
+++ include/llvm/CodeGen/SwiftErrorValueTracking.h
@@ -77,7 +77,7 @@

   /// Get or create the swifterror value virtual register in
   /// VRegDefMap for this basic block.
-  unsigned getOrCreateVReg(const MachineBasicBlock *, const Value *);
+  Register getOrCreateVReg(const MachineBasicBlock *, const Value *);

   /// Set the swifterror virtual register in the VRegDefMap for this
   /// basic block.
@@ -85,12 +85,12 @@

   /// Get or create the swifterror value virtual register for a def of a
   /// swifterror by an instruction.
-  unsigned getOrCreateVRegDefAt(const Instruction *, const MachineBasicBlock *,
+  Register getOrCreateVRegDefAt(const Instruction *, const MachineBasicBlock *,
                                 const Value *);

   /// Get or create the swifterror value virtual register for a use of a
   /// swifterror by an instruction.
-  unsigned getOrCreateVRegUseAt(const Instruction *, const MachineBasicBlock *,
+  Register getOrCreateVRegUseAt(const Instruction *, const MachineBasicBlock *,
                                 const Value *);

   /// Create initial definitions of swifterror values in the entry block of the
Index: lib/CodeGen/GlobalISel/CallLowering.cpp
===================================================================
--- lib/CodeGen/GlobalISel/CallLowering.cpp
+++ lib/CodeGen/GlobalISel/CallLowering.cpp
@@ -195,7 +195,7 @@
   return true;
 }

-unsigned CallLowering::ValueHandler::extendRegister(unsigned ValReg,
+Register CallLowering::ValueHandler::extendRegister(Register ValReg,
                                                     CCValAssign &VA) {
   LLT LocTy{VA.getLocVT()};
   if (LocTy.getSizeInBits() == MRI.getType(ValReg).getSizeInBits())
Index: lib/CodeGen/GlobalISel/CombinerHelper.cpp
===================================================================
--- lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -22,8 +22,8 @@
                                MachineIRBuilder &B)
     : Builder(B), MRI(Builder.getMF().getRegInfo()), Observer(Observer) {}

-void CombinerHelper::replaceRegWith(MachineRegisterInfo &MRI, unsigned FromReg,
-                                    unsigned ToReg) const {
+void CombinerHelper::replaceRegWith(MachineRegisterInfo &MRI, Register FromReg,
+                                    Register ToReg) const {
   Observer.changingAllUsesOfReg(MRI, FromReg);

   if (MRI.constrainRegAttrs(ToReg, FromReg))
@@ -36,7 +36,7 @@

 void CombinerHelper::replaceRegOpWith(MachineRegisterInfo &MRI,
                                       MachineOperand &FromRegOp,
-                                      unsigned ToReg) const {
+                                      Register ToReg) const {
   assert(FromRegOp.getParent() && "Expected an operand in an MI");
   Observer.changingInstr(*FromRegOp.getParent());

@@ -235,7 +235,7 @@
 void CombinerHelper::applyCombineExtendingLoads(MachineInstr &MI,
                                                 PreferredTuple &Preferred) {
   // Rewrite the load to the chosen extending load.
-  unsigned ChosenDstReg = Preferred.MI->getOperand(0).getReg();
+  Register ChosenDstReg = Preferred.MI->getOperand(0).getReg();

   // Inserter to insert a truncate back to the original type at a given point
   // with some basic CSE to limit truncate duplication to one per BB.
@@ -252,7 +252,7 @@
     }

     Builder.setInsertPt(*InsertIntoBB, InsertBefore);
-    unsigned NewDstReg = MRI.cloneVirtualRegister(MI.getOperand(0).getReg());
+    Register NewDstReg = MRI.cloneVirtualRegister(MI.getOperand(0).getReg());
     MachineInstr *NewMI = Builder.buildTrunc(NewDstReg, ChosenDstReg);
     EmittedInsns[InsertIntoBB] = NewMI;
     replaceRegOpWith(MRI, UseMO, NewDstReg);
Index: lib/CodeGen/GlobalISel/IRTranslator.cpp
===================================================================
--- lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -287,9 +287,9 @@
   // Unless the value is a Constant => loadimm cst?
   // or inline constant each time?
   // Creation of a virtual register needs to have a size.
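  // Note (not part of the patch): the unsigned -> Register conversions in the
  // hunks below are mechanical because llvm::Register is a thin value wrapper
  // around the old unsigned register id, with implicit conversions in both
  // directions, so mixed call sites keep compiling while the migration is in
  // flight. A simplified sketch of the idea (illustrative, not the verbatim
  // llvm/CodeGen/Register.h):
  //
  //   class Register {
  //     unsigned Reg;
  //   public:
  //     Register(unsigned Val = 0) : Reg(Val) {}
  //     operator unsigned() const { return Reg; }
  //     bool isVirtual() const;  // virtual vs. physical register queries
  //   };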
-  unsigned Op0 = getOrCreateVReg(*U.getOperand(0));
-  unsigned Op1 = getOrCreateVReg(*U.getOperand(1));
-  unsigned Res = getOrCreateVReg(U);
+  Register Op0 = getOrCreateVReg(*U.getOperand(0));
+  Register Op1 = getOrCreateVReg(*U.getOperand(1));
+  Register Res = getOrCreateVReg(U);
   uint16_t Flags = 0;
   if (isa<Instruction>(U)) {
     const Instruction &I = cast<Instruction>(U);
@@ -304,8 +304,8 @@
   // -0.0 - X --> G_FNEG
   if (isa<Constant>(U.getOperand(0)) &&
       U.getOperand(0) == ConstantFP::getZeroValueForNegation(U.getType())) {
-    unsigned Op1 = getOrCreateVReg(*U.getOperand(1));
-    unsigned Res = getOrCreateVReg(U);
+    Register Op1 = getOrCreateVReg(*U.getOperand(1));
+    Register Res = getOrCreateVReg(U);
     uint16_t Flags = 0;
     if (isa<Instruction>(U)) {
       const Instruction &I = cast<Instruction>(U);
@@ -319,8 +319,8 @@
 }

 bool IRTranslator::translateFNeg(const User &U, MachineIRBuilder &MIRBuilder) {
-  unsigned Op0 = getOrCreateVReg(*U.getOperand(0));
-  unsigned Res = getOrCreateVReg(U);
+  Register Op0 = getOrCreateVReg(*U.getOperand(0));
+  Register Res = getOrCreateVReg(U);
   uint16_t Flags = 0;
   if (isa<Instruction>(U)) {
     const Instruction &I = cast<Instruction>(U);
@@ -333,9 +333,9 @@

 bool IRTranslator::translateCompare(const User &U,
                                     MachineIRBuilder &MIRBuilder) {
   const CmpInst *CI = dyn_cast<CmpInst>(&U);
-  unsigned Op0 = getOrCreateVReg(*U.getOperand(0));
-  unsigned Op1 = getOrCreateVReg(*U.getOperand(1));
-  unsigned Res = getOrCreateVReg(U);
+  Register Op0 = getOrCreateVReg(*U.getOperand(0));
+  Register Op1 = getOrCreateVReg(*U.getOperand(1));
+  Register Res = getOrCreateVReg(U);
   CmpInst::Predicate Pred =
       CI ? CI->getPredicate()
          : static_cast<CmpInst::Predicate>(
                cast<ConstantExpr>(U).getPredicate());
@@ -382,7 +382,7 @@
   unsigned Succ = 0;
   if (!BrInst.isUnconditional()) {
     // We want a G_BRCOND to the true BB followed by an unconditional branch.
-    unsigned Tst = getOrCreateVReg(*BrInst.getCondition());
+    Register Tst = getOrCreateVReg(*BrInst.getCondition());
     const BasicBlock &TrueTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ++));
     MachineBasicBlock &TrueBB = getMBB(TrueTgt);
     MIRBuilder.buildBrCond(Tst, TrueBB);
@@ -412,13 +412,13 @@
   // lowering in there.

   const SwitchInst &SwInst = cast<SwitchInst>(U);
-  const unsigned SwCondValue = getOrCreateVReg(*SwInst.getCondition());
+  const Register SwCondValue = getOrCreateVReg(*SwInst.getCondition());
   const BasicBlock *OrigBB = SwInst.getParent();

   LLT LLTi1 = getLLTForType(*Type::getInt1Ty(U.getContext()), *DL);
   for (auto &CaseIt : SwInst.cases()) {
-    const unsigned CaseValueReg = getOrCreateVReg(*CaseIt.getCaseValue());
-    const unsigned Tst = MRI->createGenericVirtualRegister(LLTi1);
+    const Register CaseValueReg = getOrCreateVReg(*CaseIt.getCaseValue());
+    const Register Tst = MRI->createGenericVirtualRegister(LLTi1);
     MIRBuilder.buildICmp(CmpInst::ICMP_EQ, Tst, CaseValueReg, SwCondValue);
     MachineBasicBlock &CurMBB = MIRBuilder.getMBB();
     const BasicBlock *TrueBB = CaseIt.getCaseSuccessor();
@@ -452,7 +452,7 @@
                                        MachineIRBuilder &MIRBuilder) {
   const IndirectBrInst &BrInst = cast<IndirectBrInst>(U);

-  const unsigned Tgt = getOrCreateVReg(*BrInst.getAddress());
+  const Register Tgt = getOrCreateVReg(*BrInst.getAddress());
   MIRBuilder.buildBrIndirect(Tgt);

   // Link successors.
@@ -483,14 +483,14 @@
   ArrayRef<Register> Regs = getOrCreateVRegs(LI);
   ArrayRef<uint64_t> Offsets = *VMap.getOffsets(LI);
-  unsigned Base = getOrCreateVReg(*LI.getPointerOperand());
+  Register Base = getOrCreateVReg(*LI.getPointerOperand());

   Type *OffsetIRTy = DL->getIntPtrType(LI.getPointerOperandType());
   LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);

   if (CLI->supportSwiftError() && isSwiftError(LI.getPointerOperand())) {
     assert(Regs.size() == 1 && "swifterror should be single pointer");
-    unsigned VReg = SwiftError.getOrCreateVRegUseAt(&LI, &MIRBuilder.getMBB(),
+    Register VReg = SwiftError.getOrCreateVRegUseAt(&LI, &MIRBuilder.getMBB(),
                                                     LI.getPointerOperand());
     MIRBuilder.buildCopy(Regs[0], VReg);
     return true;
@@ -524,7 +524,7 @@

   ArrayRef<Register> Vals = getOrCreateVRegs(*SI.getValueOperand());
   ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*SI.getValueOperand());
-  unsigned Base = getOrCreateVReg(*SI.getPointerOperand());
+  Register Base = getOrCreateVReg(*SI.getPointerOperand());

   Type *OffsetIRTy = DL->getIntPtrType(SI.getPointerOperandType());
   LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);

   if (CLI->supportSwiftError() && isSwiftError(SI.getPointerOperand())) {
     assert(Vals.size() == 1 && "swifterror should be single pointer");

-    unsigned VReg = SwiftError.getOrCreateVRegDefAt(&SI, &MIRBuilder.getMBB(),
+    Register VReg = SwiftError.getOrCreateVRegDefAt(&SI, &MIRBuilder.getMBB(),
                                                     SI.getPointerOperand());
     MIRBuilder.buildCopy(VReg, Vals[0]);
     return true;
@@ -614,7 +614,7 @@

 bool IRTranslator::translateSelect(const User &U,
                                    MachineIRBuilder &MIRBuilder) {
-  unsigned Tst = getOrCreateVReg(*U.getOperand(0));
+  Register Tst = getOrCreateVReg(*U.getOperand(0));
   ArrayRef<Register> ResRegs = getOrCreateVRegs(U);
   ArrayRef<Register> Op0Regs = getOrCreateVRegs(*U.getOperand(1));
   ArrayRef<Register> Op1Regs = getOrCreateVRegs(*U.getOperand(2));
@@ -637,7 +637,7 @@
   // If we're bitcasting to the source type, we can reuse the source vreg.
   if (getLLTForType(*U.getOperand(0)->getType(), *DL) ==
       getLLTForType(*U.getType(), *DL)) {
-    unsigned SrcReg = getOrCreateVReg(*U.getOperand(0));
+    Register SrcReg = getOrCreateVReg(*U.getOperand(0));
     auto &Regs = *VMap.getVRegs(U);
     // If we already assigned a vreg for this bitcast, we can't change that.
     // Emit a copy to satisfy the users we already emitted.
@@ -654,8 +654,8 @@

 bool IRTranslator::translateCast(unsigned Opcode, const User &U,
                                  MachineIRBuilder &MIRBuilder) {
-  unsigned Op = getOrCreateVReg(*U.getOperand(0));
-  unsigned Res = getOrCreateVReg(U);
+  Register Op = getOrCreateVReg(*U.getOperand(0));
+  Register Res = getOrCreateVReg(U);
   MIRBuilder.buildInstr(Opcode, {Res}, {Op});
   return true;
 }
@@ -667,7 +667,7 @@
     return false;

   Value &Op0 = *U.getOperand(0);
-  unsigned BaseReg = getOrCreateVReg(Op0);
+  Register BaseReg = getOrCreateVReg(Op0);
   Type *PtrIRTy = Op0.getType();
   LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
   Type *OffsetIRTy = DL->getIntPtrType(PtrIRTy);
@@ -692,7 +692,7 @@
     }

     if (Offset != 0) {
-      unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
+      Register NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
       LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
       auto OffsetMIB = MIRBuilder.buildConstant({OffsetTy}, Offset);
       MIRBuilder.buildGEP(NewBaseReg, BaseReg, OffsetMIB.getReg(0));
@@ -701,16 +701,16 @@
       Offset = 0;
     }

-    unsigned IdxReg = getOrCreateVReg(*Idx);
+    Register IdxReg = getOrCreateVReg(*Idx);
     if (MRI->getType(IdxReg) != OffsetTy) {
-      unsigned NewIdxReg = MRI->createGenericVirtualRegister(OffsetTy);
+      Register NewIdxReg = MRI->createGenericVirtualRegister(OffsetTy);
       MIRBuilder.buildSExtOrTrunc(NewIdxReg, IdxReg);
       IdxReg = NewIdxReg;
     }

     // N = N + Idx * ElementSize;
     // Avoid doing it for ElementSize of 1.
-    unsigned GepOffsetReg;
+    Register GepOffsetReg;
     if (ElementSize != 1) {
       GepOffsetReg = MRI->createGenericVirtualRegister(OffsetTy);
       auto ElementSizeMIB = MIRBuilder.buildConstant(
@@ -719,7 +719,7 @@
     } else
       GepOffsetReg = IdxReg;

-    unsigned NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
+    Register NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
     MIRBuilder.buildGEP(NewBaseReg, BaseReg, GepOffsetReg);
     BaseReg = NewBaseReg;
   }
@@ -786,7 +786,7 @@
                         CallLowering::ArgInfo(0, CI.getType()), Args);
 }

-void IRTranslator::getStackGuard(unsigned DstReg,
+void IRTranslator::getStackGuard(Register DstReg,
                                  MachineIRBuilder &MIRBuilder) {
   const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
   MRI->setRegClass(DstReg, TRI->getPointerRegClass(*MF));
@@ -996,7 +996,7 @@
   } else if (const auto *CI = dyn_cast<Constant>(V)) {
     MIRBuilder.buildConstDbgValue(*CI, DI.getVariable(), DI.getExpression());
   } else {
-    unsigned Reg = getOrCreateVReg(*V);
+    Register Reg = getOrCreateVReg(*V);
    // FIXME: This does not handle register-indirect values at offset 0. The
    // direct/indirect thing shouldn't really be handled by something as
    // implicit as reg+noreg vs reg+imm in the first place, but it seems
@@ -1020,10 +1020,10 @@
   case Intrinsic::fmuladd: {
     const TargetMachine &TM = MF->getTarget();
     const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
-    unsigned Dst = getOrCreateVReg(CI);
-    unsigned Op0 = getOrCreateVReg(*CI.getArgOperand(0));
-    unsigned Op1 = getOrCreateVReg(*CI.getArgOperand(1));
-    unsigned Op2 = getOrCreateVReg(*CI.getArgOperand(2));
+    Register Dst = getOrCreateVReg(CI);
+    Register Op0 = getOrCreateVReg(*CI.getArgOperand(0));
+    Register Op1 = getOrCreateVReg(*CI.getArgOperand(1));
+    Register Op2 = getOrCreateVReg(*CI.getArgOperand(2));
     if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
         TLI.isFMAFasterThanFMulAndFAdd(TLI.getValueType(*DL, CI.getType()))) {
       // TODO: Revisit this to see if we should move this part of the
@@ -1045,7 +1045,7 @@
     return translateMemfunc(CI, MIRBuilder, ID);
   case Intrinsic::eh_typeid_for: {
     GlobalValue *GV = ExtractTypeInfo(CI.getArgOperand(0));
-    unsigned Reg = getOrCreateVReg(CI);
+    Register Reg = getOrCreateVReg(CI);
     unsigned TypeID = MF->getTypeIDFor(GV);
     MIRBuilder.buildConstant(Reg, TypeID);
     return true;
@@ -1067,7 +1067,7 @@
     return true;
   case Intrinsic::stackprotector: {
     LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
-    unsigned GuardVal = MRI->createGenericVirtualRegister(PtrTy);
+    Register GuardVal = MRI->createGenericVirtualRegister(PtrTy);
     getStackGuard(GuardVal, MIRBuilder);

     AllocaInst *Slot = cast<AllocaInst>(CI.getArgOperand(1));
@@ -1084,8 +1084,8 @@
   }
   case Intrinsic::stacksave: {
     // Save the stack pointer to the location provided by the intrinsic.
-    unsigned Reg = getOrCreateVReg(CI);
-    unsigned StackPtr = MF->getSubtarget()
+    Register Reg = getOrCreateVReg(CI);
+    Register StackPtr = MF->getSubtarget()
                             .getTargetLowering()
                             ->getStackPointerRegisterToSaveRestore();

@@ -1098,8 +1098,8 @@
   }
   case Intrinsic::stackrestore: {
     // Restore the stack pointer from the location provided by the intrinsic.
-    unsigned Reg = getOrCreateVReg(*CI.getArgOperand(0));
-    unsigned StackPtr = MF->getSubtarget()
+    Register Reg = getOrCreateVReg(*CI.getArgOperand(0));
+    Register StackPtr = MF->getSubtarget()
                             .getTargetLowering()
                             ->getStackPointerRegisterToSaveRestore();

@@ -1126,7 +1126,7 @@
   }
   case Intrinsic::invariant_start: {
     LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
-    unsigned Undef = MRI->createGenericVirtualRegister(PtrTy);
+    Register Undef = MRI->createGenericVirtualRegister(PtrTy);
     MIRBuilder.buildUndef(Undef);
     return true;
   }
@@ -1160,7 +1160,7 @@
   return true;
 }

-unsigned IRTranslator::packRegs(const Value &V,
+Register IRTranslator::packRegs(const Value &V,
                                 MachineIRBuilder &MIRBuilder) {
   ArrayRef<Register> Regs = getOrCreateVRegs(V);
   ArrayRef<uint64_t> Offsets = *VMap.getOffsets(V);
@@ -1169,17 +1169,17 @@
   if (Regs.size() == 1)
     return Regs[0];

-  unsigned Dst = MRI->createGenericVirtualRegister(BigTy);
+  Register Dst = MRI->createGenericVirtualRegister(BigTy);
   MIRBuilder.buildUndef(Dst);
   for (unsigned i = 0; i < Regs.size(); ++i) {
-    unsigned NewDst = MRI->createGenericVirtualRegister(BigTy);
+    Register NewDst = MRI->createGenericVirtualRegister(BigTy);
     MIRBuilder.buildInsert(NewDst, Dst, Regs[i], Offsets[i]);
     Dst = NewDst;
   }
   return Dst;
 }

-void IRTranslator::unpackRegs(const Value &V, unsigned Src,
+void IRTranslator::unpackRegs(const Value &V, Register Src,
                               MachineIRBuilder &MIRBuilder) {
   ArrayRef<Register> Regs = getOrCreateVRegs(V);
   ArrayRef<uint64_t> Offsets = *VMap.getOffsets(V);
@@ -1218,7 +1218,7 @@
   for (auto &Arg: CI.arg_operands()) {
     if (CLI->supportSwiftError() && isSwiftError(Arg)) {
       LLT Ty = getLLTForType(*Arg->getType(), *DL);
-      unsigned InVReg = MRI->createGenericVirtualRegister(Ty);
+      Register InVReg = MRI->createGenericVirtualRegister(Ty);
       MIRBuilder.buildCopy(InVReg, SwiftError.getOrCreateVRegUseAt(
                                        &CI, &MIRBuilder.getMBB(), Arg));
       Args.push_back(InVReg);
@@ -1310,7 +1310,7 @@
   MCSymbol *BeginSymbol = Context.createTempSymbol();
   MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(BeginSymbol);

-  unsigned Res = 0;
+  Register Res;
   if (!I.getType()->isVoidTy())
     Res = MRI->createGenericVirtualRegister(getLLTForType(*I.getType(), *DL));
   SmallVector<Register, 8> Args;
@@ -1318,7 +1318,7 @@
   for (auto &Arg : I.arg_operands()) {
     if (CLI->supportSwiftError() && isSwiftError(Arg)) {
       LLT Ty = getLLTForType(*Arg->getType(), *DL);
-      unsigned InVReg = MRI->createGenericVirtualRegister(Ty);
+      Register InVReg = MRI->createGenericVirtualRegister(Ty);
       MIRBuilder.buildCopy(InVReg, SwiftError.getOrCreateVRegUseAt(
                                        &I, &MIRBuilder.getMBB(), Arg));
       Args.push_back(InVReg);
@@ -1385,7 +1385,7 @@
       .addSym(MF->addLandingPad(&MBB));

   LLT Ty = getLLTForType(*LP.getType(), *DL);
-  unsigned Undef = MRI->createGenericVirtualRegister(Ty);
+  Register Undef = MRI->createGenericVirtualRegister(Ty);
   MIRBuilder.buildUndef(Undef);

   SmallVector<LLT, 2> Tys;
@@ -1394,7 +1394,7 @@
   assert(Tys.size() == 2 && "Only two-valued landingpads are supported");

   // Mark exception register as live in.
-  unsigned ExceptionReg = TLI.getExceptionPointerRegister(PersonalityFn);
+  Register ExceptionReg = TLI.getExceptionPointerRegister(PersonalityFn);
   if (!ExceptionReg)
     return false;

@@ -1402,12 +1402,12 @@
   ArrayRef<Register> ResRegs = getOrCreateVRegs(LP);
   MIRBuilder.buildCopy(ResRegs[0], ExceptionReg);

-  unsigned SelectorReg = TLI.getExceptionSelectorRegister(PersonalityFn);
+  Register SelectorReg = TLI.getExceptionSelectorRegister(PersonalityFn);
   if (!SelectorReg)
     return false;

   MBB.addLiveIn(SelectorReg);
-  unsigned PtrVReg = MRI->createGenericVirtualRegister(Tys[0]);
+  Register PtrVReg = MRI->createGenericVirtualRegister(Tys[0]);
   MIRBuilder.buildCopy(PtrVReg, SelectorReg);
   MIRBuilder.buildCast(ResRegs[1], PtrVReg);

@@ -1422,7 +1422,7 @@
     return true;

   if (AI.isStaticAlloca()) {
-    unsigned Res = getOrCreateVReg(AI);
+    Register Res = getOrCreateVReg(AI);
     int FI = getOrCreateFrameIndex(AI);
     MIRBuilder.buildFrameIndex(Res, FI);
     return true;
@@ -1437,29 +1437,29 @@
   unsigned Align = std::max((unsigned)DL->getPrefTypeAlignment(Ty),
                             AI.getAlignment());

-  unsigned NumElts = getOrCreateVReg(*AI.getArraySize());
+  Register NumElts = getOrCreateVReg(*AI.getArraySize());

   Type *IntPtrIRTy = DL->getIntPtrType(AI.getType());
   LLT IntPtrTy = getLLTForType(*IntPtrIRTy, *DL);
   if (MRI->getType(NumElts) != IntPtrTy) {
-    unsigned ExtElts = MRI->createGenericVirtualRegister(IntPtrTy);
+    Register ExtElts = MRI->createGenericVirtualRegister(IntPtrTy);
     MIRBuilder.buildZExtOrTrunc(ExtElts, NumElts);
     NumElts = ExtElts;
   }

-  unsigned AllocSize = MRI->createGenericVirtualRegister(IntPtrTy);
-  unsigned TySize =
+  Register AllocSize = MRI->createGenericVirtualRegister(IntPtrTy);
+  Register TySize =
       getOrCreateVReg(*ConstantInt::get(IntPtrIRTy, -DL->getTypeAllocSize(Ty)));
   MIRBuilder.buildMul(AllocSize, NumElts, TySize);

   LLT PtrTy = getLLTForType(*AI.getType(), *DL);
   auto &TLI = *MF->getSubtarget().getTargetLowering();
-  unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore();
+  Register SPReg = TLI.getStackPointerRegisterToSaveRestore();

-  unsigned SPTmp = MRI->createGenericVirtualRegister(PtrTy);
+  Register SPTmp = MRI->createGenericVirtualRegister(PtrTy);
   MIRBuilder.buildCopy(SPTmp, SPReg);

-  unsigned AllocTmp = MRI->createGenericVirtualRegister(PtrTy);
+  Register AllocTmp = MRI->createGenericVirtualRegister(PtrTy);
   MIRBuilder.buildGEP(AllocTmp, SPTmp, AllocSize);

   // Handle alignment. We have to realign if the allocation granule was smaller
@@ -1472,7 +1472,7 @@
   // Round the size of the allocation up to the stack alignment size
   // by add SA-1 to the size. This doesn't overflow because we're computing
   // an address inside an alloca.
-    unsigned AlignedAlloc = MRI->createGenericVirtualRegister(PtrTy);
+    Register AlignedAlloc = MRI->createGenericVirtualRegister(PtrTy);
     MIRBuilder.buildPtrMask(AlignedAlloc, AllocTmp, Log2_32(Align));
     AllocTmp = AlignedAlloc;
   }
@@ -1502,7 +1502,7 @@
   // If it is a <1 x Ty> vector, use the scalar as it is
   // not a legal vector type in LLT.
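  // Note (not part of the patch): LLT deliberately has no single-element
  // vector type, so <1 x T> IR values are translated as plain scalars.
  // Illustrative expectation of getLLTForType (assumed behaviour):
  //
  //   LLT Ty = getLLTForType(*VectorType::get(Int32Ty, 1), DL);
  //   assert(Ty == LLT::scalar(32) && "<1 x i32> becomes s32, not <1 x s32>");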
   if (U.getType()->getVectorNumElements() == 1) {
-    unsigned Elt = getOrCreateVReg(*U.getOperand(1));
+    Register Elt = getOrCreateVReg(*U.getOperand(1));
     auto &Regs = *VMap.getVRegs(U);
     if (Regs.empty()) {
       Regs.push_back(Elt);
@@ -1513,10 +1513,10 @@
     return true;
   }

-  unsigned Res = getOrCreateVReg(U);
-  unsigned Val = getOrCreateVReg(*U.getOperand(0));
-  unsigned Elt = getOrCreateVReg(*U.getOperand(1));
-  unsigned Idx = getOrCreateVReg(*U.getOperand(2));
+  Register Res = getOrCreateVReg(U);
+  Register Val = getOrCreateVReg(*U.getOperand(0));
+  Register Elt = getOrCreateVReg(*U.getOperand(1));
+  Register Idx = getOrCreateVReg(*U.getOperand(2));
   MIRBuilder.buildInsertVectorElement(Res, Val, Elt, Idx);
   return true;
 }
@@ -1526,7 +1526,7 @@
   // If it is a <1 x Ty> vector, use the scalar as it is
   // not a legal vector type in LLT.
   if (U.getOperand(0)->getType()->getVectorNumElements() == 1) {
-    unsigned Elt = getOrCreateVReg(*U.getOperand(0));
+    Register Elt = getOrCreateVReg(*U.getOperand(0));
     auto &Regs = *VMap.getVRegs(U);
     if (Regs.empty()) {
       Regs.push_back(Elt);
@@ -1536,11 +1536,11 @@
     }
     return true;
   }
-  unsigned Res = getOrCreateVReg(U);
-  unsigned Val = getOrCreateVReg(*U.getOperand(0));
+  Register Res = getOrCreateVReg(U);
+  Register Val = getOrCreateVReg(*U.getOperand(0));
   const auto &TLI = *MF->getSubtarget().getTargetLowering();
   unsigned PreferredVecIdxWidth = TLI.getVectorIdxTy(*DL).getSizeInBits();
-  unsigned Idx = 0;
+  Register Idx;
   if (auto *CI = dyn_cast<ConstantInt>(U.getOperand(1))) {
     if (CI->getBitWidth() != PreferredVecIdxWidth) {
       APInt NewIdx = CI->getValue().sextOrTrunc(PreferredVecIdxWidth);
@@ -1596,11 +1596,11 @@
   Type *ValType = ResType->Type::getStructElementType(0);

   auto Res = getOrCreateVRegs(I);
-  unsigned OldValRes = Res[0];
-  unsigned SuccessRes = Res[1];
-  unsigned Addr = getOrCreateVReg(*I.getPointerOperand());
-  unsigned Cmp = getOrCreateVReg(*I.getCompareOperand());
-  unsigned NewVal = getOrCreateVReg(*I.getNewValOperand());
+  Register OldValRes = Res[0];
+  Register SuccessRes = Res[1];
+  Register Addr = getOrCreateVReg(*I.getPointerOperand());
+  Register Cmp = getOrCreateVReg(*I.getCompareOperand());
+  Register NewVal = getOrCreateVReg(*I.getNewValOperand());

   MIRBuilder.buildAtomicCmpXchgWithSuccess(
       OldValRes, SuccessRes, Addr, Cmp, NewVal,
@@ -1622,9 +1622,9 @@

   Type *ResType = I.getType();

-  unsigned Res = getOrCreateVReg(I);
-  unsigned Addr = getOrCreateVReg(*I.getPointerOperand());
-  unsigned Val = getOrCreateVReg(*I.getValOperand());
+  Register Res = getOrCreateVReg(I);
+  Register Addr = getOrCreateVReg(*I.getPointerOperand());
+  Register Val = getOrCreateVReg(*I.getValOperand());

   unsigned Opcode = 0;
   switch (I.getOperation()) {
@@ -1744,7 +1744,7 @@
   }
 }

-bool IRTranslator::translate(const Constant &C, unsigned Reg) {
+bool IRTranslator::translate(const Constant &C, Register Reg) {
   if (auto CI = dyn_cast<ConstantInt>(&C))
     EntryBuilder->buildConstant(Reg, *CI);
   else if (auto CF = dyn_cast<ConstantFP>(&C))
@@ -1757,7 +1757,7 @@
     unsigned NullSize = DL->getTypeSizeInBits(C.getType());
     auto *ZeroTy = Type::getIntNTy(C.getContext(), NullSize);
     auto *ZeroVal = ConstantInt::get(ZeroTy, 0);
-    unsigned ZeroReg = getOrCreateVReg(*ZeroVal);
+    Register ZeroReg = getOrCreateVReg(*ZeroVal);
     EntryBuilder->buildCast(Reg, ZeroReg);
   } else if (auto GV = dyn_cast<GlobalValue>(&C))
     EntryBuilder->buildGlobalValue(Reg, GV);
Index: lib/CodeGen/GlobalISel/LegalizerHelper.cpp
===================================================================
--- lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -525,12 +525,12 @@
   extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, Src1Regs);
   extractParts(MI.getOperand(2).getReg(), NarrowTy, NumParts, Src2Regs);

-  unsigned CarryIn = MRI.createGenericVirtualRegister(LLT::scalar(1));
+  Register CarryIn = MRI.createGenericVirtualRegister(LLT::scalar(1));
   MIRBuilder.buildConstant(CarryIn, 0);

   for (int i = 0; i < NumParts; ++i) {
-    unsigned DstReg = MRI.createGenericVirtualRegister(NarrowTy);
-    unsigned CarryOut = MRI.createGenericVirtualRegister(LLT::scalar(1));
+    Register DstReg = MRI.createGenericVirtualRegister(NarrowTy);
+    Register CarryOut = MRI.createGenericVirtualRegister(LLT::scalar(1));

     MIRBuilder.buildUAdde(DstReg, CarryOut, Src1Regs[i],
                           Src2Regs[i], CarryIn);
@@ -538,7 +538,7 @@
     DstRegs.push_back(DstReg);
     CarryIn = CarryOut;
   }
-  unsigned DstReg = MI.getOperand(0).getReg();
+  Register DstReg = MI.getOperand(0).getReg();
   if(MRI.getType(DstReg).isVector())
     MIRBuilder.buildBuildVector(DstReg, DstRegs);
   else
@@ -558,12 +558,12 @@
   extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, Src1Regs);
   extractParts(MI.getOperand(2).getReg(), NarrowTy, NumParts, Src2Regs);

-  unsigned DstReg = MRI.createGenericVirtualRegister(NarrowTy);
-  unsigned BorrowOut = MRI.createGenericVirtualRegister(LLT::scalar(1));
+  Register DstReg = MRI.createGenericVirtualRegister(NarrowTy);
+  Register BorrowOut = MRI.createGenericVirtualRegister(LLT::scalar(1));
   MIRBuilder.buildInstr(TargetOpcode::G_USUBO, {DstReg, BorrowOut},
                         {Src1Regs[0], Src2Regs[0]});
   DstRegs.push_back(DstReg);
-  unsigned BorrowIn = BorrowOut;
+  Register BorrowIn = BorrowOut;
   for (int i = 1; i < NumParts; ++i) {
     DstReg = MRI.createGenericVirtualRegister(NarrowTy);
     BorrowOut = MRI.createGenericVirtualRegister(LLT::scalar(1));
@@ -587,13 +587,13 @@
     return narrowScalarInsert(MI, TypeIdx, NarrowTy);
   case TargetOpcode::G_LOAD: {
     const auto &MMO = **MI.memoperands_begin();
-    unsigned DstReg = MI.getOperand(0).getReg();
+    Register DstReg = MI.getOperand(0).getReg();
     LLT DstTy = MRI.getType(DstReg);
     if (DstTy.isVector())
       return UnableToLegalize;

     if (8 * MMO.getSize() != DstTy.getSizeInBits()) {
-      unsigned TmpReg = MRI.createGenericVirtualRegister(NarrowTy);
+      Register TmpReg = MRI.createGenericVirtualRegister(NarrowTy);
       auto &MMO = **MI.memoperands_begin();
       MIRBuilder.buildLoad(TmpReg, MI.getOperand(1).getReg(), MMO);
       MIRBuilder.buildAnyExt(DstReg, TmpReg);
@@ -606,10 +606,10 @@
   case TargetOpcode::G_ZEXTLOAD:
   case TargetOpcode::G_SEXTLOAD: {
     bool ZExt = MI.getOpcode() == TargetOpcode::G_ZEXTLOAD;
-    unsigned DstReg = MI.getOperand(0).getReg();
-    unsigned PtrReg = MI.getOperand(1).getReg();
+    Register DstReg = MI.getOperand(0).getReg();
+    Register PtrReg = MI.getOperand(1).getReg();

-    unsigned TmpReg = MRI.createGenericVirtualRegister(NarrowTy);
+    Register TmpReg = MRI.createGenericVirtualRegister(NarrowTy);
     auto &MMO = **MI.memoperands_begin();
     if (MMO.getSizeInBits() == NarrowSize) {
       MIRBuilder.buildLoad(TmpReg, PtrReg, MMO);
@@ -633,7 +633,7 @@

   case TargetOpcode::G_STORE: {
     const auto &MMO = **MI.memoperands_begin();

-    unsigned SrcReg = MI.getOperand(0).getReg();
+    Register SrcReg = MI.getOperand(0).getReg();
     LLT SrcTy = MRI.getType(SrcReg);
     if (SrcTy.isVector())
       return UnableToLegalize;
@@ -645,7 +645,7 @@
       return UnableToLegalize;

     if (8 * MMO.getSize() != SrcTy.getSizeInBits()) {
-      unsigned TmpReg = MRI.createGenericVirtualRegister(NarrowTy);
+      Register TmpReg = MRI.createGenericVirtualRegister(NarrowTy);
       auto &MMO = **MI.memoperands_begin();
       MIRBuilder.buildTrunc(TmpReg, SrcReg);
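      // Note (not part of the patch): for a store whose memory size is
      // smaller than the value's type, the sequence built here is a G_TRUNC
      // to the memory-sized type followed by a plain G_STORE of the
      // temporary, roughly (illustrative generic MIR):
      //   %tmp:_(s8) = G_TRUNC %val(s32)
      //   G_STORE %tmp(s8), %ptr(p0) :: (store 1)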
      MIRBuilder.buildStore(TmpReg, MI.getOperand(1).getReg(), MMO);
@@ -724,7 +724,7 @@
 void LegalizerHelper::widenScalarDst(MachineInstr &MI, LLT WideTy,
                                      unsigned OpIdx, unsigned TruncOpcode) {
   MachineOperand &MO = MI.getOperand(OpIdx);
-  unsigned DstExt = MRI.createGenericVirtualRegister(WideTy);
+  Register DstExt = MRI.createGenericVirtualRegister(WideTy);
   MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt());
   MIRBuilder.buildInstr(TruncOpcode, {MO.getReg()}, {DstExt});
   MO.setReg(DstExt);
@@ -733,7 +733,7 @@
 void LegalizerHelper::narrowScalarDst(MachineInstr &MI, LLT NarrowTy,
                                       unsigned OpIdx, unsigned ExtOpcode) {
   MachineOperand &MO = MI.getOperand(OpIdx);
-  unsigned DstTrunc = MRI.createGenericVirtualRegister(NarrowTy);
+  Register DstTrunc = MRI.createGenericVirtualRegister(NarrowTy);
   MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt());
   MIRBuilder.buildInstr(ExtOpcode, {MO.getReg()}, {DstTrunc});
   MO.setReg(DstTrunc);
@@ -742,7 +742,7 @@
 void LegalizerHelper::moreElementsVectorDst(MachineInstr &MI, LLT WideTy,
                                             unsigned OpIdx) {
   MachineOperand &MO = MI.getOperand(OpIdx);
-  unsigned DstExt = MRI.createGenericVirtualRegister(WideTy);
+  Register DstExt = MRI.createGenericVirtualRegister(WideTy);
   MIRBuilder.setInsertPt(MIRBuilder.getMBB(), ++MIRBuilder.getInsertPt());
   MIRBuilder.buildExtract(MO.getReg(), DstExt, 0);
   MO.setReg(DstExt);
@@ -772,8 +772,8 @@
     return;
   }

-  unsigned MoreReg = MRI.createGenericVirtualRegister(MoreTy);
-  unsigned ImpDef = MIRBuilder.buildUndef(MoreTy).getReg(0);
+  Register MoreReg = MRI.createGenericVirtualRegister(MoreTy);
+  Register ImpDef = MIRBuilder.buildUndef(MoreTy).getReg(0);
   MIRBuilder.buildInsert(MoreReg, ImpDef, MO.getReg(), 0);
   MO.setReg(MoreReg);
 }
@@ -793,7 +793,7 @@
   unsigned NumSrc = MI.getNumOperands() - 1;
   unsigned PartSize = DstTy.getSizeInBits() / NumSrc;

-  unsigned Src1 = MI.getOperand(1).getReg();
+  Register Src1 = MI.getOperand(1).getReg();
   Register ResultReg = MIRBuilder.buildZExt(DstTy, Src1)->getOperand(0).getReg();

   for (unsigned I = 2; I != NumOps; ++I) {
@@ -1001,7 +1001,7 @@
     return Legalized;
   }

-  unsigned SrcReg = MI.getOperand(1).getReg();
+  Register SrcReg = MI.getOperand(1).getReg();

   // First ZEXT the input.
   auto MIBSrc = MIRBuilder.buildZExt(WideTy, SrcReg);
@@ -1034,11 +1034,11 @@
   }
   case TargetOpcode::G_BSWAP: {
     Observer.changingInstr(MI);
-    unsigned DstReg = MI.getOperand(0).getReg();
+    Register DstReg = MI.getOperand(0).getReg();

-    unsigned ShrReg = MRI.createGenericVirtualRegister(WideTy);
-    unsigned DstExt = MRI.createGenericVirtualRegister(WideTy);
-    unsigned ShiftAmtReg = MRI.createGenericVirtualRegister(WideTy);
+    Register ShrReg = MRI.createGenericVirtualRegister(WideTy);
+    Register DstExt = MRI.createGenericVirtualRegister(WideTy);
+    Register ShiftAmtReg = MRI.createGenericVirtualRegister(WideTy);
     widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT);

     MI.getOperand(0).setReg(DstExt);
@@ -1298,7 +1298,7 @@
   }
   case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
     if (TypeIdx == 0) {
-      unsigned VecReg = MI.getOperand(1).getReg();
+      Register VecReg = MI.getOperand(1).getReg();
       LLT VecTy = MRI.getType(VecReg);
       Observer.changingInstr(MI);

@@ -1380,13 +1380,13 @@
     return UnableToLegalize;
   case TargetOpcode::G_SREM:
   case TargetOpcode::G_UREM: {
-    unsigned QuotReg = MRI.createGenericVirtualRegister(Ty);
+    Register QuotReg = MRI.createGenericVirtualRegister(Ty);
    MIRBuilder.buildInstr(MI.getOpcode() == G_SREM ? G_SDIV : G_UDIV)
        .addDef(QuotReg)
        .addUse(MI.getOperand(1).getReg())
        .addUse(MI.getOperand(2).getReg());

-    unsigned ProdReg = MRI.createGenericVirtualRegister(Ty);
+    Register ProdReg = MRI.createGenericVirtualRegister(Ty);
     MIRBuilder.buildMul(ProdReg, QuotReg, MI.getOperand(2).getReg());
     MIRBuilder.buildSub(MI.getOperand(0).getReg(), MI.getOperand(1).getReg(),
                         ProdReg);
@@ -1397,10 +1397,10 @@
   case TargetOpcode::G_UMULO: {
     // Generate G_UMULH/G_SMULH to check for overflow and a normal G_MUL for the
    // result.
-    unsigned Res = MI.getOperand(0).getReg();
-    unsigned Overflow = MI.getOperand(1).getReg();
-    unsigned LHS = MI.getOperand(2).getReg();
-    unsigned RHS = MI.getOperand(3).getReg();
+    Register Res = MI.getOperand(0).getReg();
+    Register Overflow = MI.getOperand(1).getReg();
+    Register LHS = MI.getOperand(2).getReg();
+    Register RHS = MI.getOperand(3).getReg();

     MIRBuilder.buildMul(Res, LHS, RHS);

@@ -1408,20 +1408,20 @@
                           ? TargetOpcode::G_SMULH
                           : TargetOpcode::G_UMULH;

-    unsigned HiPart = MRI.createGenericVirtualRegister(Ty);
+    Register HiPart = MRI.createGenericVirtualRegister(Ty);
     MIRBuilder.buildInstr(Opcode)
       .addDef(HiPart)
       .addUse(LHS)
       .addUse(RHS);

-    unsigned Zero = MRI.createGenericVirtualRegister(Ty);
+    Register Zero = MRI.createGenericVirtualRegister(Ty);
     MIRBuilder.buildConstant(Zero, 0);

     // For *signed* multiply, overflow is detected by checking:
     // (hi != (lo >> bitwidth-1))
     if (Opcode == TargetOpcode::G_SMULH) {
-      unsigned Shifted = MRI.createGenericVirtualRegister(Ty);
-      unsigned ShiftAmt = MRI.createGenericVirtualRegister(Ty);
+      Register Shifted = MRI.createGenericVirtualRegister(Ty);
+      Register ShiftAmt = MRI.createGenericVirtualRegister(Ty);
       MIRBuilder.buildConstant(ShiftAmt, Ty.getSizeInBits() - 1);
       MIRBuilder.buildInstr(TargetOpcode::G_ASHR)
         .addDef(Shifted)
@@ -1439,7 +1439,7 @@
     // represent them.
     if (Ty.isVector())
       return UnableToLegalize;
-    unsigned Res = MI.getOperand(0).getReg();
+    Register Res = MI.getOperand(0).getReg();
     Type *ZeroTy;
     LLVMContext &Ctx = MIRBuilder.getMF().getFunction().getContext();
     switch (Ty.getSizeInBits()) {
@@ -1461,8 +1461,8 @@
     ConstantFP &ZeroForNegation =
        *cast<ConstantFP>(ConstantFP::getZeroValueForNegation(ZeroTy));
     auto Zero = MIRBuilder.buildFConstant(Ty, ZeroForNegation);
-    unsigned SubByReg = MI.getOperand(1).getReg();
-    unsigned ZeroReg = Zero->getOperand(0).getReg();
+    Register SubByReg = MI.getOperand(1).getReg();
+    Register ZeroReg = Zero->getOperand(0).getReg();
     MIRBuilder.buildInstr(TargetOpcode::G_FSUB, {Res}, {ZeroReg, SubByReg},
                           MI.getFlags());
     MI.eraseFromParent();
@@ -1474,21 +1474,21 @@
     // end up with an infinite loop as G_FSUB is used to legalize G_FNEG.
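    // Note (not part of the patch): the check below breaks a lowering cycle.
    // This case expands G_FSUB x, y into G_FADD x, (G_FNEG y), while the
    // G_FNEG lowering above expands G_FNEG y into G_FSUB -0.0, y. If both
    // were allowed to lower, each would keep re-creating the other.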
     if (LI.getAction({G_FNEG, {Ty}}).Action == Lower)
       return UnableToLegalize;
-    unsigned Res = MI.getOperand(0).getReg();
-    unsigned LHS = MI.getOperand(1).getReg();
-    unsigned RHS = MI.getOperand(2).getReg();
-    unsigned Neg = MRI.createGenericVirtualRegister(Ty);
+    Register Res = MI.getOperand(0).getReg();
+    Register LHS = MI.getOperand(1).getReg();
+    Register RHS = MI.getOperand(2).getReg();
+    Register Neg = MRI.createGenericVirtualRegister(Ty);
     MIRBuilder.buildInstr(TargetOpcode::G_FNEG).addDef(Neg).addUse(RHS);
     MIRBuilder.buildInstr(TargetOpcode::G_FADD, {Res}, {LHS, Neg}, MI.getFlags());
     MI.eraseFromParent();
     return Legalized;
   }
   case TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS: {
-    unsigned OldValRes = MI.getOperand(0).getReg();
-    unsigned SuccessRes = MI.getOperand(1).getReg();
-    unsigned Addr = MI.getOperand(2).getReg();
-    unsigned CmpVal = MI.getOperand(3).getReg();
-    unsigned NewVal = MI.getOperand(4).getReg();
+    Register OldValRes = MI.getOperand(0).getReg();
+    Register SuccessRes = MI.getOperand(1).getReg();
+    Register Addr = MI.getOperand(2).getReg();
+    Register CmpVal = MI.getOperand(3).getReg();
+    Register NewVal = MI.getOperand(4).getReg();
     MIRBuilder.buildAtomicCmpXchg(OldValRes, Addr, CmpVal, NewVal,
                                   **MI.memoperands_begin());
     MIRBuilder.buildICmp(CmpInst::ICMP_EQ, SuccessRes, OldValRes, CmpVal);
@@ -1499,8 +1499,8 @@
   case TargetOpcode::G_SEXTLOAD:
   case TargetOpcode::G_ZEXTLOAD: {
     // Lower to a memory-width G_LOAD and a G_SEXT/G_ZEXT/G_ANYEXT
-    unsigned DstReg = MI.getOperand(0).getReg();
-    unsigned PtrReg = MI.getOperand(1).getReg();
+    Register DstReg = MI.getOperand(0).getReg();
+    Register PtrReg = MI.getOperand(1).getReg();
     LLT DstTy = MRI.getType(DstReg);
     auto &MMO = **MI.memoperands_begin();

@@ -1515,7 +1515,7 @@
     }

     if (DstTy.isScalar()) {
-      unsigned TmpReg =
+      Register TmpReg =
           MRI.createGenericVirtualRegister(LLT::scalar(MMO.getSizeInBits()));
       MIRBuilder.buildLoad(TmpReg, PtrReg, MMO);
       switch (MI.getOpcode()) {
@@ -1544,10 +1544,10 @@
   case TargetOpcode::G_CTPOP:
     return lowerBitCount(MI, TypeIdx, Ty);
   case G_UADDO: {
-    unsigned Res = MI.getOperand(0).getReg();
-    unsigned CarryOut = MI.getOperand(1).getReg();
-    unsigned LHS = MI.getOperand(2).getReg();
-    unsigned RHS = MI.getOperand(3).getReg();
+    Register Res = MI.getOperand(0).getReg();
+    Register CarryOut = MI.getOperand(1).getReg();
+    Register LHS = MI.getOperand(2).getReg();
+    Register RHS = MI.getOperand(3).getReg();

     MIRBuilder.buildAdd(Res, LHS, RHS);
     MIRBuilder.buildICmp(CmpInst::ICMP_ULT, CarryOut, Res, RHS);
@@ -1556,14 +1556,14 @@
     return Legalized;
   }
   case G_UADDE: {
-    unsigned Res = MI.getOperand(0).getReg();
-    unsigned CarryOut = MI.getOperand(1).getReg();
-    unsigned LHS = MI.getOperand(2).getReg();
-    unsigned RHS = MI.getOperand(3).getReg();
-    unsigned CarryIn = MI.getOperand(4).getReg();
+    Register Res = MI.getOperand(0).getReg();
+    Register CarryOut = MI.getOperand(1).getReg();
+    Register LHS = MI.getOperand(2).getReg();
+    Register RHS = MI.getOperand(3).getReg();
+    Register CarryIn = MI.getOperand(4).getReg();

-    unsigned TmpRes = MRI.createGenericVirtualRegister(Ty);
-    unsigned ZExtCarryIn = MRI.createGenericVirtualRegister(Ty);
+    Register TmpRes = MRI.createGenericVirtualRegister(Ty);
+    Register ZExtCarryIn = MRI.createGenericVirtualRegister(Ty);

     MIRBuilder.buildAdd(TmpRes, LHS, RHS);
     MIRBuilder.buildZExt(ZExtCarryIn, CarryIn);
@@ -1574,10 +1574,10 @@
     return Legalized;
   }
   case G_USUBO: {
-    unsigned Res = MI.getOperand(0).getReg();
-    unsigned BorrowOut = MI.getOperand(1).getReg();
-    unsigned LHS = MI.getOperand(2).getReg();
-    unsigned RHS = MI.getOperand(3).getReg();
+    Register Res = MI.getOperand(0).getReg();
+    Register BorrowOut = MI.getOperand(1).getReg();
+    Register LHS = MI.getOperand(2).getReg();
+    Register RHS = MI.getOperand(3).getReg();

     MIRBuilder.buildSub(Res, LHS, RHS);
     MIRBuilder.buildICmp(CmpInst::ICMP_ULT, BorrowOut, LHS, RHS);
@@ -1586,16 +1586,16 @@
     return Legalized;
   }
   case G_USUBE: {
-    unsigned Res = MI.getOperand(0).getReg();
-    unsigned BorrowOut = MI.getOperand(1).getReg();
-    unsigned LHS = MI.getOperand(2).getReg();
-    unsigned RHS = MI.getOperand(3).getReg();
-    unsigned BorrowIn = MI.getOperand(4).getReg();
+    Register Res = MI.getOperand(0).getReg();
+    Register BorrowOut = MI.getOperand(1).getReg();
+    Register LHS = MI.getOperand(2).getReg();
+    Register RHS = MI.getOperand(3).getReg();
+    Register BorrowIn = MI.getOperand(4).getReg();

-    unsigned TmpRes = MRI.createGenericVirtualRegister(Ty);
-    unsigned ZExtBorrowIn = MRI.createGenericVirtualRegister(Ty);
-    unsigned LHS_EQ_RHS = MRI.createGenericVirtualRegister(LLT::scalar(1));
-    unsigned LHS_ULT_RHS = MRI.createGenericVirtualRegister(LLT::scalar(1));
+    Register TmpRes = MRI.createGenericVirtualRegister(Ty);
+    Register ZExtBorrowIn = MRI.createGenericVirtualRegister(Ty);
+    Register LHS_EQ_RHS = MRI.createGenericVirtualRegister(LLT::scalar(1));
+    Register LHS_ULT_RHS = MRI.createGenericVirtualRegister(LLT::scalar(1));

     MIRBuilder.buildSub(TmpRes, LHS, RHS);
     MIRBuilder.buildZExt(ZExtBorrowIn, BorrowIn);
@@ -1619,7 +1619,7 @@
   SmallVector<Register, 2> DstRegs;

   unsigned NarrowSize = NarrowTy.getSizeInBits();
-  unsigned DstReg = MI.getOperand(0).getReg();
+  Register DstReg = MI.getOperand(0).getReg();
   unsigned Size = MRI.getType(DstReg).getSizeInBits();
   int NumParts = Size / NarrowSize;
   // FIXME: Don't know how to handle the situation where the small vectors
@@ -1628,7 +1628,7 @@
     return UnableToLegalize;

   for (int i = 0; i < NumParts; ++i) {
-    unsigned TmpReg = MRI.createGenericVirtualRegister(NarrowTy);
+    Register TmpReg = MRI.createGenericVirtualRegister(NarrowTy);
     MIRBuilder.buildUndef(TmpReg);
     DstRegs.push_back(TmpReg);
   }
@@ -1663,7 +1663,7 @@
     return UnableToLegalize;

   if (BitsForNumParts != Size) {
-    unsigned AccumDstReg = MRI.createGenericVirtualRegister(DstTy);
+    Register AccumDstReg = MRI.createGenericVirtualRegister(DstTy);
     MIRBuilder.buildUndef(AccumDstReg);

     // Handle the pieces which evenly divide into the requested type with
@@ -1671,15 +1671,15 @@
     for (unsigned Offset = 0; Offset < BitsForNumParts; Offset += NarrowSize) {
       SmallVector<SrcOp, 4> SrcOps;
       for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I) {
-        unsigned PartOpReg = MRI.createGenericVirtualRegister(NarrowTy);
+        Register PartOpReg = MRI.createGenericVirtualRegister(NarrowTy);
         MIRBuilder.buildExtract(PartOpReg, MI.getOperand(I).getReg(), Offset);
         SrcOps.push_back(PartOpReg);
       }

-      unsigned PartDstReg = MRI.createGenericVirtualRegister(NarrowTy);
+      Register PartDstReg = MRI.createGenericVirtualRegister(NarrowTy);
       MIRBuilder.buildInstr(Opc, {PartDstReg}, SrcOps, Flags);

-      unsigned PartInsertReg = MRI.createGenericVirtualRegister(DstTy);
+      Register PartInsertReg = MRI.createGenericVirtualRegister(DstTy);
       MIRBuilder.buildInsert(PartInsertReg, AccumDstReg, PartDstReg, Offset);
       AccumDstReg = PartInsertReg;
     }
@@ -1687,13 +1687,13 @@

     // Handle the remaining element sized leftover piece.
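    // Note (not part of the patch), illustrative sizing: with
    // DstTy = <3 x s32> and NarrowTy = <2 x s32>, Size is 96 and
    // BitsForNumParts is 64, so the loop above emits one <2 x s32> piece and
    // the code below handles the final s32 element with an EltTy-sized
    // G_EXTRACT / op / G_INSERT at offset 64.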
    SmallVector<SrcOp, 4> SrcOps;
    for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I) {
-      unsigned PartOpReg = MRI.createGenericVirtualRegister(EltTy);
+      Register PartOpReg = MRI.createGenericVirtualRegister(EltTy);
       MIRBuilder.buildExtract(PartOpReg, MI.getOperand(I).getReg(),
                               BitsForNumParts);
       SrcOps.push_back(PartOpReg);
     }

-    unsigned PartDstReg = MRI.createGenericVirtualRegister(EltTy);
+    Register PartDstReg = MRI.createGenericVirtualRegister(EltTy);
     MIRBuilder.buildInstr(Opc, {PartDstReg}, SrcOps, Flags);
     MIRBuilder.buildInsert(DstReg, AccumDstReg, PartDstReg, BitsForNumParts);
     MI.eraseFromParent();
@@ -1712,7 +1712,7 @@
     extractParts(MI.getOperand(3).getReg(), NarrowTy, NumParts, Src2Regs);

   for (int i = 0; i < NumParts; ++i) {
-    unsigned DstReg = MRI.createGenericVirtualRegister(NarrowTy);
+    Register DstReg = MRI.createGenericVirtualRegister(NarrowTy);

     if (NumOps == 1)
       MIRBuilder.buildInstr(Opc, {DstReg}, {Src0Regs[i]}, Flags);
@@ -1757,7 +1757,7 @@
   const unsigned NewNumElts =
       NarrowTy0.isVector() ? NarrowTy0.getNumElements() : 1;

-  const unsigned DstReg = MI.getOperand(0).getReg();
+  const Register DstReg = MI.getOperand(0).getReg();
   LLT DstTy = MRI.getType(DstReg);
   LLT LeftoverTy0;

@@ -1777,7 +1777,7 @@

   for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I) {
     LLT LeftoverTy;
-    unsigned SrcReg = MI.getOperand(I).getReg();
+    Register SrcReg = MI.getOperand(I).getReg();
     LLT SrcTyI = MRI.getType(SrcReg);
     LLT NarrowTyI = LLT::scalarOrVector(NewNumElts, SrcTyI.getScalarType());
     LLT LeftoverTyI;
@@ -1791,16 +1791,16 @@
     if (I == 1) {
       // For the first operand, create an instruction for each part and setup
       // the result.
-      for (unsigned PartReg : PartRegs) {
-        unsigned PartDstReg = MRI.createGenericVirtualRegister(NarrowTy0);
+      for (Register PartReg : PartRegs) {
+        Register PartDstReg = MRI.createGenericVirtualRegister(NarrowTy0);
         NewInsts.push_back(MIRBuilder.buildInstrNoInsert(MI.getOpcode())
                                .addDef(PartDstReg)
                                .addUse(PartReg));
         DstRegs.push_back(PartDstReg);
       }

-      for (unsigned LeftoverReg : LeftoverRegs) {
-        unsigned PartDstReg = MRI.createGenericVirtualRegister(LeftoverTy0);
+      for (Register LeftoverReg : LeftoverRegs) {
+        Register PartDstReg = MRI.createGenericVirtualRegister(LeftoverTy0);
         NewInsts.push_back(MIRBuilder.buildInstrNoInsert(MI.getOpcode())
                                .addDef(PartDstReg)
                                .addUse(LeftoverReg));
@@ -1839,8 +1839,8 @@
   if (TypeIdx != 0)
     return UnableToLegalize;

-  unsigned DstReg = MI.getOperand(0).getReg();
-  unsigned SrcReg = MI.getOperand(1).getReg();
+  Register DstReg = MI.getOperand(0).getReg();
+  Register SrcReg = MI.getOperand(1).getReg();
   LLT DstTy = MRI.getType(DstReg);
   LLT SrcTy = MRI.getType(SrcReg);

@@ -1864,7 +1864,7 @@
   extractParts(SrcReg, NarrowTy1, NumParts, SrcRegs);

   for (unsigned I = 0; I < NumParts; ++I) {
-    unsigned DstReg = MRI.createGenericVirtualRegister(NarrowTy0);
+    Register DstReg = MRI.createGenericVirtualRegister(NarrowTy0);
     MachineInstr *NewInst = MIRBuilder.buildInstr(MI.getOpcode())
                                 .addDef(DstReg)
                                 .addUse(SrcRegs[I]);
@@ -1885,8 +1885,8 @@
 LegalizerHelper::LegalizeResult
 LegalizerHelper::fewerElementsVectorCmp(MachineInstr &MI, unsigned TypeIdx,
                                         LLT NarrowTy) {
-  unsigned DstReg = MI.getOperand(0).getReg();
-  unsigned Src0Reg = MI.getOperand(2).getReg();
+  Register DstReg = MI.getOperand(0).getReg();
+  Register Src0Reg = MI.getOperand(2).getReg();
   LLT DstTy = MRI.getType(DstReg);
   LLT SrcTy = MRI.getType(Src0Reg);

@@ -1928,7 +1928,7 @@
     extractParts(MI.getOperand(3).getReg(), NarrowTy1, NumParts, Src2Regs);

   for (unsigned I = 0; I < NumParts; ++I) {
-    unsigned DstReg = MRI.createGenericVirtualRegister(NarrowTy0);
+    Register DstReg = MRI.createGenericVirtualRegister(NarrowTy0);
     DstRegs.push_back(DstReg);

     if (MI.getOpcode() == TargetOpcode::G_ICMP)
@@ -2024,7 +2024,7 @@
 LegalizerHelper::LegalizeResult
 LegalizerHelper::fewerElementsVectorPhi(MachineInstr &MI, unsigned TypeIdx,
                                         LLT NarrowTy) {
-  const unsigned DstReg = MI.getOperand(0).getReg();
+  const Register DstReg = MI.getOperand(0).getReg();
   LLT PhiTy = MRI.getType(DstReg);
   LLT LeftoverTy;

@@ -2065,7 +2065,7 @@
     PartRegs.clear();
     LeftoverRegs.clear();

-    unsigned SrcReg = MI.getOperand(I).getReg();
+    Register SrcReg = MI.getOperand(I).getReg();
     MachineBasicBlock &OpMBB = *MI.getOperand(I + 1).getMBB();
     MIRBuilder.setInsertPt(OpMBB, OpMBB.getFirstTerminator());

@@ -2265,8 +2265,8 @@
 LegalizerHelper::narrowScalarShiftByConstant(MachineInstr &MI, const APInt &Amt,
                                              const LLT HalfTy, const LLT AmtTy) {

-  unsigned InL = MRI.createGenericVirtualRegister(HalfTy);
-  unsigned InH = MRI.createGenericVirtualRegister(HalfTy);
+  Register InL = MRI.createGenericVirtualRegister(HalfTy);
+  Register InH = MRI.createGenericVirtualRegister(HalfTy);
   MIRBuilder.buildUnmerge({InL, InH}, MI.getOperand(1).getReg());

   if (Amt.isNullValue()) {
@@ -2279,7 +2279,7 @@
   unsigned NVTBits = HalfTy.getSizeInBits();
   unsigned VTBits = 2 * NVTBits;

-  SrcOp Lo(0), Hi(0);
+  SrcOp Lo(Register(0)), Hi(Register(0));
   if (MI.getOpcode() == TargetOpcode::G_SHL) {
     if (Amt.ugt(VTBits)) {
       Lo = Hi = MIRBuilder.buildConstant(NVT, 0);
@@ -2360,12 +2360,12 @@
     return Legalized;
   }

-  unsigned DstReg = MI.getOperand(0).getReg();
+  Register DstReg = MI.getOperand(0).getReg();
   LLT DstTy = MRI.getType(DstReg);
   if (DstTy.isVector())
     return UnableToLegalize;

-  unsigned Amt = MI.getOperand(2).getReg();
+  Register Amt = MI.getOperand(2).getReg();
   LLT ShiftAmtTy = MRI.getType(Amt);
   const unsigned DstEltSize = DstTy.getScalarSizeInBits();
   if (DstEltSize % 2 != 0)
@@ -2389,8 +2389,8 @@
   // Handle the fully general expansion by an unknown amount.
   auto NewBits = MIRBuilder.buildConstant(ShiftAmtTy, NewBitSize);

-  unsigned InL = MRI.createGenericVirtualRegister(HalfTy);
-  unsigned InH = MRI.createGenericVirtualRegister(HalfTy);
+  Register InL = MRI.createGenericVirtualRegister(HalfTy);
+  Register InH = MRI.createGenericVirtualRegister(HalfTy);
   MIRBuilder.buildUnmerge({InL, InH}, MI.getOperand(1).getReg());

   auto AmtExcess = MIRBuilder.buildSub(ShiftAmtTy, Amt, NewBits);
@@ -2564,7 +2564,7 @@
   unsigned DstParts = DstRegs.size();

   unsigned DstIdx = 0; // Low bits of the result.
-  unsigned FactorSum =
+  Register FactorSum =
       B.buildMul(NarrowTy, Src1Regs[DstIdx], Src2Regs[DstIdx]).getReg(0);
   DstRegs[DstIdx] = FactorSum;

@@ -2591,7 +2591,7 @@
       Factors.push_back(CarrySumPrevDstIdx);
     }

-    unsigned CarrySum = 0;
+    Register CarrySum;
     // Add all factors and accumulate all carries into CarrySum.
     if (DstIdx != DstParts - 1) {
       MachineInstrBuilder Uaddo =
@@ -2672,7 +2672,7 @@
   SmallVector<uint64_t, 2> Indexes;
   extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, SrcRegs);

-  unsigned OpReg = MI.getOperand(0).getReg();
+  Register OpReg = MI.getOperand(0).getReg();
   uint64_t OpStart = MI.getOperand(2).getImm();
   uint64_t OpSize = MRI.getType(OpReg).getSizeInBits();
   for (int i = 0; i < NumParts; ++i) {
@@ -2699,7 +2699,7 @@
       SegSize = std::min(SrcStart + NarrowSize - OpStart, OpSize);
     }

-    unsigned SegReg = SrcRegs[i];
+    Register SegReg = SrcRegs[i];
     if (ExtractOffset != 0 || SegSize != NarrowSize) {
       // A genuine extract is needed.
      SegReg = MRI.createGenericVirtualRegister(LLT::scalar(SegSize));
@@ -2709,7 +2709,7 @@
     DstRegs.push_back(SegReg);
   }

-  unsigned DstReg = MI.getOperand(0).getReg();
+  Register DstReg = MI.getOperand(0).getReg();
   if(MRI.getType(DstReg).isVector())
     MIRBuilder.buildBuildVector(DstReg, DstRegs);
   else
@@ -2739,7 +2739,7 @@
   SmallVector<uint64_t, 2> Indexes;
   extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, SrcRegs);

-  unsigned OpReg = MI.getOperand(2).getReg();
+  Register OpReg = MI.getOperand(2).getReg();
   uint64_t OpStart = MI.getOperand(3).getImm();
   uint64_t OpSize = MRI.getType(OpReg).getSizeInBits();
   for (int i = 0; i < NumParts; ++i) {
@@ -2771,20 +2771,20 @@
           std::min(NarrowSize - InsertOffset, OpStart + OpSize - DstStart);
     }

-    unsigned SegReg = OpReg;
+    Register SegReg = OpReg;
     if (ExtractOffset != 0 || SegSize != OpSize) {
       // A genuine extract is needed.
       SegReg = MRI.createGenericVirtualRegister(LLT::scalar(SegSize));
       MIRBuilder.buildExtract(SegReg, OpReg, ExtractOffset);
     }

-    unsigned DstReg = MRI.createGenericVirtualRegister(NarrowTy);
+    Register DstReg = MRI.createGenericVirtualRegister(NarrowTy);
     MIRBuilder.buildInsert(DstReg, SrcRegs[i], SegReg, InsertOffset);
     DstRegs.push_back(DstReg);
   }

   assert(DstRegs.size() == (unsigned)NumParts && "not all parts covered");
-  unsigned DstReg = MI.getOperand(0).getReg();
+  Register DstReg = MI.getOperand(0).getReg();
   if(MRI.getType(DstReg).isVector())
     MIRBuilder.buildBuildVector(DstReg, DstRegs);
   else
@@ -2796,7 +2796,7 @@
 LegalizerHelper::LegalizeResult
 LegalizerHelper::narrowScalarBasic(MachineInstr &MI, unsigned TypeIdx,
                                    LLT NarrowTy) {
-  unsigned DstReg = MI.getOperand(0).getReg();
+  Register DstReg = MI.getOperand(0).getReg();
   LLT DstTy = MRI.getType(DstReg);

   assert(MI.getNumOperands() == 3 && TypeIdx == 0);
@@ -2840,12 +2840,12 @@
   if (TypeIdx != 0)
     return UnableToLegalize;

-  unsigned CondReg = MI.getOperand(1).getReg();
+  Register CondReg = MI.getOperand(1).getReg();
   LLT CondTy = MRI.getType(CondReg);
   if (CondTy.isVector()) // TODO: Handle vselect
     return UnableToLegalize;

-  unsigned DstReg = MI.getOperand(0).getReg();
+  Register DstReg = MI.getOperand(0).getReg();
   LLT DstTy = MRI.getType(DstReg);

   SmallVector<Register, 4> DstRegs, DstLeftoverRegs;
@@ -2899,7 +2899,7 @@
     return Legalized;
   }
   case TargetOpcode::G_CTLZ: {
-    unsigned SrcReg = MI.getOperand(1).getReg();
+    Register SrcReg = MI.getOperand(1).getReg();
     unsigned Len = Ty.getSizeInBits();
     if (isSupported({TargetOpcode::G_CTLZ_ZERO_UNDEF, {Ty, Ty}})) {
       // If CTLZ_ZERO_UNDEF is supported, emit that and a select for zero.
@@ -2925,7 +2925,7 @@
    //   return Len - popcount(x);
    //
    // Ref: "Hacker's Delight" by Henry Warren
-    unsigned Op = SrcReg;
+    Register Op = SrcReg;
     unsigned NewLen = PowerOf2Ceil(Len);
     for (unsigned i = 0; (1U << i) <= (NewLen / 2); ++i) {
       auto MIBShiftAmt = MIRBuilder.buildConstant(Ty, 1ULL << i);
@@ -2949,7 +2949,7 @@
     return Legalized;
   }
   case TargetOpcode::G_CTTZ: {
-    unsigned SrcReg = MI.getOperand(1).getReg();
+    Register SrcReg = MI.getOperand(1).getReg();
     unsigned Len = Ty.getSizeInBits();
     if (isSupported({TargetOpcode::G_CTTZ_ZERO_UNDEF, {Ty, Ty}})) {
       // If CTTZ_ZERO_UNDEF is legal or custom, emit that and a select with
@@ -2997,8 +2997,8 @@
 // representation.
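// Note (not part of the patch): as the S64/S32/S1 types below suggest, this
// expansion computes the f32 result directly from the u64 bit pattern with
// integer operations only: it locates the most significant set bit to form
// the exponent, shifts the leading bits into the 23-bit mantissa field, and
// folds the discarded low bits into a rounding adjustment, so no wider FP
// type is ever needed.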
 LegalizerHelper::LegalizeResult
 LegalizerHelper::lowerU64ToF32BitOps(MachineInstr &MI) {
-  unsigned Dst = MI.getOperand(0).getReg();
-  unsigned Src = MI.getOperand(1).getReg();
+  Register Dst = MI.getOperand(0).getReg();
+  Register Src = MI.getOperand(1).getReg();
   const LLT S64 = LLT::scalar(64);
   const LLT S32 = LLT::scalar(32);
   const LLT S1 = LLT::scalar(1);

@@ -3053,8 +3053,8 @@

 LegalizerHelper::LegalizeResult
 LegalizerHelper::lowerUITOFP(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
-  unsigned Dst = MI.getOperand(0).getReg();
-  unsigned Src = MI.getOperand(1).getReg();
+  Register Dst = MI.getOperand(0).getReg();
+  Register Src = MI.getOperand(1).getReg();
   LLT DstTy = MRI.getType(Dst);
   LLT SrcTy = MRI.getType(Src);

@@ -3074,8 +3074,8 @@

 LegalizerHelper::LegalizeResult
 LegalizerHelper::lowerSITOFP(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
-  unsigned Dst = MI.getOperand(0).getReg();
-  unsigned Src = MI.getOperand(1).getReg();
+  Register Dst = MI.getOperand(0).getReg();
+  Register Src = MI.getOperand(1).getReg();
   LLT DstTy = MRI.getType(Dst);
   LLT SrcTy = MRI.getType(Src);

@@ -3092,7 +3092,7 @@
     //   float r = cul2f((l + s) ^ s);
     //   return s ? -r : r;
     // }
-    unsigned L = Src;
+    Register L = Src;
     auto SignBit = MIRBuilder.buildConstant(S64, 63);
     auto S = MIRBuilder.buildAShr(S64, L, SignBit);

Index: lib/CodeGen/SwiftErrorValueTracking.cpp
===================================================================
--- lib/CodeGen/SwiftErrorValueTracking.cpp
+++ lib/CodeGen/SwiftErrorValueTracking.cpp
@@ -22,7 +22,7 @@

 using namespace llvm;

-unsigned SwiftErrorValueTracking::getOrCreateVReg(const MachineBasicBlock *MBB,
+Register SwiftErrorValueTracking::getOrCreateVReg(const MachineBasicBlock *MBB,
                                                   const Value *Val) {
   auto Key = std::make_pair(MBB, Val);
   auto It = VRegDefMap.find(Key);
@@ -46,7 +46,7 @@
   VRegDefMap[std::make_pair(MBB, Val)] = VReg;
 }

-unsigned SwiftErrorValueTracking::getOrCreateVRegDefAt(
+Register SwiftErrorValueTracking::getOrCreateVRegDefAt(
     const Instruction *I, const MachineBasicBlock *MBB, const Value *Val) {
   auto Key = PointerIntPair<const Instruction *, 1, bool>(I, true);
   auto It = VRegDefUses.find(Key);
@@ -55,20 +55,20 @@

   auto &DL = MF->getDataLayout();
   const TargetRegisterClass *RC = TLI->getRegClassFor(TLI->getPointerTy(DL));
-  unsigned VReg = MF->getRegInfo().createVirtualRegister(RC);
+  Register VReg = MF->getRegInfo().createVirtualRegister(RC);
   VRegDefUses[Key] = VReg;
   setCurrentVReg(MBB, Val, VReg);
   return VReg;
 }

-unsigned SwiftErrorValueTracking::getOrCreateVRegUseAt(
+Register SwiftErrorValueTracking::getOrCreateVRegUseAt(
     const Instruction *I, const MachineBasicBlock *MBB, const Value *Val) {
   auto Key = PointerIntPair<const Instruction *, 1, bool>(I, false);
   auto It = VRegDefUses.find(Key);
   if (It != VRegDefUses.end())
     return It->second;

-  unsigned VReg = getOrCreateVReg(MBB, Val);
+  Register VReg = getOrCreateVReg(MBB, Val);
   VRegDefUses[Key] = VReg;
   return VReg;
 }
@@ -129,7 +129,7 @@
     // least by the 'return' of the swifterror.
     if (SwiftErrorArg && SwiftErrorArg == SwiftErrorVal)
       continue;
-    unsigned VReg = MF->getRegInfo().createVirtualRegister(RC);
+    Register VReg = MF->getRegInfo().createVirtualRegister(RC);
     // Assign Undef to Vreg. We construct MI directly to make sure it works
     // with FastISel.
     BuildMI(*MBB, MBB->getFirstNonPHI(), DbgLoc,
@@ -177,7 +177,7 @@

     // Check whether we have a single vreg def from all predecessors.
     // Otherwise we need a phi.
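    // Note (not part of the patch): VRegs below collects one
    // (predecessor, vreg) pair per incoming edge. When every predecessor
    // already supplies the same vreg it is reused directly; only genuinely
    // divergent definitions force the PHI (or, in the upwards-use case, the
    // COPY) that the code after this scan materializes.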
- SmallVector<std::pair<MachineBasicBlock *, unsigned>, 4> VRegs; + SmallVector<std::pair<MachineBasicBlock *, Register>, 4> VRegs; SmallSet<const MachineBasicBlock *, 8> Visited; for (auto *Pred : MBB->predecessors()) { if (!Visited.insert(Pred).second) @@ -203,7 +203,7 @@ VRegs.size() >= 1 && std::find_if( VRegs.begin(), VRegs.end(), - [&](const std::pair<MachineBasicBlock *, unsigned> &V) + [&](const std::pair<MachineBasicBlock *, Register> &V) -> bool { return V.second != VRegs[0].second; }) != VRegs.end(); @@ -227,7 +227,7 @@ assert(UpwardsUse); assert(!VRegs.empty() && "No predecessors? Is the Calling Convention correct?"); - unsigned DestReg = UUseVReg; + Register DestReg = UUseVReg; BuildMI(*MBB, MBB->getFirstNonPHI(), DLoc, TII->get(TargetOpcode::COPY), DestReg) .addReg(VRegs[0].second); Index: lib/Target/AArch64/AArch64CallLowering.cpp =================================================================== --- lib/Target/AArch64/AArch64CallLowering.cpp +++ lib/Target/AArch64/AArch64CallLowering.cpp @@ -57,18 +57,18 @@ CCAssignFn *AssignFn) : ValueHandler(MIRBuilder, MRI, AssignFn), StackUsed(0) {} - unsigned getStackAddress(uint64_t Size, int64_t Offset, + Register getStackAddress(uint64_t Size, int64_t Offset, MachinePointerInfo &MPO) override { auto &MFI = MIRBuilder.getMF().getFrameInfo(); int FI = MFI.CreateFixedObject(Size, Offset, true); MPO = MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI); - unsigned AddrReg = MRI.createGenericVirtualRegister(LLT::pointer(0, 64)); + Register AddrReg = MRI.createGenericVirtualRegister(LLT::pointer(0, 64)); MIRBuilder.buildFrameIndex(AddrReg, FI); StackUsed = std::max(StackUsed, Size + Offset); return AddrReg; } - void assignValueToReg(unsigned ValVReg, unsigned PhysReg, + void assignValueToReg(Register ValVReg, Register PhysReg, CCValAssign &VA) override { markPhysRegUsed(PhysReg); switch (VA.getLocInfo()) { @@ -85,7 +85,7 @@ } } - void assignValueToAddress(unsigned ValVReg, unsigned Addr, uint64_t Size, + void assignValueToAddress(Register ValVReg, Register Addr, uint64_t Size, MachinePointerInfo &MPO, CCValAssign &VA) override { // FIXME: Get alignment auto MMO = MIRBuilder.getMF().getMachineMemOperand( @@ -133,31 +133,31 @@ : ValueHandler(MIRBuilder, MRI, AssignFn), MIB(MIB), AssignFnVarArg(AssignFnVarArg), StackSize(0) {} - unsigned getStackAddress(uint64_t Size, int64_t Offset, + Register getStackAddress(uint64_t Size, int64_t Offset, MachinePointerInfo &MPO) override { LLT p0 = LLT::pointer(0, 64); LLT s64 = LLT::scalar(64); - unsigned SPReg = MRI.createGenericVirtualRegister(p0); - MIRBuilder.buildCopy(SPReg, AArch64::SP); + Register SPReg = MRI.createGenericVirtualRegister(p0); + MIRBuilder.buildCopy(SPReg, Register(AArch64::SP)); - unsigned OffsetReg = MRI.createGenericVirtualRegister(s64); + Register OffsetReg = MRI.createGenericVirtualRegister(s64); MIRBuilder.buildConstant(OffsetReg, Offset); - unsigned AddrReg = MRI.createGenericVirtualRegister(p0); + Register AddrReg = MRI.createGenericVirtualRegister(p0); MIRBuilder.buildGEP(AddrReg, SPReg, OffsetReg); MPO = MachinePointerInfo::getStack(MIRBuilder.getMF(), Offset); return AddrReg; } - void assignValueToReg(unsigned ValVReg, unsigned PhysReg, + void assignValueToReg(Register ValVReg, Register PhysReg, CCValAssign &VA) override { MIB.addUse(PhysReg, RegState::Implicit); - unsigned ExtReg = extendRegister(ValVReg, VA); + Register ExtReg = extendRegister(ValVReg, VA); MIRBuilder.buildCopy(PhysReg, ExtReg); } - void assignValueToAddress(unsigned ValVReg, unsigned Addr, uint64_t Size, + void assignValueToAddress(Register ValVReg, Register Addr, uint64_t Size, MachinePointerInfo &MPO, CCValAssign &VA) override { if (VA.getLocInfo() ==
CCValAssign::LocInfo::AExt) { Size = VA.getLocVT().getSizeInBits() / 8; @@ -263,7 +263,7 @@ return false; } - unsigned CurVReg = VRegs[i]; + Register CurVReg = VRegs[i]; ArgInfo CurArgInfo = ArgInfo{CurVReg, SplitEVTs[i].getTypeForEVT(Ctx)}; setArgFlags(CurArgInfo, AttributeList::ReturnIndex, DL, F); @@ -367,7 +367,7 @@ setArgFlags(OrigArg, i + AttributeList::FirstArgIndex, DL, F); bool Split = false; LLT Ty = MRI.getType(VRegs[i]); - unsigned Dst = VRegs[i]; + Register Dst = VRegs[i]; splitToValueTypes(OrigArg, SplitArgs, DL, MRI, F.getCallingConv(), [&](unsigned Reg, uint64_t Offset) { @@ -436,7 +436,7 @@ SmallVector SplitArgs; for (auto &OrigArg : OrigArgs) { splitToValueTypes(OrigArg, SplitArgs, DL, MRI, CallConv, - [&](unsigned Reg, uint64_t Offset) { + [&](Register Reg, uint64_t Offset) { MIRBuilder.buildExtract(Reg, OrigArg.Reg, Offset); }); // AAPCS requires that we zero-extend i1 to 8 bits by the caller. @@ -512,7 +512,7 @@ if (SwiftErrorVReg) { MIB.addDef(AArch64::X21, RegState::Implicit); - MIRBuilder.buildCopy(SwiftErrorVReg, AArch64::X21); + MIRBuilder.buildCopy(SwiftErrorVReg, Register(AArch64::X21)); } CallSeqStart.addImm(Handler.StackSize).addImm(0); Index: lib/Target/AArch64/AArch64InstructionSelector.cpp =================================================================== --- lib/Target/AArch64/AArch64InstructionSelector.cpp +++ lib/Target/AArch64/AArch64InstructionSelector.cpp @@ -74,7 +74,7 @@ // returned via 'Dst'. MachineInstr *emitScalarToVector(unsigned EltSize, const TargetRegisterClass *DstRC, - unsigned Scalar, + Register Scalar, MachineIRBuilder &MIRBuilder) const; /// Emit a lane insert into \p DstReg, or a new vector register if None is @@ -83,8 +83,8 @@ /// The lane inserted into is defined by \p LaneIdx. The vector source /// register is given by \p SrcReg. The register containing the element is /// given by \p EltReg. - MachineInstr *emitLaneInsert(Optional DstReg, unsigned SrcReg, - unsigned EltReg, unsigned LaneIdx, + MachineInstr *emitLaneInsert(Optional DstReg, Register SrcReg, + Register EltReg, unsigned LaneIdx, const RegisterBank &RB, MachineIRBuilder &MIRBuilder) const; bool selectInsertElt(MachineInstr &I, MachineRegisterInfo &MRI) const; @@ -110,12 +110,12 @@ MachineIRBuilder &MIRBuilder) const; // Emit a vector concat operation. - MachineInstr *emitVectorConcat(Optional Dst, unsigned Op1, - unsigned Op2, + MachineInstr *emitVectorConcat(Optional Dst, Register Op1, + Register Op2, MachineIRBuilder &MIRBuilder) const; - MachineInstr *emitExtractVectorElt(Optional DstReg, + MachineInstr *emitExtractVectorElt(Optional DstReg, const RegisterBank &DstRB, LLT ScalarTy, - unsigned VecReg, unsigned LaneIdx, + Register VecReg, unsigned LaneIdx, MachineIRBuilder &MIRBuilder) const; /// Helper function for selecting G_FCONSTANT. If the G_FCONSTANT can be @@ -125,7 +125,7 @@ MachineRegisterInfo &MRI) const; /// Emit a CSet for a compare. 
- MachineInstr *emitCSetForICMP(unsigned DefReg, unsigned Pred, + MachineInstr *emitCSetForICMP(Register DefReg, unsigned Pred, MachineIRBuilder &MIRBuilder) const; ComplexRendererFns selectArithImmed(MachineOperand &Root) const; @@ -858,7 +858,7 @@ bool AArch64InstructionSelector::selectCompareBranch( MachineInstr &I, MachineFunction &MF, MachineRegisterInfo &MRI) const { - const unsigned CondReg = I.getOperand(0).getReg(); + const Register CondReg = I.getOperand(0).getReg(); MachineBasicBlock *DestMBB = I.getOperand(1).getMBB(); MachineInstr *CCMI = MRI.getVRegDef(CondReg); if (CCMI->getOpcode() == TargetOpcode::G_TRUNC) @@ -866,8 +866,8 @@ if (CCMI->getOpcode() != TargetOpcode::G_ICMP) return false; - unsigned LHS = CCMI->getOperand(2).getReg(); - unsigned RHS = CCMI->getOperand(3).getReg(); + Register LHS = CCMI->getOperand(2).getReg(); + Register RHS = CCMI->getOperand(3).getReg(); if (!getConstantVRegVal(RHS, MRI)) std::swap(RHS, LHS); @@ -904,10 +904,10 @@ bool AArch64InstructionSelector::selectVectorSHL( MachineInstr &I, MachineRegisterInfo &MRI) const { assert(I.getOpcode() == TargetOpcode::G_SHL); - unsigned DstReg = I.getOperand(0).getReg(); + Register DstReg = I.getOperand(0).getReg(); const LLT Ty = MRI.getType(DstReg); - unsigned Src1Reg = I.getOperand(1).getReg(); - unsigned Src2Reg = I.getOperand(2).getReg(); + Register Src1Reg = I.getOperand(1).getReg(); + Register Src2Reg = I.getOperand(2).getReg(); if (!Ty.isVector()) return false; @@ -932,10 +932,10 @@ bool AArch64InstructionSelector::selectVectorASHR( MachineInstr &I, MachineRegisterInfo &MRI) const { assert(I.getOpcode() == TargetOpcode::G_ASHR); - unsigned DstReg = I.getOperand(0).getReg(); + Register DstReg = I.getOperand(0).getReg(); const LLT Ty = MRI.getType(DstReg); - unsigned Src1Reg = I.getOperand(1).getReg(); - unsigned Src2Reg = I.getOperand(2).getReg(); + Register Src1Reg = I.getOperand(1).getReg(); + Register Src2Reg = I.getOperand(2).getReg(); if (!Ty.isVector()) return false; @@ -977,9 +977,9 @@ bool AArch64InstructionSelector::selectVaStartDarwin( MachineInstr &I, MachineFunction &MF, MachineRegisterInfo &MRI) const { AArch64FunctionInfo *FuncInfo = MF.getInfo(); - unsigned ListReg = I.getOperand(0).getReg(); + Register ListReg = I.getOperand(0).getReg(); - unsigned ArgsAddrReg = MRI.createVirtualRegister(&AArch64::GPR64RegClass); + Register ArgsAddrReg = MRI.createVirtualRegister(&AArch64::GPR64RegClass); auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AArch64::ADDXri)) @@ -1033,7 +1033,7 @@ constrainSelectedInstRegOperands(*MovI, TII, TRI, RBI); return DstReg; }; - unsigned DstReg = BuildMovK(MovZ.getReg(0), + Register DstReg = BuildMovK(MovZ.getReg(0), AArch64II::MO_G1 | AArch64II::MO_NC, 16, 0); DstReg = BuildMovK(DstReg, AArch64II::MO_G2 | AArch64II::MO_NC, 32, 0); BuildMovK(DstReg, AArch64II::MO_G3, 48, I.getOperand(0).getReg()); @@ -1058,7 +1058,7 @@ return constrainSelectedInstRegOperands(I, TII, TRI, RBI); if (Opcode == TargetOpcode::PHI || Opcode == TargetOpcode::G_PHI) { - const unsigned DefReg = I.getOperand(0).getReg(); + const Register DefReg = I.getOperand(0).getReg(); const LLT DefTy = MRI.getType(DefReg); const TargetRegisterClass *DefRC = nullptr; @@ -1119,7 +1119,7 @@ return false; } - const unsigned CondReg = I.getOperand(0).getReg(); + const Register CondReg = I.getOperand(0).getReg(); MachineBasicBlock *DestMBB = I.getOperand(1).getMBB(); // Speculation tracking/SLH assumes that optimized TB(N)Z/CB(N)Z @@ -1161,7 +1161,7 @@ case TargetOpcode::G_BSWAP: { // Handle 
vector types for G_BSWAP directly. - unsigned DstReg = I.getOperand(0).getReg(); + Register DstReg = I.getOperand(0).getReg(); LLT DstTy = MRI.getType(DstReg); // We should only get vector types here; everything else is handled by the @@ -1204,7 +1204,7 @@ const LLT s64 = LLT::scalar(64); const LLT p0 = LLT::pointer(0, 64); - const unsigned DefReg = I.getOperand(0).getReg(); + const Register DefReg = I.getOperand(0).getReg(); const LLT DefTy = MRI.getType(DefReg); const unsigned DefSize = DefTy.getSizeInBits(); const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI); @@ -1261,7 +1261,7 @@ return true; // Nope. Emit a copy and use a normal mov instead. - const unsigned DefGPRReg = MRI.createVirtualRegister(&GPRRC); + const Register DefGPRReg = MRI.createVirtualRegister(&GPRRC); MachineOperand &RegOp = I.getOperand(0); RegOp.setReg(DefGPRReg); MIB.setInsertPt(MIB.getMBB(), std::next(I.getIterator())); @@ -1308,7 +1308,7 @@ return constrainSelectedInstRegOperands(I, TII, TRI, RBI); } - unsigned DstReg = MRI.createGenericVirtualRegister(LLT::scalar(64)); + Register DstReg = MRI.createGenericVirtualRegister(LLT::scalar(64)); MIB.setInsertPt(MIB.getMBB(), std::next(I.getIterator())); MIB.buildInstr(TargetOpcode::COPY, {I.getOperand(0).getReg()}, {}) .addReg(DstReg, 0, AArch64::sub_32); @@ -1340,7 +1340,7 @@ return constrainSelectedInstRegOperands(I, TII, TRI, RBI); } - unsigned SrcReg = MRI.createGenericVirtualRegister(LLT::scalar(64)); + Register SrcReg = MRI.createGenericVirtualRegister(LLT::scalar(64)); BuildMI(MBB, I.getIterator(), I.getDebugLoc(), TII.get(AArch64::SUBREG_TO_REG)) .addDef(SrcReg) @@ -1418,7 +1418,7 @@ } unsigned MemSizeInBits = MemOp.getSize() * 8; - const unsigned PtrReg = I.getOperand(1).getReg(); + const Register PtrReg = I.getOperand(1).getReg(); #ifndef NDEBUG const RegisterBank &PtrRB = *RBI.getRegBank(PtrReg, MRI, TRI); // Sanity-check the pointer register. @@ -1428,7 +1428,7 @@ "Load/Store pointer operand isn't a pointer"); #endif - const unsigned ValReg = I.getOperand(0).getReg(); + const Register ValReg = I.getOperand(0).getReg(); const RegisterBank &RB = *RBI.getRegBank(ValReg, MRI, TRI); const unsigned NewOpc = @@ -1479,8 +1479,8 @@ return false; // If we have a ZEXTLOAD then change the load's type to be a narrower reg //and zero_extend with SUBREG_TO_REG. - unsigned LdReg = MRI.createVirtualRegister(&AArch64::GPR32RegClass); - unsigned DstReg = I.getOperand(0).getReg(); + Register LdReg = MRI.createVirtualRegister(&AArch64::GPR32RegClass); + Register DstReg = I.getOperand(0).getReg(); I.getOperand(0).setReg(LdReg); MIB.setInsertPt(MIB.getMBB(), std::next(I.getIterator())); @@ -1501,7 +1501,7 @@ if (unsupportedBinOp(I, RBI, MRI, TRI)) return false; - const unsigned DefReg = I.getOperand(0).getReg(); + const Register DefReg = I.getOperand(0).getReg(); const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI); if (RB.getID() != AArch64::GPRRegBankID) { @@ -1546,7 +1546,7 @@ const unsigned OpSize = Ty.getSizeInBits(); - const unsigned DefReg = I.getOperand(0).getReg(); + const Register DefReg = I.getOperand(0).getReg(); const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI); const unsigned NewOpc = selectBinaryOp(I.getOpcode(), RB.getID(), OpSize); @@ -1591,7 +1591,7 @@ // this case, we want to increment when carry is set. 
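The carry materialization this comment describes leans on CSINC's semantics: CSINC Wd, Wn, Wm, cond yields cond ? Wn : Wm + 1, so with both sources tied to the zero register it produces cond ? 0 : 1. Passing the inverted condition, as the getInvertedCondCode call in the CSINC just below does (emitCSetForICMP uses the same trick later), therefore gives the usual 0/1 "cset". A two-line model:

    // CSINC Wd, WZR, WZR, cond computes cond ? 0 : 0 + 1.
    inline unsigned csincZeroZero(bool Cond) { return Cond ? 0u : 1u; }
    // Materializing predicate P as 0/1 means passing the inverted condition.
    inline unsigned cset(bool P) { return csincZeroZero(!P); } // P ? 1 : 0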
auto CsetMI = MIRBuilder .buildInstr(AArch64::CSINCWr, {I.getOperand(1).getReg()}, - {AArch64::WZR, AArch64::WZR}) + {Register(AArch64::WZR), Register(AArch64::WZR)}) .addImm(getInvertedCondCode(AArch64CC::HS)); constrainSelectedInstRegOperands(*CsetMI, TII, TRI, RBI); I.eraseFromParent(); @@ -1614,8 +1614,8 @@ const LLT DstTy = MRI.getType(I.getOperand(0).getReg()); const LLT SrcTy = MRI.getType(I.getOperand(1).getReg()); - const unsigned DstReg = I.getOperand(0).getReg(); - const unsigned SrcReg = I.getOperand(1).getReg(); + const Register DstReg = I.getOperand(0).getReg(); + const Register SrcReg = I.getOperand(1).getReg(); const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI); const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI); @@ -1672,8 +1672,8 @@ } case TargetOpcode::G_ANYEXT: { - const unsigned DstReg = I.getOperand(0).getReg(); - const unsigned SrcReg = I.getOperand(1).getReg(); + const Register DstReg = I.getOperand(0).getReg(); + const Register SrcReg = I.getOperand(1).getReg(); const RegisterBank &RBDst = *RBI.getRegBank(DstReg, MRI, TRI); if (RBDst.getID() != AArch64::GPRRegBankID) { @@ -1704,7 +1704,7 @@ // At this point G_ANYEXT is just like a plain COPY, but we need // to explicitly form the 64-bit value if any. if (DstSize > 32) { - unsigned ExtSrc = MRI.createVirtualRegister(&AArch64::GPR64allRegClass); + Register ExtSrc = MRI.createVirtualRegister(&AArch64::GPR64allRegClass); BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::SUBREG_TO_REG)) .addDef(ExtSrc) .addImm(0) @@ -1721,8 +1721,8 @@ const LLT DstTy = MRI.getType(I.getOperand(0).getReg()), SrcTy = MRI.getType(I.getOperand(1).getReg()); const bool isSigned = Opcode == TargetOpcode::G_SEXT; - const unsigned DefReg = I.getOperand(0).getReg(); - const unsigned SrcReg = I.getOperand(1).getReg(); + const Register DefReg = I.getOperand(0).getReg(); + const Register SrcReg = I.getOperand(1).getReg(); const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI); if (RB.getID() != AArch64::GPRRegBankID) { @@ -1740,7 +1740,7 @@ return false; } - const unsigned SrcXReg = + const Register SrcXReg = MRI.createVirtualRegister(&AArch64::GPR64RegClass); BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::SUBREG_TO_REG)) .addDef(SrcXReg) @@ -1808,9 +1808,9 @@ return false; } - const unsigned CondReg = I.getOperand(1).getReg(); - const unsigned TReg = I.getOperand(2).getReg(); - const unsigned FReg = I.getOperand(3).getReg(); + const Register CondReg = I.getOperand(1).getReg(); + const Register TReg = I.getOperand(2).getReg(); + const Register FReg = I.getOperand(3).getReg(); // If we have a floating-point result, then we should use a floating point // select instead of an integer select. @@ -1820,7 +1820,7 @@ if (IsFP && tryOptSelect(I)) return true; - unsigned CSelOpc = selectSelectOpc(I, MRI, RBI); + Register CSelOpc = selectSelectOpc(I, MRI, RBI); MachineInstr &TstMI = *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::ANDSWri)) .addDef(AArch64::WZR) @@ -1850,7 +1850,7 @@ } unsigned CmpOpc = 0; - unsigned ZReg = 0; + Register ZReg; // Check if this compare can be represented as a cmn, and perform any // necessary transformations to do so. 
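On the cmn rewrite this comment refers to: a compare against a negative immediate can be flipped into a compare-negative, because SUBS x, -C and ADDS x, C produce the same result. A hypothetical sketch of the immediate case only; the in-tree logic is more careful, limiting the rewrite to predicates whose flag uses survive it and to immediates that still encode:

    #include <cstdint>

    struct CmpLowering {
      bool UseCmn;  // emit ADDS (cmn) instead of SUBS (cmp)
      int64_t Imm;  // immediate actually encoded
    };

    CmpLowering lowerCmpImm(int64_t C) {
      if (C < 0 && C != INT64_MIN) // -INT64_MIN is not representable
        return {true, -C};
      return {false, C};
    }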
@@ -1921,8 +1921,8 @@ if (CmpOpc != AArch64::FCMPSri && CmpOpc != AArch64::FCMPDri) CmpMI = CmpMI.addUse(I.getOperand(3).getReg()); - const unsigned DefReg = I.getOperand(0).getReg(); - unsigned Def1Reg = DefReg; + const Register DefReg = I.getOperand(0).getReg(); + Register Def1Reg = DefReg; if (CC2 != AArch64CC::AL) Def1Reg = MRI.createVirtualRegister(&AArch64::GPR32RegClass); @@ -1934,7 +1934,7 @@ .addImm(getInvertedCondCode(CC1)); if (CC2 != AArch64CC::AL) { - unsigned Def2Reg = MRI.createVirtualRegister(&AArch64::GPR32RegClass); + Register Def2Reg = MRI.createVirtualRegister(&AArch64::GPR32RegClass); MachineInstr &CSet2MI = *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::CSINCWr)) .addDef(Def2Reg) @@ -1965,7 +1965,7 @@ case TargetOpcode::G_IMPLICIT_DEF: { I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF)); const LLT DstTy = MRI.getType(I.getOperand(0).getReg()); - const unsigned DstReg = I.getOperand(0).getReg(); + const Register DstReg = I.getOperand(0).getReg(); const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI); const TargetRegisterClass *DstRC = getRegClassForTypeOnBank(DstTy, DstRB, RBI); @@ -2125,10 +2125,10 @@ bool AArch64InstructionSelector::selectVectorICmp( MachineInstr &I, MachineRegisterInfo &MRI) const { - unsigned DstReg = I.getOperand(0).getReg(); + Register DstReg = I.getOperand(0).getReg(); LLT DstTy = MRI.getType(DstReg); - unsigned SrcReg = I.getOperand(2).getReg(); - unsigned Src2Reg = I.getOperand(3).getReg(); + Register SrcReg = I.getOperand(2).getReg(); + Register Src2Reg = I.getOperand(3).getReg(); LLT SrcTy = MRI.getType(SrcReg); unsigned SrcEltSize = SrcTy.getElementType().getSizeInBits(); @@ -2296,7 +2296,7 @@ } MachineInstr *AArch64InstructionSelector::emitScalarToVector( - unsigned EltSize, const TargetRegisterClass *DstRC, unsigned Scalar, + unsigned EltSize, const TargetRegisterClass *DstRC, Register Scalar, MachineIRBuilder &MIRBuilder) const { auto Undef = MIRBuilder.buildInstr(TargetOpcode::IMPLICIT_DEF, {DstRC}, {}); @@ -2339,14 +2339,14 @@ return false; auto *DstRC = &AArch64::GPR64RegClass; - unsigned SubToRegDef = MRI.createVirtualRegister(DstRC); + Register SubToRegDef = MRI.createVirtualRegister(DstRC); MachineInstr &SubRegMI = *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(TargetOpcode::SUBREG_TO_REG)) .addDef(SubToRegDef) .addImm(0) .addUse(I.getOperand(1).getReg()) .addImm(AArch64::sub_32); - unsigned SubToRegDef2 = MRI.createVirtualRegister(DstRC); + Register SubToRegDef2 = MRI.createVirtualRegister(DstRC); // Need to anyext the second scalar before we can use bfm MachineInstr &SubRegMI2 = *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(TargetOpcode::SUBREG_TO_REG)) @@ -2394,8 +2394,8 @@ } MachineInstr *AArch64InstructionSelector::emitExtractVectorElt( - Optional DstReg, const RegisterBank &DstRB, LLT ScalarTy, - unsigned VecReg, unsigned LaneIdx, MachineIRBuilder &MIRBuilder) const { + Optional DstReg, const RegisterBank &DstRB, LLT ScalarTy, + Register VecReg, unsigned LaneIdx, MachineIRBuilder &MIRBuilder) const { MachineRegisterInfo &MRI = *MIRBuilder.getMRI(); unsigned CopyOpc = 0; unsigned ExtractSubReg = 0; @@ -2422,7 +2422,7 @@ } // The register that we're going to copy into. - unsigned InsertReg = VecReg; + Register InsertReg = VecReg; if (!DstReg) DstReg = MRI.createVirtualRegister(DstRC); // If the lane index is 0, we just use a subregister COPY. 
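The lane-0 special case noted above exists because lane 0 of a vector register occupies its low bits, so that extract is an ordinary COPY from a subregister rather than a lane-copy instruction. The slice names in this sketch are AArch64 conventions assumed for illustration, not shown in the hunk itself:

    // Lane 0 of a vector register is its low subregister slice.
    const char *lane0SubRegForScalar(unsigned Bits) {
      switch (Bits) {
      case 32: return "ssub";  // 32-bit element: S slice
      case 64: return "dsub";  // 64-bit element: D slice
      default: return nullptr; // no slice assumed here for other sizes
      }
    }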
@@ -2457,9 +2457,9 @@ MachineInstr &I, MachineRegisterInfo &MRI) const { assert(I.getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT && "unexpected opcode!"); - unsigned DstReg = I.getOperand(0).getReg(); + Register DstReg = I.getOperand(0).getReg(); const LLT NarrowTy = MRI.getType(DstReg); - const unsigned SrcReg = I.getOperand(1).getReg(); + const Register SrcReg = I.getOperand(1).getReg(); const LLT WideTy = MRI.getType(SrcReg); (void)WideTy; assert(WideTy.getSizeInBits() >= NarrowTy.getSizeInBits() && @@ -2496,7 +2496,7 @@ bool AArch64InstructionSelector::selectSplitVectorUnmerge( MachineInstr &I, MachineRegisterInfo &MRI) const { unsigned NumElts = I.getNumOperands() - 1; - unsigned SrcReg = I.getOperand(NumElts).getReg(); + Register SrcReg = I.getOperand(NumElts).getReg(); const LLT NarrowTy = MRI.getType(I.getOperand(0).getReg()); const LLT SrcTy = MRI.getType(SrcReg); @@ -2513,7 +2513,7 @@ const RegisterBank &DstRB = *RBI.getRegBank(I.getOperand(0).getReg(), MRI, TRI); for (unsigned OpIdx = 0; OpIdx < NumElts; ++OpIdx) { - unsigned Dst = I.getOperand(OpIdx).getReg(); + Register Dst = I.getOperand(OpIdx).getReg(); MachineInstr *Extract = emitExtractVectorElt(Dst, DstRB, NarrowTy, SrcReg, OpIdx, MIB); if (!Extract) @@ -2541,7 +2541,7 @@ // The last operand is the vector source register, and every other operand is // a register to unpack into. unsigned NumElts = I.getNumOperands() - 1; - unsigned SrcReg = I.getOperand(NumElts).getReg(); + Register SrcReg = I.getOperand(NumElts).getReg(); const LLT NarrowTy = MRI.getType(I.getOperand(0).getReg()); const LLT WideTy = MRI.getType(SrcReg); (void)WideTy; @@ -2565,7 +2565,7 @@ MachineBasicBlock &MBB = *I.getParent(); // Stores the registers we'll be copying from. - SmallVector InsertRegs; + SmallVector InsertRegs; // We'll use the first register twice, so we only need NumElts-1 registers. unsigned NumInsertRegs = NumElts - 1; @@ -2574,18 +2574,18 @@ // directly. Otherwise, we need to do a bit of setup with some subregister // inserts. if (NarrowTy.getSizeInBits() * NumElts == 128) { - InsertRegs = SmallVector(NumInsertRegs, SrcReg); + InsertRegs = SmallVector(NumInsertRegs, SrcReg); } else { // No. We have to perform subregister inserts. For each insert, create an // implicit def and a subregister insert, and save the register we create. for (unsigned Idx = 0; Idx < NumInsertRegs; ++Idx) { - unsigned ImpDefReg = MRI.createVirtualRegister(&AArch64::FPR128RegClass); + Register ImpDefReg = MRI.createVirtualRegister(&AArch64::FPR128RegClass); MachineInstr &ImpDefMI = *BuildMI(MBB, I, I.getDebugLoc(), TII.get(TargetOpcode::IMPLICIT_DEF), ImpDefReg); // Now, create the subregister insert from SrcReg. - unsigned InsertReg = MRI.createVirtualRegister(&AArch64::FPR128RegClass); + Register InsertReg = MRI.createVirtualRegister(&AArch64::FPR128RegClass); MachineInstr &InsMI = *BuildMI(MBB, I, I.getDebugLoc(), TII.get(TargetOpcode::INSERT_SUBREG), InsertReg) @@ -2605,15 +2605,15 @@ // create the copies. // // Perform the first copy separately as a subregister copy. - unsigned CopyTo = I.getOperand(0).getReg(); + Register CopyTo = I.getOperand(0).getReg(); auto FirstCopy = MIB.buildInstr(TargetOpcode::COPY, {CopyTo}, {}) .addReg(InsertRegs[0], 0, ExtractSubReg); constrainSelectedInstRegOperands(*FirstCopy, TII, TRI, RBI); // Now, perform the remaining copies as vector lane copies. 
unsigned LaneIdx = 1; - for (unsigned InsReg : InsertRegs) { - unsigned CopyTo = I.getOperand(LaneIdx).getReg(); + for (Register InsReg : InsertRegs) { + Register CopyTo = I.getOperand(LaneIdx).getReg(); MachineInstr &CopyInst = *BuildMI(MBB, I, I.getDebugLoc(), TII.get(CopyOpc), CopyTo) .addUse(InsReg) @@ -2641,9 +2641,9 @@ MachineInstr &I, MachineRegisterInfo &MRI) const { assert(I.getOpcode() == TargetOpcode::G_CONCAT_VECTORS && "Unexpected opcode"); - unsigned Dst = I.getOperand(0).getReg(); - unsigned Op1 = I.getOperand(1).getReg(); - unsigned Op2 = I.getOperand(2).getReg(); + Register Dst = I.getOperand(0).getReg(); + Register Op1 = I.getOperand(1).getReg(); + Register Op2 = I.getOperand(2).getReg(); MachineIRBuilder MIRBuilder(I); MachineInstr *ConcatMI = emitVectorConcat(Dst, Op1, Op2, MIRBuilder); if (!ConcatMI) @@ -2759,7 +2759,7 @@ } MachineInstr *AArch64InstructionSelector::emitVectorConcat( - Optional Dst, unsigned Op1, unsigned Op2, + Optional Dst, Register Op1, Register Op2, MachineIRBuilder &MIRBuilder) const { // We implement a vector concat by: // 1. Use scalar_to_vector to insert the lower vector into the larger dest @@ -2852,14 +2852,14 @@ } MachineInstr * -AArch64InstructionSelector::emitCSetForICMP(unsigned DefReg, unsigned Pred, +AArch64InstructionSelector::emitCSetForICMP(Register DefReg, unsigned Pred, MachineIRBuilder &MIRBuilder) const { // CSINC increments the result when the predicate is false. Invert it. const AArch64CC::CondCode InvCC = changeICMPPredToAArch64CC( CmpInst::getInversePredicate((CmpInst::Predicate)Pred)); auto I = MIRBuilder - .buildInstr(AArch64::CSINCWr, {DefReg}, {AArch64::WZR, AArch64::WZR}) + .buildInstr(AArch64::CSINCWr, {DefReg}, {Register(AArch64::WZR), Register(AArch64::WZR)}) .addImm(InvCC); constrainSelectedInstRegOperands(*I, TII, TRI, RBI); return &*I; @@ -2963,7 +2963,7 @@ // cmn z, y // Helper lambda to find the def. - auto FindDef = [&](unsigned VReg) { + auto FindDef = [&](Register VReg) { MachineInstr *Def = MRI.getVRegDef(VReg); while (Def) { if (Def->getOpcode() != TargetOpcode::COPY) @@ -3043,7 +3043,7 @@ (MRI.getType(I.getOperand(2).getReg()).getSizeInBits() == 32); auto ImmFns = selectArithImmed(I.getOperand(3)); unsigned Opc = OpcTable[Is32Bit][ImmFns.hasValue()]; - unsigned ZReg = Is32Bit ? AArch64::WZR : AArch64::XZR; + Register ZReg = Is32Bit ? AArch64::WZR : AArch64::XZR; auto CmpMI = MIRBuilder.buildInstr(Opc, {ZReg}, {I.getOperand(2).getReg()}); @@ -3097,7 +3097,7 @@ if (!UndefMI) return false; // Match the scalar being splatted. - unsigned ScalarReg = InsMI->getOperand(2).getReg(); + Register ScalarReg = InsMI->getOperand(2).getReg(); const RegisterBank *ScalarRB = RBI.getRegBank(ScalarReg, MRI, TRI); // Match the index constant 0. 
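What is being matched around this point is a splat: an insert of one scalar at index 0 of an undef vector, shuffled with an all-zero mask, reads lane 0 in every output lane and can therefore become a single DUP-style broadcast. A model of the mask test, with negative entries standing for undef lanes:

    #include <vector>

    // True if every defined mask entry reads lane 0 of the first operand.
    bool isZeroLaneSplatMask(const std::vector<int> &Mask) {
      for (int Elt : Mask)
        if (Elt > 0) // some lane other than 0 (undef entries are < 0)
          return false;
      return !Mask.empty();
    }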
int64_t Index = 0; @@ -3158,9 +3158,9 @@ if (tryOptVectorShuffle(I)) return true; const LLT DstTy = MRI.getType(I.getOperand(0).getReg()); - unsigned Src1Reg = I.getOperand(1).getReg(); + Register Src1Reg = I.getOperand(1).getReg(); const LLT Src1Ty = MRI.getType(Src1Reg); - unsigned Src2Reg = I.getOperand(2).getReg(); + Register Src2Reg = I.getOperand(2).getReg(); const LLT Src2Ty = MRI.getType(Src2Reg); MachineBasicBlock &MBB = *I.getParent(); @@ -3254,7 +3254,7 @@ } MachineInstr *AArch64InstructionSelector::emitLaneInsert( - Optional DstReg, unsigned SrcReg, unsigned EltReg, + Optional DstReg, Register SrcReg, Register EltReg, unsigned LaneIdx, const RegisterBank &RB, MachineIRBuilder &MIRBuilder) const { MachineInstr *InsElt = nullptr; @@ -3289,12 +3289,12 @@ assert(I.getOpcode() == TargetOpcode::G_INSERT_VECTOR_ELT); // Get information on the destination. - unsigned DstReg = I.getOperand(0).getReg(); + Register DstReg = I.getOperand(0).getReg(); const LLT DstTy = MRI.getType(DstReg); unsigned VecSize = DstTy.getSizeInBits(); // Get information on the element we want to insert into the destination. - unsigned EltReg = I.getOperand(2).getReg(); + Register EltReg = I.getOperand(2).getReg(); const LLT EltTy = MRI.getType(EltReg); unsigned EltSize = EltTy.getSizeInBits(); if (EltSize < 16 || EltSize > 64) @@ -3302,14 +3302,14 @@ // Find the definition of the index. Bail out if it's not defined by a // G_CONSTANT. - unsigned IdxReg = I.getOperand(3).getReg(); + Register IdxReg = I.getOperand(3).getReg(); auto VRegAndVal = getConstantVRegValWithLookThrough(IdxReg, MRI); if (!VRegAndVal) return false; unsigned LaneIdx = VRegAndVal->Value; // Perform the lane insert. - unsigned SrcReg = I.getOperand(1).getReg(); + Register SrcReg = I.getOperand(1).getReg(); const RegisterBank &EltRB = *RBI.getRegBank(EltReg, MRI, TRI); MachineIRBuilder MIRBuilder(I); @@ -3332,7 +3332,7 @@ if (VecSize < 128) { // If we had to widen to perform the insert, then we have to demote back to // the original size to get the result we want. - unsigned DemoteVec = InsMI->getOperand(0).getReg(); + Register DemoteVec = InsMI->getOperand(0).getReg(); const TargetRegisterClass *RC = getMinClassForRegBank(*RBI.getRegBank(DemoteVec, MRI, TRI), VecSize); if (RC != &AArch64::FPR32RegClass && RC != &AArch64::FPR64RegClass) { @@ -3380,7 +3380,7 @@ if (!ScalarToVec) return false; - unsigned DstVec = ScalarToVec->getOperand(0).getReg(); + Register DstVec = ScalarToVec->getOperand(0).getReg(); unsigned DstSize = DstTy.getSizeInBits(); // Keep track of the last MI we inserted. 
Later on, we might be able to save @@ -3416,8 +3416,8 @@ return false; } - unsigned Reg = MRI.createVirtualRegister(RC); - unsigned DstReg = I.getOperand(0).getReg(); + Register Reg = MRI.createVirtualRegister(RC); + Register DstReg = I.getOperand(0).getReg(); MIRBuilder.buildInstr(TargetOpcode::COPY, {DstReg}, {}) .addReg(DstVec, 0, SubReg); @@ -3478,17 +3478,17 @@ MIRBuilder.buildInstr(AArch64::BRK, {}, {}).addImm(1); break; case Intrinsic::aarch64_stlxr: - unsigned StatReg = I.getOperand(0).getReg(); + Register StatReg = I.getOperand(0).getReg(); assert(RBI.getSizeInBits(StatReg, MRI, TRI) == 32 && "Status register must be 32 bits!"); - unsigned SrcReg = I.getOperand(2).getReg(); + Register SrcReg = I.getOperand(2).getReg(); if (RBI.getSizeInBits(SrcReg, MRI, TRI) != 64) { LLVM_DEBUG(dbgs() << "Only support 64-bit sources right now.\n"); return false; } - unsigned PtrReg = I.getOperand(3).getReg(); + Register PtrReg = I.getOperand(3).getReg(); assert(MRI.getType(PtrReg).isPointer() && "Expected pointer operand"); // Expect only one memory operand. @@ -3520,8 +3520,8 @@ default: break; case Intrinsic::aarch64_crypto_sha1h: - unsigned DstReg = I.getOperand(0).getReg(); - unsigned SrcReg = I.getOperand(2).getReg(); + Register DstReg = I.getOperand(0).getReg(); + Register SrcReg = I.getOperand(2).getReg(); // FIXME: Should this be an assert? if (MRI.getType(DstReg).getSizeInBits() != 32 || Index: lib/Target/AMDGPU/AMDGPUCallLowering.h =================================================================== --- lib/Target/AMDGPU/AMDGPUCallLowering.h +++ lib/Target/AMDGPU/AMDGPUCallLowering.h @@ -22,7 +22,7 @@ class AMDGPUTargetLowering; class AMDGPUCallLowering: public CallLowering { - unsigned lowerParameterPtr(MachineIRBuilder &MIRBuilder, Type *ParamTy, + Register lowerParameterPtr(MachineIRBuilder &MIRBuilder, Type *ParamTy, uint64_t Offset) const; void lowerParameter(MachineIRBuilder &MIRBuilder, Type *ParamTy, Index: lib/Target/AMDGPU/AMDGPUCallLowering.cpp =================================================================== --- lib/Target/AMDGPU/AMDGPUCallLowering.cpp +++ lib/Target/AMDGPU/AMDGPUCallLowering.cpp @@ -37,17 +37,17 @@ MachineInstrBuilder MIB; - unsigned getStackAddress(uint64_t Size, int64_t Offset, + Register getStackAddress(uint64_t Size, int64_t Offset, MachinePointerInfo &MPO) override { llvm_unreachable("not implemented"); } - void assignValueToAddress(unsigned ValVReg, unsigned Addr, uint64_t Size, + void assignValueToAddress(Register ValVReg, Register Addr, uint64_t Size, MachinePointerInfo &MPO, CCValAssign &VA) override { llvm_unreachable("not implemented"); } - void assignValueToReg(unsigned ValVReg, unsigned PhysReg, + void assignValueToReg(Register ValVReg, Register PhysReg, CCValAssign &VA) override { MIB.addUse(PhysReg); MIRBuilder.buildCopy(PhysReg, ValVReg); @@ -111,7 +111,7 @@ return true; } -unsigned AMDGPUCallLowering::lowerParameterPtr(MachineIRBuilder &MIRBuilder, +Register AMDGPUCallLowering::lowerParameterPtr(MachineIRBuilder &MIRBuilder, Type *ParamTy, uint64_t Offset) const { @@ -122,12 +122,12 @@ const DataLayout &DL = F.getParent()->getDataLayout(); PointerType *PtrTy = PointerType::get(ParamTy, AMDGPUAS::CONSTANT_ADDRESS); LLT PtrType = getLLTForType(*PtrTy, DL); - unsigned DstReg = MRI.createGenericVirtualRegister(PtrType); - unsigned KernArgSegmentPtr = + Register DstReg = MRI.createGenericVirtualRegister(PtrType); + Register KernArgSegmentPtr = MFI->getPreloadedReg(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR); - unsigned KernArgSegmentVReg = 
MRI.getLiveInVirtReg(KernArgSegmentPtr); + Register KernArgSegmentVReg = MRI.getLiveInVirtReg(KernArgSegmentPtr); - unsigned OffsetReg = MRI.createGenericVirtualRegister(LLT::scalar(64)); + Register OffsetReg = MRI.createGenericVirtualRegister(LLT::scalar(64)); MIRBuilder.buildConstant(OffsetReg, Offset); MIRBuilder.buildGEP(DstReg, KernArgSegmentVReg, OffsetReg); @@ -156,7 +156,7 @@ MIRBuilder.buildLoad(DstReg, PtrReg, *MMO); } -static unsigned findFirstFreeSGPR(CCState &CCInfo) { +static Register findFirstFreeSGPR(CCState &CCInfo) { unsigned NumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs(); for (unsigned Reg = 0; Reg < NumSGPRs; ++Reg) { if (!CCInfo.isAllocated(AMDGPU::SGPR0 + Reg)) { @@ -215,27 +215,27 @@ // FIXME: How should these inputs interact with inreg / custom SGPR inputs? if (Info->hasPrivateSegmentBuffer()) { - unsigned PrivateSegmentBufferReg = Info->addPrivateSegmentBuffer(*TRI); + Register PrivateSegmentBufferReg = Info->addPrivateSegmentBuffer(*TRI); MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SReg_128RegClass); CCInfo.AllocateReg(PrivateSegmentBufferReg); } if (Info->hasDispatchPtr()) { - unsigned DispatchPtrReg = Info->addDispatchPtr(*TRI); + Register DispatchPtrReg = Info->addDispatchPtr(*TRI); // FIXME: Need to add reg as live-in CCInfo.AllocateReg(DispatchPtrReg); } if (Info->hasQueuePtr()) { - unsigned QueuePtrReg = Info->addQueuePtr(*TRI); + Register QueuePtrReg = Info->addQueuePtr(*TRI); // FIXME: Need to add reg as live-in CCInfo.AllocateReg(QueuePtrReg); } if (Info->hasKernargSegmentPtr()) { - unsigned InputPtrReg = Info->addKernargSegmentPtr(*TRI); + Register InputPtrReg = Info->addKernargSegmentPtr(*TRI); const LLT P2 = LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64); - unsigned VReg = MRI.createGenericVirtualRegister(P2); + Register VReg = MRI.createGenericVirtualRegister(P2); MRI.addLiveIn(InputPtrReg, VReg); MIRBuilder.getMBB().addLiveIn(InputPtrReg); MIRBuilder.buildCopy(VReg, InputPtrReg); Index: lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp =================================================================== --- lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp +++ lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp @@ -836,12 +836,12 @@ auto SegmentNull = MIRBuilder.buildConstant(DstTy, NullVal); auto FlatNull = MIRBuilder.buildConstant(SrcTy, 0); - unsigned PtrLo32 = MRI.createGenericVirtualRegister(DstTy); + Register PtrLo32 = MRI.createGenericVirtualRegister(DstTy); // Extract low 32-bits of the pointer. MIRBuilder.buildExtract(PtrLo32, Src, 0); - unsigned CmpRes = MRI.createGenericVirtualRegister(LLT::scalar(1)); + Register CmpRes = MRI.createGenericVirtualRegister(LLT::scalar(1)); MIRBuilder.buildICmp(CmpInst::ICMP_NE, CmpRes, Src, FlatNull.getReg(0)); MIRBuilder.buildSelect(Dst, CmpRes, PtrLo32, SegmentNull.getReg(0)); @@ -857,15 +857,15 @@ auto FlatNull = MIRBuilder.buildConstant(DstTy, TM.getNullPointerValue(DestAS)); - unsigned ApertureReg = getSegmentAperture(DestAS, MRI, MIRBuilder); + Register ApertureReg = getSegmentAperture(DestAS, MRI, MIRBuilder); - unsigned CmpRes = MRI.createGenericVirtualRegister(LLT::scalar(1)); + Register CmpRes = MRI.createGenericVirtualRegister(LLT::scalar(1)); MIRBuilder.buildICmp(CmpInst::ICMP_NE, CmpRes, Src, SegmentNull.getReg(0)); - unsigned BuildPtr = MRI.createGenericVirtualRegister(DstTy); + Register BuildPtr = MRI.createGenericVirtualRegister(DstTy); // Coerce the type of the low half of the result so we can use merge_values. 
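A C-level model of the two addrspacecast lowerings in this file: flat-to-segment truncates to the low 32 bits, segment-to-flat glues the aperture on top, and both guard the null pointer with a compare-and-select, mirroring the G_EXTRACT/G_MERGE_VALUES, G_ICMP, and G_SELECT sequences above. The null constants and the aperture parameter are placeholders; the real values come from TM.getNullPointerValue and getSegmentAperture:

    #include <cstdint>

    static const uint32_t SegmentNull = 0xFFFFFFFFu; // placeholder value
    static const uint64_t FlatNull = 0;              // placeholder value

    uint32_t flatToSegment(uint64_t Flat) {
      uint32_t Lo32 = (uint32_t)Flat;               // G_EXTRACT, low half
      return Flat != FlatNull ? Lo32 : SegmentNull; // G_ICMP + G_SELECT
    }

    uint64_t segmentToFlat(uint32_t Seg, uint32_t Aperture) {
      uint64_t Built = ((uint64_t)Aperture << 32) | Seg; // G_MERGE_VALUES
      return Seg != SegmentNull ? Built : FlatNull;      // G_ICMP + G_SELECT
    }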
- unsigned SrcAsInt = MRI.createGenericVirtualRegister(LLT::scalar(32)); + Register SrcAsInt = MRI.createGenericVirtualRegister(LLT::scalar(32)); MIRBuilder.buildInstr(TargetOpcode::G_PTRTOINT) .addDef(SrcAsInt) .addUse(Src); @@ -884,7 +884,7 @@ MachineIRBuilder &MIRBuilder) const { MIRBuilder.setInstr(MI); - unsigned Src = MI.getOperand(1).getReg(); + Register Src = MI.getOperand(1).getReg(); LLT Ty = MRI.getType(Src); assert(Ty.isScalar() && Ty.getSizeInBits() == 64); @@ -914,7 +914,7 @@ const LLT S1 = LLT::scalar(1); const LLT S64 = LLT::scalar(64); - unsigned Src = MI.getOperand(1).getReg(); + Register Src = MI.getOperand(1).getReg(); assert(MRI.getType(Src) == S64); // result = trunc(src) @@ -960,12 +960,12 @@ const LLT S32 = LLT::scalar(32); const LLT S64 = LLT::scalar(64); - unsigned Src = MI.getOperand(1).getReg(); + Register Src = MI.getOperand(1).getReg(); assert(MRI.getType(Src) == S64); // TODO: Should this use extract since the low half is unused? auto Unmerge = B.buildUnmerge({S32, S32}, Src); - unsigned Hi = Unmerge.getReg(1); + Register Hi = Unmerge.getReg(1); // Extract the upper half, since this is where we will find the sign and // exponent. @@ -1002,8 +1002,8 @@ MachineIRBuilder &B, bool Signed) const { B.setInstr(MI); - unsigned Dst = MI.getOperand(0).getReg(); - unsigned Src = MI.getOperand(1).getReg(); + Register Dst = MI.getOperand(0).getReg(); + Register Src = MI.getOperand(1).getReg(); const LLT S64 = LLT::scalar(64); const LLT S32 = LLT::scalar(32); Index: lib/Target/ARM/ARMCallLowering.cpp =================================================================== --- lib/Target/ARM/ARMCallLowering.cpp +++ lib/Target/ARM/ARMCallLowering.cpp @@ -90,27 +90,27 @@ MachineInstrBuilder &MIB, CCAssignFn *AssignFn) : ValueHandler(MIRBuilder, MRI, AssignFn), MIB(MIB) {} - unsigned getStackAddress(uint64_t Size, int64_t Offset, + Register getStackAddress(uint64_t Size, int64_t Offset, MachinePointerInfo &MPO) override { assert((Size == 1 || Size == 2 || Size == 4 || Size == 8) && "Unsupported size"); LLT p0 = LLT::pointer(0, 32); LLT s32 = LLT::scalar(32); - unsigned SPReg = MRI.createGenericVirtualRegister(p0); - MIRBuilder.buildCopy(SPReg, ARM::SP); + Register SPReg = MRI.createGenericVirtualRegister(p0); + MIRBuilder.buildCopy(SPReg, Register(ARM::SP)); - unsigned OffsetReg = MRI.createGenericVirtualRegister(s32); + Register OffsetReg = MRI.createGenericVirtualRegister(s32); MIRBuilder.buildConstant(OffsetReg, Offset); - unsigned AddrReg = MRI.createGenericVirtualRegister(p0); + Register AddrReg = MRI.createGenericVirtualRegister(p0); MIRBuilder.buildGEP(AddrReg, SPReg, OffsetReg); MPO = MachinePointerInfo::getStack(MIRBuilder.getMF(), Offset); return AddrReg; } - void assignValueToReg(unsigned ValVReg, unsigned PhysReg, + void assignValueToReg(Register ValVReg, Register PhysReg, CCValAssign &VA) override { assert(VA.isRegLoc() && "Value shouldn't be assigned to reg"); assert(VA.getLocReg() == PhysReg && "Assigning to the wrong reg?"); @@ -118,17 +118,17 @@ assert(VA.getValVT().getSizeInBits() <= 64 && "Unsupported value size"); assert(VA.getLocVT().getSizeInBits() <= 64 && "Unsupported location size"); - unsigned ExtReg = extendRegister(ValVReg, VA); + Register ExtReg = extendRegister(ValVReg, VA); MIRBuilder.buildCopy(PhysReg, ExtReg); MIB.addUse(PhysReg, RegState::Implicit); } - void assignValueToAddress(unsigned ValVReg, unsigned Addr, uint64_t Size, + void assignValueToAddress(Register ValVReg, Register Addr, uint64_t Size, MachinePointerInfo &MPO, CCValAssign &VA) 
override { assert((Size == 1 || Size == 2 || Size == 4 || Size == 8) && "Unsupported size"); - unsigned ExtReg = extendRegister(ValVReg, VA); + Register ExtReg = extendRegister(ValVReg, VA); auto MMO = MIRBuilder.getMF().getMachineMemOperand( MPO, MachineMemOperand::MOStore, VA.getLocVT().getStoreSize(), /* Alignment */ 1); @@ -298,7 +298,7 @@ bool isArgumentHandler() const override { return true; } - unsigned getStackAddress(uint64_t Size, int64_t Offset, + Register getStackAddress(uint64_t Size, int64_t Offset, MachinePointerInfo &MPO) override { assert((Size == 1 || Size == 2 || Size == 4 || Size == 8) && "Unsupported size"); @@ -315,7 +315,7 @@ return AddrReg; } - void assignValueToAddress(unsigned ValVReg, unsigned Addr, uint64_t Size, + void assignValueToAddress(Register ValVReg, Register Addr, uint64_t Size, MachinePointerInfo &MPO, CCValAssign &VA) override { assert((Size == 1 || Size == 2 || Size == 4 || Size == 8) && "Unsupported size"); @@ -336,14 +336,14 @@ } } - void buildLoad(unsigned Val, unsigned Addr, uint64_t Size, unsigned Alignment, + void buildLoad(Register Val, Register Addr, uint64_t Size, unsigned Alignment, MachinePointerInfo &MPO) { auto MMO = MIRBuilder.getMF().getMachineMemOperand( MPO, MachineMemOperand::MOLoad, Size, Alignment); MIRBuilder.buildLoad(Val, Addr, *MMO); } - void assignValueToReg(unsigned ValVReg, unsigned PhysReg, + void assignValueToReg(Register ValVReg, Register PhysReg, CCValAssign &VA) override { assert(VA.isRegLoc() && "Value shouldn't be assigned to reg"); assert(VA.getLocReg() == PhysReg && "Assigning to the wrong reg?"); Index: lib/Target/ARM/ARMLegalizerInfo.cpp =================================================================== --- lib/Target/ARM/ARMLegalizerInfo.cpp +++ lib/Target/ARM/ARMLegalizerInfo.cpp @@ -414,7 +414,7 @@ auto *ArgTy = OpSize == 32 ? 
Type::getFloatTy(Ctx) : Type::getDoubleTy(Ctx); auto *RetTy = Type::getInt32Ty(Ctx); - SmallVector<unsigned, 2> Results; + SmallVector<Register, 2> Results; for (auto Libcall : Libcalls) { auto LibcallResult = MRI.createGenericVirtualRegister(LLT::scalar(32)); auto Status = Index: lib/Target/Mips/MipsCallLowering.h =================================================================== --- lib/Target/Mips/MipsCallLowering.h +++ lib/Target/Mips/MipsCallLowering.h @@ -45,7 +45,7 @@ private: bool assign(Register VReg, const CCValAssign &VA, const EVT &VT); - virtual unsigned getStackAddress(const CCValAssign &VA, + virtual Register getStackAddress(const CCValAssign &VA, MachineMemOperand *&MMO) = 0; virtual void assignValueToReg(Register ValVReg, const CCValAssign &VA, Index: lib/Target/Mips/MipsCallLowering.cpp =================================================================== --- lib/Target/Mips/MipsCallLowering.cpp +++ lib/Target/Mips/MipsCallLowering.cpp @@ -93,7 +93,7 @@ void assignValueToReg(Register ValVReg, const CCValAssign &VA, const EVT &VT) override; - unsigned getStackAddress(const CCValAssign &VA, + Register getStackAddress(const CCValAssign &VA, MachineMemOperand *&MMO) override; void assignValueToAddress(Register ValVReg, const CCValAssign &VA) override; @@ -134,7 +134,7 @@ const EVT &VT) { const MipsSubtarget &STI = static_cast<const MipsSubtarget &>(MIRBuilder.getMF().getSubtarget()); - unsigned PhysReg = VA.getLocReg(); + Register PhysReg = VA.getLocReg(); if (VT == MVT::f64 && PhysReg >= Mips::A0 && PhysReg <= Mips::A3) { const MipsSubtarget &STI = static_cast<const MipsSubtarget &>(MIRBuilder.getMF().getSubtarget()); @@ -173,7 +173,7 @@ } } -unsigned IncomingValueHandler::getStackAddress(const CCValAssign &VA, +Register IncomingValueHandler::getStackAddress(const CCValAssign &VA, MachineMemOperand *&MMO) { MachineFunction &MF = MIRBuilder.getMF(); unsigned Size = alignTo(VA.getValVT().getSizeInBits(), 8) / 8; @@ -188,7 +188,7 @@ unsigned Align = MinAlign(TFL->getStackAlignment(), Offset); MMO = MF.getMachineMemOperand(MPO, MachineMemOperand::MOLoad, Size, Align); - unsigned AddrReg = MRI.createGenericVirtualRegister(LLT::pointer(0, 32)); + Register AddrReg = MRI.createGenericVirtualRegister(LLT::pointer(0, 32)); MIRBuilder.buildFrameIndex(AddrReg, FI); return AddrReg; @@ -228,7 +228,7 @@ void assignValueToReg(Register ValVReg, const CCValAssign &VA, const EVT &VT) override; - unsigned getStackAddress(const CCValAssign &VA, + Register getStackAddress(const CCValAssign &VA, MachineMemOperand *&MMO) override; void assignValueToAddress(Register ValVReg, const CCValAssign &VA) override; @@ -237,7 +237,7 @@ ArrayRef<CCValAssign> ArgLocs, unsigned ArgLocsStartIndex, Register ArgsReg, const EVT &VT) override; - unsigned extendRegister(Register ValReg, const CCValAssign &VA); + Register extendRegister(Register ValReg, const CCValAssign &VA); MachineInstrBuilder &MIB; }; @@ -274,13 +274,13 @@ .constrainAllUses(MIRBuilder.getTII(), *STI.getRegisterInfo(), *STI.getRegBankInfo()); } else { - unsigned ExtReg = extendRegister(ValVReg, VA); + Register ExtReg = extendRegister(ValVReg, VA); MIRBuilder.buildCopy(PhysReg, ExtReg); MIB.addUse(PhysReg, RegState::Implicit); } } -unsigned OutgoingValueHandler::getStackAddress(const CCValAssign &VA, +Register OutgoingValueHandler::getStackAddress(const CCValAssign &VA, MachineMemOperand *&MMO) { MachineFunction &MF = MIRBuilder.getMF(); const TargetFrameLowering *TFL = MF.getSubtarget().getFrameLowering(); @@ -288,7 +288,7 @@ LLT p0 = LLT::pointer(0, 32); LLT s32 = LLT::scalar(32); Register SPReg =
MRI.createGenericVirtualRegister(p0); - MIRBuilder.buildCopy(SPReg, Mips::SP); + MIRBuilder.buildCopy(SPReg, Register(Mips::SP)); Register OffsetReg = MRI.createGenericVirtualRegister(s32); unsigned Offset = VA.getLocMemOffset(); @@ -310,11 +310,11 @@ const CCValAssign &VA) { MachineMemOperand *MMO; Register Addr = getStackAddress(VA, MMO); - unsigned ExtReg = extendRegister(ValVReg, VA); + Register ExtReg = extendRegister(ValVReg, VA); MIRBuilder.buildStore(ExtReg, Addr, *MMO); } -unsigned OutgoingValueHandler::extendRegister(Register ValReg, +Register OutgoingValueHandler::extendRegister(Register ValReg, const CCValAssign &VA) { LLT LocTy{VA.getLocVT()}; switch (VA.getLocInfo()) { @@ -530,7 +530,7 @@ Callee.isReg() || IsCalleeGlobalPIC ? Mips::JALRPseudo : Mips::JAL); MIB.addDef(Mips::SP, RegState::Implicit); if (IsCalleeGlobalPIC) { - unsigned CalleeReg = + Register CalleeReg = MF.getRegInfo().createGenericVirtualRegister(LLT::pointer(0, 32)); MachineInstr *CalleeGlobalValue = MIRBuilder.buildGlobalValue(CalleeReg, Callee.getGlobal()); @@ -583,8 +583,8 @@ if (IsCalleeGlobalPIC) { MIRBuilder.buildCopy( - Mips::GP, - MF.getInfo()->getGlobalBaseRegForGlobalISel()); + Register(Mips::GP), + MF.getInfo()->getGlobalBaseRegForGlobalISel()); MIB.addDef(Mips::GP, RegState::Implicit); } MIRBuilder.insertInstr(MIB); Index: lib/Target/Mips/MipsInstructionSelector.cpp =================================================================== --- lib/Target/Mips/MipsInstructionSelector.cpp +++ lib/Target/Mips/MipsInstructionSelector.cpp @@ -38,7 +38,7 @@ private: bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const; - bool materialize32BitImm(unsigned DestReg, APInt Imm, + bool materialize32BitImm(Register DestReg, APInt Imm, MachineIRBuilder &B) const; bool selectCopy(MachineInstr &I, MachineRegisterInfo &MRI) const; @@ -80,7 +80,7 @@ bool MipsInstructionSelector::selectCopy(MachineInstr &I, MachineRegisterInfo &MRI) const { - unsigned DstReg = I.getOperand(0).getReg(); + Register DstReg = I.getOperand(0).getReg(); if (TargetRegisterInfo::isPhysicalRegister(DstReg)) return true; @@ -104,12 +104,12 @@ return true; } -bool MipsInstructionSelector::materialize32BitImm(unsigned DestReg, APInt Imm, +bool MipsInstructionSelector::materialize32BitImm(Register DestReg, APInt Imm, MachineIRBuilder &B) const { assert(Imm.getBitWidth() == 32 && "Unsupported immediate size."); // Ori zero extends immediate. Used for values with zeros in high 16 bits. if (Imm.getHiBits(16).isNullValue()) { - MachineInstr *Inst = B.buildInstr(Mips::ORi, {DestReg}, {Mips::ZERO}) + MachineInstr *Inst = B.buildInstr(Mips::ORi, {DestReg}, {Register(Mips::ZERO)}) .addImm(Imm.getLoBits(16).getLimitedValue()); return constrainSelectedInstRegOperands(*Inst, TII, TRI, RBI); } @@ -121,12 +121,12 @@ } // ADDiu sign extends immediate. Used for values with 1s in high 17 bits. if (Imm.isSignedIntN(16)) { - MachineInstr *Inst = B.buildInstr(Mips::ADDiu, {DestReg}, {Mips::ZERO}) + MachineInstr *Inst = B.buildInstr(Mips::ADDiu, {DestReg}, {Register(Mips::ZERO)}) .addImm(Imm.getLoBits(16).getLimitedValue()); return constrainSelectedInstRegOperands(*Inst, TII, TRI, RBI); } // Values that cannot be materialized with single immediate instruction. 
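That comment closes the decision tree for materialize32BitImm: ORi (which zero-extends its immediate) covers values whose high 16 bits are clear, ADDiu (which sign-extends) covers values that fit in signed 16 bits, and everything else takes the LUi/ORi pair shown right after this note. A sketch of the dispatch; the LUi-only branch for a clear low half is elided from this excerpt and assumed here:

    #include <cstdint>
    #include <string>

    std::string materialize32BitImmPlan(uint32_t Imm) {
      uint16_t Hi = Imm >> 16, Lo = Imm & 0xFFFFu;
      if (Hi == 0)
        return "ORi dst, zero, lo16";   // zero-extends: high half clear
      if (Lo == 0)
        return "LUi dst, hi16";         // assumption: low half clear
      if ((int32_t)Imm == (int16_t)Lo)  // fits in signed 16 bits
        return "ADDiu dst, zero, lo16"; // sign-extends
      return "LUi tmp, hi16; ORi dst, tmp, lo16"; // two-instruction fallback
    }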
- unsigned LUiReg = B.getMRI()->createVirtualRegister(&Mips::GPR32RegClass); + Register LUiReg = B.getMRI()->createVirtualRegister(&Mips::GPR32RegClass); MachineInstr *LUi = B.buildInstr(Mips::LUi, {LUiReg}, {}) .addImm(Imm.getHiBits(16).getLimitedValue()); MachineInstr *ORi = B.buildInstr(Mips::ORi, {DestReg}, {LUiReg}) @@ -201,7 +201,7 @@ switch (I.getOpcode()) { case G_UMULH: { - unsigned PseudoMULTuReg = MRI.createVirtualRegister(&Mips::ACC64RegClass); + Register PseudoMULTuReg = MRI.createVirtualRegister(&Mips::ACC64RegClass); MachineInstr *PseudoMULTu, *PseudoMove; PseudoMULTu = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::PseudoMULTu)) @@ -242,7 +242,7 @@ break; } case G_PHI: { - const unsigned DestReg = I.getOperand(0).getReg(); + const Register DestReg = I.getOperand(0).getReg(); const unsigned DestRegBank = RBI.getRegBank(DestReg, MRI, TRI)->getID(); const unsigned OpSize = MRI.getType(DestReg).getSizeInBits(); @@ -257,7 +257,7 @@ case G_LOAD: case G_ZEXTLOAD: case G_SEXTLOAD: { - const unsigned DestReg = I.getOperand(0).getReg(); + const Register DestReg = I.getOperand(0).getReg(); const unsigned DestRegBank = RBI.getRegBank(DestReg, MRI, TRI)->getID(); const unsigned OpSize = MRI.getType(DestReg).getSizeInBits(); const unsigned OpMemSizeInBytes = (*I.memoperands_begin())->getSize(); @@ -281,7 +281,7 @@ case G_UREM: case G_SDIV: case G_SREM: { - unsigned HILOReg = MRI.createVirtualRegister(&Mips::ACC64RegClass); + Register HILOReg = MRI.createVirtualRegister(&Mips::ACC64RegClass); bool IsSigned = I.getOpcode() == G_SREM || I.getOpcode() == G_SDIV; bool IsDiv = I.getOpcode() == G_UDIV || I.getOpcode() == G_SDIV; @@ -328,7 +328,7 @@ unsigned Size = MRI.getType(I.getOperand(0).getReg()).getSizeInBits(); if (Size == 32) { - unsigned GPRReg = MRI.createVirtualRegister(&Mips::GPR32RegClass); + Register GPRReg = MRI.createVirtualRegister(&Mips::GPR32RegClass); MachineIRBuilder B(I); if (!materialize32BitImm(GPRReg, APImm, B)) return false; @@ -339,8 +339,8 @@ return false; } if (Size == 64) { - unsigned GPRRegHigh = MRI.createVirtualRegister(&Mips::GPR32RegClass); - unsigned GPRRegLow = MRI.createVirtualRegister(&Mips::GPR32RegClass); + Register GPRRegHigh = MRI.createVirtualRegister(&Mips::GPR32RegClass); + Register GPRRegLow = MRI.createVirtualRegister(&Mips::GPR32RegClass); MachineIRBuilder B(I); if (!materialize32BitImm(GPRRegHigh, APImm.getHiBits(32).trunc(32), B)) return false; @@ -390,7 +390,7 @@ return false; if (GVal->hasLocalLinkage()) { - unsigned LWGOTDef = MRI.createVirtualRegister(&Mips::GPR32RegClass); + Register LWGOTDef = MRI.createVirtualRegister(&Mips::GPR32RegClass); LWGOT->getOperand(0).setReg(LWGOTDef); MachineInstr *ADDiu = @@ -403,7 +403,7 @@ return false; } } else { - unsigned LUiReg = MRI.createVirtualRegister(&Mips::GPR32RegClass); + Register LUiReg = MRI.createVirtualRegister(&Mips::GPR32RegClass); MachineInstr *LUi = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LUi)) .addDef(LUiReg) @@ -426,8 +426,9 @@ } case G_ICMP: { struct Instr { - unsigned Opcode, Def, LHS, RHS; - Instr(unsigned Opcode, unsigned Def, unsigned LHS, unsigned RHS) + unsigned Opcode; + Register Def, LHS, RHS; + Instr(unsigned Opcode, Register Def, Register LHS, Register RHS) : Opcode(Opcode), Def(Def), LHS(LHS), RHS(RHS){}; bool hasImm() const { @@ -438,10 +439,10 @@ }; SmallVector Instructions; - unsigned ICMPReg = I.getOperand(0).getReg(); - unsigned Temp = MRI.createVirtualRegister(&Mips::GPR32RegClass); - unsigned LHS = I.getOperand(2).getReg(); - unsigned RHS = 
I.getOperand(3).getReg(); + Register ICMPReg = I.getOperand(0).getReg(); + Register Temp = MRI.createVirtualRegister(&Mips::GPR32RegClass); + Register LHS = I.getOperand(2).getReg(); + Register RHS = I.getOperand(3).getReg(); CmpInst::Predicate Cond = static_cast<CmpInst::Predicate>(I.getOperand(1).getPredicate()); Index: lib/Target/Mips/MipsMachineFunction.h =================================================================== --- lib/Target/Mips/MipsMachineFunction.h +++ lib/Target/Mips/MipsMachineFunction.h @@ -32,8 +32,8 @@ void setSRetReturnReg(unsigned Reg) { SRetReturnReg = Reg; } bool globalBaseRegSet() const; - unsigned getGlobalBaseReg(); - unsigned getGlobalBaseRegForGlobalISel(); + Register getGlobalBaseReg(); + Register getGlobalBaseRegForGlobalISel(); // Insert instructions to initialize the global base register in the // first MBB of the function. Index: lib/Target/Mips/MipsMachineFunction.cpp =================================================================== --- lib/Target/Mips/MipsMachineFunction.cpp +++ lib/Target/Mips/MipsMachineFunction.cpp @@ -44,14 +44,14 @@ return Mips::GPR32RegClass; } -unsigned MipsFunctionInfo::getGlobalBaseReg() { +Register MipsFunctionInfo::getGlobalBaseReg() { if (!GlobalBaseReg) GlobalBaseReg = MF.getRegInfo().createVirtualRegister(&getGlobalBaseRegClass(MF)); return GlobalBaseReg; } -unsigned MipsFunctionInfo::getGlobalBaseRegForGlobalISel() { +Register MipsFunctionInfo::getGlobalBaseRegForGlobalISel() { if (!GlobalBaseReg) { getGlobalBaseReg(); initGlobalBaseReg(); Index: lib/Target/X86/X86CallLowering.cpp =================================================================== --- lib/Target/X86/X86CallLowering.cpp +++ lib/Target/X86/X86CallLowering.cpp @@ -101,28 +101,28 @@ DL(MIRBuilder.getMF().getDataLayout()), STI(MIRBuilder.getMF().getSubtarget<X86Subtarget>()) {} - unsigned getStackAddress(uint64_t Size, int64_t Offset, + Register getStackAddress(uint64_t Size, int64_t Offset, MachinePointerInfo &MPO) override { LLT p0 = LLT::pointer(0, DL.getPointerSizeInBits(0)); LLT SType = LLT::scalar(DL.getPointerSizeInBits(0)); - unsigned SPReg = MRI.createGenericVirtualRegister(p0); + Register SPReg = MRI.createGenericVirtualRegister(p0); MIRBuilder.buildCopy(SPReg, STI.getRegisterInfo()->getStackRegister()); - unsigned OffsetReg = MRI.createGenericVirtualRegister(SType); + Register OffsetReg = MRI.createGenericVirtualRegister(SType); MIRBuilder.buildConstant(OffsetReg, Offset); - unsigned AddrReg = MRI.createGenericVirtualRegister(p0); + Register AddrReg = MRI.createGenericVirtualRegister(p0); MIRBuilder.buildGEP(AddrReg, SPReg, OffsetReg); MPO = MachinePointerInfo::getStack(MIRBuilder.getMF(), Offset); return AddrReg; } - void assignValueToReg(unsigned ValVReg, unsigned PhysReg, + void assignValueToReg(Register ValVReg, Register PhysReg, CCValAssign &VA) override { MIB.addUse(PhysReg, RegState::Implicit); - unsigned ExtReg; + Register ExtReg; // If we are copying the value to a physical register with the // size larger than the size of the value itself - build AnyExt // to the size of the register first and only then do the copy.
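That comment states the pattern every outgoing handler in this patch follows: widen the value to the location type first, then copy into the physical register. Which widening applies is keyed off CCValAssign::LocInfo; the sketch below is a simplification of CallLowering::extendRegister, with the generic opcodes named as strings:

    enum class LocInfo { Full, SExt, ZExt, AExt }; // mirrors CCValAssign

    const char *extendOpcodeFor(LocInfo LI) {
      switch (LI) {
      case LocInfo::SExt: return "G_SEXT";
      case LocInfo::ZExt: return "G_ZEXT";
      case LocInfo::AExt: return "G_ANYEXT";
      case LocInfo::Full: return nullptr; // value already fills the register
      }
      return nullptr;
    }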
@@ -143,9 +143,9 @@ MIRBuilder.buildCopy(PhysReg, ExtReg); } - void assignValueToAddress(unsigned ValVReg, unsigned Addr, uint64_t Size, + void assignValueToAddress(Register ValVReg, Register Addr, uint64_t Size, MachinePointerInfo &MPO, CCValAssign &VA) override { - unsigned ExtReg = extendRegister(ValVReg, VA); + Register ExtReg = extendRegister(ValVReg, VA); auto MMO = MIRBuilder.getMF().getMachineMemOperand( MPO, MachineMemOperand::MOStore, VA.getLocVT().getStoreSize(), /* Alignment */ 1); @@ -230,7 +230,7 @@ bool isArgumentHandler() const override { return true; } - unsigned getStackAddress(uint64_t Size, int64_t Offset, + Register getStackAddress(uint64_t Size, int64_t Offset, MachinePointerInfo &MPO) override { auto &MFI = MIRBuilder.getMF().getFrameInfo(); int FI = MFI.CreateFixedObject(Size, Offset, true); @@ -242,7 +242,7 @@ return AddrReg; } - void assignValueToAddress(unsigned ValVReg, unsigned Addr, uint64_t Size, + void assignValueToAddress(Register ValVReg, Register Addr, uint64_t Size, MachinePointerInfo &MPO, CCValAssign &VA) override { auto MMO = MIRBuilder.getMF().getMachineMemOperand( MPO, MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant, Size, @@ -250,7 +250,7 @@ MIRBuilder.buildLoad(ValVReg, Addr, *MMO); } - void assignValueToReg(unsigned ValVReg, unsigned PhysReg, + void assignValueToReg(Register ValVReg, Register PhysReg, CCValAssign &VA) override { markPhysRegUsed(PhysReg); Index: lib/Target/X86/X86RegisterInfo.h =================================================================== --- lib/Target/X86/X86RegisterInfo.h +++ lib/Target/X86/X86RegisterInfo.h @@ -136,13 +136,13 @@ Register getFrameRegister(const MachineFunction &MF) const override; unsigned getPtrSizedFrameRegister(const MachineFunction &MF) const; unsigned getPtrSizedStackRegister(const MachineFunction &MF) const; - unsigned getStackRegister() const { return StackPtr; } - unsigned getBaseRegister() const { return BasePtr; } + Register getStackRegister() const { return StackPtr; } + Register getBaseRegister() const { return BasePtr; } /// Returns physical register used as frame pointer. /// This will always returns the frame pointer register, contrary to /// getFrameRegister() which returns the "base pointer" in situations /// involving a stack, frame and base pointer. 
- unsigned getFramePtr() const { return FramePtr; } + Register getFramePtr() const { return FramePtr; } // FIXME: Move to FrameInfo unsigned getSlotSize() const { return SlotSize; } }; Index: unittests/CodeGen/GlobalISel/PatternMatchTest.cpp =================================================================== --- unittests/CodeGen/GlobalISel/PatternMatchTest.cpp +++ unittests/CodeGen/GlobalISel/PatternMatchTest.cpp @@ -149,7 +149,7 @@ auto ModuleMMIPair = createDummyModule(Context, *TM, ""); MachineFunction *MF = getMFFromMMI(ModuleMMIPair.first.get(), ModuleMMIPair.second.get()); - SmallVector<unsigned, 4> Copies; + SmallVector<Register, 4> Copies; collectCopies(Copies, MF); MachineBasicBlock *EntryMBB = &*MF->begin(); MachineIRBuilder B(*MF); @@ -276,7 +276,7 @@ auto ModuleMMIPair = createDummyModule(Context, *TM, ""); MachineFunction *MF = getMFFromMMI(ModuleMMIPair.first.get(), ModuleMMIPair.second.get()); - SmallVector<unsigned, 4> Copies; + SmallVector<Register, 4> Copies; collectCopies(Copies, MF); MachineBasicBlock *EntryMBB = &*MF->begin(); MachineIRBuilder B(*MF); @@ -347,7 +347,7 @@ auto ModuleMMIPair = createDummyModule(Context, *TM, ""); MachineFunction *MF = getMFFromMMI(ModuleMMIPair.first.get(), ModuleMMIPair.second.get()); - SmallVector<unsigned, 4> Copies; + SmallVector<Register, 4> Copies; collectCopies(Copies, MF); MachineBasicBlock *EntryMBB = &*MF->begin(); MachineIRBuilder B(*MF); @@ -403,7 +403,7 @@ auto ModuleMMIPair = createDummyModule(Context, *TM, ""); MachineFunction *MF = getMFFromMMI(ModuleMMIPair.first.get(), ModuleMMIPair.second.get()); - SmallVector<unsigned, 4> Copies; + SmallVector<Register, 4> Copies; collectCopies(Copies, MF); MachineBasicBlock *EntryMBB = &*MF->begin(); MachineIRBuilder B(*MF); @@ -450,7 +450,7 @@ auto ModuleMMIPair = createDummyModule(Context, *TM, ""); MachineFunction *MF = getMFFromMMI(ModuleMMIPair.first.get(), ModuleMMIPair.second.get()); - SmallVector<unsigned, 4> Copies; + SmallVector<Register, 4> Copies; collectCopies(Copies, MF); MachineBasicBlock *EntryMBB = &*MF->begin(); MachineIRBuilder B(*MF); @@ -493,7 +493,7 @@ auto ModuleMMIPair = createDummyModule(Context, *TM, ""); MachineFunction *MF = getMFFromMMI(ModuleMMIPair.first.get(), ModuleMMIPair.second.get()); - SmallVector<unsigned, 4> Copies; + SmallVector<Register, 4> Copies; collectCopies(Copies, MF); MachineBasicBlock *EntryMBB = &*MF->begin(); MachineIRBuilder B(*MF);
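One detail recurs throughout these hunks: physical-register enumerators are now wrapped explicitly, as in Register(AArch64::WZR), Register(AArch64::SP), Register(Mips::ZERO), and Register(Mips::GP). The reason is the C++ rule that an implicit conversion sequence may contain at most one user-defined conversion. A toy reduction with stand-in types (not the real llvm::Register or MachineIRBuilder operand classes):

    enum PhysReg : unsigned { SP = 1, WZR = 2 };

    struct Register {
      unsigned Val;
      Register(unsigned V) : Val(V) {} // user-defined conversion #1
    };

    struct SrcOp {
      Register R;
      SrcOp(Register R) : R(R) {}      // user-defined conversion #2
      // SrcOp(unsigned) used to exist; this patch removes it.
    };

    void buildCopy(SrcOp Src) { (void)Src; }

    int main() {
      // buildCopy(WZR);        // ill-formed: PhysReg -> Register -> SrcOp
      //                        // would chain two user-defined conversions
      buildCopy(Register(WZR)); // fine: the first conversion is spelled out
    }

Retyping local variables as Register, which is the bulk of this patch, solves the same problem for virtual registers: a Register value converts to an operand in a single step.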