Index: include/llvm/CodeGen/MachineRegisterInfo.h
===================================================================
--- include/llvm/CodeGen/MachineRegisterInfo.h
+++ include/llvm/CodeGen/MachineRegisterInfo.h
@@ -132,6 +132,10 @@
     return MF->getSubtarget().getRegisterInfo();
   }
 
+  const TargetSubtargetInfo &getTargetSubtargetInfo() const {
+    return MF->getSubtarget();
+  }
+
   void resetDelegate(Delegate *delegate) {
     // Ensure another delegate does not take over unless the current
     // delegate first unattaches itself. If we ever need to multicast
@@ -820,6 +824,26 @@
   /// subregisters of the virtual register @p Reg.
   LaneBitmask getMaxLaneMaskForVReg(unsigned Reg) const;
 
+  /// \brief Returns the size of a register from the register class \p RC.
+  unsigned getRegSize(const TargetRegisterClass *RC) const {
+    return getTargetRegisterInfo()->getRegSize(RC->getID(),
+                                               getTargetSubtargetInfo());
+  }
+
+  /// \brief Returns the minimum size of a spill slot for storing a register
+  /// from the register class \p RC.
+  unsigned getSpillSize(const TargetRegisterClass *RC) const {
+    return getTargetRegisterInfo()->getSpillSize(RC->getID(),
+                                                 getTargetSubtargetInfo());
+  }
+
+  /// \brief Returns the minimum alignment of a spill slot for storing a
+  /// register from the register class \p RC.
+  unsigned getSpillAlignment(const TargetRegisterClass *RC) const {
+    return getTargetRegisterInfo()->getSpillAlignment(RC->getID(),
+                                                      getTargetSubtargetInfo());
+  }
+
   /// defusechain_iterator - This class provides iterator support for machine
   /// operands in the function that use or define a specific register.  If
   /// ReturnUses is true it returns uses of registers, if ReturnDefs is true it
Index: include/llvm/CodeGen/StackMaps.h
===================================================================
--- include/llvm/CodeGen/StackMaps.h
+++ include/llvm/CodeGen/StackMaps.h
@@ -21,6 +21,7 @@
 class AsmPrinter;
 class MCExpr;
 class MCStreamer;
+class TargetSubtargetInfo;
 
 /// \brief MI-level stackmap operands.
 ///
@@ -271,7 +272,8 @@
 
   /// \brief Create a live-out register record for the given register @p Reg.
   LiveOutReg createLiveOutReg(unsigned Reg,
-                              const TargetRegisterInfo *TRI) const;
+                              const TargetRegisterInfo *TRI,
+                              const TargetSubtargetInfo &STI) const;
 
   /// \brief Parse the register live-out mask and return a vector of live-out
   /// registers that need to be recorded in the stackmap.
Index: include/llvm/MC/MCRegisterInfo.h
===================================================================
--- include/llvm/MC/MCRegisterInfo.h
+++ include/llvm/MC/MCRegisterInfo.h
@@ -26,6 +26,8 @@
 /// but not necessarily virtual registers.
 typedef uint16_t MCPhysReg;
 
+class MCSubtargetInfo;
+
 /// MCRegisterClass - Base class of TargetRegisterClass.
 class MCRegisterClass {
 public:
@@ -77,14 +79,6 @@
     return contains(Reg1) && contains(Reg2);
   }
 
-  /// getSize - Return the size of the register in bytes, which is also the size
-  /// of a stack slot allocated to hold a spilled copy of this register.
-  unsigned getSize() const { return RegSize; }
-
-  /// getAlignment - Return the minimum required alignment for a register of
-  /// this class.
-  unsigned getAlignment() const { return Alignment; }
-
   /// getCopyCost - Return the cost of copying a value between two registers in
   /// this class. A negative number means the register class is very expensive
   /// to copy e.g. status flag register classes.
@@ -93,6 +87,12 @@
   /// isAllocatable - Return true if this register class may be used to create
   /// virtual registers.
   bool isAllocatable() const { return Allocatable; }
+
+private:
+  // XXX: Move these functions here to avoid accidental use.
+  unsigned getSize() const { return RegSize; }
+  unsigned getAlignment() const { return Alignment; }
+  friend class MCRegisterInfo;
 };
 
 /// MCRegisterDesc - This record contains information about a particular
@@ -464,6 +464,27 @@
   bool isSuperOrSubRegisterEq(unsigned RegA, unsigned RegB) const {
     return isSubRegisterEq(RegA, RegB) || isSuperRegister(RegA, RegB);
   }
+
+  /// \brief Returns the size of a register from the register class with
+  /// id \p RCID.
+  unsigned getRegSize(uint16_t RCID, const MCSubtargetInfo &STI) const {
+    // XXX No-op change.
+    return getRegClass(RCID).getSize();
+  }
+
+  /// \brief Returns the minimum size of a spill slot for storing a register
+  /// from the register class with id \p RCID.
+  unsigned getSpillSize(uint16_t RCID, const MCSubtargetInfo &STI) const {
+    // XXX No-op change.
+    return getRegClass(RCID).getSize();
+  }
+
+  /// \brief Returns the minimum alignment of a spill slot for storing a
+  /// register from the register class with id \p RCID.
+  unsigned getSpillAlignment(uint16_t RCID, const MCSubtargetInfo &STI) const {
+    // XXX No-op change.
+    return getRegClass(RCID).getAlignment();
+  }
 };
 
 //===----------------------------------------------------------------------===//
Index: include/llvm/Target/TargetLowering.h
===================================================================
--- include/llvm/Target/TargetLowering.h
+++ include/llvm/Target/TargetLowering.h
@@ -1424,11 +1424,11 @@
   /// Return the largest legal super-reg register class of the register class
   /// for the specified type and its associated "cost".
   virtual std::pair<const TargetRegisterClass *, uint8_t>
-  findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT) const;
+  findRepresentativeClass(const TargetSubtargetInfo &STI, MVT VT) const;
 
   /// Once all of the register classes are added, this allows us to compute
   /// derived properties we expose.
-  void computeRegisterProperties(const TargetRegisterInfo *TRI);
+  void computeRegisterProperties(const TargetSubtargetInfo &STI);
 
   /// Indicate that the specified operation does not work with the specified
   /// type and indicate what to do about it.
Index: include/llvm/Target/TargetRegisterInfo.h
===================================================================
--- include/llvm/Target/TargetRegisterInfo.h
+++ include/llvm/Target/TargetRegisterInfo.h
@@ -35,6 +35,7 @@
 class VirtRegMap;
 class raw_ostream;
 class LiveRegMatrix;
+class TargetSubtargetInfo;
 
 /// A bitmask representing the covering of a register with sub-registers.
 ///
@@ -109,13 +110,6 @@
     return MC->contains(Reg1, Reg2);
   }
 
-  /// Return the size of the register in bytes, which is also the size
-  /// of a stack slot allocated to hold a spilled copy of this register.
-  unsigned getSize() const { return MC->getSize(); }
-
-  /// Return the minimum required alignment for a register of this class.
-  unsigned getAlignment() const { return MC->getAlignment(); }
-
   /// Return the cost of copying a value between two registers in this class.
   /// A negative number means the register class is very expensive
   /// to copy e.g. status flag register classes.
@@ -520,7 +514,8 @@
   // subreg index DefSubReg, reading from another source with class SrcRC and
   // subregister SrcSubReg return true if this is a preferrable copy
   // instruction or an earlier use should be used.
-  virtual bool shouldRewriteCopySrc(const TargetRegisterClass *DefRC,
+  virtual bool shouldRewriteCopySrc(const TargetSubtargetInfo &STI,
+                                    const TargetRegisterClass *DefRC,
                                     unsigned DefSubReg,
                                     const TargetRegisterClass *SrcRC,
                                     unsigned SrcSubReg) const;
@@ -635,6 +630,7 @@
   const TargetRegisterClass*
   getCommonSuperRegClass(const TargetRegisterClass *RCA, unsigned SubA,
                          const TargetRegisterClass *RCB, unsigned SubB,
+                         const TargetSubtargetInfo &STI,
                          unsigned &PreA, unsigned &PreB) const;
 
 //===--------------------------------------------------------------------===//
Index: lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp
===================================================================
--- lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp
+++ lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp
@@ -192,8 +192,7 @@
                "nop (could not find a dwarf register number)");
 
   // Attempt to find a valid super- or sub-register.
-  if (!Expr.AddMachineRegPiece(*MF->getSubtarget().getRegisterInfo(),
-                               MLoc.getReg()))
+  if (!Expr.AddMachineRegPiece(MF->getSubtarget(), MLoc.getReg()))
     Expr.EmitOp(dwarf::DW_OP_nop,
                 "nop (could not find a dwarf register number)");
   return;
Index: lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp
===================================================================
--- lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp
+++ lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp
@@ -516,8 +516,7 @@
     const TargetFrameLowering *TFI = Asm->MF->getSubtarget().getFrameLowering();
     int Offset = TFI->getFrameIndexReference(*Asm->MF, FI, FrameReg);
     assert(Expr != DV.getExpression().end() && "Wrong number of expressions");
-    DwarfExpr.AddMachineRegIndirect(*Asm->MF->getSubtarget().getRegisterInfo(),
-                                    FrameReg, Offset);
+    DwarfExpr.AddMachineRegIndirect(Asm->MF->getSubtarget(), FrameReg, Offset);
     DwarfExpr.AddExpression((*Expr)->expr_op_begin(), (*Expr)->expr_op_end());
     ++Expr;
   }
@@ -744,14 +743,14 @@
   DIEDwarfExpression DwarfExpr(*Asm, *this, *Loc);
   const DIExpression *Expr = DV.getSingleExpression();
   bool ValidReg;
-  const TargetRegisterInfo &TRI = *Asm->MF->getSubtarget().getRegisterInfo();
+  const TargetSubtargetInfo &STI = Asm->MF->getSubtarget();
   if (Location.getOffset()) {
-    ValidReg = DwarfExpr.AddMachineRegIndirect(TRI, Location.getReg(),
+    ValidReg = DwarfExpr.AddMachineRegIndirect(STI, Location.getReg(),
                                                Location.getOffset());
     if (ValidReg)
       DwarfExpr.AddExpression(Expr->expr_op_begin(), Expr->expr_op_end());
   } else
-    ValidReg = DwarfExpr.AddMachineRegExpression(TRI, Expr, Location.getReg());
+    ValidReg = DwarfExpr.AddMachineRegExpression(STI, Expr, Location.getReg());
 
   // Now attach the location information to the DIE.
   if (ValidReg)
Index: lib/CodeGen/AsmPrinter/DwarfDebug.cpp
===================================================================
--- lib/CodeGen/AsmPrinter/DwarfDebug.cpp
+++ lib/CodeGen/AsmPrinter/DwarfDebug.cpp
@@ -1423,13 +1423,13 @@
       AP.EmitDwarfRegOp(Streamer, Loc);
     else {
       // Complex address entry.
-      const TargetRegisterInfo &TRI = *AP.MF->getSubtarget().getRegisterInfo();
+      const TargetSubtargetInfo &STI = AP.MF->getSubtarget();
       if (Loc.getOffset()) {
-        DwarfExpr.AddMachineRegIndirect(TRI, Loc.getReg(), Loc.getOffset());
+        DwarfExpr.AddMachineRegIndirect(STI, Loc.getReg(), Loc.getOffset());
         DwarfExpr.AddExpression(Expr->expr_op_begin(), Expr->expr_op_end(),
                                 PieceOffsetInBits);
       } else
-        DwarfExpr.AddMachineRegExpression(TRI, Expr, Loc.getReg(),
+        DwarfExpr.AddMachineRegExpression(STI, Expr, Loc.getReg(),
                                           PieceOffsetInBits);
     }
   } else if (Value.isConstantFP()) {
Index: lib/CodeGen/AsmPrinter/DwarfExpression.h
===================================================================
--- lib/CodeGen/AsmPrinter/DwarfExpression.h
+++ lib/CodeGen/AsmPrinter/DwarfExpression.h
@@ -22,6 +22,7 @@
 class AsmPrinter;
 class ByteStreamer;
 class TargetRegisterInfo;
+class TargetSubtargetInfo;
 class DwarfUnit;
 class DIELoc;
 
@@ -74,8 +75,8 @@
 
   /// Emit an indirect dwarf register operation for the given machine register.
   /// \return false if no DWARF register exists for MachineReg.
-  bool AddMachineRegIndirect(const TargetRegisterInfo &TRI, unsigned MachineReg,
-                             int Offset = 0);
+  bool AddMachineRegIndirect(const TargetSubtargetInfo &STI,
+                             unsigned MachineReg, int Offset = 0);
 
   /// \brief Emit a partial DWARF register operation.
   /// \param MachineReg the register
@@ -91,7 +92,7 @@
   /// subregisters that alias the register.
   ///
   /// \return false if no DWARF register exists for MachineReg.
-  bool AddMachineRegPiece(const TargetRegisterInfo &TRI, unsigned MachineReg,
+  bool AddMachineRegPiece(const TargetSubtargetInfo &STI, unsigned MachineReg,
                           unsigned PieceSizeInBits = 0,
                           unsigned PieceOffsetInBits = 0);
 
@@ -107,7 +108,7 @@
   /// \param PieceOffsetInBits If this is one piece out of a fragmented
   /// location, this is the offset of the piece inside the entire variable.
   /// \return false if no DWARF register exists for MachineReg.
-  bool AddMachineRegExpression(const TargetRegisterInfo &TRI,
+  bool AddMachineRegExpression(const TargetSubtargetInfo &STI,
                                const DIExpression *Expr, unsigned MachineReg,
                                unsigned PieceOffsetInBits = 0);
   /// Emit a the operations remaining the DIExpressionIterator I.
Index: lib/CodeGen/AsmPrinter/DwarfExpression.cpp
===================================================================
--- lib/CodeGen/AsmPrinter/DwarfExpression.cpp
+++ lib/CodeGen/AsmPrinter/DwarfExpression.cpp
@@ -65,8 +65,9 @@
   EmitOp(dwarf::DW_OP_shr);
 }
 
-bool DwarfExpression::AddMachineRegIndirect(const TargetRegisterInfo &TRI,
+bool DwarfExpression::AddMachineRegIndirect(const TargetSubtargetInfo &STI,
                                             unsigned MachineReg, int Offset) {
+  const TargetRegisterInfo &TRI = *STI.getRegisterInfo();
   if (isFrameRegister(TRI, MachineReg)) {
     // If variable offset is based in frame register then use fbreg.
     EmitOp(dwarf::DW_OP_fbreg);
@@ -82,10 +83,11 @@
   return true;
 }
 
-bool DwarfExpression::AddMachineRegPiece(const TargetRegisterInfo &TRI,
+bool DwarfExpression::AddMachineRegPiece(const TargetSubtargetInfo &STI,
                                          unsigned MachineReg,
                                          unsigned PieceSizeInBits,
                                          unsigned PieceOffsetInBits) {
+  const TargetRegisterInfo &TRI = *STI.getRegisterInfo();
   if (!TRI.isPhysicalRegister(MachineReg))
     return false;
 
@@ -130,7 +132,8 @@
   // efficient DW_OP_piece.
   unsigned CurPos = PieceOffsetInBits;
   // The size of the register in bits, assuming 8 bits per byte.
-  unsigned RegSize = TRI.getMinimalPhysRegClass(MachineReg)->getSize() * 8;
+  const TargetRegisterClass *RC = TRI.getMinimalPhysRegClass(MachineReg);
+  unsigned RegSize = TRI.getRegSize(RC->getID(), STI) * 8;
   // Keep track of the bits in the register we already emitted, so we
   // can avoid emitting redundant aliasing subregs.
   SmallBitVector Coverage(RegSize, false);
@@ -202,14 +205,14 @@
   return OffsetInBits;
 }
 
-bool DwarfExpression::AddMachineRegExpression(const TargetRegisterInfo &TRI,
+bool DwarfExpression::AddMachineRegExpression(const TargetSubtargetInfo &STI,
                                               const DIExpression *Expr,
                                               unsigned MachineReg,
                                               unsigned PieceOffsetInBits) {
   auto I = Expr->expr_op_begin();
   auto E = Expr->expr_op_end();
   if (I == E)
-    return AddMachineRegPiece(TRI, MachineReg);
+    return AddMachineRegPiece(STI, MachineReg);
 
   // Pattern-match combinations for which more efficient representations exist
   // first.
@@ -219,7 +222,7 @@
     unsigned OffsetInBits = I->getArg(0);
     unsigned SizeInBits = I->getArg(1);
     // Piece always comes at the end of the expression.
-    return AddMachineRegPiece(TRI, MachineReg, SizeInBits,
+    return AddMachineRegPiece(STI, MachineReg, SizeInBits,
                               getOffsetOrZero(OffsetInBits, PieceOffsetInBits));
   }
   case dwarf::DW_OP_plus:
@@ -230,15 +233,15 @@
     if (N != E && N->getOp() == dwarf::DW_OP_deref) {
       unsigned Offset = I->getArg(0);
       ValidReg = AddMachineRegIndirect(
-          TRI, MachineReg, I->getOp() == dwarf::DW_OP_plus ? Offset : -Offset);
+          STI, MachineReg, I->getOp() == dwarf::DW_OP_plus ? Offset : -Offset);
       std::advance(I, 2);
       break;
     } else
-      ValidReg = AddMachineRegPiece(TRI, MachineReg);
+      ValidReg = AddMachineRegPiece(STI, MachineReg);
   }
   case dwarf::DW_OP_deref: {
     // [DW_OP_reg,DW_OP_deref] --> [DW_OP_breg].
-    ValidReg = AddMachineRegIndirect(TRI, MachineReg);
+    ValidReg = AddMachineRegIndirect(STI, MachineReg);
     ++I;
     break;
   }
Index: lib/CodeGen/AsmPrinter/DwarfUnit.cpp
===================================================================
--- lib/CodeGen/AsmPrinter/DwarfUnit.cpp
+++ lib/CodeGen/AsmPrinter/DwarfUnit.cpp
@@ -376,16 +376,15 @@
 bool DwarfUnit::addRegisterOpPiece(DIELoc &TheDie, unsigned Reg,
                                    unsigned SizeInBits, unsigned OffsetInBits) {
   DIEDwarfExpression Expr(*Asm, *this, TheDie);
-  Expr.AddMachineRegPiece(*Asm->MF->getSubtarget().getRegisterInfo(), Reg,
-                          SizeInBits, OffsetInBits);
+  Expr.AddMachineRegPiece(Asm->MF->getSubtarget(), Reg, SizeInBits,
+                          OffsetInBits);
   return true;
 }
 
 bool DwarfUnit::addRegisterOffset(DIELoc &TheDie, unsigned Reg,
                                   int64_t Offset) {
   DIEDwarfExpression Expr(*Asm, *this, TheDie);
-  return Expr.AddMachineRegIndirect(*Asm->MF->getSubtarget().getRegisterInfo(),
-                                    Reg, Offset);
+  return Expr.AddMachineRegIndirect(Asm->MF->getSubtarget(), Reg, Offset);
 }
 
 /* Byref variables, in Blocks, are declared by the programmer as "SomeType
Index: lib/CodeGen/DetectDeadLanes.cpp
===================================================================
--- lib/CodeGen/DetectDeadLanes.cpp
+++ lib/CodeGen/DetectDeadLanes.cpp
@@ -165,6 +165,8 @@
   unsigned SrcSubIdx = MO.getSubReg();
 
   const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo();
+  const TargetSubtargetInfo &STI = MRI.getTargetSubtargetInfo();
+
   unsigned DstSubIdx = 0;
   switch (MI.getOpcode()) {
   case TargetOpcode::INSERT_SUBREG:
@@ -184,8 +186,8 @@
 
   unsigned PreA, PreB; // Unused.
   if (SrcSubIdx && DstSubIdx)
-    return !TRI.getCommonSuperRegClass(SrcRC, SrcSubIdx, DstRC, DstSubIdx, PreA,
-                                       PreB);
+    return !TRI.getCommonSuperRegClass(SrcRC, SrcSubIdx, DstRC, DstSubIdx, STI,
+                                       PreA, PreB);
   if (SrcSubIdx)
     return !TRI.getMatchingSuperRegClass(SrcRC, DstRC, SrcSubIdx);
   if (DstSubIdx)
Index: lib/CodeGen/MachineRegisterInfo.cpp
===================================================================
--- lib/CodeGen/MachineRegisterInfo.cpp
+++ lib/CodeGen/MachineRegisterInfo.cpp
@@ -144,7 +144,7 @@
   for (auto &VRegToType : getVRegToType()) {
     auto *RC = getRegClass(VRegToType.first);
     if (VRegToType.second.isSized() &&
-        VRegToType.second.getSizeInBits() > (RC->getSize() * 8))
+        VRegToType.second.getSizeInBits() > getRegSize(RC) * 8)
       llvm_unreachable(
           "Virtual register has explicit size different from its class size");
   }
Index: lib/CodeGen/PeepholeOptimizer.cpp
===================================================================
--- lib/CodeGen/PeepholeOptimizer.cpp
+++ lib/CodeGen/PeepholeOptimizer.cpp
@@ -637,6 +637,7 @@
   if (TargetRegisterInfo::isPhysicalRegister(Reg))
     return false;
   const TargetRegisterClass *DefRC = MRI->getRegClass(Reg);
+  const TargetSubtargetInfo &STI = MRI->getTargetSubtargetInfo();
 
   SmallVector<TargetInstrInfo::RegSubRegPair, 4> SrcToLook;
   TargetInstrInfo::RegSubRegPair CurSrcPair(Reg, SubReg);
@@ -697,7 +698,7 @@
       return false;
 
     const TargetRegisterClass *SrcRC = MRI->getRegClass(CurSrcPair.Reg);
-    ShouldRewrite = TRI->shouldRewriteCopySrc(DefRC, SubReg, SrcRC,
+    ShouldRewrite = TRI->shouldRewriteCopySrc(STI, DefRC, SubReg, SrcRC,
                                               CurSrcPair.SubReg);
   } while (!ShouldRewrite);
Index: lib/CodeGen/PrologEpilogInserter.cpp
===================================================================
--- lib/CodeGen/PrologEpilogInserter.cpp
+++ lib/CodeGen/PrologEpilogInserter.cpp
@@ -347,6 +347,7 @@
   if (CSI.empty())
     return; // Early exit if no callee saved registers are modified!
 
+  MachineRegisterInfo &MRI = F.getRegInfo();
   unsigned NumFixedSpillSlots;
   const TargetFrameLowering::SpillSlot *FixedSpillSlots =
       TFI->getCalleeSavedSpillSlots(NumFixedSpillSlots);
@@ -370,22 +371,22 @@
            FixedSlot->Reg != Reg)
       ++FixedSlot;
 
+    unsigned Size = MRI.getSpillSize(RC);
+
     if (FixedSlot == FixedSpillSlots + NumFixedSpillSlots) {
       // Nope, just spill it anywhere convenient.
-      unsigned Align = RC->getAlignment();
+      unsigned Align = MRI.getSpillAlignment(RC);
       unsigned StackAlign = TFI->getStackAlignment();
-
       // We may not be able to satisfy the desired alignment specification of
       // the TargetRegisterClass if the stack alignment is smaller. Use the
       // min.
       Align = std::min(Align, StackAlign);
-      FrameIdx = MFI.CreateStackObject(RC->getSize(), Align, true);
+      FrameIdx = MFI.CreateStackObject(Size, Align, true);
       if ((unsigned)FrameIdx < MinCSFrameIndex) MinCSFrameIndex = FrameIdx;
       if ((unsigned)FrameIdx > MaxCSFrameIndex) MaxCSFrameIndex = FrameIdx;
     } else {
       // Spill it to the stack where we must.
-      FrameIdx =
-          MFI.CreateFixedSpillStackObject(RC->getSize(), FixedSlot->Offset);
+      FrameIdx = MFI.CreateFixedSpillStackObject(Size, FixedSlot->Offset);
     }
 
     CS.setFrameIdx(FrameIdx);
Index: lib/CodeGen/RegAllocFast.cpp
===================================================================
--- lib/CodeGen/RegAllocFast.cpp
+++ lib/CodeGen/RegAllocFast.cpp
@@ -214,8 +214,9 @@
     return SS;          // Already has space allocated?
 
   // Allocate a new stack object for this spill location...
-  int FrameIdx = MF->getFrameInfo().CreateSpillStackObject(RC->getSize(),
-                                                           RC->getAlignment());
+  unsigned Size = MRI->getSpillSize(RC);
+  unsigned Align = MRI->getSpillAlignment(RC);
+  int FrameIdx = MF->getFrameInfo().CreateSpillStackObject(Size, Align);
 
   // Assign the slot.
   StackSlotForVirtReg[VirtReg] = FrameIdx;
Index: lib/CodeGen/RegisterCoalescer.cpp
===================================================================
--- lib/CodeGen/RegisterCoalescer.cpp
+++ lib/CodeGen/RegisterCoalescer.cpp
@@ -330,7 +330,9 @@
     Flipped = true;
   }
 
-  const MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
+  const MachineFunction &MF = *MI->getParent()->getParent();
+  const TargetSubtargetInfo &STI = MF.getSubtarget();
+  const MachineRegisterInfo &MRI = MF.getRegInfo();
 
   if (TargetRegisterInfo::isPhysicalRegister(Dst)) {
     // Eliminate DstSub on a physreg.
@@ -358,7 +360,7 @@
     if (Src == Dst && SrcSub != DstSub)
       return false;
 
-    NewRC = TRI.getCommonSuperRegClass(SrcRC, SrcSub, DstRC, DstSub,
+    NewRC = TRI.getCommonSuperRegClass(SrcRC, SrcSub, DstRC, DstSub, STI,
                                        SrcIdx, DstIdx);
     if (!NewRC)
       return false;
Index: lib/CodeGen/RegisterScavenging.cpp
===================================================================
--- lib/CodeGen/RegisterScavenging.cpp
+++ lib/CodeGen/RegisterScavenging.cpp
@@ -443,8 +443,8 @@
   // Find an available scavenging slot with size and alignment matching
   // the requirements of the class RC.
   const MachineFrameInfo &MFI = MF.getFrameInfo();
-  unsigned NeedSize = RC->getSize();
-  unsigned NeedAlign = RC->getAlignment();
+  unsigned NeedSize = MRI->getSpillSize(RC);
+  unsigned NeedAlign = MRI->getSpillAlignment(RC);
 
   unsigned SI = Scavenged.size(), Diff = UINT_MAX;
   int FIB = MFI.getObjectIndexBegin(), FIE = MFI.getObjectIndexEnd();
Index: lib/CodeGen/StackMaps.cpp
===================================================================
--- lib/CodeGen/StackMaps.cpp
+++ lib/CodeGen/StackMaps.cpp
@@ -92,7 +92,8 @@
 StackMaps::parseOperand(MachineInstr::const_mop_iterator MOI,
                         MachineInstr::const_mop_iterator MOE, LocationVec &Locs,
                         LiveOutVec &LiveOuts) const {
-  const TargetRegisterInfo *TRI = AP.MF->getSubtarget().getRegisterInfo();
+  const TargetSubtargetInfo &STI = AP.MF->getSubtarget();
+  const TargetRegisterInfo *TRI = STI.getRegisterInfo();
   if (MOI->isImm()) {
     switch (MOI->getImm()) {
     default:
@@ -150,7 +151,8 @@
     if (SubRegIdx)
       Offset = TRI->getSubRegIdxOffset(SubRegIdx);
 
-    Locs.emplace_back(Location::Register, RC->getSize(), DwarfRegNum, Offset);
+    unsigned Size = TRI->getSpillSize(RC->getID(), STI);
+    Locs.emplace_back(Location::Register, Size, DwarfRegNum, Offset);
     return ++MOI;
   }
 
@@ -232,10 +234,11 @@
 
 /// Create a live-out register record for the given register Reg.
 StackMaps::LiveOutReg
-StackMaps::createLiveOutReg(unsigned Reg, const TargetRegisterInfo *TRI) const {
+StackMaps::createLiveOutReg(unsigned Reg, const TargetRegisterInfo *TRI,
+                            const TargetSubtargetInfo &STI) const {
   unsigned DwarfRegNum = getDwarfRegNum(Reg, TRI);
-  unsigned Size = TRI->getMinimalPhysRegClass(Reg)->getSize();
-  return LiveOutReg(Reg, DwarfRegNum, Size);
+  unsigned ID = TRI->getMinimalPhysRegClass(Reg)->getID();
+  return LiveOutReg(Reg, DwarfRegNum, TRI->getSpillSize(ID, STI));
 }
 
 /// Parse the register live-out mask and return a vector of live-out registers
@@ -243,13 +246,14 @@
 StackMaps::LiveOutVec
 StackMaps::parseRegisterLiveOutMask(const uint32_t *Mask) const {
   assert(Mask && "No register mask specified");
-  const TargetRegisterInfo *TRI = AP.MF->getSubtarget().getRegisterInfo();
+  const TargetSubtargetInfo &STI = AP.MF->getSubtarget();
+  const TargetRegisterInfo *TRI = STI.getRegisterInfo();
   LiveOutVec LiveOuts;
 
   // Create a LiveOutReg for each bit that is set in the register mask.
   for (unsigned Reg = 0, NumRegs = TRI->getNumRegs(); Reg != NumRegs; ++Reg)
     if ((Mask[Reg / 32] >> Reg % 32) & 1)
-      LiveOuts.push_back(createLiveOutReg(Reg, TRI));
+      LiveOuts.push_back(createLiveOutReg(Reg, TRI, STI));
 
   // We don't need to keep track of a register if its super-register is already
   // in the list. Merge entries that refer to the same dwarf register and use
Index: lib/CodeGen/TargetInstrInfo.cpp
===================================================================
--- lib/CodeGen/TargetInstrInfo.cpp
+++ lib/CodeGen/TargetInstrInfo.cpp
@@ -345,12 +345,15 @@
                                     unsigned SubIdx, unsigned &Size,
                                     unsigned &Offset,
                                     const MachineFunction &MF) const {
+  const TargetSubtargetInfo &STI = MF.getSubtarget();
+  const TargetRegisterInfo *TRI = STI.getRegisterInfo();
+  unsigned RCSize = TRI->getSpillSize(RC->getID(), STI);
+
   if (!SubIdx) {
-    Size = RC->getSize();
+    Size = RCSize;
     Offset = 0;
     return true;
   }
-  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
   unsigned BitSize = TRI->getSubRegIdxSize(SubIdx);
   // Convert bit size to byte size to be consistent with
   // MCRegisterClass::getSize().
@@ -364,10 +367,10 @@
   Size = BitSize /= 8;
   Offset = (unsigned)BitOffset / 8;
 
-  assert(RC->getSize() >= (Offset + Size) && "bad subregister range");
+  assert(RCSize >= (Offset + Size) && "bad subregister range");
 
   if (!MF.getDataLayout().isLittleEndian()) {
-    Offset = RC->getSize() - (Offset + Size);
+    Offset = RCSize - (Offset + Size);
   }
   return true;
 }
Index: lib/CodeGen/TargetLoweringBase.cpp
===================================================================
--- lib/CodeGen/TargetLoweringBase.cpp
+++ lib/CodeGen/TargetLoweringBase.cpp
@@ -1253,27 +1253,32 @@
 // isTypeLegal over as well - a massive change that would just require
 // TargetLowering having a TargetRegisterInfo class member that it would use.
 std::pair<const TargetRegisterClass *, uint8_t>
-TargetLoweringBase::findRepresentativeClass(const TargetRegisterInfo *TRI,
+TargetLoweringBase::findRepresentativeClass(const TargetSubtargetInfo &STI,
                                             MVT VT) const {
   const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy];
   if (!RC)
     return std::make_pair(RC, 0);
 
   // Compute the set of all super-register classes.
-  BitVector SuperRegRC(TRI->getNumRegClasses());
-  for (SuperRegClassIterator RCI(RC, TRI); RCI.isValid(); ++RCI)
+  const TargetRegisterInfo &TRI = *STI.getRegisterInfo();
+  BitVector SuperRegRC(TRI.getNumRegClasses());
+  for (SuperRegClassIterator RCI(RC, &TRI); RCI.isValid(); ++RCI)
     SuperRegRC.setBitsInMask(RCI.getMask());
 
   // Find the first legal register class with the largest spill size.
   const TargetRegisterClass *BestRC = RC;
+  unsigned BestSize = TRI.getSpillSize(BestRC->getID(), STI);
+
   for (int i = SuperRegRC.find_first(); i >= 0; i = SuperRegRC.find_next(i)) {
-    const TargetRegisterClass *SuperRC = TRI->getRegClass(i);
+    const TargetRegisterClass *SuperRC = TRI.getRegClass(i);
     // We want the largest possible spill size.
-    if (SuperRC->getSize() <= BestRC->getSize())
+    unsigned SuperSize = TRI.getSpillSize(SuperRC->getID(), STI);
+    if (SuperSize <= BestSize)
       continue;
     if (!isLegalRC(SuperRC))
      continue;
     BestRC = SuperRC;
+    BestSize = SuperSize;
   }
   return std::make_pair(BestRC, 1);
 }
@@ -1281,7 +1286,7 @@
 /// computeRegisterProperties - Once all of the register classes are added,
 /// this allows us to compute derived properties we expose.
 void TargetLoweringBase::computeRegisterProperties(
-    const TargetRegisterInfo *TRI) {
+    const TargetSubtargetInfo &STI) {
   static_assert(MVT::LAST_VALUETYPE <= MVT::MAX_ALLOWED_VALUETYPE,
                 "Too many value types for ValueTypeActions to hold!");
 
@@ -1465,7 +1470,7 @@
   for (unsigned i = 0; i != MVT::LAST_VALUETYPE; ++i) {
     const TargetRegisterClass* RRC;
     uint8_t Cost;
-    std::tie(RRC, Cost) = findRepresentativeClass(TRI, (MVT::SimpleValueType)i);
+    std::tie(RRC, Cost) = findRepresentativeClass(STI, (MVT::SimpleValueType)i);
     RepRegClassForVT[i] = RRC;
     RepRegClassCostForVT[i] = Cost;
   }
Index: lib/CodeGen/TargetRegisterInfo.cpp
===================================================================
--- lib/CodeGen/TargetRegisterInfo.cpp
+++ lib/CodeGen/TargetRegisterInfo.cpp
@@ -225,6 +225,7 @@
 const TargetRegisterClass *TargetRegisterInfo::
 getCommonSuperRegClass(const TargetRegisterClass *RCA, unsigned SubA,
                        const TargetRegisterClass *RCB, unsigned SubB,
+                       const TargetSubtargetInfo &STI,
                        unsigned &PreA, unsigned &PreB) const {
   assert(RCA && SubA && RCB && SubB && "Invalid arguments");
 
@@ -243,7 +244,7 @@
   const TargetRegisterClass *BestRC = nullptr;
   unsigned *BestPreA = &PreA;
   unsigned *BestPreB = &PreB;
-  if (RCA->getSize() < RCB->getSize()) {
+  if (getRegSize(RCA->getID(), STI) < getRegSize(RCB->getID(), STI)) {
     std::swap(RCA, RCB);
     std::swap(SubA, SubB);
     std::swap(BestPreA, BestPreB);
@@ -251,7 +252,7 @@
 
   // Also terminate the search one we have found a register class as small as
   // RCA.
-  unsigned MinSize = RCA->getSize();
+  unsigned MinSize = getRegSize(RCA->getID(), STI);
 
   for (SuperRegClassIterator IA(RCA, this, true); IA.isValid(); ++IA) {
     unsigned FinalA = composeSubRegIndices(IA.getSubReg(), SubA);
@@ -259,7 +260,7 @@
       // Check if a common super-register class exists for this index pair.
       const TargetRegisterClass *RC =
           firstCommonClass(IA.getMask(), IB.getMask(), this);
-      if (!RC || RC->getSize() < MinSize)
+      if (!RC || getRegSize(RC->getID(), STI) < MinSize)
         continue;
 
@@ -268,7 +269,8 @@
         continue;
 
       // Is RC a better candidate than BestRC?
-      if (BestRC && RC->getSize() >= BestRC->getSize())
+      if (BestRC &&
+          getRegSize(RC->getID(), STI) >= getRegSize(BestRC->getID(), STI))
         continue;
 
       // Yes, RC is the smallest super-register seen so far.
@@ -277,7 +279,7 @@
       *BestPreB = IB.getSubReg();
 
       // Bail early if we reached MinSize. We won't find a better candidate.
-      if (BestRC->getSize() == MinSize)
+      if (getRegSize(BestRC->getID(), STI) == MinSize)
         return BestRC;
     }
   }
@@ -286,7 +288,7 @@
 /// \brief Check if the registers defined by the pair (RegisterClass, SubReg)
 /// share the same register file.
-static bool shareSameRegisterFile(const TargetRegisterInfo &TRI,
+static bool shareSameRegisterFile(const TargetSubtargetInfo &STI,
                                   const TargetRegisterClass *DefRC,
                                   unsigned DefSubReg,
                                   const TargetRegisterClass *SrcRC,
                                   unsigned SrcSubReg) {
@@ -295,10 +297,12 @@
 
   if (DefRC == SrcRC)
     return true;
 
+  const TargetRegisterInfo &TRI = *STI.getRegisterInfo();
+
   // Both operands are sub registers. Check if they share a register class.
   unsigned SrcIdx, DefIdx;
   if (SrcSubReg && DefSubReg) {
-    return TRI.getCommonSuperRegClass(SrcRC, SrcSubReg, DefRC, DefSubReg,
+    return TRI.getCommonSuperRegClass(SrcRC, SrcSubReg, DefRC, DefSubReg, STI,
                                       SrcIdx, DefIdx) != nullptr;
   }
@@ -317,12 +321,14 @@
   return TRI.getCommonSubClass(DefRC, SrcRC) != nullptr;
 }
 
-bool TargetRegisterInfo::shouldRewriteCopySrc(const TargetRegisterClass *DefRC,
+bool TargetRegisterInfo::shouldRewriteCopySrc(const TargetSubtargetInfo &STI,
+                                              const TargetRegisterClass *DefRC,
                                               unsigned DefSubReg,
                                               const TargetRegisterClass *SrcRC,
                                               unsigned SrcSubReg) const {
   // If this source does not incur a cross register bank copy, use it.
-  return shareSameRegisterFile(*this, DefRC, DefSubReg, SrcRC, SrcSubReg);
+  assert(STI.getRegisterInfo() == this);
+  return shareSameRegisterFile(STI, DefRC, DefSubReg, SrcRC, SrcSubReg);
 }
 
 // Compute target-independent register allocator hints to help eliminate copies.
Index: lib/CodeGen/VirtRegMap.cpp
===================================================================
--- lib/CodeGen/VirtRegMap.cpp
+++ lib/CodeGen/VirtRegMap.cpp
@@ -73,8 +73,11 @@
 }
 
 unsigned VirtRegMap::createSpillSlot(const TargetRegisterClass *RC) {
-  int SS = MF->getFrameInfo().CreateSpillStackObject(RC->getSize(),
-                                                     RC->getAlignment());
+  const TargetSubtargetInfo &STI = MF->getSubtarget();
+  const TargetRegisterInfo &TRI = *STI.getRegisterInfo();
+  unsigned Size = TRI.getSpillSize(RC->getID(), STI);
+  unsigned Align = TRI.getSpillAlignment(RC->getID(), STI);
+  int SS = MF->getFrameInfo().CreateSpillStackObject(Size, Align);
   ++NumSpillSlots;
   return SS;
 }
Index: lib/Target/AArch64/AArch64FrameLowering.cpp
===================================================================
--- lib/Target/AArch64/AArch64FrameLowering.cpp
+++ lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -1150,6 +1150,7 @@
   // realistically that's not a big deal at this stage of the game.
   // The CSR spill slots have not been allocated yet, so estimateStackSize
   // won't include them.
+  MachineRegisterInfo &MRI = MF.getRegInfo();
   MachineFrameInfo &MFI = MF.getFrameInfo();
   unsigned CFSize = MFI.estimateStackSize(MF) + 8 * NumRegsSpilled;
   DEBUG(dbgs() << "Estimated stack frame size: " << CFSize << " bytes.\n");
@@ -1181,7 +1182,9 @@
   // an emergency spill slot.
if (!ExtraCSSpill) { const TargetRegisterClass *RC = &AArch64::GPR64RegClass; - int FI = MFI.CreateStackObject(RC->getSize(), RC->getAlignment(), false); + int FI = MFI.CreateStackObject(MRI.getSpillSize(RC), + MRI.getSpillAlignment(RC), + false); RS->addScavengingFrameIndex(FI); DEBUG(dbgs() << "No available CS registers, allocated fi#" << FI << " as the emergency spill slot.\n"); Index: lib/Target/AArch64/AArch64ISelLowering.cpp =================================================================== --- lib/Target/AArch64/AArch64ISelLowering.cpp +++ lib/Target/AArch64/AArch64ISelLowering.cpp @@ -100,7 +100,7 @@ } // Compute derived properties from the register classes - computeRegisterProperties(Subtarget->getRegisterInfo()); + computeRegisterProperties(*Subtarget); // Provide all sorts of operation actions setOperationAction(ISD::GlobalAddress, MVT::i64, Custom); Index: lib/Target/AArch64/AArch64InstrInfo.cpp =================================================================== --- lib/Target/AArch64/AArch64InstrInfo.cpp +++ lib/Target/AArch64/AArch64InstrInfo.cpp @@ -2269,7 +2269,8 @@ PtrInfo, MachineMemOperand::MOStore, MFI.getObjectSize(FI), Align); unsigned Opc = 0; bool Offset = true; - switch (RC->getSize()) { + unsigned StoreSize = TRI->getSpillSize(RC->getID(), Subtarget); + switch (StoreSize) { case 1: if (AArch64::FPR8RegClass.hasSubClassEq(RC)) Opc = AArch64::STRBui; @@ -2373,7 +2374,8 @@ unsigned Opc = 0; bool Offset = true; - switch (RC->getSize()) { + unsigned LoadSize = TRI->getSpillSize(RC->getID(), Subtarget); + switch (LoadSize) { case 1: if (AArch64::FPR8RegClass.hasSubClassEq(RC)) Opc = AArch64::LDRBui; Index: lib/Target/AMDGPU/AMDGPU.td =================================================================== --- lib/Target/AMDGPU/AMDGPU.td +++ lib/Target/AMDGPU/AMDGPU.td @@ -349,6 +349,10 @@ let ShouldEmitMatchRegisterName = 0; } +def AMDGPUAsmWriter : AsmWriter { + let PassSubtarget = 1; +} + def AMDGPUAsmVariants { string Default = "Default"; int 
Default_ID = 0; @@ -390,6 +394,7 @@ VOP3AsmParserVariant, SDWAAsmParserVariant, DPPAsmParserVariant]; + let AssemblyWriters = [AMDGPUAsmWriter]; } // Dummy Instruction itineraries for pseudo instructions Index: lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp =================================================================== --- lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp +++ lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp @@ -768,8 +768,9 @@ } } - AMDGPUInstPrinter::printRegOperand(MI->getOperand(OpNo).getReg(), O, - *TM.getSubtargetImpl(*MF->getFunction())->getRegisterInfo()); + const TargetSubtargetInfo &STI = MF->getSubtarget(); + AMDGPUInstPrinter::printRegOperand(MI->getOperand(OpNo).getReg(), STI, O, + *STI.getRegisterInfo()); return false; } Index: lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp =================================================================== --- lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp +++ lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp @@ -903,7 +903,9 @@ assert(AMDGPU::isSISrcOperand(InstDesc, OpNum)); APInt Literal(64, Val); - auto OpSize = AMDGPU::getRegOperandSize(AsmParser->getMRI(), InstDesc, OpNum); // expected operand size + // Expected operand size. 
+ auto OpSize = AMDGPU::getRegOperandSize(AsmParser->getMRI(), + AsmParser->getSTI(), InstDesc, OpNum); if (Imm.IsFPImm) { // We got fp literal token if (OpSize == 8) { // Expected 64-bit operand Index: lib/Target/AMDGPU/InstPrinter/AMDGPUInstPrinter.h =================================================================== --- lib/Target/AMDGPU/InstPrinter/AMDGPUInstPrinter.h +++ lib/Target/AMDGPU/InstPrinter/AMDGPUInstPrinter.h @@ -20,89 +20,150 @@ class AMDGPUInstPrinter : public MCInstPrinter { public: AMDGPUInstPrinter(const MCAsmInfo &MAI, const MCInstrInfo &MII, - const MCRegisterInfo &MRI) + const MCRegisterInfo &MRI) : MCInstPrinter(MAI, MII, MRI) {} //Autogenerated by tblgen - void printInstruction(const MCInst *MI, raw_ostream &O); + void printInstruction(const MCInst *MI, const MCSubtargetInfo &STI, + raw_ostream &O); static const char *getRegisterName(unsigned RegNo); void printInst(const MCInst *MI, raw_ostream &O, StringRef Annot, const MCSubtargetInfo &STI) override; - static void printRegOperand(unsigned RegNo, raw_ostream &O, - const MCRegisterInfo &MRI); + static void printRegOperand(unsigned RegNo, const MCSubtargetInfo &STI, + raw_ostream &O, const MCRegisterInfo &MRI); private: - void printU4ImmOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O); - void printU8ImmOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O); - void printU16ImmOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O); - void printU4ImmDecOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O); - void printU8ImmDecOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O); - void printU16ImmDecOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O); - void printU32ImmOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O); - void printNamedBit(const MCInst* MI, unsigned OpNo, raw_ostream& O, + void printU4ImmOperand(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O); + void printU8ImmOperand(const MCInst *MI, unsigned OpNo, + const 
MCSubtargetInfo &STI, raw_ostream &O); + void printU16ImmOperand(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O); + void printU4ImmDecOperand(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O); + void printU8ImmDecOperand(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O); + void printU16ImmDecOperand(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O); + void printU32ImmOperand(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O); + void printNamedBit(const MCInst* MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream& O, StringRef BitName); - void printOffen(const MCInst *MI, unsigned OpNo, raw_ostream &O); - void printIdxen(const MCInst *MI, unsigned OpNo, raw_ostream &O); - void printAddr64(const MCInst *MI, unsigned OpNo, raw_ostream &O); - void printMBUFOffset(const MCInst *MI, unsigned OpNo, raw_ostream &O); - void printOffset(const MCInst *MI, unsigned OpNo, raw_ostream &O); - void printOffset0(const MCInst *MI, unsigned OpNo, raw_ostream &O); - void printOffset1(const MCInst *MI, unsigned OpNo, raw_ostream &O); - void printSMRDOffset(const MCInst *MI, unsigned OpNo, raw_ostream &O); - void printSMRDLiteralOffset(const MCInst *MI, unsigned OpNo, raw_ostream &O); - void printGDS(const MCInst *MI, unsigned OpNo, raw_ostream &O); - void printGLC(const MCInst *MI, unsigned OpNo, raw_ostream &O); - void printSLC(const MCInst *MI, unsigned OpNo, raw_ostream &O); - void printTFE(const MCInst *MI, unsigned OpNo, raw_ostream &O); - void printDMask(const MCInst *MI, unsigned OpNo, raw_ostream &O); - void printUNorm(const MCInst *MI, unsigned OpNo, raw_ostream &O); - void printDA(const MCInst *MI, unsigned OpNo, raw_ostream &O); - void printR128(const MCInst *MI, unsigned OpNo, raw_ostream &O); - void printLWE(const MCInst *MI, unsigned OpNo, raw_ostream &O); - void printRegOperand(unsigned RegNo, raw_ostream &O); - void 
printVOPDst(const MCInst *MI, unsigned OpNo, raw_ostream &O); - void printImmediate32(uint32_t I, raw_ostream &O); - void printImmediate64(uint64_t I, raw_ostream &O); - void printOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O); - void printOperandAndFPInputMods(const MCInst *MI, unsigned OpNo, raw_ostream &O); - void printOperandAndIntInputMods(const MCInst *MI, unsigned OpNo, raw_ostream &O); - void printDPPCtrl(const MCInst *MI, unsigned OpNo, raw_ostream &O); - void printRowMask(const MCInst *MI, unsigned OpNo, raw_ostream &O); - void printBankMask(const MCInst *MI, unsigned OpNo, raw_ostream &O); - void printBoundCtrl(const MCInst *MI, unsigned OpNo, raw_ostream &O); - void printSDWASel(const MCInst *MI, unsigned OpNo, raw_ostream &O); - void printSDWADstSel(const MCInst *MI, unsigned OpNo, raw_ostream &O); - void printSDWASrc0Sel(const MCInst *MI, unsigned OpNo, raw_ostream &O); - void printSDWASrc1Sel(const MCInst *MI, unsigned OpNo, raw_ostream &O); - void printSDWADstUnused(const MCInst *MI, unsigned OpNo, raw_ostream &O); - static void printInterpSlot(const MCInst *MI, unsigned OpNum, raw_ostream &O); - void printMemOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O); - static void printIfSet(const MCInst *MI, unsigned OpNo, raw_ostream &O, + void printOffen(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O); + void printIdxen(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O); + void printAddr64(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O); + void printMBUFOffset(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O); + void printOffset(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O); + void printOffset0(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O); + void printOffset1(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O); + void 
printSMRDOffset(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O); + void printSMRDLiteralOffset(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O); + void printGDS(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O); + void printGLC(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O); + void printSLC(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O); + void printTFE(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O); + void printDMask(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O); + void printUNorm(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O); + void printDA(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O); + void printR128(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O); + void printLWE(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O); + void printVOPDst(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O); + void printImmediate32(uint32_t I, const MCSubtargetInfo &STI, raw_ostream &O); + void printImmediate64(uint64_t I, const MCSubtargetInfo &STI, raw_ostream &O); + void printOperand(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O); + void printOperandAndFPInputMods(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O); + void printOperandAndIntInputMods(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O); + void printDPPCtrl(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O); + void printRowMask(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O); + void printBankMask(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O); + void printBoundCtrl(const MCInst 
*MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O); + void printSDWASel(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O); + void printSDWADstSel(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O); + void printSDWASrc0Sel(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O); + void printSDWASrc1Sel(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O); + void printSDWADstUnused(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O); + static void printInterpSlot(const MCInst *MI, unsigned OpNum, + const MCSubtargetInfo &STI, raw_ostream &O); + void printMemOperand(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O); + static void printIfSet(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O, StringRef Asm, StringRef Default = ""); static void printIfSet(const MCInst *MI, unsigned OpNo, - raw_ostream &O, char Asm); - static void printAbs(const MCInst *MI, unsigned OpNo, raw_ostream &O); - static void printClamp(const MCInst *MI, unsigned OpNo, raw_ostream &O); - static void printClampSI(const MCInst *MI, unsigned OpNo, raw_ostream &O); - static void printOModSI(const MCInst *MI, unsigned OpNo, raw_ostream &O); - void printLiteral(const MCInst *MI, unsigned OpNo, raw_ostream &O); - static void printLast(const MCInst *MI, unsigned OpNo, raw_ostream &O); - static void printNeg(const MCInst *MI, unsigned OpNo, raw_ostream &O); - static void printOMOD(const MCInst *MI, unsigned OpNo, raw_ostream &O); - static void printRel(const MCInst *MI, unsigned OpNo, raw_ostream &O); + const MCSubtargetInfo &STI, raw_ostream &O, char Asm); + static void printAbs(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O); + static void printClamp(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O); + static void printClampSI(const MCInst 
*MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O); + static void printOModSI(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O); + void printLiteral(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O); + static void printLast(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O); + static void printNeg(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O); + static void printOMOD(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O); + static void printRel(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O); static void printUpdateExecMask(const MCInst *MI, unsigned OpNo, - raw_ostream &O); - static void printUpdatePred(const MCInst *MI, unsigned OpNo, raw_ostream &O); - static void printWrite(const MCInst *MI, unsigned OpNo, raw_ostream &O); - static void printSel(const MCInst *MI, unsigned OpNo, raw_ostream &O); - static void printBankSwizzle(const MCInst *MI, unsigned OpNo, raw_ostream &O); - static void printRSel(const MCInst *MI, unsigned OpNo, raw_ostream &O); - static void printCT(const MCInst *MI, unsigned OpNo, raw_ostream &O); - static void printKCache(const MCInst *MI, unsigned OpNo, raw_ostream &O); - static void printSendMsg(const MCInst *MI, unsigned OpNo, raw_ostream &O); - static void printWaitFlag(const MCInst *MI, unsigned OpNo, raw_ostream &O); - static void printHwreg(const MCInst *MI, unsigned OpNo, raw_ostream &O); + const MCSubtargetInfo &STI, raw_ostream &O); + static void printUpdatePred(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O); + static void printWrite(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O); + static void printSel(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O); + static void printBankSwizzle(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, 
raw_ostream &O); + static void printRSel(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O); + static void printCT(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O); + static void printKCache(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O); + static void printSendMsg(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O); + static void printWaitFlag(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O); + static void printHwreg(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O); }; } // End namespace llvm Index: lib/Target/AMDGPU/InstPrinter/AMDGPUInstPrinter.cpp =================================================================== --- lib/Target/AMDGPU/InstPrinter/AMDGPUInstPrinter.cpp +++ lib/Target/AMDGPU/InstPrinter/AMDGPUInstPrinter.cpp @@ -24,49 +24,58 @@ using namespace llvm; void AMDGPUInstPrinter::printInst(const MCInst *MI, raw_ostream &OS, - StringRef Annot, const MCSubtargetInfo &STI) { + StringRef Annot, + const MCSubtargetInfo &STI) { OS.flush(); - printInstruction(MI, OS); + printInstruction(MI, STI, OS); printAnnotation(OS, Annot); } void AMDGPUInstPrinter::printU4ImmOperand(const MCInst *MI, unsigned OpNo, - raw_ostream &O) { + const MCSubtargetInfo &STI, + raw_ostream &O) { O << formatHex(MI->getOperand(OpNo).getImm() & 0xf); } void AMDGPUInstPrinter::printU8ImmOperand(const MCInst *MI, unsigned OpNo, - raw_ostream &O) { + const MCSubtargetInfo &STI, + raw_ostream &O) { O << formatHex(MI->getOperand(OpNo).getImm() & 0xff); } void AMDGPUInstPrinter::printU16ImmOperand(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O) { O << formatHex(MI->getOperand(OpNo).getImm() & 0xffff); } void AMDGPUInstPrinter::printU32ImmOperand(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O) { O << formatHex(MI->getOperand(OpNo).getImm() & 0xffffffff); } 
void AMDGPUInstPrinter::printU4ImmDecOperand(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O) { O << formatDec(MI->getOperand(OpNo).getImm() & 0xf); } void AMDGPUInstPrinter::printU8ImmDecOperand(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O) { O << formatDec(MI->getOperand(OpNo).getImm() & 0xff); } void AMDGPUInstPrinter::printU16ImmDecOperand(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O) { O << formatDec(MI->getOperand(OpNo).getImm() & 0xffff); } void AMDGPUInstPrinter::printNamedBit(const MCInst* MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream& O, StringRef BitName) { if (MI->getOperand(OpNo).getImm()) { O << ' ' << BitName; @@ -74,112 +83,124 @@ } void AMDGPUInstPrinter::printOffen(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O) { - printNamedBit(MI, OpNo, O, "offen"); + printNamedBit(MI, OpNo, STI, O, "offen"); } void AMDGPUInstPrinter::printIdxen(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O) { - printNamedBit(MI, OpNo, O, "idxen"); + printNamedBit(MI, OpNo, STI, O, "idxen"); } void AMDGPUInstPrinter::printAddr64(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O) { - printNamedBit(MI, OpNo, O, "addr64"); + printNamedBit(MI, OpNo, STI, O, "addr64"); } void AMDGPUInstPrinter::printMBUFOffset(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O) { if (MI->getOperand(OpNo).getImm()) { O << " offset:"; - printU16ImmDecOperand(MI, OpNo, O); + printU16ImmDecOperand(MI, OpNo, STI, O); } } void AMDGPUInstPrinter::printOffset(const MCInst *MI, unsigned OpNo, - raw_ostream &O) { + const MCSubtargetInfo &STI, + raw_ostream &O) { uint16_t Imm = MI->getOperand(OpNo).getImm(); if (Imm != 0) { O << " offset:"; - printU16ImmDecOperand(MI, OpNo, O); + printU16ImmDecOperand(MI, OpNo, STI, O); } } void AMDGPUInstPrinter::printOffset0(const MCInst *MI, 
unsigned OpNo, - raw_ostream &O) { + const MCSubtargetInfo &STI, + raw_ostream &O) { if (MI->getOperand(OpNo).getImm()) { O << " offset0:"; - printU8ImmDecOperand(MI, OpNo, O); + printU8ImmDecOperand(MI, OpNo, STI, O); } } void AMDGPUInstPrinter::printOffset1(const MCInst *MI, unsigned OpNo, - raw_ostream &O) { + const MCSubtargetInfo &STI, + raw_ostream &O) { if (MI->getOperand(OpNo).getImm()) { O << " offset1:"; - printU8ImmDecOperand(MI, OpNo, O); + printU8ImmDecOperand(MI, OpNo, STI, O); } } void AMDGPUInstPrinter::printSMRDOffset(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O) { - printU32ImmOperand(MI, OpNo, O); + printU32ImmOperand(MI, OpNo, STI, O); } void AMDGPUInstPrinter::printSMRDLiteralOffset(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O) { - printU32ImmOperand(MI, OpNo, O); + printU32ImmOperand(MI, OpNo, STI, O); } void AMDGPUInstPrinter::printGDS(const MCInst *MI, unsigned OpNo, - raw_ostream &O) { - printNamedBit(MI, OpNo, O, "gds"); + const MCSubtargetInfo &STI, raw_ostream &O) { + printNamedBit(MI, OpNo, STI, O, "gds"); } void AMDGPUInstPrinter::printGLC(const MCInst *MI, unsigned OpNo, - raw_ostream &O) { - printNamedBit(MI, OpNo, O, "glc"); + const MCSubtargetInfo &STI, raw_ostream &O) { + printNamedBit(MI, OpNo, STI, O, "glc"); } void AMDGPUInstPrinter::printSLC(const MCInst *MI, unsigned OpNo, - raw_ostream &O) { - printNamedBit(MI, OpNo, O, "slc"); + const MCSubtargetInfo &STI, raw_ostream &O) { + printNamedBit(MI, OpNo, STI, O, "slc"); } void AMDGPUInstPrinter::printTFE(const MCInst *MI, unsigned OpNo, - raw_ostream &O) { - printNamedBit(MI, OpNo, O, "tfe"); + const MCSubtargetInfo &STI, raw_ostream &O) { + printNamedBit(MI, OpNo, STI, O, "tfe"); } void AMDGPUInstPrinter::printDMask(const MCInst *MI, unsigned OpNo, - raw_ostream &O) { + const MCSubtargetInfo &STI, raw_ostream &O) { if (MI->getOperand(OpNo).getImm()) { O << " dmask:"; - printU16ImmOperand(MI, OpNo, O); + 
printU16ImmOperand(MI, OpNo, STI, O); } } void AMDGPUInstPrinter::printUNorm(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O) { - printNamedBit(MI, OpNo, O, "unorm"); + printNamedBit(MI, OpNo, STI, O, "unorm"); } void AMDGPUInstPrinter::printDA(const MCInst *MI, unsigned OpNo, - raw_ostream &O) { - printNamedBit(MI, OpNo, O, "da"); + const MCSubtargetInfo &STI, raw_ostream &O) { + printNamedBit(MI, OpNo, STI, O, "da"); } void AMDGPUInstPrinter::printR128(const MCInst *MI, unsigned OpNo, - raw_ostream &O) { - printNamedBit(MI, OpNo, O, "r128"); + const MCSubtargetInfo &STI, raw_ostream &O) { + printNamedBit(MI, OpNo, STI, O, "r128"); } void AMDGPUInstPrinter::printLWE(const MCInst *MI, unsigned OpNo, - raw_ostream &O) { - printNamedBit(MI, OpNo, O, "lwe"); + const MCSubtargetInfo &STI, raw_ostream &O) { + printNamedBit(MI, OpNo, STI, O, "lwe"); } -void AMDGPUInstPrinter::printRegOperand(unsigned reg, raw_ostream &O, +void AMDGPUInstPrinter::printRegOperand(unsigned reg, + const MCSubtargetInfo &STI, + raw_ostream &O, const MCRegisterInfo &MRI) { switch (reg) { case AMDGPU::VCC: @@ -291,6 +312,7 @@ } void AMDGPUInstPrinter::printVOPDst(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O) { if (MII.get(MI->getOpcode()).TSFlags & SIInstrFlags::VOP3) O << "_e64 "; @@ -301,10 +323,12 @@ else O << "_e32 "; - printOperand(MI, OpNo, O); + printOperand(MI, OpNo, STI, O); } -void AMDGPUInstPrinter::printImmediate32(uint32_t Imm, raw_ostream &O) { +void AMDGPUInstPrinter::printImmediate32(uint32_t Imm, + const MCSubtargetInfo &STI, + raw_ostream &O) { int32_t SImm = static_cast<int32_t>(Imm); if (SImm >= -16 && SImm <= 64) { O << SImm; @@ -333,7 +357,9 @@ O << formatHex(static_cast<uint64_t>(Imm)); } -void AMDGPUInstPrinter::printImmediate64(uint64_t Imm, raw_ostream &O) { +void AMDGPUInstPrinter::printImmediate64(uint64_t Imm, + const MCSubtargetInfo &STI, + raw_ostream &O) { int64_t SImm = static_cast<int64_t>(Imm); if (SImm >= -16 && SImm <= 64) 
{ O << SImm; @@ -368,8 +394,8 @@ } void AMDGPUInstPrinter::printOperand(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O) { - if (OpNo >= MI->getNumOperands()) { O << "/*Missing OP" << OpNo << "*/"; return; @@ -383,7 +409,7 @@ break; default: - printRegOperand(Op.getReg(), O, MRI); + printRegOperand(Op.getReg(), STI, O, MRI); break; } } else if (Op.isImm()) { @@ -391,14 +417,14 @@ int RCID = Desc.OpInfo[OpNo].RegClass; if (RCID != -1) { const MCRegisterClass &ImmRC = MRI.getRegClass(RCID); - if (ImmRC.getSize() == 4) - printImmediate32(Op.getImm(), O); - else if (ImmRC.getSize() == 8) - printImmediate64(Op.getImm(), O); + if (MRI.getRegSize(ImmRC.getID(), STI) == 4) + printImmediate32(Op.getImm(), STI, O); + else if (MRI.getRegSize(ImmRC.getID(), STI) == 8) + printImmediate64(Op.getImm(), STI, O); else llvm_unreachable("Invalid register class size"); } else if (Desc.OpInfo[OpNo].OperandType == MCOI::OPERAND_IMMEDIATE) { - printImmediate32(Op.getImm(), O); + printImmediate32(Op.getImm(), STI, O); } else { // We hit this for the immediate instruction bits that don't yet have a // custom printer. 
@@ -412,11 +438,10 @@ else { const MCInstrDesc &Desc = MII.get(MI->getOpcode()); const MCRegisterClass &ImmRC = MRI.getRegClass(Desc.OpInfo[OpNo].RegClass); - - if (ImmRC.getSize() == 4) - printImmediate32(FloatToBits(Op.getFPImm()), O); - else if (ImmRC.getSize() == 8) - printImmediate64(DoubleToBits(Op.getFPImm()), O); + if (MRI.getRegSize(ImmRC.getID(), STI) == 4) + printImmediate32(FloatToBits(Op.getFPImm()), STI, O); + else if (MRI.getRegSize(ImmRC.getID(), STI) == 8) + printImmediate64(DoubleToBits(Op.getFPImm()), STI, O); else llvm_unreachable("Invalid register class size"); } @@ -429,32 +454,35 @@ } void AMDGPUInstPrinter::printOperandAndFPInputMods(const MCInst *MI, - unsigned OpNo, - raw_ostream &O) { + unsigned OpNo, + const MCSubtargetInfo &STI, + raw_ostream &O) { unsigned InputModifiers = MI->getOperand(OpNo).getImm(); if (InputModifiers & SISrcMods::NEG) O << '-'; if (InputModifiers & SISrcMods::ABS) O << '|'; - printOperand(MI, OpNo + 1, O); + printOperand(MI, OpNo + 1, STI, O); if (InputModifiers & SISrcMods::ABS) O << '|'; } void AMDGPUInstPrinter::printOperandAndIntInputMods(const MCInst *MI, - unsigned OpNo, - raw_ostream &O) { + unsigned OpNo, + const MCSubtargetInfo &STI, + raw_ostream &O) { unsigned InputModifiers = MI->getOperand(OpNo).getImm(); if (InputModifiers & SISrcMods::SEXT) O << "sext("; - printOperand(MI, OpNo + 1, O); + printOperand(MI, OpNo + 1, STI, O); if (InputModifiers & SISrcMods::SEXT) O << ')'; } void AMDGPUInstPrinter::printDPPCtrl(const MCInst *MI, unsigned OpNo, - raw_ostream &O) { + const MCSubtargetInfo &STI, + raw_ostream &O) { unsigned Imm = MI->getOperand(OpNo).getImm(); if (Imm <= 0x0ff) { O << " quad_perm:["; @@ -464,13 +492,13 @@ O << formatDec((Imm & 0xc0) >> 6) << ']'; } else if ((Imm >= 0x101) && (Imm <= 0x10f)) { O << " row_shl:"; - printU4ImmDecOperand(MI, OpNo, O); + printU4ImmDecOperand(MI, OpNo, STI, O); } else if ((Imm >= 0x111) && (Imm <= 0x11f)) { O << " row_shr:"; - printU4ImmDecOperand(MI, OpNo, O); 
+ printU4ImmDecOperand(MI, OpNo, STI, O); } else if ((Imm >= 0x121) && (Imm <= 0x12f)) { O << " row_ror:"; - printU4ImmDecOperand(MI, OpNo, O); + printU4ImmDecOperand(MI, OpNo, STI, O); } else if (Imm == 0x130) { O << " wave_shl:1"; } else if (Imm == 0x134) { @@ -493,19 +521,22 @@ } void AMDGPUInstPrinter::printRowMask(const MCInst *MI, unsigned OpNo, - raw_ostream &O) { + const MCSubtargetInfo &STI, + raw_ostream &O) { O << " row_mask:"; - printU4ImmOperand(MI, OpNo, O); + printU4ImmOperand(MI, OpNo, STI, O); } void AMDGPUInstPrinter::printBankMask(const MCInst *MI, unsigned OpNo, - raw_ostream &O) { + const MCSubtargetInfo &STI, + raw_ostream &O) { O << " bank_mask:"; - printU4ImmOperand(MI, OpNo, O); + printU4ImmOperand(MI, OpNo, STI, O); } void AMDGPUInstPrinter::printBoundCtrl(const MCInst *MI, unsigned OpNo, - raw_ostream &O) { + const MCSubtargetInfo &STI, + raw_ostream &O) { unsigned Imm = MI->getOperand(OpNo).getImm(); if (Imm) { O << " bound_ctrl:0"; // XXX - this syntax is used in sp3 @@ -513,6 +544,7 @@ } void AMDGPUInstPrinter::printSDWASel(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O) { unsigned Imm = MI->getOperand(OpNo).getImm(); switch (Imm) { @@ -528,24 +560,28 @@ } void AMDGPUInstPrinter::printSDWADstSel(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O) { O << "dst_sel:"; - printSDWASel(MI, OpNo, O); + printSDWASel(MI, OpNo, STI, O); } void AMDGPUInstPrinter::printSDWASrc0Sel(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O) { O << "src0_sel:"; - printSDWASel(MI, OpNo, O); + printSDWASel(MI, OpNo, STI, O); } void AMDGPUInstPrinter::printSDWASrc1Sel(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O) { O << "src1_sel:"; - printSDWASel(MI, OpNo, O); + printSDWASel(MI, OpNo, STI, O); } void AMDGPUInstPrinter::printSDWADstUnused(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O) { O << "dst_unused:"; 
unsigned Imm = MI->getOperand(OpNo).getImm(); @@ -558,6 +594,7 @@ } void AMDGPUInstPrinter::printInterpSlot(const MCInst *MI, unsigned OpNum, + const MCSubtargetInfo &STI, raw_ostream &O) { unsigned Imm = MI->getOperand(OpNum).getImm(); @@ -573,13 +610,15 @@ } void AMDGPUInstPrinter::printMemOperand(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O) { - printOperand(MI, OpNo, O); + printOperand(MI, OpNo, STI, O); O << ", "; - printOperand(MI, OpNo + 1, O); + printOperand(MI, OpNo + 1, STI, O); } void AMDGPUInstPrinter::printIfSet(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O, StringRef Asm, StringRef Default) { const MCOperand &Op = MI->getOperand(OpNo); @@ -592,6 +631,7 @@ } void AMDGPUInstPrinter::printIfSet(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O, char Asm) { const MCOperand &Op = MI->getOperand(OpNo); assert(Op.isImm()); @@ -600,23 +640,26 @@ } void AMDGPUInstPrinter::printAbs(const MCInst *MI, unsigned OpNo, - raw_ostream &O) { - printIfSet(MI, OpNo, O, '|'); + const MCSubtargetInfo &STI, raw_ostream &O) { + printIfSet(MI, OpNo, STI, O, '|'); } void AMDGPUInstPrinter::printClamp(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O) { - printIfSet(MI, OpNo, O, "_SAT"); + printIfSet(MI, OpNo, STI, O, "_SAT"); } void AMDGPUInstPrinter::printClampSI(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O) { if (MI->getOperand(OpNo).getImm()) O << " clamp"; } void AMDGPUInstPrinter::printOModSI(const MCInst *MI, unsigned OpNo, - raw_ostream &O) { + const MCSubtargetInfo &STI, + raw_ostream &O) { int Imm = MI->getOperand(OpNo).getImm(); if (Imm == SIOutMods::MUL2) O << " mul:2"; @@ -627,6 +670,7 @@ } void AMDGPUInstPrinter::printLiteral(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O) { const MCOperand &Op = MI->getOperand(OpNo); assert(Op.isImm() || Op.isExpr()); @@ -640,17 +684,17 @@ } void 
AMDGPUInstPrinter::printLast(const MCInst *MI, unsigned OpNo, - raw_ostream &O) { - printIfSet(MI, OpNo, O, "*", " "); + const MCSubtargetInfo &STI, raw_ostream &O) { + printIfSet(MI, OpNo, STI, O, "*", " "); } void AMDGPUInstPrinter::printNeg(const MCInst *MI, unsigned OpNo, - raw_ostream &O) { - printIfSet(MI, OpNo, O, '-'); + const MCSubtargetInfo &STI, raw_ostream &O) { + printIfSet(MI, OpNo, STI, O, '-'); } void AMDGPUInstPrinter::printOMOD(const MCInst *MI, unsigned OpNo, - raw_ostream &O) { + const MCSubtargetInfo &STI, raw_ostream &O) { switch (MI->getOperand(OpNo).getImm()) { default: break; case 1: @@ -666,22 +710,25 @@ } void AMDGPUInstPrinter::printRel(const MCInst *MI, unsigned OpNo, - raw_ostream &O) { - printIfSet(MI, OpNo, O, '+'); + const MCSubtargetInfo &STI, raw_ostream &O) { + printIfSet(MI, OpNo, STI, O, '+'); } void AMDGPUInstPrinter::printUpdateExecMask(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O) { - printIfSet(MI, OpNo, O, "ExecMask,"); + printIfSet(MI, OpNo, STI, O, "ExecMask,"); } void AMDGPUInstPrinter::printUpdatePred(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O) { - printIfSet(MI, OpNo, O, "Pred,"); + printIfSet(MI, OpNo, STI, O, "Pred,"); } void AMDGPUInstPrinter::printWrite(const MCInst *MI, unsigned OpNo, - raw_ostream &O) { + const MCSubtargetInfo &STI, + raw_ostream &O) { const MCOperand &Op = MI->getOperand(OpNo); if (Op.getImm() == 0) { O << " (MASKED)"; @@ -689,7 +736,8 @@ } void AMDGPUInstPrinter::printSel(const MCInst *MI, unsigned OpNo, - raw_ostream &O) { + const MCSubtargetInfo &STI, + raw_ostream &O) { const char * chans = "XYZW"; int sel = MI->getOperand(OpNo).getImm(); @@ -713,6 +761,7 @@ } void AMDGPUInstPrinter::printBankSwizzle(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O) { int BankSwizzle = MI->getOperand(OpNo).getImm(); switch (BankSwizzle) { @@ -738,7 +787,7 @@ } void AMDGPUInstPrinter::printRSel(const MCInst *MI, 
unsigned OpNo, - raw_ostream &O) { + const MCSubtargetInfo &STI, raw_ostream &O) { unsigned Sel = MI->getOperand(OpNo).getImm(); switch (Sel) { case 0: @@ -768,7 +817,7 @@ } void AMDGPUInstPrinter::printCT(const MCInst *MI, unsigned OpNo, - raw_ostream &O) { + const MCSubtargetInfo &STI, raw_ostream &O) { unsigned CT = MI->getOperand(OpNo).getImm(); switch (CT) { case 0: @@ -783,6 +832,7 @@ } void AMDGPUInstPrinter::printKCache(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O) { int KCacheMode = MI->getOperand(OpNo).getImm(); if (KCacheMode > 0) { @@ -795,6 +845,7 @@ } void AMDGPUInstPrinter::printSendMsg(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O) { using namespace llvm::AMDGPU::SendMsg; @@ -835,6 +886,7 @@ } void AMDGPUInstPrinter::printWaitFlag(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O) { unsigned SImm16 = MI->getOperand(OpNo).getImm(); unsigned Vmcnt = SImm16 & 0xF; @@ -863,6 +915,7 @@ } void AMDGPUInstPrinter::printHwreg(const MCInst *MI, unsigned OpNo, + const MCSubtargetInfo &STI, raw_ostream &O) { using namespace llvm::AMDGPU::Hwreg; Index: lib/Target/AMDGPU/MCTargetDesc/SIMCCodeEmitter.cpp =================================================================== --- lib/Target/AMDGPU/MCTargetDesc/SIMCCodeEmitter.cpp +++ lib/Target/AMDGPU/MCTargetDesc/SIMCCodeEmitter.cpp @@ -214,7 +214,7 @@ // Is this operand a literal immediate? const MCOperand &Op = MI.getOperand(i); - if (getLitEncoding(Op, RC.getSize(), STI) != 255) + if (getLitEncoding(Op, MRI.getRegSize(RC.getID(), STI), STI) != 255) continue; // Yes! 
Encode it @@ -279,9 +279,8 @@ const MCInstrDesc &Desc = MCII.get(MI.getOpcode()); if (AMDGPU::isSISrcOperand(Desc, OpNo)) { - uint32_t Enc = getLitEncoding(MO, - AMDGPU::getRegOperandSize(&MRI, Desc, OpNo), - STI); + unsigned Size = AMDGPU::getRegOperandSize(&MRI, STI, Desc, OpNo); + uint32_t Enc = getLitEncoding(MO, Size, STI); if (Enc != ~0U && (Enc != 255 || Desc.getSize() == 4)) return Enc; Index: lib/Target/AMDGPU/R600ISelLowering.cpp =================================================================== --- lib/Target/AMDGPU/R600ISelLowering.cpp +++ lib/Target/AMDGPU/R600ISelLowering.cpp @@ -40,7 +40,7 @@ addRegisterClass(MVT::v4f32, &AMDGPU::R600_Reg128RegClass); addRegisterClass(MVT::v4i32, &AMDGPU::R600_Reg128RegClass); - computeRegisterProperties(STI.getRegisterInfo()); + computeRegisterProperties(STI); // Legalize loads and stores to the private address space. setOperationAction(ISD::LOAD, MVT::i32, Custom); Index: lib/Target/AMDGPU/SIFixSGPRCopies.cpp =================================================================== --- lib/Target/AMDGPU/SIFixSGPRCopies.cpp +++ lib/Target/AMDGPU/SIFixSGPRCopies.cpp @@ -114,12 +114,13 @@ static bool hasVGPROperands(const MachineInstr &MI, const SIRegisterInfo *TRI) { const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); + const TargetSubtargetInfo &STI = MRI.getTargetSubtargetInfo(); for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { if (!MI.getOperand(i).isReg() || !TargetRegisterInfo::isVirtualRegister(MI.getOperand(i).getReg())) continue; - if (TRI->hasVGPRs(MRI.getRegClass(MI.getOperand(i).getReg()))) + if (TRI->hasVGPRs(MRI.getRegClass(MI.getOperand(i).getReg()), STI)) return true; } return false; @@ -150,14 +151,16 @@ static bool isVGPRToSGPRCopy(const TargetRegisterClass *SrcRC, const TargetRegisterClass *DstRC, - const SIRegisterInfo &TRI) { - return TRI.isSGPRClass(DstRC) && TRI.hasVGPRs(SrcRC); + const SIRegisterInfo &TRI, + const TargetSubtargetInfo &STI) { + return 
TRI.isSGPRClass(DstRC, STI) && TRI.hasVGPRs(SrcRC, STI); } static bool isSGPRToVGPRCopy(const TargetRegisterClass *SrcRC, const TargetRegisterClass *DstRC, - const SIRegisterInfo &TRI) { - return TRI.isSGPRClass(SrcRC) && TRI.hasVGPRs(DstRC); + const SIRegisterInfo &TRI, + const TargetSubtargetInfo &STI) { + return TRI.isSGPRClass(SrcRC, STI) && TRI.hasVGPRs(DstRC, STI); } // Distribute an SGPR->VGPR copy of a REG_SEQUENCE into a VGPR REG_SEQUENCE. @@ -178,9 +181,10 @@ const SIInstrInfo *TII, MachineRegisterInfo &MRI) { assert(MI.isRegSequence()); + const TargetSubtargetInfo &STI = MRI.getTargetSubtargetInfo(); unsigned DstReg = MI.getOperand(0).getReg(); - if (!TRI->isSGPRClass(MRI.getRegClass(DstReg))) + if (!TRI->isSGPRClass(MRI.getRegClass(DstReg), STI)) return false; if (!MRI.hasOneUse(DstReg)) @@ -193,7 +197,7 @@ const TargetRegisterClass *SrcRC, *DstRC; std::tie(SrcRC, DstRC) = getCopyRegClasses(CopyUse, *TRI, MRI); - if (!isSGPRToVGPRCopy(SrcRC, DstRC, *TRI)) + if (!isSGPRToVGPRCopy(SrcRC, DstRC, *TRI, STI)) return false; // TODO: Could have multiple extracts? 
@@ -218,11 +222,12 @@ unsigned SrcSubReg = MI.getOperand(I).getSubReg(); const TargetRegisterClass *SrcRC = MRI.getRegClass(SrcReg); - assert(TRI->isSGPRClass(SrcRC) && + assert(TRI->isSGPRClass(SrcRC, STI) && "Expected SGPR REG_SEQUENCE to only have SGPR inputs"); - SrcRC = TRI->getSubRegClass(SrcRC, SrcSubReg); - const TargetRegisterClass *NewSrcRC = TRI->getEquivalentVGPRClass(SrcRC); + SrcRC = TRI->getSubRegClass(SrcRC, SrcSubReg, STI); + const TargetRegisterClass *NewSrcRC = TRI->getEquivalentVGPRClass(SrcRC, + STI); unsigned TmpReg = MRI.createVirtualRegister(NewSrcRC); @@ -263,7 +268,7 @@ const TargetRegisterClass *SrcRC, *DstRC; std::tie(SrcRC, DstRC) = getCopyRegClasses(MI, *TRI, MRI); - if (isVGPRToSGPRCopy(SrcRC, DstRC, *TRI)) { + if (isVGPRToSGPRCopy(SrcRC, DstRC, *TRI, ST)) { DEBUG(dbgs() << "Fixing VGPR -> SGPR copy: " << MI); TII->moveToVALU(MI); } @@ -273,7 +278,7 @@ case AMDGPU::PHI: { DEBUG(dbgs() << "Fixing PHI: " << MI); unsigned Reg = MI.getOperand(0).getReg(); - if (!TRI->isSGPRClass(MRI.getRegClass(Reg))) + if (!TRI->isSGPRClass(MRI.getRegClass(Reg), ST)) break; // If a PHI node defines an SGPR and any of its operands are VGPRs, @@ -316,7 +321,7 @@ bool HasBreakDef = false; for (unsigned i = 1; i < MI.getNumOperands(); i+=2) { unsigned Reg = MI.getOperand(i).getReg(); - if (TRI->hasVGPRs(MRI.getRegClass(Reg))) { + if (TRI->hasVGPRs(MRI.getRegClass(Reg), ST)) { TII->moveToVALU(MI); break; } @@ -341,7 +346,7 @@ break; } case AMDGPU::REG_SEQUENCE: { - if (TRI->hasVGPRs(TII->getOpRegClass(MI, 0)) || + if (TRI->hasVGPRs(TII->getOpRegClass(MI, 0), ST) || !hasVGPROperands(MI, TRI)) { foldVGPRCopyIntoRegSequence(MI, TRI, TII, MRI); continue; @@ -357,8 +362,8 @@ DstRC = MRI.getRegClass(MI.getOperand(0).getReg()); Src0RC = MRI.getRegClass(MI.getOperand(1).getReg()); Src1RC = MRI.getRegClass(MI.getOperand(2).getReg()); - if (TRI->isSGPRClass(DstRC) && - (TRI->hasVGPRs(Src0RC) || TRI->hasVGPRs(Src1RC))) { + if (TRI->isSGPRClass(DstRC, ST) && + 
(TRI->hasVGPRs(Src0RC, ST) || TRI->hasVGPRs(Src1RC, ST))) { DEBUG(dbgs() << " Fixing INSERT_SUBREG: " << MI); TII->moveToVALU(MI); } Index: lib/Target/AMDGPU/SIFoldOperands.cpp =================================================================== --- lib/Target/AMDGPU/SIFoldOperands.cpp +++ lib/Target/AMDGPU/SIFoldOperands.cpp @@ -248,8 +248,8 @@ TRI.getRegClass(FoldDesc.OpInfo[0].RegClass); // Split 64-bit constants into 32-bits for folding. - if (FoldRC->getSize() == 8 && UseOp.getSubReg()) { - if (UseRC->getSize() != 8) + if (MRI.getRegSize(FoldRC) == 8 && UseOp.getSubReg()) { + if (MRI.getRegSize(UseRC) != 8) return; if (UseOp.getSubReg() == AMDGPU::sub0) { Index: lib/Target/AMDGPU/SIFrameLowering.cpp =================================================================== --- lib/Target/AMDGPU/SIFrameLowering.cpp +++ lib/Target/AMDGPU/SIFrameLowering.cpp @@ -320,9 +320,10 @@ "RegScavenger required if spilling"); if (MayNeedScavengingEmergencySlot) { + MachineRegisterInfo &MRI = MF.getRegInfo(); int ScavengeFI = MFI.CreateStackObject( - AMDGPU::SGPR_32RegClass.getSize(), - AMDGPU::SGPR_32RegClass.getAlignment(), false); + MRI.getSpillSize(&AMDGPU::SGPR_32RegClass), + MRI.getSpillAlignment(&AMDGPU::SGPR_32RegClass), false); RS->addScavengingFrameIndex(ScavengeFI); } } Index: lib/Target/AMDGPU/SIISelLowering.cpp =================================================================== --- lib/Target/AMDGPU/SIISelLowering.cpp +++ lib/Target/AMDGPU/SIISelLowering.cpp @@ -72,7 +72,7 @@ addRegisterClass(MVT::v16i32, &AMDGPU::SReg_512RegClass); addRegisterClass(MVT::v16f32, &AMDGPU::VReg_512RegClass); - computeRegisterProperties(STI.getRegisterInfo()); + computeRegisterProperties(STI); // We need to custom lower vector stores from local memory setOperationAction(ISD::LOAD, MVT::v2i32, Custom); @@ -1250,10 +1250,11 @@ // Returns subreg index, offset static std::pair computeIndirectRegAndOffset(const SIRegisterInfo &TRI, + const TargetSubtargetInfo &ST, const TargetRegisterClass 
*SuperRC, unsigned VecReg, int Offset) { - int NumElts = SuperRC->getSize() / 4; + int NumElts = TRI.getRegSize(SuperRC->getID(), ST) / 4; // Skip out of bounds offsets, or else we would end up using an undefined // register. @@ -1277,7 +1278,7 @@ assert(Idx->getReg() != AMDGPU::NoRegister); - if (!TII->getRegisterInfo().isSGPRClass(IdxRC)) + if (!TII->getRegisterInfo().isSGPRClass(IdxRC, MRI.getTargetSubtargetInfo())) return false; if (Offset == 0) { @@ -1308,7 +1309,8 @@ unsigned SubReg; std::tie(SubReg, Offset) - = computeIndirectRegAndOffset(TRI, VecRC, SrcVec->getReg(), Offset); + = computeIndirectRegAndOffset(TRI, MF->getSubtarget(), VecRC, + SrcVec->getReg(), Offset); if (setM0ToIndexFromSGPR(TII, MRI, MI, Offset)) { MachineBasicBlock::iterator I(&MI); @@ -1356,9 +1358,9 @@ assert(Val->getReg()); unsigned SubReg; - std::tie(SubReg, Offset) = computeIndirectRegAndOffset(TRI, VecRC, - SrcVec->getReg(), - Offset); + std::tie(SubReg, Offset) + = computeIndirectRegAndOffset(TRI, MF->getSubtarget(), VecRC, + SrcVec->getReg(), Offset); if (Idx->getReg() == AMDGPU::NoRegister) { MachineBasicBlock::iterator I(&MI); const DebugLoc &DL = MI.getDebugLoc(); Index: lib/Target/AMDGPU/SIInsertWaits.cpp =================================================================== --- lib/Target/AMDGPU/SIInsertWaits.cpp +++ lib/Target/AMDGPU/SIInsertWaits.cpp @@ -202,7 +202,7 @@ // XXX - What if this is a write into a super register? const TargetRegisterClass *RC = TII->getOpRegClass(MI, 0); - unsigned Size = RC->getSize(); + unsigned Size = TRI->getSpillSize(RC->getID(), *ST); Result.Named.LGKM = Size > 4 ? 2 : 1; } else { // s_dcache_inv etc. do not have a a destination register. 
Assume we @@ -274,7 +274,7 @@ RegInterval SIInsertWaits::getRegInterval(const TargetRegisterClass *RC, const MachineOperand &Reg) const { - unsigned Size = RC->getSize(); + unsigned Size = TRI->getRegSize(RC->getID(), *ST); assert(Size >= 4); RegInterval Result; Index: lib/Target/AMDGPU/SIInstrInfo.h =================================================================== --- lib/Target/AMDGPU/SIInstrInfo.h +++ lib/Target/AMDGPU/SIInstrInfo.h @@ -445,23 +445,11 @@ /// \brief Return the size in bytes of the operand OpNo on the given // instruction opcode. - unsigned getOpSize(uint16_t Opcode, unsigned OpNo) const { - const MCOperandInfo &OpInfo = get(Opcode).OpInfo[OpNo]; - - if (OpInfo.RegClass == -1) { - // If this is an immediate operand, this must be a 32-bit literal. - assert(OpInfo.OperandType == MCOI::OPERAND_IMMEDIATE); - return 4; - } - - return RI.getRegClass(OpInfo.RegClass)->getSize(); - } + unsigned getOpSize(uint16_t Opcode, unsigned OpNo) const; /// \brief This form should usually be preferred since it handles operands /// with unknown register classes. - unsigned getOpSize(const MachineInstr &MI, unsigned OpNo) const { - return getOpRegClass(MI, OpNo)->getSize(); - } + unsigned getOpSize(const MachineInstr &MI, unsigned OpNo) const; /// \returns true if it is legal for the operand at index \p OpNo /// to read a VGPR. 
Index: lib/Target/AMDGPU/SIInstrInfo.cpp =================================================================== --- lib/Target/AMDGPU/SIInstrInfo.cpp +++ lib/Target/AMDGPU/SIInstrInfo.cpp @@ -237,11 +237,11 @@ unsigned EltSize; if (LdSt.mayLoad()) - EltSize = getOpRegClass(LdSt, 0)->getSize() / 2; + EltSize = TRI->getRegSize(getOpRegClass(LdSt, 0)->getID(), ST) / 2; else { assert(LdSt.mayStore()); int Data0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0); - EltSize = getOpRegClass(LdSt, Data0Idx)->getSize(); + EltSize = TRI->getRegSize(getOpRegClass(LdSt, Data0Idx)->getID(), ST); } if (isStride64(Opc)) @@ -335,7 +335,7 @@ FirstLdSt.getParent()->getParent()->getRegInfo(); const TargetRegisterClass *DstRC = MRI.getRegClass(FirstDst->getReg()); - return (NumLoads * DstRC->getSize()) <= LoadClusterThreshold; + return (NumLoads * MRI.getSpillSize(DstRC)) <= LoadClusterThreshold; } void SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB, @@ -524,12 +524,13 @@ } unsigned SIInstrInfo::getMovOpcode(const TargetRegisterClass *DstRC) const { - - if (DstRC->getSize() == 4) { - return RI.isSGPRClass(DstRC) ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32; - } else if (DstRC->getSize() == 8 && RI.isSGPRClass(DstRC)) { + unsigned Size = RI.getRegSize(DstRC->getID(), ST); + if (Size == 4) { + return RI.isSGPRClass(DstRC, ST) ? 
AMDGPU::S_MOV_B32 + : AMDGPU::V_MOV_B32_e32; + } else if (Size == 8 && RI.isSGPRClass(DstRC, ST)) { return AMDGPU::S_MOV_B64; - } else if (DstRC->getSize() == 8 && !RI.isSGPRClass(DstRC)) { + } else if (Size == 8 && !RI.isSGPRClass(DstRC, ST)) { return AMDGPU::V_MOV_B64_PSEUDO; } return AMDGPU::COPY; @@ -590,17 +591,18 @@ = MF->getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore, Size, Align); - if (RI.isSGPRClass(RC)) { + if (RI.isSGPRClass(RC, ST)) { MFI->setHasSpilledSGPRs(); + MachineRegisterInfo &MRI = MF->getRegInfo(); // We are only allowed to create one new instruction when spilling // registers, so we need to use pseudo instruction for spilling SGPRs. - const MCInstrDesc &OpDesc = get(getSGPRSpillSaveOpcode(RC->getSize())); + unsigned Size = MRI.getSpillSize(RC); + const MCInstrDesc &OpDesc = get(getSGPRSpillSaveOpcode(Size)); // The SGPR spill/restore instructions only work on number sgprs, so we need // to make sure we are using the correct register class. - if (TargetRegisterInfo::isVirtualRegister(SrcReg) && RC->getSize() == 4) { - MachineRegisterInfo &MRI = MF->getRegInfo(); + if (TargetRegisterInfo::isVirtualRegister(SrcReg) && Size == 4) { MRI.constrainRegClass(SrcReg, &AMDGPU::SReg_32_XM0RegClass); } @@ -622,9 +624,9 @@ return; } - assert(RI.hasVGPRs(RC) && "Only VGPR spilling expected"); + assert(RI.hasVGPRs(RC, ST) && "Only VGPR spilling expected"); - unsigned Opcode = getVGPRSpillSaveOpcode(RC->getSize()); + unsigned Opcode = getVGPRSpillSaveOpcode(MF->getRegInfo().getSpillSize(RC)); MFI->setHasSpilledVGPRs(); BuildMI(MBB, MI, DL, get(Opcode)) .addReg(SrcReg, getKillRegState(isKill)) // data @@ -689,12 +691,13 @@ MachineMemOperand *MMO = MF->getMachineMemOperand( PtrInfo, MachineMemOperand::MOLoad, Size, Align); - if (RI.isSGPRClass(RC)) { + if (RI.isSGPRClass(RC, ST)) { + MachineRegisterInfo &MRI = MF->getRegInfo(); + unsigned Size = MRI.getSpillSize(RC); // FIXME: Maybe this should not include a memoperand because it will be // lowered to 
non-memory instructions. - const MCInstrDesc &OpDesc = get(getSGPRSpillRestoreOpcode(RC->getSize())); - if (TargetRegisterInfo::isVirtualRegister(DestReg) && RC->getSize() == 4) { - MachineRegisterInfo &MRI = MF->getRegInfo(); + const MCInstrDesc &OpDesc = get(getSGPRSpillRestoreOpcode(Size)); + if (TargetRegisterInfo::isVirtualRegister(DestReg) && Size == 4) { MRI.constrainRegClass(DestReg, &AMDGPU::SReg_32_XM0RegClass); } @@ -714,9 +717,10 @@ return; } - assert(RI.hasVGPRs(RC) && "Only VGPR spilling expected"); + assert(RI.hasVGPRs(RC, ST) && "Only VGPR spilling expected"); - unsigned Opcode = getVGPRSpillRestoreOpcode(RC->getSize()); + unsigned Opcode = getVGPRSpillRestoreOpcode( + MF->getRegInfo().getSpillSize(RC)); BuildMI(MBB, MI, DL, get(Opcode), DestReg) .addFrameIndex(FrameIndex) // vaddr .addReg(MFI->getScratchRSrcReg()) // scratch_rsrc @@ -1245,10 +1249,12 @@ // Multiplied part is the constant: Use v_madmk_f32 // We should only expect these to be on src0 due to canonicalizations. if (Src0->isReg() && Src0->getReg() == Reg) { - if (!Src1->isReg() || RI.isSGPRClass(MRI->getRegClass(Src1->getReg()))) + if (!Src1->isReg() || + RI.isSGPRClass(MRI->getRegClass(Src1->getReg()), ST)) return false; - if (!Src2->isReg() || RI.isSGPRClass(MRI->getRegClass(Src2->getReg()))) + if (!Src2->isReg() || + RI.isSGPRClass(MRI->getRegClass(Src2->getReg()), ST)) return false; // We need to swap operands 0 and 1 since madmk constant is at operand 1. @@ -1291,11 +1297,12 @@ if (Src2->isReg() && Src2->getReg() == Reg) { // Not allowed to use constant bus for another operand. // We can however allow an inline immediate as src0. 
- if (!Src0->isImm() && - (Src0->isReg() && RI.isSGPRClass(MRI->getRegClass(Src0->getReg())))) + if (!Src0->isImm() && Src0->isReg() && + RI.isSGPRClass(MRI->getRegClass(Src0->getReg()), ST)) return false; - if (!Src1->isReg() || RI.isSGPRClass(MRI->getRegClass(Src1->getReg()))) + if (!Src1->isReg() || + RI.isSGPRClass(MRI->getRegClass(Src1->getReg()), ST)) return false; const int64_t Imm = DefMI.getOperand(1).getImm(); @@ -1576,7 +1583,9 @@ if (OpInfo.RegClass < 0) return false; - unsigned OpSize = RI.getRegClass(OpInfo.RegClass)->getSize(); + const MachineFunction &MF = *MI.getParent()->getParent(); + const MachineRegisterInfo &MRI = MF.getRegInfo(); + unsigned OpSize = MRI.getRegSize(RI.getRegClass(OpInfo.RegClass)); if (isLiteralConstant(MO, OpSize)) return RI.opCanUseLiteralConstant(OpInfo.OperandType); @@ -1616,7 +1625,7 @@ return false; if (TargetRegisterInfo::isVirtualRegister(MO.getReg())) - return RI.isSGPRClass(MRI.getRegClass(MO.getReg())); + return RI.isSGPRClass(MRI.getRegClass(MO.getReg()), ST); // FLAT_SCR is just an SGPR pair. if (!MO.isImplicit() && (MO.getReg() == AMDGPU::FLAT_SCR)) @@ -1725,7 +1734,7 @@ case AMDGPU::OPERAND_REG_INLINE_C_INT: case AMDGPU::OPERAND_REG_INLINE_C_FP: if (isLiteralConstant(MI.getOperand(i), - RI.getRegClass(RegClass)->getSize())) { + MRI.getRegSize(RI.getRegClass(RegClass)))) { ErrInfo = "Illegal immediate value for operand."; return false; } @@ -1945,15 +1954,31 @@ return RI.getRegClass(RCID); } +unsigned SIInstrInfo::getOpSize(uint16_t Opcode, unsigned OpNo) const { + const MCOperandInfo &OpInfo = get(Opcode).OpInfo[OpNo]; + + if (OpInfo.RegClass == -1) { + // If this is an immediate operand, this must be a 32-bit literal. 
+ assert(OpInfo.OperandType == MCOI::OPERAND_IMMEDIATE); + return 4; + } + + return RI.getRegSize(RI.getRegClass(OpInfo.RegClass)->getID(), ST); +} + +unsigned SIInstrInfo::getOpSize(const MachineInstr &MI, unsigned OpNo) const { + return RI.getRegSize(getOpRegClass(MI, OpNo)->getID(), ST); +} + bool SIInstrInfo::canReadVGPR(const MachineInstr &MI, unsigned OpNo) const { switch (MI.getOpcode()) { case AMDGPU::COPY: case AMDGPU::REG_SEQUENCE: case AMDGPU::PHI: case AMDGPU::INSERT_SUBREG: - return RI.hasVGPRs(getOpRegClass(MI, 0)); + return RI.hasVGPRs(getOpRegClass(MI, 0), ST); default: - return RI.hasVGPRs(getOpRegClass(MI, OpNo)); + return RI.hasVGPRs(getOpRegClass(MI, OpNo), ST); } } @@ -1967,10 +1992,10 @@ unsigned Opcode = AMDGPU::V_MOV_B32_e32; if (MO.isReg()) Opcode = AMDGPU::COPY; - else if (RI.isSGPRClass(RC)) + else if (RI.isSGPRClass(RC, ST)) Opcode = AMDGPU::S_MOV_B32; - const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(RC); + const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(RC, ST); if (RI.getCommonSubClass(&AMDGPU::VReg_64RegClass, VRC)) VRC = &AMDGPU::VReg_64RegClass; else @@ -2057,7 +2082,7 @@ const SIRegisterInfo *TRI = static_cast(MRI.getTargetRegisterInfo()); - RC = TRI->getSubRegClass(RC, MO.getSubReg()); + RC = TRI->getSubRegClass(RC, MO.getSubReg(), ST); // In order to be legal, the common sub-class must be equal to the // class of the current operand. 
For example: @@ -2092,7 +2117,7 @@ if (!MO) MO = &MI.getOperand(OpIdx); - if (isVALU(MI) && usesConstantBus(MRI, *MO, DefinedRC->getSize())) { + if (isVALU(MI) && usesConstantBus(MRI, *MO, MRI.getRegSize(DefinedRC))) { RegSubRegPair SGPRUsed; if (MO->isReg()) @@ -2231,7 +2256,7 @@ if (!MO.isReg()) continue; - if (!RI.isSGPRClass(MRI.getRegClass(MO.getReg()))) + if (!RI.isSGPRClass(MRI.getRegClass(MO.getReg()), ST)) continue; // VGPRs are legal if (SGPRReg == AMDGPU::NoRegister || SGPRReg == MO.getReg()) { @@ -2249,9 +2274,9 @@ unsigned SIInstrInfo::readlaneVGPRToSGPR(unsigned SrcReg, MachineInstr &UseMI, MachineRegisterInfo &MRI) const { const TargetRegisterClass *VRC = MRI.getRegClass(SrcReg); - const TargetRegisterClass *SRC = RI.getEquivalentSGPRClass(VRC); + const TargetRegisterClass *SRC = RI.getEquivalentSGPRClass(VRC, ST); unsigned DstReg = MRI.createVirtualRegister(SRC); - unsigned SubRegs = VRC->getSize() / 4; + unsigned SubRegs = MRI.getRegSize(VRC) / 4; SmallVector SRegs; for (unsigned i = 0; i < SubRegs; ++i) { @@ -2280,7 +2305,7 @@ // loads with uniform pointers to SMRD instruction so we know the // pointer value is uniform. MachineOperand *SBase = getNamedOperand(MI, AMDGPU::OpName::sbase); - if (SBase && !RI.isSGPRClass(MRI.getRegClass(SBase->getReg()))) { + if (SBase && !RI.isSGPRClass(MRI.getRegClass(SBase->getReg()), ST)) { unsigned SGPR = readlaneVGPRToSGPR(SBase->getReg(), MI, MRI); SBase->setReg(SGPR); } @@ -2318,7 +2343,7 @@ continue; const TargetRegisterClass *OpRC = MRI.getRegClass(MI.getOperand(i).getReg()); - if (RI.hasVGPRs(OpRC)) { + if (RI.hasVGPRs(OpRC, ST)) { VRC = OpRC; } else { SRC = OpRC; @@ -2328,10 +2353,10 @@ // If any of the operands are VGPR registers, then they all most be // otherwise we will create illegal VGPR->SGPR copies when legalizing // them. 
- if (VRC || !RI.isSGPRClass(getOpRegClass(MI, 0))) { + if (VRC || !RI.isSGPRClass(getOpRegClass(MI, 0), ST)) { if (!VRC) { assert(SRC); - VRC = RI.getEquivalentVGPRClass(SRC); + VRC = RI.getEquivalentVGPRClass(SRC, ST); } RC = VRC; } else { @@ -2361,7 +2386,7 @@ if (MI.getOpcode() == AMDGPU::REG_SEQUENCE) { MachineBasicBlock *MBB = MI.getParent(); const TargetRegisterClass *DstRC = getOpRegClass(MI, 0); - if (RI.hasVGPRs(DstRC)) { + if (RI.hasVGPRs(DstRC, ST)) { // Update all the operands so they are VGPR register classes. These may // not be the same register class because REG_SEQUENCE supports mixing // subregister index types e.g. sub0_sub1 + sub2 + sub3 @@ -2371,7 +2396,7 @@ continue; const TargetRegisterClass *OpRC = MRI.getRegClass(Op.getReg()); - const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(OpRC); + const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(OpRC, ST); if (VRC == OpRC) continue; @@ -2408,13 +2433,13 @@ // Legalize MIMG if (isMIMG(MI)) { MachineOperand *SRsrc = getNamedOperand(MI, AMDGPU::OpName::srsrc); - if (SRsrc && !RI.isSGPRClass(MRI.getRegClass(SRsrc->getReg()))) { + if (SRsrc && !RI.isSGPRClass(MRI.getRegClass(SRsrc->getReg()), ST)) { unsigned SGPR = readlaneVGPRToSGPR(SRsrc->getReg(), MI, MRI); SRsrc->setReg(SGPR); } MachineOperand *SSamp = getNamedOperand(MI, AMDGPU::OpName::ssamp); - if (SSamp && !RI.isSGPRClass(MRI.getRegClass(SSamp->getReg()))) { + if (SSamp && !RI.isSGPRClass(MRI.getRegClass(SSamp->getReg()), ST)) { unsigned SGPR = readlaneVGPRToSGPR(SSamp->getReg(), MI, MRI); SSamp->setReg(SGPR); } @@ -2793,14 +2818,16 @@ MRI.getRegClass(Src0.getReg()) : &AMDGPU::SGPR_32RegClass; - const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0); + const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, + AMDGPU::sub0, ST); MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, AMDGPU::sub0, Src0SubRC); const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg()); 
- const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC); - const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, AMDGPU::sub0); + const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC, ST); + const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, + AMDGPU::sub0, ST); unsigned DestSub0 = MRI.createVirtualRegister(NewDestSubRC); BuildMI(MBB, MII, DL, InstDesc, DestSub0) @@ -2847,12 +2874,14 @@ MRI.getRegClass(Src0.getReg()) : &AMDGPU::SGPR_32RegClass; - const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0); + const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, + AMDGPU::sub0, ST); const TargetRegisterClass *Src1RC = Src1.isReg() ? MRI.getRegClass(Src1.getReg()) : &AMDGPU::SGPR_32RegClass; - const TargetRegisterClass *Src1SubRC = RI.getSubRegClass(Src1RC, AMDGPU::sub0); + const TargetRegisterClass *Src1SubRC = RI.getSubRegClass(Src1RC, + AMDGPU::sub0, ST); MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, AMDGPU::sub0, Src0SubRC); @@ -2860,8 +2889,9 @@ AMDGPU::sub0, Src1SubRC); const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg()); - const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC); - const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, AMDGPU::sub0); + const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC, ST); + const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, + AMDGPU::sub0, ST); unsigned DestSub0 = MRI.createVirtualRegister(NewDestSubRC); MachineInstr &LoHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub0) @@ -2915,7 +2945,8 @@ unsigned MidReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); - const TargetRegisterClass *SrcSubRC = RI.getSubRegClass(SrcRC, AMDGPU::sub0); + const TargetRegisterClass *SrcSubRC = RI.getSubRegClass(SrcRC, AMDGPU::sub0, + ST); 
MachineOperand SrcRegSub0 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC, AMDGPU::sub0, SrcSubRC); @@ -3039,10 +3070,10 @@ case AMDGPU::PHI: case AMDGPU::REG_SEQUENCE: case AMDGPU::INSERT_SUBREG: - if (RI.hasVGPRs(NewDstRC)) + if (RI.hasVGPRs(NewDstRC, ST)) return nullptr; - NewDstRC = RI.getEquivalentVGPRClass(NewDstRC); + NewDstRC = RI.getEquivalentVGPRClass(NewDstRC, ST); if (!NewDstRC) return nullptr; return NewDstRC; @@ -3084,14 +3115,14 @@ // Is this operand statically required to be an SGPR based on the operand // constraints? const TargetRegisterClass *OpRC = RI.getRegClass(Desc.OpInfo[Idx].RegClass); - bool IsRequiredSGPR = RI.isSGPRClass(OpRC); + bool IsRequiredSGPR = RI.isSGPRClass(OpRC, ST); if (IsRequiredSGPR) return MO.getReg(); // If this could be a VGPR or an SGPR, Check the dynamic register class. unsigned Reg = MO.getReg(); const TargetRegisterClass *RegRC = MRI.getRegClass(Reg); - if (RI.isSGPRClass(RegRC)) + if (RI.isSGPRClass(RegRC, ST)) UsedSGPRs[i] = Reg; } Index: lib/Target/AMDGPU/SIInstrInfo.td =================================================================== --- lib/Target/AMDGPU/SIInstrInfo.td +++ lib/Target/AMDGPU/SIInstrInfo.td @@ -309,7 +309,7 @@ for (SDNode::use_iterator U = N->use_begin(), E = SDNode::use_end(); U != E; ++U) { const TargetRegisterClass *RC = getOperandRegClass(*U, U.getOperandNo()); - if (RC && SIRI->isSGPRClass(RC)) + if (RC && SIRI->isSGPRClass(RC, *Subtarget)) return true; } return false; Index: lib/Target/AMDGPU/SIRegisterInfo.h =================================================================== --- lib/Target/AMDGPU/SIRegisterInfo.h +++ lib/Target/AMDGPU/SIRegisterInfo.h @@ -86,13 +86,14 @@ const TargetRegisterClass *getPhysRegClass(unsigned Reg) const; /// \returns true if this class contains only SGPR registers - bool isSGPRClass(const TargetRegisterClass *RC) const { - return !hasVGPRs(RC); + bool isSGPRClass(const TargetRegisterClass *RC, + const TargetSubtargetInfo &STI) const { + return !hasVGPRs(RC, 
STI); } /// \returns true if this class ID contains only SGPR registers - bool isSGPRClassID(unsigned RCID) const { - return isSGPRClass(getRegClass(RCID)); + bool isSGPRClassID(unsigned RCID, const TargetSubtargetInfo &STI) const { + return isSGPRClass(getRegClass(RCID), STI); } bool isSGPRReg(const MachineRegisterInfo &MRI, unsigned Reg) const { @@ -101,27 +102,33 @@ RC = MRI.getRegClass(Reg); else RC = getPhysRegClass(Reg); - return isSGPRClass(RC); + return isSGPRClass(RC, MRI.getTargetSubtargetInfo()); } /// \returns true if this class contains VGPR registers. - bool hasVGPRs(const TargetRegisterClass *RC) const; + bool hasVGPRs(const TargetRegisterClass *RC, + const TargetSubtargetInfo &STI) const; /// \returns A VGPR reg class with the same width as \p SRC const TargetRegisterClass *getEquivalentVGPRClass( - const TargetRegisterClass *SRC) const; + const TargetRegisterClass *SRC, + const TargetSubtargetInfo &STI) const; /// \returns A SGPR reg class with the same width as \p SRC const TargetRegisterClass *getEquivalentSGPRClass( - const TargetRegisterClass *VRC) const; + const TargetRegisterClass *VRC, + const TargetSubtargetInfo &STI) const; /// \returns The register class that is used for a sub-register of \p RC for /// the given \p SubIdx. If \p SubIdx equals NoSubRegister, \p RC will /// be returned. 
const TargetRegisterClass *getSubRegClass(const TargetRegisterClass *RC, - unsigned SubIdx) const; + unsigned SubIdx, + const TargetSubtargetInfo &STI) + const; - bool shouldRewriteCopySrc(const TargetRegisterClass *DefRC, + bool shouldRewriteCopySrc(const TargetSubtargetInfo &STI, + const TargetRegisterClass *DefRC, unsigned DefSubReg, const TargetRegisterClass *SrcRC, unsigned SrcSubReg) const override; Index: lib/Target/AMDGPU/SIRegisterInfo.cpp =================================================================== --- lib/Target/AMDGPU/SIRegisterInfo.cpp +++ lib/Target/AMDGPU/SIRegisterInfo.cpp @@ -643,8 +643,9 @@ // TODO: It might be helpful to have some target specific flags in // TargetRegisterClass to mark which classes are VGPRs to make this trivial. -bool SIRegisterInfo::hasVGPRs(const TargetRegisterClass *RC) const { - switch (RC->getSize()) { +bool SIRegisterInfo::hasVGPRs(const TargetRegisterClass *RC, + const TargetSubtargetInfo &STI) const { + switch (getRegSize(RC->getID(), STI)) { case 0: return false; case 1: return false; case 4: @@ -665,8 +666,9 @@ } const TargetRegisterClass *SIRegisterInfo::getEquivalentVGPRClass( - const TargetRegisterClass *SRC) const { - switch (SRC->getSize()) { + const TargetRegisterClass *SRC, + const TargetSubtargetInfo &STI) const { + switch (getRegSize(SRC->getID(), STI)) { case 4: return &AMDGPU::VGPR_32RegClass; case 8: @@ -685,8 +687,9 @@ } const TargetRegisterClass *SIRegisterInfo::getEquivalentSGPRClass( - const TargetRegisterClass *VRC) const { - switch (VRC->getSize()) { + const TargetRegisterClass *VRC, + const TargetSubtargetInfo &STI) const { + switch (getRegSize(VRC->getID(), STI)) { case 4: return &AMDGPU::SGPR_32RegClass; case 8: @@ -703,13 +706,14 @@ } const TargetRegisterClass *SIRegisterInfo::getSubRegClass( - const TargetRegisterClass *RC, unsigned SubIdx) const { + const TargetRegisterClass *RC, unsigned SubIdx, + const TargetSubtargetInfo &STI) const { if (SubIdx == AMDGPU::NoSubRegister) return RC; // 
We can assume that each lane corresponds to one 32-bit register. unsigned Count = countPopulation(getSubRegIndexLaneMask(SubIdx)); - if (isSGPRClass(RC)) { + if (isSGPRClass(RC, STI)) { switch (Count) { case 1: return &AMDGPU::SGPR_32RegClass; @@ -743,6 +747,7 @@ } bool SIRegisterInfo::shouldRewriteCopySrc( + const TargetSubtargetInfo &STI, const TargetRegisterClass *DefRC, unsigned DefSubReg, const TargetRegisterClass *SrcRC, @@ -854,7 +859,7 @@ else RC = getPhysRegClass(Reg); - return hasVGPRs(RC); + return hasVGPRs(RC, MRI.getTargetSubtargetInfo()); } unsigned SIRegisterInfo::getTotalNumSGPRs(const SISubtarget &ST) const { Index: lib/Target/AMDGPU/SIShrinkInstructions.cpp =================================================================== --- lib/Target/AMDGPU/SIShrinkInstructions.cpp +++ lib/Target/AMDGPU/SIShrinkInstructions.cpp @@ -71,10 +71,11 @@ if (!MO->isReg()) return false; + const TargetSubtargetInfo &STI = MRI.getTargetSubtargetInfo(); if (TargetRegisterInfo::isVirtualRegister(MO->getReg())) - return TRI.hasVGPRs(MRI.getRegClass(MO->getReg())); + return TRI.hasVGPRs(MRI.getRegClass(MO->getReg()), STI); - return TRI.hasVGPRs(TRI.getPhysRegClass(MO->getReg())); + return TRI.hasVGPRs(TRI.getPhysRegClass(MO->getReg()), STI); } static bool canShrink(MachineInstr &MI, const SIInstrInfo *TII, Index: lib/Target/AMDGPU/SIWholeQuadMode.cpp =================================================================== --- lib/Target/AMDGPU/SIWholeQuadMode.cpp +++ lib/Target/AMDGPU/SIWholeQuadMode.cpp @@ -294,7 +294,7 @@ unsigned Reg = MO.getReg(); if (!TRI->isVirtualRegister(Reg) && - TRI->hasVGPRs(TRI->getPhysRegClass(Reg))) { + TRI->hasVGPRs(TRI->getPhysRegClass(Reg), MF.getSubtarget())) { Flags = StateWQM; break; } Index: lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h =================================================================== --- lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h +++ lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h @@ -92,7 +92,9 @@ bool isSISrcInlinableOperand(const 
MCInstrDesc &Desc, unsigned OpNo); /// \brief Get size of register operand -unsigned getRegOperandSize(const MCRegisterInfo *MRI, const MCInstrDesc &Desc, +unsigned getRegOperandSize(const MCRegisterInfo *MRI, + const MCSubtargetInfo &STI, + const MCInstrDesc &Desc, unsigned OpNo); /// \brief Is this literal inlinable Index: lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp =================================================================== --- lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp +++ lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp @@ -226,11 +226,13 @@ OpType == AMDGPU::OPERAND_REG_INLINE_C_FP; } -unsigned getRegOperandSize(const MCRegisterInfo *MRI, const MCInstrDesc &Desc, +unsigned getRegOperandSize(const MCRegisterInfo *MRI, + const MCSubtargetInfo &STI, + const MCInstrDesc &Desc, unsigned OpNo) { int RCID = Desc.OpInfo[OpNo].RegClass; const MCRegisterClass &RC = MRI->getRegClass(RCID); - return RC.getSize(); + return MRI->getRegSize(RC.getID(), STI); } bool isInlinableLiteral64(int64_t Literal, bool IsVI) { Index: lib/Target/ARM/ARMBaseInstrInfo.cpp =================================================================== --- lib/Target/ARM/ARMBaseInstrInfo.cpp +++ lib/Target/ARM/ARMBaseInstrInfo.cpp @@ -882,8 +882,8 @@ MachineMemOperand *MMO = MF.getMachineMemOperand( MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOStore, MFI.getObjectSize(FI), Align); - - switch (RC->getSize()) { + unsigned StoreSize = TRI->getSpillSize(RC->getID(), Subtarget); + switch (StoreSize) { case 4: if (ARM::GPRRegClass.hasSubClassEq(RC)) { AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::STRi12)) @@ -1065,7 +1065,8 @@ MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOLoad, MFI.getObjectSize(FI), Align); - switch (RC->getSize()) { + unsigned LoadSize = TRI->getSpillSize(RC->getID(), Subtarget); + switch (LoadSize) { case 4: if (ARM::GPRRegClass.hasSubClassEq(RC)) { AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::LDRi12), DestReg) Index: 
lib/Target/ARM/ARMBaseRegisterInfo.cpp =================================================================== --- lib/Target/ARM/ARMBaseRegisterInfo.cpp +++ lib/Target/ARM/ARMBaseRegisterInfo.cpp @@ -788,7 +788,8 @@ if (!DstSubReg) return true; // Small registers don't frequently cause a problem, so we can coalesce them. - if (NewRC->getSize() < 32 && DstRC->getSize() < 32 && SrcRC->getSize() < 32) + if (MRI.getRegSize(NewRC) < 32 && MRI.getRegSize(DstRC) < 32 && + MRI.getRegSize(SrcRC) < 32) return true; auto NewRCWeight = Index: lib/Target/ARM/ARMFrameLowering.cpp =================================================================== --- lib/Target/ARM/ARMFrameLowering.cpp +++ lib/Target/ARM/ARMFrameLowering.cpp @@ -1730,9 +1730,9 @@ // closest to SP or frame pointer. assert(RS && "Register scavenging not provided"); const TargetRegisterClass *RC = &ARM::GPRRegClass; - RS->addScavengingFrameIndex(MFI.CreateStackObject(RC->getSize(), - RC->getAlignment(), - false)); + unsigned Size = MRI.getSpillSize(RC); + unsigned Align = MRI.getSpillAlignment(RC); + RS->addScavengingFrameIndex(MFI.CreateStackObject(Size, Align, false)); } } } Index: lib/Target/ARM/ARMISelLowering.h =================================================================== --- lib/Target/ARM/ARMISelLowering.h +++ lib/Target/ARM/ARMISelLowering.h @@ -484,7 +484,7 @@ protected: std::pair - findRepresentativeClass(const TargetRegisterInfo *TRI, + findRepresentativeClass(const TargetSubtargetInfo &STI, MVT VT) const override; private: Index: lib/Target/ARM/ARMISelLowering.cpp =================================================================== --- lib/Target/ARM/ARMISelLowering.cpp +++ lib/Target/ARM/ARMISelLowering.cpp @@ -864,7 +864,7 @@ setOperationAction(ISD::FP_EXTEND, MVT::f64, Custom); } - computeRegisterProperties(Subtarget->getRegisterInfo()); + computeRegisterProperties(*Subtarget); // ARM does not have floating-point extending loads. 
for (MVT VT : MVT::fp_valuetypes()) { @@ -1258,13 +1258,13 @@ // due to the common occurrence of cross class copies and subregister insertions // and extractions. std::pair -ARMTargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI, +ARMTargetLowering::findRepresentativeClass(const TargetSubtargetInfo &STI, MVT VT) const { const TargetRegisterClass *RRC = nullptr; uint8_t Cost = 1; switch (VT.SimpleTy) { default: - return TargetLowering::findRepresentativeClass(TRI, VT); + return TargetLowering::findRepresentativeClass(STI, VT); // Use DPR as representative register class for all floating point // and vector types. Since there are 32 SPR registers and 32 DPR registers so // the cost is 1 for both f32 and f64. Index: lib/Target/BPF/BPFISelLowering.cpp =================================================================== --- lib/Target/BPF/BPFISelLowering.cpp +++ lib/Target/BPF/BPFISelLowering.cpp @@ -59,7 +59,7 @@ addRegisterClass(MVT::i64, &BPF::GPRRegClass); // Compute derived properties from the register classes - computeRegisterProperties(STI.getRegisterInfo()); + computeRegisterProperties(STI); setStackPointerRegisterToSaveRestore(BPF::R11); Index: lib/Target/Hexagon/BitTracker.h =================================================================== --- lib/Target/Hexagon/BitTracker.h +++ lib/Target/Hexagon/BitTracker.h @@ -13,6 +13,7 @@ #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallVector.h" #include "llvm/CodeGen/MachineFunction.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" #include #include @@ -20,7 +21,6 @@ namespace llvm { class ConstantInt; - class MachineRegisterInfo; class MachineBasicBlock; class MachineInstr; class MachineOperand; @@ -343,8 +343,9 @@ // utilize this should implement the evaluation functions (noted below) // in a subclass of this class. 
struct BitTracker::MachineEvaluator { - MachineEvaluator(const TargetRegisterInfo &T, MachineRegisterInfo &M) - : TRI(T), MRI(M) {} + MachineEvaluator(MachineRegisterInfo &M) + : STI(M.getTargetSubtargetInfo()), TRI(*M.getTargetRegisterInfo()), + MRI(M) {} virtual ~MachineEvaluator() {} uint16_t getRegBitWidth(const RegisterRef &RR) const; @@ -429,6 +430,7 @@ virtual bool evaluate(const MachineInstr &BI, const CellMapType &Inputs, BranchTargetList &Targets, bool &FallsThru) const = 0; + const TargetSubtargetInfo &STI; const TargetRegisterInfo &TRI; MachineRegisterInfo &MRI; }; Index: lib/Target/Hexagon/BitTracker.cpp =================================================================== --- lib/Target/Hexagon/BitTracker.cpp +++ lib/Target/Hexagon/BitTracker.cpp @@ -338,7 +338,7 @@ unsigned PhysS = (RR.Sub == 0) ? PhysR : TRI.getSubReg(PhysR, RR.Sub); const TargetRegisterClass *RC = TRI.getMinimalPhysRegClass(PhysS); - uint16_t BW = RC->getSize()*8; + uint16_t BW = TRI.getRegSize(RC->getID(), STI)*8; return BW; } Index: lib/Target/Hexagon/HexagonBitSimplify.cpp =================================================================== --- lib/Target/Hexagon/HexagonBitSimplify.cpp +++ lib/Target/Hexagon/HexagonBitSimplify.cpp @@ -363,9 +363,10 @@ bool HexagonBitSimplify::getSubregMask(const BitTracker::RegisterRef &RR, unsigned &Begin, unsigned &Width, MachineRegisterInfo &MRI) { const TargetRegisterClass *RC = MRI.getRegClass(RR.Reg); + unsigned RegSize = MRI.getRegSize(RC); if (RR.Sub == 0) { Begin = 0; - Width = RC->getSize()*8; + Width = RegSize*8; return true; } @@ -377,7 +378,7 @@ case Hexagon::DoubleRegsRegClassID: case Hexagon::VecDblRegsRegClassID: case Hexagon::VecDblRegs128BRegClassID: - Width = RC->getSize()*8 / 2; + Width = RegSize*8 / 2; if (RR.Sub == Hexagon::subreg_hireg) Begin = Width; break; @@ -1210,7 +1211,7 @@ assert(MI.getOperand(OpN).isReg()); BitTracker::RegisterRef RR = MI.getOperand(OpN); const TargetRegisterClass *RC = HBS::getFinalVRegClass(RR, 
MRI); - uint16_t Width = RC->getSize()*8; + uint16_t Width = MRI.getRegSize(RC)*8; if (!GotBits) T.set(Begin, Begin+Width); @@ -2256,7 +2257,7 @@ Changed = DeadCodeElimination(MF, *MDT).run(); - const HexagonEvaluator HE(HRI, MRI, HII, MF); + const HexagonEvaluator HE(MRI, HII, MF); BitTracker BT(HE, MF); DEBUG(BT.trace(true)); BT.run(); @@ -2805,7 +2806,7 @@ HII = HST.getInstrInfo(); HRI = HST.getRegisterInfo(); MRI = &MF.getRegInfo(); - const HexagonEvaluator HE(*HRI, *MRI, *HII, MF); + const HexagonEvaluator HE(*MRI, *HII, MF); BitTracker BT(HE, MF); DEBUG(BT.trace(true)); BT.run(); Index: lib/Target/Hexagon/HexagonBitTracker.h =================================================================== --- lib/Target/Hexagon/HexagonBitTracker.h +++ lib/Target/Hexagon/HexagonBitTracker.h @@ -23,7 +23,7 @@ typedef BitTracker::RegisterCell RegisterCell; typedef BitTracker::BranchTargetList BranchTargetList; - HexagonEvaluator(const HexagonRegisterInfo &tri, MachineRegisterInfo &mri, + HexagonEvaluator(MachineRegisterInfo &mri, const HexagonInstrInfo &tii, MachineFunction &mf); bool evaluate(const MachineInstr &MI, const CellMapType &Inputs, Index: lib/Target/Hexagon/HexagonBitTracker.cpp =================================================================== --- lib/Target/Hexagon/HexagonBitTracker.cpp +++ lib/Target/Hexagon/HexagonBitTracker.cpp @@ -22,11 +22,10 @@ typedef BitTracker BT; -HexagonEvaluator::HexagonEvaluator(const HexagonRegisterInfo &tri, - MachineRegisterInfo &mri, +HexagonEvaluator::HexagonEvaluator(MachineRegisterInfo &mri, const HexagonInstrInfo &tii, MachineFunction &mf) - : MachineEvaluator(tri, mri), MF(mf), MFI(mf.getFrameInfo()), TII(tii) { + : MachineEvaluator(mri), MF(mf), MFI(mf.getFrameInfo()), TII(tii) { // Populate the VRX map (VR to extension-type). // Go over all the formal parameters of the function. 
If a given parameter // P is sign- or zero-extended, locate the virtual register holding that Index: lib/Target/Hexagon/HexagonExpandCondsets.cpp =================================================================== --- lib/Target/Hexagon/HexagonExpandCondsets.cpp +++ lib/Target/Hexagon/HexagonExpandCondsets.cpp @@ -526,7 +526,7 @@ } unsigned PhysS = (RS.Sub == 0) ? PhysR : TRI->getSubReg(PhysR, RS.Sub); const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(PhysS); - switch (RC->getSize()) { + switch (MRI->getRegSize(RC)) { case 4: return IfTrue ? A2_tfrt : A2_tfrf; case 8: Index: lib/Target/Hexagon/HexagonFrameLowering.cpp =================================================================== --- lib/Target/Hexagon/HexagonFrameLowering.cpp +++ lib/Target/Hexagon/HexagonFrameLowering.cpp @@ -1318,6 +1318,7 @@ DEBUG(dbgs() << LLVM_FUNCTION_NAME << " on " << MF.getFunction()->getName() << '\n'); MachineFrameInfo &MFI = MF.getFrameInfo(); + MachineRegisterInfo &MRI = MF.getRegInfo(); BitVector SRegs(Hexagon::NUM_TARGET_REGS); // Generate a set of unique, callee-saved registers (SRegs), where each @@ -1397,7 +1398,7 @@ if (!SRegs[S->Reg]) continue; const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(S->Reg); - int FI = MFI.CreateFixedSpillStackObject(RC->getSize(), S->Offset); + int FI = MFI.CreateFixedSpillStackObject(MRI.getSpillSize(RC), S->Offset); MinOffset = std::min(MinOffset, S->Offset); CSI.push_back(CalleeSavedInfo(S->Reg, FI)); SRegs[S->Reg] = false; @@ -1409,11 +1410,11 @@ for (int x = SRegs.find_first(); x >= 0; x = SRegs.find_next(x)) { unsigned R = x; const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(R); - int Off = MinOffset - RC->getSize(); - unsigned Align = std::min(RC->getAlignment(), getStackAlignment()); + int Off = MinOffset - MRI.getSpillSize(RC); + unsigned Align = std::min(MRI.getSpillAlignment(RC), getStackAlignment()); assert(isPowerOf2_32(Align)); Off &= -Align; - int FI = MFI.CreateFixedSpillStackObject(RC->getSize(), 
Off); + int FI = MFI.CreateFixedSpillStackObject(MRI.getSpillSize(RC), Off); MinOffset = std::min(MinOffset, Off); CSI.push_back(CalleeSavedInfo(R, FI)); SRegs[R] = false; @@ -1636,8 +1637,8 @@ bool Is128B = HST.useHVXDblOps(); auto *RC = !Is128B ? &Hexagon::VectorRegsRegClass : &Hexagon::VectorRegs128BRegClass; - unsigned Size = RC->getSize(); - unsigned NeedAlign = RC->getAlignment(); + unsigned Size = MRI.getSpillSize(RC); + unsigned NeedAlign = MRI.getSpillAlignment(RC); unsigned HasAlign = MFI.getObjectAlignment(FI); unsigned StoreOpc; @@ -1689,8 +1690,8 @@ bool Is128B = HST.useHVXDblOps(); auto *RC = !Is128B ? &Hexagon::VectorRegsRegClass : &Hexagon::VectorRegs128BRegClass; - unsigned Size = RC->getSize(); - unsigned NeedAlign = RC->getAlignment(); + unsigned Size = MRI.getSpillSize(RC); + unsigned NeedAlign = MRI.getSpillAlignment(RC); unsigned HasAlign = MFI.getObjectAlignment(FI); unsigned LoadOpc; @@ -1739,7 +1740,7 @@ auto *RC = !Is128B ? &Hexagon::VectorRegsRegClass : &Hexagon::VectorRegs128BRegClass; - unsigned NeedAlign = RC->getAlignment(); + unsigned NeedAlign = MRI.getSpillAlignment(RC); unsigned HasAlign = MFI.getObjectAlignment(FI); unsigned StoreOpc; @@ -1776,7 +1777,7 @@ auto *RC = !Is128B ? &Hexagon::VectorRegsRegClass : &Hexagon::VectorRegs128BRegClass; - unsigned NeedAlign = RC->getAlignment(); + unsigned NeedAlign = MRI.getSpillAlignment(RC); unsigned HasAlign = MFI.getObjectAlignment(FI); unsigned LoadOpc; @@ -1887,7 +1888,7 @@ if (!needToReserveScavengingSpillSlots(MF, HRI, RC)) continue; unsigned Num = RC == &Hexagon::IntRegsRegClass ? 
NumberScavengerSlots : 1; - unsigned S = RC->getSize(), A = RC->getAlignment(); + unsigned S = MRI.getSpillSize(RC), A = MRI.getSpillAlignment(RC); for (unsigned i = 0; i < Num; i++) { int NewFI = MFI.CreateSpillStackObject(S, A); RS->addScavengingFrameIndex(NewFI); Index: lib/Target/Hexagon/HexagonGenInsert.cpp =================================================================== --- lib/Target/Hexagon/HexagonGenInsert.cpp +++ lib/Target/Hexagon/HexagonGenInsert.cpp @@ -1498,7 +1498,7 @@ // leading to unnecessary stack growth. Changed = removeDeadCode(MDT->getRootNode()); - const HexagonEvaluator HE(*HRI, *MRI, *HII, MF); + const HexagonEvaluator HE(*MRI, *HII, MF); BitTracker BTLoc(HE, MF); BTLoc.trace(isDebug()); BTLoc.run(); Index: lib/Target/Hexagon/HexagonISelLowering.h =================================================================== --- lib/Target/Hexagon/HexagonISelLowering.h +++ lib/Target/Hexagon/HexagonISelLowering.h @@ -278,7 +278,7 @@ protected: std::pair - findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT) + findRepresentativeClass(const TargetSubtargetInfo &STI, MVT VT) const override; }; } // end namespace llvm Index: lib/Target/Hexagon/HexagonISelLowering.cpp =================================================================== --- lib/Target/Hexagon/HexagonISelLowering.cpp +++ lib/Target/Hexagon/HexagonISelLowering.cpp @@ -2116,7 +2116,7 @@ } } - computeRegisterProperties(&HRI); + computeRegisterProperties(Subtarget); // // Library calls for unsupported operations @@ -3238,14 +3238,14 @@ std::pair -HexagonTargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI, +HexagonTargetLowering::findRepresentativeClass(const TargetSubtargetInfo &STI, MVT VT) const { const TargetRegisterClass *RRC = nullptr; uint8_t Cost = 1; switch (VT.SimpleTy) { default: - return TargetLowering::findRepresentativeClass(TRI, VT); + return TargetLowering::findRepresentativeClass(STI, VT); case MVT::v64i8: case MVT::v32i16: case MVT::v16i32: Index: 
lib/Target/Lanai/LanaiISelLowering.cpp =================================================================== --- lib/Target/Lanai/LanaiISelLowering.cpp +++ lib/Target/Lanai/LanaiISelLowering.cpp @@ -57,13 +57,12 @@ LanaiTargetLowering::LanaiTargetLowering(const TargetMachine &TM, const LanaiSubtarget &STI) - : TargetLowering(TM) { + : TargetLowering(TM), TRI(STI.getRegisterInfo()) { // Set up the register classes. addRegisterClass(MVT::i32, &Lanai::GPRRegClass); // Compute derived properties from the register classes - TRI = STI.getRegisterInfo(); - computeRegisterProperties(TRI); + computeRegisterProperties(STI); setStackPointerRegisterToSaveRestore(Lanai::SP); Index: lib/Target/MSP430/MSP430ISelLowering.cpp =================================================================== --- lib/Target/MSP430/MSP430ISelLowering.cpp +++ lib/Target/MSP430/MSP430ISelLowering.cpp @@ -66,7 +66,7 @@ addRegisterClass(MVT::i16, &MSP430::GR16RegClass); // Compute derived properties from the register classes - computeRegisterProperties(STI.getRegisterInfo()); + computeRegisterProperties(STI); // Provide all sorts of operation actions setStackPointerRegisterToSaveRestore(MSP430::SP); Index: lib/Target/Mips/Mips16ISelLowering.cpp =================================================================== --- lib/Target/Mips/Mips16ISelLowering.cpp +++ lib/Target/Mips/Mips16ISelLowering.cpp @@ -147,7 +147,7 @@ setOperationAction(ISD::BSWAP, MVT::i32, Expand); setOperationAction(ISD::BSWAP, MVT::i64, Expand); - computeRegisterProperties(STI.getRegisterInfo()); + computeRegisterProperties(STI); } const MipsTargetLowering * Index: lib/Target/Mips/MipsAsmPrinter.cpp =================================================================== --- lib/Target/Mips/MipsAsmPrinter.cpp +++ lib/Target/Mips/MipsAsmPrinter.cpp @@ -258,12 +258,13 @@ // Set the CPU and FPU Bitmasks const MachineFrameInfo &MFI = MF->getFrameInfo(); - const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo(); + const 
TargetSubtargetInfo &STI = MF->getSubtarget(); + const TargetRegisterInfo *TRI = STI.getRegisterInfo(); const std::vector &CSI = MFI.getCalleeSavedInfo(); // size of stack area to which FP callee-saved regs are saved. - unsigned CPURegSize = Mips::GPR32RegClass.getSize(); - unsigned FGR32RegSize = Mips::FGR32RegClass.getSize(); - unsigned AFGR64RegSize = Mips::AFGR64RegClass.getSize(); + unsigned CPURegSize = TRI->getRegSize(Mips::GPR32RegClass.getID(), STI); + unsigned FGR32RegSize = TRI->getRegSize(Mips::FGR32RegClass.getID(), STI); + unsigned AFGR64RegSize = TRI->getRegSize(Mips::AFGR64RegClass.getID(), STI); bool HasAFGR64Reg = false; unsigned CSFPRegsSize = 0; Index: lib/Target/Mips/MipsFrameLowering.cpp =================================================================== --- lib/Target/Mips/MipsFrameLowering.cpp +++ lib/Target/Mips/MipsFrameLowering.cpp @@ -109,6 +109,7 @@ uint64_t MipsFrameLowering::estimateStackSize(const MachineFunction &MF) const { const MachineFrameInfo &MFI = MF.getFrameInfo(); + const MachineRegisterInfo &MRI = MF.getRegInfo(); const TargetRegisterInfo &TRI = *STI.getRegisterInfo(); int64_t Offset = 0; @@ -119,7 +120,7 @@ // Conservatively assume all callee-saved registers will be saved. for (const MCPhysReg *R = TRI.getCalleeSavedRegs(&MF); *R; ++R) { - unsigned Size = TRI.getMinimalPhysRegClass(*R)->getSize(); + unsigned Size = MRI.getSpillSize(TRI.getMinimalPhysRegClass(*R)); Offset = alignTo(Offset + Size, Size); } Index: lib/Target/Mips/MipsMachineFunction.cpp =================================================================== --- lib/Target/Mips/MipsMachineFunction.cpp +++ lib/Target/Mips/MipsMachineFunction.cpp @@ -54,14 +54,15 @@ } void MipsFunctionInfo::createEhDataRegsFI() { + const MachineRegisterInfo &MRI = MF.getRegInfo(); for (int I = 0; I < 4; ++I) { const TargetRegisterClass *RC = static_cast(MF.getTarget()).getABI().IsN64() ? 
&Mips::GPR64RegClass : &Mips::GPR32RegClass; - EhDataRegFI[I] = MF.getFrameInfo().CreateStackObject(RC->getSize(), - RC->getAlignment(), false); + EhDataRegFI[I] = MF.getFrameInfo().CreateStackObject( + MRI.getSpillSize(RC), MRI.getSpillAlignment(RC), false); } } @@ -70,11 +71,12 @@ // The current implementation only supports Mips32r2+ not Mips64rX. Status // is always 32 bits, ErrorPC is 32 or 64 bits dependant on architecture, // however Mips32r2+ is the supported architecture. + const MachineRegisterInfo &MRI = MF.getRegInfo(); const TargetRegisterClass *RC = &Mips::GPR32RegClass; for (int I = 0; I < 2; ++I) ISRDataRegFI[I] = MF.getFrameInfo().CreateStackObject( - RC->getSize(), RC->getAlignment(), false); + MRI.getSpillSize(RC), MRI.getSpillAlignment(RC), false); } bool MipsFunctionInfo::isEhDataRegFI(int FI) const { @@ -94,9 +96,10 @@ } int MipsFunctionInfo::getMoveF64ViaSpillFI(const TargetRegisterClass *RC) { + const MachineRegisterInfo &MRI = MF.getRegInfo(); if (MoveF64ViaSpillFI == -1) { MoveF64ViaSpillFI = MF.getFrameInfo().CreateStackObject( - RC->getSize(), RC->getAlignment(), false); + MRI.getSpillSize(RC), MRI.getSpillAlignment(RC), false); } return MoveF64ViaSpillFI; } Index: lib/Target/Mips/MipsSEFrameLowering.cpp =================================================================== --- lib/Target/Mips/MipsSEFrameLowering.cpp +++ lib/Target/Mips/MipsSEFrameLowering.cpp @@ -240,7 +240,8 @@ // copy dst_hi, $vr1 unsigned Dst = I->getOperand(0).getReg(), Src = I->getOperand(1).getReg(); - unsigned VRegSize = RegInfo.getMinimalPhysRegClass(Dst)->getSize() / 2; + MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); + unsigned VRegSize = MRI.getSpillSize(RegInfo.getMinimalPhysRegClass(Dst)) / 2; const TargetRegisterClass *RC = RegInfo.intRegClass(VRegSize); unsigned VR0 = MRI.createVirtualRegister(RC); unsigned VR1 = MRI.createVirtualRegister(RC); @@ -843,6 +844,7 @@ TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS); MipsFunctionInfo *MipsFI 
= MF.getInfo(); MipsABIInfo ABI = STI.getABI(); + MachineRegisterInfo &MRI = MF.getRegInfo(); unsigned FP = ABI.GetFramePtr(); unsigned BP = ABI.IsN64() ? Mips::S7_64 : Mips::S7; @@ -868,8 +870,9 @@ // mips64, it should be 64-bit, otherwise it should be 32-bt. const TargetRegisterClass *RC = STI.hasMips64() ? &Mips::GPR64RegClass : &Mips::GPR32RegClass; - int FI = MF.getFrameInfo().CreateStackObject(RC->getSize(), - RC->getAlignment(), false); + int FI = MF.getFrameInfo().CreateStackObject(MRI.getSpillSize(RC), + MRI.getSpillAlignment(RC), + false); RS->addScavengingFrameIndex(FI); } @@ -882,8 +885,9 @@ const TargetRegisterClass *RC = ABI.ArePtrs64bit() ? &Mips::GPR64RegClass : &Mips::GPR32RegClass; - int FI = MF.getFrameInfo().CreateStackObject(RC->getSize(), - RC->getAlignment(), false); + int FI = MF.getFrameInfo().CreateStackObject(MRI.getSpillSize(RC), + MRI.getSpillAlignment(RC), + false); RS->addScavengingFrameIndex(FI); } Index: lib/Target/Mips/MipsSEISelLowering.cpp =================================================================== --- lib/Target/Mips/MipsSEISelLowering.cpp +++ lib/Target/Mips/MipsSEISelLowering.cpp @@ -224,7 +224,7 @@ setOperationAction(ISD::SELECT_CC, MVT::i64, Expand); } - computeRegisterProperties(Subtarget.getRegisterInfo()); + computeRegisterProperties(Subtarget); } const MipsTargetLowering * Index: lib/Target/Mips/MipsSEInstrInfo.cpp =================================================================== --- lib/Target/Mips/MipsSEInstrInfo.cpp +++ lib/Target/Mips/MipsSEInstrInfo.cpp @@ -558,8 +558,11 @@ const MCInstrDesc &Desc = get(Opc); assert(Desc.NumOperands == 2 && "Unary instruction expected."); const MipsRegisterInfo *RI = &getRegisterInfo(); - unsigned DstRegSize = getRegClass(Desc, 0, RI, MF)->getSize(); - unsigned SrcRegSize = getRegClass(Desc, 1, RI, MF)->getSize(); + const TargetSubtargetInfo &STI = MF.getSubtarget(); + const TargetRegisterClass *DstRC = getRegClass(Desc, 0, RI, MF); + const TargetRegisterClass *SrcRC = 
getRegClass(Desc, 1, RI, MF); + unsigned DstRegSize = RI->getRegSize(DstRC->getID(), STI); + unsigned SrcRegSize = RI->getRegSize(SrcRC->getID(), STI); return std::make_pair(DstRegSize > SrcRegSize, DstRegSize < SrcRegSize); } Index: lib/Target/NVPTX/NVPTXISelLowering.cpp =================================================================== --- lib/Target/NVPTX/NVPTXISelLowering.cpp +++ lib/Target/NVPTX/NVPTXISelLowering.cpp @@ -303,7 +303,7 @@ // Now deduce the information based on the above mentioned // actions - computeRegisterProperties(STI.getRegisterInfo()); + computeRegisterProperties(STI); } const char *NVPTXTargetLowering::getTargetNodeName(unsigned Opcode) const { Index: lib/Target/NVPTX/NVPTXInstrInfo.cpp =================================================================== --- lib/Target/NVPTX/NVPTXInstrInfo.cpp +++ lib/Target/NVPTX/NVPTXInstrInfo.cpp @@ -38,7 +38,7 @@ const TargetRegisterClass *DestRC = MRI.getRegClass(DestReg); const TargetRegisterClass *SrcRC = MRI.getRegClass(SrcReg); - if (DestRC->getSize() != SrcRC->getSize()) + if (MRI.getRegSize(DestRC) != MRI.getRegSize(SrcRC)) report_fatal_error("Copy one register into another with a different width"); unsigned Op; Index: lib/Target/PowerPC/PPCFrameLowering.cpp =================================================================== --- lib/Target/PowerPC/PPCFrameLowering.cpp +++ lib/Target/PowerPC/PPCFrameLowering.cpp @@ -1761,14 +1761,15 @@ // because we've not yet computed callee-saved register spills or the // needed alignment padding. unsigned StackSize = determineFrameLayout(MF, false, true); + MachineRegisterInfo &MRI = MF.getRegInfo(); MachineFrameInfo &MFI = MF.getFrameInfo(); if (MFI.hasVarSizedObjects() || spillsCR(MF) || spillsVRSAVE(MF) || hasNonRISpills(MF) || (hasSpills(MF) && !isInt<16>(StackSize))) { const TargetRegisterClass *GPRC = &PPC::GPRCRegClass; const TargetRegisterClass *G8RC = &PPC::G8RCRegClass; const TargetRegisterClass *RC = Subtarget.isPPC64() ? 
G8RC : GPRC; - RS->addScavengingFrameIndex(MFI.CreateStackObject(RC->getSize(), - RC->getAlignment(), + RS->addScavengingFrameIndex(MFI.CreateStackObject(MRI.getSpillSize(RC), + MRI.getSpillAlignment(RC), false)); // Might we have over-aligned allocas? @@ -1777,9 +1778,10 @@ // These kinds of spills might need two registers. if (spillsCR(MF) || spillsVRSAVE(MF) || HasAlVars) - RS->addScavengingFrameIndex(MFI.CreateStackObject(RC->getSize(), - RC->getAlignment(), - false)); + RS->addScavengingFrameIndex( + MFI.CreateStackObject(MRI.getSpillSize(RC), + MRI.getSpillAlignment(RC), + false)); } } Index: lib/Target/PowerPC/PPCISelLowering.cpp =================================================================== --- lib/Target/PowerPC/PPCISelLowering.cpp +++ lib/Target/PowerPC/PPCISelLowering.cpp @@ -936,7 +936,7 @@ else setSchedulingPreference(Sched::Hybrid); - computeRegisterProperties(STI.getRegisterInfo()); + computeRegisterProperties(STI); // The Freescale cores do better with aggressive inlining of memcpy and // friends. GCC uses same threshold of 128 bytes (= 32 word stores). Index: lib/Target/Sparc/SparcISelLowering.cpp =================================================================== --- lib/Target/Sparc/SparcISelLowering.cpp +++ lib/Target/Sparc/SparcISelLowering.cpp @@ -1825,7 +1825,7 @@ setMinFunctionAlignment(2); - computeRegisterProperties(Subtarget->getRegisterInfo()); + computeRegisterProperties(*Subtarget); } bool SparcTargetLowering::useSoftFloat() const { Index: lib/Target/SystemZ/SystemZISelLowering.cpp =================================================================== --- lib/Target/SystemZ/SystemZISelLowering.cpp +++ lib/Target/SystemZ/SystemZISelLowering.cpp @@ -112,7 +112,7 @@ } // Compute derived properties from the register classes - computeRegisterProperties(Subtarget.getRegisterInfo()); + computeRegisterProperties(Subtarget); // Set up special registers. 
setStackPointerRegisterToSaveRestore(SystemZ::R15D); Index: lib/Target/SystemZ/SystemZInstrInfo.cpp =================================================================== --- lib/Target/SystemZ/SystemZInstrInfo.cpp +++ lib/Target/SystemZ/SystemZInstrInfo.cpp @@ -916,11 +916,9 @@ return nullptr; unsigned OpNum = Ops[0]; - assert(Size == - MF.getRegInfo() - .getRegClass(MI.getOperand(OpNum).getReg()) - ->getSize() && - "Invalid size combination"); + assert(Size == MF.getRegInfo().getSpillSize( + MF.getRegInfo().getRegClass(MI.getOperand(OpNum).getReg())) + && "Invalid size combination"); if ((Opcode == SystemZ::AHI || Opcode == SystemZ::AGHI) && OpNum == 0 && isInt<8>(MI.getOperand(2).getImm())) { Index: lib/Target/X86/X86FastISel.cpp =================================================================== --- lib/Target/X86/X86FastISel.cpp +++ lib/Target/X86/X86FastISel.cpp @@ -2097,7 +2097,7 @@ if (!LHSReg || !RHSReg) return false; - unsigned Opc = X86::getCMovFromCond(CC, RC->getSize()); + unsigned Opc = X86::getCMovFromCond(CC, MRI.getSpillSize(RC)); unsigned ResultReg = fastEmitInst_rr(Opc, RC, RHSReg, RHSIsKill, LHSReg, LHSIsKill); updateValueMap(I, ResultReg); Index: lib/Target/X86/X86FrameLowering.cpp =================================================================== --- lib/Target/X86/X86FrameLowering.cpp +++ lib/Target/X86/X86FrameLowering.cpp @@ -1830,6 +1830,7 @@ MachineFunction &MF, const TargetRegisterInfo *TRI, std::vector &CSI) const { MachineFrameInfo &MFI = MF.getFrameInfo(); + MachineRegisterInfo &MRI = MF.getRegInfo(); X86MachineFunctionInfo *X86FI = MF.getInfo(); unsigned CalleeSavedFrameSize = 0; @@ -1876,13 +1877,14 @@ const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg); // ensure alignment - SpillSlotOffset -= std::abs(SpillSlotOffset) % RC->getAlignment(); + unsigned Size = MRI.getSpillSize(RC); + unsigned Align = MRI.getSpillAlignment(RC); + SpillSlotOffset -= std::abs(SpillSlotOffset) % Align; // spill into slot - SpillSlotOffset -= 
RC->getSize(); - int SlotIndex = - MFI.CreateFixedSpillStackObject(RC->getSize(), SpillSlotOffset); + SpillSlotOffset -= Size; + int SlotIndex = MFI.CreateFixedSpillStackObject(Size, SpillSlotOffset); CSI[i - 1].setFrameIdx(SlotIndex); - MFI.ensureMaxAlignment(RC->getAlignment()); + MFI.ensureMaxAlignment(Align); } return true; Index: lib/Target/X86/X86ISelLowering.h =================================================================== --- lib/Target/X86/X86ISelLowering.h +++ lib/Target/X86/X86ISelLowering.h @@ -1021,7 +1021,7 @@ protected: std::pair - findRepresentativeClass(const TargetRegisterInfo *TRI, + findRepresentativeClass(const TargetSubtargetInfo &STI, MVT VT) const override; private: Index: lib/Target/X86/X86ISelLowering.cpp =================================================================== --- lib/Target/X86/X86ISelLowering.cpp +++ lib/Target/X86/X86ISelLowering.cpp @@ -1676,7 +1676,7 @@ setTargetDAGCombine(ISD::MSCATTER); setTargetDAGCombine(ISD::MGATHER); - computeRegisterProperties(Subtarget.getRegisterInfo()); + computeRegisterProperties(Subtarget); MaxStoresPerMemset = 16; // For @llvm.memset -> sequence of stores MaxStoresPerMemsetOptSize = 8; @@ -1941,13 +1941,13 @@ } std::pair -X86TargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI, +X86TargetLowering::findRepresentativeClass(const TargetSubtargetInfo &STI, MVT VT) const { const TargetRegisterClass *RRC = nullptr; uint8_t Cost = 1; switch (VT.SimpleTy) { default: - return TargetLowering::findRepresentativeClass(TRI, VT); + return TargetLowering::findRepresentativeClass(STI, VT); case MVT::i8: case MVT::i16: case MVT::i32: case MVT::i64: RRC = Subtarget.is64Bit() ? 
&X86::GR64RegClass : &X86::GR32RegClass; break; Index: lib/Target/X86/X86InstrInfo.cpp =================================================================== --- lib/Target/X86/X86InstrInfo.cpp +++ lib/Target/X86/X86InstrInfo.cpp @@ -4569,7 +4569,7 @@ MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); assert(Cond.size() == 1 && "Invalid Cond array"); unsigned Opc = getCMovFromCond((X86::CondCode)Cond[0].getImm(), - MRI.getRegClass(DstReg)->getSize(), + MRI.getSpillSize(MRI.getRegClass(DstReg)), false /*HasMemoryOperand*/); BuildMI(MBB, I, DL, get(Opc), DstReg).addReg(FalseReg).addReg(TrueReg); } @@ -4834,7 +4834,7 @@ bool HasAVX512 = STI.hasAVX512(); bool HasVLX = STI.hasVLX(); - switch (RC->getSize()) { + switch (STI.getRegisterInfo()->getSpillSize(RC->getID(), STI)) { default: llvm_unreachable("Unknown spill size"); case 1: @@ -4967,9 +4967,11 @@ const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const { const MachineFunction &MF = *MBB.getParent(); - assert(MF.getFrameInfo().getObjectSize(FrameIdx) >= RC->getSize() && + const MachineRegisterInfo &MRI = MF.getRegInfo(); + unsigned SpillSize = MRI.getSpillSize(RC); + assert(MF.getFrameInfo().getObjectSize(FrameIdx) >= SpillSize && "Stack slot too small for store"); - unsigned Alignment = std::max(RC->getSize(), 16); + unsigned Alignment = std::max(SpillSize, 16); bool isAligned = (Subtarget.getFrameLowering()->getStackAlignment() >= Alignment) || RI.canRealignStack(MF); @@ -4986,7 +4988,8 @@ MachineInstr::mmo_iterator MMOBegin, MachineInstr::mmo_iterator MMOEnd, SmallVectorImpl &NewMIs) const { - unsigned Alignment = std::max(RC->getSize(), 16); + const MachineRegisterInfo &MRI = MF.getRegInfo(); + unsigned Alignment = std::max(MRI.getSpillSize(RC), 16); bool isAligned = MMOBegin != MMOEnd && (*MMOBegin)->getAlignment() >= Alignment; unsigned Opc = getStoreRegOpcode(SrcReg, RC, isAligned, Subtarget); @@ -5006,7 +5009,8 @@ const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const { const 
MachineFunction &MF = *MBB.getParent(); - unsigned Alignment = std::max(RC->getSize(), 16); + const MachineRegisterInfo &MRI = MF.getRegInfo(); + unsigned Alignment = std::max(MRI.getSpillSize(RC), 16); bool isAligned = (Subtarget.getFrameLowering()->getStackAlignment() >= Alignment) || RI.canRealignStack(MF); @@ -5021,7 +5025,8 @@ MachineInstr::mmo_iterator MMOBegin, MachineInstr::mmo_iterator MMOEnd, SmallVectorImpl &NewMIs) const { - unsigned Alignment = std::max(RC->getSize(), 16); + const MachineRegisterInfo &MRI = MF.getRegInfo(); + unsigned Alignment = std::max(MRI.getSpillSize(RC), 16); bool isAligned = MMOBegin != MMOEnd && (*MMOBegin)->getAlignment() >= Alignment; unsigned Opc = getLoadRegOpcode(DestReg, RC, isAligned, Subtarget); @@ -5471,7 +5476,8 @@ NewOpc = getSETFromCond(NewCC, HasMemoryOperand); else { unsigned DstReg = Instr.getOperand(0).getReg(); - NewOpc = getCMovFromCond(NewCC, MRI->getRegClass(DstReg)->getSize(), + const TargetRegisterClass *DstRC = MRI->getRegClass(DstReg); + NewOpc = getCMovFromCond(NewCC, MRI->getSpillSize(DstRC), HasMemoryOperand); } @@ -5904,7 +5910,9 @@ unsigned DstIdx = (Imm >> 4) & 3; unsigned SrcIdx = (Imm >> 6) & 3; - unsigned RCSize = getRegClass(MI.getDesc(), OpNum, &RI, MF)->getSize(); + MachineRegisterInfo &MRI = MF.getRegInfo(); + const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum, &RI, MF); + unsigned RCSize = MRI.getSpillSize(RC); if (Size <= RCSize && 4 <= Align) { int PtrOffset = SrcIdx * 4; unsigned NewImm = (DstIdx << 4) | ZMask; @@ -5926,7 +5934,9 @@ // To fold the load, adjust the pointer to the upper and use (V)MOVLPS. // TODO: In most cases AVX doesn't have a 8-byte alignment requirement. 
if (OpNum == 2) { - unsigned RCSize = getRegClass(MI.getDesc(), OpNum, &RI, MF)->getSize(); + MachineRegisterInfo &MRI = MF.getRegInfo(); + const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum, &RI, MF); + unsigned RCSize = MRI.getSpillSize(RC); if (Size <= RCSize && 8 <= Align) { unsigned NewOpCode = (MI.getOpcode() == X86::VMOVHLPSZrr) ? X86::VMOVLPSZ128rm : @@ -6015,7 +6025,10 @@ return nullptr; bool NarrowToMOV32rm = false; if (Size) { - unsigned RCSize = getRegClass(MI.getDesc(), OpNum, &RI, MF)->getSize(); + MachineRegisterInfo &MRI = MF.getRegInfo(); + const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum, + &RI, MF); + unsigned RCSize = MRI.getSpillSize(RC); if (Size < RCSize) { // Check if it's safe to fold the load. If the size of the object is // narrower than the load width, then it's not. @@ -6431,8 +6444,10 @@ const MachineFunction &MF) { unsigned Opc = LoadMI.getOpcode(); unsigned UserOpc = UserMI.getOpcode(); - unsigned RegSize = - MF.getRegInfo().getRegClass(LoadMI.getOperand(0).getReg())->getSize(); + const MachineRegisterInfo &MRI = MF.getRegInfo(); + const TargetRegisterClass *RC = + MRI.getRegClass(LoadMI.getOperand(0).getReg()); + unsigned RegSize = MRI.getSpillSize(RC); if ((Opc == X86::MOVSSrm || Opc == X86::VMOVSSrm || Opc == X86::VMOVSSZrm) && RegSize > 4) { @@ -6779,6 +6794,7 @@ bool FoldedStore = I->second.second & TB_FOLDED_STORE; const MCInstrDesc &MCID = get(Opc); MachineFunction &MF = DAG.getMachineFunction(); + MachineRegisterInfo &MRI = MF.getRegInfo(); const TargetRegisterClass *RC = getRegClass(MCID, Index, &RI, MF); unsigned NumDefs = MCID.NumDefs; std::vector AddrOps; @@ -6813,7 +6829,7 @@ return false; // FIXME: If a VR128 can have size 32, we should be checking if a 32-byte // memory access is slow above. - unsigned Alignment = RC->getSize() == 32 ? 32 : 16; + unsigned Alignment = MRI.getSpillSize(RC) == 32 ? 
32 : 16; bool isAligned = (*MMOs.first) && (*MMOs.first)->getAlignment() >= Alignment; Load = DAG.getMachineNode(getLoadRegOpcode(0, RC, isAligned, Subtarget), dl, @@ -6858,7 +6874,7 @@ return false; // FIXME: If a VR128 can have size 32, we should be checking if a 32-byte // memory access is slow above. - unsigned Alignment = RC->getSize() == 32 ? 32 : 16; + unsigned Alignment = MRI.getSpillSize(RC) == 32 ? 32 : 16; bool isAligned = (*MMOs.first) && (*MMOs.first)->getAlignment() >= Alignment; SDNode *Store = Index: lib/Target/X86/X86RegisterInfo.cpp =================================================================== --- lib/Target/X86/X86RegisterInfo.cpp +++ lib/Target/X86/X86RegisterInfo.cpp @@ -129,21 +129,24 @@ return RC; const X86Subtarget &Subtarget = MF.getSubtarget(); + const MachineRegisterInfo &MRI = MF.getRegInfo(); + unsigned RCSize = MRI.getSpillSize(RC); const TargetRegisterClass *Super = RC; TargetRegisterClass::sc_iterator I = RC->getSuperClasses(); do { + unsigned SuperSize = MRI.getSpillSize(Super); switch (Super->getID()) { case X86::FR32RegClassID: case X86::FR64RegClassID: // If AVX-512 isn't supported we should only inflate to these classes. - if (!Subtarget.hasAVX512() && Super->getSize() == RC->getSize()) + if (!Subtarget.hasAVX512() && SuperSize == RCSize) return Super; break; case X86::VR128RegClassID: case X86::VR256RegClassID: // If VLX isn't supported we should only inflate to these classes. - if (!Subtarget.hasVLX() && Super->getSize() == RC->getSize()) + if (!Subtarget.hasVLX() && SuperSize == RCSize) return Super; break; case X86::FR32XRegClassID: @@ -169,7 +172,7 @@ case X86::VR512RegClassID: // Don't return a super-class that would shrink the spill size. // That can happen with the vector and float classes. 
- if (Super->getSize() == RC->getSize()) + if (SuperSize == RCSize) return Super; } Super = *I++; Index: lib/Target/XCore/XCoreFrameLowering.cpp =================================================================== --- lib/Target/XCore/XCoreFrameLowering.cpp +++ lib/Target/XCore/XCoreFrameLowering.cpp @@ -574,6 +574,7 @@ RegScavenger *RS) const { assert(RS && "requiresRegisterScavenging failed"); MachineFrameInfo &MFI = MF.getFrameInfo(); + MachineRegisterInfo &MRI = MF.getRegInfo(); const TargetRegisterClass *RC = &XCore::GRRegsRegClass; XCoreFunctionInfo *XFI = MF.getInfo(); // Reserve slots close to SP or frame pointer for Scavenging spills. @@ -581,11 +582,11 @@ // When using SP for large frames, we may need 2 scratch registers. // When using FP, for large or small frames, we may need 1 scratch register. if (XFI->isLargeFrame(MF) || hasFP(MF)) - RS->addScavengingFrameIndex(MFI.CreateStackObject(RC->getSize(), - RC->getAlignment(), + RS->addScavengingFrameIndex(MFI.CreateStackObject(MRI.getSpillSize(RC), + MRI.getSpillAlignment(RC), false)); if (XFI->isLargeFrame(MF) && !hasFP(MF)) - RS->addScavengingFrameIndex(MFI.CreateStackObject(RC->getSize(), - RC->getAlignment(), + RS->addScavengingFrameIndex(MFI.CreateStackObject(MRI.getSpillSize(RC), + MRI.getSpillAlignment(RC), false)); } Index: lib/Target/XCore/XCoreISelLowering.cpp =================================================================== --- lib/Target/XCore/XCoreISelLowering.cpp +++ lib/Target/XCore/XCoreISelLowering.cpp @@ -77,7 +77,7 @@ addRegisterClass(MVT::i32, &XCore::GRRegsRegClass); // Compute derived properties from the register classes - computeRegisterProperties(Subtarget.getRegisterInfo()); + computeRegisterProperties(Subtarget); setStackPointerRegisterToSaveRestore(XCore::SP); Index: lib/Target/XCore/XCoreMachineFunctionInfo.cpp =================================================================== --- lib/Target/XCore/XCoreMachineFunctionInfo.cpp +++ lib/Target/XCore/XCoreMachineFunctionInfo.cpp @@ 
-9,6 +9,7 @@ #include "XCoreMachineFunctionInfo.h" #include "XCoreInstrInfo.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" #include "llvm/IR/Function.h" using namespace llvm; @@ -37,11 +38,13 @@ } const TargetRegisterClass *RC = &XCore::GRRegsRegClass; MachineFrameInfo &MFI = MF.getFrameInfo(); + MachineRegisterInfo &MRI = MF.getRegInfo(); if (! MF.getFunction()->isVarArg()) { // A fixed offset of 0 allows us to save / restore LR using entsp / retsp. - LRSpillSlot = MFI.CreateFixedObject(RC->getSize(), 0, true); + LRSpillSlot = MFI.CreateFixedObject(MRI.getSpillSize(RC), 0, true); } else { - LRSpillSlot = MFI.CreateStackObject(RC->getSize(), RC->getAlignment(), true); + LRSpillSlot = MFI.CreateStackObject(MRI.getSpillSize(RC), + MRI.getSpillAlignment(RC), true); } LRSpillSlotSet = true; return LRSpillSlot; @@ -53,7 +56,9 @@ } const TargetRegisterClass *RC = &XCore::GRRegsRegClass; MachineFrameInfo &MFI = MF.getFrameInfo(); - FPSpillSlot = MFI.CreateStackObject(RC->getSize(), RC->getAlignment(), true); + MachineRegisterInfo &MRI = MF.getRegInfo(); + FPSpillSlot = MFI.CreateStackObject(MRI.getSpillSize(RC), + MRI.getSpillAlignment(RC), true); FPSpillSlotSet = true; return FPSpillSlot; } @@ -64,8 +69,10 @@ } const TargetRegisterClass *RC = &XCore::GRRegsRegClass; MachineFrameInfo &MFI = MF.getFrameInfo(); - EHSpillSlot[0] = MFI.CreateStackObject(RC->getSize(), RC->getAlignment(), true); - EHSpillSlot[1] = MFI.CreateStackObject(RC->getSize(), RC->getAlignment(), true); + MachineRegisterInfo &MRI = MF.getRegInfo(); + unsigned Size = MRI.getSpillSize(RC), Align = MRI.getSpillAlignment(RC); + EHSpillSlot[0] = MFI.CreateStackObject(Size, Align, true); + EHSpillSlot[1] = MFI.CreateStackObject(Size, Align, true); EHSpillSlotSet = true; return EHSpillSlot; }