Index: include/llvm/MC/MCInstrDesc.h =================================================================== --- include/llvm/MC/MCInstrDesc.h +++ include/llvm/MC/MCInstrDesc.h @@ -20,9 +20,9 @@ #include namespace llvm { - class MCInst; - class MCSubtargetInfo; - class FeatureBitset; +class MCInst; +class MCSubtargetInfo; +class FeatureBitset; //===----------------------------------------------------------------------===// // Machine Operand Flags and Description @@ -60,7 +60,7 @@ OPERAND_FIRST_TARGET = 12, }; -} +} // namespace MCOI /// This holds information about one operand of a machine instruction, /// indicating the register class for register operands, etc. @@ -152,7 +152,21 @@ Add, Trap }; -} +} // namespace MCID + +/// Defines an operand group. +/// +/// Many machine operands are aggregates of multiple target operands (e.g. +/// register, immediate) and during codegen it is not always clear how many +/// of those operands to read. This type provides that information. +class MIOperandInfo { +public: + int16_t MINo; + int16_t Type; + int16_t OpsNum; + + bool isTargetType() const { return Type >= MCOI::OPERAND_FIRST_TARGET; } +}; /// Describe properties that are true of each instruction in the target /// description file. This captures information about side effects, register @@ -162,7 +176,8 @@ class MCInstrDesc { public: unsigned short Opcode; // The opcode number - unsigned short NumOperands; // Num of args (may be more if variable_ops) + unsigned short NumMIOperands; // Num of logical operands + unsigned short NumMCOperands; // Num of args (may be more if variable_ops) unsigned char NumDefs; // Num of args that are definitions unsigned char Size; // Number of bytes in encoding. 
unsigned short SchedClass; // enum identifying instr sched class @@ -170,7 +185,8 @@ uint64_t TSFlags; // Target Specific Flag values const MCPhysReg *ImplicitUses; // Registers implicitly read by this instr const MCPhysReg *ImplicitDefs; // Registers implicitly defined by this instr - const MCOperandInfo *OpInfo; // 'NumOperands' entries about operands + const MIOperandInfo *MIOpInfo; // 'NumMIOperands' entries about operands + const MCOperandInfo *MCOpInfo; // 'NumMCOperands' entries about operands // Subtarget feature that this is deprecated on, if any // -1 implies this is not deprecated by any single feature. It may still be // deprecated due to a "complex" reason, below. @@ -185,10 +201,10 @@ /// it is set. Returns -1 if it is not set. int getOperandConstraint(unsigned OpNum, MCOI::OperandConstraint Constraint) const { - if (OpNum < NumOperands && - (OpInfo[OpNum].Constraints & (1 << Constraint))) { + if (OpNum < NumMCOperands && + (MCOpInfo[OpNum].Constraints & (1 << Constraint))) { unsigned Pos = 16 + Constraint * 4; - return (int)(OpInfo[OpNum].Constraints >> Pos) & 0xf; + return (int)(MCOpInfo[OpNum].Constraints >> Pos) & 0xf; } return -1; } @@ -206,12 +222,12 @@ /// instructions may have additional operands at the end of the list, and note /// that the machine instruction may include implicit register def/uses as /// well. 
- unsigned getNumOperands() const { return NumOperands; } + unsigned getNumOperands() const { return NumMCOperands; } using const_opInfo_iterator = const MCOperandInfo *; - const_opInfo_iterator opInfo_begin() const { return OpInfo; } - const_opInfo_iterator opInfo_end() const { return OpInfo + NumOperands; } + const_opInfo_iterator opInfo_begin() const { return MCOpInfo; } + const_opInfo_iterator opInfo_end() const { return MCOpInfo + NumMCOperands; } iterator_range operands() const { return make_range(opInfo_begin(), opInfo_end()); @@ -276,7 +292,9 @@ /// Return true if this is an indirect branch, such as a /// branch through a register. - bool isIndirectBranch() const { return Flags & (1ULL << MCID::IndirectBranch); } + bool isIndirectBranch() const { + return Flags & (1ULL << MCID::IndirectBranch); + } /// Return true if this is a branch which may fall /// through to the next instruction or may transfer control flow to some other @@ -373,8 +391,9 @@ /// Note that for the optimizers to be able to take advantage of /// this property, TargetInstrInfo::getInsertSubregLikeInputs has to be /// override accordingly. - bool isInsertSubregLike() const { return Flags & (1ULL << MCID::InsertSubreg); } - + bool isInsertSubregLike() const { + return Flags & (1ULL << MCID::InsertSubreg); + } /// Return true if this instruction is convergent. /// @@ -463,7 +482,9 @@ /// instruction selection by calling a target hook. For example, this can be /// used to fill in ARM 's' optional operand depending on whether the /// conditional flag register is used. - bool hasPostISelHook() const { return Flags & (1ULL << MCID::HasPostISelHook); } + bool hasPostISelHook() const { + return Flags & (1ULL << MCID::HasPostISelHook); + } /// Returns true if this instruction is a candidate for remat. This /// flag is only used in TargetInstrInfo method isTriviallyRematerializable. 
@@ -579,7 +600,7 @@ int findFirstPredOperandIdx() const { if (isPredicable()) { for (unsigned i = 0, e = getNumOperands(); i != e; ++i) - if (OpInfo[i].isPredicate()) + if (MCOpInfo[i].isPredicate()) return i; } return -1; Index: lib/CodeGen/GlobalISel/LegalizerInfo.cpp =================================================================== --- lib/CodeGen/GlobalISel/LegalizerInfo.cpp +++ lib/CodeGen/GlobalISel/LegalizerInfo.cpp @@ -344,7 +344,7 @@ const MachineRegisterInfo &MRI) const { SmallVector Types; SmallBitVector SeenTypes(8); - const MCOperandInfo *OpInfo = MI.getDesc().OpInfo; + const MCOperandInfo *OpInfo = MI.getDesc().MCOpInfo; // FIXME: probably we'll need to cache the results here somehow? for (unsigned i = 0; i < MI.getDesc().getNumOperands(); ++i) { if (!OpInfo[i].isGenericType()) Index: lib/CodeGen/MachineInstr.cpp =================================================================== --- lib/CodeGen/MachineInstr.cpp +++ lib/CodeGen/MachineInstr.cpp @@ -847,7 +847,7 @@ const MCInstrDesc &MCID = getDesc(); if (MCID.isPredicable()) { for (unsigned i = 0, e = getNumOperands(); i != e; ++i) - if (MCID.OpInfo[i].isPredicate()) + if (MCID.MCOpInfo[i].isPredicate()) return i; } @@ -1240,7 +1240,7 @@ if (isVariadic() || OpIdx >= getNumExplicitOperands()) return MRI.getType(Op.getReg()); - auto &OpInfo = getDesc().OpInfo[OpIdx]; + auto &OpInfo = getDesc().MCOpInfo[OpIdx]; if (!OpInfo.isGenericType()) return MRI.getType(Op.getReg()); Index: lib/CodeGen/MachineVerifier.cpp =================================================================== --- lib/CodeGen/MachineVerifier.cpp +++ lib/CodeGen/MachineVerifier.cpp @@ -921,11 +921,11 @@ // Check types. SmallVector Types; for (unsigned I = 0; I < MCID.getNumOperands(); ++I) { - if (!MCID.OpInfo[I].isGenericType()) + if (!MCID.MCOpInfo[I].isGenericType()) continue; // Generic instructions specify type equality constraints between some of // their operands. Make sure these are consistent. 
- size_t TypeIdx = MCID.OpInfo[I].getGenericTypeIndex(); + size_t TypeIdx = MCID.MCOpInfo[I].getGenericTypeIndex(); Types.resize(std::max(TypeIdx + 1, Types.size())); const MachineOperand *MO = &MI->getOperand(I); @@ -1100,7 +1100,7 @@ // The first MCID.NumDefs operands must be explicit register defines if (MONum < NumDefs) { - const MCOperandInfo &MCOI = MCID.OpInfo[MONum]; + const MCOperandInfo &MCOI = MCID.MCOpInfo[MONum]; if (!MO->isReg()) report("Explicit definition must be a register", MO, MONum); else if (!MO->isDef() && !MCOI.isOptionalDef()) @@ -1108,7 +1108,7 @@ else if (MO->isImplicit()) report("Explicit definition marked as implicit", MO, MONum); } else if (MONum < MCID.getNumOperands()) { - const MCOperandInfo &MCOI = MCID.OpInfo[MONum]; + const MCOperandInfo &MCOI = MCID.MCOpInfo[MONum]; // Don't check if it's the last operand in a variadic instruction. See, // e.g., LDM_RET in the arm back end. if (MO->isReg() && Index: lib/CodeGen/SelectionDAG/InstrEmitter.cpp =================================================================== --- lib/CodeGen/SelectionDAG/InstrEmitter.cpp +++ lib/CodeGen/SelectionDAG/InstrEmitter.cpp @@ -234,7 +234,7 @@ RC = VTRC; } - if (II.OpInfo[i].isOptionalDef()) { + if (II.MCOpInfo[i].isOptionalDef()) { // Optional def must be a physical register. 
VRBase = cast(Node->getOperand(i-NumResults))->getReg(); assert(TargetRegisterInfo::isPhysicalRegister(VRBase)); @@ -323,7 +323,7 @@ const MCInstrDesc &MCID = MIB->getDesc(); bool isOptDef = IIOpNum < MCID.getNumOperands() && - MCID.OpInfo[IIOpNum].isOptionalDef(); + MCID.MCOpInfo[IIOpNum].isOptionalDef(); // If the instruction requires a register in a different class, create // a new virtual register and copy the value into it, but first attempt to Index: lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp =================================================================== --- lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp +++ lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp @@ -1404,7 +1404,7 @@ // of %noreg. When the OptionalDef is set to a valid register, we need to // handle it in the same way as an ImplicitDef. for (unsigned i = 0; i < MCID.getNumDefs(); ++i) - if (MCID.OpInfo[i].isOptionalDef()) { + if (MCID.MCOpInfo[i].isOptionalDef()) { const SDValue &OptionalDef = Node->getOperand(i - Node->getNumValues()); unsigned Reg = cast(OptionalDef)->getReg(); CheckForLiveRegDef(SU, Reg, LiveRegDefs.get(), RegAdded, LRegs, TRI); Index: lib/CodeGen/TargetInstrInfo.cpp =================================================================== --- lib/CodeGen/TargetInstrInfo.cpp +++ lib/CodeGen/TargetInstrInfo.cpp @@ -48,8 +48,8 @@ if (OpNum >= MCID.getNumOperands()) return nullptr; - short RegClass = MCID.OpInfo[OpNum].RegClass; - if (MCID.OpInfo[OpNum].isLookupPtrRegClass()) + short RegClass = MCID.MCOpInfo[OpNum].RegClass; + if (MCID.MCOpInfo[OpNum].isLookupPtrRegClass()) return TRI->getPointerRegClass(MF, RegClass); // Instructions like INSERT_SUBREG do not have fixed register classes. 
@@ -321,7 +321,7 @@ return false; for (unsigned j = 0, i = 0, e = MI.getNumOperands(); i != e; ++i) { - if (MCID.OpInfo[i].isPredicate()) { + if (MCID.MCOpInfo[i].isPredicate()) { MachineOperand &MO = MI.getOperand(i); if (MO.isReg()) { MO.setReg(Pred[j].getReg()); Index: lib/CodeGen/TargetSchedule.cpp =================================================================== --- lib/CodeGen/TargetSchedule.cpp +++ lib/CodeGen/TargetSchedule.cpp @@ -240,7 +240,7 @@ // unit latency (defaultDefLatency may be too conservative). #ifndef NDEBUG if (SCDesc->isValid() && !DefMI->getOperand(DefOperIdx).isImplicit() - && !DefMI->getDesc().OpInfo[DefOperIdx].isOptionalDef() + && !DefMI->getDesc().MCOpInfo[DefOperIdx].isOptionalDef() && SchedModel.isComplete()) { errs() << "DefIdx " << DefIdx << " exceeds machine model writes for " << *DefMI << " (Try with MCSchedModel.CompleteModel set to false)"; Index: lib/MC/MCInstrAnalysis.cpp =================================================================== --- lib/MC/MCInstrAnalysis.cpp +++ lib/MC/MCInstrAnalysis.cpp @@ -32,7 +32,7 @@ bool MCInstrAnalysis::evaluateBranch(const MCInst &Inst, uint64_t Addr, uint64_t Size, uint64_t &Target) const { if (Inst.getNumOperands() == 0 || - Info->get(Inst.getOpcode()).OpInfo[0].OperandType != MCOI::OPERAND_PCREL) + Info->get(Inst.getOpcode()).MCOpInfo[0].OperandType != MCOI::OPERAND_PCREL) return false; int64_t Imm = Inst.getOperand(0).getImm(); Index: lib/MC/MCInstrDesc.cpp =================================================================== --- lib/MC/MCInstrDesc.cpp +++ lib/MC/MCInstrDesc.cpp @@ -43,7 +43,7 @@ // There's currently no indication of which entries in a variable // list are defs and which are uses. While that's the case, this function // needs to assume they're defs in order to be conservatively correct. 
- for (int i = NumOperands, e = MI.getNumOperands(); i != e; ++i) { + for (int i = NumMCOperands, e = MI.getNumOperands(); i != e; ++i) { if (MI.getOperand(i).isReg() && RI.isSubRegisterEq(PC, MI.getOperand(i).getReg())) return true; Index: lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp =================================================================== --- lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp +++ lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp @@ -517,7 +517,7 @@ } while (I != ChainBegin); // Make sure we allocate in-order, to get the cheapest registers first. - unsigned RegClassID = ChainBegin->getDesc().OpInfo[0].RegClass; + unsigned RegClassID = ChainBegin->getDesc().MCOpInfo[0].RegClass; auto Ord = RCI.getOrder(TRI->getRegClass(RegClassID)); for (auto Reg : Ord) { if (!Units.available(Reg)) Index: lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp =================================================================== --- lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp +++ lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp @@ -145,7 +145,7 @@ // condition code) and cbz (where it is a register). 
const auto &Desc = Info->get(Inst.getOpcode()); for (unsigned i = 0, e = Inst.getNumOperands(); i != e; i++) { - if (Desc.OpInfo[i].OperandType == MCOI::OPERAND_PCREL) { + if (Desc.MCOpInfo[i].OperandType == MCOI::OPERAND_PCREL) { int64_t Imm = Inst.getOperand(i).getImm() * 4; Target = Addr + Imm; return true; Index: lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp =================================================================== --- lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp +++ lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp @@ -332,7 +332,7 @@ unsigned OpIdx = Desc.getNumDefs() + OpNo; if (OpIdx >= Desc.getNumOperands()) return nullptr; - int RegClass = Desc.OpInfo[OpIdx].RegClass; + int RegClass = Desc.MCOpInfo[OpIdx].RegClass; if (RegClass == -1) return nullptr; Index: lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp =================================================================== --- lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp +++ lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp @@ -1408,7 +1408,7 @@ } APInt Literal(64, Val); - uint8_t OpTy = InstDesc.OpInfo[OpNum].OperandType; + uint8_t OpTy = InstDesc.MCOpInfo[OpNum].OperandType; if (Imm.IsFPImm) { // We got fp literal token switch (OpTy) { @@ -2249,7 +2249,7 @@ case 4: return AMDGPU::isInlinableLiteral32(Val, hasInv2PiInlineImm()); case 2: { - const unsigned OperandType = Desc.OpInfo[OpIdx].OperandType; + const unsigned OperandType = Desc.MCOpInfo[OpIdx].OperandType; if (OperandType == AMDGPU::OPERAND_REG_INLINE_C_V2INT16 || OperandType == AMDGPU::OPERAND_REG_INLINE_C_V2FP16) { return AMDGPU::isInlinableLiteralV216(Val, hasInv2PiInlineImm()); @@ -4895,11 +4895,11 @@ static bool isRegOrImmWithInputMods(const MCInstrDesc &Desc, unsigned OpNum) { // 1. This operand is input modifiers - return Desc.OpInfo[OpNum].OperandType == AMDGPU::OPERAND_INPUT_MODS + return Desc.MCOpInfo[OpNum].OperandType == AMDGPU::OPERAND_INPUT_MODS // 2. 
This is not last operand - && Desc.NumOperands > (OpNum + 1) + && Desc.NumMCOperands > (OpNum + 1) // 3. Next operand is register class - && Desc.OpInfo[OpNum + 1].RegClass != -1 + && Desc.MCOpInfo[OpNum + 1].RegClass != -1 // 4. Next register is not tied to any other operand && Desc.getOperandConstraint(OpNum + 1, MCOI::OperandConstraint::TIED_TO) == -1; } Index: lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp =================================================================== --- lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp +++ lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp @@ -341,7 +341,7 @@ return MCDisassembler::Success; } - auto RCID = MCII->get(NewOpcode).OpInfo[VDataIdx].RegClass; + auto RCID = MCII->get(NewOpcode).MCOpInfo[VDataIdx].RegClass; // Get first subregister of VData unsigned Vdata0 = MI.getOperand(VDataIdx).getReg(); Index: lib/Target/AMDGPU/GCNHazardRecognizer.cpp =================================================================== --- lib/Target/AMDGPU/GCNHazardRecognizer.cpp +++ lib/Target/AMDGPU/GCNHazardRecognizer.cpp @@ -494,7 +494,7 @@ int VDataIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdata); int VDataRCID = -1; if (VDataIdx != -1) - VDataRCID = Desc.OpInfo[VDataIdx].RegClass; + VDataRCID = Desc.MCOpInfo[VDataIdx].RegClass; if (TII->isMUBUF(MI) || TII->isMTBUF(MI)) { // There is no hazard if the instruction does not use vector regs @@ -519,13 +519,13 @@ if (TII->isMIMG(MI)) { int SRsrcIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::srsrc); assert(SRsrcIdx != -1 && - AMDGPU::getRegBitWidth(Desc.OpInfo[SRsrcIdx].RegClass) == 256); + AMDGPU::getRegBitWidth(Desc.MCOpInfo[SRsrcIdx].RegClass) == 256); (void)SRsrcIdx; } if (TII->isFLAT(MI)) { int DataIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdata); - if (AMDGPU::getRegBitWidth(Desc.OpInfo[DataIdx].RegClass) > 64) + if (AMDGPU::getRegBitWidth(Desc.MCOpInfo[DataIdx].RegClass) > 64) return DataIdx; } Index: 
lib/Target/AMDGPU/InstPrinter/AMDGPUInstPrinter.cpp =================================================================== --- lib/Target/AMDGPU/InstPrinter/AMDGPUInstPrinter.cpp +++ lib/Target/AMDGPU/InstPrinter/AMDGPUInstPrinter.cpp @@ -520,7 +520,7 @@ printRegOperand(Op.getReg(), O, MRI); } else if (Op.isImm()) { const MCInstrDesc &Desc = MII.get(MI->getOpcode()); - switch (Desc.OpInfo[OpNo].OperandType) { + switch (Desc.MCOpInfo[OpNo].OperandType) { case AMDGPU::OPERAND_REG_IMM_INT32: case AMDGPU::OPERAND_REG_IMM_FP32: case AMDGPU::OPERAND_REG_INLINE_C_INT32: @@ -564,7 +564,7 @@ O << "0.0"; else { const MCInstrDesc &Desc = MII.get(MI->getOpcode()); - int RCID = Desc.OpInfo[OpNo].RegClass; + int RCID = Desc.MCOpInfo[OpNo].RegClass; unsigned RCBits = AMDGPU::getRegBitWidth(MRI.getRegClass(RCID)); if (RCBits == 32) printImmediate32(FloatToBits(Op.getFPImm()), STI, O); Index: lib/Target/AMDGPU/MCTargetDesc/SIMCCodeEmitter.cpp =================================================================== --- lib/Target/AMDGPU/MCTargetDesc/SIMCCodeEmitter.cpp +++ lib/Target/AMDGPU/MCTargetDesc/SIMCCodeEmitter.cpp @@ -286,7 +286,7 @@ // Is this operand a literal immediate? const MCOperand &Op = MI.getOperand(i); - if (getLitEncoding(Op, Desc.OpInfo[i], STI) != 255) + if (getLitEncoding(Op, Desc.MCOpInfo[i], STI) != 255) continue; // Yes! 
Encode it @@ -345,7 +345,7 @@ return RegEnc; } else { const MCInstrDesc &Desc = MCII.get(MI.getOpcode()); - uint32_t Enc = getLitEncoding(MO, Desc.OpInfo[OpNo], STI); + uint32_t Enc = getLitEncoding(MO, Desc.MCOpInfo[OpNo], STI); if (Enc != ~0U && Enc != 255) { return Enc | SDWA9EncValues::SRC_SGPR_MASK; } @@ -428,7 +428,7 @@ const MCInstrDesc &Desc = MCII.get(MI.getOpcode()); if (AMDGPU::isSISrcOperand(Desc, OpNo)) { - uint32_t Enc = getLitEncoding(MO, Desc.OpInfo[OpNo], STI); + uint32_t Enc = getLitEncoding(MO, Desc.MCOpInfo[OpNo], STI); if (Enc != ~0U && (Enc != 255 || Desc.getSize() == 4)) return Enc; Index: lib/Target/AMDGPU/SIFoldOperands.cpp =================================================================== --- lib/Target/AMDGPU/SIFoldOperands.cpp +++ lib/Target/AMDGPU/SIFoldOperands.cpp @@ -140,7 +140,7 @@ unsigned Opc = IsFMA ? AMDGPU::V_FMA_F32 : (IsF32 ? AMDGPU::V_MAD_F32 : AMDGPU::V_MAD_F16); const MCInstrDesc &MadDesc = TII->get(Opc); - return TII->isInlineConstant(OpToFold, MadDesc.OpInfo[OpNo].OperandType); + return TII->isInlineConstant(OpToFold, MadDesc.MCOpInfo[OpNo].OperandType); } return false; } @@ -384,7 +384,7 @@ // don't have defined register classes. if (UseDesc.isVariadic() || UseOp.isImplicit() || - UseDesc.OpInfo[UseOpIdx].RegClass == -1) + UseDesc.MCOpInfo[UseOpIdx].RegClass == -1) return; } @@ -400,7 +400,7 @@ const MCInstrDesc &FoldDesc = OpToFold.getParent()->getDesc(); const TargetRegisterClass *FoldRC = - TRI->getRegClass(FoldDesc.OpInfo[0].RegClass); + TRI->getRegClass(FoldDesc.MCOpInfo[0].RegClass); // Split 64-bit constants into 32-bits for folding. 
Index: lib/Target/AMDGPU/SIInstrInfo.h =================================================================== --- lib/Target/AMDGPU/SIInstrInfo.h +++ lib/Target/AMDGPU/SIInstrInfo.h @@ -616,23 +616,23 @@ const MachineOperand &DefMO) const { assert(UseMO.getParent() == &MI); int OpIdx = MI.getOperandNo(&UseMO); - if (!MI.getDesc().OpInfo || OpIdx >= MI.getDesc().NumOperands) { + if (!MI.getDesc().MCOpInfo || OpIdx >= MI.getDesc().NumMCOperands) { return false; } - return isInlineConstant(DefMO, MI.getDesc().OpInfo[OpIdx]); + return isInlineConstant(DefMO, MI.getDesc().MCOpInfo[OpIdx]); } /// \p returns true if the operand \p OpIdx in \p MI is a valid inline /// immediate. bool isInlineConstant(const MachineInstr &MI, unsigned OpIdx) const { const MachineOperand &MO = MI.getOperand(OpIdx); - return isInlineConstant(MO, MI.getDesc().OpInfo[OpIdx].OperandType); + return isInlineConstant(MO, MI.getDesc().MCOpInfo[OpIdx].OperandType); } bool isInlineConstant(const MachineInstr &MI, unsigned OpIdx, const MachineOperand &MO) const { - if (!MI.getDesc().OpInfo || OpIdx >= MI.getDesc().NumOperands) + if (!MI.getDesc().MCOpInfo || OpIdx >= MI.getDesc().NumMCOperands) return false; if (MI.isCopy()) { @@ -644,7 +644,7 @@ return isInlineConstant(MO, OpType); } - return isInlineConstant(MO, MI.getDesc().OpInfo[OpIdx].OperandType); + return isInlineConstant(MO, MI.getDesc().MCOpInfo[OpIdx].OperandType); } bool isInlineConstant(const MachineOperand &MO) const { @@ -705,7 +705,7 @@ /// Return the size in bytes of the operand OpNo on the given // instruction opcode. unsigned getOpSize(uint16_t Opcode, unsigned OpNo) const { - const MCOperandInfo &OpInfo = get(Opcode).OpInfo[OpNo]; + const MCOperandInfo &OpInfo = get(Opcode).MCOpInfo[OpNo]; if (OpInfo.RegClass == -1) { // If this is an immediate operand, this must be a 32-bit literal. 
Index: lib/Target/AMDGPU/SIInstrInfo.cpp =================================================================== --- lib/Target/AMDGPU/SIInstrInfo.cpp +++ lib/Target/AMDGPU/SIInstrInfo.cpp @@ -2477,7 +2477,7 @@ bool SIInstrInfo::isImmOperandLegal(const MachineInstr &MI, unsigned OpNo, const MachineOperand &MO) const { - const MCOperandInfo &OpInfo = get(MI.getOpcode()).OpInfo[OpNo]; + const MCOperandInfo &OpInfo = get(MI.getOpcode()).MCOpInfo[OpNo]; assert(MO.isImm() || MO.isTargetIndex() || MO.isFI()); @@ -2660,9 +2660,9 @@ return false; } - int RegClass = Desc.OpInfo[i].RegClass; + int RegClass = Desc.MCOpInfo[i].RegClass; - switch (Desc.OpInfo[i].OperandType) { + switch (Desc.MCOpInfo[i].OperandType) { case MCOI::OPERAND_REGISTER: if (MI.getOperand(i).isImm()) { ErrInfo = "Illegal immediate value for operand."; @@ -2828,7 +2828,7 @@ if (OpIdx == -1) break; const MachineOperand &MO = MI.getOperand(OpIdx); - if (usesConstantBus(MRI, MO, MI.getDesc().OpInfo[OpIdx])) { + if (usesConstantBus(MRI, MO, MI.getDesc().MCOpInfo[OpIdx])) { if (MO.isReg()) { if (MO.getReg() != SGPRUsed) ++ConstantBusCount; @@ -3047,7 +3047,7 @@ const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); const MCInstrDesc &Desc = get(MI.getOpcode()); if (MI.isVariadic() || OpNo >= Desc.getNumOperands() || - Desc.OpInfo[OpNo].RegClass == -1) { + Desc.MCOpInfo[OpNo].RegClass == -1) { unsigned Reg = MI.getOperand(OpNo).getReg(); if (TargetRegisterInfo::isVirtualRegister(Reg)) @@ -3055,7 +3055,7 @@ return RI.getPhysRegClass(Reg); } - unsigned RCID = Desc.OpInfo[OpNo].RegClass; + unsigned RCID = Desc.MCOpInfo[OpNo].RegClass; return RI.getRegClass(RCID); } @@ -3076,7 +3076,7 @@ MachineBasicBlock *MBB = MI.getParent(); MachineOperand &MO = MI.getOperand(OpIdx); MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); - unsigned RCID = get(MI.getOpcode()).OpInfo[OpIdx].RegClass; + unsigned RCID = get(MI.getOpcode()).MCOpInfo[OpIdx].RegClass; const TargetRegisterClass *RC = 
RI.getRegClass(RCID); unsigned Opcode = AMDGPU::V_MOV_B32_e32; if (MO.isReg()) @@ -3200,7 +3200,7 @@ const MachineOperand *MO) const { const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); const MCInstrDesc &InstDesc = MI.getDesc(); - const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpIdx]; + const MCOperandInfo &OpInfo = InstDesc.MCOpInfo[OpIdx]; const TargetRegisterClass *DefinedRC = OpInfo.RegClass != -1 ? RI.getRegClass(OpInfo.RegClass) : nullptr; if (!MO) @@ -3218,10 +3218,10 @@ const MachineOperand &Op = MI.getOperand(i); if (Op.isReg()) { if ((Op.getReg() != SGPRUsed.Reg || Op.getSubReg() != SGPRUsed.SubReg) && - usesConstantBus(MRI, Op, InstDesc.OpInfo[i])) { + usesConstantBus(MRI, Op, InstDesc.MCOpInfo[i])) { return false; } - } else if (InstDesc.OpInfo[i].OperandType == AMDGPU::OPERAND_KIMM32) { + } else if (InstDesc.MCOpInfo[i].OperandType == AMDGPU::OPERAND_KIMM32) { return false; } } @@ -3291,7 +3291,7 @@ // VOP2 src0 instructions support all operand types, so we don't need to check // their legality. If src1 is already legal, we don't need to do anything. - if (isLegalRegOperand(MRI, InstrDesc.OpInfo[Src1Idx], Src1)) + if (isLegalRegOperand(MRI, InstrDesc.MCOpInfo[Src1Idx], Src1)) return; // Special case: V_READLANE_B32 accepts only immediate or SGPR operands for @@ -3325,7 +3325,7 @@ // TODO: Other immediate-like operand kinds could be commuted if there was a // MachineOperand::ChangeTo* for them. 
if ((!Src1.isImm() && !Src1.isReg()) || - !isLegalRegOperand(MRI, InstrDesc.OpInfo[Src1Idx], Src0)) { + !isLegalRegOperand(MRI, InstrDesc.MCOpInfo[Src1Idx], Src0)) { legalizeOpWithMove(MI, Src1Idx); return; } @@ -3626,7 +3626,7 @@ if (SRsrcIdx != -1) { // We have an MUBUF instruction MachineOperand *SRsrc = &MI.getOperand(SRsrcIdx); - unsigned SRsrcRC = get(MI.getOpcode()).OpInfo[SRsrcIdx].RegClass; + unsigned SRsrcRC = get(MI.getOpcode()).MCOpInfo[SRsrcIdx].RegClass; if (RI.getCommonSubClass(MRI.getRegClass(SRsrc->getReg()), RI.getRegClass(SRsrcRC))) { // The operands are legal. @@ -4612,7 +4612,7 @@ // Is this operand statically required to be an SGPR based on the operand // constraints? - const TargetRegisterClass *OpRC = RI.getRegClass(Desc.OpInfo[Idx].RegClass); + const TargetRegisterClass *OpRC = RI.getRegClass(Desc.MCOpInfo[Idx].RegClass); bool IsRequiredSGPR = RI.isSGPRClass(OpRC); if (IsRequiredSGPR) return MO.getReg(); @@ -4793,14 +4793,14 @@ if (Src0Idx == -1) return 4; // No operands. - if (isLiteralConstantLike(MI.getOperand(Src0Idx), Desc.OpInfo[Src0Idx])) + if (isLiteralConstantLike(MI.getOperand(Src0Idx), Desc.MCOpInfo[Src0Idx])) return 8; int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1); if (Src1Idx == -1) return 4; - if (isLiteralConstantLike(MI.getOperand(Src1Idx), Desc.OpInfo[Src1Idx])) + if (isLiteralConstantLike(MI.getOperand(Src1Idx), Desc.MCOpInfo[Src1Idx])) return 8; return 4; @@ -5012,7 +5012,7 @@ if (Idx == -1) // e.g. 
s_memtime return false; - const auto RCID = MI.getDesc().OpInfo[Idx].RegClass; + const auto RCID = MI.getDesc().MCOpInfo[Idx].RegClass; return RCID == AMDGPU::SReg_128RegClassID; } Index: lib/Target/AMDGPU/SIPeepholeSDWA.cpp =================================================================== --- lib/Target/AMDGPU/SIPeepholeSDWA.cpp +++ lib/Target/AMDGPU/SIPeepholeSDWA.cpp @@ -1090,8 +1090,8 @@ continue; unsigned I = MI.getOperandNo(&Op); - if (Desc.OpInfo[I].RegClass == -1 || - !TRI->hasVGPRs(TRI->getRegClass(Desc.OpInfo[I].RegClass))) + if (Desc.MCOpInfo[I].RegClass == -1 || + !TRI->hasVGPRs(TRI->getRegClass(Desc.MCOpInfo[I].RegClass))) continue; if (ST.hasSDWAScalar() && ConstantBusCount == 0 && Op.isReg() && Index: lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h =================================================================== --- lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h +++ lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h @@ -420,7 +420,7 @@ LLVM_READNONE inline unsigned getOperandSize(const MCInstrDesc &Desc, unsigned OpNo) { - return getOperandSize(Desc.OpInfo[OpNo]); + return getOperandSize(Desc.MCOpInfo[OpNo]); } /// Is this literal inlinable Index: lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp =================================================================== --- lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp +++ lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp @@ -754,15 +754,15 @@ #undef MAP_REG2REG bool isSISrcOperand(const MCInstrDesc &Desc, unsigned OpNo) { - assert(OpNo < Desc.NumOperands); - unsigned OpType = Desc.OpInfo[OpNo].OperandType; + assert(OpNo < Desc.NumMCOperands); + unsigned OpType = Desc.MCOpInfo[OpNo].OperandType; return OpType >= AMDGPU::OPERAND_SRC_FIRST && OpType <= AMDGPU::OPERAND_SRC_LAST; } bool isSISrcFPOperand(const MCInstrDesc &Desc, unsigned OpNo) { - assert(OpNo < Desc.NumOperands); - unsigned OpType = Desc.OpInfo[OpNo].OperandType; + assert(OpNo < Desc.NumMCOperands); + unsigned OpType = Desc.MCOpInfo[OpNo].OperandType; switch (OpType) { case 
AMDGPU::OPERAND_REG_IMM_FP32: case AMDGPU::OPERAND_REG_IMM_FP64: @@ -778,8 +778,8 @@ } bool isSISrcInlinableOperand(const MCInstrDesc &Desc, unsigned OpNo) { - assert(OpNo < Desc.NumOperands); - unsigned OpType = Desc.OpInfo[OpNo].OperandType; + assert(OpNo < Desc.NumMCOperands); + unsigned OpType = Desc.MCOpInfo[OpNo].OperandType; return OpType >= AMDGPU::OPERAND_REG_INLINE_C_FIRST && OpType <= AMDGPU::OPERAND_REG_INLINE_C_LAST; } @@ -822,8 +822,8 @@ unsigned getRegOperandSize(const MCRegisterInfo *MRI, const MCInstrDesc &Desc, unsigned OpNo) { - assert(OpNo < Desc.NumOperands); - unsigned RCID = Desc.OpInfo[OpNo].RegClass; + assert(OpNo < Desc.NumMCOperands); + unsigned RCID = Desc.MCOpInfo[OpNo].RegClass; return getRegBitWidth(MRI->getRegClass(RCID)) / 8; } Index: lib/Target/ARM/ARMBaseInstrInfo.cpp =================================================================== --- lib/Target/ARM/ARMBaseInstrInfo.cpp +++ lib/Target/ARM/ARMBaseInstrInfo.cpp @@ -2112,7 +2112,7 @@ // Copy all the DefMI operands, excluding its (null) predicate. const MCInstrDesc &DefDesc = DefMI->getDesc(); for (unsigned i = 1, e = DefDesc.getNumOperands(); - i != e && !DefDesc.OpInfo[i].isPredicate(); ++i) + i != e && !DefDesc.MCOpInfo[i].isPredicate(); ++i) NewMI.add(DefMI->getOperand(i)); unsigned CondCode = MI.getOperand(3).getImm(); Index: lib/Target/ARM/ARMISelLowering.cpp =================================================================== --- lib/Target/ARM/ARMISelLowering.cpp +++ lib/Target/ARM/ARMISelLowering.cpp @@ -9650,7 +9650,7 @@ // Any ARM instruction that sets the 's' bit should specify an optional // "cc_out" operand in the last operand position. 
- if (!MI.hasOptionalDef() || !MCID->OpInfo[ccOutIdx].isOptionalDef()) { + if (!MI.hasOptionalDef() || !MCID->MCOpInfo[ccOutIdx].isOptionalDef()) { assert(!NewOpc && "Optional cc_out operand required"); return; } Index: lib/Target/ARM/AsmParser/ARMAsmParser.cpp =================================================================== --- lib/Target/ARM/AsmParser/ARMAsmParser.cpp +++ lib/Target/ARM/AsmParser/ARMAsmParser.cpp @@ -9031,12 +9031,12 @@ if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) { assert(MCID.hasOptionalDef() && "optionally flag setting instruction missing optional def operand"); - assert(MCID.NumOperands == Inst.getNumOperands() && + assert(MCID.NumMCOperands == Inst.getNumOperands() && "operand count mismatch!"); // Find the optional-def operand (cc_out). unsigned OpNo; for (OpNo = 0; - !MCID.OpInfo[OpNo].isOptionalDef() && OpNo < MCID.NumOperands; + !MCID.MCOpInfo[OpNo].isOptionalDef() && OpNo < MCID.NumMCOperands; ++OpNo) ; // If we're parsing Thumb1, reject it completely. @@ -9089,8 +9089,8 @@ Inst.getOperand(0).getReg() == ARM::SP && (isThumb() && !hasV8Ops())) return Match_InvalidOperand; - for (unsigned I = 0; I < MCID.NumOperands; ++I) - if (MCID.OpInfo[I].RegClass == ARM::rGPRRegClassID) { + for (unsigned I = 0; I < MCID.NumMCOperands; ++I) + if (MCID.MCOpInfo[I].RegClass == ARM::rGPRRegClassID) { // rGPRRegClass excludes PC, and also excluded SP before ARMv8 if ((Inst.getOperand(I).getReg() == ARM::SP) && !hasV8Ops()) return Match_RequiresV8; Index: lib/Target/ARM/Disassembler/ARMDisassembler.cpp =================================================================== --- lib/Target/ARM/Disassembler/ARMDisassembler.cpp +++ lib/Target/ARM/Disassembler/ARMDisassembler.cpp @@ -554,8 +554,8 @@ // auto-generated decoder won't inject the CPSR operand. We need to fix // that as a post-pass. 
static void AddThumb1SBit(MCInst &MI, bool InITBlock) { - const MCOperandInfo *OpInfo = ARMInsts[MI.getOpcode()].OpInfo; - unsigned short NumOps = ARMInsts[MI.getOpcode()].NumOperands; + const MCOperandInfo *OpInfo = ARMInsts[MI.getOpcode()].MCOpInfo; + unsigned short NumOps = ARMInsts[MI.getOpcode()].NumMCOperands; MCInst::iterator I = MI.begin(); for (unsigned i = 0; i < NumOps; ++i, ++I) { if (I == MI.end()) break; @@ -625,8 +625,8 @@ if (ITBlock.instrInITBlock()) ITBlock.advanceITState(); - const MCOperandInfo *OpInfo = ARMInsts[MI.getOpcode()].OpInfo; - unsigned short NumOps = ARMInsts[MI.getOpcode()].NumOperands; + const MCOperandInfo *OpInfo = ARMInsts[MI.getOpcode()].MCOpInfo; + unsigned short NumOps = ARMInsts[MI.getOpcode()].NumMCOperands; MCInst::iterator I = MI.begin(); for (unsigned i = 0; i < NumOps; ++i, ++I) { if (I == MI.end()) break; @@ -664,9 +664,9 @@ if (ITBlock.instrInITBlock()) ITBlock.advanceITState(); - const MCOperandInfo *OpInfo = ARMInsts[MI.getOpcode()].OpInfo; + const MCOperandInfo *OpInfo = ARMInsts[MI.getOpcode()].MCOpInfo; MCInst::iterator I = MI.begin(); - unsigned short NumOps = ARMInsts[MI.getOpcode()].NumOperands; + unsigned short NumOps = ARMInsts[MI.getOpcode()].NumMCOperands; for (unsigned i = 0; i < NumOps; ++i, ++I) { if (OpInfo[i].isPredicate() ) { I->setImm(CC); Index: lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp =================================================================== --- lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp +++ lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp @@ -264,7 +264,7 @@ bool evaluateBranch(const MCInst &Inst, uint64_t Addr, uint64_t Size, uint64_t &Target) const override { // We only handle PCRel branches for now. 
- if (Info->get(Inst.getOpcode()).OpInfo[0].OperandType!=MCOI::OPERAND_PCREL) + if (Info->get(Inst.getOpcode()).MCOpInfo[0].OperandType!=MCOI::OPERAND_PCREL) return false; int64_t Imm = Inst.getOperand(0).getImm(); @@ -280,7 +280,7 @@ bool evaluateBranch(const MCInst &Inst, uint64_t Addr, uint64_t Size, uint64_t &Target) const override { // We only handle PCRel branches for now. - if (Info->get(Inst.getOpcode()).OpInfo[0].OperandType!=MCOI::OPERAND_PCREL) + if (Info->get(Inst.getOpcode()).MCOpInfo[0].OperandType!=MCOI::OPERAND_PCREL) return false; int64_t Imm = Inst.getOperand(0).getImm(); Index: lib/Target/ARM/Thumb2SizeReduction.cpp =================================================================== --- lib/Target/ARM/Thumb2SizeReduction.cpp +++ lib/Target/ARM/Thumb2SizeReduction.cpp @@ -818,9 +818,9 @@ // Transfer the rest of operands. unsigned NumOps = MCID.getNumOperands(); for (unsigned i = 1, e = MI->getNumOperands(); i != e; ++i) { - if (i < NumOps && MCID.OpInfo[i].isOptionalDef()) + if (i < NumOps && MCID.MCOpInfo[i].isOptionalDef()) continue; - if (SkipPred && MCID.OpInfo[i].isPredicate()) + if (SkipPred && MCID.MCOpInfo[i].isPredicate()) continue; MIB.add(MI->getOperand(i)); } @@ -854,7 +854,7 @@ const MCInstrDesc &MCID = MI->getDesc(); for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i) { - if (MCID.OpInfo[i].isPredicate()) + if (MCID.MCOpInfo[i].isPredicate()) continue; const MachineOperand &MO = MI->getOperand(i); if (MO.isReg()) { @@ -864,7 +864,7 @@ if (Entry.LowRegs1 && !isARMLowRegister(Reg)) return false; } else if (MO.isImm() && - !MCID.OpInfo[i].isPredicate()) { + !MCID.MCOpInfo[i].isPredicate()) { if (((unsigned)MO.getImm()) > Limit) return false; } @@ -910,7 +910,7 @@ // Transfer the rest of operands. 
unsigned NumOps = MCID.getNumOperands(); for (unsigned i = 1, e = MI->getNumOperands(); i != e; ++i) { - if (i < NumOps && MCID.OpInfo[i].isOptionalDef()) + if (i < NumOps && MCID.MCOpInfo[i].isOptionalDef()) continue; if ((MCID.getOpcode() == ARM::t2RSBSri || MCID.getOpcode() == ARM::t2RSBri || @@ -920,7 +920,7 @@ MCID.getOpcode() == ARM::t2UXTH) && i == 2) // Skip the zero immediate operand, it's now implicit. continue; - bool isPred = (i < NumOps && MCID.OpInfo[i].isPredicate()); + bool isPred = (i < NumOps && MCID.MCOpInfo[i].isPredicate()); if (SkipPred && isPred) continue; const MachineOperand &MO = MI->getOperand(i); Index: lib/Target/Hexagon/MCTargetDesc/HexagonMCChecker.cpp =================================================================== --- lib/Target/Hexagon/MCTargetDesc/HexagonMCChecker.cpp +++ lib/Target/Hexagon/MCTargetDesc/HexagonMCChecker.cpp @@ -455,7 +455,7 @@ } MCInstrDesc const &Desc = HexagonMCInstrInfo::getDesc(MCII, *std::get<0>(Producer)); - if (Desc.OpInfo[std::get<1>(Producer)].RegClass == + if (Desc.MCOpInfo[std::get<1>(Producer)].RegClass == Hexagon::DoubleRegsRegClassID) { reportNote(std::get<0>(Producer)->getLoc(), "Double registers cannot be new-value producers"); Index: lib/Target/Hexagon/MCTargetDesc/HexagonMCInstrInfo.cpp =================================================================== --- lib/Target/Hexagon/MCTargetDesc/HexagonMCInstrInfo.cpp +++ lib/Target/Hexagon/MCTargetDesc/HexagonMCInstrInfo.cpp @@ -821,7 +821,7 @@ return {0, 0, false}; MCInstrDesc const &Desc = getDesc(MCII, MCI); for (auto I = Desc.getNumDefs(), N = Desc.getNumOperands(); I != N; ++I) - if (Desc.OpInfo[I].RegClass == Hexagon::PredRegsRegClassID) + if (Desc.MCOpInfo[I].RegClass == Hexagon::PredRegsRegClassID) return {MCI.getOperand(I).getReg(), I, isPredicatedTrue(MCII, MCI)}; return {0, 0, false}; } Index: lib/Target/Lanai/LanaiInstrInfo.cpp =================================================================== --- lib/Target/Lanai/LanaiInstrInfo.cpp +++ 
lib/Target/Lanai/LanaiInstrInfo.cpp @@ -517,7 +517,7 @@ // Copy all the DefMI operands, excluding its (null) predicate. const MCInstrDesc &DefDesc = DefMI->getDesc(); for (unsigned i = 1, e = DefDesc.getNumOperands(); - i != e && !DefDesc.OpInfo[i].isPredicate(); ++i) + i != e && !DefDesc.MCOpInfo[i].isPredicate(); ++i) NewMI.add(DefMI->getOperand(i)); unsigned CondCode = MI.getOperand(3).getImm(); Index: lib/Target/Lanai/MCTargetDesc/LanaiMCTargetDesc.cpp =================================================================== --- lib/Target/Lanai/MCTargetDesc/LanaiMCTargetDesc.cpp +++ lib/Target/Lanai/MCTargetDesc/LanaiMCTargetDesc.cpp @@ -98,7 +98,7 @@ if (Inst.getNumOperands() == 0) return false; - if (Info->get(Inst.getOpcode()).OpInfo[0].OperandType == + if (Info->get(Inst.getOpcode()).MCOpInfo[0].OperandType == MCOI::OPERAND_PCREL) { int64_t Imm = Inst.getOperand(0).getImm(); Target = Addr + Size + Imm; Index: lib/Target/Mips/AsmParser/MipsAsmParser.cpp =================================================================== --- lib/Target/Mips/AsmParser/MipsAsmParser.cpp +++ lib/Target/Mips/AsmParser/MipsAsmParser.cpp @@ -2128,7 +2128,7 @@ // Check the offset of memory operand, if it is a symbol // reference or immediate we may have to expand instructions. for (unsigned i = 0; i < MCID.getNumOperands(); i++) { - const MCOperandInfo &OpInfo = MCID.OpInfo[i]; + const MCOperandInfo &OpInfo = MCID.MCOpInfo[i]; if ((OpInfo.OperandType == MCOI::OPERAND_MEMORY) || (OpInfo.OperandType == MCOI::OPERAND_UNKNOWN)) { MCOperand &Op = Inst.getOperand(i); @@ -2162,7 +2162,7 @@ if (MCID.mayLoad() && Inst.getOpcode() != Mips::LWP_MM) { // Try to create 16-bit GP relative load instruction. 
for (unsigned i = 0; i < MCID.getNumOperands(); i++) { - const MCOperandInfo &OpInfo = MCID.OpInfo[i]; + const MCOperandInfo &OpInfo = MCID.MCOpInfo[i]; if ((OpInfo.OperandType == MCOI::OPERAND_MEMORY) || (OpInfo.OperandType == MCOI::OPERAND_UNKNOWN)) { MCOperand &Op = Inst.getOperand(i); @@ -3579,7 +3579,7 @@ unsigned TmpReg = DstReg; const MCInstrDesc &Desc = getInstDesc(Inst.getOpcode()); - int16_t DstRegClass = Desc.OpInfo[0].RegClass; + int16_t DstRegClass = Desc.MCOpInfo[0].RegClass; unsigned DstRegClassID = getContext().getRegisterInfo()->getRegClass(DstRegClass).getID(); bool IsGPR = (DstRegClassID == Mips::GPR32RegClassID) || Index: lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp =================================================================== --- lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp +++ lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp @@ -134,7 +134,7 @@ unsigned NumOps = Inst.getNumOperands(); if (NumOps == 0) return false; - switch (Info->get(Inst.getOpcode()).OpInfo[NumOps - 1].OperandType) { + switch (Info->get(Inst.getOpcode()).MCOpInfo[NumOps - 1].OperandType) { case MCOI::OPERAND_UNKNOWN: case MCOI::OPERAND_IMMEDIATE: // jal, bal ... 
Index: lib/Target/Mips/MipsSEInstrInfo.cpp =================================================================== --- lib/Target/Mips/MipsSEInstrInfo.cpp +++ lib/Target/Mips/MipsSEInstrInfo.cpp @@ -690,7 +690,7 @@ MipsSEInstrInfo::compareOpndSize(unsigned Opc, const MachineFunction &MF) const { const MCInstrDesc &Desc = get(Opc); - assert(Desc.NumOperands == 2 && "Unary instruction expected."); + assert(Desc.NumMCOperands == 2 && "Unary instruction expected."); const MipsRegisterInfo *RI = &getRegisterInfo(); unsigned DstRegSize = RI->getRegSizeInBits(*getRegClass(Desc, 0, RI, MF)); unsigned SrcRegSize = RI->getRegSizeInBits(*getRegClass(Desc, 1, RI, MF)); Index: lib/Target/PowerPC/PPCInstrInfo.cpp =================================================================== --- lib/Target/PowerPC/PPCInstrInfo.cpp +++ lib/Target/PowerPC/PPCInstrInfo.cpp @@ -1337,7 +1337,7 @@ assert(UseIdx < UseMI.getNumOperands() && "Cannot find Reg in UseMI"); assert(UseIdx < UseMCID.getNumOperands() && "No operand description for Reg"); - const MCOperandInfo *UseInfo = &UseMCID.OpInfo[UseIdx]; + const MCOperandInfo *UseInfo = &UseMCID.MCOpInfo[UseIdx]; // We can fold the zero if this register requires a GPRC_NOR0/G8RC_NOX0 // register (which might also be specified as a pointer class kind). Index: utils/TableGen/InstrInfoEmitter.cpp =================================================================== --- utils/TableGen/InstrInfoEmitter.cpp +++ utils/TableGen/InstrInfoEmitter.cpp @@ -74,7 +74,8 @@ void emitRecord(const CodeGenInstruction &Inst, unsigned Num, Record *InstrInfo, std::map, unsigned> &EL, - const OperandInfoMapTy &OpInfo, + const OperandInfoMapTy &MIOpInfo, + const OperandInfoMapTy &MCOpInfo, raw_ostream &OS); void emitOperandTypesEnum(raw_ostream &OS, const CodeGenTarget &Target); void initOperandMapData( @@ -86,8 +87,12 @@ ArrayRef NumberedInstructions); // Operand information. 
- void EmitOperandInfo(raw_ostream &OS, OperandInfoMapTy &OperandInfoIDs); - std::vector GetOperandInfo(const CodeGenInstruction &Inst); + void EmitMIOperandInfo(raw_ostream &OS, OperandInfoMapTy &MCOperandInfoIDs); + std::vector GetMIOperandInfo(const CodeGenTarget &Target, + const CodeGenInstruction &Inst); + + void EmitMCOperandInfo(raw_ostream &OS, OperandInfoMapTy &MCOperandInfoIDs); + std::vector GetMCOperandInfo(const CodeGenInstruction &Inst); }; } // end anonymous namespace @@ -105,7 +110,62 @@ //===----------------------------------------------------------------------===// std::vector -InstrInfoEmitter::GetOperandInfo(const CodeGenInstruction &Inst) { +InstrInfoEmitter::GetMIOperandInfo(const CodeGenTarget &Target, + const CodeGenInstruction &Inst) { + + std::vector Result; + + const std::string &Namespace = Target.getInstNamespace(); + + for (auto &Op : Inst.Operands) { + + // One record + std::string Res; + + // This might be a multiple operand thing. Targets like X86 have + // registers in their multi-operand operands. It may also be an anonymous + // operand, which has a single operand, but no declared class for the + // operand. + DagInit *MIOI = Op.MIOperandInfo; + + if (!MIOI || MIOI->getNumArgs() == 0) { + Res = std::to_string(Op.MIOperandNo) + ", " + Op.OperandType + ", 1"; + } else { + Res = std::to_string(Op.MIOperandNo) + ", " + Namespace + + "::MIOpTypes::" + Op.Rec->getName().str() + ", " + + std::to_string(Op.MINumOperands); + } + + Result.push_back(Res); + } + + return Result; +} + +void InstrInfoEmitter::EmitMIOperandInfo(raw_ostream &OS, + OperandInfoMapTy &MIOperandInfoIDs) { + // ID #0 is for no operand info. 
+ unsigned OperandListNum = 0; + MIOperandInfoIDs[std::vector()] = ++OperandListNum; + + OS << "\n"; + const CodeGenTarget &Target = CDP.getTargetInfo(); + for (const CodeGenInstruction *Inst : Target.getInstructionsByEnumValue()) { + std::vector OperandInfo = GetMIOperandInfo(Target, *Inst); + unsigned &N = MIOperandInfoIDs[OperandInfo]; + if (N != 0) + continue; + + N = ++OperandListNum; + OS << "static const MIOperandInfo MIOperandInfo" << N << "[] = { "; + for (const std::string &Info : OperandInfo) + OS << "{ " << Info << " }, "; + OS << "};\n"; + } +} + +std::vector +InstrInfoEmitter::GetMCOperandInfo(const CodeGenInstruction &Inst) { std::vector Result; for (auto &Op : Inst.Operands) { @@ -189,21 +249,21 @@ return Result; } -void InstrInfoEmitter::EmitOperandInfo(raw_ostream &OS, - OperandInfoMapTy &OperandInfoIDs) { +void InstrInfoEmitter::EmitMCOperandInfo(raw_ostream &OS, + OperandInfoMapTy &MCOperandInfoIDs) { // ID #0 is for no operand info. unsigned OperandListNum = 0; - OperandInfoIDs[std::vector()] = ++OperandListNum; + MCOperandInfoIDs[std::vector()] = ++OperandListNum; OS << "\n"; const CodeGenTarget &Target = CDP.getTargetInfo(); for (const CodeGenInstruction *Inst : Target.getInstructionsByEnumValue()) { - std::vector OperandInfo = GetOperandInfo(*Inst); - unsigned &N = OperandInfoIDs[OperandInfo]; + std::vector OperandInfo = GetMCOperandInfo(*Inst); + unsigned &N = MCOperandInfoIDs[OperandInfo]; if (N != 0) continue; N = ++OperandListNum; - OS << "static const MCOperandInfo OperandInfo" << N << "[] = { "; + OS << "static const MCOperandInfo MCOperandInfo" << N << "[] = { "; for (const std::string &Info : OperandInfo) OS << "{ " << Info << " }, "; OS << "};\n"; @@ -211,7 +271,7 @@ } /// Initialize data structures for generating operand name mappings. -/// +/// /// \param Operands [out] A map used to generate the OpName enum with operand /// names as its keys and operand enum values as its values. 
/// \param OperandMap [out] A map for representing the operand name mappings for @@ -330,25 +390,23 @@ StringRef Namespace = Target.getInstNamespace(); std::vector Operands = Records.getAllDerivedDefinitions("Operand"); - OS << "#ifdef GET_INSTRINFO_OPERAND_TYPES_ENUM\n"; - OS << "#undef GET_INSTRINFO_OPERAND_TYPES_ENUM\n"; - OS << "namespace llvm {\n"; OS << "namespace " << Namespace << " {\n"; - OS << "namespace OpTypes {\n"; - OS << "enum OperandType {\n"; + OS << "namespace MIOpTypes {\n"; + OS << "enum MIOperandType {\n"; - unsigned EnumVal = 0; + bool First = true; for (const Record *Op : Operands) { - if (!Op->isAnonymous()) - OS << " " << Op->getName() << " = " << EnumVal << ",\n"; - ++EnumVal; + OS << " " << Op->getName(); + if (First) { + OS << " = llvm::MCOI::OPERAND_FIRST_TARGET"; + First = false; + } + OS << ",\n"; } OS << " OPERAND_TYPE_LIST_END" << "\n};\n"; OS << "} // end namespace OpTypes\n"; OS << "} // end namespace " << Namespace << "\n"; - OS << "} // end namespace llvm\n"; - OS << "#endif // GET_INSTRINFO_OPERAND_TYPES_ENUM\n\n"; } void InstrInfoEmitter::emitMCIIHelperMethods(raw_ostream &OS, @@ -428,11 +486,23 @@ emitSourceFileHeader("Target Instruction Enum Values and Descriptors", OS); emitEnums(OS); + // Get the MI types separately from data + OS << "#ifdef GET_INSTRINFO_MI_OPS_INFO\n"; + OS << "#undef GET_INSTRINFO_MI_OPS_INFO\n"; + OS << "namespace llvm {\n\n"; + + emitOperandTypesEnum(OS, CDP.getTargetInfo()); + + OS << "} // end llvm namespace\n"; + OS << "#endif // GET_INSTRINFO_MI_OPS_INFO\n\n"; + OS << "#ifdef GET_INSTRINFO_MC_DESC\n"; OS << "#undef GET_INSTRINFO_MC_DESC\n"; OS << "namespace llvm {\n\n"; + emitOperandTypesEnum(OS, CDP.getTargetInfo()); + CodeGenTarget &Target = CDP.getTargetInfo(); const std::string &TargetName = Target.getName(); Record *InstrInfo = Target.getInstructionSet(); @@ -456,10 +526,12 @@ } } - OperandInfoMapTy OperandInfoIDs; + OperandInfoMapTy MIOperandInfoIDs; + OperandInfoMapTy MCOperandInfoIDs; // 
Emit all of the operand info records. - EmitOperandInfo(OS, OperandInfoIDs); + EmitMIOperandInfo(OS, MIOperandInfoIDs); + EmitMCOperandInfo(OS, MCOperandInfoIDs); // Emit all of the MCInstrDesc records in their ENUM ordering. // @@ -473,7 +545,9 @@ // Keep a list of the instruction names. InstrNames.add(Inst->TheDef->getName()); // Emit the record into the table. - emitRecord(*Inst, Num++, InstrInfo, EmittedLists, OperandInfoIDs, OS); + emitRecord( + *Inst, Num++, InstrInfo, EmittedLists, + MIOperandInfoIDs, MCOperandInfoIDs, OS); } OS << "};\n\n"; @@ -542,24 +616,27 @@ emitOperandNameMappings(OS, Target, NumberedInstructions); - emitOperandTypesEnum(OS, Target); - emitMCIIHelperMethods(OS, TargetName); } void InstrInfoEmitter::emitRecord(const CodeGenInstruction &Inst, unsigned Num, Record *InstrInfo, - std::map, unsigned> &EmittedLists, - const OperandInfoMapTy &OpInfo, + std::map, unsigned> &EmittedLists, + const OperandInfoMapTy &MIOpInfo, + const OperandInfoMapTy &MCOpInfo, raw_ostream &OS) { int MinOperands = 0; - if (!Inst.Operands.empty()) + if (!Inst.Operands.empty()) { + // Each logical operand can be multiple MI operands. MinOperands = Inst.Operands.back().MIOperandNo + Inst.Operands.back().MINumOperands; + } OS << " { "; - OS << Num << ",\t" << MinOperands << ",\t" + OS << Num << ",\t" + << Inst.Operands.size() << ",\t" + << MinOperands << ",\t" << Inst.Operands.NumDefs << ",\t" << Inst.TheDef->getValueAsInt("Size") << ",\t" << SchedModels.getSchedClassIdx(Inst) << ",\t0"; @@ -634,11 +711,19 @@ OS << "ImplicitList" << EmittedLists[DefList] << ", "; // Emit the operand info. 
- std::vector OperandInfo = GetOperandInfo(Inst); - if (OperandInfo.empty()) + std::vector MIOperandInfo = + GetMIOperandInfo(CDP.getTargetInfo(), Inst); + if (MIOperandInfo.empty()) + OS << "nullptr"; + else + OS << "MIOperandInfo" << MIOpInfo.find(MIOperandInfo)->second; + OS << ", "; + + std::vector MCOperandInfo = GetMCOperandInfo(Inst); + if (MCOperandInfo.empty()) OS << "nullptr"; else - OS << "OperandInfo" << OpInfo.find(OperandInfo)->second; + OS << "MCOperandInfo" << MCOpInfo.find(MCOperandInfo)->second; if (Inst.HasComplexDeprecationPredicate) // Emit a function pointer to the complex predicate method.