diff --git a/llvm/include/llvm/MC/MCInstrDesc.h b/llvm/include/llvm/MC/MCInstrDesc.h --- a/llvm/include/llvm/MC/MCInstrDesc.h +++ b/llvm/include/llvm/MC/MCInstrDesc.h @@ -14,6 +14,7 @@ #ifndef LLVM_MC_MCINSTRDESC_H #define LLVM_MC_MCINSTRDESC_H +#include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/iterator_range.h" #include "llvm/MC/MCRegister.h" @@ -212,9 +213,9 @@ int getOperandConstraint(unsigned OpNum, MCOI::OperandConstraint Constraint) const { if (OpNum < NumOperands && - (OpInfo[OpNum].Constraints & (1 << Constraint))) { + (operands()[OpNum].Constraints & (1 << Constraint))) { unsigned ValuePos = 4 + Constraint * 4; - return (int)(OpInfo[OpNum].Constraints >> ValuePos) & 0x0f; + return (int)(operands()[OpNum].Constraints >> ValuePos) & 0x0f; } return -1; } @@ -234,8 +235,8 @@ const_opInfo_iterator opInfo_begin() const { return OpInfo; } const_opInfo_iterator opInfo_end() const { return OpInfo + NumOperands; } - iterator_range<const_opInfo_iterator> operands() const { - return make_range(opInfo_begin(), opInfo_end()); + ArrayRef<MCOperandInfo> operands() const { + return ArrayRef(OpInfo, NumOperands); } /// Return the number of MachineOperands that are register @@ -627,7 +628,7 @@ int findFirstPredOperandIdx() const { if (isPredicable()) { for (unsigned i = 0, e = getNumOperands(); i != e; ++i) - if (OpInfo[i].isPredicate()) + if (operands()[i].isPredicate()) return i; } return -1; diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerInfo.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerInfo.cpp --- a/llvm/lib/CodeGen/GlobalISel/LegalizerInfo.cpp +++ b/llvm/lib/CodeGen/GlobalISel/LegalizerInfo.cpp @@ -330,7 +330,7 @@ const MachineRegisterInfo &MRI) const { SmallVector<LLT, 8> Types; SmallBitVector SeenTypes(8); - const MCOperandInfo *OpInfo = MI.getDesc().OpInfo; + ArrayRef<MCOperandInfo> OpInfo = MI.getDesc().operands(); // FIXME: probably we'll need to cache the results here somehow? for (unsigned i = 0; i < MI.getDesc().getNumOperands(); ++i) { if (!OpInfo[i].isGenericType()) continue; @@ -379,14 +379,14 @@ for (unsigned Opcode = FirstOp; Opcode <= LastOp; ++Opcode) { const MCInstrDesc &MCID = MII.get(Opcode); const unsigned NumTypeIdxs = std::accumulate( - MCID.opInfo_begin(), MCID.opInfo_end(), 0U, + MCID.operands().begin(), MCID.operands().end(), 0U, [](unsigned Acc, const MCOperandInfo &OpInfo) { return OpInfo.isGenericType() ? std::max(OpInfo.getGenericTypeIndex() + 1U, Acc) : Acc; }); const unsigned NumImmIdxs = std::accumulate( - MCID.opInfo_begin(), MCID.opInfo_end(), 0U, + MCID.operands().begin(), MCID.operands().end(), 0U, [](unsigned Acc, const MCOperandInfo &OpInfo) { return OpInfo.isGenericImm() ? 
std::max(OpInfo.getGenericImmIndex() + 1U, Acc) diff --git a/llvm/lib/CodeGen/MachineInstr.cpp b/llvm/lib/CodeGen/MachineInstr.cpp --- a/llvm/lib/CodeGen/MachineInstr.cpp +++ b/llvm/lib/CodeGen/MachineInstr.cpp @@ -1086,7 +1086,7 @@ const MCInstrDesc &MCID = getDesc(); if (MCID.isPredicable()) { for (unsigned i = 0, e = getNumOperands(); i != e; ++i) - if (MCID.OpInfo[i].isPredicate()) + if (MCID.operands()[i].isPredicate()) return i; } @@ -1524,7 +1524,7 @@ if (isVariadic() || OpIdx >= getNumExplicitOperands()) return MRI.getType(Op.getReg()); - auto &OpInfo = getDesc().OpInfo[OpIdx]; + auto &OpInfo = getDesc().operands()[OpIdx]; if (!OpInfo.isGenericType()) return MRI.getType(Op.getReg()); diff --git a/llvm/lib/CodeGen/MachineVerifier.cpp b/llvm/lib/CodeGen/MachineVerifier.cpp --- a/llvm/lib/CodeGen/MachineVerifier.cpp +++ b/llvm/lib/CodeGen/MachineVerifier.cpp @@ -978,11 +978,11 @@ SmallVector<LLT, 4> Types; for (unsigned I = 0, E = std::min(MCID.getNumOperands(), NumOps); I != E; ++I) { - if (!MCID.OpInfo[I].isGenericType()) + if (!MCID.operands()[I].isGenericType()) continue; // Generic instructions specify type equality constraints between some of // their operands. Make sure these are consistent. - size_t TypeIdx = MCID.OpInfo[I].getGenericTypeIndex(); + size_t TypeIdx = MCID.operands()[I].getGenericTypeIndex(); Types.resize(std::max(TypeIdx + 1, Types.size())); const MachineOperand *MO = &MI->getOperand(I); @@ -1987,7 +1987,7 @@ // The first MCID.NumDefs operands must be explicit register defines if (MONum < NumDefs) { - const MCOperandInfo &MCOI = MCID.OpInfo[MONum]; + const MCOperandInfo &MCOI = MCID.operands()[MONum]; if (!MO->isReg()) report("Explicit definition must be a register", MO, MONum); else if (!MO->isDef() && !MCOI.isOptionalDef()) @@ -1995,7 +1995,7 @@ else if (MO->isImplicit()) report("Explicit definition marked as implicit", MO, MONum); } else if (MONum < MCID.getNumOperands()) { - const MCOperandInfo &MCOI = MCID.OpInfo[MONum]; + const MCOperandInfo &MCOI = MCID.operands()[MONum]; // Don't check if it's the last operand in a variadic instruction. See, // e.g., LDM_RET in the arm back end. Check non-variadic operands only. bool IsOptional = MI->isVariadic() && MONum == MCID.getNumOperands() - 1; diff --git a/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp b/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp --- a/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp @@ -218,7 +218,7 @@ RC = VTRC; } - if (II.OpInfo != nullptr && II.OpInfo[i].isOptionalDef()) { + if (!II.operands().empty() && II.operands()[i].isOptionalDef()) { // Optional def must be a physical register. VRBase = cast<RegisterSDNode>(Node->getOperand(i-NumResults))->getReg(); assert(VRBase.isPhysical()); @@ -304,7 +304,7 @@ const MCInstrDesc &MCID = MIB->getDesc(); bool isOptDef = IIOpNum < MCID.getNumOperands() && - MCID.OpInfo[IIOpNum].isOptionalDef(); + MCID.operands()[IIOpNum].isOptionalDef(); // If the instruction requires a register in a different class, create // a new virtual register and copy the value into it, but first attempt to diff --git a/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp b/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp --- a/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp @@ -1433,7 +1433,7 @@ // of %noreg. When the OptionalDef is set to a valid register, we need to // handle it in the same way as an ImplicitDef. 
for (unsigned i = 0; i < MCID.getNumDefs(); ++i) - if (MCID.OpInfo[i].isOptionalDef()) { + if (MCID.operands()[i].isOptionalDef()) { const SDValue &OptionalDef = Node->getOperand(i - Node->getNumValues()); Register Reg = cast<RegisterSDNode>(OptionalDef)->getReg(); CheckForLiveRegDef(SU, Reg, LiveRegDefs.get(), RegAdded, LRegs, TRI); diff --git a/llvm/lib/CodeGen/TargetInstrInfo.cpp b/llvm/lib/CodeGen/TargetInstrInfo.cpp --- a/llvm/lib/CodeGen/TargetInstrInfo.cpp +++ b/llvm/lib/CodeGen/TargetInstrInfo.cpp @@ -49,8 +49,8 @@ if (OpNum >= MCID.getNumOperands()) return nullptr; - short RegClass = MCID.OpInfo[OpNum].RegClass; - if (MCID.OpInfo[OpNum].isLookupPtrRegClass()) + short RegClass = MCID.operands()[OpNum].RegClass; + if (MCID.operands()[OpNum].isLookupPtrRegClass()) return TRI->getPointerRegClass(MF, RegClass); // Instructions like INSERT_SUBREG do not have fixed register classes. @@ -337,7 +337,7 @@ return false; for (unsigned j = 0, i = 0, e = MI.getNumOperands(); i != e; ++i) { - if (MCID.OpInfo[i].isPredicate()) { + if (MCID.operands()[i].isPredicate()) { MachineOperand &MO = MI.getOperand(i); if (MO.isReg()) { MO.setReg(Pred[j].getReg()); diff --git a/llvm/lib/CodeGen/TargetSchedule.cpp b/llvm/lib/CodeGen/TargetSchedule.cpp --- a/llvm/lib/CodeGen/TargetSchedule.cpp +++ b/llvm/lib/CodeGen/TargetSchedule.cpp @@ -222,9 +222,9 @@ // If DefIdx does not exist in the model (e.g. implicit defs), then return // unit latency (defaultDefLatency may be too conservative). #ifndef NDEBUG - if (SCDesc->isValid() && !DefMI->getOperand(DefOperIdx).isImplicit() - && !DefMI->getDesc().OpInfo[DefOperIdx].isOptionalDef() - && SchedModel.isComplete()) { + if (SCDesc->isValid() && !DefMI->getOperand(DefOperIdx).isImplicit() && + !DefMI->getDesc().operands()[DefOperIdx].isOptionalDef() && + SchedModel.isComplete()) { errs() << "DefIdx " << DefIdx << " exceeds machine model writes for " << *DefMI << " (Try with MCSchedModel.CompleteModel set to false)"; llvm_unreachable("incomplete machine model"); diff --git a/llvm/lib/MC/MCParser/AsmParser.cpp b/llvm/lib/MC/MCParser/AsmParser.cpp --- a/llvm/lib/MC/MCParser/AsmParser.cpp +++ b/llvm/lib/MC/MCParser/AsmParser.cpp @@ -6048,7 +6048,7 @@ InputDecls.push_back(OpDecl); InputDeclsAddressOf.push_back(Operand.needAddressOf()); InputConstraints.push_back(Constraint.str()); - if (Desc.OpInfo[i - 1].isBranchTarget()) + if (Desc.operands()[i - 1].isBranchTarget()) AsmStrRewrites.emplace_back(AOK_CallInput, Start, SymName.size(), 0, Restricted); else diff --git a/llvm/lib/MC/MCParser/MasmParser.cpp b/llvm/lib/MC/MCParser/MasmParser.cpp --- a/llvm/lib/MC/MCParser/MasmParser.cpp +++ b/llvm/lib/MC/MCParser/MasmParser.cpp @@ -7458,7 +7458,7 @@ InputDecls.push_back(OpDecl); InputDeclsAddressOf.push_back(Operand.needAddressOf()); InputConstraints.push_back(Constraint.str()); - if (Desc.OpInfo[i - 1].isBranchTarget()) + if (Desc.operands()[i - 1].isBranchTarget()) AsmStrRewrites.emplace_back(AOK_CallInput, Start, SymName.size()); else AsmStrRewrites.emplace_back(AOK_Input, Start, SymName.size()); diff --git a/llvm/lib/MCA/InstrBuilder.cpp b/llvm/lib/MCA/InstrBuilder.cpp --- a/llvm/lib/MCA/InstrBuilder.cpp +++ b/llvm/lib/MCA/InstrBuilder.cpp @@ -331,7 +331,7 @@ if (!Op.isReg()) continue; - if (MCDesc.OpInfo[CurrentDef].isOptionalDef()) { + if (MCDesc.operands()[CurrentDef].isOptionalDef()) { OptionalDefIdx = CurrentDef++; continue; } diff --git a/llvm/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp b/llvm/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp --- 
a/llvm/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp +++ b/llvm/lib/Target/AArch64/AArch64A57FPLoadBalancing.cpp @@ -515,7 +515,7 @@ } while (I != ChainBegin); // Make sure we allocate in-order, to get the cheapest registers first. - unsigned RegClassID = ChainBegin->getDesc().OpInfo[0].RegClass; + unsigned RegClassID = ChainBegin->getDesc().operands()[0].RegClass; auto Ord = RCI.getOrder(TRI->getRegClass(RegClassID)); for (auto Reg : Ord) { if (!Units.available(Reg)) diff --git a/llvm/lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp b/llvm/lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp --- a/llvm/lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp +++ b/llvm/lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp @@ -336,8 +336,8 @@ // operand for the accumulator (ZA) or implicit immediate zero which isn't // encoded, manually insert operand. for (unsigned i = 0; i < Desc.getNumOperands(); i++) { - if (Desc.OpInfo[i].OperandType == MCOI::OPERAND_REGISTER) { - switch (Desc.OpInfo[i].RegClass) { + if (Desc.operands()[i].OperandType == MCOI::OPERAND_REGISTER) { + switch (Desc.operands()[i].RegClass) { default: break; case AArch64::MPRRegClassID: @@ -350,7 +350,7 @@ MI.insert(MI.begin() + i, MCOperand::createReg(AArch64::ZT0)); break; } - } else if (Desc.OpInfo[i].OperandType == + } else if (Desc.operands()[i].OperandType == AArch64::OPERAND_IMPLICIT_IMM_0) { MI.insert(MI.begin() + i, MCOperand::createImm(0)); } diff --git a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp --- a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp +++ b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp @@ -411,7 +411,7 @@ // condition code) and cbz (where it is a register). 
const auto &Desc = Info->get(Inst.getOpcode()); for (unsigned i = 0, e = Inst.getNumOperands(); i != e; i++) { - if (Desc.OpInfo[i].OperandType == MCOI::OPERAND_PCREL) { + if (Desc.operands()[i].OperandType == MCOI::OPERAND_PCREL) { int64_t Imm = Inst.getOperand(i).getImm(); if (Inst.getOpcode() == AArch64::ADRP) Target = (Addr & -4096) + Imm * 4096; diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp --- a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp @@ -368,7 +368,7 @@ unsigned OpIdx = Desc.getNumDefs() + OpNo; if (OpIdx >= Desc.getNumOperands()) return nullptr; - int RegClass = Desc.OpInfo[OpIdx].RegClass; + int RegClass = Desc.operands()[OpIdx].RegClass; if (RegClass == -1) return nullptr; diff --git a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp --- a/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp +++ b/llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp @@ -2114,7 +2114,7 @@ } APInt Literal(64, Val); - uint8_t OpTy = InstDesc.OpInfo[OpNum].OperandType; + uint8_t OpTy = InstDesc.operands()[OpNum].OperandType; if (Imm.IsFPImm) { // We got fp literal token switch (OpTy) { @@ -3373,7 +3373,7 @@ case 4: return AMDGPU::isInlinableLiteral32(Val, hasInv2PiInlineImm()); case 2: { - const unsigned OperandType = Desc.OpInfo[OpIdx].OperandType; + const unsigned OperandType = Desc.operands()[OpIdx].OperandType; if (OperandType == AMDGPU::OPERAND_REG_IMM_INT16 || OperandType == AMDGPU::OPERAND_REG_INLINE_C_INT16 || OperandType == AMDGPU::OPERAND_REG_INLINE_AC_INT16) @@ -3512,7 +3512,7 @@ } } else { // Expression or a literal - if (Desc.OpInfo[OpIdx].OperandType == MCOI::OPERAND_IMMEDIATE) + if (Desc.operands()[OpIdx].OperandType == MCOI::OPERAND_IMMEDIATE) continue; // special operand like VINTERP attr_chan // An instruction may use only one literal. @@ -3872,7 +3872,7 @@ return true; const MCRegisterInfo *TRI = getContext().getRegisterInfo(); - if (TRI->getRegClass(Desc.OpInfo[0].RegClass).getSizeInBits() <= 128) + if (TRI->getRegClass(Desc.operands()[0].RegClass).getSizeInBits() <= 128) return true; if (TRI->regsOverlap(Src2Reg, DstReg)) { @@ -8075,14 +8075,16 @@ } static bool isRegOrImmWithInputMods(const MCInstrDesc &Desc, unsigned OpNum) { + return // 1. This operand is input modifiers - return Desc.OpInfo[OpNum].OperandType == AMDGPU::OPERAND_INPUT_MODS + Desc.operands()[OpNum].OperandType == AMDGPU::OPERAND_INPUT_MODS // 2. This is not last operand && Desc.NumOperands > (OpNum + 1) // 3. Next operand is register class - && Desc.OpInfo[OpNum + 1].RegClass != -1 + && Desc.operands()[OpNum + 1].RegClass != -1 // 4. 
Next register is not tied to any other operand - && Desc.getOperandConstraint(OpNum + 1, MCOI::OperandConstraint::TIED_TO) == -1; + && Desc.getOperandConstraint(OpNum + 1, + MCOI::OperandConstraint::TIED_TO) == -1; } void AMDGPUAsmParser::cvtVOP3Interp(MCInst &Inst, const OperandVector &Operands) @@ -8751,7 +8753,7 @@ } else if (Op.isReg()) { Op.addRegOperands(Inst, 1); } else if (Op.isImm() && - Desc.OpInfo[Inst.getNumOperands()].RegClass != -1) { + Desc.operands()[Inst.getNumOperands()].RegClass != -1) { assert(!Op.IsImmKindLiteral() && "Cannot use literal with DPP"); Op.addImmOperands(Inst, 1); } else if (Op.isImm()) { diff --git a/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp b/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp --- a/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp +++ b/llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp @@ -662,7 +662,8 @@ } else { for (unsigned i = 0; i < NSAArgs; ++i) { const unsigned VAddrIdx = VAddr0Idx + 1 + i; - auto VAddrRCID = MCII->get(MI.getOpcode()).OpInfo[VAddrIdx].RegClass; + auto VAddrRCID = + MCII->get(MI.getOpcode()).operands()[VAddrIdx].RegClass; MI.insert(MI.begin() + VAddrIdx, createRegOperand(VAddrRCID, Bytes[i])); } @@ -955,7 +956,7 @@ // Widen the register to the correct number of enabled channels. unsigned NewVdata = AMDGPU::NoRegister; if (DstSize != Info->VDataDwords) { - auto DataRCID = MCII->get(NewOpcode).OpInfo[VDataIdx].RegClass; + auto DataRCID = MCII->get(NewOpcode).operands()[VDataIdx].RegClass; // Get first subregister of VData unsigned Vdata0 = MI.getOperand(VDataIdx).getReg(); @@ -978,7 +979,7 @@ unsigned VAddrSub0 = MRI.getSubReg(VAddr0, AMDGPU::sub0); VAddr0 = (VAddrSub0 != 0) ? VAddrSub0 : VAddr0; - auto AddrRCID = MCII->get(NewOpcode).OpInfo[VAddr0Idx].RegClass; + auto AddrRCID = MCII->get(NewOpcode).operands()[VAddr0Idx].RegClass; NewVAddr0 = MRI.getMatchingSuperReg(VAddr0, AMDGPU::sub0, &MRI.getRegClass(AddrRCID)); if (NewVAddr0 == AMDGPU::NoRegister) @@ -1070,7 +1071,7 @@ assert(DescNumOps == MI.getNumOperands()); for (unsigned I = 0; I < DescNumOps; ++I) { auto &Op = MI.getOperand(I); - auto OpType = Desc.OpInfo[I].OperandType; + auto OpType = Desc.operands()[I].OperandType; bool IsDeferredOp = (OpType == AMDGPU::OPERAND_REG_IMM_FP32_DEFERRED || OpType == AMDGPU::OPERAND_REG_IMM_FP16_DEFERRED); if (Op.isImm() && Op.getImm() == AMDGPU::EncValues::LITERAL_CONST && diff --git a/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp b/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp --- a/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp +++ b/llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp @@ -817,7 +817,7 @@ int VDataIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdata); int VDataRCID = -1; if (VDataIdx != -1) - VDataRCID = Desc.OpInfo[VDataIdx].RegClass; + VDataRCID = Desc.operands()[VDataIdx].RegClass; if (TII->isMUBUF(MI) || TII->isMTBUF(MI)) { // There is no hazard if the instruction does not use vector regs @@ -842,13 +842,13 @@ if (TII->isMIMG(MI)) { int SRsrcIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::srsrc); assert(SRsrcIdx != -1 && - AMDGPU::getRegBitWidth(Desc.OpInfo[SRsrcIdx].RegClass) == 256); + AMDGPU::getRegBitWidth(Desc.operands()[SRsrcIdx].RegClass) == 256); (void)SRsrcIdx; } if (TII->isFLAT(MI)) { int DataIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdata); - if (AMDGPU::getRegBitWidth(Desc.OpInfo[DataIdx].RegClass) > 64) + if (AMDGPU::getRegBitWidth(Desc.operands()[DataIdx].RegClass) > 64) return DataIdx; } @@ -2813,7 +2813,7 @@ 
return true; } else { const MCInstrDesc &InstDesc = I.getDesc(); - const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpNo]; + const MCOperandInfo &OpInfo = InstDesc.operands()[OpNo]; if (!TII.isInlineConstant(Op, OpInfo)) return true; } diff --git a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp --- a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp +++ b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUInstPrinter.cpp @@ -671,7 +671,7 @@ // Check if operand register class contains register used. // Intention: print disassembler message when invalid code is decoded, // for example sgpr register used in VReg or VISrc(VReg or imm) operand. - int RCID = Desc.OpInfo[OpNo].RegClass; + int RCID = Desc.operands()[OpNo].RegClass; if (RCID != -1) { const MCRegisterClass RC = MRI.getRegClass(RCID); auto Reg = mc2PseudoReg(Op.getReg()); @@ -681,7 +681,7 @@ } } } else if (Op.isImm()) { - const uint8_t OpTy = Desc.OpInfo[OpNo].OperandType; + const uint8_t OpTy = Desc.operands()[OpNo].OperandType; switch (OpTy) { case AMDGPU::OPERAND_REG_IMM_INT32: case AMDGPU::OPERAND_REG_IMM_FP32: @@ -758,7 +758,7 @@ O << "0.0"; else { const MCInstrDesc &Desc = MII.get(MI->getOpcode()); - int RCID = Desc.OpInfo[OpNo].RegClass; + int RCID = Desc.operands()[OpNo].RegClass; unsigned RCBits = AMDGPU::getRegBitWidth(MRI.getRegClass(RCID)); if (RCBits == 32) printImmediate32(FloatToBits(Value), STI, O); @@ -925,7 +925,7 @@ AMDGPU::OpName::src0); if (Src0Idx >= 0 && - Desc.OpInfo[Src0Idx].RegClass == AMDGPU::VReg_64RegClassID && + Desc.operands()[Src0Idx].RegClass == AMDGPU::VReg_64RegClassID && !AMDGPU::isLegal64BitDPPControl(Imm)) { O << " /* 64 bit dpp only supports row_newbcast */"; return; diff --git a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCTargetDesc.cpp b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCTargetDesc.cpp --- a/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCTargetDesc.cpp +++ b/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUMCTargetDesc.cpp @@ -128,7 +128,7 @@ bool evaluateBranch(const MCInst &Inst, uint64_t Addr, uint64_t Size, uint64_t &Target) const override { if (Inst.getNumOperands() == 0 || !Inst.getOperand(0).isImm() || - Info->get(Inst.getOpcode()).OpInfo[0].OperandType != + Info->get(Inst.getOpcode()).operands()[0].OperandType != MCOI::OPERAND_PCREL) return false; diff --git a/llvm/lib/Target/AMDGPU/MCTargetDesc/SIMCCodeEmitter.cpp b/llvm/lib/Target/AMDGPU/MCTargetDesc/SIMCCodeEmitter.cpp --- a/llvm/lib/Target/AMDGPU/MCTargetDesc/SIMCCodeEmitter.cpp +++ b/llvm/lib/Target/AMDGPU/MCTargetDesc/SIMCCodeEmitter.cpp @@ -384,7 +384,7 @@ // Is this operand a literal immediate? 
const MCOperand &Op = MI.getOperand(i); - auto Enc = getLitEncoding(Op, Desc.OpInfo[i], STI); + auto Enc = getLitEncoding(Op, Desc.operands()[i], STI); if (!Enc || *Enc != 255) continue; @@ -456,7 +456,7 @@ return; } else { const MCInstrDesc &Desc = MCII.get(MI.getOpcode()); - auto Enc = getLitEncoding(MO, Desc.OpInfo[OpNo], STI); + auto Enc = getLitEncoding(MO, Desc.operands()[OpNo], STI); if (Enc && *Enc != 255) { Op = *Enc | SDWA9EncValues::SRC_SGPR_MASK; return; } @@ -579,7 +579,7 @@ const MCInstrDesc &Desc = MCII.get(MI.getOpcode()); if (AMDGPU::isSISrcOperand(Desc, OpNo)) { - if (auto Enc = getLitEncoding(MO, Desc.OpInfo[OpNo], STI)) { + if (auto Enc = getLitEncoding(MO, Desc.operands()[OpNo], STI)) { Op = *Enc; return; } diff --git a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp --- a/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp +++ b/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp @@ -214,7 +214,7 @@ if (!(Val & SISrcMods::OP_SEL_0) && (Val & SISrcMods::OP_SEL_1)) { // Only apply the following transformation if that operand requires // a packed immediate. - switch (TII->get(Opcode).OpInfo[OpNo].OperandType) { + switch (TII->get(Opcode).operands()[OpNo].OperandType) { case AMDGPU::OPERAND_REG_IMM_V2FP16: case AMDGPU::OPERAND_REG_IMM_V2INT16: case AMDGPU::OPERAND_REG_INLINE_C_V2FP16: @@ -436,7 +436,7 @@ // scalar instruction if (TII->isSALU(MI->getOpcode())) { const MCInstrDesc &InstDesc = MI->getDesc(); - const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpNo]; + const MCOperandInfo &OpInfo = InstDesc.operands()[OpNo]; // Fine if the operand can be encoded as an inline constant if (!OpToFold->isReg() && !TII->isInlineConstant(*OpToFold, OpInfo)) { @@ -498,11 +498,10 @@ const MachineOperand &OpToFold, MachineInstr *UseMI, unsigned UseOpIdx, SmallVectorImpl<FoldCandidate> &FoldList) const { const MCInstrDesc &Desc = UseMI->getDesc(); - const MCOperandInfo *OpInfo = Desc.OpInfo; - if (!OpInfo || UseOpIdx >= Desc.getNumOperands()) + if (UseOpIdx >= Desc.getNumOperands()) return false; - uint8_t OpTy = OpInfo[UseOpIdx].OperandType; + uint8_t OpTy = Desc.operands()[UseOpIdx].OperandType; if ((OpTy < AMDGPU::OPERAND_REG_INLINE_AC_FIRST || OpTy > AMDGPU::OPERAND_REG_INLINE_AC_LAST) && (OpTy < AMDGPU::OPERAND_REG_INLINE_C_FIRST || @@ -859,9 +858,8 @@ // Don't fold into target independent nodes. Target independent opcodes // don't have defined register classes. - if (UseDesc.isVariadic() || - UseOp.isImplicit() || - UseDesc.OpInfo[UseOpIdx].RegClass == -1) + if (UseDesc.isVariadic() || UseOp.isImplicit() || + UseDesc.operands()[UseOpIdx].RegClass == -1) return; } @@ -892,7 +890,7 @@ const MCInstrDesc &FoldDesc = OpToFold.getParent()->getDesc(); const TargetRegisterClass *FoldRC = - TRI->getRegClass(FoldDesc.OpInfo[0].RegClass); + TRI->getRegClass(FoldDesc.operands()[0].RegClass); // Split 64-bit constants into 32-bits for folding. if (UseOp.getSubReg() && AMDGPU::getRegBitWidth(FoldRC->getID()) == 64) { diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp --- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp +++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp @@ -12114,7 +12114,7 @@ // Prefer VGPRs over AGPRs in mAI instructions where possible. // This saves a chain-copy of registers and better balance register // use between vgpr and agpr as agpr tuples tend to be big. 
- if (MI.getDesc().OpInfo) { + if (!MI.getDesc().operands().empty()) { unsigned Opc = MI.getOpcode(); const SIRegisterInfo *TRI = Subtarget->getRegisterInfo(); for (auto I : { AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0), diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.h b/llvm/lib/Target/AMDGPU/SIInstrInfo.h --- a/llvm/lib/Target/AMDGPU/SIInstrInfo.h +++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.h @@ -842,23 +842,22 @@ const MachineOperand &DefMO) const { assert(UseMO.getParent() == &MI); int OpIdx = MI.getOperandNo(&UseMO); - if (!MI.getDesc().OpInfo || OpIdx >= MI.getDesc().NumOperands) { + if (OpIdx >= MI.getDesc().NumOperands) return false; - } - return isInlineConstant(DefMO, MI.getDesc().OpInfo[OpIdx]); + return isInlineConstant(DefMO, MI.getDesc().operands()[OpIdx]); } /// \p returns true if the operand \p OpIdx in \p MI is a valid inline /// immediate. bool isInlineConstant(const MachineInstr &MI, unsigned OpIdx) const { const MachineOperand &MO = MI.getOperand(OpIdx); - return isInlineConstant(MO, MI.getDesc().OpInfo[OpIdx].OperandType); + return isInlineConstant(MO, MI.getDesc().operands()[OpIdx].OperandType); } bool isInlineConstant(const MachineInstr &MI, unsigned OpIdx, const MachineOperand &MO) const { - if (!MI.getDesc().OpInfo || OpIdx >= MI.getDesc().NumOperands) + if (OpIdx >= MI.getDesc().NumOperands) return false; if (MI.isCopy()) { @@ -870,7 +869,7 @@ return isInlineConstant(MO, OpType); } - return isInlineConstant(MO, MI.getDesc().OpInfo[OpIdx].OperandType); + return isInlineConstant(MO, MI.getDesc().operands()[OpIdx].OperandType); } bool isInlineConstant(const MachineOperand &MO) const { @@ -920,7 +919,7 @@ /// Return the size in bytes of the operand OpNo on the given // instruction opcode. unsigned getOpSize(uint16_t Opcode, unsigned OpNo) const { - const MCOperandInfo &OpInfo = get(Opcode).OpInfo[OpNo]; + const MCOperandInfo &OpInfo = get(Opcode).operands()[OpNo]; if (OpInfo.RegClass == -1) { // If this is an immediate operand, this must be a 32-bit literal. 
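Because ArrayRef carries its own length, the raw-pointer null tests like the `MI.getDesc().OpInfo` guard removed above become emptiness or bounds checks. A minimal sketch of the idiom, with hypothetical Desc, Idx, and RC names rather than code from this patch:

  const MCInstrDesc &Desc = MI.getDesc();
  // Old: if (Desc.OpInfo && Idx < Desc.getNumOperands()) use Desc.OpInfo[Idx].
  // New: operands() is never "null"; size() is the only meaningful guard.
  int RC = -1;
  if (Idx < Desc.operands().size())
    RC = Desc.operands()[Idx].RegClass;
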
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp --- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp +++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp @@ -3851,7 +3851,7 @@ bool SIInstrInfo::isImmOperandLegal(const MachineInstr &MI, unsigned OpNo, const MachineOperand &MO) const { const MCInstrDesc &InstDesc = MI.getDesc(); - const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpNo]; + const MCOperandInfo &OpInfo = InstDesc.operands()[OpNo]; assert(MO.isImm() || MO.isTargetIndex() || MO.isFI() || MO.isGlobal()); @@ -4169,9 +4169,9 @@ return false; } - int RegClass = Desc.OpInfo[i].RegClass; + int RegClass = Desc.operands()[i].RegClass; - switch (Desc.OpInfo[i].OperandType) { + switch (Desc.operands()[i].OperandType) { case MCOI::OPERAND_REGISTER: if (MI.getOperand(i).isImm() || MI.getOperand(i).isGlobal()) { ErrInfo = "Illegal immediate value for operand."; @@ -4401,7 +4401,7 @@ if (OpIdx == -1) continue; const MachineOperand &MO = MI.getOperand(OpIdx); - if (usesConstantBus(MRI, MO, MI.getDesc().OpInfo[OpIdx])) { + if (usesConstantBus(MRI, MO, MI.getDesc().operands()[OpIdx])) { if (MO.isReg()) { SGPRUsed = MO.getReg(); if (!llvm::is_contained(SGPRsUsed, SGPRUsed)) { @@ -4459,7 +4459,7 @@ const MachineOperand &MO = MI.getOperand(OpIdx); - if (usesConstantBus(MRI, MO, MI.getDesc().OpInfo[OpIdx])) { + if (usesConstantBus(MRI, MO, MI.getDesc().operands()[OpIdx])) { if (MO.isReg() && MO.getReg() != AMDGPU::M0) { if (MO.getReg() != SGPRUsed) ++SGPRCount; @@ -4502,8 +4502,8 @@ const MachineOperand &Src1 = MI.getOperand(Src1Idx); if (!Src0.isReg() && !Src1.isReg() && - !isInlineConstant(Src0, Desc.OpInfo[Src0Idx]) && - !isInlineConstant(Src1, Desc.OpInfo[Src1Idx]) && + !isInlineConstant(Src0, Desc.operands()[Src0Idx]) && + !isInlineConstant(Src1, Desc.operands()[Src1Idx]) && !Src0.isIdenticalTo(Src1)) { ErrInfo = "SOP2/SOPC instruction requires too many immediate constants"; return false; @@ -4704,11 +4704,12 @@ if (Opcode != AMDGPU::V_MOV_B64_DPP_PSEUDO && ((DstIdx >= 0 && - (Desc.OpInfo[DstIdx].RegClass == AMDGPU::VReg_64RegClassID || - Desc.OpInfo[DstIdx].RegClass == AMDGPU::VReg_64_Align2RegClassID)) || + (Desc.operands()[DstIdx].RegClass == AMDGPU::VReg_64RegClassID || + Desc.operands()[DstIdx].RegClass == + AMDGPU::VReg_64_Align2RegClassID)) || ((Src0Idx >= 0 && - (Desc.OpInfo[Src0Idx].RegClass == AMDGPU::VReg_64RegClassID || - Desc.OpInfo[Src0Idx].RegClass == + (Desc.operands()[Src0Idx].RegClass == AMDGPU::VReg_64RegClassID || + Desc.operands()[Src0Idx].RegClass == AMDGPU::VReg_64_Align2RegClassID)))) && !AMDGPU::isLegal64BitDPPControl(DC)) { ErrInfo = "Invalid dpp_ctrl value: " @@ -4927,7 +4928,7 @@ const { if (OpNum >= TID.getNumOperands()) return nullptr; - auto RegClass = TID.OpInfo[OpNum].RegClass; + auto RegClass = TID.operands()[OpNum].RegClass; bool IsAllocatable = false; if (TID.TSFlags & (SIInstrFlags::DS | SIInstrFlags::FLAT)) { // vdst and vdata should be both VGPR or AGPR, same for the DS instructions @@ -4956,7 +4957,7 @@ const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); const MCInstrDesc &Desc = get(MI.getOpcode()); if (MI.isVariadic() || OpNo >= Desc.getNumOperands() || - Desc.OpInfo[OpNo].RegClass == -1) { + Desc.operands()[OpNo].RegClass == -1) { Register Reg = MI.getOperand(OpNo).getReg(); if (Reg.isVirtual()) @@ -4964,7 +4965,7 @@ return RI.getPhysRegBaseClass(Reg); } - unsigned RCID = Desc.OpInfo[OpNo].RegClass; + unsigned RCID = Desc.operands()[OpNo].RegClass; return adjustAllocatableRegClass(ST, RI, MRI, Desc, RCID, 
true); } @@ -4973,7 +4974,7 @@ MachineBasicBlock *MBB = MI.getParent(); MachineOperand &MO = MI.getOperand(OpIdx); MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); - unsigned RCID = get(MI.getOpcode()).OpInfo[OpIdx].RegClass; + unsigned RCID = get(MI.getOpcode()).operands()[OpIdx].RegClass; const TargetRegisterClass *RC = RI.getRegClass(RCID); unsigned Size = RI.getRegSizeInBits(*RC); unsigned Opcode = (Size == 64) ? AMDGPU::V_MOV_B64_PSEUDO : AMDGPU::V_MOV_B32_e32; @@ -5099,7 +5100,7 @@ const MachineFunction &MF = *MI.getParent()->getParent(); const MachineRegisterInfo &MRI = MF.getRegInfo(); const MCInstrDesc &InstDesc = MI.getDesc(); - const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpIdx]; + const MCOperandInfo &OpInfo = InstDesc.operands()[OpIdx]; const TargetRegisterClass *DefinedRC = OpInfo.RegClass != -1 ? RI.getRegClass(OpInfo.RegClass) : nullptr; if (!MO) @@ -5122,14 +5123,15 @@ if (Op.isReg()) { RegSubRegPair SGPR(Op.getReg(), Op.getSubReg()); if (!SGPRsUsed.count(SGPR) && - usesConstantBus(MRI, Op, InstDesc.OpInfo[i])) { + // FIXME: This can access off the end of the operands() array. + usesConstantBus(MRI, Op, InstDesc.operands().begin()[i])) { if (--ConstantBusLimit <= 0) return false; SGPRsUsed.insert(SGPR); } - } else if (InstDesc.OpInfo[i].OperandType == AMDGPU::OPERAND_KIMM32 || + } else if (InstDesc.operands()[i].OperandType == AMDGPU::OPERAND_KIMM32 || (AMDGPU::isSISrcOperand(InstDesc, i) && - !isInlineConstant(Op, InstDesc.OpInfo[i]))) { + !isInlineConstant(Op, InstDesc.operands()[i]))) { if (!LiteralLimit--) return false; if (--ConstantBusLimit <= 0) @@ -5236,7 +5238,7 @@ // VOP2 src0 instructions support all operand types, so we don't need to check // their legality. If src1 is already legal, we don't need to do anything. - if (isLegalRegOperand(MRI, InstrDesc.OpInfo[Src1Idx], Src1)) + if (isLegalRegOperand(MRI, InstrDesc.operands()[Src1Idx], Src1)) return; // Special case: V_READLANE_B32 accepts only immediate or SGPR operands for @@ -5267,7 +5269,7 @@ // TODO: Other immediate-like operand kinds could be commuted if there was a // MachineOperand::ChangeTo* for them. if ((!Src1.isImm() && !Src1.isReg()) || - !isLegalRegOperand(MRI, InstrDesc.OpInfo[Src1Idx], Src0)) { + !isLegalRegOperand(MRI, InstrDesc.operands()[Src1Idx], Src0)) { legalizeOpWithMove(MI, Src1Idx); return; } @@ -5345,7 +5347,7 @@ MachineOperand &MO = MI.getOperand(Idx); if (!MO.isReg()) { - if (isInlineConstant(MO, get(Opc).OpInfo[Idx])) + if (isInlineConstant(MO, get(Opc).operands()[Idx])) continue; if (LiteralLimit > 0 && ConstantBusLimit > 0) { @@ -6016,7 +6018,7 @@ if (RsrcIdx != -1) { // We have an MUBUF instruction MachineOperand *Rsrc = &MI.getOperand(RsrcIdx); - unsigned RsrcRC = get(MI.getOpcode()).OpInfo[RsrcIdx].RegClass; + unsigned RsrcRC = get(MI.getOpcode()).operands()[RsrcIdx].RegClass; if (RI.getCommonSubClass(MRI.getRegClass(Rsrc->getReg()), RI.getRegClass(RsrcRC))) { // The operands are legal. @@ -7427,7 +7429,8 @@ // Is this operand statically required to be an SGPR based on the operand // constraints? 
- const TargetRegisterClass *OpRC = RI.getRegClass(Desc.OpInfo[Idx].RegClass); + const TargetRegisterClass *OpRC = + RI.getRegClass(Desc.operands()[Idx].RegClass); bool IsRequiredSGPR = RI.isSGPRClass(OpRC); if (IsRequiredSGPR) return MO.getReg(); @@ -7621,7 +7624,7 @@ bool HasLiteral = false; for (int I = 0, E = MI.getNumExplicitOperands(); I != E; ++I) { const MachineOperand &Op = MI.getOperand(I); - const MCOperandInfo &OpInfo = Desc.OpInfo[I]; + const MCOperandInfo &OpInfo = Desc.operands()[I]; if (!Op.isReg() && !isInlineConstant(Op, OpInfo)) { HasLiteral = true; break; @@ -7895,7 +7898,7 @@ if (Idx == -1) // e.g. s_memtime return false; - const auto RCID = MI.getDesc().OpInfo[Idx].RegClass; + const auto RCID = MI.getDesc().operands()[Idx].RegClass; return RI.getRegClass(RCID)->hasSubClassEq(&AMDGPU::SGPR_128RegClass); } diff --git a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp --- a/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp +++ b/llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp @@ -1159,8 +1159,8 @@ continue; unsigned I = MI.getOperandNo(&Op); - if (Desc.OpInfo[I].RegClass == -1 || - !TRI->isVSSuperClass(TRI->getRegClass(Desc.OpInfo[I].RegClass))) + if (Desc.operands()[I].RegClass == -1 || + !TRI->isVSSuperClass(TRI->getRegClass(Desc.operands()[I].RegClass))) continue; if (ST.hasSDWAScalar() && ConstantBusCount == 0 && Op.isReg() && diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h --- a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h +++ b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h @@ -1209,7 +1209,7 @@ LLVM_READNONE inline unsigned getOperandSize(const MCInstrDesc &Desc, unsigned OpNo) { - return getOperandSize(Desc.OpInfo[OpNo]); + return getOperandSize(Desc.operands()[OpNo]); } /// Is this literal inlinable, and not one of the values intended for floating diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp --- a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp +++ b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp @@ -539,7 +539,7 @@ auto OperandsNum = OpDesc.getNumOperands(); unsigned CompOprIdx; for (CompOprIdx = Component::SRC1; CompOprIdx < OperandsNum; ++CompOprIdx) { - if (OpDesc.OpInfo[CompOprIdx].OperandType == AMDGPU::OPERAND_KIMM32) { + if (OpDesc.operands()[CompOprIdx].OperandType == AMDGPU::OPERAND_KIMM32) { MandatoryLiteralIdx = CompOprIdx; break; } @@ -2133,21 +2133,21 @@ bool isSISrcOperand(const MCInstrDesc &Desc, unsigned OpNo) { assert(OpNo < Desc.NumOperands); - unsigned OpType = Desc.OpInfo[OpNo].OperandType; + unsigned OpType = Desc.operands()[OpNo].OperandType; return OpType >= AMDGPU::OPERAND_SRC_FIRST && OpType <= AMDGPU::OPERAND_SRC_LAST; } bool isKImmOperand(const MCInstrDesc &Desc, unsigned OpNo) { assert(OpNo < Desc.NumOperands); - unsigned OpType = Desc.OpInfo[OpNo].OperandType; + unsigned OpType = Desc.operands()[OpNo].OperandType; return OpType >= AMDGPU::OPERAND_KIMM_FIRST && OpType <= AMDGPU::OPERAND_KIMM_LAST; } bool isSISrcFPOperand(const MCInstrDesc &Desc, unsigned OpNo) { assert(OpNo < Desc.NumOperands); - unsigned OpType = Desc.OpInfo[OpNo].OperandType; + unsigned OpType = Desc.operands()[OpNo].OperandType; switch (OpType) { case AMDGPU::OPERAND_REG_IMM_FP32: case AMDGPU::OPERAND_REG_IMM_FP32_DEFERRED: @@ -2176,7 +2176,7 @@ bool isSISrcInlinableOperand(const MCInstrDesc &Desc, unsigned OpNo) { assert(OpNo < Desc.NumOperands); - unsigned OpType = Desc.OpInfo[OpNo].OperandType; + unsigned OpType = 
Desc.operands()[OpNo].OperandType; return OpType >= AMDGPU::OPERAND_REG_INLINE_C_FIRST && OpType <= AMDGPU::OPERAND_REG_INLINE_C_LAST; } @@ -2331,7 +2331,7 @@ unsigned getRegOperandSize(const MCRegisterInfo *MRI, const MCInstrDesc &Desc, unsigned OpNo) { assert(OpNo < Desc.NumOperands); - unsigned RCID = Desc.OpInfo[OpNo].RegClass; + unsigned RCID = Desc.operands()[OpNo].RegClass; return getRegBitWidth(MRI->getRegClass(RCID)) / 8; } diff --git a/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp b/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp --- a/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp +++ b/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp @@ -619,7 +619,8 @@ // IT block. This affects how they are printed. const MCInstrDesc &MCID = MI.getDesc(); if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) { - assert(MCID.OpInfo[1].isOptionalDef() && "CPSR def isn't expected operand"); + assert(MCID.operands()[1].isOptionalDef() && + "CPSR def isn't expected operand"); assert((MI.getOperand(1).isDead() || MI.getOperand(1).getReg() != ARM::CPSR) && "if conversion tried to stop defining used CPSR"); @@ -2382,7 +2383,7 @@ // Copy all the DefMI operands, excluding its (null) predicate. const MCInstrDesc &DefDesc = DefMI->getDesc(); for (unsigned i = 1, e = DefDesc.getNumOperands(); - i != e && !DefDesc.OpInfo[i].isPredicate(); ++i) + i != e && !DefDesc.operands()[i].isPredicate(); ++i) NewMI.add(DefMI->getOperand(i)); unsigned CondCode = MI.getOperand(3).getImm(); diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp --- a/llvm/lib/Target/ARM/ARMISelLowering.cpp +++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp @@ -12289,7 +12289,7 @@ // Any ARM instruction that sets the 's' bit should specify an optional // "cc_out" operand in the last operand position. - if (!MI.hasOptionalDef() || !MCID->OpInfo[ccOutIdx].isOptionalDef()) { + if (!MI.hasOptionalDef() || !MCID->operands()[ccOutIdx].isOptionalDef()) { assert(!NewOpc && "Optional cc_out operand required"); return; } diff --git a/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp b/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp --- a/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp +++ b/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp @@ -7626,7 +7626,7 @@ static int findFirstVectorPredOperandIdx(const MCInstrDesc &MCID) { for (unsigned i = 0; i < MCID.NumOperands; ++i) { - if (ARM::isVpred(MCID.OpInfo[i].OperandType)) + if (ARM::isVpred(MCID.operands()[i].OperandType)) return i; } return -1; @@ -7679,7 +7679,7 @@ // to keep instructions the same shape even though one cannot // legally be predicated, e.g. vmul.f16 vs vmul.f32. for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i) { - if (MCID.OpInfo[i].isPredicate()) { + if (MCID.operands()[i].isPredicate()) { if (Inst.getOperand(i).getImm() != ARMCC::AL) return Error(Loc, "instruction is not predicable"); break; @@ -10755,7 +10755,7 @@ // Find the optional-def operand (cc_out). unsigned OpNo; for (OpNo = 0; - !MCID.OpInfo[OpNo].isOptionalDef() && OpNo < MCID.NumOperands; + !MCID.operands()[OpNo].isOptionalDef() && OpNo < MCID.NumOperands; ++OpNo) ; // If we're parsing Thumb1, reject it completely. 
@@ -10833,7 +10833,7 @@ } for (unsigned I = 0; I < MCID.NumOperands; ++I) - if (MCID.OpInfo[I].RegClass == ARM::rGPRRegClassID) { + if (MCID.operands()[I].RegClass == ARM::rGPRRegClassID) { // rGPRRegClass excludes PC, and also excluded SP before ARMv8 const auto &Op = Inst.getOperand(I); if (!Op.isReg()) { diff --git a/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp b/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp --- a/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp +++ b/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp @@ -887,9 +887,9 @@ MCInst::iterator I = MI.begin(); for (unsigned i = 0; i < MCID.NumOperands; ++i, ++I) { if (I == MI.end()) break; - if (MCID.OpInfo[i].isOptionalDef() && - MCID.OpInfo[i].RegClass == ARM::CCRRegClassID) { - if (i > 0 && MCID.OpInfo[i - 1].isPredicate()) + if (MCID.operands()[i].isOptionalDef() && + MCID.operands()[i].RegClass == ARM::CCRRegClassID) { + if (i > 0 && MCID.operands()[i - 1].isPredicate()) continue; MI.insert(I, MCOperand::createReg(InITBlock ? 0 : ARM::CPSR)); return; @@ -902,7 +902,7 @@ bool ARMDisassembler::isVectorPredicable(const MCInst &MI) const { const MCInstrDesc &MCID = MCII->get(MI.getOpcode()); for (unsigned i = 0; i < MCID.NumOperands; ++i) { - if (ARM::isVpred(MCID.OpInfo[i].OperandType)) + if (ARM::isVpred(MCID.operands()[i].OperandType)) return true; } return false; @@ -981,7 +981,7 @@ MCInst::iterator CCI = MI.begin(); for (unsigned i = 0; i < MCID.NumOperands; ++i, ++CCI) { - if (MCID.OpInfo[i].isPredicate() || CCI == MI.end()) + if (MCID.operands()[i].isPredicate() || CCI == MI.end()) break; } @@ -999,7 +999,7 @@ MCInst::iterator VCCI = MI.begin(); unsigned VCCPos; for (VCCPos = 0; VCCPos < MCID.NumOperands; ++VCCPos, ++VCCI) { - if (ARM::isVpred(MCID.OpInfo[VCCPos].OperandType) || VCCI == MI.end()) + if (ARM::isVpred(MCID.operands()[VCCPos].OperandType) || VCCI == MI.end()) break; } @@ -1013,7 +1013,7 @@ ++VCCI; VCCI = MI.insert(VCCI, MCOperand::createReg(0)); ++VCCI; - if (MCID.OpInfo[VCCPos].OperandType == ARM::OPERAND_VPRED_R) { + if (MCID.operands()[VCCPos].OperandType == ARM::OPERAND_VPRED_R) { int TiedOp = MCID.getOperandConstraint(VCCPos + 3, MCOI::TIED_TO); assert(TiedOp >= 0 && "Inactive register in vpred_r is not tied to an output!"); @@ -1046,7 +1046,7 @@ } const MCInstrDesc &MCID = MCII->get(MI.getOpcode()); - const MCOperandInfo *OpInfo = MCID.OpInfo; + ArrayRef<MCOperandInfo> OpInfo = MCID.operands(); MCInst::iterator I = MI.begin(); unsigned short NumOps = MCID.NumOperands; for (unsigned i = 0; i < NumOps; ++i, ++I) { diff --git a/llvm/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp b/llvm/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp --- a/llvm/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp +++ b/llvm/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp @@ -178,7 +178,7 @@ for (unsigned I = 0; I < MI.getNumOperands(); ++I) { const MCOperand &MO = MI.getOperand(I); if (MO.isReg() && MO.getReg() == ARM::CPSR && - Desc.OpInfo[I].isOptionalDef()) + Desc.operands()[I].isOptionalDef()) return true; } return false; @@ -422,7 +422,7 @@ // Find the PC-relative immediate operand in the instruction. for (unsigned OpNum = 0; OpNum < Desc.getNumOperands(); ++OpNum) { if (Inst.getOperand(OpNum).isImm() && - Desc.OpInfo[OpNum].OperandType == MCOI::OPERAND_PCREL) { + Desc.operands()[OpNum].OperandType == MCOI::OPERAND_PCREL) { int64_t Imm = Inst.getOperand(OpNum).getImm(); Target = ARM_MC::evaluateBranchTarget(Desc, Addr, Imm); return true; } @@ -578,7 +578,7 @@ // Find the memory addressing operand in the instruction. 
unsigned OpIndex = Desc.NumDefs; while (OpIndex < Desc.getNumOperands() && - Desc.OpInfo[OpIndex].OperandType != MCOI::OPERAND_MEMORY) + Desc.operands()[OpIndex].OperandType != MCOI::OPERAND_MEMORY) ++OpIndex; if (OpIndex == Desc.getNumOperands()) return std::nullopt; diff --git a/llvm/lib/Target/ARM/Thumb2InstrInfo.cpp b/llvm/lib/Target/ARM/Thumb2InstrInfo.cpp --- a/llvm/lib/Target/ARM/Thumb2InstrInfo.cpp +++ b/llvm/lib/Target/ARM/Thumb2InstrInfo.cpp @@ -776,11 +776,8 @@ int llvm::findFirstVPTPredOperandIdx(const MachineInstr &MI) { const MCInstrDesc &MCID = MI.getDesc(); - if (!MCID.OpInfo) - return -1; - for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i) - if (ARM::isVpred(MCID.OpInfo[i].OperandType)) + if (ARM::isVpred(MCID.operands()[i].OperandType)) return i; return -1; diff --git a/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp b/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp --- a/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp +++ b/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp @@ -839,9 +839,9 @@ // Transfer the rest of operands. unsigned NumOps = MCID.getNumOperands(); for (unsigned i = 1, e = MI->getNumOperands(); i != e; ++i) { - if (i < NumOps && MCID.OpInfo[i].isOptionalDef()) + if (i < NumOps && MCID.operands()[i].isOptionalDef()) continue; - if (SkipPred && MCID.OpInfo[i].isPredicate()) + if (SkipPred && MCID.operands()[i].isPredicate()) continue; MIB.add(MI->getOperand(i)); } @@ -875,7 +875,7 @@ const MCInstrDesc &MCID = MI->getDesc(); for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i) { - if (MCID.OpInfo[i].isPredicate()) + if (MCID.operands()[i].isPredicate()) continue; const MachineOperand &MO = MI->getOperand(i); if (MO.isReg()) { @@ -884,8 +884,7 @@ continue; if (Entry.LowRegs1 && !isARMLowRegister(Reg)) return false; - } else if (MO.isImm() && - !MCID.OpInfo[i].isPredicate()) { + } else if (MO.isImm() && !MCID.operands()[i].isPredicate()) { if (((unsigned)MO.getImm()) > Limit) return false; } @@ -946,7 +945,7 @@ // Transfer the rest of operands. unsigned NumOps = MCID.getNumOperands(); for (unsigned i = 1, e = MI->getNumOperands(); i != e; ++i) { - if (i < NumOps && MCID.OpInfo[i].isOptionalDef()) + if (i < NumOps && MCID.operands()[i].isOptionalDef()) continue; if ((MCID.getOpcode() == ARM::t2RSBSri || MCID.getOpcode() == ARM::t2RSBri || @@ -956,7 +955,7 @@ MCID.getOpcode() == ARM::t2UXTH) && i == 2) // Skip the zero immediate operand, it's now implicit. continue; - bool isPred = (i < NumOps && MCID.OpInfo[i].isPredicate()); + bool isPred = (i < NumOps && MCID.operands()[i].isPredicate()); if (SkipPred && isPred) continue; const MachineOperand &MO = MI->getOperand(i); diff --git a/llvm/lib/Target/AVR/MCTargetDesc/AVRInstPrinter.cpp b/llvm/lib/Target/AVR/MCTargetDesc/AVRInstPrinter.cpp --- a/llvm/lib/Target/AVR/MCTargetDesc/AVRInstPrinter.cpp +++ b/llvm/lib/Target/AVR/MCTargetDesc/AVRInstPrinter.cpp @@ -100,7 +100,7 @@ void AVRInstPrinter::printOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O) { - const MCOperandInfo &MOI = this->MII.get(MI->getOpcode()).OpInfo[OpNo]; + const MCOperandInfo &MOI = this->MII.get(MI->getOpcode()).operands()[OpNo]; if (MOI.RegClass == AVR::ZREGRegClassID) { // Special case for the Z register, which sometimes doesn't have an operand // in the MCInst. 
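Since operands() is a random-access range, manual index scans like findFirstVPTPredOperandIdx above could also be written with range-based iteration in a follow-up cleanup. A hedged sketch using llvm::enumerate from ADT/STLExtras.h (not a change made by this patch):

  // Hypothetical rewrite: find the first predicate operand while still
  // tracking the index that the caller needs.
  for (const auto &En : llvm::enumerate(MCID.operands()))
    if (En.value().isPredicate())
      return static_cast<int>(En.index());
  return -1;
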
diff --git a/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCChecker.cpp b/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCChecker.cpp --- a/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCChecker.cpp +++ b/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCChecker.cpp @@ -480,7 +480,7 @@ MCInstrDesc const &Desc = HexagonMCInstrInfo::getDesc(MCII, *ProducerInst); const unsigned ProducerOpIndex = std::get<1>(Producer); - if (Desc.OpInfo[ProducerOpIndex].RegClass == + if (Desc.operands()[ProducerOpIndex].RegClass == Hexagon::DoubleRegsRegClassID) { reportNote(ProducerInst->getLoc(), "Double registers cannot be new-value producers"); diff --git a/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCCodeEmitter.cpp b/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCCodeEmitter.cpp --- a/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCCodeEmitter.cpp +++ b/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCCodeEmitter.cpp @@ -773,7 +773,9 @@ assert(!MO.isImm()); if (MO.isReg()) { unsigned Reg = MO.getReg(); - switch (HexagonMCInstrInfo::getDesc(MCII, MI).OpInfo[OperandNumber].RegClass) { + switch (HexagonMCInstrInfo::getDesc(MCII, MI) + .operands()[OperandNumber] + .RegClass) { case GeneralSubRegsRegClassID: case GeneralDoubleLow8RegsRegClassID: return HexagonMCInstrInfo::getDuplexRegisterNumbering(Reg); diff --git a/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCInstrInfo.cpp b/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCInstrInfo.cpp --- a/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCInstrInfo.cpp +++ b/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonMCInstrInfo.cpp @@ -762,7 +762,7 @@ MCInstrDesc const &Desc = HexagonMCInstrInfo::getDesc(MCII, Inst); return Inst.getOperand(I).isReg() && - Desc.OpInfo[I].RegClass == Hexagon::PredRegsRegClassID; + Desc.operands()[I].RegClass == Hexagon::PredRegsRegClassID; } /// Return whether the insn can be packaged only with A and X-type insns. @@ -932,7 +932,7 @@ return {0, 0, false}; MCInstrDesc const &Desc = getDesc(MCII, MCI); for (auto I = Desc.getNumDefs(), N = Desc.getNumOperands(); I != N; ++I) - if (Desc.OpInfo[I].RegClass == Hexagon::PredRegsRegClassID) + if (Desc.operands()[I].RegClass == Hexagon::PredRegsRegClassID) return {MCI.getOperand(I).getReg(), I, isPredicatedTrue(MCII, MCI)}; return {0, 0, false}; } diff --git a/llvm/lib/Target/Lanai/LanaiInstrInfo.cpp b/llvm/lib/Target/Lanai/LanaiInstrInfo.cpp --- a/llvm/lib/Target/Lanai/LanaiInstrInfo.cpp +++ b/llvm/lib/Target/Lanai/LanaiInstrInfo.cpp @@ -514,7 +514,7 @@ // Copy all the DefMI operands, excluding its (null) predicate. 
const MCInstrDesc &DefDesc = DefMI->getDesc(); for (unsigned i = 1, e = DefDesc.getNumOperands(); - i != e && !DefDesc.OpInfo[i].isPredicate(); ++i) + i != e && !DefDesc.operands()[i].isPredicate(); ++i) NewMI.add(DefMI->getOperand(i)); unsigned CondCode = MI.getOperand(3).getImm(); diff --git a/llvm/lib/Target/Lanai/MCTargetDesc/LanaiMCTargetDesc.cpp b/llvm/lib/Target/Lanai/MCTargetDesc/LanaiMCTargetDesc.cpp --- a/llvm/lib/Target/Lanai/MCTargetDesc/LanaiMCTargetDesc.cpp +++ b/llvm/lib/Target/Lanai/MCTargetDesc/LanaiMCTargetDesc.cpp @@ -102,7 +102,7 @@ !isCall(Inst)) return false; - if (Info->get(Inst.getOpcode()).OpInfo[0].OperandType == + if (Info->get(Inst.getOpcode()).operands()[0].OperandType == MCOI::OPERAND_PCREL) { int64_t Imm = Inst.getOperand(0).getImm(); Target = Addr + Size + Imm; diff --git a/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp b/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp --- a/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp +++ b/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp @@ -1834,7 +1834,7 @@ if (NumOp != 3 && NumOp != 4) return false; - const MCOperandInfo &OpInfo = MCID.OpInfo[NumOp - 1]; + const MCOperandInfo &OpInfo = MCID.operands()[NumOp - 1]; if (OpInfo.OperandType != MCOI::OPERAND_MEMORY && OpInfo.OperandType != MCOI::OPERAND_UNKNOWN && OpInfo.OperandType != MipsII::OPERAND_MEM_SIMM9) @@ -2148,7 +2148,7 @@ // Check the offset of memory operand, if it is a symbol // reference or immediate we may have to expand instructions. if (needsExpandMemInst(Inst, MCID)) { - switch (MCID.OpInfo[MCID.getNumOperands() - 1].OperandType) { + switch (MCID.operands()[MCID.getNumOperands() - 1].OperandType) { case MipsII::OPERAND_MEM_SIMM9: expandMem9Inst(Inst, IDLoc, Out, STI, MCID.mayLoad()); break; @@ -2164,7 +2164,7 @@ if (MCID.mayLoad() && Opcode != Mips::LWP_MM) { // Try to create 16-bit GP relative load instruction. 
for (unsigned i = 0; i < MCID.getNumOperands(); i++) { - const MCOperandInfo &OpInfo = MCID.OpInfo[i]; + const MCOperandInfo &OpInfo = MCID.operands()[i]; if ((OpInfo.OperandType == MCOI::OPERAND_MEMORY) || (OpInfo.OperandType == MCOI::OPERAND_UNKNOWN)) { MCOperand &Op = Inst.getOperand(i); @@ -3684,7 +3684,7 @@ unsigned TmpReg = DstReg; const MCInstrDesc &Desc = MII.get(OpCode); - int16_t DstRegClass = Desc.OpInfo[StartOp].RegClass; + int16_t DstRegClass = Desc.operands()[StartOp].RegClass; unsigned DstRegClassID = getContext().getRegisterInfo()->getRegClass(DstRegClass).getID(); bool IsGPR = (DstRegClassID == Mips::GPR32RegClassID) || @@ -3811,7 +3811,7 @@ unsigned TmpReg = DstReg; const MCInstrDesc &Desc = MII.get(OpCode); - int16_t DstRegClass = Desc.OpInfo[StartOp].RegClass; + int16_t DstRegClass = Desc.operands()[StartOp].RegClass; unsigned DstRegClassID = getContext().getRegisterInfo()->getRegClass(DstRegClass).getID(); bool IsGPR = (DstRegClassID == Mips::GPR32RegClassID) || diff --git a/llvm/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp b/llvm/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp --- a/llvm/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp +++ b/llvm/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp @@ -143,7 +143,7 @@ unsigned NumOps = Inst.getNumOperands(); if (NumOps == 0) return false; - switch (Info->get(Inst.getOpcode()).OpInfo[NumOps - 1].OperandType) { + switch (Info->get(Inst.getOpcode()).operands()[NumOps - 1].OperandType) { case MCOI::OPERAND_UNKNOWN: case MCOI::OPERAND_IMMEDIATE: { // j, jal, jalx, jals diff --git a/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp b/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp --- a/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp +++ b/llvm/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp @@ -385,7 +385,7 @@ uint64_t &Target) const override { unsigned NumOps = Inst.getNumOperands(); if (NumOps == 0 || - Info->get(Inst.getOpcode()).OpInfo[NumOps - 1].OperandType != + Info->get(Inst.getOpcode()).operands()[NumOps - 1].OperandType != MCOI::OPERAND_PCREL) return false; Target = Addr + Inst.getOperand(NumOps - 1).getImm() * Size; diff --git a/llvm/lib/Target/PowerPC/PPCInstrInfo.h b/llvm/lib/Target/PowerPC/PPCInstrInfo.h --- a/llvm/lib/Target/PowerPC/PPCInstrInfo.h +++ b/llvm/lib/Target/PowerPC/PPCInstrInfo.h @@ -794,7 +794,7 @@ /// operands). static unsigned getRegNumForOperand(const MCInstrDesc &Desc, unsigned Reg, unsigned OpNo) { - int16_t regClass = Desc.OpInfo[OpNo].RegClass; + int16_t regClass = Desc.operands()[OpNo].RegClass; switch (regClass) { // We store F0-F31, VF0-VF31 in MCOperand and it should be F0-F31, // VSX32-VSX63 during encoding/disassembling diff --git a/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp b/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp --- a/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp +++ b/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp @@ -2092,7 +2092,7 @@ assert(UseIdx < UseMI.getNumOperands() && "Cannot find Reg in UseMI"); assert(UseIdx < UseMCID.getNumOperands() && "No operand description for Reg"); - const MCOperandInfo *UseInfo = &UseMCID.OpInfo[UseIdx]; + const MCOperandInfo *UseInfo = &UseMCID.operands()[UseIdx]; // We can fold the zero if this register requires a GPRC_NOR0/G8RC_NOX0 // register (which might also be specified as a pointer class kind). 
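Taking a pointer into the returned range, as the PPCInstrInfo.cpp hunk above does with &UseMCID.operands()[UseIdx], stays valid after this change: ArrayRef is a non-owning view of the same statically allocated operand table the raw OpInfo pointer referenced, so element addresses are unchanged. An illustrative assertion, not part of the patch (opInfo_begin() is still available here):

  const MCOperandInfo *UseInfo = &UseMCID.operands()[UseIdx];
  assert(UseInfo == UseMCID.opInfo_begin() + UseIdx &&
         "operands() must view the same table OpInfo pointed into");
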
diff --git a/llvm/lib/Target/SPIRV/MCTargetDesc/SPIRVInstPrinter.cpp b/llvm/lib/Target/SPIRV/MCTargetDesc/SPIRVInstPrinter.cpp --- a/llvm/lib/Target/SPIRV/MCTargetDesc/SPIRVInstPrinter.cpp +++ b/llvm/lib/Target/SPIRV/MCTargetDesc/SPIRVInstPrinter.cpp @@ -85,8 +85,8 @@ const unsigned NumFixedOps = MCDesc.getNumOperands(); const unsigned LastFixedIndex = NumFixedOps - 1; const int FirstVariableIndex = NumFixedOps; - if (NumFixedOps > 0 && - MCDesc.OpInfo[LastFixedIndex].OperandType == MCOI::OPERAND_UNKNOWN) { + if (NumFixedOps > 0 && MCDesc.operands()[LastFixedIndex].OperandType == + MCOI::OPERAND_UNKNOWN) { // For instructions where a custom type (not reg or immediate) comes as // the last operand before the variable_ops. This is usually a StringImm // operand, but there are a few other cases. diff --git a/llvm/lib/Target/SPIRV/MCTargetDesc/SPIRVMCCodeEmitter.cpp b/llvm/lib/Target/SPIRV/MCTargetDesc/SPIRVMCCodeEmitter.cpp --- a/llvm/lib/Target/SPIRV/MCTargetDesc/SPIRVMCCodeEmitter.cpp +++ b/llvm/lib/Target/SPIRV/MCTargetDesc/SPIRVMCCodeEmitter.cpp @@ -65,11 +65,11 @@ // If we define an output, and have at least one other argument. if (MCDesc.getNumDefs() == 1 && MCDesc.getNumOperands() >= 2) { // Check if we define an ID, and take a type as operand 1. - auto DefOpInfo = MCDesc.opInfo_begin(); - auto FirstArgOpInfo = MCDesc.opInfo_begin() + 1; - return (DefOpInfo->RegClass == SPIRV::IDRegClassID || - DefOpInfo->RegClass == SPIRV::ANYIDRegClassID) && - FirstArgOpInfo->RegClass == SPIRV::TYPERegClassID; + auto &DefOpInfo = MCDesc.operands()[0]; + auto &FirstArgOpInfo = MCDesc.operands()[1]; + return (DefOpInfo.RegClass == SPIRV::IDRegClassID || + DefOpInfo.RegClass == SPIRV::ANYIDRegClassID) && + FirstArgOpInfo.RegClass == SPIRV::TYPERegClassID; } return false; } diff --git a/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp b/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp --- a/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp +++ b/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp @@ -1204,7 +1204,7 @@ // to FP conversion. const MCInstrDesc &MCID = MI.getDesc(); for (unsigned I = 0, E = MCID.getNumOperands(); I != E; ++I) { - const MCOperandInfo &MCOI = MCID.OpInfo[I]; + const MCOperandInfo &MCOI = MCID.operands()[I]; if (MCOI.OperandType != MCOI::OPERAND_REGISTER || I == OpNum) continue; const TargetRegisterClass *RC = TRI->getRegClass(MCOI.RegClass); @@ -1999,7 +1999,7 @@ if (I >= MCID.getNumOperands()) break; const MachineOperand &Op = MI.getOperand(I); - const MCOperandInfo &MCOI = MCID.OpInfo[I]; + const MCOperandInfo &MCOI = MCID.operands()[I]; // Addressing modes have register and immediate operands. Op should be a // register (or frame index) operand if MCOI.RegClass contains a valid // register class, or an immediate otherwise. diff --git a/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmTypeCheck.cpp b/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmTypeCheck.cpp --- a/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmTypeCheck.cpp +++ b/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmTypeCheck.cpp @@ -331,7 +331,7 @@ const auto &II = MII.get(RegOpc); // First pop all the uses off the stack and check them. for (unsigned I = II.getNumOperands(); I > II.getNumDefs(); I--) { - const auto &Op = II.OpInfo[I - 1]; + const auto &Op = II.operands()[I - 1]; if (Op.OperandType == MCOI::OPERAND_REGISTER) { auto VT = WebAssembly::regClassToValType(Op.RegClass); if (popType(ErrorLoc, VT)) @@ -340,7 +340,7 @@ } // Now push all the defs onto the stack. 
for (unsigned I = 0; I < II.getNumDefs(); I++) { - const auto &Op = II.OpInfo[I]; + const auto &Op = II.operands()[I]; assert(Op.OperandType == MCOI::OPERAND_REGISTER && "Register expected"); auto VT = WebAssembly::regClassToValType(Op.RegClass); Stack.push_back(VT); diff --git a/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyInstPrinter.cpp b/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyInstPrinter.cpp --- a/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyInstPrinter.cpp +++ b/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyInstPrinter.cpp @@ -240,7 +240,7 @@ // See if this operand denotes a basic block target. if (I < NumFixedOperands) { // A non-variable_ops operand, check its type. - if (Desc.OpInfo[I].OperandType != WebAssembly::OPERAND_BASIC_BLOCK) + if (Desc.operands()[I].OperandType != WebAssembly::OPERAND_BASIC_BLOCK) continue; } else { // A variable_ops operand, which currently can be immediates (used in diff --git a/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCCodeEmitter.cpp b/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCCodeEmitter.cpp --- a/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCCodeEmitter.cpp +++ b/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCCodeEmitter.cpp @@ -92,7 +92,7 @@ } else if (MO.isImm()) { if (I < Desc.getNumOperands()) { - const MCOperandInfo &Info = Desc.OpInfo[I]; + const MCOperandInfo &Info = Desc.operands()[I]; LLVM_DEBUG(dbgs() << "Encoding immediate: type=" << int(Info.OperandType) << "\n"); switch (Info.OperandType) { @@ -134,7 +134,7 @@ uint64_t D = MO.getDFPImm(); support::endian::write<uint64_t>(OS, D, support::little); } else if (MO.isExpr()) { - const MCOperandInfo &Info = Desc.OpInfo[I]; + const MCOperandInfo &Info = Desc.operands()[I]; llvm::MCFixupKind FixupKind; size_t PaddedSize = 5; switch (Info.OperandType) { diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyMCInstLower.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyMCInstLower.cpp --- a/llvm/lib/Target/WebAssembly/WebAssemblyMCInstLower.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyMCInstLower.cpp @@ -194,7 +194,7 @@ case MachineOperand::MO_Immediate: { unsigned DescIndex = I - NumVariadicDefs; if (DescIndex < Desc.NumOperands) { - const MCOperandInfo &Info = Desc.OpInfo[DescIndex]; + const MCOperandInfo &Info = Desc.operands()[DescIndex]; if (Info.OperandType == WebAssembly::OPERAND_TYPEINDEX) { SmallVector<wasm::ValType, 4> Returns; SmallVector<wasm::ValType, 4> Params; diff --git a/llvm/lib/Target/WebAssembly/WebAssemblySetP2AlignOperands.cpp b/llvm/lib/Target/WebAssembly/WebAssemblySetP2AlignOperands.cpp --- a/llvm/lib/Target/WebAssembly/WebAssemblySetP2AlignOperands.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblySetP2AlignOperands.cpp @@ -62,7 +62,7 @@ assert((*MI.memoperands_begin())->getSize() == (UINT64_C(1) << WebAssembly::GetDefaultP2Align(MI.getOpcode())) && "Default p2align value should be natural"); - assert(MI.getDesc().OpInfo[OperandNo].OperandType == + assert(MI.getDesc().operands()[OperandNo].OperandType == WebAssembly::OPERAND_P2ALIGN && "Load and store instructions should have a p2align operand"); uint64_t P2Align = Log2((*MI.memoperands_begin())->getAlign()); diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp --- a/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp +++ b/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp @@ -630,7 +630,8 @@ bool X86MCInstrAnalysis::evaluateBranch(const MCInst &Inst, uint64_t Addr, uint64_t Size, uint64_t &Target) const { if (Inst.getNumOperands() 
== 0 || - Info->get(Inst.getOpcode()).OpInfo[0].OperandType != MCOI::OPERAND_PCREL + Info->get(Inst.getOpcode()).operands()[0].OperandType != + MCOI::OPERAND_PCREL return false; Target = Addr + Size + Inst.getOperand(0).getImm(); return true; diff --git a/llvm/lib/Target/X86/X86MCInstLower.cpp b/llvm/lib/Target/X86/X86MCInstLower.cpp --- a/llvm/lib/Target/X86/X86MCInstLower.cpp +++ b/llvm/lib/Target/X86/X86MCInstLower.cpp @@ -2134,7 +2134,7 @@ const MachineOperand &MaskOp = MI->getOperand(MaskIdx); if (auto *C = getConstantFromPool(*MI, MaskOp)) { - unsigned Width = getRegisterWidth(MI->getDesc().OpInfo[0]); + unsigned Width = getRegisterWidth(MI->getDesc().operands()[0]); SmallVector<int, 64> Mask; DecodePSHUFBMask(C, Width, Mask); if (!Mask.empty()) @@ -2212,7 +2212,7 @@ const MachineOperand &MaskOp = MI->getOperand(MaskIdx); if (auto *C = getConstantFromPool(*MI, MaskOp)) { - unsigned Width = getRegisterWidth(MI->getDesc().OpInfo[0]); + unsigned Width = getRegisterWidth(MI->getDesc().operands()[0]); SmallVector<int, 64> Mask; DecodeVPERMILPMask(C, ElSize, Width, Mask); if (!Mask.empty()) @@ -2241,7 +2241,7 @@ const MachineOperand &MaskOp = MI->getOperand(3 + X86::AddrDisp); if (auto *C = getConstantFromPool(*MI, MaskOp)) { - unsigned Width = getRegisterWidth(MI->getDesc().OpInfo[0]); + unsigned Width = getRegisterWidth(MI->getDesc().operands()[0]); SmallVector<int, 64> Mask; DecodeVPERMIL2PMask(C, (unsigned)CtrlOp.getImm(), ElSize, Width, Mask); if (!Mask.empty()) @@ -2256,7 +2256,7 @@ const MachineOperand &MaskOp = MI->getOperand(3 + X86::AddrDisp); if (auto *C = getConstantFromPool(*MI, MaskOp)) { - unsigned Width = getRegisterWidth(MI->getDesc().OpInfo[0]); + unsigned Width = getRegisterWidth(MI->getDesc().operands()[0]); SmallVector<int, 64> Mask; DecodeVPPERMMask(C, Width, Mask); if (!Mask.empty()) diff --git a/llvm/tools/llvm-exegesis/lib/MCInstrDescView.cpp b/llvm/tools/llvm-exegesis/lib/MCInstrDescView.cpp --- a/llvm/tools/llvm-exegesis/lib/MCInstrDescView.cpp +++ b/llvm/tools/llvm-exegesis/lib/MCInstrDescView.cpp @@ -111,7 +111,7 @@ SmallVector<Operand, 8> Operands; SmallVector<Variable, 4> Variables; for (; OpIndex < Description->getNumOperands(); ++OpIndex) { - const auto &OpInfo = Description->opInfo_begin()[OpIndex]; + const auto &OpInfo = Description->operands()[OpIndex]; Operand Operand; Operand.Index = OpIndex; Operand.IsDef = (OpIndex < Description->getNumDefs());
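One behavioral note on the migration as a whole: ArrayRef::operator[] asserts that the index is in range in asserts-enabled builds, so out-of-bounds reads that the raw OpInfo pointer silently permitted now trap; that is why the SIInstrInfo.cpp hunk earlier keeps an unchecked operands().begin()[i] access behind a FIXME. A short sketch of the distinction, with hypothetical Desc and I:

  // Bounds-checked in +Asserts builds; fails fast on a bad index.
  const MCOperandInfo &Checked = Desc.operands()[I];
  // Unchecked escape hatch, equivalent to the old raw Desc.OpInfo[I].
  const MCOperandInfo &Unchecked = Desc.operands().begin()[I];
  (void)Checked; (void)Unchecked;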