Index: include/llvm/Target/Target.td =================================================================== --- include/llvm/Target/Target.td +++ include/llvm/Target/Target.td @@ -619,6 +619,16 @@ /// /// Optional arguments must be at the end of the operand list. bit IsOptional = 0; + + /// The name of the method on the target specific asm parser that returns the + /// default operand for this optional operand. This method only takes effect + /// if IsOptional == 1. If not set, this will default to + /// "defaultFooOperands", where Foo is the AsmOperandClass name. The method + /// signature should be: + /// std::unique_ptr<MCParsedAsmOperand> defaultFooOperands( + /// unsigned Opcode, + /// const OperandVector &Operands) const; + string DefaultMethod = ?; } def ImmAsmOperand : AsmOperandClass { Index: lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp =================================================================== --- lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp +++ lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp @@ -13,6 +13,7 @@ #include "Utils/AArch64BaseInfo.h" #include "llvm/ADT/APInt.h" #include "llvm/ADT/STLExtras.h" +#include "llvm/ADT/SmallBitVector.h" #include "llvm/ADT/SmallString.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringSwitch.h" Index: lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp =================================================================== --- lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp +++ lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp @@ -15,6 +15,7 @@ #include "Utils/AMDKernelCodeTUtils.h" #include "llvm/ADT/APFloat.h" #include "llvm/ADT/STLExtras.h" +#include "llvm/ADT/SmallBitVector.h" #include "llvm/ADT/SmallString.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringSwitch.h" @@ -58,6 +59,8 @@ MCContext *Ctx; + typedef std::unique_ptr Ptr; + enum ImmTy { ImmTyNone, ImmTyDSOffset0, @@ -355,9 +358,9 @@ } } - static std::unique_ptr CreateImm(int64_t Val, SMLoc Loc, - enum ImmTy Type = ImmTyNone, - bool IsFPImm = false) { + 
static AMDGPUOperand::Ptr CreateImm(int64_t Val, SMLoc Loc, + enum ImmTy Type = ImmTyNone, + bool IsFPImm = false) { auto Op = llvm::make_unique(Immediate); Op->Imm.Val = Val; Op->Imm.IsFPImm = IsFPImm; @@ -368,8 +371,8 @@ return Op; } - static std::unique_ptr CreateToken(StringRef Str, SMLoc Loc, - bool HasExplicitEncodingSize = true) { + static AMDGPUOperand::Ptr CreateToken(StringRef Str, SMLoc Loc, + bool HasExplicitEncodingSize = true) { auto Res = llvm::make_unique(Token); Res->Tok.Data = Str.data(); Res->Tok.Length = Str.size(); @@ -378,11 +381,11 @@ return Res; } - static std::unique_ptr CreateReg(unsigned RegNo, SMLoc S, - SMLoc E, - const MCRegisterInfo *TRI, - const MCSubtargetInfo *STI, - bool ForceVOP3) { + static AMDGPUOperand::Ptr CreateReg(unsigned RegNo, SMLoc S, + SMLoc E, + const MCRegisterInfo *TRI, + const MCSubtargetInfo *STI, + bool ForceVOP3) { auto Op = llvm::make_unique(Register); Op->Reg.RegNo = RegNo; Op->Reg.TRI = TRI; @@ -394,7 +397,7 @@ return Op; } - static std::unique_ptr CreateExpr(const class MCExpr *Expr, SMLoc S) { + static AMDGPUOperand::Ptr CreateExpr(const class MCExpr *Expr, SMLoc S) { auto Op = llvm::make_unique(Expression); Op->Expr = Expr; Op->StartLoc = S; @@ -531,8 +534,6 @@ OperandMatchResultTy parseFlatOptionalOps(OperandVector &Operands); OperandMatchResultTy parseFlatAtomicOptionalOps(OperandVector &Operands); - void cvtFlat(MCInst &Inst, const OperandVector &Operands); - void cvtFlatAtomic(MCInst &Inst, const OperandVector &Operands); void cvtMubuf(MCInst &Inst, const OperandVector &Operands); OperandMatchResultTy parseOffset(OperandVector &Operands); @@ -540,17 +541,34 @@ OperandMatchResultTy parseGLC(OperandVector &Operands); OperandMatchResultTy parseSLC(OperandVector &Operands); OperandMatchResultTy parseTFE(OperandVector &Operands); + AMDGPUOperand::Ptr defaultMubufOffset(unsigned Opcode, + const OperandVector &Operands) const; + AMDGPUOperand::Ptr defaultGLC(unsigned Opcode, + const OperandVector &Operands) 
const; + AMDGPUOperand::Ptr defaultSLC(unsigned Opcode, + const OperandVector &Operands) const; + AMDGPUOperand::Ptr defaultTFE(unsigned Opcode, + const OperandVector &Operands) const; + OperandMatchResultTy parseDMask(OperandVector &Operands); OperandMatchResultTy parseUNorm(OperandVector &Operands); OperandMatchResultTy parseDA(OperandVector &Operands); OperandMatchResultTy parseR128(OperandVector &Operands); OperandMatchResultTy parseLWE(OperandVector &Operands); + AMDGPUOperand::Ptr defaultDMask(unsigned Opcode, + const OperandVector &Operands) const; + AMDGPUOperand::Ptr defaultUNorm(unsigned Opcode, + const OperandVector &Operands) const; + AMDGPUOperand::Ptr defaultDA(unsigned Opcode, + const OperandVector &Operands) const; + AMDGPUOperand::Ptr defaultR128(unsigned Opcode, + const OperandVector &Operands) const; + AMDGPUOperand::Ptr defaultLWE(unsigned Opcode, + const OperandVector &Operands) const; void cvtId(MCInst &Inst, const OperandVector &Operands); void cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands); - void cvtVOP3_2_nomod(MCInst &Inst, const OperandVector &Operands); - void cvtVOP3_only(MCInst &Inst, const OperandVector &Operands); void cvtVOP3(MCInst &Inst, const OperandVector &Operands); void cvtMIMG(MCInst &Inst, const OperandVector &Operands); @@ -559,9 +577,13 @@ OperandMatchResultTy parseDPPCtrlOps(OperandVector &Operands); OperandMatchResultTy parseDPPOptionalOps(OperandVector &Operands); - void cvtDPP_mod(MCInst &Inst, const OperandVector &Operands); - void cvtDPP_nomod(MCInst &Inst, const OperandVector &Operands); - void cvtDPP(MCInst &Inst, const OperandVector &Operands, bool HasMods); + AMDGPUOperand::Ptr defaultRowMask(unsigned Opcode, + const OperandVector &Operands) const; + AMDGPUOperand::Ptr defaultBankMask(unsigned Opcode, + const OperandVector &Operands) const; + AMDGPUOperand::Ptr defaultBoundCtrl(unsigned Opcode, + const OperandVector &Operands) const; + void cvtDPP(MCInst &Inst, const OperandVector &Operands); }; 
struct OptionalOperand { @@ -1560,52 +1582,6 @@ return parseOptionalOps(FlatAtomicOptionalOps, Operands); } -void AMDGPUAsmParser::cvtFlat(MCInst &Inst, - const OperandVector &Operands) { - OptionalImmIndexMap OptionalIdx; - - for (unsigned i = 1, e = Operands.size(); i != e; ++i) { - AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]); - - // Add the register arguments - if (Op.isReg()) { - Op.addRegOperands(Inst, 1); - continue; - } - - OptionalIdx[Op.getImmTy()] = i; - } - addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC); - addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC); - addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE); -} - - -void AMDGPUAsmParser::cvtFlatAtomic(MCInst &Inst, - const OperandVector &Operands) { - OptionalImmIndexMap OptionalIdx; - - for (unsigned i = 1, e = Operands.size(); i != e; ++i) { - AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]); - - // Add the register arguments - if (Op.isReg()) { - Op.addRegOperands(Inst, 1); - continue; - } - - // Handle 'glc' token for flat atomics. 
- if (Op.isToken()) { - continue; - } - - // Handle optional arguments - OptionalIdx[Op.getImmTy()] = i; - } - addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC); - addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE); -} - //===----------------------------------------------------------------------===// // mubuf //===----------------------------------------------------------------------===// @@ -1646,6 +1622,30 @@ return isImmTy(ImmTyOffset) && isUInt<12>(getImm()); } +AMDGPUOperand::Ptr AMDGPUAsmParser::defaultMubufOffset(unsigned Opcode, + const OperandVector &Operands) const { + return AMDGPUOperand::CreateImm(0, Operands[0]->getStartLoc(), + AMDGPUOperand::ImmTyOffset); +} + +AMDGPUOperand::Ptr AMDGPUAsmParser::defaultGLC(unsigned Opcode, + const OperandVector &Operands) const { + return AMDGPUOperand::CreateImm(0, Operands[0]->getStartLoc(), + AMDGPUOperand::ImmTyGLC); +} + +AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSLC(unsigned Opcode, + const OperandVector &Operands) const { + return AMDGPUOperand::CreateImm(0, Operands[0]->getStartLoc(), + AMDGPUOperand::ImmTySLC); +} + +AMDGPUOperand::Ptr AMDGPUAsmParser::defaultTFE(unsigned Opcode, + const OperandVector &Operands) const { + return AMDGPUOperand::CreateImm(0, Operands[0]->getStartLoc(), + AMDGPUOperand::ImmTyTFE); +} + void AMDGPUAsmParser::cvtMubuf(MCInst &Inst, const OperandVector &Operands) { OptionalImmIndexMap OptionalIdx; @@ -1711,6 +1711,36 @@ return parseNamedBit("lwe", Operands, AMDGPUOperand::ImmTyLWE); } +AMDGPUOperand::Ptr AMDGPUAsmParser::defaultDMask(unsigned Opcode, + const OperandVector &Operands) const { + return AMDGPUOperand::CreateImm(0, Operands[0]->getStartLoc(), + AMDGPUOperand::ImmTyDMask); +} + +AMDGPUOperand::Ptr AMDGPUAsmParser::defaultUNorm(unsigned Opcode, + const OperandVector &Operands) const { + return AMDGPUOperand::CreateImm(0, Operands[0]->getStartLoc(), + AMDGPUOperand::ImmTyUNorm); +} + +AMDGPUOperand::Ptr 
AMDGPUAsmParser::defaultDA(unsigned Opcode, + const OperandVector &Operands) const { + return AMDGPUOperand::CreateImm(0, Operands[0]->getStartLoc(), + AMDGPUOperand::ImmTyDA); +} + +AMDGPUOperand::Ptr AMDGPUAsmParser::defaultR128(unsigned Opcode, + const OperandVector &Operands) const { + return AMDGPUOperand::CreateImm(0, Operands[0]->getStartLoc(), + AMDGPUOperand::ImmTyR128); +} + +AMDGPUOperand::Ptr AMDGPUAsmParser::defaultLWE(unsigned Opcode, + const OperandVector &Operands) const { + return AMDGPUOperand::CreateImm(0, Operands[0]->getStartLoc(), + AMDGPUOperand::ImmTyLWE); +} + //===----------------------------------------------------------------------===// // smrd //===----------------------------------------------------------------------===// @@ -1831,18 +1861,6 @@ } } -void AMDGPUAsmParser::cvtVOP3_2_nomod(MCInst &Inst, const OperandVector &Operands) { - if (operandsHaveModifiers(Operands)) { - cvtVOP3(Inst, Operands); - } else { - cvtId(Inst, Operands); - } -} - -void AMDGPUAsmParser::cvtVOP3_only(MCInst &Inst, const OperandVector &Operands) { - cvtVOP3(Inst, Operands); -} - void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands) { OptionalImmIndexMap OptionalIdx; unsigned I = 1; @@ -2041,16 +2059,25 @@ return Res; } -void AMDGPUAsmParser::cvtDPP_mod(MCInst &Inst, const OperandVector &Operands) { - cvtDPP(Inst, Operands, true); +AMDGPUOperand::Ptr AMDGPUAsmParser::defaultRowMask(unsigned Opcode, + const OperandVector &Operands) const { + return AMDGPUOperand::CreateImm(0xf, Operands[0]->getStartLoc(), + AMDGPUOperand::ImmTyDppRowMask); } -void AMDGPUAsmParser::cvtDPP_nomod(MCInst &Inst, const OperandVector &Operands) { - cvtDPP(Inst, Operands, false); +AMDGPUOperand::Ptr AMDGPUAsmParser::defaultBankMask(unsigned Opcode, + const OperandVector &Operands) const { + return AMDGPUOperand::CreateImm(0xf, Operands[0]->getStartLoc(), + AMDGPUOperand::ImmTyDppBankMask); } -void AMDGPUAsmParser::cvtDPP(MCInst &Inst, const OperandVector 
&Operands, - bool HasMods) { +AMDGPUOperand::Ptr AMDGPUAsmParser::defaultBoundCtrl(unsigned Opcode, + const OperandVector &Operands) const { + return AMDGPUOperand::CreateImm(0, Operands[0]->getStartLoc(), + AMDGPUOperand::ImmTyDppBoundCtrl); +} + +void AMDGPUAsmParser::cvtDPP(MCInst &Inst, const OperandVector &Operands) { OptionalImmIndexMap OptionalIdx; unsigned I = 1; @@ -2062,9 +2089,8 @@ for (unsigned E = Operands.size(); I != E; ++I) { AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]); // Add the register arguments - if (!HasMods && Op.isReg()) { - Op.addRegOperands(Inst, 1); - } else if (HasMods && Op.isRegOrImmWithInputMods()) { + if (Op.isRegOrImmWithInputMods()) { + // We convert only instructions with modifiers Op.addRegOrImmWithInputModsOperands(Inst, 2); } else if (Op.isDPPCtrl()) { Op.addImmOperands(Inst, 1); Index: lib/Target/AMDGPU/SIInstrFormats.td =================================================================== --- lib/Target/AMDGPU/SIInstrFormats.td +++ lib/Target/AMDGPU/SIInstrFormats.td @@ -145,8 +145,9 @@ let AsmMatchConverter = !if(!eq(VOP3Only,1), - "cvtVOP3_only", - !if(!eq(HasMods,1), "cvtVOP3_2_mod", "cvtVOP3_2_nomod")); + "cvtVOP3", + !if(!eq(HasMods,1), "cvtVOP3_2_mod", "")); + let isCodeGenOnly = 0; int Size = 8; @@ -708,7 +709,6 @@ let UseNamedOperandTable = 1; let hasSideEffects = 0; - let AsmMatchConverter = "cvtFlat"; let SchedRW = [WriteVMEM]; } Index: lib/Target/AMDGPU/SIInstrInfo.td =================================================================== --- lib/Target/AMDGPU/SIInstrInfo.td +++ lib/Target/AMDGPU/SIInstrInfo.td @@ -440,6 +440,7 @@ let ParserMethod = "parseMubufOptionalOps"; let RenderMethod = "addImmOperands"; let IsOptional = 1; + let DefaultMethod = "defaultMubufOffset"; } class DSOffsetBaseMatchClass : AsmOperandClass { @@ -478,6 +479,7 @@ let ParserMethod = parser; let RenderMethod = "addImmOperands"; let IsOptional = 1; + let DefaultMethod = "defaultGLC"; } def GLCMubufMatchClass : GLCBaseMatchClass 
<"parseMubufOptionalOps">; @@ -489,6 +491,7 @@ let ParserMethod = parser; let RenderMethod = "addImmOperands"; let IsOptional = 1; + let DefaultMethod = "defaultSLC"; } def SLCMubufMatchClass : SLCBaseMatchClass <"parseMubufOptionalOps">; @@ -501,6 +504,7 @@ let ParserMethod = parser; let RenderMethod = "addImmOperands"; let IsOptional = 1; + let DefaultMethod = "defaultTFE"; } def TFEMubufMatchClass : TFEBaseMatchClass <"parseMubufOptionalOps">; @@ -548,6 +552,7 @@ let ParserMethod = "parseDPPOptionalOps"; let RenderMethod = "addImmOperands"; let IsOptional = 1; + let DefaultMethod = "default"#OpName; } class OptionalImmAsmOperand : AsmOperandClass { @@ -563,6 +568,7 @@ let ParserMethod = "parseDMask"; let RenderMethod = "addImmOperands"; let IsOptional = 1; + let DefaultMethod = "defaultDMask"; } class NamedBitMatchClass : AsmOperandClass { @@ -571,6 +577,7 @@ let ParserMethod = "parse"#BitName; let RenderMethod = "addImmOperands"; let IsOptional = 1; + let DefaultMethod = "default"#BitName; } class NamedBitOperand : Operand { @@ -3074,7 +3081,7 @@ dag outs_noret = (outs), string asm_noret = asm_name#" $addr, $data"#"$slc"#"$tfe"> { - let mayLoad = 1, mayStore = 1, glc = 0, vdst = 0, AsmMatchConverter = "cvtFlatAtomic" in { + let mayLoad = 1, mayStore = 1, glc = 0, vdst = 0 in { def "" : FLAT_Pseudo , @@ -3091,7 +3098,7 @@ asm_noret>; } - let glc = 1, hasPostISelHook = 1, AsmMatchConverter = "cvtFlatAtomic" in { + let glc = 1, hasPostISelHook = 1 in { defm _RTN : FLAT_AtomicRet_m ParserMethod = ""; Entry->DiagnosticType = ""; Entry->IsOptional = false; + Entry->DefaultMethod = ""; } return Entry; @@ -1255,6 +1259,7 @@ // FIXME: diagnostic type. CI->DiagnosticType = ""; CI->IsOptional = false; + CI->DefaultMethod = ""; // unused RegisterSetClasses.insert(std::make_pair(RS, CI)); ++Index; } @@ -1373,6 +1378,15 @@ if (BitInit *BI = dyn_cast(IsOptional)) CI->IsOptional = BI->getValue(); + // Get or construct the default method name. 
+ Init *DMName = Rec->getValueInit("DefaultMethod"); + if (StringInit *SI = dyn_cast(DMName)) { + CI->DefaultMethod = SI->getValue(); + } else { + assert(isa(DMName) && "Unexpected DefaultMethod field!"); + CI->DefaultMethod = "default" + CI->ClassName + "Operands"; + } + ++Index; } } @@ -1827,20 +1841,23 @@ CvtOS << "void " << Target.getName() << ClassName << "::\n" << "convertToMCInst(unsigned Kind, MCInst &Inst, " << "unsigned Opcode,\n" - << " const OperandVector" - << " &Operands) {\n" + << " const OperandVector &Operands,\n" + << " const SmallBitVector &OptionalOperandsMask) {\n" << " assert(Kind < CVT_NUM_SIGNATURES && \"Invalid signature!\");\n" << " const uint8_t *Converter = ConversionTable[Kind];\n" + << " unsigned NumDefaults = 0;\n" + << " unsigned OpIdx;\n" << " Inst.setOpcode(Opcode);\n" << " for (const uint8_t *p = Converter; *p; p+= 2) {\n" + << " OpIdx = *(p + 1) - NumDefaults;\n" << " switch (*p) {\n" << " default: llvm_unreachable(\"invalid conversion entry!\");\n" << " case CVT_Reg:\n" << " static_cast<" << TargetOperandClass - << "&>(*Operands[*(p + 1)]).addRegOperands(Inst, 1);\n" + << "&>(*Operands[OpIdx]).addRegOperands(Inst, 1);\n" << " break;\n" << " case CVT_Tied:\n" - << " Inst.addOperand(Inst.getOperand(*(p + 1)));\n" + << " Inst.addOperand(Inst.getOperand(OpIdx));\n" << " break;\n"; std::string OperandFnBody; @@ -1935,6 +1952,10 @@ // the index of its entry in the vector). std::string Name = "CVT_" + (Op.Class->isRegisterClass() ? "Reg" : Op.Class->RenderMethod); + if (Op.Class->IsOptional) { + // For optional operands we must also care about DefaultMethod + Name += "_" + Op.Class->DefaultMethod; + } Name = getEnumNameForToken(Name); bool IsNewConverter = false; @@ -1950,11 +1971,26 @@ // This is a new operand kind. Add a handler for it to the // converter driver. - CvtOS << " case " << Name << ":\n" - << " static_cast<" << TargetOperandClass - << "&>(*Operands[*(p + 1)])." 
<< Op.Class->RenderMethod - << "(Inst, " << OpInfo.MINumOperands << ");\n" - << " break;\n"; + CvtOS << " case " << Name << ":\n"; + if (Op.Class->IsOptional) { + // If optional operand is not present in actual instruction then we + // should call its DefaultMethod before RenderMethod + CvtOS << " if (OptionalOperandsMask[*(p + 1) - 1]) {\n" + << " " << Op.Class->DefaultMethod << "(Opcode, Operands)" + << "->" << Op.Class->RenderMethod << "(Inst, " + << OpInfo.MINumOperands << ");\n" + << " ++NumDefaults;\n" + << " } else {\n" + << " static_cast<" << TargetOperandClass + << "&>(*Operands[OpIdx])." << Op.Class->RenderMethod + << "(Inst, " << OpInfo.MINumOperands << ");\n" + << " }\n"; + } else { + CvtOS << " static_cast<" << TargetOperandClass + << "&>(*Operands[OpIdx])." << Op.Class->RenderMethod + << "(Inst, " << OpInfo.MINumOperands << ");\n"; + } + CvtOS << " break;\n"; // Add a handler for the operand number lookup. OpOS << " case " << Name << ":\n" @@ -2814,8 +2850,8 @@ OS << " uint64_t ComputeAvailableFeatures(const FeatureBitset& FB) const;\n"; OS << " void convertToMCInst(unsigned Kind, MCInst &Inst, " << "unsigned Opcode,\n" - << " const OperandVector " - << "&Operands);\n"; + << " const OperandVector &Operands,\n" + << " const SmallBitVector &OptionalOperandsMask);\n"; OS << " void convertToMapAndConstraints(unsigned Kind,\n "; OS << " const OperandVector &Operands) override;\n"; if (HasMnemonicFirst) @@ -3064,6 +3100,7 @@ OS << " bool HadMatchOtherThanPredicate = false;\n"; OS << " unsigned RetCode = Match_InvalidOperand;\n"; OS << " uint64_t MissingFeatures = ~0ULL;\n"; + OS << " SmallBitVector OptionalOperandsMask(" << MaxNumOperands << ");\n"; OS << " // Set ErrorInfo to the operand that mismatches if it is\n"; OS << " // wrong for all instances of the instruction.\n"; OS << " ErrorInfo = ~0ULL;\n"; @@ -3108,6 +3145,7 @@ // Emit check that the subclasses match. 
OS << " bool OperandsValid = true;\n"; + OS << " OptionalOperandsMask.reset(0, " << MaxNumOperands << ");\n"; OS << " for (unsigned FormalIdx = " << (HasMnemonicFirst ? "0" : "SIndex") << ", ActualIdx = " << (HasMnemonicFirst ? "1" : "SIndex") << "; FormalIdx != " << MaxNumOperands << "; ++FormalIdx) {\n"; @@ -3117,6 +3155,8 @@ OS << " OperandsValid = (Formal == " <<"InvalidMatchClass) || " "isSubclass(Formal, OptionalMatchClass);\n"; OS << " if (!OperandsValid) ErrorInfo = ActualIdx;\n"; + OS << " OptionalOperandsMask.set(FormalIdx, " << MaxNumOperands + << ");\n"; OS << " break;\n"; OS << " }\n"; OS << " MCParsedAsmOperand &Actual = *Operands[ActualIdx];\n"; @@ -3137,8 +3177,10 @@ OS << " // If current formal operand wasn't matched and it is optional\n" << " // then try to match next formal operand\n"; OS << " if (Diag == Match_InvalidOperand " - << "&& isSubclass(Formal, OptionalMatchClass))\n"; + << "&& isSubclass(Formal, OptionalMatchClass)) {\n"; + OS << " OptionalOperandsMask.set(FormalIdx);\n"; OS << " continue;\n"; + OS << " }\n"; OS << " // If this operand is broken for all of the instances of this\n"; OS << " // mnemonic, keep track of it so we can report loc info.\n"; OS << " // If we already had a match that only failed due to a\n"; @@ -3177,7 +3219,8 @@ OS << " }\n\n"; OS << " // We have selected a definite instruction, convert the parsed\n" << " // operands into the appropriate MCInst.\n"; - OS << " convertToMCInst(it->ConvertFn, Inst, it->Opcode, Operands);\n"; + OS << " convertToMCInst(it->ConvertFn, Inst, it->Opcode, Operands,\n" + << " OptionalOperandsMask);\n"; OS << "\n"; // Verify the instruction with the target-specific match predicate function.