Index: llvm/trunk/include/llvm/Target/Target.td
===================================================================
--- llvm/trunk/include/llvm/Target/Target.td
+++ llvm/trunk/include/llvm/Target/Target.td
@@ -619,6 +619,13 @@
   ///
   /// Optional arguments must be at the end of the operand list.
   bit IsOptional = 0;
+
+  /// The name of the method on the target specific asm parser that returns the
+  /// default operand for this optional operand. This method is only used if
+  /// IsOptional == 1. If not set, this will default to "defaultFooOperands",
+  /// where Foo is the AsmOperandClass name. The method signature should be:
+  ///   std::unique_ptr<MCParsedAsmOperand> defaultFooOperands() const;
+  string DefaultMethod = ?;
 }
 
 def ImmAsmOperand : AsmOperandClass {
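
A usage sketch (illustrative only; the class and method names below are
hypothetical stand-ins, not lines from this patch). An operand class opts into
defaulting by setting IsOptional, and can override the fallback method name
through DefaultMethod:

    def GLCMatchClass : AsmOperandClass {
      let Name = "GLC";
      let IsOptional = 1;
      // Without this, the matcher expects "defaultGLCOperands".
      let DefaultMethod = "defaultGLC";
    }

The target's asm parser then defines the named hook with the documented
signature and returns the operand to render whenever nothing was parsed in
that slot.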
parseAMDGPUOperand(Operands, "clamp"); } OperandMatchResultTy parseSMRDOffset(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "smrd_offset"); } @@ -597,25 +604,36 @@ OperandMatchResultTy parseDA(OperandVector &Operands); OperandMatchResultTy parseR128(OperandVector &Operands); OperandMatchResultTy parseLWE(OperandVector &Operands); - + AMDGPUOperand::Ptr defaultDMask() const; + AMDGPUOperand::Ptr defaultUNorm() const; + AMDGPUOperand::Ptr defaultDA() const; + AMDGPUOperand::Ptr defaultR128() const; + AMDGPUOperand::Ptr defaultLWE() const; + AMDGPUOperand::Ptr defaultSMRDOffset() const; + AMDGPUOperand::Ptr defaultSMRDLiteralOffset() const; + + AMDGPUOperand::Ptr defaultClampSI() const; + AMDGPUOperand::Ptr defaultOModSI() const; + OperandMatchResultTy parseOModOperand(OperandVector &Operands); void cvtId(MCInst &Inst, const OperandVector &Operands); void cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands); - void cvtVOP3_2_nomod(MCInst &Inst, const OperandVector &Operands); - void cvtVOP3_only(MCInst &Inst, const OperandVector &Operands); void cvtVOP3(MCInst &Inst, const OperandVector &Operands); void cvtMIMG(MCInst &Inst, const OperandVector &Operands); void cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands); OperandMatchResultTy parseDPPCtrlOps(OperandVector &Operands, bool AddDefault); - void cvtDPP_mod(MCInst &Inst, const OperandVector &Operands); - void cvtDPP_nomod(MCInst &Inst, const OperandVector &Operands); - void cvtDPP(MCInst &Inst, const OperandVector &Operands, bool HasMods); + AMDGPUOperand::Ptr defaultRowMask() const; + AMDGPUOperand::Ptr defaultBankMask() const; + AMDGPUOperand::Ptr defaultBoundCtrl() const; + void cvtDPP(MCInst &Inst, const OperandVector &Operands); OperandMatchResultTy parseSDWASel(OperandVector &Operands); OperandMatchResultTy parseSDWADstUnused(OperandVector &Operands); + AMDGPUOperand::Ptr defaultSDWASel() const; + AMDGPUOperand::Ptr defaultSDWADstUnused() const; }; struct OptionalOperand { @@ -1135,21 +1153,6 @@ return true; } -static bool operandsHaveModifiers(const OperandVector &Operands) { - - for (unsigned i = 0, e = Operands.size(); i != e; ++i) { - const AMDGPUOperand &Op = ((AMDGPUOperand&)*Operands[i]); - if (Op.isRegKind() && Op.hasModifiers()) - return true; - if (Op.isImm() && Op.hasModifiers()) - return true; - if (Op.isImm() && (Op.getImmTy() == AMDGPUOperand::ImmTyOModSI || - Op.getImmTy() == AMDGPUOperand::ImmTyClampSI)) - return true; - } - return false; -} - AMDGPUAsmParser::OperandMatchResultTy AMDGPUAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) { @@ -1746,6 +1749,10 @@ return isImmTy(ImmTyHwreg); } +AMDGPUOperand::Ptr AMDGPUAsmParser::defaultHwreg() const { + return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyHwreg); +} + //===----------------------------------------------------------------------===// // sopp branch targets //===----------------------------------------------------------------------===// @@ -1777,58 +1784,28 @@ // flat //===----------------------------------------------------------------------===// -void AMDGPUAsmParser::cvtFlat(MCInst &Inst, - const OperandVector &Operands) { - OptionalImmIndexMap OptionalIdx; - - for (unsigned i = 1, e = Operands.size(); i != e; ++i) { - AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]); - - // Add the register arguments - if (Op.isReg()) { - Op.addRegOperands(Inst, 1); - continue; - } +//===----------------------------------------------------------------------===// +// mubuf 
+//===----------------------------------------------------------------------===// - OptionalIdx[Op.getImmTy()] = i; - } - addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC); - addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC); - addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE); +bool AMDGPUOperand::isMubufOffset() const { + return isImmTy(ImmTyOffset) && isUInt<12>(getImm()); } +AMDGPUOperand::Ptr AMDGPUAsmParser::defaultMubufOffset() const { + return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyOffset); +} -void AMDGPUAsmParser::cvtFlatAtomic(MCInst &Inst, - const OperandVector &Operands) { - OptionalImmIndexMap OptionalIdx; - - for (unsigned i = 1, e = Operands.size(); i != e; ++i) { - AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]); - - // Add the register arguments - if (Op.isReg()) { - Op.addRegOperands(Inst, 1); - continue; - } - - // Handle 'glc' token for flat atomics. - if (Op.isToken()) { - continue; - } - - // Handle optional arguments - OptionalIdx[Op.getImmTy()] = i; - } - addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC); - addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE); +AMDGPUOperand::Ptr AMDGPUAsmParser::defaultGLC() const { + return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyGLC); } -//===----------------------------------------------------------------------===// -// mubuf -//===----------------------------------------------------------------------===// +AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSLC() const { + return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTySLC); +} -bool AMDGPUOperand::isMubufOffset() const { - return isImmTy(ImmTyOffset) && isUInt<12>(getImm()); +AMDGPUOperand::Ptr AMDGPUAsmParser::defaultTFE() const { + return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyTFE); } void AMDGPUAsmParser::cvtMubuf(MCInst &Inst, @@ -1896,6 +1873,26 @@ return parseNamedBit("lwe", Operands, AMDGPUOperand::ImmTyLWE); } +AMDGPUOperand::Ptr AMDGPUAsmParser::defaultDMask() const { + return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyDMask); +} + +AMDGPUOperand::Ptr AMDGPUAsmParser::defaultUNorm() const { + return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyUNorm); +} + +AMDGPUOperand::Ptr AMDGPUAsmParser::defaultDA() const { + return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyDA); +} + +AMDGPUOperand::Ptr AMDGPUAsmParser::defaultR128() const { + return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyR128); +} + +AMDGPUOperand::Ptr AMDGPUAsmParser::defaultLWE() const { + return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyLWE); +} + //===----------------------------------------------------------------------===// // smrd //===----------------------------------------------------------------------===// @@ -1913,6 +1910,14 @@ return isImm() && !isUInt<8>(getImm()) && isUInt<32>(getImm()); } +AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDOffset() const { + return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyOffset); +} + +AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDLiteralOffset() const { + return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyOffset); +} + //===----------------------------------------------------------------------===// // vop3 //===----------------------------------------------------------------------===// @@ -2036,6 +2041,14 @@ } } +AMDGPUOperand::Ptr AMDGPUAsmParser::defaultClampSI() const { 
+ return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyClampSI); +} + +AMDGPUOperand::Ptr AMDGPUAsmParser::defaultOModSI() const { + return AMDGPUOperand::CreateImm(1, SMLoc(), AMDGPUOperand::ImmTyOModSI); +} + void AMDGPUAsmParser::cvtId(MCInst &Inst, const OperandVector &Operands) { unsigned I = 1; const MCInstrDesc &Desc = MII.get(Inst.getOpcode()); @@ -2055,18 +2068,6 @@ } } -void AMDGPUAsmParser::cvtVOP3_2_nomod(MCInst &Inst, const OperandVector &Operands) { - if (operandsHaveModifiers(Operands)) { - cvtVOP3(Inst, Operands); - } else { - cvtId(Inst, Operands); - } -} - -void AMDGPUAsmParser::cvtVOP3_only(MCInst &Inst, const OperandVector &Operands) { - cvtVOP3(Inst, Operands); -} - void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands) { OptionalImmIndexMap OptionalIdx; unsigned I = 1; @@ -2300,16 +2301,19 @@ return MatchOperand_Success; } -void AMDGPUAsmParser::cvtDPP_mod(MCInst &Inst, const OperandVector &Operands) { - cvtDPP(Inst, Operands, true); +AMDGPUOperand::Ptr AMDGPUAsmParser::defaultRowMask() const { + return AMDGPUOperand::CreateImm(0xf, SMLoc(), AMDGPUOperand::ImmTyDppRowMask); +} + +AMDGPUOperand::Ptr AMDGPUAsmParser::defaultBankMask() const { + return AMDGPUOperand::CreateImm(0xf, SMLoc(), AMDGPUOperand::ImmTyDppBankMask); } -void AMDGPUAsmParser::cvtDPP_nomod(MCInst &Inst, const OperandVector &Operands) { - cvtDPP(Inst, Operands, false); +AMDGPUOperand::Ptr AMDGPUAsmParser::defaultBoundCtrl() const { + return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyDppBoundCtrl); } -void AMDGPUAsmParser::cvtDPP(MCInst &Inst, const OperandVector &Operands, - bool HasMods) { +void AMDGPUAsmParser::cvtDPP(MCInst &Inst, const OperandVector &Operands) { OptionalImmIndexMap OptionalIdx; unsigned I = 1; @@ -2321,9 +2325,8 @@ for (unsigned E = Operands.size(); I != E; ++I) { AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]); // Add the register arguments - if (!HasMods && Op.isReg()) { - Op.addRegOperands(Inst, 1); - } else if (HasMods && Op.isRegOrImmWithInputMods()) { + if (Op.isRegOrImmWithInputMods()) { + // We convert only instructions with modifiers Op.addRegOrImmWithInputModsOperands(Inst, 2); } else if (Op.isDPPCtrl()) { Op.addImmOperands(Inst, 1); @@ -2415,6 +2418,14 @@ return MatchOperand_Success; } +AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSDWASel() const { + return AMDGPUOperand::CreateImm(6, SMLoc(), AMDGPUOperand::ImmTySdwaSel); +} + +AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSDWADstUnused() const { + return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTySdwaDstUnused); +} + /// Force static initialization. 
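
For illustration (a hand-written sketch of what the tblgen'erated converter
effectively does; it is not code from this patch): when the optional 'glc'
token is absent from the assembly text, the converter fabricates the operand
through the parser hook and renders it with the same RenderMethod an
explicitly parsed operand would use. Assuming an MCInst named Inst is being
filled in:

    // Sketch: materialize the default and render it into the instruction.
    AMDGPUOperand::Ptr Glc = defaultGLC();  // immediate 0 tagged ImmTyGLC
    Glc->addImmOperands(Inst, 1);           // addImmOperands is the
                                            // RenderMethod of NamedMatchClass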
extern "C" void LLVMInitializeAMDGPUAsmParser() { Index: llvm/trunk/lib/Target/AMDGPU/SIInstrFormats.td =================================================================== --- llvm/trunk/lib/Target/AMDGPU/SIInstrFormats.td +++ llvm/trunk/lib/Target/AMDGPU/SIInstrFormats.td @@ -147,8 +147,9 @@ let AsmMatchConverter = !if(!eq(VOP3Only,1), - "cvtVOP3_only", - !if(!eq(HasMods,1), "cvtVOP3_2_mod", "cvtVOP3_2_nomod")); + "cvtVOP3", + !if(!eq(HasMods,1), "cvtVOP3_2_mod", "")); + let isCodeGenOnly = 0; int Size = 8; @@ -710,7 +711,6 @@ let UseNamedOperandTable = 1; let hasSideEffects = 0; - let AsmMatchConverter = "cvtFlat"; let SchedRW = [WriteVMEM]; } Index: llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.td =================================================================== --- llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.td +++ llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.td @@ -467,6 +467,7 @@ let ParserMethod = "parse"#CName; let RenderMethod = "addImmOperands"; let IsOptional = Optional; + let DefaultMethod = "default"#CName; } def sdwa_sel : NamedMatchClass<"SDWASel">; @@ -3118,7 +3119,7 @@ dag outs_noret = (outs), string asm_noret = asm_name#" $addr, $data"#"$slc"#"$tfe"> { - let mayLoad = 1, mayStore = 1, glc = 0, vdst = 0, AsmMatchConverter = "cvtFlatAtomic" in { + let mayLoad = 1, mayStore = 1, glc = 0, vdst = 0 in { def "" : FLAT_Pseudo , @@ -3135,7 +3136,7 @@ asm_noret>; } - let glc = 1, hasPostISelHook = 1, AsmMatchConverter = "cvtFlatAtomic" in { + let glc = 1, hasPostISelHook = 1 in { defm _RTN : FLAT_AtomicRet_m ParserMethod = ""; Entry->DiagnosticType = ""; Entry->IsOptional = false; + Entry->DefaultMethod = ""; } return Entry; @@ -1254,6 +1265,7 @@ // FIXME: diagnostic type. CI->DiagnosticType = ""; CI->IsOptional = false; + CI->DefaultMethod = ""; // unused RegisterSetClasses.insert(std::make_pair(RS, CI)); ++Index; } @@ -1372,6 +1384,15 @@ if (BitInit *BI = dyn_cast(IsOptional)) CI->IsOptional = BI->getValue(); + // Get or construct the default method name. + Init *DMName = Rec->getValueInit("DefaultMethod"); + if (StringInit *SI = dyn_cast(DMName)) { + CI->DefaultMethod = SI->getValue(); + } else { + assert(isa(DMName) && "Unexpected DefaultMethod field!"); + CI->DefaultMethod = "default" + CI->ClassName + "Operands"; + } + ++Index; } } @@ -1808,7 +1829,8 @@ static void emitConvertFuncs(CodeGenTarget &Target, StringRef ClassName, std::vector> &Infos, - bool HasMnemonicFirst, raw_ostream &OS) { + bool HasMnemonicFirst, bool HasOptionalOperands, + raw_ostream &OS) { SmallSetVector OperandConversionKinds; SmallSetVector InstructionConversionKinds; std::vector > ConversionTable; @@ -1823,24 +1845,40 @@ std::string ConvertFnBody; raw_string_ostream CvtOS(ConvertFnBody); // Start the unified conversion function. 
- CvtOS << "void " << Target.getName() << ClassName << "::\n" - << "convertToMCInst(unsigned Kind, MCInst &Inst, " - << "unsigned Opcode,\n" - << " const OperandVector" - << " &Operands) {\n" - << " assert(Kind < CVT_NUM_SIGNATURES && \"Invalid signature!\");\n" - << " const uint8_t *Converter = ConversionTable[Kind];\n" - << " Inst.setOpcode(Opcode);\n" - << " for (const uint8_t *p = Converter; *p; p+= 2) {\n" - << " switch (*p) {\n" - << " default: llvm_unreachable(\"invalid conversion entry!\");\n" - << " case CVT_Reg:\n" - << " static_cast<" << TargetOperandClass - << "&>(*Operands[*(p + 1)]).addRegOperands(Inst, 1);\n" - << " break;\n" - << " case CVT_Tied:\n" - << " Inst.addOperand(Inst.getOperand(*(p + 1)));\n" - << " break;\n"; + if (HasOptionalOperands) { + CvtOS << "void " << Target.getName() << ClassName << "::\n" + << "convertToMCInst(unsigned Kind, MCInst &Inst, " + << "unsigned Opcode,\n" + << " const OperandVector &Operands,\n" + << " const SmallBitVector &OptionalOperandsMask) {\n"; + } else { + CvtOS << "void " << Target.getName() << ClassName << "::\n" + << "convertToMCInst(unsigned Kind, MCInst &Inst, " + << "unsigned Opcode,\n" + << " const OperandVector &Operands) {\n"; + } + CvtOS << " assert(Kind < CVT_NUM_SIGNATURES && \"Invalid signature!\");\n"; + CvtOS << " const uint8_t *Converter = ConversionTable[Kind];\n"; + if (HasOptionalOperands) { + CvtOS << " unsigned NumDefaults = 0;\n"; + } + CvtOS << " unsigned OpIdx;\n"; + CvtOS << " Inst.setOpcode(Opcode);\n"; + CvtOS << " for (const uint8_t *p = Converter; *p; p+= 2) {\n"; + if (HasOptionalOperands) { + CvtOS << " OpIdx = *(p + 1) - NumDefaults;\n"; + } else { + CvtOS << " OpIdx = *(p + 1);\n"; + } + CvtOS << " switch (*p) {\n"; + CvtOS << " default: llvm_unreachable(\"invalid conversion entry!\");\n"; + CvtOS << " case CVT_Reg:\n"; + CvtOS << " static_cast<" << TargetOperandClass + << "&>(*Operands[OpIdx]).addRegOperands(Inst, 1);\n"; + CvtOS << " break;\n"; + CvtOS << " case CVT_Tied:\n"; + CvtOS << " Inst.addOperand(Inst.getOperand(OpIdx));\n"; + CvtOS << " break;\n"; std::string OperandFnBody; raw_string_ostream OpOS(OperandFnBody); @@ -1934,6 +1972,11 @@ // the index of its entry in the vector). std::string Name = "CVT_" + (Op.Class->isRegisterClass() ? "Reg" : Op.Class->RenderMethod); + if (Op.Class->IsOptional) { + // For optional operands we must also care about DefaultMethod + assert(HasOptionalOperands); + Name += "_" + Op.Class->DefaultMethod; + } Name = getEnumNameForToken(Name); bool IsNewConverter = false; @@ -1949,11 +1992,27 @@ // This is a new operand kind. Add a handler for it to the // converter driver. - CvtOS << " case " << Name << ":\n" - << " static_cast<" << TargetOperandClass - << "&>(*Operands[*(p + 1)])." << Op.Class->RenderMethod - << "(Inst, " << OpInfo.MINumOperands << ");\n" - << " break;\n"; + CvtOS << " case " << Name << ":\n"; + if (Op.Class->IsOptional) { + // If optional operand is not present in actual instruction then we + // should call its DefaultMethod before RenderMethod + assert(HasOptionalOperands); + CvtOS << " if (OptionalOperandsMask[*(p + 1) - 1]) {\n" + << " " << Op.Class->DefaultMethod << "()" + << "->" << Op.Class->RenderMethod << "(Inst, " + << OpInfo.MINumOperands << ");\n" + << " ++NumDefaults;\n" + << " } else {\n" + << " static_cast<" << TargetOperandClass + << "&>(*Operands[OpIdx])." << Op.Class->RenderMethod + << "(Inst, " << OpInfo.MINumOperands << ");\n" + << " }\n"; + } else { + CvtOS << " static_cast<" << TargetOperandClass + << "&>(*Operands[OpIdx])." 
<< Op.Class->RenderMethod + << "(Inst, " << OpInfo.MINumOperands << ");\n"; + } + CvtOS << " break;\n"; // Add a handler for the operand number lookup. OpOS << " case " << Name << ":\n" @@ -2806,6 +2865,7 @@ Info.buildOperandMatchInfo(); bool HasMnemonicFirst = AsmParser->getValueAsBit("HasMnemonicFirst"); + bool HasOptionalOperands = Info.hasOptionalOperands(); // Write the output. @@ -2815,10 +2875,16 @@ OS << " // This should be included into the middle of the declaration of\n"; OS << " // your subclasses implementation of MCTargetAsmParser.\n"; OS << " uint64_t ComputeAvailableFeatures(const FeatureBitset& FB) const;\n"; - OS << " void convertToMCInst(unsigned Kind, MCInst &Inst, " - << "unsigned Opcode,\n" - << " const OperandVector " - << "&Operands);\n"; + if (HasOptionalOperands) { + OS << " void convertToMCInst(unsigned Kind, MCInst &Inst, " + << "unsigned Opcode,\n" + << " const OperandVector &Operands,\n" + << " const SmallBitVector &OptionalOperandsMask);\n"; + } else { + OS << " void convertToMCInst(unsigned Kind, MCInst &Inst, " + << "unsigned Opcode,\n" + << " const OperandVector &Operands);\n"; + } OS << " void convertToMapAndConstraints(unsigned Kind,\n "; OS << " const OperandVector &Operands) override;\n"; if (HasMnemonicFirst) @@ -2885,7 +2951,8 @@ // Generate the convertToMCInst function to convert operands into an MCInst. // Also, generate the convertToMapAndConstraints function for MS-style inline // assembly. The latter doesn't actually generate a MCInst. - emitConvertFuncs(Target, ClassName, Info.Matchables, HasMnemonicFirst, OS); + emitConvertFuncs(Target, ClassName, Info.Matchables, HasMnemonicFirst, + HasOptionalOperands, OS); // Emit the enumeration for classes which participate in matching. emitMatchClassEnumeration(Target, Info.Classes, OS); @@ -3067,6 +3134,9 @@ OS << " bool HadMatchOtherThanPredicate = false;\n"; OS << " unsigned RetCode = Match_InvalidOperand;\n"; OS << " uint64_t MissingFeatures = ~0ULL;\n"; + if (HasOptionalOperands) { + OS << " SmallBitVector OptionalOperandsMask(" << MaxNumOperands << ");\n"; + } OS << " // Set ErrorInfo to the operand that mismatches if it is\n"; OS << " // wrong for all instances of the instruction.\n"; OS << " ErrorInfo = ~0ULL;\n"; @@ -3111,6 +3181,9 @@ // Emit check that the subclasses match. OS << " bool OperandsValid = true;\n"; + if (HasOptionalOperands) { + OS << " OptionalOperandsMask.reset(0, " << MaxNumOperands << ");\n"; + } OS << " for (unsigned FormalIdx = " << (HasMnemonicFirst ? "0" : "SIndex") << ", ActualIdx = " << (HasMnemonicFirst ? 
"1" : "SIndex") << "; FormalIdx != " << MaxNumOperands << "; ++FormalIdx) {\n"; @@ -3120,6 +3193,10 @@ OS << " OperandsValid = (Formal == " <<"InvalidMatchClass) || " "isSubclass(Formal, OptionalMatchClass);\n"; OS << " if (!OperandsValid) ErrorInfo = ActualIdx;\n"; + if (HasOptionalOperands) { + OS << " OptionalOperandsMask.set(FormalIdx, " << MaxNumOperands + << ");\n"; + } OS << " break;\n"; OS << " }\n"; OS << " MCParsedAsmOperand &Actual = *Operands[ActualIdx];\n"; @@ -3140,8 +3217,12 @@ OS << " // If current formal operand wasn't matched and it is optional\n" << " // then try to match next formal operand\n"; OS << " if (Diag == Match_InvalidOperand " - << "&& isSubclass(Formal, OptionalMatchClass))\n"; + << "&& isSubclass(Formal, OptionalMatchClass)) {\n"; + if (HasOptionalOperands) { + OS << " OptionalOperandsMask.set(FormalIdx);\n"; + } OS << " continue;\n"; + OS << " }\n"; OS << " // If this operand is broken for all of the instances of this\n"; OS << " // mnemonic, keep track of it so we can report loc info.\n"; OS << " // If we already had a match that only failed due to a\n"; @@ -3180,7 +3261,12 @@ OS << " }\n\n"; OS << " // We have selected a definite instruction, convert the parsed\n" << " // operands into the appropriate MCInst.\n"; - OS << " convertToMCInst(it->ConvertFn, Inst, it->Opcode, Operands);\n"; + if (HasOptionalOperands) { + OS << " convertToMCInst(it->ConvertFn, Inst, it->Opcode, Operands,\n" + << " OptionalOperandsMask);\n"; + } else { + OS << " convertToMCInst(it->ConvertFn, Inst, it->Opcode, Operands);\n"; + } OS << "\n"; // Verify the instruction with the target-specific match predicate function.