Index: lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
===================================================================
--- lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
+++ lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
@@ -461,6 +461,7 @@
   OperandMatchResultTy parseFlatOptionalOps(OperandVector &Operands);
   OperandMatchResultTy parseFlatAtomicOptionalOps(OperandVector &Operands);
   void cvtFlat(MCInst &Inst, const OperandVector &Operands);
+  void cvtFlatAtomic(MCInst &Inst, const OperandVector &Operands);
 
   void cvtMubuf(MCInst &Inst, const OperandVector &Operands);
   OperandMatchResultTy parseOffset(OperandVector &Operands);
@@ -647,31 +648,8 @@
   SMLoc ErrorLoc = IDLoc;
   if (ErrorInfo != ~0ULL) {
     if (ErrorInfo >= Operands.size()) {
-      if (isForcedVOP3()) {
-        // If 64-bit encoding has been forced we can end up with no
-        // clamp or omod operands if none of the registers have modifiers,
-        // so we need to add these to the operand list.
-        AMDGPUOperand &LastOp =
-            ((AMDGPUOperand &)*Operands[Operands.size() - 1]);
-        if (LastOp.isRegKind() ||
-           (LastOp.isImm() &&
-            LastOp.getImmTy() != AMDGPUOperand::ImmTyNone)) {
-          SMLoc S = Parser.getTok().getLoc();
-          Operands.push_back(AMDGPUOperand::CreateImm(0, S,
-                                          AMDGPUOperand::ImmTyClamp));
-          Operands.push_back(AMDGPUOperand::CreateImm(0, S,
-                                          AMDGPUOperand::ImmTyOMod));
-          bool Res = MatchAndEmitInstruction(IDLoc, Opcode, Operands,
-                                             Out, ErrorInfo,
-                                             MatchingInlineAsm);
-          if (!Res)
-            return Res;
-        }
-
-      }
       return Error(IDLoc, "too few operands for instruction");
     }
-
     ErrorLoc = ((AMDGPUOperand &)*Operands[ErrorInfo]).getStartLoc();
     if (ErrorLoc == SMLoc())
       ErrorLoc = IDLoc;
@@ -1234,13 +1212,6 @@
     }
   }
 
-  // Once we reach end of statement, continue parsing so we can add default
-  // values for optional arguments.
-  AMDGPUAsmParser::OperandMatchResultTy Res;
-  while ((Res = parseOperand(Operands, Name)) != MatchOperand_NoMatch) {
-    if (Res != MatchOperand_Success)
-      return Error(getLexer().getLoc(), "failed parsing operand.");
-  }
-
   return false;
 }
 
@@ -1329,6 +1300,18 @@
   return MatchOperand_Success;
 }
 
+typedef std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalImmIndexMap;
+
+void addOptionalImmOperand(MCInst& Inst, const OperandVector& Operands, OptionalImmIndexMap& OptionalIdx, enum AMDGPUOperand::ImmTy ImmT) {
+  auto i = OptionalIdx.find(ImmT);
+  if (i != OptionalIdx.end()) {
+    unsigned Idx = i->second;
+    ((AMDGPUOperand &)*Operands[Idx]).addImmOperands(Inst, 1);
+  } else {
+    Inst.addOperand(MCOperand::createImm(0));
+  }
+}
+
 static bool operandsHasOptionalOp(const OperandVector &Operands,
                                   const OptionalOperand &OOp) {
   for (unsigned i = 0; i < Operands.size(); i++) {
@@ -1365,11 +1348,15 @@
     if (Res != MatchOperand_Success)
       return Res;
 
+    bool DefaultValue = (Value == Op.Default);
+
     if (Op.ConvertResult && !Op.ConvertResult(Value)) {
       return MatchOperand_ParseFail;
    }
 
-    Operands.push_back(AMDGPUOperand::CreateImm(Value, S, Op.Type));
+    if (!DefaultValue) {
+      Operands.push_back(AMDGPUOperand::CreateImm(Value, S, Op.Type));
+    }
     return MatchOperand_Success;
   }
   return MatchOperand_NoMatch;
 }
 
@@ -1423,7 +1410,7 @@
 void AMDGPUAsmParser::cvtDSOffset01(MCInst &Inst,
                                     const OperandVector &Operands) {
-  std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;
+  OptionalImmIndexMap OptionalIdx;
 
   for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
     AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
@@ -1438,13 +1425,10 @@
     OptionalIdx[Op.getImmTy()] = i;
   }
 
-  unsigned Offset0Idx = OptionalIdx[AMDGPUOperand::ImmTyDSOffset0];
-  unsigned Offset1Idx = OptionalIdx[AMDGPUOperand::ImmTyDSOffset1];
-  unsigned GDSIdx = OptionalIdx[AMDGPUOperand::ImmTyGDS];
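+  // Render offset0/offset1/gds in the MCInst's fixed operand order; any
+  // optional immediate the source text omitted defaults to 0.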
+  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDSOffset0);
+  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDSOffset1);
+  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
 
-  ((AMDGPUOperand &)*Operands[Offset0Idx]).addImmOperands(Inst, 1); // offset0
-  ((AMDGPUOperand &)*Operands[Offset1Idx]).addImmOperands(Inst, 1); // offset1
-  ((AMDGPUOperand &)*Operands[GDSIdx]).addImmOperands(Inst, 1); // gds
   Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
 }
 
@@ -1471,12 +1455,11 @@
     OptionalIdx[Op.getImmTy()] = i;
   }
 
-  unsigned OffsetIdx = OptionalIdx[AMDGPUOperand::ImmTyOffset];
-  ((AMDGPUOperand &)*Operands[OffsetIdx]).addImmOperands(Inst, 1); // offset
+  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
 
   if (!GDSOnly) {
-    unsigned GDSIdx = OptionalIdx[AMDGPUOperand::ImmTyGDS];
-    ((AMDGPUOperand &)*Operands[GDSIdx]).addImmOperands(Inst, 1); // gds
+    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
   }
   Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
 }
 
@@ -1615,7 +1598,7 @@
 
 void AMDGPUAsmParser::cvtFlat(MCInst &Inst,
                               const OperandVector &Operands) {
-  std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;
+  OptionalImmIndexMap OptionalIdx;
 
   for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
     AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
@@ -1626,27 +1609,39 @@
       continue;
     }
 
-    // Handle 'glc' token which is sometimes hard-coded into the
-    // asm string. There are no MCInst operands for these.
-    if (Op.isToken())
-      continue;
-
-    // Handle optional arguments
     OptionalIdx[Op.getImmTy()] = i;
   }
+  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
+  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
+  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
+}
 
-  // flat atomic instructions don't have a glc argument.
-  if (OptionalIdx.count(AMDGPUOperand::ImmTyGLC)) {
-    unsigned GLCIdx = OptionalIdx[AMDGPUOperand::ImmTyGLC];
-    ((AMDGPUOperand &)*Operands[GLCIdx]).addImmOperands(Inst, 1);
-  }
-  unsigned SLCIdx = OptionalIdx[AMDGPUOperand::ImmTySLC];
-  unsigned TFEIdx = OptionalIdx[AMDGPUOperand::ImmTyTFE];
+void AMDGPUAsmParser::cvtFlatAtomic(MCInst &Inst,
+                                    const OperandVector &Operands) {
+  OptionalImmIndexMap OptionalIdx;
 
-  ((AMDGPUOperand &)*Operands[SLCIdx]).addImmOperands(Inst, 1);
-  ((AMDGPUOperand &)*Operands[TFEIdx]).addImmOperands(Inst, 1);
+  bool token = false;
+  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
+    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
+
+    // Add the register arguments
+    if (Op.isReg()) {
+      Op.addRegOperands(Inst, 1);
+      continue;
+    }
+
+    // Handle 'glc' token for flat atomics.
+    if (Op.isToken()) {
+      token = true;
+      continue;
+    }
+
+    // Handle optional arguments
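+    // 'token' accounts for the hard-coded "glc" in the returning atomic's
+    // asm string, which occupies a parsed-operand slot of its own.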
+    OptionalIdx[Op.getImmTy()] = token ? i - 1 : i;
+  }
+  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
+  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
 }
 
 //===----------------------------------------------------------------------===//
@@ -1691,7 +1686,7 @@
 
 void AMDGPUAsmParser::cvtMubuf(MCInst &Inst,
                                const OperandVector &Operands) {
-  std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;
+  OptionalImmIndexMap OptionalIdx;
 
   for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
     AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
@@ -1719,17 +1714,10 @@
     OptionalIdx[Op.getImmTy()] = i;
   }
 
-  assert(OptionalIdx.size() == 4);
-
-  unsigned OffsetIdx = OptionalIdx[AMDGPUOperand::ImmTyOffset];
-  unsigned GLCIdx = OptionalIdx[AMDGPUOperand::ImmTyGLC];
-  unsigned SLCIdx = OptionalIdx[AMDGPUOperand::ImmTySLC];
-  unsigned TFEIdx = OptionalIdx[AMDGPUOperand::ImmTyTFE];
-
-  ((AMDGPUOperand &)*Operands[OffsetIdx]).addImmOperands(Inst, 1);
-  ((AMDGPUOperand &)*Operands[GLCIdx]).addImmOperands(Inst, 1);
-  ((AMDGPUOperand &)*Operands[SLCIdx]).addImmOperands(Inst, 1);
-  ((AMDGPUOperand &)*Operands[TFEIdx]).addImmOperands(Inst, 1);
+  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
+  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
+  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
+  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
 }
 
 //===----------------------------------------------------------------------===//
@@ -1863,7 +1851,8 @@
 }
 
 void AMDGPUAsmParser::cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands) {
-  if (operandsHaveModifiers(Operands) || isForcedVOP3()) {
+  uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;
+  if (TSFlags & SIInstrFlags::VOP3) {
     cvtVOP3(Inst, Operands);
   } else {
     cvtId(Inst, Operands);
Index: lib/Target/AMDGPU/SIInstrInfo.td
===================================================================
--- lib/Target/AMDGPU/SIInstrInfo.td
+++ lib/Target/AMDGPU/SIInstrInfo.td
@@ -435,6 +435,7 @@
   let Name = "MubufOffset";
   let ParserMethod = "parseMubufOptionalOps";
   let RenderMethod = "addImmOperands";
+  let IsOptional = 1;
 }
 
 class DSOffsetBaseMatchClass <string parser> : AsmOperandClass {
   let Name = "DSOffset"#parser;
@@ -442,6 +443,7 @@
   let ParserMethod = parser;
   let RenderMethod = "addImmOperands";
   let PredicateMethod = "isDSOffset";
+  let IsOptional = 1;
 }
 
 def DSOffsetMatchClass : DSOffsetBaseMatchClass <"parseDSOptionalOps">;
@@ -452,6 +454,7 @@
   let ParserMethod = "parseDSOff01OptionalOps";
   let RenderMethod = "addImmOperands";
   let PredicateMethod = "isDSOffset01";
+  let IsOptional = 1;
 }
 
 class GDSBaseMatchClass <string parser> : AsmOperandClass {
   let Name = "GDS"#parser;
@@ -459,6 +462,7 @@
   let PredicateMethod = "isImm";
   let ParserMethod = parser;
   let RenderMethod = "addImmOperands";
+  let IsOptional = 1;
 }
 
 def GDSMatchClass : GDSBaseMatchClass <"parseDSOptionalOps">;
@@ -469,6 +473,7 @@
   let PredicateMethod = "isImm";
   let ParserMethod = parser;
   let RenderMethod = "addImmOperands";
+  let IsOptional = 1;
 }
 
 def GLCMubufMatchClass : GLCBaseMatchClass <"parseMubufOptionalOps">;
@@ -479,6 +484,7 @@
   let PredicateMethod = "isImm";
   let ParserMethod = parser;
   let RenderMethod = "addImmOperands";
+  let IsOptional = 1;
 }
 
 def SLCMubufMatchClass : SLCBaseMatchClass <"parseMubufOptionalOps">;
@@ -490,6 +496,7 @@
   let PredicateMethod = "isImm";
   let ParserMethod = parser;
   let RenderMethod = "addImmOperands";
+  let IsOptional = 1;
 }
 
 def TFEMubufMatchClass : TFEBaseMatchClass <"parseMubufOptionalOps">;
@@ -523,13 +530,21 @@
@@ "isSMRDLiteralOffset" >; +class OptionalImmAsmOperand : AsmOperandClass { + let Name = "Imm"#OpName; + let PredicateMethod = "isImm"; + let IsOptional = 1; +} + let OperandType = "OPERAND_IMMEDIATE" in { def offen : Operand { let PrintMethod = "printOffen"; + let ParserMatchClass = OptionalImmAsmOperand<"offen">; } def idxen : Operand { let PrintMethod = "printIdxen"; + let ParserMatchClass = OptionalImmAsmOperand<"idxen">; } def addr64 : Operand { let PrintMethod = "printAddr64"; @@ -2864,7 +2879,7 @@ dag outs_noret = (outs), string asm_noret = asm_name#" $addr, $data"#"$slc"#"$tfe"> { - let mayLoad = 1, mayStore = 1, glc = 0, vdst = 0 in { + let mayLoad = 1, mayStore = 1, glc = 0, vdst = 0, AsmMatchConverter = "cvtFlatAtomic" in { def "" : FLAT_Pseudo , @@ -2881,7 +2896,7 @@ asm_noret>; } - let glc = 1, hasPostISelHook = 1 in { + let glc = 1, hasPostISelHook = 1, AsmMatchConverter = "cvtFlatAtomic" in { defm _RTN : FLAT_AtomicRet_m (VOP_CNDMASK.Asm64), [], name, 3>; + name#!cast(VOP_CNDMASK.Asm64), [], name, 3, 0>; } defm V_CNDMASK_B32 : V_CNDMASK, "v_cndmask_b32">;