Index: llvm/trunk/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
===================================================================
--- llvm/trunk/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
+++ llvm/trunk/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
@@ -214,7 +214,7 @@
   }
 
   bool isReg() const override {
-    return isRegKind() && !Reg.Mods.hasModifiers();
+    return isRegKind() && !hasModifiers();
   }
 
   bool isRegOrImmWithInputMods(MVT type) const {
@@ -245,6 +245,15 @@
     return isRegOrImmWithInputMods(MVT::f64);
   }
 
+  bool isVReg() const {
+    return isRegClass(AMDGPU::VGPR_32RegClassID) ||
+           isRegClass(AMDGPU::VReg_64RegClassID) ||
+           isRegClass(AMDGPU::VReg_96RegClassID) ||
+           isRegClass(AMDGPU::VReg_128RegClassID) ||
+           isRegClass(AMDGPU::VReg_256RegClassID) ||
+           isRegClass(AMDGPU::VReg_512RegClassID);
+  }
+
   bool isVReg32OrOff() const {
     return isOff() || isRegClass(AMDGPU::VGPR_32RegClassID);
   }
@@ -299,28 +308,32 @@
 
   bool isRegClass(unsigned RCID) const;
 
+  bool isRegOrInlineNoMods(unsigned RCID, MVT type) const {
+    return (isRegClass(RCID) || isInlinableImm(type)) && !hasModifiers();
+  }
+
   bool isSCSrcB16() const {
-    return isRegClass(AMDGPU::SReg_32RegClassID) || isInlinableImm(MVT::i16);
+    return isRegOrInlineNoMods(AMDGPU::SReg_32RegClassID, MVT::i16);
   }
 
   bool isSCSrcB32() const {
-    return isRegClass(AMDGPU::SReg_32RegClassID) || isInlinableImm(MVT::i32);
+    return isRegOrInlineNoMods(AMDGPU::SReg_32RegClassID, MVT::i32);
   }
 
   bool isSCSrcB64() const {
-    return isRegClass(AMDGPU::SReg_64RegClassID) || isInlinableImm(MVT::i64);
+    return isRegOrInlineNoMods(AMDGPU::SReg_64RegClassID, MVT::i64);
   }
 
   bool isSCSrcF16() const {
-    return isRegClass(AMDGPU::SReg_32RegClassID) || isInlinableImm(MVT::f16);
+    return isRegOrInlineNoMods(AMDGPU::SReg_32RegClassID, MVT::f16);
  }
 
   bool isSCSrcF32() const {
-    return isRegClass(AMDGPU::SReg_32RegClassID) || isInlinableImm(MVT::f32);
+    return isRegOrInlineNoMods(AMDGPU::SReg_32RegClassID, MVT::f32);
   }
 
   bool isSCSrcF64() const {
-    return isRegClass(AMDGPU::SReg_64RegClassID) || isInlinableImm(MVT::f64);
+    return isRegOrInlineNoMods(AMDGPU::SReg_64RegClassID, MVT::f64);
   }
 
   bool isSSrcB32() const {
@@ -350,27 +363,27 @@
   }
 
   bool isVCSrcB32() const {
-    return isRegClass(AMDGPU::VS_32RegClassID) || isInlinableImm(MVT::i32);
+    return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::i32);
   }
 
   bool isVCSrcB64() const {
-    return isRegClass(AMDGPU::VS_64RegClassID) || isInlinableImm(MVT::i64);
+    return isRegOrInlineNoMods(AMDGPU::VS_64RegClassID, MVT::i64);
   }
 
   bool isVCSrcB16() const {
-    return isRegClass(AMDGPU::VS_32RegClassID) || isInlinableImm(MVT::i16);
+    return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::i16);
   }
 
   bool isVCSrcF32() const {
-    return isRegClass(AMDGPU::VS_32RegClassID) || isInlinableImm(MVT::f32);
+    return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::f32);
   }
 
   bool isVCSrcF64() const {
-    return isRegClass(AMDGPU::VS_64RegClassID) || isInlinableImm(MVT::f64);
+    return isRegOrInlineNoMods(AMDGPU::VS_64RegClassID, MVT::f64);
   }
 
   bool isVCSrcF16() const {
-    return isRegClass(AMDGPU::VS_32RegClassID) || isInlinableImm(MVT::f16);
+    return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::f16);
   }
 
   bool isVSrcB32() const {
@@ -534,6 +547,23 @@
     addRegOrImmWithInputModsOperands(Inst, N);
   }
 
+  void addRegWithInputModsOperands(MCInst &Inst, unsigned N) const {
+    Modifiers Mods = getModifiers();
+    Inst.addOperand(MCOperand::createImm(Mods.getModifiersOperand()));
+    assert(isRegKind());
+    addRegOperands(Inst, N);
+  }
+
+  void addRegWithFPInputModsOperands(MCInst &Inst, unsigned N) const {
+    assert(!hasIntModifiers());
+    addRegWithInputModsOperands(Inst, N);
+  }
+
+  void addRegWithIntInputModsOperands(MCInst &Inst, unsigned N) const {
+    assert(!hasFPModifiers());
+    addRegWithInputModsOperands(Inst, N);
+  }
+
   void addSoppBrTargetOperands(MCInst &Inst, unsigned N) const {
     if (isImm())
       addImmOperands(Inst, N);
@@ -852,9 +882,12 @@
                                     StringRef &Value);
 
   OperandMatchResultTy parseImm(OperandVector &Operands);
+  OperandMatchResultTy parseReg(OperandVector &Operands);
   OperandMatchResultTy parseRegOrImm(OperandVector &Operands);
-  OperandMatchResultTy parseRegOrImmWithFPInputMods(OperandVector &Operands);
-  OperandMatchResultTy parseRegOrImmWithIntInputMods(OperandVector &Operands);
+  OperandMatchResultTy parseRegOrImmWithFPInputMods(OperandVector &Operands, bool AllowImm = true);
+  OperandMatchResultTy parseRegOrImmWithIntInputMods(OperandVector &Operands, bool AllowImm = true);
+  OperandMatchResultTy parseRegWithFPInputMods(OperandVector &Operands);
+  OperandMatchResultTy parseRegWithIntInputMods(OperandVector &Operands);
   OperandMatchResultTy parseVReg32OrOff(OperandVector &Operands);
 
   void cvtDSOffset01(MCInst &Inst, const OperandVector &Operands);
@@ -1057,7 +1090,7 @@
 }
 
 bool AMDGPUOperand::isRegClass(unsigned RCID) const {
-  return isReg() && AsmParser->getMRI()->getRegClass(RCID).contains(getReg());
+  return isRegKind() && AsmParser->getMRI()->getRegClass(RCID).contains(getReg());
 }
 
 void AMDGPUOperand::addImmOperands(MCInst &Inst, unsigned N, bool ApplyModifiers) const {
@@ -1468,23 +1501,28 @@
 }
 
 OperandMatchResultTy
-AMDGPUAsmParser::parseRegOrImm(OperandVector &Operands) {
-  auto res = parseImm(Operands);
-  if (res != MatchOperand_NoMatch) {
-    return res;
-  }
-
+AMDGPUAsmParser::parseReg(OperandVector &Operands) {
   if (auto R = parseRegister()) {
     assert(R->isReg());
     R->Reg.IsForcedVOP3 = isForcedVOP3();
     Operands.push_back(std::move(R));
     return MatchOperand_Success;
   }
-  return MatchOperand_ParseFail;
+  return MatchOperand_NoMatch;
 }
 
 OperandMatchResultTy
-AMDGPUAsmParser::parseRegOrImmWithFPInputMods(OperandVector &Operands) {
+AMDGPUAsmParser::parseRegOrImm(OperandVector &Operands) {
+  auto res = parseImm(Operands);
+  if (res != MatchOperand_NoMatch) {
+    return res;
+  }
+
+  return parseReg(Operands);
+}
+
+OperandMatchResultTy
+AMDGPUAsmParser::parseRegOrImmWithFPInputMods(OperandVector &Operands, bool AllowImm) {
   // XXX: During parsing we can't determine if minus sign means
   // negate-modifier or negative immediate value.
   // By default we suppose it is modifier.
@@ -1514,7 +1552,12 @@
     Abs = true;
   }
 
-  auto Res = parseRegOrImm(Operands);
+  OperandMatchResultTy Res;
+  if (AllowImm) {
+    Res = parseRegOrImm(Operands);
+  } else {
+    Res = parseReg(Operands);
+  }
   if (Res != MatchOperand_Success) {
     return Res;
   }
@@ -1548,7 +1591,7 @@
 }
 
 OperandMatchResultTy
-AMDGPUAsmParser::parseRegOrImmWithIntInputMods(OperandVector &Operands) {
+AMDGPUAsmParser::parseRegOrImmWithIntInputMods(OperandVector &Operands, bool AllowImm) {
   bool Sext = false;
   if (getLexer().getKind() == AsmToken::Identifier &&
       Parser.getTok().getString() == "sext") {
@@ -1561,7 +1604,12 @@
     Parser.Lex();
   }
 
-  auto Res = parseRegOrImm(Operands);
+  OperandMatchResultTy Res;
+  if (AllowImm) {
+    Res = parseRegOrImm(Operands);
+  } else {
+    Res = parseReg(Operands);
+  }
   if (Res != MatchOperand_Success) {
     return Res;
   }
@@ -1584,6 +1632,16 @@
   return MatchOperand_Success;
 }
 
+OperandMatchResultTy
+AMDGPUAsmParser::parseRegWithFPInputMods(OperandVector &Operands) {
+  return parseRegOrImmWithFPInputMods(Operands, false);
+}
+
+OperandMatchResultTy
+AMDGPUAsmParser::parseRegWithIntInputMods(OperandVector &Operands) {
+  return parseRegOrImmWithIntInputMods(Operands, false);
+}
+
 OperandMatchResultTy AMDGPUAsmParser::parseVReg32OrOff(OperandVector &Operands) {
   std::unique_ptr<AMDGPUOperand> Reg = parseRegister();
   if (Reg) {
@@ -3382,7 +3440,7 @@
       // Skip it.
       continue;
     } if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
-      Op.addRegOrImmWithFPInputModsOperands(Inst, 2);
+      Op.addRegWithFPInputModsOperands(Inst, 2);
     } else if (Op.isDPPCtrl()) {
       Op.addImmOperands(Inst, 1);
     } else if (Op.isImm()) {
@@ -3508,7 +3566,7 @@
       // Skip it.
      continue;
    } else if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
-      Op.addRegOrImmWithInputModsOperands(Inst, 2);
+      Op.addRegWithInputModsOperands(Inst, 2);
    } else if (Op.isImm()) {
      // Handle optional arguments
      OptionalIdx[Op.getImmTy()] = I;
Index: llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.td
===================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.td
+++ llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.td
@@ -557,6 +557,27 @@
 def Int32InputMods : IntInputMods<Int32InputModsMatchClass>;
 def Int64InputMods : IntInputMods<Int64InputModsMatchClass>;
 
+def FPVRegInputModsMatchClass : AsmOperandClass {
+  let Name = "VRegWithFPInputMods";
+  let ParserMethod = "parseRegWithFPInputMods";
+  let PredicateMethod = "isVReg";
+}
+
+def FPVRegInputMods : InputMods <FPVRegInputModsMatchClass> {
+  let PrintMethod = "printOperandAndFPInputMods";
+}
+
+def IntVRegInputModsMatchClass : AsmOperandClass {
+  let Name = "VRegWithIntInputMods";
+  let ParserMethod = "parseRegWithIntInputMods";
+  let PredicateMethod = "isVReg";
+}
+
+def IntVRegInputMods : InputMods <IntVRegInputModsMatchClass> {
+  let PrintMethod = "printOperandAndIntInputMods";
+}
+
+
 //===----------------------------------------------------------------------===//
 // Complex patterns
 //===----------------------------------------------------------------------===//
@@ -761,6 +782,15 @@
                             );
 }
 
+// Return type of input modifiers operand specified input operand for SDWA/DPP
+class getSrcModExt <ValueType VT> {
+  bit isFP = !if(!eq(VT.Value, f16.Value), 1,
+             !if(!eq(VT.Value, f32.Value), 1,
+             !if(!eq(VT.Value, f64.Value), 1,
+             0)));
+  Operand ret = !if(isFP, FPVRegInputMods, IntVRegInputMods);
+}
+
 // Returns the input arguments for VOP[12C] instructions for the given SrcVT.
 class getIns32 <RegisterOperand Src0RC, RegisterClass Src1RC, int NumSrcArgs> {
   dag ret = !if(!eq(NumSrcArgs, 1), (ins Src0RC:$src0),               // VOP1
@@ -1001,6 +1031,11 @@
   field Operand Src0Mod = getSrcMod<Src0VT>.ret;
   field Operand Src1Mod = getSrcMod<Src1VT>.ret;
   field Operand Src2Mod = getSrcMod<Src2VT>.ret;
+  field Operand Src0ModDPP = getSrcModExt<Src0VT>.ret;
+  field Operand Src1ModDPP = getSrcModExt<Src1VT>.ret;
+  field Operand Src0ModSDWA = getSrcModExt<Src0VT>.ret;
+  field Operand Src1ModSDWA = getSrcModExt<Src1VT>.ret;
+
 
   field bit HasDst = !if(!eq(DstVT.Value, untyped.Value), 0, 1);
   field bit HasDst32 = HasDst;
@@ -1038,15 +1073,16 @@
   field dag Outs32 = Outs;
   field dag Outs64 = Outs;
   field dag OutsDPP = getOutsExt.ret;
-  field dag OutsSDWA = getOutsExt.ret;
+  field dag OutsSDWA = getOutsExt.ret;
 
   field dag Ins32 = getIns32<Src0RC32, Src1RC32, NumSrcArgs>.ret;
   field dag Ins64 = getIns64<Src0RC64, Src1RC64, Src2RC64, NumSrcArgs,
                              HasModifiers, Src0Mod, Src1Mod, Src2Mod>.ret;
   field dag InsDPP = getInsDPP<Src0DPP, Src1DPP, NumSrcArgs,
-                               HasModifiers, Src0Mod, Src1Mod>.ret;
+                               HasModifiers, Src0ModDPP, Src1ModDPP>.ret;
   field dag InsSDWA = getInsSDWA<Src0SDWA, Src1SDWA, NumSrcArgs,
-                                 HasModifiers, Src0Mod, Src1Mod, DstVT>.ret;
+                                 HasModifiers, Src0ModSDWA, Src1ModSDWA,
+                                 DstVT>.ret;
 
   field string Asm32 = getAsm32.ret;
   field string Asm64 = getAsm64.ret;
Index: llvm/trunk/lib/Target/AMDGPU/VOP1Instructions.td
===================================================================
--- llvm/trunk/lib/Target/AMDGPU/VOP1Instructions.td
+++ llvm/trunk/lib/Target/AMDGPU/VOP1Instructions.td
@@ -232,7 +232,7 @@
   let Ins64 = (ins Src0RC64:$vdst, VSrc_b32:$src0);
   let InsDPP = (ins Src0RC32:$vdst, Src0RC32:$src0, dpp_ctrl:$dpp_ctrl,
                     row_mask:$row_mask, bank_mask:$bank_mask, bound_ctrl:$bound_ctrl);
-  let InsSDWA = (ins Src0RC32:$vdst, Int32InputMods:$src0_modifiers, VCSrc_b32:$src0,
+  let InsSDWA = (ins Src0RC32:$vdst, Src0ModSDWA:$src0_modifiers, VCSrc_b32:$src0,
                      clampmod:$clamp, dst_sel:$dst_sel, dst_unused:$dst_unused,
                      src0_sel:$src0_sel);
 
Index: llvm/trunk/lib/Target/AMDGPU/VOP2Instructions.td
===================================================================
--- llvm/trunk/lib/Target/AMDGPU/VOP2Instructions.td
+++ llvm/trunk/lib/Target/AMDGPU/VOP2Instructions.td
@@ -183,13 +183,13 @@
   let Ins32 = (ins Src0RC32:$src0, Src1RC32:$src1, VGPR_32:$src2);
   let Ins64 = getIns64<Src0RC64, Src1RC64, RegisterOperand<VGPR_32>, 3,
                        HasModifiers, Src0Mod, Src1Mod, Src2Mod>.ret;
-  let InsDPP = (ins FP32InputMods:$src0_modifiers, Src0DPP:$src0,
-                    FP32InputMods:$src1_modifiers, Src1DPP:$src1,
+  let InsDPP = (ins Src0ModDPP:$src0_modifiers, Src0DPP:$src0,
+                    Src1ModDPP:$src1_modifiers, Src1DPP:$src1,
                     VGPR_32:$src2, // stub argument
                     dpp_ctrl:$dpp_ctrl, row_mask:$row_mask,
                    bank_mask:$bank_mask, bound_ctrl:$bound_ctrl);
-  let InsSDWA = (ins FP32InputMods:$src0_modifiers, Src0SDWA:$src0,
-                     FP32InputMods:$src1_modifiers, Src1SDWA:$src1,
+  let InsSDWA = (ins Src0ModSDWA:$src0_modifiers, Src0SDWA:$src0,
+                     Src1ModSDWA:$src1_modifiers, Src1SDWA:$src1,
                      VGPR_32:$src2, // stub argument
                      clampmod:$clamp, dst_sel:$dst_sel, dst_unused:$dst_unused,
                      src0_sel:$src0_sel, src1_sel:$src1_sel);
Index: llvm/trunk/lib/Target/AMDGPU/VOPCInstructions.td
===================================================================
--- llvm/trunk/lib/Target/AMDGPU/VOPCInstructions.td
+++ llvm/trunk/lib/Target/AMDGPU/VOPCInstructions.td
@@ -517,8 +517,8 @@
                  VOPC_Profile<sched, vt> {
   let Ins64 = (ins Src0Mod:$src0_modifiers, Src0RC64:$src0, Src1RC64:$src1);
   let Asm64 = "$sdst, $src0_modifiers, $src1";
-  let InsSDWA = (ins Src0Mod:$src0_modifiers, Src0RC64:$src0,
-                 Int32InputMods:$src1_modifiers, Src1RC64:$src1,
+  let InsSDWA = (ins Src0ModSDWA:$src0_modifiers, Src0SDWA:$src0,
+                 Src1ModSDWA:$src1_modifiers, Src1SDWA:$src1,
                  clampmod:$clamp, src0_sel:$src0_sel, src1_sel:$src1_sel);
   let AsmSDWA = " vcc, $src0_modifiers, $src1_modifiers$clamp $src0_sel $src1_sel";
   let HasSrc1Mods = 0;
Index: llvm/trunk/test/MC/AMDGPU/vop_dpp.s
===================================================================
--- llvm/trunk/test/MC/AMDGPU/vop_dpp.s
+++ llvm/trunk/test/MC/AMDGPU/vop_dpp.s
@@ -1,7 +1,9 @@
-// RUN: llvm-mc -arch=amdgcn -mcpu=tonga -show-encoding %s | FileCheck %s --check-prefix=GCN --check-prefix=CIVI --check-prefix=VI
+// RUN: not llvm-mc -arch=amdgcn -mcpu=tonga -show-encoding %s | FileCheck %s --check-prefix=GCN --check-prefix=CIVI --check-prefix=VI
+
 // RUN: not llvm-mc -arch=amdgcn -show-encoding %s 2>&1 | FileCheck %s --check-prefix=NOSI --check-prefix=NOSICI
 // RUN: not llvm-mc -arch=amdgcn -mcpu=SI -show-encoding %s 2>&1 | FileCheck %s --check-prefix=NOSI --check-prefix=NOSICI
 // RUN: not llvm-mc -arch=amdgcn -mcpu=bonaire -show-encoding %s 2>&1 | FileCheck %s --check-prefix=NOSICI
+// RUN: not llvm-mc -arch=amdgcn -mcpu=tonga -show-encoding %s 2>&1 | FileCheck %s --check-prefix=NOVI
 
 //===----------------------------------------------------------------------===//
 // Check dpp_ctrl values
@@ -527,3 +529,31 @@
 // NOSICI: error:
 // VI: v_subbrev_u32_dpp v1, vcc, v2, v3, vcc row_shl:1 row_mask:0xa bank_mask:0x1 bound_ctrl:0 ; encoding: [0xfa,0x06,0x02,0x3c,0x02,0x01,0x09,0xa1]
 v_subbrev_u32 v1, vcc, v2, v3, vcc row_shl:1 row_mask:0xa bank_mask:0x1 bound_ctrl:0
+
+//===----------------------------------------------------------------------===//
+// Check that immediates and scalar regs are not supported
+//===----------------------------------------------------------------------===//
+
+// NOSICI: error:
+// NOVI: error:
+v_mov_b32 v0, 1 row_shl:1 row_mask:0xa bank_mask:0x1 bound_ctrl:0
+
+// NOSICI: error:
+// NOVI: error:
+v_and_b32 v0, 42, v1 row_shl:1 row_mask:0xa bank_mask:0x1 bound_ctrl:0
+
+// NOSICI: error:
+// NOVI: error:
+v_add_f32 v0, v1, 345 row_shl:1 row_mask:0xa bank_mask:0x1 bound_ctrl:0
+
+// NOSICI: error:
+// NOVI: error:
+v_mov_b32 v0, s1 row_shl:1 row_mask:0xa bank_mask:0x1 bound_ctrl:0
+
+// NOSICI: error:
+// NOVI: error:
+v_and_b32 v0, s42, v1 row_shl:1 row_mask:0xa bank_mask:0x1 bound_ctrl:0
+
+// NOSICI: error:
+// NOVI: error:
+v_add_f32 v0, v1, s45 row_shl:1 row_mask:0xa bank_mask:0x1 bound_ctrl:0
Index: llvm/trunk/test/MC/AMDGPU/vop_sdwa.s
===================================================================
--- llvm/trunk/test/MC/AMDGPU/vop_sdwa.s
+++ llvm/trunk/test/MC/AMDGPU/vop_sdwa.s
@@ -594,3 +594,39 @@
 // NOSICI: error:
 // VI: v_cmpx_class_f32 vcc, v1, v2 src0_sel:BYTE_2 src1_sel:WORD_0 ; encoding: [0xf9,0x04,0x22,0x7c,0x01,0x16,0x02,0x04]
 v_cmpx_class_f32 vcc, v1, v2 src0_sel:BYTE_2 src1_sel:WORD_0
+
+//===----------------------------------------------------------------------===//
+// Check that immediates and scalar regs are not supported
+//===----------------------------------------------------------------------===//
+
+// NOSICI: error:
+// NOVI: error: invalid operand for instruction
+v_mov_b32 v0, 1 src0_sel:BYTE_2 src1_sel:WORD_0
+
+// NOSICI: error:
+// NOVI: error: invalid operand for instruction
+v_and_b32 v0, 42, v1 src0_sel:BYTE_2 src1_sel:WORD_0
+
+// NOSICI: error:
+// NOVI: error: invalid operand for instruction
+v_add_f32 v0, v1, 345 src0_sel:BYTE_2 src1_sel:WORD_0
+
+// NOSICI: error:
+// NOVI: error: invalid operand for instruction
+v_cmpx_class_f32 vcc, -1, 200 src0_sel:BYTE_2 src1_sel:WORD_0
+
+// NOSICI: error:
+// NOVI: error: invalid operand for instruction
+v_mov_b32 v0, s1 src0_sel:BYTE_2 src1_sel:WORD_0
+
+// NOSICI: error:
+// NOVI: error: invalid operand for instruction
+v_and_b32 v0, s42, v1 src0_sel:BYTE_2 src1_sel:WORD_0
+
+// NOSICI: error:
+// NOVI: error: invalid operand for instruction
+v_add_f32 v0, v1, s45 src0_sel:BYTE_2 src1_sel:WORD_0
+
+// NOSICI: error:
+// NOVI: error: invalid operand for instruction
+v_cmpx_class_f32 vcc, s1, s2 src0_sel:BYTE_2 src1_sel:WORD_0