Index: lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
===================================================================
--- lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
+++ lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
@@ -347,6 +347,11 @@
   bool ParseSectionDirectiveHSAText();
 
 public:
+public:
+  enum AMDGPUMatchResultTy {
+    Match_PreferE32 = FIRST_TARGET_MATCH_RESULT_TY
+  };
+
   AMDGPUAsmParser(MCSubtargetInfo &STI, MCAsmParser &_Parser,
                   const MCInstrInfo &MII,
                   const MCTargetOptions &Options)
@@ -556,6 +561,11 @@
       (getForcedEncodingSize() == 64 && !(TSFlags & SIInstrFlags::VOP3)))
     return Match_InvalidOperand;
 
+  if ((TSFlags & SIInstrFlags::VOP3) &&
+      (TSFlags & SIInstrFlags::VOPAsmPrefer32Bit) &&
+      getForcedEncodingSize() != 64)
+    return Match_PreferE32;
+
   return Match_Success;
 }
 
@@ -614,6 +624,9 @@
     }
     return Error(ErrorLoc, "invalid operand for instruction");
   }
+  case Match_PreferE32:
+    return Error(IDLoc, "internal error: instruction without _e64 suffix "
+                        "should be encoded as e32");
   }
   llvm_unreachable("Implement any new match types added!");
 }
@@ -1701,8 +1714,12 @@
 }
 
 void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands) {
-  ((AMDGPUOperand &)*Operands[1]).addRegOperands(Inst, 1);
-  unsigned i = 2;
+
+  unsigned i = 1;
+  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
+  if (Desc.getNumDefs() > 0) {
+    ((AMDGPUOperand &)*Operands[i++]).addRegOperands(Inst, 1);
+  }
 
   std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;
Index: lib/Target/AMDGPU/SIDefines.h
===================================================================
--- lib/Target/AMDGPU/SIDefines.h
+++ lib/Target/AMDGPU/SIDefines.h
@@ -37,7 +37,8 @@
   MIMG = 1 << 18,
   FLAT = 1 << 19,
   WQM = 1 << 20,
-  VGPRSpill = 1 << 21
+  VGPRSpill = 1 << 21,
+  VOPAsmPrefer32Bit = 1 << 22
 };
 }
Index: lib/Target/AMDGPU/SIInstrFormats.td
===================================================================
--- lib/Target/AMDGPU/SIInstrFormats.td
+++ lib/Target/AMDGPU/SIInstrFormats.td
@@ -40,6 +40,7 @@
   field bits<1> FLAT = 0;
   field bits<1> WQM = 0;
   field bits<1> VGPRSpill = 0;
+  field bits<1> VOPAsmPrefer32Bit = 0;
 
   // These need to be kept in sync with the enum in SIInstrFlags.
   let TSFlags{0} = VM_CNT;
@@ -68,6 +69,7 @@
   let TSFlags{19} = FLAT;
   let TSFlags{20} = WQM;
   let TSFlags{21} = VGPRSpill;
+  let TSFlags{22} = VOPAsmPrefer32Bit;
 
   let SchedRW = [Write32Bit];
 }
Index: lib/Target/AMDGPU/SIInstrInfo.td
===================================================================
--- lib/Target/AMDGPU/SIInstrInfo.td
+++ lib/Target/AMDGPU/SIInstrInfo.td
@@ -989,11 +989,12 @@
   let ParserMatchClass = InputModsMatchClass;
 }
 
-class getNumSrcArgs<ValueType Src1, ValueType Src2> {
+class getNumSrcArgs<ValueType Src0, ValueType Src1, ValueType Src2> {
   int ret =
-    !if (!eq(Src1.Value, untyped.Value), 1,   // VOP1
+    !if (!eq(Src0.Value, untyped.Value), 0,
+    !if (!eq(Src1.Value, untyped.Value), 1,   // VOP1
     !if (!eq(Src2.Value, untyped.Value), 2,   // VOP2
-    3));                                      // VOP3
+    3)));                                     // VOP3
 }
 
 // Returns the register class to use for the destination of VOP[123C]
@@ -1085,17 +1086,20 @@
 // Returns the assembly string for the inputs and outputs of a VOP[12C]
 // instruction. This does not add the _e32 suffix, so it can be reused
 // by getAsm64.
-class getAsm32 <int NumSrcArgs> {
+class getAsm32 <bit HasDst, int NumSrcArgs> {
+  string dst = "$dst";
+  string src0 = ", $src0";
   string src1 = ", $src1";
   string src2 = ", $src2";
-  string ret = "$dst, $src0"#
-               !if(!eq(NumSrcArgs, 1), "", src1)#
-               !if(!eq(NumSrcArgs, 3), src2, "");
+  string ret = !if(HasDst, dst, "") #
+               !if(!eq(NumSrcArgs, 1), src0, "") #
+               !if(!eq(NumSrcArgs, 2), src0#src1, "") #
+               !if(!eq(NumSrcArgs, 3), src0#src1#src2, "");
 }
 
 // Returns the assembly string for the inputs and outputs of a VOP3
 // instruction.
-class getAsm64 <int NumSrcArgs, bit HasModifiers> {
+class getAsm64 <bit HasDst, int NumSrcArgs, bit HasModifiers> {
   string src0 = !if(!eq(NumSrcArgs, 1), "$src0_modifiers", "$src0_modifiers,");
   string src1 = !if(!eq(NumSrcArgs, 1), "",
                    !if(!eq(NumSrcArgs, 2), " $src1_modifiers",
@@ -1103,7 +1107,7 @@
   string src2 = !if(!eq(NumSrcArgs, 3), " $src2_modifiers", "");
   string ret =
     !if(!eq(HasModifiers, 0),
-      getAsm32<NumSrcArgs>.ret,
+      getAsm32<HasDst, NumSrcArgs>.ret,
       "$dst, "#src0#src1#src2#"$clamp"#"$omod");
 }
 
@@ -1122,11 +1126,12 @@
   field RegisterOperand Src1RC64 = getVOP3SrcForVT<Src1VT>.ret;
   field RegisterOperand Src2RC64 = getVOP3SrcForVT<Src2VT>.ret;
 
-  field bit HasDst32 = !if(!eq(DstVT, untyped), 0, 1);
-  field int NumSrcArgs = getNumSrcArgs<Src1VT, Src2VT>.ret;
+  field bit HasDst = !if(!eq(DstVT.Value, untyped.Value), 0, 1);
+  field bit HasDst32 = HasDst;
+  field int NumSrcArgs = getNumSrcArgs<Src0VT, Src1VT, Src2VT>.ret;
   field bit HasModifiers = hasModifiers<Src0VT>.ret;
 
-  field dag Outs = (outs DstRC:$dst);
+  field dag Outs = !if(HasDst,(outs DstRC:$dst),(outs));
 
   // VOP3b instructions are a special case with a second explicit
   // output. This is manually overridden for them.
@@ -1137,8 +1142,8 @@
   field dag Ins64 = getIns64<Src0RC64, Src1RC64, Src2RC64, NumSrcArgs,
                              HasModifiers>.ret;
 
-  field string Asm32 = getAsm32<NumSrcArgs>.ret;
-  field string Asm64 = getAsm64<NumSrcArgs, HasModifiers>.ret;
+  field string Asm32 = getAsm32<HasDst, NumSrcArgs>.ret;
+  field string Asm64 = getAsm64<HasDst, NumSrcArgs, HasModifiers>.ret;
 }
 
 // FIXME: I think these F16/I16 profiles will need to use f16/i16 types in order
@@ -1151,6 +1156,8 @@
 def VOP_F16_F16_I16 : VOPProfile <[f16, f16, i32, untyped]>;
 def VOP_I16_I16_I16 : VOPProfile <[i32, i32, i32, untyped]>;
 
+def VOP_NONE : VOPProfile <[untyped, untyped, untyped, untyped]>;
+
 def VOP_F32_F32 : VOPProfile <[f32, f32, untyped, untyped]>;
 def VOP_F32_F64 : VOPProfile <[f32, f64, untyped, untyped]>;
 def VOP_F32_I32 : VOPProfile <[f32, i32, untyped, untyped]>;
@@ -1246,8 +1253,8 @@
   let Ins32 = (ins Src0RC32:$src0, Src1RC32:$src1, VGPR_32:$src2);
   let Ins64 = getIns64<Src0RC64, Src1RC64, RegisterOperand<VGPR_32>, 3,
                        HasModifiers>.ret;
-  let Asm32 = getAsm32<2>.ret;
-  let Asm64 = getAsm64<2, HasModifiers>.ret;
+  let Asm32 = getAsm32<1, 2>.ret;
+  let Asm64 = getAsm64<1, 2, HasModifiers>.ret;
 }
 def VOP_F64_F64_F64_F64 : VOPProfile <[f64, f64, f64, f64]>;
 def VOP_I32_I32_I32_I32 : VOPProfile <[i32, i32, i32, i32]>;
@@ -1256,8 +1263,15 @@
 class SIInstAlias <string asm, Instruction inst, VOPProfile p> :
   InstAlias <asm, (inst)>, PredicateControl {
 
-  field bit isCompare;
   field bit isCommutable;
+  field bit isCompare;
+  field bit isMoveImm;
+  field bit isReMaterializable;
+  field bit isAsCheapAsAMove;
+  list<SchedReadWrite> SchedRW;
+  field bits<8> vdst;
+  field bits<9> src0;
+  field bit VOPAsmPrefer32Bit;
 
   let ResultInst =
     !if (p.HasDst32,
@@ -1424,6 +1438,9 @@
   MnemonicAlias<opName#"_e64", opName> {
   let isPseudo = 1;
   let isCodeGenOnly = 1;
+
+  field bit vdst;
+  field bit src0;
 }
 
 class VOP3_Real_si <bits<9> op, dag outs, dag ins, string asm, string opName> :
Index: lib/Target/AMDGPU/SIInstructions.td
===================================================================
--- lib/Target/AMDGPU/SIInstructions.td
+++ lib/Target/AMDGPU/SIInstructions.td
@@ -1156,8 +1156,8 @@
 // VOP1 Instructions
 //===----------------------------------------------------------------------===//
 
-let vdst = 0, src0 = 0 in {
-defm V_NOP : VOP1_m <vop1<0x0>, (outs), (ins), "v_nop", [],
-  "v_nop">;
+let vdst = 0, src0 = 0, VOPAsmPrefer32Bit = 1 in {
+defm V_NOP : VOP1Inst <vop1<0x0>, "v_nop", VOP_NONE>;
 }
 
 let isMoveImm = 1, isReMaterializable = 1, isAsCheapAsAMove = 1 in {
@@ -1332,10 +1332,8 @@
 defm V_FREXP_MANT_F32 : VOP1Inst <vop1<0x40, 0x34>, "v_frexp_mant_f32",
   VOP_F32_F32
 >;
-let vdst = 0, src0 = 0 in {
-defm V_CLREXCP : VOP1_m <vop1<0x41, 0x35>, (outs), (ins), "v_clrexcp", [],
-  "v_clrexcp"
->;
+let vdst = 0, src0 = 0, VOPAsmPrefer32Bit = 1 in {
+defm V_CLREXCP : VOP1Inst <vop1<0x41, 0x35>, "v_clrexcp", VOP_NONE>;
 }
 defm V_MOVRELD_B32 : VOP1Inst <vop1<0x42, 0x36>, "v_movreld_b32", VOP_I32_I32>;
 defm V_MOVRELS_B32 : VOP1Inst <vop1<0x43, 0x37>, "v_movrels_b32", VOP_I32_I32>;
Index: test/MC/AMDGPU/vop1.s
===================================================================
--- test/MC/AMDGPU/vop1.s
+++ test/MC/AMDGPU/vop1.s
@@ -8,6 +8,25 @@
 // RUN: not llvm-mc -arch=amdgcn -mcpu=bonaire -show-encoding %s 2>&1 | FileCheck %s --check-prefix=NOSICI
 // RUN: not llvm-mc -arch=amdgcn -mcpu=tonga -show-encoding %s 2>&1 | FileCheck %s -check-prefix=NOVI
 
+// Force 32-bit encoding
+
+// GCN: v_mov_b32_e32 v1, v2 ; encoding: [0x02,0x03,0x02,0x7e]
+v_mov_b32_e32 v1, v2
+
+// Force 32-bit encoding for special instructions
+// FIXME: We should be printing _e32 suffixes for these:
+
+// GCN: v_nop ; encoding: [0x00,0x00,0x00,0x7e]
+v_nop_e32
+
+// SICI: v_clrexcp ; encoding: [0x00,0x82,0x00,0x7e]
+// VI: v_clrexcp ; encoding: [0x00,0x6a,0x00,0x7e]
+v_clrexcp_e32
+
+//===----------------------------------------------------------------------===//
+// Instructions
+//===----------------------------------------------------------------------===//
+
 // GCN: v_nop ; encoding: [0x00,0x00,0x00,0x7e]
 v_nop
Index: test/MC/AMDGPU/vop3-vop1-nosrc.s
===================================================================
--- /dev/null
+++ test/MC/AMDGPU/vop3-vop1-nosrc.s
@@ -0,0 +1,14 @@
+// RUN: llvm-mc -arch=amdgcn -mcpu=SI -show-encoding %s | FileCheck %s --check-prefix=SICI
+// RUN: llvm-mc -arch=amdgcn -mcpu=tonga -show-encoding %s | FileCheck %s --check-prefix=VI
+// XFAIL: *
+
+// FIXME: We should be printing _e64 suffixes for these.
+// FIXME: When this is fixed delete this file and fix test case in vop3.s
+
+v_nop_e64
+// SICI: v_nop_e64 ; encoding: [0x00,0x00,0x00,0xd3,0x00,0x00,0x00,0x00]
+// VI: v_nop_e64 ; encoding: [0x00,0x00,0x40,0xd1,0x00,0x00,0x00,0x00]
+
+v_clrexcp_e64
+// SICI: v_clrexcp_e64 ; encoding: [0x00,0x00,0x82,0xd3,0x00,0x00,0x00,0x00]
+// VI: v_clrexcp_e64 ; encoding: [0x00,0x00,0x75,0xd1,0x00,0x00,0x00,0x00]
Index: test/MC/AMDGPU/vop3.s
===================================================================
--- test/MC/AMDGPU/vop3.s
+++ test/MC/AMDGPU/vop3.s
@@ -118,6 +118,23 @@
 // VOP1 Instructions
 //===----------------------------------------------------------------------===//
 
+// Test forced e64 encoding with e32 operands
+
+v_mov_b32_e64 v1, v2
+// SICI: v_mov_b32_e64 v1, v2 ; encoding: [0x01,0x00,0x02,0xd3,0x02,0x01,0x00,0x00]
+// VI: v_mov_b32_e64 v1, v2 ; encoding: [0x01,0x00,0x41,0xd1,0x02,0x01,0x00,0x00]
+
+// Force e64 encoding for special instructions.
+// FIXME: We should be printing the _e64 suffix for v_nop and v_clrexcp:
+
+v_nop_e64
+// SICI: v_nop ; encoding: [0x00,0x00,0x00,0xd3,0x00,0x00,0x00,0x00]
+// VI: v_nop ; encoding: [0x00,0x00,0x40,0xd1,0x00,0x00,0x00,0x00]
+
+v_clrexcp_e64
+// SICI: v_clrexcp ; encoding: [0x00,0x00,0x82,0xd3,0x00,0x00,0x00,0x00]
+// VI: v_clrexcp ; encoding: [0x00,0x00,0x75,0xd1,0x00,0x00,0x00,0x00]
+
 //
 // Modifier tests:
 //