Index: llvm/trunk/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
===================================================================
--- llvm/trunk/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
+++ llvm/trunk/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
@@ -138,6 +138,7 @@
     ImmTyGLC,
     ImmTySLC,
     ImmTyTFE,
+    ImmTyD16,
     ImmTyClampSI,
     ImmTyOModSI,
     ImmTyDppCtrl,
@@ -286,7 +287,7 @@
   bool isDMask() const { return isImmTy(ImmTyDMask); }
   bool isUNorm() const { return isImmTy(ImmTyUNorm); }
   bool isDA() const { return isImmTy(ImmTyDA); }
-  bool isR128() const { return isImmTy(ImmTyUNorm); }
+  bool isR128() const { return isImmTy(ImmTyR128); }
   bool isLWE() const { return isImmTy(ImmTyLWE); }
   bool isOff() const { return isImmTy(ImmTyOff); }
   bool isExpTgt() const { return isImmTy(ImmTyExpTgt); }
@@ -305,6 +306,7 @@
   bool isGLC() const { return isImmTy(ImmTyGLC); }
   bool isSLC() const { return isImmTy(ImmTySLC); }
   bool isTFE() const { return isImmTy(ImmTyTFE); }
+  bool isD16() const { return isImmTy(ImmTyD16); }
   bool isDFMT() const { return isImmTy(ImmTyDFMT) && isUInt<8>(getImm()); }
   bool isNFMT() const { return isImmTy(ImmTyNFMT) && isUInt<8>(getImm()); }
   bool isBankMask() const { return isImmTy(ImmTyDppBankMask); }
@@ -657,6 +659,7 @@
     case ImmTyGLC: OS << "GLC"; break;
     case ImmTySLC: OS << "SLC"; break;
     case ImmTyTFE: OS << "TFE"; break;
+    case ImmTyD16: OS << "D16"; break;
     case ImmTyDFMT: OS << "DFMT"; break;
     case ImmTyNFMT: OS << "NFMT"; break;
     case ImmTyClampSI: OS << "ClampSI"; break;
@@ -821,7 +824,7 @@
 
   // Number of extra operands parsed after the first optional operand.
   // This may be necessary to skip hardcoded mandatory operands.
-  static const unsigned MAX_OPR_LOOKAHEAD = 1;
+  static const unsigned MAX_OPR_LOOKAHEAD = 8;
 
   unsigned ForcedEncodingSize = 0;
   bool ForcedDPP = false;
@@ -1081,6 +1084,7 @@
   AMDGPUOperand::Ptr defaultSLC() const;
   AMDGPUOperand::Ptr defaultTFE() const;
 
+  AMDGPUOperand::Ptr defaultD16() const;
   AMDGPUOperand::Ptr defaultDMask() const;
   AMDGPUOperand::Ptr defaultUNorm() const;
   AMDGPUOperand::Ptr defaultDA() const;
@@ -4016,6 +4020,10 @@
   return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyTFE);
 }
 
+AMDGPUOperand::Ptr AMDGPUAsmParser::defaultD16() const {
+  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyD16);
+}
+
 void AMDGPUAsmParser::cvtMubufImpl(MCInst &Inst,
                                    const OperandVector &Operands,
                                    bool IsAtomic, bool IsAtomicReturn) {
@@ -4260,6 +4268,7 @@
   {"glc", AMDGPUOperand::ImmTyGLC, true, nullptr},
   {"slc", AMDGPUOperand::ImmTySLC, true, nullptr},
   {"tfe", AMDGPUOperand::ImmTyTFE, true, nullptr},
+  {"d16", AMDGPUOperand::ImmTyD16, true, nullptr},
   {"high", AMDGPUOperand::ImmTyHigh, true, nullptr},
   {"clamp", AMDGPUOperand::ImmTyClampSI, true, nullptr},
   {"omod", AMDGPUOperand::ImmTyOModSI, false, ConvertOmodMul},
@@ -4964,6 +4973,8 @@
     return Operand.isGDS() ? Match_Success : Match_InvalidOperand;
   case MCK_glc:
     return Operand.isGLC() ? Match_Success : Match_InvalidOperand;
+  case MCK_d16:
+    return Operand.isD16() ? Match_Success : Match_InvalidOperand;
   case MCK_idxen:
     return Operand.isIdxen() ? Match_Success : Match_InvalidOperand;
   case MCK_offen:
Index: llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.td
===================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.td
+++ llvm/trunk/lib/Target/AMDGPU/SIInstrInfo.td
@@ -836,6 +836,7 @@
 def unorm : NamedOperandBit<"UNorm", NamedMatchClass<"UNorm">>;
 def da : NamedOperandBit<"DA", NamedMatchClass<"DA">>;
 def r128 : NamedOperandBit<"R128", NamedMatchClass<"R128">>;
+def D16 : NamedOperandBit<"D16", NamedMatchClass<"D16">>;
 def lwe : NamedOperandBit<"LWE", NamedMatchClass<"LWE">>;
 def exp_compr : NamedOperandBit<"ExpCompr", NamedMatchClass<"ExpCompr">>;
 def exp_vm : NamedOperandBit<"ExpVM", NamedMatchClass<"ExpVM">>;
Index: llvm/trunk/test/MC/AMDGPU/mimg.s
===================================================================
--- llvm/trunk/test/MC/AMDGPU/mimg.s
+++ llvm/trunk/test/MC/AMDGPU/mimg.s
@@ -18,6 +18,16 @@
 // SICI: image_load v[4:7], v[237:240], s[28:35] dmask:0x7 tfe ; encoding: [0x00,0x07,0x01,0xf0,0xed,0x04,0x07,0x00]
 // VI: image_load v[4:7], v[237:240], s[28:35] dmask:0x7 tfe ; encoding: [0x00,0x07,0x01,0xf0,0xed,0x04,0x07,0x00]
 
+// Verify support of all possible modifiers.
+// FIXME: This test is incorrect because r128 assumes a 128-bit SRSRC.
+// FIXME: Check that d16 is not supported before VI
+image_load v[5:6], v[1:4], s[8:15] dmask:0x1 unorm glc slc r128 tfe lwe da d16
+// VI: image_load v[5:6], v[1:4], s[8:15] dmask:0x1 unorm glc slc r128 tfe lwe da d16 ; encoding: [0x00,0xf1,0x03,0xf2,0x01,0x05,0x02,0x80]
+
+// FIXME: Check that d16 is not supported before VI
+image_load v5, v[1:4], s[8:15] d16
+// VI: image_load v5, v[1:4], s[8:15] d16 ; encoding: [0x00,0x00,0x00,0xf0,0x01,0x05,0x02,0x80]
+
 image_store v[193:195], v[237:240], s[28:35] dmask:0x7 unorm
 // SICI: image_store v[193:195], v[237:240], s[28:35] dmask:0x7 unorm ; encoding: [0x00,0x17,0x20,0xf0,0xed,0xc1,0x07,0x00]
 // VI: image_store v[193:195], v[237:240], s[28:35] dmask:0x7 unorm ; encoding: [0x00,0x17,0x20,0xf0,0xed,0xc1,0x07,0x00]
@@ -30,6 +40,16 @@
 // SICI: image_store v[193:194], v[237:240], s[28:35] tfe ; encoding: [0x00,0x00,0x21,0xf0,0xed,0xc1,0x07,0x00]
 // VI: image_store v[193:194], v[237:240], s[28:35] tfe ; encoding: [0x00,0x00,0x21,0xf0,0xed,0xc1,0x07,0x00]
 
+// Verify support of all possible modifiers.
+// FIXME: This test is incorrect because r128 assumes a 128-bit SRSRC.
+// FIXME: Check that d16 is not supported before VI
+image_store v5, v[1:4], s[8:15] dmask:0x1 unorm glc slc r128 lwe da d16
+// VI: image_store v5, v[1:4], s[8:15] dmask:0x1 unorm glc slc r128 lwe da d16 ; encoding: [0x00,0xf1,0x22,0xf2,0x01,0x05,0x02,0x80]
+
+// FIXME: Check that d16 is not supported before VI
+image_store v5, v[1:4], s[8:15] d16
+// VI: image_store v5, v[1:4], s[8:15] d16 ; encoding: [0x00,0x00,0x20,0xf0,0x01,0x05,0x02,0x80]
+
 //===----------------------------------------------------------------------===//
 // Image Sample
 //===----------------------------------------------------------------------===//
Index: llvm/trunk/test/MC/Disassembler/AMDGPU/mimg_vi.txt
===================================================================
--- llvm/trunk/test/MC/Disassembler/AMDGPU/mimg_vi.txt
+++ llvm/trunk/test/MC/Disassembler/AMDGPU/mimg_vi.txt
@@ -28,6 +28,12 @@
 # VI: image_store v0, v1, s[0:7] dmask:0x1 unorm ; encoding: [0x00,0x11,0x20,0xf0,0x01,0x00,0x00,0x00]
 0x00 0x11 0x20 0xf0 0x01 0x00 0x00 0x00
 
+# Test all modifiers
+# FIXME: This test is incorrect because r128 assumes a 128-bit SRSRC.
+# FIXME: This test is incorrect because tfe shall increase data size by 1.
+# VI: image_load v5, v1, s[8:15] dmask:0x1 unorm glc slc r128 tfe lwe da d16 ; encoding: [0x00,0xf1,0x03,0xf2,0x01,0x05,0x02,0x80]
+0x00,0xf1,0x03,0xf2,0x01,0x05,0x02,0x80
+
 # Test dmask == 0
 # VI: image_load v0, v4, s[8:15] unorm ; encoding: [0x00,0x10,0x00,0xf0,0x04,0x00,0x02,0x00]
 0x00 0x10 0x00 0xf0 0x04 0x00 0x02 0x00
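For exercising the new d16 bit by hand outside lit, both directions can be driven with plain llvm-mc invocations. This is only an illustrative sketch: tonga is assumed here merely as a representative VI-class target, and the expected encoding is the one checked in the mimg.s hunk above.

    # Assembler path: should print the d16 encoding checked in mimg.s,
    # i.e. ... d16 ; encoding: [0x00,0x00,0x00,0xf0,0x01,0x05,0x02,0x80]
    echo 'image_load v5, v[1:4], s[8:15] d16' | llvm-mc -arch=amdgcn -mcpu=tonga -show-encoding

    # Disassembler path: should round-trip the bytes from mimg_vi.txt back
    # to an image_load with the d16 modifier printed.
    echo '0x00,0xf1,0x03,0xf2,0x01,0x05,0x02,0x80' | llvm-mc -arch=amdgcn -mcpu=tonga -disassemble -show-encoding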