Index: llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp =================================================================== --- llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp +++ llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp @@ -113,9 +113,7 @@ ImmTyInstOffset, ImmTyOffset0, ImmTyOffset1, - ImmTyDLC, - ImmTyGLC, - ImmTySLC, + ImmTyCPol, ImmTySWZ, ImmTyTFE, ImmTyD16, @@ -336,12 +334,10 @@ bool isFlatOffset() const { return isImmTy(ImmTyOffset) || isImmTy(ImmTyInstOffset); } bool isGDS() const { return isImmTy(ImmTyGDS); } bool isLDS() const { return isImmTy(ImmTyLDS); } - bool isDLC() const { return isImmTy(ImmTyDLC); } - bool isGLC() const { return isImmTy(ImmTyGLC); } - // "GLC_1" is a MatchClass of the GLC_1 operand with the default and forced - // value of the GLC operand. - bool isGLC_1() const { return isImmTy(ImmTyGLC); } - bool isSLC() const { return isImmTy(ImmTySLC); } + bool isCPol() const { return isImmTy(ImmTyCPol); } + // "CPol_GLC1" is a MatchClass of the CPOL_GLC1 operand with the default and + // forced value of the GLC operand. + bool isCPol_GLC1() const { return isImmTy(ImmTyCPol); } bool isSWZ() const { return isImmTy(ImmTySWZ); } bool isTFE() const { return isImmTy(ImmTyTFE); } bool isD16() const { return isImmTy(ImmTyD16); } @@ -837,9 +833,7 @@ case ImmTyInstOffset: OS << "InstOffset"; break; case ImmTyOffset0: OS << "Offset0"; break; case ImmTyOffset1: OS << "Offset1"; break; - case ImmTyDLC: OS << "DLC"; break; - case ImmTyGLC: OS << "GLC"; break; - case ImmTySLC: OS << "SLC"; break; + case ImmTyCPol: OS << "CPol"; break; case ImmTySWZ: OS << "SWZ"; break; case ImmTyTFE: OS << "TFE"; break; case ImmTyD16: OS << "D16"; break; @@ -1105,7 +1099,7 @@ bool updateGprCountSymbols(RegisterKind RegKind, unsigned DwordRegIndex, unsigned RegWidth); void cvtMubufImpl(MCInst &Inst, const OperandVector &Operands, - bool IsAtomic, bool IsAtomicReturn, bool IsLds = false); + bool IsAtomic, bool IsLds = false); void cvtDSImpl(MCInst &Inst, const OperandVector &Operands, bool IsGdsHardcoded); @@ -1296,6 +1290,7 @@ OperandMatchResultTy parseNamedBit(StringRef Name, OperandVector &Operands, AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone); + OperandMatchResultTy parseCPol(OperandVector &Operands); OperandMatchResultTy parseStringWithPrefix(StringRef Prefix, StringRef &Value, SMLoc &StringLoc); @@ -1452,16 +1447,13 @@ OperandMatchResultTy parseGPRIdxMode(OperandVector &Operands); int64_t parseGPRIdxMacro(); - void cvtMubuf(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, false, false); } - void cvtMubufAtomic(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, true, false); } - void cvtMubufAtomicReturn(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, true, true); } - void cvtMubufLds(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, false, false, true); } + void cvtMubuf(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, false); } + void cvtMubufAtomic(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, true); } + void cvtMubufLds(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, false, true); } void cvtMtbuf(MCInst &Inst, const OperandVector &Operands); - AMDGPUOperand::Ptr defaultDLC() const; - AMDGPUOperand::Ptr defaultGLC() const; - AMDGPUOperand::Ptr defaultGLC_1() const; - AMDGPUOperand::Ptr defaultSLC() const; + AMDGPUOperand::Ptr defaultCPol() const; + AMDGPUOperand::Ptr 
defaultCPol_GLC1() const; AMDGPUOperand::Ptr defaultSMRDOffset8() const; AMDGPUOperand::Ptr defaultSMEMOffset() const; @@ -1483,6 +1475,8 @@ void cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands); void cvtIntersectRay(MCInst &Inst, const OperandVector &Operands); + void cvtSMEMAtomic(MCInst &Inst, const OperandVector &Operands); + bool parseDimId(unsigned &Encoding); OperandMatchResultTy parseDim(OperandVector &Operands); OperandMatchResultTy parseDPP8(OperandVector &Operands); @@ -3807,15 +3801,29 @@ bool AMDGPUAsmParser::validateCoherencyBits(const MCInst &Inst, const OperandVector &Operands, const SMLoc &IDLoc) { - int GLCPos = AMDGPU::getNamedOperandIdx(Inst.getOpcode(), - AMDGPU::OpName::glc1); - if (GLCPos != -1) { - // -1 is set by GLC_1 default operand. In all cases "glc" must be present - // in the asm string, and the default value means it is not present. - if (Inst.getOperand(GLCPos).getImm() == -1) { + uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags; + if (!(TSFlags & (SIInstrFlags::IsAtomicNoRet | SIInstrFlags::IsAtomicRet))) + return true; + + int CPolPos = AMDGPU::getNamedOperandIdx(Inst.getOpcode(), + AMDGPU::OpName::cpol); + if (CPolPos == -1) + return true; + + if (TSFlags & SIInstrFlags::IsAtomicRet) { + if (!(TSFlags & SIInstrFlags::MIMG) && + !(Inst.getOperand(CPolPos).getImm() & CPol::GLC)) { Error(IDLoc, "instruction must use glc"); return false; } + } else { + if (Inst.getOperand(CPolPos).getImm() & CPol::GLC) { + SMLoc S = getImmLoc(AMDGPUOperand::ImmTyCPol, Operands); + StringRef CStr(S.getPointer()); + S = SMLoc::getFromPointer(&CStr.data()[CStr.find("glc")]); + Error(S, "instruction must not use glc"); + return false; + } } return true; @@ -5071,10 +5079,6 @@ Error(S, "a16 modifier is not supported on this GPU"); return MatchOperand_ParseFail; } - if (!isGFX10Plus() && ImmTy == AMDGPUOperand::ImmTyDLC) { - Error(S, "dlc modifier is not supported on this GPU"); - return MatchOperand_ParseFail; - } if (isGFX9() && ImmTy == AMDGPUOperand::ImmTyA16) ImmTy = AMDGPUOperand::ImmTyR128A16; @@ -5083,6 +5087,46 @@ return MatchOperand_Success; } +OperandMatchResultTy +AMDGPUAsmParser::parseCPol(OperandVector &Operands) { + unsigned CPolOn = 0; + unsigned CPolOff = 0; + SMLoc S = getLoc(); + + if (trySkipId("glc")) + CPolOn = AMDGPU::CPol::GLC; + else if (trySkipId("noglc")) + CPolOff = AMDGPU::CPol::GLC; + else if (trySkipId("slc")) + CPolOn = AMDGPU::CPol::SLC; + else if (trySkipId("noslc")) + CPolOff = AMDGPU::CPol::SLC; + else if (trySkipId("dlc")) + CPolOn = AMDGPU::CPol::DLC; + else if (trySkipId("nodlc")) + CPolOff = AMDGPU::CPol::DLC; + else + return MatchOperand_NoMatch; + + if (!isGFX10Plus() && ((CPolOn | CPolOff) & AMDGPU::CPol::DLC)) { + Error(S, "dlc modifier is not supported on this GPU"); + return MatchOperand_ParseFail; + } + + for (unsigned I = 1; I != Operands.size(); ++I) { + AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]); + if (Op.isCPol()) { + Op.setImm((Op.getImm() | CPolOn) & ~CPolOff); + return MatchOperand_Success; + } + } + + Operands.push_back(AMDGPUOperand::CreateImm(this, CPolOn, S, + AMDGPUOperand::ImmTyCPol)); + + return MatchOperand_Success; +} + static void addOptionalImmOperand( MCInst& Inst, const OperandVector& Operands, AMDGPUAsmParser::OptionalImmIndexMap& OptionalIdx, @@ -6500,32 +6544,43 @@ // mubuf //===----------------------------------------------------------------------===// -AMDGPUOperand::Ptr AMDGPUAsmParser::defaultDLC() const { - return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyDLC); -} 
- -AMDGPUOperand::Ptr AMDGPUAsmParser::defaultGLC() const { - return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyGLC); -} - -AMDGPUOperand::Ptr AMDGPUAsmParser::defaultGLC_1() const { - return AMDGPUOperand::CreateImm(this, -1, SMLoc(), AMDGPUOperand::ImmTyGLC); +AMDGPUOperand::Ptr AMDGPUAsmParser::defaultCPol() const { + return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyCPol); } -AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSLC() const { - return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTySLC); +AMDGPUOperand::Ptr AMDGPUAsmParser::defaultCPol_GLC1() const { + return AMDGPUOperand::CreateImm(this, CPol::GLC, SMLoc(), + AMDGPUOperand::ImmTyCPol); } void AMDGPUAsmParser::cvtMubufImpl(MCInst &Inst, - const OperandVector &Operands, - bool IsAtomic, - bool IsAtomicReturn, - bool IsLds) { + const OperandVector &Operands, + bool IsAtomic, + bool IsLds) { bool IsLdsOpcode = IsLds; bool HasLdsModifier = false; OptionalImmIndexMap OptionalIdx; - assert(IsAtomicReturn ? IsAtomic : true); unsigned FirstOperandIdx = 1; + bool IsAtomicReturn = false; + + if (IsAtomic) { + for (unsigned i = FirstOperandIdx, e = Operands.size(); i != e; ++i) { + AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]); + if (!Op.isCPol()) + continue; + IsAtomicReturn = Op.getImm() & AMDGPU::CPol::GLC; + break; + } + + if (!IsAtomicReturn) { + int NewOpc = AMDGPU::getAtomicNoRetOp(Inst.getOpcode()); + if (NewOpc != -1) + Inst.setOpcode(NewOpc); + } + + IsAtomicReturn = MII.get(Inst.getOpcode()).TSFlags & + SIInstrFlags::IsAtomicRet; + } for (unsigned i = FirstOperandIdx, e = Operands.size(); i != e; ++i) { AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]); @@ -6576,18 +6631,11 @@ } addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset); - if (!IsAtomic || IsAtomicReturn) { - addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC, - IsAtomicReturn ? 
-1 : 0); - } - addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC); + addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyCPol, 0); if (!IsLdsOpcode) { // tfe is not legal with lds opcodes addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE); } - - if (isGFX10Plus()) - addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDLC); } void AMDGPUAsmParser::cvtMtbuf(MCInst &Inst, const OperandVector &Operands) { @@ -6622,12 +6670,8 @@ addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset); addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyFORMAT); - addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC); - addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC); + addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyCPol, 0); addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE); - - if (isGFX10Plus()) - addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDLC); } //===----------------------------------------------------------------------===// @@ -6669,10 +6713,7 @@ if (IsGFX10Plus) addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDim, -1); addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyUNorm); - if (IsGFX10Plus) - addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDLC); - addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC); - addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC); + addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyCPol); addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyR128A16); if (IsGFX10Plus) addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyA16); @@ -6687,6 +6728,61 @@ cvtMIMG(Inst, Operands, true); } +void AMDGPUAsmParser::cvtSMEMAtomic(MCInst &Inst, const OperandVector &Operands) { + OptionalImmIndexMap OptionalIdx; + bool IsAtomicReturn = false; + + for (unsigned i = 1, e = Operands.size(); i != e; ++i) { + AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]); + if (!Op.isCPol()) + continue; + IsAtomicReturn = Op.getImm() & AMDGPU::CPol::GLC; + break; + } + + if (!IsAtomicReturn) { + int NewOpc = AMDGPU::getAtomicNoRetOp(Inst.getOpcode()); + if (NewOpc != -1) + Inst.setOpcode(NewOpc); + } + + IsAtomicReturn = MII.get(Inst.getOpcode()).TSFlags & + SIInstrFlags::IsAtomicRet; + + for (unsigned i = 1, e = Operands.size(); i != e; ++i) { + AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]); + + // Add the register arguments + if (Op.isReg()) { + Op.addRegOperands(Inst, 1); + if (IsAtomicReturn && i == 1) + Op.addRegOperands(Inst, 1); + continue; + } + + // Handle the case where soffset is an immediate + if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyNone) { + Op.addImmOperands(Inst, 1); + continue; + } + + // Handle tokens like 'offen' which are sometimes hard-coded into the + // asm string. There are no MCInst operands for these. 
+ if (Op.isToken()) { + continue; + } + assert(Op.isImm()); + + // Handle optional arguments + OptionalIdx[Op.getImmTy()] = i; + } + + if ((int)Inst.getNumOperands() <= + AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::offset)) + addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset); + addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyCPol, 0); +} + void AMDGPUAsmParser::cvtIntersectRay(MCInst &Inst, const OperandVector &Operands) { for (unsigned I = 1; I < Operands.size(); ++I) { @@ -6783,9 +6879,7 @@ {"lds", AMDGPUOperand::ImmTyLDS, true, nullptr}, {"offset", AMDGPUOperand::ImmTyOffset, false, nullptr}, {"inst_offset", AMDGPUOperand::ImmTyInstOffset, false, nullptr}, - {"dlc", AMDGPUOperand::ImmTyDLC, true, nullptr}, - {"glc", AMDGPUOperand::ImmTyGLC, true, nullptr}, - {"slc", AMDGPUOperand::ImmTySLC, true, nullptr}, + {"", AMDGPUOperand::ImmTyCPol, false, nullptr}, {"swz", AMDGPUOperand::ImmTySWZ, true, nullptr}, {"tfe", AMDGPUOperand::ImmTyTFE, true, nullptr}, {"d16", AMDGPUOperand::ImmTyD16, true, nullptr}, @@ -6868,6 +6962,8 @@ Op.ConvertResult); } else if (Op.Type == AMDGPUOperand::ImmTyDim) { res = parseDim(Operands); + } else if (Op.Type == AMDGPUOperand::ImmTyCPol) { + res = parseCPol(Operands); } else { res = parseIntWithPrefix(Op.Name, Operands, Op.Type, Op.ConvertResult); } @@ -7722,8 +7818,6 @@ return Operand.isGDS() ? Match_Success : Match_InvalidOperand; case MCK_lds: return Operand.isLDS() ? Match_Success : Match_InvalidOperand; - case MCK_glc: - return Operand.isGLC() ? Match_Success : Match_InvalidOperand; case MCK_idxen: return Operand.isIdxen() ? Match_Success : Match_InvalidOperand; case MCK_offen: Index: llvm/lib/Target/AMDGPU/BUFInstructions.td =================================================================== --- llvm/lib/Target/AMDGPU/BUFInstructions.td +++ llvm/lib/Target/AMDGPU/BUFInstructions.td @@ -122,13 +122,11 @@ let TSFlags = ps.TSFlags; bits<12> offset; - bits<1> glc; - bits<1> dlc; + bits<3> cpol; bits<7> format; bits<8> vaddr; bits<8> vdata; bits<7> srsrc; - bits<1> slc; bits<1> tfe; bits<8> soffset; @@ -142,17 +140,17 @@ RegisterClass vaddrClass = !if(!empty(vaddrList), ?, !head(vaddrList)); dag InsNoData = !if(!empty(vaddrList), (ins SReg_128:$srsrc, SCSrc_b32:$soffset, - offset:$offset, FORMAT:$format, GLC:$glc, SLC:$slc, TFE:$tfe, DLC:$dlc, SWZ:$swz), + offset:$offset, FORMAT:$format, CPol:$cpol, TFE:$tfe, SWZ:$swz), (ins vaddrClass:$vaddr, SReg_128:$srsrc, SCSrc_b32:$soffset, - offset:$offset, FORMAT:$format, GLC:$glc, SLC:$slc, TFE:$tfe, DLC:$dlc, SWZ:$swz) + offset:$offset, FORMAT:$format, CPol:$cpol, TFE:$tfe, SWZ:$swz) ); dag InsData = !if(!empty(vaddrList), (ins vdataClass:$vdata, SReg_128:$srsrc, - SCSrc_b32:$soffset, offset:$offset, FORMAT:$format, GLC:$glc, - SLC:$slc, TFE:$tfe, DLC:$dlc, SWZ:$swz), + SCSrc_b32:$soffset, offset:$offset, FORMAT:$format, CPol:$cpol, + TFE:$tfe, SWZ:$swz), (ins vdataClass:$vdata, vaddrClass:$vaddr, SReg_128:$srsrc, - SCSrc_b32:$soffset, offset:$offset, FORMAT:$format, GLC:$glc, - SLC:$slc, TFE:$tfe, DLC:$dlc, SWZ:$swz) + SCSrc_b32:$soffset, offset:$offset, FORMAT:$format, CPol:$cpol, + TFE:$tfe, SWZ:$swz) ); dag ret = !if(!empty(vdataList), InsNoData, InsData); } @@ -204,7 +202,7 @@ : MTBUF_Pseudo.ret, - " $vdata, " # getMTBUFAsmOps.ret # "$glc$slc$tfe$dlc$swz", + " $vdata, " # getMTBUFAsmOps.ret # "$cpol$tfe$swz", pattern>, MTBUF_SetupAddr { let PseudoInstr = opName # "_" # getAddrName.ret; @@ -220,13 +218,13 @@ def _OFFSET : MTBUF_Load_Pseudo , + 
CPol:$cpol, i1:$tfe, i1:$swz)))]>, MTBUFAddr64Table<0, NAME>; def _ADDR64 : MTBUF_Load_Pseudo , + i8:$format, CPol:$cpol, i1:$tfe, i1:$swz)))]>, MTBUFAddr64Table<1, NAME>; def _OFFEN : MTBUF_Load_Pseudo ; @@ -252,7 +250,7 @@ : MTBUF_Pseudo.ret, - " $vdata, " # getMTBUFAsmOps.ret # "$glc$slc$tfe$dlc$swz", + " $vdata, " # getMTBUFAsmOps.ret # "$cpol$tfe$swz", pattern>, MTBUF_SetupAddr { let PseudoInstr = opName # "_" # getAddrName.ret; @@ -267,14 +265,14 @@ def _OFFSET : MTBUF_Store_Pseudo , + i16:$offset, i8:$format, CPol:$cpol, + i1:$tfe, i1:$swz))]>, MTBUFAddr64Table<0, NAME>; def _ADDR64 : MTBUF_Store_Pseudo , + i16:$offset, i8:$format, CPol:$cpol, + i1:$tfe, i1:$swz))]>, MTBUFAddr64Table<1, NAME>; def _OFFEN : MTBUF_Store_Pseudo ; @@ -359,12 +357,10 @@ let UseNamedOperandTable = ps.UseNamedOperandTable; bits<12> offset; - bits<1> glc; - bits<1> dlc; + bits<3> cpol; bits<8> vaddr; bits<8> vdata; bits<7> srsrc; - bits<1> slc; bits<1> tfe; bits<8> soffset; } @@ -404,19 +400,19 @@ RegisterClass vaddrClass = !if(!empty(vaddrList), ?, !head(vaddrList)); dag InsNoData = !if(!empty(vaddrList), (ins SReg_128:$srsrc, SCSrc_b32:$soffset, - offset:$offset, GLC:$glc, SLC:$slc), + offset:$offset, CPol:$cpol), (ins vaddrClass:$vaddr, SReg_128:$srsrc, SCSrc_b32:$soffset, - offset:$offset, GLC:$glc, SLC:$slc) + offset:$offset, CPol:$cpol) ); dag InsData = !if(!empty(vaddrList), (ins vdataClass:$vdata, SReg_128:$srsrc, - SCSrc_b32:$soffset, offset:$offset, GLC:$glc, SLC:$slc), + SCSrc_b32:$soffset, offset:$offset, CPol:$cpol), (ins vdataClass:$vdata, vaddrClass:$vaddr, SReg_128:$srsrc, - SCSrc_b32:$soffset, offset:$offset, GLC:$glc, SLC:$slc) + SCSrc_b32:$soffset, offset:$offset, CPol:$cpol) ); dag ret = !con( !if(!empty(vdataList), InsNoData, InsData), - !if(isLds, (ins DLC:$dlc, SWZ:$swz), (ins TFE:$tfe, DLC:$dlc,SWZ:$swz)) + !if(isLds, (ins SWZ:$swz), (ins TFE:$tfe, SWZ:$swz)) ); } @@ -487,8 +483,8 @@ (outs getVregSrcForVT.ret:$vdata), !con(getMUBUFIns.ret, !if(HasTiedDest, (ins getVregSrcForVT.ret:$vdata_in), (ins))), - " $vdata, " # getMUBUFAsmOps.ret # "$glc$slc" # - !if(isLds, " lds", "$tfe") # "$dlc$swz", + " $vdata, " # getMUBUFAsmOps.ret # "$cpol" # + !if(isLds, " lds", "$tfe") # "$swz", pattern>, MUBUF_SetupAddr { let PseudoInstr = opName # !if(isLds, "_lds", "") # @@ -506,15 +502,15 @@ } class MUBUF_Offset_Load_Pat : Pat < - (load_vt (ld (MUBUFOffset v4i32:$srsrc, i32:$soffset, i16:$offset, i1:$glc, i1:$slc, i1:$tfe, i1:$dlc, i1:$swz))), - (load_vt (inst v4i32:$srsrc, i32:$soffset, i16:$offset, i1:$glc, i1:$slc, i1:$tfe, i1:$dlc, i1:$swz)) + (load_vt (ld (MUBUFOffset v4i32:$srsrc, i32:$soffset, i16:$offset, CPol:$cpol, i1:$tfe, i1:$swz))), + (load_vt (inst v4i32:$srsrc, i32:$soffset, i16:$offset, CPol:$cpol, i1:$tfe, i1:$swz)) >; class MUBUF_Addr64_Load_Pat : Pat < - (load_vt (ld (MUBUFAddr64 v4i32:$srsrc, i64:$vaddr, i32:$soffset, i16:$offset, i1:$glc, i1:$slc, i1:$tfe, i1:$dlc, i1:$swz))), - (load_vt (inst i64:$vaddr, v4i32:$srsrc, i32:$soffset, i16:$offset, i1:$glc, i1:$slc, i1:$tfe, i1:$dlc, i1:$swz)) + (load_vt (ld (MUBUFAddr64 v4i32:$srsrc, i64:$vaddr, i32:$soffset, i16:$offset, CPol:$cpol, i1:$tfe, i1:$swz))), + (load_vt (inst i64:$vaddr, v4i32:$srsrc, i32:$soffset, i16:$offset, CPol:$cpol, i1:$tfe, i1:$swz)) >; multiclass MUBUF_Pseudo_Load_Pats { @@ -567,7 +563,7 @@ : MUBUF_Pseudo.ret]>.ret, - " $vdata, " # getMUBUFAsmOps.ret # "$glc$slc$tfe$dlc$swz", + " $vdata, " # getMUBUFAsmOps.ret # "$cpol$tfe$swz", pattern>, MUBUF_SetupAddr { let PseudoInstr = opName # "_" # getAddrName.ret; @@ 
-585,12 +581,12 @@ def _OFFSET : MUBUF_Store_Pseudo , + i16:$offset, CPol:$cpol, i1:$tfe, i1:$swz))]>, MUBUFAddr64Table<0, NAME>; def _ADDR64 : MUBUF_Store_Pseudo , + i16:$offset, CPol:$cpol, i1:$tfe, i1:$swz))]>, MUBUFAddr64Table<1, NAME>; def _OFFEN : MUBUF_Store_Pseudo ; @@ -608,8 +604,8 @@ class MUBUF_Pseudo_Store_Lds : MUBUF_Pseudo { + (ins SReg_128:$srsrc, SCSrc_b32:$soffset, offset:$offset, CPol:$cpol, SWZ:$swz), + " $srsrc, $soffset$offset lds$cpol$swz"> { let mayLoad = 0; let mayStore = 1; let maybeAtomic = 1; @@ -629,15 +625,15 @@ dag ret = !if(vdata_in, !if(!empty(vaddrList), (ins vdataClass:$vdata_in, - SReg_128:$srsrc, SCSrc_b32:$soffset, offset:$offset, GLC_1:$glc1, SLC:$slc), + SReg_128:$srsrc, SCSrc_b32:$soffset, offset:$offset, CPol_GLC1:$cpol), (ins vdataClass:$vdata_in, vaddrClass:$vaddr, - SReg_128:$srsrc, SCSrc_b32:$soffset, offset:$offset, GLC_1:$glc1, SLC:$slc) + SReg_128:$srsrc, SCSrc_b32:$soffset, offset:$offset, CPol_GLC1:$cpol) ), !if(!empty(vaddrList), (ins vdataClass:$vdata, - SReg_128:$srsrc, SCSrc_b32:$soffset, offset:$offset, SLC:$slc), + SReg_128:$srsrc, SCSrc_b32:$soffset, offset:$offset, CPol_0:$cpol), (ins vdataClass:$vdata, vaddrClass:$vaddr, - SReg_128:$srsrc, SCSrc_b32:$soffset, offset:$offset, SLC:$slc) + SReg_128:$srsrc, SCSrc_b32:$soffset, offset:$offset, CPol_0:$cpol) )); } @@ -679,6 +675,7 @@ let has_dlc = 0; let has_tfe = 0; let maybeAtomic = 1; + let AsmMatchConverter = "cvtMubufAtomic"; } class MUBUF_AtomicNoRet_Pseudo.ret, - " $vdata, " # getMUBUFAsmOps.ret # "$slc", + " $vdata, " # getMUBUFAsmOps.ret # "$cpol", pattern>, AtomicNoRet.ret, 0> { let PseudoInstr = opName # "_" # getAddrName.ret; let glc_value = 0; let dlc_value = 0; let IsAtomicNoRet = 1; - let AsmMatchConverter = "cvtMubufAtomic"; } class MUBUF_AtomicRet_Pseudo.ret, - " $vdata, " # getMUBUFAsmOps.ret # "$glc1$slc", + " $vdata, " # getMUBUFAsmOps.ret # "$cpol", pattern>, AtomicNoRet.ret, 1> { let PseudoInstr = opName # "_rtn_" # getAddrName.ret; @@ -718,7 +714,6 @@ let IsAtomicRet = 1; let Constraints = "$vdata = $vdata_in"; let DisableEncoding = "$vdata_in"; - let AsmMatchConverter = "cvtMubufAtomicReturn"; } multiclass MUBUF_Pseudo_Atomics_NO_RTN , MUBUFAddr64Table <0, NAME # "_RTN">; let FPAtomic = isFP in def _ADDR64_RTN : MUBUF_AtomicRet_Pseudo , + (atomic (MUBUFAddr64Atomic v4i32:$srsrc, i64:$vaddr, i32:$soffset, i16:$offset, + CPol_GLC1:$cpol), vdataType:$vdata_in))]>, MUBUFAddr64Table <1, NAME # "_RTN">; let FPAtomic = isFP in @@ -1177,24 +1172,21 @@ (vt (st v4i32:$rsrc, 0, 0, i32:$soffset, timm:$offset, timm:$auxiliary, 0)), (!cast(opcode # _OFFSET) SReg_128:$rsrc, SCSrc_b32:$soffset, (as_i16timm $offset), - (extract_glc $auxiliary), (extract_slc $auxiliary), 0, (extract_dlc $auxiliary), - (extract_swz $auxiliary)) + (extract_cpol $auxiliary), 0, (extract_swz $auxiliary)) >; def : GCNPat< (vt (st v4i32:$rsrc, 0, i32:$voffset, i32:$soffset, timm:$offset, timm:$auxiliary, 0)), (!cast(opcode # _OFFEN) VGPR_32:$voffset, SReg_128:$rsrc, SCSrc_b32:$soffset, (as_i16timm $offset), - (extract_glc $auxiliary), (extract_slc $auxiliary), 0, (extract_dlc $auxiliary), - (extract_swz $auxiliary)) + (extract_cpol $auxiliary), 0, (extract_swz $auxiliary)) >; def : GCNPat< (vt (st v4i32:$rsrc, i32:$vindex, 0, i32:$soffset, timm:$offset, timm:$auxiliary, timm)), (!cast(opcode # _IDXEN) VGPR_32:$vindex, SReg_128:$rsrc, SCSrc_b32:$soffset, (as_i16timm $offset), - (extract_glc $auxiliary), (extract_slc $auxiliary), 0, (extract_dlc $auxiliary), - (extract_swz $auxiliary)) + (extract_cpol 
$auxiliary), 0, (extract_swz $auxiliary)) >; def : GCNPat< @@ -1203,8 +1195,7 @@ (!cast(opcode # _BOTHEN) (REG_SEQUENCE VReg_64, VGPR_32:$vindex, sub0, VGPR_32:$voffset, sub1), SReg_128:$rsrc, SCSrc_b32:$soffset, (as_i16timm $offset), - (extract_glc $auxiliary), (extract_slc $auxiliary), 0, (extract_dlc $auxiliary), - (extract_swz $auxiliary)) + (extract_cpol $auxiliary), 0, (extract_swz $auxiliary)) >; } @@ -1263,26 +1254,21 @@ (st vt:$vdata, v4i32:$rsrc, 0, 0, i32:$soffset, timm:$offset, timm:$auxiliary, 0), (!cast(opcode # _OFFSET_exact) getVregSrcForVT.ret:$vdata, SReg_128:$rsrc, SCSrc_b32:$soffset, (as_i16timm $offset), - (extract_glc $auxiliary), (extract_slc $auxiliary), 0, (extract_dlc $auxiliary), - (extract_swz $auxiliary)) + (extract_cpol $auxiliary), 0, (extract_swz $auxiliary)) >; def : GCNPat< (st vt:$vdata, v4i32:$rsrc, 0, i32:$voffset, i32:$soffset, timm:$offset, timm:$auxiliary, 0), (!cast(opcode # _OFFEN_exact) getVregSrcForVT.ret:$vdata, VGPR_32:$voffset, SReg_128:$rsrc, SCSrc_b32:$soffset, - (as_i16timm $offset), (extract_glc $auxiliary), - (extract_slc $auxiliary), 0, (extract_dlc $auxiliary), - (extract_swz $auxiliary)) + (as_i16timm $offset), (extract_cpol $auxiliary), 0, (extract_swz $auxiliary)) >; def : GCNPat< (st vt:$vdata, v4i32:$rsrc, i32:$vindex, 0, i32:$soffset, timm:$offset, timm:$auxiliary, timm), (!cast(opcode # _IDXEN_exact) getVregSrcForVT.ret:$vdata, VGPR_32:$vindex, SReg_128:$rsrc, SCSrc_b32:$soffset, - (as_i16timm $offset), (extract_glc $auxiliary), - (extract_slc $auxiliary), 0, (extract_dlc $auxiliary), - (extract_swz $auxiliary)) + (as_i16timm $offset), (extract_cpol $auxiliary), 0, (extract_swz $auxiliary)) >; def : GCNPat< @@ -1291,9 +1277,8 @@ (!cast(opcode # _BOTHEN_exact) getVregSrcForVT.ret:$vdata, (REG_SEQUENCE VReg_64, VGPR_32:$vindex, sub0, VGPR_32:$voffset, sub1), - SReg_128:$rsrc, SCSrc_b32:$soffset, (as_i16timm $offset), (extract_glc $auxiliary), - (extract_slc $auxiliary), 0, (extract_dlc $auxiliary), - (extract_swz $auxiliary)) + SReg_128:$rsrc, SCSrc_b32:$soffset, (as_i16timm $offset), (extract_cpol $auxiliary), + 0, (extract_swz $auxiliary)) >; } @@ -1513,21 +1498,21 @@ class MUBUFLoad_PatternADDR64 : GCNPat < (vt (constant_ld (MUBUFAddr64 v4i32:$srsrc, i64:$vaddr, i32:$soffset, - i16:$offset, i1:$glc, i1:$slc, i1:$tfe, i1:$dlc, i1:$swz))), - (Instr_ADDR64 $vaddr, $srsrc, $soffset, $offset, $glc, $slc, $tfe, $dlc, $swz) + i16:$offset, CPol:$cpol, i1:$tfe, i1:$swz))), + (Instr_ADDR64 $vaddr, $srsrc, $soffset, $offset, CPol:$cpol, $tfe, $swz) >; multiclass MUBUFLoad_Atomic_Pattern { def : GCNPat < (vt (atomic_ld (MUBUFAddr64 v4i32:$srsrc, i64:$vaddr, i32:$soffset, - i16:$offset, i1:$slc))), - (Instr_ADDR64 $vaddr, $srsrc, $soffset, $offset, 0, $slc, 0, 0, 0) + i16:$offset, CPol:$cpol))), + (Instr_ADDR64 $vaddr, $srsrc, $soffset, $offset, 0, $cpol, 0) >; def : GCNPat < (vt (atomic_ld (MUBUFOffsetNoGLC v4i32:$rsrc, i32:$soffset, i16:$offset))), - (Instr_OFFSET $rsrc, $soffset, (as_i16imm $offset), 0, 0, 0, 0, 0) + (Instr_OFFSET $rsrc, $soffset, (as_i16imm $offset), 0, 0, 0) >; } @@ -1548,8 +1533,8 @@ def : GCNPat < (vt (ld (MUBUFOffset v4i32:$srsrc, i32:$soffset, - i16:$offset, i1:$glc, i1:$slc, i1:$tfe, i1:$dlc, i1:$swz))), - (Instr_OFFSET $srsrc, $soffset, $offset, $glc, $slc, $tfe, $dlc, $swz) + i16:$offset, CPol:$cpol, i1:$tfe, i1:$swz))), + (Instr_OFFSET $srsrc, $soffset, $offset, CPol:$cpol, $tfe, $swz) >; } @@ -1572,12 +1557,12 @@ def : GCNPat < (vt (ld (MUBUFScratchOffen v4i32:$srsrc, i32:$vaddr, i32:$soffset, 
u16imm:$offset))), - (InstrOffen $vaddr, $srsrc, $soffset, $offset, 0, 0, 0, 0, 0) + (InstrOffen $vaddr, $srsrc, $soffset, $offset, 0, 0, 0) >; def : GCNPat < (vt (ld (MUBUFScratchOffset v4i32:$srsrc, i32:$soffset, u16imm:$offset))), - (InstrOffset $srsrc, $soffset, $offset, 0, 0, 0, 0, 0) + (InstrOffset $srsrc, $soffset, $offset, 0, 0, 0) >; } @@ -1587,12 +1572,12 @@ ValueType vt, PatFrag ld_frag> { def : GCNPat < (ld_frag (MUBUFScratchOffen v4i32:$srsrc, i32:$vaddr, i32:$soffset, u16imm:$offset), vt:$in), - (InstrOffen $vaddr, $srsrc, $soffset, $offset, 0, 0, 0, 0, 0, $in) + (InstrOffen $vaddr, $srsrc, $soffset, $offset, 0, 0, 0, $in) >; def : GCNPat < (ld_frag (MUBUFScratchOffset v4i32:$srsrc, i32:$soffset, u16imm:$offset), vt:$in), - (InstrOffset $srsrc, $soffset, $offset, 0, 0, 0, 0, 0, $in) + (InstrOffset $srsrc, $soffset, $offset, 0, 0, 0, $in) >; } @@ -1638,13 +1623,13 @@ // Store follows atomic op convention so address is first def : GCNPat < (atomic_st (MUBUFAddr64 v4i32:$srsrc, i64:$vaddr, i32:$soffset, - i16:$offset, i1:$slc), vt:$val), - (Instr_ADDR64 $val, $vaddr, $srsrc, $soffset, $offset, 0, $slc, 0, 0, 0) + i16:$offset, CPol:$cpol), vt:$val), + (Instr_ADDR64 $val, $vaddr, $srsrc, $soffset, $offset, 0, $cpol, 0) >; def : GCNPat < (atomic_st (MUBUFOffsetNoGLC v4i32:$rsrc, i32:$soffset, i16:$offset), vt:$val), - (Instr_OFFSET $val, $rsrc, $soffset, (as_i16imm $offset), 0, 0, 0, 0, 0) + (Instr_OFFSET $val, $rsrc, $soffset, (as_i16imm $offset), 0, 0, 0) >; } let SubtargetPredicate = isGFX6GFX7 in { @@ -1658,8 +1643,8 @@ def : GCNPat < (st vt:$vdata, (MUBUFOffset v4i32:$srsrc, i32:$soffset, - i16:$offset, i1:$glc, i1:$slc, i1:$tfe, i1:$dlc, i1:$swz)), - (Instr_OFFSET $vdata, $srsrc, $soffset, $offset, $glc, $slc, $tfe, $dlc, $swz) + i16:$offset, CPol:$cpol, i1:$tfe, i1:$swz)), + (Instr_OFFSET $vdata, $srsrc, $soffset, $offset, CPol:$cpol, $tfe, $swz) >; } @@ -1673,13 +1658,13 @@ def : GCNPat < (st vt:$value, (MUBUFScratchOffen v4i32:$srsrc, i32:$vaddr, i32:$soffset, u16imm:$offset)), - (InstrOffen rc:$value, $vaddr, $srsrc, $soffset, $offset, 0, 0, 0, 0, 0) + (InstrOffen rc:$value, $vaddr, $srsrc, $soffset, $offset, 0, 0, 0) >; def : GCNPat < (st vt:$value, (MUBUFScratchOffset v4i32:$srsrc, i32:$soffset, u16imm:$offset)), - (InstrOffset rc:$value, $srsrc, $soffset, $offset, 0, 0, 0, 0, 0) + (InstrOffset rc:$value, $srsrc, $soffset, $offset, 0, 0, 0) >; } @@ -1725,8 +1710,7 @@ timm:$format, timm:$auxiliary, 0)), (!cast(opcode # _OFFSET) SReg_128:$rsrc, SCSrc_b32:$soffset, (as_i16timm $offset), (as_i8timm $format), - (extract_glc $auxiliary), (extract_slc $auxiliary), 0, (extract_dlc $auxiliary), - (extract_swz $auxiliary)) + (extract_cpol $auxiliary), 0, (extract_swz $auxiliary)) >; def : GCNPat< @@ -1734,8 +1718,7 @@ timm:$format, timm:$auxiliary, timm)), (!cast(opcode # _IDXEN) VGPR_32:$vindex, SReg_128:$rsrc, SCSrc_b32:$soffset, (as_i16timm $offset), (as_i8timm $format), - (extract_glc $auxiliary), (extract_slc $auxiliary), 0, (extract_dlc $auxiliary), - (extract_swz $auxiliary)) + (extract_cpol $auxiliary), 0, (extract_swz $auxiliary)) >; def : GCNPat< @@ -1743,8 +1726,7 @@ timm:$format, timm:$auxiliary, 0)), (!cast(opcode # _OFFEN) VGPR_32:$voffset, SReg_128:$rsrc, SCSrc_b32:$soffset, (as_i16timm $offset), (as_i8timm $format), - (extract_glc $auxiliary), (extract_slc $auxiliary), 0, (extract_dlc $auxiliary), - (extract_swz $auxiliary)) + (extract_cpol $auxiliary), 0, (extract_swz $auxiliary)) >; def : GCNPat< @@ -1754,8 +1736,7 @@ (REG_SEQUENCE VReg_64, VGPR_32:$vindex, 
sub0, VGPR_32:$voffset, sub1), SReg_128:$rsrc, SCSrc_b32:$soffset, (as_i16timm $offset), (as_i8timm $format), - (extract_glc $auxiliary), (extract_slc $auxiliary), 0, (extract_dlc $auxiliary), - (extract_swz $auxiliary)) + (extract_cpol $auxiliary), 0, (extract_swz $auxiliary)) >; } @@ -1793,8 +1774,7 @@ timm:$format, timm:$auxiliary, 0), (!cast(opcode # _OFFSET_exact) getVregSrcForVT.ret:$vdata, SReg_128:$rsrc, SCSrc_b32:$soffset, (as_i16timm $offset), (as_i8timm $format), - (extract_glc $auxiliary), (extract_slc $auxiliary), 0, (extract_dlc $auxiliary), - (extract_swz $auxiliary)) + (extract_cpol $auxiliary), 0, (extract_swz $auxiliary)) >; def : GCNPat< @@ -1802,8 +1782,7 @@ timm:$format, timm:$auxiliary, timm), (!cast(opcode # _IDXEN_exact) getVregSrcForVT.ret:$vdata, VGPR_32:$vindex, SReg_128:$rsrc, SCSrc_b32:$soffset, (as_i16timm $offset), (as_i8timm $format), - (extract_glc $auxiliary), (extract_slc $auxiliary), 0, (extract_dlc $auxiliary), - (extract_swz $auxiliary)) + (extract_cpol $auxiliary), 0, (extract_swz $auxiliary)) >; def : GCNPat< @@ -1811,8 +1790,7 @@ timm:$format, timm:$auxiliary, 0), (!cast(opcode # _OFFEN_exact) getVregSrcForVT.ret:$vdata, VGPR_32:$voffset, SReg_128:$rsrc, SCSrc_b32:$soffset, (as_i16timm $offset), (as_i8timm $format), - (extract_glc $auxiliary), (extract_slc $auxiliary), 0, (extract_dlc $auxiliary), - (extract_swz $auxiliary)) + (extract_cpol $auxiliary), 0, (extract_swz $auxiliary)) >; def : GCNPat< @@ -1822,8 +1800,7 @@ getVregSrcForVT.ret:$vdata, (REG_SEQUENCE VReg_64, VGPR_32:$vindex, sub0, VGPR_32:$voffset, sub1), SReg_128:$rsrc, SCSrc_b32:$soffset, (as_i16timm $offset), (as_i8timm $format), - (extract_glc $auxiliary), (extract_slc $auxiliary), 0, (extract_dlc $auxiliary), - (extract_swz $auxiliary)) + (extract_cpol $auxiliary), 0, (extract_swz $auxiliary)) >; } @@ -1865,21 +1842,21 @@ let Inst{11-0} = !if(ps.has_offset, offset, ?); let Inst{12} = ps.offen; let Inst{13} = ps.idxen; - let Inst{14} = !if(ps.has_glc, glc, ps.glc_value); + let Inst{14} = !if(ps.has_glc, cpol{CPolBit.GLC}, ps.glc_value); let Inst{16} = ps.lds; let Inst{24-18} = op; let Inst{31-26} = 0x38; let Inst{39-32} = !if(ps.has_vaddr, vaddr, ?); let Inst{47-40} = !if(ps.has_vdata, vdata, ?); let Inst{52-48} = !if(ps.has_srsrc, srsrc{6-2}, ?); - let Inst{54} = !if(ps.has_slc, slc, ?); + let Inst{54} = !if(ps.has_slc, cpol{CPolBit.SLC}, ?); let Inst{55} = !if(ps.has_tfe, tfe, ?); let Inst{63-56} = !if(ps.has_soffset, soffset, ?); } class MUBUF_Real_gfx10 op, MUBUF_Pseudo ps> : Base_MUBUF_Real_gfx6_gfx7_gfx10 { - let Inst{15} = !if(ps.has_dlc, dlc, ps.dlc_value); + let Inst{15} = !if(ps.has_dlc, cpol{CPolBit.DLC}, ps.dlc_value); let Inst{25} = op{7}; } @@ -1931,16 +1908,33 @@ } multiclass MUBUF_Real_Atomics_RTN_gfx10 op> { def _BOTHEN_RTN_gfx10 : - MUBUF_Real_gfx10(NAME#"_BOTHEN_RTN")>; + MUBUF_Real_gfx10(NAME#"_BOTHEN_RTN")>, + AtomicNoRet; def _IDXEN_RTN_gfx10 : - MUBUF_Real_gfx10(NAME#"_IDXEN_RTN")>; + MUBUF_Real_gfx10(NAME#"_IDXEN_RTN")>, + AtomicNoRet; def _OFFEN_RTN_gfx10 : - MUBUF_Real_gfx10(NAME#"_OFFEN_RTN")>; + MUBUF_Real_gfx10(NAME#"_OFFEN_RTN")>, + AtomicNoRet; def _OFFSET_RTN_gfx10 : - MUBUF_Real_gfx10(NAME#"_OFFSET_RTN")>; + MUBUF_Real_gfx10(NAME#"_OFFSET_RTN")>, + AtomicNoRet; } multiclass MUBUF_Real_Atomics_gfx10 op> : - MUBUF_Real_AllAddr_gfx10, MUBUF_Real_Atomics_RTN_gfx10; + MUBUF_Real_Atomics_RTN_gfx10 { + def _BOTHEN_gfx10 : + MUBUF_Real_gfx10(NAME#"_BOTHEN")>, + AtomicNoRet; + def _IDXEN_gfx10 : + MUBUF_Real_gfx10(NAME#"_IDXEN")>, + AtomicNoRet; + def 
_OFFEN_gfx10 : + MUBUF_Real_gfx10(NAME#"_OFFEN")>, + AtomicNoRet; + def _OFFSET_gfx10 : + MUBUF_Real_gfx10(NAME#"_OFFSET")>, + AtomicNoRet; + } } // End AssemblerPredicate = isGFX10Plus, DecoderNamespace = "GFX10" defm BUFFER_STORE_BYTE_D16_HI : MUBUF_Real_AllAddr_gfx10<0x019>; @@ -2020,18 +2014,38 @@ def _LDS_BOTHEN_gfx6_gfx7 : MUBUF_Real_gfx6_gfx7(NAME#"_LDS_BOTHEN")>, MUBUFLdsTable<1, NAME # "_BOTHEN_gfx6_gfx7">; } - multiclass MUBUF_Real_Atomics_gfx6_gfx7 op> : - MUBUF_Real_AllAddr_gfx6_gfx7 { + multiclass MUBUF_Real_Atomics_gfx6_gfx7 op> { + def _ADDR64_gfx6_gfx7 : + MUBUF_Real_gfx6_gfx7(NAME#"_ADDR64")>, + AtomicNoRet; + def _BOTHEN_gfx6_gfx7 : + MUBUF_Real_gfx6_gfx7(NAME#"_BOTHEN")>, + AtomicNoRet; + def _IDXEN_gfx6_gfx7 : + MUBUF_Real_gfx6_gfx7(NAME#"_IDXEN")>, + AtomicNoRet; + def _OFFEN_gfx6_gfx7 : + MUBUF_Real_gfx6_gfx7(NAME#"_OFFEN")>, + AtomicNoRet; + def _OFFSET_gfx6_gfx7 : + MUBUF_Real_gfx6_gfx7(NAME#"_OFFSET")>, + AtomicNoRet; + def _ADDR64_RTN_gfx6_gfx7 : - MUBUF_Real_gfx6_gfx7(NAME#"_ADDR64_RTN")>; + MUBUF_Real_gfx6_gfx7(NAME#"_ADDR64_RTN")>, + AtomicNoRet; def _BOTHEN_RTN_gfx6_gfx7 : - MUBUF_Real_gfx6_gfx7(NAME#"_BOTHEN_RTN")>; + MUBUF_Real_gfx6_gfx7(NAME#"_BOTHEN_RTN")>, + AtomicNoRet; def _IDXEN_RTN_gfx6_gfx7 : - MUBUF_Real_gfx6_gfx7(NAME#"_IDXEN_RTN")>; + MUBUF_Real_gfx6_gfx7(NAME#"_IDXEN_RTN")>, + AtomicNoRet; def _OFFEN_RTN_gfx6_gfx7 : - MUBUF_Real_gfx6_gfx7(NAME#"_OFFEN_RTN")>; + MUBUF_Real_gfx6_gfx7(NAME#"_OFFEN_RTN")>, + AtomicNoRet; def _OFFSET_RTN_gfx6_gfx7 : - MUBUF_Real_gfx6_gfx7(NAME#"_OFFSET_RTN")>; + MUBUF_Real_gfx6_gfx7(NAME#"_OFFSET_RTN")>, + AtomicNoRet; } } // End AssemblerPredicate = isGFX6GFX7, DecoderNamespace = "GFX6GFX7" @@ -2120,13 +2134,13 @@ let Inst{11-0} = !if(ps.has_offset, offset, ?); let Inst{12} = ps.offen; let Inst{13} = ps.idxen; - let Inst{14} = !if(ps.has_glc, glc, ps.glc_value); + let Inst{14} = !if(ps.has_glc, cpol{CPolBit.GLC}, ps.glc_value); let Inst{18-16} = op; let Inst{31-26} = 0x3a; //encoding let Inst{39-32} = !if(ps.has_vaddr, vaddr, ?); let Inst{47-40} = !if(ps.has_vdata, vdata, ?); let Inst{52-48} = !if(ps.has_srsrc, srsrc{6-2}, ?); - let Inst{54} = !if(ps.has_slc, slc, ?); + let Inst{54} = !if(ps.has_slc, cpol{CPolBit.SLC}, ?); let Inst{55} = !if(ps.has_tfe, tfe, ?); let Inst{63-56} = !if(ps.has_soffset, soffset, ?); } @@ -2137,7 +2151,7 @@ class MTBUF_Real_gfx10 op, MTBUF_Pseudo ps> : Base_MTBUF_Real_gfx6_gfx7_gfx10 { - let Inst{15} = !if(ps.has_dlc, dlc, ps.dlc_value); + let Inst{15} = !if(ps.has_dlc, cpol{CPolBit.DLC}, ps.dlc_value); let Inst{25-19} = format; let Inst{53} = op{3}; } @@ -2216,9 +2230,9 @@ let Inst{11-0} = !if(ps.has_offset, offset, ?); let Inst{12} = ps.offen; let Inst{13} = ps.idxen; - let Inst{14} = !if(ps.has_glc, glc, ps.glc_value); + let Inst{14} = !if(ps.has_glc, cpol{CPolBit.GLC}, ps.glc_value); let Inst{16} = ps.lds; - let Inst{17} = !if(ps.has_slc, slc, ?); + let Inst{17} = !if(ps.has_slc, cpol{CPolBit.SLC}, ?); let Inst{24-18} = op; let Inst{31-26} = 0x38; //encoding let Inst{39-32} = !if(ps.has_vaddr, vaddr, ?); @@ -2266,9 +2280,9 @@ let Inst{11-0} = !if(ps.has_offset, offset, ?); let Inst{12} = ps.offen; let Inst{13} = ps.idxen; - let Inst{14} = !if(ps.has_glc, glc, ps.glc_value); + let Inst{14} = !if(ps.has_glc, cpol{CPolBit.GLC}, ps.glc_value); let Inst{16} = ps.lds; - let Inst{17} = !if(ps.has_slc, slc, ?); + let Inst{17} = !if(ps.has_slc, cpol{CPolBit.SLC}, ?); let Inst{24-18} = op; let Inst{31-26} = 0x38; //encoding let Inst{39-32} = !if(ps.has_vaddr, vaddr, ?); @@ -2285,12 +2299,23 @@ 
def _BOTHEN_gfx80 : MUBUF_Real_gfx80 (NAME#"_BOTHEN")>; } -multiclass MUBUF_Real_Atomic_vi op> : - MUBUF_Real_AllAddr_vi { - def _OFFSET_RTN_vi : MUBUF_Real_vi (NAME#"_OFFSET_RTN")>; - def _OFFEN_RTN_vi : MUBUF_Real_vi (NAME#"_OFFEN_RTN")>; - def _IDXEN_RTN_vi : MUBUF_Real_vi (NAME#"_IDXEN_RTN")>; - def _BOTHEN_RTN_vi : MUBUF_Real_vi (NAME#"_BOTHEN_RTN")>; +multiclass MUBUF_Real_Atomic_vi op> { + def _OFFSET_vi : MUBUF_Real_vi (NAME#"_OFFSET")>, + AtomicNoRet; + def _OFFEN_vi : MUBUF_Real_vi (NAME#"_OFFEN")>, + AtomicNoRet; + def _IDXEN_vi : MUBUF_Real_vi (NAME#"_IDXEN")>, + AtomicNoRet; + def _BOTHEN_vi : MUBUF_Real_vi (NAME#"_BOTHEN")>, + AtomicNoRet; + def _OFFSET_RTN_vi : MUBUF_Real_vi (NAME#"_OFFSET_RTN")>, + AtomicNoRet; + def _OFFEN_RTN_vi : MUBUF_Real_vi (NAME#"_OFFEN_RTN")>, + AtomicNoRet; + def _IDXEN_RTN_vi : MUBUF_Real_vi (NAME#"_IDXEN_RTN")>, + AtomicNoRet; + def _BOTHEN_RTN_vi : MUBUF_Real_vi (NAME#"_BOTHEN_RTN")>, + AtomicNoRet; } defm BUFFER_LOAD_FORMAT_X : MUBUF_Real_AllAddr_Lds_vi <0x00>; @@ -2398,7 +2423,7 @@ let Inst{11-0} = !if(ps.has_offset, offset, ?); let Inst{12} = ps.offen; let Inst{13} = ps.idxen; - let Inst{14} = !if(ps.has_glc, glc, ps.glc_value); + let Inst{14} = !if(ps.has_glc, cpol{CPolBit.GLC}, ps.glc_value); let Inst{18-15} = op; let Inst{22-19} = dfmt; let Inst{25-23} = nfmt; @@ -2406,7 +2431,7 @@ let Inst{39-32} = !if(ps.has_vaddr, vaddr, ?); let Inst{47-40} = !if(ps.has_vdata, vdata, ?); let Inst{52-48} = !if(ps.has_srsrc, srsrc{6-2}, ?); - let Inst{54} = !if(ps.has_slc, slc, ?); + let Inst{54} = !if(ps.has_slc, cpol{CPolBit.SLC}, ?); let Inst{55} = !if(ps.has_tfe, tfe, ?); let Inst{63-56} = !if(ps.has_soffset, soffset, ?); } @@ -2428,7 +2453,7 @@ let Inst{11-0} = !if(ps.has_offset, offset, ?); let Inst{12} = ps.offen; let Inst{13} = ps.idxen; - let Inst{14} = !if(ps.has_glc, glc, ps.glc_value); + let Inst{14} = !if(ps.has_glc, cpol{CPolBit.GLC}, ps.glc_value); let Inst{18-15} = op; let Inst{22-19} = dfmt; let Inst{25-23} = nfmt; @@ -2436,7 +2461,7 @@ let Inst{39-32} = !if(ps.has_vaddr, vaddr, ?); let Inst{47-40} = !if(ps.has_vdata, vdata, ?); let Inst{52-48} = !if(ps.has_srsrc, srsrc{6-2}, ?); - let Inst{54} = !if(ps.has_slc, slc, ?); + let Inst{54} = !if(ps.has_slc, cpol{CPolBit.SLC}, ?); let Inst{55} = !if(ps.has_tfe, tfe, ?); let Inst{63-56} = !if(ps.has_soffset, soffset, ?); } Index: llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp =================================================================== --- llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp +++ llvm/lib/Target/AMDGPU/Disassembler/AMDGPUDisassembler.cpp @@ -379,9 +379,20 @@ } if (Res && (MCII->get(MI.getOpcode()).TSFlags & - (SIInstrFlags::MUBUF | SIInstrFlags::FLAT)) && - AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::glc1) != -1) { - insertNamedMCOperand(MI, MCOperand::createImm(1), AMDGPU::OpName::glc1); + (SIInstrFlags::MUBUF | SIInstrFlags::FLAT | SIInstrFlags::SMRD))) { + int CPolPos = AMDGPU::getNamedOperandIdx(MI.getOpcode(), + AMDGPU::OpName::cpol); + if (CPolPos != -1) { + unsigned CPol = + (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::IsAtomicRet) ? 
+ AMDGPU::CPol::GLC : 0; + if (MI.getNumOperands() <= (unsigned)CPolPos) { + insertNamedMCOperand(MI, MCOperand::createImm(CPol), + AMDGPU::OpName::cpol); + } else if (CPol) { + MI.getOperand(CPolPos).setImm(MI.getOperand(CPolPos).getImm() | CPol); + } + } } if (Res && (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::MIMG)) { Index: llvm/lib/Target/AMDGPU/FLATInstructions.td =================================================================== --- llvm/lib/Target/AMDGPU/FLATInstructions.td +++ llvm/lib/Target/AMDGPU/FLATInstructions.td @@ -92,9 +92,7 @@ bits<7> saddr; bits<8> vdst; - bits<1> slc; - bits<1> glc; - bits<1> dlc; + bits<3> cpol; // Only valid on gfx9 bits<1> lds = 0; // XXX - What does this actually do? @@ -116,8 +114,8 @@ let Inst{13} = lds; let Inst{15-14} = seg; - let Inst{16} = !if(ps.has_glc, glc, ps.glcValue); - let Inst{17} = slc; + let Inst{16} = !if(ps.has_glc, cpol{CPolBit.GLC}, ps.glcValue); + let Inst{17} = cpol{CPolBit.SLC}; let Inst{24-18} = op; let Inst{31-26} = 0x37; // Encoding. let Inst{39-32} = !if(ps.has_vaddr, vaddr, ?); @@ -149,9 +147,9 @@ (ins VReg_64:$vaddr)), (ins flat_offset:$offset)), // FIXME: Operands with default values do not work with following non-optional operands. - !if(HasTiedOutput, (ins GLC:$glc, SLC:$slc, DLC:$dlc, regClass:$vdst_in), - (ins GLC_0:$glc, SLC_0:$slc, DLC_0:$dlc))), - " $vdst, $vaddr"#!if(HasSaddr, !if(EnableSaddr, ", $saddr", ", off"), "")#"$offset$glc$slc$dlc"> { + !if(HasTiedOutput, (ins CPol:$cpol, regClass:$vdst_in), + (ins CPol_0:$cpol))), + " $vdst, $vaddr"#!if(HasSaddr, !if(EnableSaddr, ", $saddr", ", off"), "")#"$offset$cpol"> { let has_data = 0; let mayLoad = 1; let has_saddr = HasSaddr; @@ -171,8 +169,8 @@ !if(EnableSaddr, (ins VGPR_32:$vaddr, vdataClass:$vdata, SReg_64:$saddr), (ins VReg_64:$vaddr, vdataClass:$vdata)), - (ins flat_offset:$offset, GLC_0:$glc, SLC_0:$slc, DLC_0:$dlc)), - " $vaddr, $vdata"#!if(HasSaddr, !if(EnableSaddr, ", $saddr", ", off"), "")#"$offset$glc$slc$dlc"> { + (ins flat_offset:$offset, CPol_0:$cpol)), + " $vaddr, $vdata"#!if(HasSaddr, !if(EnableSaddr, ", $saddr", ", off"), "")#"$offset$cpol"> { let mayLoad = 0; let mayStore = 1; let has_vdst = 0; @@ -196,9 +194,9 @@ opName, (outs regClass:$vdst), !con(!if(EnableSaddr, (ins SReg_64:$saddr), (ins)), - (ins flat_offset:$offset, GLC_0:$glc, SLC_0:$slc, DLC_0:$dlc), + (ins flat_offset:$offset, CPol_0:$cpol), !if(HasTiedOutput, (ins regClass:$vdst_in), (ins))), - " $vdst, "#!if(EnableSaddr, "$saddr", "off")#"$offset$glc$slc$dlc"> { + " $vdst, "#!if(EnableSaddr, "$saddr", "off")#"$offset$cpol"> { let is_flat_global = 1; let has_data = 0; let mayLoad = 1; @@ -234,8 +232,8 @@ opName, (outs), !con(!if(EnableSaddr, (ins vdataClass:$vdata, SReg_64:$saddr), (ins vdataClass:$vdata)), - (ins flat_offset:$offset, GLC:$glc, SLC:$slc, DLC:$dlc)), - " $vdata, "#!if(EnableSaddr, "$saddr", "off")#"$offset$glc$slc$dlc"> { + (ins flat_offset:$offset, CPol:$cpol)), + " $vdata, "#!if(EnableSaddr, "$saddr", "off")#"$offset$cpol"> { let is_flat_global = 1; let mayLoad = 0; let mayStore = 1; @@ -273,9 +271,9 @@ !if(EnableVaddr, (ins VGPR_32:$vaddr, flat_offset:$offset), (ins flat_offset:$offset))), - !if(HasTiedOutput, (ins GLC:$glc, SLC:$slc, DLC:$dlc, regClass:$vdst_in), - (ins GLC_0:$glc, SLC_0:$slc, DLC_0:$dlc))), - " $vdst, "#!if(EnableVaddr, "$vaddr, ", "off, ")#!if(EnableSaddr, "$saddr", "off")#"$offset$glc$slc$dlc"> { + !if(HasTiedOutput, (ins CPol:$cpol, regClass:$vdst_in), + (ins CPol_0:$cpol))), + " $vdst, "#!if(EnableVaddr, "$vaddr, ", "off, 
")#!if(EnableSaddr, "$saddr", "off")#"$offset$cpol"> { let has_data = 0; let mayLoad = 1; let has_saddr = 1; @@ -293,11 +291,11 @@ opName, (outs), !if(EnableSaddr, - (ins vdataClass:$vdata, SReg_32_XEXEC_HI:$saddr, flat_offset:$offset, GLC_0:$glc, SLC_0:$slc, DLC_0:$dlc), + (ins vdataClass:$vdata, SReg_32_XEXEC_HI:$saddr, flat_offset:$offset, CPol_0:$cpol), !if(EnableVaddr, - (ins vdataClass:$vdata, VGPR_32:$vaddr, flat_offset:$offset, GLC_0:$glc, SLC_0:$slc, DLC_0:$dlc), - (ins vdataClass:$vdata, flat_offset:$offset, GLC_0:$glc, SLC_0:$slc, DLC_0:$dlc))), - " "#!if(EnableVaddr, "$vaddr", "off")#", $vdata, "#!if(EnableSaddr, "$saddr", "off")#"$offset$glc$slc$dlc"> { + (ins vdataClass:$vdata, VGPR_32:$vaddr, flat_offset:$offset, CPol_0:$cpol), + (ins vdataClass:$vdata, flat_offset:$offset, CPol_0:$cpol))), + " "#!if(EnableVaddr, "$vaddr", "off")#", $vdata, "#!if(EnableSaddr, "$saddr", "off")#"$offset$cpol"> { let mayLoad = 0; let mayStore = 1; let has_vdst = 0; @@ -370,8 +368,8 @@ bit isFP = isFloatType.ret> { def "" : FLAT_AtomicNoRet_Pseudo , + (ins VReg_64:$vaddr, data_rc:$vdata, flat_offset:$offset, CPol_0:$cpol), + " $vaddr, $vdata$offset$cpol">, GlobalSaddrTable<0, opName>, AtomicNoRet { let PseudoInstr = NAME; @@ -381,8 +379,8 @@ def _RTN : FLAT_AtomicRet_Pseudo , GlobalSaddrTable<0, opName#"_rtn">, @@ -403,8 +401,8 @@ def "" : FLAT_AtomicNoRet_Pseudo , + (ins VReg_64:$vaddr, data_rc:$vdata, flat_offset:$offset, CPol_0:$cpol), + " $vaddr, $vdata, off$offset$cpol">, GlobalSaddrTable<0, opName>, AtomicNoRet { let has_saddr = 1; @@ -414,8 +412,8 @@ def _SADDR : FLAT_AtomicNoRet_Pseudo , + (ins VGPR_32:$vaddr, data_rc:$vdata, SReg_64:$saddr, flat_offset:$offset, CPol_0:$cpol), + " $vaddr, $vdata, $saddr$offset$cpol">, GlobalSaddrTable<1, opName>, AtomicNoRet { let has_saddr = 1; @@ -436,8 +434,8 @@ def _RTN : FLAT_AtomicRet_Pseudo , GlobalSaddrTable<0, opName#"_rtn">, @@ -448,8 +446,8 @@ def _SADDR_RTN : FLAT_AtomicRet_Pseudo , + (ins VGPR_32:$vaddr, data_rc:$vdata, SReg_64:$saddr, flat_offset:$offset, CPol_GLC1:$cpol), + " $vdst, $vaddr, $vdata, $saddr$offset$cpol">, GlobalSaddrTable<1, opName#"_rtn">, AtomicNoRet { let has_saddr = 1; @@ -794,17 +792,17 @@ class FlatLoadPat_D16 : GCNPat < (node (FLATOffset (i64 VReg_64:$vaddr), i16:$offset), vt:$in), - (inst $vaddr, $offset, 0, 0, 0, $in) + (inst $vaddr, $offset, 0, $in) >; class FlatSignedLoadPat_D16 : GCNPat < (node (FLATOffsetSigned (i64 VReg_64:$vaddr), i16:$offset), vt:$in), - (inst $vaddr, $offset, 0, 0, 0, $in) + (inst $vaddr, $offset, 0, $in) >; class GlobalLoadSaddrPat_D16 : GCNPat < (vt (node (GlobalSAddr (i64 SReg_64:$saddr), (i32 VGPR_32:$voffset), i16:$offset), vt:$in)), - (inst $saddr, $voffset, $offset, 0, 0, 0, $in) + (inst $saddr, $voffset, $offset, 0, $in) >; class FlatLoadSignedPat : GCNPat < @@ -814,7 +812,7 @@ class GlobalLoadSaddrPat : GCNPat < (vt (node (GlobalSAddr (i64 SReg_64:$saddr), (i32 VGPR_32:$voffset), i16:$offset))), - (inst $saddr, $voffset, $offset, 0, 0, 0) + (inst $saddr, $voffset, $offset, 0) >; class GlobalStoreSaddrPat : GCNPat < (node (ScratchOffset (i32 VGPR_32:$vaddr), i16:$offset), vt:$in), - (inst $vaddr, $offset, 0, 0, 0, $in) + (inst $vaddr, $offset, 0, $in) >; class ScratchStoreSignedPat : GCNPat < @@ -910,7 +908,7 @@ class ScratchLoadSaddrPat_D16 : GCNPat < (vt (node (ScratchSAddr (i32 SGPR_32:$saddr), i16:$offset), vt:$in)), - (inst $saddr, $offset, 0, 0, 0, $in) + (inst $saddr, $offset, 0, $in) >; class ScratchStoreSaddrPat getOperand(OpNo).getImm(); + if (Imm & CPol::GLC) + O << " 
glc"; + if (Imm & CPol::SLC) + O << " slc"; + if (Imm & CPol::DLC) + O << " dlc"; + if (Imm & ~CPol::ALL) + O << " /* unexpected cache policy bit */"; +} + void AMDGPUInstPrinter::printSWZ(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI, raw_ostream &O) { } Index: llvm/lib/Target/AMDGPU/MIMGInstructions.td =================================================================== --- llvm/lib/Target/AMDGPU/MIMGInstructions.td +++ llvm/lib/Target/AMDGPU/MIMGInstructions.td @@ -250,10 +250,10 @@ string dns=""> : MIMG_gfx6789 { let InOperandList = !con((ins addr_rc:$vaddr, SReg_256:$srsrc, - DMask:$dmask, UNorm:$unorm, GLC:$glc, SLC:$slc, + DMask:$dmask, UNorm:$unorm, CPol:$cpol, R128A16:$r128, TFE:$tfe, LWE:$lwe, DA:$da), !if(BaseOpcode.HasD16, (ins D16:$d16), (ins))); - let AsmString = asm#" $vdata, $vaddr, $srsrc$dmask$unorm$glc$slc$r128$tfe$lwe$da" + let AsmString = asm#" $vdata, $vaddr, $srsrc$dmask$unorm$cpol$r128$tfe$lwe$da" #!if(BaseOpcode.HasD16, "$d16", ""); } @@ -262,10 +262,10 @@ string dns=""> : MIMG_gfx10 { let InOperandList = !con((ins AddrRC:$vaddr0, SReg_256:$srsrc, DMask:$dmask, - Dim:$dim, UNorm:$unorm, DLC:$dlc, GLC:$glc, - SLC:$slc, R128A16:$r128, GFX10A16:$a16, TFE:$tfe, LWE:$lwe), + Dim:$dim, UNorm:$unorm, CPol:$cpol, + R128A16:$r128, GFX10A16:$a16, TFE:$tfe, LWE:$lwe), !if(BaseOpcode.HasD16, (ins D16:$d16), (ins))); - let AsmString = opcode#" $vdata, $vaddr0, $srsrc$dmask$dim$unorm$dlc$glc$slc$r128$a16$tfe$lwe" + let AsmString = opcode#" $vdata, $vaddr0, $srsrc$dmask$dim$unorm$cpol$r128$a16$tfe$lwe" #!if(BaseOpcode.HasD16, "$d16", ""); } @@ -275,10 +275,10 @@ : MIMG_nsa_gfx10 { let InOperandList = !con(AddrIns, (ins SReg_256:$srsrc, DMask:$dmask, - Dim:$dim, UNorm:$unorm, DLC:$dlc, GLC:$glc, - SLC:$slc, R128A16:$r128, GFX10A16:$a16, TFE:$tfe, LWE:$lwe), + Dim:$dim, UNorm:$unorm, CPol:$cpol, + R128A16:$r128, GFX10A16:$a16, TFE:$tfe, LWE:$lwe), !if(BaseOpcode.HasD16, (ins D16:$d16), (ins))); - let AsmString = opcode#" $vdata, "#AddrAsm#", $srsrc$dmask$dim$unorm$dlc$glc$slc$r128$a16$tfe$lwe" + let AsmString = opcode#" $vdata, "#AddrAsm#", $srsrc$dmask$dim$unorm$cpol$r128$a16$tfe$lwe" #!if(BaseOpcode.HasD16, "$d16", ""); } @@ -351,10 +351,10 @@ string dns = ""> : MIMG_gfx6789 { let InOperandList = !con((ins data_rc:$vdata, addr_rc:$vaddr, SReg_256:$srsrc, - DMask:$dmask, UNorm:$unorm, GLC:$glc, SLC:$slc, + DMask:$dmask, UNorm:$unorm, CPol:$cpol, R128A16:$r128, TFE:$tfe, LWE:$lwe, DA:$da), !if(BaseOpcode.HasD16, (ins D16:$d16), (ins))); - let AsmString = asm#" $vdata, $vaddr, $srsrc$dmask$unorm$glc$slc$r128$tfe$lwe$da" + let AsmString = asm#" $vdata, $vaddr, $srsrc$dmask$unorm$cpol$r128$tfe$lwe$da" #!if(BaseOpcode.HasD16, "$d16", ""); } @@ -363,10 +363,10 @@ string dns=""> : MIMG_gfx10 { let InOperandList = !con((ins DataRC:$vdata, AddrRC:$vaddr0, SReg_256:$srsrc, - DMask:$dmask, Dim:$dim, UNorm:$unorm, DLC:$dlc, - GLC:$glc, SLC:$slc, R128A16:$r128, GFX10A16:$a16, TFE:$tfe, LWE:$lwe), + DMask:$dmask, Dim:$dim, UNorm:$unorm, CPol:$cpol, + R128A16:$r128, GFX10A16:$a16, TFE:$tfe, LWE:$lwe), !if(BaseOpcode.HasD16, (ins D16:$d16), (ins))); - let AsmString = opcode#" $vdata, $vaddr0, $srsrc$dmask$dim$unorm$dlc$glc$slc$r128$a16$tfe$lwe" + let AsmString = opcode#" $vdata, $vaddr0, $srsrc$dmask$dim$unorm$cpol$r128$a16$tfe$lwe" #!if(BaseOpcode.HasD16, "$d16", ""); } @@ -377,10 +377,10 @@ let InOperandList = !con((ins DataRC:$vdata), AddrIns, (ins SReg_256:$srsrc, DMask:$dmask, - Dim:$dim, UNorm:$unorm, DLC:$dlc, GLC:$glc, - SLC:$slc, R128A16:$r128, GFX10A16:$a16, TFE:$tfe, 
LWE:$lwe), + Dim:$dim, UNorm:$unorm, CPol:$cpol, + R128A16:$r128, GFX10A16:$a16, TFE:$tfe, LWE:$lwe), !if(BaseOpcode.HasD16, (ins D16:$d16), (ins))); - let AsmString = opcode#" $vdata, "#AddrAsm#", $srsrc$dmask$dim$unorm$dlc$glc$slc$r128$a16$tfe$lwe" + let AsmString = opcode#" $vdata, "#AddrAsm#", $srsrc$dmask$dim$unorm$cpol$r128$a16$tfe$lwe" #!if(BaseOpcode.HasD16, "$d16", ""); } @@ -450,9 +450,9 @@ let AsmMatchConverter = "cvtMIMGAtomic"; let InOperandList = (ins data_rc:$vdata, addr_rc:$vaddr, SReg_256:$srsrc, - DMask:$dmask, UNorm:$unorm, GLC:$glc, SLC:$slc, + DMask:$dmask, UNorm:$unorm, CPol:$cpol, R128A16:$r128, TFE:$tfe, LWE:$lwe, DA:$da); - let AsmString = asm#" $vdst, $vaddr, $srsrc$dmask$unorm$glc$slc$r128$tfe$lwe$da"; + let AsmString = asm#" $vdst, $vaddr, $srsrc$dmask$unorm$cpol$r128$tfe$lwe$da"; } class MIMG_Atomic_si : MIMG_gfx6789 { let InOperandList = !con((ins src_rc:$vaddr, SReg_256:$srsrc, SReg_128:$ssamp, - DMask:$dmask, UNorm:$unorm, GLC:$glc, SLC:$slc, + DMask:$dmask, UNorm:$unorm, CPol:$cpol, R128A16:$r128, TFE:$tfe, LWE:$lwe, DA:$da), !if(BaseOpcode.HasD16, (ins D16:$d16), (ins))); - let AsmString = asm#" $vdata, $vaddr, $srsrc, $ssamp$dmask$unorm$glc$slc$r128$tfe$lwe$da" + let AsmString = asm#" $vdata, $vaddr, $srsrc, $ssamp$dmask$unorm$cpol$r128$tfe$lwe$da" #!if(BaseOpcode.HasD16, "$d16", ""); } @@ -591,11 +591,11 @@ string dns=""> : MIMG_gfx10 { let InOperandList = !con((ins AddrRC:$vaddr0, SReg_256:$srsrc, SReg_128:$ssamp, - DMask:$dmask, Dim:$dim, UNorm:$unorm, DLC:$dlc, - GLC:$glc, SLC:$slc, R128A16:$r128, GFX10A16:$a16, TFE:$tfe, LWE:$lwe), + DMask:$dmask, Dim:$dim, UNorm:$unorm, CPol:$cpol, + R128A16:$r128, GFX10A16:$a16, TFE:$tfe, LWE:$lwe), !if(BaseOpcode.HasD16, (ins D16:$d16), (ins))); let AsmString = opcode#" $vdata, $vaddr0, $srsrc, $ssamp$dmask$dim$unorm" - #"$dlc$glc$slc$r128$a16$tfe$lwe" + #"$cpol$r128$a16$tfe$lwe" #!if(BaseOpcode.HasD16, "$d16", ""); } @@ -605,11 +605,11 @@ : MIMG_nsa_gfx10 { let InOperandList = !con(AddrIns, (ins SReg_256:$srsrc, SReg_128:$ssamp, DMask:$dmask, - Dim:$dim, UNorm:$unorm, DLC:$dlc, GLC:$glc, - SLC:$slc, R128A16:$r128, GFX10A16:$a16, TFE:$tfe, LWE:$lwe), + Dim:$dim, UNorm:$unorm, CPol:$cpol, + R128A16:$r128, GFX10A16:$a16, TFE:$tfe, LWE:$lwe), !if(BaseOpcode.HasD16, (ins D16:$d16), (ins))); let AsmString = opcode#" $vdata, "#AddrAsm#", $srsrc, $ssamp$dmask$dim$unorm" - #"$dlc$glc$slc$r128$a16$tfe$lwe" + #"$cpol$r128$a16$tfe$lwe" #!if(BaseOpcode.HasD16, "$d16", ""); } @@ -792,9 +792,7 @@ dmask = 0xf, unorm = 1, d16 = 0, - glc = 0, - slc = 0, - dlc = 0, + cpol = 0, tfe = 0, lwe = 0, r128 = 1, Index: llvm/lib/Target/AMDGPU/SIDefines.h =================================================================== --- llvm/lib/Target/AMDGPU/SIDefines.h +++ llvm/lib/Target/AMDGPU/SIDefines.h @@ -269,6 +269,17 @@ } // namespace AMDGPU namespace AMDGPU { +namespace CPol { + +enum CPol { + GLC = 1, + SLC = 2, + DLC = 4, + ALL = GLC | SLC | DLC +}; + +} // namespace CPol + namespace SendMsg { // Encoding of SIMM16 used in s_sendmsg* insns. enum Id { // Message ID, width(4) [3:0]. 
Index: llvm/lib/Target/AMDGPU/SIISelLowering.cpp =================================================================== --- llvm/lib/Target/AMDGPU/SIISelLowering.cpp +++ llvm/lib/Target/AMDGPU/SIISelLowering.cpp @@ -11134,10 +11134,12 @@ int NoRetAtomicOp = AMDGPU::getAtomicNoRetOp(MI.getOpcode()); if (NoRetAtomicOp != -1) { if (!Node->hasAnyUseOfValue(0)) { - int Glc1Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), - AMDGPU::OpName::glc1); - if (Glc1Idx != -1) - MI.RemoveOperand(Glc1Idx); + int CPolIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), + AMDGPU::OpName::cpol); + if (CPolIdx != -1) { + MachineOperand &CPol = MI.getOperand(CPolIdx); + CPol.setImm(CPol.getImm() | AMDGPU::CPol::GLC); + } MI.RemoveOperand(0); MI.setDesc(TII->get(NoRetAtomicOp)); return; Index: llvm/lib/Target/AMDGPU/SIInstrFormats.td =================================================================== --- llvm/lib/Target/AMDGPU/SIInstrFormats.td +++ llvm/lib/Target/AMDGPU/SIInstrFormats.td @@ -261,6 +261,12 @@ int Size = 8; } +def CPolBit { + int GLC = 0; + int SLC = 1; + int DLC = 2; +} + class VOPDstOperand : RegisterOperand ; class VINTRPe op> : Enc32 { @@ -281,22 +287,21 @@ bits<8> vdata; bits<4> dmask; bits<1> unorm; - bits<1> glc; + bits<3> cpol; bits<1> r128; bits<1> tfe; bits<1> lwe; - bits<1> slc; bit d16; bits<7> srsrc; bits<7> ssamp; let Inst{11-8} = dmask; let Inst{12} = unorm; - let Inst{13} = glc; + let Inst{13} = cpol{CPolBit.GLC}; let Inst{15} = r128; let Inst{16} = tfe; let Inst{17} = lwe; - let Inst{25} = slc; + let Inst{25} = cpol{CPolBit.SLC}; let Inst{31-26} = 0x3c; let Inst{47-40} = vdata; let Inst{52-48} = srsrc{6-2}; @@ -318,13 +323,12 @@ bits<8> vaddr0; bits<3> dim; bits<2> nsa; - bits<1> dlc; bits<1> a16; let Inst{0} = op{7}; let Inst{2-1} = nsa; let Inst{5-3} = dim; - let Inst{7} = dlc; + let Inst{7} = cpol{CPolBit.DLC}; let Inst{24-18} = op{6-0}; let Inst{39-32} = vaddr0; let Inst{62} = a16; Index: llvm/lib/Target/AMDGPU/SIInstrInfo.cpp =================================================================== --- llvm/lib/Target/AMDGPU/SIInstrInfo.cpp +++ llvm/lib/Target/AMDGPU/SIInstrInfo.cpp @@ -5331,17 +5331,10 @@ .add(*SOffset) .add(*Offset); - // Atomics do not have this operand. 
- if (const MachineOperand *GLC = - getNamedOperand(MI, AMDGPU::OpName::glc)) { - MIB.addImm(GLC->getImm()); + if (const MachineOperand *CPol = + getNamedOperand(MI, AMDGPU::OpName::cpol)) { + MIB.addImm(CPol->getImm()); } - if (const MachineOperand *DLC = - getNamedOperand(MI, AMDGPU::OpName::dlc)) { - MIB.addImm(DLC->getImm()); - } - - MIB.addImm(getNamedImmOperand(MI, AMDGPU::OpName::slc)); if (const MachineOperand *TFE = getNamedOperand(MI, AMDGPU::OpName::tfe)) { @@ -5361,7 +5354,7 @@ .addReg(NewSRsrc) .add(*SOffset) .add(*Offset) - .addImm(getNamedImmOperand(MI, AMDGPU::OpName::slc)) + .addImm(getNamedImmOperand(MI, AMDGPU::OpName::cpol)) .cloneMemRefs(MI); } Index: llvm/lib/Target/AMDGPU/SIInstrInfo.td =================================================================== --- llvm/lib/Target/AMDGPU/SIInstrInfo.td +++ llvm/lib/Target/AMDGPU/SIInstrInfo.td @@ -818,6 +818,10 @@ return CurDAG->getTargetConstant((N->getZExtValue() >> 2) & 1, SDLoc(N), MVT::i8); }]>; +def extract_cpol : SDNodeXFormgetTargetConstant(N->getZExtValue() & AMDGPU::CPol::ALL, SDLoc(N), MVT::i8); +}]>; + def extract_swz : SDNodeXFormgetTargetConstant((N->getZExtValue() >> 3) & 1, SDLoc(N), MVT::i8); }]>; @@ -1074,6 +1078,12 @@ let ParserMatchClass = MatchClass; } +class NamedOperandU32Default1 : + OperandWithDefaultOps { + let PrintMethod = "print"#Name; + let ParserMatchClass = MatchClass; +} + let OperandType = "OPERAND_IMMEDIATE" in { def offen : NamedOperandBit<"Offen", NamedMatchClass<"Offen">>; @@ -1097,15 +1107,9 @@ def clampmod0 : NamedOperandBit_0<"ClampSI", NamedMatchClass<"ClampSI">>; def highmod : NamedOperandBit<"High", NamedMatchClass<"High">>; -def DLC : NamedOperandBit<"DLC", NamedMatchClass<"DLC">>; -def DLC_0 : NamedOperandBit_0<"DLC", NamedMatchClass<"DLC">>; - -def GLC : NamedOperandBit<"GLC", NamedMatchClass<"GLC">>; -def GLC_0 : NamedOperandBit_0<"GLC", NamedMatchClass<"GLC">>; -def GLC_1 : NamedOperandBit_1<"GLC", NamedMatchClass<"GLC_1">>; - -def SLC : NamedOperandBit<"SLC", NamedMatchClass<"SLC">>; -def SLC_0 : NamedOperandBit_0<"SLC", NamedMatchClass<"SLC">>; +def CPol : NamedOperandU32<"CPol", NamedMatchClass<"CPol">>; +def CPol_0 : NamedOperandU32Default0<"CPol", NamedMatchClass<"CPol">>; +def CPol_GLC1 : NamedOperandU32Default1<"CPol", NamedMatchClass<"CPol">>; def TFE : NamedOperandBit<"TFE", NamedMatchClass<"TFE">>; def SWZ : NamedOperandBit<"SWZ", NamedMatchClass<"SWZ">>; Index: llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp =================================================================== --- llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp +++ llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp @@ -104,9 +104,7 @@ unsigned BaseOff; unsigned DMask; InstClassEnum InstClass; - bool GLC; - bool SLC; - bool DLC; + unsigned CPol; bool UseST64; int AddrIdx[MaxAddressRegs]; const MachineOperand *AddrReg[MaxAddressRegs]; @@ -521,11 +519,7 @@ if ((InstClass == DS_READ) || (InstClass == DS_WRITE)) { Offset &= 0xffff; } else if (InstClass != MIMG) { - GLC = TII.getNamedOperand(*I, AMDGPU::OpName::glc)->getImm(); - if (InstClass != S_BUFFER_LOAD_IMM) { - SLC = TII.getNamedOperand(*I, AMDGPU::OpName::slc)->getImm(); - } - DLC = TII.getNamedOperand(*I, AMDGPU::OpName::dlc)->getImm(); + CPol = TII.getNamedOperand(*I, AMDGPU::OpName::cpol)->getImm(); } AddressRegs Regs = getRegs(Opc, TII); @@ -675,10 +669,9 @@ return false; // Check other optional immediate operands for equality. 
Index: llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp
===================================================================
--- llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp
+++ llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp
@@ -84,22 +84,6 @@
   LLVM_MARK_AS_BITMASK_ENUM(/* LargestFlag = */ ALL)
 };
 
-/// Sets named bit \p BitName to "true" if present in instruction \p MI.
-/// \returns Returns true if \p MI is modified, false otherwise.
-template <uint16_t BitName>
-bool enableNamedBit(const MachineBasicBlock::iterator &MI) {
-  int BitIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(), BitName);
-  if (BitIdx == -1)
-    return false;
-
-  MachineOperand &Bit = MI->getOperand(BitIdx);
-  if (Bit.getImm() != 0)
-    return false;
-
-  Bit.setImm(1);
-  return true;
-}
-
 class SIMemOpInfo final {
 private:
@@ -288,6 +272,11 @@
 
   SICacheControl(const GCNSubtarget &ST);
 
+  /// Sets named bit \p Bit to "true" if present in instruction \p MI.
+  /// \returns Returns true if \p MI is modified, false otherwise.
+  bool enableNamedBit(const MachineBasicBlock::iterator MI,
+                      AMDGPU::CPol::CPol Bit) const;
+
 public:
 
   /// Create a cache control for the subtarget \p ST.
@@ -355,13 +344,13 @@
   /// Sets GLC bit to "true" if present in \p MI. Returns true if \p MI
   /// is modified, false otherwise.
   bool enableGLCBit(const MachineBasicBlock::iterator &MI) const {
-    return enableNamedBit<AMDGPU::OpName::glc>(MI);
+    return enableNamedBit(MI, AMDGPU::CPol::GLC);
   }
 
   /// Sets SLC bit to "true" if present in \p MI. Returns true if \p MI
   /// is modified, false otherwise.
   bool enableSLCBit(const MachineBasicBlock::iterator &MI) const {
-    return enableNamedBit<AMDGPU::OpName::slc>(MI);
+    return enableNamedBit(MI, AMDGPU::CPol::SLC);
   }
 
 public:
@@ -414,7 +403,7 @@
   /// Sets DLC bit to "true" if present in \p MI. Returns true if \p MI
   /// is modified, false otherwise.
   bool enableDLCBit(const MachineBasicBlock::iterator &MI) const {
-    return enableNamedBit<AMDGPU::OpName::dlc>(MI);
+    return enableNamedBit(MI, AMDGPU::CPol::DLC);
   }
 
 public:
@@ -714,6 +703,16 @@
   InsertCacheInv = !AmdgcnSkipCacheInvalidations;
 }
 
+bool SICacheControl::enableNamedBit(const MachineBasicBlock::iterator MI,
+                                    AMDGPU::CPol::CPol Bit) const {
+  MachineOperand *CPol = TII->getNamedOperand(*MI, AMDGPU::OpName::cpol);
+  if (!CPol)
+    return false;
+
+  CPol->setImm(CPol->getImm() | Bit);
+  return true;
+}
+
 /* static */
 std::unique_ptr<SICacheControl> SICacheControl::create(const GCNSubtarget &ST) {
   GCNSubtarget::Generation Generation = ST.getGeneration();
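Note the behavioural nuance visible in the hunks above: the removed template helper refused to touch an operand whose bit was already set (it returned false), while the replacement ORs the requested CPol bit into the shared cpol immediate and is therefore idempotent. A toy model of the new behaviour, with InstModel and enableBit as hypothetical stand-ins for MachineInstr and SICacheControl::enableNamedBit:

#include <optional>

struct ImmOperand { long long Imm; };

struct InstModel {
  std::optional<ImmOperand> CPol; // absent on instructions without cpol
};

static bool enableBit(InstModel &MI, unsigned Bit) {
  if (!MI.CPol)
    return false;      // no cache-policy operand on this instruction
  MI.CPol->Imm |= Bit; // idempotent: setting an already-set bit still succeeds
  return true;
}

int main() {
  InstModel Load{ImmOperand{0}}; // has a cpol operand
  InstModel Barrier{};           // has none
  bool A = enableBit(Load, 1 /*GLC*/);
  bool B = enableBit(Barrier, 1 /*GLC*/);
  return (A && !B) ? 0 : 1;
}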
Index: llvm/lib/Target/AMDGPU/SMInstructions.td
===================================================================
--- llvm/lib/Target/AMDGPU/SMInstructions.td
+++ llvm/lib/Target/AMDGPU/SMInstructions.td
@@ -62,6 +62,8 @@
   let UseNamedOperandTable = ps.UseNamedOperandTable;
   let SMRD = ps.SMRD;
 
+  let TSFlags = ps.TSFlags;
+
   bit is_buffer = ps.is_buffer;
 
   // encoding
@@ -69,6 +71,7 @@
   bits<7> sdst;
   bits<32> offset;
   bits<1> imm = !if(ps.has_offset, ps.offset_is_imm, 0);
+  bits<3> cpol;
 }
 
 class SM_Probe_Pseudo
@@ -120,8 +123,8 @@
                            RegisterClass dstClass> {
   def _IMM : SM_Load_Pseudo <opName,
                              (outs dstClass:$sdst),
-                             (ins baseClass:$sbase, i32imm:$offset, GLC:$glc, DLC:$dlc),
-                             " $sdst, $sbase, $offset$glc$dlc", []> {
+                             (ins baseClass:$sbase, i32imm:$offset, CPol:$cpol),
+                             " $sdst, $sbase, $offset$cpol", []> {
     let offset_is_imm = 1;
     let BaseClass = baseClass;
     let PseudoInstr = opName # "_IMM";
@@ -131,8 +134,8 @@
   def _SGPR : SM_Load_Pseudo <opName,
                               (outs dstClass:$sdst),
-                              (ins baseClass:$sbase, SReg_32:$soff, GLC:$glc, DLC:$dlc),
-                              " $sdst, $sbase, $offset$glc$dlc", []> {
+                              (ins baseClass:$sbase, SReg_32:$soff, CPol:$cpol),
+                              " $sdst, $sbase, $offset$cpol", []> {
     let BaseClass = baseClass;
     let PseudoInstr = opName # "_SGPR";
     let has_glc = 1;
@@ -144,8 +147,8 @@
                            RegisterClass baseClass,
                            RegisterClass srcClass> {
   def _IMM : SM_Store_Pseudo <opName,
-                              (ins srcClass:$sdata, baseClass:$sbase, i32imm:$offset, GLC:$glc, DLC:$dlc),
-                              " $sdata, $sbase, $offset$glc$dlc", []> {
+                              (ins srcClass:$sdata, baseClass:$sbase, i32imm:$offset, CPol:$cpol),
+                              " $sdata, $sbase, $offset$cpol", []> {
     let offset_is_imm = 1;
     let BaseClass = baseClass;
     let SrcClass = srcClass;
   }
 
   def _SGPR : SM_Store_Pseudo <opName,
-                               (ins srcClass:$sdata, baseClass:$sbase, SReg_32:$soff, GLC:$glc, DLC:$dlc),
-                               " $sdata, $sbase, $offset$glc$dlc", []> {
+                               (ins srcClass:$sdata, baseClass:$sbase, SReg_32:$soff, CPol:$cpol),
+                               " $sdata, $sbase, $offset$cpol", []> {
     let BaseClass = baseClass;
     let SrcClass = srcClass;
     let PseudoInstr = opName # "_SGPR";
@@ -227,24 +230,32 @@
   let ScalarStore = 1;
   let hasSideEffects = 1;
   let maybeAtomic = 1;
+
+  let IsAtomicNoRet = !not(isRet);
+  let IsAtomicRet = isRet;
+
+  let AsmMatchConverter = "cvtSMEMAtomic";
 }
 
 class SM_Pseudo_Atomic<string opName,
                        RegisterClass baseClass,
                        RegisterClass dataClass,
                        bit isImm,
-                       bit isRet> :
+                       bit isRet,
+                       string opNameWithSuffix = opName # !if(isImm,
+                         !if(isRet, "_IMM_RTN", "_IMM"),
+                         !if(isRet, "_SGPR_RTN", "_SGPR")),
+                       Operand CPolTy = !if(isRet, CPol_GLC1, CPol)> :
   SM_Atomic_Pseudo <opName,
                     !if(isRet, (outs dataClass:$sdst), (outs)),
                     !if(isImm,
-                        (ins dataClass:$sdata, baseClass:$sbase, smem_offset:$offset),
-                        (ins dataClass:$sdata, baseClass:$sbase, SReg_32:$offset)),
-                    !if(isRet, " $sdst", " $sdata") # ", $sbase, $offset" # !if(isRet, " glc", ""),
-                    isRet> {
+                        (ins dataClass:$sdata, baseClass:$sbase, smem_offset:$offset, CPolTy:$cpol),
+                        (ins dataClass:$sdata, baseClass:$sbase, SReg_32:$offset, CPolTy:$cpol)),
+                    !if(isRet, " $sdst", " $sdata") # ", $sbase, $offset$cpol",
+                    isRet>,
+  AtomicNoRet <opNameWithSuffix, isRet> {
 
   let offset_is_imm = isImm;
-  let PseudoInstr = opName # !if(isImm,
-    !if(isRet, "_IMM_RTN", "_IMM"),
-    !if(isRet, "_SGPR_RTN", "_SGPR"));
+  let PseudoInstr = opNameWithSuffix;
 
   let Constraints = !if(isRet, "$sdst = $sdata", "");
   let DisableEncoding = !if(isRet, "$sdata", "");
@@ -456,13 +467,13 @@
                        SM_Load_Pseudo sgprPs = !cast<SM_Load_Pseudo>(ps#_SGPR)> {
   def _IMM_si : SMRD_Real_si <op, immPs> {
-    let InOperandList = (ins immPs.BaseClass:$sbase, smrd_offset_8:$offset, GLC:$glc, DLC:$dlc);
+    let InOperandList = (ins immPs.BaseClass:$sbase, smrd_offset_8:$offset, CPol:$cpol);
   }
 
   // FIXME: The operand name $offset is inconsistent with $soff used
   // in the pseudo
   def _SGPR_si : SMRD_Real_si <op, sgprPs> {
-    let InOperandList = (ins sgprPs.BaseClass:$sbase, SReg_32:$offset, GLC:$glc, DLC:$dlc);
+    let InOperandList = (ins sgprPs.BaseClass:$sbase, SReg_32:$offset, CPol:$cpol);
   }
 }
@@ -490,15 +501,13 @@
   : SM_Real<ps>
   , SIMCInstr<ps.PseudoInstr, SIEncodingFamily.VI>
   , Enc64 {
-  bit glc;
-
   let AssemblerPredicate = isGFX8GFX9;
   let DecoderNamespace = "GFX8";
 
   let Inst{5-0}  = !if(ps.has_sbase, sbase{6-1}, ?);
   let Inst{12-6} = !if(ps.has_sdst, sdst{6-0}, ?);
-  let Inst{16}   = !if(ps.has_glc, glc, ?);
+  let Inst{16}   = !if(ps.has_glc, cpol{CPolBit.GLC}, ?);
   let Inst{17}   = imm;
   let Inst{25-18} = op;
   let Inst{31-26} = 0x30; //encoding
@@ -512,10 +521,10 @@
                        SM_Load_Pseudo sgprPs = !cast<SM_Load_Pseudo>(ps#_SGPR)> {
   def _IMM_vi : SMEM_Real_vi <op, immPs> {
-    let InOperandList = (ins immPs.BaseClass:$sbase, smem_offset:$offset, GLC:$glc, DLC:$dlc);
+    let InOperandList = (ins immPs.BaseClass:$sbase, smem_offset:$offset, CPol:$cpol);
   }
   def _SGPR_vi : SMEM_Real_vi <op, sgprPs> {
-    let InOperandList = (ins sgprPs.BaseClass:$sbase, SReg_32:$offset, GLC:$glc, DLC:$dlc);
+    let InOperandList = (ins sgprPs.BaseClass:$sbase, SReg_32:$offset, CPol:$cpol);
   }
 }
@@ -533,11 +542,11 @@
 
   // FIXME: The operand name $offset is inconsistent with $soff used
   // in the pseudo
   def _IMM_vi : SMEM_Real_Store_vi <op, immPs> {
-    let InOperandList = (ins immPs.SrcClass:$sdata, immPs.BaseClass:$sbase, smem_offset:$offset, GLC:$glc, DLC:$dlc);
+    let InOperandList = (ins immPs.SrcClass:$sdata, immPs.BaseClass:$sbase, smem_offset:$offset, CPol:$cpol);
   }
 
   def _SGPR_vi : SMEM_Real_Store_vi <op, sgprPs> {
-    let InOperandList = (ins sgprPs.SrcClass:$sdata, sgprPs.BaseClass:$sbase, SReg_32:$offset, GLC:$glc, DLC:$dlc);
+    let InOperandList = (ins sgprPs.SrcClass:$sdata, sgprPs.BaseClass:$sbase, SReg_32:$offset, CPol:$cpol);
   }
 }
@@ -589,15 +598,16 @@
 //===----------------------------------------------------------------------===//
 
 class SMEM_Atomic_Real_vi <bits<8> op, SM_Atomic_Pseudo ps>
-  : SMEM_Real_vi <op, ps> {
+  : SMEM_Real_vi <op, ps>,
+    AtomicNoRet <!subst("_RTN","",NAME), ps.glc> {
 
   bits<7> sdata;
 
   let Constraints = ps.Constraints;
   let DisableEncoding = ps.DisableEncoding;
 
-  let glc = ps.glc;
-  let Inst{12-6} = !if(glc, sdst{6-0}, sdata{6-0});
+  let cpol{CPolBit.GLC} = ps.glc;
+  let Inst{12-6} = !if(ps.glc, sdst{6-0}, sdata{6-0});
 }
 
 multiclass SM_Real_Atomics_vi <bits<8> op, string ps> {
@@ -686,7 +696,7 @@
   let AssemblerPredicate = isGFX7Only;
   let DecoderNamespace = "GFX7";
 
-  let InOperandList = (ins ps.BaseClass:$sbase, smrd_literal_offset:$offset, GLC:$glc, DLC:$dlc);
+  let InOperandList = (ins ps.BaseClass:$sbase, smrd_literal_offset:$offset, CPol:$cpol);
 
   let LGKM_CNT = ps.LGKM_CNT;
   let mayLoad = ps.mayLoad;
@@ -764,26 +774,26 @@
 
   // 1. IMM offset
   def : GCNPat <
     (smrd_load (SMRDImm i64:$sbase, i32:$offset)),
-    (vt (!cast<SM_Pseudo>(Instr#"_IMM") $sbase, $offset, 0, 0))
+    (vt (!cast<SM_Pseudo>(Instr#"_IMM") $sbase, $offset, 0))
   >;
 
   // 2. 32-bit IMM offset on CI
   def : GCNPat <
     (smrd_load (SMRDImm32 i64:$sbase, i32:$offset)),
-    (vt (!cast<InstSI>(Instr#"_IMM_ci") $sbase, $offset, 0, 0))> {
+    (vt (!cast<InstSI>(Instr#"_IMM_ci") $sbase, $offset, 0))> {
     let OtherPredicates = [isGFX7Only];
   }
 
   // 3. SGPR offset
   def : GCNPat <
     (smrd_load (SMRDSgpr i64:$sbase, i32:$offset)),
-    (vt (!cast<SM_Pseudo>(Instr#"_SGPR") $sbase, $offset, 0, 0))
+    (vt (!cast<SM_Pseudo>(Instr#"_SGPR") $sbase, $offset, 0))
   >;
 
   // 4. No offset
   def : GCNPat <
     (vt (smrd_load (i64 SReg_64:$sbase))),
-    (vt (!cast<SM_Pseudo>(Instr#"_IMM") i64:$sbase, 0, 0, 0))
+    (vt (!cast<SM_Pseudo>(Instr#"_IMM") i64:$sbase, 0, 0))
   >;
 }
 
@@ -791,8 +801,7 @@
   // 1. Offset as an immediate
   def : GCNPat <
     (SIsbuffer_load v4i32:$sbase, (SMRDBufferImm i32:$offset), timm:$cachepolicy),
-    (vt (!cast<SM_Pseudo>(Instr#"_IMM") SReg_128:$sbase, i32imm:$offset, (extract_glc $cachepolicy),
-                                        (extract_dlc $cachepolicy)))> {
+    (vt (!cast<SM_Pseudo>(Instr#"_IMM") SReg_128:$sbase, i32imm:$offset, (extract_cpol $cachepolicy)))> {
     let AddedComplexity = 2;
   }
 
@@ -800,7 +809,7 @@
   // 2. 32-bit IMM offset on CI
   def : GCNPat <
     (vt (SIsbuffer_load v4i32:$sbase, (SMRDBufferImm32 i32:$offset), timm:$cachepolicy)),
     (!cast<InstSI>(Instr#"_IMM_ci") SReg_128:$sbase, smrd_literal_offset:$offset,
-                                    (extract_glc $cachepolicy), (extract_dlc $cachepolicy))> {
+                                    (extract_cpol $cachepolicy))> {
     let OtherPredicates = [isGFX7Only];
     let AddedComplexity = 1;
   }
@@ -808,8 +817,7 @@
   // 3. Offset loaded in an 32bit SGPR
   def : GCNPat <
     (SIsbuffer_load v4i32:$sbase, i32:$offset, timm:$cachepolicy),
-    (vt (!cast<SM_Pseudo>(Instr#"_SGPR") SReg_128:$sbase, SReg_32:$offset, (extract_glc $cachepolicy),
-                                         (extract_dlc $cachepolicy)))
+    (vt (!cast<SM_Pseudo>(Instr#"_SGPR") SReg_128:$sbase, SReg_32:$offset, (extract_cpol $cachepolicy)))
   >;
 }
@@ -873,16 +881,13 @@
 class SMEM_Real_gfx10 <bits<8> op, SM_Pseudo ps> :
   SM_Real<ps>, SIMCInstr<ps.PseudoInstr, SIEncodingFamily.GFX10>, Enc64 {
-  bit glc;
-  bit dlc;
-
   let AssemblerPredicate = isGFX10Plus;
   let DecoderNamespace = "GFX10";
 
   let Inst{5-0}   = !if(ps.has_sbase, sbase{6-1}, ?);
   let Inst{12-6}  = !if(ps.has_sdst, sdst{6-0}, ?);
-  let Inst{14}    = !if(ps.has_dlc, dlc, ?);
-  let Inst{16}    = !if(ps.has_glc, glc, ?);
+  let Inst{14}    = !if(ps.has_dlc, cpol{CPolBit.DLC}, ?);
+  let Inst{16}    = !if(ps.has_glc, cpol{CPolBit.GLC}, ?);
   let Inst{25-18} = op;
   let Inst{31-26} = 0x3d;
   let Inst{52-32} = !if(ps.offset_is_imm, !if(ps.has_offset, offset{20-0}, ?), ?);
@@ -894,10 +899,10 @@
                        SM_Load_Pseudo sgprPs = !cast<SM_Load_Pseudo>(ps#_SGPR)> {
   def _IMM_gfx10 : SMEM_Real_gfx10 <op, immPs> {
-    let InOperandList = (ins immPs.BaseClass:$sbase, smem_offset:$offset, GLC:$glc, DLC:$dlc);
+    let InOperandList = (ins immPs.BaseClass:$sbase, smem_offset:$offset, CPol:$cpol);
   }
   def _SGPR_gfx10 : SMEM_Real_gfx10 <op, sgprPs> {
-    let InOperandList = (ins sgprPs.BaseClass:$sbase, SReg_32:$offset, GLC:$glc, DLC:$dlc);
+    let InOperandList = (ins sgprPs.BaseClass:$sbase, SReg_32:$offset, CPol:$cpol);
   }
 }
@@ -914,11 +919,11 @@
 
   // FIXME: The operand name $offset is inconsistent with $soff used
   // in the pseudo
   def _IMM_gfx10 : SMEM_Real_Store_gfx10 <op, immPs> {
-    let InOperandList = (ins immPs.SrcClass:$sdata, immPs.BaseClass:$sbase, smem_offset:$offset, GLC:$glc, DLC:$dlc);
+    let InOperandList = (ins immPs.SrcClass:$sdata, immPs.BaseClass:$sbase, smem_offset:$offset, CPol:$cpol);
   }
 
   def _SGPR_gfx10 : SMEM_Real_Store_gfx10 <op, sgprPs> {
-    let InOperandList = (ins sgprPs.SrcClass:$sdata, sgprPs.BaseClass:$sbase, SReg_32:$offset, GLC:$glc, DLC:$dlc);
+    let InOperandList = (ins sgprPs.SrcClass:$sdata, sgprPs.BaseClass:$sbase, SReg_32:$offset, CPol:$cpol);
  }
 }
@@ -973,18 +978,18 @@
 defm S_ATC_PROBE_BUFFER : SM_Real_Probe_gfx10 <0x27, "S_ATC_PROBE_BUFFER">;
 
 class SMEM_Atomic_Real_gfx10 <bits<8> op, SM_Atomic_Pseudo ps>
-  : SMEM_Real_gfx10 <op, ps> {
+  : SMEM_Real_gfx10 <op, ps>,
+    AtomicNoRet <!subst("_RTN","",NAME), ps.glc> {
 
   bits<7> sdata;
-  bit dlc;
 
   let Constraints = ps.Constraints;
   let DisableEncoding = ps.DisableEncoding;
 
-  let glc = ps.glc;
+  let cpol{CPolBit.GLC} = ps.glc;
 
-  let Inst{14} = !if(ps.has_dlc, dlc, 0);
-  let Inst{12-6} = !if(glc, sdst{6-0}, sdata{6-0});
+  let Inst{14} = !if(ps.has_dlc, cpol{CPolBit.DLC}, 0);
+  let Inst{12-6} = !if(ps.glc, sdst{6-0}, sdata{6-0});
 }
 
 multiclass SM_Real_Atomics_gfx10 <bits<8> op, string ps> {
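The selection patterns above now feed one (extract_cpol $cachepolicy) operand where the old ones needed separate extract_glc and extract_dlc transforms. What that xform computes, checked on plain integers (the real one calls CurDAG->getTargetConstant on an SDNode; the 0x10 flag below is a made-up extra bit for illustration):

#include <cassert>
#include <cstdint>

int main() {
  const uint64_t GLC = 1, SLC = 2, DLC = 4, ALL = GLC | SLC | DLC;
  // A cachepolicy immediate may carry additional target bits; only the
  // GLC/SLC/DLC mask survives into the instruction's cpol operand.
  uint64_t CachePolicy = GLC | DLC | 0x10; // 0x10: unrelated flag, assumed
  assert((CachePolicy & ALL) == (GLC | DLC));
  // Old scheme, one bit per transform: extract_glc took imm & 1 and
  // extract_dlc took (imm >> 2) & 1.
  assert((CachePolicy & 1) == 1 && ((CachePolicy >> 2) & 1) == 1);
  return 0;
}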
Index: llvm/test/MC/AMDGPU/atomic-fadd-insts.s
===================================================================
--- llvm/test/MC/AMDGPU/atomic-fadd-insts.s
+++ llvm/test/MC/AMDGPU/atomic-fadd-insts.s
@@ -41,7 +41,7 @@
 // GFX908: encoding: [0x07,0x00,0x34,0xe1,0x00,0x05,0x02,0x03]
 
 buffer_atomic_add_f32 v5, off, s[8:11], s3 offset:4095 glc
-// GFX908-ERR: error: invalid operand for instruction
+// GFX908-ERR: error: instruction must not use glc
 
 buffer_atomic_add_f32 v5, off, s[8:11], s3 offset:4095 slc
 // GFX908: encoding: [0xff,0x0f,0x36,0xe1,0x00,0x05,0x02,0x03]
@@ -86,7 +86,7 @@
 // GFX908: encoding: [0x07,0x00,0x38,0xe1,0x00,0x05,0x02,0x03]
 
 buffer_atomic_pk_add_f16 v5, off, s[8:11], s3 offset:4095 glc
-// GFX908-ERR: error: invalid operand for instruction
+// GFX908-ERR: error: instruction must not use glc
 
 buffer_atomic_pk_add_f16 v5, off, s[8:11], s3 offset:4095 slc
 // GFX908: encoding: [0xff,0x0f,0x3a,0xe1,0x00,0x05,0x02,0x03]
Index: llvm/test/MC/AMDGPU/cpol-err.s
===================================================================
--- /dev/null
+++ llvm/test/MC/AMDGPU/cpol-err.s
@@ -0,0 +1,21 @@
+// RUN: not llvm-mc -arch=amdgcn -mcpu=gfx900 %s 2>&1 | FileCheck %s --implicit-check-not=error: --strict-whitespace
+
+scratch_load_ubyte v1, v2, off cpol:2
+// CHECK: error: not a valid operand.
+// CHECK-NEXT:{{^}}scratch_load_ubyte v1, v2, off cpol:2
+// CHECK-NEXT:{{^}}                               ^
+
+scratch_load_ubyte v1, v2, off glc slc dlc
+// CHECK: error: dlc modifier is not supported on this GPU
+// CHECK-NEXT:{{^}}scratch_load_ubyte v1, v2, off glc slc dlc
+// CHECK-NEXT:{{^}}                                       ^
+
+global_atomic_add v[3:4], v5, off slc glc
+// CHECK: error: instruction must not use glc
+// CHECK-NEXT:{{^}}global_atomic_add v[3:4], v5, off slc glc
+// CHECK-NEXT:{{^}}                                      ^
+
+global_atomic_add v0, v[1:2], v2, off glc 1
+// CHECK: error: invalid operand for instruction
+// CHECK-NEXT:{{^}}global_atomic_add v0, v[1:2], v2, off glc 1
+// CHECK-NEXT:{{^}}                                          ^
Index: llvm/test/MC/AMDGPU/flat-gfx10.s
===================================================================
--- llvm/test/MC/AMDGPU/flat-gfx10.s
+++ llvm/test/MC/AMDGPU/flat-gfx10.s
@@ -38,10 +38,10 @@
 // GFX10: encoding: [0x00,0x00,0xc6,0xdc,0x01,0x03,0x7d,0x00]
 
 flat_atomic_cmpswap v[1:2], v[3:4] offset:2047 glc
-// GFX10-ERR: error: invalid operand for instruction
+// GFX10-ERR: error: instruction must not use glc
 
 flat_atomic_cmpswap v[1:2], v[3:4] glc
-// GFX10-ERR: error: invalid operand for instruction
+// GFX10-ERR: error: instruction must not use glc
 
 flat_atomic_cmpswap v0, v[1:2], v[3:4] offset:2047 glc
 // GFX10: encoding: [0xff,0x07,0xc5,0xdc,0x01,0x03,0x7d,0x00]
Index: llvm/test/MC/AMDGPU/flat-gfx9.s
===================================================================
--- llvm/test/MC/AMDGPU/flat-gfx9.s
+++ llvm/test/MC/AMDGPU/flat-gfx9.s
@@ -53,10 +53,11 @@
 // VI: flat_atomic_cmpswap v[1:2], v[3:4] slc ; encoding: [0x00,0x00,0x06,0xdd,0x01,0x03,0x00,0x00]
 
 flat_atomic_cmpswap v[1:2], v[3:4] offset:4095 glc
-// GCNERR: error: invalid operand for instruction
+// GFX9-ERR: error: instruction must not use glc
+// VI-ERR: error: flat offset modifier is not supported on this GPU
 
 flat_atomic_cmpswap v[1:2], v[3:4] glc
-// GCNERR: error: invalid operand for instruction
+// GCNERR: error: instruction must not use glc
 
 flat_atomic_cmpswap v0, v[1:2], v[3:4] offset:4095 glc
 // GFX9: flat_atomic_cmpswap v0, v[1:2], v[3:4] offset:4095 glc ; encoding: [0xff,0x0f,0x05,0xdd,0x01,0x03,0x00,0x00]
Index: llvm/test/MC/AMDGPU/mubuf-gfx10.s
===================================================================
--- llvm/test/MC/AMDGPU/mubuf-gfx10.s
+++ llvm/test/MC/AMDGPU/mubuf-gfx10.s
@@ -4,7 +4,7 @@
 // GFX10: buffer_load_sbyte v5, off, s[8:11], s3 glc slc lds ; encoding: [0x00,0x40,0x25,0xe0,0x00,0x05,0x42,0x03]
 
 buffer_load_sbyte v5, off, s[8:11], s3 glc slc lds dlc
-// GFX10: buffer_load_sbyte v5, off, s[8:11], s3 glc slc lds dlc ; encoding: [0x00,0xc0,0x25,0xe0,0x00,0x05,0x42,0x03]
+// GFX10: buffer_load_sbyte v5, off, s[8:11], s3 glc slc dlc lds ; encoding: [0x00,0xc0,0x25,0xe0,0x00,0x05,0x42,0x03]
 
 buffer_load_sbyte v5, off, s[8:11], s3 glc slc dlc
 // GFX10: buffer_load_sbyte v5, off, s[8:11], s3 glc slc dlc ; encoding: [0x00,0xc0,0x24,0xe0,0x00,0x05,0x42,0x03]
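The test updates reflect two user-visible consequences of the change: no-return atomics now get the targeted "instruction must not use glc" diagnostic instead of a generic operand error, and the printer emits the modifiers in a canonical glc/slc/dlc order regardless of how they were written (hence the reordered mubuf-gfx10.s check line). The accumulation that the cpol-err.s cases exercise can be modelled as below; this is a toy re-implementation of parseCPol's merging on a token list, with locations and diagnostics omitted:

#include <cassert>
#include <string>
#include <vector>

static unsigned parseCPolTokens(const std::vector<std::string> &Toks) {
  const unsigned GLC = 1, SLC = 2, DLC = 4;
  unsigned CPol = 0;
  for (const std::string &T : Toks) {
    bool Clear = T.rfind("no", 0) == 0; // "noglc", "noslc", "nodlc"
    std::string Name = Clear ? T.substr(2) : T;
    unsigned Bit = Name == "glc" ? GLC : Name == "slc" ? SLC
                 : Name == "dlc" ? DLC : 0;
    if (Clear)
      CPol &= ~Bit; // matches (Op.getImm() | CPolOn) & ~CPolOff with CPolOff set
    else
      CPol |= Bit;  // matches the CPolOn path
  }
  return CPol;
}

int main() {
  // "glc slc dlc" followed by "noglc" leaves slc|dlc: repeated modifiers all
  // fold into the one cpol operand instead of creating new operands.
  assert(parseCPolTokens({"glc", "slc", "dlc", "noglc"}) == (2u | 4u));
  return 0;
}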