Index: lib/Target/AMDGPU/FLATInstructions.td
===================================================================
--- lib/Target/AMDGPU/FLATInstructions.td
+++ lib/Target/AMDGPU/FLATInstructions.td
@@ -57,8 +57,8 @@
   let AsmMatchConverter = ps.AsmMatchConverter;
 
   // encoding fields
-  bits<8> addr;
-  bits<8> data;
+  bits<8> vaddr;
+  bits<8> vdata;
   bits<8> vdst;
   bits<1> slc;
   bits<1> glc;
@@ -69,8 +69,8 @@
   let Inst{17}    = slc;
   let Inst{24-18} = op;
   let Inst{31-26} = 0x37; // Encoding.
-  let Inst{39-32} = addr;
-  let Inst{47-40} = !if(ps.has_data, data, ?);
+  let Inst{39-32} = vaddr;
+  let Inst{47-40} = !if(ps.has_data, vdata, ?);
   // 54-48 is reserved.
   let Inst{55}    = tfe;
   let Inst{63-56} = !if(ps.has_vdst, vdst, ?);
@@ -79,8 +79,8 @@
 class FLAT_Load_Pseudo <string opName, RegisterClass regClass> : FLAT_Pseudo<
   opName,
   (outs regClass:$vdst),
-  (ins VReg_64:$addr, GLC:$glc, slc:$slc, tfe:$tfe),
-  " $vdst, $addr$glc$slc$tfe"> {
+  (ins VReg_64:$vaddr, GLC:$glc, slc:$slc, tfe:$tfe),
+  " $vdst, $vaddr$glc$slc$tfe"> {
   let has_data = 0;
   let mayLoad = 1;
 }
@@ -88,8 +88,8 @@
 class FLAT_Store_Pseudo <string opName, RegisterClass vdataClass> : FLAT_Pseudo<
   opName,
   (outs),
-  (ins VReg_64:$addr, vdataClass:$data, GLC:$glc, slc:$slc, tfe:$tfe),
-  " $addr, $data$glc$slc$tfe"> {
+  (ins VReg_64:$vaddr, vdataClass:$vdata, GLC:$glc, slc:$slc, tfe:$tfe),
+  " $vaddr, $vdata$glc$slc$tfe"> {
   let mayLoad  = 0;
   let mayStore = 1;
   let has_vdst = 0;
@@ -105,8 +105,8 @@
 
   def "" : FLAT_Pseudo <opName,
     (outs),
-    (ins VReg_64:$addr, data_rc:$data, slc:$slc, tfe:$tfe),
-    " $addr, $data$slc$tfe",
+    (ins VReg_64:$vaddr, data_rc:$vdata, slc:$slc, tfe:$tfe),
+    " $vaddr, $vdata$slc$tfe",
     []>,
     AtomicNoRet <NAME, 0> {
     let mayLoad = 1;
@@ -119,10 +119,10 @@
 
   def _RTN : FLAT_Pseudo <opName,
     (outs vdst_rc:$vdst),
-    (ins VReg_64:$addr, data_rc:$data, slc:$slc, tfe:$tfe),
-    " $vdst, $addr, $data glc$slc$tfe",
+    (ins VReg_64:$vaddr, data_rc:$vdata, slc:$slc, tfe:$tfe),
+    " $vdst, $vaddr, $vdata glc$slc$tfe",
     [(set vt:$vdst,
-      (atomic (FLATAtomic i64:$addr, i1:$slc, i1:$tfe), data_vt:$data))]>,
+      (atomic (FLATAtomic i64:$vaddr, i1:$slc, i1:$tfe), data_vt:$vdata))]>,
     AtomicNoRet <NAME, 1> {
     let mayLoad = 1;
     let mayStore = 1;
Index: lib/Target/AMDGPU/GCNHazardRecognizer.cpp
===================================================================
--- lib/Target/AMDGPU/GCNHazardRecognizer.cpp
+++ lib/Target/AMDGPU/GCNHazardRecognizer.cpp
@@ -423,7 +423,7 @@
   }
 
   if (TII->isFLAT(MI)) {
-    int DataIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::data);
+    int DataIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdata);
     if (AMDGPU::getRegBitWidth(Desc.OpInfo[DataIdx].RegClass) > 64)
       return DataIdx;
   }
Index: lib/Target/AMDGPU/SIInsertWaits.cpp
===================================================================
--- lib/Target/AMDGPU/SIInsertWaits.cpp
+++ lib/Target/AMDGPU/SIInsertWaits.cpp
@@ -250,12 +250,6 @@
   // operand comes before the value operand and it may have
   // multiple data operands.
 
-  if (TII->isDS(MI) || TII->isFLAT(MI)) {
-    MachineOperand *Data = TII->getNamedOperand(MI, AMDGPU::OpName::data);
-    if (Data && Op.isIdenticalTo(*Data))
-      return true;
-  }
-
   if (TII->isDS(MI)) {
     MachineOperand *Data0 = TII->getNamedOperand(MI, AMDGPU::OpName::data0);
     if (Data0 && Op.isIdenticalTo(*Data0))
@@ -265,6 +259,12 @@
     return Data1 && Op.isIdenticalTo(*Data1);
   }
 
+  if (TII->isFLAT(MI)) {
+    MachineOperand *Data = TII->getNamedOperand(MI, AMDGPU::OpName::vdata);
+    if (Data && Op.isIdenticalTo(*Data))
+      return true;
+  }
+
   // NOTE: This assumes that the value operand is before the
   // address operand, and that there is only one value operand.
   for (MachineInstr::mop_iterator I = MI.operands_begin(),
Index: lib/Target/AMDGPU/SIInstrInfo.cpp
===================================================================
--- lib/Target/AMDGPU/SIInstrInfo.cpp
+++ lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -294,7 +294,7 @@
   }
 
   if (isFLAT(LdSt)) {
-    const MachineOperand *AddrReg = getNamedOperand(LdSt, AMDGPU::OpName::addr);
+    const MachineOperand *AddrReg = getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
     BaseReg = AddrReg->getReg();
     Offset = 0;
     return true;
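
For context: these passes locate FLAT operands by name rather than by fixed position, which is why every query site has to move to the new names in the same patch. AMDGPU::getNamedOperandIdx returns -1, and SIInstrInfo::getNamedOperand returns nullptr, when an opcode has no operand with the requested name, so a pass still asking for OpName::data would silently stop matching FLAT instructions rather than fail loudly. Below is a minimal sketch of the lookup pattern the updated hunks share, mirroring the GCNHazardRecognizer change above; it is not part of the patch, and the helper name hasWideFlatData is hypothetical:

#include "SIInstrInfo.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/MC/MCInstrDesc.h"

using namespace llvm;

// Hypothetical helper, for illustration only: returns true if MI is a
// FLAT instruction whose data operand is wider than 64 bits.
static bool hasWideFlatData(const SIInstrInfo *TII, const MachineInstr &MI) {
  if (!TII->isFLAT(MI))
    return false;
  // Look the operand up by its new name; a stale OpName::data query
  // would return -1 here and the width check below would never fire.
  int DataIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                           AMDGPU::OpName::vdata);
  if (DataIdx == -1) // e.g. FLAT loads, which have no data operand
    return false;
  const MCInstrDesc &Desc = MI.getDesc();
  return AMDGPU::getRegBitWidth(Desc.OpInfo[DataIdx].RegClass) > 64;
}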