Index: llvm/trunk/lib/Target/AMDGPU/GCNHazardRecognizer.h =================================================================== --- llvm/trunk/lib/Target/AMDGPU/GCNHazardRecognizer.h +++ llvm/trunk/lib/Target/AMDGPU/GCNHazardRecognizer.h @@ -50,6 +50,7 @@ int checkSetRegHazards(MachineInstr *SetRegInstr); int createsVALUHazard(const MachineInstr &MI); int checkVALUHazards(MachineInstr *VALU); + int checkRWLaneHazards(MachineInstr *RWLane); public: GCNHazardRecognizer(const MachineFunction &MF); // We can only issue one instruction per cycle. Index: llvm/trunk/lib/Target/AMDGPU/GCNHazardRecognizer.cpp =================================================================== --- llvm/trunk/lib/Target/AMDGPU/GCNHazardRecognizer.cpp +++ llvm/trunk/lib/Target/AMDGPU/GCNHazardRecognizer.cpp @@ -50,7 +50,11 @@ return Opcode == AMDGPU::S_SETREG_B32 || Opcode == AMDGPU::S_SETREG_IMM32_B32; } -static unsigned getHWReg(const SIInstrInfo *TII, const MachineInstr &RegInstr) { +static bool isRWLane(unsigned Opcode) { + return Opcode == AMDGPU::V_READLANE_B32 || Opcode == AMDGPU::V_WRITELANE_B32; +} + +static unsigned getHWReg(const SIInstrInfo *TII, const MachineInstr &RegInstr) { const MachineOperand *RegOp = TII->getNamedOperand(RegInstr, AMDGPU::OpName::simm16); @@ -76,6 +80,9 @@ if (isDivFMas(MI->getOpcode()) && checkDivFMasHazards(MI) > 0) return NoopHazard; + if (isRWLane(MI->getOpcode()) && checkRWLaneHazards(MI) > 0) + return NoopHazard; + if (isSGetReg(MI->getOpcode()) && checkGetRegHazards(MI) > 0) return NoopHazard; @@ -105,6 +112,9 @@ if (isDivFMas(MI->getOpcode())) WaitStates = std::max(WaitStates, checkDivFMasHazards(MI)); + if (isRWLane(MI->getOpcode())) + WaitStates = std::max(WaitStates, checkRWLaneHazards(MI)); + return WaitStates; } @@ -438,3 +448,25 @@ } return WaitStatesNeeded; } + +int GCNHazardRecognizer::checkRWLaneHazards(MachineInstr *RWLane) { + const SIInstrInfo *TII = ST.getInstrInfo(); + const SIRegisterInfo *TRI = ST.getRegisterInfo(); + const 
MachineRegisterInfo &MRI = + RWLane->getParent()->getParent()->getRegInfo(); + + const MachineOperand *LaneSelectOp = + TII->getNamedOperand(*RWLane, AMDGPU::OpName::src1); + + if (!LaneSelectOp->isReg() || !TRI->isSGPRReg(MRI, LaneSelectOp->getReg())) + return 0; + + unsigned LaneSelectReg = LaneSelectOp->getReg(); + auto IsHazardFn = [TII] (MachineInstr *MI) { + return TII->isVALU(*MI); + }; + + const int RWLaneWaitStates = 4; + int WaitStatesSince = getWaitStatesSinceDef(LaneSelectReg, IsHazardFn); + return RWLaneWaitStates - WaitStatesSince; +} Index: llvm/trunk/test/CodeGen/MIR/AMDGPU/inserted-wait-states.mir =================================================================== --- llvm/trunk/test/CodeGen/MIR/AMDGPU/inserted-wait-states.mir +++ llvm/trunk/test/CodeGen/MIR/AMDGPU/inserted-wait-states.mir @@ -7,6 +7,7 @@ define void @s_getreg() { ret void } define void @s_setreg() { ret void } define void @vmem_gt_8dw_store() { ret void } + define void @readwrite_lane() { ret void } ... --- # GCN-LABEL: name: div_fmas @@ -234,3 +235,68 @@ S_ENDPGM ... + +... 
+--- + +# GCN-LABEL: name: readwrite_lane + +# GCN-LABEL: bb.0: +# GCN: V_ADD_I32 +# GCN: S_NOP +# GCN: S_NOP +# GCN: S_NOP +# GCN: S_NOP +# GCN: V_READLANE_B32 + +# GCN-LABEL: bb.1: +# GCN: V_ADD_I32 +# GCN: S_NOP +# GCN: S_NOP +# GCN: S_NOP +# GCN: S_NOP +# GCN: V_WRITELANE_B32 + +# GCN-LABEL: bb.2: +# GCN: V_ADD_I32 +# GCN: S_NOP +# GCN: S_NOP +# GCN: S_NOP +# GCN: S_NOP +# GCN: V_READLANE_B32 + +# GCN-LABEL: bb.3: +# GCN: V_ADD_I32 +# GCN: S_NOP +# GCN: S_NOP +# GCN: S_NOP +# GCN: S_NOP +# GCN: V_WRITELANE_B32 + +name: readwrite_lane + +body: | + bb.0: + successors: %bb.1 + %vgpr0,%sgpr0_sgpr1 = V_ADD_I32_e64 %vgpr1, %vgpr2, implicit %vcc, implicit %exec + %sgpr4 = V_READLANE_B32 %vgpr4, %sgpr0 + S_BRANCH %bb.1 + + bb.1: + successors: %bb.2 + %vgpr0,%sgpr0_sgpr1 = V_ADD_I32_e64 %vgpr1, %vgpr2, implicit %vcc, implicit %exec + %vgpr4 = V_WRITELANE_B32 %sgpr0, %sgpr0 + S_BRANCH %bb.2 + + bb.2: + successors: %bb.3 + %vgpr0,implicit %vcc = V_ADD_I32_e32 %vgpr1, %vgpr2, implicit %vcc, implicit %exec + %sgpr4 = V_READLANE_B32 %vgpr4, %vcc_lo + S_BRANCH %bb.3 + + bb.3: + %vgpr0,implicit %vcc = V_ADD_I32_e32 %vgpr1, %vgpr2, implicit %vcc, implicit %exec + %vgpr4 = V_WRITELANE_B32 %sgpr4, %vcc_lo + S_ENDPGM + +...