Index: lib/Target/AMDGPU/SIInstrInfo.cpp
===================================================================
--- lib/Target/AMDGPU/SIInstrInfo.cpp
+++ lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -343,11 +343,6 @@
                               const DebugLoc &DL, unsigned DestReg,
                               unsigned SrcReg, bool KillSrc) const {
 
-  // If we are trying to copy to or from SCC, there is a bug somewhere else in
-  // the backend. While it may be theoretically possible to do this, it should
-  // never be necessary.
-  assert(DestReg != AMDGPU::SCC && SrcReg != AMDGPU::SCC);
-
   static const int16_t Sub0_15[] = {
     AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
     AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7,
@@ -392,6 +387,13 @@
   ArrayRef<int16_t> SubIndices;
 
   if (AMDGPU::SReg_32RegClass.contains(DestReg)) {
+    if (SrcReg == AMDGPU::SCC) {
+      BuildMI(MBB, MI, DL, get(AMDGPU::S_CSELECT_B32), DestReg)
+          .addImm(-1)
+          .addImm(0);
+      return;
+    }
+
     assert(AMDGPU::SReg_32RegClass.contains(SrcReg));
     BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg)
             .addReg(SrcReg, getKillRegState(KillSrc));
@@ -418,6 +420,12 @@
       .addReg(SrcReg, getKillRegState(KillSrc));
     return;
 
+  } else if (DestReg == AMDGPU::SCC) {
+    assert(AMDGPU::SReg_32RegClass.contains(SrcReg));
+    BuildMI(MBB, MI, DL, get(AMDGPU::S_CMP_LG_U32))
+        .addReg(SrcReg, getKillRegState(KillSrc))
+        .addImm(0);
+    return;
   } else if (AMDGPU::SReg_128RegClass.contains(DestReg)) {
     assert(AMDGPU::SReg_128RegClass.contains(SrcReg));
     Opcode = AMDGPU::S_MOV_B64;
Index: lib/Target/AMDGPU/SIWholeQuadMode.cpp
===================================================================
--- lib/Target/AMDGPU/SIWholeQuadMode.cpp
+++ lib/Target/AMDGPU/SIWholeQuadMode.cpp
@@ -136,6 +136,14 @@
   void propagateBlock(MachineBasicBlock &MBB, std::vector<WorkItem> &Worklist);
   char analyzeFunction(MachineFunction &MF);
 
+  bool requiresCorrectState(const MachineInstr &MI) const;
+
+  MachineBasicBlock::iterator saveSCC(MachineBasicBlock &MBB,
+                                      MachineBasicBlock::iterator Before);
+  MachineBasicBlock::iterator
+  prepareInsertion(MachineBasicBlock &MBB, MachineBasicBlock::iterator First,
+                   MachineBasicBlock::iterator Last, bool PreferLast,
+                   bool SaveSCC);
   void toExact(MachineBasicBlock &MBB, MachineBasicBlock::iterator Before,
                unsigned SaveWQM, unsigned LiveMaskReg);
   void toWQM(MachineBasicBlock &MBB, MachineBasicBlock::iterator Before,
@@ -501,6 +509,100 @@
   return GlobalFlags;
 }
 
+/// Whether \p MI really requires the exec state computed during analysis.
+///
+/// Scalar instructions must occasionally be marked WQM for correct propagation
+/// (e.g. thread masks leading up to branches), but when it comes to actual
+/// execution, they don't care about EXEC.
+bool SIWholeQuadMode::requiresCorrectState(const MachineInstr &MI) const {
+  // Skip instructions that are not affected by EXEC
+  if (TII->isScalarUnit(MI))
+    return false;
+
+  // Generic instructions such as COPY will either disappear by register
+  // coalescing or be lowered to SALU or VALU instructions.
+  if (TargetInstrInfo::isGenericOpcode(MI.getOpcode())) {
+    if (MI.getNumExplicitOperands() >= 1) {
+      const MachineOperand &Op = MI.getOperand(0);
+      if (Op.isReg()) {
+        if (TRI->isSGPRReg(*MRI, Op.getReg())) {
+          // SGPR instructions are not affected by EXEC
+          return false;
+        }
+      }
+    }
+  }
+
+  return true;
+}
+
+MachineBasicBlock::iterator
+SIWholeQuadMode::saveSCC(MachineBasicBlock &MBB,
+                         MachineBasicBlock::iterator Before) {
+  unsigned SaveReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
+
+  MachineInstr *Save =
+      BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::COPY), SaveReg)
+          .addReg(AMDGPU::SCC);
+  MachineInstr *Restore =
+      BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::COPY), AMDGPU::SCC)
+          .addReg(SaveReg);
+
+  LIS->InsertMachineInstrInMaps(*Save);
+  LIS->InsertMachineInstrInMaps(*Restore);
+  LIS->createAndComputeVirtRegInterval(SaveReg);
+
+  return Restore;
+}
+
+MachineBasicBlock::iterator SIWholeQuadMode::prepareInsertion(
+    MachineBasicBlock &MBB, MachineBasicBlock::iterator First,
+    MachineBasicBlock::iterator Last, bool PreferLast, bool SaveSCC) {
+  if (!SaveSCC)
+    return PreferLast ? Last : First;
+
+  LiveRange &LR = LIS->getRegUnit(*MCRegUnitIterator(AMDGPU::SCC, TRI));
+  auto MBBE = MBB.end();
+  SlotIndex FirstIdx = First != MBBE ? LIS->getInstructionIndex(*First)
+                                     : LIS->getMBBEndIdx(&MBB);
+  SlotIndex LastIdx =
+      Last != MBBE ? LIS->getInstructionIndex(*Last) : LIS->getMBBEndIdx(&MBB);
+  SlotIndex Idx = PreferLast ? LastIdx : FirstIdx;
+  const LiveRange::Segment *S;
+
+  for (;;) {
+    S = LR.getSegmentContaining(Idx);
+    if (!S)
+      break;
+
+    if (PreferLast) {
+      SlotIndex Next = S->start.getBaseIndex();
+      if (Next < FirstIdx)
+        break;
+      Idx = Next;
+    } else {
+      SlotIndex Next = S->end.getNextIndex().getBaseIndex();
+      if (Next > LastIdx)
+        break;
+      Idx = Next;
+    }
+  }
+
+  MachineBasicBlock::iterator MBBI;
+
+  if (MachineInstr *MI = LIS->getInstructionFromIndex(Idx))
+    MBBI = MI;
+  else {
+    assert(Idx == LIS->getMBBEndIdx(&MBB));
+    MBBI = MBB.end();
+  }
+
+  if (S)
+    MBBI = saveSCC(MBB, MBBI);
+
+  return MBBI;
+}
+
 void SIWholeQuadMode::toExact(MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator Before,
                               unsigned SaveWQM, unsigned LiveMaskReg) {
@@ -558,82 +660,68 @@
   unsigned SavedWQMReg = 0;
   bool WQMFromExec = isEntry;
   char State = isEntry ? StateExact : StateWQM;
-  MachineInstr *FirstNonWQM = nullptr;
 
-  auto II = MBB.getFirstNonPHI(), IE = MBB.end();
-  while (II != IE) {
-    MachineInstr &MI = *II;
-    ++II;
+  auto II = MBB.getFirstNonPHI(), IE = MBB.getFirstTerminator();
+  if (isEntry)
+    ++II; // Skip the instruction that saves LiveMask
 
-    // Skip instructions that are not affected by EXEC
-    if (TII->isScalarUnit(MI) && !MI.isTerminator())
-      continue;
+  MachineBasicBlock::iterator First = IE;
+  for (;;) {
+    char Needs = 0;
+    char OutNeeds = 0;
 
-    // Generic instructions such as COPY will either disappear by register
-    // coalescing or be lowered to SALU or VALU instructions.
-    if (TargetInstrInfo::isGenericOpcode(MI.getOpcode())) {
-      if (MI.getNumExplicitOperands() >= 1) {
-        const MachineOperand &Op = MI.getOperand(0);
-        if (Op.isReg()) {
-          if (TRI->isSGPRReg(*MRI, Op.getReg())) {
-            // SGPR instructions are not affected by EXEC
-            continue;
-          }
-        }
-      }
-    }
+    if (First == IE)
+      First = II;
 
-    DEBUG(dbgs() << "  " << MI);
+    if (II != IE) {
+      MachineInstr &MI = *II;
 
-    char Needs = 0;
-    char OutNeeds = 0;
-    auto InstrInfoIt = Instructions.find(&MI);
-    if (InstrInfoIt != Instructions.end()) {
-      Needs = InstrInfoIt->second.Needs;
-      OutNeeds = InstrInfoIt->second.OutNeeds;
+      if (requiresCorrectState(MI)) {
+        auto III = Instructions.find(&MI);
+        if (III != Instructions.end()) {
+          Needs = III->second.Needs;
+          OutNeeds = III->second.OutNeeds;
+        }
+      }
+    } else {
+      // End of basic block
+      if ((BI.Needs.Out & StateWQM) && State != StateWQM)
+        Needs = StateWQM;
+      else if (BI.Needs.Out == StateExact && State != StateExact)
+        Needs = StateExact;
    }
 
-    // Keep track of the first consecutive non-WQM instruction, so that we
-    // switch away from WQM as soon as possible, potentially saving a small
-    // bit of bandwidth on loads.
-    if (Needs == StateWQM)
-      FirstNonWQM = nullptr;
-    else if (!FirstNonWQM)
-      FirstNonWQM = &MI;
+    if (Needs) {
+      if (Needs != State) {
+        MachineBasicBlock::iterator Before =
+            prepareInsertion(MBB, First, II, Needs == StateWQM,
+                             Needs == StateExact || WQMFromExec);
 
-    // State switching
-    if (Needs && State != Needs) {
-      if (Needs == StateExact) {
-        assert(!SavedWQMReg);
+        if (Needs == StateExact) {
+          if (!WQMFromExec && (OutNeeds & StateWQM))
+            SavedWQMReg = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);
 
-        if (!WQMFromExec && (OutNeeds & StateWQM))
-          SavedWQMReg = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);
+          toExact(MBB, Before, SavedWQMReg, LiveMaskReg);
+        } else {
+          assert(WQMFromExec == (SavedWQMReg == 0));
 
-        toExact(MBB, FirstNonWQM, SavedWQMReg, LiveMaskReg);
-      } else {
-        assert(WQMFromExec == (SavedWQMReg == 0));
-        toWQM(MBB, &MI, SavedWQMReg);
+          toWQM(MBB, Before, SavedWQMReg);
 
-        if (SavedWQMReg) {
-          LIS->createAndComputeVirtRegInterval(SavedWQMReg);
-          SavedWQMReg = 0;
+          if (SavedWQMReg) {
+            LIS->createAndComputeVirtRegInterval(SavedWQMReg);
+            SavedWQMReg = 0;
+          }
        }
+
+        State = Needs;
      }
 
-      State = Needs;
+      First = IE;
    }
-  }
-
-  if ((BI.Needs.Out & StateWQM) && State != StateWQM) {
-    assert(WQMFromExec == (SavedWQMReg == 0));
-    toWQM(MBB, MBB.end(), SavedWQMReg);
-    if (SavedWQMReg)
-      LIS->createAndComputeVirtRegInterval(SavedWQMReg);
-  } else if (BI.Needs.Out == StateExact && State != StateExact) {
-    toExact(MBB, FirstNonWQM ? MachineBasicBlock::iterator(FirstNonWQM)
-                             : MBB.getFirstTerminator(),
-            0, LiveMaskReg);
+    if (II == IE)
+      break;
+    ++II;
  }
 }
@@ -708,5 +796,10 @@
   if (LiveMaskReg)
     LIS->createAndComputeVirtRegInterval(LiveMaskReg);
 
+  // Physical registers like SCC aren't tracked by default anyway, so just
+  // removing the ranges we computed is the simplest option for maintaining
+  // the analysis results.
+  LIS->removeRegUnit(*MCRegUnitIterator(AMDGPU::SCC, TRI));
+
   return true;
 }
Index: test/CodeGen/AMDGPU/wqm.ll
===================================================================
--- test/CodeGen/AMDGPU/wqm.ll
+++ test/CodeGen/AMDGPU/wqm.ll
@@ -483,6 +483,41 @@
   ret <4 x float> %r
 }
 
+; Test awareness that s_wqm_b64 clobbers SCC.
+;
+; CHECK-LABEL: {{^}}test_scc:
+; CHECK: s_mov_b64 [[ORIG:s\[[0-9]+:[0-9]+\]]], exec
+; CHECK: s_wqm_b64 exec, exec
+; CHECK: s_cmp_
+; CHECK-NEXT: s_cbranch_scc
+; CHECK: ; %if
+; CHECK: s_and_b64 exec, exec, [[ORIG]]
+; CHECK: image_sample
+; CHECK: ; %else
+; CHECK: s_and_b64 exec, exec, [[ORIG]]
+; CHECK: image_sample
+; CHECK: ; %end
+define amdgpu_ps <4 x float> @test_scc(i32 inreg %sel, i32 %idx) #1 {
+main_body:
+  %cc = icmp sgt i32 %sel, 0
+  br i1 %cc, label %if, label %else
+
+if:
+  %r.if = call <4 x float> @llvm.SI.image.sample.i32(i32 0, <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+  br label %end
+
+else:
+  %r.else = call <4 x float> @llvm.SI.image.sample.v2i32(<2 x i32> , <8 x i32> undef, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+  br label %end
+
+end:
+  %r = phi <4 x float> [ %r.if, %if ], [ %r.else, %else ]
+
+  call void @llvm.amdgcn.buffer.store.f32(float 1.0, <4 x i32> undef, i32 %idx, i32 0, i1 0, i1 0)
+
+  ret <4 x float> %r
+}
+
 declare void @llvm.amdgcn.image.store.v4i32(<4 x float>, <4 x i32>, <8 x i32>, i32, i1, i1, i1, i1) #1
 declare void @llvm.amdgcn.buffer.store.f32(float, <4 x i32>, i32, i32, i1, i1) #1
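
For reference, a rough sketch (not actual compiler output) of what an exec transition looks like when it has to be inserted at a point where SCC is live: saveSCC() brackets the transition with COPYs, and the new copyPhysReg() handling lowers those roughly as below. The SGPR number and branch label are made up for illustration; prepareInsertion() only falls back to this when it cannot find an insertion point outside the SCC live range.

  s_cselect_b32 s2, -1, 0   ; COPY from SCC: s2 = SCC ? -1 : 0
  s_wqm_b64 exec, exec      ; the exec update that would clobber SCC
  s_cmp_lg_u32 s2, 0        ; COPY to SCC: SCC = (s2 != 0)
  s_cbranch_scc1 BB0_2      ; a later SCC user still sees the original value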