Index: lib/Target/AMDGPU/AMDGPU.h
===================================================================
--- lib/Target/AMDGPU/AMDGPU.h
+++ lib/Target/AMDGPU/AMDGPU.h
@@ -51,6 +51,7 @@
 FunctionPass *createSIDebuggerInsertNopsPass();
 FunctionPass *createSIInsertWaitcntsPass();
 FunctionPass *createSIFixWWMLivenessPass();
+FunctionPass *createSIFormMemoryClausesPass();
 FunctionPass *createAMDGPUSimplifyLibCallsPass(const TargetOptions &);
 FunctionPass *createAMDGPUUseNativeCallsPass();
 FunctionPass *createAMDGPUCodeGenPreparePass();
@@ -177,6 +178,9 @@
 void initializeSIInsertWaitcntsPass(PassRegistry&);
 extern char &SIInsertWaitcntsID;
 
+void initializeSIFormMemoryClausesPass(PassRegistry&);
+extern char &SIFormMemoryClausesID;
+
 void initializeAMDGPUUnifyDivergentExitNodesPass(PassRegistry&);
 extern char &AMDGPUUnifyDivergentExitNodesID;
 
Index: lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
===================================================================
--- lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
+++ lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
@@ -171,6 +171,7 @@
   initializeSIDebuggerInsertNopsPass(*PR);
   initializeSIOptimizeExecMaskingPass(*PR);
   initializeSIFixWWMLivenessPass(*PR);
+  initializeSIFormMemoryClausesPass(*PR);
   initializeAMDGPUUnifyDivergentExitNodesPass(*PR);
   initializeAMDGPUAAWrapperPassPass(*PR);
   initializeAMDGPUUseNativeCallsPass(*PR);
@@ -842,6 +843,8 @@
 void GCNPassConfig::addOptimizedRegAlloc(FunctionPass *RegAllocPass) {
   insertPass(&MachineSchedulerID, &SIOptimizeExecMaskingPreRAID);
 
+  insertPass(&SIOptimizeExecMaskingPreRAID, &SIFormMemoryClausesID);
+
   // This must be run immediately after phi elimination and before
   // TwoAddressInstructions, otherwise the processing of the tied operand of
   // SI_ELSE will introduce a copy of the tied operand source after the else.
Index: lib/Target/AMDGPU/CMakeLists.txt
===================================================================
--- lib/Target/AMDGPU/CMakeLists.txt
+++ lib/Target/AMDGPU/CMakeLists.txt
@@ -85,6 +85,7 @@
   SIFixVGPRCopies.cpp
   SIFixWWMLiveness.cpp
   SIFoldOperands.cpp
+  SIFormMemoryClauses.cpp
   SIFrameLowering.cpp
   SIInsertSkips.cpp
   SIInsertWaitcnts.cpp
Index: lib/Target/AMDGPU/SIFormMemoryClauses.cpp
===================================================================
--- /dev/null
+++ lib/Target/AMDGPU/SIFormMemoryClauses.cpp
@@ -0,0 +1,398 @@
+//===-- SIFormMemoryClauses.cpp -------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file
+/// This pass creates bundles of SMEM and VMEM instructions forming memory
+/// clauses if XNACK is enabled. Def operands of clauses are marked as early
+/// clobber to make sure we will not overwrite any source within a clause.
+///
+//===----------------------------------------------------------------------===//
+
+#include "AMDGPU.h"
+#include "AMDGPUSubtarget.h"
+#include "GCNRegPressure.h"
+#include "SIInstrInfo.h"
+#include "SIMachineFunctionInfo.h"
+#include "SIRegisterInfo.h"
+#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/CodeGen/LiveIntervals.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "si-form-memory-clauses"
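+
+// For illustration, a sketch distilled from the MIR tests below (no extra
+// functionality): two loads through the same base pointer become one bundle
+// whose defs are marked early-clobber, e.g.
+//
+//   early-clobber %2:vreg_128, early-clobber %3:vreg_128 = BUNDLE %1, implicit $exec {
+//     %2:vreg_128 = GLOBAL_LOAD_DWORDX4 %1, 0, 0, 0, implicit $exec
+//     %3:vreg_128 = GLOBAL_LOAD_DWORDX4 %1, 16, 0, 0, implicit $exec
+//   }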
+
+// Clauses longer than 15 instructions would overflow one of the counters
+// and stall. They can stall even earlier if there are outstanding counters.
+static cl::opt<unsigned>
+MaxClause("amdgpu-max-memory-clause", cl::Hidden, cl::init(15),
+          cl::desc("Maximum length of a memory clause, instructions"));
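+
+// Being a command-line default, the cap above can be lowered for experiments
+// without rebuilding, e.g. (illustrative invocation only):
+//   llc -march=amdgcn -mcpu=gfx902 -amdgpu-max-memory-clause=7 a.ll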
+
+namespace {
+
+class SIFormMemoryClauses : public MachineFunctionPass {
+  typedef DenseMap<unsigned, std::pair<unsigned, LaneBitmask>> RegUse;
+
+public:
+  static char ID;
+
+public:
+  SIFormMemoryClauses() : MachineFunctionPass(ID) {
+    initializeSIFormMemoryClausesPass(*PassRegistry::getPassRegistry());
+  }
+
+  bool runOnMachineFunction(MachineFunction &MF) override;
+
+  StringRef getPassName() const override {
+    return "SI Form memory clauses";
+  }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.addRequired<LiveIntervals>();
+    AU.setPreservesAll();
+    MachineFunctionPass::getAnalysisUsage(AU);
+  }
+
+private:
+  template <typename Callable>
+  void forAllLanes(unsigned Reg, LaneBitmask LaneMask, Callable Func) const;
+
+  bool canBundle(const MachineInstr &MI, RegUse &Defs, RegUse &Uses) const;
+  bool checkPressure(const MachineInstr &MI, GCNDownwardRPTracker &RPT);
+  void collectRegUses(const MachineInstr &MI, RegUse &Defs, RegUse &Uses) const;
+  bool processRegUses(const MachineInstr &MI, RegUse &Defs, RegUse &Uses,
+                      GCNDownwardRPTracker &RPT);
+
+  const SISubtarget *ST;
+  const SIRegisterInfo *TRI;
+  const MachineRegisterInfo *MRI;
+  SIMachineFunctionInfo *MFI;
+
+  unsigned LastRecordedOccupancy;
+  unsigned MaxVGPRs;
+  unsigned MaxSGPRs;
+};
+
+} // End anonymous namespace.
+
+INITIALIZE_PASS_BEGIN(SIFormMemoryClauses, DEBUG_TYPE,
+                      "SI Form memory clauses", false, false)
+INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
+INITIALIZE_PASS_END(SIFormMemoryClauses, DEBUG_TYPE,
+                    "SI Form memory clauses", false, false)
+
+
+char SIFormMemoryClauses::ID = 0;
+
+char &llvm::SIFormMemoryClausesID = SIFormMemoryClauses::ID;
+
+FunctionPass *llvm::createSIFormMemoryClausesPass() {
+  return new SIFormMemoryClauses();
+}
+
+static bool isVMEMClauseInst(const MachineInstr &MI) {
+  return SIInstrInfo::isFLAT(MI) || SIInstrInfo::isVMEM(MI);
+}
+
+static bool isSMEMClauseInst(const MachineInstr &MI) {
+  return SIInstrInfo::isSMRD(MI);
+}
+
+// There is no sense in creating store clauses: they do not define anything,
+// thus there is nothing to mark as early-clobber.
+static bool isValidClauseInst(const MachineInstr &MI, bool IsVMEMClause) {
+  if (MI.isDebugValue() || MI.isBundled())
+    return false;
+  if (!MI.mayLoad() || MI.mayStore())
+    return false;
+  if (AMDGPU::getAtomicNoRetOp(MI.getOpcode()) != -1 ||
+      AMDGPU::getAtomicRetOp(MI.getOpcode()) != -1)
+    return false;
+  if (IsVMEMClause && !isVMEMClauseInst(MI))
+    return false;
+  if (!IsVMEMClause && !isSMEMClauseInst(MI))
+    return false;
+  return true;
+}
+
+static unsigned getMopState(const MachineOperand &MO) {
+  unsigned S = 0;
+  if (MO.isImplicit())
+    S |= RegState::Implicit;
+  if (MO.isDead())
+    S |= RegState::Dead;
+  if (MO.isUndef())
+    S |= RegState::Undef;
+  if (MO.isKill())
+    S |= RegState::Kill;
+  if (MO.isEarlyClobber())
+    S |= RegState::EarlyClobber;
+  if (TargetRegisterInfo::isPhysicalRegister(MO.getReg()) && MO.isRenamable())
+    S |= RegState::Renamable;
+  return S;
+}
+
+template <typename Callable>
+void SIFormMemoryClauses::forAllLanes(unsigned Reg, LaneBitmask LaneMask,
+                                      Callable Func) const {
+  if (LaneMask.all() || TargetRegisterInfo::isPhysicalRegister(Reg) ||
+      LaneMask == MRI->getMaxLaneMaskForVReg(Reg)) {
+    Func(0);
+    return;
+  }
+
+  const TargetRegisterClass *RC = MRI->getRegClass(Reg);
+  unsigned E = TRI->getNumSubRegIndices();
+  SmallVector<unsigned, 8> CoveringSubregs;
+  for (unsigned Idx = 1; Idx < E; ++Idx) {
+    // Is this index even compatible with the given class?
+    if (TRI->getSubClassWithSubReg(RC, Idx) != RC)
+      continue;
+    LaneBitmask SubRegMask = TRI->getSubRegIndexLaneMask(Idx);
+    // Early exit if we found a perfect match.
+    if (SubRegMask == LaneMask) {
+      Func(Idx);
+      return;
+    }
+
+    if ((SubRegMask & ~LaneMask).any() || (SubRegMask & LaneMask).none())
+      continue;
+
+    CoveringSubregs.push_back(Idx);
+  }
+
+  llvm::sort(CoveringSubregs.begin(), CoveringSubregs.end(),
+             [this](unsigned A, unsigned B) {
+               LaneBitmask MaskA = TRI->getSubRegIndexLaneMask(A);
+               LaneBitmask MaskB = TRI->getSubRegIndexLaneMask(B);
+               unsigned NA = MaskA.getNumLanes();
+               unsigned NB = MaskB.getNumLanes();
+               if (NA != NB)
+                 return NA > NB;
+               return MaskA.getHighestLane() > MaskB.getHighestLane();
+             });
+
+  for (unsigned Idx : CoveringSubregs) {
+    LaneBitmask SubRegMask = TRI->getSubRegIndexLaneMask(Idx);
+    if ((SubRegMask & ~LaneMask).any() || (SubRegMask & LaneMask).none())
+      continue;
+
+    Func(Idx);
+    LaneMask &= ~SubRegMask;
+    if (LaneMask.none())
+      return;
+  }
+
+  llvm_unreachable("Failed to find all subregs to cover lane mask");
+}
+
+// Returns false if there is a use of a def already in the map.
+// In this case we must break the clause.
+bool SIFormMemoryClauses::canBundle(const MachineInstr &MI,
+                                    RegUse &Defs, RegUse &Uses) const {
+  // Check interference with defs.
+  for (const MachineOperand &MO : MI.operands()) {
+    // TODO: Prologue/Epilogue Insertion pass does not process bundled
+    // instructions.
+    if (MO.isFI())
+      return false;
+
+    if (!MO.isReg())
+      continue;
+
+    unsigned Reg = MO.getReg();
+
+    // If it is tied we will need to write the same register as we read.
+    if (MO.isTied())
+      return false;
+
+    RegUse &Map = MO.isDef() ? Uses : Defs;
+    auto Conflict = Map.find(Reg);
+    if (Conflict == Map.end())
+      continue;
+
+    if (TargetRegisterInfo::isPhysicalRegister(Reg))
+      return false;
+
+    LaneBitmask Mask = TRI->getSubRegIndexLaneMask(MO.getSubReg());
+    if ((Conflict->second.second & Mask).any())
+      return false;
+  }
+
+  return true;
+}
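+
+// For example (a sketch mirroring the "indirect" MIR test below): given
+//   %1:vreg_64  = GLOBAL_LOAD_DWORDX2 %0, ...
+//   %2:vreg_128 = GLOBAL_LOAD_DWORDX4 %1, ...
+// the second load uses the def of the first, so canBundle() breaks the
+// clause between them and only the loads through %1 get bundled together.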
+
+// Since all defs in the clause are early-clobber we can run out of registers.
+// Function returns false if pressure would hit the limit if the instruction
+// is bundled into a memory clause.
+bool SIFormMemoryClauses::checkPressure(const MachineInstr &MI,
+                                        GCNDownwardRPTracker &RPT) {
+  // NB: skip advanceBeforeNext() call. Since all defs will be marked
+  // early-clobber they will all stay alive at least to the end of the
+  // clause. Therefore we should not decrease pressure even if a load
+  // pointer becomes dead and could otherwise be reused for the destination.
+  RPT.advanceToNext();
+  GCNRegPressure MaxPressure = RPT.moveMaxPressure();
+  unsigned Occupancy = MaxPressure.getOccupancy(*ST);
+  if (Occupancy >= MFI->getMinAllowedOccupancy() &&
+      MaxPressure.getVGPRNum() <= MaxVGPRs &&
+      MaxPressure.getSGPRNum() <= MaxSGPRs) {
+    LastRecordedOccupancy = Occupancy;
+    return true;
+  }
+  return false;
+}
+
+// Collect register defs and uses along with their lane masks and states.
+void SIFormMemoryClauses::collectRegUses(const MachineInstr &MI,
+                                         RegUse &Defs, RegUse &Uses) const {
+  for (const MachineOperand &MO : MI.operands()) {
+    if (!MO.isReg())
+      continue;
+    unsigned Reg = MO.getReg();
+    if (!Reg)
+      continue;
+
+    LaneBitmask Mask = TargetRegisterInfo::isVirtualRegister(Reg) ?
+                       TRI->getSubRegIndexLaneMask(MO.getSubReg()) :
+                       LaneBitmask::getAll();
+    RegUse &Map = MO.isDef() ? Defs : Uses;
+
+    auto Loc = Map.find(Reg);
+    unsigned State = getMopState(MO);
+    if (Loc == Map.end()) {
+      Map[Reg] = std::make_pair(State, Mask);
+    } else {
+      Loc->second.first |= State;
+      Loc->second.second |= Mask;
+    }
+  }
+}
+
+// Check register def/use conflicts, occupancy limits and collect def/use maps.
+// Return true if the instruction can be bundled with the previous one. If it
+// cannot, the def/use maps are not updated.
+bool SIFormMemoryClauses::processRegUses(const MachineInstr &MI,
+                                         RegUse &Defs, RegUse &Uses,
+                                         GCNDownwardRPTracker &RPT) {
+  if (!canBundle(MI, Defs, Uses))
+    return false;
+
+  if (!checkPressure(MI, RPT))
+    return false;
+
+  collectRegUses(MI, Defs, Uses);
+  return true;
+}
+
+bool SIFormMemoryClauses::runOnMachineFunction(MachineFunction &MF) {
+  if (skipFunction(MF.getFunction()))
+    return false;
+
+  ST = &MF.getSubtarget<SISubtarget>();
+  if (!ST->isXNACKEnabled())
+    return false;
+
+  const SIInstrInfo *TII = ST->getInstrInfo();
+  TRI = ST->getRegisterInfo();
+  MRI = &MF.getRegInfo();
+  MFI = MF.getInfo<SIMachineFunctionInfo>();
+  LiveIntervals *LIS = &getAnalysis<LiveIntervals>();
+  SlotIndexes *Ind = LIS->getSlotIndexes();
+  bool Changed = false;
+
+  MaxVGPRs = TRI->getAllocatableSet(MF, &AMDGPU::VGPR_32RegClass).count();
+  MaxSGPRs = TRI->getAllocatableSet(MF, &AMDGPU::SGPR_32RegClass).count();
+
+  for (MachineBasicBlock &MBB : MF) {
+    MachineBasicBlock::instr_iterator Next;
+    for (auto I = MBB.instr_begin(), E = MBB.instr_end(); I != E; I = Next) {
+      MachineInstr &MI = *I;
+      Next = std::next(I);
+
+      bool IsVMEM = isVMEMClauseInst(MI);
+
+      if (!isValidClauseInst(MI, IsVMEM))
+        continue;
+
+      RegUse Defs, Uses;
+      GCNDownwardRPTracker RPT(*LIS);
+      RPT.reset(MI);
+
+      if (!processRegUses(MI, Defs, Uses, RPT))
+        continue;
+
+      unsigned Length = 1;
+      for ( ; Next != E && Length < MaxClause; ++Next) {
+        if (!isValidClauseInst(*Next, IsVMEM))
+          break;
+
+        // A load from a pointer which was loaded inside the same bundle is an
+        // impossible clause because we will need to write and read the same
+        // register inside. In this case processRegUses will return false.
+        if (!processRegUses(*Next, Defs, Uses, RPT))
+          break;
+
+        ++Length;
+      }
+      if (Length < 2)
+        continue;
+
+      Changed = true;
+      MFI->limitOccupancy(LastRecordedOccupancy);
+
+      auto B = BuildMI(MBB, I, DebugLoc(), TII->get(TargetOpcode::BUNDLE));
+      Ind->insertMachineInstrInMaps(*B);
+
+      for (auto BI = I; BI != Next; ++BI) {
+        BI->bundleWithPred();
+        Ind->removeSingleMachineInstrFromMaps(*BI);
+
+        for (MachineOperand &MO : BI->defs())
+          if (MO.readsReg())
+            MO.setIsInternalRead(true);
+      }
+
+      for (auto &&R : Defs) {
+        forAllLanes(R.first, R.second.second, [&R, &B](unsigned SubReg) {
+          unsigned S = R.second.first | RegState::EarlyClobber;
+          if (!SubReg)
+            S &= ~(RegState::Undef | RegState::Dead);
+          B.addDef(R.first, S, SubReg);
+        });
+      }
+
+      for (auto &&R : Uses) {
+        forAllLanes(R.first, R.second.second, [&R, &B](unsigned SubReg) {
+          B.addUse(R.first, R.second.first & ~RegState::Kill, SubReg);
+        });
+      }
+
+      for (auto &&R : Defs) {
+        unsigned Reg = R.first;
+        Uses.erase(Reg);
+        if (TargetRegisterInfo::isPhysicalRegister(Reg))
+          continue;
+        LIS->removeInterval(Reg);
+        LIS->createAndComputeVirtRegInterval(Reg);
+      }
+
+      for (auto &&R : Uses) {
+        unsigned Reg = R.first;
+        if (TargetRegisterInfo::isPhysicalRegister(Reg))
+          continue;
+        LIS->removeInterval(Reg);
+        LIS->createAndComputeVirtRegInterval(Reg);
+      }
+    }
+  }
+
+  return Changed;
+}
Index: lib/Target/AMDGPU/SIInstrInfo.cpp
===================================================================
--- lib/Target/AMDGPU/SIInstrInfo.cpp
+++ lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -1266,6 +1266,22 @@
     MI.setDesc(get(AMDGPU::S_MOV_B64));
     break;
   }
+  case TargetOpcode::BUNDLE: {
+    if (!MI.mayLoad())
+      return false;
+
+    // If it is a load it must be a memory clause.
+    for (MachineBasicBlock::instr_iterator I = MI.getIterator();
+         I->isBundledWithSucc(); ++I) {
+      I->unbundleFromSucc();
+      for (MachineOperand &MO : I->operands())
+        if (MO.isReg())
+          MO.setIsInternalRead(false);
+    }
+
+    MI.eraseFromParent();
+    break;
+  }
   }
   return true;
 }
Index: test/CodeGen/AMDGPU/memory_clause.ll
===================================================================
--- /dev/null
+++ test/CodeGen/AMDGPU/memory_clause.ll
@@ -0,0 +1,166 @@
+; RUN: llc -march=amdgcn -mcpu=gfx902 -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
+
+; GCN-LABEL: {{^}}vector_clause:
+; GCN: global_load_dwordx4
+; GCN-NEXT: global_load_dwordx4
+; GCN-NEXT: global_load_dwordx4
+; GCN-NEXT: global_load_dwordx4
+; GCN-NEXT: s_nop
+define amdgpu_kernel void @vector_clause(<4 x i32> addrspace(1)* noalias nocapture readonly %arg, <4 x i32> addrspace(1)* noalias nocapture %arg1) {
+bb:
+  %tmp = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %tmp2 = zext i32 %tmp to i64
+  %tmp3 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* %arg, i64 %tmp2
+  %tmp4 = load <4 x i32>, <4 x i32> addrspace(1)* %tmp3, align 16
+  %tmp5 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* %arg1, i64 %tmp2
+  %tmp6 = add nuw nsw i64 %tmp2, 1
+  %tmp7 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* %arg, i64 %tmp6
+  %tmp8 = load <4 x i32>, <4 x i32> addrspace(1)* %tmp7, align 16
+  %tmp9 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* %arg1, i64 %tmp6
+  %tmp10 = add nuw nsw i64 %tmp2, 2
+  %tmp11 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* %arg, i64 %tmp10
+  %tmp12 = load <4 x i32>, <4 x i32> addrspace(1)* %tmp11, align 16
+  %tmp13 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* %arg1, i64 %tmp10
+  %tmp14 = add nuw nsw i64 %tmp2, 3
+  %tmp15 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* %arg, i64 %tmp14
+  %tmp16 = load <4 x i32>, <4 x i32> addrspace(1)* %tmp15, align 16
+  %tmp17 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* %arg1, i64 %tmp14
+  store <4 x i32> %tmp4, <4 x i32> addrspace(1)* %tmp5, align 16
+  store <4 x i32> %tmp8, <4 x i32> addrspace(1)* %tmp9, align 16
+  store <4 x i32> %tmp12, <4 x i32> addrspace(1)* %tmp13, align 16
+  store <4 x i32> %tmp16, <4 x i32> addrspace(1)* %tmp17, align 16
+  ret void
+}
+
+; GCN-LABEL: {{^}}scalar_clause:
+; GCN: s_load_dwordx2
+; GCN-NEXT: s_load_dwordx2
+; GCN-NEXT: s_nop
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: s_load_dwordx4
+; GCN-NEXT: s_load_dwordx4
+; GCN-NEXT: s_load_dwordx4
+; GCN-NEXT: s_load_dwordx4
+define amdgpu_kernel void @scalar_clause(<4 x i32> addrspace(1)* noalias nocapture readonly %arg, <4 x i32> addrspace(1)* noalias nocapture %arg1) {
+bb:
+  %tmp = load <4 x i32>, <4 x i32> addrspace(1)* %arg, align 16
+  %tmp2 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* %arg, i64 1
+  %tmp3 = load <4 x i32>, <4 x i32> addrspace(1)* %tmp2, align 16
+  %tmp4 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* %arg1, i64 1
+  %tmp5 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* %arg, i64 2
+  %tmp6 = load <4 x i32>, <4 x i32> addrspace(1)* %tmp5, align 16
+  %tmp7 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* %arg1, i64 2
+  %tmp8 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* %arg, i64 3
+  %tmp9 = load <4 x i32>, <4 x i32> addrspace(1)* %tmp8, align 16
+  %tmp10 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* %arg1, i64 3
+  store <4 x i32> %tmp, <4 x i32> addrspace(1)* %arg1, align 16
+  store <4 x i32> %tmp3, <4 x i32> addrspace(1)* %tmp4, align 16
+  store <4 x i32> %tmp6, <4 x i32> addrspace(1)* %tmp7, align 16
+  store <4 x i32> %tmp9, <4 x i32> addrspace(1)* %tmp10, align 16
+  ret void
+}
+
+; GCN-LABEL: {{^}}mubuf_clause:
+; GCN: buffer_load_dword
+; GCN-NEXT: buffer_load_dword
+; GCN-NEXT: buffer_load_dword
+; GCN-NEXT: buffer_load_dword
+; GCN-NEXT: buffer_load_dword
+; GCN-NEXT: buffer_load_dword
+; GCN-NEXT: buffer_load_dword
+; GCN-NEXT: buffer_load_dword
+; GCN-NEXT: buffer_load_dword
+; GCN-NEXT: buffer_load_dword
+; GCN-NEXT: buffer_load_dword
+; GCN-NEXT: buffer_load_dword
+; GCN-NEXT: buffer_load_dword
+; GCN-NEXT: buffer_load_dword
+; GCN-NEXT: buffer_load_dword
+; GCN-NEXT: s_nop
+; GCN-NEXT: buffer_load_dword
+define void @mubuf_clause(<4 x i32> addrspace(5)* noalias nocapture readonly %arg, <4 x i32> addrspace(5)* noalias nocapture %arg1) {
+bb:
+  %tmp = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %tmp2 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(5)* %arg, i32 %tmp
+  %tmp3 = load <4 x i32>, <4 x i32> addrspace(5)* %tmp2, align 16
+  %tmp4 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(5)* %arg1, i32 %tmp
+  %tmp5 = add nuw nsw i32 %tmp, 1
+  %tmp6 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(5)* %arg, i32 %tmp5
+  %tmp7 = load <4 x i32>, <4 x i32> addrspace(5)* %tmp6, align 16
+  %tmp8 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(5)* %arg1, i32 %tmp5
+  %tmp9 = add nuw nsw i32 %tmp, 2
+  %tmp10 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(5)* %arg, i32 %tmp9
+  %tmp11 = load <4 x i32>, <4 x i32> addrspace(5)* %tmp10, align 16
+  %tmp12 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(5)* %arg1, i32 %tmp9
+  %tmp13 = add nuw nsw i32 %tmp, 3
+  %tmp14 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(5)* %arg, i32 %tmp13
+  %tmp15 = load <4 x i32>, <4 x i32> addrspace(5)* %tmp14, align 16
+  %tmp16 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(5)* %arg1, i32 %tmp13
+  store <4 x i32> %tmp3, <4 x i32> addrspace(5)* %tmp4, align 16
+  store <4 x i32> %tmp7, <4 x i32> addrspace(5)* %tmp8, align 16
+  store <4 x i32> %tmp11, <4 x i32> addrspace(5)* %tmp12, align 16
+  store <4 x i32> %tmp15, <4 x i32> addrspace(5)* %tmp16, align 16
+  ret void
+}
+
+; GCN-LABEL: {{^}}vector_clause_indirect:
+; GCN: global_load_dwordx2 [[ADDR:v\[[0-9:]+\]]], v[{{[0-9:]+}}], off
+; GCN-NEXT: s_nop
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: global_load_dwordx4 v[{{[0-9:]+}}], [[ADDR]], off
+; GCN-NEXT: global_load_dwordx4 v[{{[0-9:]+}}], [[ADDR]], off offset:16
+define amdgpu_kernel void @vector_clause_indirect(i64 addrspace(1)* noalias nocapture readonly %arg, <4 x i32> addrspace(1)* noalias nocapture readnone %arg1, <4 x i32> addrspace(1)* noalias nocapture %arg2) {
+bb:
+  %tmp = tail call i32 @llvm.amdgcn.workitem.id.x()
+  %tmp3 = zext i32 %tmp to i64
+  %tmp4 = getelementptr inbounds i64, i64 addrspace(1)* %arg, i64 %tmp3
+  %tmp5 = bitcast i64 addrspace(1)* %tmp4 to <4 x i32> addrspace(1)* addrspace(1)*
+  %tmp6 = load <4 x i32> addrspace(1)*, <4 x i32> addrspace(1)* addrspace(1)* %tmp5, align 8
+  %tmp7 = load <4 x i32>, <4 x i32> addrspace(1)* %tmp6, align 16
+  %tmp8 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* %tmp6, i64 1
+  %tmp9 = load <4 x i32>, <4 x i32> addrspace(1)* %tmp8, align 16
+  store <4 x i32> %tmp7, <4 x i32> addrspace(1)* %arg2, align 16
+  %tmp10 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* %arg2, i64 1
+  store <4 x i32> %tmp9, <4 x i32> addrspace(1)* %tmp10, align 16
+  ret void
+}
+
+; GCN-LABEL: {{^}}load_global_d16_hi:
+; GCN: global_load_short_d16_hi v
+; GCN-NEXT: s_nop
+; GCN-NEXT: global_load_short_d16_hi v
+define void @load_global_d16_hi(i16 addrspace(1)* %in, i16 %reg, <2 x i16> addrspace(1)* %out) {
+entry:
+  %gep = getelementptr inbounds i16, i16 addrspace(1)* %in, i64 32
+  %load1 = load i16, i16 addrspace(1)* %in
+  %load2 = load i16, i16 addrspace(1)* %gep
+  %build0 = insertelement <2 x i16> undef, i16 %reg, i32 0
+  %build1 = insertelement <2 x i16> %build0, i16 %load1, i32 1
+  store <2 x i16> %build1, <2 x i16> addrspace(1)* %out
+  %build2 = insertelement <2 x i16> undef, i16 %reg, i32 0
+  %build3 = insertelement <2 x i16> %build2, i16 %load2, i32 1
+  %gep2 = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %out, i64 32
+  store <2 x i16> %build3, <2 x i16> addrspace(1)* %gep2
+  ret void
+}
+
+; GCN-LABEL: {{^}}load_global_d16_lo:
+; GCN: global_load_short_d16 v
+; GCN-NEXT: s_nop
+; GCN-NEXT: global_load_short_d16 v
+define void @load_global_d16_lo(i16 addrspace(1)* %in, i32 %reg, <2 x i16> addrspace(1)* %out) {
+entry:
+  %gep = getelementptr inbounds i16, i16 addrspace(1)* %in, i64 32
+  %reg.bc1 = bitcast i32 %reg to <2 x i16>
+  %reg.bc2 = bitcast i32 %reg to <2 x i16>
+  %load1 = load i16, i16 addrspace(1)* %in
+  %load2 = load i16, i16 addrspace(1)* %gep
+  %build1 = insertelement <2 x i16> %reg.bc1, i16 %load1, i32 0
+  %build2 = insertelement <2 x i16> %reg.bc2, i16 %load2, i32 0
+  %gep2 = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %out, i64 32
+  store <2 x i16> %build1, <2 x i16> addrspace(1)* %out
+  store <2 x i16> %build2, <2 x i16> addrspace(1)* %gep2
+  ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x()
Index: test/CodeGen/AMDGPU/memory_clause.mir
===================================================================
--- /dev/null
+++ test/CodeGen/AMDGPU/memory_clause.mir
@@ -0,0 +1,388 @@
+# RUN: llc -march=amdgcn -mcpu=gfx902 -verify-machineinstrs -run-pass=phi-node-elimination,si-form-memory-clauses %s -o - | FileCheck -check-prefix=GCN %s
+
+# GCN-LABEL: {{^}}name: vector_clause{{$}}
+# GCN: early-clobber %2:vreg_128, early-clobber %4:vreg_128, early-clobber %1:vreg_128, early-clobber %3:vreg_128 = BUNDLE %0, implicit $exec {
+# GCN-NEXT: %1:vreg_128 = GLOBAL_LOAD_DWORDX4 %0, 0, 0, 0, implicit $exec
+# GCN-NEXT: %2:vreg_128 = GLOBAL_LOAD_DWORDX4 %0, 16, 0, 0, implicit $exec
+# GCN-NEXT: %3:vreg_128 = GLOBAL_LOAD_DWORDX4 %0, 32, 0, 0, implicit $exec
+# GCN-NEXT: %4:vreg_128 = GLOBAL_LOAD_DWORDX4 %0, 48, 0, 0, implicit $exec
+# GCN-NEXT: }
+# GCN-NEXT: GLOBAL_STORE_DWORDX4 %0, %1, 0, 0, 0, implicit $exec
+
+---
+name: vector_clause
+tracksRegLiveness: true
+registers:
+  - { id: 0, class: vreg_64 }
+  - { id: 1, class: vreg_128 }
+  - { id: 2, class: vreg_128 }
+  - { id: 3, class: vreg_128 }
+  - { id: 4, class: vreg_128 }
+body: |
+  bb.0:
+    %0 = IMPLICIT_DEF
+    %1:vreg_128 = GLOBAL_LOAD_DWORDX4 %0, 0, 0, 0, implicit $exec
+    %2:vreg_128 = GLOBAL_LOAD_DWORDX4 %0, 16, 0, 0, implicit $exec
+    %3:vreg_128 = GLOBAL_LOAD_DWORDX4 %0, 32, 0, 0, implicit $exec
+    %4:vreg_128 = GLOBAL_LOAD_DWORDX4 %0, 48, 0, 0, implicit $exec
+    GLOBAL_STORE_DWORDX4 %0, %1, 0, 0, 0, implicit $exec
+    GLOBAL_STORE_DWORDX4 %0, %2, 16, 0, 0, implicit $exec
+    GLOBAL_STORE_DWORDX4 %0, %3, 32, 0, 0, implicit $exec
+    GLOBAL_STORE_DWORDX4 %0, %4, 48, 0, 0, implicit $exec
+...
+
+# GCN-LABEL: {{^}}name: subreg_full{{$}}
+# GCN: early-clobber %1:vreg_128 = BUNDLE %0, implicit $exec {
+# GCN-NEXT: undef %1.sub0:vreg_128 = GLOBAL_LOAD_DWORD %0.sub0_sub1, 0, 0, 0, implicit $exec
+# GCN-NEXT: internal %1.sub1:vreg_128 = GLOBAL_LOAD_DWORD %0.sub1_sub2, 16, 0, 0, implicit $exec
+# GCN-NEXT: internal %1.sub2:vreg_128 = GLOBAL_LOAD_DWORD %0.sub2_sub3, 32, 0, 0, implicit $exec
+# GCN-NEXT: internal %1.sub3:vreg_128 = GLOBAL_LOAD_DWORD %0.sub2_sub3, 32, 0, 0, implicit $exec
+# GCN-NEXT: }
+# GCN-NEXT: GLOBAL_STORE_DWORDX4 %0.sub0_sub1, %1, 0, 0, 0, implicit $exec
+
+---
+name: subreg_full
+tracksRegLiveness: true
+registers:
+  - { id: 0, class: vreg_128 }
+  - { id: 1, class: vreg_128 }
+body: |
+  bb.0:
+    %0 = IMPLICIT_DEF
+    undef %1.sub0:vreg_128 = GLOBAL_LOAD_DWORD %0.sub0_sub1, 0, 0, 0, implicit $exec
+    %1.sub1:vreg_128 = GLOBAL_LOAD_DWORD %0.sub1_sub2, 16, 0, 0, implicit $exec
+    %1.sub2:vreg_128 = GLOBAL_LOAD_DWORD %0.sub2_sub3, 32, 0, 0, implicit $exec
+    %1.sub3:vreg_128 = GLOBAL_LOAD_DWORD %0.sub2_sub3, 32, 0, 0, implicit $exec
+    GLOBAL_STORE_DWORDX4 %0.sub0_sub1, %1, 0, 0, 0, implicit $exec
+...
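+
+# subreg_part: only sub0, sub1 and sub3 of %1 are written, so the bundle
+# header is expected to cover the defined lanes with the minimal set of
+# subregister defs (sub0_sub1 and sub3) instead of the whole register.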
+
+# GCN-LABEL: {{^}}name: subreg_part{{$}}
+# GCN: undef early-clobber %1.sub0_sub1:vreg_128, undef early-clobber %1.sub3:vreg_128 = BUNDLE %0, implicit $exec {
+# GCN-NEXT: undef %1.sub0:vreg_128 = GLOBAL_LOAD_DWORD %0.sub0_sub1, 0, 0, 0, implicit $exec
+# GCN-NEXT: internal %1.sub1:vreg_128 = GLOBAL_LOAD_DWORD %0.sub1_sub2, 16, 0, 0, implicit $exec
+# GCN-NEXT: internal %1.sub3:vreg_128 = GLOBAL_LOAD_DWORD %0.sub2_sub3, 32, 0, 0, implicit $exec
+# GCN-NEXT: }
+# GCN-NEXT: GLOBAL_STORE_DWORDX4 %0.sub0_sub1, %1, 0, 0, 0, implicit $exec
+
+---
+name: subreg_part
+tracksRegLiveness: true
+registers:
+  - { id: 0, class: vreg_128 }
+  - { id: 1, class: vreg_128 }
+body: |
+  bb.0:
+    %0 = IMPLICIT_DEF
+    undef %1.sub0:vreg_128 = GLOBAL_LOAD_DWORD %0.sub0_sub1, 0, 0, 0, implicit $exec
+    %1.sub1:vreg_128 = GLOBAL_LOAD_DWORD %0.sub1_sub2, 16, 0, 0, implicit $exec
+    %1.sub3:vreg_128 = GLOBAL_LOAD_DWORD %0.sub2_sub3, 32, 0, 0, implicit $exec
+    GLOBAL_STORE_DWORDX4 %0.sub0_sub1, %1, 0, 0, 0, implicit $exec
+...
+
+# GCN-LABEL: {{^}}name: dead{{$}}
+# GCN: dead early-clobber %2:vreg_128, dead early-clobber %4:vreg_128, dead early-clobber %1:vreg_128, dead early-clobber %3:vreg_128 = BUNDLE %0, implicit $exec {
+# GCN-NEXT: dead %1:vreg_128 = GLOBAL_LOAD_DWORDX4 %0, 0, 0, 0, implicit $exec
+# GCN-NEXT: %2:vreg_128 = GLOBAL_LOAD_DWORDX4 %0, 16, 0, 0, implicit $exec
+# GCN-NEXT: dead %3:vreg_128 = GLOBAL_LOAD_DWORDX4 %0, 32, 0, 0, implicit $exec
+# GCN-NEXT: dead %4:vreg_128 = GLOBAL_LOAD_DWORDX4 %0, 48, 0, 0, implicit $exec
+# GCN-NEXT: }
+
+---
+name: dead
+tracksRegLiveness: true
+registers:
+  - { id: 0, class: vreg_64 }
+  - { id: 1, class: vreg_128 }
+  - { id: 2, class: vreg_128 }
+  - { id: 3, class: vreg_128 }
+  - { id: 4, class: vreg_128 }
+body: |
+  bb.0:
+    %0 = IMPLICIT_DEF
+    dead %1:vreg_128 = GLOBAL_LOAD_DWORDX4 %0, 0, 0, 0, implicit $exec
+    dead %2:vreg_128 = GLOBAL_LOAD_DWORDX4 %0, 16, 0, 0, implicit $exec
+    dead %3:vreg_128 = GLOBAL_LOAD_DWORDX4 %0, 32, 0, 0, implicit $exec
+    dead %4:vreg_128 = GLOBAL_LOAD_DWORDX4 %0, 48, 0, 0, implicit $exec
+...
+
+# GCN-LABEL: {{^}}name: subreg_dead{{$}}
+# GCN: early-clobber %1:vreg_64 = BUNDLE %0, implicit $exec {
+# GCN-NEXT: %1.sub0:vreg_64 = GLOBAL_LOAD_DWORD %0, 16, 0, 0, implicit $exec
+# GCN-NEXT: dead %1.sub1:vreg_64 = GLOBAL_LOAD_DWORD %0, 32, 0, 0, implicit $exec
+# GCN-NEXT: }
+# GCN-NEXT: GLOBAL_STORE_DWORD %0, %1.sub0, 0, 0, 0, implicit $exec
+
+---
+name: subreg_dead
+tracksRegLiveness: true
+registers:
+  - { id: 0, class: vreg_64 }
+  - { id: 1, class: vreg_64 }
+body: |
+  bb.0:
+    %0 = IMPLICIT_DEF
+    undef %1.sub0:vreg_64 = GLOBAL_LOAD_DWORD %0, 16, 0, 0, implicit $exec
+    dead %1.sub1:vreg_64 = GLOBAL_LOAD_DWORD %0, 32, 0, 0, implicit $exec
+    GLOBAL_STORE_DWORD %0, %1.sub0, 0, 0, 0, implicit $exec
+...
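+
+# kill: the kill flag on %1 must not survive on the bundle header, because
+# every use stays live until the end of the clause.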
+
+# GCN-LABEL: {{^}}name: kill{{$}}
+# GCN: early-clobber %2:vreg_128, early-clobber %3:vreg_128 = BUNDLE %0, %1, implicit $exec {
+# GCN-NEXT: %2:vreg_128 = GLOBAL_LOAD_DWORDX4 %0, 0, 0, 0, implicit $exec
+# GCN-NEXT: %3:vreg_128 = GLOBAL_LOAD_DWORDX4 %1, 16, 0, 0, implicit $exec
+# GCN-NEXT: }
+
+---
+name: kill
+tracksRegLiveness: true
+registers:
+  - { id: 0, class: vreg_64 }
+  - { id: 1, class: vreg_64 }
+  - { id: 2, class: vreg_128 }
+  - { id: 3, class: vreg_128 }
+body: |
+  bb.0:
+    %0 = IMPLICIT_DEF
+    %1 = IMPLICIT_DEF
+    %2:vreg_128 = GLOBAL_LOAD_DWORDX4 %0, 0, 0, 0, implicit $exec
+    %3:vreg_128 = GLOBAL_LOAD_DWORDX4 killed %1, 16, 0, 0, implicit $exec
+    GLOBAL_STORE_DWORDX4 %0, %2, 0, 0, 0, implicit $exec
+    GLOBAL_STORE_DWORDX4 %0, %3, 16, 0, 0, implicit $exec
+...
+
+# GCN-LABEL: {{^}}name: indirect{{$}}
+# GCN: %1:vreg_64 = GLOBAL_LOAD_DWORDX2 %0, 0, 0, 0, implicit $exec
+# GCN-NEXT: early-clobber %2:vreg_128, early-clobber %3:vreg_128 = BUNDLE %1, implicit $exec {
+# GCN-NEXT: %2:vreg_128 = GLOBAL_LOAD_DWORDX4 %1, 0, 0, 0, implicit $exec
+# GCN-NEXT: %3:vreg_128 = GLOBAL_LOAD_DWORDX4 %1, 16, 0, 0, implicit $exec
+# GCN-NEXT: }
+
+---
+name: indirect
+tracksRegLiveness: true
+registers:
+  - { id: 0, class: vreg_64 }
+  - { id: 1, class: vreg_64 }
+  - { id: 2, class: vreg_128 }
+  - { id: 3, class: vreg_128 }
+body: |
+  bb.0:
+    %0 = IMPLICIT_DEF
+    %1:vreg_64 = GLOBAL_LOAD_DWORDX2 %0, 0, 0, 0, implicit $exec
+    %2:vreg_128 = GLOBAL_LOAD_DWORDX4 %1, 0, 0, 0, implicit $exec
+    %3:vreg_128 = GLOBAL_LOAD_DWORDX4 %1, 16, 0, 0, implicit $exec
+    GLOBAL_STORE_DWORDX4 %0, %2, 0, 0, 0, implicit $exec
+    GLOBAL_STORE_DWORDX4 %0, %3, 16, 0, 0, implicit $exec
+...
+
+# GCN-LABEL: {{^}}name: stack{{$}}
+# GCN: %0:vreg_64 = IMPLICIT_DEF
+# GCN-NEXT: %1:vreg_128 = GLOBAL_LOAD_DWORDX4 %stack.0, 0, 0, 0, implicit $exec
+# GCN-NEXT: %2:vreg_128 = GLOBAL_LOAD_DWORDX4 %stack.0, 16, 0, 0, implicit $exec
+# GCN-NEXT: GLOBAL_STORE_DWORDX4 %0, %1, 0, 0, 0, implicit $exec
+
+---
+name: stack
+tracksRegLiveness: true
+registers:
+  - { id: 0, class: vreg_64 }
+  - { id: 1, class: vreg_128 }
+  - { id: 2, class: vreg_128 }
+stack:
+  - { id: 0, type: default, offset: 0, size: 64, alignment: 8 }
+body: |
+  bb.0:
+    %0 = IMPLICIT_DEF
+    %1:vreg_128 = GLOBAL_LOAD_DWORDX4 %stack.0, 0, 0, 0, implicit $exec
+    %2:vreg_128 = GLOBAL_LOAD_DWORDX4 %stack.0, 16, 0, 0, implicit $exec
+    GLOBAL_STORE_DWORDX4 %0, %1, 0, 0, 0, implicit $exec
+    GLOBAL_STORE_DWORDX4 %0, %2, 16, 0, 0, implicit $exec
+...
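+
+# overflow_counter: 17 loads, but a clause is capped at 15 instructions
+# (see amdgpu-max-memory-clause above), so the last two loads are expected
+# to form a second bundle.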
+
+# GCN-LABEL: {{^}}name: overflow_counter{{$}}
+# GCN: dead early-clobber %7:vgpr_32, dead early-clobber %14:vgpr_32, dead early-clobber %2:vgpr_32, dead early-clobber %9:vgpr_32, dead early-clobber %4:vgpr_32, dead early-clobber %11:vgpr_32, dead early-clobber %6:vgpr_32, dead early-clobber %13:vgpr_32, dead early-clobber %1:vgpr_32, dead early-clobber %8:vgpr_32, dead early-clobber %15:vgpr_32, dead early-clobber %3:vgpr_32, dead early-clobber %10:vgpr_32, dead early-clobber %5:vgpr_32, dead early-clobber %12:vgpr_32 = BUNDLE %0, implicit $exec {
+# GCN-NEXT: dead %1:vgpr_32 = GLOBAL_LOAD_DWORD %0, 0, 0, 0, implicit $exec
+# GCN-NEXT: dead %2:vgpr_32 = GLOBAL_LOAD_DWORD %0, 4, 0, 0, implicit $exec
+# GCN-NEXT: dead %3:vgpr_32 = GLOBAL_LOAD_DWORD %0, 8, 0, 0, implicit $exec
+# GCN-NEXT: dead %4:vgpr_32 = GLOBAL_LOAD_DWORD %0, 12, 0, 0, implicit $exec
+# GCN-NEXT: dead %5:vgpr_32 = GLOBAL_LOAD_DWORD %0, 16, 0, 0, implicit $exec
+# GCN-NEXT: dead %6:vgpr_32 = GLOBAL_LOAD_DWORD %0, 20, 0, 0, implicit $exec
+# GCN-NEXT: dead %7:vgpr_32 = GLOBAL_LOAD_DWORD %0, 24, 0, 0, implicit $exec
+# GCN-NEXT: dead %8:vgpr_32 = GLOBAL_LOAD_DWORD %0, 28, 0, 0, implicit $exec
+# GCN-NEXT: dead %9:vgpr_32 = GLOBAL_LOAD_DWORD %0, 32, 0, 0, implicit $exec
+# GCN-NEXT: dead %10:vgpr_32 = GLOBAL_LOAD_DWORD %0, 36, 0, 0, implicit $exec
+# GCN-NEXT: dead %11:vgpr_32 = GLOBAL_LOAD_DWORD %0, 40, 0, 0, implicit $exec
+# GCN-NEXT: dead %12:vgpr_32 = GLOBAL_LOAD_DWORD %0, 44, 0, 0, implicit $exec
+# GCN-NEXT: dead %13:vgpr_32 = GLOBAL_LOAD_DWORD %0, 48, 0, 0, implicit $exec
+# GCN-NEXT: dead %14:vgpr_32 = GLOBAL_LOAD_DWORD %0, 52, 0, 0, implicit $exec
+# GCN-NEXT: dead %15:vgpr_32 = GLOBAL_LOAD_DWORD %0, 56, 0, 0, implicit $exec
+# GCN-NEXT: }
+# GCN-NEXT: dead early-clobber %16:vgpr_32, dead early-clobber %17:vgpr_32 = BUNDLE %0, implicit $exec {
+# GCN-NEXT: dead %16:vgpr_32 = GLOBAL_LOAD_DWORD %0, 60, 0, 0, implicit $exec
+# GCN-NEXT: dead %17:vgpr_32 = GLOBAL_LOAD_DWORD %0, 64, 0, 0, implicit $exec
+# GCN-NEXT: }
+
+---
+name: overflow_counter
+tracksRegLiveness: true
+registers:
+  - { id: 0, class: vreg_64 }
+  - { id: 1, class: vgpr_32 }
+  - { id: 2, class: vgpr_32 }
+  - { id: 3, class: vgpr_32 }
+  - { id: 4, class: vgpr_32 }
+  - { id: 5, class: vgpr_32 }
+  - { id: 6, class: vgpr_32 }
+  - { id: 7, class: vgpr_32 }
+  - { id: 8, class: vgpr_32 }
+  - { id: 9, class: vgpr_32 }
+  - { id: 10, class: vgpr_32 }
+  - { id: 11, class: vgpr_32 }
+  - { id: 12, class: vgpr_32 }
+  - { id: 13, class: vgpr_32 }
+  - { id: 14, class: vgpr_32 }
+  - { id: 15, class: vgpr_32 }
+  - { id: 16, class: vgpr_32 }
+  - { id: 17, class: vgpr_32 }
+body: |
+  bb.0:
+    %0 = IMPLICIT_DEF
+    %1:vgpr_32 = GLOBAL_LOAD_DWORD %0, 0, 0, 0, implicit $exec
+    %2:vgpr_32 = GLOBAL_LOAD_DWORD %0, 4, 0, 0, implicit $exec
+    %3:vgpr_32 = GLOBAL_LOAD_DWORD %0, 8, 0, 0, implicit $exec
+    %4:vgpr_32 = GLOBAL_LOAD_DWORD %0, 12, 0, 0, implicit $exec
+    %5:vgpr_32 = GLOBAL_LOAD_DWORD %0, 16, 0, 0, implicit $exec
+    %6:vgpr_32 = GLOBAL_LOAD_DWORD %0, 20, 0, 0, implicit $exec
+    %7:vgpr_32 = GLOBAL_LOAD_DWORD %0, 24, 0, 0, implicit $exec
+    %8:vgpr_32 = GLOBAL_LOAD_DWORD %0, 28, 0, 0, implicit $exec
+    %9:vgpr_32 = GLOBAL_LOAD_DWORD %0, 32, 0, 0, implicit $exec
+    %10:vgpr_32 = GLOBAL_LOAD_DWORD %0, 36, 0, 0, implicit $exec
+    %11:vgpr_32 = GLOBAL_LOAD_DWORD %0, 40, 0, 0, implicit $exec
+    %12:vgpr_32 = GLOBAL_LOAD_DWORD %0, 44, 0, 0, implicit $exec
+    %13:vgpr_32 = GLOBAL_LOAD_DWORD %0, 48, 0, 0, implicit $exec
+    %14:vgpr_32 = GLOBAL_LOAD_DWORD %0, 52, 0, 0, implicit $exec
+    %15:vgpr_32 = GLOBAL_LOAD_DWORD %0, 56, 0, 0, implicit $exec
+    %16:vgpr_32 = GLOBAL_LOAD_DWORD %0, 60, 0, 0, implicit $exec
+    %17:vgpr_32 = GLOBAL_LOAD_DWORD %0, 64, 0, 0, implicit $exec
+...
+
+# GCN-LABEL: {{^}}name: reg_pressure{{$}}
+# GCN: dead early-clobber %2:vreg_128, dead early-clobber %4:vreg_128, dead early-clobber %1:vreg_128, dead early-clobber %3:vreg_128, dead early-clobber %5:vreg_128 = BUNDLE %0, implicit $exec {
+# GCN-NEXT: dead %1:vreg_128 = GLOBAL_LOAD_DWORDX4 %0, 0, 0, 0, implicit $exec
+# GCN-NEXT: dead %2:vreg_128 = GLOBAL_LOAD_DWORDX4 %0, 16, 0, 0, implicit $exec
+# GCN-NEXT: dead %3:vreg_128 = GLOBAL_LOAD_DWORDX4 %0, 32, 0, 0, implicit $exec
+# GCN-NEXT: dead %4:vreg_128 = GLOBAL_LOAD_DWORDX4 %0, 48, 0, 0, implicit $exec
+# GCN-NEXT: dead %5:vreg_128 = GLOBAL_LOAD_DWORDX4 %0, 64, 0, 0, implicit $exec
+# GCN-NEXT: }
+# GCN-NEXT: dead early-clobber %7:vreg_128, dead early-clobber %6:vreg_128 = BUNDLE %0, implicit $exec {
+# GCN-NEXT: dead %6:vreg_128 = GLOBAL_LOAD_DWORDX4 %0, 80, 0, 0, implicit $exec
+# GCN-NEXT: dead %7:vreg_128 = GLOBAL_LOAD_DWORDX4 %0, 96, 0, 0, implicit $exec
+# GCN-NEXT: }
+
+---
+name: reg_pressure
+tracksRegLiveness: true
+registers:
+  - { id: 0, class: vreg_64 }
+  - { id: 1, class: vreg_128 }
+  - { id: 2, class: vreg_128 }
+  - { id: 3, class: vreg_128 }
+  - { id: 4, class: vreg_128 }
+  - { id: 5, class: vreg_128 }
+  - { id: 6, class: vreg_128 }
+  - { id: 7, class: vreg_128 }
+body: |
+  bb.0:
+    %0 = IMPLICIT_DEF
+    %1:vreg_128 = GLOBAL_LOAD_DWORDX4 %0, 0, 0, 0, implicit $exec
+    %2:vreg_128 = GLOBAL_LOAD_DWORDX4 %0, 16, 0, 0, implicit $exec
+    %3:vreg_128 = GLOBAL_LOAD_DWORDX4 %0, 32, 0, 0, implicit $exec
+    %4:vreg_128 = GLOBAL_LOAD_DWORDX4 %0, 48, 0, 0, implicit $exec
+    %5:vreg_128 = GLOBAL_LOAD_DWORDX4 %0, 64, 0, 0, implicit $exec
+    %6:vreg_128 = GLOBAL_LOAD_DWORDX4 %0, 80, 0, 0, implicit $exec
+    %7:vreg_128 = GLOBAL_LOAD_DWORDX4 %0, 96, 0, 0, implicit $exec
+...
+
+# GCN-LABEL: {{^}}name: image_clause{{$}}
+# GCN: early-clobber %4:vreg_128, early-clobber %3:vreg_128, early-clobber %5:vreg_128 = BUNDLE %0, undef %2:sreg_128, %1, implicit $exec {
+# GCN-NEXT: %3:vreg_128 = IMAGE_SAMPLE_LZ_V4_V2 %0, %1, undef %2:sreg_128, 15, 0, 0, 0, 0, 0, 0, 0, implicit $exec
+# GCN-NEXT: %4:vreg_128 = IMAGE_SAMPLE_LZ_V4_V2 %0, %1, undef %2:sreg_128, 15, 0, 0, 0, 0, 0, 0, 0, implicit $exec
+# GCN-NEXT: %5:vreg_128 = IMAGE_SAMPLE_LZ_V4_V2 %0, %1, undef %2:sreg_128, 15, 0, 0, 0, 0, 0, 0, 0, implicit $exec
+# GCN-NEXT: }
+# GCN-NEXT: IMAGE_STORE_V4_V2 %3, %0, %1, 15, -1, 0, 0, 0, 0, 0, 0, implicit $exec
+
+---
+name: image_clause
+tracksRegLiveness: true
+registers:
+  - { id: 0, class: vreg_64 }
+  - { id: 1, class: sreg_256 }
+  - { id: 2, class: sreg_128 }
+  - { id: 3, class: vreg_128 }
+  - { id: 4, class: vreg_128 }
+  - { id: 5, class: vreg_128 }
+body: |
+  bb.0:
+    %0 = IMPLICIT_DEF
+    %1 = IMPLICIT_DEF
+    %3:vreg_128 = IMAGE_SAMPLE_LZ_V4_V2 %0, %1, undef %2:sreg_128, 15, 0, 0, 0, 0, 0, 0, 0, implicit $exec
+    %4:vreg_128 = IMAGE_SAMPLE_LZ_V4_V2 %0, %1, undef %2:sreg_128, 15, 0, 0, 0, 0, 0, 0, 0, implicit $exec
+    %5:vreg_128 = IMAGE_SAMPLE_LZ_V4_V2 %0, %1, undef %2:sreg_128, 15, 0, 0, 0, 0, 0, 0, 0, implicit $exec
+    IMAGE_STORE_V4_V2 %3, %0, %1, 15, -1, 0, 0, 0, 0, 0, 0, implicit $exec
+    IMAGE_STORE_V4_V2 %4, %0, %1, 15, -1, 0, 0, 0, 0, 0, 0, implicit $exec
+    IMAGE_STORE_V4_V2 %5, %0, %1, 15, -1, 0, 0, 0, 0, 0, 0, implicit $exec
+...
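+
+# mixed_clause: IMAGE, GLOBAL and BUFFER loads are all VMEM instructions,
+# so a single clause may mix them.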
+
+# GCN-LABEL: {{^}}name: mixed_clause{{$}}
+# GCN: dead early-clobber %4:vreg_128, dead early-clobber %3:vreg_128, dead early-clobber %5:vgpr_32 = BUNDLE %0, %2, %1, implicit $exec {
+# GCN-NEXT: dead %3:vreg_128 = IMAGE_SAMPLE_LZ_V4_V2 %0, %1, %2, 15, 0, 0, 0, 0, 0, 0, 0, implicit $exec
+# GCN-NEXT: dead %4:vreg_128 = GLOBAL_LOAD_DWORDX4 %0, 0, 0, 0, implicit $exec
+# GCN-NEXT: dead %5:vgpr_32 = BUFFER_LOAD_DWORD_ADDR64 %0, %2, 0, 0, 0, 0, 0, implicit $exec
+# GCN-NEXT: }
+
+---
+name: mixed_clause
+tracksRegLiveness: true
+registers:
+  - { id: 0, class: vreg_64 }
+  - { id: 1, class: sreg_256 }
+  - { id: 2, class: sreg_128 }
+  - { id: 3, class: vreg_128 }
+  - { id: 4, class: vreg_128 }
+  - { id: 5, class: vgpr_32 }
+body: |
+  bb.0:
+    %0 = IMPLICIT_DEF
+    %1 = IMPLICIT_DEF
+    %2 = IMPLICIT_DEF
+    %3:vreg_128 = IMAGE_SAMPLE_LZ_V4_V2 %0, %1, %2, 15, 0, 0, 0, 0, 0, 0, 0, implicit $exec
+    %4:vreg_128 = GLOBAL_LOAD_DWORDX4 %0, 0, 0, 0, implicit $exec
+    %5:vgpr_32 = BUFFER_LOAD_DWORD_ADDR64 %0, %2, 0, 0, 0, 0, 0, implicit $exec
+...
+
+# GCN-LABEL: {{^}}name: atomic{{$}}
+# GCN: %1:vgpr_32 = IMPLICIT_DEF
+# GCN-NEXT: dead %2:vgpr_32 = FLAT_ATOMIC_ADD_RTN %0, %1, 0, 0, implicit $exec, implicit $flat_scr
+# GCN-NEXT: dead %3:vgpr_32 = FLAT_ATOMIC_ADD_RTN %0, %1, 0, 0, implicit $exec, implicit $flat_scr
+# GCN-NEXT: FLAT_ATOMIC_ADD %0, %1, 0, 0, implicit $exec, implicit $flat_scr
+# GCN-NEXT: FLAT_ATOMIC_ADD %0, %1, 0, 0, implicit $exec, implicit $flat_scr
+# GCN-NEXT: S_ENDPGM
+
+---
+name: atomic
+tracksRegLiveness: true
+registers:
+  - { id: 0, class: vreg_64 }
+  - { id: 1, class: vgpr_32 }
+  - { id: 2, class: vgpr_32 }
+  - { id: 3, class: vgpr_32 }
+body: |
+  bb.0:
+    %0 = IMPLICIT_DEF
+    %1 = IMPLICIT_DEF
+    %2:vgpr_32 = FLAT_ATOMIC_ADD_RTN %0, %1, 0, 0, implicit $exec, implicit $flat_scr
+    %3:vgpr_32 = FLAT_ATOMIC_ADD_RTN %0, %1, 0, 0, implicit $exec, implicit $flat_scr
+    FLAT_ATOMIC_ADD %0, %1, 0, 0, implicit $exec, implicit $flat_scr
+    FLAT_ATOMIC_ADD %0, %1, 0, 0, implicit $exec, implicit $flat_scr
+    S_ENDPGM
+...