Index: lib/Target/AMDGPU/AMDGPUAnnotateUniformValues.cpp
===================================================================
--- lib/Target/AMDGPU/AMDGPUAnnotateUniformValues.cpp
+++ lib/Target/AMDGPU/AMDGPUAnnotateUniformValues.cpp
@@ -16,10 +16,13 @@
 #include "AMDGPU.h"
 #include "AMDGPUIntrinsicInfo.h"
 #include "llvm/Analysis/DivergenceAnalysis.h"
+#include "llvm/Analysis/MemoryDependenceAnalysis.h"
+#include "llvm/Analysis/LoopInfo.h"
 #include "llvm/IR/InstVisitor.h"
 #include "llvm/IR/IRBuilder.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/raw_ostream.h"
+#include "llvm/ADT/SetVector.h"
 
 #define DEBUG_TYPE "amdgpu-annotate-uniform"
 
@@ -30,6 +33,9 @@
 class AMDGPUAnnotateUniformValues : public FunctionPass,
                        public InstVisitor<AMDGPUAnnotateUniformValues> {
   DivergenceAnalysis *DA;
+  MemoryDependenceResults * MDR;
+  LoopInfo * LI;
+  DenseMap<Value *, GetElementPtrInst *> noClobberClones;
 
 public:
   static char ID;
@@ -42,12 +48,14 @@
   }
   void getAnalysisUsage(AnalysisUsage &AU) const override {
     AU.addRequired<DivergenceAnalysis>();
+    AU.addRequired<MemoryDependenceWrapperPass>();
+    AU.addRequired<LoopInfoWrapperPass>();
     AU.setPreservesAll();
   }
 
   void visitBranchInst(BranchInst &I);
   void visitLoadInst(LoadInst &I);
-
+  bool isClobberedInFunction(LoadInst * load);
 };
 
 } // End anonymous namespace
@@ -55,6 +63,8 @@
 INITIALIZE_PASS_BEGIN(AMDGPUAnnotateUniformValues, DEBUG_TYPE,
                       "Add AMDGPU uniform metadata", false, false)
 INITIALIZE_PASS_DEPENDENCY(DivergenceAnalysis)
+INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
 INITIALIZE_PASS_END(AMDGPUAnnotateUniformValues, DEBUG_TYPE,
                     "Add AMDGPU uniform metadata", false, false)
 
@@ -63,6 +73,51 @@
 static void setUniformMetadata(Instruction *I) {
   I->setMetadata("amdgpu.uniform", MDNode::get(I->getContext(), {}));
 }
+static void setNoClobberMetadata(Instruction *I) {
+  I->setMetadata("amdgpu.noclobber", MDNode::get(I->getContext(), {}));
+}
+
+static void DFS(const BasicBlock * root, SetVector<const BasicBlock *> & set) {
+  for (auto I : predecessors(root))
+  {
+    if (set.insert(I))
+      DFS(I, set);
+  }
+}
+
+bool AMDGPUAnnotateUniformValues::isClobberedInFunction(LoadInst * load) {
+  /*
+  1. get the Loop for load->getParent();
+  2. if it exists, collect all the BBs from the outermost
+     loop and check them for writes. If not, start the DFS over all preds.
+  3. Start the DFS over all preds from the outermost loop header.
+  */
+  SetVector<const BasicBlock *> checklist;
+  const BasicBlock * Start = load->getParent();
+  checklist.insert(Start);
+  const Value *Ptr = load->getPointerOperand();
+  const Loop * L = LI->getLoopFor(Start);
+  if (L) {
+    const Loop * P = L;
+    do {
+      L = P;
+      P = P->getParentLoop();
+    } while (P);
+    checklist.insert(L->block_begin(), L->block_end());
+    Start = L->getHeader();
+  }
+
+  DFS(Start, checklist);
+  for (const BasicBlock *BB : checklist) {
+    BasicBlock * block = const_cast<BasicBlock *>(BB);
+    BasicBlock::iterator startIt = (block == load->getParent()) ?
+      BasicBlock::iterator(load) : block->end();
+    if (MDR->getSimplePointerDependencyFrom(MemoryLocation(Ptr),
+        true, startIt, block, load).isClobber())
+      return true;
+  }
+  return false;
+}
 
 void AMDGPUAnnotateUniformValues::visitBranchInst(BranchInst &I) {
   if (I.isUnconditional())
@@ -79,10 +134,45 @@
   Value *Ptr = I.getPointerOperand();
   if (!DA->isUniform(Ptr))
     return;
+  auto isGlobalLoad = [](LoadInst & load)->bool {
+    return load.getPointerAddressSpace()
+      == AMDGPUAS::GLOBAL_ADDRESS;
+  };
+  auto isInKernel = [](Instruction & I)->bool {
+    return I.getParent()->getParent()->getCallingConv()
+      == CallingConv::AMDGPU_KERNEL;
+  };
+  // We only track memory dependencies up to the Function boundary;
+  // a FunctionPass cannot look beyond it. Thus we can only guarantee
+  // that memory is not clobbered for memory operations that live in
+  // a kernel.
+  bool notClobbered = !isClobberedInFunction(&I) && isInKernel(I);
+  Instruction *PtrI = dyn_cast<Instruction>(Ptr);
+  if (!PtrI && notClobbered && isGlobalLoad(I)) {
+    if (isa<Argument>(Ptr) || isa<GlobalValue>(Ptr)) {
+      // Look up an existing GEP.
+      if (noClobberClones.count(Ptr)) {
+        PtrI = noClobberClones[Ptr];
+      }
+      else {
+        // Create a GEP of the Value.
+        Function * F = I.getParent()->getParent();
+        Value * idx = Constant::getIntegerValue(
+          Type::getInt32Ty(Ptr->getContext()), APInt(64, 0));
+        // Insert the GEP at the entry block so that it dominates all uses.
+        PtrI = GetElementPtrInst::Create(
+          Ptr->getType()->getPointerElementType(), Ptr,
+          ArrayRef<Value *>(idx), Twine(""), F->getEntryBlock().getFirstNonPHI());
+      }
+      I.replaceUsesOfWith(Ptr, PtrI);
+    }
+  }
 
-  if (Instruction *PtrI = dyn_cast<Instruction>(Ptr))
+  if (PtrI) {
     setUniformMetadata(PtrI);
-
+    if (notClobbered)
+      setNoClobberMetadata(PtrI);
+  }
 }
 
 bool AMDGPUAnnotateUniformValues::doInitialization(Module &M) {
@@ -93,9 +183,11 @@
   if (skipFunction(F))
     return false;
 
-  DA = &getAnalysis<DivergenceAnalysis>();
+  DA  = &getAnalysis<DivergenceAnalysis>();
+  MDR = &getAnalysis<MemoryDependenceWrapperPass>().getMemDep();
+  LI  = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
   visit(F);
-
+  noClobberClones.clear();
   return true;
 }
Index: lib/Target/AMDGPU/AMDGPUSubtarget.h
===================================================================
--- lib/Target/AMDGPU/AMDGPUSubtarget.h
+++ lib/Target/AMDGPU/AMDGPUSubtarget.h
@@ -113,6 +113,7 @@
   bool CFALUBug;
   bool HasVertexCache;
   short TexVTXClauseSize;
+  bool ScalarizeGlobal;
 
   // Dummy feature to use for assembler in tablegen.
   bool FeatureDisable;
 
@@ -397,6 +398,9 @@
     return alignTo(FlatWorkGroupSize, getWavefrontSize()) / getWavefrontSize();
   }
 
+  void setScalarizeGlobalBehavior(bool b) { ScalarizeGlobal = b; }
+  bool getScalarizeGlobalBehavior() const { return ScalarizeGlobal; }
+
   /// \returns Subtarget's default pair of minimum/maximum flat work group sizes
   /// for function \p F, or minimum/maximum flat work group sizes explicitly
   /// requested using "amdgpu-flat-work-group-size" attribute attached to
Index: lib/Target/AMDGPU/AMDGPUSubtarget.cpp
===================================================================
--- lib/Target/AMDGPU/AMDGPUSubtarget.cpp
+++ lib/Target/AMDGPU/AMDGPUSubtarget.cpp
@@ -119,6 +119,7 @@
     CFALUBug(false),
     HasVertexCache(false),
     TexVTXClauseSize(0),
+    ScalarizeGlobal(false),
 
     FeatureDisable(false),
     InstrItins(getInstrItineraryForCPU(GPU)),
Index: lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
===================================================================
--- lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
+++ lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
@@ -61,6 +61,14 @@
   cl::init(true),
   cl::Hidden);
 
+// Option to control global loads scalarization
+static cl::opt<bool> ScalarizeGlobal(
+  "amdgpu-scalarize-global-loads",
+  cl::desc("Enable global load scalarization"),
+  cl::init(false),
+  cl::Hidden);
+
+
 extern "C" void LLVMInitializeAMDGPUTarget() {
   // Register the target
   RegisterTargetMachine<R600TargetMachine> X(getTheAMDGPUTarget());
@@ -260,6 +268,8 @@
     I->setGISelAccessor(*GISel);
   }
 
+  I->setScalarizeGlobalBehavior(ScalarizeGlobal);
+
   return I.get();
 }
Index: lib/Target/AMDGPU/SIISelLowering.h
===================================================================
--- lib/Target/AMDGPU/SIISelLowering.h
+++ lib/Target/AMDGPU/SIISelLowering.h
@@ -116,6 +116,7 @@
                       MachineFunction &MF) const override;
 
   bool isMemOpUniform(const SDNode *N) const;
+  bool isMemOpHasNoClobberedMemOperand(const SDNode *N) const;
   bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override;
 
   TargetLoweringBase::LegalizeTypeAction
Index: lib/Target/AMDGPU/SIISelLowering.cpp
===================================================================
--- lib/Target/AMDGPU/SIISelLowering.cpp
+++ lib/Target/AMDGPU/SIISelLowering.cpp
@@ -524,6 +524,13 @@
   return isFlatGlobalAddrSpace(SrcAS) && isFlatGlobalAddrSpace(DestAS);
 }
 
+bool SITargetLowering::isMemOpHasNoClobberedMemOperand(const SDNode *N) const {
+  const MemSDNode *MemNode = cast<MemSDNode>(N);
+  const Value *Ptr = MemNode->getMemOperand()->getValue();
+  const Instruction *I = dyn_cast<Instruction>(Ptr);
+  return I && I->getMetadata("amdgpu.noclobber");
+}
+
 bool SITargetLowering::isMemOpUniform(const SDNode *N) const {
   const MemSDNode *MemNode = cast<MemSDNode>(N);
   const Value *Ptr = MemNode->getMemOperand()->getValue();
@@ -2605,11 +2612,20 @@
     if (isMemOpUniform(Load))
       return SDValue();
     // Non-uniform loads will be selected to MUBUF instructions, so they
-    // have the same legalization requires ments as global and private
+    // have the same legalization requirements as global and private
     // loads.
     //
     LLVM_FALLTHROUGH;
   case AMDGPUAS::GLOBAL_ADDRESS:
+  {
+    if (isMemOpUniform(Load) && isMemOpHasNoClobberedMemOperand(Load))
+      return SDValue();
+    // Non-uniform loads will be selected to MUBUF instructions, so they
+    // have the same legalization requirements as global and private
+    // loads.
+    //
+  }
+    LLVM_FALLTHROUGH;
   case AMDGPUAS::FLAT_ADDRESS:
     if (NumElements > 4)
       return SplitVectorLoad(Op, DAG);
Index: lib/Target/AMDGPU/SMInstructions.td
===================================================================
--- lib/Target/AMDGPU/SMInstructions.td
+++ lib/Target/AMDGPU/SMInstructions.td
@@ -217,11 +217,15 @@
 // Scalar Memory Patterns
 //===----------------------------------------------------------------------===//
 
+
 def smrd_load : PatFrag <(ops node:$ptr), (load node:$ptr), [{
   auto Ld = cast<LoadSDNode>(N);
   return Ld->getAlignment() >= 4 &&
-    Ld->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS &&
-    static_cast<const SITargetLowering *>(getTargetLowering())->isMemOpUniform(N);
+    ((Ld->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS &&
+      static_cast<const SITargetLowering *>(getTargetLowering())->isMemOpUniform(N)) ||
+     (Subtarget->getScalarizeGlobalBehavior() && Ld->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS &&
+      static_cast<const SITargetLowering *>(getTargetLowering())->isMemOpUniform(N) &&
+      static_cast<const SITargetLowering *>(getTargetLowering())->isMemOpHasNoClobberedMemOperand(N)));
 }]>;
 
 def SMRDImm         : ComplexPattern<i64, 2, "SelectSMRDImm">;
Index: test/CodeGen/AMDGPU/global_smrd.ll
===================================================================
--- /dev/null
+++ test/CodeGen/AMDGPU/global_smrd.ll
@@ -0,0 +1,126 @@
+; RUN: llc -O2 -mtriple amdgcn--amdhsa -mcpu=fiji -amdgpu-scalarize-global-loads=true -verify-machineinstrs < %s | FileCheck %s
+
+; uniform loads
+; CHECK-LABEL: @uniform_load
+; CHECK: s_load_dwordx4
+; CHECK-NOT: flat_load_dword
+
+define amdgpu_kernel void @uniform_load(float addrspace(1)* %arg, float addrspace(1)* %arg1) {
+bb:
+  %tmp2 = load float, float addrspace(1)* %arg, align 4, !tbaa !8
+  %tmp3 = fadd float %tmp2, 0.000000e+00
+  %tmp4 = getelementptr inbounds float, float addrspace(1)* %arg, i64 1
+  %tmp5 = load float, float addrspace(1)* %tmp4, align 4, !tbaa !8
+  %tmp6 = fadd float %tmp3, %tmp5
+  %tmp7 = getelementptr inbounds float, float addrspace(1)* %arg, i64 2
+  %tmp8 = load float, float addrspace(1)* %tmp7, align 4, !tbaa !8
+  %tmp9 = fadd float %tmp6, %tmp8
+  %tmp10 = getelementptr inbounds float, float addrspace(1)* %arg, i64 3
+  %tmp11 = load float, float addrspace(1)* %tmp10, align 4, !tbaa !8
+  %tmp12 = fadd float %tmp9, %tmp11
+  %tmp13 = getelementptr inbounds float, float addrspace(1)* %arg1
+  store float %tmp12, float addrspace(1)* %tmp13, align 4, !tbaa !8
+  ret void
+}
+
+; non-uniform loads
+; CHECK-LABEL: @non-uniform_load
+; CHECK: flat_load_dword
+; CHECK-NOT: s_load_dwordx4
+
+define amdgpu_kernel void @non-uniform_load(float addrspace(1)* %arg, float addrspace(1)* %arg1) #0 {
+bb:
+  %tmp = call i32 @llvm.amdgcn.workitem.id.x() #1
+  %tmp2 = getelementptr inbounds float, float addrspace(1)* %arg, i32 %tmp
+  %tmp3 = load float, float addrspace(1)* %tmp2, align 4, !tbaa !8
+  %tmp4 = fadd float %tmp3, 0.000000e+00
+  %tmp5 = add i32 %tmp, 1
+  %tmp6 = getelementptr inbounds float, float addrspace(1)* %arg, i32 %tmp5
+  %tmp7 = load float, float addrspace(1)* %tmp6, align 4, !tbaa !8
+  %tmp8 = fadd float %tmp4, %tmp7
+  %tmp9 = add i32 %tmp, 2
+  %tmp10 = getelementptr inbounds float, float addrspace(1)* %arg, i32 %tmp9
+  %tmp11 = load float, float addrspace(1)* %tmp10, align 4, !tbaa !8
+  %tmp12 = fadd float %tmp8, %tmp11
+  %tmp13 = add i32 %tmp, 3
+  %tmp14 = getelementptr inbounds float, float addrspace(1)* %arg, i32 %tmp13
+  %tmp15 = load float, float addrspace(1)* %tmp14, align 4, !tbaa !8
+  %tmp16 = fadd float %tmp12, %tmp15
+  %tmp17 = getelementptr inbounds float, float addrspace(1)* %arg1, i32 %tmp
+  store float %tmp16, float addrspace(1)* %tmp17, align 4, !tbaa !8
+  ret void
+}
+
+
+; uniform load dominated by no-alias store - scalarize
+; CHECK-LABEL: @no_memdep_alias_arg
+; CHECK: flat_store_dword
+; CHECK: s_load_dword [[SVAL:s[0-9]+]]
+; CHECK: v_mov_b32_e32 [[VVAL:v[0-9]+]], [[SVAL]]
+; CHECK: flat_store_dword v[{{[0-9]+:[0-9]+}}], [[VVAL]]
+
+define amdgpu_kernel void @no_memdep_alias_arg(i32 addrspace(1)* noalias %in, i32 addrspace(1)* %out0, i32 addrspace(1)* %out1) {
+  store i32 0, i32 addrspace(1)* %out0
+  %val = load i32, i32 addrspace(1)* %in
+  store i32 %val, i32 addrspace(1)* %out1
+  ret void
+}
+
+; uniform load dominated by alias store - vector
+; CHECK-LABEL: {{^}}memdep:
+; CHECK: flat_store_dword
+; CHECK: flat_load_dword [[VVAL:v[0-9]+]]
+; CHECK: flat_store_dword v[{{[0-9]+:[0-9]+}}], [[VVAL]]
+define amdgpu_kernel void @memdep(i32 addrspace(1)* %in, i32 addrspace(1)* %out0, i32 addrspace(1)* %out1) {
+  store i32 0, i32 addrspace(1)* %out0
+  %val = load i32, i32 addrspace(1)* %in
+  store i32 %val, i32 addrspace(1)* %out1
+  ret void
+}
+
+; uniform load from global array
+; CHECK-LABEL: @global_array
+; CHECK: s_load_dwordx2 [[A_ADDR:s\[[0-9]+:[0-9]+\]]]
+; CHECK: s_load_dwordx2 [[A_ADDR1:s\[[0-9]+:[0-9]+\]]], [[A_ADDR]], 0x0
+; CHECK: s_load_dword [[SVAL:s[0-9]+]], [[A_ADDR1]], 0x0
+; CHECK: v_mov_b32_e32 [[VVAL:v[0-9]+]], [[SVAL]]
+; CHECK: flat_store_dword v[{{[0-9]+:[0-9]+}}], [[VVAL]]
+
+@A = common local_unnamed_addr addrspace(1) global i32 addrspace(1)* null, align 4
+
+define amdgpu_kernel void @global_array(i32 addrspace(1)* nocapture %out) {
+entry:
+  %0 = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(1)* @A, align 4
+  %1 = load i32, i32 addrspace(1)* %0, align 4
+  store i32 %1, i32 addrspace(1)* %out, align 4
+  ret void
+}
+
+
+; uniform load from global array dominated by alias store
+; CHECK-LABEL: @global_array_alias_store
+; CHECK: flat_store_dword
+; CHECK: v_mov_b32_e32 v[[ADDR_LO:[0-9]+]], s{{[0-9]+}}
+; CHECK: v_mov_b32_e32 v[[ADDR_HI:[0-9]+]], s{{[0-9]+}}
+; CHECK: flat_load_dwordx2 [[A_ADDR:v\[[0-9]+:[0-9]+\]]], v{{\[}}[[ADDR_LO]]:[[ADDR_HI]]{{\]}}
+; CHECK: flat_load_dword [[VVAL:v[0-9]+]], [[A_ADDR]]
+; CHECK: flat_store_dword v[{{[0-9]+:[0-9]+}}], [[VVAL]]
+define amdgpu_kernel void @global_array_alias_store(i32 addrspace(1)* nocapture %out, i32 %n) {
+entry:
+  %gep = getelementptr i32, i32 addrspace(1) * %out, i32 %n
+  store i32 12, i32 addrspace(1) * %gep
+  %0 = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(1)* @A, align 4
+  %1 = load i32, i32 addrspace(1)* %0, align 4
+  store i32 %1, i32 addrspace(1)* %out, align 4
+  ret void
+}
+
+
+declare i32 @llvm.amdgcn.workitem.id.x() #1
+
+attributes #1 = { nounwind readnone }
+
+!8 = !{!9, !9, i64 0}
+!9 = !{!"float", !10, i64 0}
+!10 = !{!"omnipotent char", !11, i64 0}
+!11 = !{!"Simple C/C++ TBAA"}
Index: test/CodeGen/AMDGPU/global_smrd_cfg.ll
===================================================================
--- /dev/null
+++ test/CodeGen/AMDGPU/global_smrd_cfg.ll
@@ -0,0 +1,91 @@
+; RUN: llc -O2 -mtriple amdgcn--amdhsa -mcpu=fiji -amdgpu-scalarize-global-loads=true -verify-machineinstrs < %s | FileCheck %s
+
+; CHECK-LABEL: %entry
+; CHECK: s_load_dwordx2 s{{\[}}[[REG_IN_LO:[0-9]+]]:[[REG_IN_HI:[0-9]+]]{{\]}}, s[4:5], 0x0
+; CHECK: s_load_dwordx2 s{{\[}}[[REG_OUT_LO:[0-9]+]]:[[REG_OUT_HI:[0-9]+]]{{\]}}, s[4:5], 0x8
+
+; CHECK-LABEL: %for.body.preheader
+; CHECK-DAG: v_mov_b32_e32 v[[ADDR_IN_LO:[0-9]+]], s[[REG_IN_LO]]
+; CHECK-DAG: v_mov_b32_e32 v[[ADDR_IN_HI:[0-9]+]], s[[REG_IN_HI]]
+; CHECK-DAG: v_mov_b32_e32 v[[ADDR_OUT_LO:[0-9]+]], s[[REG_OUT_LO]]
+; CHECK-DAG: v_mov_b32_e32 v[[ADDR_OUT_HI:[0-9]+]], s[[REG_OUT_HI]]
+
+; #####################################################################
+
+; CHECK-LABEL: %for.body
+
+; The load from %in inside the loop body has an aliasing store.
+
+; CHECK: flat_load_dword
+
+; CHECK-LABEL: %if.then
+; CHECK: flat_store_dword v{{\[}}[[ADDR_OUT_LO]]:[[ADDR_OUT_HI]]{{\]}}
+
+; #####################################################################
+
+; CHECK-LABEL: %if.end
+
+; The load from %in has an aliasing store in the loop.
+
+; CHECK: flat_load_dword v{{[0-9]+}}, v{{\[}}[[ADDR_IN_LO]]:[[ADDR_IN_HI]]{{\]}}
+
+; #####################################################################
+
+; CHECK: v_readfirstlane_b32 s[[SREG_LO:[0-9]+]], v[[ADDR_OUT_LO]]
+; CHECK: v_readfirstlane_b32 s[[SREG_HI:[0-9]+]], v[[ADDR_OUT_HI]]
+
+; The load from %out has no aliasing store in the loop - out[i+1] never aliases out[i].
+
+; CHECK: s_load_dword s{{[0-9]+}}, s{{\[}}[[SREG_LO]]:[[SREG_HI]]{{\]}}, 0x4
+
+define amdgpu_kernel void @cfg(i32 addrspace(1)* nocapture readonly %in, i32 addrspace(1)* nocapture %out, i32 %n) {
+entry:
+  %idxprom = sext i32 %n to i64
+  %arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %in, i64 %idxprom
+  %0 = load i32, i32 addrspace(1)* %arrayidx, align 4, !tbaa !7
+  %cmp30 = icmp sgt i32 %0, 0
+  br i1 %cmp30, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:                               ; preds = %entry
+  br label %for.body
+
+for.cond.cleanup.loopexit:                        ; preds = %if.end
+  br label %for.cond.cleanup
+
+for.cond.cleanup:                                 ; preds = %for.cond.cleanup.loopexit, %entry
+  %sum.0.lcssa = phi i32 [ 0, %entry ], [ %add11, %for.cond.cleanup.loopexit ]
+  %arrayidx13 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 %idxprom
+  store i32 %sum.0.lcssa, i32 addrspace(1)* %arrayidx13, align 4, !tbaa !7
+  ret void
+
+for.body:                                         ; preds = %if.end, %for.body.preheader
+  %sum.032 = phi i32 [ %add11, %if.end ], [ 0, %for.body.preheader ]
+  %i.031 = phi i32 [ %add, %if.end ], [ 0, %for.body.preheader ]
+  %rem = srem i32 %i.031, %n
+  %idxprom1 = sext i32 %rem to i64
+  %arrayidx2 = getelementptr inbounds i32, i32 addrspace(1)* %in, i64 %idxprom1
+  %1 = load i32, i32 addrspace(1)* %arrayidx2, align 4, !tbaa !7
+  %cmp3 = icmp sgt i32 %1, 100
+  %idxprom4 = sext i32 %i.031 to i64
+  br i1 %cmp3, label %if.then, label %if.end
+
+if.then:                                          ; preds = %for.body
+  %arrayidx5 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 %idxprom4
+  store i32 0, i32 addrspace(1)* %arrayidx5, align 4, !tbaa !7
+  br label %if.end
+
+if.end:                                           ; preds = %if.then, %for.body
+  %arrayidx7 = getelementptr inbounds i32, i32 addrspace(1)* %in, i64 %idxprom4
+  %2 = load i32, i32 addrspace(1)* %arrayidx7, align 4, !tbaa !7
+  %add = add nuw nsw i32 %i.031, 1
+  %idxprom8 = sext i32 %add to i64
+  %arrayidx9 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 %idxprom8
+  %3 = load i32, i32 addrspace(1)* %arrayidx9, align 4, !tbaa !7
+  %add10 = add i32 %2, %sum.032
+  %add11 = add i32 %add10, %3
+  %exitcond = icmp eq i32 %add, %0
+  br i1 %exitcond, label %for.cond.cleanup.loopexit, label %for.body
+}
+
+!7 = !{!8, !8, i64 0}
+!8 = !{!"int", !9, i64 0}
+!9 = !{!"omnipotent char", !10, i64 0}
+!10 = !{!"Simple C/C++ TBAA"}
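
For context (not part of the patch), the IR shape the annotation pass is expected to produce looks roughly like the sketch below: when a uniform global load in an AMDGPU_KERNEL function is not clobbered on any path from the kernel entry, the pointer-producing instruction ends up tagged with both markers that isMemOpUniform() and the new isMemOpHasNoClobberedMemOperand() / smrd_load predicate inspect, so the load can be selected as s_load under -amdgpu-scalarize-global-loads. The kernel name, operand values, and metadata numbering here are illustrative only; the metadata names come from the patch.

; Hypothetical post-"amdgpu-annotate-uniform" IR (illustrative):
define amdgpu_kernel void @noclobber_example(float addrspace(1)* %arg, float addrspace(1)* %out) {
entry:
  ; The GEP feeding the load carries both annotations added by the pass.
  %gep = getelementptr inbounds float, float addrspace(1)* %arg, i64 1, !amdgpu.uniform !0, !amdgpu.noclobber !0
  %val = load float, float addrspace(1)* %gep, align 4
  store float %val, float addrspace(1)* %out, align 4
  ret void
}

!0 = !{}

A pointer that only carries !amdgpu.uniform (for example, because an aliasing store precedes the load somewhere in the kernel) does not satisfy the extended smrd_load predicate, so such loads keep being selected as flat/MUBUF loads even with the option enabled.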