diff --git a/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp b/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp
--- a/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp
@@ -611,7 +611,7 @@
 
 static GlobalVariable *
 chooseBestVariableForModuleStrategy(const DataLayout &DL,
-                                    VariableFunctionMap &LDSVars) {
+                                    const VariableFunctionMap &LDSVars) {
   // Find the global variable with the most indirect uses from kernels
 
   struct CandidateTy {
@@ -755,7 +755,7 @@
 
 static void partitionVariablesIntoIndirectStrategies(
     Module &M, LDSUsesInfoTy const &LDSUsesInfo,
-    VariableFunctionMap &LDSToKernelsThatNeedToAccessItIndirectly,
+    const VariableFunctionMap &LDSToKernelsThatNeedToAccessItIndirectly,
     DenseSet<GlobalVariable *> &ModuleScopeVariables,
     DenseSet<GlobalVariable *> &TableLookupVariables,
     DenseSet<GlobalVariable *> &KernelAccessVariables,
@@ -770,7 +770,7 @@
   DenseSet<Function *> const EmptySet;
   DenseSet<Function *> const &HybridModuleRootKernels =
       HybridModuleRoot
-          ? LDSToKernelsThatNeedToAccessItIndirectly[HybridModuleRoot]
+          ? LDSToKernelsThatNeedToAccessItIndirectly.at(HybridModuleRoot)
          : EmptySet;
 
   for (auto &K : LDSToKernelsThatNeedToAccessItIndirectly) {
@@ -910,6 +910,8 @@
 
    // Create a struct for each kernel for the non-module-scope variables.
+    IRBuilder<> Builder(M.getContext());
+
    DenseMap<Function *, LDSVariableReplacement> KernelToReplacement;
    for (Function &Func : M.functions()) {
      if (Func.isDeclaration() || !isKernelLDS(&Func))
@@ -962,6 +964,9 @@
      auto Replacement =
          createLDSVariableReplacement(M, VarName, KernelUsedVariables);
 
+      // In case all uses are from called functions
+      markUsedByKernel(Builder, &Func, Replacement.SGV);
+
      // remove preserves existing codegen
      removeLocalVarsFromUsedLists(M, KernelUsedVariables);
      KernelToReplacement[&Func] = Replacement;
@@ -1155,8 +1160,6 @@
       DenseSet<GlobalVariable *> Vec;
       Vec.insert(GV);
 
-      // TODO: Looks like a latent bug, Replacement may not be marked
-      // UsedByKernel here
       replaceLDSVariablesWithStruct(M, Vec, Replacement, [](Use &U) {
         return isa<Instruction>(U.getUser());
       });
@@ -1171,11 +1174,6 @@
     LLVMContext &Ctx = M.getContext();
     IRBuilder<> Builder(Ctx);
 
-    for (size_t i = 0; i < OrderedKernels.size(); i++) {
-      markUsedByKernel(Builder, OrderedKernels[i],
-                       KernelToReplacement[OrderedKernels[i]].SGV);
-    }
-
     // The order must be consistent between lookup table and accesses to
     // lookup table
     std::vector<GlobalVariable *> TableLookupVariablesOrdered(
diff --git a/llvm/test/CodeGen/AMDGPU/lower-module-lds-all-indirect-accesses.ll b/llvm/test/CodeGen/AMDGPU/lower-module-lds-all-indirect-accesses.ll
new file
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/lower-module-lds-all-indirect-accesses.ll
@@ -0,0 +1,80 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 2
+; RUN: opt -S -mtriple=amdgcn-- -passes=amdgpu-lower-module-lds --amdgpu-lower-module-lds-strategy=hybrid < %s | FileCheck %s
+
+@A = external addrspace(3) global [8 x ptr]
+@B = external addrspace(3) global [0 x i32]
+
+define amdgpu_kernel void @kernel_0() {
+; CHECK-LABEL: define amdgpu_kernel void @kernel_0() !llvm.amdgcn.lds.kernel.id !1 {
+; CHECK-NEXT:    call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.kernel.kernel_0.lds) ]
+; CHECK-NEXT:    call void @call_store_A()
+; CHECK-NEXT:    ret void
+;
+  call void @call_store_A()
+  ret void
+}
+
+define amdgpu_kernel void @kernel_1() {
+; CHECK-LABEL: define amdgpu_kernel void @kernel_1() !llvm.amdgcn.lds.kernel.id !2 {
+; CHECK-NEXT:    call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.kernel_1.dynlds) ]
+; CHECK-NEXT:    [[PTR:%.*]] = call ptr @get_B_ptr()
+; CHECK-NEXT:    ret void
+;
+  %ptr = call ptr @get_B_ptr()
+  ret void
+}
+
+define amdgpu_kernel void @kernel_2() {
+; CHECK-LABEL: define amdgpu_kernel void @kernel_2() !llvm.amdgcn.lds.kernel.id !3 {
+; CHECK-NEXT:    call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.kernel.kernel_2.lds) ]
+; CHECK-NEXT:    call void @store_A()
+; CHECK-NEXT:    ret void
+;
+  call void @store_A()
+  ret void
+}
+
+define amdgpu_kernel void @kernel_3() {
+; CHECK-LABEL: define amdgpu_kernel void @kernel_3() !llvm.amdgcn.lds.kernel.id !4 {
+; CHECK-NEXT:    call void @llvm.donothing() [ "ExplicitUse"(ptr addrspace(3) @llvm.amdgcn.kernel_3.dynlds) ]
+; CHECK-NEXT:    [[PTR:%.*]] = call ptr @get_B_ptr()
+; CHECK-NEXT:    ret void
+;
+  %ptr = call ptr @get_B_ptr()
+  ret void
+}
+
+define private void @call_store_A() {
+; CHECK-LABEL: define private void @call_store_A() {
+; CHECK-NEXT:    call void @store_A()
+; CHECK-NEXT:    ret void
+;
+  call void @store_A()
+  ret void
+}
+
+define private void @store_A() {
+; CHECK-LABEL: define private void @store_A() {
+; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.lds.kernel.id()
+; CHECK-NEXT:    [[A:%.*]] = getelementptr inbounds [4 x [1 x i32]], ptr addrspace(4) @llvm.amdgcn.lds.offset.table, i32 0, i32 [[TMP1]], i32 0
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr addrspace(4) [[A]], align 4
+; CHECK-NEXT:    [[A1:%.*]] = inttoptr i32 [[TMP2]] to ptr addrspace(3)
+; CHECK-NEXT:    [[TMP3:%.*]] = addrspacecast ptr addrspace(3) [[A1]] to ptr
+; CHECK-NEXT:    store ptr [[TMP3]], ptr null, align 8
+; CHECK-NEXT:    ret void
+;
+  store ptr addrspacecast (ptr addrspace(3) @A to ptr), ptr null
+  ret void
+}
+
+define private ptr @get_B_ptr() {
+; CHECK-LABEL: define private ptr @get_B_ptr() {
+; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.amdgcn.lds.kernel.id()
+; CHECK-NEXT:    [[B:%.*]] = getelementptr inbounds [4 x i32], ptr addrspace(4) @llvm.amdgcn.dynlds.offset.table, i32 0, i32 [[TMP1]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr addrspace(4) [[B]], align 4
+; CHECK-NEXT:    [[B1:%.*]] = inttoptr i32 [[TMP2]] to ptr addrspace(3)
+; CHECK-NEXT:    [[TMP3:%.*]] = addrspacecast ptr addrspace(3) [[B1]] to ptr
+; CHECK-NEXT:    ret ptr [[TMP3]]
+;
+  ret ptr addrspacecast (ptr addrspace(3) @B to ptr)
}