Index: include/llvm/IR/IntrinsicsAMDGPU.td
===================================================================
--- include/llvm/IR/IntrinsicsAMDGPU.td
+++ include/llvm/IR/IntrinsicsAMDGPU.td
@@ -216,6 +216,10 @@
   GCCBuiltin<"__builtin_amdgcn_mbcnt_hi">,
   Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
 
+def int_amdgcn_get_groupstaticsize :
+  GCCBuiltin<"__builtin_amdgcn_get_groupstaticsize">,
+  Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>;
+
 //===----------------------------------------------------------------------===//
 // CI+ Intrinsics
 //===----------------------------------------------------------------------===//
Index: lib/Target/AMDGPU/SIISelLowering.cpp
===================================================================
--- lib/Target/AMDGPU/SIISelLowering.cpp
+++ lib/Target/AMDGPU/SIISelLowering.cpp
@@ -1051,10 +1051,22 @@
     MachineInstr * MI, MachineBasicBlock * BB) const {
   switch (MI->getOpcode()) {
-  default:
-    return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB);
   case AMDGPU::BRANCH:
     return BB;
+  case AMDGPU::GET_GROUPSTATICSIZE: {
+    // Lower the pseudo to an s_movk_i32 of the statically-known LDS size
+    // recorded on the machine-function info.
+    const SIInstrInfo *TII =
+        static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo());
+    MachineFunction *MF = BB->getParent();
+    SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
+    DebugLoc DL = MI->getDebugLoc();
+    BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_MOVK_I32))
+        .addOperand(MI->getOperand(0))
+        .addImm(MFI->LDSSize);
+    MI->eraseFromParent();
+    return BB;
+  }
+  default:
+    return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB);
   }
   return BB;
 }
Index: lib/Target/AMDGPU/SIInstructions.td
===================================================================
--- lib/Target/AMDGPU/SIInstructions.td
+++ lib/Target/AMDGPU/SIInstructions.td
@@ -1867,6 +1867,11 @@
 def SGPR_USE : InstSI <(outs),(ins), "", []>;
 }
 
+let usesCustomInserter = 1, SALU = 1 in {
+def GET_GROUPSTATICSIZE : InstSI <(outs SReg_32:$sdst), (ins), "",
+  [(set SReg_32:$sdst, (int_amdgcn_get_groupstaticsize))]>;
+} // End let usesCustomInserter = 1, SALU = 1
+
 // SI pseudo instructions. These are used by the CFG structurizer pass
 // and should be lowered to ISA instructions prior to codegen.
Index: test/CodeGen/AMDGPU/llvm.amdgcn.get.groupstaticsize.ll
===================================================================
--- /dev/null
+++ test/CodeGen/AMDGPU/llvm.amdgcn.get.groupstaticsize.ll
@@ -0,0 +1,26 @@
+; RUN: llc -march=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=kaveri -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=fiji -verify-machineinstrs < %s | FileCheck %s
+
+
+@lds = addrspace(3) global [512 x float] undef, align 4
+
+; CHECK-LABEL: {{^}}get_groupstaticsize_test:
+; CHECK: s_movk_i32 s{{[0-9]+}}, 0x800
+define void @get_groupstaticsize_test(float addrspace(1)* %out, i32 addrspace(1)* %lds_size) #0 {
+  %tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #1
+  %idx.0 = add nsw i32 %tid.x, 64
+  %static_lds_size = call i32 @llvm.amdgcn.get.groupstaticsize() #1
+  store i32 %static_lds_size, i32 addrspace(1)* %lds_size, align 4
+  %arrayidx = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %idx.0
+  %val = load float, float addrspace(3)* %arrayidx, align 4
+  store float %val, float addrspace(1)* %out, align 4
+
+  ret void
+}
+
+declare i32 @llvm.amdgcn.get.groupstaticsize() #1
+declare i32 @llvm.amdgcn.workitem.id.x() #1
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }