Index: lib/Target/AMDGPU/AMDGPUAnnotateKernelFeatures.cpp
===================================================================
--- lib/Target/AMDGPU/AMDGPUAnnotateKernelFeatures.cpp
+++ lib/Target/AMDGPU/AMDGPUAnnotateKernelFeatures.cpp
@@ -132,8 +132,16 @@
 //
 // TODO: We should not add the attributes if the known compile time workgroup
 // size is 1 for y/z.
-static StringRef intrinsicToAttrName(Intrinsic::ID ID, bool &IsQueuePtr) {
+static StringRef intrinsicToAttrName(Intrinsic::ID ID,
+                                     bool &NonKernelOnly,
+                                     bool &IsQueuePtr) {
   switch (ID) {
+  case Intrinsic::amdgcn_workitem_id_x:
+    NonKernelOnly = true;
+    return "amdgpu-work-item-id-x";
+  case Intrinsic::amdgcn_workgroup_id_x:
+    NonKernelOnly = true;
+    return "amdgpu-work-group-id-x";
   case Intrinsic::amdgcn_workitem_id_y:
   case Intrinsic::r600_read_tidig_y:
     return "amdgpu-work-item-id-y";
@@ -174,12 +182,12 @@
 
 static void copyFeaturesToFunction(Function &Parent, const Function &Callee,
                                    bool &NeedQueuePtr) {
-
+  // X ids unnecessarily propagated to kernels.
   static const StringRef AttrNames[] = {
-    // .x omitted
+    { "amdgpu-work-item-id-x" },
     { "amdgpu-work-item-id-y" },
     { "amdgpu-work-item-id-z" },
-    // .x omitted
+    { "amdgpu-work-group-id-x" },
     { "amdgpu-work-group-id-y" },
     { "amdgpu-work-group-id-z" },
     { "amdgpu-dispatch-ptr" },
@@ -200,6 +208,7 @@
 
   bool Changed = false;
   bool NeedQueuePtr = false;
+  bool IsFunc = !AMDGPU::isEntryFunctionCC(F.getCallingConv());
 
   for (BasicBlock &BB : F) {
     for (Instruction &I : BB) {
@@ -216,8 +225,10 @@
         copyFeaturesToFunction(F, *Callee, NeedQueuePtr);
         Changed = true;
       } else {
-        StringRef AttrName = intrinsicToAttrName(IID, NeedQueuePtr);
-        if (!AttrName.empty()) {
+        bool NonKernelOnly = false;
+        StringRef AttrName = intrinsicToAttrName(IID,
+                                                 NonKernelOnly, NeedQueuePtr);
+        if (!AttrName.empty() && (IsFunc || !NonKernelOnly)) {
           F.addFnAttr(AttrName);
           Changed = true;
         }
Index: lib/Target/AMDGPU/SIMachineFunctionInfo.h
===================================================================
--- lib/Target/AMDGPU/SIMachineFunctionInfo.h
+++ lib/Target/AMDGPU/SIMachineFunctionInfo.h
@@ -119,6 +119,11 @@
   unsigned WorkGroupInfoSystemSGPR;
   unsigned PrivateSegmentWaveByteOffsetSystemSGPR;
 
+  // VGPR inputs. These are always v0, v1 and v2 for entry functions.
+  unsigned WorkItemIDXVGPR;
+  unsigned WorkItemIDYVGPR;
+  unsigned WorkItemIDZVGPR;
+
   // Graphics info.
   unsigned PSInputAddr;
   unsigned PSInputEnable;
Index: lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
===================================================================
--- lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
+++ lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
@@ -42,6 +42,9 @@
     WorkGroupIDZSystemSGPR(AMDGPU::NoRegister),
     WorkGroupInfoSystemSGPR(AMDGPU::NoRegister),
     PrivateSegmentWaveByteOffsetSystemSGPR(AMDGPU::NoRegister),
+    WorkItemIDXVGPR(AMDGPU::NoRegister),
+    WorkItemIDYVGPR(AMDGPU::NoRegister),
+    WorkItemIDZVGPR(AMDGPU::NoRegister),
     PSInputAddr(0),
     PSInputEnable(0),
     ReturnsVoid(true),
@@ -87,7 +90,6 @@
     ScratchWaveOffsetReg = AMDGPU::SGPR4;
     FrameOffsetReg = AMDGPU::SGPR5;
     StackPtrOffsetReg = AMDGPU::SGPR32;
-    return;
   }
 
   CallingConv::ID CC = F->getCallingConv();
@@ -106,12 +108,18 @@
     WorkItemIDY = true;
     WorkItemIDZ = true;
   } else {
+    if (F->hasFnAttribute("amdgpu-work-group-id-x"))
+      WorkGroupIDX = true;
+
     if (F->hasFnAttribute("amdgpu-work-group-id-y"))
       WorkGroupIDY = true;
 
     if (F->hasFnAttribute("amdgpu-work-group-id-z"))
       WorkGroupIDZ = true;
 
+    if (F->hasFnAttribute("amdgpu-work-item-id-x"))
+      WorkItemIDX = true;
+
     if (F->hasFnAttribute("amdgpu-work-item-id-y"))
       WorkItemIDY = true;
 
@@ -119,10 +127,12 @@
       WorkItemIDZ = true;
   }
 
-  // X, XY, and XYZ are the only supported combinations, so make sure Y is
-  // enabled if Z is.
-  if (WorkItemIDZ)
-    WorkItemIDY = true;
+  if (isEntryFunction()) {
+    // X, XY, and XYZ are the only supported combinations, so make sure Y is
+    // enabled if Z is.
+    if (WorkItemIDZ)
+      WorkItemIDY = true;
+  }
 
   const MachineFrameInfo &FrameInfo = MF.getFrameInfo();
   bool MaySpill = ST.isVGPRSpillingEnabled(*F);
Index: test/CodeGen/AMDGPU/annotate-kernel-features-hsa-call.ll
===================================================================
--- test/CodeGen/AMDGPU/annotate-kernel-features-hsa-call.ll
+++ test/CodeGen/AMDGPU/annotate-kernel-features-hsa-call.ll
@@ -1,8 +1,10 @@
 ; RUN: opt -S -mtriple=amdgcn-unknown-amdhsa -amdgpu-annotate-kernel-features %s | FileCheck -check-prefix=HSA %s
 
+declare i32 @llvm.amdgcn.workgroup.id.x() #0
 declare i32 @llvm.amdgcn.workgroup.id.y() #0
 declare i32 @llvm.amdgcn.workgroup.id.z() #0
 
+declare i32 @llvm.amdgcn.workitem.id.x() #0
 declare i32 @llvm.amdgcn.workitem.id.y() #0
 declare i32 @llvm.amdgcn.workitem.id.z() #0
 
@@ -11,56 +13,70 @@
 declare i8 addrspace(2)* @llvm.amdgcn.kernarg.segment.ptr() #0
 declare i64 @llvm.amdgcn.dispatch.id() #0
 
-; HSA: define void @use_workitem_id_y() #1 {
+; HSA: define void @use_workitem_id_x() #1 {
+define void @use_workitem_id_x() #1 {
+  %val = call i32 @llvm.amdgcn.workitem.id.x()
+  store volatile i32 %val, i32 addrspace(1)* undef
+  ret void
+}
+
+; HSA: define void @use_workitem_id_y() #2 {
 define void @use_workitem_id_y() #1 {
   %val = call i32 @llvm.amdgcn.workitem.id.y()
   store volatile i32 %val, i32 addrspace(1)* undef
   ret void
 }
 
-; HSA: define void @use_workitem_id_z() #2 {
+; HSA: define void @use_workitem_id_z() #3 {
 define void @use_workitem_id_z() #1 {
   %val = call i32 @llvm.amdgcn.workitem.id.z()
   store volatile i32 %val, i32 addrspace(1)* undef
   ret void
 }
 
-; HSA: define void @use_workgroup_id_y() #3 {
+; HSA: define void @use_workgroup_id_x() #4 {
+define void @use_workgroup_id_x() #1 {
+  %val = call i32 @llvm.amdgcn.workgroup.id.x()
+  store volatile i32 %val, i32 addrspace(1)* undef
+  ret void
+}
+
+; HSA: define void @use_workgroup_id_y() #5 {
 define void @use_workgroup_id_y() #1 {
   %val = call i32 @llvm.amdgcn.workgroup.id.y()
   store volatile i32 %val, i32 addrspace(1)* undef
   ret void
 }
 
-; HSA: define void @use_workgroup_id_z() #4 {
+; HSA: define void @use_workgroup_id_z() #6 {
 define void @use_workgroup_id_z() #1 {
   %val = call i32 @llvm.amdgcn.workgroup.id.z()
   store volatile i32 %val, i32 addrspace(1)* undef
   ret void
 }
 
-; HSA: define void @use_dispatch_ptr() #5 {
+; HSA: define void @use_dispatch_ptr() #7 {
 define void @use_dispatch_ptr() #1 {
   %dispatch.ptr = call i8 addrspace(2)* @llvm.amdgcn.dispatch.ptr()
   store volatile i8 addrspace(2)* %dispatch.ptr, i8 addrspace(2)* addrspace(1)* undef
   ret void
 }
 
-; HSA: define void @use_queue_ptr() #6 {
+; HSA: define void @use_queue_ptr() #8 {
 define void @use_queue_ptr() #1 {
   %queue.ptr = call i8 addrspace(2)* @llvm.amdgcn.queue.ptr()
   store volatile i8 addrspace(2)* %queue.ptr, i8 addrspace(2)* addrspace(1)* undef
   ret void
 }
 
-; HSA: define void @use_dispatch_id() #7 {
+; HSA: define void @use_dispatch_id() #9 {
 define void @use_dispatch_id() #1 {
   %val = call i64 @llvm.amdgcn.dispatch.id()
   store volatile i64 %val, i64 addrspace(1)* undef
   ret void
 }
 
-; HSA: define void @use_workgroup_id_y_workgroup_id_z() #8 {
+; HSA: define void @use_workgroup_id_y_workgroup_id_z() #10 {
 define void @use_workgroup_id_y_workgroup_id_z() #1 {
   %val0 = call i32 @llvm.amdgcn.workgroup.id.y()
   %val1 = call i32 @llvm.amdgcn.workgroup.id.z()
@@ -69,67 +85,91 @@
   ret void
 }
 
-; HSA: define void @func_indirect_use_workitem_id_y() #1 {
+; HSA: define void @func_indirect_use_workitem_id_x() #1 {
+define void @func_indirect_use_workitem_id_x() #1 {
+  call void @use_workitem_id_x()
+  ret void
+}
+
+; HSA: define void @kernel_indirect_use_workitem_id_x() #1 {
+define void @kernel_indirect_use_workitem_id_x() #1 {
+  call void @use_workitem_id_x()
+  ret void
+}
+
+; HSA: define void @func_indirect_use_workitem_id_y() #2 {
 define void @func_indirect_use_workitem_id_y() #1 {
   call void @use_workitem_id_y()
   ret void
 }
 
-; HSA: define void @func_indirect_use_workitem_id_z() #2 {
+; HSA: define void @func_indirect_use_workitem_id_z() #3 {
 define void @func_indirect_use_workitem_id_z() #1 {
   call void @use_workitem_id_z()
   ret void
 }
 
-; HSA: define void @func_indirect_use_workgroup_id_y() #3 {
+; HSA: define void @func_indirect_use_workgroup_id_x() #4 {
+define void @func_indirect_use_workgroup_id_x() #1 {
+  call void @use_workgroup_id_x()
+  ret void
+}
+
+; HSA: define void @kernel_indirect_use_workgroup_id_x() #4 {
+define void @kernel_indirect_use_workgroup_id_x() #1 {
+  call void @use_workgroup_id_x()
+  ret void
+}
+
+; HSA: define void @func_indirect_use_workgroup_id_y() #5 {
 define void @func_indirect_use_workgroup_id_y() #1 {
   call void @use_workgroup_id_y()
   ret void
 }
 
-; HSA: define void @func_indirect_use_workgroup_id_z() #4 {
+; HSA: define void @func_indirect_use_workgroup_id_z() #6 {
 define void @func_indirect_use_workgroup_id_z() #1 {
   call void @use_workgroup_id_z()
   ret void
 }
 
-; HSA: define void @func_indirect_indirect_use_workgroup_id_y() #3 {
+; HSA: define void @func_indirect_indirect_use_workgroup_id_y() #5 {
 define void @func_indirect_indirect_use_workgroup_id_y() #1 {
   call void @func_indirect_use_workgroup_id_y()
   ret void
 }
 
-; HSA: define void @indirect_x2_use_workgroup_id_y() #3 {
+; HSA: define void @indirect_x2_use_workgroup_id_y() #5 {
 define void @indirect_x2_use_workgroup_id_y() #1 {
   call void @func_indirect_indirect_use_workgroup_id_y()
   ret void
 }
 
-; HSA: define void @func_indirect_use_dispatch_ptr() #5 {
+; HSA: define void @func_indirect_use_dispatch_ptr() #7 {
 define void @func_indirect_use_dispatch_ptr() #1 {
   call void @use_dispatch_ptr()
   ret void
 }
 
-; HSA: define void @func_indirect_use_queue_ptr() #6 {
+; HSA: define void @func_indirect_use_queue_ptr() #8 {
 define void @func_indirect_use_queue_ptr() #1 {
   call void @use_queue_ptr()
   ret void
 }
 
-; HSA: define void @func_indirect_use_dispatch_id() #7 {
+; HSA: define void @func_indirect_use_dispatch_id() #9 {
 define void @func_indirect_use_dispatch_id() #1 {
   call void @use_dispatch_id()
   ret void
 }
 
-; HSA: define void @func_indirect_use_workgroup_id_y_workgroup_id_z() #9 {
+; HSA: define void @func_indirect_use_workgroup_id_y_workgroup_id_z() #11 {
 define void @func_indirect_use_workgroup_id_y_workgroup_id_z() #1 {
   call void @func_indirect_use_workgroup_id_y_workgroup_id_z()
   ret void
 }
 
-; HSA: define void @recursive_use_workitem_id_y() #1 {
+; HSA: define void @recursive_use_workitem_id_y() #2 {
 define void @recursive_use_workitem_id_y() #1 {
   %val = call i32 @llvm.amdgcn.workitem.id.y()
   store volatile i32 %val, i32 addrspace(1)* undef
@@ -137,27 +177,27 @@
   ret void
 }
 
-; HSA: define void @call_recursive_use_workitem_id_y() #1 {
+; HSA: define void @call_recursive_use_workitem_id_y() #2 {
 define void @call_recursive_use_workitem_id_y() #1 {
   call void @recursive_use_workitem_id_y()
   ret void
 }
 
-; HSA: define void @use_group_to_flat_addrspacecast(i32 addrspace(3)* %ptr) #6 {
+; HSA: define void @use_group_to_flat_addrspacecast(i32 addrspace(3)* %ptr) #8 {
 define void @use_group_to_flat_addrspacecast(i32 addrspace(3)* %ptr) #1 {
   %stof = addrspacecast i32 addrspace(3)* %ptr to i32 addrspace(4)*
   store volatile i32 0, i32 addrspace(4)* %stof
   ret void
 }
 
-; HSA: define void @use_group_to_flat_addrspacecast_gfx9(i32 addrspace(3)* %ptr) #10 {
+; HSA: define void @use_group_to_flat_addrspacecast_gfx9(i32 addrspace(3)* %ptr) #12 {
 define void @use_group_to_flat_addrspacecast_gfx9(i32 addrspace(3)* %ptr) #2 {
   %stof = addrspacecast i32 addrspace(3)* %ptr to i32 addrspace(4)*
   store volatile i32 0, i32 addrspace(4)* %stof
   ret void
 }
 
-; HSA: define void @use_group_to_flat_addrspacecast_queue_ptr_gfx9(i32 addrspace(3)* %ptr) #11 {
+; HSA: define void @use_group_to_flat_addrspacecast_queue_ptr_gfx9(i32 addrspace(3)* %ptr) #13 {
 define void @use_group_to_flat_addrspacecast_queue_ptr_gfx9(i32 addrspace(3)* %ptr) #2 {
   %stof = addrspacecast i32 addrspace(3)* %ptr to i32 addrspace(4)*
   store volatile i32 0, i32 addrspace(4)* %stof
@@ -165,32 +205,32 @@
   ret void
 }
 
-; HSA: define void @indirect_use_group_to_flat_addrspacecast() #6 {
+; HSA: define void @indirect_use_group_to_flat_addrspacecast() #8 {
 define void @indirect_use_group_to_flat_addrspacecast() #1 {
   call void @use_group_to_flat_addrspacecast(i32 addrspace(3)* null)
   ret void
 }
 
-; HSA: define void @indirect_use_group_to_flat_addrspacecast_gfx9() #9 {
+; HSA: define void @indirect_use_group_to_flat_addrspacecast_gfx9() #11 {
 define void @indirect_use_group_to_flat_addrspacecast_gfx9() #1 {
   call void @use_group_to_flat_addrspacecast_gfx9(i32 addrspace(3)* null)
   ret void
 }
 
-; HSA: define void @indirect_use_group_to_flat_addrspacecast_queue_ptr_gfx9() #6 {
+; HSA: define void @indirect_use_group_to_flat_addrspacecast_queue_ptr_gfx9() #8 {
 define void @indirect_use_group_to_flat_addrspacecast_queue_ptr_gfx9() #1 {
   call void @use_group_to_flat_addrspacecast_queue_ptr_gfx9(i32 addrspace(3)* null)
   ret void
 }
 
-; HSA: define void @use_kernarg_segment_ptr() #12 {
+; HSA: define void @use_kernarg_segment_ptr() #14 {
 define void @use_kernarg_segment_ptr() #1 {
   %kernarg.segment.ptr = call i8 addrspace(2)* @llvm.amdgcn.kernarg.segment.ptr()
   store volatile i8 addrspace(2)* %kernarg.segment.ptr, i8 addrspace(2)* addrspace(1)* undef
   ret void
 }
 
-; HSA: define void @func_indirect_use_kernarg_segment_ptr() #12 {
+; HSA: define void @func_indirect_use_kernarg_segment_ptr() #14 {
 define void @func_indirect_use_kernarg_segment_ptr() #1 {
   call void @use_kernarg_segment_ptr()
   ret void
@@ -201,15 +241,17 @@
 attributes #2 = { nounwind "target-cpu"="gfx900" }
 
 ; HSA: attributes #0 = { nounwind readnone speculatable }
-; HSA: attributes #1 = { nounwind "amdgpu-work-item-id-y" "target-cpu"="fiji" }
-; HSA: attributes #2 = { nounwind "amdgpu-work-item-id-z" "target-cpu"="fiji" }
-; HSA: attributes #3 = { nounwind "amdgpu-work-group-id-y" "target-cpu"="fiji" }
-; HSA: attributes #4 = { nounwind "amdgpu-work-group-id-z" "target-cpu"="fiji" }
-; HSA: attributes #5 = { nounwind "amdgpu-dispatch-ptr" "target-cpu"="fiji" }
-; HSA: attributes #6 = { nounwind "amdgpu-queue-ptr" "target-cpu"="fiji" }
-; HSA: attributes #7 = { nounwind "amdgpu-dispatch-id" "target-cpu"="fiji" }
-; HSA: attributes #8 = { nounwind "amdgpu-work-group-id-y" "amdgpu-work-group-id-z" "target-cpu"="fiji" }
-; HSA: attributes #9 = { nounwind "target-cpu"="fiji" }
-; HSA: attributes #10 = { nounwind "target-cpu"="gfx900" }
-; HSA: attributes #11 = { nounwind "amdgpu-queue-ptr" "target-cpu"="gfx900" }
-; HSA: attributes #12 = { nounwind "amdgpu-kernarg-segment-ptr" "target-cpu"="fiji" }
+; HSA: attributes #1 = { nounwind "amdgpu-work-item-id-x" "target-cpu"="fiji" }
+; HSA: attributes #2 = { nounwind "amdgpu-work-item-id-y" "target-cpu"="fiji" }
+; HSA: attributes #3 = { nounwind "amdgpu-work-item-id-z" "target-cpu"="fiji" }
+; HSA: attributes #4 = { nounwind "amdgpu-work-group-id-x" "target-cpu"="fiji" }
+; HSA: attributes #5 = { nounwind "amdgpu-work-group-id-y" "target-cpu"="fiji" }
+; HSA: attributes #6 = { nounwind "amdgpu-work-group-id-z" "target-cpu"="fiji" }
+; HSA: attributes #7 = { nounwind "amdgpu-dispatch-ptr" "target-cpu"="fiji" }
+; HSA: attributes #8 = { nounwind "amdgpu-queue-ptr" "target-cpu"="fiji" }
+; HSA: attributes #9 = { nounwind "amdgpu-dispatch-id" "target-cpu"="fiji" }
+; HSA: attributes #10 = { nounwind "amdgpu-work-group-id-y" "amdgpu-work-group-id-z" "target-cpu"="fiji" }
+; HSA: attributes #11 = { nounwind "target-cpu"="fiji" }
+; HSA: attributes #12 = { nounwind "target-cpu"="gfx900" }
+; HSA: attributes #13 = { nounwind "amdgpu-queue-ptr" "target-cpu"="gfx900" }
+; HSA: attributes #14 = { nounwind "amdgpu-kernarg-segment-ptr" "target-cpu"="fiji" }