Index: include/llvm/IR/IntrinsicsAMDGPU.td
===================================================================
--- include/llvm/IR/IntrinsicsAMDGPU.td
+++ include/llvm/IR/IntrinsicsAMDGPU.td
@@ -1469,6 +1469,18 @@
                LLVMMatchType<0>], // value for the inactive lanes to take
               [IntrNoMem, IntrConvergent]>;
 
+// Return if the given flat pointer points to a local memory address.
+def int_amdgcn_is_shared : GCCBuiltin<"__builtin_amdgcn_is_shared">,
+  Intrinsic<[llvm_i1_ty], [llvm_ptr_ty],
+  [IntrNoMem, IntrSpeculatable, NoCapture<0>]
+>;
+
+// Return if the given flat pointer points to a private memory address.
+def int_amdgcn_is_private : GCCBuiltin<"__builtin_amdgcn_is_private">,
+  Intrinsic<[llvm_i1_ty], [llvm_ptr_ty],
+  [IntrNoMem, IntrSpeculatable, NoCapture<0>]
+>;
+
 //===----------------------------------------------------------------------===//
 // CI+ Intrinsics
 //===----------------------------------------------------------------------===//
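For orientation, a minimal IR sketch of the intended use (not part of the patch; every name here is invented): branch on whether an incoming flat pointer really addresses LDS, and take an addrspacecast fast path when it does.

define void @store_zero(i32* %p) {
  %p8 = bitcast i32* %p to i8*
  %is.lds = call i1 @llvm.amdgcn.is.shared(i8* %p8)
  br i1 %is.lds, label %lds, label %flat

lds:                                        ; known local: use the LDS address space
  %lp = addrspacecast i32* %p to i32 addrspace(3)*
  store i32 0, i32 addrspace(3)* %lp
  br label %done

flat:                                       ; segment unknown: keep the flat store
  store i32 0, i32* %p
  br label %done

done:
  ret void
}

declare i1 @llvm.amdgcn.is.shared(i8* nocapture)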
Index: lib/Target/AMDGPU/AMDGPUAnnotateKernelFeatures.cpp
===================================================================
--- lib/Target/AMDGPU/AMDGPUAnnotateKernelFeatures.cpp
+++ lib/Target/AMDGPU/AMDGPUAnnotateKernelFeatures.cpp
@@ -173,6 +173,9 @@
   case Intrinsic::amdgcn_implicitarg_ptr:
     return "amdgpu-implicitarg-ptr";
   case Intrinsic::amdgcn_queue_ptr:
+  case Intrinsic::amdgcn_is_shared:
+  case Intrinsic::amdgcn_is_private:
+    // TODO: Does not require queue ptr on gfx9+
   case Intrinsic::trap:
   case Intrinsic::debugtrap:
     IsQueuePtr = true;
Index: lib/Target/AMDGPU/AMDGPULegalizerInfo.h
===================================================================
--- lib/Target/AMDGPU/AMDGPULegalizerInfo.h
+++ lib/Target/AMDGPU/AMDGPULegalizerInfo.h
@@ -70,6 +70,8 @@
   bool legalizeImplicitArgPtr(MachineInstr &MI, MachineRegisterInfo &MRI,
                               MachineIRBuilder &B) const;
+  bool legalizeIsAddrSpace(MachineInstr &MI, MachineRegisterInfo &MRI,
+                           MachineIRBuilder &B, unsigned AddrSpace) const;
 
   bool legalizeIntrinsic(MachineInstr &MI, MachineRegisterInfo &MRI,
                          MachineIRBuilder &MIRBuilder) const override;
Index: lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
===================================================================
--- lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -1371,6 +1371,18 @@
   return true;
 }
 
+bool AMDGPULegalizerInfo::legalizeIsAddrSpace(MachineInstr &MI,
+                                              MachineRegisterInfo &MRI,
+                                              MachineIRBuilder &B,
+                                              unsigned AddrSpace) const {
+  B.setInstr(MI);
+  Register ApertureReg = getSegmentAperture(AddrSpace, MRI, B);
+  auto Hi32 = B.buildExtract(LLT::scalar(32), MI.getOperand(2).getReg(), 32);
+  B.buildICmp(ICmpInst::ICMP_EQ, MI.getOperand(0), Hi32, ApertureReg);
+  MI.eraseFromParent();
+  return true;
+}
+
 bool AMDGPULegalizerInfo::legalizeIntrinsic(MachineInstr &MI,
                                             MachineRegisterInfo &MRI,
                                             MachineIRBuilder &B) const {
@@ -1453,6 +1465,10 @@
       AMDGPUFunctionArgInfo::DISPATCH_ID);
   case Intrinsic::amdgcn_fdiv_fast:
     return legalizeFDIVFast(MI, MRI, B);
+  case Intrinsic::amdgcn_is_shared:
+    return legalizeIsAddrSpace(MI, MRI, B, AMDGPUAS::LOCAL_ADDRESS);
+  case Intrinsic::amdgcn_is_private:
+    return legalizeIsAddrSpace(MI, MRI, B, AMDGPUAS::PRIVATE_ADDRESS);
   default:
     return true;
   }
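Both this legalization and the SelectionDAG lowering later in the patch reduce the intrinsic to a single 32-bit compare: a 64-bit flat pointer lies in a segment exactly when its high half equals that segment's aperture base. A rough IR equivalent of the emitted code (a sketch, not part of the patch), with the %aperture argument standing in for the value getSegmentAperture materializes:

define i1 @is_shared_equivalent(i8* %p, i32 %aperture) {
  %int = ptrtoint i8* %p to i64             ; reinterpret the flat pointer
  %hi64 = lshr i64 %int, 32
  %hi = trunc i64 %hi64 to i32              ; high 32 bits identify the segment
  %cmp = icmp eq i32 %hi, %aperture
  ret i1 %cmp
}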
Index: lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
===================================================================
--- lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
+++ lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
@@ -598,6 +598,8 @@
   case Intrinsic::amdgcn_ds_fadd:
   case Intrinsic::amdgcn_ds_fmin:
   case Intrinsic::amdgcn_ds_fmax:
+  case Intrinsic::amdgcn_is_shared:
+  case Intrinsic::amdgcn_is_private:
     OpIndexes.push_back(0);
     return true;
   default:
@@ -607,7 +609,8 @@
 
 bool GCNTTIImpl::rewriteIntrinsicWithAddressSpace(
   IntrinsicInst *II, Value *OldV, Value *NewV) const {
-  switch (II->getIntrinsicID()) {
+  auto IntrID = II->getIntrinsicID();
+  switch (IntrID) {
   case Intrinsic::amdgcn_atomic_inc:
   case Intrinsic::amdgcn_atomic_dec:
   case Intrinsic::amdgcn_ds_fadd:
@@ -625,6 +628,18 @@
     II->setCalledFunction(NewDecl);
     return true;
   }
+  case Intrinsic::amdgcn_is_shared:
+  case Intrinsic::amdgcn_is_private: {
+    unsigned TrueAS = IntrID == Intrinsic::amdgcn_is_shared ?
+      AMDGPUAS::LOCAL_ADDRESS : AMDGPUAS::PRIVATE_ADDRESS;
+    unsigned NewAS = NewV->getType()->getPointerAddressSpace();
+    LLVMContext &Ctx = NewV->getType()->getContext();
+    ConstantInt *NewVal = (TrueAS == NewAS) ?
+      ConstantInt::getTrue(Ctx) : ConstantInt::getFalse(Ctx);
+    II->replaceAllUsesWith(NewVal);
+    II->eraseFromParent();
+    return true;
+  }
   default:
     return false;
   }
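These hooks let InferAddressSpaces treat the pointer argument like a memory operand: once the operand is rewritten to a pointer whose address space is known, the call folds to a constant. A rough before/after sketch; the address-space-id-funcs.ll test at the end of the patch checks exactly this behavior:

; before InferAddressSpaces:
;   %cast = addrspacecast i8 addrspace(3)* %lds to i8*
;   %b = call i1 @llvm.amdgcn.is.shared(i8* %cast)
; after: the call is erased and every use of %b sees the constant
;   i1 true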
Index: lib/Target/AMDGPU/SIISelLowering.cpp
===================================================================
--- lib/Target/AMDGPU/SIISelLowering.cpp
+++ lib/Target/AMDGPU/SIISelLowering.cpp
@@ -6060,6 +6060,19 @@
                                             SIInstrInfo::MO_ABS32_LO);
     return {DAG.getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, GA), 0};
   }
+  case Intrinsic::amdgcn_is_shared:
+  case Intrinsic::amdgcn_is_private: {
+    SDLoc SL(Op);
+    unsigned AS = (IntrinsicID == Intrinsic::amdgcn_is_shared) ?
+      AMDGPUAS::LOCAL_ADDRESS : AMDGPUAS::PRIVATE_ADDRESS;
+    SDValue Aperture = getSegmentAperture(AS, SL, DAG);
+    SDValue SrcVec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32,
+                                 Op.getOperand(1));
+
+    SDValue SrcHi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, SrcVec,
+                                DAG.getConstant(1, SL, MVT::i32));
+    return DAG.getSetCC(SL, MVT::i1, SrcHi, Aperture, ISD::SETEQ);
+  }
   default:
     if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr =
             AMDGPU::getImageDimIntrinsicInfo(IntrinsicID))
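The tests below pin down where getSegmentAperture finds the aperture base on each target. Abridged from the CHECK lines (operands exactly as in the tests, for the shared aperture):

; CI:    s_load_dword s0, s[4:5], 0x10    ; loaded through the queue pointer;
;                                         ; dword offset 0x10 (0x11 for private)
; GFX9:  s_getreg_b32 s0, hwreg(HW_REG_SH_MEM_BASES, 16, 16)
;        s_lshl_b32 s0, s0, 16            ; 16-bit base field -> 32-bit aperture

This is also why the AnnotateKernelFeatures change above requests the queue pointer: pre-gfx9 targets must read the apertures from the queue descriptor in memory, while gfx9+ can read a hardware register instead (hence the TODO there).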
Index: test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.is.private.ll
===================================================================
--- /dev/null
+++ test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.is.private.ll
@@ -0,0 +1,103 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=CI %s
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GFX9 %s
+
+; TODO: Merge with DAG test
+
+define amdgpu_kernel void @is_private_vgpr(i8* addrspace(1)* %ptr.ptr) {
+; CI-LABEL: is_private_vgpr:
+; CI:       ; %bb.0:
+; CI-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
+; CI-NEXT:    v_mul_lo_u32 v2, 0, v0
+; CI-NEXT:    v_mul_lo_u32 v1, 8, v1
+; CI-NEXT:    v_mul_lo_u32 v3, 8, v0
+; CI-NEXT:    s_load_dwordx2 s[0:1], s[6:7], 0x0
+; CI-NEXT:    v_mul_hi_u32 v0, 8, v0
+; CI-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
+; CI-NEXT:    v_add_i32_e32 v1, vcc, v1, v0
+; CI-NEXT:    s_waitcnt lgkmcnt(0)
+; CI-NEXT:    v_add_i32_e32 v0, vcc, s0, v3
+; CI-NEXT:    v_mov_b32_e32 v2, s1
+; CI-NEXT:    v_addc_u32_e32 v1, vcc, v2, v1, vcc
+; CI-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
+; CI-NEXT:    s_load_dword s0, s[4:5], 0x11
+; CI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT:    v_cmp_eq_u32_e32 vcc, s0, v1
+; CI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
+; CI-NEXT:    flat_store_dword v[0:1], v0
+; CI-NEXT:    s_endpgm
+;
+; GFX9-LABEL: is_private_vgpr:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
+; GFX9-NEXT:    v_mul_lo_u32 v2, 0, v0
+; GFX9-NEXT:    v_mul_lo_u32 v1, 8, v1
+; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[6:7], 0x0
+; GFX9-NEXT:    v_mul_hi_u32 v3, 8, v0
+; GFX9-NEXT:    v_mul_lo_u32 v0, 8, v0
+; GFX9-NEXT:    v_add_u32_e32 v1, v2, v1
+; GFX9-NEXT:    v_add_u32_e32 v1, v1, v3
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v2, s1
+; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, s0, v0
+; GFX9-NEXT:    v_addc_co_u32_e32 v1, vcc, v2, v1, vcc
+; GFX9-NEXT:    global_load_dwordx2 v[0:1], v[0:1], off
+; GFX9-NEXT:    s_getreg_b32 s0, hwreg(HW_REG_SH_MEM_BASES, 0, 16)
+; GFX9-NEXT:    s_lshl_b32 s0, s0, 16
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, s0, v1
+; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX9-NEXT:    global_store_dword v[0:1], v0, off
+; GFX9-NEXT:    s_endpgm
+  %id = call i32 @llvm.amdgcn.workitem.id.x()
+  %gep = getelementptr inbounds i8*, i8* addrspace(1)* %ptr.ptr, i32 %id
+  %ptr = load volatile i8*, i8* addrspace(1)* %gep
+  %val = call i1 @llvm.amdgcn.is.private(i8* %ptr)
+  %ext = zext i1 %val to i32
+  store i32 %ext, i32 addrspace(1)* undef
+  ret void
+}
+
+define amdgpu_kernel void @is_private_sgpr(i8* %ptr) {
+; CI-LABEL: is_private_sgpr:
+; CI:       ; %bb.0:
+; CI-NEXT:    s_load_dwordx2 s[0:1], s[6:7], 0x0
+; CI-NEXT:    s_waitcnt lgkmcnt(0)
+; CI-NEXT:    s_load_dword s0, s[4:5], 0x11
+; CI-NEXT:    s_waitcnt lgkmcnt(0)
+; CI-NEXT:    s_cmp_eq_u32 s1, s0
+; CI-NEXT:    s_cbranch_scc0 BB1_2
+; CI-NEXT:  ; %bb.1: ; %bb0
+; CI-NEXT:    v_mov_b32_e32 v0, 0
+; CI-NEXT:    flat_store_dword v[0:1], v0
+; CI-NEXT:  BB1_2: ; %bb1
+; CI-NEXT:    s_endpgm
+;
+; GFX9-LABEL: is_private_sgpr:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[6:7], 0x0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    s_getreg_b32 s0, hwreg(HW_REG_SH_MEM_BASES, 0, 16)
+; GFX9-NEXT:    s_lshl_b32 s0, s0, 16
+; GFX9-NEXT:    s_cmp_eq_u32 s1, s0
+; GFX9-NEXT:    s_cbranch_scc0 BB1_2
+; GFX9-NEXT:  ; %bb.1: ; %bb0
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9-NEXT:    global_store_dword v[0:1], v0, off
+; GFX9-NEXT:  BB1_2: ; %bb1
+; GFX9-NEXT:    s_endpgm
+  %val = call i1 @llvm.amdgcn.is.private(i8* %ptr)
+  br i1 %val, label %bb0, label %bb1
+
+bb0:
+  store volatile i32 0, i32 addrspace(1)* undef
+  br label %bb1
+
+bb1:
+  ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x() #0
+declare i1 @llvm.amdgcn.is.private(i8* nocapture) #0
+
+attributes #0 = { nounwind readnone speculatable }
Index: test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.is.shared.ll
===================================================================
--- /dev/null
+++ test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.is.shared.ll
@@ -0,0 +1,103 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=CI %s
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GFX9 %s
+
+; TODO: Merge with DAG test
+
+define amdgpu_kernel void @is_local_vgpr(i8* addrspace(1)* %ptr.ptr) {
+; CI-LABEL: is_local_vgpr:
+; CI:       ; %bb.0:
+; CI-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
+; CI-NEXT:    v_mul_lo_u32 v2, 0, v0
+; CI-NEXT:    v_mul_lo_u32 v1, 8, v1
+; CI-NEXT:    v_mul_lo_u32 v3, 8, v0
+; CI-NEXT:    s_load_dwordx2 s[0:1], s[6:7], 0x0
+; CI-NEXT:    v_mul_hi_u32 v0, 8, v0
+; CI-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
+; CI-NEXT:    v_add_i32_e32 v1, vcc, v1, v0
+; CI-NEXT:    s_waitcnt lgkmcnt(0)
+; CI-NEXT:    v_add_i32_e32 v0, vcc, s0, v3
+; CI-NEXT:    v_mov_b32_e32 v2, s1
+; CI-NEXT:    v_addc_u32_e32 v1, vcc, v2, v1, vcc
+; CI-NEXT:    flat_load_dwordx2 v[0:1], v[0:1]
+; CI-NEXT:    s_load_dword s0, s[4:5], 0x10
+; CI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; CI-NEXT:    v_cmp_eq_u32_e32 vcc, s0, v1
+; CI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
+; CI-NEXT:    flat_store_dword v[0:1], v0
+; CI-NEXT:    s_endpgm
+;
+; GFX9-LABEL: is_local_vgpr:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
+; GFX9-NEXT:    v_mul_lo_u32 v2, 0, v0
+; GFX9-NEXT:    v_mul_lo_u32 v1, 8, v1
+; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[6:7], 0x0
+; GFX9-NEXT:    v_mul_hi_u32 v3, 8, v0
+; GFX9-NEXT:    v_mul_lo_u32 v0, 8, v0
+; GFX9-NEXT:    v_add_u32_e32 v1, v2, v1
+; GFX9-NEXT:    v_add_u32_e32 v1, v1, v3
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v2, s1
+; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, s0, v0
+; GFX9-NEXT:    v_addc_co_u32_e32 v1, vcc, v2, v1, vcc
+; GFX9-NEXT:    global_load_dwordx2 v[0:1], v[0:1], off
+; GFX9-NEXT:    s_getreg_b32 s0, hwreg(HW_REG_SH_MEM_BASES, 16, 16)
+; GFX9-NEXT:    s_lshl_b32 s0, s0, 16
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, s0, v1
+; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX9-NEXT:    global_store_dword v[0:1], v0, off
+; GFX9-NEXT:    s_endpgm
+  %id = call i32 @llvm.amdgcn.workitem.id.x()
+  %gep = getelementptr inbounds i8*, i8* addrspace(1)* %ptr.ptr, i32 %id
+  %ptr = load volatile i8*, i8* addrspace(1)* %gep
+  %val = call i1 @llvm.amdgcn.is.shared(i8* %ptr)
+  %ext = zext i1 %val to i32
+  store i32 %ext, i32 addrspace(1)* undef
+  ret void
+}
+
+define amdgpu_kernel void @is_local_sgpr(i8* %ptr) {
+; CI-LABEL: is_local_sgpr:
+; CI:       ; %bb.0:
+; CI-NEXT:    s_load_dwordx2 s[0:1], s[6:7], 0x0
+; CI-NEXT:    s_waitcnt lgkmcnt(0)
+; CI-NEXT:    s_load_dword s0, s[4:5], 0x10
+; CI-NEXT:    s_waitcnt lgkmcnt(0)
+; CI-NEXT:    s_cmp_eq_u32 s1, s0
+; CI-NEXT:    s_cbranch_scc0 BB1_2
+; CI-NEXT:  ; %bb.1: ; %bb0
+; CI-NEXT:    v_mov_b32_e32 v0, 0
+; CI-NEXT:    flat_store_dword v[0:1], v0
+; CI-NEXT:  BB1_2: ; %bb1
+; CI-NEXT:    s_endpgm
+;
+; GFX9-LABEL: is_local_sgpr:
+; GFX9:       ; %bb.0:
+; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[6:7], 0x0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    s_getreg_b32 s0, hwreg(HW_REG_SH_MEM_BASES, 16, 16)
+; GFX9-NEXT:    s_lshl_b32 s0, s0, 16
+; GFX9-NEXT:    s_cmp_eq_u32 s1, s0
+; GFX9-NEXT:    s_cbranch_scc0 BB1_2
+; GFX9-NEXT:  ; %bb.1: ; %bb0
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9-NEXT:    global_store_dword v[0:1], v0, off
+; GFX9-NEXT:  BB1_2: ; %bb1
+; GFX9-NEXT:    s_endpgm
  %val = call i1 @llvm.amdgcn.is.shared(i8* %ptr)
+  br i1 %val, label %bb0, label %bb1
+
+bb0:
+  store volatile i32 0, i32 addrspace(1)* undef
+  br label %bb1
+
+bb1:
+  ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x() #0
+declare i1 @llvm.amdgcn.is.shared(i8* nocapture) #0
+
+attributes #0 = { nounwind readnone speculatable }
Index: test/CodeGen/AMDGPU/annotate-kernel-features-hsa.ll
===================================================================
--- test/CodeGen/AMDGPU/annotate-kernel-features-hsa.ll
+++ test/CodeGen/AMDGPU/annotate-kernel-features-hsa.ll
@@ -12,6 +12,9 @@
 declare i8 addrspace(4)* @llvm.amdgcn.queue.ptr() #0
 declare i8 addrspace(4)* @llvm.amdgcn.kernarg.segment.ptr() #0
 
+declare i1 @llvm.amdgcn.is.shared(i8* nocapture) #2
+declare i1 @llvm.amdgcn.is.private(i8* nocapture) #2
+
 ; HSA: define amdgpu_kernel void @use_tgid_x(i32 addrspace(1)* %ptr) #1 {
 define amdgpu_kernel void @use_tgid_x(i32 addrspace(1)* %ptr) #1 {
   %val = call i32 @llvm.amdgcn.workgroup.id.x()
@@ -231,6 +234,22 @@
   ret void
 }
 
+; HSA: define amdgpu_kernel void @use_is_shared(i8* %ptr) #11 {
+define amdgpu_kernel void @use_is_shared(i8* %ptr) #1 {
+  %is.shared = call i1 @llvm.amdgcn.is.shared(i8* %ptr)
+  %ext = zext i1 %is.shared to i32
+  store i32 %ext, i32 addrspace(1)* undef
+  ret void
+}
+
+; HSA: define amdgpu_kernel void @use_is_private(i8* %ptr) #11 {
+define amdgpu_kernel void @use_is_private(i8* %ptr) #1 {
+  %is.private = call i1 @llvm.amdgcn.is.private(i8* %ptr)
+  %ext = zext i1 %is.private to i32
+  store i32 %ext, i32 addrspace(1)* undef
+  ret void
+}
+
 attributes #0 = { nounwind readnone speculatable }
 attributes #1 = { nounwind }
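The two @use_is_* kernels just added should land in an attribute group carrying the queue-pointer feature from the AnnotateKernelFeatures change (group #11 in this file's numbering). A sketch of the implied check, assuming the pass emits the same "amdgpu-queue-ptr" string it uses for llvm.amdgcn.queue.ptr; the actual group contents live in the unshown tail of the test:

; HSA: attributes #11 = { nounwind "amdgpu-queue-ptr" }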
Index: test/CodeGen/AMDGPU/llvm.amdgcn.is.private.ll
===================================================================
--- /dev/null
+++ test/CodeGen/AMDGPU/llvm.amdgcn.is.private.ll
@@ -0,0 +1,50 @@
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,CI %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX9 %s
+
+; GCN-LABEL: {{^}}is_private_vgpr:
+; GCN-DAG: {{flat|global}}_load_dwordx2 v{{\[[0-9]+}}:[[PTR_HI:[0-9]+]]{{\]}}
+; CI-DAG: s_load_dword [[APERTURE:s[0-9]+]], s[4:5], 0x11
+; GFX9-DAG: s_getreg_b32 [[APERTURE:s[0-9]+]], hwreg(HW_REG_SH_MEM_BASES, 0, 16)
+; GFX9: s_lshl_b32 [[APERTURE]], [[APERTURE]], 16
+; GCN: v_cmp_eq_u32_e32 vcc, [[APERTURE]], v[[PTR_HI]]
+; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, vcc
+define amdgpu_kernel void @is_private_vgpr(i8* addrspace(1)* %ptr.ptr) {
+  %id = call i32 @llvm.amdgcn.workitem.id.x()
+  %gep = getelementptr inbounds i8*, i8* addrspace(1)* %ptr.ptr, i32 %id
+  %ptr = load volatile i8*, i8* addrspace(1)* %gep
+  %val = call i1 @llvm.amdgcn.is.private(i8* %ptr)
+  %ext = zext i1 %val to i32
+  store i32 %ext, i32 addrspace(1)* undef
+  ret void
+}
+
+; FIXME: setcc (zero_extend (setcc)), 1) not folded out, resulting in
+; select and vcc branch.
+
+; GCN-LABEL: {{^}}is_private_sgpr:
+; CI-DAG: s_load_dword [[APERTURE:s[0-9]+]], s[4:5], 0x11{{$}}
+; GFX9-DAG: s_getreg_b32 [[APERTURE:s[0-9]+]], hwreg(HW_REG_SH_MEM_BASES, 0, 16)
+
+; CI-DAG: s_load_dword [[PTR_HI:s[0-9]+]], s[6:7], 0x1{{$}}
+; GFX9-DAG: s_load_dword [[PTR_HI:s[0-9]+]], s[6:7], 0x4{{$}}
+; GFX9: s_lshl_b32 [[APERTURE]], [[APERTURE]], 16
+
+; GCN: v_mov_b32_e32 [[V_APERTURE:v[0-9]+]], [[APERTURE]]
+; GCN: v_cmp_eq_u32_e32 vcc, [[PTR_HI]], [[V_APERTURE]]
+; GCN: s_cbranch_vccnz
+define amdgpu_kernel void @is_private_sgpr(i8* %ptr) {
+  %val = call i1 @llvm.amdgcn.is.private(i8* %ptr)
+  br i1 %val, label %bb0, label %bb1
+
+bb0:
+  store volatile i32 0, i32 addrspace(1)* undef
+  br label %bb1
+
+bb1:
+  ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x() #0
+declare i1 @llvm.amdgcn.is.private(i8* nocapture) #0
+
+attributes #0 = { nounwind readnone speculatable }
Index: test/CodeGen/AMDGPU/llvm.amdgcn.is.shared.ll
===================================================================
--- /dev/null
+++ test/CodeGen/AMDGPU/llvm.amdgcn.is.shared.ll
@@ -0,0 +1,51 @@
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,CI %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX9 %s
+
+; GCN-LABEL: {{^}}is_local_vgpr:
+; GCN-DAG: {{flat|global}}_load_dwordx2 v{{\[[0-9]+}}:[[PTR_HI:[0-9]+]]{{\]}}
+; CI-DAG: s_load_dword [[APERTURE:s[0-9]+]], s[4:5], 0x10
+; GFX9-DAG: s_getreg_b32 [[APERTURE:s[0-9]+]], hwreg(HW_REG_SH_MEM_BASES, 16, 16)
+; GFX9: s_lshl_b32 [[APERTURE]], [[APERTURE]], 16
+
+; GCN: v_cmp_eq_u32_e32 vcc, [[APERTURE]], v[[PTR_HI]]
+; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, vcc
+define amdgpu_kernel void @is_local_vgpr(i8* addrspace(1)* %ptr.ptr) {
+  %id = call i32 @llvm.amdgcn.workitem.id.x()
+  %gep = getelementptr inbounds i8*, i8* addrspace(1)* %ptr.ptr, i32 %id
+  %ptr = load volatile i8*, i8* addrspace(1)* %gep
+  %val = call i1 @llvm.amdgcn.is.shared(i8* %ptr)
+  %ext = zext i1 %val to i32
+  store i32 %ext, i32 addrspace(1)* undef
+  ret void
+}
+
+; FIXME: setcc (zero_extend (setcc)), 1) not folded out, resulting in
+; select and vcc branch.
+
+; GCN-LABEL: {{^}}is_local_sgpr:
+; CI-DAG: s_load_dword [[APERTURE:s[0-9]+]], s[4:5], 0x10{{$}}
+; GFX9-DAG: s_getreg_b32 [[APERTURE:s[0-9]+]], hwreg(HW_REG_SH_MEM_BASES, 16, 16)
+; GFX9-DAG: s_lshl_b32 [[APERTURE]], [[APERTURE]], 16
+
+; CI-DAG: s_load_dword [[PTR_HI:s[0-9]+]], s[6:7], 0x1{{$}}
+; GFX9-DAG: s_load_dword [[PTR_HI:s[0-9]+]], s[6:7], 0x4{{$}}
+
+; GCN: v_mov_b32_e32 [[V_APERTURE:v[0-9]+]], [[APERTURE]]
+; GCN: v_cmp_eq_u32_e32 vcc, [[PTR_HI]], [[V_APERTURE]]
+; GCN: s_cbranch_vccnz
+define amdgpu_kernel void @is_local_sgpr(i8* %ptr) {
+  %val = call i1 @llvm.amdgcn.is.shared(i8* %ptr)
+  br i1 %val, label %bb0, label %bb1
+
+bb0:
+  store volatile i32 0, i32 addrspace(1)* undef
+  br label %bb1
+
+bb1:
+  ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x() #0
+declare i1 @llvm.amdgcn.is.shared(i8* nocapture) #0
+
+attributes #0 = { nounwind readnone speculatable }
Index: test/Transforms/InferAddressSpaces/AMDGPU/address-space-id-funcs.ll
===================================================================
--- /dev/null
+++ test/Transforms/InferAddressSpaces/AMDGPU/address-space-id-funcs.ll
@@ -0,0 +1,55 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -infer-address-spaces -instsimplify %s | FileCheck %s
+
+define amdgpu_kernel void @is_local_true(i8 addrspace(3)* %lptr) {
+; CHECK-LABEL: @is_local_true(
+; CHECK-NEXT:    store i32 1, i32 addrspace(1)* undef
+; CHECK-NEXT:    ret void
+;
+  %cast = addrspacecast i8 addrspace(3)* %lptr to i8*
+  %is.shared = call i1 @llvm.amdgcn.is.shared(i8* %cast)
+  %ext = zext i1 %is.shared to i32
+  store i32 %ext, i32 addrspace(1)* undef
+  ret void
+}
+
+define amdgpu_kernel void @is_local_false(i8 addrspace(1)* %gptr) {
+; CHECK-LABEL: @is_local_false(
+; CHECK-NEXT:    store i32 0, i32 addrspace(1)* undef
+; CHECK-NEXT:    ret void
+;
+  %cast = addrspacecast i8 addrspace(1)* %gptr to i8*
+  %is.shared = call i1 @llvm.amdgcn.is.shared(i8* %cast)
+  %ext = zext i1 %is.shared to i32
+  store i32 %ext, i32 addrspace(1)* undef
+  ret void
+}
+
+define void @is_private_true(i8 addrspace(5)* %lptr) {
+; CHECK-LABEL: @is_private_true(
+; CHECK-NEXT:    store i32 1, i32 addrspace(1)* undef
+; CHECK-NEXT:    ret void
+;
+  %cast = addrspacecast i8 addrspace(5)* %lptr to i8*
+  %is.private = call i1 @llvm.amdgcn.is.private(i8* %cast)
+  %ext = zext i1 %is.private to i32
+  store i32 %ext, i32 addrspace(1)* undef
+  ret void
+}
+
+define void @is_private_false(i8 addrspace(1)* %gptr) {
+; CHECK-LABEL: @is_private_false(
+; CHECK-NEXT:    store i32 0, i32 addrspace(1)* undef
+; CHECK-NEXT:    ret void
+;
+  %cast = addrspacecast i8 addrspace(1)* %gptr to i8*
+  %is.private = call i1 @llvm.amdgcn.is.private(i8* %cast)
+  %ext = zext i1 %is.private to i32
+  store i32 %ext, i32 addrspace(1)* undef
+  ret void
+}
+
+declare i1 @llvm.amdgcn.is.shared(i8* nocapture) #0
+declare i1 @llvm.amdgcn.is.private(i8* nocapture) #0
+
+attributes #0 = { nounwind readnone speculatable willreturn }