diff --git a/clang/lib/CodeGen/TargetInfo.cpp b/clang/lib/CodeGen/TargetInfo.cpp
--- a/clang/lib/CodeGen/TargetInfo.cpp
+++ b/clang/lib/CodeGen/TargetInfo.cpp
@@ -10189,8 +10189,11 @@
 public:
   SPIRABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) { setCCs(); }
 
+  void computeInfo(CGFunctionInfo &FI) const override;
+
 private:
   void setCCs();
+  ABIArgInfo classifyKernelArgumentType(QualType Ty) const;
 };
 } // end anonymous namespace
 namespace {
@@ -10205,6 +10208,7 @@
   }
 
   unsigned getOpenCLKernelCallingConv() const override;
+  void setCUDAKernelCallingConvention(const FunctionType *&FT) const override;
 };
 
 } // End anonymous namespace.
@@ -10213,6 +10217,40 @@
   RuntimeCC = llvm::CallingConv::SPIR_FUNC;
 }
 
+ABIArgInfo SPIRABIInfo::classifyKernelArgumentType(QualType Ty) const {
+  if (getContext().getLangOpts().HIP && getTarget().getTriple().isSPIRV()) {
+    // Coerce pointer arguments with default address space to CrossWorkGroup
+    // pointers for HIPSPV. When the language mode is HIP, the SPIRTargetInfo
+    // maps cuda_device to SPIR-V's CrossWorkGroup address space.
+    llvm::Type *LTy = CGT.ConvertType(Ty);
+    auto DefaultAS = getContext().getTargetAddressSpace(LangAS::Default);
+    auto GlobalAS = getContext().getTargetAddressSpace(LangAS::cuda_device);
+    if (LTy->isPointerTy() && LTy->getPointerAddressSpace() == DefaultAS) {
+      LTy = llvm::PointerType::get(
+          cast<llvm::PointerType>(LTy)->getElementType(), GlobalAS);
+      return ABIArgInfo::getDirect(LTy, 0, nullptr, false);
+    }
+  }
+  return classifyArgumentType(Ty);
+}
+
+void SPIRABIInfo::computeInfo(CGFunctionInfo &FI) const {
+  // The logic is same as in DefaultABIInfo with an exception on the kernel
+  // arguments handling.
+  llvm::CallingConv::ID CC = FI.getCallingConvention();
+
+  if (!getCXXABI().classifyReturnType(FI))
+    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+
+  for (auto &I : FI.arguments()) {
+    if (CC == llvm::CallingConv::SPIR_KERNEL) {
+      I.info = classifyKernelArgumentType(I.type);
+    } else {
+      I.info = classifyArgumentType(I.type);
+    }
+  }
+}
+
 namespace clang {
 namespace CodeGen {
 void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI) {
@@ -10226,6 +10264,18 @@
   return llvm::CallingConv::SPIR_KERNEL;
 }
 
+void SPIRTargetCodeGenInfo::setCUDAKernelCallingConvention(
+    const FunctionType *&FT) const {
+  // Convert HIP kernels to SPIR-V kernels.
+  if (getABIInfo().getContext().getLangOpts().HIP &&
+      getABIInfo().getTarget().getTriple().isSPIRV()) {
+    FT = getABIInfo().getContext().adjustFunctionType(
+        FT, FT->getExtInfo().withCallingConv(CC_OpenCLKernel));
+    return;
+  }
+  TargetCodeGenInfo::setCUDAKernelCallingConvention(FT);
+}
+
 static bool appendType(SmallStringEnc &Enc, QualType QType,
                        const CodeGen::CodeGenModule &CGM,
                        TypeStringCache &TSC);
diff --git a/clang/test/CodeGenHIP/hipspv-kernel.cpp b/clang/test/CodeGenHIP/hipspv-kernel.cpp
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGenHIP/hipspv-kernel.cpp
@@ -0,0 +1,9 @@
+// RUN: %clang_cc1 -triple spirv64 -x hip -emit-llvm -fcuda-is-device \
+// RUN: -o - %s | FileCheck %s
+
+#define __global__ __attribute__((global))
+
+// CHECK: define {{.*}}spir_kernel void @_Z3fooPff(float addrspace(1)* {{.*}}, float {{.*}})
+__global__ void foo(float *a, float b) {
+  *a = b;
+}