Index: llvm/lib/IR/Verifier.cpp
===================================================================
--- llvm/lib/IR/Verifier.cpp
+++ llvm/lib/IR/Verifier.cpp
@@ -2292,6 +2292,17 @@
   case CallingConv::AMDGPU_CS:
     Assert(!F.hasStructRetAttr(),
            "Calling convention does not allow sret", &F);
+    if (F.getCallingConv() != CallingConv::SPIR_KERNEL) {
+      for (unsigned i = 0, e = F.arg_size(); i != e; ++i) {
+        Assert(!Attrs.hasParamAttribute(i, Attribute::ByVal),
+               "Calling convention disallows byval", &F);
+        Assert(!Attrs.hasParamAttribute(i, Attribute::Preallocated),
+               "Calling convention disallows preallocated", &F);
+        Assert(!Attrs.hasParamAttribute(i, Attribute::InAlloca),
+               "Calling convention disallows inalloca", &F);
+      }
+    }
+
     LLVM_FALLTHROUGH;
   case CallingConv::Fast:
   case CallingConv::Cold:
Index: llvm/test/Verifier/amdgpu-cc.ll
===================================================================
--- llvm/test/Verifier/amdgpu-cc.ll
+++ llvm/test/Verifier/amdgpu-cc.ll
@@ -1,5 +1,7 @@
 ; RUN: not llvm-as < %s 2>&1 | FileCheck %s
 
+target datalayout = "A5"
+
 ; CHECK: Calling convention requires void return type
 ; CHECK-NEXT: i32 ()* @nonvoid_cc_amdgpu_kernel
 define amdgpu_kernel i32 @nonvoid_cc_amdgpu_kernel() {
@@ -13,8 +15,14 @@
 }
 
 ; CHECK: Calling convention does not allow sret
-; CHECK-NEXT: void (i32*)* @sret_cc_amdgpu_kernel
-define amdgpu_kernel void @sret_cc_amdgpu_kernel(i32* sret %ptr) {
+; CHECK-NEXT: void (i32*)* @sret_cc_amdgpu_kernel_as0
+define amdgpu_kernel void @sret_cc_amdgpu_kernel_as0(i32* sret %ptr) {
+  ret void
+}
+
+; CHECK: Calling convention does not allow sret
+; CHECK-NEXT: void (i32 addrspace(5)*)* @sret_cc_amdgpu_kernel
+define amdgpu_kernel void @sret_cc_amdgpu_kernel(i32 addrspace(5)* sret %ptr) {
   ret void
 }
 
@@ -53,3 +61,63 @@
 define spir_kernel void @varargs_spir_kernel(...) {
   ret void
 }
+
+; CHECK: Calling convention disallows byval
+; CHECK-NEXT: void (i32 addrspace(5)*)* @byval_cc_amdgpu_kernel
+define amdgpu_kernel void @byval_cc_amdgpu_kernel(i32 addrspace(5)* byval %ptr) {
+  ret void
+}
+
+; CHECK: Calling convention disallows byval
+; CHECK-NEXT: void (i32 addrspace(1)*)* @byval_as1_cc_amdgpu_kernel
+define amdgpu_kernel void @byval_as1_cc_amdgpu_kernel(i32 addrspace(1)* byval %ptr) {
+  ret void
+}
+
+; CHECK: Calling convention disallows byval
+; CHECK-NEXT: void (i32*)* @byval_as0_cc_amdgpu_kernel
+define amdgpu_kernel void @byval_as0_cc_amdgpu_kernel(i32* byval %ptr) {
+  ret void
+}
+
+; CHECK: Calling convention disallows byval
+; CHECK-NEXT: void (i32 addrspace(5)*)* @byval_cc_amdgpu_vs
+define amdgpu_vs void @byval_cc_amdgpu_vs(i32 addrspace(5)* byval %ptr) {
+  ret void
+}
+
+; CHECK: Calling convention disallows byval
+; CHECK-NEXT: void (i32 addrspace(5)*)* @byval_cc_amdgpu_hs
+define amdgpu_hs void @byval_cc_amdgpu_hs(i32 addrspace(5)* byval %ptr) {
+  ret void
+}
+
+; CHECK: Calling convention disallows byval
+; CHECK-NEXT: void (i32 addrspace(5)*)* @byval_cc_amdgpu_gs
+define amdgpu_gs void @byval_cc_amdgpu_gs(i32 addrspace(5)* byval %ptr) {
+  ret void
+}
+
+; CHECK: Calling convention disallows byval
+; CHECK-NEXT: void (i32 addrspace(5)*)* @byval_cc_amdgpu_ps
+define amdgpu_ps void @byval_cc_amdgpu_ps(i32 addrspace(5)* byval %ptr) {
+  ret void
+}
+
+; CHECK: Calling convention disallows byval
+; CHECK-NEXT: void (i32 addrspace(5)*)* @byval_cc_amdgpu_cs
+define amdgpu_cs void @byval_cc_amdgpu_cs(i32 addrspace(5)* byval %ptr) {
+  ret void
+}
+
+; CHECK: Calling convention disallows preallocated
+; CHECK-NEXT: void (i32*)* @preallocated_as0_cc_amdgpu_kernel
+define amdgpu_kernel void @preallocated_as0_cc_amdgpu_kernel(i32* preallocated(i32) %ptr) {
+  ret void
+}
+
+; CHECK: Calling convention disallows inalloca
+; CHECK-NEXT: void (i32*)* @inalloca_as0_cc_amdgpu_kernel
+define amdgpu_kernel void @inalloca_as0_cc_amdgpu_kernel(i32* inalloca %ptr) {
+  ret void
+}
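
Note (not part of the patch): a minimal sketch of IR that should still pass llvm-as under the new checks, since the added asserts only fire for the AMDGPU calling conventions and are explicitly skipped for spir_kernel; the function names below are hypothetical and do not appear in the test file.

; Hypothetical examples, not taken from amdgpu-cc.ll.
target datalayout = "A5"

; byval remains legal on spir_kernel parameters because of the
; CallingConv::SPIR_KERNEL guard added in Verifier.cpp above.
define spir_kernel void @byval_spir_kernel(i32* byval %ptr) {
  ret void
}

; Functions using the default calling convention are untouched by this change.
define void @byval_default_cc(i32* byval %ptr) {
  ret void
}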