Index: cfe/trunk/include/clang/Basic/DiagnosticSemaKinds.td
===================================================================
--- cfe/trunk/include/clang/Basic/DiagnosticSemaKinds.td
+++ cfe/trunk/include/clang/Basic/DiagnosticSemaKinds.td
@@ -6713,6 +6713,10 @@
 def err_device_static_local_var : Error<
     "Within a __device__/__global__ function, "
     "only __shared__ variables may be marked \"static\"">;
+def err_cuda_vla : Error<
+    "cannot use variable-length arrays in "
+    "%select{__device__|__global__|__host__|__host__ __device__}0 functions">;
+
 def warn_non_pod_vararg_with_format_string : Warning<
   "cannot pass %select{non-POD|non-trivial}0 object of type %1 to variadic "
   "%select{function|block|method|constructor}2; expected type from format "
Index: cfe/trunk/include/clang/Sema/Sema.h
===================================================================
--- cfe/trunk/include/clang/Sema/Sema.h
+++ cfe/trunk/include/clang/Sema/Sema.h
@@ -9255,6 +9255,8 @@
   /// ExprTy should be the string "try" or "throw", as appropriate.
   bool CheckCUDAExceptionExpr(SourceLocation Loc, StringRef ExprTy);
 
+  bool CheckCUDAVLA(SourceLocation Loc);
+
   /// Finds a function in \p Matches with highest calling priority
   /// from \p Caller context and erases all functions with lower
   /// calling priority.
Index: cfe/trunk/lib/Sema/SemaCUDA.cpp
===================================================================
--- cfe/trunk/lib/Sema/SemaCUDA.cpp
+++ cfe/trunk/lib/Sema/SemaCUDA.cpp
@@ -539,3 +539,23 @@
   }
   return true;
 }
+
+bool Sema::CheckCUDAVLA(SourceLocation Loc) {
+  assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
+  FunctionDecl *CurFn = dyn_cast<FunctionDecl>(CurContext);
+  if (!CurFn)
+    return true;
+  CUDAFunctionTarget Target = IdentifyCUDATarget(CurFn);
+  if (Target == CFT_Global || Target == CFT_Device) {
+    Diag(Loc, diag::err_cuda_vla) << Target;
+    return false;
+  }
+  if (Target == CFT_HostDevice && getLangOpts().CUDAIsDevice) {
+    PartialDiagnostic ErrPD{PartialDiagnostic::NullDiagnostic()};
+    ErrPD.Reset(diag::err_cuda_vla);
+    ErrPD << Target;
+    CurFn->addDeferredDiag({Loc, std::move(ErrPD)});
+    return false;
+  }
+  return true;
+}
Index: cfe/trunk/lib/Sema/SemaType.cpp
===================================================================
--- cfe/trunk/lib/Sema/SemaType.cpp
+++ cfe/trunk/lib/Sema/SemaType.cpp
@@ -2241,6 +2241,10 @@
     Diag(Loc, diag::err_opencl_vla);
     return QualType();
   }
+  // CUDA device code doesn't support VLAs.
+  if (getLangOpts().CUDA && T->isVariableArrayType() && !CheckCUDAVLA(Loc))
+    return QualType();
+
   // If this is not C99, extwarn about VLA's and C99 array size modifiers.
   if (!getLangOpts().C99) {
     if (T->isVariableArrayType()) {
Index: cfe/trunk/test/SemaCUDA/vla-host-device.cu
===================================================================
--- cfe/trunk/test/SemaCUDA/vla-host-device.cu
+++ cfe/trunk/test/SemaCUDA/vla-host-device.cu
@@ -0,0 +1,21 @@
+// RUN: %clang_cc1 -fcuda-is-device -verify -S %s -o /dev/null
+// RUN: %clang_cc1 -verify -DHOST %s -S -o /dev/null
+
+#include "Inputs/cuda.h"
+
+#ifdef HOST
+// expected-no-diagnostics
+#endif
+
+__host__ __device__ void hd(int n) {
+  int x[n];
+#ifndef HOST
+  // expected-error@-2 {{cannot use variable-length arrays in __host__ __device__ functions}}
+#endif
+}
+
+// No error because never codegen'ed for device.
+__host__ __device__ inline void hd_inline(int n) {
+  int x[n];
+}
+void call_hd_inline() { hd_inline(42); }
Index: cfe/trunk/test/SemaCUDA/vla.cu
===================================================================
--- cfe/trunk/test/SemaCUDA/vla.cu
+++ cfe/trunk/test/SemaCUDA/vla.cu
@@ -0,0 +1,12 @@
+// RUN: %clang_cc1 -fcuda-is-device -fsyntax-only -verify %s
+// RUN: %clang_cc1 -fsyntax-only -verify -DHOST %s
+
+#include "Inputs/cuda.h"
+
+void host(int n) {
+  int x[n];
+}
+
+__device__ void device(int n) {
+  int x[n]; // expected-error {{cannot use variable-length arrays in __device__ functions}}
+}