Index: clang/lib/Sema/SemaOverload.cpp =================================================================== --- clang/lib/Sema/SemaOverload.cpp +++ clang/lib/Sema/SemaOverload.cpp @@ -12750,6 +12750,13 @@ DeclAccessPair DAP; SmallVector AmbiguousDecls; + // Return positive for better, negative for worse, 0 for equal preference. + auto CheckCUDAPreference = [&](FunctionDecl *FD1, FunctionDecl *FD2) { + FunctionDecl *Caller = getCurFunctionDecl(/*AllowLambda=*/true); + return static_cast(IdentifyCUDAPreference(Caller, FD1)) - + static_cast(IdentifyCUDAPreference(Caller, FD2)); + }; + auto CheckMoreConstrained = [&](FunctionDecl *FD1, FunctionDecl *FD2) -> std::optional { if (FunctionDecl *MF = FD1->getInstantiatedFromMemberFunction()) @@ -12780,9 +12787,31 @@ if (!checkAddressOfFunctionIsAvailable(FD)) continue; + // If we found a better result, update Result. + auto FoundBetter = [&]() { + IsResultAmbiguous = false; + DAP = I.getPair(); + Result = FD; + }; + // We have more than one result - see if it is more constrained than the // previous one. if (Result) { + // Check CUDA preference first. If the candidates have different CUDA + // preference, choose the one with higher CUDA preference. Otherwise, + // choose the one with more constraints. + if (getLangOpts().CUDA) { + int PreferenceByCUDA = CheckCUDAPreference(FD, Result); + // FD has different preference than Result. + if (PreferenceByCUDA != 0) { + // FD is more preferable than Result. + if (PreferenceByCUDA > 0) + FoundBetter(); + continue; + } + } + // FD has the same CUDA preference as Result. Continue checking + // constraints. std::optional MoreConstrainedThanPrevious = CheckMoreConstrained(FD, Result); if (!MoreConstrainedThanPrevious) { @@ -12794,9 +12823,7 @@ continue; // FD is more constrained - replace Result with it. 
} - IsResultAmbiguous = false; - DAP = I.getPair(); - Result = FD; + FoundBetter(); } if (IsResultAmbiguous) @@ -12806,9 +12833,15 @@ SmallVector ResultAC; // We skipped over some ambiguous declarations which might be ambiguous with // the selected result. - for (FunctionDecl *Skipped : AmbiguousDecls) + for (FunctionDecl *Skipped : AmbiguousDecls) { + // If a skipped candidate has a different CUDA preference than the result, + // there is no ambiguity. Otherwise check whether they have different + // constraints. + if (getLangOpts().CUDA && CheckCUDAPreference(Skipped, Result) != 0) + continue; if (!CheckMoreConstrained(Skipped, Result)) return nullptr; + } Pair = DAP; } return Result; Index: clang/test/SemaCUDA/template-arg-deduction.cu =================================================================== --- /dev/null +++ clang/test/SemaCUDA/template-arg-deduction.cu @@ -0,0 +1,27 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fsyntax-only -verify %s +// RUN: %clang_cc1 -triple nvptx64-nvidia-cuda -fsyntax-only -fcuda-is-device -verify %s + +// expected-no-diagnostics + +#include "Inputs/cuda.h" + +void foo(); +__device__ void foo(); + +template +void host_temp(F f); + +template +__device__ void device_temp(F f); + +void host_caller() { + host_temp(foo); +} + +__global__ void kernel_caller() { + device_temp(foo); +} + +__device__ void device_caller() { + device_temp(foo); +}