diff --git a/clang/lib/CodeGen/CGOpenMPRuntime.cpp b/clang/lib/CodeGen/CGOpenMPRuntime.cpp
--- a/clang/lib/CodeGen/CGOpenMPRuntime.cpp
+++ b/clang/lib/CodeGen/CGOpenMPRuntime.cpp
@@ -8442,6 +8442,25 @@
       return MappableExprsHandler::OMP_MAP_PRIVATE |
              MappableExprsHandler::OMP_MAP_TO;
     }
+    const ValueDecl *VD = Cap.getCapturedVar()->getCanonicalDecl();
+    const auto *RD = VD->getType()
+                         .getCanonicalType()
+                         .getNonReferenceType()
+                         ->getAsCXXRecordDecl();
+    const auto *CurExecDir = CurDir.get<const OMPExecutableDirective *>();
+    for (const auto *C : CurExecDir->getClausesOfKind<OMPMapClause>())
+      if (!C->decl_component_lists(VD).empty())
+        if (RD && RD->isLambda())
+          if (C->getMapType() == OMPC_MAP_to) {
+            // For map(to: lambda), honor the user-specified map type.
+            ArrayRef<OpenMPMapModifierKind> MapModifiers;
+            ArrayRef<OpenMPMotionModifierKind> MotionModifiers;
+            return getMapTypeBits(C->getMapType(), MapModifiers,
+                                  MotionModifiers, /*IsImplicit=*/false,
+                                  /*AddPtrFlag=*/false,
+                                  /*AddIsTargetParamFlag=*/false,
+                                  /*isNonContiguous=*/false);
+          }
     return MappableExprsHandler::OMP_MAP_TO |
            MappableExprsHandler::OMP_MAP_FROM;
   }
@@ -9143,9 +9162,19 @@
     assert(CurDir.is<const OMPExecutableDirective *>() &&
            "Expect a executable directive");
     const auto *CurExecDir = CurDir.get<const OMPExecutableDirective *>();
+    const auto *RD = VD ? VD->getType()
+                              .getCanonicalType()
+                              .getNonReferenceType()
+                              ->getAsCXXRecordDecl()
+                        : nullptr;
     for (const auto *C : CurExecDir->getClausesOfKind<OMPMapClause>()) {
       const auto *EI = C->getVarRefs().begin();
       for (const auto L : C->decl_component_lists(VD)) {
+        if (RD && RD->isLambda())
+          // For map(to: lambda), skip here; it is handled in
+          // generateDefaultMapInfo.
+          if (C->getMapType() == OMPC_MAP_to)
+            return;
         const ValueDecl *VDecl, *Mapper;
         // The Expression is not correct if the mapping is implicit
         const Expr *E = (C->getMapLoc().isValid()) ? *EI : nullptr;
diff --git a/clang/test/OpenMP/nvptx_lambda_pointer_capturing.cpp b/clang/test/OpenMP/nvptx_lambda_pointer_capturing.cpp
--- a/clang/test/OpenMP/nvptx_lambda_pointer_capturing.cpp
+++ b/clang/test/OpenMP/nvptx_lambda_pointer_capturing.cpp
@@ -24,6 +24,32 @@
   }
 }
 
+template <typename F>
+void omp_loop_ref(int start, int end, F &body) {
+#pragma omp target teams distribute parallel for map(to: body)
+  for (int i = start; i < end; ++i) {
+    body(i);
+  }
+  int *p;
+  const auto &body_ref = [=](int i) { p[i] = 0; };
+  #pragma omp target map(to: body_ref)
+  body_ref(10);
+}
+
+template <typename FTy>
+struct C {
+  static void xoo(const FTy &f) {
+    int x = 10;
+    #pragma omp target map(to: f)
+    f(x);
+  }
+};
+
+template <typename FTy>
+void zoo(const FTy &functor) {
+  C<FTy>::xoo(functor);
+}
+
 // CHECK: define {{.*}}[[MAIN:@.+]](
 int main()
 {
@@ -32,6 +58,7 @@
   auto body = [=](int i){
     p[i] = q[i];
  };
+  zoo([=](int i) { p[i] = 0; });
 
 #pragma omp target teams distribute parallel for
   for (int i = 0; i < 100; ++i) {
@@ -82,6 +109,7 @@
 
   omp_loop(0,100,body);
+  omp_loop_ref(0,100,body);
 }
 
 // CHECK: [[BASE_PTRS:%.+]] = alloca [5 x i8*]{{.+}}
@@ -122,4 +150,34 @@
 // CHECK: [[PTRS_GEP:%.+]] = getelementptr {{.+}} [5 x {{.+}}*], [5 x {{.+}}*]* [[PTRS]], {{.+}} 0, {{.+}} 0
 // CHECK: {{%.+}} = call{{.+}} @__tgt_target_teams_mapper(%struct.ident_t* @{{.+}}, {{.+}}, {{.+}}, {{.+}}, i8** [[BASES_GEP]], i8** [[PTRS_GEP]], i[[PTRSZ]]* getelementptr inbounds ([5 x i{{.+}}], [5 x i{{.+}}]* [[SIZES_TEMPLATE]], i{{.+}} 0, i{{.+}} 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* [[TYPES_TEMPLATE]], i{{.+}} 0, i{{.+}} 0), i8** null, i8** null, {{.+}}, {{.+}})
 
+// CHECK: define internal void @{{.+}}omp_loop_ref{{.+}}(
+// CHECK: [[BODY:%body.addr]] = alloca %class.anon*
+// CHECK: [[TMP:%tmp]] = alloca %class.anon*
+// CHECK: [[BODY_REF:%body_ref]] = alloca %class.anon.1*
+// CHECK: [[REF_TMP:%ref.tmp]] = alloca %class.anon.1
+// CHECK: [[TMP8:%tmp.+]] = alloca %class.anon.1*
+// CHECK: [[L0:%.+]] = load %class.anon*, %class.anon** [[BODY]]
+// CHECK: store %class.anon* [[L0]], %class.anon** [[TMP]]
+// CHECK: [[L5:%.+]] = load %class.anon*, %class.anon** [[TMP]]
+// CHECK-NOT: [[L6:%.+]] = load %class.anon*, %class.anon** [[TMP]]
+// CHECK-NOT: [[L7:%.+]] = load %class.anon*, %class.anon** [[TMP]]
+// CHECK: store %class.anon.1* [[REF_TMP]], %class.anon.1** [[BODY_REF]]
+// CHECK: [[L47:%.+]] = load %class.anon.1*, %class.anon.1** [[BODY_REF]]
+// CHECK: store %class.anon.1* [[L47]], %class.anon.1** [[TMP8]]
+// CHECK: [[L48:%.+]] = load %class.anon.1*, %class.anon.1** [[TMP8]]
+// CHECK-NOT: [[L49:%.+]] = load %class.anon.1*, %class.anon.1** [[TMP8]]
+// CHECK-NOT: [[L50:%.+]] = load %class.anon.1*, %class.anon.1** [[TMP8]]
+// CHECK: ret void
+
+// CHECK: define internal void @{{.+}}xoo{{.+}}(
+// CHECK: [[FADDR:%f.addr]] = alloca %class.anon.0*
+// CHECK: [[L0:%.+]] = load %class.anon.0*, %class.anon.0** [[FADDR]]
+// CHECK: store %class.anon.0* [[L0]], %class.anon.0** [[TMP:%tmp]]
+// CHECK: [[L1:%.+]] = load %class.anon.0*, %class.anon.0** [[TMP]]
+// CHECK-NOT: %4 = load %class.anon.0*, %class.anon.0** [[TMP]]
+// CHECK-NOT: %5 = load %class.anon.0*, %class.anon.0** [[TMP]]
+// CHECK: [[L4:%.+]] = getelementptr inbounds %class.anon.0, %class.anon.0* [[L1]], i32 0, i32 0
+// CHECK: [[L5:%.+]] = load i{{.*}}*, i{{.*}}** [[L4]]
+// CHECK: ret void
+
 #endif