diff --git a/clang/include/clang/Sema/Sema.h b/clang/include/clang/Sema/Sema.h
--- a/clang/include/clang/Sema/Sema.h
+++ b/clang/include/clang/Sema/Sema.h
@@ -11307,6 +11307,7 @@
   /// on the parameter of the bind clause. In the methods for the
   /// mapped directives, check the parameters of the lastprivate clause.
   bool checkLastPrivateForMappedDirectives(ArrayRef<OMPClause *> Clauses);
+
   /// Depending on the bind clause of OMPD_loop map the directive to new
   /// directives.
   ///    1) loop bind(parallel) --> OMPD_for
@@ -11316,9 +11317,12 @@
   /// rigorous semantic checking in the new mapped directives.
   bool mapLoopConstruct(llvm::SmallVector<OMPClause *> &ClausesWithoutBind,
                         ArrayRef<OMPClause *> Clauses,
-                        OpenMPBindClauseKind BindKind,
+                        OpenMPBindClauseKind &BindKind,
                         OpenMPDirectiveKind &Kind,
-                        OpenMPDirectiveKind &PrevMappedDirective);
+                        OpenMPDirectiveKind &PrevMappedDirective,
+                        SourceLocation StartLoc, SourceLocation EndLoc,
+                        const DeclarationNameInfo &DirName,
+                        OpenMPDirectiveKind CancelRegion);
 
 public:
   /// The declarator \p D defines a function in the scope \p S which is nested
diff --git a/clang/lib/Sema/SemaOpenMP.cpp b/clang/lib/Sema/SemaOpenMP.cpp
--- a/clang/lib/Sema/SemaOpenMP.cpp
+++ b/clang/lib/Sema/SemaOpenMP.cpp
@@ -5062,6 +5062,18 @@
       CurrentRegion != OMPD_cancellation_point &&
       CurrentRegion != OMPD_cancel && CurrentRegion != OMPD_scan)
     return false;
+  // Checks needed for mapping "loop" construct. Please check mapLoopConstruct
+  // for a detailed explanation.
+  if (SemaRef.LangOpts.OpenMP >= 50 && CurrentRegion == OMPD_loop &&
+      ((BindKind == OMPC_BIND_parallel) || (BindKind == OMPC_BIND_teams)) &&
+      (isOpenMPWorksharingDirective(ParentRegion) ||
+       ParentRegion == OMPD_loop)) {
+    int ErrorMsgNumber = (BindKind == OMPC_BIND_parallel) ? 1 : 4;
+    SemaRef.Diag(StartLoc, diag::err_omp_prohibited_region)
+        << true << getOpenMPDirectiveName(ParentRegion) << ErrorMsgNumber
+        << getOpenMPDirectiveName(CurrentRegion);
+    return true;
+  }
   if (CurrentRegion == OMPD_cancellation_point ||
       CurrentRegion == OMPD_cancel) {
     // OpenMP [2.16, Nesting of Regions]
@@ -6114,35 +6126,40 @@
 bool Sema::mapLoopConstruct(llvm::SmallVector<OMPClause *> &ClausesWithoutBind,
                             ArrayRef<OMPClause *> Clauses,
-                            OpenMPBindClauseKind BindKind,
+                            OpenMPBindClauseKind &BindKind,
                             OpenMPDirectiveKind &Kind,
-                            OpenMPDirectiveKind &PrevMappedDirective) {
+                            OpenMPDirectiveKind &PrevMappedDirective,
+                            SourceLocation StartLoc, SourceLocation EndLoc,
+                            const DeclarationNameInfo &DirName,
+                            OpenMPDirectiveKind CancelRegion) {
 
   bool UseClausesWithoutBind = false;
 
   // Restricting to "#pragma omp loop bind"
   if (getLangOpts().OpenMP >= 50 && Kind == OMPD_loop) {
+
+    const OpenMPDirectiveKind ParentDirective = DSAStack->getParentDirective();
+
     if (BindKind == OMPC_BIND_unknown) {
       // Setting the enclosing teams or parallel construct for the loop
       // directive without bind clause.
       BindKind = OMPC_BIND_thread; // Default bind(thread) if binding is unknown
 
-      const OpenMPDirectiveKind ParentDirective =
-          DSAStack->getParentDirective();
       if (ParentDirective == OMPD_unknown) {
         Diag(DSAStack->getDefaultDSALocation(),
              diag::err_omp_bind_required_on_loop);
-      } else if (ParentDirective == OMPD_parallel ||
-                 ParentDirective == OMPD_target_parallel) {
+      } else if (isOpenMPParallelDirective(ParentDirective) &&
+                 !isOpenMPTeamsDirective(ParentDirective)) {
         BindKind = OMPC_BIND_parallel;
-      } else if (ParentDirective == OMPD_teams ||
-                 ParentDirective == OMPD_target_teams) {
+      } else if (isOpenMPNestingTeamsDirective(ParentDirective) ||
+                 (ParentDirective == OMPD_target_teams)) {
        BindKind = OMPC_BIND_teams;
      }
    } else {
-      // bind clause is present, so we should set flag indicating to only
-      // use the clauses that aren't the bind clause for the new directive that
-      // loop is lowered to.
+      // bind clause is present in loop directive. When the loop directive is
+      // changed to a new directive the bind clause is not used. So, we should
+      // set flag indicating to only use the clauses that aren't the
+      // bind clause.
       UseClausesWithoutBind = true;
     }
 
@@ -6203,26 +6220,35 @@
                                 OpenMPDirectiveKind PrevMappedDirective) {
   StmtResult Res = StmtError();
   OpenMPBindClauseKind BindKind = OMPC_BIND_unknown;
+  llvm::SmallVector<OMPClause *> ClausesWithoutBind;
+  bool UseClausesWithoutBind = false;
+
   if (const OMPBindClause *BC =
           OMPExecutableDirective::getSingleClause<OMPBindClause>(Clauses))
     BindKind = BC->getBindKind();
+
+  // Variable used to note down the DirectiveKind because mapLoopConstruct may
+  // change "Kind" variable, due to mapping of "omp loop" to other directives.
+  OpenMPDirectiveKind DK = Kind;
+  if ((Kind == OMPD_loop) || (PrevMappedDirective == OMPD_loop)) {
+    UseClausesWithoutBind = mapLoopConstruct(
+        ClausesWithoutBind, Clauses, BindKind, Kind, PrevMappedDirective,
+        StartLoc, EndLoc, DirName, CancelRegion);
+    DK = OMPD_loop;
+  }
+  // First check CancelRegion which is then used in checkNestingOfRegions.
   if (checkCancelRegion(*this, Kind, CancelRegion, StartLoc) ||
-      checkNestingOfRegions(*this, DSAStack, Kind, DirName, CancelRegion,
-                            BindKind, StartLoc))
+      checkNestingOfRegions(*this, DSAStack, DK, DirName, CancelRegion,
+                            BindKind, StartLoc)) {
     return StmtError();
+  }
 
   // Report affected OpenMP target offloading behavior when in HIP lang-mode.
   if (getLangOpts().HIP && (isOpenMPTargetExecutionDirective(Kind) ||
                             isOpenMPTargetDataManagementDirective(Kind)))
     Diag(StartLoc, diag::warn_hip_omp_target_directives);
 
-  llvm::SmallVector<OMPClause *> ClausesWithoutBind;
-  bool UseClausesWithoutBind = false;
-
-  UseClausesWithoutBind = mapLoopConstruct(ClausesWithoutBind, Clauses,
-                                           BindKind, Kind, PrevMappedDirective);
-
   llvm::SmallVector<OMPClause *, 8> ClausesWithImplicit;
   VarsWithInheritedDSAType VarsWithInheritedDSA;
   bool ErrorFound = false;
diff --git a/clang/test/OpenMP/loop_bind_messages.cpp b/clang/test/OpenMP/loop_bind_messages.cpp
--- a/clang/test/OpenMP/loop_bind_messages.cpp
+++ b/clang/test/OpenMP/loop_bind_messages.cpp
@@ -4,6 +4,7 @@
 
 #define NNN 50
 int aaa[NNN];
+int aaa2[NNN][NNN];
 
 void parallel_loop() {
   #pragma omp parallel
@@ -15,6 +16,91 @@
   }
 }
 
+void parallel_for_AND_loop_bind() {
+  #pragma omp parallel for
+  for (int i = 0 ; i < NNN ; i++) {
+    #pragma omp loop bind(parallel) // expected-error{{region cannot be closely nested inside 'parallel for' region; perhaps you forget to enclose 'omp loop' directive into a parallel region?}}
+    for (int j = 0 ; j < NNN ; j++) {
+      aaa2[i][j] = i+j;
+    }
+  }
+}
+
+void parallel_nowait() {
+  #pragma omp parallel
+  #pragma omp for nowait
+  for (int i = 0 ; i < NNN ; i++) {
+    #pragma omp loop bind(parallel) // expected-error{{region cannot be closely nested inside 'for' region; perhaps you forget to enclose 'omp loop' directive into a parallel region?}}
+    for (int j = 0 ; j < NNN ; j++) {
+      aaa2[i][j] = i+j;
+    }
+  }
+}
+
+void parallel_for_with_nothing() {
+  #pragma omp parallel for
+  for (int i = 0 ; i < NNN ; i++) {
+    #pragma omp nothing
+    #pragma omp loop // expected-error{{region cannot be closely nested inside 'parallel for' region; perhaps you forget to enclose 'omp loop' directive into a parallel region?}}
+    for (int j = 0 ; j < NNN ; j++) {
+      aaa2[i][j] = i+j;
+    }
+  }
+}
+
+void parallel_targetfor_with_loop_bind() {
+  #pragma omp target teams distribute parallel for
+  for (int i = 0 ; i < NNN ; i++) {
+    #pragma omp loop bind(parallel) // expected-error{{region cannot be closely nested inside 'target teams distribute parallel for' region; perhaps you forget to enclose 'omp loop' directive into a parallel region?}}
+    for (int j = 0 ; j < NNN ; j++) {
+      aaa2[i][j] = i+j;
+    }
+  }
+}
+
+void parallel_targetparallel_with_loop() {
+  #pragma omp target parallel
+  for (int i = 0 ; i < NNN ; i++) {
+    #pragma omp loop bind(parallel)
+    for (int j = 0 ; j < NNN ; j++) {
+      aaa2[i][j] = i+j;
+    }
+  }
+}
+
+void loop_bind_AND_loop_bind() {
+  #pragma omp parallel for
+  for (int i = 0; i < 100; ++i) {
+    #pragma omp loop bind(parallel) // expected-error{{region cannot be closely nested inside 'parallel for' region; perhaps you forget to enclose 'omp loop' directive into a parallel region?}}
+    for (int i = 0 ; i < NNN ; i++) {
+      #pragma omp loop bind(parallel) // expected-error{{region cannot be closely nested inside 'loop' region; perhaps you forget to enclose 'omp loop' directive into a parallel region?}}
+      for (int j = 0 ; j < NNN ; j++) {
+        aaa[j] = j*NNN;
+      }
+    }
+  }
+}
+
+void parallel_with_sections_loop() {
+  #pragma omp parallel
+  {
+     #pragma omp sections
+     {
+        for (int i = 0 ; i < NNN ; i++) {
+          #pragma omp loop bind(parallel) // expected-error{{region cannot be closely nested inside 'sections' region; perhaps you forget to enclose 'omp loop' directive into a parallel region?}}
+          for (int j = 0 ; j < NNN ; j++) {
+            aaa2[i][j] = i+j;
+          }
+        }
+
+        #pragma omp section
+        {
+          aaa[NNN-1] = NNN;
+        }
+     }
+  }
+}
+
 void teams_loop() {
   int var1, var2;
 
@@ -34,17 +120,23 @@
    }
 }
 
-void orphan_loop_with_bind() {
-  #pragma omp loop bind(parallel)
-  for (int j = 0 ; j < NNN ; j++) {
-    aaa[j] = j*NNN;
+void teams_targetteams_with_loop() {
+  #pragma omp target teams
+  for (int i = 0 ; i < NNN ; i++) {
+    #pragma omp loop bind(teams)
+    for (int j = 0 ; j < NNN ; j++) {
+      aaa2[i][j] = i+j;
+    }
   }
 }
 
-void orphan_loop_no_bind() {
-  #pragma omp loop // expected-error{{expected 'bind' clause for 'loop' construct without an enclosing OpenMP construct}}
-  for (int j = 0 ; j < NNN ; j++) {
-    aaa[j] = j*NNN;
+void teams_targetfor_with_loop_bind() {
+  #pragma omp target teams distribute parallel for
+  for (int i = 0 ; i < NNN ; i++) {
+    #pragma omp loop bind(teams) // expected-error{{region cannot be closely nested inside 'target teams distribute parallel for' region; perhaps you forget to enclose 'omp loop' directive into a teams region?}}
+    for (int j = 0 ; j < NNN ; j++) {
+      aaa2[i][j] = i+j;
+    }
   }
 }
 
@@ -65,12 +157,80 @@
   }
 }
 
+void teams_loop_distribute() {
+  int total = 0;
+
+  #pragma omp teams num_teams(8) thread_limit(256)
+  #pragma omp distribute parallel for dist_schedule(static, 1024) \
+                                      schedule(static, 64)
+  for (int i = 0; i < NNN; i++) {
+    #pragma omp loop bind(teams) // expected-error{{'distribute parallel for' region; perhaps you forget to enclose 'omp loop' directive into a teams region?}}
+    for (int j = 0; j < NNN; j++) {
+      aaa2[i][j] = i+j;
+    }
+  }
+}
+
+void parallel_for_with_loop_teams_bind(){
+  #pragma omp parallel for
+  for (int i = 0; i < NNN; i++) {
+    #pragma omp loop bind(teams) // expected-error{{region cannot be closely nested inside 'parallel for' region; perhaps you forget to enclose 'omp loop' directive into a teams region?}}
+    for (int j = 0 ; j < NNN ; j++) {
+      aaa[i] = i+i*NNN;
+    }
+  }
+}
+
+void teams_with_loop_thread_bind(){
+  #pragma omp teams
+  for (int i = 0; i < NNN; i++) {
+    #pragma omp loop bind(thread)
+    for (int j = 0 ; j < NNN ; j++) {
+      aaa[i] = i+i*NNN;
+    }
+  }
+}
+
+void orphan_loop_no_bind() {
+  #pragma omp loop // expected-error{{expected 'bind' clause for 'loop' construct without an enclosing OpenMP construct}}
+  for (int j = 0 ; j < NNN ; j++) {
+    aaa[j] = j*NNN;
+  }
+}
+
+void orphan_loop_parallel_bind() {
+  #pragma omp loop bind(parallel)
+  for (int j = 0 ; j < NNN ; j++) {
+    aaa[j] = j*NNN;
+  }
+}
+
+void orphan_loop_teams_bind(){
+  #pragma omp loop bind(teams)
+  for (int i = 0; i < NNN; i++) {
+    aaa[i] = i+i*NNN;
+  }
+}
+
 int main(int argc, char *argv[]) {
   parallel_loop();
+  parallel_for_AND_loop_bind();
+  parallel_nowait();
+  parallel_for_with_nothing();
+  parallel_targetfor_with_loop_bind();
+  parallel_targetparallel_with_loop();
+  loop_bind_AND_loop_bind();
+  parallel_with_sections_loop();
   teams_loop();
-  orphan_loop_with_bind();
-  orphan_loop_no_bind();
+  teams_targetteams_with_loop();
+  teams_targetfor_with_loop_bind();
   teams_loop_reduction();
+  teams_loop_distribute();
+  parallel_for_with_loop_teams_bind();
+  teams_with_loop_thread_bind();
+  orphan_loop_no_bind();
+  orphan_loop_parallel_bind();
+  orphan_loop_teams_bind();
 }
 
 #endif
diff --git a/clang/test/PCH/pragma-loop.cpp b/clang/test/PCH/pragma-loop.cpp
--- a/clang/test/PCH/pragma-loop.cpp
+++ b/clang/test/PCH/pragma-loop.cpp
@@ -116,9 +116,13 @@
 
 inline void run10(int *List, int Length) {
   int i = 0;
-#pragma omp loop bind(teams)
+  int j = 0;
+  #pragma omp teams
   for (int i = 0; i < Length; i++) {
-    List[i] = i;
+    #pragma omp loop bind(teams)
+    for (int j = 0; j < Length; j++) {
+      List[i] = i+j;
+    }
   }
 }
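
Note for reviewers: the snippet below is an illustrative sketch only, not part of the patch, and the function names are invented. It condenses the behavior the new tests exercise: an orphaned 'omp loop' (no enclosing OpenMP construct) must carry an explicit bind clause, a 'loop' without a bind clause takes its binding from the enclosing parallel or teams region, and a 'loop' bound to parallel or teams may not be closely nested inside a worksharing region such as 'parallel for'.

// Illustrative sketch only -- not part of the patch; names are invented.
#define N 64
int out[N];

void orphaned_loop_with_bind() {
  // OK: an orphaned 'omp loop' must have an explicit bind clause;
  // per the Sema.h comment above, bind(parallel) maps it to OMPD_for.
  #pragma omp loop bind(parallel)
  for (int i = 0; i < N; ++i)
    out[i] = i;
}

void implicit_bind_from_parent() {
  // OK: no bind clause, so mapLoopConstruct derives the binding from the
  // enclosing region; inside 'omp parallel' this becomes bind(parallel).
  #pragma omp parallel
  {
    #pragma omp loop
    for (int i = 0; i < N; ++i)
      out[i] = i;
  }
}

void rejected_nesting() {
  #pragma omp parallel for
  for (int i = 0; i < N; ++i) {
    // Adding '#pragma omp loop bind(parallel)' here is now rejected with:
    // "region cannot be closely nested inside 'parallel for' region; perhaps
    //  you forget to enclose 'omp loop' directive into a parallel region?"
    out[i] = i;
  }
}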