Index: clang/include/clang-c/Index.h
===================================================================
--- clang/include/clang-c/Index.h
+++ clang/include/clang-c/Index.h
@@ -2608,7 +2608,11 @@
    */
   CXCursor_OMPTargetTeamsGenericLoopDirective = 297,
 
-  CXCursor_LastStmt = CXCursor_OMPTargetTeamsGenericLoopDirective,
+  /** OpenMP parallel loop directive.
+   */
+  CXCursor_OMPParallelGenericLoopDirective = 298,
+
+  CXCursor_LastStmt = CXCursor_OMPParallelGenericLoopDirective,
 
   /**
    * Cursor that represents the translation unit itself.
Index: clang/include/clang/AST/RecursiveASTVisitor.h
===================================================================
--- clang/include/clang/AST/RecursiveASTVisitor.h
+++ clang/include/clang/AST/RecursiveASTVisitor.h
@@ -3080,6 +3080,8 @@
 DEF_TRAVERSE_STMT(OMPTargetTeamsGenericLoopDirective,
                   { TRY_TO(TraverseOMPExecutableDirective(S)); })
 
+DEF_TRAVERSE_STMT(OMPParallelGenericLoopDirective,
+                  { TRY_TO(TraverseOMPExecutableDirective(S)); })
+
 // OpenMP clauses.
 template <typename Derived>
 bool RecursiveASTVisitor<Derived>::TraverseOMPClause(OMPClause *C) {
Index: clang/include/clang/AST/StmtOpenMP.h
===================================================================
--- clang/include/clang/AST/StmtOpenMP.h
+++ clang/include/clang/AST/StmtOpenMP.h
@@ -1528,6 +1528,7 @@
            T->getStmtClass() == OMPGenericLoopDirectiveClass ||
            T->getStmtClass() == OMPTeamsGenericLoopDirectiveClass ||
            T->getStmtClass() == OMPTargetTeamsGenericLoopDirectiveClass ||
+           T->getStmtClass() == OMPParallelGenericLoopDirectiveClass ||
            T->getStmtClass() == OMPParallelMasterTaskLoopDirectiveClass ||
            T->getStmtClass() == OMPParallelMasterTaskLoopSimdDirectiveClass ||
            T->getStmtClass() == OMPDistributeDirectiveClass ||
@@ -5701,6 +5702,71 @@
     return T->getStmtClass() == OMPTargetTeamsGenericLoopDirectiveClass;
   }
 };
+
+/// This represents '#pragma omp parallel loop' directive.
+///
+/// \code
+/// #pragma omp parallel loop private(a,b) order(concurrent)
+/// \endcode
+/// In this example directive '#pragma omp parallel loop' has
+/// clauses 'private' with the variables 'a' and 'b', and order(concurrent).
+///
+class OMPParallelGenericLoopDirective final : public OMPLoopDirective {
+  friend class ASTStmtReader;
+  friend class OMPExecutableDirective;
+  /// Build directive with the given start and end location.
+  ///
+  /// \param StartLoc Starting location of the directive kind.
+  /// \param EndLoc Ending location of the directive.
+  /// \param CollapsedNum Number of collapsed nested loops.
+  ///
+  OMPParallelGenericLoopDirective(SourceLocation StartLoc,
+                                  SourceLocation EndLoc, unsigned CollapsedNum)
+      : OMPLoopDirective(OMPParallelGenericLoopDirectiveClass,
+                         llvm::omp::OMPD_parallel_loop, StartLoc, EndLoc,
+                         CollapsedNum) {}
+
+  /// Build an empty directive.
+  ///
+  /// \param CollapsedNum Number of collapsed nested loops.
+  ///
+  explicit OMPParallelGenericLoopDirective(unsigned CollapsedNum)
+      : OMPLoopDirective(OMPParallelGenericLoopDirectiveClass,
+                         llvm::omp::OMPD_parallel_loop, SourceLocation(),
+                         SourceLocation(), CollapsedNum) {}
+
+public:
+  /// Creates directive with a list of \p Clauses.
+  ///
+  /// \param C AST context.
+  /// \param StartLoc Starting location of the directive kind.
+  /// \param EndLoc Ending Location of the directive.
+  /// \param CollapsedNum Number of collapsed loops.
+  /// \param Clauses List of clauses.
+  /// \param AssociatedStmt Statement, associated with the directive.
+  /// \param Exprs Helper expressions for CodeGen.
+  ///
+  static OMPParallelGenericLoopDirective *
+  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
+         Stmt *AssociatedStmt, const HelperExprs &Exprs);
+
+  /// Creates an empty directive with the place
+  /// for \a NumClauses clauses.
+  ///
+  /// \param C AST context.
+  /// \param CollapsedNum Number of collapsed nested loops.
+  /// \param NumClauses Number of clauses.
+  ///
+  static OMPParallelGenericLoopDirective *CreateEmpty(const ASTContext &C,
+                                                      unsigned NumClauses,
+                                                      unsigned CollapsedNum,
+                                                      EmptyShell);
+
+  static bool classof(const Stmt *T) {
+    return T->getStmtClass() == OMPParallelGenericLoopDirectiveClass;
+  }
+};
 } // end namespace clang
 
 #endif
Index: clang/include/clang/Basic/StmtNodes.td
===================================================================
--- clang/include/clang/Basic/StmtNodes.td
+++ clang/include/clang/Basic/StmtNodes.td
@@ -285,3 +285,4 @@
 def OMPGenericLoopDirective : StmtNode<OMPLoopDirective>;
 def OMPTeamsGenericLoopDirective : StmtNode<OMPLoopDirective>;
 def OMPTargetTeamsGenericLoopDirective : StmtNode<OMPLoopDirective>;
+def OMPParallelGenericLoopDirective : StmtNode<OMPLoopDirective>;
Index: clang/include/clang/Sema/Sema.h
===================================================================
--- clang/include/clang/Sema/Sema.h
+++ clang/include/clang/Sema/Sema.h
@@ -10914,6 +10914,11 @@
   StmtResult ActOnOpenMPTargetTeamsGenericLoopDirective(
       ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
       SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
+  /// Called on well-formed '\#pragma omp parallel loop' after parsing of the
+  /// associated statement.
+  StmtResult ActOnOpenMPParallelGenericLoopDirective(
+      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
+      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
   /// Called on well-formed '\#pragma omp cancellation point'.
   StmtResult ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,
Index: clang/include/clang/Serialization/ASTBitCodes.h
===================================================================
--- clang/include/clang/Serialization/ASTBitCodes.h
+++ clang/include/clang/Serialization/ASTBitCodes.h
@@ -1963,6 +1963,7 @@
       STMT_OMP_GENERIC_LOOP_DIRECTIVE,
       STMT_OMP_TEAMS_GENERIC_LOOP_DIRECTIVE,
       STMT_OMP_TARGET_TEAMS_GENERIC_LOOP_DIRECTIVE,
+      STMT_OMP_PARALLEL_GENERIC_LOOP_DIRECTIVE,
       EXPR_OMP_ARRAY_SECTION,
       EXPR_OMP_ARRAY_SHAPING,
       EXPR_OMP_ITERATOR,
Index: clang/lib/AST/StmtOpenMP.cpp
===================================================================
--- clang/lib/AST/StmtOpenMP.cpp
+++ clang/lib/AST/StmtOpenMP.cpp
@@ -2225,3 +2225,46 @@
       C, NumClauses, /*HasAssociatedStmt=*/true,
       numLoopChildren(CollapsedNum, OMPD_target_teams_loop), CollapsedNum);
 }
+
+OMPParallelGenericLoopDirective *OMPParallelGenericLoopDirective::Create(
+    const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+    unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
+    const HelperExprs &Exprs) {
+  auto *Dir = createDirective<OMPParallelGenericLoopDirective>(
+      C, Clauses, AssociatedStmt,
+      numLoopChildren(CollapsedNum, OMPD_parallel_loop), StartLoc, EndLoc,
+      CollapsedNum);
+  Dir->setIterationVariable(Exprs.IterationVarRef);
+  Dir->setLastIteration(Exprs.LastIteration);
+  Dir->setCalcLastIteration(Exprs.CalcLastIteration);
+  Dir->setPreCond(Exprs.PreCond);
+  Dir->setCond(Exprs.Cond);
+  Dir->setInit(Exprs.Init);
+  Dir->setInc(Exprs.Inc);
+  Dir->setIsLastIterVariable(Exprs.IL);
+  Dir->setLowerBoundVariable(Exprs.LB);
+  Dir->setUpperBoundVariable(Exprs.UB);
+  Dir->setStrideVariable(Exprs.ST);
+  Dir->setEnsureUpperBound(Exprs.EUB);
+  Dir->setNextLowerBound(Exprs.NLB);
+  Dir->setNextUpperBound(Exprs.NUB);
+  Dir->setNumIterations(Exprs.NumIterations);
+  Dir->setCounters(Exprs.Counters);
+  Dir->setPrivateCounters(Exprs.PrivateCounters);
+  Dir->setInits(Exprs.Inits);
+  Dir->setUpdates(Exprs.Updates);
+  Dir->setFinals(Exprs.Finals);
+  Dir->setDependentCounters(Exprs.DependentCounters);
+  Dir->setDependentInits(Exprs.DependentInits);
+  Dir->setFinalsConditions(Exprs.FinalsConditions);
+  Dir->setPreInits(Exprs.PreInits);
+  return Dir;
+}
+
+OMPParallelGenericLoopDirective *OMPParallelGenericLoopDirective::CreateEmpty(
+    const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
+    EmptyShell) {
+  return createEmptyDirective<OMPParallelGenericLoopDirective>(
+      C, NumClauses, /*HasAssociatedStmt=*/true,
+      numLoopChildren(CollapsedNum, OMPD_parallel_loop), CollapsedNum);
+}
Index: clang/lib/AST/StmtPrinter.cpp
===================================================================
--- clang/lib/AST/StmtPrinter.cpp
+++ clang/lib/AST/StmtPrinter.cpp
@@ -1017,6 +1017,12 @@
   PrintOMPExecutableDirective(Node);
 }
 
+void StmtPrinter::VisitOMPParallelGenericLoopDirective(
+    OMPParallelGenericLoopDirective *Node) {
+  Indent() << "#pragma omp parallel loop";
+  PrintOMPExecutableDirective(Node);
+}
+
 //===----------------------------------------------------------------------===//
 // Expr printing methods.
 //===----------------------------------------------------------------------===//
Index: clang/lib/AST/StmtProfile.cpp
===================================================================
--- clang/lib/AST/StmtProfile.cpp
+++ clang/lib/AST/StmtProfile.cpp
@@ -1217,6 +1217,11 @@
   VisitOMPLoopDirective(S);
 }
 
+void StmtProfiler::VisitOMPParallelGenericLoopDirective(
+    const OMPParallelGenericLoopDirective *S) {
+  VisitOMPLoopDirective(S);
+}
+
 void StmtProfiler::VisitExpr(const Expr *S) {
   VisitStmt(S);
 }
Index: clang/lib/Basic/OpenMPKinds.cpp
===================================================================
--- clang/lib/Basic/OpenMPKinds.cpp
+++ clang/lib/Basic/OpenMPKinds.cpp
@@ -496,7 +496,8 @@
          DKind == OMPD_target_teams_distribute_parallel_for_simd ||
          DKind == OMPD_target_teams_distribute_simd || DKind == OMPD_tile ||
          DKind == OMPD_unroll || DKind == OMPD_loop ||
-         DKind == OMPD_teams_loop || DKind == OMPD_target_teams_loop;
+         DKind == OMPD_teams_loop || DKind == OMPD_target_teams_loop ||
+         DKind == OMPD_parallel_loop;
 }
 
 bool clang::isOpenMPWorksharingDirective(OpenMPDirectiveKind DKind) {
@@ -534,7 +535,8 @@
          DKind == OMPD_target_teams_distribute_parallel_for_simd ||
          DKind == OMPD_parallel_master ||
          DKind == OMPD_parallel_master_taskloop ||
-         DKind == OMPD_parallel_master_taskloop_simd;
+         DKind == OMPD_parallel_master_taskloop_simd ||
+         DKind == OMPD_parallel_loop;
 }
 
 bool clang::isOpenMPTargetExecutionDirective(OpenMPDirectiveKind DKind) {
@@ -604,7 +606,7 @@
 bool clang::isOpenMPGenericLoopDirective(OpenMPDirectiveKind Kind) {
   return Kind == OMPD_loop || Kind == OMPD_teams_loop ||
-         Kind == OMPD_target_teams_loop;
+         Kind == OMPD_target_teams_loop || Kind == OMPD_parallel_loop;
 }
 
 bool clang::isOpenMPPrivate(OpenMPClauseKind Kind) {
@@ -650,6 +652,7 @@
   case OMPD_parallel_sections:
   case OMPD_distribute_parallel_for:
   case OMPD_distribute_parallel_for_simd:
+  case OMPD_parallel_loop:
     CaptureRegions.push_back(OMPD_parallel);
     break;
   case OMPD_target_teams:
Index: clang/lib/CodeGen/CGStmt.cpp
===================================================================
--- clang/lib/CodeGen/CGStmt.cpp
+++ clang/lib/CodeGen/CGStmt.cpp
@@ -402,6 +402,9 @@
   case Stmt::OMPTargetTeamsGenericLoopDirectiveClass:
     llvm_unreachable("target teams loop directive not supported yet.");
     break;
+  case Stmt::OMPParallelGenericLoopDirectiveClass:
+    llvm_unreachable("parallel loop directive not supported yet.");
+    break;
   }
 }
Index: clang/lib/Parse/ParseOpenMP.cpp
===================================================================
--- clang/lib/Parse/ParseOpenMP.cpp
+++ clang/lib/Parse/ParseOpenMP.cpp
@@ -150,6 +150,7 @@
       {OMPD_for, OMPD_simd, OMPD_for_simd},
       {OMPD_parallel, OMPD_for, OMPD_parallel_for},
       {OMPD_parallel_for, OMPD_simd, OMPD_parallel_for_simd},
+      {OMPD_parallel, OMPD_loop, OMPD_parallel_loop},
       {OMPD_parallel, OMPD_sections, OMPD_parallel_sections},
       {OMPD_taskloop, OMPD_simd, OMPD_taskloop_simd},
       {OMPD_target, OMPD_parallel, OMPD_target_parallel},
@@ -2403,6 +2404,7 @@
   case OMPD_loop:
   case OMPD_teams_loop:
   case OMPD_target_teams_loop:
+  case OMPD_parallel_loop:
     Diag(Tok, diag::err_omp_unexpected_directive)
         << 1 << getOpenMPDirectiveName(DKind);
     break;
@@ -2760,6 +2762,7 @@
   case OMPD_loop:
   case OMPD_teams_loop:
   case OMPD_target_teams_loop:
+  case OMPD_parallel_loop:
   case OMPD_taskloop:
   case OMPD_taskloop_simd:
   case OMPD_master_taskloop:
Index: clang/lib/Sema/SemaExceptionSpec.cpp
===================================================================
--- clang/lib/Sema/SemaExceptionSpec.cpp
+++ clang/lib/Sema/SemaExceptionSpec.cpp
@@ -1501,6 +1501,7 @@
   case Stmt::OMPGenericLoopDirectiveClass:
   case Stmt::OMPTeamsGenericLoopDirectiveClass:
   case Stmt::OMPTargetTeamsGenericLoopDirectiveClass:
+  case Stmt::OMPParallelGenericLoopDirectiveClass:
   case Stmt::ReturnStmtClass:
   case Stmt::SEHExceptStmtClass:
   case Stmt::SEHFinallyStmtClass:
Index: clang/lib/Sema/SemaOpenMP.cpp
===================================================================
--- clang/lib/Sema/SemaOpenMP.cpp
+++ clang/lib/Sema/SemaOpenMP.cpp
@@ -3935,6 +3935,7 @@
   case OMPD_parallel_for_simd:
   case OMPD_parallel_sections:
   case OMPD_parallel_master:
+  case OMPD_parallel_loop:
   case OMPD_teams:
   case OMPD_teams_distribute:
   case OMPD_teams_distribute_simd: {
@@ -6320,6 +6321,10 @@
     Res = ActOnOpenMPTargetTeamsGenericLoopDirective(
         ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
     break;
+  case OMPD_parallel_loop:
+    Res = ActOnOpenMPParallelGenericLoopDirective(
+        ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
+    break;
   case OMPD_declare_target:
   case OMPD_end_declare_target:
   case OMPD_threadprivate:
@@ -10312,6 +10317,54 @@
       Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
 }
 
+StmtResult Sema::ActOnOpenMPParallelGenericLoopDirective(
+    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
+    SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
+  if (!AStmt)
+    return StmtError();
+
+  // OpenMP 5.1 [2.11.7, loop construct, Restrictions]
+  // A list item may not appear in a lastprivate clause unless it is the
+  // loop iteration variable of a loop that is associated with the construct.
+  if (checkGenericLoopLastprivate(*this, Clauses, OMPD_parallel_loop, DSAStack))
+    return StmtError();
+
+  auto *CS = cast<CapturedStmt>(AStmt);
+  // 1.2.2 OpenMP Language Terminology
+  // Structured block - An executable statement with a single entry at the
+  // top and a single exit at the bottom.
+  // The point of exit cannot be a branch out of the structured block.
+  // longjmp() and throw() must not violate the entry/exit criteria.
+  CS->getCapturedDecl()->setNothrow();
+  for (int ThisCaptureLevel = getOpenMPCaptureLevels(OMPD_parallel_loop);
+       ThisCaptureLevel > 1; --ThisCaptureLevel) {
+    CS = cast<CapturedStmt>(CS->getCapturedStmt());
+    // 1.2.2 OpenMP Language Terminology
+    // Structured block - An executable statement with a single entry at the
+    // top and a single exit at the bottom.
+    // The point of exit cannot be a branch out of the structured block.
+    // longjmp() and throw() must not violate the entry/exit criteria.
+    CS->getCapturedDecl()->setNothrow();
+  }
+
+  OMPLoopDirective::HelperExprs B;
+  // In presence of clause 'collapse', it will define the nested loops number.
+  unsigned NestedLoopCount =
+      checkOpenMPLoop(OMPD_parallel_loop, getCollapseNumberExpr(Clauses),
+                      /*OrderedLoopCountExpr=*/nullptr, CS, *this, *DSAStack,
+                      VarsWithImplicitDSA, B);
+  if (NestedLoopCount == 0)
+    return StmtError();
+
+  assert((CurContext->isDependentContext() || B.builtAll()) &&
+         "omp loop exprs were not built");
+
+  setFunctionHasBranchProtectedScope();
+
+  return OMPParallelGenericLoopDirective::Create(
+      Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
+}
+
 StmtResult Sema::ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
                                             Stmt *AStmt,
                                             SourceLocation StartLoc,
@@ -14619,6 +14672,7 @@
   case OMPD_parallel_master:
   case OMPD_parallel_sections:
   case OMPD_parallel_for:
+  case OMPD_parallel_loop:
   case OMPD_target:
   case OMPD_target_teams:
   case OMPD_target_teams_distribute:
@@ -14693,6 +14747,7 @@
   case OMPD_parallel_sections:
   case OMPD_parallel_for:
   case OMPD_parallel_for_simd:
+  case OMPD_parallel_loop:
   case OMPD_distribute_parallel_for:
   case OMPD_distribute_parallel_for_simd:
   case OMPD_parallel_master_taskloop:
@@ -14798,6 +14853,7 @@
   case OMPD_parallel_sections:
   case OMPD_parallel_for:
   case OMPD_parallel_for_simd:
+  case OMPD_parallel_loop:
   case OMPD_target:
   case OMPD_target_simd:
   case OMPD_target_parallel:
@@ -14882,6 +14938,7 @@
   case OMPD_parallel_sections:
   case OMPD_parallel_for:
   case OMPD_parallel_for_simd:
+  case OMPD_parallel_loop:
   case OMPD_target:
   case OMPD_target_simd:
   case OMPD_target_parallel:
@@ -14990,6 +15047,7 @@
   case OMPD_loop:
   case OMPD_teams_loop:
   case OMPD_target_teams_loop:
+  case OMPD_parallel_loop:
   case OMPD_simd:
   case OMPD_tile:
   case OMPD_unroll:
@@ -15074,6 +15132,7 @@
   case OMPD_loop:
   case OMPD_teams_loop:
   case OMPD_target_teams_loop:
+  case OMPD_parallel_loop:
  case OMPD_simd:
   case OMPD_tile:
   case OMPD_unroll:
@@ -15159,6 +15218,7 @@
   case OMPD_end_declare_target:
   case OMPD_loop:
   case OMPD_teams_loop:
+  case OMPD_parallel_loop:
   case OMPD_simd:
   case OMPD_tile:
   case OMPD_unroll:
@@ -15245,6 +15305,7 @@
   case OMPD_loop:
   case OMPD_teams_loop:
   case OMPD_target_teams_loop:
+  case OMPD_parallel_loop:
   case OMPD_simd:
   case OMPD_tile:
   case OMPD_unroll:
Index: clang/lib/Sema/TreeTransform.h
===================================================================
--- clang/lib/Sema/TreeTransform.h
+++ clang/lib/Sema/TreeTransform.h
@@ -9271,6 +9271,17 @@
   return Res;
 }
 
+template <typename Derived>
+StmtResult TreeTransform<Derived>::TransformOMPParallelGenericLoopDirective(
+    OMPParallelGenericLoopDirective *D) {
+  DeclarationNameInfo DirName;
+  getDerived().getSema().StartOpenMPDSABlock(OMPD_parallel_loop, DirName,
+                                             nullptr, D->getBeginLoc());
+  StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+  getDerived().getSema().EndOpenMPDSABlock(Res.get());
+  return Res;
+}
+
 //===----------------------------------------------------------------------===//
 // OpenMP clause transformation
 //===----------------------------------------------------------------------===//
Index: clang/lib/Serialization/ASTReaderStmt.cpp
===================================================================
--- clang/lib/Serialization/ASTReaderStmt.cpp
+++ clang/lib/Serialization/ASTReaderStmt.cpp
@@ -2648,6 +2648,11 @@
   VisitOMPLoopDirective(D);
 }
 
+void ASTStmtReader::VisitOMPParallelGenericLoopDirective(
+    OMPParallelGenericLoopDirective *D) {
+  VisitOMPLoopDirective(D);
+}
+
 //===----------------------------------------------------------------------===//
 // ASTReader Implementation
 //===----------------------------------------------------------------------===//
@@ -3619,6 +3624,14 @@
     break;
   }
 
+  case STMT_OMP_PARALLEL_GENERIC_LOOP_DIRECTIVE: {
+    unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields];
+    unsigned NumClauses = Record[ASTStmtReader::NumStmtFields + 1];
+    S = OMPParallelGenericLoopDirective::CreateEmpty(Context, NumClauses,
+                                                     CollapsedNum, Empty);
+    break;
+  }
+
   case EXPR_CXX_OPERATOR_CALL:
     S = CXXOperatorCallExpr::CreateEmpty(
         Context, /*NumArgs=*/Record[ASTStmtReader::NumExprFields],
Index: clang/lib/Serialization/ASTWriterStmt.cpp
===================================================================
--- clang/lib/Serialization/ASTWriterStmt.cpp
+++ clang/lib/Serialization/ASTWriterStmt.cpp
@@ -2607,6 +2607,12 @@
   Code = serialization::STMT_OMP_TARGET_TEAMS_GENERIC_LOOP_DIRECTIVE;
 }
 
+void ASTStmtWriter::VisitOMPParallelGenericLoopDirective(
+    OMPParallelGenericLoopDirective *D) {
+  VisitOMPLoopDirective(D);
+  Code = serialization::STMT_OMP_PARALLEL_GENERIC_LOOP_DIRECTIVE;
+}
+
 //===----------------------------------------------------------------------===//
 // ASTWriter Implementation
 //===----------------------------------------------------------------------===//
Index: clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
===================================================================
--- clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
+++ clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
@@ -1299,6 +1299,7 @@
     case Stmt::OMPGenericLoopDirectiveClass:
     case Stmt::OMPTeamsGenericLoopDirectiveClass:
    case Stmt::OMPTargetTeamsGenericLoopDirectiveClass:
+    case Stmt::OMPParallelGenericLoopDirectiveClass:
    case Stmt::CapturedStmtClass:
     case Stmt::OMPUnrollDirectiveClass:
     case Stmt::OMPMetaDirectiveClass: {
Index: clang/test/Analysis/cfg-openmp.cpp
===================================================================
--- clang/test/Analysis/cfg-openmp.cpp
+++ clang/test/Analysis/cfg-openmp.cpp
@@ -725,3 +725,25 @@
   for (int i = 0; i < 10; ++i)
     argc = x;
 }
+
+// CHECK-LABEL: void parallelloop(int argc)
+void parallelloop(int argc) {
+  int x, cond, fp, rd;
+// CHECK-DAG: [B3]
+// CHECK-DAG: [[#PFB:]]: x
+// CHECK-DAG: [[#PFB+1]]: [B3.[[#PFB]]] (ImplicitCastExpr, LValueToRValue, int)
+// CHECK-DAG: [[#PFB+2]]: argc
+// CHECK-DAG: [[#PFB+3]]: [B3.[[#PFB+2]]] = [B3.[[#PFB+1]]]
+// CHECK-DAG: [B1]
+// CHECK-DAG: [[#PF:]]: cond
+// CHECK-DAG: [[#PF+1]]: [B1.[[#PF]]] (ImplicitCastExpr, LValueToRValue, int)
+// CHECK-DAG: [[#PF+2]]: [B1.[[#PF+1]]] (ImplicitCastExpr, IntegralToBoolean, _Bool)
+// CHECK-DAG: [[#PF+3]]: fp
+// CHECK-DAG: [[#PF+4]]: rd
+// CHECK-DAG: [[#PF+5]]: #pragma omp parallel loop if(cond) firstprivate(fp) reduction(&: rd)
+// CHECK-DAG: for (int i = 0;
+// CHECK-DAG: [B3.[[#PFB+3]]];
+#pragma omp parallel loop if(cond) firstprivate(fp) reduction(&:rd)
+  for (int i = 0; i < 10; ++i)
+    argc = x;
+}
Index: clang/test/OpenMP/parallel_generic_loop_ast_print.cpp
===================================================================
--- /dev/null
+++ clang/test/OpenMP/parallel_generic_loop_ast_print.cpp
@@ -0,0 +1,123 @@
+// RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -fopenmp -fopenmp-version=51 \
+// RUN:   -fsyntax-only -verify %s
+
+// RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -fopenmp -fopenmp-version=51 \
+// RUN:   -ast-print %s | FileCheck %s
+
+// RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -fopenmp -fopenmp-version=51 \
+// RUN:   -emit-pch -o %t %s
+
+// RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -fopenmp -fopenmp-version=51 \
+// RUN:   -include-pch %t -ast-print %s | FileCheck %s
+
+// expected-no-diagnostics
+
+#ifndef HEADER
+#define HEADER
+
+typedef void **omp_allocator_handle_t;
+extern const omp_allocator_handle_t omp_null_allocator;
+extern const omp_allocator_handle_t omp_default_mem_alloc;
+extern const omp_allocator_handle_t omp_large_cap_mem_alloc;
+extern const omp_allocator_handle_t omp_const_mem_alloc;
+extern const omp_allocator_handle_t omp_high_bw_mem_alloc;
+extern const omp_allocator_handle_t omp_low_lat_mem_alloc;
+extern const omp_allocator_handle_t omp_cgroup_mem_alloc;
+extern const omp_allocator_handle_t omp_pteam_mem_alloc;
+extern const omp_allocator_handle_t omp_thread_mem_alloc;
+
+//CHECK: template <typename T, int C> void templ_foo(T t) {
+//CHECK: T j, z;
+//CHECK: #pragma omp parallel loop collapse(C) reduction(+: z) lastprivate(j) bind(thread)
+//CHECK: for (T i = 0; i < t; ++i)
+//CHECK: for (j = 0; j < t; ++j)
+//CHECK: z += i + j;
+//CHECK: }
+
+//CHECK: template<> void templ_foo<int, 2>(int t) {
+//CHECK: int j, z;
+//CHECK: #pragma omp parallel loop collapse(2) reduction(+: z) lastprivate(j) bind(thread)
+//CHECK: for (int i = 0; i < t; ++i)
+//CHECK: for (j = 0; j < t; ++j)
+//CHECK: z += i + j;
+//CHECK: }
+template <typename T, int C>
+void templ_foo(T t) {
+
+  T j,z;
+  #pragma omp parallel loop collapse(C) reduction(+:z) lastprivate(j) bind(thread)
+  for (T i = 0; i < t; ++i)
+    for (j = 0; j < t; ++j)
+      z += i + j;
+}
+
+void bar() {
+  templ_foo<int, 2>(8);
+}
+
+#endif // HEADER
Index: clang/test/OpenMP/parallel_generic_loop_messages.cpp
===================================================================
--- /dev/null
+++ clang/test/OpenMP/parallel_generic_loop_messages.cpp
@@ -0,0 +1,158 @@
+// RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -verify -fopenmp \
+// RUN:   -fopenmp-version=51 -Wuninitialized %s
+
+void foo()
+{
+  int i,j,k;
+  int z;
+
+  // expected-error@+2 {{statement after '#pragma omp parallel loop' must be a for loop}}
+  #pragma omp parallel loop bind(thread)
+  i = 0;
+
+  // OpenMP 5.1 [2.22 Nesting of regions]
+  //
+  // A barrier region may not be closely nested inside a worksharing, loop,
+  // task, taskloop, critical, ordered, atomic, or masked region.
+
+  // expected-error@+3 {{region cannot be closely nested inside 'parallel loop' region}}
+  #pragma omp parallel loop bind(thread)
+  for (i=0; i<1000; ++i) {
+    #pragma omp barrier
+  }
+
+  // A masked region may not be closely nested inside a worksharing, loop,
+  // atomic, task, or taskloop region.
+
+  // expected-error@+3 {{region cannot be closely nested inside 'parallel loop' region}}
+  #pragma omp parallel loop bind(thread)
+  for (i=0; i<1000; ++i) {
+    #pragma omp masked filter(2)
+    { }
+  }
+
+  // An ordered region that corresponds to an ordered construct without any
+  // clause or with the threads or depend clause may not be closely nested
+  // inside a critical, ordered, loop, atomic, task, or taskloop region.
+
+  // expected-error@+3 {{region cannot be closely nested inside 'parallel loop' region; perhaps you forget to enclose 'omp ordered' directive into a for or a parallel for region with 'ordered' clause?}}
+  #pragma omp parallel loop bind(thread)
+  for (i=0; i<1000; ++i) {
+    #pragma omp ordered
+    { }
+  }
+
+  // expected-error@+3 {{region cannot be closely nested inside 'parallel loop' region; perhaps you forget to enclose 'omp ordered' directive into a for or a parallel for region with 'ordered' clause?}}
+  #pragma omp parallel loop bind(thread)
+  for (i=0; i<1000; ++i) {
+    #pragma omp ordered threads
+    { }
+  }
+
+  // expected-error@+3 {{region cannot be closely nested inside 'parallel loop' region; perhaps you forget to enclose 'omp ordered' directive into a for or a parallel for region with 'ordered' clause?}}
+  #pragma omp parallel loop bind(thread)
+  for (i=0; i<1000; ++i) {
+    #pragma omp ordered depend(source)
+  }
+
+  // bind clause
+
+  // expected-error@+1 {{directive '#pragma omp parallel loop' cannot contain more than one 'bind' clause}}
+  #pragma omp parallel loop bind(thread) bind(thread)
+  for (i=0; i<1000; ++i) {
+  }
+
+  // expected-error@+1 {{expected 'teams', 'parallel' or 'thread' in OpenMP clause 'bind'}}
+  #pragma omp parallel loop bind(other)
+  for (i=0; i<1000; ++i) {
+  }
+
+  // collapse clause
+
+  // expected-error@+4 {{expected 2 for loops after '#pragma omp parallel loop', but found only 1}}
+  // expected-note@+1 {{as specified in 'collapse' clause}}
+  #pragma omp parallel loop collapse(2) bind(thread)
+  for (i=0; i<1000; ++i)
+    z = i+11;
+
+  // expected-error@+1 {{directive '#pragma omp parallel loop' cannot contain more than one 'collapse' clause}}
+  #pragma omp parallel loop collapse(2) collapse(2) bind(thread)
+  for (i=0; i<1000; ++i)
+    for (j=0; j<1000; ++j)
+      z = i+j+11;
+
+  // order clause
+
+  // expected-error@+1 {{expected 'concurrent' in OpenMP clause 'order'}}
+  #pragma omp parallel loop order(foo) bind(thread)
+  for (i=0; i<1000; ++i)
+    z = i+11;
+
+  // private clause
+
+  // expected-error@+1 {{use of undeclared identifier 'undef_var'}}
+  #pragma omp parallel loop private(undef_var) bind(thread)
+  for (i=0; i<1000; ++i)
+    z = i+11;
+
+  // lastprivate
+
+  // A list item may not appear in a lastprivate clause unless it is the loop
+  // iteration variable of a loop that is associated with the construct.
+
+  // expected-error@+1 {{only loop iteration variables are allowed in 'lastprivate' clause in 'omp parallel loop' directives}}
+  #pragma omp parallel loop lastprivate(z) bind(thread)
+  for (i=0; i<1000; ++i) {
+    z = i+11;
+  }
+
+  // expected-error@+1 {{only loop iteration variables are allowed in 'lastprivate' clause in 'omp parallel loop' directives}}
+  #pragma omp parallel loop lastprivate(k) collapse(2) bind(thread)
+  for (i=0; i<1000; ++i)
+    for (j=0; j<1000; ++j)
+      for (k=0; k<1000; ++k)
+        z = i+j+k+11;
+
+  // reduction
+
+  // expected-error@+1 {{use of undeclared identifier 'undef_var'}}
+  #pragma omp parallel loop reduction(+:undef_var) bind(thread)
+  for (i=0; i<1000; ++i)
+    z = i+11;
+
+  // num_threads
+
+  // expected-error@+1 {{directive '#pragma omp parallel loop' cannot contain more than one 'num_threads' clause}}
+  #pragma omp parallel loop num_threads(4) num_threads(4)
+  for (i=0; i<1000; ++i)
+    z = i+11;
+
+  // proc_bind
+
+  // expected-error@+1 {{directive '#pragma omp parallel loop' cannot contain more than one 'proc_bind' clause}}
+  #pragma omp parallel loop proc_bind(close) proc_bind(primary)
+  for (i=0; i<1000; ++i)
+    z = i+11;
+}
+
+template <typename T, int C>
+void templ_test(T t) {
+  T i,z;
+
+  // expected-error@+4 {{expected 2 for loops after '#pragma omp parallel loop', but found only 1}}
+  // expected-note@+1 {{as specified in 'collapse' clause}}
+  #pragma omp parallel loop collapse(C) bind(thread)
+  for (i=0; i<1000; ++i)
+    z = i+11;
+
+  // expected-error@+1 {{only loop iteration variables are allowed in 'lastprivate' clause in 'omp parallel loop' directives}}
+  #pragma omp parallel loop lastprivate(z) bind(thread)
+  for (i=0; i<1000; ++i) {
+    z = i+11;
+  }
+}
+
+void bar()
+{
+  templ_test<int, 2>(16); // expected-note {{in instantiation of function template specialization 'templ_test<int, 2>' requested here}}
+}
Index: clang/tools/libclang/CIndex.cpp
===================================================================
--- clang/tools/libclang/CIndex.cpp
+++ clang/tools/libclang/CIndex.cpp
@@ -5730,6 +5730,8 @@
     return cxstring::createRef("OMPTeamsGenericLoopDirective");
   case CXCursor_OMPTargetTeamsGenericLoopDirective:
     return cxstring::createRef("OMPTargetTeamsGenericLoopDirective");
+  case CXCursor_OMPParallelGenericLoopDirective:
+    return cxstring::createRef("OMPParallelGenericLoopDirective");
   case CXCursor_OverloadCandidate:
     return cxstring::createRef("OverloadCandidate");
   case CXCursor_TypeAliasTemplateDecl:
Index: clang/tools/libclang/CXCursor.cpp
===================================================================
--- clang/tools/libclang/CXCursor.cpp
+++ clang/tools/libclang/CXCursor.cpp
@@ -832,6 +832,9 @@
   case Stmt::OMPTargetTeamsGenericLoopDirectiveClass:
     K = CXCursor_OMPTargetTeamsGenericLoopDirective;
     break;
+  case Stmt::OMPParallelGenericLoopDirectiveClass:
+    K = CXCursor_OMPParallelGenericLoopDirective;
+    break;
   case Stmt::BuiltinBitCastExprClass:
     K = CXCursor_BuiltinBitCastExpr;
   }
Index: llvm/include/llvm/Frontend/OpenMP/OMP.td
===================================================================
--- llvm/include/llvm/Frontend/OpenMP/OMP.td
+++ llvm/include/llvm/Frontend/OpenMP/OMP.td
@@ -1808,6 +1808,26 @@
     VersionedClause,
   ];
 }
+def OMP_parallel_loop : Directive<"parallel loop"> {
+  let allowedClauses = [
+    VersionedClause<OMPC_Allocate>,
+    VersionedClause<OMPC_Copyin>,
+    VersionedClause<OMPC_FirstPrivate>,
+    VersionedClause<OMPC_LastPrivate>,
+    VersionedClause<OMPC_Private>,
+    VersionedClause<OMPC_Reduction>,
+    VersionedClause<OMPC_Shared>,
+  ];
+  let allowedOnceClauses = [
+    VersionedClause<OMPC_Bind, 50>,
+    VersionedClause<OMPC_Collapse>,
+    VersionedClause<OMPC_Default>,
+    VersionedClause<OMPC_If>,
+    VersionedClause<OMPC_NumThreads>,
+    VersionedClause<OMPC_Order>,
+    VersionedClause<OMPC_ProcBind>,
+  ];
+}
 def OMP_Metadirective : Directive<"metadirective"> {
   let allowedClauses = [VersionedClause<OMPC_When>];
   let allowedOnceClauses = [VersionedClause<OMPC_Default>];
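
Usage note (illustrative only, not part of the patch): a minimal sketch of the construct this change teaches Clang to accept. The function and variable names below are made up for the example. With this patch the directive is parsed, checked by Sema, serialized, and round-trips through -ast-print, but IR generation still reaches the llvm_unreachable added in CGStmt.cpp above, so compile the example with -fsyntax-only.

  // clang -fopenmp -fopenmp-version=51 -fsyntax-only example.c
  void axpy(int n, float a, const float *x, float *y) {
    // 'parallel loop' creates a parallel region and binds the generic loop
    // construct to it; order(concurrent) allows the iterations to execute in
    // any order.
    #pragma omp parallel loop num_threads(4) order(concurrent)
    for (int i = 0; i < n; ++i)
      y[i] = a * x[i] + y[i];
  }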