Index: flang/include/flang/Lower/AbstractConverter.h =================================================================== --- flang/include/flang/Lower/AbstractConverter.h +++ flang/include/flang/Lower/AbstractConverter.h @@ -16,6 +16,7 @@ #include "flang/Common/Fortran.h" #include "flang/Lower/PFTDefs.h" #include "flang/Optimizer/Builder/BoxValue.h" +#include "flang/Semantics/symbol.h" #include "mlir/IR/BuiltinOps.h" #include "llvm/ADT/ArrayRef.h" @@ -76,6 +77,9 @@ /// Get the mlir instance of a symbol. virtual mlir::Value getSymbolAddress(SymbolRef sym) = 0; + virtual fir::ExtendedValue + getSymbolExtValue(const Fortran::semantics::Symbol &sym) = 0; + /// Get the binding of an implied do variable by name. virtual mlir::Value impliedDoBinding(llvm::StringRef name) = 0; @@ -99,6 +103,12 @@ virtual void copyHostAssociateVar(const Fortran::semantics::Symbol &sym) = 0; + /// Collect the set of symbols flagged as \p flag in \p eval region. + virtual void collectSymbolSet( + pft::Evaluation &eval, + llvm::SetVector &symbolSet, + Fortran::semantics::Symbol::Flag flag) = 0; + //===--------------------------------------------------------------------===// // Expressions //===--------------------------------------------------------------------===// Index: flang/include/flang/Lower/OpenMP.h =================================================================== --- flang/include/flang/Lower/OpenMP.h +++ flang/include/flang/Lower/OpenMP.h @@ -25,12 +25,14 @@ namespace pft { struct Evaluation; +struct Variable; } // namespace pft void genOpenMPConstruct(AbstractConverter &, pft::Evaluation &, const parser::OpenMPConstruct &); void genOpenMPDeclarativeConstruct(AbstractConverter &, pft::Evaluation &, const parser::OpenMPDeclarativeConstruct &); +void genThreadprivateOp(AbstractConverter &, const pft::Variable &); } // namespace lower } // namespace Fortran Index: flang/include/flang/Lower/PFTBuilder.h =================================================================== --- 
flang/include/flang/Lower/PFTBuilder.h +++ flang/include/flang/Lower/PFTBuilder.h @@ -778,6 +778,11 @@ void visitAllSymbols(const FunctionLikeUnit &funit, std::function callBack); +/// Call the provided \p callBack on all symbols that are referenced inside \p +/// eval region. +void visitAllSymbols(const Evaluation &eval, + std::function callBack); + } // namespace Fortran::lower::pft namespace Fortran::lower { Index: flang/lib/Lower/Bridge.cpp =================================================================== --- flang/lib/Lower/Bridge.cpp +++ flang/lib/Lower/Bridge.cpp @@ -266,6 +266,13 @@ return lookupSymbol(sym).getAddr(); } + fir::ExtendedValue + getSymbolExtValue(const Fortran::semantics::Symbol &sym) override final { + Fortran::lower::SymbolBox sb = localSymbols.lookupSymbol(sym); + assert(sb && "symbol box not found"); + return sb.toExtendedValue(); + } + mlir::Value impliedDoBinding(llvm::StringRef name) override final { mlir::Value val = localSymbols.lookupImpliedDo(name); if (!val) @@ -457,6 +464,18 @@ // Utility methods //===--------------------------------------------------------------------===// + void collectSymbolSet( + Fortran::lower::pft::Evaluation &eval, + llvm::SetVector &symbolSet, + Fortran::semantics::Symbol::Flag flag) override final { + auto addToList = [&](const Fortran::semantics::Symbol &sym) { + const Fortran::semantics::Symbol &ultimate = sym.GetUltimate(); + if (ultimate.test(flag)) + symbolSet.insert(&ultimate); + }; + Fortran::lower::pft::visitAllSymbols(eval, addToList); + } + mlir::Location getCurrentLocation() override final { return toLocation(); } /// Generate a dummy location. 
@@ -2193,6 +2212,10 @@ void instantiateVar(const Fortran::lower::pft::Variable &var, Fortran::lower::AggregateStoreMap &storeMap) { Fortran::lower::instantiateVariable(*this, var, localSymbols, storeMap); + if (var.hasSymbol() && + var.getSymbol().test( + Fortran::semantics::Symbol::Flag::OmpThreadprivate)) + Fortran::lower::genThreadprivateOp(*this, var); } /// Prepare to translate a new function Index: flang/lib/Lower/OpenMP.cpp =================================================================== --- flang/lib/Lower/OpenMP.cpp +++ flang/lib/Lower/OpenMP.cpp @@ -78,6 +78,88 @@ firOpBuilder.restoreInsertionPoint(insPt); } +static mlir::Value genCommonMember(Fortran::lower::AbstractConverter &converter, + const Fortran::semantics::Symbol &sym, + mlir::Value commonValue) { + auto &firOpBuilder = converter.getFirOpBuilder(); + mlir::Location currentLocation = converter.getCurrentLocation(); + mlir::IntegerType i8Ty = firOpBuilder.getIntegerType(8); + mlir::Type i8Ptr = firOpBuilder.getRefType(i8Ty); + mlir::Type seqTy = firOpBuilder.getRefType(firOpBuilder.getVarLenSeqTy(i8Ty)); + mlir::Value base = + firOpBuilder.createConvert(currentLocation, seqTy, commonValue); + std::size_t byteOffset = sym.GetUltimate().offset(); + mlir::Value offs = firOpBuilder.createIntegerConstant( + currentLocation, firOpBuilder.getIndexType(), byteOffset); + mlir::Value varAddr = firOpBuilder.create( + currentLocation, i8Ptr, base, mlir::ValueRange{offs}); + mlir::Type symType = converter.genType(sym); + return firOpBuilder.createConvert(currentLocation, + firOpBuilder.getRefType(symType), varAddr); +} + +static fir::ExtendedValue getExtValue(fir::ExtendedValue base, + mlir::Value val) { + return base.match( + [&](const fir::MutableBoxValue &box) -> fir::ExtendedValue { + return fir::MutableBoxValue(val, box.nonDeferredLenParams(), {}); + }, + [&](const auto &) -> fir::ExtendedValue { + return fir::substBase(base, val); + }); +} + +static void 
threadPrivatizeVars(Fortran::lower::AbstractConverter &converter, + Fortran::lower::pft::Evaluation &eval) { + auto &firOpBuilder = converter.getFirOpBuilder(); + mlir::Location currentLocation = converter.getCurrentLocation(); + auto insPt = firOpBuilder.saveInsertionPoint(); + firOpBuilder.setInsertionPointToStart(firOpBuilder.getAllocaBlock()); + + auto genThreadprivateOp = [&](Fortran::lower::SymbolRef sym) -> mlir::Value { + mlir::Value symOriThreadprivateValue = converter.getSymbolAddress(sym); + mlir::Operation *op = symOriThreadprivateValue.getDefiningOp(); + assert(mlir::isa(op) && + "The threadprivate operation not created"); + mlir::Value symValue = op->getOperand(0); + return firOpBuilder.create( + currentLocation, symValue.getType(), symValue); + }; + + llvm::SetVector threadprivateSyms; + converter.collectSymbolSet( + eval, threadprivateSyms, + Fortran::semantics::Symbol::Flag::OmpThreadprivate); + + llvm::SetVector commonSyms; + for (std::size_t i = 0; i < threadprivateSyms.size(); i++) { + auto sym = threadprivateSyms[i]; + mlir::Value symThreadprivateValue; + if (const Fortran::semantics::Symbol *common = + Fortran::semantics::FindCommonBlockContaining(sym->GetUltimate())) { + mlir::Value commonThreadprivateValue; + if (commonSyms.contains(common)) { + commonThreadprivateValue = converter.getSymbolAddress(*common); + } else { + commonThreadprivateValue = genThreadprivateOp(*common); + converter.bindSymbol(*common, commonThreadprivateValue); + commonSyms.insert(common); + } + symThreadprivateValue = + genCommonMember(converter, *sym, commonThreadprivateValue); + } else { + symThreadprivateValue = genThreadprivateOp(*sym); + } + + fir::ExtendedValue sexv = converter.getSymbolExtValue(*sym); + fir::ExtendedValue symThreadprivateExv = + getExtValue(sexv, symThreadprivateValue); + converter.bindSymbol(*sym, symThreadprivateExv); + } + + firOpBuilder.restoreInsertionPoint(insPt); +} + static void genObjectList(const Fortran::parser::OmpObjectList 
&objectList, Fortran::lower::AbstractConverter &converter, llvm::SmallVectorImpl &operands) { @@ -111,7 +193,7 @@ template static void createBodyOfOp(Op &op, Fortran::lower::AbstractConverter &converter, - mlir::Location &loc, + mlir::Location &loc, Fortran::lower::pft::Evaluation &eval, const Fortran::parser::OmpClauseList *clauses = nullptr, bool outerCombined = false) { fir::FirOpBuilder &firOpBuilder = converter.getFirOpBuilder(); @@ -125,6 +207,9 @@ // Handle privatization. Do not privatize if this is the outer operation. if (clauses && !outerCombined) privatizeVars(converter, *clauses); + + if (std::is_same_v) + threadPrivatizeVars(converter, eval); } static void genOMP(Fortran::lower::AbstractConverter &converter, @@ -313,19 +398,20 @@ allocateOperands, allocatorOperands, /*reduction_vars=*/ValueRange(), /*reductions=*/nullptr, procBindKindAttr); createBodyOfOp(parallelOp, converter, currentLocation, - &opClauseList, /*isCombined=*/false); + eval, &opClauseList, /*isCombined=*/false); } else if (blockDirective.v == llvm::omp::OMPD_master) { auto masterOp = firOpBuilder.create(currentLocation, argTy); - createBodyOfOp(masterOp, converter, currentLocation); + createBodyOfOp(masterOp, converter, currentLocation, eval); } else if (blockDirective.v == llvm::omp::OMPD_single) { auto singleOp = firOpBuilder.create( currentLocation, allocateOperands, allocatorOperands, nowaitAttr); - createBodyOfOp(singleOp, converter, currentLocation); + createBodyOfOp(singleOp, converter, currentLocation, eval); } else if (blockDirective.v == llvm::omp::OMPD_ordered) { auto orderedOp = firOpBuilder.create( currentLocation, /*simd=*/nullptr); - createBodyOfOp(orderedOp, converter, currentLocation); + createBodyOfOp(orderedOp, converter, currentLocation, + eval); } else { TODO(converter.getCurrentLocation(), "Unhandled block directive"); } @@ -371,7 +457,7 @@ firOpBuilder.getContext(), global.sym_name())); } }(); - createBodyOfOp(criticalOp, converter, currentLocation); + 
createBodyOfOp(criticalOp, converter, currentLocation, eval); } static void @@ -383,7 +469,7 @@ auto currentLocation = converter.getCurrentLocation(); mlir::omp::SectionOp sectionOp = firOpBuilder.create(currentLocation); - createBodyOfOp(sectionOp, converter, currentLocation); + createBodyOfOp(sectionOp, converter, currentLocation, eval); } // TODO: Add support for reduction @@ -436,19 +522,20 @@ currentLocation, /*if_expr_var*/ nullptr, /*num_threads_var*/ nullptr, allocateOperands, allocatorOperands, /*reduction_vars=*/ValueRange(), /*reductions=*/nullptr, /*proc_bind_val*/ nullptr); - createBodyOfOp(parallelOp, converter, currentLocation); + createBodyOfOp(parallelOp, converter, currentLocation, eval); auto sectionsOp = firOpBuilder.create( currentLocation, /*reduction_vars*/ ValueRange(), /*reductions=*/nullptr, /*allocate_vars*/ ValueRange(), /*allocators_vars*/ ValueRange(), /*nowait=*/nullptr); - createBodyOfOp(sectionsOp, converter, currentLocation); + createBodyOfOp(sectionsOp, converter, currentLocation, eval); // Sections Construct } else if (dir == llvm::omp::Directive::OMPD_sections) { auto sectionsOp = firOpBuilder.create( currentLocation, reductionVars, /*reductions = */ nullptr, allocateOperands, allocatorOperands, noWaitClauseOperand); - createBodyOfOp(sectionsOp, converter, currentLocation); + createBodyOfOp(sectionsOp, converter, currentLocation, + eval); } } @@ -634,6 +721,82 @@ ompConstruct.u); } +void Fortran::lower::genThreadprivateOp( + Fortran::lower::AbstractConverter &converter, + const Fortran::lower::pft::Variable &var) { + fir::FirOpBuilder &firOpBuilder = converter.getFirOpBuilder(); + mlir::Location currentLocation = converter.getCurrentLocation(); + + const Fortran::semantics::Symbol &sym = var.getSymbol(); + mlir::Value symThreadprivateValue; + if (const Fortran::semantics::Symbol *common = + Fortran::semantics::FindCommonBlockContaining(sym.GetUltimate())) { + mlir::Value commonValue = converter.getSymbolAddress(*common); + if 
(mlir::isa(commonValue.getDefiningOp())) { + // Generate ThreadprivateOp for a common block instead of its members and + // only do it once for a common block. + return; + } + // Generate ThreadprivateOp and rebind the common block. + mlir::Value commonThreadprivateValue = + firOpBuilder.create( + currentLocation, commonValue.getType(), commonValue); + converter.bindSymbol(*common, commonThreadprivateValue); + // Generate the threadprivate value for the common block member. + symThreadprivateValue = + genCommonMember(converter, sym, commonThreadprivateValue); + } else if (!var.isGlobal()) { + // Non-global variable which can be in threadprivate directive must be one + // variable in main program, and it has implicit SAVE attribute. Take it as + // with SAVE attribute, so to create GlobalOp for it to simplify the + // translation to LLVM IR. + mlir::Type ty = converter.genType(sym); + std::string globalName = converter.mangleName(sym); + mlir::StringAttr linkage = firOpBuilder.createInternalLinkage(); + fir::GlobalOp global = + firOpBuilder.createGlobal(currentLocation, ty, globalName, linkage); + + auto createInit = [&](std::function genInit) { + mlir::Region ®ion = global.getRegion(); + region.push_back(new mlir::Block); + mlir::Block &block = region.back(); + auto insertPt = firOpBuilder.saveInsertionPoint(); + firOpBuilder.setInsertionPointToStart(&block); + genInit(firOpBuilder); + firOpBuilder.restoreInsertionPoint(insertPt); + }; + + // Create default initialization for non-character scalar. 
+ if (Fortran::semantics::IsAllocatableOrPointer(sym)) { + mlir::Type baseAddrType = ty.dyn_cast().getEleTy(); + createInit([&](fir::FirOpBuilder &b) { + mlir::Value nullAddr = + b.createNullConstant(currentLocation, baseAddrType); + mlir::Value box = b.create(currentLocation, ty, nullAddr); + b.create(currentLocation, box); + }); + } else { + createInit([&](fir::FirOpBuilder &b) { + mlir::Value undef = b.create(currentLocation, ty); + b.create(currentLocation, undef); + }); + } + mlir::Value symValue = firOpBuilder.create( + currentLocation, global.resultType(), global.getSymbol()); + symThreadprivateValue = firOpBuilder.create( + currentLocation, symValue.getType(), symValue); + } else { + mlir::Value symValue = converter.getSymbolAddress(sym); + symThreadprivateValue = firOpBuilder.create( + currentLocation, symValue.getType(), symValue); + } + + fir::ExtendedValue sexv = converter.getSymbolExtValue(sym); + fir::ExtendedValue symThreadprivateExv = + getExtValue(sexv, symThreadprivateValue); + converter.bindSymbol(sym, symThreadprivateExv); +} + void Fortran::lower::genOpenMPDeclarativeConstruct( Fortran::lower::AbstractConverter &converter, Fortran::lower::pft::Evaluation &eval, @@ -660,7 +823,8 @@ "OpenMPDeclareTargetConstruct"); }, [&](const Fortran::parser::OpenMPThreadprivate &threadprivate) { - TODO(converter.getCurrentLocation(), "OpenMPThreadprivate"); + // The directive is lowered when instantiating the variable to + // support the case of threadprivate variable declared in module. 
}, }, ompDeclConstruct.u); Index: flang/lib/Lower/PFTBuilder.cpp =================================================================== --- flang/lib/Lower/PFTBuilder.cpp +++ flang/lib/Lower/PFTBuilder.cpp @@ -1813,3 +1813,12 @@ parser::Walk(functionParserNode, visitor); }); } + +void Fortran::lower::pft::visitAllSymbols( + const Fortran::lower::pft::Evaluation &eval, + const std::function callBack) { + SymbolVisitor visitor{callBack}; + eval.visit([&](const auto &functionParserNode) { + parser::Walk(functionParserNode, visitor); + }); +} Index: flang/test/Integration/OpenMPLLVM/threadprivate-char-array-chararray.f90 =================================================================== --- /dev/null +++ flang/test/Integration/OpenMPLLVM/threadprivate-char-array-chararray.f90 @@ -0,0 +1,58 @@ +! This test checks lowering of OpenMP Threadprivate Directive. +! Test for character, array, and character array. + +!RUN: %flang_fc1 -emit-llvm -fopenmp %s -o - | FileCheck %s --check-prefix=LLVMIR + +module test + character :: x + integer :: y(5) + character(5) :: z(5) + + !$omp threadprivate(x, y, z) + +!LLVMIR-DAG: @_QMtestEx = global [1 x i8] undef +!LLVMIR-DAG: @_QMtestEy = global [5 x i32] undef +!LLVMIR-DAG: @_QMtestEz = global [5 x [5 x i8]] undef + +!LLVMIR-DAG: @_QMtestEx.cache = common global i8** null +!LLVMIR-DAG: @_QMtestEy.cache = common global i8** null +!LLVMIR-DAG: @_QMtestEz.cache = common global i8** null + +contains + subroutine sub() +! 
LLVMIR-LABEL: @_QMtestPsub() +!LLVMIR: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB0:[0-9]+]]) +!LLVMIR-DAG: [[CACHE0:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB0:[0-9]+]], i32 [[TMP0:%.*]], i8* getelementptr inbounds ([1 x i8], [1 x i8]* @_QMtestEx, i32 0, i32 0), i64 1, i8*** @_QMtestEx.cache) +!LLVMIR-DAG: [[LOAD0:%.*]] = bitcast i8* [[CACHE0]] to [1 x i8]*, !dbg !{{.*}} +!LLVMIR-DAG: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]]) +!LLVMIR-DAG: [[CACHE1:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1:%.*]], i8* bitcast ([5 x i32]* @_QMtestEy to i8*), i64 20, i8*** @_QMtestEy.cache) +!LLVMIR-DAG: [[LOAD1:%.*]] = bitcast i8* [[CACHE1]] to [5 x i32]*, !dbg !{{.*}} +!LLVMIR-DAG: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]]) +!LLVMIR-DAG: [[CACHE2:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP2:%.*]], i8* getelementptr inbounds ([5 x [5 x i8]], [5 x [5 x i8]]* @_QMtestEz, i32 0, i32 0, i32 0), i64 25, i8*** @_QMtestEz.cache) +!LLVMIR-DAG: [[LOAD2:%.*]] = bitcast i8* [[CACHE2]] to [5 x [5 x i8]]*, !dbg !{{.*}} +!LLVMIR-DAG: {{.*}} = bitcast [1 x i8]* [[LOAD0]] to i8*, !dbg !{{.*}} +!LLVMIR-DAG: {{.*}} = insertvalue { [5 x i32]*, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]] } { [5 x i32]* undef, i64 4, i32 20180515, i8 1, i8 9, i8 0, i8 0 +!LLVMIR-DAG: {{.*}} = insertvalue { [5 x [5 x i8]]*, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]] } { [5 x [5 x i8]]* undef, i64 5, i32 20180515, i8 1, i8 40, i8 0, i8 0 + print *, x, y, z + + !$omp parallel + print *, x, y, z + !$omp end parallel + + print *, x, y, z + +! 
LLVMIR-LABEL: omp.par.region{{.*}} +!LLVMIR: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB0:[0-9]+]]) +!LLVMIR-DAG: [[CACHE0:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB0:[0-9]+]], i32 [[TMP0:%.*]], i8* getelementptr inbounds ([1 x i8], [1 x i8]* @_QMtestEx, i32 0, i32 0), i64 1, i8*** @_QMtestEx.cache) +!LLVMIR-DAG: [[LOAD0:%.*]] = bitcast i8* [[CACHE0]] to [1 x i8]*, !dbg !{{.*}} +!LLVMIR-DAG: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]]) +!LLVMIR-DAG: [[CACHE1:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1:%.*]], i8* bitcast ([5 x i32]* @_QMtestEy to i8*), i64 20, i8*** @_QMtestEy.cache) +!LLVMIR-DAG: [[LOAD1:%.*]] = bitcast i8* [[CACHE1]] to [5 x i32]*, !dbg !{{.*}} +!LLVMIR-DAG: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]]) +!LLVMIR-DAG: [[CACHE2:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP2:%.*]], i8* getelementptr inbounds ([5 x [5 x i8]], [5 x [5 x i8]]* @_QMtestEz, i32 0, i32 0, i32 0), i64 25, i8*** @_QMtestEz.cache) +!LLVMIR-DAG: [[LOAD2:%.*]] = bitcast i8* [[CACHE2]] to [5 x [5 x i8]]*, !dbg !{{.*}} +!LLVMIR-DAG: {{.*}} = bitcast [1 x i8]* [[LOAD0]] to i8*, !dbg !{{.*}} +!LLVMIR-DAG: {{.*}} = insertvalue { [5 x i32]*, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]] } { [5 x i32]* undef, i64 4, i32 20180515, i8 1, i8 9, i8 0, i8 0 +!LLVMIR-DAG: {{.*}} = insertvalue { [5 x [5 x i8]]*, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]] } { [5 x [5 x i8]]* undef, i64 5, i32 20180515, i8 1, i8 40, i8 0, i8 0 + end +end Index: flang/test/Integration/OpenMPLLVM/threadprivate-commonblock.f90 =================================================================== --- /dev/null +++ flang/test/Integration/OpenMPLLVM/threadprivate-commonblock.f90 @@ -0,0 +1,78 @@ +! This test checks lowering of OpenMP Threadprivate Directive. +! Test for common block. 
+ +!RUN: %flang_fc1 -emit-llvm -fopenmp %s -o - | FileCheck %s --check-prefix=LLVMIR + +module test + integer:: a + real :: b(2) + complex, pointer :: c, d(:) + character(5) :: e, f(2) + common /blk/ a, b, c, d, e, f + + !$omp threadprivate(/blk/) + +!LLVMIR-DAG: @_QBblk = common global [103 x i8] zeroinitializer + +!LLVMIR-DAG: @_QBblk.cache = common global i8** null + +contains + subroutine sub() +!LLVMIR: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB0:[0-9]+]]) +!LLVMIR: [[TMP5:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB0]], i32 [[TMP0]], i8* getelementptr inbounds ([103 x i8], [103 x i8]* @_QBblk, i32 0, i32 0), i64 103, i8*** @_QBblk.cache) +!LLVMIR: [[TMP6:%.*]] = bitcast i8* [[TMP5]] to [103 x i8]*, !dbg !{{.*}} +!LLVMIR: [[TMP7:%.*]] = bitcast [103 x i8]* [[TMP6]] to i8*, !dbg !{{.*}} +!LLVMIR: [[TMP8:%.*]] = getelementptr i8, i8* [[TMP7]], i64 0, !dbg !{{.*}} +!LLVMIR: [[TMP9:%.*]] = bitcast i8* [[TMP8]] to i32*, !dbg !{{.*}} +!LLVMIR: [[TMP10:%.*]] = getelementptr i8, i8* [[TMP7]], i64 4, !dbg !{{.*}} +!LLVMIR: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to [2 x float]*, !dbg !{{.*}} +!LLVMIR: [[TMP12:%.*]] = getelementptr i8, i8* [[TMP7]], i64 16, !dbg !{{.*}} +!LLVMIR: [[TMP13:%.*]] = bitcast i8* [[TMP12]] to { { float, float }*, i64, i32, i8, i8, i8, i8 }*, !dbg !{{.*}} +!LLVMIR: [[TMP14:%.*]] = getelementptr i8, i8* [[TMP7]], i64 40, !dbg !{{.*}} +!LLVMIR: [[TMP15:%.*]] = bitcast i8* [[TMP14]] to { { float, float }*, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]] }*, !dbg !{{.*}} +!LLVMIR: [[TMP16:%.*]] = getelementptr i8, i8* [[TMP7]], i64 88, !dbg !{{.*}} +!LLVMIR: [[TMP17:%.*]] = bitcast i8* [[TMP16]] to [5 x i8]*, !dbg !{{.*}} +!LLVMIR: [[TMP18:%.*]] = getelementptr i8, i8* [[TMP7]], i64 93, !dbg !{{.*}} +!LLVMIR: [[TMP19:%.*]] = bitcast i8* [[TMP18]] to [2 x [5 x i8]]*, !dbg !{{.*}} +!LLVMIR: %{{.*}} = load i32, i32* [[TMP9]], align 4, !dbg !{{.*}} +!LLVMIR-DAG: %{{.*}} = insertvalue { [2 x float]*, 
i64, i32, i8, i8, i8, i8, [1 x [3 x i64]] } { [2 x float]* undef, i64 4, i32 20180515, i8 1, i8 27, i8 0, i8 0 +!LLVMIR-DAG: %{{.*}} = getelementptr { { float, float }*, i64, i32, i8, i8, i8, i8 }, { { float, float }*, i64, i32, i8, i8, i8, i8 }* [[TMP13]], i32 0, i32 0, !dbg !{{.*}} +!LLVMIR-DAG: %{{.*}} = getelementptr { { float, float }*, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]] }, { { float, float }*, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]] }* [[TMP15]], i32 0, i32 7, i64 0, i32 0, !dbg !{{.*}} +!LLVMIR-DAG: %{{.*}} = bitcast [5 x i8]* [[TMP17]] to i8*, !dbg !{{.*}} +!LLVMIR-DAG: %{{.*}} = insertvalue { [2 x [5 x i8]]*, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]] } { [2 x [5 x i8]]* undef, i64 5, i32 20180515, i8 1, i8 40, i8 0, i8 0 + print *, a, b, c, d, e, f + + !$omp parallel + print *, a, b, c, d, e, f + !$omp end parallel + +!LLVMIR-DAG: %{{.*}} = load i32, i32* [[TMP9]], align 4, !dbg !{{.*}} +!LLVMIR-DAG: %{{.*}} = getelementptr { { float, float }*, i64, i32, i8, i8, i8, i8 }, { { float, float }*, i64, i32, i8, i8, i8, i8 }* [[TMP13]], i32 0, i32 0, !dbg !{{.*}} +!LLVMIR-DAG: %{{.*}} = getelementptr { { float, float }*, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]] }, { { float, float }*, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]] }* [[TMP15]], i32 0, i32 7, i64 0, i32 0, !dbg !{{.*}} + print *, a, b, c, d, e, f + +! 
LLVMIR-LABEL: omp.par.region{{.*}} +!LLVMIR: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB0:[0-9]+]]) +!LLVMIR: [[TMP5:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB0]], i32 [[TMP0]], i8* getelementptr inbounds ([103 x i8], [103 x i8]* @_QBblk, i32 0, i32 0), i64 103, i8*** @_QBblk.cache) +!LLVMIR-DAG: [[TMP6:%.*]] = bitcast i8* [[TMP5]] to [103 x i8]*, !dbg !{{.*}} +!LLVMIR-DAG: [[TMP7:%.*]] = bitcast [103 x i8]* [[TMP6]] to i8*, !dbg !{{.*}} +!LLVMIR-DAG: [[TMP8:%.*]] = getelementptr i8, i8* [[TMP7]], i64 0, !dbg !{{.*}} +!LLVMIR-DAG: [[TMP9:%.*]] = bitcast i8* [[TMP8:%.*]] to i32*, !dbg !{{.*}} +!LLVMIR-DAG: [[TMP10:%.*]] = getelementptr i8, i8* [[TMP7]], i64 4, !dbg !{{.*}} +!LLVMIR-DAG: [[TMP11:%.*]] = bitcast i8* [[TMP10:%.*]] to [2 x float]*, !dbg !{{.*}} +!LLVMIR-DAG: [[TMP12:%.*]] = getelementptr i8, i8* [[TMP7]], i64 16, !dbg !{{.*}} +!LLVMIR-DAG: [[TMP13:%.*]] = bitcast i8* [[TMP12:%.*]] to { { float, float }*, i64, i32, i8, i8, i8, i8 }*, !dbg !{{.*}} +!LLVMIR-DAG: [[TMP14:%.*]] = getelementptr i8, i8* [[TMP7]], i64 40, !dbg !{{.*}} +!LLVMIR-DAG: [[TMP15:%.*]] = bitcast i8* [[TMP14:%.*]] to { { float, float }*, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]] }*, !dbg !{{.*}} +!LLVMIR-DAG: [[TMP16:%.*]] = getelementptr i8, i8* [[TMP7]], i64 88, !dbg !{{.*}} +!LLVMIR-DAG: [[TMP17:%.*]] = bitcast i8* [[TMP16:%.*]] to [5 x i8]*, !dbg !{{.*}} +!LLVMIR-DAG: [[TMP18:%.*]] = getelementptr i8, i8* [[TMP7]], i64 93, !dbg !{{.*}} +!LLVMIR-DAG: [[TMP19:%.*]] = bitcast i8* [[TMP18:%.*]] to [2 x [5 x i8]]*, !dbg !{{.*}} +!LLVMIR-DAG: %{{.*}} = load i32, i32* [[TMP9]], align 4, !dbg !{{.*}} +!LLVMIR-DAG: %{{.*}} = insertvalue { [2 x float]*, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]] } { [2 x float]* undef, i64 4, i32 20180515, i8 1, i8 27, i8 0, i8 0 +!LLVMIR-DAG: %{{.*}} = getelementptr { { float, float }*, i64, i32, i8, i8, i8, i8 }, { { float, float }*, i64, i32, i8, i8, i8, i8 }* [[TMP13]], i32 0, i32 0, !dbg !{{.*}} 
+!LLVMIR-DAG: %{{.*}} = getelementptr { { float, float }*, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]] }, { { float, float }*, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]] }* [[TMP15]], i32 0, i32 7, i64 0, i32 0, !dbg !{{.*}} +!LLVMIR-DAG: %{{.*}} = bitcast [5 x i8]* [[TMP17]] to i8*, !dbg !{{.*}} +!LLVMIR-DAG: %{{.*}} = insertvalue { [2 x [5 x i8]]*, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]] } { [2 x [5 x i8]]* undef, i64 5, i32 20180515, i8 1, i8 40, i8 0, i8 0 + end +end Index: flang/test/Integration/OpenMPLLVM/threadprivate-integer-different-kinds.f90 =================================================================== --- /dev/null +++ flang/test/Integration/OpenMPLLVM/threadprivate-integer-different-kinds.f90 @@ -0,0 +1,92 @@ +! This test checks lowering of OpenMP Threadprivate Directive. +! Test for variables with different kind. + +!REQUIRES: shell +!RUN: %flang_fc1 -emit-llvm -fopenmp %s -o - | FileCheck %s --check-prefix=LLVMIR + +program test + integer, save :: i + integer(kind=1), save :: i1 + integer(kind=2), save :: i2 + integer(kind=4), save :: i4 + integer(kind=8), save :: i8 + integer(kind=16), save :: i16 + +!LLVMIR-DAG: @_QFEi = internal global i32 undef +!LLVMIR-DAG: @_QFEi1 = internal global i8 undef +!LLVMIR-DAG: @_QFEi16 = internal global i128 undef +!LLVMIR-DAG: @_QFEi2 = internal global i16 undef +!LLVMIR-DAG: @_QFEi4 = internal global i32 undef +!LLVMIR-DAG: @_QFEi8 = internal global i64 undef + !$omp threadprivate(i, i1, i2, i4, i8, i16) + +!LLVMIR-DAG: @_QFEi.cache = common global i8** null +!LLVMIR-DAG: @_QFEi1.cache = common global i8** null +!LLVMIR-DAG: @_QFEi16.cache = common global i8** null +!LLVMIR-DAG: @_QFEi2.cache = common global i8** null +!LLVMIR-DAG: @_QFEi4.cache = common global i8** null +!LLVMIR-DAG: @_QFEi8.cache = common global i8** null + +! 
LLVMIR-LABEL: @_QQmain() +!LLVMIR: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB0:[0-9]+]]) +!LLVMIR-DAG: [[CACHE0:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB0:[0-9]+]], i32 [[TMP0:%.*]], i8* bitcast (i32* @_QFEi to i8*), i64 4, i8*** @_QFEi.cache) +!LLVMIR-DAG: [[LOAD0:%.*]] = bitcast i8* [[CACHE0]] to i32*, !dbg !{{.*}} +!LLVMIR-DAG: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]]) +!LLVMIR-DAG: [[CACHE1:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1:%.*]], i8* @_QFEi1, i64 1, i8*** @_QFEi1.cache) +!LLVMIR-DAG: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]]) +!LLVMIR-DAG: [[CACHE2:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP2:%.*]], i8* bitcast (i16* @_QFEi2 to i8*), i64 2, i8*** @_QFEi2.cache) +!LLVMIR-DAG: [[LOAD2:%.*]] = bitcast i8* [[CACHE2]] to i16*, !dbg !{{.*}} +!LLVMIR-DAG: [[TMP3:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3:[0-9]+]]) +!LLVMIR-DAG: [[CACHE3:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP3:%.*]], i8* bitcast (i32* @_QFEi4 to i8*), i64 4, i8*** @_QFEi4.cache) +!LLVMIR-DAG: [[LOAD3:%.*]] = bitcast i8* [[CACHE3]] to i32*, !dbg !{{.*}} +!LLVMIR-DAG: [[TMP4:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB4:[0-9]+]]) +!LLVMIR-DAG: [[CACHE4:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB4:[0-9]+]], i32 [[TMP4:%.*]], i8* bitcast (i64* @_QFEi8 to i8*), i64 8, i8*** @_QFEi8.cache) +!LLVMIR-DAG: [[LOAD4:%.*]] = bitcast i8* [[CACHE4]] to i64*, !dbg !{{.*}} +!LLVMIR-DAG: [[TMP5:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB5:[0-9]+]]) +!LLVMIR-DAG: [[CACHE5:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB5:[0-9]+]], i32 [[TMP5:%.*]], i8* bitcast (i128* @_QFEi16 to i8*), i64 16, i8*** 
@_QFEi16.cache) +!LLVMIR-DAG: [[LOAD5:%.*]] = bitcast i8* [[CACHE5]] to i128*, !dbg !{{.*}} +!LLVMIR-DAG: {{.*}} = load i32, i32* [[LOAD0]], align 4, !dbg !{{.*}} +!LLVMIR-DAG: {{.*}} = load i8, i8* [[CACHE1]], align 1, !dbg !{{.*}} +!LLVMIR-DAG: {{.*}} = load i16, i16* [[LOAD2]], align 2, !dbg !{{.*}} +!LLVMIR-DAG: {{.*}} = load i32, i32* [[LOAD3]], align 4, !dbg !{{.*}} +!LLVMIR-DAG: {{.*}} = load i64, i64* [[LOAD4]], align 4, !dbg !{{.*}} +!LLVMIR-DAG: {{.*}} = load i128, i128* [[LOAD5]], align 4, !dbg !{{.*}} + print *, i, i1, i2, i4, i8, i16 + + !$omp parallel + print *, i, i1, i2, i4, i8, i16 + !$omp end parallel + +!LLVMIR-DAG: {{.*}} = load i32, i32* [[LOAD0]], align 4, !dbg !{{.*}} +!LLVMIR-DAG: {{.*}} = load i8, i8* [[CACHE1]], align 1, !dbg !{{.*}} +!LLVMIR-DAG: {{.*}} = load i16, i16* [[LOAD2]], align 2, !dbg !{{.*}} +!LLVMIR-DAG: {{.*}} = load i32, i32* [[LOAD3]], align 4, !dbg !{{.*}} +!LLVMIR-DAG: {{.*}} = load i64, i64* [[LOAD4]], align 4, !dbg !{{.*}} +!LLVMIR-DAG: {{.*}} = load i128, i128* [[LOAD5]], align 4, !dbg !{{.*}} + print *, i, i1, i2, i4, i8, i16 + +! 
LLVMIR-LABEL: omp.par.region{{.*}} +!LLVMIR: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB0:[0-9]+]]) +!LLVMIR-DAG: [[CACHE0:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB0:[0-9]+]], i32 [[TMP0:%.*]], i8* bitcast (i32* @_QFEi to i8*), i64 4, i8*** @_QFEi.cache) +!LLVMIR-DAG: [[LOAD0:%.*]] = bitcast i8* [[CACHE0]] to i32*, !dbg !{{.*}} +!LLVMIR-DAG: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]]) +!LLVMIR-DAG: [[CACHE1:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1:%.*]], i8* @_QFEi1, i64 1, i8*** @_QFEi1.cache) +!LLVMIR-DAG: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]]) +!LLVMIR-DAG: [[CACHE2:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP2:%.*]], i8* bitcast (i16* @_QFEi2 to i8*), i64 2, i8*** @_QFEi2.cache) +!LLVMIR-DAG: [[LOAD2:%.*]] = bitcast i8* [[CACHE2]] to i16*, !dbg !{{.*}} +!LLVMIR-DAG: [[TMP3:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3:[0-9]+]]) +!LLVMIR-DAG: [[CACHE3:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP3:%.*]], i8* bitcast (i32* @_QFEi4 to i8*), i64 4, i8*** @_QFEi4.cache) +!LLVMIR-DAG: [[LOAD3:%.*]] = bitcast i8* [[CACHE3]] to i32*, !dbg !{{.*}} +!LLVMIR-DAG: [[TMP4:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB4:[0-9]+]]) +!LLVMIR-DAG: [[CACHE4:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB4:[0-9]+]], i32 [[TMP4:%.*]], i8* bitcast (i64* @_QFEi8 to i8*), i64 8, i8*** @_QFEi8.cache) +!LLVMIR-DAG: [[LOAD4:%.*]] = bitcast i8* [[CACHE4]] to i64*, !dbg !{{.*}} +!LLVMIR-DAG: [[TMP5:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB5:[0-9]+]]) +!LLVMIR-DAG: [[CACHE5:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB5:[0-9]+]], i32 [[TMP5:%.*]], i8* bitcast (i128* @_QFEi16 to i8*), 
i64 16, i8*** @_QFEi16.cache) +!LLVMIR-DAG: [[LOAD5:%.*]] = bitcast i8* [[CACHE5]] to i128*, !dbg !{{.*}} +!LLVMIR-DAG: {{.*}} = load i32, i32* [[LOAD0]], align 4, !dbg !{{.*}} +!LLVMIR-DAG: {{.*}} = load i8, i8* [[CACHE1]], align 1, !dbg !{{.*}} +!LLVMIR-DAG: {{.*}} = load i16, i16* [[LOAD2]], align 2, !dbg !{{.*}} +!LLVMIR-DAG: {{.*}} = load i32, i32* [[LOAD3]], align 4, !dbg !{{.*}} +!LLVMIR-DAG: {{.*}} = load i64, i64* [[LOAD4]], align 4, !dbg !{{.*}} +!LLVMIR-DAG: {{.*}} = load i128, i128* [[LOAD5]], align 4, !dbg !{{.*}} +end Index: flang/test/Integration/OpenMPLLVM/threadprivate-real-logical-complex-derivedtype.f90 =================================================================== --- /dev/null +++ flang/test/Integration/OpenMPLLVM/threadprivate-real-logical-complex-derivedtype.f90 @@ -0,0 +1,80 @@ +! This test checks lowering of OpenMP Threadprivate Directive. +! Test for real, logical, complex, and derived type. + +!RUN: %flang_fc1 -emit-llvm -fopenmp %s -o - | FileCheck %s --check-prefix=LLVMIR + +module test + type my_type + integer :: t_i + real :: t_arr(5) + end type my_type + real :: x + complex :: y + logical :: z + type(my_type) :: t + + !$omp threadprivate(x, y, z, t) + +!LLVMIR-DAG: %_QMtestTmy_type = type { i32, [5 x float] } +!LLVMIR-DAG: @_QMtestEt = global %_QMtestTmy_type undef +!LLVMIR-DAG: @_QMtestEx = global float undef +!LLVMIR-DAG: @_QMtestEy = global { float, float } undef +!LLVMIR-DAG: @_QMtestEz = global i32 undef + +!LLVMIR-DAG: @_QMtestEt.cache = common global i8** null +!LLVMIR-DAG: @_QMtestEx.cache = common global i8** null +!LLVMIR-DAG: @_QMtestEy.cache = common global i8** null +!LLVMIR-DAG: @_QMtestEz.cache = common global i8** null + +contains + subroutine sub() +! 
CHECK-LABEL: @_QMtestPsub() +!LLVMIR: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB0:[0-9]+]]) +!LLVMIR-DAG: [[CACHE0:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB0:[0-9]+]], i32 [[TMP0:%.*]], i8* bitcast (%_QMtestTmy_type* @_QMtestEt to i8*), i64 24, i8*** @_QMtestEt.cache) +!LLVMIR-DAG: [[LOAD0:%.*]] = bitcast i8* [[CACHE0]] to %_QMtestTmy_type*, !dbg !{{.*}} +!LLVMIR-DAG: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]]) +!LLVMIR-DAG: [[CACHE1:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1:%.*]], i8* bitcast (float* @_QMtestEx to i8*), i64 4, i8*** @_QMtestEx.cache) +!LLVMIR-DAG: [[LOAD1:%.*]] = bitcast i8* [[CACHE1]] to float*, !dbg !{{.*}} +!LLVMIR-DAG: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]]) +!LLVMIR-DAG: [[CACHE2:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP2:%.*]], i8* bitcast ({ float, float }* @_QMtestEy to i8*), i64 8, i8*** @_QMtestEy.cache) +!LLVMIR-DAG: [[LOAD2:%.*]] = bitcast i8* [[CACHE2]] to { float, float }*, !dbg !{{.*}} +!LLVMIR-DAG: [[TMP3:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3:[0-9]+]]) +!LLVMIR-DAG: [[CACHE3:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP3:%.*]], i8* bitcast (i32* @_QMtestEz to i8*), i64 4, i8*** @_QMtestEz.cache) +!LLVMIR-DAG: [[LOAD3:%.*]] = bitcast i8* [[CACHE3]] to i32*, !dbg !{{.*}} +!LLVMIR-DAG: {{.*}} = load float, float* [[LOAD1]], align 4, !dbg !{{.*}} +!LLVMIR-DAG: {{.*}} = load { float, float }, { float, float }* [[LOAD2]], align 4, !dbg !{{.*}} +!LLVMIR-DAG: {{.*}} = load i32, i32* [[LOAD3]], align 4, !dbg !{{.*}} +!LLVMIR-DAG: [[GEP0:%.*]] = getelementptr %_QMtestTmy_type, %_QMtestTmy_type* [[LOAD0]], i64 0, i32 0, !dbg !{{.*}} +!LLVMIR-DAG: {{.*}} = load i32, i32* [[GEP0]], align 4, !dbg !{{.*}} + print *, x, y, z, 
t%t_i + + !$omp parallel + print *, x, y, z, t%t_i + !$omp end parallel + +!LLVMIR-DAG: %{{.*}} = load float, float* [[LOAD1]], align 4, !dbg !{{.*}} +!LLVMIR-DAG: %{{.*}} = load { float, float }, { float, float }* [[LOAD2]], align 4, !dbg !{{.*}} +!LLVMIR-DAG: %{{.*}} = load i32, i32* [[LOAD3]], align 4, !dbg !{{.*}} +!LLVMIR-DAG: %{{.*}} = load i32, i32* [[GEP0]], align 4, !dbg !{{.*}} + print *, x, y, z, t%t_i + +! LLVMIR-LABEL: omp.par.region{{.*}} +!LLVMIR: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB0:[0-9]+]]) +!LLVMIR-DAG: [[CACHE0:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB0:[0-9]+]], i32 [[TMP0:%.*]], i8* bitcast (%_QMtestTmy_type* @_QMtestEt to i8*), i64 24, i8*** @_QMtestEt.cache) +!LLVMIR-DAG: [[LOAD0:%.*]] = bitcast i8* [[CACHE0]] to %_QMtestTmy_type*, !dbg !{{.*}} +!LLVMIR-DAG: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]]) +!LLVMIR-DAG: [[CACHE1:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1:%.*]], i8* bitcast (float* @_QMtestEx to i8*), i64 4, i8*** @_QMtestEx.cache) +!LLVMIR-DAG: [[LOAD1:%.*]] = bitcast i8* [[CACHE1]] to float*, !dbg !{{.*}} +!LLVMIR-DAG: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]]) +!LLVMIR-DAG: [[CACHE2:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP2:%.*]], i8* bitcast ({ float, float }* @_QMtestEy to i8*), i64 8, i8*** @_QMtestEy.cache) +!LLVMIR-DAG: [[LOAD2:%.*]] = bitcast i8* [[CACHE2]] to { float, float }*, !dbg !{{.*}} +!LLVMIR-DAG: [[TMP3:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB3:[0-9]+]]) +!LLVMIR-DAG: [[CACHE3:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP3:%.*]], i8* bitcast (i32* @_QMtestEz to i8*), i64 4, i8*** @_QMtestEz.cache) +!LLVMIR-DAG: [[LOAD3:%.*]] = bitcast i8* [[CACHE3]] to i32*, !dbg !{{.*}} +!LLVMIR-DAG: {{.*}} 
= load float, float* [[LOAD1]], align 4, !dbg !{{.*}} +!LLVMIR-DAG: {{.*}} = load { float, float }, { float, float }* [[LOAD2]], align 4, !dbg !{{.*}} +!LLVMIR-DAG: {{.*}} = load i32, i32* [[LOAD3]], align 4, !dbg !{{.*}} +!LLVMIR-DAG: [[GEP0:%.*]] = getelementptr %_QMtestTmy_type, %_QMtestTmy_type* [[LOAD0]], i64 0, i32 0, !dbg !{{.*}} +!LLVMIR-DAG: {{.*}} = load i32, i32* [[GEP0]], align 4, !dbg !{{.*}} + end +end Index: flang/test/Integration/OpenMPLLVM/threadprivate-use-association.f90 =================================================================== --- /dev/null +++ flang/test/Integration/OpenMPLLVM/threadprivate-use-association.f90 @@ -0,0 +1,69 @@ +! This test checks lowering of OpenMP Threadprivate Directive. +! Test for threadprivate variable in use association. + +!RUN: %flang_fc1 -emit-llvm -fopenmp %s -o - | FileCheck %s --check-prefix=LLVMIR + +!LLVMIR-DAG: @_QBblk = common global [24 x i8] zeroinitializer +!LLVMIR-DAG: @_QMtestEy = global float undef + +!LLVMIR-DAG: @_QBblk.cache = common global i8** null +!LLVMIR-DAG: @_QMtestEy.cache = common global i8** null + +module test + integer :: x + real :: y, z(5) + common /blk/ x, z + + !$omp threadprivate(y, /blk/) + +contains + subroutine sub() + !$omp parallel +! LLVMIR-LABEL: @_QMtestPsub..omp_par +! 
LLVMIR-LABEL: omp.par.region{{.*}} +!LLVMIR-DAG: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB0:[0-9]+]]) +!LLVMIR-DAG: [[CACHE0:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB0:[0-9]+]], i32 [[TMP0:%.*]], i8* getelementptr inbounds ([24 x i8], [24 x i8]* @_QBblk, i32 0, i32 0), i64 24, i8*** @_QBblk.cache) +!LLVMIR-DAG: [[INS4:%.*]] = bitcast i8* [[CACHE0]] to [24 x i8]*, !dbg !{{.*}} +!LLVMIR-DAG: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]]) +!LLVMIR-DAG: [[CACHE1:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1:%.*]], i8* bitcast (float* @_QMtestEy to i8*), i64 4, i8*** @_QMtestEy.cache) +!LLVMIR-DAG: [[INS7:%.*]] = bitcast i8* [[CACHE1]] to float*, !dbg !{{.*}} +!LLVMIR-DAG: [[INS8:%.*]] = bitcast [24 x i8]* [[INS4]] to i8*, !dbg !{{.*}} +!LLVMIR-DAG: [[INS9:%.*]] = getelementptr i8, i8* [[INS8]], i64 0, !dbg !{{.*}} +!LLVMIR-DAG: %{{.*}} = bitcast i8* [[INS9]] to i32*, !dbg !{{.*}} +!LLVMIR-DAG: [[INS11:%.*]] = getelementptr i8, i8* [[INS8]], i64 4, !dbg !{{.*}} +!LLVMIR-DAG: %{{.*}} = bitcast i8* [[INS11]] to [5 x float]*, !dbg !{{.*}} +!LLVMIR-DAG: %{{.*}} = load float, float* [[INS7]], align 4, !dbg !{{.*}} + print *, x, y, z + !$omp end parallel + end +end + +program main + use test + integer :: x1 + real :: z1(5) + common /blk/ x1, z1 + + !$omp threadprivate(/blk/) + + call sub() + + !$omp parallel +! LLVMIR-LABEL: @_QQmain..omp_par +! 
LLVMIR-LABEL: omp.par.region{{.*}} +!LLVMIR-DAG: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]]) +!LLVMIR-DAG: [[INS6:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1:%.*]], i8* getelementptr inbounds ([24 x i8], [24 x i8]* @_QBblk, i32 0, i32 0), i64 24, i8*** @_QBblk.cache) +!LLVMIR-DAG: [[INS7:%.*]] = bitcast i8* [[INS6]] to [24 x i8]*, !dbg !{{.*}} +!LLVMIR-DAG: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]]) +!LLVMIR-DAG: [[INS9:%.*]] = call i8* @__kmpc_threadprivate_cached(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP1:%.*]], i8* bitcast (float* @_QMtestEy to i8*), i64 4, i8*** @_QMtestEy.cache) +!LLVMIR-DAG: [[INS10:%.*]] = bitcast i8* [[INS9]] to float*, !dbg !{{.*}} +!LLVMIR-DAG: [[INS11:%.*]] = bitcast [24 x i8]* [[INS7]] to i8*, !dbg !{{.*}} +!LLVMIR-DAG: [[INS12:%.*]] = getelementptr i8, i8* [[INS11]], i64 0, !dbg !{{.*}} +!LLVMIR-DAG: %{{.*}} = bitcast i8* [[INS12]] to i32*, !dbg !{{.*}} +!LLVMIR-DAG: [[INS14:%.*]] = getelementptr i8, i8* [[INS11]], i64 4, !dbg !{{.*}} +!LLVMIR-DAG: %{{.*}} = bitcast i8* [[INS14]] to [5 x float]*, !dbg !{{.*}} +!LLVMIR-DAG: %{{.*}} = load float, float* [[INS10]], align 4, !dbg !{{.*}} + print *, x1, y, z1 + !$omp end parallel + +end Index: flang/test/Lower/OpenMP/omp-threadprivate.f90 =================================================================== --- flang/test/Lower/OpenMP/omp-threadprivate.f90 +++ /dev/null @@ -1,10 +0,0 @@ -! This test checks lowering of OpenMP threadprivate Directive. -! XFAIL: * -! RUN: %bbc -fopenmp -emit-fir %s -o - | \ -! 
RUN: FileCheck %s --check-prefix=FIRDialect - -program main - integer, save :: x, y - - !$omp threadprivate(x, y) -end Index: flang/test/Lower/OpenMP/threadprivate-char-array-chararray.f90 =================================================================== --- /dev/null +++ flang/test/Lower/OpenMP/threadprivate-char-array-chararray.f90 @@ -0,0 +1,46 @@ +! This test checks lowering of OpenMP Threadprivate Directive. +! Test for character, array, and character array. + +!RUN: %flang_fc1 -emit-fir -fopenmp %s -o - | FileCheck %s --check-prefix=FIRDialect + +module test + character :: x + integer :: y(5) + character(5) :: z(5) + + !$omp threadprivate(x, y, z) + +!FIRDialect-DAG: fir.global @_QMtestEx : !fir.char<1> { +!FIRDialect-DAG: fir.global @_QMtestEy : !fir.array<5xi32> { +!FIRDialect-DAG: fir.global @_QMtestEz : !fir.array<5x!fir.char<1,5>> { + +contains + subroutine sub() +!FIRDialect-DAG: [[ADDR0:%.*]] = fir.address_of(@_QMtestEx) : !fir.ref> +!FIRDialect-DAG: [[NEWADDR0:%.*]] = omp.threadprivate [[ADDR0]] : !fir.ref> -> !fir.ref> +!FIRDialect-DAG: [[ADDR1:%.*]] = fir.address_of(@_QMtestEy) : !fir.ref> +!FIRDialect-DAG: [[NEWADDR1:%.*]] = omp.threadprivate [[ADDR1]] : !fir.ref> -> !fir.ref> +!FIRDialect-DAG: [[ADDR2:%.*]] = fir.address_of(@_QMtestEz) : !fir.ref>> +!FIRDialect-DAG: [[NEWADDR2:%.*]] = omp.threadprivate [[ADDR2]] : !fir.ref>> -> !fir.ref>> +!FIRDialect-DAG: %{{.*}} = fir.convert [[NEWADDR0]] : (!fir.ref>) -> !fir.ref +!FIRDialect-DAG: %{{.*}} = fir.embox [[NEWADDR1]](%{{.*}}) : (!fir.ref>, !fir.shape<1>) -> !fir.box> +!FIRDialect-DAG: %{{.*}} = fir.embox [[NEWADDR2]](%{{.*}}) : (!fir.ref>>, !fir.shape<1>) -> !fir.box>> + print *, x, y, z + + !$omp parallel +!FIRDialect-DAG: [[ADDR33:%.*]] = omp.threadprivate [[ADDR0]] : !fir.ref> -> !fir.ref> +!FIRDialect-DAG: [[ADDR34:%.*]] = omp.threadprivate [[ADDR1]] : !fir.ref> -> !fir.ref> +!FIRDialect-DAG: [[ADDR35:%.*]] = omp.threadprivate [[ADDR2]] : !fir.ref>> -> !fir.ref>> +!FIRDialect-DAG: %{{.*}} = 
fir.convert [[ADDR33]] : (!fir.ref>) -> !fir.ref +!FIRDialect-DAG: %{{.*}} = fir.embox [[ADDR34]](%{{.*}}) : (!fir.ref>, !fir.shape<1>) -> !fir.box> +!FIRDialect-DAG: %{{.*}} = fir.embox [[ADDR35]](%{{.*}}) : (!fir.ref>>, !fir.shape<1>) -> !fir.box>> + print *, x, y, z + !$omp end parallel + +!FIRDialect-DAG: %{{.*}} = fir.convert [[NEWADDR0]] : (!fir.ref>) -> !fir.ref +!FIRDialect-DAG: %{{.*}} = fir.embox [[NEWADDR1]](%{{.*}}) : (!fir.ref>, !fir.shape<1>) -> !fir.box> +!FIRDialect-DAG: %{{.*}} = fir.embox [[NEWADDR2]](%{{.*}}) : (!fir.ref>>, !fir.shape<1>) -> !fir.box>> + print *, x, y, z + + end +end Index: flang/test/Lower/OpenMP/threadprivate-commonblock.f90 =================================================================== --- /dev/null +++ flang/test/Lower/OpenMP/threadprivate-commonblock.f90 @@ -0,0 +1,91 @@ +! This test checks lowering of OpenMP Threadprivate Directive. +! Test for common block. + +!RUN: %flang_fc1 -emit-fir -fopenmp %s -o - | FileCheck %s --check-prefix=FIRDialect + +module test + integer:: a + real :: b(2) + complex, pointer :: c, d(:) + character(5) :: e, f(2) + common /blk/ a, b, c, d, e, f + + !$omp threadprivate(/blk/) + +!FIRDialect: fir.global common @_QBblk(dense<0> : vector<103xi8>) : !fir.array<103xi8> + +contains + subroutine sub() +!FIRDialect: [[ADDR0:%.*]] = fir.address_of(@_QBblk) : !fir.ref> +!FIRDialect: [[NEWADDR0:%.*]] = omp.threadprivate [[ADDR0]] : !fir.ref> -> !fir.ref> +!FIRDialect-DAG: [[ADDR1:%.*]] = fir.convert [[NEWADDR0]] : (!fir.ref>) -> !fir.ref> +!FIRDialect-DAG: [[C0:%.*]] = arith.constant 0 : index +!FIRDialect-DAG: [[ADDR2:%.*]] = fir.coordinate_of [[ADDR1]], [[C0]] : (!fir.ref>, index) -> !fir.ref +!FIRDialect-DAG: [[ADDR3:%.*]] = fir.convert [[ADDR2]] : (!fir.ref) -> !fir.ref +!FIRDialect-DAG: [[ADDR4:%.*]] = fir.convert [[NEWADDR0]] : (!fir.ref>) -> !fir.ref> +!FIRDialect-DAG: [[C1:%.*]] = arith.constant 4 : index +!FIRDialect-DAG: [[ADDR5:%.*]] = fir.coordinate_of [[ADDR4]], [[C1]] : (!fir.ref>, 
index) -> !fir.ref +!FIRDialect-DAG: [[ADDR6:%.*]] = fir.convert [[ADDR5]] : (!fir.ref) -> !fir.ref> +!FIRDialect-DAG: [[ADDR7:%.*]] = fir.convert [[NEWADDR0]] : (!fir.ref>) -> !fir.ref> +!FIRDialect-DAG: [[C2:%.*]] = arith.constant 16 : index +!FIRDialect-DAG: [[ADDR8:%.*]] = fir.coordinate_of [[ADDR7]], [[C2]] : (!fir.ref>, index) -> !fir.ref +!FIRDialect-DAG: [[ADDR9:%.*]] = fir.convert [[ADDR8]] : (!fir.ref) -> !fir.ref>>> +!FIRDialect-DAG: [[ADDR10:%.*]] = fir.convert [[NEWADDR0]] : (!fir.ref>) -> !fir.ref> +!FIRDialect-DAG: [[C3:%.*]] = arith.constant 40 : index +!FIRDialect-DAG: [[ADDR11:%.*]] = fir.coordinate_of [[ADDR10]], [[C3]] : (!fir.ref>, index) -> !fir.ref +!FIRDialect-DAG: [[ADDR12:%.*]] = fir.convert [[ADDR11]] : (!fir.ref) -> !fir.ref>>>> +!FIRDialect-DAG: [[ADDR13:%.*]] = fir.convert [[NEWADDR0]] : (!fir.ref>) -> !fir.ref> +!FIRDialect-DAG: [[C4:%.*]] = arith.constant 88 : index +!FIRDialect-DAG: [[ADDR14:%.*]] = fir.coordinate_of [[ADDR13]], [[C4]] : (!fir.ref>, index) -> !fir.ref +!FIRDialect-DAG: [[ADDR15:%.*]] = fir.convert [[ADDR14]] : (!fir.ref) -> !fir.ref> +!FIRDialect-DAG: [[ADDR16:%.*]] = fir.convert [[NEWADDR0]] : (!fir.ref>) -> !fir.ref> +!FIRDialect-DAG: [[C5:%.*]] = arith.constant 93 : index +!FIRDialect-DAG: [[ADDR17:%.*]] = fir.coordinate_of [[ADDR16]], [[C5]] : (!fir.ref>, index) -> !fir.ref +!FIRDialect-DAG: [[ADDR18:%.*]] = fir.convert [[ADDR17]] : (!fir.ref) -> !fir.ref>> +!FIRDialect-DAG: %{{.*}} = fir.load [[ADDR3]] : !fir.ref +!FIRDialect-DAG: %{{.*}} = fir.embox [[ADDR6]](%{{.*}}) : (!fir.ref>, !fir.shape<1>) -> !fir.box> +!FIRDialect-DAG: %{{.*}} = fir.load [[ADDR9]] : !fir.ref>>> +!FIRDialect-DAG: %{{.*}} = fir.load [[ADDR12]] : !fir.ref>>>> +!FIRDialect-DAG: %{{.*}} = fir.convert [[ADDR15]] : (!fir.ref>) -> !fir.ref +!FIRDialect-DAG: %{{.*}} = fir.embox [[ADDR18]](%{{.*}}) : (!fir.ref>>, !fir.shape<1>) -> !fir.box>> + print *, a, b, c, d, e, f + + !$omp parallel +!FIRDialect: [[ADDR77:%.*]] = omp.threadprivate [[ADDR0]] 
: !fir.ref> -> !fir.ref> +!FIRDialect-DAG: [[ADDR78:%.*]] = fir.convert [[ADDR77]] : (!fir.ref>) -> !fir.ref> +!FIRDialect-DAG: [[ADDR79:%.*]] = fir.coordinate_of [[ADDR78]], [[C0:%.*]] : (!fir.ref>, index) -> !fir.ref +!FIRDialect-DAG: [[ADDR80:%.*]] = fir.convert [[ADDR79:%.*]] : (!fir.ref) -> !fir.ref +!FIRDialect-DAG: [[ADDR81:%.*]] = fir.convert [[ADDR77]] : (!fir.ref>) -> !fir.ref> +!FIRDialect-DAG: [[ADDR82:%.*]] = fir.coordinate_of [[ADDR81]], [[C1:%.*]] : (!fir.ref>, index) -> !fir.ref +!FIRDialect-DAG: [[ADDR83:%.*]] = fir.convert [[ADDR82:%.*]] : (!fir.ref) -> !fir.ref> +!FIRDialect-DAG: [[ADDR84:%.*]] = fir.convert [[ADDR77]] : (!fir.ref>) -> !fir.ref> +!FIRDialect-DAG: [[ADDR85:%.*]] = fir.coordinate_of [[ADDR84]], [[C2:%.*]] : (!fir.ref>, index) -> !fir.ref +!FIRDialect-DAG: [[ADDR86:%.*]] = fir.convert [[ADDR85:%.*]] : (!fir.ref) -> !fir.ref>>> +!FIRDialect-DAG: [[ADDR87:%.*]] = fir.convert [[ADDR77]] : (!fir.ref>) -> !fir.ref> +!FIRDialect-DAG: [[ADDR88:%.*]] = fir.coordinate_of [[ADDR87]], [[C3:%.*]] : (!fir.ref>, index) -> !fir.ref +!FIRDialect-DAG: [[ADDR89:%.*]] = fir.convert [[ADDR88:%.*]] : (!fir.ref) -> !fir.ref>>>> +!FIRDialect-DAG: [[ADDR90:%.*]] = fir.convert [[ADDR77]] : (!fir.ref>) -> !fir.ref> +!FIRDialect-DAG: [[ADDR91:%.*]] = fir.coordinate_of [[ADDR90]], [[C4:%.*]] : (!fir.ref>, index) -> !fir.ref +!FIRDialect-DAG: [[ADDR92:%.*]] = fir.convert [[ADDR91:%.*]] : (!fir.ref) -> !fir.ref> +!FIRDialect-DAG: [[ADDR93:%.*]] = fir.convert [[ADDR77]] : (!fir.ref>) -> !fir.ref> +!FIRDialect-DAG: [[ADDR94:%.*]] = fir.coordinate_of [[ADDR93]], [[C5:%.*]] : (!fir.ref>, index) -> !fir.ref +!FIRDialect-DAG: [[ADDR95:%.*]] = fir.convert [[ADDR94:%.*]] : (!fir.ref) -> !fir.ref>> +!FIRDialect-DAG: %{{.*}} = fir.load [[ADDR80]] : !fir.ref +!FIRDialect-DAG: %{{.*}} = fir.embox [[ADDR83]](%{{.*}}) : (!fir.ref>, !fir.shape<1>) -> !fir.box> +!FIRDialect-DAG: %{{.*}} = fir.load [[ADDR86]] : !fir.ref>>> +!FIRDialect-DAG: %{{.*}} = fir.load [[ADDR89]] : 
!fir.ref>>>> +!FIRDialect-DAG: %{{.*}} = fir.convert [[ADDR92]] : (!fir.ref>) -> !fir.ref +!FIRDialect-DAG: %{{.*}} = fir.embox [[ADDR95]](%{{.*}}) : (!fir.ref>>, !fir.shape<1>) -> !fir.box>> + print *, a, b, c, d, e, f + !$omp end parallel + +!FIRDialect-DAG: %{{.*}} = fir.load [[ADDR3]] : !fir.ref +!FIRDialect-DAG: %{{.*}} = fir.embox [[ADDR6]](%{{.*}}) : (!fir.ref>, !fir.shape<1>) -> !fir.box> +!FIRDialect-DAG: %{{.*}} = fir.load [[ADDR9]] : !fir.ref>>> +!FIRDialect-DAG: %{{.*}} = fir.load [[ADDR12]] : !fir.ref>>>> +!FIRDialect-DAG: %{{.*}} = fir.convert [[ADDR15]] : (!fir.ref>) -> !fir.ref +!FIRDialect-DAG: %{{.*}} = fir.embox [[ADDR18]](%{{.*}}) : (!fir.ref>>, !fir.shape<1>) -> !fir.box>> + print *, a, b, c, d, e, f + + end +end Index: flang/test/Lower/OpenMP/threadprivate-integer-different-kinds.f90 =================================================================== --- /dev/null +++ flang/test/Lower/OpenMP/threadprivate-integer-different-kinds.f90 @@ -0,0 +1,67 @@ +! This test checks lowering of OpenMP Threadprivate Directive. +! Test for variables with different kind. 
+ +!REQUIRES: shell +!RUN: %flang_fc1 -emit-fir -fopenmp %s -o - | FileCheck %s --check-prefix=FIRDialect + +program test + integer, save :: i + integer(kind=1), save :: i1 + integer(kind=2), save :: i2 + integer(kind=4), save :: i4 + integer(kind=8), save :: i8 + integer(kind=16), save :: i16 + +!FIRDialect-DAG: [[ADDR0:%.*]] = fir.address_of(@_QFEi) : !fir.ref +!FIRDialect-DAG: [[NEWADDR0:%.*]] = omp.threadprivate [[ADDR0]] : !fir.ref -> !fir.ref +!FIRDialect-DAG: [[ADDR1:%.*]] = fir.address_of(@_QFEi1) : !fir.ref +!FIRDialect-DAG: [[NEWADDR1:%.*]] = omp.threadprivate [[ADDR1]] : !fir.ref -> !fir.ref +!FIRDialect-DAG: [[ADDR2:%.*]] = fir.address_of(@_QFEi16) : !fir.ref +!FIRDialect-DAG: [[NEWADDR2:%.*]] = omp.threadprivate [[ADDR2]] : !fir.ref -> !fir.ref +!FIRDialect-DAG: [[ADDR3:%.*]] = fir.address_of(@_QFEi2) : !fir.ref +!FIRDialect-DAG: [[NEWADDR3:%.*]] = omp.threadprivate [[ADDR3]] : !fir.ref -> !fir.ref +!FIRDialect-DAG: [[ADDR4:%.*]] = fir.address_of(@_QFEi4) : !fir.ref +!FIRDialect-DAG: [[NEWADDR4:%.*]] = omp.threadprivate [[ADDR4]] : !fir.ref -> !fir.ref +!FIRDialect-DAG: [[ADDR5:%.*]] = fir.address_of(@_QFEi8) : !fir.ref +!FIRDialect-DAG: [[NEWADDR5:%.*]] = omp.threadprivate [[ADDR5]] : !fir.ref -> !fir.ref + !$omp threadprivate(i, i1, i2, i4, i8, i16) + +!FIRDialect-DAG: %{{.*}} = fir.load [[NEWADDR0]] : !fir.ref +!FIRDialect-DAG: %{{.*}} = fir.load [[NEWADDR1]] : !fir.ref +!FIRDialect-DAG: %{{.*}} = fir.load [[NEWADDR2]] : !fir.ref +!FIRDialect-DAG: %{{.*}} = fir.load [[NEWADDR3]] : !fir.ref +!FIRDialect-DAG: %{{.*}} = fir.load [[NEWADDR4]] : !fir.ref +!FIRDialect-DAG: %{{.*}} = fir.load [[NEWADDR5]] : !fir.ref + print *, i, i1, i2, i4, i8, i16 + + !$omp parallel +!FIRDialect-DAG: [[ADDR39:%.*]] = omp.threadprivate [[ADDR0]] : !fir.ref -> !fir.ref +!FIRDialect-DAG: [[ADDR40:%.*]] = omp.threadprivate [[ADDR1]] : !fir.ref -> !fir.ref +!FIRDialect-DAG: [[ADDR41:%.*]] = omp.threadprivate [[ADDR2]] : !fir.ref -> !fir.ref +!FIRDialect-DAG: [[ADDR42:%.*]] = 
omp.threadprivate [[ADDR3]] : !fir.ref -> !fir.ref +!FIRDialect-DAG: [[ADDR43:%.*]] = omp.threadprivate [[ADDR4]] : !fir.ref -> !fir.ref +!FIRDialect-DAG: [[ADDR44:%.*]] = omp.threadprivate [[ADDR5]] : !fir.ref -> !fir.ref +!FIRDialect-DAG: %{{.*}} = fir.load [[ADDR39]] : !fir.ref +!FIRDialect-DAG: %{{.*}} = fir.load [[ADDR40]] : !fir.ref +!FIRDialect-DAG: %{{.*}} = fir.load [[ADDR41]] : !fir.ref +!FIRDialect-DAG: %{{.*}} = fir.load [[ADDR42]] : !fir.ref +!FIRDialect-DAG: %{{.*}} = fir.load [[ADDR43]] : !fir.ref +!FIRDialect-DAG: %{{.*}} = fir.load [[ADDR44]] : !fir.ref + print *, i, i1, i2, i4, i8, i16 + !$omp end parallel + +!FIRDialect-DAG: %{{.*}} = fir.load [[NEWADDR0]] : !fir.ref +!FIRDialect-DAG: %{{.*}} = fir.load [[NEWADDR1]] : !fir.ref +!FIRDialect-DAG: %{{.*}} = fir.load [[NEWADDR2]] : !fir.ref +!FIRDialect-DAG: %{{.*}} = fir.load [[NEWADDR3]] : !fir.ref +!FIRDialect-DAG: %{{.*}} = fir.load [[NEWADDR4]] : !fir.ref +!FIRDialect-DAG: %{{.*}} = fir.load [[NEWADDR5]] : !fir.ref + print *, i, i1, i2, i4, i8, i16 + +!FIRDialect-DAG: fir.global internal @_QFEi : i32 { +!FIRDialect-DAG: fir.global internal @_QFEi1 : i8 { +!FIRDialect-DAG: fir.global internal @_QFEi16 : i128 { +!FIRDialect-DAG: fir.global internal @_QFEi2 : i16 { +!FIRDialect-DAG: fir.global internal @_QFEi4 : i32 { +!FIRDialect-DAG: fir.global internal @_QFEi8 : i64 { +end Index: flang/test/Lower/OpenMP/threadprivate-non-global.f90 =================================================================== --- /dev/null +++ flang/test/Lower/OpenMP/threadprivate-non-global.f90 @@ -0,0 +1,91 @@ +! This test checks lowering of OpenMP Threadprivate Directive. +! Test for non-character non-SAVEd non-initialized scalars with or without +! allocatable or pointer attribute in main program. 
+ +!RUN: %flang_fc1 -emit-fir -fopenmp %s -o - | FileCheck %s --check-prefix=FIRDialect + +program test + integer :: x + real :: y + logical :: z + complex :: w + integer, pointer :: a + real, allocatable :: b + +!FIRDialect-DAG: [[ADDR0:%.*]] = fir.address_of(@_QFEa) : !fir.ref>> +!FIRDialect-DAG: [[NEWADDR0:%.*]] = omp.threadprivate [[ADDR0]] : !fir.ref>> -> !fir.ref>> +!FIRDialect-DAG: [[ADDR1:%.*]] = fir.address_of(@_QFEb) : !fir.ref>> +!FIRDialect-DAG: [[NEWADDR1:%.*]] = omp.threadprivate [[ADDR1]] : !fir.ref>> -> !fir.ref>> +!FIRDialect-DAG: [[ADDR2:%.*]] = fir.address_of(@_QFEw) : !fir.ref> +!FIRDialect-DAG: [[NEWADDR2:%.*]] = omp.threadprivate [[ADDR2]] : !fir.ref> -> !fir.ref> +!FIRDialect-DAG: [[ADDR3:%.*]] = fir.address_of(@_QFEx) : !fir.ref +!FIRDialect-DAG: [[NEWADDR3:%.*]] = omp.threadprivate [[ADDR3]] : !fir.ref -> !fir.ref +!FIRDialect-DAG: [[ADDR4:%.*]] = fir.address_of(@_QFEy) : !fir.ref +!FIRDialect-DAG: [[NEWADDR4:%.*]] = omp.threadprivate [[ADDR4]] : !fir.ref -> !fir.ref +!FIRDialect-DAG: [[ADDR5:%.*]] = fir.address_of(@_QFEz) : !fir.ref> +!FIRDialect-DAG: [[NEWADDR5:%.*]] = omp.threadprivate [[ADDR5]] : !fir.ref> -> !fir.ref> + !$omp threadprivate(x, y, z, w, a, b, w, a, b) + + call sub(a, b) + +!FIRDialect-DAG: %{{.*}} = fir.load [[NEWADDR3]] : !fir.ref +!FIRDialect-DAG: %{{.*}} = fir.load [[NEWADDR4]] : !fir.ref +!FIRDialect-DAG: %{{.*}} = fir.load [[NEWADDR5]] : !fir.ref> +!FIRDialect-DAG: %{{.*}} = fir.load [[NEWADDR2]] : !fir.ref> +!FIRDialect-DAG: %{{.*}} = fir.load [[NEWADDR0]] : !fir.ref>> +!FIRDialect-DAG: %{{.*}} = fir.load [[NEWADDR1]] : !fir.ref>> + print *, x, y, z, w, a, b + + !$omp parallel +!FIRDialect-DAG: [[ADDR68:%.*]] = omp.threadprivate [[ADDR0]] : !fir.ref>> -> !fir.ref>> +!FIRDialect-DAG: [[ADDR69:%.*]] = omp.threadprivate [[ADDR1]] : !fir.ref>> -> !fir.ref>> +!FIRDialect-DAG: [[ADDR70:%.*]] = omp.threadprivate [[ADDR2]] : !fir.ref> -> !fir.ref> +!FIRDialect-DAG: [[ADDR71:%.*]] = omp.threadprivate [[ADDR3]] : !fir.ref -> 
!fir.ref +!FIRDialect-DAG: [[ADDR72:%.*]] = omp.threadprivate [[ADDR4]] : !fir.ref -> !fir.ref +!FIRDialect-DAG: [[ADDR73:%.*]] = omp.threadprivate [[ADDR5]] : !fir.ref> -> !fir.ref> +!FIRDialect-DAG: %{{.*}} = fir.load [[ADDR71]] : !fir.ref +!FIRDialect-DAG: %{{.*}} = fir.load [[ADDR72]] : !fir.ref +!FIRDialect-DAG: %{{.*}} = fir.load [[ADDR73]] : !fir.ref> +!FIRDialect-DAG: %{{.*}} = fir.load [[ADDR70]] : !fir.ref> +!FIRDialect-DAG: %{{.*}} = fir.load [[ADDR68]] : !fir.ref>> +!FIRDialect-DAG: %{{.*}} = fir.load [[ADDR69]] : !fir.ref>> + print *, x, y, z, w, a, b + !$omp end parallel + +!FIRDialect-DAG: %{{.*}} = fir.load [[NEWADDR3]] : !fir.ref +!FIRDialect-DAG: %{{.*}} = fir.load [[NEWADDR4]] : !fir.ref +!FIRDialect-DAG: %{{.*}} = fir.load [[NEWADDR5]] : !fir.ref> +!FIRDialect-DAG: %{{.*}} = fir.load [[NEWADDR2]] : !fir.ref> +!FIRDialect-DAG: %{{.*}} = fir.load [[NEWADDR0]] : !fir.ref>> +!FIRDialect-DAG: %{{.*}} = fir.load [[NEWADDR1]] : !fir.ref>> + print *, x, y, z, w, a, b + +!FIRDialect: return + +!FIRDialect-DAG: fir.global internal @_QFEa : !fir.box> { +!FIRDialect-DAG: [[Z0:%.*]] = fir.zero_bits !fir.ptr +!FIRDialect-DAG: [[E0:%.*]] = fir.embox [[Z0]] : (!fir.ptr) -> !fir.box> +!FIRDialect-DAG: fir.has_value [[E0]] : !fir.box> +!FIRDialect-DAG: } +!FIRDialect-DAG: fir.global internal @_QFEb : !fir.box> { +!FIRDialect-DAG: [[Z1:%.*]] = fir.zero_bits !fir.heap +!FIRDialect-DAG: [[E1:%.*]] = fir.embox [[Z1]] : (!fir.heap) -> !fir.box> +!FIRDialect-DAG: fir.has_value [[E1]] : !fir.box> +!FIRDialect-DAG: } +!FIRDialect-DAG: fir.global internal @_QFEw : !fir.complex<4> { +!FIRDialect-DAG: [[Z2:%.*]] = fir.undefined !fir.complex<4> +!FIRDialect-DAG: fir.has_value [[Z2]] : !fir.complex<4> +!FIRDialect-DAG: } +!FIRDialect-DAG: fir.global internal @_QFEx : i32 { +!FIRDialect-DAG: [[Z3:%.*]] = fir.undefined i32 +!FIRDialect-DAG: fir.has_value [[Z3]] : i32 +!FIRDialect-DAG: } +!FIRDialect-DAG: fir.global internal @_QFEy : f32 { +!FIRDialect-DAG: [[Z4:%.*]] = 
fir.undefined f32 +!FIRDialect-DAG: fir.has_value [[Z4]] : f32 +!FIRDialect-DAG: } +!FIRDialect-DAG: fir.global internal @_QFEz : !fir.logical<4> { +!FIRDialect-DAG: [[Z5:%.*]] = fir.undefined !fir.logical<4> +!FIRDialect-DAG: fir.has_value [[Z5]] : !fir.logical<4> +!FIRDialect-DAG: } +end Index: flang/test/Lower/OpenMP/threadprivate-pointer-allocatable.f90 =================================================================== --- /dev/null +++ flang/test/Lower/OpenMP/threadprivate-pointer-allocatable.f90 @@ -0,0 +1,51 @@ +! This test checks lowering of OpenMP Threadprivate Directive. +! Test for allocatable and pointer variables. + +!RUN: %flang_fc1 -emit-fir -fopenmp %s -o - | FileCheck %s --check-prefix=FIRDialect + +module test + integer, pointer :: x(:), m + real, allocatable :: y(:), n + + !$omp threadprivate(x, y, m, n) + +!FIRDialect-DAG: fir.global @_QMtestEm : !fir.box> { +!FIRDialect-DAG: fir.global @_QMtestEn : !fir.box> { +!FIRDialect-DAG: fir.global @_QMtestEx : !fir.box>> { +!FIRDialect-DAG: fir.global @_QMtestEy : !fir.box>> { + +contains + subroutine sub() +!FIRDialect-DAG: [[ADDR0:%.*]] = fir.address_of(@_QMtestEm) : !fir.ref>> +!FIRDialect-DAG: [[NEWADDR0:%.*]] = omp.threadprivate [[ADDR0]] : !fir.ref>> -> !fir.ref>> +!FIRDialect-DAG: [[ADDR1:%.*]] = fir.address_of(@_QMtestEn) : !fir.ref>> +!FIRDialect-DAG: [[NEWADDR1:%.*]] = omp.threadprivate [[ADDR1]] : !fir.ref>> -> !fir.ref>> +!FIRDialect-DAG: [[ADDR2:%.*]] = fir.address_of(@_QMtestEx) : !fir.ref>>> +!FIRDialect-DAG: [[NEWADDR2:%.*]] = omp.threadprivate [[ADDR2]] : !fir.ref>>> -> !fir.ref>>> +!FIRDialect-DAG: [[ADDR3:%.*]] = fir.address_of(@_QMtestEy) : !fir.ref>>> +!FIRDialect-DAG: [[NEWADDR3:%.*]] = omp.threadprivate [[ADDR3]] : !fir.ref>>> -> !fir.ref>>> +!FIRDialect-DAG: %{{.*}} = fir.load [[NEWADDR2]] : !fir.ref>>> +!FIRDialect-DAG: %{{.*}} = fir.load [[NEWADDR3]] : !fir.ref>>> +!FIRDialect-DAG: %{{.*}} = fir.load [[NEWADDR0]] : !fir.ref>> +!FIRDialect-DAG: %{{.*}} = fir.load [[NEWADDR1]] : 
!fir.ref>> + print *, x, y, m, n + + !$omp parallel +!FIRDialect-DAG: [[ADDR54:%.*]] = omp.threadprivate [[ADDR0]] : !fir.ref>> -> !fir.ref>> +!FIRDialect-DAG: [[ADDR55:%.*]] = omp.threadprivate [[ADDR1]] : !fir.ref>> -> !fir.ref>> +!FIRDialect-DAG: [[ADDR56:%.*]] = omp.threadprivate [[ADDR2]] : !fir.ref>>> -> !fir.ref>>> +!FIRDialect-DAG: [[ADDR57:%.*]] = omp.threadprivate [[ADDR3]] : !fir.ref>>> -> !fir.ref>>> +!FIRDialect-DAG: %{{.*}} = fir.load [[ADDR56]] : !fir.ref>>> +!FIRDialect-DAG: %{{.*}} = fir.load [[ADDR57]] : !fir.ref>>> +!FIRDialect-DAG: %{{.*}} = fir.load [[ADDR54]] : !fir.ref>> +!FIRDialect-DAG: %{{.*}} = fir.load [[ADDR55]] : !fir.ref>> + print *, x, y, m, n + !$omp end parallel + +!FIRDialect-DAG: %{{.*}} = fir.load [[NEWADDR2]] : !fir.ref>>> +!FIRDialect-DAG: %{{.*}} = fir.load [[NEWADDR3]] : !fir.ref>>> +!FIRDialect-DAG: %{{.*}} = fir.load [[NEWADDR0]] : !fir.ref>> +!FIRDialect-DAG: %{{.*}} = fir.load [[NEWADDR1]] : !fir.ref>> + print *, x, y, m, n + end +end Index: flang/test/Lower/OpenMP/threadprivate-real-logical-complex-derivedtype.f90 =================================================================== --- /dev/null +++ flang/test/Lower/OpenMP/threadprivate-real-logical-complex-derivedtype.f90 @@ -0,0 +1,58 @@ +! This test checks lowering of OpenMP Threadprivate Directive. +! Test for real, logical, complex, and derived type. 
+ +!RUN: %flang_fc1 -emit-fir -fopenmp %s -o - | FileCheck %s --check-prefix=FIRDialect + +module test + type my_type + integer :: t_i + real :: t_arr(5) + end type my_type + real :: x + complex :: y + logical :: z + type(my_type) :: t + + !$omp threadprivate(x, y, z, t) + +!FIRDialect-DAG: fir.global @_QMtestEt : !fir.type<_QMtestTmy_type{t_i:i32,t_arr:!fir.array<5xf32>}> { +!FIRDialect-DAG: fir.global @_QMtestEx : f32 { +!FIRDialect-DAG: fir.global @_QMtestEy : !fir.complex<4> { +!FIRDialect-DAG: fir.global @_QMtestEz : !fir.logical<4> { + +contains + subroutine sub() +!FIRDialect-DAG: [[ADDR0:%.*]] = fir.address_of(@_QMtestEt) : !fir.ref}>> +!FIRDialect-DAG: [[NEWADDR0:%.*]] = omp.threadprivate [[ADDR0]] : !fir.ref}>> -> !fir.ref}>> +!FIRDialect-DAG: [[ADDR1:%.*]] = fir.address_of(@_QMtestEx) : !fir.ref +!FIRDialect-DAG: [[NEWADDR1:%.*]] = omp.threadprivate [[ADDR1]] : !fir.ref -> !fir.ref +!FIRDialect-DAG: [[ADDR2:%.*]] = fir.address_of(@_QMtestEy) : !fir.ref> +!FIRDialect-DAG: [[NEWADDR2:%.*]] = omp.threadprivate [[ADDR2]] : !fir.ref> -> !fir.ref> +!FIRDialect-DAG: [[ADDR3:%.*]] = fir.address_of(@_QMtestEz) : !fir.ref> +!FIRDialect-DAG: [[NEWADDR3:%.*]] = omp.threadprivate [[ADDR3]] : !fir.ref> -> !fir.ref> +!FIRDialect-DAG: %{{.*}} = fir.load [[NEWADDR1]] : !fir.ref +!FIRDialect-DAG: %{{.*}} = fir.load [[NEWADDR2]] : !fir.ref> +!FIRDialect-DAG: %{{.*}} = fir.load [[NEWADDR3]] : !fir.ref> +!FIRDialect-DAG: %{{.*}} = fir.coordinate_of [[NEWADDR0]] + print *, x, y, z, t%t_i + + !$omp parallel +!FIRDialect-DAG: [[ADDR38:%.*]] = omp.threadprivate [[ADDR0]] : !fir.ref}>> -> !fir.ref}>> +!FIRDialect-DAG: [[ADDR39:%.*]] = omp.threadprivate [[ADDR1]] : !fir.ref -> !fir.ref +!FIRDialect-DAG: [[ADDR40:%.*]] = omp.threadprivate [[ADDR2]] : !fir.ref> -> !fir.ref> +!FIRDialect-DAG: [[ADDR41:%.*]] = omp.threadprivate [[ADDR3]] : !fir.ref> -> !fir.ref> +!FIRDialect-DAG: %{{.*}} = fir.load [[ADDR39]] : !fir.ref +!FIRDialect-DAG: %{{.*}} = fir.load [[ADDR40]] : !fir.ref> 
+!FIRDialect-DAG: %{{.*}} = fir.load [[ADDR41]] : !fir.ref> +!FIRDialect-DAG: %{{.*}} = fir.coordinate_of [[ADDR38]] + print *, x, y, z, t%t_i + !$omp end parallel + +!FIRDialect-DAG: %{{.*}} = fir.load [[NEWADDR1]] : !fir.ref +!FIRDialect-DAG: %{{.*}} = fir.load [[NEWADDR2]] : !fir.ref> +!FIRDialect-DAG: %{{.*}} = fir.load [[NEWADDR3]] : !fir.ref> +!FIRDialect-DAG: %{{.*}} = fir.coordinate_of [[NEWADDR0]] + print *, x, y, z, t%t_i + + end +end Index: flang/test/Lower/OpenMP/threadprivate-use-association.f90 =================================================================== --- /dev/null +++ flang/test/Lower/OpenMP/threadprivate-use-association.f90 @@ -0,0 +1,74 @@ +! This test checks lowering of OpenMP Threadprivate Directive. +! Test for threadprivate variable in use association. + +!RUN: %flang_fc1 -emit-fir -fopenmp %s -o - | FileCheck %s --check-prefix=FIRDialect + +!FIRDialect-DAG: fir.global common @_QBblk(dense<0> : vector<24xi8>) : !fir.array<24xi8> +!FIRDialect-DAG: fir.global @_QMtestEy : f32 { + +module test + integer :: x + real :: y, z(5) + common /blk/ x, z + + !$omp threadprivate(y, /blk/) + +contains + subroutine sub() +! 
FIRDialect-LABEL: @_QMtestPsub +!FIRDialect-DAG: [[ADDR0:%.*]] = fir.address_of(@_QBblk) : !fir.ref> +!FIRDialect-DAG: [[NEWADDR0:%.*]] = omp.threadprivate [[ADDR0]] : !fir.ref> -> !fir.ref> +!FIRDialect-DAG: [[ADDR1:%.*]] = fir.address_of(@_QMtestEy) : !fir.ref +!FIRDialect-DAG: [[NEWADDR1:%.*]] = omp.threadprivate [[ADDR1]] : !fir.ref -> !fir.ref + + !$omp parallel +!FIRDialect-DAG: [[ADDR2:%.*]] = omp.threadprivate [[ADDR0]] : !fir.ref> -> !fir.ref> +!FIRDialect-DAG: [[ADDR3:%.*]] = omp.threadprivate [[ADDR1]] : !fir.ref -> !fir.ref +!FIRDialect-DAG: [[ADDR4:%.*]] = fir.convert [[ADDR2]] : (!fir.ref>) -> !fir.ref> +!FIRDialect-DAG: [[ADDR5:%.*]] = fir.coordinate_of [[ADDR4]], %{{.*}} : (!fir.ref>, index) -> !fir.ref +!FIRDialect-DAG: [[ADDR6:%.*]] = fir.convert [[ADDR5:%.*]] : (!fir.ref) -> !fir.ref +!FIRDialect-DAG: [[ADDR7:%.*]] = fir.convert [[ADDR2]] : (!fir.ref>) -> !fir.ref> +!FIRDialect-DAG: [[ADDR8:%.*]] = fir.coordinate_of [[ADDR7]], %{{.*}} : (!fir.ref>, index) -> !fir.ref +!FIRDialect-DAG: [[ADDR9:%.*]] = fir.convert [[ADDR8:%.*]] : (!fir.ref) -> !fir.ref> +!FIRDialect-DAG: %{{.*}} = fir.load [[ADDR6]] : !fir.ref +!FIRDialect-DAG: %{{.*}} = fir.load [[ADDR3]] : !fir.ref +!FIRDialect-DAG: %{{.*}} = fir.embox [[ADDR9]](%{{.*}}) : (!fir.ref>, !fir.shape<1>) -> !fir.box> + print *, x, y, z + !$omp end parallel + end +end + +program main + use test + integer :: x1 + real :: z1(5) + common /blk/ x1, z1 + + !$omp threadprivate(/blk/) + + call sub() + +! 
FIRDialect-LABEL: @_QQmain() +!FIRDialect-DAG: [[ADDR0:%.*]] = fir.address_of(@_QBblk) : !fir.ref> +!FIRDialect-DAG: [[NEWADDR0:%.*]] = omp.threadprivate [[ADDR0]] : !fir.ref> -> !fir.ref> +!FIRDialect-DAG: [[ADDR1:%.*]] = fir.address_of(@_QBblk) : !fir.ref> +!FIRDialect-DAG: [[NEWADDR1:%.*]] = omp.threadprivate [[ADDR1]] : !fir.ref> -> !fir.ref> +!FIRDialect-DAG: [[ADDR2:%.*]] = fir.address_of(@_QMtestEy) : !fir.ref +!FIRDialect-DAG: [[NEWADDR2:%.*]] = omp.threadprivate [[ADDR2]] : !fir.ref -> !fir.ref + + !$omp parallel +!FIRDialect-DAG: [[ADDR4:%.*]] = omp.threadprivate [[ADDR1]] : !fir.ref> -> !fir.ref> +!FIRDialect-DAG: [[ADDR5:%.*]] = omp.threadprivate [[ADDR2]] : !fir.ref -> !fir.ref +!FIRDialect-DAG: [[ADDR6:%.*]] = fir.convert [[ADDR4]] : (!fir.ref>) -> !fir.ref> +!FIRDialect-DAG: [[ADDR7:%.*]] = fir.coordinate_of [[ADDR6]], %{{.*}} : (!fir.ref>, index) -> !fir.ref +!FIRDialect-DAG: [[ADDR8:%.*]] = fir.convert [[ADDR7:%.*]] : (!fir.ref) -> !fir.ref +!FIRDialect-DAG: [[ADDR9:%.*]] = fir.convert [[ADDR4]] : (!fir.ref>) -> !fir.ref> +!FIRDialect-DAG: [[ADDR10:%.*]] = fir.coordinate_of [[ADDR9]], %{{.*}} : (!fir.ref>, index) -> !fir.ref +!FIRDialect-DAG: [[ADDR11:%.*]] = fir.convert [[ADDR10:%.*]] : (!fir.ref) -> !fir.ref> +!FIRDialect-DAG: %{{.*}} = fir.load [[ADDR8]] : !fir.ref +!FIRDialect-DAG: %{{.*}} = fir.load [[ADDR5]] : !fir.ref +!FIRDialect-DAG: %{{.*}} = fir.embox [[ADDR11]](%{{.*}}) : (!fir.ref>, !fir.shape<1>) -> !fir.box> + print *, x1, y, z1 + !$omp end parallel + +end Index: mlir/lib/Conversion/OpenMPToLLVM/OpenMPToLLVM.cpp =================================================================== --- mlir/lib/Conversion/OpenMPToLLVM/OpenMPToLLVM.cpp +++ mlir/lib/Conversion/OpenMPToLLVM/OpenMPToLLVM.cpp @@ -52,7 +52,11 @@ LogicalResult matchAndRewrite(T curOp, typename T::Adaptor adaptor, ConversionPatternRewriter &rewriter) const override { - rewriter.replaceOpWithNewOp(curOp, TypeRange(), adaptor.getOperands(), + TypeConverter *converter = 
ConvertToLLVMPattern::getTypeConverter(); + SmallVector resTypes; + if (failed(converter->convertTypes(curOp->getResultTypes(), resTypes))) + return failure(); + rewriter.replaceOpWithNewOp(curOp, resTypes, adaptor.getOperands(), curOp->getAttrs()); return success(); } @@ -65,10 +69,10 @@ mlir::omp::MasterOp>( [&](Operation *op) { return typeConverter.isLegal(&op->getRegion(0)); }); target - .addDynamicallyLegalOp( - [&](Operation *op) { - return typeConverter.isLegal(op->getOperandTypes()); - }); + .addDynamicallyLegalOp([&](Operation *op) { + return typeConverter.isLegal(op->getOperandTypes()); + }); } void mlir::populateOpenMPToLLVMConversionPatterns(LLVMTypeConverter &converter, @@ -77,7 +81,8 @@ RegionOpConversion, RegionOpConversion, RegionLessOpConversion, - RegionLessOpConversion>(converter); + RegionLessOpConversion, + RegionLessOpConversion>(converter); } namespace {