diff --git a/flang/include/flang/Optimizer/CodeGen/CGPasses.td b/flang/include/flang/Optimizer/CodeGen/CGPasses.td --- a/flang/include/flang/Optimizer/CodeGen/CGPasses.td +++ b/flang/include/flang/Optimizer/CodeGen/CGPasses.td @@ -54,7 +54,10 @@ "Override module's target triple.">, Option<"noCharacterConversion", "no-character-conversion", "bool", /*default=*/"false", - "Disable target-specific conversion of CHARACTER."> + "Disable target-specific conversion of CHARACTER.">, + Option<"noComplexConversion", "no-complex-conversion", + "bool", /*default=*/"false", + "Disable target-specific conversion of COMPLEX."> ]; } diff --git a/flang/include/flang/Optimizer/CodeGen/CodeGen.h b/flang/include/flang/Optimizer/CodeGen/CodeGen.h --- a/flang/include/flang/Optimizer/CodeGen/CodeGen.h +++ b/flang/include/flang/Optimizer/CodeGen/CodeGen.h @@ -25,6 +25,7 @@ // FirTargetRewritePass options. struct TargetRewriteOptions { bool noCharacterConversion{}; + bool noComplexConversion{}; }; /// Prerequiste pass for code gen. Perform intermediate rewrites to tailor the diff --git a/flang/include/flang/Optimizer/Dialect/FIRTypes.td b/flang/include/flang/Optimizer/Dialect/FIRTypes.td --- a/flang/include/flang/Optimizer/Dialect/FIRTypes.td +++ b/flang/include/flang/Optimizer/Dialect/FIRTypes.td @@ -506,8 +506,11 @@ def AnyIntegerType : Type; // Composable types +// Note that we include both fir_ComplexType and AnyComplex, so we can use both +// the FIR ComplexType and the MLIR ComplexType (the former is used to represent +// Fortran complex and the latter for C++ std::complex). def AnyCompositeLike : TypeConstraint, "any composite">; diff --git a/flang/lib/Optimizer/CodeGen/Target.h b/flang/lib/Optimizer/CodeGen/Target.h --- a/flang/lib/Optimizer/CodeGen/Target.h +++ b/flang/lib/Optimizer/CodeGen/Target.h @@ -29,11 +29,20 @@ /// LLVMContext. 
class Attributes { public: - Attributes(bool append = false) : append{append} {} + Attributes(unsigned short alignment = 0, bool byval = false, + bool sret = false, bool append = false) + : alignment{alignment}, byval{byval}, sret{sret}, append{append} {} + unsigned getAlignment() const { return alignment; } + bool hasAlignment() const { return alignment != 0; } + bool isByVal() const { return byval; } + bool isSRet() const { return sret; } bool isAppend() const { return append; } private: + unsigned short alignment{}; + bool byval : 1; + bool sret : 1; bool append : 1; }; @@ -56,6 +65,15 @@ CodeGenSpecifics() = delete; virtual ~CodeGenSpecifics() {} + /// Type representation of a `complex` type argument when passed by + /// value. An argument value may need to be passed as a (safe) reference + /// argument. + virtual Marshalling complexArgumentType(mlir::Type eleTy) const = 0; + + /// Type representation of a `complex` type return value. Such a return + /// value may need to be converted to a hidden reference argument. + virtual Marshalling complexReturnType(mlir::Type eleTy) const = 0; + /// Type representation of a `boxchar` type argument when passed by value. /// An argument value may need to be passed as a (safe) reference argument. /// diff --git a/flang/lib/Optimizer/CodeGen/Target.cpp b/flang/lib/Optimizer/CodeGen/Target.cpp --- a/flang/lib/Optimizer/CodeGen/Target.cpp +++ b/flang/lib/Optimizer/CodeGen/Target.cpp @@ -20,6 +20,15 @@ using namespace fir; +// Reduce a REAL/float type to the floating point semantics. 
+static const llvm::fltSemantics &floatToSemantics(const KindMapping &kindMap, + mlir::Type type) { + assert(isa_real(type)); + if (auto ty = type.dyn_cast()) + return kindMap.getFloatSemantics(ty.getFKind()); + return type.cast().getFloatSemantics(); +} + namespace { template struct GenericTarget : public CodeGenSpecifics { @@ -35,7 +44,8 @@ // split format with all pointers first (in the declared position) and all // LEN arguments appended after all of the dummy arguments. // NB: Other conventions/ABIs can/should be supported via options. - marshal.emplace_back(idxTy, AT{/*append=*/!sret}); + marshal.emplace_back(idxTy, AT{/*alignment=*/0, /*byval=*/false, + /*sret=*/sret, /*append=*/!sret}); return marshal; } }; @@ -50,6 +60,39 @@ using GenericTarget::GenericTarget; static constexpr int defaultWidth = 32; + + CodeGenSpecifics::Marshalling + complexArgumentType(mlir::Type eleTy) const override { + assert(fir::isa_real(eleTy)); + CodeGenSpecifics::Marshalling marshal; + // { t, t } struct of 2 eleTy, byval, align 4 + mlir::TypeRange range = {eleTy, eleTy}; + auto structTy = mlir::TupleType::get(eleTy.getContext(), range); + marshal.emplace_back(fir::ReferenceType::get(structTy), + AT{/*alignment=*/4, /*byval=*/true}); + return marshal; + } + + CodeGenSpecifics::Marshalling + complexReturnType(mlir::Type eleTy) const override { + assert(fir::isa_real(eleTy)); + CodeGenSpecifics::Marshalling marshal; + const auto *sem = &floatToSemantics(kindMap, eleTy); + if (sem == &llvm::APFloat::IEEEsingle()) { + // i64 pack both floats in a 64-bit GPR + marshal.emplace_back(mlir::IntegerType::get(eleTy.getContext(), 64), + AT{}); + } else if (sem == &llvm::APFloat::IEEEdouble()) { + // { t, t } struct of 2 eleTy, sret, align 4 + mlir::TypeRange range = {eleTy, eleTy}; + auto structTy = mlir::TupleType::get(eleTy.getContext(), range); + marshal.emplace_back(fir::ReferenceType::get(structTy), + AT{/*alignment=*/4, /*byval=*/false, /*sret=*/true}); + } else { + 
llvm::report_fatal_error("complex for this precision not implemented"); + } + return marshal; + } }; } // namespace @@ -62,6 +105,41 @@ using GenericTarget::GenericTarget; static constexpr int defaultWidth = 64; + + CodeGenSpecifics::Marshalling + complexArgumentType(mlir::Type eleTy) const override { + CodeGenSpecifics::Marshalling marshal; + const auto *sem = &floatToSemantics(kindMap, eleTy); + if (sem == &llvm::APFloat::IEEEsingle()) { + // <2 x t> vector of 2 eleTy + marshal.emplace_back(fir::VectorType::get(2, eleTy), AT{}); + } else if (sem == &llvm::APFloat::IEEEdouble()) { + // two distinct double arguments + marshal.emplace_back(eleTy, AT{}); + marshal.emplace_back(eleTy, AT{}); + } else { + llvm::report_fatal_error("complex for this precision not implemented"); + } + return marshal; + } + + CodeGenSpecifics::Marshalling + complexReturnType(mlir::Type eleTy) const override { + CodeGenSpecifics::Marshalling marshal; + const auto *sem = &floatToSemantics(kindMap, eleTy); + if (sem == &llvm::APFloat::IEEEsingle()) { + // <2 x t> vector of 2 eleTy + marshal.emplace_back(fir::VectorType::get(2, eleTy), AT{}); + } else if (sem == &llvm::APFloat::IEEEdouble()) { + // ( t, t ) tuple of 2 eleTy + mlir::TypeRange range = {eleTy, eleTy}; + marshal.emplace_back(mlir::TupleType::get(eleTy.getContext(), range), + AT{}); + } else { + llvm::report_fatal_error("complex for this precision not implemented"); + } + return marshal; + } }; } // namespace @@ -74,6 +152,36 @@ using GenericTarget::GenericTarget; static constexpr int defaultWidth = 64; + + CodeGenSpecifics::Marshalling + complexArgumentType(mlir::Type eleTy) const override { + CodeGenSpecifics::Marshalling marshal; + const auto *sem = &floatToSemantics(kindMap, eleTy); + if (sem == &llvm::APFloat::IEEEsingle() || + sem == &llvm::APFloat::IEEEdouble()) { + // [2 x t] array of 2 eleTy + marshal.emplace_back(fir::SequenceType::get({2}, eleTy), AT{}); + } else { + llvm::report_fatal_error("complex for this precision 
not implemented"); + } + return marshal; + } + + CodeGenSpecifics::Marshalling + complexReturnType(mlir::Type eleTy) const override { + CodeGenSpecifics::Marshalling marshal; + const auto *sem = &floatToSemantics(kindMap, eleTy); + if (sem == &llvm::APFloat::IEEEsingle() || + sem == &llvm::APFloat::IEEEdouble()) { + // ( t, t ) tuple of 2 eleTy + mlir::TypeRange range = {eleTy, eleTy}; + marshal.emplace_back(mlir::TupleType::get(eleTy.getContext(), range), + AT{}); + } else { + llvm::report_fatal_error("complex for this precision not implemented"); + } + return marshal; + } }; } // namespace @@ -86,6 +194,24 @@ using GenericTarget::GenericTarget; static constexpr int defaultWidth = 64; + + CodeGenSpecifics::Marshalling + complexArgumentType(mlir::Type eleTy) const override { + CodeGenSpecifics::Marshalling marshal; + // two distinct element type arguments (re, im) + marshal.emplace_back(eleTy, AT{}); + marshal.emplace_back(eleTy, AT{}); + return marshal; + } + + CodeGenSpecifics::Marshalling + complexReturnType(mlir::Type eleTy) const override { + CodeGenSpecifics::Marshalling marshal; + // ( t, t ) tuple of 2 element type + mlir::TypeRange range = {eleTy, eleTy}; + marshal.emplace_back(mlir::TupleType::get(eleTy.getContext(), range), AT{}); + return marshal; + } }; } // namespace diff --git a/flang/lib/Optimizer/CodeGen/TargetRewrite.cpp b/flang/lib/Optimizer/CodeGen/TargetRewrite.cpp --- a/flang/lib/Optimizer/CodeGen/TargetRewrite.cpp +++ b/flang/lib/Optimizer/CodeGen/TargetRewrite.cpp @@ -35,7 +35,15 @@ /// Fixups for updating a FuncOp's arguments and return values. 
struct FixupTy { - enum class Codes { CharPair, Trailing }; + enum class Codes { + ArgumentAsLoad, + ArgumentType, + CharPair, + ReturnAsStore, + ReturnType, + Split, + Trailing + }; FixupTy(Codes code, std::size_t index, std::size_t second = 0) : code{code}, index{index}, second{second} {} @@ -60,6 +68,7 @@ public: TargetRewrite(const TargetRewriteOptions &options) { noCharacterConversion = options.noCharacterConversion; + noComplexConversion = options.noComplexConversion; } void runOnOperation() override final { @@ -99,6 +108,76 @@ mlir::ModuleOp getModule() { return getOperation(); } + template + std::function + rewriteCallComplexResultType(A ty, B &newResTys, B &newInTys, C &newOpers) { + auto m = specifics->complexReturnType(ty.getElementType()); + // Currently targets mandate COMPLEX is a single aggregate or packed + // scalar, including the sret case. + assert(m.size() == 1 && "target lowering of complex return not supported"); + auto resTy = std::get(m[0]); + auto attr = std::get(m[0]); + auto loc = mlir::UnknownLoc::get(resTy.getContext()); + if (attr.isSRet()) { + assert(isa_ref_type(resTy)); + mlir::Value stack = + rewriter->create(loc, dyn_cast_ptrEleTy(resTy)); + newInTys.push_back(resTy); + newOpers.push_back(stack); + return [=](mlir::Operation *) -> mlir::Value { + auto memTy = ReferenceType::get(ty); + auto cast = rewriter->create(loc, memTy, stack); + return rewriter->create(loc, cast); + }; + } + newResTys.push_back(resTy); + return [=](mlir::Operation *call) -> mlir::Value { + auto mem = rewriter->create(loc, resTy); + rewriter->create(loc, call->getResult(0), mem); + auto memTy = ReferenceType::get(ty); + auto cast = rewriter->create(loc, memTy, mem); + return rewriter->create(loc, cast); + }; + } + + template + void rewriteCallComplexInputType(A ty, mlir::Value oper, B &newInTys, + C &newOpers) { + auto m = specifics->complexArgumentType(ty.getElementType()); + auto *ctx = ty.getContext(); + auto loc = mlir::UnknownLoc::get(ctx); + if 
(m.size() == 1) { + // COMPLEX is a single aggregate + auto resTy = std::get(m[0]); + auto attr = std::get(m[0]); + auto oldRefTy = ReferenceType::get(ty); + if (attr.isByVal()) { + auto mem = rewriter->create(loc, ty); + rewriter->create(loc, oper, mem); + newOpers.push_back(rewriter->create(loc, resTy, mem)); + } else { + auto mem = rewriter->create(loc, resTy); + auto cast = rewriter->create(loc, oldRefTy, mem); + rewriter->create(loc, oper, cast); + newOpers.push_back(rewriter->create(loc, mem)); + } + newInTys.push_back(resTy); + } else { + assert(m.size() == 2); + // COMPLEX is split into 2 separate arguments + for (auto e : llvm::enumerate(m)) { + auto &tup = e.value(); + auto ty = std::get(tup); + auto index = e.index(); + auto idx = rewriter->getIntegerAttr(rewriter->getIndexType(), index); + auto val = rewriter->create( + loc, ty, oper, rewriter->getArrayAttr(idx)); + newInTys.push_back(ty); + newOpers.push_back(val); + } + } + } + // Convert fir.call and fir.dispatch Ops. template void convertCallOp(A callOp) { @@ -124,7 +203,16 @@ llvm::Optional> wrap; if (fnTy.getResults().size() == 1) { mlir::Type ty = fnTy.getResult(0); - newResTys.push_back(ty); + llvm::TypeSwitch(ty) + .template Case([&](fir::ComplexType cmplx) { + wrap = rewriteCallComplexResultType(cmplx, newResTys, newInTys, + newOpers); + }) + .template Case([&](mlir::ComplexType cmplx) { + wrap = rewriteCallComplexResultType(cmplx, newResTys, newInTys, + newOpers); + }) + .Default([&](mlir::Type ty) { newResTys.push_back(ty); }); } else if (fnTy.getResults().size() > 1) { TODO(loc, "multiple results not supported yet"); } @@ -169,6 +257,12 @@ } } }) + .template Case([&](fir::ComplexType cmplx) { + rewriteCallComplexInputType(cmplx, oper, newInTys, newOpers); + }) + .template Case([&](mlir::ComplexType cmplx) { + rewriteCallComplexInputType(cmplx, oper, newInTys, newOpers); + }) .Default([&](mlir::Type ty) { newInTys.push_back(ty); newOpers.push_back(oper); @@ -198,6 +292,33 @@ TODO(loc, 
"dispatch not implemented"); } } + + // Result type fixup for fir::ComplexType and mlir::ComplexType + template + void lowerComplexSignatureRes(A cmplx, B &newResTys, B &newInTys) { + if (noComplexConversion) { + newResTys.push_back(cmplx); + } else { + for (auto &tup : specifics->complexReturnType(cmplx.getElementType())) { + auto argTy = std::get(tup); + if (std::get(tup).isSRet()) + newInTys.push_back(argTy); + else + newResTys.push_back(argTy); + } + } + } + + // Argument type fixup for fir::ComplexType and mlir::ComplexType + template + void lowerComplexSignatureArg(A cmplx, B &newInTys) { + if (noComplexConversion) + newInTys.push_back(cmplx); + else + for (auto &tup : specifics->complexArgumentType(cmplx.getElementType())) + newInTys.push_back(std::get(tup)); + } + /// Convert the type signatures on all the functions present in the module. /// As the type signature is being changed, this must also update the /// function itself to use any new arguments, etc. @@ -215,12 +336,14 @@ assert(signature.isa()); auto func = signature.dyn_cast(); for (auto ty : func.getResults()) - if ((ty.isa() && !noCharacterConversion)) { + if ((ty.isa() && !noCharacterConversion) || + (isa_complex(ty) && !noComplexConversion)) { LLVM_DEBUG(llvm::dbgs() << "rewrite " << signature << " for target\n"); return false; } for (auto ty : func.getInputs()) - if ((ty.isa() && !noCharacterConversion)) { + if ((ty.isa() && !noCharacterConversion) || + (isa_complex(ty) && !noComplexConversion)) { LLVM_DEBUG(llvm::dbgs() << "rewrite " << signature << " for target\n"); return false; } @@ -239,7 +362,20 @@ // Convert return value(s) for (auto ty : funcTy.getResults()) - newResTys.push_back(ty); + llvm::TypeSwitch(ty) + .Case([&](fir::ComplexType cmplx) { + if (noComplexConversion) + newResTys.push_back(cmplx); + else + doComplexReturn(func, cmplx, newResTys, newInTys, fixups); + }) + .Case([&](mlir::ComplexType cmplx) { + if (noComplexConversion) + newResTys.push_back(cmplx); + else + 
doComplexReturn(func, cmplx, newResTys, newInTys, fixups); + }) + .Default([&](mlir::Type ty) { newResTys.push_back(ty); }); // Convert arguments llvm::SmallVector trailingTys; @@ -276,6 +412,18 @@ } } }) + .Case([&](fir::ComplexType cmplx) { + if (noComplexConversion) + newInTys.push_back(cmplx); + else + doComplexArg(func, cmplx, newInTys, fixups); + }) + .Case([&](mlir::ComplexType cmplx) { + if (noComplexConversion) + newInTys.push_back(cmplx); + else + doComplexArg(func, cmplx, newInTys, fixups); + }) .Default([&](mlir::Type ty) { newInTys.push_back(ty); }); } @@ -289,6 +437,37 @@ for (std::remove_const_t i = 0; i < fixupSize; ++i) { const auto &fixup = fixups[i]; switch (fixup.code) { + case FixupTy::Codes::ArgumentAsLoad: { + // Argument was pass-by-value, but is now pass-by-reference and + // possibly with a different element type. + auto newArg = + func.front().insertArgument(fixup.index, newInTys[fixup.index]); + rewriter->setInsertionPointToStart(&func.front()); + auto oldArgTy = ReferenceType::get(oldArgTys[fixup.index - offset]); + auto cast = rewriter->create(loc, oldArgTy, newArg); + auto load = rewriter->create(loc, cast); + func.getArgument(fixup.index + 1).replaceAllUsesWith(load); + func.front().eraseArgument(fixup.index + 1); + } break; + case FixupTy::Codes::ArgumentType: { + // Argument is pass-by-value, but its type has likely been modified to + // suit the target ABI convention. 
+ auto newArg = + func.front().insertArgument(fixup.index, newInTys[fixup.index]); + rewriter->setInsertionPointToStart(&func.front()); + auto mem = + rewriter->create(loc, newInTys[fixup.index]); + rewriter->create(loc, newArg, mem); + auto oldArgTy = ReferenceType::get(oldArgTys[fixup.index - offset]); + auto cast = rewriter->create(loc, oldArgTy, mem); + mlir::Value load = rewriter->create(loc, cast); + func.getArgument(fixup.index + 1).replaceAllUsesWith(load); + func.front().eraseArgument(fixup.index + 1); + LLVM_DEBUG(llvm::dbgs() + << "old argument: " << oldArgTy.getEleTy() + << ", repl: " << load << ", new argument: " + << func.getArgument(fixup.index).getType() << '\n'); + } break; case FixupTy::Codes::CharPair: { // The FIR boxchar argument has been split into a pair of distinct // arguments that are in juxtaposition to each other. @@ -304,6 +483,59 @@ offset++; } } break; + case FixupTy::Codes::ReturnAsStore: { + // The value being returned is now being returned in memory (callee + // stack space) through a hidden reference argument. + auto newArg = + func.front().insertArgument(fixup.index, newInTys[fixup.index]); + offset++; + func.walk([&](mlir::ReturnOp ret) { + rewriter->setInsertionPoint(ret); + auto oldOper = ret.getOperand(0); + auto oldOperTy = ReferenceType::get(oldOper.getType()); + auto cast = rewriter->create(loc, oldOperTy, newArg); + rewriter->create(loc, oldOper, cast); + rewriter->create(loc); + ret.erase(); + }); + } break; + case FixupTy::Codes::ReturnType: { + // The function is still returning a value, but its type has likely + // changed to suit the target ABI convention. 
+ func.walk([&](mlir::ReturnOp ret) { + rewriter->setInsertionPoint(ret); + auto oldOper = ret.getOperand(0); + auto oldOperTy = ReferenceType::get(oldOper.getType()); + auto mem = + rewriter->create(loc, newResTys[fixup.index]); + auto cast = rewriter->create(loc, oldOperTy, mem); + rewriter->create(loc, oldOper, cast); + mlir::Value load = rewriter->create(loc, mem); + rewriter->create(loc, load); + ret.erase(); + }); + } break; + case FixupTy::Codes::Split: { + // The FIR argument has been split into a pair of distinct arguments + // that are in juxtaposition to each other. (For COMPLEX value.) + auto newArg = + func.front().insertArgument(fixup.index, newInTys[fixup.index]); + if (fixup.second == 1) { + rewriter->setInsertionPointToStart(&func.front()); + auto cplxTy = oldArgTys[fixup.index - offset - fixup.second]; + auto undef = rewriter->create(loc, cplxTy); + auto zero = rewriter->getIntegerAttr(rewriter->getIndexType(), 0); + auto one = rewriter->getIntegerAttr(rewriter->getIndexType(), 1); + auto cplx1 = rewriter->create( + loc, cplxTy, undef, func.front().getArgument(fixup.index - 1), + rewriter->getArrayAttr(zero)); + auto cplx = rewriter->create( + loc, cplxTy, cplx1, newArg, rewriter->getArrayAttr(one)); + func.getArgument(fixup.index + 1).replaceAllUsesWith(cplx); + func.front().eraseArgument(fixup.index + 1); + offset++; + } + } break; case FixupTy::Codes::Trailing: { // The FIR argument has been split into a pair of distinct arguments. // The first part of the pair appears in the original argument @@ -341,6 +573,81 @@ return false; } + /// Convert a complex return value. This can involve converting the return + /// value to a "hidden" first argument or packing the complex into a wide + /// GPR. 
+ template + void doComplexReturn(mlir::FuncOp func, A cmplx, B &newResTys, B &newInTys, + C &fixups) { + if (noComplexConversion) { + newResTys.push_back(cmplx); + return; + } + auto m = specifics->complexReturnType(cmplx.getElementType()); + assert(m.size() == 1); + auto &tup = m[0]; + auto attr = std::get(tup); + auto argTy = std::get(tup); + if (attr.isSRet()) { + unsigned argNo = newInTys.size(); + fixups.emplace_back( + FixupTy::Codes::ReturnAsStore, argNo, [=](mlir::FuncOp func) { + func.setArgAttr(argNo, "llvm.sret", rewriter->getUnitAttr()); + }); + newInTys.push_back(argTy); + return; + } + fixups.emplace_back(FixupTy::Codes::ReturnType, newResTys.size()); + newResTys.push_back(argTy); + } + + /// Convert a complex argument value. This can involve storing the value to + /// a temporary memory location or factoring the value into two distinct + /// arguments. + template + void doComplexArg(mlir::FuncOp func, A cmplx, B &newInTys, C &fixups) { + if (noComplexConversion) { + newInTys.push_back(cmplx); + return; + } + auto m = specifics->complexArgumentType(cmplx.getElementType()); + const auto fixupCode = + m.size() > 1 ? 
FixupTy::Codes::Split : FixupTy::Codes::ArgumentType; + for (auto e : llvm::enumerate(m)) { + auto &tup = e.value(); + auto index = e.index(); + auto attr = std::get(tup); + auto argTy = std::get(tup); + auto argNo = newInTys.size(); + if (attr.isByVal()) { + if (auto align = attr.getAlignment()) + fixups.emplace_back( + FixupTy::Codes::ArgumentAsLoad, argNo, [=](mlir::FuncOp func) { + func.setArgAttr(argNo, "llvm.byval", rewriter->getUnitAttr()); + func.setArgAttr(argNo, "llvm.align", + rewriter->getIntegerAttr( + rewriter->getIntegerType(32), align)); + }); + else + fixups.emplace_back(FixupTy::Codes::ArgumentAsLoad, newInTys.size(), + [=](mlir::FuncOp func) { + func.setArgAttr(argNo, "llvm.byval", + rewriter->getUnitAttr()); + }); + } else { + if (auto align = attr.getAlignment()) + fixups.emplace_back(fixupCode, argNo, index, [=](mlir::FuncOp func) { + func.setArgAttr( + argNo, "llvm.align", + rewriter->getIntegerAttr(rewriter->getIntegerType(32), align)); + }); + else + fixups.emplace_back(fixupCode, argNo, index); + } + newInTys.push_back(argTy); + } + } + private: // Replace `op` and remove it. 
void replaceOp(mlir::Operation *op, mlir::ValueRange newValues) { diff --git a/flang/test/Fir/target.fir b/flang/test/Fir/target-rewrite-boxchar.fir rename from flang/test/Fir/target.fir rename to flang/test/Fir/target-rewrite-boxchar.fir diff --git a/flang/test/Fir/target-rewrite-complex.fir b/flang/test/Fir/target-rewrite-complex.fir new file mode 100644 --- /dev/null +++ b/flang/test/Fir/target-rewrite-complex.fir @@ -0,0 +1,454 @@ +// RUN: fir-opt --target-rewrite="target=i386-unknown-linux-gnu" %s | FileCheck %s --check-prefix=I32 +// RUN: fir-opt --target-rewrite="target=x86_64-unknown-linux-gnu" %s | FileCheck %s --check-prefix=X64 +// RUN: fir-opt --target-rewrite="target=aarch64-unknown-linux-gnu" %s | FileCheck %s --check-prefix=AARCH64 +// RUN: fir-opt --target-rewrite="target=powerpc64le-unknown-linux-gnu" %s | FileCheck %s --check-prefix=PPC + +// Test that we rewrite the signature and body of a function that returns a +// complex<4>. +// I32-LABEL: func @returncomplex4() -> i64 +// X64-LABEL: func @returncomplex4() -> !fir.vector<2:!fir.real<4>> +// AARCH64-LABEL: func @returncomplex4() -> tuple, !fir.real<4>> +// PPC-LABEL: func @returncomplex4() -> tuple, !fir.real<4>> +func @returncomplex4() -> !fir.complex<4> { + // I32: fir.insert_value + // I32: [[VAL:%[0-9A-Za-z]+]] = fir.insert_value + // X64: fir.insert_value + // X64: [[VAL:%[0-9A-Za-z]+]] = fir.insert_value + // AARCH64: fir.insert_value + // AARCH64: [[VAL:%[0-9A-Za-z]+]] = fir.insert_value + // PPC: fir.insert_value + // PPC: [[VAL:%[0-9A-Za-z]+]] = fir.insert_value + %1 = fir.undefined !fir.complex<4> + %2 = arith.constant 2.0 : f32 + %3 = fir.convert %2 : (f32) -> !fir.real<4> + %c0 = arith.constant 0 : i32 + %4 = fir.insert_value %1, %3, [0 : index] : (!fir.complex<4>, !fir.real<4>) -> !fir.complex<4> + %c1 = arith.constant 1 : i32 + %5 = arith.constant -42.0 : f32 + %6 = fir.insert_value %4, %5, [1 : index] : (!fir.complex<4>, f32) -> !fir.complex<4> + + // I32: 
[[ADDRI64:%[0-9A-Za-z]+]] = fir.alloca i64 + // I32: [[ADDRC:%[0-9A-Za-z]+]] = fir.convert [[ADDRI64]] : (!fir.ref) -> !fir.ref> + // I32: fir.store [[VAL]] to [[ADDRC]] : !fir.ref> + // I32: [[RES:%[0-9A-Za-z]+]] = fir.load [[ADDRI64]] : !fir.ref + // I32: return [[RES]] : i64 + // X64: [[ADDRV:%[0-9A-Za-z]+]] = fir.alloca !fir.vector<2:!fir.real<4>> + // X64: [[ADDRC:%[0-9A-Za-z]+]] = fir.convert [[ADDRV]] : (!fir.ref>>) -> !fir.ref> + // X64: fir.store [[VAL]] to [[ADDRC]] : !fir.ref> + // X64: [[RES:%[0-9A-Za-z]+]] = fir.load [[ADDRV]] : !fir.ref>> + // X64: return [[RES]] : !fir.vector<2:!fir.real<4>> + // AARCH64: [[ADDRT:%[0-9A-Za-z]+]] = fir.alloca tuple, !fir.real<4>> + // AARCH64: [[ADDRC:%[0-9A-Za-z]+]] = fir.convert [[ADDRT]] : (!fir.ref, !fir.real<4>>>) -> !fir.ref> + // AARCH64: fir.store [[VAL]] to [[ADDRC]] : !fir.ref> + // AARCH64: [[RES:%[0-9A-Za-z]+]] = fir.load [[ADDRT]] : !fir.ref, !fir.real<4>>> + // AARCH64: return [[RES]] : tuple, !fir.real<4>> + // PPC: [[ADDRT:%[0-9A-Za-z]+]] = fir.alloca tuple, !fir.real<4>> + // PPC: [[ADDRC:%[0-9A-Za-z]+]] = fir.convert [[ADDRT]] : (!fir.ref, !fir.real<4>>>) -> !fir.ref> + // PPC: fir.store [[VAL]] to [[ADDRC]] : !fir.ref> + // PPC: [[RES:%[0-9A-Za-z]+]] = fir.load [[ADDRT]] : !fir.ref, !fir.real<4>>> + // PPC: return [[RES]] : tuple, !fir.real<4>> + return %6 : !fir.complex<4> +} + +// Test that we rewrite the signature and body of a function that returns a +// complex<8>. 
+// Test ABI rewrite of a function returning a complex<8> (一-to-one with the
+// target-specific lowering exercised by the new complexReturnType hook):
+//   I32     returns via a hidden sret pointer to tuple<!fir.real<8> x 2>
+//   X64/AARCH64/PPC return the tuple by value.
+// I32-LABEL:func @returncomplex8
+// I32-SAME: ([[ARG0:%[0-9A-Za-z]+]]: !fir.ref<tuple<!fir.real<8>, !fir.real<8>>> {llvm.sret})
+// X64-LABEL: func @returncomplex8() -> tuple<!fir.real<8>, !fir.real<8>>
+// AARCH64-LABEL: func @returncomplex8() -> tuple<!fir.real<8>, !fir.real<8>>
+// PPC-LABEL: func @returncomplex8() -> tuple<!fir.real<8>, !fir.real<8>>
+func @returncomplex8() -> !fir.complex<8> {
+  // I32: fir.insert_value
+  // I32: [[VAL:%[0-9A-Za-z]+]] = fir.insert_value {{.*}}
+  // X64: fir.insert_value
+  // X64: [[VAL:%[0-9A-Za-z]+]] = fir.insert_value {{.*}}
+  // AARCH64: fir.insert_value
+  // AARCH64: [[VAL:%[0-9A-Za-z]+]] = fir.insert_value {{.*}}
+  // PPC: fir.insert_value
+  // PPC: [[VAL:%[0-9A-Za-z]+]] = fir.insert_value {{.*}}
+  %1 = fir.undefined !fir.complex<8>
+  %2 = arith.constant 1.0 : f64
+  %3 = arith.constant -4.0 : f64
+  %c0 = arith.constant 0 : i32
+  %4 = fir.insert_value %1, %3, [0 : index] : (!fir.complex<8>, f64) -> !fir.complex<8>
+  %c1 = arith.constant 1 : i32
+  %5 = fir.insert_value %4, %2, [1 : index] : (!fir.complex<8>, f64) -> !fir.complex<8>
+
+  // I32: [[ADDR:%[0-9A-Za-z]+]] = fir.convert [[ARG0]] : (!fir.ref<tuple<!fir.real<8>, !fir.real<8>>>) -> !fir.ref<!fir.complex<8>>
+  // I32: fir.store [[VAL]] to [[ADDR]] : !fir.ref<!fir.complex<8>>
+  // I32: return{{ *$}}
+  // X64: [[ADDRT:%[0-9A-Za-z]+]] = fir.alloca tuple<!fir.real<8>, !fir.real<8>>
+  // X64: [[ADDRC:%[0-9A-Za-z]+]] = fir.convert [[ADDRT]] : (!fir.ref<tuple<!fir.real<8>, !fir.real<8>>>) -> !fir.ref<!fir.complex<8>>
+  // X64: fir.store [[VAL]] to [[ADDRC]] : !fir.ref<!fir.complex<8>>
+  // X64: [[RES:%[0-9A-Za-z]+]] = fir.load [[ADDRT]] : !fir.ref<tuple<!fir.real<8>, !fir.real<8>>>
+  // X64: return [[RES]] : tuple<!fir.real<8>, !fir.real<8>>
+  // AARCH64: [[ADDRT:%[0-9A-Za-z]+]] = fir.alloca tuple<!fir.real<8>, !fir.real<8>>
+  // AARCH64: [[ADDRC:%[0-9A-Za-z]+]] = fir.convert [[ADDRT]] : (!fir.ref<tuple<!fir.real<8>, !fir.real<8>>>) -> !fir.ref<!fir.complex<8>>
+  // AARCH64: fir.store [[VAL]] to [[ADDRC]] : !fir.ref<!fir.complex<8>>
+  // AARCH64: [[RES:%[0-9A-Za-z]+]] = fir.load [[ADDRT]] : !fir.ref<tuple<!fir.real<8>, !fir.real<8>>>
+  // AARCH64: return [[RES]] : tuple<!fir.real<8>, !fir.real<8>>
+  // PPC: [[ADDRT:%[0-9A-Za-z]+]] = fir.alloca tuple<!fir.real<8>, !fir.real<8>>
+  // PPC: [[ADDRC:%[0-9A-Za-z]+]] = fir.convert [[ADDRT]] : (!fir.ref<tuple<!fir.real<8>, !fir.real<8>>>) -> !fir.ref<!fir.complex<8>>
+  // PPC: fir.store [[VAL]] to [[ADDRC]] : !fir.ref<!fir.complex<8>>
+  // PPC: [[RES:%[0-9A-Za-z]+]] = fir.load [[ADDRT]] : !fir.ref<tuple<!fir.real<8>, !fir.real<8>>>
+  // PPC: return [[RES]] : tuple<!fir.real<8>, !fir.real<8>>
+  return %5 : !fir.complex<8>
+}
+
+// Test that we rewrite the signature of a function that accepts a complex<4>.
+// I32-LABEL: func private @paramcomplex4(!fir.ref<tuple<!fir.real<4>, !fir.real<4>>> {llvm.align = 4 : i32, llvm.byval})
+// X64-LABEL: func private @paramcomplex4(!fir.vector<2:!fir.real<4>>)
+// AARCH64-LABEL: func private @paramcomplex4(!fir.array<2x!fir.real<4>>)
+// PPC-LABEL: func private @paramcomplex4(!fir.real<4>, !fir.real<4>)
+func private @paramcomplex4(!fir.complex<4>) -> ()
+
+// Test that we rewrite calls to functions that return or accept complex<4>.
+// I32-LABEL: func @callcomplex4()
+// X64-LABEL: func @callcomplex4()
+// AARCH64-LABEL: func @callcomplex4()
+func @callcomplex4() {
+
+  // I32: [[RES:%[0-9A-Za-z]+]] = fir.call @returncomplex4() : () -> i64
+  // X64: [[RES:%[0-9A-Za-z]+]] = fir.call @returncomplex4() : () -> !fir.vector<2:!fir.real<4>>
+  // AARCH64: [[RES:%[0-9A-Za-z]+]] = fir.call @returncomplex4() : () -> tuple<!fir.real<4>, !fir.real<4>>
+  // PPC: [[RES:%[0-9A-Za-z]+]] = fir.call @returncomplex4() : () -> tuple<!fir.real<4>, !fir.real<4>>
+  %1 = fir.call @returncomplex4() : () -> !fir.complex<4>
+
+  // I32: [[ADDRI64:%[0-9A-Za-z]+]] = fir.alloca i64
+  // I32: fir.store [[RES]] to [[ADDRI64]] : !fir.ref<i64>
+  // I32: [[ADDRC:%[0-9A-Za-z]+]] = fir.convert [[ADDRI64]] : (!fir.ref<i64>) -> !fir.ref<!fir.complex<4>>
+  // I32: [[C:%[0-9A-Za-z]+]] = fir.load [[ADDRC]] : !fir.ref<!fir.complex<4>>
+  // I32: [[ADDRC2:%[0-9A-Za-z]+]] = fir.alloca !fir.complex<4>
+  // I32: fir.store [[C]] to [[ADDRC2]] : !fir.ref<!fir.complex<4>>
+  // I32: [[T:%[0-9A-Za-z]+]] = fir.convert [[ADDRC2]] : (!fir.ref<!fir.complex<4>>) -> !fir.ref<tuple<!fir.real<4>, !fir.real<4>>>
+  // I32: fir.call @paramcomplex4([[T]]) : (!fir.ref<tuple<!fir.real<4>, !fir.real<4>>>) -> ()
+
+  // X64: [[ADDRV:%[0-9A-Za-z]+]] = fir.alloca !fir.vector<2:!fir.real<4>>
+  // X64: fir.store [[RES]] to [[ADDRV]] : !fir.ref<!fir.vector<2:!fir.real<4>>>
+  // X64: [[ADDRC:%[0-9A-Za-z]+]] = fir.convert [[ADDRV]] : (!fir.ref<!fir.vector<2:!fir.real<4>>>) -> !fir.ref<!fir.complex<4>>
+  // X64: [[V:%[0-9A-Za-z]+]] = fir.load [[ADDRC]] : !fir.ref<!fir.complex<4>>
+  // X64: [[ADDRV2:%[0-9A-Za-z]+]] = fir.alloca !fir.vector<2:!fir.real<4>>
+  // X64: [[ADDRC2:%[0-9A-Za-z]+]] = fir.convert [[ADDRV2]] : (!fir.ref<!fir.vector<2:!fir.real<4>>>) -> !fir.ref<!fir.complex<4>>
+  // X64: fir.store [[V]] to [[ADDRC2]] : !fir.ref<!fir.complex<4>>
+  // X64: [[VRELOADED:%[0-9A-Za-z]+]] = fir.load [[ADDRV2]] : !fir.ref<!fir.vector<2:!fir.real<4>>>
+  // X64: fir.call @paramcomplex4([[VRELOADED]]) : (!fir.vector<2:!fir.real<4>>) -> ()
+
+  // AARCH64: [[ADDRT:%[0-9A-Za-z]+]] = fir.alloca tuple<!fir.real<4>, !fir.real<4>>
+  // AARCH64: fir.store [[RES]] to [[ADDRT]] : !fir.ref<tuple<!fir.real<4>, !fir.real<4>>>
+  // AARCH64: [[ADDRC:%[0-9A-Za-z]+]] = fir.convert [[ADDRT]] : (!fir.ref<tuple<!fir.real<4>, !fir.real<4>>>) -> !fir.ref<!fir.complex<4>>
+  // AARCH64: [[V:%[0-9A-Za-z]+]] = fir.load [[ADDRC]] : !fir.ref<!fir.complex<4>>
+  // AARCH64: [[ADDRARR:%[0-9A-Za-z]+]] = fir.alloca !fir.array<2x!fir.real<4>>
+  // AARCH64: [[ADDRC2:%[0-9A-Za-z]+]] = fir.convert [[ADDRARR]] : (!fir.ref<!fir.array<2x!fir.real<4>>>) -> !fir.ref<!fir.complex<4>>
+  // AARCH64: fir.store [[V]] to [[ADDRC2]] : !fir.ref<!fir.complex<4>>
+  // AARCH64: [[ARR:%[0-9A-Za-z]+]] = fir.load [[ADDRARR]] : !fir.ref<!fir.array<2x!fir.real<4>>>
+  // AARCH64: fir.call @paramcomplex4([[ARR]]) : (!fir.array<2x!fir.real<4>>) -> ()
+
+  // PPC: [[ADDRT:%[0-9A-Za-z]+]] = fir.alloca tuple<!fir.real<4>, !fir.real<4>>
+  // PPC: fir.store [[RES]] to [[ADDRT]] : !fir.ref<tuple<!fir.real<4>, !fir.real<4>>>
+  // PPC: [[ADDRC:%[0-9A-Za-z]+]] = fir.convert [[ADDRT]] : (!fir.ref<tuple<!fir.real<4>, !fir.real<4>>>) -> !fir.ref<!fir.complex<4>>
+  // PPC: [[V:%[0-9A-Za-z]+]] = fir.load [[ADDRC]] : !fir.ref<!fir.complex<4>>
+  // PPC: [[A:%[0-9A-Za-z]+]] = fir.extract_value [[V]], [0 : index] : (!fir.complex<4>) -> !fir.real<4>
+  // PPC: [[B:%[0-9A-Za-z]+]] = fir.extract_value [[V]], [1 : index] : (!fir.complex<4>) -> !fir.real<4>
+  // PPC: fir.call @paramcomplex4([[A]], [[B]]) : (!fir.real<4>, !fir.real<4>) -> ()
+  fir.call @paramcomplex4(%1) : (!fir.complex<4>) -> ()
+  return
+}
+
+// Test that we rewrite the signature of a function that accepts a complex<8>.
+// I32-LABEL: func private @paramcomplex8(!fir.ref<tuple<!fir.real<8>, !fir.real<8>>> {llvm.align = 4 : i32, llvm.byval})
+// X64-LABEL: func private @paramcomplex8(!fir.real<8>, !fir.real<8>)
+// AARCH64-LABEL: func private @paramcomplex8(!fir.array<2x!fir.real<8>>)
+// PPC-LABEL: func private @paramcomplex8(!fir.real<8>, !fir.real<8>)
+func private @paramcomplex8(!fir.complex<8>) -> ()
+
+// Test that we rewrite calls to functions that return or accept complex<8>.
+// I32-LABEL: func @callcomplex8()
+// X64-LABEL: func @callcomplex8()
+// AARCH64-LABEL: func @callcomplex8()
+// PPC-LABEL: func @callcomplex8()
+func @callcomplex8() {
+  // I32: [[RES:%[0-9A-Za-z]+]] = fir.alloca tuple<!fir.real<8>, !fir.real<8>>
+  // I32: fir.call @returncomplex8([[RES]]) : (!fir.ref<tuple<!fir.real<8>, !fir.real<8>>>) -> ()
+  // X64: [[RES:%[0-9A-Za-z]+]] = fir.call @returncomplex8() : () -> tuple<!fir.real<8>, !fir.real<8>>
+  // AARCH64: [[RES:%[0-9A-Za-z]+]] = fir.call @returncomplex8() : () -> tuple<!fir.real<8>, !fir.real<8>>
+  // PPC: [[RES:%[0-9A-Za-z]+]] = fir.call @returncomplex8() : () -> tuple<!fir.real<8>, !fir.real<8>>
+  %1 = fir.call @returncomplex8() : () -> !fir.complex<8>
+
+  // I32: [[RESC:%[0-9A-Za-z]+]] = fir.convert [[RES]] : (!fir.ref<tuple<!fir.real<8>, !fir.real<8>>>) -> !fir.ref<!fir.complex<8>>
+  // I32: [[V:%[0-9A-Za-z]+]] = fir.load [[RESC]] : !fir.ref<!fir.complex<8>>
+  // I32: [[ADDRC:%[0-9A-Za-z]+]] = fir.alloca !fir.complex<8>
+  // I32: fir.store [[V]] to [[ADDRC]] : !fir.ref<!fir.complex<8>>
+  // I32: [[ADDRT:%[0-9A-Za-z]+]] = fir.convert [[ADDRC]] : (!fir.ref<!fir.complex<8>>) -> !fir.ref<tuple<!fir.real<8>, !fir.real<8>>>
+  // I32: fir.call @paramcomplex8([[ADDRT]]) : (!fir.ref<tuple<!fir.real<8>, !fir.real<8>>>) -> ()
+
+  // X64: [[ADDRT:%[0-9A-Za-z]+]] = fir.alloca tuple<!fir.real<8>, !fir.real<8>>
+  // X64: fir.store [[RES]] to [[ADDRT]] : !fir.ref<tuple<!fir.real<8>, !fir.real<8>>>
+  // X64: [[ADDRC:%[0-9A-Za-z]+]] = fir.convert [[ADDRT]] : (!fir.ref<tuple<!fir.real<8>, !fir.real<8>>>) -> !fir.ref<!fir.complex<8>>
+  // X64: [[V:%[0-9A-Za-z]+]] = fir.load [[ADDRC]] : !fir.ref<!fir.complex<8>>
+  // X64: [[A:%[0-9A-Za-z]+]] = fir.extract_value [[V]], [0 : index] : (!fir.complex<8>) -> !fir.real<8>
+  // X64: [[B:%[0-9A-Za-z]+]] = fir.extract_value [[V]], [1 : index] : (!fir.complex<8>) -> !fir.real<8>
+  // X64: fir.call @paramcomplex8([[A]], [[B]]) : (!fir.real<8>, !fir.real<8>) -> ()
+
+  // AARCH64: [[ADDRT:%[0-9A-Za-z]+]] = fir.alloca tuple<!fir.real<8>, !fir.real<8>>
+  // AARCH64: fir.store [[RES]] to [[ADDRT]] : !fir.ref<tuple<!fir.real<8>, !fir.real<8>>>
+  // AARCH64: [[ADDRV:%[0-9A-Za-z]+]] = fir.convert [[ADDRT]] : (!fir.ref<tuple<!fir.real<8>, !fir.real<8>>>) -> !fir.ref<!fir.complex<8>>
+  // AARCH64: [[V:%[0-9A-Za-z]+]] = fir.load [[ADDRV]] : !fir.ref<!fir.complex<8>>
+  // AARCH64: [[ADDRARR:%[0-9A-Za-z]+]] = fir.alloca !fir.array<2x!fir.real<8>>
+  // AARCH64: [[ADDRC:%[0-9A-Za-z]+]] = fir.convert [[ADDRARR]] : (!fir.ref<!fir.array<2x!fir.real<8>>>) -> !fir.ref<!fir.complex<8>>
+  // AARCH64: fir.store [[V]] to [[ADDRC]] : !fir.ref<!fir.complex<8>>
+  // AARCH64: [[ARR:%[0-9A-Za-z]+]] = fir.load [[ADDRARR]] : !fir.ref<!fir.array<2x!fir.real<8>>>
+  // AARCH64: fir.call @paramcomplex8([[ARR]]) : (!fir.array<2x!fir.real<8>>) -> ()
+
+  // PPC: [[ADDRT:%[0-9A-Za-z]+]] = fir.alloca tuple<!fir.real<8>, !fir.real<8>>
+  // PPC: fir.store [[RES]] to [[ADDRT]] : !fir.ref<tuple<!fir.real<8>, !fir.real<8>>>
+  // PPC: [[ADDRC:%[0-9A-Za-z]+]] = fir.convert [[ADDRT]] : (!fir.ref<tuple<!fir.real<8>, !fir.real<8>>>) -> !fir.ref<!fir.complex<8>>
+  // PPC: [[V:%[0-9A-Za-z]+]] = fir.load [[ADDRC]] : !fir.ref<!fir.complex<8>>
+  // PPC: [[A:%[0-9A-Za-z]+]] = fir.extract_value [[V]], [0 : index] : (!fir.complex<8>) -> !fir.real<8>
+  // PPC: [[B:%[0-9A-Za-z]+]] = fir.extract_value [[V]], [1 : index] : (!fir.complex<8>) -> !fir.real<8>
+  // PPC: fir.call @paramcomplex8([[A]], [[B]]) : (!fir.real<8>, !fir.real<8>) -> ()
+  fir.call @paramcomplex8(%1) : (!fir.complex<8>) -> ()
+  return
+}
+
+// Test multiple complex<4> parameters and arguments
+// I32-LABEL: func private @calleemultipleparamscomplex4(!fir.ref<tuple<!fir.real<4>, !fir.real<4>>> {llvm.align = 4 : i32, llvm.byval}, !fir.ref<tuple<!fir.real<4>, !fir.real<4>>> {llvm.align = 4 : i32, llvm.byval}, !fir.ref<tuple<!fir.real<4>, !fir.real<4>>> {llvm.align = 4 : i32, llvm.byval})
+// X64-LABEL: func private @calleemultipleparamscomplex4(!fir.vector<2:!fir.real<4>>, !fir.vector<2:!fir.real<4>>, !fir.vector<2:!fir.real<4>>)
+// AARCH64-LABEL: func private @calleemultipleparamscomplex4(!fir.array<2x!fir.real<4>>, !fir.array<2x!fir.real<4>>, !fir.array<2x!fir.real<4>>)
+// PPC-LABEL: func private @calleemultipleparamscomplex4(!fir.real<4>, !fir.real<4>, !fir.real<4>, !fir.real<4>, !fir.real<4>, !fir.real<4>)
+func private @calleemultipleparamscomplex4(!fir.complex<4>, !fir.complex<4>, !fir.complex<4>) -> ()
+
+// I32-LABEL: func @multipleparamscomplex4
+// I32-SAME: ([[Z1:%[0-9A-Za-z]+]]: !fir.ref<tuple<!fir.real<4>, !fir.real<4>>> {llvm.align = 4 : i32, llvm.byval}, [[Z2:%[0-9A-Za-z]+]]: !fir.ref<tuple<!fir.real<4>, !fir.real<4>>> {llvm.align = 4 : i32, llvm.byval}, [[Z3:%[0-9A-Za-z]+]]: !fir.ref<tuple<!fir.real<4>, !fir.real<4>>> {llvm.align = 4 : i32, llvm.byval})
+// X64-LABEL: func @multipleparamscomplex4
+// X64-SAME: ([[Z1:%[0-9A-Za-z]+]]: !fir.vector<2:!fir.real<4>>, [[Z2:%[0-9A-Za-z]+]]: !fir.vector<2:!fir.real<4>>, [[Z3:%[0-9A-Za-z]+]]: !fir.vector<2:!fir.real<4>>)
+// AARCH64-LABEL: func @multipleparamscomplex4
+// AARCH64-SAME: ([[Z1:%[0-9A-Za-z]+]]: !fir.array<2x!fir.real<4>>, [[Z2:%[0-9A-Za-z]+]]: !fir.array<2x!fir.real<4>>, [[Z3:%[0-9A-Za-z]+]]: !fir.array<2x!fir.real<4>>)
+// PPC-LABEL: func @multipleparamscomplex4
+// PPC-SAME: ([[A1:%[0-9A-Za-z]+]]: !fir.real<4>, [[B1:%[0-9A-Za-z]+]]: !fir.real<4>, [[A2:%[0-9A-Za-z]+]]: !fir.real<4>, [[B2:%[0-9A-Za-z]+]]: !fir.real<4>, [[A3:%[0-9A-Za-z]+]]: !fir.real<4>, [[B3:%[0-9A-Za-z]+]]: !fir.real<4>)
+func @multipleparamscomplex4(%z1 : !fir.complex<4>, %z2 : !fir.complex<4>, %z3 : !fir.complex<4>) {
+  // I32-DAG: [[Z1_ADDR:%[0-9A-Za-z]+]] = fir.convert [[Z1]] : (!fir.ref<tuple<!fir.real<4>, !fir.real<4>>>) -> !fir.ref<!fir.complex<4>>
+  // I32-DAG: [[Z1_VAL:%[0-9A-Za-z]+]] = fir.load [[Z1_ADDR]] : !fir.ref<!fir.complex<4>>
+  // I32-DAG: [[Z2_ADDR:%[0-9A-Za-z]+]] = fir.convert [[Z2]] : (!fir.ref<tuple<!fir.real<4>, !fir.real<4>>>) -> !fir.ref<!fir.complex<4>>
+  // I32-DAG: [[Z2_VAL:%[0-9A-Za-z]+]] = fir.load [[Z2_ADDR]] : !fir.ref<!fir.complex<4>>
+  // I32-DAG: [[Z3_ADDR:%[0-9A-Za-z]+]] = fir.convert [[Z3]] : (!fir.ref<tuple<!fir.real<4>, !fir.real<4>>>) -> !fir.ref<!fir.complex<4>>
+  // I32-DAG: [[Z3_VAL:%[0-9A-Za-z]+]] = fir.load [[Z3_ADDR]] : !fir.ref<!fir.complex<4>>
+
+  // I32-DAG: [[Z1_ADDRC:%[0-9A-Za-z]+]] = fir.alloca !fir.complex<4>
+  // I32-DAG: fir.store [[Z1_VAL]] to [[Z1_ADDRC]] : !fir.ref<!fir.complex<4>>
+  // I32-DAG: [[Z1_ADDRT:%[0-9A-Za-z]+]] = fir.convert [[Z1_ADDRC]] : (!fir.ref<!fir.complex<4>>) -> !fir.ref<tuple<!fir.real<4>, !fir.real<4>>>
+  // I32-DAG: [[Z2_ADDRC:%[0-9A-Za-z]+]] = fir.alloca !fir.complex<4>
+  // I32-DAG: fir.store [[Z2_VAL]] to [[Z2_ADDRC]] : !fir.ref<!fir.complex<4>>
+  // I32-DAG: [[Z2_ADDRT:%[0-9A-Za-z]+]] = fir.convert [[Z2_ADDRC]] : (!fir.ref<!fir.complex<4>>) -> !fir.ref<tuple<!fir.real<4>, !fir.real<4>>>
+  // I32-DAG: [[Z3_ADDRC:%[0-9A-Za-z]+]] = fir.alloca !fir.complex<4>
+  // I32-DAG: fir.store [[Z3_VAL]] to [[Z3_ADDRC]] : !fir.ref<!fir.complex<4>>
+  // I32-DAG: [[Z3_ADDRT:%[0-9A-Za-z]+]] = fir.convert [[Z3_ADDRC]] : (!fir.ref<!fir.complex<4>>) -> !fir.ref<tuple<!fir.real<4>, !fir.real<4>>>
+
+  // I32: fir.call @calleemultipleparamscomplex4([[Z1_ADDRT]], [[Z2_ADDRT]], [[Z3_ADDRT]]) : (!fir.ref<tuple<!fir.real<4>, !fir.real<4>>>, !fir.ref<tuple<!fir.real<4>, !fir.real<4>>>, !fir.ref<tuple<!fir.real<4>, !fir.real<4>>>) -> ()
+
+  // X64-DAG: [[Z3_ADDR:%[0-9A-Za-z]+]] = fir.alloca !fir.vector<2:!fir.real<4>>
+  // X64-DAG: fir.store [[Z3]] to [[Z3_ADDR]] : !fir.ref<!fir.vector<2:!fir.real<4>>>
+  // X64-DAG: [[Z3_ADDRC:%[0-9A-Za-z]+]] = fir.convert [[Z3_ADDR]] : (!fir.ref<!fir.vector<2:!fir.real<4>>>) -> !fir.ref<!fir.complex<4>>
+  // X64-DAG: [[Z3_VAL:%[0-9A-Za-z]+]] = fir.load [[Z3_ADDRC]] : !fir.ref<!fir.complex<4>>
+  // X64-DAG: [[Z2_ADDR:%[0-9A-Za-z]+]] = fir.alloca !fir.vector<2:!fir.real<4>>
+  // X64-DAG: fir.store [[Z2]] to [[Z2_ADDR]] : !fir.ref<!fir.vector<2:!fir.real<4>>>
+  // X64-DAG: [[Z2_ADDRC:%[0-9A-Za-z]+]] = fir.convert [[Z2_ADDR]] : (!fir.ref<!fir.vector<2:!fir.real<4>>>) -> !fir.ref<!fir.complex<4>>
+  // X64-DAG: [[Z2_VAL:%[0-9A-Za-z]+]] = fir.load [[Z2_ADDRC]] : !fir.ref<!fir.complex<4>>
+  // X64-DAG: [[Z1_ADDR:%[0-9A-Za-z]+]] = fir.alloca !fir.vector<2:!fir.real<4>>
+  // X64-DAG: fir.store [[Z1]] to [[Z1_ADDR]] : !fir.ref<!fir.vector<2:!fir.real<4>>>
+  // X64-DAG: [[Z1_ADDRC:%[0-9A-Za-z]+]] = fir.convert [[Z1_ADDR]] : (!fir.ref<!fir.vector<2:!fir.real<4>>>) -> !fir.ref<!fir.complex<4>>
+  // X64-DAG: [[Z1_VAL:%[0-9A-Za-z]+]] = fir.load [[Z1_ADDRC]] : !fir.ref<!fir.complex<4>>
+
+  // X64-DAG: [[Z1_ADDRV:%[0-9A-Za-z]+]] = fir.alloca !fir.vector<2:!fir.real<4>>
+  // X64-DAG: [[Z1_ADDRC2:%[0-9A-Za-z]+]] = fir.convert [[Z1_ADDRV]] : (!fir.ref<!fir.vector<2:!fir.real<4>>>) -> !fir.ref<!fir.complex<4>>
+  // X64-DAG: fir.store [[Z1_VAL]] to [[Z1_ADDRC2]] : !fir.ref<!fir.complex<4>>
+  // X64-DAG: [[Z1_RELOADED:%[0-9A-Za-z]+]] = fir.load [[Z1_ADDRV]] : !fir.ref<!fir.vector<2:!fir.real<4>>>
+  // X64-DAG: [[Z2_ADDRV:%[0-9A-Za-z]+]] = fir.alloca !fir.vector<2:!fir.real<4>>
+  // X64-DAG: [[Z2_ADDRC2:%[0-9A-Za-z]+]] = fir.convert [[Z2_ADDRV]] : (!fir.ref<!fir.vector<2:!fir.real<4>>>) -> !fir.ref<!fir.complex<4>>
+  // X64-DAG: fir.store [[Z2_VAL]] to [[Z2_ADDRC2]] : !fir.ref<!fir.complex<4>>
+  // X64-DAG: [[Z2_RELOADED:%[0-9A-Za-z]+]] = fir.load [[Z2_ADDRV]] : !fir.ref<!fir.vector<2:!fir.real<4>>>
+  // X64-DAG: [[Z3_ADDRV:%[0-9A-Za-z]+]] = fir.alloca !fir.vector<2:!fir.real<4>>
+  // X64-DAG: [[Z3_ADDRC2:%[0-9A-Za-z]+]] = fir.convert [[Z3_ADDRV]] : (!fir.ref<!fir.vector<2:!fir.real<4>>>) -> !fir.ref<!fir.complex<4>>
+  // X64-DAG: fir.store [[Z3_VAL]] to [[Z3_ADDRC2]] : !fir.ref<!fir.complex<4>>
+  // X64-DAG: [[Z3_RELOADED:%[0-9A-Za-z]+]] = fir.load [[Z3_ADDRV]] : !fir.ref<!fir.vector<2:!fir.real<4>>>
+
+  // X64: fir.call @calleemultipleparamscomplex4([[Z1_RELOADED]], [[Z2_RELOADED]], [[Z3_RELOADED]]) : (!fir.vector<2:!fir.real<4>>, !fir.vector<2:!fir.real<4>>, !fir.vector<2:!fir.real<4>>) -> ()
+
+  // AARCH64-DAG: [[Z3_ARR:%[0-9A-Za-z]+]] = fir.alloca !fir.array<2x!fir.real<4>>
+  // AARCH64-DAG: fir.store [[Z3]] to [[Z3_ARR]] : !fir.ref<!fir.array<2x!fir.real<4>>>
+  // AARCH64-DAG: [[Z3_ADDRC:%[0-9A-Za-z]+]] = fir.convert [[Z3_ARR]] : (!fir.ref<!fir.array<2x!fir.real<4>>>) -> !fir.ref<!fir.complex<4>>
+  // AARCH64-DAG: [[Z3_VAL:%[0-9A-Za-z]+]] = fir.load [[Z3_ADDRC]] : !fir.ref<!fir.complex<4>>
+  // AARCH64-DAG: [[Z2_ARR:%[0-9A-Za-z]+]] = fir.alloca !fir.array<2x!fir.real<4>>
+  // AARCH64-DAG: fir.store [[Z2]] to [[Z2_ARR]] : !fir.ref<!fir.array<2x!fir.real<4>>>
+  // AARCH64-DAG: [[Z2_ADDRC:%[0-9A-Za-z]+]] = fir.convert [[Z2_ARR]] : (!fir.ref<!fir.array<2x!fir.real<4>>>) -> !fir.ref<!fir.complex<4>>
+  // AARCH64-DAG: [[Z2_VAL:%[0-9A-Za-z]+]] = fir.load [[Z2_ADDRC]] : !fir.ref<!fir.complex<4>>
+  // AARCH64-DAG: [[Z1_ARR:%[0-9A-Za-z]+]] = fir.alloca !fir.array<2x!fir.real<4>>
+  // AARCH64-DAG: fir.store [[Z1]] to [[Z1_ARR]] : !fir.ref<!fir.array<2x!fir.real<4>>>
+  // AARCH64-DAG: [[Z1_ADDRC:%[0-9A-Za-z]+]] = fir.convert [[Z1_ARR]] : (!fir.ref<!fir.array<2x!fir.real<4>>>) -> !fir.ref<!fir.complex<4>>
+  // AARCH64-DAG: [[Z1_VAL:%[0-9A-Za-z]+]] = fir.load [[Z1_ADDRC]] : !fir.ref<!fir.complex<4>>
+
+  // AARCH64-DAG: [[Z1_ARR2:%[0-9A-Za-z]+]] = fir.alloca !fir.array<2x!fir.real<4>>
+  // AARCH64-DAG: [[Z1_ADDRC2:%[0-9A-Za-z]+]] = fir.convert [[Z1_ARR2]] : (!fir.ref<!fir.array<2x!fir.real<4>>>) -> !fir.ref<!fir.complex<4>>
+  // AARCH64-DAG: fir.store [[Z1_VAL]] to [[Z1_ADDRC2]] : !fir.ref<!fir.complex<4>>
+  // AARCH64-DAG: [[Z1_RELOADED:%[0-9A-Za-z]+]] = fir.load [[Z1_ARR2]] : !fir.ref<!fir.array<2x!fir.real<4>>>
+  // AARCH64-DAG: [[Z2_ARR2:%[0-9A-Za-z]+]] = fir.alloca !fir.array<2x!fir.real<4>>
+  // AARCH64-DAG: [[Z2_ADDRC2:%[0-9A-Za-z]+]] = fir.convert [[Z2_ARR2]] : (!fir.ref<!fir.array<2x!fir.real<4>>>) -> !fir.ref<!fir.complex<4>>
+  // AARCH64-DAG: fir.store [[Z2_VAL]] to [[Z2_ADDRC2]] : !fir.ref<!fir.complex<4>>
+  // AARCH64-DAG: [[Z2_RELOADED:%[0-9A-Za-z]+]] = fir.load [[Z2_ARR2]] : !fir.ref<!fir.array<2x!fir.real<4>>>
+  // AARCH64-DAG: [[Z3_ARR2:%[0-9A-Za-z]+]] = fir.alloca !fir.array<2x!fir.real<4>>
+  // AARCH64-DAG: [[Z3_ADDRC2:%[0-9A-Za-z]+]] = fir.convert [[Z3_ARR2]] : (!fir.ref<!fir.array<2x!fir.real<4>>>) -> !fir.ref<!fir.complex<4>>
+  // AARCH64-DAG: fir.store [[Z3_VAL]] to [[Z3_ADDRC2]] : !fir.ref<!fir.complex<4>>
+  // AARCH64-DAG: [[Z3_RELOADED:%[0-9A-Za-z]+]] = fir.load [[Z3_ARR2]] : !fir.ref<!fir.array<2x!fir.real<4>>>
+
+  // AARCH64: fir.call @calleemultipleparamscomplex4([[Z1_RELOADED]], [[Z2_RELOADED]], [[Z3_RELOADED]]) : (!fir.array<2x!fir.real<4>>, !fir.array<2x!fir.real<4>>, !fir.array<2x!fir.real<4>>) -> ()
+
+  // PPC-DAG: [[Z3_EMPTY:%[0-9A-Za-z]+]] = fir.undefined !fir.complex<4>
+  // PPC-DAG: [[Z3_PARTIAL:%[0-9A-Za-z]+]] = fir.insert_value [[Z3_EMPTY]], [[A3]], [0 : index] : (!fir.complex<4>, !fir.real<4>) -> !fir.complex<4>
+  // PPC-DAG: [[Z3:%[0-9A-Za-z]+]] = fir.insert_value [[Z3_PARTIAL]], [[B3]], [1 : index] : (!fir.complex<4>, !fir.real<4>) -> !fir.complex<4>
+  // PPC-DAG: [[Z2_EMPTY:%[0-9A-Za-z]+]] = fir.undefined !fir.complex<4>
+  // PPC-DAG: [[Z2_PARTIAL:%[0-9A-Za-z]+]] = fir.insert_value [[Z2_EMPTY]], [[A2]], [0 : index] : (!fir.complex<4>, !fir.real<4>) -> !fir.complex<4>
+  // PPC-DAG: [[Z2:%[0-9A-Za-z]+]] = fir.insert_value [[Z2_PARTIAL]], [[B2]], [1 : index] : (!fir.complex<4>, !fir.real<4>) -> !fir.complex<4>
+  // PPC-DAG: [[Z1_EMPTY:%[0-9A-Za-z]+]] = fir.undefined !fir.complex<4>
+  // PPC-DAG: [[Z1_PARTIAL:%[0-9A-Za-z]+]] = fir.insert_value [[Z1_EMPTY]], [[A1]], [0 : index] : (!fir.complex<4>, !fir.real<4>) -> !fir.complex<4>
+  // PPC-DAG: [[Z1:%[0-9A-Za-z]+]] = fir.insert_value [[Z1_PARTIAL]], [[B1]], [1 : index] : (!fir.complex<4>, !fir.real<4>) -> !fir.complex<4>
+
+  // PPC-DAG: [[A1_EXTR:%[0-9A-Za-z]+]] = fir.extract_value [[Z1]], [0 : index] : (!fir.complex<4>) -> !fir.real<4>
+  // PPC-DAG: [[B1_EXTR:%[0-9A-Za-z]+]] = fir.extract_value [[Z1]], [1 : index] : (!fir.complex<4>) -> !fir.real<4>
+  // PPC-DAG: [[A2_EXTR:%[0-9A-Za-z]+]] = fir.extract_value [[Z2]], [0 : index] : (!fir.complex<4>) -> !fir.real<4>
+  // PPC-DAG: [[B2_EXTR:%[0-9A-Za-z]+]] = fir.extract_value [[Z2]], [1 : index] : (!fir.complex<4>) -> !fir.real<4>
+  // PPC-DAG: [[A3_EXTR:%[0-9A-Za-z]+]] = fir.extract_value [[Z3]], [0 : index] : (!fir.complex<4>) -> !fir.real<4>
+  // PPC-DAG: [[B3_EXTR:%[0-9A-Za-z]+]] = fir.extract_value [[Z3]], [1 : index] : (!fir.complex<4>) -> !fir.real<4>
+
+  // PPC: fir.call @calleemultipleparamscomplex4([[A1_EXTR]], [[B1_EXTR]], [[A2_EXTR]], [[B2_EXTR]], [[A3_EXTR]], [[B3_EXTR]]) : (!fir.real<4>, !fir.real<4>, !fir.real<4>, !fir.real<4>, !fir.real<4>, !fir.real<4>) -> ()
+
+  fir.call @calleemultipleparamscomplex4(%z1, %z2, %z3) : (!fir.complex<4>, !fir.complex<4>, !fir.complex<4>) -> ()
+  return
+}
+
+// Test that we rewrite the signature of and calls to a function that accepts
+// and returns MLIR complex (complex<f32>, used for C++ std::complex interop).
+
+// I32-LABEL: func private @mlircomplexf32
+// I32-SAME: ([[Z1:%[0-9A-Za-z]+]]: !fir.ref<tuple<f32, f32>> {llvm.align = 4 : i32, llvm.byval}, [[Z2:%[0-9A-Za-z]+]]: !fir.ref<tuple<f32, f32>> {llvm.align = 4 : i32, llvm.byval})
+// I32-SAME: -> i64
+// X64-LABEL: func private @mlircomplexf32
+// X64-SAME: ([[Z1:%[0-9A-Za-z]+]]: !fir.vector<2:f32>, [[Z2:%[0-9A-Za-z]+]]: !fir.vector<2:f32>)
+// X64-SAME: -> !fir.vector<2:f32>
+// AARCH64-LABEL: func private @mlircomplexf32
+// AARCH64-SAME: ([[Z1:%[0-9A-Za-z]+]]: !fir.array<2xf32>, [[Z2:%[0-9A-Za-z]+]]: !fir.array<2xf32>)
+// AARCH64-SAME: -> tuple<f32, f32>
+// PPC-LABEL: func private @mlircomplexf32
+// PPC-SAME: ([[A1:%[0-9A-Za-z]+]]: f32, [[B1:%[0-9A-Za-z]+]]: f32, [[A2:%[0-9A-Za-z]+]]: f32, [[B2:%[0-9A-Za-z]+]]: f32)
+// PPC-SAME: -> tuple<f32, f32>
+func private @mlircomplexf32(%z1: complex<f32>, %z2: complex<f32>) -> complex<f32> {
+
+  // I32-DAG: [[Z1_ADDR:%[0-9A-Za-z]+]] = fir.convert [[Z1]] : (!fir.ref<tuple<f32, f32>>) -> !fir.ref<complex<f32>>
+  // I32-DAG: [[Z1_VAL:%[0-9A-Za-z]+]] = fir.load [[Z1_ADDR]] : !fir.ref<complex<f32>>
+  // I32-DAG: [[Z2_ADDR:%[0-9A-Za-z]+]] = fir.convert [[Z2]] : (!fir.ref<tuple<f32, f32>>) -> !fir.ref<complex<f32>>
+  // I32-DAG: [[Z2_VAL:%[0-9A-Za-z]+]] = fir.load [[Z2_ADDR]] : !fir.ref<complex<f32>>
+
+  // I32-DAG: [[Z1_ADDRC:%[0-9A-Za-z]+]] = fir.alloca complex<f32>
+  // I32-DAG: fir.store [[Z1_VAL]] to [[Z1_ADDRC]] : !fir.ref<complex<f32>>
+  // I32-DAG: [[Z1_ADDRT:%[0-9A-Za-z]+]] = fir.convert [[Z1_ADDRC]] : (!fir.ref<complex<f32>>) -> !fir.ref<tuple<f32, f32>>
+  // I32-DAG: [[Z2_ADDRC:%[0-9A-Za-z]+]] = fir.alloca complex<f32>
+  // I32-DAG: fir.store [[Z2_VAL]] to [[Z2_ADDRC]] : !fir.ref<complex<f32>>
+  // I32-DAG: [[Z2_ADDRT:%[0-9A-Za-z]+]] = fir.convert [[Z2_ADDRC]] : (!fir.ref<complex<f32>>) -> !fir.ref<tuple<f32, f32>>
+
+  // I32: [[VAL:%[0-9A-Za-z]+]] = fir.call @mlircomplexf32([[Z1_ADDRT]], [[Z2_ADDRT]]) : (!fir.ref<tuple<f32, f32>>, !fir.ref<tuple<f32, f32>>) -> i64
+
+  // X64-DAG: [[Z2_ADDR:%[0-9A-Za-z]+]] = fir.alloca !fir.vector<2:f32>
+  // X64-DAG: fir.store [[Z2]] to [[Z2_ADDR]] : !fir.ref<!fir.vector<2:f32>>
+  // X64-DAG: [[Z2_ADDRC:%[0-9A-Za-z]+]] = fir.convert [[Z2_ADDR]] : (!fir.ref<!fir.vector<2:f32>>) -> !fir.ref<complex<f32>>
+  // X64-DAG: [[Z2_VAL:%[0-9A-Za-z]+]] = fir.load [[Z2_ADDRC]] : !fir.ref<complex<f32>>
+  // X64-DAG: [[Z1_ADDR:%[0-9A-Za-z]+]] = fir.alloca !fir.vector<2:f32>
+  // X64-DAG: fir.store [[Z1]] to [[Z1_ADDR]] : !fir.ref<!fir.vector<2:f32>>
+  // X64-DAG: [[Z1_ADDRC:%[0-9A-Za-z]+]] = fir.convert [[Z1_ADDR]] : (!fir.ref<!fir.vector<2:f32>>) -> !fir.ref<complex<f32>>
+  // X64-DAG: [[Z1_VAL:%[0-9A-Za-z]+]] = fir.load [[Z1_ADDRC]] : !fir.ref<complex<f32>>
+
+  // X64-DAG: [[Z1_ADDRV:%[0-9A-Za-z]+]] = fir.alloca !fir.vector<2:f32>
+  // X64-DAG: [[Z1_ADDRC2:%[0-9A-Za-z]+]] = fir.convert [[Z1_ADDRV]] : (!fir.ref<!fir.vector<2:f32>>) -> !fir.ref<complex<f32>>
+  // X64-DAG: fir.store [[Z1_VAL]] to [[Z1_ADDRC2]] : !fir.ref<complex<f32>>
+  // X64-DAG: [[Z1_RELOADED:%[0-9A-Za-z]+]] = fir.load [[Z1_ADDRV]] : !fir.ref<!fir.vector<2:f32>>
+  // X64-DAG: [[Z2_ADDRV:%[0-9A-Za-z]+]] = fir.alloca !fir.vector<2:f32>
+  // X64-DAG: [[Z2_ADDRC2:%[0-9A-Za-z]+]] = fir.convert [[Z2_ADDRV]] : (!fir.ref<!fir.vector<2:f32>>) -> !fir.ref<complex<f32>>
+  // X64-DAG: fir.store [[Z2_VAL]] to [[Z2_ADDRC2]] : !fir.ref<complex<f32>>
+  // X64-DAG: [[Z2_RELOADED:%[0-9A-Za-z]+]] = fir.load [[Z2_ADDRV]] : !fir.ref<!fir.vector<2:f32>>
+
+  // X64: [[VAL:%[0-9A-Za-z]+]] = fir.call @mlircomplexf32([[Z1_RELOADED]], [[Z2_RELOADED]]) : (!fir.vector<2:f32>, !fir.vector<2:f32>) -> !fir.vector<2:f32>
+
+  // AARCH64-DAG: [[Z2_ARR:%[0-9A-Za-z]+]] = fir.alloca !fir.array<2xf32>
+  // AARCH64-DAG: fir.store [[Z2]] to [[Z2_ARR]] : !fir.ref<!fir.array<2xf32>>
+  // AARCH64-DAG: [[Z2_ADDRC:%[0-9A-Za-z]+]] = fir.convert [[Z2_ARR]] : (!fir.ref<!fir.array<2xf32>>) -> !fir.ref<complex<f32>>
+  // AARCH64-DAG: [[Z2_VAL:%[0-9A-Za-z]+]] = fir.load [[Z2_ADDRC]] : !fir.ref<complex<f32>>
+  // AARCH64-DAG: [[Z1_ARR:%[0-9A-Za-z]+]] = fir.alloca !fir.array<2xf32>
+  // AARCH64-DAG: fir.store [[Z1]] to [[Z1_ARR]] : !fir.ref<!fir.array<2xf32>>
+  // AARCH64-DAG: [[Z1_ADDRC:%[0-9A-Za-z]+]] = fir.convert [[Z1_ARR]] : (!fir.ref<!fir.array<2xf32>>) -> !fir.ref<complex<f32>>
+  // AARCH64-DAG: [[Z1_VAL:%[0-9A-Za-z]+]] = fir.load [[Z1_ADDRC]] : !fir.ref<complex<f32>>
+
+  // AARCH64-DAG: [[Z1_ARR2:%[0-9A-Za-z]+]] = fir.alloca !fir.array<2xf32>
+  // AARCH64-DAG: [[Z1_ADDRC2:%[0-9A-Za-z]+]] = fir.convert [[Z1_ARR2]] : (!fir.ref<!fir.array<2xf32>>) -> !fir.ref<complex<f32>>
+  // AARCH64-DAG: fir.store [[Z1_VAL]] to [[Z1_ADDRC2]] : !fir.ref<complex<f32>>
+  // AARCH64-DAG: [[Z1_RELOADED:%[0-9A-Za-z]+]] = fir.load [[Z1_ARR2]] : !fir.ref<!fir.array<2xf32>>
+  // AARCH64-DAG: [[Z2_ARR2:%[0-9A-Za-z]+]] = fir.alloca !fir.array<2xf32>
+  // AARCH64-DAG: [[Z2_ADDRC2:%[0-9A-Za-z]+]] = fir.convert [[Z2_ARR2]] : (!fir.ref<!fir.array<2xf32>>) -> !fir.ref<complex<f32>>
+  // AARCH64-DAG: fir.store [[Z2_VAL]] to [[Z2_ADDRC2]] : !fir.ref<complex<f32>>
+  // AARCH64-DAG: [[Z2_RELOADED:%[0-9A-Za-z]+]] = fir.load [[Z2_ARR2]] : !fir.ref<!fir.array<2xf32>>
+
+  // AARCH64: [[VAL:%[0-9A-Za-z]+]] = fir.call @mlircomplexf32([[Z1_RELOADED]], [[Z2_RELOADED]]) : (!fir.array<2xf32>, !fir.array<2xf32>) -> tuple<f32, f32>
+
+  // PPC-DAG: [[Z2_EMPTY:%[0-9A-Za-z]+]] = fir.undefined complex<f32>
+  // PPC-DAG: [[Z2_PARTIAL:%[0-9A-Za-z]+]] = fir.insert_value [[Z2_EMPTY]], [[A2]], [0 : index] : (complex<f32>, f32) -> complex<f32>
+  // PPC-DAG: [[Z2:%[0-9A-Za-z]+]] = fir.insert_value [[Z2_PARTIAL]], [[B2]], [1 : index] : (complex<f32>, f32) -> complex<f32>
+  // PPC-DAG: [[Z1_EMPTY:%[0-9A-Za-z]+]] = fir.undefined complex<f32>
+  // PPC-DAG: [[Z1_PARTIAL:%[0-9A-Za-z]+]] = fir.insert_value [[Z1_EMPTY]], [[A1]], [0 : index] : (complex<f32>, f32) -> complex<f32>
+  // PPC-DAG: [[Z1:%[0-9A-Za-z]+]] = fir.insert_value [[Z1_PARTIAL]], [[B1]], [1 : index] : (complex<f32>, f32) -> complex<f32>
+
+  // PPC-DAG: [[A1_EXTR:%[0-9A-Za-z]+]] = fir.extract_value [[Z1]], [0 : index] : (complex<f32>) -> f32
+  // PPC-DAG: [[B1_EXTR:%[0-9A-Za-z]+]] = fir.extract_value [[Z1]], [1 : index] : (complex<f32>) -> f32
+  // PPC-DAG: [[A2_EXTR:%[0-9A-Za-z]+]] = fir.extract_value [[Z2]], [0 : index] : (complex<f32>) -> f32
+  // PPC-DAG: [[B2_EXTR:%[0-9A-Za-z]+]] = fir.extract_value [[Z2]], [1 : index] : (complex<f32>) -> f32
+
+  // PPC: [[VAL:%[0-9A-Za-z]+]] = fir.call @mlircomplexf32([[A1_EXTR]], [[B1_EXTR]], [[A2_EXTR]], [[B2_EXTR]]) : (f32, f32, f32, f32) -> tuple<f32, f32>
+  %0 = fir.call @mlircomplexf32(%z1, %z2) : (complex<f32>, complex<f32>) -> complex<f32>
+
+
+  // I32: [[ADDRI64:%[0-9A-Za-z]+]] = fir.alloca i64
+  // I32: fir.store [[VAL]] to [[ADDRI64]] : !fir.ref<i64>
+  // I32: [[ADDRC:%[0-9A-Za-z]+]] = fir.convert [[ADDRI64]] : (!fir.ref<i64>) -> !fir.ref<complex<f32>>
+  // I32: [[VAL_2:%[0-9A-Za-z]+]] = fir.load [[ADDRC]] : !fir.ref<complex<f32>>
+  // I32: [[ADDRI64_2:%[0-9A-Za-z]+]] = fir.alloca i64
+  // I32: [[ADDRC_2:%[0-9A-Za-z]+]] = fir.convert [[ADDRI64_2]] : (!fir.ref<i64>) -> !fir.ref<complex<f32>>
+  // I32: fir.store [[VAL_2]] to [[ADDRC_2]] : !fir.ref<complex<f32>>
+  // I32: [[RES:%[0-9A-Za-z]+]] = fir.load [[ADDRI64_2]] : !fir.ref<i64>
+  // I32: return [[RES]] : i64
+
+  // X64: [[ADDRV:%[0-9A-Za-z]+]] = fir.alloca !fir.vector<2:f32>
+  // X64: fir.store [[VAL]] to [[ADDRV]] : !fir.ref<!fir.vector<2:f32>>
+  // X64: [[ADDRC:%[0-9A-Za-z]+]] = fir.convert [[ADDRV]] : (!fir.ref<!fir.vector<2:f32>>) -> !fir.ref<complex<f32>>
+  // X64: [[V:%[0-9A-Za-z]+]] = fir.load [[ADDRC]] : !fir.ref<complex<f32>>
+  // X64: [[ADDRV_2:%[0-9A-Za-z]+]] = fir.alloca !fir.vector<2:f32>
+  // X64: [[ADDRC_2:%[0-9A-Za-z]+]] = fir.convert [[ADDRV_2]] : (!fir.ref<!fir.vector<2:f32>>) -> !fir.ref<complex<f32>>
+  // X64: fir.store [[V]] to [[ADDRC_2]] : !fir.ref<complex<f32>>
+  // X64: [[RES:%[0-9A-Za-z]+]] = fir.load [[ADDRV_2]] : !fir.ref<!fir.vector<2:f32>>
+  // X64: return [[RES]] : !fir.vector<2:f32>
+
+  // AARCH64: [[ADDRT:%[0-9A-Za-z]+]] = fir.alloca tuple<f32, f32>
+  // AARCH64: fir.store [[VAL]] to [[ADDRT]] : !fir.ref<tuple<f32, f32>>
+  // AARCH64: [[ADDRC:%[0-9A-Za-z]+]] = fir.convert [[ADDRT]] : (!fir.ref<tuple<f32, f32>>) -> !fir.ref<complex<f32>>
+  // AARCH64: [[V:%[0-9A-Za-z]+]] = fir.load [[ADDRC]] : !fir.ref<complex<f32>>
+  // AARCH64: [[ADDRT_2:%[0-9A-Za-z]+]] = fir.alloca tuple<f32, f32>
+  // AARCH64: [[ADDRC_2:%[0-9A-Za-z]+]] = fir.convert [[ADDRT_2]] : (!fir.ref<tuple<f32, f32>>) -> !fir.ref<complex<f32>>
+  // AARCH64: fir.store [[V]] to [[ADDRC_2]] : !fir.ref<complex<f32>>
+  // AARCH64: [[RES:%[0-9A-Za-z]+]] = fir.load [[ADDRT_2]] : !fir.ref<tuple<f32, f32>>
+  // AARCH64: return [[RES]] : tuple<f32, f32>
+
+  // PPC: [[ADDRT:%[0-9A-Za-z]+]] = fir.alloca tuple<f32, f32>
+  // PPC: fir.store [[VAL]] to [[ADDRT]] : !fir.ref<tuple<f32, f32>>
+  // PPC: [[ADDRC:%[0-9A-Za-z]+]] = fir.convert [[ADDRT]] : (!fir.ref<tuple<f32, f32>>) -> !fir.ref<complex<f32>>
+  // PPC: [[V:%[0-9A-Za-z]+]] = fir.load [[ADDRC]] : !fir.ref<complex<f32>>
+  // PPC: [[ADDRT_2:%[0-9A-Za-z]+]] = fir.alloca tuple<f32, f32>
+  // PPC: [[ADDRC_2:%[0-9A-Za-z]+]] = fir.convert [[ADDRT_2]] : (!fir.ref<tuple<f32, f32>>) -> !fir.ref<complex<f32>>
+  // PPC: fir.store [[V]] to [[ADDRC_2]] : !fir.ref<complex<f32>>
+  // PPC: [[RES:%[0-9A-Za-z]+]] = fir.load [[ADDRT_2]] : !fir.ref<tuple<f32, f32>>
+  // PPC: return [[RES]] : tuple<f32, f32>
+  return %0 : complex<f32>
+}