diff --git a/flang/lib/Optimizer/CodeGen/Target.cpp b/flang/lib/Optimizer/CodeGen/Target.cpp
--- a/flang/lib/Optimizer/CodeGen/Target.cpp
+++ b/flang/lib/Optimizer/CodeGen/Target.cpp
@@ -232,6 +232,74 @@
 };
 } // namespace
 
+//===----------------------------------------------------------------------===//
+// x86_64 (x86 64 bit) Windows target specifics.
+//===----------------------------------------------------------------------===//
+
+namespace {
+struct TargetX86_64Win : public GenericTarget<TargetX86_64Win> {
+  using GenericTarget::GenericTarget;
+
+  static constexpr int defaultWidth = 64;
+
+  CodeGenSpecifics::Marshalling
+  complexArgumentType(mlir::Location loc, mlir::Type eleTy) const override {
+    CodeGenSpecifics::Marshalling marshal;
+    const auto *sem = &floatToSemantics(kindMap, eleTy);
+    if (sem == &llvm::APFloat::IEEEsingle()) {
+      // i64   pack both floats in a 64-bit GPR
+      marshal.emplace_back(mlir::IntegerType::get(eleTy.getContext(), 64),
+                           AT{});
+    } else if (sem == &llvm::APFloat::IEEEdouble()) {
+      // Use a type that will be translated into LLVM as:
+      // { double, double }   struct of 2 double, byval, align 8
+      marshal.emplace_back(
+          fir::ReferenceType::get(mlir::TupleType::get(
+              eleTy.getContext(), mlir::TypeRange{eleTy, eleTy})),
+          AT{/*align=*/8, /*byval=*/true});
+    } else if (sem == &llvm::APFloat::IEEEquad()) {
+      // Use a type that will be translated into LLVM as:
+      // { fp128, fp128 }   struct of 2 fp128, byval, align 16
+      marshal.emplace_back(
+          fir::ReferenceType::get(mlir::TupleType::get(
+              eleTy.getContext(), mlir::TypeRange{eleTy, eleTy})),
+          AT{/*align=*/16, /*byval=*/true});
+    } else {
+      TODO(loc, "complex for this precision");
+    }
+    return marshal;
+  }
+
+  CodeGenSpecifics::Marshalling
+  complexReturnType(mlir::Location loc, mlir::Type eleTy) const override {
+    CodeGenSpecifics::Marshalling marshal;
+    const auto *sem = &floatToSemantics(kindMap, eleTy);
+    if (sem == &llvm::APFloat::IEEEsingle()) {
+      // i64   pack both floats in a 64-bit GPR
+      marshal.emplace_back(mlir::IntegerType::get(eleTy.getContext(), 64),
+                           AT{});
+    } else if (sem == &llvm::APFloat::IEEEdouble()) {
+      // Use a type that will be translated into LLVM as:
+      // { double, double }   struct of 2 double, sret, align 8
+      marshal.emplace_back(
+          fir::ReferenceType::get(mlir::TupleType::get(
+              eleTy.getContext(), mlir::TypeRange{eleTy, eleTy})),
+          AT{/*align=*/8, /*byval=*/false, /*sret=*/true});
+    } else if (sem == &llvm::APFloat::IEEEquad()) {
+      // Use a type that will be translated into LLVM as:
+      // { fp128, fp128 }   struct of 2 fp128, sret, align 16
+      marshal.emplace_back(
+          fir::ReferenceType::get(mlir::TupleType::get(
+              eleTy.getContext(), mlir::TypeRange{eleTy, eleTy})),
+          AT{/*align=*/16, /*byval=*/false, /*sret=*/true});
+    } else {
+      TODO(loc, "complex for this precision");
+    }
+    return marshal;
+  }
+};
+} // namespace
+
 //===----------------------------------------------------------------------===//
 // AArch64 linux target specifics.
 //===----------------------------------------------------------------------===//
@@ -548,8 +616,12 @@
     return std::make_unique<TargetI386>(ctx, std::move(trp),
                                         std::move(kindMap));
   case llvm::Triple::ArchType::x86_64:
-    return std::make_unique<TargetX86_64>(ctx, std::move(trp),
-                                          std::move(kindMap));
+    if (trp.isOSWindows())
+      return std::make_unique<TargetX86_64Win>(ctx, std::move(trp),
+                                               std::move(kindMap));
+    else
+      return std::make_unique<TargetX86_64>(ctx, std::move(trp),
+                                            std::move(kindMap));
   case llvm::Triple::ArchType::aarch64:
     return std::make_unique<TargetAArch64>(ctx, std::move(trp),
                                            std::move(kindMap));
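
For reviewers: a quick way to sanity-check the IEEEsingle marshalling above is to compare it against what clang emits for the equivalent C type on the same target, since COMPLEX(4) must interoperate with C's float _Complex. The snippet below is an illustration, not part of the patch; it assumes a clang that can target x86_64-pc-windows-msvc, and the file/function names (csq.c, csq) are made up for the example.

/* csq.c -- observe how an 8-byte float _Complex travels on x86_64 Windows.
 * Build: clang -target x86_64-pc-windows-msvc -S -emit-llvm csq.c
 * The IR should look roughly like:
 *   define dso_local i64 @csq(i64 %z.coerce)
 * i.e. both floats are packed into a single 64-bit GPR for the argument
 * and the return value, which is exactly the
 * mlir::IntegerType::get(eleTy.getContext(), 64) marshalling chosen in
 * complexArgumentType/complexReturnType for IEEEsingle. */
#include <complex.h>

float _Complex csq(float _Complex z) { return z * z; }

For the double- and quad-precision cases the pair no longer fits in one register, which is why the patch falls back to an indirect fir::ReferenceType to a two-element tuple, byval for arguments and sret for returns, with the element alignment (8 or 16) recorded in the AT attributes.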