diff --git a/Fortran/gfortran/CMakeLists.txt b/Fortran/gfortran/CMakeLists.txt --- a/Fortran/gfortran/CMakeLists.txt +++ b/Fortran/gfortran/CMakeLists.txt @@ -59,7 +59,9 @@ set(TEST_SUITE_FORTRAN_ISO_C_HEADER_DIR "" CACHE STRING "Path to the directory containing ISO_Fortran_bindings.h header file.") -# The following cause errors if they are passed to flang via FFLAGS +# The following cause errors if they are passed to flang via FFLAGS. This could +# be because they are currently unsupported and might eventually be supported, +# or because they are GCC-specific and will never be supported. set(FLANG_ERRORING_FFLAGS -fallow-invalid-boz -fdec @@ -121,7 +123,11 @@ -ftree-tail-merge -ftree-vectorize -ftree-vrp + -mdalign -mdejagnu-cpu=power4 + -mfpmath=387 + -mfpmath=sse + -mtune=amdfam10 # -Os might eventually be supported, so this might also need to be removed # at some point -Og @@ -179,6 +185,63 @@ -Wzerotrip ) +# Find all the Fortran files in the current source directory that may be test +# files. This will filter out those files that have been explicitly disabled +# for any reason. The returned files will consist of the main file for +# "execute" and "compile" tests as well as any dependencies for multi-file +# tests. +function(gfortran_find_test_files unsupported unimplemented skipped failing out) + # This will just get all the Fortran source files in the directory. Since + # the tests may be a mix of single-source and multi-source tests, this will + # include files that are dependencies of some "main" test file as well. + file(GLOB files CONFIGURE_DEPENDS LIST_DIRECTORIES false + *.f* + *.F* + ) + + set(ignore "") + + # There is still a chance that some of the unsupported tests may need to be + # enabled, for instance if the non-standard extensions that they exercise are + # supported due to user demand. + if (NOT TEST_SUITE_FORTRAN_FORCE_ALL_TESTS AND + NOT TEST_SUITE_FORTRAN_FORCE_UNSUPPORTED_TESTS) + list(APPEND ignore ${unsupported}) + endif() + + # For the remaining tests, there is cause to build and run the skipped, failing + # and unimplemented tests since some could be enabled once some feature is + # implemented. Eventually, all the TEST_SUITE_FORTRAN_FORCE_* options (perhaps + # with the exception of TEST_SUITE_FORTRAN_FORCE_UNSUPPORTED_TESTS) should + # become redundant and removed. + if (NOT TEST_SUITE_FORTRAN_FORCE_ALL_TESTS AND + NOT TEST_SUITE_FORTRAN_FORCE_UNIMPLEMENTED_TESTS) + list(APPEND ignore ${unimplemented}) + endif() + + if (NOT TEST_SUITE_FORTRAN_FORCE_ALL_TESTS AND + NOT TEST_SUITE_FORTRAN_FORCE_SKIPPED_TESTS) + list(APPEND ignore ${skipped}) + endif() + + if (NOT TEST_SUITE_FORTRAN_FORCE_ALL_TESTS AND + NOT TEST_SUITE_FORTRAN_FORCE_FAILING_TESTS) + list(APPEND ignore ${failing}) + endif() + + foreach(file ${ignore}) + list(REMOVE_ITEM files ${file}) + endforeach() + + set(${out} ${files} PARENT_SCOPE) +endfunction() + +# Populate the tests from the files in the subdirectory. This macro will be +# called from each subdirectory containing tests. It is expected that the +# subdirectory will contain a file named "DisabledFiles.cmake" which will +# provide the list of files that are disabled in that subdirectory for various +# reasons. A list named TESTS is expected to be in scope before this macro is +# called.
macro(gfortran_populate_tests TESTS) string(REPLACE "${CMAKE_SOURCE_DIR}/" "" DIR "${CMAKE_CURRENT_SOURCE_DIR}") message(STATUS "Adding directory ${DIR}") @@ -198,134 +261,386 @@ list(APPEND FAILING ${FAILING_FILES}) # The TESTS variable is expected to be set before the macro is called. - add_tests( - "${UNSUPPORTED}" "${UNIMPLEMENTED}" "${SKIPPED}" "${FAILING}") + gfortran_find_test_files( + "${UNSUPPORTED}" "${UNIMPLEMENTED}" "${SKIPPED}" "${FAILING}" "${TESTS}") endmacro() -set(HEADER_SEARCH_PATH "${TEST_SUITE_FORTRAN_ISO_C_HEADER_DIR}") -if (NOT HEADER_SEARCH_PATH) - get_filename_component(Fortran_BINDIR ${CMAKE_Fortran_COMPILER} DIRECTORY) - get_filename_component(Fortran_PREFIX ${Fortran_BINDIR} DIRECTORY) - - set(HEADER_SEARCH_PATH "${Fortran_PREFIX}/include/flang") -endif() - -find_file(ISO_FORTRAN_C_HEADER - ISO_Fortran_binding.h - PATHS ${HEADER_SEARCH_PATH} - REQUIRED) - -get_filename_component(ISO_FORTRAN_C_HEADER_DIR - "${ISO_FORTRAN_C_HEADER}" - DIRECTORY) - -# This pulls out options in dg-options into `${Variable}` -function(gfortran_dg_options_fflags Variable File) - # Some files have dg-options which we need to pick up. These should be in - # the first line but often aren't. - # - # We also need to be careful not to pick up target-specific dg-options. - set(DG_FFLAGS) - - file(STRINGS ${File} FileLines) - foreach(FileLine ${FileLines}) - # Looking for `dg-options "..."` or `dg-additional-options "..."` without - # `{ target` afterwards (ignoring spaces). - if(FileLine MATCHES "dg-(additional-)?options [{]?[ ]*\"([^\"]*)\"[ ]*[}]?(.*)") - # This is needed to turn the string into a list of FFLAGS - separate_arguments(FILE_FFLAGS UNIX_COMMAND ${CMAKE_MATCH_2}) - # This does the negative lookahead for `{ target` anywhere in the rest of - # the line - if(NOT "${CMAKE_MATCH_3}" MATCHES "{ +target") - list(APPEND DG_FFLAGS ${FILE_FFLAGS}) - endif() - endif() - endforeach() - - # Remove any flags that will cause flang to raise an error. - if (DG_FFLAGS) - list(REMOVE_ITEM DG_FFLAGS ${FLANG_ERRORING_FFLAGS}) - endif() - - # Set the parent scope variable - set(${Variable} ${DG_FFLAGS} PARENT_SCOPE) -endfunction() - -function(gfortran_execute_test File) - cmake_parse_arguments(GFORTRAN "" "PREFIX" "FFLAGS;LDFLAGS;DEPFILES" ${ARGN}) +# Generate a unique target name from the given base and prepend it with the +# given prefix. +function(gfortran_unique_target_name prefix base out) # There are a few tests - in different directories - with duplicate filenames. # CMake requires all target names to be unique, so we add a disambiguator. The # disambiguator uses the path of the file relative to the top-level directory # containing all the tests from the gfortran test suite to ensure that # targets in different directories will have distinct names. + set(result "") - # The ${File} argument is guaranteed to be the absolute path to the source - # file. - string(REPLACE "${PROJECT_SOURCE_DIR}/Fortran/gfortran/" "" Name "${File}") + # The ${base} argument is guaranteed to be the absolute path to a source file. + string(REPLACE "${PROJECT_SOURCE_DIR}/Fortran/gfortran/" "" result "${base}") # Replace any '/' separators with 2 underscores. Just replacing it by a single # underscore results in conflicts. For instance, there is a conflict between # regression/coarray_ptr_comp_2.f08 and regression/coarray/ptr_comp_2.f08 # which are unrelated tests. Other such conflicts are probably also unrelated. 
- string(REPLACE "/" "__" Name "${Name}") + string(REPLACE "/" "__" result "${result}") # Retain the extension of the source file in the final target name because # there are cases where two source files with the same basename but different # extensions and they, too, represent completely different and unrelated # tests. - string(REPLACE "." "_" Name "${Name}") - - set(test_target "${GFORTRAN_PREFIX}-${Name}") - set(working_dir_name "${test_target}.wd") - set(working_dir "${CMAKE_CURRENT_BINARY_DIR}/${working_dir_name}") - - # Several tests in the suite build modules with the same name at build-time. - # Others create/write/read files with the same name at test-time. In either - # case, these are race conditions which can lead to non-deterministic failures - # at build and/or test time. To work around this, have each test run in its - # own directory. - # - # This directory is also used as module directory at build-time. - # - # It may be "cleaner" to have separate directories - one that serves as the - # module directory and the other as the working directory, but that is - # probably unnecessary. + string(REPLACE "." "_" result "${result}") + + set(${out} "${prefix}-${result}" PARENT_SCOPE) +endfunction() + +# Several tests in the suite build modules with the same name at build-time. +# Others create/write/read files with the same name at test-time. In either +# case, these are race conditions which can lead to non-deterministic failures +# at build and/or test time. To work around this, have each test run in its +# own directory. +# +# This directory is also used as module directory at build-time. +# +# It may be "cleaner" to have separate directories - one that serves as the +# module directory and the other as the working directory, but that is +# probably unnecessary. +# +# Make a working directory for the given target and return the full path of +# the resulting directory. +function(gfortran_make_working_dir tgt out) + set(working_dir "${CMAKE_CURRENT_BINARY_DIR}/${tgt}.wd") + file(MAKE_DIRECTORY ${working_dir}) - # Parse the dg-options annotations in the file and add it to DG_FFLAGS. - gfortran_dg_options_fflags(DG_FFLAGS ${File}) + set("${out}" "${working_dir}" PARENT_SCOPE) +endfunction() + +# Setup a "compile" test. EXPECT_ERROR will be ON if the compile test is +# expected to fail, OFF otherwise. MAIN is the main test file. In the case of +# multi-file tests, OTHERS will be the remaining files needed by the test. +# FFLAGS are compiler flags needed by the test. LDFLAGS are linker flags needed +# by the test. +function(gfortran_add_compile_test expect_error main others fflags ldflags) + # The test-suite expects an executable to be produced at build time and for + # that executable to be run at test time. The result (in the form of the + # return code or the output written to stdout/stderr) is used to determine + # whether the test has succeeded. The "compile" tests are intended to exercise + # the behavior of the compiler itself. There isn't a clean way of having the + # compiler be executed at test time. Instead, the compiler is run at + # build time and the diagnostics/errors saved to a file as needed. This file is + # compared to a reference output at test time to determine success/failure of + # the test. A dummy executable is also built. This does nothing, but provides + # something that the test suite can "run" at test time. + + # PREFIX_COMPILE will have been defined in the subdirectory from which this + # function is called. 
+ gfortran_unique_target_name("${PREFIX_COMPILE}" "${main}" target) + gfortran_make_working_dir("${target}" working_dir) + + # The output of the compilation of the test file. This may contain warnings + # and error messages. If the compilation succeeded without any warnings or + # other diagnostics, it will be empty. + set(out ${target}.out) + + add_custom_command( + OUTPUT ${out} + COMMAND ${CMAKE_COMMAND} + -DCMD="${CMAKE_Fortran_COMPILER};-c;${fflags};${ldflags};${others};${main}" + -DALWAYS_SAVE_DIAGS=OFF + -DWORKING_DIRECTORY=${working_dir} + -DOUTPUT_FILE=${out} + -P ${COMPILE_SCRIPT_BIN} + USES_TERMINAL + COMMENT "Compiling ${main}") + + add_custom_target(${target} + ALL + DEPENDS ${out} + SOURCES ${main} ${others}) + + # The dummy and empty reference output files are in + # ${CMAKE_BINARY_DIR}/Fortran/gfortran. This function could be called from + # any of the subdirectories under ${CMAKE_BINARY_DIR}/Fortran/gfortran. The + # tests need paths relative to the directory containing the test, so calculate + # the relative path back to ${CMAKE_BINARY_DIR}/Fortran/gfortran. + file(RELATIVE_PATH relpath + ${CMAKE_CURRENT_BINARY_DIR} + ${CMAKE_BINARY_DIR}/Fortran/gfortran) + + # The test suite expects an executable to run, so give it the dummy (see + # comments above). + llvm_test_run(EXECUTABLE %S/${relpath}/${DUMMY_EXE}) + + # The verification compares the saved diagnostics file against what is + # expected for the test. The reference output may have been extracted from + # the DejaGNU annotations in the test file, or it may be an empty file if the + # compilation of the test file was expected to be successful and without any + # diagnostics. + if (expect_error) + # Since we don't check for any particular error, we expect "some" error. + # In that case, the compiler's diagnostic output will be non-empty. + llvm_test_verify(${TESTCMD} -s %S/${out}) + else () + llvm_test_verify(${DIFFPROG} %S/${relpath}/${EMPTY_FILE} %S/${out}) + endif () + + llvm_add_test(${target}.test %S/${relpath}/${DUMMY_EXE}) +endfunction() + +# Look for "compile" tests in TESTS and create a test for each "main" file that +# is found. +function(gfortran_add_compile_tests_from tests) + cmake_parse_arguments(GFORTRAN "" "" "FFLAGS;LDFLAGS" ${ARGN}) + + foreach (file ${tests}) + # Whether this test is expected to pass or fail. + set(expect_error OFF) + + # For multi-file tests, these are the other files needed. For the "compile" + # tests, there don't seem to be any multi-file tests at the time of writing, + # but leave it general in case that ever changes. + set(others "") + + set(fflags "") + set(ldflags "") + list(APPEND fflags ${GFORTRAN_FFLAGS}) + list(APPEND ldflags ${GFORTRAN_LDFLAGS}) + + file(STRINGS ${file} lines) + foreach (line ${lines}) + # The "compile" tests have a { dg-do compile } directive. + if (line MATCHES "^.*[{][ ]*dg-do[ ]*compile(.*)[}].*$") + # TODO: We completely ignore any target directives that may be attached + # to these directives. For now, it seems to be ok, but as these tests + # are enabled on more platforms, the target directives might need to be + # handled. + elseif (line MATCHES "dg-additional-sources[ ]*[\"]?(.+)[\"]?[ ]*[}]") + separate_arguments(others UNIX_COMMAND ${CMAKE_MATCH_1}) + list(TRANSFORM others STRIP) + elseif (line MATCHES "dg-(additional-)?options [{]?[ ]*\"([^\"]*)\"[ ]*[}]?(.*)") + # TODO: We completely ignore any target directives that may be attached + # to these directives.
For now, it seems to be ok, but as these tests + # are enabled on more platforms, the target directives might need to be + # handled. + separate_arguments(file_fflags UNIX_COMMAND ${CMAKE_MATCH_2}) + list(REMOVE_ITEM file_fflags ${FLANG_ERRORING_FFLAGS}) + list(APPEND fflags ${file_fflags}) + elseif (line MATCHES "[{][ ]*dg-error[ ]*") + # Currently, we don't try to match gfortran's expected errors with + # flang's expected error messages. Instead, if gfortran expects an + # error, we test that flang produces "some" error. + # TODO: It may be more useful to match gfortran's error messages to + # flang's in which case we should do something more sophisticated here, + # but as a first pass, the more basic approach should do. + set(expect_error ON) + endif() + endforeach() + + # Some files look like they ought to be "compile" tests but they don't + # contain a DejaGNU compile directive. Some have an error or other + # directive that could be used to infer that they are actually compile tests + # but for those that are intended to succeed, there may be no directives + # at all. So just treat all files as if they were "compile" tests (even + # those that are the main files of execute tests and ones that are + # dependencies in multi-file tests). + gfortran_add_compile_test(${expect_error} ${file} "${others}" "${fflags}" "${ldflags}") + endforeach() +endfunction() + +# Creates a "compile" test from each file in TESTS. It is assumed that the +# compilation is intended to succeed. Any compile/link flags that the test needs +# must be passed explicitly. +function(gfortran_add_compile_tests tests) + cmake_parse_arguments(GFORTRAN "" "" "FFLAGS;LDFLAGS" ${ARGN}) - # Add any flags that were requested. - list(APPEND FFLAGS ${DG_FFLAGS} ${GFORTRAN_FFLAGS}) - list(APPEND LDFLAGS ${GFORTRAN_LDFLAGS}) + list(APPEND fflags ${GFORTRAN_FFLAGS}) + list(APPEND ldflags ${GFORTRAN_LDFLAGS}) + + foreach (file ${tests}) + gfortran_add_compile_test(OFF ${file} "" "${fflags}" "${ldflags}") + endforeach() +endfunction() - llvm_test_executable_no_test(${test_target} ${File} ${GFORTRAN_DEPFILES}) +# Setup an "execute" test. In the case of multi-file tests, MAIN will be the +# main file. For multi-file tests, OTHERS will be the remaining files needed by +# the test. FFLAGS are additional compiler flags needed by the test. LDFLAGS +# are the other linker flags needed by the test. +function(gfortran_add_execute_test main others fflags ldflags) + # PREFIX_EXECUTE will have been defined in the subdirectory from which this + # function is called. + gfortran_unique_target_name("${PREFIX_EXECUTE}" "${main}" target) + gfortran_make_working_dir("${target}" working_dir) + get_filename_component(working_dir_name "${working_dir}" NAME) + + llvm_test_executable_no_test(${target} ${main} ${others}) llvm_test_run(WORKDIR "%S/${working_dir_name}") - llvm_add_test_for_target(${test_target}) + llvm_add_test_for_target(${target}) - target_include_directories(${test_target} + target_include_directories(${target} PRIVATE ${ISO_FORTRAN_C_HEADER_DIR} ${working_dir}) - set_target_properties(${test_target} PROPERTIES + target_compile_options(${target} PRIVATE "${fflags}") + target_link_options(${target} PRIVATE "${ldflags}") + set_target_properties(${target} PROPERTIES Fortran_MODULE_DIRECTORY ${working_dir}) # This is a workaround because cmake does not currently recognize the .f03 # and .f08 extensions. A patch to fix cmake has been accepted and the fix # should be available in CMake 3.27. 
It might be better to check the CMake - # CMake version and do this conditionally. - list(APPEND SOURCES ${File}) - list(APPEND SOURCES ${GFORTRAN_DEPFILES}) - foreach(Source ${SOURCES}) - get_filename_component(Ext ${Source} LAST_EXT) - if("${Ext}" STREQUAL ".f03" OR - "${Ext}" STREQUAL ".F03" OR - "${Ext}" STREQUAL ".f08" OR - "${Ext}" STREQUAL ".F08") - set_source_files_properties(${Source} PROPERTIES LANGUAGE Fortran) + # version and do this conditionally. + list(APPEND sources ${main}) + list(APPEND sources ${others}) + foreach(source ${sources}) + get_filename_component(ext ${source} LAST_EXT) + if("${ext}" STREQUAL ".f03" OR + "${ext}" STREQUAL ".F03" OR + "${ext}" STREQUAL ".f08" OR + "${ext}" STREQUAL ".F08") + set_source_files_properties(${source} PROPERTIES LANGUAGE Fortran) + endif() + endforeach() + + set_target_properties(${target} PROPERTIES LINKER_LANGUAGE Fortran) +endfunction() + +# Look for "execute" tests in TESTS and create a test for each "main" file that +# is found. In the case of multi-file tests, other files may be needed for the +# test. Those will be obtained by parsing the DejaGNU directives in the "main" +# file. +function(gfortran_add_execute_tests_from tests) + cmake_parse_arguments(GFORTRAN "" "" "FFLAGS;LDFLAGS" ${ARGN}) + + foreach(file ${tests}) + # The file containing the "run" directive will be the main file. + set(main "") + + # For multi-file tests, these are the other files needed. + set(others "") + + # Any flags needed to compile the test. These will be flang-specific flags. + # Directives in the test files may specify additional flags needed by the + # test. Those will be parsed in gfortran_add_execute_test. + set(fflags "") + set(ldflags "") + list(APPEND fflags ${GFORTRAN_FFLAGS}) + list(APPEND ldflags ${GFORTRAN_LDFLAGS}) + + file(STRINGS ${file} lines) + foreach (line ${lines}) + # The "execute" tests have a { dg-do run }, or a { dg-lto-do run } + # directive. + if (line MATCHES "^.*[{][ ]*dg-(lto-)?do[ ]*run (.*)[}]$") + # TODO: We completely ignore any target directives that may be attached + # to the run directives. For now, it seems to be ok, but as these tests + # are enabled on more platforms, the target directives might need to be + # handled. + set(main "${file}") + elseif (line MATCHES "dg-additional-sources[ ]*[\"]?(.+)[\"]?[ ]*[}]") + separate_arguments(others UNIX_COMMAND ${CMAKE_MATCH_1}) + list(TRANSFORM others STRIP) + elseif (line MATCHES "dg-(additional-)?options [{]?[ ]*\"([^\"]*)\"[ ]*[}]?(.*)") + # TODO: We completely ignore any target-specific options that may be + # present. These are usually in the form of target directives. For now, + # it seems to be ok, but as these tests are enabled on more platforms, + # target directives might need to be handled. + separate_arguments(file_fflags UNIX_COMMAND ${CMAKE_MATCH_2}) + list(REMOVE_ITEM file_fflags ${FLANG_ERRORING_FFLAGS}) + list(APPEND fflags ${file_fflags}) + endif() + endforeach() + + # Since any dependent files could also be processed by this function, there + # is no guarantee that main will have been set. + if (main) + gfortran_add_execute_test(${main} "${others}" "${fflags}" "${ldflags}") endif() endforeach() +endfunction() + +# Creates an "execute" test from each file in TESTS. The tests are assumed to +# not contain any additional DejaGNU directives. Any compiler/linker flags +# needed by the test must be passed explicitly. 
+function(gfortran_add_execute_tests tests) + cmake_parse_arguments(GFORTRAN "" "" "FFLAGS;LDFLAGS" ${ARGN}) - set_target_properties(${test_target} PROPERTIES LINKER_LANGUAGE Fortran) + list(APPEND fflags ${GFORTRAN_FFLAGS}) + list(APPEND ldflags ${GFORTRAN_LDFLAGS}) + + foreach(file ${tests}) + gfortran_add_execute_test(${file} "" "${fflags}" "${ldflags}") + endforeach() endfunction() +set(HEADER_SEARCH_PATH "${TEST_SUITE_FORTRAN_ISO_C_HEADER_DIR}") +if (NOT HEADER_SEARCH_PATH) + get_filename_component(Fortran_BINDIR ${CMAKE_Fortran_COMPILER} DIRECTORY) + get_filename_component(Fortran_PREFIX ${Fortran_BINDIR} DIRECTORY) + + set(HEADER_SEARCH_PATH "${Fortran_PREFIX}/include/flang") +endif() + +find_file(ISO_FORTRAN_C_HEADER + ISO_Fortran_binding.h + PATHS ${HEADER_SEARCH_PATH} + REQUIRED) + +get_filename_component(ISO_FORTRAN_C_HEADER_DIR + "${ISO_FORTRAN_C_HEADER}" + DIRECTORY) + +# The program to be used to verify the results. It should take two files as +# arguments and return 0 if the files are identical, non-zero +# otherwise. +set(DIFFPROG) +if (WIN32) + # Windows support has been disabled earlier anyway, but at some point, we + # should find a way to check if a file is non-empty on Windows. + message(FATAL_ERROR "No way to check file size in Windows.") + find_program(DIFFPROG + NAMES fc.exe + REQUIRED) +else () + # FIXME: For the moment, check if a file is not empty, by using the test + # command/shell built-in on *nix. But it is not clear that all systems will + # have this. What we really need is to invert the result of DIFFPROG. LLVM has + # a "not" utility that will do just this. But it needs LLVM's build directory + # to be present unless it is installed on the system already. It is not clear + # if we can depend on this. + find_program(TESTCMD + NAMES test + REQUIRED) + find_program(DIFFPROG + NAMES diff cmp + REQUIRED) +endif () + +# The test suite expects to be able to run something at test-time. For the +# compile tests, there is nothing to be run. While a better solution would be +# to modify the test suite to allow for cases like this, for the moment, just +# create an empty executable that will be run for each test. +set(DUMMY_SRC ${CMAKE_CURRENT_BINARY_DIR}/dummy.f90) +file(WRITE ${DUMMY_SRC} "program test\nend program test") + +set(DUMMY_EXE "dummy") +add_executable(${DUMMY_EXE} ${DUMMY_SRC}) + +# This script compiles the files that are "compile" tests. It may save the +# diagnostics to file as needed (see the options that the script accepts). There +# should be no dependence on the source files at test-time, so copy the compile +# script over to the build directory. For the moment, nothing is compiled at +# test-time, but that might change. +set(COMPILE_SCRIPT compile-save-diags.cmake) +set(COMPILE_SCRIPT_SRC ${CMAKE_CURRENT_SOURCE_DIR}/${COMPILE_SCRIPT}) +set(COMPILE_SCRIPT_BIN ${CMAKE_CURRENT_BINARY_DIR}/${COMPILE_SCRIPT}) + +file(COPY + ${CMAKE_CURRENT_SOURCE_DIR}/${COMPILE_SCRIPT} + DESTINATION ${CMAKE_CURRENT_BINARY_DIR}) + +# In some cases, the "compile" tests are expected to pass. Since diagnostics are +# only saved on failure, the diagnostics file produced when compiling the test +# should be empty. An empty file can, therefore, be used as reference output.
+set(EMPTY_FILE "gfortran-compile-empty.reference.out") +file(TOUCH ${CMAKE_CURRENT_BINARY_DIR}/${EMPTY_FILE}) + add_subdirectory(regression) add_subdirectory(torture) diff --git a/Fortran/gfortran/compile-save-diags.cmake b/Fortran/gfortran/compile-save-diags.cmake --- a/Fortran/gfortran/compile-save-diags.cmake +++ b/Fortran/gfortran/compile-save-diags.cmake @@ -8,17 +8,15 @@ # Run the compiler and save the compiler diagnostics to file. The diagnostics # are simply the messages printed to stdout and stderr. This is intended to be -# used for the "compile" tests in this test. The "compile" tests exercise the -# compiler's parser and semantic analyzer as well as the diagnostics. +# used for the "compile" tests in this suite. # Required parameters # -# COMPILER: STRING Path to the compiler. -# COMPILER_FLAGS: STRING Compiler flags. -# INPUT_FILES: STRING Space separated list of files to compile. -# OUTPUT_FILE: STRING The output diagnostics file. -# OBJECT_FILE: STRING The object file produced by the compiler. This -# will be deleted before this script exits. +# CMD: STRING Most of the compilation command. This excludes +# the module directory flag (-J). +# +# OUTPUT_FILE: STRING The output file containing any compiler +# diagnostics. # # Optional parameters # @@ -28,18 +26,30 @@ # failed for any reason. # -execute_process(COMMAND ${COMPILER} ${COMPILER_FLAGS} ${INPUT_FILES} -o ${OBJECT_FILE} +separate_arguments(COMMAND NATIVE_COMMAND "${CMD} -J${WORKING_DIRECTORY}") + +# There are race conditions on .mod files (due to different tests compiling modules +# having the same name). At the time of writing, there don't seem to be any +# multi-file "compile" tests, let alone any pairs of tests which might generate +# object files having the same name. In case any do show up, set the working +# directory to be the module directory. This ensures that any object files are +# written to the dedicated working directory for the test. +execute_process(COMMAND ${COMMAND} + WORKING_DIRECTORY ${WORKING_DIRECTORY} RESULT_VARIABLE RETVAR OUTPUT_VARIABLE OUTVAR ERROR_VARIABLE ERRVAR OUTPUT_STRIP_TRAILING_WHITESPACE ERROR_STRIP_TRAILING_WHITESPACE) +# An output file will always be written because one is expected to exist at +# test-time. file(WRITE "${OUTPUT_FILE}" "") + +# Only save diagnostics if the compilation returned a non-zero (error) code, unless +# instructed to always save diagnostics. if (ALWAYS_SAVE_DIAGS OR NOT "${RETVAR}" EQUAL "0") file(APPEND "${OUTPUT_FILE}" "${ERRVAR}") file(APPEND "${OUTPUT_FILE}" "") file(APPEND "${OUTPUT_FILE}" "${OUTVAR}") endif() - -file(REMOVE ${OBJECT_FILE}) diff --git a/Fortran/gfortran/regression/CMakeLists.txt b/Fortran/gfortran/regression/CMakeLists.txt --- a/Fortran/gfortran/regression/CMakeLists.txt +++ b/Fortran/gfortran/regression/CMakeLists.txt @@ -6,89 +6,16 @@ # #===------------------------------------------------------------------------===# -# Creates a test from each valid test file in the current source directory. Each -# argument to a function is a list. If a test file is in any of the lists, a -# test will not be created from it. -function(add_tests UnsupportedTests UnimplementedTests SkippedTests FailingTests) - # This will just get all the Fortran source files in the directory. Since - # the tests here are a mix of single-source and multi-source tests, this will - # include files that are dependencies of some "main" test file as well. Those - # will be filtered out later.
- file(GLOB AllFiles CONFIGURE_DEPENDS LIST_DIRECTORIES false - *.f* - *.F* - ) - - set(TestsToSkip) - - # There is still a chance that some of the unsupported tests may need to be - # enabled, for instance if the non-standard extensions that they exercise are - # supported due to user demand. - if (NOT TEST_SUITE_FORTRAN_FORCE_ALL_TESTS AND - NOT TEST_SUITE_FORTRAN_FORCE_UNSUPPORTED_TESTS) - list(APPEND TestsToSkip ${UnsupportedTests}) - endif() - - # For the remaining tests, there is cause to build and run the skipped, failing - # and unimplemented tests since some could be enabled once some feature is - # implemented. Eventually, all the TEST_SUITE_FORTRAN_FORCE_* options (perhaps - # with the exception of TEST_SUITE_FORTRAN_FORCE_UNSUPPORTED_TESTS) should - # become redundant and removed. - if (NOT TEST_SUITE_FORTRAN_FORCE_ALL_TESTS AND - NOT TEST_SUITE_FORTRAN_FORCE_UNIMPLEMENTED_TESTS) - list(APPEND TestsToSkip ${UnimplementedTests}) - endif() - - if (NOT TEST_SUITE_FORTRAN_FORCE_ALL_TESTS AND - NOT TEST_SUITE_FORTRAN_FORCE_FAILING_TESTS) - list(APPEND TestsToSkip ${FailingTests}) - endif() - - if (NOT TEST_SUITE_FORTRAN_FORCE_ALL_TESTS AND - NOT TEST_SUITE_FORTRAN_FORCE_SKIPPED_TESTS) - list(APPEND TestsToSkip ${SkippedTests}) - endif() - - foreach(TestToSkip ${TestsToSkip}) - list(REMOVE_ITEM AllFiles ${TestToSkip}) - endforeach() - - # For now, only the execute tests are supported. Those have a "{ dg-do run }" - # or a "{ dg-lto-do run }" directive. - foreach(File ${AllFiles}) - set(MAIN) - set(ADDITIONAL_SOURCES) - set(ADDITIONAL_FFLAGS) - set(ADDITIONAL_LDFLAGS) - - file(STRINGS ${File} FileLines) - foreach(FileLine ${FileLines}) - if(FileLine MATCHES "^.*[{][ ]*dg-(lto-)?do[ ]*([A-Za-z0-9]+).*[}].*$") - # If this is not a main file for an execute test, just move on to the - # next file. - if (NOT ${CMAKE_MATCH_2} MATCHES "run") - break() - else() - set(MAIN "${File}") - endif() - elseif(FileLine MATCHES "dg-additional-sources[ ]*[\"]?(.+)[\"]?[ ]*[}]") - separate_arguments(ADDITIONAL_SOURCES UNIX_COMMAND ${CMAKE_MATCH_1}) - list(TRANSFORM ADDITIONAL_SOURCES STRIP) - endif() - endforeach() - - if (MAIN) - gfortran_execute_test(${MAIN} - PREFIX "gfortran-regression-execute" - FFLAGS ${ADDITIONAL_FFLAGS} - LDFLAGS ${ADDITIONAL_LDFLAGS} - DEPFILES ${ADDITIONAL_SOURCES}) - endif() - endforeach() -endfunction() +# PREFIX_EXECUTE and PREFIX_COMPILE are used in gfortran_add_execute_test and +# gfortran_add_compile_test respectively. +set(PREFIX_EXECUTE "gfortran-regression-execute") +set(PREFIX_COMPILE "gfortran-regression-compile") set(TESTS) gfortran_populate_tests(TESTS) +gfortran_add_execute_tests_from("${TESTS}") +# TODO: Enable the "compile" tests +# gfortran_add_compile_tests_from("${TESTS}") add_subdirectory(analyzer) add_subdirectory(asan) diff --git a/Fortran/gfortran/regression/analyzer/CMakeLists.txt b/Fortran/gfortran/regression/analyzer/CMakeLists.txt --- a/Fortran/gfortran/regression/analyzer/CMakeLists.txt +++ b/Fortran/gfortran/regression/analyzer/CMakeLists.txt @@ -8,3 +8,6 @@ set(TESTS) gfortran_populate_tests(TESTS) +gfortran_add_execute_tests_from("${TESTS}") +# TODO: Enable the "compile" tests. 
+# gfortran_add_compile_tests_from("${TESTS}") diff --git a/Fortran/gfortran/regression/asan/CMakeLists.txt b/Fortran/gfortran/regression/asan/CMakeLists.txt --- a/Fortran/gfortran/regression/asan/CMakeLists.txt +++ b/Fortran/gfortran/regression/asan/CMakeLists.txt @@ -8,3 +8,6 @@ set(TESTS) gfortran_populate_tests(TESTS) +gfortran_add_execute_tests_from("${TESTS}") +# TODO: Enable the "compile" tests. +# gfortran_add_compile_tests_from("${TESTS}") diff --git a/Fortran/gfortran/regression/c-interop/CMakeLists.txt b/Fortran/gfortran/regression/c-interop/CMakeLists.txt --- a/Fortran/gfortran/regression/c-interop/CMakeLists.txt +++ b/Fortran/gfortran/regression/c-interop/CMakeLists.txt @@ -8,3 +8,6 @@ set(TESTS) gfortran_populate_tests(TESTS) +gfortran_add_execute_tests_from("${TESTS}") +# TODO: Enable the "compile" tests. +# gfortran_add_compile_tests_from("${TESTS}") diff --git a/Fortran/gfortran/regression/coarray/CMakeLists.txt b/Fortran/gfortran/regression/coarray/CMakeLists.txt --- a/Fortran/gfortran/regression/coarray/CMakeLists.txt +++ b/Fortran/gfortran/regression/coarray/CMakeLists.txt @@ -8,3 +8,6 @@ set(TESTS) gfortran_populate_tests(TESTS) +gfortran_add_execute_tests_from("${TESTS}") +# TODO: Enable the "compile" tests. +# gfortran_add_compile_tests_from("${TESTS}") diff --git a/Fortran/gfortran/regression/debug/CMakeLists.txt b/Fortran/gfortran/regression/debug/CMakeLists.txt --- a/Fortran/gfortran/regression/debug/CMakeLists.txt +++ b/Fortran/gfortran/regression/debug/CMakeLists.txt @@ -8,3 +8,6 @@ set(TESTS) gfortran_populate_tests(TESTS) +gfortran_add_execute_tests_from("${TESTS}") +# TODO: Enable the "compile" tests. +# gfortran_add_compile_tests_from("${TESTS}") diff --git a/Fortran/gfortran/regression/g77/CMakeLists.txt b/Fortran/gfortran/regression/g77/CMakeLists.txt --- a/Fortran/gfortran/regression/g77/CMakeLists.txt +++ b/Fortran/gfortran/regression/g77/CMakeLists.txt @@ -8,3 +8,6 @@ set(TESTS) gfortran_populate_tests(TESTS) +gfortran_add_execute_tests_from("${TESTS}") +# TODO: Enable the "compile" tests. +# gfortran_add_compile_tests_from("${TESTS}") diff --git a/Fortran/gfortran/regression/goacc-gomp/CMakeLists.txt b/Fortran/gfortran/regression/goacc-gomp/CMakeLists.txt --- a/Fortran/gfortran/regression/goacc-gomp/CMakeLists.txt +++ b/Fortran/gfortran/regression/goacc-gomp/CMakeLists.txt @@ -8,3 +8,6 @@ set(TESTS) gfortran_populate_tests(TESTS) +gfortran_add_execute_tests_from("${TESTS}") +# TODO: Enable the "compile" tests. +# gfortran_add_compile_tests_from("${TESTS}") diff --git a/Fortran/gfortran/regression/goacc/CMakeLists.txt b/Fortran/gfortran/regression/goacc/CMakeLists.txt --- a/Fortran/gfortran/regression/goacc/CMakeLists.txt +++ b/Fortran/gfortran/regression/goacc/CMakeLists.txt @@ -8,3 +8,6 @@ set(TESTS) gfortran_populate_tests(TESTS) +gfortran_add_execute_tests_from("${TESTS}") +# TODO: Enable the "compile" tests. +# gfortran_add_compile_tests_from("${TESTS}") diff --git a/Fortran/gfortran/regression/gomp/CMakeLists.txt b/Fortran/gfortran/regression/gomp/CMakeLists.txt --- a/Fortran/gfortran/regression/gomp/CMakeLists.txt +++ b/Fortran/gfortran/regression/gomp/CMakeLists.txt @@ -8,3 +8,6 @@ set(TESTS) gfortran_populate_tests(TESTS) +gfortran_add_execute_tests_from("${TESTS}") +# TODO: Enable the "compile" tests. 
+# gfortran_add_compile_tests_from("${TESTS}") diff --git a/Fortran/gfortran/regression/gomp/appendix-a/CMakeLists.txt b/Fortran/gfortran/regression/gomp/appendix-a/CMakeLists.txt --- a/Fortran/gfortran/regression/gomp/appendix-a/CMakeLists.txt +++ b/Fortran/gfortran/regression/gomp/appendix-a/CMakeLists.txt @@ -8,3 +8,6 @@ set(TESTS) gfortran_populate_tests(TESTS) +gfortran_add_execute_tests_from("${TESTS}") +# TODO: Enable the "compile" tests. +# gfortran_add_compile_tests_from("${TESTS}") diff --git a/Fortran/gfortran/regression/graphite/CMakeLists.txt b/Fortran/gfortran/regression/graphite/CMakeLists.txt --- a/Fortran/gfortran/regression/graphite/CMakeLists.txt +++ b/Fortran/gfortran/regression/graphite/CMakeLists.txt @@ -6,7 +6,5 @@ # #===------------------------------------------------------------------------===# -message(STATUS "Adding directory Fortran/gfortran/regression/graphite") - # The tests in this directory exercise the graphite polyhedral compiler that is # part of the GNU compiler collection. This is not relevant for flang. diff --git a/Fortran/gfortran/regression/ieee/CMakeLists.txt b/Fortran/gfortran/regression/ieee/CMakeLists.txt --- a/Fortran/gfortran/regression/ieee/CMakeLists.txt +++ b/Fortran/gfortran/regression/ieee/CMakeLists.txt @@ -8,3 +8,6 @@ set(TESTS) gfortran_populate_tests(TESTS) +gfortran_add_execute_tests_from("${TESTS}") +# TODO: Enable the "compile" tests. +# gfortran_add_compile_tests_from("${TESTS}") diff --git a/Fortran/gfortran/regression/lto/CMakeLists.txt b/Fortran/gfortran/regression/lto/CMakeLists.txt --- a/Fortran/gfortran/regression/lto/CMakeLists.txt +++ b/Fortran/gfortran/regression/lto/CMakeLists.txt @@ -8,3 +8,6 @@ set(TESTS) gfortran_populate_tests(TESTS) +gfortran_add_execute_tests_from("${TESTS}") +# TODO: Enable the "compile" tests. +# gfortran_add_compile_tests_from("${TESTS}") diff --git a/Fortran/gfortran/regression/ubsan/CMakeLists.txt b/Fortran/gfortran/regression/ubsan/CMakeLists.txt --- a/Fortran/gfortran/regression/ubsan/CMakeLists.txt +++ b/Fortran/gfortran/regression/ubsan/CMakeLists.txt @@ -8,3 +8,6 @@ set(TESTS) gfortran_populate_tests(TESTS) +gfortran_add_execute_tests_from("${TESTS}") +# TODO: Enable the "compile" tests. +# gfortran_add_compile_tests_from("${TESTS}") diff --git a/Fortran/gfortran/regression/vect/CMakeLists.txt b/Fortran/gfortran/regression/vect/CMakeLists.txt --- a/Fortran/gfortran/regression/vect/CMakeLists.txt +++ b/Fortran/gfortran/regression/vect/CMakeLists.txt @@ -8,3 +8,6 @@ set(TESTS) gfortran_populate_tests(TESTS) +gfortran_add_execute_tests_from("${TESTS}") +# TODO: Enable the "compile" tests. +# gfortran_add_compile_tests_from("${TESTS}") diff --git a/Fortran/gfortran/torture/CMakeLists.txt b/Fortran/gfortran/torture/CMakeLists.txt --- a/Fortran/gfortran/torture/CMakeLists.txt +++ b/Fortran/gfortran/torture/CMakeLists.txt @@ -6,6 +6,13 @@ # #===------------------------------------------------------------------------===# +message(STATUS "Adding directory Fortran/gfortran/torture") + +# PREFIX_EXECUTE and PREFIX_COMPILE are used in gfortran_add_execute_test and +# gfortran_add_compile_test respectively. 
+set(PREFIX_EXECUTE "gfortran-torture-execute") +set(PREFIX_COMPILE "gfortran-torture-compile") + add_subdirectory(compile) add_subdirectory(execute) diff --git a/Fortran/gfortran/torture/compile/CMakeLists.txt b/Fortran/gfortran/torture/compile/CMakeLists.txt --- a/Fortran/gfortran/torture/compile/CMakeLists.txt +++ b/Fortran/gfortran/torture/compile/CMakeLists.txt @@ -6,145 +6,6 @@ # #===------------------------------------------------------------------------===# -# The test-suite expects an executable to be produced at build time and for -# that executable to be run at test time. The result (in the form of the -# return code or the output written to stdout/stderr) is used to determine -# whether the test has succeeded. The "compile" tests are intended to exercise -# the behavior of the compiler itself. There isn't a clean way of having the -# compiler be executed at test time. Instead, the compiler is run at -# build time and the diagnostics/errors saved to a file as needed. This file is -# compared to a reference output at test time to determine success/failure of -# the test. A dummy executable is also built. This does nothing, but provides -# something that the test suite can "run" at test time. - -# Creates a test from each valid test file in the current source directory. Each -# argument to a function is a list. If a test file is in any of the lists, a -# test will not be created from it. -function(add_tests UnsupportedTests UnimplementedTests SkippedTests FailingTests) - # This will just get all the Fortran source files in the directory. The tests - # in this directory are all single-source. - file(GLOB AllFiles CONFIGURE_DEPENDS LIST_DIRECTORIES false - *.f* - *.F* - ) - - set(TestsToSkip) - - # There is still a chance that some of the unsupported tests may need to be - # enabled, for instance if the non-standard extensions that they exercise are - # supported due to user demand. - if (NOT TEST_SUITE_FORTRAN_FORCE_ALL_TESTS AND - NOT TEST_SUITE_FORTRAN_FORCE_UNSUPPORTED_TESTS) - list(APPEND TestsToSkip ${UnsupportedTests}) - endif() - - # For the remaining tests, there is cause to build and run the skipped, failing - # and unimplemented tests since some could be enabled once some feature is - # implemented. Eventually, all the TEST_SUITE_FORTRAN_FORCE_* options (perhaps - # with the exception of TEST_SUITE_FORTRAN_FORCE_UNSUPPORTED_TESTS) should - # become redundant and removed. - if (NOT TEST_SUITE_FORTRAN_FORCE_ALL_TESTS AND - NOT TEST_SUITE_FORTRAN_FORCE_UNIMPLEMENTED_TESTS) - list(APPEND TestsToSkip ${UnimplementedTests}) - endif() - - if (NOT TEST_SUITE_FORTRAN_FORCE_ALL_TESTS AND - NOT TEST_SUITE_FORTRAN_FORCE_FAILING_TESTS) - list(APPEND TestsToSkip ${FailingTests}) - endif() - - if (NOT TEST_SUITE_FORTRAN_FORCE_ALL_TESTS AND - NOT TEST_SUITE_FORTRAN_FORCE_SKIPPED_TESTS) - list(APPEND TestsToSkip ${SkippedTests}) - endif() - - foreach(TestToSkip ${TestsToSkip}) - list(REMOVE_ITEM AllFiles ${TestToSkip}) - endforeach() - - # The program to be used to verify the results. The programs here should take - # two files as arguments, return 0 if the files are identical, non-zero - # otherwise. - set(DIFFPROG) - if (WIN32) - find_program(DIFFPROG - NAMES fc.exe - REQUIRED) - else () - find_program(DIFFPROG - NAMES diff cmp - REQUIRED) - endif () - - # The file prefix is needed because there are several tests with the same - # file name across the gfortran test suite. cmake prefers all targets to be - # unique, so they get prefixed with this. 
- set(FilePrefix "gfortran-torture-compile") - - # The test suite expects to be able to run something at testing time. For the - # compile tests, there is nothing to be run. While a better solution will be - # to modify the test suite to allow for cases like this, as a temporary - # measure, just create an empty executable that will be run for each test. - set(DummySrc ${CMAKE_CURRENT_BINARY_DIR}/dummy.f90) - set(Dummy "dummy") - - file(WRITE ${DummySrc} "program test\nend program test") - add_executable(${Dummy} ${DummySrc}) - - # All the "compile" tests in the gfortran torture tests are expected to - # pass. Since diagnostics are only saved on failure, the diagnostics - # file produced when compiling the test should be empty. An empty file can, - # therefore, be used as reference output. - set(Reference "${FilePrefix}-empty.reference.out") - add_custom_command( - OUTPUT ${Reference} - COMMAND ${CMAKE_COMMAND} -E touch ${CMAKE_CURRENT_BINARY_DIR}/${Reference} - VERBATIM - USES_TERMINAL - COMMENT "Creating reference output file" - ) - - # The compile script compiles the files and may save the diagnostics to file - # as needed (see the options that the script accepts). - set(COMPILE_SCRIPT - ${CMAKE_SOURCE_DIR}/Fortran/gfortran/compile-save-diags.cmake) - - foreach(File ${AllFiles}) - get_filename_component(FileName ${File} NAME) - - set(Out ${FileName}.out) - set(Obj ${FileName}.o) - - # ${Exe} is just used as a custom target name. Nevertheless, it needs to be - # unique. There are multiple files with the same name but different - # extensions in this directory. Retain the extension, but replace the - # final '.' with an '_'. - string(REPLACE "." "_" Exe "${FilePrefix}_${FileName}") - - set(Compiler -DCOMPILER=${CMAKE_Fortran_COMPILER}) - set(CompilerFlags -DCOMPILER_FLAGS=-c) - set(InputFiles -DINPUT_FILES=${File}) - set(ObjFile -DOBJECT_FILE=${Obj}) - set(OutputFile -DOUTPUT_FILE=${Out}) - set(AlwaysSaveDiags -DALWAYS_SAVE_DIAGS=OFF) - - add_custom_command( - OUTPUT ${Out} - COMMAND ${CMAKE_COMMAND} ${Compiler} ${CompilerFlags} ${InputFiles} ${ObjFile} ${OutputFile} ${AlwaysSaveDiags} -P ${COMPILE_SCRIPT} - VERBATIM - USES_TERMINAL - COMMENT "Compiling ${File}") - - add_custom_target(${Exe} - ALL - DEPENDS ${Out} ${Reference} ${Dummy} - SOURCES ${File}) - - llvm_test_run(EXECUTABLE %S/${Dummy}) - llvm_test_verify(${DIFFPROG} %S/${Reference} %S/${Out}) - llvm_add_test(${Exe}.test %S/${Dummy}) - endforeach() -endfunction() - set(TESTS) gfortran_populate_tests(TESTS) +gfortran_add_compile_tests("${TESTS}") diff --git a/Fortran/gfortran/torture/execute/CMakeLists.txt b/Fortran/gfortran/torture/execute/CMakeLists.txt --- a/Fortran/gfortran/torture/execute/CMakeLists.txt +++ b/Fortran/gfortran/torture/execute/CMakeLists.txt @@ -6,58 +6,6 @@ # #===------------------------------------------------------------------------===# -# Creates a test from each valid test file in the current source directory. Each -# argument to a function is a list. If a test file is in any of the lists, a -# test will not be created from it. -function(add_tests UnsupportedTests UnimplementedTests SkippedTests FailingTests) - # All the tests here are single-source. - file(GLOB AllFiles CONFIGURE_DEPENDS LIST_DIRECTORIES false - *.f* - *.F* - ) - - set(TestsToSkip) - - # There is still a chance that some of the unsupported tests may need to be - # enabled, for instance if the non-standard extensions that they exercise are - # supported due to user demand. 
- if (NOT TEST_SUITE_FORTRAN_FORCE_ALL_TESTS AND - NOT TEST_SUITE_FORTRAN_FORCE_UNSUPPORTED_TESTS) - list(APPEND TestsToSkip ${UnsupportedTests}) - endif() - - # For the remaining tests, there is cause to build and run the skipped, failing - # and unimplemented tests since some could be enabled once some feature is - # implemented. Eventually, all the TEST_SUITE_FORTRAN_FORCE_* options (perhaps - # with the exception of TEST_SUITE_FORTRAN_FORCE_UNSUPPORTED_TESTS) should - # become redundant and removed. - if (NOT TEST_SUITE_FORTRAN_FORCE_ALL_TESTS AND - NOT TEST_SUITE_FORTRAN_FORCE_UNIMPLEMENTED_TESTS) - list(APPEND TestsToSkip ${UnimplementedTests}) - endif() - - if (NOT TEST_SUITE_FORTRAN_FORCE_ALL_TESTS AND - NOT TEST_SUITE_FORTRAN_FORCE_FAILING_TESTS) - list(APPEND TestsToSkip ${FailingTests}) - endif() - - if (NOT TEST_SUITE_FORTRAN_FORCE_ALL_TESTS AND - NOT TEST_SUITE_FORTRAN_FORCE_SKIPPED_TESTS) - list(APPEND TestsToSkip ${SkippedTests}) - endif() - - foreach(TestToSkip ${TestsToSkip}) - list(REMOVE_ITEM AllFiles ${TestToSkip}) - endforeach() - - foreach(File ${AllFiles}) - gfortran_execute_test(${File} - PREFIX "gfortran-torture-execute" - FFLAGS "" - LDFLAGS "" - DEPFILES "") - endforeach() -endfunction() - set(TESTS) gfortran_populate_tests(TESTS) +gfortran_add_execute_tests("${TESTS}")
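
For reference, the compile step in Fortran/gfortran/CMakeLists.txt drives compile-save-diags.cmake through ${CMAKE_COMMAND} in an add_custom_command. A single "compile" test can be reproduced by hand with an equivalent standalone invocation; this is only an illustrative sketch (not part of the patch), and the compiler path, test file, and scratch directory are hypothetical placeholders:

    cmake "-DCMD=<path-to-flang>;-c;<path-to-test-file>" \
          -DALWAYS_SAVE_DIAGS=OFF \
          -DWORKING_DIRECTORY=<existing-scratch-dir> \
          -DOUTPUT_FILE=some_test.out \
          -P Fortran/gfortran/compile-save-diags.cmake

The script always creates OUTPUT_FILE: it stays empty when the compilation is clean and receives the compiler's stdout/stderr when it fails (or when ALWAYS_SAVE_DIAGS=ON). That file is what the test-time verification checks, either diffed against the empty reference or, when an error is expected, tested for non-emptiness.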