diff --git a/llvm/CMakeLists.txt b/llvm/CMakeLists.txt
--- a/llvm/CMakeLists.txt
+++ b/llvm/CMakeLists.txt
@@ -845,6 +845,29 @@
   include_directories(${TENSORFLOW_C_LIB_PATH}/include)
 endif()
 
+# They are not referenced. See set_output_directory().
+set( CMAKE_RUNTIME_OUTPUT_DIRECTORY ${LLVM_BINARY_DIR}/bin )
+set( CMAKE_LIBRARY_OUTPUT_DIRECTORY ${LLVM_BINARY_DIR}/lib${LLVM_LIBDIR_SUFFIX} )
+set( CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${LLVM_BINARY_DIR}/lib${LLVM_LIBDIR_SUFFIX} )
+
+# For up-to-date instructions for installing the Tensorflow dependency, refer to
+# the bot setup script: https://github.com/google/ml-compiler-opt/blob/master/buildbot/buildbot_init.sh
+# Specifically, assuming python3 is installed:
+# python3 -m pip install --upgrade pip && python3 -m pip install --user tf_nightly==2.3.0.dev20200528
+# Then set TENSORFLOW_AOT_PATH to the package install - usually it's ~/.local/lib/python3.7/site-packages/tensorflow
+#
+set(TENSORFLOW_AOT_PATH "" CACHE PATH "Path to TensorFlow pip install dir")
+
+if (NOT TENSORFLOW_AOT_PATH STREQUAL "")
+  set(LLVM_HAVE_TF_AOT "ON" CACHE BOOL "Tensorflow AOT available")
+  set(TENSORFLOW_AOT_COMPILER
+    "${TENSORFLOW_AOT_PATH}/../../../../bin/saved_model_cli"
+    CACHE PATH "Path to the Tensorflow AOT compiler")
+  include_directories(${TENSORFLOW_AOT_PATH}/include)
+  add_subdirectory(${TENSORFLOW_AOT_PATH}/xla_aot_runtime_src
+    ${CMAKE_ARCHIVE_OUTPUT_DIRECTORY}/tf_runtime)
+endif()
+
 # Configure the three LLVM configuration header files.
 configure_file(
   ${LLVM_MAIN_INCLUDE_DIR}/llvm/Config/config.h.cmake
@@ -875,12 +898,6 @@
     COMMAND rpmbuild -bs --define '_topdir ${LLVM_SRPM_DIR}' ${LLVM_SRPM_BINARY_SPECFILE})
   set_target_properties(srpm PROPERTIES FOLDER "Misc")
 
-
-# They are not referenced. See set_output_directory().
-set( CMAKE_RUNTIME_OUTPUT_DIRECTORY ${LLVM_BINARY_DIR}/bin )
-set( CMAKE_LIBRARY_OUTPUT_DIRECTORY ${LLVM_BINARY_DIR}/lib${LLVM_LIBDIR_SUFFIX} )
-set( CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${LLVM_BINARY_DIR}/lib${LLVM_LIBDIR_SUFFIX} )
-
 if(APPLE AND DARWIN_LTO_LIBRARY)
   set(CMAKE_EXE_LINKER_FLAGS
     "${CMAKE_EXE_LINKER_FLAGS} -Wl,-lto_library -Wl,${DARWIN_LTO_LIBRARY}")
@@ -975,28 +992,6 @@
     llvm_replace_compiler_option(CMAKE_CXX_FLAGS_RELEASE "-O3" "-O2")
 endif()
 
-# For up-to-date instructions for installing the Tensorflow dependency, refer to
-# the bot setup script: https://github.com/google/ml-compiler-opt/blob/master/buildbot/buildbot_init.sh
-# Specifically, assuming python3 is installed:
-# python3 -m pip install --upgrade pip && python3 -m pip install --user tf_nightly==2.3.0.dev20200528
-# Then set TENSORFLOW_AOT_PATH to the package install - usually it's ~/.local/lib/python3.7/site-packages/tensorflow
-#
-set(TENSORFLOW_AOT_PATH "" CACHE PATH "Path to TensorFlow pip install dir")
-
-if (NOT TENSORFLOW_AOT_PATH STREQUAL "")
-  set(LLVM_HAVE_TF_AOT "ON" CACHE BOOL "Tensorflow AOT available")
-  set(TENSORFLOW_AOT_COMPILER
-    "${TENSORFLOW_AOT_PATH}/../../../../bin/saved_model_cli"
-    CACHE PATH "Path to the Tensorflow AOT compiler")
-  # Unlike the LLVM_HAVE_TF_API case, we don't need to expose this through
-  # llvm-config.h, because it's an internal implementation detail. A user of the llvm library that wants to also
-  # use the TF AOT compiler may do so through their custom build step.
-  add_definitions("-DLLVM_HAVE_TF_AOT")
-  include_directories(${TENSORFLOW_AOT_PATH}/include)
-  add_subdirectory(${TENSORFLOW_AOT_PATH}/xla_aot_runtime_src
-    ${CMAKE_ARCHIVE_OUTPUT_DIRECTORY}/tf_runtime)
-endif()
-
 # Put this before tblgen. Else we have a circular dependence.
 add_subdirectory(lib/Demangle)
 add_subdirectory(lib/Support)
diff --git a/llvm/include/llvm/Analysis/InlineAdvisor.h b/llvm/include/llvm/Analysis/InlineAdvisor.h
--- a/llvm/include/llvm/Analysis/InlineAdvisor.h
+++ b/llvm/include/llvm/Analysis/InlineAdvisor.h
@@ -14,6 +14,7 @@
 #include <vector>
 
 #include "llvm/Analysis/InlineCost.h"
+#include "llvm/Config/config.h"
 #include "llvm/IR/PassManager.h"
 
 namespace llvm {
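
For reference, a minimal configure sketch using the new TENSORFLOW_AOT_PATH cache variable, assuming the pip install location mentioned in the comment above; the Python version, install location, and source path are illustrative and should be adjusted locally:

  # Install the TensorFlow pip dependency (taken from the comment in the patch).
  python3 -m pip install --upgrade pip
  python3 -m pip install --user tf_nightly==2.3.0.dev20200528
  # Point the build at the installed tensorflow package directory; a non-empty
  # TENSORFLOW_AOT_PATH turns on LLVM_HAVE_TF_AOT and builds the XLA AOT runtime.
  cmake -G Ninja \
      -DTENSORFLOW_AOT_PATH=$HOME/.local/lib/python3.7/site-packages/tensorflow \
      /path/to/llvm-project/llvm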