diff --git a/llvm/CMakeLists.txt b/llvm/CMakeLists.txt
--- a/llvm/CMakeLists.txt
+++ b/llvm/CMakeLists.txt
@@ -850,6 +850,8 @@
   get_errc_messages(LLVM_LIT_ERRC_MESSAGES)
 endif()
 
+include(AddLLVM)
+
 # For up-to-date instructions for installing the Tensorflow dependency, refer to
 # the bot setup script: https://github.com/google/ml-compiler-opt/blob/main/buildbot/buildbot_init.sh
 # In this case, the latest C API library is available for download from
@@ -857,31 +859,25 @@
 # We will expose the conditional compilation variable,
 # LLVM_HAVE_TF_API, through llvm-config.h, so that a user of the LLVM library may
 # also leverage the dependency.
-set(TENSORFLOW_C_LIB_PATH "" CACHE PATH "Path to TensorFlow C library install")
-if (TENSORFLOW_C_LIB_PATH)
-  find_library(tensorflow_c_api tensorflow PATHS ${TENSORFLOW_C_LIB_PATH}/lib NO_DEFAULT_PATH REQUIRED)
-  # Currently, the protobuf headers are distributed with the pip package that corresponds to the version
-  # of the C API library.
-  find_library(tensorflow_fx tensorflow_framework PATHS ${TENSORFLOW_C_LIB_PATH}/lib NO_DEFAULT_PATH REQUIRED)
+set(TENSORFLOW_SRC_DIR "" CACHE PATH "Path to TensorFlow source")
+if (TENSORFLOW_SRC_DIR)
+  set(TFLITE_ENABLE_XNNPACK OFF)
+  # find_library(tflite_c tensorflowlite_c PATHS ${TENSORFLOW_SRC_DIR}/build NO_DEFAULT_PATH REQUIRED)
+  # find_library(tflite tensorflow-lite PATHS ${TENSORFLOW_SRC_DIR}/build/tensorflow-lite NO_DEFAULT_PATH REQUIRED)
+  # include_directories(${TENSORFLOW_SRC_DIR}/build/abseil-cpp)
+  # include_directories(${TENSORFLOW_SRC_DIR}/build/eigen)
+  # include_directories(${TENSORFLOW_SRC_DIR}/build/flatbuffers/include)
+  message(STATUS "about to")
+  set(LLVM_EXTERNAL_TFLITE_SOURCE_DIR "${TENSORFLOW_SRC_DIR}/tensorflow/lite")
+  set(LLVM_TOOL_TFLITE_BUILD On)
+  add_llvm_external_project(TFLITE)
+  # install(TARGETS tensorflow-lite EXPORT LLVMExports
+  #         ARCHIVE DESTINATION lib${LLVM_LIBDIR_SUFFIX} COMPONENT tensorflow-lite)
+  message(STATUS "done")
   set(LLVM_HAVE_TF_API "ON" CACHE BOOL "Full Tensorflow API available")
-  include_directories(${TENSORFLOW_C_LIB_PATH}/include)
-  if (NOT TF_PROTO_HEADERS)
-    message(STATUS "TF_PROTO_HEADERS not defined. Looking for tensorflow pip package.")
-    execute_process(COMMAND
-      ${Python3_EXECUTABLE} "-m" "pip" "show" "tensorflow"
-      OUTPUT_VARIABLE TF_PIP_OUT)
-    if ("${TF_PIP_OUT}" STREQUAL "")
-      message(FATAL ERROR "Tensorflow pip package is also required for 'development' mode (protobuf headers)")
-    endif()
-    string(REGEX MATCH "Location: ([^\n]*\n)" TF_PIP_LOC "${TF_PIP_OUT}")
-    string(REPLACE "Location: " "" TF_PIP ${TF_PIP_LOC})
-    string(STRIP ${TF_PIP} TF_PIP)
-    set(TF_PROTO_HEADERS "${TF_PIP}/tensorflow/include")
-  endif()
-  message(STATUS "Using Tensorflow headers under: ${TF_PROTO_HEADERS}")
-  include_directories(${TF_PROTO_HEADERS})
-  add_definitions("-DGOOGLE_PROTOBUF_NO_RTTI")
-  add_definitions("-D_GLIBCXX_USE_CXX11_ABI=0")
+  set(Protobuf_USE_STATIC_LIBS ON)
+  find_package(Protobuf REQUIRED)
+  include_directories(${Protobuf_INCLUDE_DIRS})
 endif()
 
 # For up-to-date instructions for installing the Tensorflow dependency, refer to
diff --git a/llvm/cmake/modules/TensorFlowCompile.cmake b/llvm/cmake/modules/TensorFlowCompile.cmake
--- a/llvm/cmake/modules/TensorFlowCompile.cmake
+++ b/llvm/cmake/modules/TensorFlowCompile.cmake
@@ -115,3 +115,18 @@
   set(MLLinkDeps ${MLLinkDeps} tf_xla_runtime ${GENERATED_OBJS} PARENT_SCOPE)
   add_definitions(-DLLVM_HAVE_TF_AOT_${fname_allcaps})
 endfunction()
+
+function(build_proto)
+  foreach (P ${ARGV})
+    set(PB_SRCS ${PB_SRCS} ${CMAKE_CURRENT_BINARY_DIR}/${P}.pb.cc)
+    set(PB_HDRS ${PB_HDRS} ${CMAKE_CURRENT_BINARY_DIR}/${P}.pb.h)
+    set(PBS ${PBS} ${TENSORFLOW_SRC_DIR}/${P}.proto)
+  endforeach()
+  add_custom_command(OUTPUT ${PB_SRCS} ${PB_HDRS}
+    COMMAND ${PROTOBUF_PROTOC_EXECUTABLE} --proto_path=${TENSORFLOW_SRC_DIR}
+      --cpp_out=${CMAKE_CURRENT_BINARY_DIR} ${PBS})
+  set_source_files_properties(${PB_SRCS} PROPERTIES
+    GENERATED 1)
+  set(GeneratedMLSources ${GeneratedMLSources} ${PB_SRCS} PARENT_SCOPE)
+  set(MLDeps ${MLDeps} ${GeneratedMLSources} PARENT_SCOPE)
+endfunction()
\ No newline at end of file
diff --git a/llvm/include/llvm/Analysis/Utils/TFUtils.h b/llvm/include/llvm/Analysis/Utils/TFUtils.h
--- a/llvm/include/llvm/Analysis/Utils/TFUtils.h
+++ b/llvm/include/llvm/Analysis/Utils/TFUtils.h
@@ -210,11 +210,11 @@
   /// Get a (const) pointer to the first element of the tensor at Index.
   template <typename T> T *getTensorValue(size_t Index) {
-    return static_cast<T *>(getUntypedTensorValue(Index));
+    return reinterpret_cast<T *>(getUntypedTensorValue(Index));
   }
 
   template <typename T> const T *getTensorValue(size_t Index) const {
-    return static_cast<const T *>(getUntypedTensorValue(Index));
+    return reinterpret_cast<const T *>(getUntypedTensorValue(Index));
   }
 
   /// Get a (const) pointer to the untyped data of the tensor.
@@ -248,7 +248,7 @@
   /// Provides access to the input vector.
   template <typename T> T *getInput(size_t Index) {
-    return static_cast<T *>(getUntypedInput(Index));
+    return reinterpret_cast<T *>(getUntypedInput(Index));
   }
 
   /// Returns true if the tensorflow model was loaded successfully, false
@@ -266,16 +266,16 @@
 /// - C++ type
 /// - enum name (implementation-specific)
 #define TFUTILS_SUPPORTED_TYPES(M) \
-  M(float, TF_FLOAT) \
-  M(double, TF_DOUBLE) \
-  M(int8_t, TF_INT8) \
-  M(uint8_t, TF_UINT8) \
-  M(int16_t, TF_INT16) \
-  M(uint16_t, TF_UINT16) \
-  M(int32_t, TF_INT32) \
-  M(uint32_t, TF_UINT32) \
-  M(int64_t, TF_INT64) \
-  M(uint64_t, TF_UINT64)
+  M(float, TfLiteType::kTfLiteFloat32) \
+  M(double, TfLiteType::kTfLiteFloat64) \
+  M(int8_t, TfLiteType::kTfLiteInt8) \
+  M(uint8_t, TfLiteType::kTfLiteUInt8) \
+  M(int16_t, TfLiteType::kTfLiteInt16) \
+  M(uint16_t, TfLiteType::kTfLiteInt16) \
+  M(int32_t, TfLiteType::kTfLiteInt32) \
+  M(uint32_t, TfLiteType::kTfLiteUInt32) \
+  M(int64_t, TfLiteType::kTfLiteInt64) \
+  M(uint64_t, TfLiteType::kTfLiteUInt64)
 
 #define TFUTILS_GETDATATYPE_DEF(T, E) \
   template <> int TensorSpec::getDataType<T>();
diff --git a/llvm/lib/Analysis/CMakeLists.txt b/llvm/lib/Analysis/CMakeLists.txt
--- a/llvm/lib/Analysis/CMakeLists.txt
+++ b/llvm/lib/Analysis/CMakeLists.txt
@@ -21,7 +21,19 @@
   endif()
 
   if (DEFINED LLVM_HAVE_TF_API)
-    list(APPEND MLLinkDeps ${tensorflow_c_api} ${tensorflow_fx})
+    build_proto(
+      tensorflow/core/protobuf/error_codes
+      tensorflow/core/example/feature
+      tensorflow/core/example/example)
+    # list(APPEND MLLinkDeps ${tflite} ${tflite_c} ${Protobuf_LIBRARIES}
+    #   ${TENSORFLOW_SRC_DIR}/build/_deps/ruy-build/libruy.a
+    #   ${TENSORFLOW_SRC_DIR}/build/_deps/farmhash-build/libfarmhash.a
+    #   ${TENSORFLOW_SRC_DIR}/build/_deps/flatbuffers-build/libflatbuffers.a
+    #   ${TENSORFLOW_SRC_DIR}/build/_deps/fft2d-build/libfft2d_fftsg.a
+    #   ${TENSORFLOW_SRC_DIR}/build/_deps/fft2d-build/libfft2d_fftsg2d.a
+    #   )
+    # include_directories(${TENSORFLOW_SRC_DIR})
+    list(APPEND MLLinkDeps tensorflow-lite ${Protobuf_LIBRARIES})
   endif()
 endif()
diff --git a/llvm/lib/Analysis/TFUtils.cpp b/llvm/lib/Analysis/TFUtils.cpp
--- a/llvm/lib/Analysis/TFUtils.cpp
+++ b/llvm/lib/Analysis/TFUtils.cpp
@@ -25,9 +25,12 @@
 #include "google/protobuf/struct.pb.h"
 #include "google/protobuf/text_format.h"
-#include "tensorflow/c/c_api.h"
-#include "tensorflow/c/c_api_experimental.h"
 #include "tensorflow/core/example/example.pb.h"
+#include "tensorflow/lite/interpreter.h"
+#include "tensorflow/lite/kernels/register.h"
+#include "tensorflow/lite/model.h"
+#include "tensorflow/lite/model_builder.h"
+#include "tensorflow/lite/op_resolver.h"
 
 #include
 #include
@@ -39,42 +42,7 @@
 static cl::opt<bool>
     ProtobufTextMode("tfutils-text-log", cl::init(false), cl::Hidden,
                      cl::desc("Output textual (human-readable) protobuf."));
-
 namespace {
-
-using TFGraphPtr = std::unique_ptr<TF_Graph, decltype(&TF_DeleteGraph)>;
-using TFSessionOptionsPtr =
-    std::unique_ptr<TF_SessionOptions, decltype(&TF_DeleteSessionOptions)>;
-using TFStatusPtr = std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)>;
-
-struct TFInitializer {
-  TFInitializer() {
-    assert(!IsInitialized && "TFInitialized should be called only once");
-    int Argc = 1;
-    const char *Name = "";
-    const char **NamePtr = &Name;
-    TF_InitMain(Name, &Argc, const_cast<char ***>(&NamePtr));
-    IsInitialized = true;
-  }
-  bool IsInitialized = false;
-};
-
-llvm::ManagedStatic<TFInitializer> TFLibInitializer;
-
-bool ensureInitTF() { return TFLibInitializer->IsInitialized; }
-
-TFGraphPtr createTFGraph() {
-  return TFGraphPtr(TF_NewGraph(), &TF_DeleteGraph);
-}
-
-TFStatusPtr createTFStatus() {
-  return TFStatusPtr(TF_NewStatus(), &TF_DeleteStatus);
-}
-
-TFSessionOptionsPtr createTFSessionOptions() {
-  return TFSessionOptionsPtr(TF_NewSessionOptions(), &TF_DeleteSessionOptions);
-}
-
 void serialize(const Message &SE, std::string *OutStr) {
   if (ProtobufTextMode) {
     TextFormat::PrintToString(SE, OutStr);
@@ -82,31 +50,27 @@
     *OutStr = SE.SerializeAsString();
   }
 }
-} // namespace
 
 namespace llvm {
 
 class EvaluationResultImpl {
 public:
-  EvaluationResultImpl(size_t OutputSize)
-      : OutputSize(OutputSize), Output(OutputSize){};
-
-  ~EvaluationResultImpl() {
-    for (auto *P : Output)
-      if (P)
-        TF_DeleteTensor(P);
-  }
+  EvaluationResultImpl(const std::vector<const TfLiteTensor *> &Outputs)
+      : Outputs(Outputs){};
+  const TfLiteTensor *getOutput(size_t I) { return Outputs[I]; }
 
   EvaluationResultImpl(const EvaluationResultImpl &) = delete;
   EvaluationResultImpl(EvaluationResultImpl &&Other) = delete;
 
-  std::vector<TF_Tensor *> &getOutput() { return Output; }
-
 private:
-  const size_t OutputSize;
-  std::vector<TF_Tensor *> Output;
+  const std::vector<const TfLiteTensor *> &Outputs;
 };
 
 size_t TensorSpec::getElementByteSize() const {
-  return TF_DataTypeSize(static_cast<TF_DataType>(TypeIndex));
+  size_t Size = 0;
+  if (tflite::GetSizeOfType(nullptr, static_cast<TfLiteType>(TypeIndex),
+                            &Size) != TfLiteStatus::kTfLiteOk)
+    return 0;
+  return Size;
 }
 
 TensorSpec::TensorSpec(const std::string &Name, int Port, int TypeIndex,
@@ -158,7 +122,7 @@
   SmallVector<char, 128> OutputSpecsPath;
   StringRef FileName = SpecFileOverride;
   if (FileName.empty()) {
-    llvm::sys::path::append(OutputSpecsPath, ModelPath, "output_spec.json");
+    llvm::sys::path::append(OutputSpecsPath, "/tmp", "output_spec.json");
     FileName = {OutputSpecsPath.data(), OutputSpecsPath.size()};
   }
 
@@ -223,40 +187,32 @@
                        size_t OutputSpecsSize, const char *Tags);
 
   bool isValid() const { return IsValid; }
-  size_t OutputSize() const { return OutputFeed.size(); }
+  size_t OutputSize() const { return Output.size(); }
 
-  void evaluate(TF_Tensor **Output, TF_Status *Status) {
-    TF_SessionRun(Session, nullptr, InputFeed.data(), Input.data(),
-                  Input.size(), OutputFeed.data(), Output, OutputFeed.size(),
-                  nullptr, 0, nullptr, Status);
+  std::unique_ptr<EvaluationResultImpl> evaluate() {
+    Interpreter->Invoke();
+    return std::make_unique<EvaluationResultImpl>(Output);
   }
 
-  void initInput(size_t Index, TF_DataType Type,
-                 const std::vector<int64_t> &Dimensions);
-  const std::vector<TF_Tensor *> &getInput() const { return Input; }
+  const std::vector<TfLiteTensor *> &getInput() const { return Input; }
 
   ~TFModelEvaluatorImpl();
 
 private:
+  std::unique_ptr<tflite::FlatBufferModel> Model;
+
   /// The objects necessary for carrying out an evaluation of the SavedModel.
   /// They are expensive to set up, and we maintain them accross all the
   /// evaluations of the model.
-  TF_Session *Session = nullptr;
-  TFGraphPtr Graph;
-  TFSessionOptionsPtr Options;
+  std::unique_ptr<tflite::Interpreter> Interpreter;
 
-  /// The specification of the input nodes.
-  std::vector<TF_Output> InputFeed;
+  /// The input tensors. We set up the tensors once and just mutate their
+  /// scalars before each evaluation. The input tensors keep their value after
+  /// an evaluation.
+  std::vector<TfLiteTensor *> Input;
 
-  /// The input tensors. They must match by index of the corresponding InputFeed
-  /// value. We set up the tensors once and just mutate theirs scalars before
-  /// each evaluation. The input tensors keep their value after an evaluation.
-  std::vector<TF_Tensor *> Input;
-
-  /// The specification of the output nodes. When evaluating, the tensors in the
-  /// output tensor vector must match by index the corresponding element in the
-  /// OutputFeed.
-  std::vector<TF_Output> OutputFeed;
+  /// The output nodes.
+  std::vector<const TfLiteTensor *> Output;
 
   void invalidate() { IsValid = false; }
 
@@ -264,8 +220,8 @@
 
   /// Reusable utility for ensuring we can bind the requested Name to a node in
   /// the SavedModel Graph.
-  bool checkReportAndInvalidate(const TF_Output &Output,
-                                const TensorSpec &OutputSpec);
+  bool checkReportAndInvalidate(const TfLiteTensor *Tensor,
+                                const TensorSpec &Spec);
 };
 
 class LoggerDataImpl {
@@ -367,38 +323,41 @@
     StringRef SavedModelPath, const std::vector<TensorSpec> &InputSpecs,
     function_ref<TensorSpec(size_t)> GetOutputSpecs, size_t OutputSpecsSize,
     const char *Tags = "serve")
-    : Graph(createTFGraph()), Options(createTFSessionOptions()),
-      InputFeed(InputSpecs.size()), Input(InputSpecs.size()),
-      OutputFeed(OutputSpecsSize) {
-  if (!ensureInitTF()) {
-    errs() << "Tensorflow should have been initialized";
+    : Input(InputSpecs.size()), Output(OutputSpecsSize) {
+  tflite::StderrReporter ErrorReporter;
+  Model = tflite::FlatBufferModel::BuildFromFile(SavedModelPath.str().c_str(),
+                                                 &ErrorReporter);
+  if (!Model) {
     return;
   }
-  auto Status = createTFStatus();
+  tflite::ops::builtin::BuiltinOpResolver Resolver;
+  tflite::InterpreterBuilder Builder(*Model, Resolver);
+  Builder(&Interpreter);
 
-  Session = TF_LoadSessionFromSavedModel(Options.get(), nullptr,
-                                         SavedModelPath.str().c_str(), &Tags, 1,
-                                         Graph.get(), nullptr, Status.get());
-  if (TF_GetCode(Status.get()) != TF_Code::TF_OK) {
-    errs() << TF_Message(Status.get());
+  if (!Interpreter ||
+      Interpreter->AllocateTensors() != TfLiteStatus::kTfLiteOk) {
     invalidate();
+    return;
   }
+  StringMap<int> InputsMap;
+  StringMap<int> OutputsMap;
+  for (size_t I = 0; I < Interpreter->inputs().size(); ++I)
+    InputsMap[Interpreter->GetInputName(I)] = I;
+  for (size_t I = 0; I < Interpreter->outputs().size(); ++I)
+    OutputsMap[Interpreter->GetOutputName(I)] = I;
   for (size_t I = 0; I < InputSpecs.size(); ++I) {
     auto &InputSpec = InputSpecs[I];
-    InputFeed[I] = {
-        TF_GraphOperationByName(Graph.get(), (InputSpec.name()).c_str()),
-        InputSpec.port()};
-    if (!checkReportAndInvalidate(InputFeed[I], InputSpec))
+    Input[I] = Interpreter->tensor(
+        InputsMap[InputSpec.name() + ":" + std::to_string(InputSpec.port())]);
+    if (!checkReportAndInvalidate(Input[I], InputSpec))
       return;
-    initInput(I, static_cast<TF_DataType>(InputSpec.typeIndex()),
-              InputSpec.shape());
   }
   for (size_t I = 0; I < OutputSpecsSize; ++I) {
     auto OutputSpec = GetOutputSpecs(I);
-    OutputFeed[I] = {
-        TF_GraphOperationByName(Graph.get(), (OutputSpec.name()).c_str()),
-        OutputSpec.port()};
-    if (!checkReportAndInvalidate(OutputFeed[I], OutputSpec))
+    Output[I] = Interpreter->output_tensor(
+        OutputsMap[OutputSpec.name() + ":" +
+                   std::to_string(OutputSpec.port())]);
+    if (!checkReportAndInvalidate(Output[I], OutputSpec))
       return;
   }
 }
@@ -421,56 +380,25 @@
         SavedModelPath, InputSpecs, [&](size_t I) { return OutputSpecs[I]; },
         OutputSpecs.size(), Tags) {}
 
-TFModelEvaluatorImpl::~TFModelEvaluatorImpl() {
-  for (auto *T : Input) {
-    TF_DeleteTensor(T);
-  }
-  if (Session == nullptr)
-    return;
-  auto Status = createTFStatus();
-  TF_DeleteSession(Session, Status.get());
-  Session = nullptr;
-  if (TF_GetCode(Status.get()) != TF_Code::TF_OK)
-    errs() << "Could not delete TF session";
-}
+TFModelEvaluatorImpl::~TFModelEvaluatorImpl() {}
 
-bool TFModelEvaluatorImpl::checkReportAndInvalidate(
-    const TF_Output &Output, const TensorSpec &OutputSpec) {
-  if (Output.oper)
-    return true;
-  errs() << "Could not find TF_Output named: " + OutputSpec.name();
-  IsValid = false;
+bool TFModelEvaluatorImpl::checkReportAndInvalidate(const TfLiteTensor *Tensor,
+                                                    const TensorSpec &Spec) {
+  if (!Tensor) {
+    errs() << "Could not find TF_Output named: " + Spec.name();
+    IsValid = false;
+  }
   return IsValid;
 }
 
 Optional<TFModelEvaluator::EvaluationResult> TFModelEvaluator::evaluate() {
   if (!isValid())
     return None;
-  std::unique_ptr<EvaluationResultImpl> Ret =
-      std::make_unique<EvaluationResultImpl>(Impl->OutputSize());
-  auto Status = createTFStatus();
-  Impl->evaluate(Ret->getOutput().data(), Status.get());
-  if (TF_GetCode(Status.get()) != TF_Code::TF_OK) {
-    errs() << TF_Message(Status.get());
-    Impl.reset();
-    return None;
-  }
-  return EvaluationResult(std::move(Ret));
-}
-
-void TFModelEvaluatorImpl::initInput(size_t Index, TF_DataType Type,
-                                     const std::vector<int64_t> &Dimensions) {
-  int64_t TotalSize = TF_DataTypeSize(Type);
-  for (auto &D : Dimensions)
-    TotalSize *= D;
-
-  Input[Index] =
-      TF_AllocateTensor(Type, Dimensions.data(), Dimensions.size(), TotalSize);
-  std::memset(TF_TensorData(Input[Index]), 0, TotalSize);
+  return TFModelEvaluator::EvaluationResult(Impl->evaluate());
 }
 
 void *TFModelEvaluator::getUntypedInput(size_t Index) {
-  return TF_TensorData(Impl->getInput()[Index]);
+  return Impl->getInput()[Index]->data.data;
 }
 
 TFModelEvaluator::EvaluationResult::EvaluationResult(
@@ -487,12 +415,12 @@
 }
 
 void *TFModelEvaluator::EvaluationResult::getUntypedTensorValue(size_t Index) {
-  return TF_TensorData(Impl->getOutput()[Index]);
+  return Impl->getOutput(Index)->data.data;
 }
 
 const void *
 TFModelEvaluator::EvaluationResult::getUntypedTensorValue(size_t Index) const {
-  return TF_TensorData(Impl->getOutput()[Index]);
+  return Impl->getOutput(Index)->data.data;
 }
 
 #define TFUTILS_GETDATATYPE_IMPL(T, E) \
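
Usage note (caller side, not part of the diff): with TENSORFLOW_SRC_DIR set at configure time, LLVM_HAVE_TF_API is defined and LLVMAnalysis links against the in-tree tensorflow-lite target, so TFModelEvaluator now loads a TFLite flatbuffer rather than a TF SavedModel through the C API. The sketch below is a minimal, hypothetical client of the unchanged TFModelEvaluator interface; the TensorSpec::createSpec<T>(Name, Shape) factory, the tensor names ("input_feature", "output_score"), and the model path are assumptions for illustration, not part of this patch.

#include "llvm/ADT/Optional.h"
#include "llvm/Analysis/Utils/TFUtils.h"
#include <vector>

using namespace llvm;

bool runModelOnce(const char *ModelPath) {
  // Specs must match the names/shapes of the tensors baked into the model.
  std::vector<TensorSpec> InputSpecs{
      TensorSpec::createSpec<int64_t>("input_feature", {1})};
  std::vector<TensorSpec> OutputSpecs{
      TensorSpec::createSpec<float>("output_score", {1})};

  // With this patch, ModelPath names a TFLite flatbuffer on disk.
  TFModelEvaluator Evaluator(ModelPath, InputSpecs, OutputSpecs);
  if (!Evaluator.isValid())
    return false;

  // Input buffers are owned by the interpreter; mutate them in place.
  *Evaluator.getInput<int64_t>(0) = 42;

  Optional<TFModelEvaluator::EvaluationResult> Result = Evaluator.evaluate();
  if (!Result)
    return false;
  return *Result->getTensorValue<float>(0) > 0.0f;
}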