diff --git a/llvm/include/llvm/Analysis/InlineAdvisor.h b/llvm/include/llvm/Analysis/InlineAdvisor.h
--- a/llvm/include/llvm/Analysis/InlineAdvisor.h
+++ b/llvm/include/llvm/Analysis/InlineAdvisor.h
@@ -208,6 +208,12 @@
 getReleaseModeAdvisor(Module &M, ModuleAnalysisManager &MAM);
 #endif
 
+#ifdef LLVM_HAVE_TF_API
+std::unique_ptr<InlineAdvisor>
+getDevelopmentModeAdvisor(Module &M, ModuleAnalysisManager &MAM,
+                          std::function<bool(CallBase &)> GetDefaultAdvice);
+#endif
+
 // Default (manual policy) decision making helper APIs. Shared with the legacy
 // pass manager inliner.
 
diff --git a/llvm/lib/Analysis/CMakeLists.txt b/llvm/lib/Analysis/CMakeLists.txt
--- a/llvm/lib/Analysis/CMakeLists.txt
+++ b/llvm/lib/Analysis/CMakeLists.txt
@@ -1,6 +1,9 @@
 set(CommonMLSources MLInlineAdvisor.cpp)
 set(ReleaseModeMLSources ReleaseModeModelRunner.cpp)
-set(DevelopmentModeMLSources TFUtils.cpp)
+set(DevelopmentModeMLSources
+  DevelopmentModeInlineAdvisor.cpp
+  TFUtils.cpp
+  )
 
 if (DEFINED LLVM_HAVE_TF_AOT OR DEFINED LLVM_HAVE_TF_API)
   set(MLPolicySources ${CommonMLSources})
diff --git a/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp b/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp
new file mode 100644
--- /dev/null
+++ b/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp
@@ -0,0 +1,447 @@
+//===- DevelopmentModeInlineAdvisor.cpp - runtime-loadable model runner --===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM
+// Exceptions. See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a model runner using TensorFlow C APIs, allowing the
+// loading of a model from a command line option.
+//
+//===----------------------------------------------------------------------===//
+#include "llvm/Analysis/CallGraph.h"
+#include "llvm/Analysis/InlineSizeEstimatorAnalysis.h"
+#include "llvm/Analysis/MLInlineAdvisor.h"
+#include "llvm/Analysis/Utils/TFUtils.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/ManagedStatic.h"
+
+#include "tensorflow/c/c_api.h"
+
+#include <vector>
+
+using namespace llvm;
+
+static cl::opt<std::string> TrainingLog(
+    "training-log", cl::Hidden,
+    cl::desc("Path where the development-mode inlining log is saved."));
+
+static cl::opt<std::string> TFTrainedModelPath(
+    "ml-inliner-trained-model", cl::Hidden,
+    cl::desc(
+        "Path to saved model to use as policy during this training session."));
+
+static cl::opt<std::string> TFFeedPrefix(
+    "ml-inliner-trained-model-feed-prefix", cl::Hidden, cl::init("action_"),
+    cl::desc("Prefix for feature names."));
+
+static cl::opt<std::string> TFDecisionName(
+    "ml-inliner-trained-model-decision-name", cl::Hidden,
+    cl::init("StatefulPartitionedCall"),
+    cl::desc("Name of the graph operation representing the decision."));
+
+namespace {
+/// An InlineEvent, used by TrainingLogger.
+struct InlineEvent {
+  /// What the default policy's decision would have been.
+  bool DefaultDecision = false;
+
+  /// What we advised. When training off the default policy, this is the same
+  /// as DefaultDecision.
+  bool AdvisedDecision = false;
+
+  /// What actually happened. This would be 'false' in the case of an inline
+  /// error, even if AdvisedDecision were true; otherwise it agrees with
+  /// AdvisedDecision.
+  bool Effect = false;
+
+  /// What the change in size was: size_after - size_before.
+  int64_t Reward = 0;
+};
+
+/// Collect data we may use for training a model, and write it as a textual
+/// TensorFlow SequenceExample
+/// (https://www.tensorflow.org/api_docs/python/tf/train/SequenceExample)
+/// protobuf (https://developers.google.com/protocol-buffers).
+/// Because this is a protobuf, we cannot just stream the events as they come.
+/// Internally, TrainingLogger stores data in column-major format, because
+/// that lines up with how TF SequenceExample represents it.
+class TrainingLogger final {
+public:
+  TrainingLogger() {
+    for (size_t I = 0; I < NumberOfFeatures; ++I) {
+      Features.push_back(InlineFeatures());
+    }
+  }
+
+  /// Log one inlining event.
+  void logInlineEvent(const InlineEvent &Event,
+                      const MLModelRunner &ModelRunner) {
+    for (size_t I = 0; I < NumberOfFeatures; ++I) {
+      Features[I].push_back(ModelRunner.getFeature(I));
+    }
+    Decisions.push_back(Event.AdvisedDecision);
+    Effects.push_back(Event.Effect);
+    Rewards.push_back(Event.Reward);
+    DefaultDecisions.push_back(Event.DefaultDecision);
+  }
+
+  void printTensor(raw_fd_ostream &OutFile) {
+    if (DefaultDecisions.empty())
+      return;
+    OutFile << "feature_lists: {\n";
+
+    for (size_t I = 0; I < Features.size(); I++) {
+      writeTensor(OutFile, FeatureNameMap.at(I), Features[I]);
+    }
+    writeTensor(OutFile, DefaultDecisionName, DefaultDecisions);
+    writeTensor(OutFile, DecisionName, Decisions);
+    writeTensor(OutFile, RewardName, Rewards);
+
+    OutFile << "}\n";
+  }
+
+private:
+  template <typename T>
+  void writeTensor(raw_fd_ostream &OutFile, StringRef TensorName,
+                   const std::vector<T> &Tensor) {
+    OutFile << "  feature_list: {\n";
+    OutFile << "    key: "
+            << "\"" << TensorName << "\" ";
+    OutFile << "value: {\n";
+    for (const auto &Feature : Tensor) {
+      OutFile << "      feature: { int64_list: { value: [" << Feature
+              << "] } }\n";
+    }
+    OutFile << "    }\n";
+    OutFile << "  }\n";
+  }
+
+  std::vector<InlineFeatures> Features;
+  std::vector<int64_t> DefaultDecisions;
+  std::vector<int64_t> Decisions;
+  std::vector<bool> Effects;
+  std::vector<int64_t> Rewards;
+  std::vector<bool> Mandatory;
+};
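+
+// For orientation, a log produced by printTensor() looks like the sketch
+// below: one feature_list per column, one feature entry per event. The keys
+// shown are the real serialization names; the values are made up.
+//
+//   feature_lists: {
+//     feature_list: {
+//       key: "inlining_decision" value: {
+//         feature: { int64_list: { value: [1] } }
+//         feature: { int64_list: { value: [0] } }
+//       }
+//     }
+//     feature_list: {
+//       key: "delta_size" value: {
+//         feature: { int64_list: { value: [-10] } }
+//         feature: { int64_list: { value: [0] } }
+//       }
+//     }
+//   }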
+
+/// An extension of the MLInlineAdvisor which can also produce training logs,
+/// and can fall back to the default policy when we need to produce training
+/// logs from it.
+class DevelopmentModeMLInlineAdvisor : public MLInlineAdvisor {
+public:
+  DevelopmentModeMLInlineAdvisor(
+      Module &M, ModuleAnalysisManager &MAM,
+      std::unique_ptr<MLModelRunner> ModelRunner,
+      std::function<bool(CallBase &)> GetDefaultAdvice, bool IsDoingInference);
+
+  size_t getTotalSizeEstimate();
+
+  virtual ~DevelopmentModeMLInlineAdvisor();
+  void updateNativeSizeEstimate(int64_t Change) { CurrentNativeSize += Change; }
+  void resetNativeSize(Function *F) {
+    FAM.invalidate<InlineSizeEstimatorAnalysis>(*F);
+  }
+
+  std::unique_ptr<MLInlineAdvice>
+  getMandatoryAdvice(CallBase &CB, OptimizationRemarkEmitter &ORE) override;
+  std::unique_ptr<MLInlineAdvice>
+  getAdviceFromModel(CallBase &CB, OptimizationRemarkEmitter &ORE) override;
+
+  size_t getNativeSizeEstimate(const Function &F) const;
+
+private:
+  bool isLogging() const { return !TrainingLog.empty(); }
+
+  std::function<bool(CallBase &)> GetDefaultAdvice;
+  TrainingLogger Logger;
+  const bool IsDoingInference;
+
+  const int32_t InitialNativeSize;
+  int32_t CurrentNativeSize = 0;
+};
+
+/// A variant of MLInlineAdvice that tracks all non-trivial inlining
+/// decisions, for training/logging.
+class LoggingMLInlineAdvice : public MLInlineAdvice {
+public:
+  LoggingMLInlineAdvice(DevelopmentModeMLInlineAdvisor *Advisor, CallBase &CB,
+                        OptimizationRemarkEmitter &ORE, bool Recommendation,
+                        TrainingLogger &Logger,
+                        size_t CallerSizeEstimateBefore,
+                        size_t CalleeSizeEstimateBefore, bool DefaultDecision)
+      : MLInlineAdvice(Advisor, CB, ORE, Recommendation), Logger(Logger),
+        CallerSizeEstimateBefore(CallerSizeEstimateBefore),
+        CalleeSizeEstimateBefore(CalleeSizeEstimateBefore),
+        DefaultDecision(DefaultDecision) {}
+
+  virtual ~LoggingMLInlineAdvice() = default;
+
+private:
+  DevelopmentModeMLInlineAdvisor *getAdvisor() const {
+    return static_cast<DevelopmentModeMLInlineAdvisor *>(Advisor);
+  }
+  void recordInliningImpl() override {
+    MLInlineAdvice::recordInliningImpl();
+    getAdvisor()->resetNativeSize(Caller);
+    int Reward = std::numeric_limits<int>::max();
+    if (!getAdvisor()->isForcedToStop()) {
+      int NativeSizeAfter = getAdvisor()->getNativeSizeEstimate(*Caller) +
+                            CalleeSizeEstimateBefore;
+      Reward = NativeSizeAfter -
+               (CallerSizeEstimateBefore + CalleeSizeEstimateBefore);
+      getAdvisor()->updateNativeSizeEstimate(Reward);
+    }
+    log(Reward, /*Success=*/true);
+  }
+
+  void recordInliningWithCalleeDeletedImpl() override {
+    MLInlineAdvice::recordInliningWithCalleeDeletedImpl();
+    getAdvisor()->resetNativeSize(Caller);
+    if (!getAdvisor()->isForcedToStop()) {
+      int NativeSizeAfter = getAdvisor()->getNativeSizeEstimate(*Caller);
+      int Reward = NativeSizeAfter -
+                   (CallerSizeEstimateBefore + CalleeSizeEstimateBefore);
+      getAdvisor()->updateNativeSizeEstimate(Reward);
+      log(Reward, /*Success=*/true);
+    }
+  }
+
+  void recordUnsuccessfulInliningImpl(const InlineResult &Result) override {
+    MLInlineAdvice::recordUnsuccessfulInliningImpl(Result);
+    log(NoReward, /*Success=*/false);
+  }
+
+  void recordUnattemptedInliningImpl() override {
+    MLInlineAdvice::recordUnattemptedInliningImpl();
+    log(NoReward, /*Success=*/false);
+  }
+
+  void log(int64_t Reward, bool Success) {
+    InlineEvent Event;
+    Event.AdvisedDecision = isInliningRecommended();
+    Event.DefaultDecision = DefaultDecision;
+    Event.Effect = Success;
+    Event.Reward = Reward;
+    Logger.logInlineEvent(Event, getAdvisor()->getModelRunner());
+  }
+
+  static const int64_t NoReward = 0;
+  TrainingLogger &Logger;
+  const size_t CallerSizeEstimateBefore;
+  const size_t CalleeSizeEstimateBefore;
+  const bool DefaultDecision;
+};
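+
+// Worked example of the reward bookkeeping above (the sizes are
+// hypothetical): if the caller was estimated at 100 bytes and the callee at
+// 30 bytes before inlining, and the caller re-estimates to 120 bytes
+// afterwards (with the callee deleted), the logged reward is
+// 120 - (100 + 30) = -10. Since the reward is "size_after - size_before",
+// negative values mean the decision shrank the code; the int max sentinel
+// marks decisions recorded after the size budget was exceeded.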
+
+/// A pseudo model runner. We use it to store feature values when collecting
+/// logs for the default policy, but never ask it to 'run'.
+class NoInferenceModelRunner : public MLModelRunner {
+public:
+  NoInferenceModelRunner(LLVMContext &Ctx)
+      : MLModelRunner(Ctx), Features(NumberOfFeatures) {}
+  void setFeature(FeatureIndex Index, int64_t Value) override {
+    Features[static_cast<size_t>(Index)] = Value;
+  }
+
+  int64_t getFeature(int Index) const override { return Features[Index]; }
+  bool run() override {
+    llvm_unreachable("We shouldn't call run on this model runner.");
+  }
+
+private:
+  InlineFeatures Features;
+};
+
+/// DynamicModelRunner - training mode implementation. It uses TF C APIs to
+/// dynamically load and evaluate a TF SavedModel. Runtime performance is
+/// sacrificed for ease of use while training.
+class DynamicModelRunner final : public MLModelRunner {
+public:
+  DynamicModelRunner(LLVMContext &Ctx, const std::string &ModelPath);
+
+  bool run() override;
+
+  // Disallow copy and assign.
+  DynamicModelRunner(const DynamicModelRunner &) = delete;
+  DynamicModelRunner &operator=(const DynamicModelRunner &) = delete;
+
+  void setFeature(FeatureIndex Index, int64_t Value) override;
+  int64_t getFeature(int Index) const override;
+  bool isValid() const { return !!Evaluator; }
+
+private:
+  std::unique_ptr<TFModelEvaluator> Evaluator;
+
+  // The training framework needs some additional features that just need to
+  // be set to 0.
+  struct TensorSpec {
+    std::string Name;
+    TF_DataType Type;
+  };
+
+  const std::vector<TensorSpec> TrainingOnlyFeatures{
+      {"inlining_default", TF_INT64},
+      {"discount", TF_FLOAT},
+      {"reward", TF_FLOAT},
+      {"step_type", TF_INT32}};
+};
+} // namespace
+
+DevelopmentModeMLInlineAdvisor::DevelopmentModeMLInlineAdvisor(
+    Module &M, ModuleAnalysisManager &MAM,
+    std::unique_ptr<MLModelRunner> ModelRunner,
+    std::function<bool(CallBase &)> GetDefaultAdvice, bool IsDoingInference)
+    : MLInlineAdvisor(M, MAM, std::move(ModelRunner)),
+      GetDefaultAdvice(GetDefaultAdvice), IsDoingInference(IsDoingInference),
+      InitialNativeSize(isLogging() ? getTotalSizeEstimate() : 0),
+      CurrentNativeSize(InitialNativeSize) {
+  // We cannot have the case of neither inference nor logging.
+  assert(IsDoingInference || isLogging());
+}
+
+DevelopmentModeMLInlineAdvisor::~DevelopmentModeMLInlineAdvisor() {
+  if (TrainingLog.empty())
+    return;
+  std::error_code ErrorCode;
+  raw_fd_ostream OutFile(TrainingLog, ErrorCode);
+  Logger.printTensor(OutFile);
+}
+
+size_t
+DevelopmentModeMLInlineAdvisor::getNativeSizeEstimate(const Function &F) const {
+  auto &R =
+      FAM.getResult<InlineSizeEstimatorAnalysis>(const_cast<Function &>(F));
+  if (!R) {
+    F.getParent()->getContext().emitError(
+        "Native size estimator is not present.");
+    return 0;
+  }
+  return *R;
+}
+
+std::unique_ptr<MLInlineAdvice>
+DevelopmentModeMLInlineAdvisor::getMandatoryAdvice(
+    CallBase &CB, OptimizationRemarkEmitter &ORE) {
+  if (!isLogging())
+    return MLInlineAdvisor::getMandatoryAdvice(CB, ORE);
+  return std::make_unique<LoggingMLInlineAdvice>(
+      /*Advisor=*/this,
+      /*CB=*/CB, /*ORE=*/ORE, /*Recommendation=*/true, /*Logger=*/Logger,
+      /*CallerSizeEstimateBefore=*/getNativeSizeEstimate(*CB.getCaller()),
+      /*CalleeSizeEstimateBefore=*/
+      getNativeSizeEstimate(*CB.getCalledFunction()),
+      /*DefaultDecision=*/true);
+}
+
+std::unique_ptr<MLInlineAdvice>
+DevelopmentModeMLInlineAdvisor::getAdviceFromModel(
+    CallBase &CB, OptimizationRemarkEmitter &ORE) {
+  if (IsDoingInference && !isLogging())
+    return MLInlineAdvisor::getAdviceFromModel(CB, ORE);
+
+  bool DefaultAdvice = GetDefaultAdvice(CB);
+  auto Recommendation = IsDoingInference ? ModelRunner->run() : DefaultAdvice;
+  return std::make_unique<LoggingMLInlineAdvice>(
+      /*Advisor=*/this,
+      /*CB=*/CB, /*ORE=*/ORE, /*Recommendation=*/Recommendation,
+      /*Logger=*/Logger,
+      /*CallerSizeEstimateBefore=*/getNativeSizeEstimate(*CB.getCaller()),
+      /*CalleeSizeEstimateBefore=*/
+      getNativeSizeEstimate(*CB.getCalledFunction()),
+      /*DefaultDecision=*/DefaultAdvice);
+}
+
+size_t DevelopmentModeMLInlineAdvisor::getTotalSizeEstimate() {
+  size_t Ret = 0;
+  for (auto &F : M) {
+    if (F.isDeclaration())
+      continue;
+    if (isFunctionDeleted(&F))
+      continue;
+    Ret += getNativeSizeEstimate(F);
+  }
+  return Ret;
+}
+
+DynamicModelRunner::DynamicModelRunner(LLVMContext &Ctx,
+                                       const std::string &ModelPath)
+    : MLModelRunner(Ctx) {
+  std::vector<std::string> InputNames;
+  std::vector<std::string> OutputNames;
+  for (size_t I = 0; I < NumberOfFeatures; ++I)
+    InputNames.push_back(TFFeedPrefix + FeatureNameMap[I]);
+  for (size_t I = 0; I < TrainingOnlyFeatures.size(); ++I)
+    InputNames.push_back(TFFeedPrefix + TrainingOnlyFeatures[I].Name);
+  OutputNames.push_back(TFDecisionName);
+
+  Evaluator =
+      std::make_unique<TFModelEvaluator>(ModelPath, InputNames, OutputNames);
+  if (!Evaluator || !Evaluator->isValid()) {
+    Ctx.emitError("Failed to create inliner saved model evaluator");
+    Evaluator.reset();
+    return;
+  }
+
+  static const std::vector<int64_t> Dim{1};
+
+  size_t InputIndex = 0;
+  for (; InputIndex < NumberOfFeatures; ++InputIndex) {
+    Evaluator->initInput(InputIndex, TF_INT64, Dim);
+  }
+
+  for (; InputIndex < Evaluator->getInput().size(); ++InputIndex) {
+    auto TFType = TrainingOnlyFeatures[InputIndex - NumberOfFeatures].Type;
+    Evaluator->initInput(InputIndex, TFType, Dim);
+  }
+}
+
+bool DynamicModelRunner::run() {
+  auto ER = Evaluator->evaluate();
+  if (!ER.hasValue()) {
+    Ctx.emitError("Error evaluating model.");
+    return false;
+  }
+  int64_t Decision = *ER->getTensorValue<int64_t>(0);
+  return static_cast<bool>(Decision);
+}
+
+int64_t DynamicModelRunner::getFeature(int Index) const {
+  return *static_cast<int64_t *>(TF_TensorData(Evaluator->getInput()[Index]));
+}
+
+void DynamicModelRunner::setFeature(FeatureIndex Index, int64_t Value) {
+  size_t NumericIndex = static_cast<size_t>(Index);
+  *(static_cast<int64_t *>(
+      TF_TensorData(Evaluator->getInput()[NumericIndex]))) = Value;
+}
+
+std::unique_ptr<InlineAdvisor> llvm::getDevelopmentModeAdvisor(
+    Module &M, ModuleAnalysisManager &MAM,
+    std::function<bool(CallBase &)> GetDefaultAdvice) {
+  auto &Ctx = M.getContext();
+  if (TrainingLog.empty() !=
+      !InlineSizeEstimatorAnalysis::isEvaluatorRequested()) {
+    Ctx.emitError("For development mode, if training logs are requested, then "
+                  "a size estimator must be available; either that, or "
+                  "neither should be specified.");
+    return nullptr;
+  }
+
+  std::unique_ptr<MLModelRunner> Runner;
+
+  bool DoesInference = false;
+  if (TFTrainedModelPath.empty())
+    Runner.reset(new NoInferenceModelRunner(Ctx));
+  else {
+    auto TFRunner =
+        std::make_unique<DynamicModelRunner>(Ctx, TFTrainedModelPath);
+    // make_unique never returns null; failure is signaled via isValid().
+    if (!TFRunner->isValid()) {
+      Ctx.emitError("Could not load the policy model from the provided path");
+      return nullptr;
+    }
+    Runner = std::move(TFRunner);
+    DoesInference = true;
+  }
+  return std::make_unique<DevelopmentModeMLInlineAdvisor>(
+      M, MAM, std::move(Runner), GetDefaultAdvice, DoesInference);
+}
\ No newline at end of file
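
Usage sketch tying the flags above together (the paths are placeholders; the
tests below carry the concrete RUN lines):

  opt -passes=scc-oz-module-inliner -enable-ml-inliner=development \
      -ml-inliner-trained-model=<path/to/saved_model> \
      -ml-inliner-ir2native-model=<path/to/ir2native/model> \
      -training-log=- -S input.ll

Omitting -ml-inliner-trained-model selects NoInferenceModelRunner, so the log
records the default policy's decisions instead of the model's.
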
diff --git a/llvm/lib/Analysis/InlineAdvisor.cpp b/llvm/lib/Analysis/InlineAdvisor.cpp
--- a/llvm/lib/Analysis/InlineAdvisor.cpp
+++ b/llvm/lib/Analysis/InlineAdvisor.cpp
@@ -160,7 +160,13 @@
     Advisor.reset(new DefaultInlineAdvisor(FAM, Params));
     break;
   case InliningAdvisorMode::Development:
-    // To be added subsequently under conditional compilation.
+#ifdef LLVM_HAVE_TF_API
+    Advisor =
+        llvm::getDevelopmentModeAdvisor(M, MAM, [&FAM, Params](CallBase &CB) {
+          auto OIC = getDefaultInlineAdvice(CB, FAM, Params);
+          return OIC.hasValue();
+        });
+#endif
     break;
   case InliningAdvisorMode::Release:
 #ifdef LLVM_HAVE_TF_AOT
diff --git a/llvm/test/Bindings/Go/lit.local.cfg b/llvm/test/Bindings/Go/lit.local.cfg
--- a/llvm/test/Bindings/Go/lit.local.cfg
+++ b/llvm/test/Bindings/Go/lit.local.cfg
@@ -9,7 +9,7 @@
 if not config.root.include_go_tests:
     config.unsupported = True
 
-if config.have_tf_aot:
+if config.have_tf_aot or config.have_tf_api:
     config.unsupported = True
 
 def find_executable(executable, path=None):
diff --git a/llvm/test/CMakeLists.txt b/llvm/test/CMakeLists.txt
--- a/llvm/test/CMakeLists.txt
+++ b/llvm/test/CMakeLists.txt
@@ -16,6 +16,7 @@
   LLVM_ENABLE_PLUGINS
   LLVM_BYE_LINK_INTO_TOOLS
   LLVM_HAVE_TF_AOT
+  LLVM_HAVE_TF_API
   )
 
 configure_lit_site_cfg(
diff --git a/llvm/test/Transforms/Inline/ML/bounds-checks-rewards.ll b/llvm/test/Transforms/Inline/ML/bounds-checks-rewards.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/Transforms/Inline/ML/bounds-checks-rewards.ll
@@ -0,0 +1,45 @@
+; Test behavior when inlining policy grows size out of control.
+; In all cases, the end result is the same: mandatory inlinings must happen.
+; However, when we discover we 'trip' over the artificially-low size increase
+; factor, we penalize the 'bad' decision.
+; REQUIRES: have_tf_api
+; RUN: opt -passes=scc-oz-module-inliner -ml-inliner-ir2native-model=%S/../../../../unittests/Analysis/Inputs/ir2native_x86_64_model -ml-inliner-trained-model=%S/../../../../lib/Analysis/models/inliner -training-log=- -enable-ml-inliner=development -ml-advisor-size-increase-threshold=10.0 -S < %s 2>&1 | FileCheck %s --check-prefix=CHECK --check-prefix=NOBOUNDS
+; RUN: opt -passes=scc-oz-module-inliner -ml-inliner-ir2native-model=%S/../../../../unittests/Analysis/Inputs/ir2native_x86_64_model -ml-inliner-trained-model=%S/../../../../lib/Analysis/models/inliner -training-log=- -enable-ml-inliner=development -ml-advisor-size-increase-threshold=1.0 -S < %s 2>&1 | FileCheck %s --check-prefix=CHECK --check-prefix=BOUNDS
+
+target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-grtev4-linux-gnu"
+
+declare i64 @f1()
+
+define i64 @f2() #0 {
+  %r = call i64 @f1()
+  %r2 = add i64 13, %r
+  ret i64 %r2
+}
+
+define i64 @some_function() {
+  %r = call i64 @f1()
+  %r2 = add i64 13, %r
+  ret i64 %r2
+}
+
+define i64 @top() {
+  %r = call i64 @f2()
+  %r2 = call i64 @some_function()
+  %r3 = add i64 %r, %r2
+  ret i64 %r3
+}
+
+attributes #0 = { alwaysinline }
+; CHECK: key: "delta_size" value: {
+; NOBOUNDS-NEXT: feature: { int64_list: { value: [10] } }
+; NOBOUNDS-NEXT: feature: { int64_list: { value: [6] } }
+; BOUNDS-NEXT: feature: { int64_list: { value: [2147483647] } }
+; CHECK-NEXT: }
+; CHECK-LABEL: @top
+; f2 must always be inlined, so we won't find a call to it in @top().
+; CHECK-NOT: call i64 @f2
+; @some_function isn't mandatory, and when we set the increase threshold too low,
+; it won't be inlined.
+; NOBOUNDS-NOT: @some_function
+; BOUNDS: call i64 @some_function
\ No newline at end of file
diff --git a/llvm/test/Transforms/Inline/ML/development-training-log.ll b/llvm/test/Transforms/Inline/ML/development-training-log.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/Transforms/Inline/ML/development-training-log.ll
@@ -0,0 +1,51 @@
+; Test that, in development mode, we can produce a log whether or not we have a model.
+; REQUIRES: have_tf_api
+; RUN: opt -enable-ml-inliner=development -passes=scc-oz-module-inliner -training-log=- -ml-inliner-trained-model=%S/../../../../lib/Analysis/models/inliner -ml-inliner-ir2native-model=%S/../../../../unittests/Analysis/Inputs/ir2native_x86_64_model -S < %s | FileCheck %s
+; RUN: opt -enable-ml-inliner=development -passes=scc-oz-module-inliner -training-log=- -ml-inliner-ir2native-model=%S/../../../../unittests/Analysis/Inputs/ir2native_x86_64_model -S < %s | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-pc-linux-gnu"
+
+declare i32 @f1(i32)
+declare i32 @f2(i32)
+
+define dso_local i32 @branches(i32) {
+  %cond = icmp slt i32 %0, 3
+  br i1 %cond, label %then, label %else
+
+then:
+  %ret.1 = call i32 @f1(i32 %0)
+  br label %last.block
+
+else:
+  %ret.2 = call i32 @f2(i32 %0)
+  br label %last.block
+
+last.block:
+  %ret = phi i32 [%ret.1, %then], [%ret.2, %else]
+  ret i32 %ret
+}
+
+define dso_local i32 @top() {
+  %1 = call i32 @branches(i32 2)
+  ret i32 %1
+}
+
+
+!llvm.module.flags = !{!0}
+!llvm.ident = !{!1}
+
+!0 = !{i32 1, !"wchar_size", i32 4}
+!1 = !{!"clang version 7.0.0-6 (tags/RELEASE_700/final)"}
+
+; Check we produce a protobuf that has inlining decisions and rewards.
+; CHECK: feature_lists: {
+; CHECK: key: "inlining_decision" value: {
+; CHECK-NEXT: feature: { int64_list: { value: [1] } }
+; CHECK-NEXT: }
+; CHECK-NEXT: }
+; CHECK-NEXT: feature_list: {
+; CHECK-NEXT: key: "delta_size" value: {
+; CHECK-NEXT: feature: { int64_list: { value: [0] } }
+; CHECK-NEXT: }
+; CHECK-NEXT: }
\ No newline at end of file
diff --git a/llvm/test/Transforms/Inline/ML/ml-test-development-mode.ll b/llvm/test/Transforms/Inline/ML/ml-test-development-mode.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/Transforms/Inline/ML/ml-test-development-mode.ll
@@ -0,0 +1,10 @@
+; The default inliner doesn't elide @adder, as it believes it's too costly to
+; inline it into @switcher. The ML inliner carries out that inlining, resulting
+; in a smaller result (part of it is that @adder gets elided).
+;
+; This test uses Inputs/test-module.ll, as it shares it with a similar test
+; for the 'release' mode.
+;
+; REQUIRES: have_tf_api
+; RUN: opt -passes=scc-oz-module-inliner -enable-ml-inliner=default -S < %S/Inputs/test-module.ll 2>&1 | FileCheck %S/Inputs/test-module.ll --check-prefix=DEFAULT
+; RUN: opt -passes=scc-oz-module-inliner -enable-ml-inliner=development -ml-inliner-trained-model=%S/../../../../lib/Analysis/models/inliner -S < %S/Inputs/test-module.ll 2>&1 | FileCheck %S/Inputs/test-module.ll --check-prefix=CHECK
diff --git a/llvm/test/Transforms/Inline/inlining-advisor-default.ll b/llvm/test/Transforms/Inline/inlining-advisor-default.ll
--- a/llvm/test/Transforms/Inline/inlining-advisor-default.ll
+++ b/llvm/test/Transforms/Inline/inlining-advisor-default.ll
@@ -1,9 +1,10 @@
 ; Check that, in the absence of dependencies, we emit an error message when
 ; trying to use ML-driven inlining.
 ; REQUIRES: !have_tf_aot
+; REQUIRES: !have_tf_api
 ; RUN: not opt -passes=scc-oz-module-inliner -enable-ml-inliner=development -S < %s 2>&1 | FileCheck %s
 ; RUN: not opt -passes=scc-oz-module-inliner -enable-ml-inliner=release -S < %s 2>&1 | FileCheck %s
 
 declare i64 @f1()
 
 ; CHECK: Could not setup Inlining Advisor for the requested mode and/or options
\ No newline at end of file
diff --git a/llvm/test/lit.cfg.py b/llvm/test/lit.cfg.py
--- a/llvm/test/lit.cfg.py
+++ b/llvm/test/lit.cfg.py
@@ -222,6 +222,9 @@
 if config.have_tf_aot:
     config.available_features.add("have_tf_aot")
 
+if config.have_tf_api:
+    config.available_features.add("have_tf_api")
+
 def have_cxx_shared_library():
     readobj_exe = lit.util.which('llvm-readobj', config.llvm_tools_dir)
     if not readobj_exe:
diff --git a/llvm/test/lit.site.cfg.py.in b/llvm/test/lit.site.cfg.py.in
--- a/llvm/test/lit.site.cfg.py.in
+++ b/llvm/test/lit.site.cfg.py.in
@@ -49,6 +49,7 @@
 config.has_plugins = @LLVM_ENABLE_PLUGINS@
 config.linked_bye_extension = @LLVM_BYE_LINK_INTO_TOOLS@
 config.have_tf_aot = @LLVM_HAVE_TF_AOT@
+config.have_tf_api = @LLVM_HAVE_TF_API@
 
 # Support substitution of the tools_dir with user parameters. This is
 # used when we can't determine the tool dir at configuration time.
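
For reference, a sketch of how LLVM_HAVE_TF_API is expected to be enabled in a
build (this relies on the TENSORFLOW_C_LIB_PATH CMake option introduced
alongside TFUtils.cpp; treat the paths as placeholders):

  cmake -G Ninja ../llvm -DTENSORFLOW_C_LIB_PATH=/path/to/libtensorflow
  ninja check-llvm   # the have_tf_api-gated tests above now run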