Index: include/llvm/LTO/Caching.h
===================================================================
--- include/llvm/LTO/Caching.h
+++ include/llvm/LTO/Caching.h
@@ -7,92 +7,29 @@
 //
 //===----------------------------------------------------------------------===//
 //
-// This file defines the lto::CacheObjectOutput data structure, which allows
-// clients to add a filesystem cache to ThinLTO
+// This file defines the localCache function, which allows clients to add a
+// filesystem cache to ThinLTO.
 //
 //===----------------------------------------------------------------------===//
 
 #ifndef LLVM_LTO_CACHING_H
 #define LLVM_LTO_CACHING_H
 
-#include "llvm/ADT/SmallString.h"
-#include "llvm/LTO/Config.h"
-#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/LTO/LTO.h"
+#include <string>
 
 namespace llvm {
 namespace lto {
-/// Type for client-supplied callback when a buffer is loaded from the cache.
-typedef std::function<void(std::string)> AddBufferFn;
-/// Manage caching on the filesystem.
+/// This type defines the callback to add a pre-existing native object file
+/// (e.g. in a cache).
 ///
-/// The general scheme is the following:
-///
-/// void do_stuff(AddBufferFn CallBack) {
-///   /* ... */
-///   {
-///     /* Create the CacheObjectOutput pointing to a cache directory */
-///     auto Output = CacheObjectOutput("/tmp/cache", CallBack)
-///
-///     /* Call some processing function */
-///     process(Output);
-///
-///   } /* Callback is only called now, on destruction of the Output object */
-///   /* ... */
-/// }
-///
-///
-/// void process(NativeObjectOutput &Output) {
-///   /* check if caching is supported */
-///   if (Output.isCachingEnabled()) {
-///     auto Key = ComputeKeyForEntry(...); // "expensive" call
-///     if (Output.tryLoadFromCache())
-///       return; // Cache hit
-///   }
-///
-///   auto OS = Output.getStream();
-///
-///   OS << ...;
-///   /* Note that the callback is not called here, but only when the caller
-///      destroys Output */
-/// }
-///
-class CacheObjectOutput : public NativeObjectOutput {
-  /// Path to the on-disk cache directory
-  StringRef CacheDirectoryPath;
-  /// Path to this entry in the cache, initialized by tryLoadFromCache().
-  SmallString<128> EntryPath;
-  /// Path to temporary file used to buffer output that will be committed to the
-  /// cache entry when this object is destroyed
-  SmallString<128> TempFilename;
-  /// User-supplied callback, used to provide path to cache entry
-  /// (potentially after creating it).
-  AddBufferFn AddBuffer;
-
-public:
-  /// The destructor pulls the entry from the cache and calls the AddBuffer
-  /// callback, after committing the entry into the cache on miss.
-  ~CacheObjectOutput();
-
-  /// Create a CacheObjectOutput: the client is supposed to create it in the
-  /// callback supplied to LTO::run. The \p CacheDirectoryPath points to the
-  /// directory on disk where to store the cache, and \p AddBuffer will be
-  /// called when the buffer is ready to be pulled out of the cache
-  /// (potentially after creating it).
-  CacheObjectOutput(StringRef CacheDirectoryPath, AddBufferFn AddBuffer)
-      : CacheDirectoryPath(CacheDirectoryPath), AddBuffer(AddBuffer) {}
-
-  /// Return an allocated stream for the output, or null in case of failure.
-  std::unique_ptr<raw_pwrite_stream> getStream() override;
-
-  /// Set EntryPath, try loading from a possible cache first, return true on
-  /// cache hit.
-  bool tryLoadFromCache(StringRef Key) override;
+/// File callbacks must be thread safe.
+typedef std::function<void(unsigned Task, StringRef Path)> AddFileFn;
 
-  /// Returns true to signal that this implementation of NativeObjectFile
-  /// support caching.
-  bool isCachingEnabled() const override { return true; }
-};
+/// Create a local file system cache which uses the given cache directory and
+/// file callback.
+NativeObjectCache localCache(std::string CacheDirectoryPath, AddFileFn AddFile);
 
 } // namespace lto
 } // namespace llvm
Index: include/llvm/LTO/Config.h
===================================================================
--- include/llvm/LTO/Config.h
+++ include/llvm/LTO/Config.h
@@ -30,38 +30,6 @@
 
 namespace lto {
 
-/// Abstract class representing a single Task output to be implemented by the
-/// client of the LTO API.
-///
-/// The general scheme the API is called is the following:
-///
-/// void process(NativeObjectOutput &Output) {
-///   /* check if caching is supported */
-///   if (Output.isCachingEnabled()) {
-///     auto Key = ComputeKeyForEntry(...); // "expensive" call
-///     if (Output.tryLoadFromCache())
-///       return; // Cache hit
-///   }
-///
-///   auto OS = Output.getStream();
-///
-///   OS << ....;
-/// }
-///
-class NativeObjectOutput {
-public:
-  // Return an allocated stream for the output, or null in case of failure.
-  virtual std::unique_ptr<raw_pwrite_stream> getStream() = 0;
-
-  // Try loading from a possible cache first, return true on cache hit.
-  virtual bool tryLoadFromCache(StringRef Key) { return false; }
-
-  // Returns true if a cache is available
-  virtual bool isCachingEnabled() const { return false; }
-
-  virtual ~NativeObjectOutput() = default;
-};
-
 /// LTO configuration. A linker can configure LTO by setting fields in this data
 /// structure and passing it to the lto::LTO constructor.
 struct Config {
@@ -235,13 +203,6 @@
                        bool UseInputModulePath = false);
 };
 
-/// This type defines the callback to add a native object that is generated on
-/// the fly.
-///
-/// Output callbacks must be thread safe.
-typedef std::function<std::unique_ptr<NativeObjectOutput>(unsigned Task)>
-    AddOutputFn;
-
 /// A derived class of LLVMContext that initializes itself according to a given
 /// Config object. The purpose of this class is to tie ownership of the
 /// diagnostic handler to the context, as opposed to the Config object (which
Index: include/llvm/LTO/LTO.h
===================================================================
--- include/llvm/LTO/LTO.h
+++ include/llvm/LTO/LTO.h
@@ -247,13 +247,47 @@
   }
 };
 
+/// This class wraps an output stream for a native object. Most clients should
+/// just be able to return an instance of this base class from the stream
+/// callback, but if a client needs to perform some action after the stream is
+/// written to, that can be done by deriving from this class and overriding the
+/// destructor.
+class NativeObjectStream {
+public:
+  NativeObjectStream(std::unique_ptr<raw_pwrite_stream> OS) : OS(std::move(OS)) {}
+  std::unique_ptr<raw_pwrite_stream> OS;
+  virtual ~NativeObjectStream() = default;
+};
+
+/// This type defines the callback to add a native object that is generated on
+/// the fly.
+///
+/// Stream callbacks must be thread safe.
+typedef std::function<std::unique_ptr<NativeObjectStream>(unsigned Task)>
+    AddStreamFn;
+
+/// This is the type of a native object cache. To request an item from the
+/// cache, pass a unique string as the Key. For hits, the cached file will be
+/// added to the link and this function will return AddStreamFn(). For misses,
+/// the cache will return a stream callback which must be called at most once to
+/// produce content for the stream. The native object stream produced by the
+/// stream callback will add the file to the link after the stream is written
+/// to.
+///
+/// Clients generally look like this:
+///
+/// if (AddStreamFn AddStream = Cache(Task, Key))
+///   ProduceContent(AddStream);
+typedef std::function<AddStreamFn(unsigned Task, StringRef Key)>
+    NativeObjectCache;
+
 /// A ThinBackend defines what happens after the thin-link phase during ThinLTO.
 /// The details of this type definition aren't important; clients can only
 /// create a ThinBackend using one of the create*ThinBackend() functions below.
 typedef std::function<std::unique_ptr<ThinBackendProc>(
     Config &C, ModuleSummaryIndex &CombinedIndex,
     StringMap<GVSummaryMapTy> &ModuleToDefinedGVSummaries,
-    AddOutputFn AddOutput)>
+    AddStreamFn AddStream, NativeObjectCache Cache)>
     ThinBackend;
 
 /// This ThinBackend runs the individual backend jobs in-process.
@@ -286,8 +320,9 @@
 ///   and pass it and an array of symbol resolutions to the add() function.
 /// - Call the getMaxTasks() function to get an upper bound on the number of
 ///   native object files that LTO may add to the link.
-/// - Call the run() function. This function will use the supplied AddOutput
-///   function to add up to getMaxTasks() native object files to the link.
+/// - Call the run() function. This function will use the supplied AddStream
+///   and Cache functions to add up to getMaxTasks() native object files to
+///   the link.
 class LTO {
   friend InputFile;
 
@@ -310,9 +345,15 @@
   /// full description of tasks see LTOBackend.h.
   unsigned getMaxTasks() const;
 
-  /// Runs the LTO pipeline. This function calls the supplied AddOutput function
-  /// to add native object files to the link.
-  Error run(AddOutputFn AddOutput);
+  /// Runs the LTO pipeline. This function calls the supplied AddStream
+  /// function to add native object files to the link.
+  ///
+  /// The Cache parameter is optional. If supplied, it will be used to cache
+  /// native object files and add them to the link.
+  ///
+  /// The client will receive at most one callback (via either AddStream or
+  /// Cache) for each task identifier.
+  Error run(AddStreamFn AddStream, NativeObjectCache Cache = nullptr);
 
 private:
   Config Conf;
@@ -393,8 +434,9 @@
   Error addThinLTO(std::unique_ptr<InputFile> Input,
                    ArrayRef<SymbolResolution> Res);
 
-  Error runRegularLTO(AddOutputFn AddOutput);
-  Error runThinLTO(AddOutputFn AddOutput, bool HasRegularLTO);
+  Error runRegularLTO(AddStreamFn AddStream);
+  Error runThinLTO(AddStreamFn AddStream, NativeObjectCache Cache,
+                   bool HasRegularLTO);
 
   mutable bool CalledGetMaxTasks = false;
 };
Index: include/llvm/LTO/LTOBackend.h
===================================================================
--- include/llvm/LTO/LTOBackend.h
+++ include/llvm/LTO/LTOBackend.h
@@ -20,7 +20,7 @@
 #include "llvm/ADT/MapVector.h"
 #include "llvm/IR/DiagnosticInfo.h"
 #include "llvm/IR/ModuleSummaryIndex.h"
-#include "llvm/LTO/Config.h"
+#include "llvm/LTO/LTO.h"
 #include "llvm/Support/MemoryBuffer.h"
 #include "llvm/Target/TargetOptions.h"
 #include "llvm/Transforms/IPO/FunctionImport.h"
@@ -34,12 +34,12 @@
 namespace lto {
 
 /// Runs a regular LTO backend.
-Error backend(Config &C, AddOutputFn AddStream,
+Error backend(Config &C, AddStreamFn AddStream,
               unsigned ParallelCodeGenParallelismLevel,
               std::unique_ptr<Module> M);
 
 /// Runs a ThinLTO backend.
-Error thinBackend(Config &C, unsigned Task, AddOutputFn AddStream, Module &M, +Error thinBackend(Config &C, unsigned Task, AddStreamFn AddStream, Module &M, ModuleSummaryIndex &CombinedIndex, const FunctionImporter::ImportMapTy &ImportList, const GVSummaryMapTy &DefinedGlobals, Index: lib/LTO/Caching.cpp =================================================================== --- lib/LTO/Caching.cpp +++ lib/LTO/Caching.cpp @@ -12,13 +12,9 @@ //===----------------------------------------------------------------------===// #include "llvm/LTO/Caching.h" - -#ifdef HAVE_LLVM_REVISION -#include "LLVMLTORevision.h" -#endif - #include "llvm/ADT/StringExtras.h" #include "llvm/Support/FileSystem.h" +#include "llvm/Support/MemoryBuffer.h" #include "llvm/Support/Path.h" #include "llvm/Support/raw_ostream.h" @@ -30,6 +26,8 @@ auto EC = sys::fs::rename(TempFilename, EntryPath); if (EC) { // Renaming failed, probably not the same filesystem, copy and delete. + // FIXME: Avoid needing to do this by creating the temporary file in the + // cache directory. { auto ReloadedBufferOrErr = MemoryBuffer::getFile(TempFilename); if (auto EC = ReloadedBufferOrErr.getError()) @@ -48,51 +46,54 @@ } } -CacheObjectOutput::~CacheObjectOutput() { - if (EntryPath.empty()) - // The entry was never used by the client (tryLoadFromCache() wasn't called) - return; - // TempFilename is only set if getStream() was called, i.e. on cache miss when - // tryLoadFromCache() returned false. And EntryPath is valid if a Key was - // submitted, otherwise it has been set to CacheDirectoryPath in - // tryLoadFromCache. - if (!TempFilename.empty()) { - if (EntryPath == CacheDirectoryPath) - // The Key supplied to tryLoadFromCache was empty, do not commit the temp. - EntryPath = TempFilename; - else - // We commit the tempfile into the cache now, by moving it to EntryPath. - commitEntry(TempFilename, EntryPath); - } - // Supply the cache path to the user. - AddBuffer(EntryPath.str()); -} +NativeObjectCache lto::localCache(std::string CacheDirectoryPath, + AddFileFn AddFile) { + return [=](unsigned Task, StringRef Key) -> AddStreamFn { + // First, see if we have a cache hit. + SmallString<64> EntryPath; + sys::path::append(EntryPath, CacheDirectoryPath, Key); + if (sys::fs::exists(EntryPath)) { + AddFile(Task, EntryPath); + return AddStreamFn(); + } -// Return an allocated stream for the output, or null in case of failure. -std::unique_ptr CacheObjectOutput::getStream() { - assert(!EntryPath.empty() && "API Violation: client didn't call " - "tryLoadFromCache() before getStream()"); - // Write to a temporary to avoid race condition - int TempFD; - std::error_code EC = - sys::fs::createTemporaryFile("Thin", "tmp.o", TempFD, TempFilename); - if (EC) { - errs() << "Error: " << EC.message() << "\n"; - report_fatal_error("ThinLTO: Can't get a temporary file"); - } - return llvm::make_unique(TempFD, /* ShouldClose */ true); -} + // This native object stream is responsible for commiting the resulting + // file to the cache and calling AddFile to add it to the link. + struct CacheStream : NativeObjectStream { + AddFileFn AddFile; + std::string TempFilename; + std::string EntryPath; + unsigned Task; -// Try loading from a possible cache first, return true on cache hit. -bool CacheObjectOutput::tryLoadFromCache(StringRef Key) { - assert(!CacheDirectoryPath.empty() && - "CacheObjectOutput was initialized without a cache path"); - if (Key.empty()) { - // Client didn't compute a valid key. EntryPath has been set to - // CacheDirectoryPath. 
- EntryPath = CacheDirectoryPath; - return false; - } - sys::path::append(EntryPath, CacheDirectoryPath, Key); - return sys::fs::exists(EntryPath); + CacheStream(std::unique_ptr OS, AddFileFn AddFile, + std::string TempFilename, std::string EntryPath, + unsigned Task) + : NativeObjectStream(std::move(OS)), AddFile(AddFile), + TempFilename(TempFilename), EntryPath(EntryPath), Task(Task) {} + + ~CacheStream() { + // Make sure the file is closed before committing it. + OS.reset(); + commitEntry(TempFilename, EntryPath); + AddFile(Task, EntryPath); + } + }; + + return [=](size_t Task) -> std::unique_ptr { + // Write to a temporary to avoid race condition + int TempFD; + SmallString<64> TempFilename; + std::error_code EC = + sys::fs::createTemporaryFile("Thin", "tmp.o", TempFD, TempFilename); + if (EC) { + errs() << "Error: " << EC.message() << "\n"; + report_fatal_error("ThinLTO: Can't get a temporary file"); + } + + // This CacheStream will move the temporary file into the cache when done. + return make_unique( + llvm::make_unique(TempFD, /* ShouldClose */ true), + AddFile, TempFilename.str(), EntryPath.str(), Task); + }; + }; } Index: lib/LTO/LTO.cpp =================================================================== --- lib/LTO/LTO.cpp +++ lib/LTO/LTO.cpp @@ -409,19 +409,19 @@ return RegularLTO.ParallelCodeGenParallelismLevel + ThinLTO.ModuleMap.size(); } -Error LTO::run(AddOutputFn AddOutput) { +Error LTO::run(AddStreamFn AddStream, NativeObjectCache Cache) { // Save the status of having a regularLTO combined module, as // this is needed for generating the ThinLTO Task ID, and // the CombinedModule will be moved at the end of runRegularLTO. bool HasRegularLTO = RegularLTO.CombinedModule != nullptr; // Invoke regular LTO if there was a regular LTO module to start with. if (HasRegularLTO) - if (auto E = runRegularLTO(AddOutput)) + if (auto E = runRegularLTO(AddStream)) return E; - return runThinLTO(AddOutput, HasRegularLTO); + return runThinLTO(AddStream, Cache, HasRegularLTO); } -Error LTO::runRegularLTO(AddOutputFn AddOutput) { +Error LTO::runRegularLTO(AddStreamFn AddStream) { // Make sure commons have the right size/alignment: we kept the largest from // all the prevailing when adding the inputs, and we apply it here. 
const DataLayout &DL = RegularLTO.CombinedModule->getDataLayout(); @@ -478,7 +478,7 @@ !Conf.PostInternalizeModuleHook(0, *RegularLTO.CombinedModule)) return Error(); } - return backend(Conf, AddOutput, RegularLTO.ParallelCodeGenParallelismLevel, + return backend(Conf, AddStream, RegularLTO.ParallelCodeGenParallelismLevel, std::move(RegularLTO.CombinedModule)); } @@ -507,7 +507,8 @@ class InProcessThinBackend : public ThinBackendProc { ThreadPool BackendThreadPool; - AddOutputFn AddOutput; + AddStreamFn AddStream; + NativeObjectCache Cache; Optional Err; std::mutex ErrMu; @@ -517,42 +518,40 @@ Config &Conf, ModuleSummaryIndex &CombinedIndex, unsigned ThinLTOParallelismLevel, const StringMap &ModuleToDefinedGVSummaries, - AddOutputFn AddOutput) + AddStreamFn AddStream, NativeObjectCache Cache) : ThinBackendProc(Conf, CombinedIndex, ModuleToDefinedGVSummaries), BackendThreadPool(ThinLTOParallelismLevel), - AddOutput(std::move(AddOutput)) {} + AddStream(std::move(AddStream)), Cache(std::move(Cache)) {} Error runThinLTOBackendThread( - AddOutputFn AddOutput, unsigned Task, MemoryBufferRef MBRef, - ModuleSummaryIndex &CombinedIndex, + AddStreamFn AddStream, NativeObjectCache Cache, unsigned Task, + MemoryBufferRef MBRef, ModuleSummaryIndex &CombinedIndex, const FunctionImporter::ImportMapTy &ImportList, const FunctionImporter::ExportSetTy &ExportList, const std::map &ResolvedODR, const GVSummaryMapTy &DefinedGlobals, MapVector &ModuleMap) { + auto RunThinBackend = [&](AddStreamFn AddStream) { + LTOLLVMContext BackendContext(Conf); + ErrorOr> MOrErr = + parseBitcodeFile(MBRef, BackendContext); + assert(MOrErr && "Unable to load module in thread?"); + + return thinBackend(Conf, Task, AddStream, **MOrErr, CombinedIndex, + ImportList, DefinedGlobals, ModuleMap); + }; - auto ModuleIdentifier = MBRef.getBufferIdentifier(); - auto Output = AddOutput(Task); - if (Output->isCachingEnabled()) { - SmallString<40> Key; - // The module may be cached, this helps handling it. - computeCacheKey(Key, CombinedIndex, ModuleIdentifier, ImportList, - ExportList, ResolvedODR, DefinedGlobals); - if (Output->tryLoadFromCache(Key)) - return Error(); - } + if (!Cache) + return RunThinBackend(AddStream); - LTOLLVMContext BackendContext(Conf); - ErrorOr> MOrErr = - parseBitcodeFile(MBRef, BackendContext); - assert(MOrErr && "Unable to load module in thread?"); + SmallString<40> Key; + // The module may be cached, this helps handling it. 
+ computeCacheKey(Key, CombinedIndex, MBRef.getBufferIdentifier(), + ImportList, ExportList, ResolvedODR, DefinedGlobals); + if (AddStreamFn CacheAddStream = Cache(Task, Key)) + return RunThinBackend(CacheAddStream); - auto AddOutputWrapper = [&](unsigned TaskId) { - assert(Task == TaskId && "Unexpexted TaskId mismatch"); - return std::move(Output); - }; - return thinBackend(Conf, Task, AddOutputWrapper, **MOrErr, CombinedIndex, - ImportList, DefinedGlobals, ModuleMap); + return Error(); } Error start( @@ -574,8 +573,8 @@ const GVSummaryMapTy &DefinedGlobals, MapVector &ModuleMap) { Error E = runThinLTOBackendThread( - AddOutput, Task, MBRef, CombinedIndex, ImportList, ExportList, - ResolvedODR, DefinedGlobals, ModuleMap); + AddStream, Cache, Task, MBRef, CombinedIndex, ImportList, + ExportList, ResolvedODR, DefinedGlobals, ModuleMap); if (E) { std::unique_lock L(ErrMu); if (Err) @@ -602,10 +601,10 @@ ThinBackend lto::createInProcessThinBackend(unsigned ParallelismLevel) { return [=](Config &Conf, ModuleSummaryIndex &CombinedIndex, const StringMap &ModuleToDefinedGVSummaries, - AddOutputFn AddOutput) { + AddStreamFn AddStream, NativeObjectCache Cache) { return llvm::make_unique( Conf, CombinedIndex, ParallelismLevel, ModuleToDefinedGVSummaries, - AddOutput); + AddStream, Cache); }; } @@ -693,14 +692,15 @@ std::string LinkedObjectsFile) { return [=](Config &Conf, ModuleSummaryIndex &CombinedIndex, const StringMap &ModuleToDefinedGVSummaries, - AddOutputFn AddOutput) { + AddStreamFn AddStream, NativeObjectCache Cache) { return llvm::make_unique( Conf, CombinedIndex, ModuleToDefinedGVSummaries, OldPrefix, NewPrefix, ShouldEmitImportsFiles, LinkedObjectsFile); }; } -Error LTO::runThinLTO(AddOutputFn AddOutput, bool HasRegularLTO) { +Error LTO::runThinLTO(AddStreamFn AddStream, NativeObjectCache Cache, + bool HasRegularLTO) { if (ThinLTO.ModuleMap.empty()) return Error(); @@ -759,8 +759,9 @@ thinLTOResolveWeakForLinkerInIndex(ThinLTO.CombinedIndex, isPrevailing, recordNewLinkage); - std::unique_ptr BackendProc = ThinLTO.Backend( - Conf, ThinLTO.CombinedIndex, ModuleToDefinedGVSummaries, AddOutput); + std::unique_ptr BackendProc = + ThinLTO.Backend(Conf, ThinLTO.CombinedIndex, ModuleToDefinedGVSummaries, + AddStream, Cache); // Partition numbers for ThinLTO jobs start at 1 (see comments for // GlobalResolution in LTO.h). Task numbers, however, start at Index: lib/LTO/LTOBackend.cpp =================================================================== --- lib/LTO/LTOBackend.cpp +++ lib/LTO/LTOBackend.cpp @@ -199,34 +199,20 @@ return !Conf.PostOptModuleHook || Conf.PostOptModuleHook(Task, Mod); } -/// Monolithic LTO does not support caching (yet), this is a convenient wrapper -/// around AddOutput to workaround this. 
-static AddOutputFn getUncachedOutputWrapper(AddOutputFn &AddOutput, - unsigned Task) { - return [Task, &AddOutput](unsigned TaskId) { - auto Output = AddOutput(Task); - if (Output->isCachingEnabled() && Output->tryLoadFromCache("")) - report_fatal_error("Cache hit without a valid key?"); - assert(Task == TaskId && "Unexpexted TaskId mismatch"); - return Output; - }; -} - -void codegen(Config &Conf, TargetMachine *TM, AddOutputFn AddOutput, +void codegen(Config &Conf, TargetMachine *TM, AddStreamFn AddStream, unsigned Task, Module &Mod) { if (Conf.PreCodeGenModuleHook && !Conf.PreCodeGenModuleHook(Task, Mod)) return; - auto Output = AddOutput(Task); - std::unique_ptr OS = Output->getStream(); + auto Stream = AddStream(Task); legacy::PassManager CodeGenPasses; - if (TM->addPassesToEmitFile(CodeGenPasses, *OS, + if (TM->addPassesToEmitFile(CodeGenPasses, *Stream->OS, TargetMachine::CGFT_ObjectFile)) report_fatal_error("Failed to setup codegen"); CodeGenPasses.run(Mod); } -void splitCodeGen(Config &C, TargetMachine *TM, AddOutputFn AddOutput, +void splitCodeGen(Config &C, TargetMachine *TM, AddStreamFn AddStream, unsigned ParallelCodeGenParallelismLevel, std::unique_ptr Mod) { ThreadPool CodegenThreadPool(ParallelCodeGenParallelismLevel); @@ -260,9 +246,7 @@ std::unique_ptr TM = createTargetMachine(C, MPartInCtx->getTargetTriple(), T); - codegen(C, TM.get(), - getUncachedOutputWrapper(AddOutput, ThreadId), ThreadId, - *MPartInCtx); + codegen(C, TM.get(), AddStream, ThreadId, *MPartInCtx); }, // Pass BC using std::move to ensure that it get moved rather than // copied into the thread's context. @@ -299,7 +283,7 @@ updateCompilerUsed(Mod, TM, AsmUndefinedRefs); } -Error lto::backend(Config &C, AddOutputFn AddOutput, +Error lto::backend(Config &C, AddStreamFn AddStream, unsigned ParallelCodeGenParallelismLevel, std::unique_ptr Mod) { Expected TOrErr = initAndLookupTarget(C, *Mod); @@ -316,15 +300,15 @@ return Error(); if (ParallelCodeGenParallelismLevel == 1) { - codegen(C, TM.get(), getUncachedOutputWrapper(AddOutput, 0), 0, *Mod); + codegen(C, TM.get(), AddStream, 0, *Mod); } else { - splitCodeGen(C, TM.get(), AddOutput, ParallelCodeGenParallelismLevel, + splitCodeGen(C, TM.get(), AddStream, ParallelCodeGenParallelismLevel, std::move(Mod)); } return Error(); } -Error lto::thinBackend(Config &Conf, unsigned Task, AddOutputFn AddOutput, +Error lto::thinBackend(Config &Conf, unsigned Task, AddStreamFn AddStream, Module &Mod, ModuleSummaryIndex &CombinedIndex, const FunctionImporter::ImportMapTy &ImportList, const GVSummaryMapTy &DefinedGlobals, @@ -339,7 +323,7 @@ handleAsmUndefinedRefs(Mod, *TM); if (Conf.CodeGenOnly) { - codegen(Conf, TM.get(), AddOutput, Task, Mod); + codegen(Conf, TM.get(), AddStream, Task, Mod); return Error(); } @@ -379,6 +363,6 @@ if (!opt(Conf, TM.get(), Task, Mod, /*IsThinLto=*/true)) return Error(); - codegen(Conf, TM.get(), AddOutput, Task, Mod); + codegen(Conf, TM.get(), AddStream, Task, Mod); return Error(); } Index: tools/gold/gold-plugin.cpp =================================================================== --- tools/gold/gold-plugin.cpp +++ tools/gold/gold-plugin.cpp @@ -669,24 +669,6 @@ NewPrefix = Split.second.str(); } -namespace { -// Define the LTOOutput handling -class LTOOutput : public lto::NativeObjectOutput { - StringRef Path; - -public: - LTOOutput(StringRef Path) : Path(Path) {} - // Open the filename \p Path and allocate a stream. 
- std::unique_ptr getStream() override { - int FD; - std::error_code EC = sys::fs::openFileForWrite(Path, FD, sys::fs::F_None); - if (EC) - message(LDPL_FATAL, "Could not open file: %s", EC.message().c_str()); - return llvm::make_unique(FD, true); - } -}; -} - static std::unique_ptr createLTO() { Config Conf; ThinBackend Backend; @@ -830,21 +812,27 @@ std::vector IsTemporary(MaxTasks); std::vector> Filenames(MaxTasks); - auto AddOutput = - [&](size_t Task) -> std::unique_ptr { - auto &OutputName = Filenames[Task]; - getOutputFileName(Filename, /*TempOutFile=*/!SaveTemps, OutputName, + auto AddStream = + [&](size_t Task) -> std::unique_ptr { + IsTemporary[Task] = !SaveTemps; + getOutputFileName(Filename, /*TempOutFile=*/!SaveTemps, Filenames[Task], MaxTasks > 1 ? Task : -1); - IsTemporary[Task] = !SaveTemps && options::cache_dir.empty(); - if (options::cache_dir.empty()) - return llvm::make_unique(OutputName); - - return llvm::make_unique( - options::cache_dir, - [&OutputName](std::string EntryPath) { OutputName = EntryPath; }); + int FD; + std::error_code EC = + sys::fs::openFileForWrite(Filenames[Task], FD, sys::fs::F_None); + if (EC) + message(LDPL_FATAL, "Could not open file: %s", EC.message().c_str()); + return llvm::make_unique( + llvm::make_unique(FD, true)); }; - check(Lto->run(AddOutput)); + auto AddFile = [&](size_t Task, StringRef Path) { Filenames[Task] = Path; }; + + NativeObjectCache Cache; + if (!options::cache_dir.empty()) + Cache = localCache(options::cache_dir, AddFile); + + check(Lto->run(AddStream, Cache)); if (options::TheOutputType == options::OT_DISABLE || options::TheOutputType == options::OT_BC_ONLY) Index: tools/llvm-lto2/llvm-lto2.cpp =================================================================== --- tools/llvm-lto2/llvm-lto2.cpp +++ tools/llvm-lto2/llvm-lto2.cpp @@ -95,22 +95,6 @@ return T(); } -namespace { -// Define the LTOOutput handling -class LTOOutput : public lto::NativeObjectOutput { - std::string Path; - -public: - LTOOutput(std::string Path) : Path(std::move(Path)) {} - std::unique_ptr getStream() override { - std::error_code EC; - auto S = llvm::make_unique(Path, EC, sys::fs::F_None); - check(EC, Path); - return std::move(S); - } -}; -} - int main(int argc, char **argv) { InitializeAllTargets(); InitializeAllTargetMCs(); @@ -203,23 +187,28 @@ if (HasErrors) return 1; - auto AddOutput = - [&](size_t Task) -> std::unique_ptr { + auto AddStream = + [&](size_t Task) -> std::unique_ptr { std::string Path = OutputFilename + "." + utostr(Task); - if (CacheDir.empty()) - return llvm::make_unique(std::move(Path)); - - return llvm::make_unique( - CacheDir, [Path](std::string EntryPath) { - // Load the entry from the cache now. 
-          auto ReloadedBufferOrErr = MemoryBuffer::getFile(EntryPath);
-          if (auto EC = ReloadedBufferOrErr.getError())
-            report_fatal_error(Twine("Can't reload cached file '") + EntryPath +
-                               "': " + EC.message() + "\n");
-
-          *LTOOutput(Path).getStream() << (*ReloadedBufferOrErr)->getBuffer();
-        });
+
+    std::error_code EC;
+    auto S = llvm::make_unique<raw_fd_ostream>(Path, EC, sys::fs::F_None);
+    check(EC, Path);
+    return llvm::make_unique<lto::NativeObjectStream>(std::move(S));
   };
 
-  check(Lto.run(AddOutput), "LTO::run failed");
+  auto AddFile = [&](size_t Task, StringRef Path) {
+    auto ReloadedBufferOrErr = MemoryBuffer::getFile(Path);
+    if (auto EC = ReloadedBufferOrErr.getError())
+      report_fatal_error(Twine("Can't reload cached file '") + Path + "': " +
+                         EC.message() + "\n");
+
+    *AddStream(Task)->OS << (*ReloadedBufferOrErr)->getBuffer();
+  };
+
+  NativeObjectCache Cache;
+  if (!CacheDir.empty())
+    Cache = localCache(CacheDir, AddFile);
+
+  check(Lto.run(AddStream, Cache), "LTO::run failed");
 }
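
For reference, here is a minimal end-to-end sketch (not part of the patch) of how a linker client might drive the new interface. The runLink wrapper, the "out.<Task>.o" naming scheme, and the errs() logging in AddFile are illustrative assumptions; the authoritative clients are the gold-plugin and llvm-lto2 changes above.

// Hypothetical client wiring for the new AddStreamFn/NativeObjectCache API.
#include "llvm/ADT/STLExtras.h"
#include "llvm/LTO/Caching.h"
#include "llvm/LTO/LTO.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/raw_ostream.h"
#include <string>

using namespace llvm;
using namespace lto;

// Runs the link, writing each task's object to "out.<Task>.o" (an assumed
// naming scheme) and reusing cached objects from CacheDir if it is non-empty.
static Error runLink(LTO &Lto, const std::string &CacheDir) {
  // Called for every task that actually needs to be code-generated.
  AddStreamFn AddStream =
      [](unsigned Task) -> std::unique_ptr<NativeObjectStream> {
    std::string Path = "out." + std::to_string(Task) + ".o";
    std::error_code EC;
    auto OS = llvm::make_unique<raw_fd_ostream>(Path, EC, sys::fs::F_None);
    if (EC)
      report_fatal_error("cannot open " + Path + ": " + EC.message());
    return llvm::make_unique<NativeObjectStream>(std::move(OS));
  };

  // Called instead of AddStream when the cache already holds the object; a
  // real linker would record Path as the output file for this task.
  AddFileFn AddFile = [](unsigned Task, StringRef Path) {
    errs() << "task " << Task << " reused from cache: " << Path << "\n";
  };

  // A default-constructed NativeObjectCache disables caching entirely.
  NativeObjectCache Cache;
  if (!CacheDir.empty())
    Cache = localCache(CacheDir, AddFile);

  return Lto.run(AddStream, Cache);
}

Note that the stream returned by localCache commits the temporary file to the cache and invokes AddFile from its destructor, so the output only becomes visible once the NativeObjectStream is destroyed.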
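
The consumer-side protocol, mirroring the runThinLTOBackendThread change above, can also be sketched as follows. This reuses the includes and using-directives from the previous sketch; emitNativeObject, the placeholder key, and the literal written to the stream are stand-ins for thinBackend, computeCacheKey, and real codegen.

// Backend-side view of the cache contract: query once per task, treat a null
// AddStreamFn as a hit (already reported via the cache's AddFileFn), and call
// the returned callback at most once on a miss.
static Error emitNativeObject(unsigned Task, AddStreamFn AddStream,
                              NativeObjectCache Cache) {
  auto ProduceContent = [&](AddStreamFn AS) -> Error {
    std::unique_ptr<NativeObjectStream> Stream = AS(Task);
    *Stream->OS << "<object bytes>"; // codegen would write the object here
    return Error();                  // the entry is committed when Stream dies
  };

  // Without a cache, always stream the object directly to the client.
  if (!Cache)
    return ProduceContent(AddStream);

  std::string Key = "task-" + std::to_string(Task); // placeholder key
  if (AddStreamFn CacheAddStream = Cache(Task, Key))
    return ProduceContent(CacheAddStream);
  return Error(); // cache hit: nothing left to do for this task
}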