diff --git a/lldb/bindings/interface/SBTrace.i b/lldb/bindings/interface/SBTrace.i
--- a/lldb/bindings/interface/SBTrace.i
+++ b/lldb/bindings/interface/SBTrace.i
@@ -17,6 +17,8 @@
 
     const char *GetStartConfigurationHelp();
 
+    SBFileSpec SaveToDisk(SBError &error, const SBFileSpec &bundle_dir, bool compact = false);
+
     SBError Start(const SBStructuredData &configuration);
 
     SBError Start(const SBThread &thread, const SBStructuredData &configuration);
diff --git a/lldb/include/lldb/API/SBTrace.h b/lldb/include/lldb/API/SBTrace.h
--- a/lldb/include/lldb/API/SBTrace.h
+++ b/lldb/include/lldb/API/SBTrace.h
@@ -25,6 +25,28 @@
   static SBTrace LoadTraceFromFile(SBError &error, SBDebugger &debugger,
                                    const SBFileSpec &trace_description_file);
 
+  /// Save the trace to the specified directory, which will be created if
+  /// needed. This will also create a file \a <directory>/trace.json with the
+  /// main properties of the trace session, along with other files which
+  /// contain the actual trace data. The trace.json file can be used later as
+  /// input for the "trace load" command to load the trace in LLDB, or for the
+  /// method \a SBDebugger.LoadTraceFromFile().
+  ///
+  /// \param[out] error
+  ///   This will be set with an error in case of failures.
+  ///
+  /// \param[in] bundle_dir
+  ///   The directory where the trace files will be saved.
+  ///
+  /// \param[in] compact
+  ///   Try not to save to disk information irrelevant to the traced processes.
+  ///   Each trace plug-in implements this in a different fashion.
+  ///
+  /// \return
+  ///   A \a SBFileSpec pointing to the bundle description file.
+  SBFileSpec SaveToDisk(SBError &error, const SBFileSpec &bundle_dir,
+                        bool compact = false);
+
   /// \return
   ///   A description of the parameters to use for the \a SBTrace::Start
   ///   method, or \b null if the object is invalid.
diff --git a/lldb/include/lldb/Target/Trace.h b/lldb/include/lldb/Target/Trace.h
--- a/lldb/include/lldb/Target/Trace.h
+++ b/lldb/include/lldb/Target/Trace.h
@@ -56,21 +56,24 @@
   ///   A stream object to dump the information to.
   virtual void Dump(Stream *s) const = 0;
 
-  /// Save the trace of a live process to the specified directory, which
-  /// will be created if needed.
-  /// This will also create a a file \a <directory>/trace.json with the main
-  /// properties of the trace session, along with others files which contain
-  /// the actual trace data. The trace.json file can be used later as input
-  /// for the "trace load" command to load the trace in LLDB.
-  /// The process being trace is not a live process, return an error.
+  /// Save the trace to the specified directory, which will be created if
+  /// needed. This will also create a file \a <directory>/trace.json with the
+  /// main properties of the trace session, along with other files which
+  /// contain the actual trace data. The trace.json file can be used later as
+  /// input for the "trace load" command to load the trace in LLDB.
   ///
   /// \param[in] directory
   ///   The directory where the trace files will be saved.
   ///
+  /// \param[in] compact
+  ///   Try not to save to disk information irrelevant to the traced processes.
+  ///   Each trace plug-in implements this in a different fashion.
+  ///
   /// \return
-  ///   \a llvm::success if the operation was successful, or an \a llvm::Error
-  ///   otherwise.
-  virtual llvm::Error SaveLiveTraceToDisk(FileSpec directory) = 0;
+  ///   A \a FileSpec pointing to the bundle description file, or an \a
+  ///   llvm::Error otherwise.
+  virtual llvm::Expected<FileSpec> SaveToDisk(FileSpec directory,
+                                              bool compact) = 0;
 
   /// Find a trace plug-in using JSON data.
   ///
diff --git a/lldb/packages/Python/lldbsuite/test/tools/intelpt/intelpt_testcase.py b/lldb/packages/Python/lldbsuite/test/tools/intelpt/intelpt_testcase.py
--- a/lldb/packages/Python/lldbsuite/test/tools/intelpt/intelpt_testcase.py
+++ b/lldb/packages/Python/lldbsuite/test/tools/intelpt/intelpt_testcase.py
@@ -134,12 +134,24 @@
             command += " " + str(thread.GetIndexID())
         self.expect(command, error=error, substrs=substrs)
 
-    def traceLoad(self, traceDescriptionFilePath="trace.json", error=False, substrs=None):
+    def traceLoad(self, traceDescriptionFilePath, error=False, substrs=None):
        if self.USE_SB_API:
            traceDescriptionFile = lldb.SBFileSpec(traceDescriptionFilePath, True)
            loadTraceError = lldb.SBError()
-           _trace = self.dbg.LoadTraceFromFile(loadTraceError, traceDescriptionFile)
+           self.dbg.LoadTraceFromFile(loadTraceError, traceDescriptionFile)
            self.assertSBError(loadTraceError, error)
        else:
            command = f"trace load -v {traceDescriptionFilePath}"
            self.expect(command, error=error, substrs=substrs)
+
+    def traceSave(self, traceBundleDir, compact=False, error=False, substrs=None):
+        if self.USE_SB_API:
+            save_error = lldb.SBError()
+            self.target().GetTrace().SaveToDisk(
+                save_error, lldb.SBFileSpec(traceBundleDir), compact)
+            self.assertSBError(save_error, error)
+        else:
+            command = f"process trace save {traceBundleDir}"
+            if compact:
+                command += " -c"
+            self.expect(command, error=error, substrs=substrs)
diff --git a/lldb/source/API/SBTrace.cpp b/lldb/source/API/SBTrace.cpp
--- a/lldb/source/API/SBTrace.cpp
+++ b/lldb/source/API/SBTrace.cpp
@@ -43,6 +43,24 @@
   return SBTrace(trace_or_err.get());
 }
 
+SBFileSpec SBTrace::SaveToDisk(SBError &error, const SBFileSpec &bundle_dir,
+                               bool compact) {
+  LLDB_INSTRUMENT_VA(this, error, bundle_dir, compact);
+
+  error.Clear();
+  SBFileSpec file_spec;
+
+  if (!m_opaque_sp)
+    error.SetErrorString("error: invalid trace");
+  else if (Expected<FileSpec> desc_file =
+               m_opaque_sp->SaveToDisk(bundle_dir.ref(), compact))
+    file_spec.SetFileSpec(*desc_file);
+  else
+    error.SetErrorString(llvm::toString(desc_file.takeError()).c_str());
+
+  return file_spec;
+}
+
 const char *SBTrace::GetStartConfigurationHelp() {
   LLDB_INSTRUMENT_VA(this);
   return m_opaque_sp ? m_opaque_sp->GetStartConfigurationHelp() : nullptr;
diff --git a/lldb/source/Commands/CommandObjectProcess.cpp b/lldb/source/Commands/CommandObjectProcess.cpp
--- a/lldb/source/Commands/CommandObjectProcess.cpp
+++ b/lldb/source/Commands/CommandObjectProcess.cpp
@@ -579,14 +579,14 @@
         }
       }
     }
-    
+
     Target *target = m_exe_ctx.GetTargetPtr();
     BreakpointIDList run_to_bkpt_ids;
     // Don't pass an empty run_to_breakpoint list, as Verify will look for the
    // default breakpoint.
    if (m_options.m_run_to_bkpt_args.GetArgumentCount() > 0)
      CommandObjectMultiwordBreakpoint::VerifyBreakpointOrLocationIDs(
-          m_options.m_run_to_bkpt_args, target, result, &run_to_bkpt_ids, 
+          m_options.m_run_to_bkpt_args, target, result, &run_to_bkpt_ids,
          BreakpointName::Permissions::disablePerm);
    if (!result.Succeeded()) {
      return false;
    }
@@ -604,7 +604,7 @@
    std::vector<break_id_t> bkpts_disabled;
    std::vector<BreakpointID> locs_disabled;
    if (num_run_to_bkpt_ids != 0) {
-      // Go through the ID's specified, and separate the breakpoints from are 
+      // Go through the ID's specified, and separate the breakpoints from are
      // the breakpoint.location specifications since the latter require
      // special handling.  We also figure out whether there's at least one
      // specifier in the set that is enabled.
@@ -613,23 +613,22 @@
      std::unordered_set<break_id_t> bkpts_with_locs_seen;
      BreakpointIDList with_locs;
      bool any_enabled = false;
-      
+
      for (size_t idx = 0; idx < num_run_to_bkpt_ids; idx++) {
        BreakpointID bkpt_id = run_to_bkpt_ids.GetBreakpointIDAtIndex(idx);
        break_id_t bp_id = bkpt_id.GetBreakpointID();
        break_id_t loc_id = bkpt_id.GetLocationID();
-        BreakpointSP bp_sp 
-            = bkpt_list.FindBreakpointByID(bp_id);
-        // Note, VerifyBreakpointOrLocationIDs checks for existence, so we 
+        BreakpointSP bp_sp = bkpt_list.FindBreakpointByID(bp_id);
+        // Note, VerifyBreakpointOrLocationIDs checks for existence, so we
        // don't need to do it again here.
        if (bp_sp->IsEnabled()) {
          if (loc_id == LLDB_INVALID_BREAK_ID) {
-            // A breakpoint (without location) was specified. Make sure that 
+            // A breakpoint (without location) was specified. Make sure that
            // at least one of the locations is enabled.
            size_t num_locations = bp_sp->GetNumLocations();
            for (size_t loc_idx = 0; loc_idx < num_locations; loc_idx++) {
-              BreakpointLocationSP loc_sp 
-                  = bp_sp->GetLocationAtIndex(loc_idx);
+              BreakpointLocationSP loc_sp =
+                  bp_sp->GetLocationAtIndex(loc_idx);
              if (loc_sp->IsEnabled()) {
                any_enabled = true;
                break;
@@ -641,7 +640,7 @@
            if (loc_sp->IsEnabled())
              any_enabled = true;
          }
-        
+
        // Then sort the bp & bp.loc entries for later use:
        if (bkpt_id.GetLocationID() == LLDB_INVALID_BREAK_ID)
          bkpts_seen.insert(bkpt_id.GetBreakpointID());
@@ -653,14 +652,14 @@
      }
      // Do all the error checking here so once we start disabling we don't
      // have to back out half-way through.
-      
+
      // Make sure at least one of the specified breakpoints is enabled.
      if (!any_enabled) {
        result.AppendError("at least one of the continue-to breakpoints must "
                           "be enabled.");
        return false;
      }
-      
+
      // Also, if you specify BOTH a breakpoint and one of it's locations,
      // we flag that as an error, since it won't do what you expect, the
      // breakpoint directive will mean "run to all locations", which is not
@@ -671,7 +670,7 @@
                                    "one of its locations: {0}", bp_id);
        }
      }
-      
+
      // Now go through the breakpoints in the target, disabling all the ones
      // that the user didn't mention:
      for (BreakpointSP bp_sp : bkpt_list.Breakpoints()) {
@@ -695,8 +694,8 @@
            BreakpointLocationSP loc_sp = bp_sp->GetLocationAtIndex(loc_idx);
            tmp_id.SetBreakpointLocationID(loc_idx);
            size_t position = 0;
-            if (!with_locs.FindBreakpointID(tmp_id, &position)
-                && loc_sp->IsEnabled()) {
+            if (!with_locs.FindBreakpointID(tmp_id, &position) &&
+                loc_sp->IsEnabled()) {
              locs_disabled.push_back(tmp_id);
              loc_sp->SetEnabled(false);
            }
@@ -723,20 +722,20 @@
      Status error;
      // For now we can only do -b with synchronous:
      bool old_sync = GetDebugger().GetAsyncExecution();
-      
+
      if (run_to_bkpt_ids.GetSize() != 0) {
        GetDebugger().SetAsyncExecution(false);
        synchronous_execution = true;
-      } 
+      }
      if (synchronous_execution)
        error = process->ResumeSynchronous(&stream);
      else
        error = process->Resume();
-      
+
      if (run_to_bkpt_ids.GetSize() != 0) {
        GetDebugger().SetAsyncExecution(old_sync);
-      } 
-      
+      }
+
      // Now re-enable the breakpoints we disabled:
      BreakpointList &bkpt_list = target->GetBreakpointList();
      for (break_id_t bp_id : bkpts_disabled) {
@@ -745,11 +744,11 @@
          bp_sp->SetEnabled(true);
      }
      for (const BreakpointID &bkpt_id : locs_disabled) {
-        BreakpointSP bp_sp 
-            = bkpt_list.FindBreakpointByID(bkpt_id.GetBreakpointID());
+        BreakpointSP bp_sp =
+            bkpt_list.FindBreakpointByID(bkpt_id.GetBreakpointID());
        if (bp_sp) {
-          BreakpointLocationSP loc_sp 
-              = bp_sp->FindLocationByID(bkpt_id.GetLocationID());
+          BreakpointLocationSP loc_sp =
+              bp_sp->FindLocationByID(bkpt_id.GetLocationID());
          if (loc_sp)
            loc_sp->SetEnabled(true);
        }
@@ -1731,7 +1730,7 @@
  bool DoExecute(Args &signal_args, CommandReturnObject &result) override {
    Target &target = GetSelectedOrDummyTarget();
 
-    // Any signals that are being set should be added to the Target's 
+    // Any signals that are being set should be added to the Target's
    // DummySignals so they will get applied on rerun, etc.
    // If we have a process, however, we can do a more accurate job of vetting
    // the user's options.
@@ -1761,9 +1760,9 @@
                             "true or false.\n");
        return false;
      }
-      
-    bool no_actions = (stop_action == -1 && pass_action == -1 
-        && notify_action == -1);
+
+    bool no_actions =
+        (stop_action == -1 && pass_action == -1 && notify_action == -1);
    if (m_options.only_target_values && !no_actions) {
      result.AppendError("-t is for reporting, not setting, target values.");
      return false;
    }
@@ -1832,9 +1831,9 @@
    }
    auto set_lazy_bool = [] (int action) -> LazyBool {
      LazyBool lazy;
-      if (action == -1) 
+      if (action == -1)
        lazy = eLazyBoolCalculate;
-      else if (action) 
+      else if (action)
        lazy = eLazyBoolYes;
      else
        lazy = eLazyBoolNo;
@@ -1876,8 +1875,7 @@
      PrintSignalInformation(result.GetOutputStream(), signal_args,
                             num_signals_set, signals_sp);
    else
-      target.PrintDummySignals(result.GetOutputStream(),
-          signal_args);
+      target.PrintDummySignals(result.GetOutputStream(), signal_args);
 
    if (num_signals_set > 0)
      result.SetStatus(eReturnStatusSuccessFinishResult);
@@ -1927,10 +1925,8 @@
      const int short_option = m_getopt_table[option_idx].val;
 
      switch (short_option) {
-
-      case 'd': {
-        m_directory.SetFile(option_arg, FileSpec::Style::native);
-        FileSystem::Instance().Resolve(m_directory);
+      case 'c': {
+        m_compact = true;
        break;
      }
      default:
@@ -1939,16 +1935,19 @@
      return error;
    }
 
-    void OptionParsingStarting(ExecutionContext *execution_context) override{};
+    void OptionParsingStarting(ExecutionContext *execution_context) override {
+      m_compact = false;
+    };
 
    llvm::ArrayRef<OptionDefinition> GetDefinitions() override {
      return llvm::makeArrayRef(g_process_trace_save_options);
    };
 
-    FileSpec m_directory;
+    bool m_compact;
  };
 
  Options *GetOptions() override { return &m_options; }
+
  CommandObjectProcessTraceSave(CommandInterpreter &interpreter)
      : CommandObjectParsed(
            interpreter, "process trace save",
@@ -1959,23 +1958,47 @@
            "contain the actual trace data. The trace.json file can be used "
            "later as input for the \"trace load\" command to load the trace "
            "in LLDB",
-            "process trace save [<cmd-options>]",
+            "process trace save [<cmd-options>] <directory>",
            eCommandRequiresProcess | eCommandTryTargetAPILock |
                eCommandProcessMustBeLaunched | eCommandProcessMustBePaused |
-                eCommandProcessMustBeTraced) {}
+                eCommandProcessMustBeTraced) {
+    CommandArgumentData bundle_dir{eArgTypeDirectoryName, eArgRepeatPlain};
+    m_arguments.push_back({bundle_dir});
+  }
+
+  void
+  HandleArgumentCompletion(CompletionRequest &request,
+                           OptionElementVector &opt_element_vector) override {
+    CommandCompletions::InvokeCommonCompletionCallbacks(
+        GetCommandInterpreter(), CommandCompletions::eDiskFileCompletion,
+        request, nullptr);
+  }
 
  ~CommandObjectProcessTraceSave() override = default;
 
protected:
  bool DoExecute(Args &command, CommandReturnObject &result) override {
+    if (command.size() != 1) {
+      result.AppendError("a single path to a directory where the trace bundle "
+                         "will be created is required");
+      return false;
+    }
+
+    FileSpec bundle_dir(command[0].ref());
+    FileSystem::Instance().Resolve(bundle_dir);
+
    ProcessSP process_sp = m_exe_ctx.GetProcessSP();
 
    TraceSP trace_sp = process_sp->GetTarget().GetTrace();
 
-    if (llvm::Error err = trace_sp->SaveLiveTraceToDisk(m_options.m_directory))
-      result.AppendError(toString(std::move(err)));
-    else
+    if (llvm::Expected<FileSpec> desc_file =
+            trace_sp->SaveToDisk(bundle_dir, m_options.m_compact)) {
+      result.AppendMessageWithFormatv(
+          "Trace bundle description file written to: {0}", *desc_file);
      result.SetStatus(eReturnStatusSuccessFinishResult);
+    } else {
+      result.AppendError(toString(desc_file.takeError()));
+    }
 
    return result.Succeeded();
  }
diff --git a/lldb/source/Commands/CommandObjectTrace.cpp b/lldb/source/Commands/CommandObjectTrace.cpp
--- a/lldb/source/Commands/CommandObjectTrace.cpp
+++ b/lldb/source/Commands/CommandObjectTrace.cpp
@@ -75,11 +75,19 @@
      : CommandObjectParsed(
            interpreter, "trace load",
            "Load a post-mortem processor trace session from a trace bundle.",
-            "trace load") {
-    CommandArgumentData session_file_arg{eArgTypePath, eArgRepeatPlain};
+            "trace load <trace_description_file>") {
+    CommandArgumentData session_file_arg{eArgTypeFilename, eArgRepeatPlain};
    m_arguments.push_back({session_file_arg});
  }
 
+  void
+  HandleArgumentCompletion(CompletionRequest &request,
+                           OptionElementVector &opt_element_vector) override {
+    CommandCompletions::InvokeCommonCompletionCallbacks(
+        GetCommandInterpreter(), CommandCompletions::eDiskFileCompletion,
+        request, nullptr);
+  }
+
  ~CommandObjectTraceLoad() override = default;
 
  Options *GetOptions() override { return &m_options; }
diff --git a/lldb/source/Commands/Options.td b/lldb/source/Commands/Options.td
--- a/lldb/source/Commands/Options.td
+++ b/lldb/source/Commands/Options.td
@@ -784,11 +784,11 @@
}
 
let Command = "process trace save" in {
-  def process_trace_save_directory: Option<"directory", "d">,
+  def process_trace_save_compact: Option<"compact", "c">,
    Group<1>,
-    Arg<"Value">, Required,
-    Desc<"The directory where the trace will be saved."
-         "It will be created if it does not exist.">;
+    Desc<"Try not to save to disk information irrelevant to the traced "
+         "processes. Each trace plug-in implements this in a different "
+         "fashion.">;
}
 
let Command = "script import" in {
diff --git a/lldb/source/Plugins/Trace/intel-pt/PerfContextSwitchDecoder.h b/lldb/source/Plugins/Trace/intel-pt/PerfContextSwitchDecoder.h
--- a/lldb/source/Plugins/Trace/intel-pt/PerfContextSwitchDecoder.h
+++ b/lldb/source/Plugins/Trace/intel-pt/PerfContextSwitchDecoder.h
@@ -14,6 +14,7 @@
 
 #include "llvm/Support/Error.h"
 
+#include <set>
 #include <vector>
 
 namespace lldb_private {
@@ -139,6 +140,10 @@
                             lldb::cpu_id_t cpu_id,
                             const LinuxPerfZeroTscConversion &tsc_conversion);
 
+llvm::Expected<std::vector<uint8_t>>
+FilterProcessesFromContextSwitchTrace(llvm::ArrayRef<uint8_t> data,
+                                      const std::set<lldb::pid_t> &pids);
+
} // namespace trace_intel_pt
} // namespace lldb_private
diff --git a/lldb/source/Plugins/Trace/intel-pt/PerfContextSwitchDecoder.cpp b/lldb/source/Plugins/Trace/intel-pt/PerfContextSwitchDecoder.cpp
--- a/lldb/source/Plugins/Trace/intel-pt/PerfContextSwitchDecoder.cpp
+++ b/lldb/source/Plugins/Trace/intel-pt/PerfContextSwitchDecoder.cpp
@@ -16,8 +16,13 @@
/// non-linux platforms.
/// \{
#define PERF_RECORD_MISC_SWITCH_OUT (1 << 13)
-#define PERF_RECORD_MAX 19
+
+#define PERF_RECORD_LOST 2
+#define PERF_RECORD_THROTTLE 5
+#define PERF_RECORD_UNTHROTTLE 6
+#define PERF_RECORD_LOST_SAMPLES 13
 #define PERF_RECORD_SWITCH_CPU_WIDE 15
+#define PERF_RECORD_MAX 19
 
struct perf_event_header {
  uint32_t type;
@@ -54,6 +59,11 @@
  bool IsContextSwitchRecord() const {
    return type == PERF_RECORD_SWITCH_CPU_WIDE;
  }
+
+  bool IsErrorRecord() const {
+    return type == PERF_RECORD_LOST || type == PERF_RECORD_THROTTLE ||
+           type == PERF_RECORD_UNTHROTTLE || type == PERF_RECORD_LOST_SAMPLES;
+  }
};
/// \}
 
@@ -286,3 +296,36 @@
 
  return executions;
}
+
+Expected<std::vector<uint8_t>>
+lldb_private::trace_intel_pt::FilterProcessesFromContextSwitchTrace(
+    llvm::ArrayRef<uint8_t> data, const std::set<lldb::pid_t> &pids) {
+  size_t offset = 0;
+  std::vector<uint8_t> out_data;
+
+  while (offset < data.size()) {
+    const perf_event_header &perf_record =
+        *reinterpret_cast<const perf_event_header *>(data.data() + offset);
+    if (Error err = perf_record.SanityCheck())
+      return std::move(err);
+    bool should_copy = false;
+    if (perf_record.IsContextSwitchRecord()) {
+      const PerfContextSwitchRecord &context_switch_record =
+          *reinterpret_cast<const PerfContextSwitchRecord *>(data.data() +
+                                                             offset);
+      if (pids.count(context_switch_record.pid))
+        should_copy = true;
+    } else if (perf_record.IsErrorRecord()) {
+      should_copy = true;
+    }
+
+    if (should_copy) {
+      for (size_t i = 0; i < perf_record.size; i++) {
+        out_data.push_back(data[offset + i]);
+      }
+    }
+
+    offset += perf_record.size;
+  }
+  return out_data;
+}
diff --git a/lldb/source/Plugins/Trace/intel-pt/TraceIntelPT.h b/lldb/source/Plugins/Trace/intel-pt/TraceIntelPT.h
--- a/lldb/source/Plugins/Trace/intel-pt/TraceIntelPT.h
+++ b/lldb/source/Plugins/Trace/intel-pt/TraceIntelPT.h
@@ -25,7 +25,8 @@
public:
  void Dump(Stream *s) const override;
 
-  llvm::Error SaveLiveTraceToDisk(FileSpec directory) override;
+  llvm::Expected<FileSpec> SaveToDisk(FileSpec directory,
+                                      bool compact) override;
 
  ~TraceIntelPT() override = default;
diff --git a/lldb/source/Plugins/Trace/intel-pt/TraceIntelPT.cpp b/lldb/source/Plugins/Trace/intel-pt/TraceIntelPT.cpp
--- a/lldb/source/Plugins/Trace/intel-pt/TraceIntelPT.cpp
+++ b/lldb/source/Plugins/Trace/intel-pt/TraceIntelPT.cpp
@@ -55,9 +55,9 @@
 
void TraceIntelPT::Dump(Stream *s) const {}
 
-llvm::Error TraceIntelPT::SaveLiveTraceToDisk(FileSpec directory) {
+Expected<FileSpec> TraceIntelPT::SaveToDisk(FileSpec directory, bool compact) {
  RefreshLiveProcessState();
-  return TraceIntelPTBundleSaver().SaveToDisk(*this, directory);
+  return TraceIntelPTBundleSaver().SaveToDisk(*this, directory, compact);
}
 
Expected<TraceSP> TraceIntelPT::CreateInstanceForTraceBundle(
diff --git a/lldb/source/Plugins/Trace/intel-pt/TraceIntelPTBundleSaver.h b/lldb/source/Plugins/Trace/intel-pt/TraceIntelPTBundleSaver.h
--- a/lldb/source/Plugins/Trace/intel-pt/TraceIntelPTBundleSaver.h
+++ b/lldb/source/Plugins/Trace/intel-pt/TraceIntelPTBundleSaver.h
@@ -31,10 +31,16 @@
  /// \param[in] directory
  ///   The directory where the trace bundle will be created.
  ///
+  /// \param[in] compact
+  ///   Filter out information irrelevant to the traced processes in the
+  ///   context switch and intel pt traces when using per-cpu mode. This
+  ///   effectively reduces the size of those traces.
+  ///
  /// \return
-  ///   \a llvm::success if the operation was successful, or an \a llvm::Error
-  ///   otherwise.
-  llvm::Error SaveToDisk(TraceIntelPT &trace_ipt, FileSpec directory);
+  ///   A \a FileSpec pointing to the bundle description file, or an \a
+  ///   llvm::Error otherwise.
+  llvm::Expected<FileSpec> SaveToDisk(TraceIntelPT &trace_ipt,
+                                      FileSpec directory, bool compact);
};
 
} // namespace trace_intel_pt
diff --git a/lldb/source/Plugins/Trace/intel-pt/TraceIntelPTBundleSaver.cpp b/lldb/source/Plugins/Trace/intel-pt/TraceIntelPTBundleSaver.cpp
--- a/lldb/source/Plugins/Trace/intel-pt/TraceIntelPTBundleSaver.cpp
+++ b/lldb/source/Plugins/Trace/intel-pt/TraceIntelPTBundleSaver.cpp
@@ -7,8 +7,11 @@
//===----------------------------------------------------------------------===//
 
#include "TraceIntelPTBundleSaver.h"
+
+#include "PerfContextSwitchDecoder.h"
#include "TraceIntelPT.h"
#include "TraceIntelPTJSONStructs.h"
+
#include "lldb/Core/Module.h"
#include "lldb/Core/ModuleList.h"
#include "lldb/Target/Process.h"
@@ -30,6 +33,13 @@
using namespace lldb_private::trace_intel_pt;
using namespace llvm;
 
+/// Strip the \p directory component from the given \p path. It assumes that \p
+/// directory is a prefix of \p path.
+static std::string GetRelativePath(const FileSpec &directory,
+                                   const FileSpec &path) {
+  return path.GetPath().substr(directory.GetPath().size() + 1);
+}
+
/// Write a stream of bytes from \p data to the given output file.
/// It creates or overwrites the output file, but not append.
static llvm::Error WriteBytesToDisk(FileSpec &output_file,
@@ -57,11 +67,11 @@
///   The directory where the JSON file will be saved.
///
/// \return
-///   \a llvm::Success if the operation was successful, or an \a llvm::Error
-///   otherwise.
-static llvm::Error
+///   A \a FileSpec pointing to the bundle description file, or an \a
+///   llvm::Error otherwise.
+static Expected<FileSpec>
SaveTraceBundleDescription(const llvm::json::Value &trace_bundle_description,
-                          const FileSpec &directory) {
+                           const FileSpec &directory) {
  FileSpec trace_path = directory;
  trace_path.AppendPathComponent("trace.json");
  std::ofstream os(trace_path.GetPath());
@@ -71,7 +81,7 @@
    return createStringError(inconvertibleErrorCode(),
                             formatv("couldn't write to the file {0}",
                                     trace_path.GetPath().c_str()));
-  return Error::success();
+  return trace_path;
}
 
/// Build the threads sub-section of the trace bundle description file.
@@ -106,7 +116,7 @@
    if (trace_sp->GetTracedCpus().empty()) {
      FileSpec output_file = threads_dir;
      output_file.AppendPathComponent(std::to_string(tid) + ".intelpt_trace");
-      json_thread.ipt_trace = output_file.GetPath();
+      json_thread.ipt_trace = GetRelativePath(directory, output_file);
 
      llvm::Error err = process.GetTarget().GetTrace()->OnThreadBinaryDataRead(
          tid, IntelPTDataKinds::kIptTrace,
@@ -122,8 +132,68 @@
  return json_threads;
}
 
+/// \return
+///   an \a llvm::Error in case of failures, \a None if the trace is not
+///   written to disk because the trace is empty and the \p compact flag is
+///   present, or the FileSpec of the trace file on disk.
+static Expected<Optional<FileSpec>>
+WriteContextSwitchTrace(TraceIntelPT &trace_ipt, lldb::cpu_id_t cpu_id,
+                        const FileSpec &cpus_dir, bool compact) {
+  FileSpec output_context_switch_trace = cpus_dir;
+  output_context_switch_trace.AppendPathComponent(
+      std::to_string(cpu_id) + ".perf_context_switch_trace");
+
+  bool should_skip = false;
+
+  Error err = trace_ipt.OnCpuBinaryDataRead(
+      cpu_id, IntelPTDataKinds::kPerfContextSwitchTrace,
+      [&](llvm::ArrayRef<uint8_t> data) -> llvm::Error {
+        if (!compact)
+          return WriteBytesToDisk(output_context_switch_trace, data);
+
+        std::set<lldb::pid_t> pids;
+        for (Process *process : trace_ipt.GetAllProcesses())
+          pids.insert(process->GetID());
+
+        Expected<std::vector<uint8_t>> compact_context_switch_trace =
+            FilterProcessesFromContextSwitchTrace(data, pids);
+        if (!compact_context_switch_trace)
+          return compact_context_switch_trace.takeError();
+
+        if (compact_context_switch_trace->empty()) {
+          should_skip = true;
+          return Error::success();
+        }
+
+        return WriteBytesToDisk(output_context_switch_trace,
+                                *compact_context_switch_trace);
+      });
+  if (err)
+    return std::move(err);
+
+  if (should_skip)
+    return None;
+  return output_context_switch_trace;
+}
+
+static Expected<FileSpec> WriteIntelPTTrace(TraceIntelPT &trace_ipt,
+                                            lldb::cpu_id_t cpu_id,
+                                            const FileSpec &cpus_dir) {
+  FileSpec output_trace = cpus_dir;
+  output_trace.AppendPathComponent(std::to_string(cpu_id) + ".intelpt_trace");
+
+  Error err = trace_ipt.OnCpuBinaryDataRead(
+      cpu_id, IntelPTDataKinds::kIptTrace,
+      [&](llvm::ArrayRef<uint8_t> data) -> llvm::Error {
+        return WriteBytesToDisk(output_trace, data);
+      });
+  if (err)
+    return std::move(err);
+  return output_trace;
+}
+
static llvm::Expected<llvm::Optional<std::vector<JSONCpu>>>
-BuildCpusSection(TraceIntelPT &trace_ipt, FileSpec directory) {
+BuildCpusSection(TraceIntelPT &trace_ipt, FileSpec directory, bool compact) {
  if (trace_ipt.GetTracedCpus().empty())
    return None;
 
@@ -135,36 +205,21 @@
  for (lldb::cpu_id_t cpu_id : trace_ipt.GetTracedCpus()) {
    JSONCpu json_cpu;
    json_cpu.id = cpu_id;
+    Expected<Optional<FileSpec>> context_switch_trace_path =
+        WriteContextSwitchTrace(trace_ipt, cpu_id, cpus_dir, compact);
+    if (!context_switch_trace_path)
+      return context_switch_trace_path.takeError();
+    if (!*context_switch_trace_path)
+      continue;
+    json_cpu.context_switch_trace =
+        GetRelativePath(directory, **context_switch_trace_path);
 
-    {
-      FileSpec output_trace = cpus_dir;
-      output_trace.AppendPathComponent(std::to_string(cpu_id) +
-                                       ".intelpt_trace");
-      json_cpu.ipt_trace = output_trace.GetPath();
-
-      llvm::Error err = trace_ipt.OnCpuBinaryDataRead(
-          cpu_id, IntelPTDataKinds::kIptTrace,
-          [&](llvm::ArrayRef<uint8_t> data) -> llvm::Error {
-            return WriteBytesToDisk(output_trace, data);
-          });
-      if (err)
-        return std::move(err);
-    }
-
-    {
-      FileSpec output_context_switch_trace = cpus_dir;
-      output_context_switch_trace.AppendPathComponent(
-          std::to_string(cpu_id) + ".perf_context_switch_trace");
-      json_cpu.context_switch_trace = output_context_switch_trace.GetPath();
+    if (Expected<FileSpec> ipt_trace_path =
+            WriteIntelPTTrace(trace_ipt, cpu_id, cpus_dir))
+      json_cpu.ipt_trace = GetRelativePath(directory, *ipt_trace_path);
+    else
+      return ipt_trace_path.takeError();
 
-      llvm::Error err = trace_ipt.OnCpuBinaryDataRead(
-          cpu_id, IntelPTDataKinds::kPerfContextSwitchTrace,
-          [&](llvm::ArrayRef<uint8_t> data) -> llvm::Error {
-            return WriteBytesToDisk(output_context_switch_trace, data);
-          });
-      if (err)
-        return std::move(err);
-    }
    json_cpus.push_back(std::move(json_cpu));
  }
  return json_cpus;
@@ -222,14 +277,14 @@
    path_to_copy_module.AppendPathComponent(system_path);
    sys::fs::create_directories(path_to_copy_module.GetDirectory().AsCString());
 
-    if (std::error_code ec = llvm::sys::fs::copy_file(
-            system_path, path_to_copy_module.GetPath()))
+    if (std::error_code ec =
+            llvm::sys::fs::copy_file(file, path_to_copy_module.GetPath()))
      return createStringError(
          inconvertibleErrorCode(),
          formatv("couldn't write to the file. {0}", ec.message()));
 
    json_modules.push_back(
-        JSONModule{system_path, path_to_copy_module.GetPath(),
+        JSONModule{system_path, GetRelativePath(directory, path_to_copy_module),
                   JSONUINT64{load_addr}, module_sp->GetUUID().GetAsString()});
  }
  return json_modules;
@@ -280,8 +335,9 @@
  return processes;
}
 
-Error TraceIntelPTBundleSaver::SaveToDisk(TraceIntelPT &trace_ipt,
-                                          FileSpec directory) {
+Expected<FileSpec> TraceIntelPTBundleSaver::SaveToDisk(TraceIntelPT &trace_ipt,
+                                                       FileSpec directory,
+                                                       bool compact) {
  if (std::error_code ec =
          sys::fs::create_directories(directory.GetPath().c_str()))
    return llvm::errorCodeToError(ec);
@@ -299,7 +355,7 @@
    return json_processes.takeError();
 
  Expected<Optional<std::vector<JSONCpu>>> json_cpus =
-      BuildCpusSection(trace_ipt, directory);
+      BuildCpusSection(trace_ipt, directory, compact);
  if (!json_cpus)
    return json_cpus.takeError();
diff --git a/lldb/test/API/commands/trace/TestTraceLoad.py b/lldb/test/API/commands/trace/TestTraceLoad.py
--- a/lldb/test/API/commands/trace/TestTraceLoad.py
+++ b/lldb/test/API/commands/trace/TestTraceLoad.py
@@ -20,6 +20,39 @@
                    substrs=["67911: [tsc=40450075477799536] 0x0000000000400bd7 addl $0x1, -0x4(%rbp)",
                             "m.out`bar() + 26 at multi_thread.cpp:20:6"])
 
+    @testSBAPIAndCommands
+    def testLoadCompactMultiCoreTrace(self):
+        src_dir = self.getSourceDir()
+        trace_description_file_path = os.path.join(src_dir, "intelpt-multi-core-trace", "trace.json")
+        self.traceLoad(traceDescriptionFilePath=trace_description_file_path, substrs=["intel-pt"])
+
+        self.expect("thread trace dump info 2", substrs=["Total number of continuous executions found: 153"])
+
+        # We'll save the trace in compact format.
+        compact_trace_bundle_dir = os.path.join(self.getBuildDir(), "intelpt-multi-core-trace-compact")
+        self.traceSave(compact_trace_bundle_dir, compact=True)
+
+        # We'll delete the previous target and make sure its trace object is deleted.
+        self.dbg.DeleteTarget(self.dbg.GetTargetAtIndex(0))
+        self.expect("thread trace dump instructions 2 -t", substrs=["error: invalid target"], error=True)
+
+        # We'll load the compact trace and make sure it works.
+        self.traceLoad(os.path.join(compact_trace_bundle_dir, "trace.json"), substrs=["intel-pt"])
+        self.expect("thread trace dump instructions 2 -t",
+                    substrs=["19522: [tsc=40450075478109270] (error) expected tracing enabled event",
+                             "m.out`foo() + 65 at multi_thread.cpp:12:21",
+                             "19520: [tsc=40450075477657246] 0x0000000000400ba7 jg 0x400bb3"])
+        self.expect("thread trace dump instructions 3 -t",
+                    substrs=["67911: [tsc=40450075477799536] 0x0000000000400bd7 addl $0x1, -0x4(%rbp)",
+                             "m.out`bar() + 26 at multi_thread.cpp:20:6"])
+
+        # This reduces the number of continuous executions to look at.
+        self.expect("thread trace dump info 2", substrs=["Total number of continuous executions found: 3"])
+
+        # We clean up for the next run of this test.
+        self.dbg.DeleteTarget(self.dbg.GetTargetAtIndex(0))
+
+
    @testSBAPIAndCommands
    def testLoadMultiCoreTraceWithStringNumbers(self):
        src_dir = self.getSourceDir()
diff --git a/lldb/test/API/commands/trace/TestTraceSave.py b/lldb/test/API/commands/trace/TestTraceSave.py
--- a/lldb/test/API/commands/trace/TestTraceSave.py
+++ b/lldb/test/API/commands/trace/TestTraceSave.py
@@ -43,12 +43,12 @@
        self.expect("n")
 
        # Check the output when saving without providing the directory argument
-        self.expect("process trace save -d",
-            substrs=["error: last option requires an argument"],
+        self.expect("process trace save ",
+            substrs=["error: a single path to a directory where the trace bundle will be created is required"],
            error=True)
 
        # Check the output when saving to an invalid directory
-        self.expect("process trace save -d /",
+        self.expect("process trace save /",
            substrs=["error: couldn't write to the file"],
            error=True)
 
@@ -58,7 +58,7 @@
            substrs=["intel-pt"])
 
        # Check the output when not doing live tracing
-        self.expect("process trace save -d " +
+        self.expect("process trace save " +
            os.path.join(self.getBuildDir(), "intelpt-trace", "trace_not_live_dir"))
 
    def testSaveMultiCpuTrace(self):
@@ -76,7 +76,7 @@
        self.expect("b 7")
 
        output_dir = os.path.join(self.getBuildDir(), "intelpt-trace", "trace_save")
-        self.expect("process trace save -d " + output_dir)
+        self.expect("process trace save " + output_dir)
 
        def checkSessionBundle(session_file_path):
            with open(session_file_path) as session_file:
@@ -107,7 +107,7 @@
        output_dir = os.path.join(self.getBuildDir(), "intelpt-trace", "trace_save")
        self.expect("trace load " + os.path.join(output_dir, "trace.json"))
        output_copy_dir = os.path.join(self.getBuildDir(), "intelpt-trace", "copy_trace_save")
-        self.expect("process trace save -d " + output_copy_dir)
+        self.expect("process trace save " + output_copy_dir)
 
        # We now check that the new bundle is correct on its own
        copied_trace_session_file = os.path.join(output_copy_dir, "trace.json")
@@ -153,7 +153,7 @@
            last_ten_instructions = res.GetOutput()
 
        # Now, save the trace to <trace_copy_dir>
-        self.expect("process trace save -d " +
+        self.expect("process trace save " +
            os.path.join(self.getBuildDir(), "intelpt-trace", "trace_copy_dir"))
 
        # Load the trace just saved
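
Usage sketch (illustrative only, not part of this patch): the new functionality can be driven either with the updated command, e.g. "process trace save -c /tmp/compact-bundle", or through the SB API added above. The following Python snippet mirrors what the new traceLoad/traceSave test helpers do; the bundle paths are hypothetical placeholders.

import lldb

# Illustrative sketch; assumes an existing Intel PT trace bundle at
# ./intelpt-trace/trace.json (hypothetical path).
lldb.SBDebugger.Initialize()
debugger = lldb.SBDebugger.Create()

# Load a post-mortem trace bundle, as the updated traceLoad() helper does via
# SBDebugger.LoadTraceFromFile().
load_error = lldb.SBError()
trace = debugger.LoadTraceFromFile(
    load_error, lldb.SBFileSpec("intelpt-trace/trace.json", True))
assert load_error.Success(), load_error.GetCString()

# Re-save it as a compact bundle with the new SBTrace::SaveToDisk() API;
# compact=True corresponds to "process trace save -c <directory>".
save_error = lldb.SBError()
desc_file = trace.SaveToDisk(
    save_error, lldb.SBFileSpec("compact-bundle"), True)
if save_error.Success():
    print("bundle description file:", desc_file.GetDirectory(),
          desc_file.GetFilename())
else:
    print("error:", save_error.GetCString())

lldb.SBDebugger.Destroy(debugger)
lldb.SBDebugger.Terminate()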