diff --git a/llvm/docs/CommandGuide/llvm-mca.rst b/llvm/docs/CommandGuide/llvm-mca.rst
--- a/llvm/docs/CommandGuide/llvm-mca.rst
+++ b/llvm/docs/CommandGuide/llvm-mca.rst
@@ -128,7 +128,7 @@
   Specify the size of the load queue in the load/store unit emulated by the tool.
   By default, the tool assumes an unbound number of entries in the load queue.
   A value of zero for this flag is ignored, and the default load queue size is
-  used instead. 
+  used instead.
 
 .. option:: -squeue=
 
@@ -203,16 +203,18 @@
 .. option:: -bottleneck-analysis
 
   Print information about bottlenecks that affect the throughput. This analysis
-  can be expensive, and it is disabled by default. Bottlenecks are highlighted 
+  can be expensive, and it is disabled by default. Bottlenecks are highlighted
   in the summary view. Bottleneck analysis is currently not supported for
   processors with an in-order backend.
 
 .. option:: -json
 
-  Print the requested views in JSON format. The instructions and the processor
-  resources are printed as members of special top level JSON objects. The
-  individual views refer to them by index.
- 
+  Print the requested views in valid JSON format. The instructions and the
+  processor resources are printed as members of special top-level JSON objects.
+  The individual views refer to them by index. Note that not every view is
+  currently supported in JSON format; unsupported views (for example, the
+  bottleneck analysis) are simply not printed.
+
 .. option:: -disable-cb
 
   Force usage of the generic CustomBehaviour class rather than using the target
@@ -987,7 +989,7 @@
 Custom Behaviour
 """"""""""""""""""""""""""""""""""""
 Due to certain instructions not being expressed perfectly within their
-scheduling model, :program:`llvm-ma` isn't always able to simulate them
+scheduling model, :program:`llvm-mca` isn't always able to simulate them
 perfectly. Modifying the scheduling model isn't always a viable option though
 (maybe because the instruction is modeled incorrectly on purpose or the
 instruction's behaviour is quite complex). The
diff --git a/llvm/test/tools/llvm-mca/JSON/X86/views.s b/llvm/test/tools/llvm-mca/JSON/X86/views.s
--- a/llvm/test/tools/llvm-mca/JSON/X86/views.s
+++ b/llvm/test/tools/llvm-mca/JSON/X86/views.s
@@ -3,6 +3,10 @@
 # InstructionInfoView and SummaryView.
 
 # RUN: llvm-mca -mtriple=x86_64-unknown-unknown -mcpu=haswell --json --timeline-max-iterations=1 --timeline < %s | FileCheck %s
+# RUN: llvm-mca -mtriple=x86_64-unknown-unknown -mcpu=haswell --json --timeline-max-iterations=1 --timeline -o %t.json < %s
+# RUN: cat %t.json \
+# RUN:   | %python -c 'import json, sys; json.dump(json.loads(sys.stdin.read()), sys.stdout, sort_keys=True, indent=2)' \
+# RUN:   | FileCheck %s
 
 add %eax, %eax
 add %ebx, %ebx
@@ -10,29 +14,113 @@
 add %edx, %edx
 
 # CHECK:      {
-# CHECK-NEXT:   "Instructions": [
-# CHECK-NEXT:     "addl\t%eax, %eax",
-# CHECK-NEXT:     "addl\t%ebx, %ebx",
-# CHECK-NEXT:     "addl\t%ecx, %ecx",
-# CHECK-NEXT:     "addl\t%edx, %edx"
-# CHECK-NEXT:   ],
-# CHECK-NEXT:   "Resources": {
-# CHECK-NEXT:     "CPUName": "haswell",
-# CHECK-NEXT:     "Resources": [
-# CHECK-NEXT:       "HWDivider",
-# CHECK-NEXT:       "HWFPDivider",
-# CHECK-NEXT:       "HWPort0",
-# CHECK-NEXT:       "HWPort1",
-# CHECK-NEXT:       "HWPort2",
-# CHECK-NEXT:       "HWPort3",
-# CHECK-NEXT:       "HWPort4",
-# CHECK-NEXT:       "HWPort5",
-# CHECK-NEXT:       "HWPort6",
-# CHECK-NEXT:       "HWPort7"
+# CHECK-NEXT:   "InstructionInfoView": {
+# CHECK-NEXT:     "InstructionList": [
+# CHECK-NEXT:       {
+# CHECK-NEXT:         "Instruction": 0,
+# CHECK-NEXT:         "Latency": 1,
+# CHECK-NEXT:         "NumMicroOpcodes": 1,
+# CHECK-NEXT:         "RThroughput": 0.25,
+# CHECK-NEXT:         "hasUnmodeledSideEffects": false,
+# CHECK-NEXT:         "mayLoad": false,
+# CHECK-NEXT:         "mayStore": false
+# CHECK-NEXT:       },
+# CHECK-NEXT:       {
+# CHECK-NEXT:         "Instruction": 1,
+# CHECK-NEXT:         "Latency": 1,
+# CHECK-NEXT:         "NumMicroOpcodes": 1,
+# CHECK-NEXT:         "RThroughput": 0.25,
+# CHECK-NEXT:         "hasUnmodeledSideEffects": false,
+# CHECK-NEXT:         "mayLoad": false,
+# CHECK-NEXT:         "mayStore": false
+# CHECK-NEXT:       },
+# CHECK-NEXT:       {
+# CHECK-NEXT:         "Instruction": 2,
+# CHECK-NEXT:         "Latency": 1,
+# CHECK-NEXT:         "NumMicroOpcodes": 1,
+# CHECK-NEXT:         "RThroughput": 0.25,
+# CHECK-NEXT:         "hasUnmodeledSideEffects": false,
+# CHECK-NEXT:         "mayLoad": false,
+# CHECK-NEXT:         "mayStore": false
+# CHECK-NEXT:       },
+# CHECK-NEXT:       {
+# CHECK-NEXT:         "Instruction": 3,
+# CHECK-NEXT:         "Latency": 1,
+# CHECK-NEXT:         "NumMicroOpcodes": 1,
+# CHECK-NEXT:         "RThroughput": 0.25,
+# CHECK-NEXT:         "hasUnmodeledSideEffects": false,
+# CHECK-NEXT:         "mayLoad": false,
+# CHECK-NEXT:         "mayStore": false
+# CHECK-NEXT:       }
 # CHECK-NEXT:     ]
-# CHECK-NEXT:   }
-# CHECK-NEXT: }
-# CHECK-NEXT: {
+# CHECK-NEXT:   },
+# CHECK-NEXT:   "Instructions and CPU resources": {
+# CHECK-NEXT:     "Instructions": [
+# CHECK-NEXT:       "addl\t%eax, %eax",
+# CHECK-NEXT:       "addl\t%ebx, %ebx",
+# CHECK-NEXT:       "addl\t%ecx, %ecx",
+# CHECK-NEXT:       "addl\t%edx, %edx"
+# CHECK-NEXT:     ],
+# CHECK-NEXT:     "Resources": {
+# CHECK-NEXT:       "CPUName": "haswell",
+# CHECK-NEXT:       "Resources": [
+# CHECK-NEXT:         "HWDivider",
+# CHECK-NEXT:         "HWFPDivider",
+# CHECK-NEXT:         "HWPort0",
+# CHECK-NEXT:         "HWPort1",
+# CHECK-NEXT:         "HWPort2",
+# CHECK-NEXT:         "HWPort3",
+# CHECK-NEXT:         "HWPort4",
+# CHECK-NEXT:         "HWPort5",
+# CHECK-NEXT:         "HWPort6",
+# CHECK-NEXT:         "HWPort7"
+# CHECK-NEXT:       ]
+# CHECK-NEXT:     }
+# CHECK-NEXT:   },
+# CHECK-NEXT:   "ResourcePressureView": {
+# CHECK-NEXT:     "ResourcePressureInfo": [
+# CHECK-NEXT:       {
+# CHECK-NEXT:         "InstructionIndex": 0,
+# CHECK-NEXT:         "ResourceIndex": 8,
+# CHECK-NEXT:         "ResourceUsage": 1
+# CHECK-NEXT:       },
+# CHECK-NEXT:       {
+# CHECK-NEXT:         "InstructionIndex": 1,
+# CHECK-NEXT:         "ResourceIndex": 7,
+# CHECK-NEXT:         "ResourceUsage": 1
+# CHECK-NEXT:       },
+# CHECK-NEXT:       {
+# CHECK-NEXT:         "InstructionIndex": 2,
+# CHECK-NEXT:         "ResourceIndex": 3,
+# CHECK-NEXT:         "ResourceUsage": 1
+# CHECK-NEXT:       },
+# CHECK-NEXT:       {
"InstructionIndex": 3, +# CHECK-NEXT: "ResourceIndex": 2, +# CHECK-NEXT: "ResourceUsage": 1 +# CHECK-NEXT: }, +# CHECK-NEXT: { +# CHECK-NEXT: "InstructionIndex": 4, +# CHECK-NEXT: "ResourceIndex": 2, +# CHECK-NEXT: "ResourceUsage": 1 +# CHECK-NEXT: }, +# CHECK-NEXT: { +# CHECK-NEXT: "InstructionIndex": 4, +# CHECK-NEXT: "ResourceIndex": 3, +# CHECK-NEXT: "ResourceUsage": 1 +# CHECK-NEXT: }, +# CHECK-NEXT: { +# CHECK-NEXT: "InstructionIndex": 4, +# CHECK-NEXT: "ResourceIndex": 7, +# CHECK-NEXT: "ResourceUsage": 1 +# CHECK-NEXT: }, +# CHECK-NEXT: { +# CHECK-NEXT: "InstructionIndex": 4, +# CHECK-NEXT: "ResourceIndex": 8, +# CHECK-NEXT: "ResourceUsage": 1 +# CHECK-NEXT: } +# CHECK-NEXT: ] +# CHECK-NEXT: }, # CHECK-NEXT: "SummaryView": { # CHECK-NEXT: "BlockRThroughput": 1, # CHECK-NEXT: "DispatchWidth": 4, @@ -42,119 +130,37 @@ # CHECK-NEXT: "TotalCycles": 103, # CHECK-NEXT: "TotaluOps": 400, # CHECK-NEXT: "uOpsPerCycle": 3.883495145631068 -# CHECK-NEXT: } -# CHECK-NEXT: } -# CHECK-NEXT: [ -# CHECK-NEXT: { -# CHECK-NEXT: "Instruction": 0, -# CHECK-NEXT: "Latency": 1, -# CHECK-NEXT: "NumMicroOpcodes": 1, -# CHECK-NEXT: "RThroughput": 0.25, -# CHECK-NEXT: "hasUnmodeledSideEffects": false, -# CHECK-NEXT: "mayLoad": false, -# CHECK-NEXT: "mayStore": false -# CHECK-NEXT: }, -# CHECK-NEXT: { -# CHECK-NEXT: "Instruction": 1, -# CHECK-NEXT: "Latency": 1, -# CHECK-NEXT: "NumMicroOpcodes": 1, -# CHECK-NEXT: "RThroughput": 0.25, -# CHECK-NEXT: "hasUnmodeledSideEffects": false, -# CHECK-NEXT: "mayLoad": false, -# CHECK-NEXT: "mayStore": false # CHECK-NEXT: }, -# CHECK-NEXT: { -# CHECK-NEXT: "Instruction": 2, -# CHECK-NEXT: "Latency": 1, -# CHECK-NEXT: "NumMicroOpcodes": 1, -# CHECK-NEXT: "RThroughput": 0.25, -# CHECK-NEXT: "hasUnmodeledSideEffects": false, -# CHECK-NEXT: "mayLoad": false, -# CHECK-NEXT: "mayStore": false -# CHECK-NEXT: }, -# CHECK-NEXT: { -# CHECK-NEXT: "Instruction": 3, -# CHECK-NEXT: "Latency": 1, -# CHECK-NEXT: "NumMicroOpcodes": 1, -# CHECK-NEXT: "RThroughput": 0.25, -# CHECK-NEXT: "hasUnmodeledSideEffects": false, -# CHECK-NEXT: "mayLoad": false, -# CHECK-NEXT: "mayStore": false +# CHECK-NEXT: "TimelineView": { +# CHECK-NEXT: "TimelineInfo": [ +# CHECK-NEXT: { +# CHECK-NEXT: "CycleDispatched": 0, +# CHECK-NEXT: "CycleExecuted": 2, +# CHECK-NEXT: "CycleIssued": 1, +# CHECK-NEXT: "CycleReady": 0, +# CHECK-NEXT: "CycleRetired": 3 +# CHECK-NEXT: }, +# CHECK-NEXT: { +# CHECK-NEXT: "CycleDispatched": 0, +# CHECK-NEXT: "CycleExecuted": 2, +# CHECK-NEXT: "CycleIssued": 1, +# CHECK-NEXT: "CycleReady": 0, +# CHECK-NEXT: "CycleRetired": 3 +# CHECK-NEXT: }, +# CHECK-NEXT: { +# CHECK-NEXT: "CycleDispatched": 0, +# CHECK-NEXT: "CycleExecuted": 2, +# CHECK-NEXT: "CycleIssued": 1, +# CHECK-NEXT: "CycleReady": 0, +# CHECK-NEXT: "CycleRetired": 3 +# CHECK-NEXT: }, +# CHECK-NEXT: { +# CHECK-NEXT: "CycleDispatched": 0, +# CHECK-NEXT: "CycleExecuted": 2, +# CHECK-NEXT: "CycleIssued": 1, +# CHECK-NEXT: "CycleReady": 0, +# CHECK-NEXT: "CycleRetired": 3 +# CHECK-NEXT: } +# CHECK-NEXT: ] # CHECK-NEXT: } -# CHECK-NEXT: ] -# CHECK-NEXT: { -# CHECK-NEXT: "ResourcePressureInfo": [ -# CHECK-NEXT: { -# CHECK-NEXT: "InstructionIndex": 0, -# CHECK-NEXT: "ResourceIndex": 8, -# CHECK-NEXT: "ResourceUsage": 1 -# CHECK-NEXT: }, -# CHECK-NEXT: { -# CHECK-NEXT: "InstructionIndex": 1, -# CHECK-NEXT: "ResourceIndex": 7, -# CHECK-NEXT: "ResourceUsage": 1 -# CHECK-NEXT: }, -# CHECK-NEXT: { -# CHECK-NEXT: "InstructionIndex": 2, -# CHECK-NEXT: "ResourceIndex": 3, -# CHECK-NEXT: "ResourceUsage": 1 -# CHECK-NEXT: }, -# 
-# CHECK-NEXT:     {
-# CHECK-NEXT:       "InstructionIndex": 3,
-# CHECK-NEXT:       "ResourceIndex": 2,
-# CHECK-NEXT:       "ResourceUsage": 1
-# CHECK-NEXT:     },
-# CHECK-NEXT:     {
-# CHECK-NEXT:       "InstructionIndex": 4,
-# CHECK-NEXT:       "ResourceIndex": 2,
-# CHECK-NEXT:       "ResourceUsage": 1
-# CHECK-NEXT:     },
-# CHECK-NEXT:     {
-# CHECK-NEXT:       "InstructionIndex": 4,
-# CHECK-NEXT:       "ResourceIndex": 3,
-# CHECK-NEXT:       "ResourceUsage": 1
-# CHECK-NEXT:     },
-# CHECK-NEXT:     {
-# CHECK-NEXT:       "InstructionIndex": 4,
-# CHECK-NEXT:       "ResourceIndex": 7,
-# CHECK-NEXT:       "ResourceUsage": 1
-# CHECK-NEXT:     },
-# CHECK-NEXT:     {
-# CHECK-NEXT:       "InstructionIndex": 4,
-# CHECK-NEXT:       "ResourceIndex": 8,
-# CHECK-NEXT:       "ResourceUsage": 1
-# CHECK-NEXT:     }
-# CHECK-NEXT:   ]
-# CHECK-NEXT: }
-# CHECK-NEXT: {
-# CHECK-NEXT:   "TimelineInfo": [
-# CHECK-NEXT:     {
-# CHECK-NEXT:       "CycleDispatched": 0,
-# CHECK-NEXT:       "CycleExecuted": 2,
-# CHECK-NEXT:       "CycleIssued": 1,
-# CHECK-NEXT:       "CycleReady": 0,
-# CHECK-NEXT:       "CycleRetired": 3
-# CHECK-NEXT:     },
-# CHECK-NEXT:     {
-# CHECK-NEXT:       "CycleDispatched": 0,
-# CHECK-NEXT:       "CycleExecuted": 2,
-# CHECK-NEXT:       "CycleIssued": 1,
-# CHECK-NEXT:       "CycleReady": 0,
-# CHECK-NEXT:       "CycleRetired": 3
-# CHECK-NEXT:     },
-# CHECK-NEXT:     {
-# CHECK-NEXT:       "CycleDispatched": 0,
-# CHECK-NEXT:       "CycleExecuted": 2,
-# CHECK-NEXT:       "CycleIssued": 1,
-# CHECK-NEXT:       "CycleReady": 0,
-# CHECK-NEXT:       "CycleRetired": 3
-# CHECK-NEXT:     },
-# CHECK-NEXT:     {
-# CHECK-NEXT:       "CycleDispatched": 0,
-# CHECK-NEXT:       "CycleExecuted": 2,
-# CHECK-NEXT:       "CycleIssued": 1,
-# CHECK-NEXT:       "CycleReady": 0,
-# CHECK-NEXT:       "CycleRetired": 3
-# CHECK-NEXT:     }
-# CHECK-NEXT:   ]
 # CHECK-NEXT: }
diff --git a/llvm/tools/llvm-mca/PipelinePrinter.cpp b/llvm/tools/llvm-mca/PipelinePrinter.cpp
--- a/llvm/tools/llvm-mca/PipelinePrinter.cpp
+++ b/llvm/tools/llvm-mca/PipelinePrinter.cpp
@@ -18,8 +18,18 @@
 namespace mca {
 
 void PipelinePrinter::printReport(llvm::raw_ostream &OS) const {
-  for (const auto &V : Views)
-    V->printView(OutputKind, OS);
+  json::Object JO;
+  for (const auto &V : Views) {
+    if ((OutputKind == View::OK_JSON)) {
+      if (V->isSerializable()) {
+        JO.try_emplace(V->getNameAsString().str(), V->toJSON());
+      }
+    } else {
+      V->printView(OS);
+    }
+  }
+  if (OutputKind == View::OK_JSON)
+    OS << formatv("{0:2}", json::Value(std::move(JO))) << "\n";
 }
 } // namespace mca.
 } // namespace llvm
diff --git a/llvm/tools/llvm-mca/Views/BottleneckAnalysis.h b/llvm/tools/llvm-mca/Views/BottleneckAnalysis.h
--- a/llvm/tools/llvm-mca/Views/BottleneckAnalysis.h
+++ b/llvm/tools/llvm-mca/Views/BottleneckAnalysis.h
@@ -15,7 +15,7 @@
 /// Example of bottleneck analysis report for a dot-product on X86 btver2:
 ///
 ///   Cycles with backend pressure increase [ 40.76% ]
-///   Throughput Bottlenecks: 
+///   Throughput Bottlenecks:
 ///     Resource Pressure    [ 39.34% ]
 ///     - JFPA    [ 39.34% ]
 ///     - JFPU0   [ 39.34% ]
@@ -33,9 +33,9 @@
 /// In particular, this occurs when there is a delta between the number of uOps
 /// dispatched and the number of uOps issued to the underlying pipelines.
 ///
-/// The bottleneck analysis view is also responsible for identifying and printing
-/// the most "critical" sequence of dependent instructions according to the
-/// simulated run.
+/// The bottleneck analysis view is also responsible for identifying and
+/// printing the most "critical" sequence of dependent instructions according to
+/// the simulated run.
 ///
 /// Below is the critical sequence computed for the dot-product example on
 /// btver2:
@@ -43,32 +43,35 @@
 ///              Instruction                 Dependency Information
 ///  +----< 2.   vhaddps %xmm3, %xmm3, %xmm4
 ///  |
-///  |    < loop carried > 
+///  |    < loop carried >
 ///  |
 ///  |      0.   vmulps  %xmm0, %xmm0, %xmm2
-///  +----> 1.   vhaddps %xmm2, %xmm2, %xmm3   ## RESOURCE interference: JFPA [ probability: 73% ]
+///  +----> 1.   vhaddps %xmm2, %xmm2, %xmm3   ## RESOURCE interference: JFPA
+///              [ probability: 73% ]
 ///  +----> 2.   vhaddps %xmm3, %xmm3, %xmm4   ## REGISTER dependency: %xmm3
 ///  |
-///  |    < loop carried > 
+///  |    < loop carried >
 ///  |
-///  +----> 1.   vhaddps %xmm2, %xmm2, %xmm3   ## RESOURCE interference: JFPA [ probability: 73% ]
+///  +----> 1.   vhaddps %xmm2, %xmm2, %xmm3   ## RESOURCE interference: JFPA
+///              [ probability: 73% ]
 ///
 ///
 /// The algorithm that computes the critical sequence is very similar to a
 /// critical path analysis.
-/// 
+///
 /// A dependency graph is used internally to track dependencies between nodes.
 /// Nodes of the graph represent instructions from the input assembly sequence,
 /// and edges of the graph represent data dependencies or processor resource
 /// interferences.
 ///
-/// Edges are dynamically 'discovered' by observing instruction state transitions
-/// and backend pressure increase events. Edges are internally ranked based on
-/// their "criticality". A dependency is considered to be critical if it takes a
-/// long time to execute, and if it contributes to backend pressure increases.
-/// Criticality is internally measured in terms of cycles; it is computed for
-/// every edge in the graph as a function of the edge latency and the number of
-/// backend pressure increase cycles contributed by that edge.
+/// Edges are dynamically 'discovered' by observing instruction state
+/// transitions and backend pressure increase events. Edges are internally
+/// ranked based on their "criticality". A dependency is considered to be
+/// critical if it takes a long time to execute, and if it contributes to
+/// backend pressure increases. Criticality is internally measured in terms of
+/// cycles; it is computed for every edge in the graph as a function of the edge
+/// latency and the number of backend pressure increase cycles contributed by
+/// that edge.
 ///
 /// At the end of simulation, costs are propagated to nodes through the edges of
 /// the graph, and the most expensive path connecting the root-set (a
@@ -217,8 +220,8 @@
 // Loop carried dependencies are carefully expanded by the bottleneck analysis
 // to guarantee that the graph stays acyclic. To this end, extra nodes are
 // pre-allocated at construction time to describe instructions from "past and
-// future" iterations. The graph is kept acyclic mainly because it simplifies the
-// complexity of the algorithm that computes the critical sequence.
+// future" iterations. The graph is kept acyclic mainly because it simplifies
+// the complexity of the algorithm that computes the critical sequence.
 class DependencyGraph {
   struct DGNode {
     unsigned NumPredecessors;
@@ -239,7 +242,8 @@
 
   void pruneEdges(unsigned Iterations);
   void initializeRootSet(SmallVectorImpl &RootSet) const;
-  void propagateThroughEdges(SmallVectorImpl &RootSet, unsigned Iterations);
+  void propagateThroughEdges(SmallVectorImpl &RootSet,
+                             unsigned Iterations);
 
 #ifndef NDEBUG
   void dumpDependencyEdge(raw_ostream &OS, const DependencyEdge &DE,
@@ -333,7 +337,7 @@
 
   void printView(raw_ostream &OS) const override;
   StringRef getNameAsString() const override { return "BottleneckAnalysis"; }
-  json::Value toJSON() const override { return "not implemented"; }
+  bool isSerializable() const override { return false; }
 
 #ifndef NDEBUG
   void dump(raw_ostream &OS, MCInstPrinter &MCIP) const { DG.dump(OS, MCIP); }
diff --git a/llvm/tools/llvm-mca/Views/DispatchStatistics.h b/llvm/tools/llvm-mca/Views/DispatchStatistics.h
--- a/llvm/tools/llvm-mca/Views/DispatchStatistics.h
+++ b/llvm/tools/llvm-mca/Views/DispatchStatistics.h
@@ -79,6 +79,7 @@
     printDispatchHistogram(OS);
   }
   StringRef getNameAsString() const override { return "DispatchStatistics"; }
+  json::Value toJSON() const override;
 };
 } // namespace mca
 } // namespace llvm
diff --git a/llvm/tools/llvm-mca/Views/DispatchStatistics.cpp b/llvm/tools/llvm-mca/Views/DispatchStatistics.cpp
--- a/llvm/tools/llvm-mca/Views/DispatchStatistics.cpp
+++ b/llvm/tools/llvm-mca/Views/DispatchStatistics.cpp
@@ -84,5 +84,16 @@
   OS << Buffer;
 }
 
+json::Value DispatchStatistics::toJSON() const {
+  json::Object JO({{"RAT", HWStalls[HWStallEvent::RegisterFileStall]},
+                   {"RCU", HWStalls[HWStallEvent::RetireControlUnitStall]},
+                   {"SCHEDQ", HWStalls[HWStallEvent::SchedulerQueueFull]},
+                   {"LQ", HWStalls[HWStallEvent::LoadQueueFull]},
+                   {"SQ", HWStalls[HWStallEvent::StoreQueueFull]},
+                   {"GROUP", HWStalls[HWStallEvent::DispatchGroupStall]},
+                   {"USH", HWStalls[HWStallEvent::CustomBehaviourStall]}});
+  return JO;
+}
+
 } // namespace mca
 } // namespace llvm
diff --git a/llvm/tools/llvm-mca/Views/InstructionInfoView.cpp b/llvm/tools/llvm-mca/Views/InstructionInfoView.cpp
--- a/llvm/tools/llvm-mca/Views/InstructionInfoView.cpp
+++ b/llvm/tools/llvm-mca/Views/InstructionInfoView.cpp
@@ -147,7 +147,7 @@
     JO.try_emplace("Instruction", (unsigned)I.index());
     InstInfo.push_back(std::move(JO));
   }
-  return json::Value(std::move(InstInfo));
+  return json::Object({{"InstructionList", json::Value(std::move(InstInfo))}});
 }
 } // namespace mca.
 } // namespace llvm
diff --git a/llvm/tools/llvm-mca/Views/RegisterFileStatistics.h b/llvm/tools/llvm-mca/Views/RegisterFileStatistics.h
--- a/llvm/tools/llvm-mca/Views/RegisterFileStatistics.h
+++ b/llvm/tools/llvm-mca/Views/RegisterFileStatistics.h
@@ -76,6 +76,7 @@
   StringRef getNameAsString() const override {
     return "RegisterFileStatistics";
   }
+  bool isSerializable() const override { return false; }
 };
 } // namespace mca
 } // namespace llvm
diff --git a/llvm/tools/llvm-mca/Views/RetireControlUnitStatistics.h b/llvm/tools/llvm-mca/Views/RetireControlUnitStatistics.h
--- a/llvm/tools/llvm-mca/Views/RetireControlUnitStatistics.h
+++ b/llvm/tools/llvm-mca/Views/RetireControlUnitStatistics.h
@@ -55,6 +55,7 @@
   StringRef getNameAsString() const override {
     return "RetireControlUnitStatistics";
   }
+  bool isSerializable() const override { return false; }
 };
 
 } // namespace mca
diff --git a/llvm/tools/llvm-mca/Views/SchedulerStatistics.h b/llvm/tools/llvm-mca/Views/SchedulerStatistics.h
--- a/llvm/tools/llvm-mca/Views/SchedulerStatistics.h
+++ b/llvm/tools/llvm-mca/Views/SchedulerStatistics.h
@@ -89,6 +89,7 @@
   void printView(llvm::raw_ostream &OS) const override;
   StringRef getNameAsString() const override { return "SchedulerStatistics"; }
+  bool isSerializable() const override { return false; }
 };
 
 } // namespace mca
 } // namespace llvm
diff --git a/llvm/tools/llvm-mca/Views/TimelineView.h b/llvm/tools/llvm-mca/Views/TimelineView.h
--- a/llvm/tools/llvm-mca/Views/TimelineView.h
+++ b/llvm/tools/llvm-mca/Views/TimelineView.h
@@ -125,7 +125,7 @@
   unsigned LastCycle;
 
   struct TimelineViewEntry {
-    int CycleDispatched; // A negative value is an "invalid cycle". 
+    int CycleDispatched; // A negative value is an "invalid cycle".
     unsigned CycleReady;
     unsigned CycleIssued;
     unsigned CycleExecuted;
diff --git a/llvm/tools/llvm-mca/Views/View.h b/llvm/tools/llvm-mca/Views/View.h
--- a/llvm/tools/llvm-mca/Views/View.h
+++ b/llvm/tools/llvm-mca/Views/View.h
@@ -17,8 +17,8 @@
 
 #include "llvm/MC/MCInstPrinter.h"
 #include "llvm/MCA/HWEventListener.h"
-#include "llvm/Support/raw_ostream.h"
 #include "llvm/Support/JSON.h"
+#include "llvm/Support/raw_ostream.h"
 
 namespace llvm {
 namespace mca {
@@ -43,6 +43,7 @@
   virtual ~View() = default;
   virtual StringRef getNameAsString() const = 0;
   virtual json::Value toJSON() const { return "not implemented"; }
+  virtual bool isSerializable() const { return true; }
   void anchor() override;
 };
 } // namespace mca
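
Usage note (not part of the patch): the new RUN lines in views.s already round-trip the report
through Python's json module to prove that the -json output is a single well-formed document.
The sketch below is one hypothetical way a consumer could read that document; it assumes a report
was saved with "llvm-mca ... --json -o <file>" as in the test, and it only touches keys that
appear in the CHECK lines above ("InstructionInfoView", "InstructionList", "Instruction",
"Latency", "RThroughput", and the "Instructions" array under "Instructions and CPU resources").
The file name "mca.json" is just an example.

#!/usr/bin/env python3
# Minimal sketch: print one line per instruction from an llvm-mca --json report.
import json
import sys

path = sys.argv[1] if len(sys.argv) > 1 else "mca.json"
with open(path) as f:
    report = json.load(f)

# Top-level keys are view names; each view refers to instructions by index.
instructions = report["Instructions and CPU resources"]["Instructions"]
info = report["InstructionInfoView"]["InstructionList"]

for entry in info:
    idx = entry["Instruction"]  # index into the Instructions array
    print("%-24s latency=%d rthroughput=%.2f"
          % (instructions[idx], entry["Latency"], entry["RThroughput"]))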