diff --git a/lnt/server/ui/static/lnt_graph.js b/lnt/server/ui/static/lnt_graph.js
--- a/lnt/server/ui/static/lnt_graph.js
+++ b/lnt/server/ui/static/lnt_graph.js
@@ -1,13 +1,5 @@
/*jslint vars: true, browser: true, devel: true, plusplus: true, unparam: true*/
-/*global $, jQuery, alert, db_name, test_suite_name, init, changes */
-/*global update_graph*/
-// Keep the graph data we download.
-// Each element is a list of graph data points.
-var data_cache = [];
-var is_checked = []; // The current list of lines to plot.
-var normalize = false;
-
-var MAX_TO_DRAW = 10;
+/*global $, jQuery, alert, db_name, test_suite_name */
var STATE_NAMES = {0: 'Detected',
1: 'Staged',
@@ -20,17 +12,22 @@
var regression_cache = [];
var lnt_graph = {};
-
// Grab the graph API url for this line.
function get_api_url(kind, db, ts, mtf) {
"use strict";
return [lnt_url_base, "api", "db_" + db, "v4", ts, kind, mtf].join('/');
}
-// Grab the URL for a regression by id.
-function get_regression_url(db, ts, regression) {
+// Grab the URL for a machine by id.
+function get_machine_url(db, ts, machineID) {
"use strict";
- return [lnt_url_base, "db_" + db, "v4", ts, "regressions", regression].join('/');
+ return [lnt_url_base, "db_" + db, "v4", ts, "machine", machineID].join('/');
+}
+
+// Grab the URL for an order by id.
+function get_order_url(db, ts, orderID) {
+ "use strict";
+ return [lnt_url_base, "db_" + db, "v4", ts, "order", orderID].join('/');
}
// Grab the URL for a run by id.
@@ -39,103 +36,84 @@
return [lnt_url_base, "db_" + db, "v4", ts, runID].join('/');
}
-// Create a new regression manually URL.
-function get_manual_regression_url(db, ts, url, runID) {
+// Grab the URL for a regression by id.
+function get_regression_url(db, ts, regression) {
"use strict";
- return [lnt_url_base,
- "db_" + db,
- "v4",
- ts,
- "regressions/new_from_graph",
- url,
- runID].join('/');
+ return [lnt_url_base, "db_" + db, "v4", ts, "regressions", regression].join('/');
}
-
-
-/* Bind events to the zoom bar buttons, so that
- * the zoom buttons work, then position them
- * over top of the main graph.
- */
-function bind_zoom_bar(my_plot) {
+// Create a new regression manually URL.
+function get_manual_regression_url(db, ts, url, runID) {
"use strict";
- $('#out').click(function (e) {
- e.preventDefault();
- my_plot.zoomOut();
- });
-
- $('#in').click(function (e) {
- e.preventDefault();
- my_plot.zoom();
- });
-
- // Now move the bottons onto the graph.
- $('#graphbox').css('position', 'relative');
- $('#zoombar').css('position', 'absolute');
-
- $('#zoombar').css('left', '40px');
- $('#zoombar').css('top', '15px');
-
+ return [lnt_url_base, "db_" + db, "v4", ts, "regressions/new_from_graph", url, runID].join('/');
}
-
// Show our overlay tooltip.
lnt_graph.current_tip_point = null;
-function show_tooltip(x, y, item, pos, graph_data) {
+function plotly_show_tooltip(data) {
"use strict";
- // Given the event handler item, get the graph metadata.
- function extract_metadata(item) {
- var index = item.dataIndex;
- // Graph data is formatted as [x, y, meta_data].
- var meta_data = item.series.data[index][2];
- return meta_data;
- }
- var data = item.datapoint;
- var meta_data = extract_metadata(item);
    var tip_body = '<div id="tooltip">';
+    var point = data.points[0];
+
+    if (point.data.regression && point.data.regressionID) {
+        tip_body += "<b><a href=\"" +
+            get_regression_url(db_name, test_suite_name, point.data.regressionID) +
+            "\">" + point.data.regression + "</a></b><br>";
+    }
-    if (meta_data.title) {
-        tip_body += "<b><a href=\"" + meta_data.link + "\">" + meta_data.title + "</a></b><br>";
+    if (point.data.machine && point.data.machineID) {
+        tip_body += "<b>Machine:</b> <a href=\"" +
+            get_machine_url(db_name, test_suite_name, point.data.machineID) +
+            "\">" + point.data.machine + "</a><br>";
    }
-    if (meta_data.machine) {
-        tip_body += "<b>Machine:</b> " + meta_data.machine + "<br>";
+    if (point.data.test_name) {
+        tip_body += "<b>Test:</b> " + point.data.test_name + "<br>";
    }
-    if (meta_data.test_name) {
-        tip_body += "<b>Test:</b> " + meta_data.test_name + "<br>";
+    if (point.data.metric) {
+        tip_body += "<b>Metric:</b> " + point.data.metric + "<br>";
    }
-    if (meta_data.label) {
-        tip_body += "<b>Revision:</b> " + meta_data.label + "<br>";
+    if (point.meta.order) {
+        if (point.meta.orderID) {
+            tip_body += "<b>Order:</b> <a href=\"" +
+                get_order_url(db_name, test_suite_name, point.meta.orderID) +
+                "\">" + point.meta.order + "</a><br>";
+        } else {
+            tip_body += "<b>Order:</b> " + point.meta.order + "<br>";
+        }
    }
-    tip_body += "<b>Value:</b> " + data[1].toFixed(4) + "<br>";
-    if (meta_data.date) {
-        tip_body += "<b>Date:</b> " + meta_data.date + "<br>";
+    tip_body += "<b>Value:</b> " + point.y.toFixed(4) + "<br>";
+
+    if (point.meta.date) {
+        tip_body += "<b>Date:</b> " + point.meta.date + "<br>";
    }
-    if (meta_data.state) {
-        tip_body += "<b>State:</b> " + meta_data.state + "<br>";
+
+    if (point.meta.state) {
+        tip_body += "<b>State:</b> " + point.meta.state + "<br>";
    }
-    if (meta_data.runID) {
-        tip_body += "<b>Run:</b> <a href=\"" +
-            get_run_url(db_name, test_suite_name, meta_data.runID) +
-            "\">" + meta_data.runID + "</a><br>";
+
+    if (point.meta.runID) {
+        tip_body += "<b>Run:</b> <a href=\"" +
+            get_run_url(db_name, test_suite_name, point.meta.runID) +
+            "\">" + point.meta.runID + "</a><br>";
    }
-    if (meta_data.runID && item.series.url) {
-        tip_body += "<a href=\"" +
-            get_manual_regression_url(db_name, test_suite_name, item.series.url, meta_data.runID) +
-            "\">Mark Change.</a><br>";
+    if (point.meta.runID && point.data.url) { // url = machine.id/test.id/field_index
+        tip_body += "<a href=\"" +
+            get_manual_regression_url(db_name, test_suite_name, point.data.url, point.meta.runID) +
+            "\">Mark Change.</a><br>";
    }
    tip_body += "</div>";
var tooltip_div = $(tip_body).css({
position: 'absolute',
display: 'none',
- top: y + 5,
- left: x + 5,
+ top: data.event.pageY + 5,
+ left: data.event.pageX + 5,
border: '1px solid #fdd',
padding: '2px',
'background-color': '#fee',
@@ -165,9 +143,9 @@
}
// Event handler function to update the tooltip.
-function update_tooltip(event, pos, item, show_fn, graph_data) {
+function plotly_update_tooltip(data) {
"use strict";
- if (!item) {
+    if (!data || data.points.length === 0) {
$("#tooltip").fadeOut(200, function () {
$("#tooltip").remove();
});
@@ -175,95 +153,19 @@
return;
}
- if (!lnt_graph.current_tip_point || (lnt_graph.current_tip_point[0] !== item.datapoint[0] ||
- lnt_graph.current_tip_point[1] !== item.datapoint[1])) {
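+    // A Plotly click identifies a point by its (curveNumber, pointNumber) pair.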
+    if (!lnt_graph.current_tip_point || (lnt_graph.current_tip_point[0] !== data.points[0].curveNumber ||
+        lnt_graph.current_tip_point[1] !== data.points[0].pointNumber)) {
$("#tooltip").remove();
- lnt_graph.current_tip_point = item.datapoint;
- show_fn(pos.pageX, pos.pageY, item, pos, graph_data);
- }
-}
-
-
-// Normalize this data to the element in index
-function normalize_data(data_array, index) {
- "use strict";
- var new_data = new Array(data_array.length);
- var i = 0;
- var factor = 0;
- for (i = 0; i < data_array.length; i++) {
- if (data_array[i][0] == index) {
- factor = data_array[i][1];
- break;
- }
- }
- console.assert(factor !== 0, "Did not find the element to normalize on.");
- for (i = 0; i < data_array.length; i++) {
- new_data[i] = jQuery.extend({}, data_array[i]);
- new_data[i][1] = (data_array[i][1] / factor) * 100;
- }
- return new_data;
-}
-
-
-function try_normal(data_array, index) {
- "use strict";
- if (normalize) {
- return normalize_data(data_array, index);
- }
- return data_array;
-}
-
-
-function make_graph_point_entry(data, color, regression) {
- "use strict";
- var radius = 0.25;
- var fill = true;
- if (regression) {
- radius = 5.0;
- fill = false;
- color = "red";
- }
- var entry = {"color": color,
- "data": data,
- "lines": {"show": false},
- "points": {"fill": fill,
- "radius": radius,
- "show": true
- }
- };
- if (regression) {
- entry.points.symbol = "triangle";
+ lnt_graph.current_tip_point = [data.points[0].curveNumber, data.points[0].pointNumber];
+ plotly_show_tooltip(data);
}
- return entry;
}
-var color_codes = ["#4D4D4D",
- "#5DA5DA",
- "#FAA43A",
- "#60BD68",
- "#F17CB0",
- "#B2912F",
- "#B276B2",
- "#DECF3F",
- "#F15854",
- "#1F78B4",
- "#33A02C",
- "#E31A1C",
- "#FF7F00",
- "#6A3D9A",
- "#A6CEE3",
- "#B2DF8A",
- "#FB9A99",
- "#FDBF6F",
- "#CAB2D6"];
-
-function new_graph_data_callback(data, index) {
+function plotly_hide_tooltip(data) {
"use strict";
- data_cache[index] = data;
- update_graph();
+ plotly_update_tooltip(null);
}
-
function get_regression_id() {
"use strict";
var path = window.location.pathname.split("/");
@@ -272,213 +174,57 @@
}
}
-
-function new_graph_regression_callback(data, index, update_func) {
+function plotly_graph_regression_callback(data, index, item, yaxis, update_func) {
"use strict";
- $.each(data, function (i, d) {
-
+ $.each(data, function (i, r) {
if (get_regression_id() !== null) {
- if (get_regression_id() === d.id || d.state === 21) {
+ if (get_regression_id() === r.id || r.state === 21) {
return;
}
}
if (!(regression_cache[index])) {
regression_cache[index] = [];
}
- var metadata = {'label': d.end_point[0],
- 'title': d.title,
- 'id': d.id,
- 'link': get_regression_url(db_name, test_suite_name, d.id),
- 'state': STATE_NAMES[d.state]};
- regression_cache[index].push([parseInt(d.end_point[0], 10), d.end_point[1], metadata]);
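+        // Each regression becomes its own single-point scatter trace, so it can
+        // carry its own link/tooltip metadata and legend entry.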
+ regression_cache[index].push({
+ "x": [r.end_point[0]],
+ "y": [r.end_point[1]],
+ "meta": [{
+ "order": r.end_point[0],
+ "state": STATE_NAMES[r.state]
+ }],
+ "name": r.title,
+ "machine": item[0].name,
+ "machineID": item[0].id,
+ "metric": item[2],
+ "yaxis": yaxis,
+ "regression": r.title,
+ "regressionID": r.id,
+ "legendgroup": "regressions",
+ "showlegend": true,
+ "mode": "markers",
+ "marker": {
+ "color": "red",
+ "symbol": "triangle-up-open",
+ "size": 13}
+ });
});
update_func();
}
-
-var NOT_DRAWING = '<div class="alert alert-danger">' +
-    'Too many lines to plot. Limit is ' + MAX_TO_DRAW + "." +
-    '<a class="close" data-dismiss="alert" href="#">&times;</a>' +
-    '</div>';
-
-
-function update_graph() {
- "use strict";
- var to_draw = [];
- var starts = [];
- var ends = [];
- var lines_to_draw = 0;
- var i = 0;
- var color = null;
- var data = null;
- var regressions = null;
- // We need to find the x bounds of the data, sine regressions may be
- // outside that range.
- var mins = [];
- var maxs = [];
- // Data processing.
- for (i = 0; i < changes.length; i++) {
- if (is_checked[i] && data_cache[i]) {
- lines_to_draw++;
- starts.push(changes[i].start);
- ends.push(changes[i].end);
- color = color_codes[i % color_codes.length];
- data = try_normal(data_cache[i], changes[i].start);
- // Find local x-axis min and max.
- var local_min = parseFloat(data[0][0]);
- var local_max = parseFloat(data[0][0]);
- for (var j = 0; j < data.length; j++) {
- var datum = data[j];
- var d = parseFloat(datum[0]);
- if (d < local_min) {
- local_min = d;
- }
- if (d > local_max) {
- local_max = d;
- }
- }
- mins.push(local_min);
- maxs.push(local_max);
-
- to_draw.push(make_graph_point_entry(data, color, false));
- to_draw.push({"color": color, "data": data, "url": changes[i].url});
- }
- }
- // Zoom the graph to only the data sets, not the regressions.
- var min_x = Math.min.apply(Math, mins);
- var max_x = Math.max.apply(Math, maxs);
- // Regressions.
- for (i = 0; i < changes.length; i++) {
- if (is_checked[i] && data_cache[i]) {
- if (regression_cache[i]) {
- regressions = try_normal(regression_cache[i]);
- to_draw.push(make_graph_point_entry(regressions, color, true));
- }
- }
- }
- // Limit the number of lines to plot: the graph gets cluttered and slow.
- if (lines_to_draw > MAX_TO_DRAW) {
- $('#errors').empty().prepend(NOT_DRAWING);
- return;
- }
- var lowest_rev = Math.min.apply(Math, starts);
- var highest_rev = Math.max.apply(Math, ends);
- init(to_draw, lowest_rev, highest_rev, min_x, max_x);
-}
-
-// To be called by main page. It will fetch data and make graph ready.
-function add_data_to_graph(URL, index, max_samples) {
- "use strict";
- $.getJSON(get_api_url("graph", db_name, test_suite_name, URL) + "?limit=" + max_samples, function (data) {
- new_graph_data_callback(data, index);
- });
- $.getJSON(get_api_url("regression", db_name, test_suite_name, URL) + "?limit=" + max_samples, function (data) {
- new_graph_regression_callback(data, index, update_graph);
- });
- is_checked[index] = true;
-}
-
-
-function init_axis() {
- "use strict";
- function onlyUnique(value, index, self) {
- return self.indexOf(value) === index;
- }
-
- var metrics = $('.metric').map(function () {
- return $(this).text();
- }).get();
- metrics = metrics.filter(onlyUnique);
-
- var yaxis_name = metrics.join(", ");
- yaxis_name = yaxis_name.replace("_", " ");
-
- $('#yaxis').text(yaxis_name);
-
- $('#normalize').click(function (e) {
- normalize = !normalize;
- if (normalize) {
- $('#normalize').toggleClass("btn-default btn-primary");
- $('#normalize').text("x1");
- $('#yaxis').text("Normalized (%)");
- } else {
- $('#normalize').toggleClass("btn-primary btn-default");
- $('#normalize').text("%");
- $('#yaxis').text(yaxis_name);
- }
- update_graph();
- });
-
- $('#xaxis').css('position', 'absolute');
- $('#xaxis').css('left', '50%');
- $('#xaxis').css('bottom', '-15px');
- $('#xaxis').css('width', '100px');
- $('#xaxis').css('margin-left', '-50px');
-
- $('#yaxis').css('position', 'absolute');
- $('#yaxis').css('left', '-55px');
- $('#yaxis').css('top', '50%');
- $('#yaxis').css('-webkit-transform', 'rotate(-90deg)');
- $('#yaxis').css('-moz-transform', 'rotate(-90deg)');
-}
/* On the normal graph page, data is loaded during page load.
This function takes the plots from page load and adds the regressions
   that are asynchronously fetched.
*/
-function update_graphplots(old_plot) {
+function plotly_update_graphplots(old_plot) {
"use strict";
// Regressions.
- var regressions = null;
- var i = 0;
var new_plot = $.extend([], old_plot);
- for (i = 0; i < regression_cache.length; i++) {
+ for (var i = 0; i < regression_cache.length; i++) {
if (regression_cache[i]) {
- regressions = regression_cache[i];
- new_plot.push(make_graph_point_entry(regressions, "#000000", true));
+            regression_cache[i].forEach(function (trace) {
+                new_plot.push(trace);
+            });
}
}
return new_plot;
}
-
-
-function init(data, start_highlight, end_highlight, x_min, x_max) {
- "use strict";
- // First, set up the primary graph.
- var graph = $("#graph");
- var graph_plots = data;
- var line_width = 1;
- if (data.length > 0 && data[0].data.length < 50) {
- line_width = 2;
- }
- var graph_options = {
- xaxis: {
- min: x_min,
- max: x_max
- },
- series : {
- lines : {lineWidth : line_width},
- shadowSize : 0
- },
- highlight : {
- range: {"end": [end_highlight], "start": [start_highlight]},
- alpha: "0.35",
- stroke: true
- },
- zoom : { interactive : false },
- pan : { interactive : true,
- frameRate: 60 },
- grid : {
- hoverable : true,
- clickable: true
- }
- };
-
- var main_plot = $.plot("#graph", graph_plots, graph_options);
-
- // Add tooltips.
- graph.bind("plotclick", function (e, p, i) {
- update_tooltip(e, p, i, show_tooltip, graph_plots);
- });
-
- bind_zoom_bar(main_plot);
-}
diff --git a/lnt/server/ui/templates/v4_graph.html b/lnt/server/ui/templates/v4_graph.html
--- a/lnt/server/ui/templates/v4_graph.html
+++ b/lnt/server/ui/templates/v4_graph.html
@@ -4,32 +4,10 @@
{% extends "layout.html" %}
{% set components = [(ts.name, v4_url_for(".v4_recent_activity"))] %}
{% block head %}
-  <script type="text/javascript"
-          src="{{ url_for('.static', filename='flot/jquery.flot.min.js') }}"></script>
+  <script type="text/javascript"
+          src="{{ url_for('.static', filename='lnt_graph.js') }}"></script>
-  <script type="text/javascript"
-          src="{{ url_for('.static', filename='flot/jquery.flot.errorbars.min.js') }}"></script>
-  <script type="text/javascript"
-          src="{{ url_for('.static', filename='flot/jquery.flot.navigate.min.js') }}"></script>
-  <script type="text/javascript"
-          src="{{ url_for('.static', filename='flot/jquery.flot.selection.min.js') }}"></script>
+  <script type="text/javascript"
+          src="https://cdn.plot.ly/plotly-2.4.2.min.js"></script>
{% endblock %}
{% block title %}Graph{% endblock %}
@@ -38,125 +16,80 @@
{% block onload %}init_page(){% endblock %}
{% block javascript %}
-var g = {};
var test_suite_name = "{{ request.view_args.testsuite_name }}";
var db_name = "{{ request.view_args.get('db_name','') }}";
var graph_plots = {{graph_plots|tojson|safe}};
-var baseline_plots = {{baseline_plots|tojson|safe}};
+var metrics = {{metrics|tojson|safe}};
+var legend = {{legend|tojson|safe}};
var options = {{options|tojson|safe}};
-prefix = "{{request.base_url}}";
-
-transform_fn = function (v) { return v; }
-inverse_transform_fn = function (v) { return v; }
-if (options.logarithmic_scale) {
- transform_fn = function(v) {
- if (v < 0)
- return -Math.log10(-v);
- else if (v > 0)
- return Math.log10(v);
- else
- return 0;
+function init_graph() {
+ // Add regressions
+ var tmp_plots = plotly_update_graphplots(graph_plots);
+
+ var graph_layout = {
+ // title: 'Graph',
+ hovermode: 'closest',
+ showlegend: true,
+ legend: { x: 0, y: -1.0,
+ // yanchor: 'bottom',
+ // size: 'top left',
+ bgcolor: 'rgba(0,0,0,0)' },
+ margin: { l: 50, r: 0, t: 10, b: 0 },
+ height: 700
+ };
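+    // Plotly does not sort a 'category' axis itself; orders arrive pre-sorted
+    // from the server (see load_graph_data()).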
+ if (options.xaxis_date) {
+ graph_layout['xaxis'] = {title: 'Date', type: 'date'};
+ } else {
+ graph_layout['xaxis'] = {title: 'Order', type: 'category'};
+ }
+
+ var xaxis_left = 0.0;
+ var xaxis_right = 1.0;
+ for (var i = 0; i < metrics.length; i++) {
+ var yaxis = 'yaxis';
+ if (i > 0) yaxis += (i+1).toString();
+ graph_layout[yaxis] = {title: metrics[i]};
+ if (options.logarithmic_scale) {
+ graph_layout[yaxis]['type'] = 'log';
+ graph_layout[yaxis]['autorange'] = true;
}
- inverse_transform_fn = function(v) {
- if (v < 0)
- return -Math.pow(10, -v);
- else if (v > 0)
- return Math.pow(10, v);
- else
- return 0;
+    if (i > 0) {
+ graph_layout[yaxis]['overlaying'] = 'y';
}
-}
-
-function init_graph() {
- // Set up the primary graph.
- var graph = $("#graph");
- var graph_options = {
- series : {
- lines : {
- lineWidth : 1 },
- shadowSize : 0
- },
- highlight : {
-{% if revision_range is not none %}
- range: {{revision_range|tojson|safe}}
-{% else %}
- enabled: false
-{% endif %}
- },
- zoom : { interactive : false },
- pan : { interactive : true,
- frameRate: 60 },
- grid : {
- hoverable : true,
- clickable: true },
- yaxis: {
- transform: transform_fn,
- inverseTransform: inverse_transform_fn }
- };
-
- // Add baseline lines
- graph_options['grid']['markings'] = baseline_plots;
- var tmp_plots = update_graphplots(graph_plots);
- var main_plot = $.plot(graph, tmp_plots, graph_options);
-
- // Add tooltips.
- graph.bind("plotclick", function (e, p, i) {
- update_tooltip(e, p, i, show_tooltip, tmp_plots);
- });
-
- // Set up the overview graph.
- var overview = $("#overview")
- var overview_plots = {{overview_plots|tojson|safe}};
- $.plot(overview, overview_plots, {
- series : {
- lines : {
- lineWidth : 1 },
- shadowSize : 0 },
- selection: { mode: "x" },
- touch: {
- enabled: false
- },
- highlight : {
-{% if revision_range is not none %}
- range: {{revision_range|tojson|safe}},
- alpha: "1",
- stroke: true,
-{% else %}
- enabled: false
-{% endif %}
- },
- yaxis: { ticks: [] } });
-
- // Connect selection on the overview graph to the main plot.
- $("#overview").bind("plotselected", function (event, ranges) {
- // Set the zooming on the plot.
- $.plot(graph, graph_plots,
- $.extend(true, {}, graph_options, {
- xaxis: { min: ranges.xaxis.from, max: ranges.xaxis.to },
- yaxis: { min: ranges.yaxis.from, max: ranges.yaxis.to }
- }));
- });
- bind_zoom_bar(main_plot);
-
+ if (i & 1) {
+ graph_layout[yaxis]['side'] = 'right';
+ xaxis_right = 1 - 0.03 * i;
+ graph_layout[yaxis]['position'] = xaxis_right;
+ } else {
+        xaxis_left = 0.03 * i;
+ graph_layout[yaxis]['position'] = xaxis_left;
+ }
+ }
+ graph_layout['xaxis']['domain'] = [xaxis_left, xaxis_right];
+ Plotly.newPlot('graph', tmp_plots, graph_layout);
+    var graph = document.getElementById('graph');
+ graph.on('plotly_click', plotly_update_tooltip);
+ graph.on('plotly_doubleclick', plotly_hide_tooltip);
}
function init_page() {
- // First start the requests for regrssion data.
- var urls = $(".data-row").each(function (index, val) {
- $.getJSON(get_api_url("regression",
- db_name,
- test_suite_name,
- $(val).data('url')),
- function (data) {
- new_graph_regression_callback(data, index, init_graph);
- });
- return $(val).data('url');
+ if (!options.xaxis_date) {
+ // First start the requests for regression data.
+ legend.forEach(function(item, index) {
+ if (item[4]) { // legend.url
+ var yaxis_index = metrics.indexOf(item[2]); // legend.field_name
+ var yaxis = ((yaxis_index == 0) ? "y" : ("y"+(yaxis_index + 1).toString()));
+ $.getJSON(get_api_url("regression", db_name, test_suite_name, item[4]),
+ function (data) {
+ plotly_graph_regression_callback(data, index, item, yaxis, init_graph);
+ }
+ );
+ }
});
-
- init_graph();
- init_axis();
+ }
+ init_graph();
}
{% endblock %}
@@ -164,8 +97,7 @@
{% block sidebar %}
  <h4>Controls</h4>
  <ul>
-    <li>Left Mouse: Pan</li>
-    <li>Double Left Mouse: Zoom</li>
+    <li>Double Left Mouse: Hide Tooltip</li>
  </ul>
{% endblock %}
@@ -173,6 +105,22 @@
  <tr>
    <td><h3>Graph</h3></td>
  </tr>
  <tr>
-    <td>
-      <div id="zoombar">
-        <button id="in" type="button" class="btn btn-default">+</button>
-        <button id="out" type="button" class="btn btn-default">-</button>
-        <button id="normalize" type="button" class="btn btn-default">%</button>
-      </div>
-      <div id="graphbox">
-        <div id="graph"></div>
-        <div id="yaxis">Metric</div>
-        <div id="xaxis">Order</div>
-      </div>
-      <div id="errors"></div>
-      <div id="overview"></div>
-    </td>
+    <td>
+      <div id="graph"></div>
+    </td>
  </tr>
-  <tr>
-    <td>
-      <h4>Legend</h4>
-      <table class="table table-striped table-hover table-condensed">
-        <tr>
-          <th></th>
-          <th>Machine</th>
-          <th>Test</th>
-          <th>Type</th>
-        </tr>
-        {% for machine, test_name, field_name, col, url in legend %}
-        <tr>
-          <td bgcolor="{{ col | toColorString }}">&nbsp;</td>
-          <td>{{ utils.render_machine(machine) }}</td>
-          <td>{{ test_name }}</td>
-          <td>{{ field_name }}</td>
-        </tr>
-        {% endfor %}
-      </table>
-    </td>
-  </tr>
{% endblock %}
diff --git a/lnt/server/ui/views.py b/lnt/server/ui/views.py
--- a/lnt/server/ui/views.py
+++ b/lnt/server/ui/views.py
@@ -7,6 +7,7 @@
import time
from collections import namedtuple, defaultdict
from urllib.parse import urlparse, urljoin
+from io import BytesIO
import flask
import sqlalchemy.sql
@@ -17,6 +18,7 @@
from flask import make_response
from flask import render_template
from flask import request, url_for
+from flask import send_file
from flask_wtf import Form
from sqlalchemy.orm import joinedload
from sqlalchemy.orm.exc import NoResultFound
@@ -864,8 +866,14 @@
return machine, field
-def load_graph_data(plot_parameter, show_failures, revision_cache=None):
-
+def load_graph_data(plot_parameter, show_failures, limit, xaxis_date, revision_cache=None):
+ """
+ Load all the field values for this test on the same machine.
+ :param plot_parameter: Stores machine, test and field to load.
+ :param show_failures: Filter only passed values if False.
+ :param limit: Limit points if specified.
+ :param xaxis_date: X axis is Date, otherwise Order.
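+    :param revision_cache: dict used to memoize revision conversions across calls.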
+ """
session = request.session
ts = request.get_testsuite()
@@ -873,9 +881,7 @@
#
# FIXME: Don't join to Order here, aggregate this across all the tests
# we want to load. Actually, we should just make this a single query.
- #
- # FIXME: Don't hard code field name.
- values = session.query(plot_parameter.field.column, ts.Order.llvm_project_revision,
+ values = session.query(plot_parameter.field.column, ts.Order,
ts.Run.start_time, ts.Run.id) \
.join(ts.Run).join(ts.Order) \
.filter(ts.Run.machine_id == plot_parameter.machine.id) \
@@ -886,30 +892,64 @@
if plot_parameter.field.status_field:
values = values.filter((plot_parameter.field.status_field.column == PASS) |
(plot_parameter.field.status_field.column.is_(None)))
+ if limit:
+ values = values.limit(limit)
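+        # The cap applies to raw samples, before the aggregation below.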
+
+ if xaxis_date:
+ # Aggregate by date.
+ data = list(multidict.multidict(
+ (date, (val, order, date, run_id))
+ for val, order, date, run_id in values).items())
+ # Sort data points according to date.
+ data.sort(key=lambda sample: sample[0])
+ else:
+ # Aggregate by order (revision).
+ data = list(multidict.multidict(
+ (order.llvm_project_revision, (val, order, date, run_id))
+ for val, order, date, run_id in values).items())
+ # Sort data points according to order (revision).
+ data.sort(key=lambda sample: convert_revision(sample[0], cache=revision_cache))
- # Aggregate by revision.
- data = list(multidict.multidict((rev, (val, date, run_id))
- for val, rev, date, run_id in values).items())
- data.sort(key=lambda sample: convert_revision(sample[0], cache=revision_cache))
return data
-def load_geomean_data(field, machine):
+def load_geomean_data(field, machine, limit, xaxis_date, revision_cache=None):
+ """
+ Load geomean for specified field on the same machine.
+ :param field: Field.
+ :param machine: Machine.
+ :param limit: Limit points if specified.
+ :param xaxis_date: X axis is Date, otherwise Order.
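+    :param revision_cache: dict used to memoize revision conversions.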
+ """
session = request.session
ts = request.get_testsuite()
values = session.query(sqlalchemy.sql.func.min(field.column),
- ts.Order.llvm_project_revision,
+ ts.Order,
sqlalchemy.sql.func.min(ts.Run.start_time)) \
.join(ts.Run).join(ts.Order).join(ts.Test) \
.filter(ts.Run.machine_id == machine.id) \
.filter(field.column.isnot(None)) \
.group_by(ts.Order.llvm_project_revision, ts.Test)
+ if limit:
+ values = values.limit(limit)
+
+ data = multidict.multidict(
+ ((order, date), val)
+ for val, order, date in values).items()
+
# Calculate geomean of each revision.
- data = multidict.multidict(((rev, date), val) for val, rev, date in values).items()
- data = [(rev, [(calc_geomean(vals), date)]) for ((rev, date), vals) in data]
- # Sort data points according to revision number.
- data.sort(key=lambda sample: convert_revision(sample[0]))
+ if xaxis_date:
+ data = [(date, [(calc_geomean(vals), order, date)])
+ for ((order, date), vals) in data]
+ # Sort data points according to date.
+ data.sort(key=lambda sample: sample[0])
+ else:
+ data = [(order.llvm_project_revision, [(calc_geomean(vals), order, date)])
+ for ((order, date), vals) in data]
+ # Sort data points according to order (revision).
+ data.sort(key=lambda sample: convert_revision(sample[0], cache=revision_cache))
+
return data
@@ -946,6 +986,12 @@
bool(request.args.get('show_stddev'))
options['hide_all_points'] = hide_all_points = bool(
request.args.get('hide_all_points'))
+ options['xaxis_date'] = xaxis_date = bool(
+ request.args.get('xaxis_date'))
+ options['limit'] = limit = int(
+ request.args.get('limit', 0))
+ options['show_cumulative_minimum'] = show_cumulative_minimum = bool(
+ request.args.get('show_cumulative_minimum'))
options['show_linear_regression'] = show_linear_regression = bool(
request.args.get('show_linear_regression'))
options['show_failures'] = show_failures = bool(
@@ -981,7 +1027,7 @@
# Baselines to graph are passed as:
#
# baseline.title=
- if not name.startswith(str('baseline.')):
+ if not name.startswith('baseline.'):
continue
baseline_title = name[len('baseline.'):]
@@ -1022,18 +1068,20 @@
start_rev = prev_runs[0].order.llvm_project_revision
end_rev = highlight_run.order.llvm_project_revision
revision_range = {
- "start": convert_revision(start_rev),
- "end": convert_revision(end_rev),
+ "start": start_rev,
+ "end": end_rev,
}
# Build the graph data.
legend = []
graph_plots = []
graph_datum = []
- overview_plots = []
baseline_plots = []
revision_cache = {}
num_plots = len(plot_parameters)
+
+ metrics = list(set(req.field.name for req in plot_parameters))
+
for i, req in enumerate(plot_parameters):
# Determine the base plot color.
col = list(util.makeDarkColor(float(i) / num_plots))
@@ -1042,7 +1090,7 @@
tuple(col), url))
# Load all the field values for this test on the same machine.
- data = load_graph_data(req, show_failures, revision_cache)
+ data = load_graph_data(req, show_failures, limit, xaxis_date, revision_cache)
graph_datum.append((req.test.name, data, col, req.field, url, req.machine))
@@ -1073,10 +1121,11 @@
dark_col = list(util.makeDarkerColor(my_color))
str_dark_col = util.toColorString(dark_col)
baseline_plots.append({
- 'color': str_dark_col,
- 'lineWidth': 2,
- 'yaxis': {'from': mean, 'to': mean},
- 'name': q_baseline[0].llvm_project_revision,
+ "color": str_dark_col,
+ "lineWidth": 2,
+ "yaxis": {"from": mean, "to": mean},
+ # "name": q_baseline[0].llvm_project_revision,
+ "name": "Baseline %s: %s (%s)" % (baseline_title, req.test.name, req.field.name),
})
baseline_name = ("Baseline {} on {}"
.format(baseline_title, q_baseline[0].name))
@@ -1089,18 +1138,37 @@
machine, field = mean_parameter
test_name = 'Geometric Mean'
+ if field.name not in metrics:
+ metrics.append(field.name)
+
col = (0, 0, 0)
legend.append(LegendItem(machine, test_name, field.name, col, None))
- data = load_geomean_data(field, machine)
+ data = load_geomean_data(field, machine, limit, xaxis_date, revision_cache)
graph_datum.append((test_name, data, col, field, None, machine))
- for name, data, col, field, url, machine in graph_datum:
+ def trace_name(name, test_name, field_name):
+ return "%s: %s (%s)" % (name, test_name, field_name)
+
+ for test_name, data, col, field, url, machine in graph_datum:
+ # Generate trace metadata.
+ trace_meta = {}
+ trace_meta["machine"] = machine.name
+ trace_meta["machineID"] = machine.id
+ if len(graph_datum) > 1:
+ # If there are more than one plot in the graph, also label the
+ # test name.
+ trace_meta["test_name"] = test_name
+ trace_meta["metric"] = field.name
+
# Compute the graph points.
- errorbar_data = []
- points_data = []
- pts = []
- moving_median_data = []
- moving_average_data = []
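+        # Plotly traces take parallel x/y/meta arrays rather than per-point tuples.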
+ pts_x = []
+ pts_y = []
+ meta = []
+ errorbar = {"x": [], "y": [], "error_y": {"type": "data", "visible": True, "array": []}}
+ cumulative_minimum = {"x": [], "y": []}
+ moving_median_data = {"x": [], "y": []}
+ moving_average_data = {"x": [], "y": []}
+ multisample_points_data = {"x": [], "y": [], "meta": []}
if normalize_by_median:
normalize_by = 1.0/stats.median([min([d[0] for d in values])
@@ -1108,20 +1176,22 @@
else:
normalize_by = 1.0
- for pos, (point_label, datapoints) in enumerate(data):
+ min_val = None
+ # Note data is sorted in load_graph_data().
+ for point_label, datapoints in data:
# Get the samples.
- data = [data_date[0] for data_date in datapoints]
+ values = [data_array[0] for data_array in datapoints]
+ orders = [data_array[1] for data_array in datapoints]
# And the date on which they were taken.
- dates = [data_date[1] for data_date in datapoints]
- # Run where this point was collected.
- runs = [data_pts[2]
- for data_pts in datapoints if len(data_pts) == 3]
+ dates = [data_array[2] for data_array in datapoints]
+ # Run ID where this point was collected.
+ run_ids = [data_array[3] for data_array in datapoints if len(data_array) == 4]
- x = determine_x_value(point_label, pos, revision_cache)
+ values = [v * normalize_by for v in values]
- values = [v*normalize_by for v in data]
- aggregation_fn = min
+ is_multisample = (len(values) > 1)
+ aggregation_fn = min
if switch_min_mean_local:
aggregation_fn = lnt.util.stats.agg_mean
if field.bigger_is_better:
@@ -1130,55 +1200,72 @@
agg_value, agg_index = \
aggregation_fn((value, index)
for (index, value) in enumerate(values))
-
- # Generate metadata.
- metadata = {"label": point_label}
- metadata["machine"] = machine.name
- metadata["date"] = str(dates[agg_index])
- if runs:
- metadata["runID"] = str(runs[agg_index])
-
- if len(graph_datum) > 1:
- # If there are more than one plot in the graph, also label the
- # test name.
- metadata["test_name"] = name
-
- pts.append((x, agg_value, metadata))
-
- # Add the individual points, if requested.
- # For each point add a text label for the mouse over.
- if not hide_all_points:
+ pts_y.append(agg_value)
+
+ # Plotly does not sort X axis in case of type: 'category'.
+ # point_label is a string (order revision) if xaxis_date = False
+ pts_x.append(point_label)
+
+ # Generate point metadata.
+ point_metadata = {"order": orders[agg_index].as_ordered_string(),
+ "orderID": orders[agg_index].id,
+ "date": str(dates[agg_index])}
+ if run_ids:
+ point_metadata["runID"] = str(run_ids[agg_index])
+ meta.append(point_metadata)
+
+ # Add the multisample points, if requested.
+ if not hide_all_points and (is_multisample or
+ bool(request.args.get('csv')) or bool(request.args.get('download_csv'))):
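+            # Multisample points are always kept for CSV export so that every
+            # individual sample appears in the output.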
for i, v in enumerate(values):
- point_metadata = dict(metadata)
- point_metadata["date"] = str(dates[i])
- points_data.append((x, v, point_metadata))
+ multisample_metadata = {"order": orders[i].as_ordered_string(),
+ "orderID": orders[i].id,
+ "date": str(dates[i])}
+ if run_ids:
+ multisample_metadata["runID"] = str(run_ids[i])
+ multisample_points_data["x"].append(point_label)
+ multisample_points_data["y"].append(v)
+ multisample_points_data["meta"].append(multisample_metadata)
# Add the standard deviation error bar, if requested.
if show_stddev:
mean = stats.mean(values)
sigma = stats.standard_deviation(values)
- errorbar_data.append((x, mean, sigma))
+ errorbar["x"].append(point_label)
+ errorbar["y"].append(mean)
+ errorbar["error_y"]["array"].append(sigma)
# Add the MAD error bar, if requested.
if show_mad:
med = stats.median(values)
mad = stats.median_absolute_deviation(values, med)
- errorbar_data.append((x, med, mad))
+ errorbar["x"].append(point_label)
+ errorbar["y"].append(med)
+ errorbar["error_y"]["array"].append(mad)
+
+ if show_cumulative_minimum:
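+                # Running minimum over the points seen so far (data is pre-sorted).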
+ min_val = agg_value if min_val is None else min(min_val, agg_value)
+ cumulative_minimum["x"].append(point_label)
+ cumulative_minimum["y"].append(min_val)
# Compute the moving average and or moving median of our data if
# requested.
if moving_average or moving_median:
def compute_moving_average(x, window, average_list, _):
- average_list.append((x, lnt.util.stats.mean(window)))
+ average_list["x"].append(x)
+ average_list["y"].append(lnt.util.stats.mean(window))
def compute_moving_median(x, window, _, median_list):
- median_list.append((x, lnt.util.stats.median(window)))
+ median_list["x"].append(x)
+ median_list["y"].append(lnt.util.stats.median(window))
def compute_moving_average_and_median(x, window, average_list,
median_list):
- average_list.append((x, lnt.util.stats.mean(window)))
- median_list.append((x, lnt.util.stats.median(window)))
+ average_list["x"].append(x)
+ average_list["y"].append(lnt.util.stats.mean(window))
+ median_list["x"].append(x)
+ median_list["y"].append(lnt.util.stats.median(window))
if moving_average and moving_median:
fun = compute_moving_average_and_median
@@ -1187,117 +1274,170 @@
else:
fun = compute_moving_median
- len_pts = len(pts)
+ len_pts = len(pts_x)
for i in range(len_pts):
start_index = max(0, i - moving_window_size)
end_index = min(len_pts, i + moving_window_size)
- window_pts = [x[1] for x in pts[start_index:end_index]]
- fun(pts[i][0], window_pts, moving_average_data,
+ window_pts = pts_y[start_index:end_index]
+ fun(pts_x[i], window_pts, moving_average_data,
moving_median_data)
- # On the overview, we always show the line plot.
- overview_plots.append({
- "data": pts,
- "color": util.toColorString(col),
- })
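+        # Each metric gets its own Plotly y-axis: "y", "y2", "y3", ...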
+ yaxis_index = metrics.index(field.name)
+ yaxis = "y" if yaxis_index == 0 else "y%d" % (yaxis_index + 1)
# Add the minimum line plot, if requested.
if show_lineplot:
plot = {
- "data": pts,
- "color": util.toColorString(col),
+ "name": trace_name("Line", test_name, field.name),
+ "legendgroup": test_name,
+ "yaxis": yaxis,
+ "type": "scatter",
+ "mode": "lines+markers",
+ "line": {"color": util.toColorString(col)},
+ "x": pts_x,
+ "y": pts_y,
+ "meta": meta
}
+ plot.update(trace_meta)
if url:
plot["url"] = url
graph_plots.append(plot)
+
# Add regression line, if requested.
- if show_linear_regression:
- xs = [t for t, v, _ in pts]
- ys = [v for t, v, _ in pts]
-
- # We compute the regression line in terms of a normalized X scale.
- x_min, x_max = min(xs), max(xs)
- try:
- norm_xs = [(x - x_min) / (x_max - x_min)
- for x in xs]
- except ZeroDivisionError:
- norm_xs = xs
-
- try:
- info = ext_stats.linregress(norm_xs, ys)
- except ZeroDivisionError:
- info = None
- except ValueError:
- info = None
-
- if info is not None:
- slope, intercept, _, _, _ = info
-
- reglin_col = [c * .7 for c in col]
- reglin_pts = [(x_min, 0.0 * slope + intercept),
- (x_max, 1.0 * slope + intercept)]
- graph_plots.insert(0, {
- "data": reglin_pts,
- "color": util.toColorString(reglin_col),
- "lines": {
- "lineWidth": 2
- },
- "shadowSize": 4,
- })
+ if show_linear_regression and len(pts_x) >= 2:
+ unique_x = list(set(pts_x))
+ if xaxis_date:
+ unique_x.sort()
+ else:
+ unique_x.sort(key=lambda sample: convert_revision(sample, cache=revision_cache))
+ num_unique_x = len(unique_x)
+ if num_unique_x >= 2:
+ dict_x = {}
+ x_min = pts_x[0]
+ x_max = pts_x[-1]
+
+ # We compute the regression line in terms of a normalized X scale.
+ if xaxis_date:
+ x_range = float((x_max - x_min).total_seconds())
+ for x_key in unique_x:
+ dict_x[x_key] = (x_key - x_min).total_seconds() / x_range
+ else:
+ for i, x_key in enumerate(unique_x):
+ dict_x[x_key] = i/(num_unique_x - 1)
+
+ norm_x = [dict_x[xi] for xi in pts_x]
+
+ try:
+ info = ext_stats.linregress(norm_x, pts_y)
+ except ZeroDivisionError:
+ info = None
+ except ValueError:
+ info = None
+
+ if info is not None:
+ slope, intercept, _, _, _ = info
+
+ reglin_col = [c * 0.8 for c in col]
+ if xaxis_date:
+ reglin_y = [(xi - x_min).total_seconds() / x_range * slope +
+ intercept for xi in unique_x]
+ else:
+ reglin_y = [i/(num_unique_x - 1) * slope +
+ intercept for i in range(num_unique_x)]
+ plot = {
+ "name": trace_name("Linear Regression", test_name, field.name),
+ "legendgroup": test_name,
+ "yaxis": yaxis,
+ "hoverinfo": "skip",
+ "type": "scatter",
+ "mode": "lines",
+ "line": {"color": util.toColorString(reglin_col), "width": 2},
+ # "shadowSize": 4,
+ "x": unique_x,
+ "y": reglin_y
+ }
+ plot.update(trace_meta)
+ graph_plots.insert(0, plot)
# Add the points plot, if used.
- if points_data:
+ if multisample_points_data["x"]:
pts_col = (0, 0, 0)
- plot = {
- "data": points_data,
- "color": util.toColorString(pts_col),
- "lines": {"show": False},
- "points": {
- "show": True,
- "radius": .25,
- "fill": True,
- },
- }
+ multisample_points_data.update({
+ "name": trace_name("Points", test_name, field.name),
+ "legendgroup": test_name,
+ "showlegend": False,
+ "yaxis": yaxis,
+ # "hoverinfo": "skip",
+ "type": "scatter",
+ "mode": "markers",
+ "marker": {"color": util.toColorString(pts_col), "size": 5}
+ })
+ multisample_points_data.update(trace_meta)
if url:
- plot['url'] = url
- graph_plots.append(plot)
+ multisample_points_data["url"] = url
+ graph_plots.append(multisample_points_data)
# Add the error bar plot, if used.
- if errorbar_data:
- bar_col = [c*.7 for c in col]
- graph_plots.append({
- "data": errorbar_data,
- "lines": {"show": False},
- "color": util.toColorString(bar_col),
- "points": {
- "errorbars": "y",
- "yerr": {
- "show": True,
- "lowerCap": "-",
- "upperCap": "-",
- "lineWidth": 1,
- }
- }
+ if errorbar["x"]:
+ bar_col = [c * 0.4 for c in col]
+ errorbar.update({
+ "name": trace_name("Error bars", test_name, field.name),
+ "showlegend": False,
+ "yaxis": yaxis,
+ "hoverinfo": "skip",
+ "type": "scatter",
+ "mode": "markers",
+ "marker": {"color": util.toColorString(bar_col)}
})
+ errorbar.update(trace_meta)
+ graph_plots.append(errorbar)
# Add the moving average plot, if used.
- if moving_average_data:
- col = [0.32, 0.6, 0.0]
- graph_plots.append({
- "data": moving_average_data,
- "color": util.toColorString(col),
+ if moving_average_data["x"]:
+ avg_col = [c * 0.7 for c in col]
+ moving_average_data.update({
+ "name": trace_name("Moving average", test_name, field.name),
+ "legendgroup": test_name,
+ "yaxis": yaxis,
+ "hoverinfo": "skip",
+ "type": "scatter",
+ "mode": "lines",
+ "line": {"color": util.toColorString(avg_col)}
})
+ moving_average_data.update(trace_meta)
+ graph_plots.append(moving_average_data)
# Add the moving median plot, if used.
- if moving_median_data:
- col = [0.75, 0.0, 1.0]
- graph_plots.append({
- "data": moving_median_data,
- "color": util.toColorString(col),
+ if moving_median_data["x"]:
+ med_col = [c * 0.6 for c in col]
+ moving_median_data.update({
+ "name": trace_name("Moving median: ", test_name, field.name),
+ "legendgroup": test_name,
+ "yaxis": yaxis,
+ "hoverinfo": "skip",
+ "type": "scatter",
+ "mode": "lines",
+ "line": {"color": util.toColorString(med_col)}
})
+ moving_median_data.update(trace_meta)
+ graph_plots.append(moving_median_data)
+
+ if cumulative_minimum["x"]:
+ min_col = [c * 0.5 for c in col]
+ cumulative_minimum.update({
+ "name": trace_name("Cumulative Minimum", test_name, field.name),
+ "legendgroup": test_name,
+ "yaxis": yaxis,
+ "hoverinfo": "skip",
+ "type": "scatter",
+ "mode": "lines",
+ "line": {"color": util.toColorString(min_col)}
+ })
+ cumulative_minimum.update(trace_meta)
+ graph_plots.append(cumulative_minimum)
- if bool(request.args.get('json')):
+ if bool(request.args.get("json")) or bool(request.args.get("download_json")):
json_obj = dict()
json_obj['data'] = graph_plots
# Flatten ORM machine objects to their string names.
@@ -1317,39 +1457,27 @@
json_obj['current_options'] = options
json_obj['test_suite_name'] = ts.name
json_obj['baselines'] = baseline_plots
- return flask.jsonify(**json_obj)
+ flask_json = flask.jsonify(**json_obj)
+
+ if bool(request.args.get('json')):
+ return flask_json
+ else:
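+        # download_json: serve the same payload as a file attachment.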
+ json_file = BytesIO()
+ lines = flask_json.get_data()
+ json_file.write(lines)
+ json_file.seek(0)
+ return send_file(json_file,
+ mimetype='text/json',
+ attachment_filename='Graph.json',
+ as_attachment=True)
return render_template("v4_graph.html", options=options,
- revision_range=revision_range,
graph_plots=graph_plots,
- overview_plots=overview_plots, legend=legend,
- baseline_plots=baseline_plots,
+ metrics=metrics,
+ legend=legend,
**ts_data(ts))
-def determine_x_value(point_label, fallback, revision_cache):
- """Given the order data, lets make a reasonable x axis value.
-
- :param point_label: the text representation of the x value
- :param fallback: The value to use for non
- :param revision_cache: a dict to use as a cache for convert_revision.
- :return: an integer or float value that is like the point_label or fallback.
-
- """
- rev_x = convert_revision(point_label, revision_cache)
- if len(rev_x) == 1:
- x = rev_x[0]
- elif len(rev_x) == 2:
- try:
- x = float(point_label)
- except ValueError:
- # It might have dashes or something silly
- x = float(str(rev_x[0]) + '.' + str(rev_x[1]))
- else:
- return fallback
- return x
-
-
@v4_route("/global_status")
def v4_global_status():
session = request.session