diff --git a/lnt/server/ui/api.py b/lnt/server/ui/api.py
--- a/lnt/server/ui/api.py
+++ b/lnt/server/ui/api.py
@@ -453,11 +453,11 @@
q = q.limit(limit)
samples = [
- [convert_revision(rev), val,
+ [rev, val,
{'label': rev, 'date': str(time), 'runID': str(rid)}]
for val, rev, time, rid in q.all()[::-1]
]
- samples.sort(key=lambda x: x[0])
+ samples.sort(key=lambda x: convert_revision(x[0]))
return samples
diff --git a/lnt/server/ui/static/lnt_graph.js b/lnt/server/ui/static/lnt_graph.js
--- a/lnt/server/ui/static/lnt_graph.js
+++ b/lnt/server/ui/static/lnt_graph.js
@@ -1,13 +1,5 @@
/*jslint vars: true, browser: true, devel: true, plusplus: true, unparam: true*/
-/*global $, jQuery, alert, db_name, test_suite_name, init, changes */
-/*global update_graph*/
-// Keep the graph data we download.
-// Each element is a list of graph data points.
-var data_cache = [];
-var is_checked = []; // The current list of lines to plot.
-var normalize = false;
-
-var MAX_TO_DRAW = 10;
+/*global $, jQuery, alert, db_name, test_suite_name */
var STATE_NAMES = {0: 'Detected',
1: 'Staged',
@@ -20,17 +12,22 @@
var regression_cache = [];
var lnt_graph = {};
-
// Grab the graph API url for this line.
function get_api_url(kind, db, ts, mtf) {
"use strict";
return [lnt_url_base, "api", "db_" + db, "v4", ts, kind, mtf].join('/');
}
-// Grab the URL for a regression by id.
-function get_regression_url(db, ts, regression) {
+// Grab the URL for a machine by id.
+function get_machine_url(db, ts, machineID) {
"use strict";
- return [lnt_url_base, "db_" + db, "v4", ts, "regressions", regression].join('/');
+ return [lnt_url_base, "db_" + db, "v4", ts, "machine", machineID].join('/');
+}
+
+// Grab the URL for an order by id.
+function get_order_url(db, ts, orderID) {
+ "use strict";
+ return [lnt_url_base, "db_" + db, "v4", ts, "order", orderID].join('/');
}
// Grab the URL for a run by id.
@@ -39,103 +36,84 @@
return [lnt_url_base, "db_" + db, "v4", ts, runID].join('/');
}
-// Create a new regression manually URL.
-function get_manual_regression_url(db, ts, url, runID) {
+// Grab the URL for a regression by id.
+function get_regression_url(db, ts, regression) {
"use strict";
- return [lnt_url_base,
- "db_" + db,
- "v4",
- ts,
- "regressions/new_from_graph",
- url,
- runID].join('/');
+ return [lnt_url_base, "db_" + db, "v4", ts, "regressions", regression].join('/');
}
-
-
-/* Bind events to the zoom bar buttons, so that
- * the zoom buttons work, then position them
- * over top of the main graph.
- */
-function bind_zoom_bar(my_plot) {
+// Create a new regression manually URL.
+function get_manual_regression_url(db, ts, url, runID) {
"use strict";
- $('#out').click(function (e) {
- e.preventDefault();
- my_plot.zoomOut();
- });
-
- $('#in').click(function (e) {
- e.preventDefault();
- my_plot.zoom();
- });
-
- // Now move the bottons onto the graph.
- $('#graphbox').css('position', 'relative');
- $('#zoombar').css('position', 'absolute');
-
- $('#zoombar').css('left', '40px');
- $('#zoombar').css('top', '15px');
-
+ return [lnt_url_base, "db_" + db, "v4", ts, "regressions/new_from_graph", url, runID].join('/');
}
-
// Show our overlay tooltip.
lnt_graph.current_tip_point = null;
-function show_tooltip(x, y, item, pos, graph_data) {
+function plotly_show_tooltip(data) {
"use strict";
- // Given the event handler item, get the graph metadata.
- function extract_metadata(item) {
- var index = item.dataIndex;
- // Graph data is formatted as [x, y, meta_data].
- var meta_data = item.series.data[index][2];
- return meta_data;
- }
- var data = item.datapoint;
- var meta_data = extract_metadata(item);
var tip_body = '
';
+ var point = data.points[0];
+
+ if (point.data.regression && point.data.regressionID) {
+ tip_body += "
" + point.data.regression + "";
+ }
- if (meta_data.title) {
- tip_body += "
" + meta_data.title + "";
+ if (point.data.machine && point.data.machineID) {
+ tip_body += "
Machine: " + point.data.machine + "";
}
- if (meta_data.machine) {
- tip_body += "
Machine: " + meta_data.machine + "
";
+ if (point.data.test_name) {
+ tip_body += "
Test: " + point.data.test_name + "
";
}
- if (meta_data.test_name) {
- tip_body += "
Test: " + meta_data.test_name + "
";
+ if (point.data.metric) {
+ tip_body += "
Metric: " + point.data.metric + "
";
}
- if (meta_data.label) {
- tip_body += "
Revision: " + meta_data.label + "
";
+ if (point.meta.order) {
+ if (point.meta.orderID) {
+ tip_body += "
Order: " + point.meta.order + "";
+ } else {
+ tip_body += "
Order: " + point.meta.order + "
";
+ }
}
- tip_body += "
Value: " + data[1].toFixed(4) + "
";
- if (meta_data.date) {
- tip_body += "
Date: " + meta_data.date + "
";
+ tip_body += "
Value: " + point.y.toFixed(4) + "
";
+
+ if (point.meta.date) {
+ tip_body += "
Date: " + point.meta.date + "
";
}
- if (meta_data.state) {
- tip_body += "
State: " + meta_data.state + "
";
+
+ if (point.meta.state) {
+ tip_body += "
State: " + point.meta.state + "
";
}
- if (meta_data.runID) {
+
+ if (point.meta.runID) {
tip_body += "
Run: " + meta_data.runID + "
";
+ get_run_url(db_name, test_suite_name, point.meta.runID) +
+ "\">" + point.meta.runID + "";
}
- if (meta_data.runID && item.series.url) {
+ if (point.meta.runID && point.data.url) { // url = machine.id/test.id/field_index
tip_body += "
Mark Change.
";
+ get_manual_regression_url(db_name, test_suite_name, point.data.url, point.meta.runID) +
+ "\">Mark Change.";
}
tip_body += "
";
var tooltip_div = $(tip_body).css({
position: 'absolute',
display: 'none',
- top: y + 5,
- left: x + 5,
+ top: data.event.pageY + 5,
+ left: data.event.pageX + 5,
border: '1px solid #fdd',
padding: '2px',
'background-color': '#fee',
@@ -165,9 +143,9 @@
}
// Event handler function to update the tooltop.
-function update_tooltip(event, pos, item, show_fn, graph_data) {
+function plotly_update_tooltip(data) {
"use strict";
- if (!item) {
+ if (!data || data.points.length == 0) {
$("#tooltip").fadeOut(200, function () {
$("#tooltip").remove();
});
@@ -175,95 +153,19 @@
return;
}
- if (!lnt_graph.current_tip_point || (lnt_graph.current_tip_point[0] !== item.datapoint[0] ||
- lnt_graph.current_tip_point[1] !== item.datapoint[1])) {
+ if (!lnt_graph.current_tip_point || (lnt_graph.current_tip_point[0] !== data.points.curveNumber ||
+ lnt_graph.current_tip_point[1] !== data.points.pointNumber)) {
$("#tooltip").remove();
- lnt_graph.current_tip_point = item.datapoint;
- show_fn(pos.pageX, pos.pageY, item, pos, graph_data);
- }
-}
-
-
-// Normalize this data to the element in index
-function normalize_data(data_array, index) {
- "use strict";
- var new_data = new Array(data_array.length);
- var i = 0;
- var factor = 0;
- for (i = 0; i < data_array.length; i++) {
- if (data_array[i][0] == index) {
- factor = data_array[i][1];
- break;
- }
- }
- console.assert(factor !== 0, "Did not find the element to normalize on.");
- for (i = 0; i < data_array.length; i++) {
- new_data[i] = jQuery.extend({}, data_array[i]);
- new_data[i][1] = (data_array[i][1] / factor) * 100;
- }
- return new_data;
-}
-
-
-function try_normal(data_array, index) {
- "use strict";
- if (normalize) {
- return normalize_data(data_array, index);
- }
- return data_array;
-}
-
-
-function make_graph_point_entry(data, color, regression) {
- "use strict";
- var radius = 0.25;
- var fill = true;
- if (regression) {
- radius = 5.0;
- fill = false;
- color = "red";
- }
- var entry = {"color": color,
- "data": data,
- "lines": {"show": false},
- "points": {"fill": fill,
- "radius": radius,
- "show": true
- }
- };
- if (regression) {
- entry.points.symbol = "triangle";
+ lnt_graph.current_tip_point = [data.points[0].curveNumber, data.points[0].pointNumber];
+ plotly_show_tooltip(data);
}
- return entry;
}
-var color_codes = ["#4D4D4D",
- "#5DA5DA",
- "#FAA43A",
- "#60BD68",
- "#F17CB0",
- "#B2912F",
- "#B276B2",
- "#DECF3F",
- "#F15854",
- "#1F78B4",
- "#33A02C",
- "#E31A1C",
- "#FF7F00",
- "#6A3D9A",
- "#A6CEE3",
- "#B2DF8A",
- "#FB9A99",
- "#FDBF6F",
- "#CAB2D6"];
-
-function new_graph_data_callback(data, index) {
+function plotly_hide_tooltip(data) {
"use strict";
- data_cache[index] = data;
- update_graph();
+ plotly_update_tooltip(null);
}
-
function get_regression_id() {
"use strict";
var path = window.location.pathname.split("/");
@@ -272,213 +174,57 @@
}
}
-
-function new_graph_regression_callback(data, index, update_func) {
+function plotly_graph_regression_callback(data, index, item, yaxis, update_func) {
"use strict";
- $.each(data, function (i, d) {
-
+ $.each(data, function (i, r) {
if (get_regression_id() !== null) {
- if (get_regression_id() === d.id || d.state === 21) {
+ if (get_regression_id() === r.id || r.state === 21) {
return;
}
}
if (!(regression_cache[index])) {
regression_cache[index] = [];
}
- var metadata = {'label': d.end_point[0],
- 'title': d.title,
- 'id': d.id,
- 'link': get_regression_url(db_name, test_suite_name, d.id),
- 'state': STATE_NAMES[d.state]};
- regression_cache[index].push([parseInt(d.end_point[0], 10), d.end_point[1], metadata]);
+ regression_cache[index].push({
+ "x": [r.end_point[0]],
+ "y": [r.end_point[1]],
+ "meta": [{
+ "order": r.end_point[0],
+ "state": STATE_NAMES[r.state]
+ }],
+ "name": r.title,
+ "machine": item[0].name,
+ "machineID": item[0].id,
+ "metric": item[2],
+ "yaxis": yaxis,
+ "regression": r.title,
+ "regressionID": r.id,
+ "legendgroup": "regressions",
+ "showlegend": true,
+ "mode": "markers",
+ "marker": {
+ "color": "red",
+ "symbol": "triangle-up-open",
+ "size": 13}
+ });
});
update_func();
}
-
-var NOT_DRAWING = '' +
- 'Too many lines to plot. Limit is ' + MAX_TO_DRAW + "." +
- '
×' +
- '
';
-
-
-function update_graph() {
- "use strict";
- var to_draw = [];
- var starts = [];
- var ends = [];
- var lines_to_draw = 0;
- var i = 0;
- var color = null;
- var data = null;
- var regressions = null;
- // We need to find the x bounds of the data, sine regressions may be
- // outside that range.
- var mins = [];
- var maxs = [];
- // Data processing.
- for (i = 0; i < changes.length; i++) {
- if (is_checked[i] && data_cache[i]) {
- lines_to_draw++;
- starts.push(changes[i].start);
- ends.push(changes[i].end);
- color = color_codes[i % color_codes.length];
- data = try_normal(data_cache[i], changes[i].start);
- // Find local x-axis min and max.
- var local_min = parseFloat(data[0][0]);
- var local_max = parseFloat(data[0][0]);
- for (var j = 0; j < data.length; j++) {
- var datum = data[j];
- var d = parseFloat(datum[0]);
- if (d < local_min) {
- local_min = d;
- }
- if (d > local_max) {
- local_max = d;
- }
- }
- mins.push(local_min);
- maxs.push(local_max);
-
- to_draw.push(make_graph_point_entry(data, color, false));
- to_draw.push({"color": color, "data": data, "url": changes[i].url});
- }
- }
- // Zoom the graph to only the data sets, not the regressions.
- var min_x = Math.min.apply(Math, mins);
- var max_x = Math.max.apply(Math, maxs);
- // Regressions.
- for (i = 0; i < changes.length; i++) {
- if (is_checked[i] && data_cache[i]) {
- if (regression_cache[i]) {
- regressions = try_normal(regression_cache[i]);
- to_draw.push(make_graph_point_entry(regressions, color, true));
- }
- }
- }
- // Limit the number of lines to plot: the graph gets cluttered and slow.
- if (lines_to_draw > MAX_TO_DRAW) {
- $('#errors').empty().prepend(NOT_DRAWING);
- return;
- }
- var lowest_rev = Math.min.apply(Math, starts);
- var highest_rev = Math.max.apply(Math, ends);
- init(to_draw, lowest_rev, highest_rev, min_x, max_x);
-}
-
-// To be called by main page. It will fetch data and make graph ready.
-function add_data_to_graph(URL, index, max_samples) {
- "use strict";
- $.getJSON(get_api_url("graph", db_name, test_suite_name, URL) + "?limit=" + max_samples, function (data) {
- new_graph_data_callback(data, index);
- });
- $.getJSON(get_api_url("regression", db_name, test_suite_name, URL) + "?limit=" + max_samples, function (data) {
- new_graph_regression_callback(data, index, update_graph);
- });
- is_checked[index] = true;
-}
-
-
-function init_axis() {
- "use strict";
- function onlyUnique(value, index, self) {
- return self.indexOf(value) === index;
- }
-
- var metrics = $('.metric').map(function () {
- return $(this).text();
- }).get();
- metrics = metrics.filter(onlyUnique);
-
- var yaxis_name = metrics.join(", ");
- yaxis_name = yaxis_name.replace("_", " ");
-
- $('#yaxis').text(yaxis_name);
-
- $('#normalize').click(function (e) {
- normalize = !normalize;
- if (normalize) {
- $('#normalize').toggleClass("btn-default btn-primary");
- $('#normalize').text("x1");
- $('#yaxis').text("Normalized (%)");
- } else {
- $('#normalize').toggleClass("btn-primary btn-default");
- $('#normalize').text("%");
- $('#yaxis').text(yaxis_name);
- }
- update_graph();
- });
-
- $('#xaxis').css('position', 'absolute');
- $('#xaxis').css('left', '50%');
- $('#xaxis').css('bottom', '-15px');
- $('#xaxis').css('width', '100px');
- $('#xaxis').css('margin-left', '-50px');
-
- $('#yaxis').css('position', 'absolute');
- $('#yaxis').css('left', '-55px');
- $('#yaxis').css('top', '50%');
- $('#yaxis').css('-webkit-transform', 'rotate(-90deg)');
- $('#yaxis').css('-moz-transform', 'rotate(-90deg)');
-}
/* On the normal graph page, data is loaded during page load.
This function takes the plots from page load and adds the regressions
that are asynchrounusly fetched.
*/
-function update_graphplots(old_plot) {
+function plotly_update_graphplots(old_plot) {
"use strict";
// Regressions.
- var regressions = null;
- var i = 0;
var new_plot = $.extend([], old_plot);
- for (i = 0; i < regression_cache.length; i++) {
+ for (var i = 0; i < regression_cache.length; i++) {
if (regression_cache[i]) {
- regressions = regression_cache[i];
- new_plot.push(make_graph_point_entry(regressions, "#000000", true));
+ regression_cache[i].forEach(function(j){
+ new_plot.push(j);
+ });
}
}
return new_plot;
}
-
-
-function init(data, start_highlight, end_highlight, x_min, x_max) {
- "use strict";
- // First, set up the primary graph.
- var graph = $("#graph");
- var graph_plots = data;
- var line_width = 1;
- if (data.length > 0 && data[0].data.length < 50) {
- line_width = 2;
- }
- var graph_options = {
- xaxis: {
- min: x_min,
- max: x_max
- },
- series : {
- lines : {lineWidth : line_width},
- shadowSize : 0
- },
- highlight : {
- range: {"end": [end_highlight], "start": [start_highlight]},
- alpha: "0.35",
- stroke: true
- },
- zoom : { interactive : false },
- pan : { interactive : true,
- frameRate: 60 },
- grid : {
- hoverable : true,
- clickable: true
- }
- };
-
- var main_plot = $.plot("#graph", graph_plots, graph_options);
-
- // Add tooltips.
- graph.bind("plotclick", function (e, p, i) {
- update_tooltip(e, p, i, show_tooltip, graph_plots);
- });
-
- bind_zoom_bar(main_plot);
-}
diff --git a/lnt/server/ui/static/lnt_regression.js b/lnt/server/ui/static/lnt_regression.js
new file mode 100644
--- /dev/null
+++ b/lnt/server/ui/static/lnt_regression.js
@@ -0,0 +1,463 @@
+/*jslint vars: true, browser: true, devel: true, plusplus: true, unparam: true*/
+/*global $, jQuery, alert, db_name, test_suite_name, init, changes */
+/*global update_graph*/
+// Keep the graph data we download.
+// Each element is a list of graph data points.
+var data_cache = [];
+var is_checked = []; // The current list of lines to plot.
+var normalize = false;
+
+var MAX_TO_DRAW = 10;
+
+var STATE_NAMES = {0: 'Detected',
+ 1: 'Staged',
+ 10: 'Active',
+ 20: 'Not to be Fixed',
+ 21: 'Ignored',
+ 23: 'Verify',
+ 22: 'Fixed'};
+
+var regression_cache = [];
+var lnt_graph = {};
+
+
+// Grab the graph API url for this line.
+function get_api_url(kind, db, ts, mtf) {
+ "use strict";
+ return [lnt_url_base, "api", "db_" + db, "v4", ts, kind, mtf].join('/');
+}
+
+// Grab the URL for a regression by id.
+function get_regression_url(db, ts, regression) {
+ "use strict";
+ return [lnt_url_base, "db_" + db, "v4", ts, "regressions", regression].join('/');
+}
+
+// Grab the URL for a run by id.
+function get_run_url(db, ts, runID) {
+ "use strict";
+ return [lnt_url_base, "db_" + db, "v4", ts, runID].join('/');
+}
+
+// Create a new regression manually URL.
+function get_manual_regression_url(db, ts, url, runID) {
+ "use strict";
+ return [lnt_url_base, "db_" + db, "v4", ts,
+ "regressions/new_from_graph", url, runID].join('/');
+}
+
+
+
+/* Bind events to the zoom bar buttons, so that
+ * the zoom buttons work, then position them
+ * over top of the main graph.
+ */
+function bind_zoom_bar(my_plot) {
+ "use strict";
+ $('#out').click(function (e) {
+ e.preventDefault();
+ my_plot.zoomOut();
+ });
+
+ $('#in').click(function (e) {
+ e.preventDefault();
+ my_plot.zoom();
+ });
+
+    // Now move the buttons onto the graph.
+ $('#graphbox').css('position', 'relative');
+ $('#zoombar').css('position', 'absolute');
+
+ $('#zoombar').css('left', '40px');
+ $('#zoombar').css('top', '15px');
+
+}
+
+
+// Show our overlay tooltip.
+lnt_graph.current_tip_point = null;
+
+function show_tooltip(x, y, item, pos, graph_data) {
+ "use strict";
+ // Given the event handler item, get the graph metadata.
+ function extract_metadata(item) {
+ var index = item.dataIndex;
+ // Graph data is formatted as [x, y, meta_data].
+ var meta_data = item.series.data[index][2];
+ return meta_data;
+ }
+ var data = item.datapoint;
+ var meta_data = extract_metadata(item);
+ var tip_body = '";
+ var tooltip_div = $(tip_body).css({
+ position: 'absolute',
+ display: 'none',
+ top: y + 5,
+ left: x + 5,
+ border: '1px solid #fdd',
+ padding: '2px',
+ 'background-color': '#fee',
+ opacity: 0.80,
+ 'z-index': 100000
+ }).appendTo("body").fadeIn(200);
+
+ // Now make sure the tool tip is on the graph canvas.
+ var tt_position = tooltip_div.position();
+
+ var graph_div = $("#graph");
+ var graph_position = graph_div.position();
+
+ // The right edge of the graph.
+ var max_width = graph_position.left + graph_div.width();
+ // The right edge of the tool tip.
+ var tt_right = tt_position.left + tooltip_div.width();
+
+ if (tt_right > max_width) {
+ var diff = tt_right - max_width;
+ var GRAPH_BORDER = 10;
+ var VISUAL_APPEAL = 10;
+ tooltip_div.css({'left' : tt_position.left - diff
+ - GRAPH_BORDER - VISUAL_APPEAL});
+ }
+
+}
+
+// Event handler function to update the tooltip.
+function update_tooltip(event, pos, item, show_fn, graph_data) {
+ "use strict";
+ if (!item) {
+ $("#tooltip").fadeOut(200, function () {
+ $("#tooltip").remove();
+ });
+ lnt_graph.current_tip_point = null;
+ return;
+ }
+
+ if (!lnt_graph.current_tip_point || (lnt_graph.current_tip_point[0] !== item.datapoint[0] ||
+ lnt_graph.current_tip_point[1] !== item.datapoint[1])) {
+ $("#tooltip").remove();
+ lnt_graph.current_tip_point = item.datapoint;
+ show_fn(pos.pageX, pos.pageY, item, pos, graph_data);
+ }
+}
+
+// Normalize this data to the element in index
+function normalize_data(data_array, index) {
+ "use strict";
+ var new_data = new Array(data_array.length);
+ var i = 0;
+ var factor = 0;
+ for (i = 0; i < data_array.length; i++) {
+ if (data_array[i][0] == index) {
+ factor = data_array[i][1];
+ break;
+ }
+ }
+ console.assert(factor !== 0, "Did not find the element to normalize on.");
+ for (i = 0; i < data_array.length; i++) {
+ new_data[i] = jQuery.extend({}, data_array[i]);
+ new_data[i][1] = (data_array[i][1] / factor) * 100;
+ }
+ return new_data;
+}
+
+
+function try_normal(data_array, index) {
+ "use strict";
+ if (normalize) {
+ return normalize_data(data_array, index);
+ }
+ return data_array;
+}
+
+
+function make_graph_point_entry(data, color, regression) {
+ "use strict";
+ var radius = 0.25;
+ var fill = true;
+ if (regression) {
+ radius = 5.0;
+ fill = false;
+ color = "red";
+ }
+ var entry = {"color": color,
+ "data": data,
+ "lines": {"show": false},
+ "points": {"fill": fill,
+ "radius": radius,
+ "show": true
+ }
+ };
+ if (regression) {
+ entry.points.symbol = "triangle";
+ }
+ return entry;
+}
+
+var color_codes = ["#4D4D4D",
+ "#5DA5DA",
+ "#FAA43A",
+ "#60BD68",
+ "#F17CB0",
+ "#B2912F",
+ "#B276B2",
+ "#DECF3F",
+ "#F15854",
+ "#1F78B4",
+ "#33A02C",
+ "#E31A1C",
+ "#FF7F00",
+ "#6A3D9A",
+ "#A6CEE3",
+ "#B2DF8A",
+ "#FB9A99",
+ "#FDBF6F",
+ "#CAB2D6"];
+
+function new_graph_data_callback(data, index) {
+ "use strict";
+ data_cache[index] = data;
+ update_graph();
+}
+
+
+function get_regression_id() {
+ "use strict";
+ var path = window.location.pathname.split("/");
+ if (path[path.length - 2] === "regressions") {
+ return parseInt(path[path.length - 1], 10);
+ }
+}
+
+
+function new_graph_regression_callback(data, index, update_func) {
+ "use strict";
+ $.each(data, function (i, d) {
+
+ if (get_regression_id() !== null) {
+ if (get_regression_id() === d.id || d.state === 21) {
+ return;
+ }
+ }
+ if (!(regression_cache[index])) {
+ regression_cache[index] = [];
+ }
+ var metadata = {'label': d.end_point[0],
+ 'title': d.title,
+ 'id': d.id,
+ 'link': get_regression_url(db_name, test_suite_name, d.id),
+ 'state': STATE_NAMES[d.state]};
+ regression_cache[index].push([parseInt(d.end_point[0], 10), d.end_point[1], metadata]);
+ });
+ update_func();
+}
+
+var NOT_DRAWING = '' +
+ 'Too many lines to plot. Limit is ' + MAX_TO_DRAW + "." +
+ '
×' +
+ '
';
+
+
+function update_graph() {
+ "use strict";
+ var to_draw = [];
+ var starts = [];
+ var ends = [];
+ var lines_to_draw = 0;
+ var i = 0;
+ var color = null;
+ var data = null;
+ var regressions = null;
+    // We need to find the x bounds of the data, since regressions may be
+ // outside that range.
+ var mins = [];
+ var maxs = [];
+ // Data processing.
+ for (i = 0; i < changes.length; i++) {
+ if (is_checked[i] && data_cache[i]) {
+ lines_to_draw++;
+ starts.push(changes[i].start);
+ ends.push(changes[i].end);
+ color = color_codes[i % color_codes.length];
+ data = try_normal(data_cache[i], changes[i].start);
+ // Find local x-axis min and max.
+ var local_min = parseFloat(data[0][0]);
+ var local_max = parseFloat(data[0][0]);
+ for (var j = 0; j < data.length; j++) {
+ var datum = data[j];
+ var d = parseFloat(datum[0]);
+ if (d < local_min) {
+ local_min = d;
+ }
+ if (d > local_max) {
+ local_max = d;
+ }
+ }
+ mins.push(local_min);
+ maxs.push(local_max);
+
+ to_draw.push(make_graph_point_entry(data, color, false));
+ to_draw.push({"color": color, "data": data, "url": changes[i].url});
+ }
+ }
+ // Zoom the graph to only the data sets, not the regressions.
+ var min_x = Math.min.apply(Math, mins);
+ var max_x = Math.max.apply(Math, maxs);
+ // Regressions.
+ for (i = 0; i < changes.length; i++) {
+ if (is_checked[i] && data_cache[i]) {
+ if (regression_cache[i]) {
+ regressions = try_normal(regression_cache[i]);
+ to_draw.push(make_graph_point_entry(regressions, color, true));
+ }
+ }
+ }
+ // Limit the number of lines to plot: the graph gets cluttered and slow.
+ if (lines_to_draw > MAX_TO_DRAW) {
+ $('#errors').empty().prepend(NOT_DRAWING);
+ return;
+ }
+ var lowest_rev = Math.min.apply(Math, starts);
+ var highest_rev = Math.max.apply(Math, ends);
+ init(to_draw, lowest_rev, highest_rev, min_x, max_x);
+}
+
+// To be called by main page. It will fetch data and make graph ready.
+function add_data_to_graph(URL, index, max_samples) {
+ "use strict";
+ $.getJSON(get_api_url("graph", db_name, test_suite_name, URL) + "?limit=" + max_samples, function (data) {
+ new_graph_data_callback(data, index);
+ });
+ $.getJSON(get_api_url("regression", db_name, test_suite_name, URL) + "?limit=" + max_samples, function (data) {
+ new_graph_regression_callback(data, index, update_graph);
+ });
+ is_checked[index] = true;
+}
+
+
+function init_axis() {
+ "use strict";
+ function onlyUnique(value, index, self) {
+ return self.indexOf(value) === index;
+ }
+
+ var metrics = $('.metric').map(function () {
+ return $(this).text();
+ }).get();
+ metrics = metrics.filter(onlyUnique);
+
+ var yaxis_name = metrics.join(", ");
+ yaxis_name = yaxis_name.replace("_", " ");
+
+ $('#yaxis').text(yaxis_name);
+
+ $('#normalize').click(function (e) {
+ normalize = !normalize;
+ if (normalize) {
+ $('#normalize').toggleClass("btn-default btn-primary");
+ $('#normalize').text("x1");
+ $('#yaxis').text("Normalized (%)");
+ } else {
+ $('#normalize').toggleClass("btn-primary btn-default");
+ $('#normalize').text("%");
+ $('#yaxis').text(yaxis_name);
+ }
+ update_graph();
+ });
+
+ $('#xaxis').css('position', 'absolute');
+ $('#xaxis').css('left', '50%');
+ $('#xaxis').css('bottom', '-15px');
+ $('#xaxis').css('width', '100px');
+ $('#xaxis').css('margin-left', '-50px');
+
+ $('#yaxis').css('position', 'absolute');
+ $('#yaxis').css('left', '-55px');
+ $('#yaxis').css('top', '50%');
+ $('#yaxis').css('-webkit-transform', 'rotate(-90deg)');
+ $('#yaxis').css('-moz-transform', 'rotate(-90deg)');
+}
+
+function init(data, start_highlight, end_highlight, x_min, x_max) {
+ "use strict";
+ // First, set up the primary graph.
+ var graph = $("#graph");
+ var graph_plots = data;
+ var line_width = 1;
+ if (data.length > 0 && data[0].data.length < 50) {
+ line_width = 2;
+ }
+ var graph_options = {
+ xaxis: {
+ min: x_min,
+ max: x_max
+ },
+ series : {
+ lines : {lineWidth : line_width},
+ shadowSize : 0
+ },
+ highlight : {
+ range: {"end": [end_highlight], "start": [start_highlight]},
+ alpha: "0.35",
+ stroke: true
+ },
+ zoom : { interactive : false },
+ pan : { interactive : true,
+ frameRate: 60 },
+ grid : {
+ hoverable : true,
+ clickable: true
+ }
+ };
+
+ var main_plot = $.plot("#graph", graph_plots, graph_options);
+
+ // Add tooltips.
+ graph.bind("plotclick", function (e, p, i) {
+ update_tooltip(e, p, i, show_tooltip, graph_plots);
+ });
+
+ bind_zoom_bar(main_plot);
+}
diff --git a/lnt/server/ui/templates/v4_graph.html b/lnt/server/ui/templates/v4_graph.html
--- a/lnt/server/ui/templates/v4_graph.html
+++ b/lnt/server/ui/templates/v4_graph.html
@@ -4,32 +4,10 @@
{% extends "layout.html" %}
{% set components = [(ts.name, v4_url_for(".v4_recent_activity"))] %}
{% block head %}
-
-
+ src="{{ url_for('.static', filename='lnt_graph.js') }}">
-
-
-
-
-
-
+ src="https://cdn.plot.ly/plotly-2.4.2.min.js">
{% endblock %}
{% block title %}Graph{% endblock %}
@@ -38,125 +16,80 @@
{% block onload %}init_page(){% endblock %}
{% block javascript %}
-var g = {};
var test_suite_name = "{{ request.view_args.testsuite_name }}";
var db_name = "{{ request.view_args.get('db_name','') }}";
var graph_plots = {{graph_plots|tojson|safe}};
-var baseline_plots = {{baseline_plots|tojson|safe}};
+var metrics = {{metrics|tojson|safe}};
+var legend = {{legend|tojson|safe}};
var options = {{options|tojson|safe}};
-prefix = "{{request.base_url}}";
-
-transform_fn = function (v) { return v; }
-inverse_transform_fn = function (v) { return v; }
-
-if (options.logarithmic_scale) {
- transform_fn = function(v) {
- if (v < 0)
- return -Math.log10(-v);
- else if (v > 0)
- return Math.log10(v);
- else
- return 0;
- }
- inverse_transform_fn = function(v) {
- if (v < 0)
- return -Math.pow(10, -v);
- else if (v > 0)
- return Math.pow(10, v);
- else
- return 0;
- }
-}
function init_graph() {
- // Set up the primary graph.
- var graph = $("#graph");
- var graph_options = {
- series : {
- lines : {
- lineWidth : 1 },
- shadowSize : 0
- },
- highlight : {
-{% if revision_range is not none %}
- range: {{revision_range|tojson|safe}}
-{% else %}
- enabled: false
-{% endif %}
- },
- zoom : { interactive : false },
- pan : { interactive : true,
- frameRate: 60 },
- grid : {
- hoverable : true,
- clickable: true },
- yaxis: {
- transform: transform_fn,
- inverseTransform: inverse_transform_fn }
- };
-
- // Add baseline lines
- graph_options['grid']['markings'] = baseline_plots;
- var tmp_plots = update_graphplots(graph_plots);
- var main_plot = $.plot(graph, tmp_plots, graph_options);
-
- // Add tooltips.
- graph.bind("plotclick", function (e, p, i) {
- update_tooltip(e, p, i, show_tooltip, tmp_plots);
- });
-
- // Set up the overview graph.
- var overview = $("#overview")
- var overview_plots = {{overview_plots|tojson|safe}};
- $.plot(overview, overview_plots, {
- series : {
- lines : {
- lineWidth : 1 },
- shadowSize : 0 },
- selection: { mode: "x" },
- touch: {
- enabled: false
- },
- highlight : {
-{% if revision_range is not none %}
- range: {{revision_range|tojson|safe}},
- alpha: "1",
- stroke: true,
-{% else %}
- enabled: false
-{% endif %}
- },
- yaxis: { ticks: [] } });
+ // Add regressions
+ var tmp_plots = plotly_update_graphplots(graph_plots);
- // Connect selection on the overview graph to the main plot.
- $("#overview").bind("plotselected", function (event, ranges) {
- // Set the zooming on the plot.
- $.plot(graph, graph_plots,
- $.extend(true, {}, graph_options, {
- xaxis: { min: ranges.xaxis.from, max: ranges.xaxis.to },
- yaxis: { min: ranges.yaxis.from, max: ranges.yaxis.to }
- }));
- });
- bind_zoom_bar(main_plot);
+ var graph_layout = {
+ // title: 'Graph',
+ hovermode: 'closest',
+ showlegend: true,
+ legend: { x: 0, y: -1.0,
+ // yanchor: 'bottom',
+ // size: 'top left',
+ bgcolor: 'rgba(0,0,0,0)' },
+ margin: { l: 50, r: 0, t: 10, b: 0 },
+ height: 700
+ };
+ if (options.xaxis_date) {
+ graph_layout['xaxis'] = {title: 'Date', type: 'date'};
+ } else {
+ graph_layout['xaxis'] = {title: 'Order', type: 'category'};
+ }
+ var xaxis_left = 0.0;
+ var xaxis_right = 1.0;
+ for (var i = 0; i < metrics.length; i++) {
+ var yaxis = 'yaxis';
+ if (i > 0) yaxis += (i+1).toString();
+ graph_layout[yaxis] = {title: metrics[i]};
+ if (options.logarithmic_scale) {
+ graph_layout[yaxis]['type'] = 'log';
+ graph_layout[yaxis]['autorange'] = true;
+ }
+ if (i > 0 ) {
+ graph_layout[yaxis]['overlaying'] = 'y';
+ }
+ if (i & 1) {
+ graph_layout[yaxis]['side'] = 'right';
+ xaxis_right = 1 - 0.03 * i;
+ graph_layout[yaxis]['position'] = xaxis_right;
+ } else {
+ xaxis_left = 0.03 * i
+ graph_layout[yaxis]['position'] = xaxis_left;
+ }
+ }
+ graph_layout['xaxis']['domain'] = [xaxis_left, xaxis_right];
+ Plotly.newPlot('graph', tmp_plots, graph_layout);
+ var graph = document.getElementById('graph')
+ graph.on('plotly_click', plotly_update_tooltip);
+ graph.on('plotly_doubleclick', plotly_hide_tooltip);
}
function init_page() {
- // First start the requests for regrssion data.
- var urls = $(".data-row").each(function (index, val) {
- $.getJSON(get_api_url("regression",
- db_name,
- test_suite_name,
- $(val).data('url')),
- function (data) {
- new_graph_regression_callback(data, index, init_graph);
- });
- return $(val).data('url');
+ if (!options.xaxis_date) {
+ // First start the requests for regression data.
+ legend.forEach(function(item, index) {
+ if (item[4]) { // legend.url
+ var yaxis_index = metrics.indexOf(item[2]); // legend.field_name
+ var yaxis = ((yaxis_index == 0) ? "y" : ("y"+(yaxis_index + 1).toString()));
+ $.getJSON(get_api_url("regression", db_name, test_suite_name, item[4]),
+ function (data) {
+ plotly_graph_regression_callback(data, index, item, yaxis, init_graph);
+ }
+ );
+ }
});
-
- init_graph();
- init_axis();
+ }
+ init_graph();
}
{% endblock %}
@@ -164,8 +97,7 @@
{% block sidebar %}
Controls
- - Left Mouse: Pan
-
- Double Left Mouse: Zoom
+
- Double Left Mouse: Hide Tooltip
{% endblock %}
@@ -173,6 +105,32 @@
Graph |
+
+
+ |
|
-
-
-
-
-
-
-
-
-
-
- Metric
- Order
-
- |
-
-
-
-
-
- |
-
-
+
+
+
+ |
+
-
- Legend
-
-
- |
- Machine |
- Test |
- Type |
-
- {% for machine, test_name, field_name, col, url in legend %}
-
- |
- {{ utils.render_machine(machine) }} |
- {{ test_name }} |
- {{ field_name }} |
-
- {% endfor %}
-
{% endblock %}
diff --git a/lnt/server/ui/templates/v4_new_regressions.html b/lnt/server/ui/templates/v4_new_regressions.html
--- a/lnt/server/ui/templates/v4_new_regressions.html
+++ b/lnt/server/ui/templates/v4_new_regressions.html
@@ -25,7 +25,7 @@
-
+
{% endblock %}
{% block javascript %}
@@ -36,8 +36,8 @@
{% set fc = changes[loop.index -1] %}
{% set fc_ri_field_index = ts.get_field_index(fc.ri.field) %}
{"url": "/{{api_graph}}/{{ fc.ri.machine.id}}/{{fc.ri.test.id}}/{{fc_ri_field_index}}",
- "start": {{fc.ri.start_order.llvm_project_revision}},
- "end": {{fc.ri.end_order.llvm_project_revision}}
+ "start": "{{fc.ri.start_order.as_ordered_string()}}",
+ "end": "{{fc.ri.end_order.as_ordered_string()}}"
},
{% endfor %}
];
diff --git a/lnt/server/ui/templates/v4_regression_detail.html b/lnt/server/ui/templates/v4_regression_detail.html
--- a/lnt/server/ui/templates/v4_regression_detail.html
+++ b/lnt/server/ui/templates/v4_regression_detail.html
@@ -27,7 +27,7 @@
-
+
{% endblock %}
@@ -43,8 +43,8 @@
{% set fc = changes[loop.index -1] %}
{% set fc_ri_field_index = ts.get_field_index(fc.ri.field) %}
{"url": "{{fc.ri.machine.id}}/{{fc.ri.test.id}}/{{fc_ri_field_index}}",
- "start": {{fc.ri.start_order.llvm_project_revision}},
- "end": {{fc.ri.end_order.llvm_project_revision}}
+ "start": "{{fc.ri.start_order.as_ordered_string()}}",
+ "end": "{{fc.ri.end_order.as_ordered_string()}}"
},
{% endfor %}
];
diff --git a/lnt/server/ui/views.py b/lnt/server/ui/views.py
--- a/lnt/server/ui/views.py
+++ b/lnt/server/ui/views.py
@@ -45,6 +45,7 @@
from lnt.server.ui.util import PrecomputedCR
from lnt.server.ui.util import baseline_key, convert_revision
from lnt.server.ui.util import mean
+from lnt.server.ui.views_util import json_response, graph_csv_response
from lnt.testing import PASS
from lnt.util import logger
from lnt.util import multidict
@@ -80,9 +81,9 @@
path = request.args.get('path')
db = request.args.get('db')
if path is None:
- abort(400)
+ abort(400, "'path' argument is missing.")
if db not in current_app.old_config.databases:
- abort(404)
+ abort(404, "'db' argument is missing or invalid.")
# Rewrite the path.
new_path = "/db_%s" % db
@@ -312,7 +313,7 @@
try:
machine = session.query(ts.Machine).filter(ts.Machine.id == id).one()
except NoResultFound:
- abort(404)
+ abort(404, "Invalid machine id {}.".format(id))
if request.args.get('json'):
json_obj = dict()
@@ -347,7 +348,7 @@
self.ts = ts = request.get_testsuite()
self.run = run = session.query(ts.Run).filter_by(id=run_id).first()
if run is None:
- abort(404)
+ abort(404, "Invalid run id {}.".format(run_id))
# Get the aggregation function to use.
aggregation_fn_name = request.args.get('aggregation_fn')
@@ -613,7 +614,7 @@
# Get the order.
order = session.query(ts.Order).filter(ts.Order.id == id).first()
if order is None:
- abort(404)
+ abort(404, "Invalid order id {}.".format(id))
previous_order = None
if order.previous_order_id:
@@ -643,7 +644,7 @@
ts = request.get_testsuite()
base = session.query(ts.Baseline).get(id)
if not base:
- return abort(404)
+ return abort(404, "Invalid baseline id {}.".format(id))
flash("Baseline set to " + base.name, FLASH_SUCCESS)
flask.session[baseline_key(ts.name)] = id
@@ -671,7 +672,7 @@
ts = request.get_testsuite()
run = session.query(ts.Run).filter_by(id=id).first()
if run is None:
- abort(404)
+ abort(404, "Invalid run id {}.".format(id))
# Convert the old style test parameters encoding.
args = {'highlight_run': id}
@@ -719,7 +720,7 @@
ts = request.get_testsuite()
target_sample = session.query(ts.Sample).get(sample_id)
if not target_sample:
- abort(404, "Could not find sample id: {}".format(sample_id))
+ abort(404, "Could not find sample id {}.".format(sample_id))
# Get the field index we are interested in.
field_index = None
@@ -741,6 +742,212 @@
return v4_redirect(graph_url)
+class PlotParameter(object):
+ def __init__(self, machine, test, field, field_index):
+ self.machine = machine
+ self.test = test
+ self.field = field
+ self.field_index = field_index
+ self.samples = None
+
+ def __repr__(self):
+ return "{}:{}({} samples)" \
+ .format(self.machine.name,
+ self.test.name,
+ len(self.samples) if self.samples else "No")
+
+
+def assert_field_idx_valid(field_idx, count):
+ if not (0 <= field_idx < count):
+ return abort(404,
+ "Invalid field index {}. Total sample_fields for the current suite is {}." \
+ .format(field_idx, count))
+
+
+def load_plot_parameter(machine_id, test_id, field_index, session, ts):
+ try:
+ machine_id = int(machine_id)
+ test_id = int(test_id)
+ field_index = int(field_index)
+ except ValueError:
+ return abort(400, "Invalid plot arguments.")
+
+ try:
+ machine = session.query(ts.Machine) \
+ .filter(ts.Machine.id == machine_id) \
+ .one()
+ except NoResultFound:
+ return abort(404, "Invalid machine id {}.".format(machine_id))
+ try:
+ test = session.query(ts.Test).filter(ts.Test.id == test_id).one()
+ except NoResultFound:
+ return abort(404, "Invalid test id {}.".format(test_id))
+
+ assert_field_idx_valid(field_index, len(ts.sample_fields))
+ try:
+ field = ts.sample_fields[field_index]
+ except NoResultFound:
+ return abort(404, "Invalid field_index {}.".format(field_index))
+
+ return PlotParameter(machine, test, field, field_index)
+
+
+def parse_plot_parameters(args):
+ """
+ Returns a list of tuples of integers (machine_id, test_id, field_index).
+ :param args: The request parameters dictionary.
+ """
+ plot_parameters = []
+ for name, value in args.items():
+ # Plots are passed as::
+ #
+ # plot.<unused>=<machine id>.<test id>.<field index>
+ if not name.startswith('plot.'):
+ continue
+
+ # Ignore the extra part of the key, it is unused.
+
+ try:
+ machine_id, test_id, field_index = map(int, value.split('.'))
+ except ValueError:
+ return abort(400, "Expected int as plot value")
+
+ plot_parameters.append((machine_id, test_id, field_index))
+
+ return plot_parameters
+
+
+def parse_and_load_plot_parameters(args, session, ts):
+ """
+ Parses plot parameters and loads the corresponding entities from the database.
+ Returns a list of PlotParameter instances sorted by machine name, test name and then field.
+ :param args: The request parameters dictionary.
+ :param session: The database session.
+ :param ts: The test suite.
+ """
+ plot_parameters = [load_plot_parameter(machine_id, test_id, field_index, session, ts)
+ for (machine_id, test_id, field_index) in parse_plot_parameters(args)]
+ # Order the plots by machine name, test name and then field.
+ plot_parameters.sort(key=lambda plot_parameter:
+ (plot_parameter.machine.name, plot_parameter.test.name,
+ plot_parameter.field.name, plot_parameter.field_index))
+
+ return plot_parameters
+
+
+def parse_mean_parameter(args, session, ts):
+ # Mean to graph is passed as:
+ #
+ # mean=<machine id>.<field index>
+ value = args.get('mean')
+ if not value:
+ return None
+
+ try:
+ machine_id, field_index = map(int, value.split('.'))
+ except ValueError:
+ return abort(400,
+ "Invalid format of 'mean={}', expected mean=<machine id>.<field index>.".format(value))
+
+ try:
+ machine = session.query(ts.Machine) \
+ .filter(ts.Machine.id == machine_id) \
+ .one()
+ except NoResultFound:
+ return abort(404, "Invalid machine id {}.".format(machine_id))
+
+ assert_field_idx_valid(field_index, len(ts.sample_fields))
+ field = ts.sample_fields[field_index]
+
+ return machine, field
+
+
+def load_graph_data(plot_parameter, show_failures, limit, xaxis_date, revision_cache=None):
+ """
+ Load all the field values for this test on the same machine.
+ :param plot_parameter: Stores machine, test and field to load.
+ :param show_failures: Filter only passed values if False.
+ :param limit: Limit points if specified.
+ :param xaxis_date: X axis is Date, otherwise Order.
+ """
+ session = request.session
+ ts = request.get_testsuite()
+
+ # FIXME: Don't join to Order here, aggregate this across all the tests
+ # we want to load. Actually, we should just make this a single query.
+ values = session.query(plot_parameter.field.column, ts.Order,
+ ts.Run.start_time, ts.Run.id) \
+ .join(ts.Run).join(ts.Order) \
+ .filter(ts.Run.machine_id == plot_parameter.machine.id) \
+ .filter(ts.Sample.test == plot_parameter.test) \
+ .filter(plot_parameter.field.column.isnot(None))
+ # Unless all samples requested, filter out failing tests.
+ if not show_failures:
+ if plot_parameter.field.status_field:
+ values = values.filter((plot_parameter.field.status_field.column == PASS) |
+ (plot_parameter.field.status_field.column.is_(None)))
+ if limit:
+ values = values.limit(limit)
+
+ if xaxis_date:
+ # Aggregate by date.
+ data = list(multidict.multidict(
+ (date, (val, order, date, run_id))
+ for val, order, date, run_id in values).items())
+ # Sort data points according to date.
+ data.sort(key=lambda sample: sample[0])
+ else:
+ # Aggregate by order (revision).
+ data = list(multidict.multidict(
+ (order.llvm_project_revision, (val, order, date, run_id))
+ for val, order, date, run_id in values).items())
+ # Sort data points according to order (revision).
+ data.sort(key=lambda sample: convert_revision(sample[0], cache=revision_cache))
+
+ return data
+
+
+def load_geomean_data(field, machine, limit, xaxis_date, revision_cache=None):
+ """
+ Load geomean for specified field on the same machine.
+ :param field: Field.
+ :param machine: Machine.
+ :param limit: Limit points if specified.
+ :param xaxis_date: X axis is Date, otherwise Order.
+ """
+ session = request.session
+ ts = request.get_testsuite()
+ values = session.query(sqlalchemy.sql.func.min(field.column),
+ ts.Order,
+ sqlalchemy.sql.func.min(ts.Run.start_time)) \
+ .join(ts.Run).join(ts.Order).join(ts.Test) \
+ .filter(ts.Run.machine_id == machine.id) \
+ .filter(field.column.isnot(None)) \
+ .group_by(ts.Order.llvm_project_revision, ts.Test)
+
+ if limit:
+ values = values.limit(limit)
+
+ data = multidict.multidict(
+ ((order, date), val)
+ for val, order, date in values).items()
+
+ # Calculate geomean of each revision.
+ if xaxis_date:
+ data = [(date, [(calc_geomean(vals), order, date)])
+ for ((order, date), vals) in data]
+ # Sort data points according to date.
+ data.sort(key=lambda sample: sample[0])
+ else:
+ data = [(order.llvm_project_revision,
+ [(calc_geomean(vals), order, date)])
+ for ((order, date), vals) in data]
+ # Sort data points according to order (revision).
+ data.sort(key=lambda sample: convert_revision(sample[0], cache=revision_cache))
+
+ return data
+
+
@v4_route("/graph")
def v4_graph():
@@ -772,18 +979,31 @@
options['show_mad'] = show_mad = bool(request.args.get('show_mad'))
options['show_stddev'] = show_stddev = \
bool(request.args.get('show_stddev'))
- options['hide_all_points'] = hide_all_points = bool(
- request.args.get('hide_all_points'))
+
+ # Always add all points for csv export
+ if bool(request.args.get('csv')) or bool(request.args.get('download_csv')):
+ options['hide_all_points'] = hide_all_points = False
+ options['xaxis_date'] = xaxis_date = False
+ else:
+ options['hide_all_points'] = hide_all_points = bool(
+ request.args.get('hide_all_points'))
+ options['xaxis_date'] = xaxis_date = bool(
+ request.args.get('xaxis_date'))
+
options['show_linear_regression'] = show_linear_regression = bool(
request.args.get('show_linear_regression'))
options['show_failures'] = show_failures = bool(
request.args.get('show_failures'))
+ options['limit'] = limit = int(
+ request.args.get('limit', 0))
options['normalize_by_median'] = normalize_by_median = bool(
request.args.get('normalize_by_median'))
options['show_moving_average'] = moving_average = bool(
request.args.get('show_moving_average'))
options['show_moving_median'] = moving_median = bool(
request.args.get('show_moving_median'))
+ options['show_cumulative_minimum'] = show_cumulative_minimum = bool(
+ request.args.get('show_cumulative_minimum'))
options['moving_window_size'] = moving_window_size = int(
request.args.get('moving_window_size', 10))
options['hide_highlight'] = bool(
@@ -794,74 +1014,13 @@
show_highlight = not options['hide_highlight']
# Load the graph parameters.
- GraphParameter = namedtuple('GraphParameter',
- ['machine', 'test', 'field', 'field_index'])
- graph_parameters = []
- for name, value in request.args.items():
- # Plots to graph are passed as::
- #
- # plot.=..
- if not name.startswith(str('plot.')):
- continue
-
- # Ignore the extra part of the key, it is unused.
- try:
- machine_id_str, test_id_str, field_index_str = value.split('.')
- machine_id = int(machine_id_str)
- test_id = int(test_id_str)
- field_index = int(field_index_str)
- except ValueError:
- return abort(400)
-
- if not (0 <= field_index < len(ts.sample_fields)):
- return abort(404)
-
- try:
- machine = session.query(ts.Machine) \
- .filter(ts.Machine.id == machine_id) \
- .one()
- test = session.query(ts.Test).filter(ts.Test.id == test_id).one()
- field = ts.sample_fields[field_index]
- except NoResultFound:
- return abort(404)
- graph_parameters.append(GraphParameter(machine, test, field, field_index))
-
- # Order the plots by machine name, test name and then field.
- graph_parameters.sort(key=lambda graph_parameter:
- (graph_parameter.machine.name, graph_parameter.test.name,
- graph_parameter.field.name, graph_parameter.field_index))
+ plot_parameters = parse_and_load_plot_parameters(request.args, session, ts)
# Extract requested mean trend.
- mean_parameter = None
- for name, value in request.args.items():
- # Mean to graph is passed as:
- #
- # mean=.
- if name != 'mean':
- continue
-
- machine_id_str, field_index_str = value.split('.')
- try:
- machine_id = int(machine_id_str)
- field_index = int(field_index_str)
- except ValueError:
- return abort(400)
-
- if not (0 <= field_index < len(ts.sample_fields)):
- return abort(404)
-
- try:
- machine = session.query(ts.Machine) \
- .filter(ts.Machine.id == machine_id) \
- .one()
- except NoResultFound:
- return abort(404)
- field = ts.sample_fields[field_index]
-
- mean_parameter = (machine, field)
+ mean_parameter = parse_mean_parameter(request.args, session, ts)
# Sanity check the arguments.
- if not graph_parameters and not mean_parameter:
+ if not plot_parameters and not mean_parameter:
return render_template("error.html", message="Nothing to graph.")
# Extract requested baselines, and their titles.
@@ -870,16 +1029,16 @@
# Baselines to graph are passed as:
#
# baseline.title=<run id>
- if not name.startswith(str('baseline.')):
+ if not name.startswith("baseline."):
continue
- baseline_title = name[len('baseline.'):]
+ baseline_title = name[len("baseline."):]
run_id_str = value
try:
run_id = int(run_id_str)
except Exception:
- return abort(400)
+ return abort(400, "Invalid baseline run id {}.".format(run_id_str))
try:
run = session.query(ts.Run) \
@@ -902,7 +1061,7 @@
highlight_run = session.query(ts.Run).filter_by(
id=int(highlight_run_id)).first()
if highlight_run is None:
- abort(404)
+ abort(404, "Invalid highlight_run id {}.".format(highlight_run_id))
# Find the neighboring runs, by order.
prev_runs = list(ts.get_previous_runs_on_machine(session,
@@ -911,8 +1070,8 @@
start_rev = prev_runs[0].order.llvm_project_revision
end_rev = highlight_run.order.llvm_project_revision
revision_range = {
- "start": convert_revision(start_rev),
- "end": convert_revision(end_rev),
+ "start": start_rev,
+ "end": end_rev,
}
# Build the graph data.
@@ -922,53 +1081,31 @@
overview_plots = []
baseline_plots = []
revision_cache = {}
- num_plots = len(graph_parameters)
- for i, (machine, test, field, field_index) in enumerate(graph_parameters):
+ num_plots = len(plot_parameters)
+
+ metrics = list(set(req.field.name for req in plot_parameters))
+
+ for i, req in enumerate(plot_parameters):
# Determine the base plot color.
col = list(util.makeDarkColor(float(i) / num_plots))
- url = "/".join([str(machine.id), str(test.id), str(field_index)])
- legend.append(LegendItem(machine, test.name, field.name,
+ url = "/".join([str(req.machine.id), str(req.test.id), str(req.field_index)])
+ legend.append(LegendItem(req.machine, req.test.name, req.field.name,
tuple(col), url))
- # Load all the field values for this test on the same machine.
- #
- # FIXME: Don't join to Order here, aggregate this across all the tests
- # we want to load. Actually, we should just make this a single query.
- #
- # FIXME: Don't hard code field name.
- q = session.query(field.column, ts.Order.llvm_project_revision,
- ts.Run.start_time, ts.Run.id) \
- .join(ts.Run).join(ts.Order) \
- .filter(ts.Run.machine_id == machine.id) \
- .filter(ts.Sample.test == test) \
- .filter(field.column.isnot(None))
-
- # Unless all samples requested, filter out failing tests.
- if not show_failures:
- if field.status_field:
- q = q.filter((field.status_field.column == PASS) |
- (field.status_field.column.is_(None)))
-
- # Aggregate by revision.
- data = list(multidict.multidict((rev, (val, date, run_id))
- for val, rev, date, run_id in q)
- .items())
-
- data.sort(key=lambda sample: convert_revision(sample[0], cache=revision_cache))
-
- graph_datum.append((test.name, data, col, field, url, machine))
+ data = load_graph_data(req, show_failures, limit, xaxis_date, revision_cache)
+ graph_datum.append((req.test.name, data, col, req.field, url, req.machine))
# Get baselines for this line
num_baselines = len(baseline_parameters)
for baseline_id, (baseline, baseline_title) in \
enumerate(baseline_parameters):
- q_baseline = session.query(field.column,
+ q_baseline = session.query(req.field.column,
ts.Order.llvm_project_revision,
ts.Run.start_time, ts.Machine.name) \
.join(ts.Run).join(ts.Order).join(ts.Machine) \
.filter(ts.Run.id == baseline.id) \
- .filter(ts.Sample.test == test) \
- .filter(field.column.isnot(None))
+ .filter(ts.Sample.test == req.test) \
+ .filter(req.field.column.isnot(None))
# In the event of many samples, use the mean of the samples as the
# baseline.
samples = []
@@ -985,52 +1122,54 @@
dark_col = list(util.makeDarkerColor(my_color))
str_dark_col = util.toColorString(dark_col)
baseline_plots.append({
- 'color': str_dark_col,
- 'lineWidth': 2,
- 'yaxis': {'from': mean, 'to': mean},
- 'name': q_baseline[0].llvm_project_revision,
+ "color": str_dark_col,
+ "lineWidth": 2,
+ "yaxis": {"from": mean, "to": mean},
+ # "name": q_baseline[0].llvm_project_revision,
+ "name": "Baseline %s: %s (%s)" % (baseline_title, req.test.name, req.field.name),
})
baseline_name = ("Baseline {} on {}"
.format(baseline_title, q_baseline[0].name))
legend.append(LegendItem(BaselineLegendItem(
- baseline_name, baseline.id), test.name, field.name, dark_col,
+ baseline_name, baseline.id), req.test.name, req.field.name, dark_col,
None))
# Draw mean trend if requested.
if mean_parameter:
machine, field = mean_parameter
- test_name = 'Geometric Mean'
+ test_name = "Geometric Mean"
+
+ if field.name not in metrics:
+ metrics.append(field.name)
col = (0, 0, 0)
legend.append(LegendItem(machine, test_name, field.name, col, None))
+ data = load_geomean_data(field, machine, limit, xaxis_date, revision_cache)
+ graph_datum.append((test_name, data, col, field, None, machine))
- q = session.query(sqlalchemy.sql.func.min(field.column),
- ts.Order.llvm_project_revision,
- sqlalchemy.sql.func.min(ts.Run.start_time)) \
- .join(ts.Run).join(ts.Order).join(ts.Test) \
- .filter(ts.Run.machine_id == machine.id) \
- .filter(field.column.isnot(None)) \
- .group_by(ts.Order.llvm_project_revision, ts.Test)
-
- # Calculate geomean of each revision.
- data = multidict.multidict(
- ((rev, date), val) for val, rev, date in q).items()
- data = [(rev,
- [(lnt.server.reporting.analysis.calc_geomean(vals), date)])
- for ((rev, date), vals) in data]
-
- # Sort data points according to revision number.
- data.sort(key=lambda sample: convert_revision(sample[0]))
+ def trace_name(name, test_name, field_name):
+ return "%s: %s (%s)" % (name, test_name, field_name)
- graph_datum.append((test_name, data, col, field, None, machine))
+ for test_name, data, col, field, url, machine in graph_datum:
+ # Generate trace metadata.
+ trace_meta = {}
+ trace_meta["machine"] = machine.name
+ trace_meta["machineID"] = machine.id
+ if len(graph_datum) > 1:
+ # If there are more than one plot in the graph, also label the
+ # test name.
+ trace_meta["test_name"] = test_name
+ trace_meta["metric"] = field.name
- for name, data, col, field, url, machine in graph_datum:
# Compute the graph points.
- errorbar_data = []
- points_data = []
- pts = []
- moving_median_data = []
- moving_average_data = []
+ pts_x = []
+ pts_y = []
+ meta = []
+ errorbar = {"x": [], "y": [], "error_y": {"type": "data", "visible": True, "array": []}}
+ cumulative_minimum = {"x": [], "y": []}
+ moving_median_data = {"x": [], "y": []}
+ moving_average_data = {"x": [], "y": []}
+ multisample_points_data = {"x": [], "y": [], "meta": []}
if normalize_by_median:
normalize_by = 1.0/stats.median([min([d[0] for d in values])
@@ -1038,20 +1177,22 @@
else:
normalize_by = 1.0
- for pos, (point_label, datapoints) in enumerate(data):
+ min_val = None
+ # Note data is sorted in load_graph_data().
+ for point_label, datapoints in data:
# Get the samples.
- data = [data_date[0] for data_date in datapoints]
+ values = [data_array[0] for data_array in datapoints]
+ orders = [data_array[1] for data_array in datapoints]
# And the date on which they were taken.
- dates = [data_date[1] for data_date in datapoints]
- # Run where this point was collected.
- runs = [data_pts[2]
- for data_pts in datapoints if len(data_pts) == 3]
+ dates = [data_array[2] for data_array in datapoints]
+ # Run ID where this point was collected.
+ run_ids = [data_array[3] for data_array in datapoints if len(data_array) == 4]
- x = determine_x_value(point_label, pos, revision_cache)
+ values = [v * normalize_by for v in values]
- values = [v*normalize_by for v in data]
- aggregation_fn = min
+ is_multisample = (len(values) > 1)
+ aggregation_fn = min
if switch_min_mean_local:
aggregation_fn = lnt.util.stats.agg_mean
if field.bigger_is_better:
@@ -1060,55 +1201,72 @@
agg_value, agg_index = \
aggregation_fn((value, index)
for (index, value) in enumerate(values))
-
- # Generate metadata.
- metadata = {"label": point_label}
- metadata["machine"] = machine.name
- metadata["date"] = str(dates[agg_index])
- if runs:
- metadata["runID"] = str(runs[agg_index])
-
- if len(graph_datum) > 1:
- # If there are more than one plot in the graph, also label the
- # test name.
- metadata["test_name"] = name
-
- pts.append((x, agg_value, metadata))
-
- # Add the individual points, if requested.
- # For each point add a text label for the mouse over.
- if not hide_all_points:
+ pts_y.append(agg_value)
+
+ # Plotly does not sort X axis in case of type: 'category'.
+ # point_label is a string (order revision) if xaxis_date = False
+ pts_x.append(point_label)
+
+ # Generate point metadata.
+ point_metadata = {"order": orders[agg_index].as_ordered_string(),
+ "orderID": orders[agg_index].id,
+ "date": str(dates[agg_index])}
+ if run_ids:
+ point_metadata["runID"] = str(run_ids[agg_index])
+ meta.append(point_metadata)
+
+ # Add the multisample points, if requested.
+ if not hide_all_points and (is_multisample or
+ bool(request.args.get('csv')) or bool(request.args.get('download_csv'))):
for i, v in enumerate(values):
- point_metadata = dict(metadata)
- point_metadata["date"] = str(dates[i])
- points_data.append((x, v, point_metadata))
+ multisample_metadata = {"order": orders[i].as_ordered_string(),
+ "orderID": orders[i].id,
+ "date": str(dates[i])}
+ if run_ids:
+ multisample_metadata["runID"] = str(run_ids[i])
+ multisample_points_data["x"].append(point_label)
+ multisample_points_data["y"].append(v)
+ multisample_points_data["meta"].append(multisample_metadata)
# Add the standard deviation error bar, if requested.
if show_stddev:
mean = stats.mean(values)
sigma = stats.standard_deviation(values)
- errorbar_data.append((x, mean, sigma))
+ errorbar["x"].append(point_label)
+ errorbar["y"].append(mean)
+ errorbar["error_y"]["array"].append(sigma)
# Add the MAD error bar, if requested.
if show_mad:
med = stats.median(values)
mad = stats.median_absolute_deviation(values, med)
- errorbar_data.append((x, med, mad))
+ errorbar["x"].append(point_label)
+ errorbar["y"].append(med)
+ errorbar["error_y"]["array"].append(mad)
+
+ if show_cumulative_minimum:
+ min_val = agg_value if min_val is None else min(min_val, agg_value)
+ cumulative_minimum["x"].append(point_label)
+ cumulative_minimum["y"].append(min_val)
# Compute the moving average and or moving median of our data if
# requested.
if moving_average or moving_median:
def compute_moving_average(x, window, average_list, _):
- average_list.append((x, lnt.util.stats.mean(window)))
+ average_list["x"].append(x)
+ average_list["y"].append(lnt.util.stats.mean(window))
def compute_moving_median(x, window, _, median_list):
- median_list.append((x, lnt.util.stats.median(window)))
+ median_list["x"].append(x)
+ median_list["y"].append(lnt.util.stats.median(window))
def compute_moving_average_and_median(x, window, average_list,
median_list):
- average_list.append((x, lnt.util.stats.mean(window)))
- median_list.append((x, lnt.util.stats.median(window)))
+ average_list["x"].append(x)
+ average_list["y"].append(lnt.util.stats.mean(window))
+ median_list["x"].append(x)
+ median_list["y"].append(lnt.util.stats.median(window))
if moving_average and moving_median:
fun = compute_moving_average_and_median
@@ -1117,169 +1275,187 @@
else:
fun = compute_moving_median
- len_pts = len(pts)
+ len_pts = len(pts_x)
for i in range(len_pts):
start_index = max(0, i - moving_window_size)
end_index = min(len_pts, i + moving_window_size)
- window_pts = [x[1] for x in pts[start_index:end_index]]
- fun(pts[i][0], window_pts, moving_average_data,
+ window_pts = pts_y[start_index:end_index]
+ fun(pts_x[i], window_pts, moving_average_data,
moving_median_data)
- # On the overview, we always show the line plot.
- overview_plots.append({
- "data": pts,
- "color": util.toColorString(col),
- })
+ yaxis_index = metrics.index(field.name)
+ yaxis = "y" if yaxis_index == 0 else "y%d" % (yaxis_index + 1)
# Add the minimum line plot, if requested.
if show_lineplot:
plot = {
- "data": pts,
- "color": util.toColorString(col),
+ "name": trace_name("Line", test_name, field.name),
+ "legendgroup": test_name,
+ "yaxis": yaxis,
+ "type": "scatter",
+ "mode": "lines+markers",
+ "line": {"color": util.toColorString(col)},
+ "x": pts_x,
+ "y": pts_y,
+ "meta": meta
}
+ plot.update(trace_meta)
if url:
plot["url"] = url
graph_plots.append(plot)
+
# Add regression line, if requested.
- if show_linear_regression:
- xs = [t for t, v, _ in pts]
- ys = [v for t, v, _ in pts]
-
- # We compute the regression line in terms of a normalized X scale.
- x_min, x_max = min(xs), max(xs)
- try:
- norm_xs = [(x - x_min) / (x_max - x_min)
- for x in xs]
- except ZeroDivisionError:
- norm_xs = xs
-
- try:
- info = ext_stats.linregress(norm_xs, ys)
- except ZeroDivisionError:
- info = None
- except ValueError:
- info = None
-
- if info is not None:
- slope, intercept, _, _, _ = info
-
- reglin_col = [c * .7 for c in col]
- reglin_pts = [(x_min, 0.0 * slope + intercept),
- (x_max, 1.0 * slope + intercept)]
- graph_plots.insert(0, {
- "data": reglin_pts,
- "color": util.toColorString(reglin_col),
- "lines": {
- "lineWidth": 2
- },
- "shadowSize": 4,
- })
+ if show_linear_regression and len(pts_x) >= 2:
+ unique_x = list(set(pts_x))
+ if xaxis_date:
+ unique_x.sort()
+ else:
+ unique_x.sort(key=lambda sample: convert_revision(sample, cache=revision_cache))
+ num_unique_x = len(unique_x)
+ if num_unique_x >= 2:
+ dict_x = {}
+ x_min = pts_x[0]
+ x_max = pts_x[-1]
+
+ # We compute the regression line in terms of a normalized X scale.
+ if xaxis_date:
+ x_range = float((x_max - x_min).total_seconds())
+ for x_key in unique_x:
+ dict_x[x_key] = (x_key - x_min).total_seconds() / x_range
+ else:
+ for i, x_key in enumerate(unique_x):
+ dict_x[x_key] = i/(num_unique_x - 1)
+
+ norm_x = [dict_x[xi] for xi in pts_x]
+
+ try:
+ info = ext_stats.linregress(norm_x, pts_y)
+ except ZeroDivisionError:
+ info = None
+ except ValueError:
+ info = None
+
+ if info is not None:
+ slope, intercept, _, _, _ = info
+
+ reglin_col = [c * 0.8 for c in col]
+ if xaxis_date:
+ reglin_y = [(xi - x_min).total_seconds() / x_range * slope +
+ intercept for xi in unique_x]
+ else:
+ reglin_y = [i/(num_unique_x - 1) * slope +
+ intercept for i in range(num_unique_x)]
+ plot = {
+ "name": trace_name("Linear Regression", test_name, field.name),
+ "legendgroup": test_name,
+ "yaxis": yaxis,
+ "hoverinfo": "skip",
+ "type": "scatter",
+ "mode": "lines",
+ "line": {"color": util.toColorString(reglin_col), "width": 2},
+ # "shadowSize": 4,
+ "x": unique_x,
+ "y": reglin_y
+ }
+ plot.update(trace_meta)
+ graph_plots.insert(0, plot)
# Add the points plot, if used.
- if points_data:
+ if multisample_points_data["x"]:
pts_col = (0, 0, 0)
- plot = {
- "data": points_data,
- "color": util.toColorString(pts_col),
- "lines": {"show": False},
- "points": {
- "show": True,
- "radius": .25,
- "fill": True,
- },
- }
+ multisample_points_data.update({
+ "name": trace_name("Points", test_name, field.name),
+ "legendgroup": test_name,
+ "showlegend": False,
+ "yaxis": yaxis,
+ # "hoverinfo": "skip",
+ "type": "scatter",
+ "mode": "markers",
+ "marker": {"color": util.toColorString(pts_col), "size": 5}
+ })
+ multisample_points_data.update(trace_meta)
if url:
- plot['url'] = url
- graph_plots.append(plot)
+ multisample_points_data["url"] = url
+ graph_plots.append(multisample_points_data)
# Add the error bar plot, if used.
- if errorbar_data:
- bar_col = [c*.7 for c in col]
- graph_plots.append({
- "data": errorbar_data,
- "lines": {"show": False},
- "color": util.toColorString(bar_col),
- "points": {
- "errorbars": "y",
- "yerr": {
- "show": True,
- "lowerCap": "-",
- "upperCap": "-",
- "lineWidth": 1,
- }
- }
+ if errorbar["x"]:
+ bar_col = [c * 0.4 for c in col]
+ errorbar.update({
+ "name": trace_name("Error bars", test_name, field.name),
+ "showlegend": False,
+ "yaxis": yaxis,
+ "hoverinfo": "skip",
+ "type": "scatter",
+ "mode": "markers",
+ "marker": {"color": util.toColorString(bar_col)}
})
+ errorbar.update(trace_meta)
+ graph_plots.append(errorbar)
# Add the moving average plot, if used.
- if moving_average_data:
- col = [0.32, 0.6, 0.0]
- graph_plots.append({
- "data": moving_average_data,
- "color": util.toColorString(col),
+ if moving_average_data["x"]:
+ avg_col = [c * 0.7 for c in col]
+ moving_average_data.update({
+ "name": trace_name("Moving average", test_name, field.name),
+ "legendgroup": test_name,
+ "yaxis": yaxis,
+ "hoverinfo": "skip",
+ "type": "scatter",
+ "mode": "lines",
+ "line": {"color": util.toColorString(avg_col)}
})
+ moving_average_data.update(trace_meta)
+ graph_plots.append(moving_average_data)
# Add the moving median plot, if used.
- if moving_median_data:
- col = [0.75, 0.0, 1.0]
- graph_plots.append({
- "data": moving_median_data,
- "color": util.toColorString(col),
+ if moving_median_data["x"]:
+ med_col = [c * 0.6 for c in col]
+ moving_median_data.update({
+ "name": trace_name("Moving median", test_name, field.name),
+ "legendgroup": test_name,
+ "yaxis": yaxis,
+ "hoverinfo": "skip",
+ "type": "scatter",
+ "mode": "lines",
+ "line": {"color": util.toColorString(med_col)}
})
-
- if bool(request.args.get('json')):
- json_obj = dict()
- json_obj['data'] = graph_plots
- # Flatten ORM machine objects to their string names.
- simple_type_legend = []
- for li in legend:
- # Flatten name, make color a dict.
- new_entry = {
- 'name': li.machine.name,
- 'test': li.test_name,
- 'unit': li.field_name,
- 'color': util.toColorString(li.color),
- 'url': li.url,
- }
- simple_type_legend.append(new_entry)
- json_obj['legend'] = simple_type_legend
- json_obj['revision_range'] = revision_range
- json_obj['current_options'] = options
- json_obj['test_suite_name'] = ts.name
- json_obj['baselines'] = baseline_plots
- return flask.jsonify(**json_obj)
+ moving_median_data.update(trace_meta)
+ graph_plots.append(moving_median_data)
+
+ if cumulative_minimum["x"]:
+ min_col = [c * 0.5 for c in col]
+ cumulative_minimum.update({
+ "name": trace_name("Cumulative Minimum", test_name, field.name),
+ "legendgroup": test_name,
+ "yaxis": yaxis,
+ "hoverinfo": "skip",
+ "type": "scatter",
+ "mode": "lines",
+ "line": {"color": util.toColorString(min_col)}
+ })
+ cumulative_minimum.update(trace_meta)
+ graph_plots.append(cumulative_minimum)
+
+ if bool(request.args.get("json")) or bool(request.args.get("download_json")):
+ return json_response(options, graph_plots, legend, revision_range, ts, baseline_plots)
+ if bool(request.args.get("csv")) or bool(request.args.get("download_csv")):
+ # Add "Geometric mean" fake test to graph_parameter
+ if mean_parameter:
+ plot_parameters.append(PlotParameter(mean_parameter[0],
+ "Geometric Mean", # must be NT_test, but for geomean we use string
+ mean_parameter[1],
+ ts.sample_fields.index(mean_parameter[1])))
+ num_plots += 1
+ return graph_csv_response(options, num_plots, plot_parameters, graph_plots, mean_parameter)
return render_template("v4_graph.html", options=options,
- revision_range=revision_range,
graph_plots=graph_plots,
- overview_plots=overview_plots, legend=legend,
- baseline_plots=baseline_plots,
+ metrics=metrics,
+ legend=legend,
**ts_data(ts))
-
-def determine_x_value(point_label, fallback, revision_cache):
- """Given the order data, lets make a reasonable x axis value.
-
- :param point_label: the text representation of the x value
- :param fallback: The value to use for non
- :param revision_cache: a dict to use as a cache for convert_revision.
- :return: an integer or float value that is like the point_label or fallback.
-
- """
- rev_x = convert_revision(point_label, revision_cache)
- if len(rev_x) == 1:
- x = rev_x[0]
- elif len(rev_x) == 2:
- try:
- x = float(point_label)
- except ValueError:
- # It might have dashes or something silly
- x = float(str(rev_x[0]) + '.' + str(rev_x[1]))
- else:
- return fallback
- return x
-
-
@v4_route("/global_status")
def v4_global_status():
session = request.session
@@ -1631,19 +1807,6 @@
for r in results])
-class MatrixDataRequest(object):
- def __init__(self, machine, test, field):
- self.machine = machine
- self.test = test
- self.field = field
-
- def __repr__(self):
- return "{}:{}({} samples)" \
- .format(self.machine.name,
- self.test.name,
- len(self.samples) if self.samples else "No")
-
-
# How much data to render in the Matrix view.
MATRIX_LIMITS = [
('12', 'Small'),
@@ -1692,64 +1855,29 @@
post_limit = form.limit.data
else:
post_limit = MATRIX_LIMITS[0][0]
- data_parameters = [] # type: List[MatrixDataRequest]
- for name, value in request.args.items():
- # plot.=..
- if not name.startswith(str('plot.')):
- continue
-
- # Ignore the extra part of the key, it is unused.
- machine_id_str, test_id_str, field_index_str = value.split('.')
- try:
- machine_id = int(machine_id_str)
- test_id = int(test_id_str)
- field_index = int(field_index_str)
- except ValueError:
- err_msg = "data {} was malformed. {} must be int.int.int"
- return abort(400, err_msg.format(name, value))
-
- if not (0 <= field_index < len(ts.sample_fields)):
- return abort(404, "Invalid field index: {}".format(field_index))
-
- try:
- machine = session.query(ts.Machine) \
- .filter(ts.Machine.id == machine_id) \
- .one()
- except NoResultFound:
- return abort(404, "Invalid machine ID: {}".format(machine_id))
- try:
- test = session.query(ts.Test).filter(ts.Test.id == test_id).one()
- except NoResultFound:
- return abort(404, "Invalid test ID: {}".format(test_id))
- try:
- field = ts.sample_fields[field_index]
- except NoResultFound:
- return abort(404, "Invalid field_index: {}".format(field_index))
-
- valid_request = MatrixDataRequest(machine, test, field)
- data_parameters.append(valid_request)
+ plot_parameters = parse_and_load_plot_parameters(request.args, session, ts)
- if not data_parameters:
- abort(404, "Request requires some data arguments.")
+ if not plot_parameters:
+ abort(404, "Request requires some plot arguments.")
# Feature: if all of the results are from the same machine, hide the name
# to make the headers more compact.
dedup = True
- for r in data_parameters:
- if r.machine.id != data_parameters[0].machine.id:
+ for r in plot_parameters:
+ if r.machine.id != plot_parameters[0].machine.id:
dedup = False
if dedup:
- machine_name_common = data_parameters[0].machine.name
- machine_id_common = data_parameters[0].machine.id
+ machine_name_common = plot_parameters[0].machine.name
+ machine_id_common = plot_parameters[0].machine.id
else:
machine_name_common = machine_id_common = None
# It is nice for the columns to be sorted by name.
- data_parameters.sort(key=lambda x: x.test.name),
+ plot_parameters.sort(key=lambda x: x.test.name),
# Now lets get the data.
all_orders = set()
order_to_id = {}
- for req in data_parameters:
+ for req in plot_parameters:
q = session.query(req.field.column, ts.Order.llvm_project_revision,
ts.Order.id) \
.join(ts.Run) \
@@ -1772,7 +1900,7 @@
all_orders.add(s[1])
order_to_id[s[1]] = s[2]
if not all_orders:
- abort(404, "No data found.")
+ abort(404, "No orders found.")
# Now grab the baseline data.
user_baseline = baseline()
backup_baseline = next(iter(all_orders))
@@ -1784,7 +1912,7 @@
baseline_rev = backup_baseline
baseline_name = backup_baseline
- for req in data_parameters:
+ for req in plot_parameters:
q_baseline = session.query(req.field.column,
ts.Order.llvm_project_revision,
ts.Order.id) \
@@ -1814,7 +1942,7 @@
all_orders.insert(0, baseline_rev)
# Now calculate Changes between each run.
- for req in data_parameters:
+ for req in plot_parameters:
req.change = {}
for order in all_orders:
cur_samples = req.samples[order]
@@ -1834,7 +1962,7 @@
for order in all_orders:
curr_samples = []
prev_samples = []
- for req in data_parameters:
+ for req in plot_parameters:
curr_samples.extend(req.samples[order])
prev_samples.extend(req.samples[baseline_rev])
prev_geomean = calc_geomean(prev_samples)
@@ -1874,7 +2002,7 @@
return render_template("v4_matrix.html",
testsuite_name=g.testsuite_name,
- associated_runs=data_parameters,
+ associated_runs=plot_parameters,
orders=all_orders,
options=FakeOptions(),
analysis=lnt.server.reporting.analysis,
diff --git a/lnt/server/ui/views_util.py b/lnt/server/ui/views_util.py
new file mode 100644
--- /dev/null
+++ b/lnt/server/ui/views_util.py
@@ -0,0 +1,327 @@
+from future import standard_library
+standard_library.install_aliases()
+from math import floor, log10
+
+import flask
+from flask import abort
+from flask import send_file
+from flask import request
+
+from lnt.server.ui import util
+
+import csv
+try:
+ from StringIO import StringIO as CsvStringIO # for Python 2
+ from io import BytesIO as CsvBytesIO # for Python 2
+except ImportError:
+ from io import StringIO as CsvStringIO # for Python 3
+ from io import BytesIO as CsvBytesIO # for Python 3
+
+class BytesIOWrapper:
+ def __init__(self, string_buffer, encoding='utf-8'):
+ self.string_buffer = string_buffer
+ self.encoding = encoding
+
+ def __getattr__(self, attr):
+ return getattr(self.string_buffer, attr)
+
+ def read(self, size=-1):
+ content = self.string_buffer.read(size)
+ return content.encode(self.encoding)
+
+ def write(self, b):
+ content = b.decode(self.encoding)
+ return self.string_buffer.write(content)
+
+def json_response(options, graph_plots, legend, revision_range, test_suite, baseline_plots):
+ json_obj = dict()
+ json_obj['data'] = graph_plots
+ # Flatten ORM machine objects to their string names.
+ simple_type_legend = []
+ for li in legend:
+ # Flatten name, make color a dict.
+ new_entry = {
+ 'name': li.machine.name,
+ 'test': li.test_name,
+ 'unit': li.field_name,
+ 'color': util.toColorString(li.color),
+ 'url': li.url,
+ }
+ simple_type_legend.append(new_entry)
+ json_obj['legend'] = simple_type_legend
+ json_obj['revision_range'] = revision_range
+ json_obj['current_options'] = options
+ json_obj['test_suite_name'] = test_suite.name
+ json_obj['baselines'] = baseline_plots
+ flask_json = flask.jsonify(**json_obj)
+
+ if bool(request.args.get('json')):
+ return flask_json
+ else:
+ json_file = CsvBytesIO()
+ lines = flask_json.get_data()
+ json_file.write(lines)
+ json_file.seek(0)
+ return send_file(json_file,
+ mimetype='text/json',
+ attachment_filename='Graph.json',
+ as_attachment=True)
+
+
+def graph_csv_response(options, num_plots, graph_parameters, graph_plots, mean_parameter):
+ def opt_is_true(o):
+ return o in options and bool(options[o])
+
+ # Create list of the data names (of the plots)
+    has_linear_regression = False  # if true, the first len(graph_parameters) items of graph_plots are linear_regression traces
+ data_name = []
+
+ if len(graph_parameters) > 0:
+ # Special cases for lineplot and all_points
+ if not options['hide_lineplot']:
+ data_name.append('lineplot')
+ if not options['hide_all_points']:
+ data_name.append('all_points')
+
+ # Define index of the every plot according to settings menu items order
+ for opt in ['show_mad',
+ 'show_stddev',
+                'show_linear_regression', # these 3 plots are a special case
+ 'show_moving_average',
+ 'show_moving_median']:
+ if bool(options[opt]):
+ if opt == 'switch_min_mean':
+ continue # no data
+ elif opt == 'min_mean_checkbox':
+ continue # no data
+ elif opt == 'hide_highlight':
+ continue # no data
+ elif opt == 'show_mad': # Show Median Absolute Deviation
+ if opt_is_true('show_stddev'):
+ continue
+ else:
+ data_name.append('mad')
+ elif opt == 'show_stddev':
+ if opt_is_true('show_mad'):
+ data_name.append('mad+stddev') # doubled points count in data[]
+ else:
+ data_name.append('stddev')
+ elif opt == 'show_moving_average':
+ data_name.append('moving_average')
+ elif opt == 'show_failures':
+ continue # no data
+ elif opt == 'show_linear_regression':
+ has_linear_regression = True
+ elif opt == 'moving_window_size':
+ continue # no data
+ elif opt == 'logarithmic_scale':
+ continue # no data
+ elif opt == 'normalize_by_median':
+ continue # no data
+ elif opt == 'show_moving_median':
+ data_name.append('moving_median')
+
+ # Extract all data
+    # Save the linear_regression traces separately; their count always equals len(graph_parameters)
+ linear_regression = []
+ linear_regression_count = 0
+ if has_linear_regression:
+ linear_regression_count = len(graph_parameters)
+ linear_regression = graph_plots[:linear_regression_count]
+ # linear_regression data are in reverse format vs. other data
+ linear_regression.reverse()
+ graph_plots = graph_plots[linear_regression_count:]
+
+ # Calculate data count per plot, useful for debugging
+ plots_count = len(graph_parameters)
+ if plots_count > 0:
+ data_per_plot = int(len(graph_plots) / plots_count)
+ if data_per_plot * plots_count != len(graph_plots):
+ return abort(412,
+ "The total number of data lists (%d) for all graph plots of all tests "
+ "is not a multiple of the number of requested plots types (%d)" %
+ (len(graph_plots), plots_count))
+
+ # Root of the dict which will be used for csv building
+ csv_data = []
+
+ for i, graph_param in enumerate(graph_parameters):
+ machine = graph_param.machine
+ test = graph_param.test
+ field = graph_param.field
+ field_index = graph_param.field_index
+ for j in range(len(data_name)):
+ plot_index = i * len(data_name) + j
+ plot_data = {'machine': machine.name,
+ 'test': test if isinstance(test, str) else test.name,
+ 'field': field.name,
+ 'field_index': field_index,
+ 'data': {'name': data_name[j],
+ 'values': graph_plots[plot_index]}}
+ csv_data.append(plot_data)
+
+ # Special case for linear_regression
+ if has_linear_regression:
+ plot_data = {'machine': machine.name,
+ 'test': test if isinstance(test, str) else test.name,
+ 'field': field.name,
+ 'field_index': field_index,
+ 'data': {'name': 'linear_regression',
+ 'values': linear_regression[i]}}
+ csv_data.append(plot_data)
+
+ # Check points count in all data[] and split mad+stddev
+ revision_set = set()
+ for run_item in csv_data:
+ revision_set.add(len(run_item['data']['values']['x']))
+
+ if len(revision_set) == 1 and not (opt_is_true('show_mad') and opt_is_true('show_stddev')):
+ pass
+ elif len(revision_set) >= 2 and (opt_is_true('show_mad') and opt_is_true('show_stddev')):
+ # Split mad+stddev data -> mad and stddev separately
+ for run_item in csv_data:
+ if run_item['data']['name'] == 'mad+stddev':
+ elem1 = {'machine': run_item['machine'],
+ 'test': run_item['test'],
+ 'data': {'name': 'mad', 'values': {
+ 'x': run_item['data']['values']['x'][0:][::2], # even
+ 'y': run_item['data']['values']['y'][0:][::2],
+ 'error_y': {
+ 'array': run_item['data']['values']['error_y']['array'][0:][::2]}}},
+ 'field_index': run_item['field_index'],
+ 'field': run_item['field']}
+ elem2 = {'machine': run_item['machine'],
+ 'test': run_item['test'],
+ 'data': {'name': 'stddev', 'values': {
+ 'x': run_item['data']['values']['x'][1:][::2], # odd
+ 'y': run_item['data']['values']['y'][1:][::2],
+ 'error_y': {
+ 'array': run_item['data']['values']['error_y']['array'][1:][::2]}}},
+ 'field_index': run_item['field_index'],
+ 'field': run_item['field']}
+ csv_data.append(elem1)
+ csv_data.append(elem2)
+ csv_data.remove(run_item)
+
+ # Order the plots by machine name, test name and then field.
+ csv_data.sort(key=lambda item: (item['machine'],
+ item['test'],
+ item['field'],
+ item['field_index'],
+ item['data']['name']))
+
+ # Create header row
+ metadata_index = 0
+ column_names = ['revision']
+ for item in csv_data:
+ # metadata
+ if item['data']['name'] == 'all_points':
+ meta_prefix = 'meta-' + str(metadata_index) + '-'
+ column_names.append(meta_prefix + 'Machine')
+ column_names.append(meta_prefix + 'Test')
+ column_names.append(meta_prefix + 'Revision')
+ column_names.append(meta_prefix + 'Value')
+ column_names.append(meta_prefix + 'Date')
+ column_names.append(meta_prefix + 'Run')
+ metadata_index += 1
+
+ column_name = item['machine'] + ' | ' + item['test'] + ' | ' + item['data']['name']
+ column_names.append(column_name)
+
+ # Get list of revisions (labels)
+ revision_set = set()
+ for item in csv_data:
+ for i in item['data']['values']['x']:
+ revision_set.add(i)
+ revision_list = sorted(list(revision_set))
+
+ # Write csv header row
+ csv_file = CsvStringIO()
+ csv_writer = csv.DictWriter(csv_file, column_names, restval='', lineterminator='\n')
+ csv_writer.writeheader()
+
+ # Write csv row
+ for revision in revision_list:
+ metadata_index = 0
+ rows = {'revision': revision, 'data': {}}
+ for item in csv_data:
+ # metadata column names (with prefix 'meta-###')
+ if item['data']['name'] == 'all_points':
+ meta_prefix = 'meta-' + str(metadata_index) + '-'
+ meta_name_machine = meta_prefix + 'Machine'
+ meta_name_test = meta_prefix + 'Test'
+ meta_name_revision = meta_prefix + 'Revision'
+ meta_name_value = meta_prefix + 'Value'
+ meta_name_date = meta_prefix + 'Date'
+ meta_name_run = meta_prefix + 'Run'
+ metadata_index += 1
+
+ column_name = item['machine'] + ' | ' + item['test'] + ' | ' + item['data']['name']
+
+ # Deal with multiple runs for revision
+ # Iteration by revision
+ run_by_revision = (i for i, x in enumerate(item['data']['values']['x'])
+ if x == revision)
+ for i in run_by_revision:
+ if item['data']['name'] == 'mad' or item['data']['name'] == 'stddev':
+ # deviation values
+ column_value = item['data']['values']['error_y']['array'][i]
+ else:
+                    # other values
+ column_value = item['data']['values']['y'][i]
+
+ # metadata values
+ if item['data']['name'] == 'all_points':
+ meta_value_machine = item['data']['values']['machine']
+ meta_value_test = str(item['data']['values'].get('test_name', item['test']))
+ meta_value_revision = str(revision)
+ meta_value_value = str(column_value)
+ meta_value_date = item['data']['values']['meta'][i]['date']
+ meta_value_run = item['data']['values']['meta'][i]['runID']
+
+ # add columns
+ if column_name not in rows['data']:
+ if item['data']['name'] == 'all_points':
+ # add metadata columns
+ rows['data'].update({meta_name_machine: []})
+ rows['data'].update({meta_name_test: []})
+ rows['data'].update({meta_name_revision: []})
+ rows['data'].update({meta_name_value: []})
+ rows['data'].update({meta_name_date: []})
+ rows['data'].update({meta_name_run: []})
+ # add other column
+ rows['data'].update({column_name: []})
+
+ # add values
+ if item['data']['name'] == 'all_points':
+ # add metadata values
+ rows['data'][meta_name_machine].append(str(meta_value_machine))
+ rows['data'][meta_name_test].append(str(meta_value_test))
+ rows['data'][meta_name_revision].append(str(meta_value_revision))
+ rows['data'][meta_name_value].append(str(meta_value_value))
+ rows['data'][meta_name_date].append(str(meta_value_date))
+ rows['data'][meta_name_run].append(str(meta_value_run))
+ # add other values
+ rows['data'][column_name].append(column_value)
+
+ # Write rows for revision
+ counts = (len(rows['data'][x]) for x in rows['data'])
+ for i in range(max(counts)):
+ row = {'revision': revision}
+ for column_name in rows['data']:
+ column_value = rows['data'][column_name][i] \
+ if i < len(rows['data'][column_name]) \
+ else ''
+ row.update({column_name: column_value})
+
+ csv_writer.writerow(row)
+
+ csv_file.seek(0)
+ if bool(request.args.get('csv')):
+ return csv_file.read(-1)
+ else:
+ bw = BytesIOWrapper(csv_file)
+ return send_file(bw,
+ mimetype='text/csv',
+ attachment_filename='Graph.csv',
+ as_attachment=True)
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -99,6 +99,7 @@
'static/flot/*.min.js',
'static/d3/*.min.js',
'static/jquery/**/*.min.js',
+ 'static/plotly/*.min.js',
'templates/*.html',
'templates/reporting/*.html',
'templates/reporting/*.txt'],
diff --git a/tests/server/ui/test_api.py b/tests/server/ui/test_api.py
--- a/tests/server/ui/test_api.py
+++ b/tests/server/ui/test_api.py
@@ -41,20 +41,20 @@
u'test_id': 1,
u'compile_time': 0.007}
-graph_data = [[[152292], 1.0,
- {u'date': u'2012-05-01 16:28:23',
- u'label': u'152292',
- u'runID': u'5'}],
- [[152293], 10.0,
+graph_data = [[u'152292', 1.0,
+ {u'date': u'2012-05-01 16:28:23',
+ u'label': u'152292',
+ u'runID': u'5'}],
+ [u'152293', 10.0,
+ {u'date': u'2012-05-03 16:28:24',
+ u'label': u'152293',
+ u'runID': u'6'}]]
+
+graph_data2 = [[u'152293', 10.0,
{u'date': u'2012-05-03 16:28:24',
u'label': u'152293',
u'runID': u'6'}]]
-graph_data2 = [[[152293], 10.0,
- {u'date': u'2012-05-03 16:28:24',
- u'label': u'152293',
- u'runID': u'6'}]]
-
possible_run_keys = {
u'ARCH',
u'CC_UNDER_TEST_IS_CLANG',
diff --git a/tests/server/ui/test_matrix_page.py b/tests/server/ui/test_matrix_page.py
--- a/tests/server/ui/test_matrix_page.py
+++ b/tests/server/ui/test_matrix_page.py
@@ -36,16 +36,16 @@
client = self.client
reply = check_code(client, '/v4/nts/matrix',
expected_code=HTTP_NOT_FOUND)
- self.assertIn("Request requires some data arguments.",
+ self.assertIn("Request requires some plot arguments.",
reply.get_data(as_text=True))
reply = check_code(client, '/v4/nts/matrix?plot.0=1.1.1',
expected_code=HTTP_NOT_FOUND)
- self.assertIn("No data found.", reply.get_data(as_text=True))
+ self.assertIn("No orders found.", reply.get_data(as_text=True))
reply = check_code(client, '/v4/nts/matrix?plot.0=a.2.0',
expected_code=HTTP_BAD_REQUEST)
- self.assertIn("malformed", reply.get_data(as_text=True))
+ self.assertIn("Expected int as plot value", reply.get_data(as_text=True))
reply = check_code(client, '/v4/nts/matrix?plot.0=999.0.0',
expected_code=HTTP_NOT_FOUND)