Index: lnt/trunk/lnt/server/ui/templates/v4_graph.html
===================================================================
--- lnt/trunk/lnt/server/ui/templates/v4_graph.html
+++ lnt/trunk/lnt/server/ui/templates/v4_graph.html
@@ -56,7 +56,7 @@
         },
         highlight : {
 {% if revision_range is not none %}
-          range: {{revision_range|tojson|safe}}
+          range: {{revision_range|tojson|safe}}
 {% else %}
           enabled: false
 {% endif %}
@@ -73,7 +73,7 @@
     graph_options['grid']['markings'] = baseline_plots;
     var tmp_plots = update_graphplots(graph_plots);
     var main_plot = $.plot(graph, tmp_plots, graph_options);
-
+
     // Add tooltips.
     graph.bind("plotclick", function (e, p, i) {
         update_tooltip(e, p, i, show_tooltip, tmp_plots);
@@ -112,7 +112,7 @@
         }));
     });
     bind_zoom_bar(main_plot);
-
+
 }
@@ -157,6 +157,11 @@
+
+
+
+
@@ -230,7 +235,7 @@
 {% endif %}
 {% endfor %}
-
+
@@ -253,7 +258,7 @@
-
+
 Mean() as Aggregation
 Hide Line Plot:
@@ -261,7 +266,7 @@
-
+
 Legend
Index: lnt/trunk/lnt/server/ui/views.py
===================================================================
--- lnt/trunk/lnt/server/ui/views.py
+++ lnt/trunk/lnt/server/ui/views.py
@@ -7,6 +7,7 @@
 import json
 
 import flask
+from flask import session
 from flask import abort
 from flask import current_app
 from flask import g
@@ -220,7 +221,7 @@
                                associated_runs=associated_runs)
     except NoResultFound as e:
         abort(404)
-
+
 class V4RequestInfo(object):
     def __init__(self, run_id, only_html_body=True):
         self.db = request.get_db()
@@ -302,7 +303,7 @@
         classes = {
             'table': 'table table-striped table-condensed table-hover'
         }
-
+
         reports = lnt.server.reporting.runs.generate_run_report(
             self.run, baseurl=db_url_for('index', _external=True),
             only_html_body=only_html_body, result=None,
@@ -505,9 +506,25 @@
     from lnt.external.stats import stats as ext_stats
 
     ts = request.get_testsuite()
+    switch_min_mean_local = False
+    if 'switch_min_mean_session' not in session:
+        session['switch_min_mean_session'] = False
 
     # Parse the view options.
-    options = {}
+    options = {'min_mean_checkbox': 'min()'}
+    if 'submit' in request.args:  # user pressed a button
+        if 'switch_min_mean' in request.args:  # user checked the mean() checkbox
+            session['switch_min_mean_session'] = options['switch_min_mean'] = \
+                bool(request.args.get('switch_min_mean'))
+            switch_min_mean_local = session['switch_min_mean_session']
+        else:  # the mean() checkbox is not checked
+            session['switch_min_mean_session'] = options['switch_min_mean'] = \
+                bool(request.args.get('switch_min_mean'))
+            switch_min_mean_local = session['switch_min_mean_session']
+    else:  # the page was loaded by following a link, not the submit button
+        options['switch_min_mean'] = switch_min_mean_local = \
+            session['switch_min_mean_session']
+
     options['hide_lineplot'] = bool(request.args.get('hide_lineplot'))
     show_lineplot = not options['hide_lineplot']
     options['show_mad'] = show_mad = bool(request.args.get('show_mad'))
@@ -769,8 +786,12 @@
             values = [v*normalize_by for v in data]
 
         aggregation_fn = min
+
+        if switch_min_mean_local:
+            aggregation_fn = lnt.util.stats.agg_mean
         if field.bigger_is_better:
             aggregation_fn = max
+
         agg_value, agg_index = \
             aggregation_fn((value, index) for (index, value) in enumerate(values))
 
@@ -780,7 +801,7 @@
             metadata["date"] = str(dates[agg_index])
         if runs:
             metadata["runID"] = str(runs[agg_index])
-
+
         if len(graph_datum) > 1:
             # If there are more than one plot in the graph, also label the
             # test name.
@@ -795,7 +816,7 @@
                 point_metadata = dict(metadata)
                 point_metadata["date"] = str(dates[i])
                 points_data.append((x, v, point_metadata))
-
+
         # Add the standard deviation error bar, if requested.
         if show_stddev:
             mean = stats.mean(values)
@@ -1234,7 +1255,7 @@
     if queue_length > 10:
         explode = True
         msg = "Queue too long."
-
+
     import resource
     stats = resource.getrusage(resource.RUSAGE_SELF)
     mem = stats.ru_maxrss
@@ -1299,7 +1320,7 @@
     for each dataset to add, there will be a "plot.n=.m.b.f" where m is
     machine ID, b is benchmark ID and f os field kind offset. "n" is used
     to unique the paramters, and is ignored.
-
+
     """
     ts = request.get_testsuite()
     # Load the matrix request parameters.
@@ -1379,9 +1400,9 @@
     limit = int(limit)
     if limit != -1:
         q = q.limit(limit)
-
+
     req.samples = defaultdict(list)
-
+
     for s in q.all():
         req.samples[s[1]].append(s[0])
         all_orders.add(s[1])
@@ -1484,7 +1505,7 @@
         show_mad = False
         show_all_samples = False
         show_sample_counts = False
-
+
     return render_template("v4_matrix.html",
                            testsuite_name=g.testsuite_name,
                            associated_runs=data_parameters,
Index: lnt/trunk/lnt/util/stats.py
===================================================================
--- lnt/trunk/lnt/util/stats.py
+++ lnt/trunk/lnt/util/stats.py
@@ -25,6 +25,23 @@
     else:
         return None
 
+def agg_mean(pairs):
+    """Aggregation function in views.py receives input via enumerate and
+    produces a tuple.
+    Input: (value, index)
+    Output: (mean, 0), or (None, None) on invalid input.
+    """
+    if not pairs:
+        return (None, None)
+    my_sum = 0.0
+    counter = 0
+    for item in pairs:
+        my_sum += item[0]
+        counter += 1
+    if counter > 0:
+        return (my_sum / counter, 0)
+    return (None, None)
+
 
 def median(l):
     if not l:
Index: lnt/trunk/tests/server/ui/V4Pages.py
===================================================================
--- lnt/trunk/tests/server/ui/V4Pages.py
+++ lnt/trunk/tests/server/ui/V4Pages.py
@@ -206,7 +206,7 @@
 
     # Fetch the index page.
     check_code(client, '/')
-
+
     # Rules the index page.
     check_code(client, '/rules')
 
@@ -267,7 +267,7 @@
                expected_code=HTTP_NOT_FOUND)
     # Check baselines work.
     check_code(client, '/v4/nts/graph?plot.0=1.3.2&baseline.60=3')
-
+
     # Check some variations of the daily report work.
    check_code(client, '/v4/nts/daily_report/2012/4/12')
    check_code(client, '/v4/nts/daily_report/2012/4/11')
@@ -424,18 +424,21 @@
    # Check some variations of the daily report work.
    check_code(client, '/v4/compile/daily_report/2014/6/5?day_start=16')
    check_code(client, '/v4/compile/daily_report/2014/6/4')
-
+
    check_redirect(client, '/v4/nts/regressions/new_from_graph/1/1/1/1',
                   '/v4/nts/regressions/1')
    check_code(client, '/v4/nts/regressions/')
    check_code(client, '/v4/nts/regressions/?machine_filter=machine2')
    check_code(client, '/v4/nts/regressions/?machine_filter=machine0')
    check_code(client, '/v4/nts/regressions/1')
-
+
    check_json(client, '/v4/nts/regressions/1?json=True')
-
-
+    # Make sure the new option does not break anything
+    check_code(client, '/db_default/v4/nts/graph?switch_min_mean=yes&plot.0=1.3.2&submit=Update')
+    check_json(client, '/db_default/v4/nts/graph?switch_min_mean=yes&plot.0=1.3.2&json=true&submit=Update')
+    check_code(client, '/db_default/v4/nts/graph?switch_min_mean=yes&plot.0=1.3.2')
+    check_json(client, '/db_default/v4/nts/graph?switch_min_mean=yes&plot.0=1.3.2&json=true')
 
 if __name__ == '__main__':
     main()
Index: lnt/trunk/tests/server/ui/statsTester.py
===================================================================
--- lnt/trunk/tests/server/ui/statsTester.py
+++ lnt/trunk/tests/server/ui/statsTester.py
@@ -0,0 +1,55 @@
+#
+# create temporary instance
+# Cleanup temporary directory in case one remained from a previous run - also
+# see PR9904.
+# RUN: rm -rf %t.instance
+# RUN: python %{shared_inputs}/create_temp_instance.py \
+# RUN:   %s %{shared_inputs}/SmallInstance %t.instance \
+# RUN:   %S/Inputs/V4Pages_extra_records.sql
+#
+# RUN: python %s %t.instance
+
+import unittest
+
+import lnt.util.stats as stats
+
+INDEX = 0
+
+
+class TestLNTStatsTester(unittest.TestCase):
+
+    def loc_test_agg_mean(self, values):
+        if values is None:
+            return stats.agg_mean(None)
+        agg_value, agg_index = stats.agg_mean(
+            (value, index) for (index, value) in enumerate(values))
+        return (agg_value, agg_index)
+
+    def test_agg_mean(self):
+        test_list1 = [1, 2, 3, 4, 6]
+        self.assertEqual(self.loc_test_agg_mean(test_list1), (3.2, INDEX))
+        test_list2 = [1.0, 2.0, 3.0, 4.0]
+        self.assertEqual(self.loc_test_agg_mean(test_list2), (2.5, INDEX))
+        test_list3 = [1.0]
+        self.assertEqual(self.loc_test_agg_mean(test_list3), (1.0, INDEX))
+        self.assertEqual(self.loc_test_agg_mean([]), (None, None))
+        self.assertEqual(self.loc_test_agg_mean(None), (None, None))
+
+        # Test it exactly how it is called in views.py, without indirection.
+        agg_value, agg_index = stats.agg_mean(
+            (value, index) for (index, value) in enumerate(test_list1))
+        self.assertEqual((3.2, INDEX), (agg_value, agg_index))
+        agg_value, agg_index = stats.agg_mean(
+            (value, index) for (index, value) in enumerate(test_list2))
+        self.assertEqual((2.5, INDEX), (agg_value, agg_index))
+        agg_value, agg_index = stats.agg_mean(
+            (value, index) for (index, value) in enumerate(test_list3))
+        self.assertEqual((1.0, INDEX), (agg_value, agg_index))
+
+if __name__ == '__main__':
+    try:
+        unittest.main()
+    except AttributeError:
+        # Command line parameters are treated as test cases when
+        # running with lit rather than python directly.
+        import sys
+        if len(sys.argv) != 2:
+            sys.exit("Something went horribly wrong. You need parameters.")
+        del sys.argv[1:]
+        unittest.main()
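
A minimal sketch of the aggregation selection this patch introduces, assuming an LNT checkout on the Python path. The helper pick_aggregation and the literal values list are illustrative stand-ins, not part of LNT; lnt.util.stats.agg_mean and the (value, index) pairing come directly from the views.py hunk above.

    # Illustrative sketch, not part of the patch.
    import lnt.util.stats

    def pick_aggregation(switch_min_mean, bigger_is_better):
        # Same precedence as in the v4_graph view: min() by default, mean when
        # the session-backed checkbox is set, max for bigger-is-better fields.
        aggregation_fn = min
        if switch_min_mean:
            aggregation_fn = lnt.util.stats.agg_mean
        if bigger_is_better:
            aggregation_fn = max
        return aggregation_fn

    values = [3.0, 1.0, 2.0]  # hypothetical sample values for one plot
    aggregation_fn = pick_aggregation(switch_min_mean=True, bigger_is_better=False)
    agg_value, agg_index = \
        aggregation_fn((value, index) for (index, value) in enumerate(values))
    # agg_value == 2.0 (the mean); agg_index == 0, as documented in agg_mean().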