Index: lnt/trunk/lnt/server/db/fieldchange.py
===================================================================
--- lnt/trunk/lnt/server/db/fieldchange.py
+++ lnt/trunk/lnt/server/db/fieldchange.py
@@ -37,8 +37,10 @@
                 "That will be very slow.".format(run_size))
     runinfo = lnt.server.reporting.analysis.RunInfo(ts, runs_to_load)
-
-    for field in list(ts.sample_fields):
+
+    # Only store fieldchanges for "metric" samples like execution time;
+    # not for fields with other data, e.g. hash of a binary
+    for field in list(ts.Sample.get_metric_fields()):
         for test_id in runinfo.test_ids:
             result = runinfo.get_comparison_result(runs, previous_runs,
                                                    test_id, field)
Index: lnt/trunk/lnt/server/db/testsuitedb.py
===================================================================
--- lnt/trunk/lnt/server/db/testsuitedb.py
+++ lnt/trunk/lnt/server/db/testsuitedb.py
@@ -330,8 +330,21 @@
                     if field not in status_fields:
                         yield field
 
-            # Dynamically create fields for all of the test suite defined sample
-            # fields.
+            @staticmethod
+            def get_metric_fields():
+                """
+                get_metric_fields() -> [SampleField*]
+
+                Get the sample fields which represent some kind of metric, i.e.
+                those which have a value that can be interpreted as better or
+                worse than other potential values for this field.
+                """
+                for field in self.Sample.fields:
+                    if field.type.name == 'Real':
+                        yield field
+
+            # Dynamically create fields for all of the test suite defined
+            # sample fields.
             #
             # FIXME: We might want to index some of these, but for a different
             # reason than above. It is possible worth it to turn the compound
@@ -348,6 +361,8 @@
                 elif item.type.name == 'Status':
                     item.column = Column(item.name, Integer, ForeignKey(
                             testsuite.StatusKind.id))
+                elif item.type.name == 'Hash':
+                    item.column = Column(item.name, String)
                 else:
                     raise ValueError,(
                         "test suite defines unknown sample type %r" (
Index: lnt/trunk/lnt/server/db/v4db.py
===================================================================
--- lnt/trunk/lnt/server/db/v4db.py
+++ lnt/trunk/lnt/server/db/v4db.py
@@ -118,10 +118,13 @@
         # Resolve or create the known sample types.
         self.real_sample_type = self.query(testsuite.SampleType)\
-            .filter_by(name = "Real").first()
+            .filter_by(name="Real").first()
         self.status_sample_type = self.query(testsuite.SampleType)\
-            .filter_by(name = "Status").first()
-        assert (self.real_sample_type and self.status_sample_type), \
+            .filter_by(name="Status").first()
+        self.hash_sample_type = self.query(testsuite.SampleType)\
+            .filter_by(name="Hash").first()
+        assert (self.real_sample_type and self.status_sample_type and
+                self.hash_sample_type), \
             "sample types not initialized!"
 
     def close(self):
Index: lnt/trunk/lnt/server/reporting/analysis.py
===================================================================
--- lnt/trunk/lnt/server/reporting/analysis.py
+++ lnt/trunk/lnt/server/reporting/analysis.py
@@ -143,8 +143,12 @@
         else:
             return UNCHANGED_PASS
 
+    # FIXME: take into account hash of binary - if available. If the hash is
+    # the same, the binary is the same and therefore the difference cannot be
+    # significant - for execution time. It can be significant for compile time.
     def get_value_status(self, confidence_interval=2.576,
-                         value_precision=MIN_VALUE_PRECISION, ignore_small=True):
+                         value_precision=MIN_VALUE_PRECISION,
+                         ignore_small=True):
         if self.current is None or self.previous is None:
             return None
Index: lnt/trunk/lnt/server/reporting/dailyreport.py
===================================================================
--- lnt/trunk/lnt/server/reporting/dailyreport.py
+++ lnt/trunk/lnt/server/reporting/dailyreport.py
@@ -74,7 +74,7 @@
         self.year = year
         self.month = month
         self.day = day
-        self.fields = list(ts.Sample.get_primary_fields())
+        self.fields = list(ts.Sample.get_metric_fields())
         self.day_start_offset = datetime.timedelta(
             hours=day_start_offset_hours)
         self.for_mail = for_mail
@@ -218,7 +218,7 @@
         # discretion in determining whether or not a particular result is
         # worth considering (as opposed to noise).
         #
-        # The idea is as follows, for each (machine, test, primary_field),
+        # The idea is as follows, for each (machine, test, metric_field),
         # classify the result into one of REGRESSED, IMPROVED, UNCHANGED_FAIL,
         # ADDED, REMOVED, PERFORMANCE_REGRESSED, PERFORMANCE_IMPROVED.
         #
Index: lnt/trunk/lnt/server/reporting/runs.py
===================================================================
--- lnt/trunk/lnt/server/reporting/runs.py
+++ lnt/trunk/lnt/server/reporting/runs.py
@@ -66,23 +66,23 @@
     sri = lnt.server.reporting.analysis.RunInfo(
         ts, runs_to_load, aggregation_fn, confidence_lv)
 
-    # Get the test names, primary fields and total test counts.
+    # Get the test names, metric fields and total test counts.
     test_names = ts.query(ts.Test.name, ts.Test.id).\
         order_by(ts.Test.name).\
         filter(ts.Test.id.in_(sri.test_ids)).all()
-    primary_fields = list(ts.Sample.get_primary_fields())
-    num_total_tests = len(primary_fields) * len(test_names)
+    metric_fields = list(ts.Sample.get_metric_fields())
+    num_total_tests = len(metric_fields) * len(test_names)
 
     # Gather the run-over-run changes to report, organized by field and then
     # collated by change type.
     run_to_run_info, test_results = _get_changes_by_type(
-        run, compare_to, primary_fields, test_names, num_comparison_runs, sri)
+        run, compare_to, metric_fields, test_names, num_comparison_runs, sri)
 
     # If we have a baseline, gather the run-over-baseline results and
     # changes.
     if baseline:
         run_to_baseline_info, baselined_results = _get_changes_by_type(
-            run, baseline, primary_fields, test_names, num_comparison_runs, sri)
+            run, baseline, metric_fields, test_names, num_comparison_runs, sri)
     else:
         run_to_baseline_info = baselined_results = None
@@ -222,11 +222,12 @@
     return subject, text_report, html_report, sri
 
-def _get_changes_by_type(run_a, run_b, primary_fields, test_names,
+
+def _get_changes_by_type(run_a, run_b, metric_fields, test_names,
                          num_comparison_runs, sri):
     comparison_results = {}
     results_by_type = []
-    for field in primary_fields:
+    for field in metric_fields:
         new_failures = []
         new_passes = []
         perf_regressions = []
Index: lnt/trunk/lnt/server/reporting/summaryreport.py
===================================================================
--- lnt/trunk/lnt/server/reporting/summaryreport.py
+++ lnt/trunk/lnt/server/reporting/summaryreport.py
@@ -206,7 +206,7 @@
                 build_mode = 'Release'
 
             # Return a datapoint for each passing field.
-            for field_name,field,status_field in ts_sample_primary_fields:
+            for field_name, field, status_field in ts_sample_metric_fields:
                 # Ignore failing samples.
                 if status_field and \
                         sample[2 + status_field.index] == lnt.testing.FAIL:
@@ -276,7 +276,7 @@
             metric = 'Compile Time'
 
             # Report the user and wall time.
-            for field_name,field,status_field in ts_sample_primary_fields:
+            for field_name, field, status_field in ts_sample_metric_fields:
                 if field_name not in ('user_time', 'wall_time'):
                     continue
@@ -303,15 +303,15 @@
                 return get_compile_datapoints_for_sample(ts, sample)
 
         # For each column...
-        for index,runs in enumerate(self.runs_at_index):
+        for index, runs in enumerate(self.runs_at_index):
             # For each test suite and run list...
-            for ts,(ts_runs,_) in zip(self.testsuites, runs):
+            for ts, (ts_runs, _) in zip(self.testsuites, runs):
                 ts_tests = self.tests[ts]
 
-                # Compute the primary sample fields.
-                ts_sample_primary_fields = [
+                # Compute the metric fields.
+                ts_sample_metric_fields = [
                     (f.name, f, f.status_field)
-                    for f in ts.Sample.get_primary_fields()]
+                    for f in ts.Sample.get_metric_fields()]
 
                 # Compute a mapping from run id to run.
                 run_id_map = dict((r.id, r)
Index: lnt/trunk/lnt/server/ui/templates/reporting/runs.html
===================================================================
--- lnt/trunk/lnt/server/ui/templates/reporting/runs.html
+++ lnt/trunk/lnt/server/ui/templates/reporting/runs.html
@@ -7,7 +7,7 @@
 {% macro add_report_changes_detail_for_field_and_bucket(
        field, show_perf, run_url, field_index, field_display_name,
        bucket_name, bucket, test_names,
-       primary_name, primary_field_suffix, secondary_field_suffix, secondary_info, styles
+       metric_name, metric_field_suffix, secondary_field_suffix, secondary_info, styles
    ) %}
@@ -24,10 +24,10 @@
   {% endfor %}
 {% else %}
-
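
The sketch below is not part of the patch; it is a minimal, self-contained illustration of the idea behind get_metric_fields(): only sample fields whose type is 'Real' are treated as metrics, so 'Status' fields and the new 'Hash' fields are skipped by field-change detection and by the reports. The SampleType/SampleField stand-ins and the field names here are simplified assumptions for illustration, not LNT's real ORM classes.

# Illustration only: simplified stand-ins for LNT's dynamically generated
# Sample/SampleField classes; not the actual testsuitedb definitions.
from collections import namedtuple

SampleType = namedtuple('SampleType', ['name'])
SampleField = namedtuple('SampleField', ['name', 'type'])

REAL = SampleType('Real')      # metric values, e.g. execution time
STATUS = SampleType('Status')  # pass/fail markers
HASH = SampleType('Hash')      # hash of the tested binary, added by this patch

fields = [
    SampleField('compile_time', REAL),
    SampleField('execution_time', REAL),
    SampleField('execution_status', STATUS),
    SampleField('hash', HASH),
]


def get_metric_fields(fields):
    """Yield only the fields whose values can be ranked better or worse."""
    for field in fields:
        if field.type.name == 'Real':
            yield field


# Field-change detection and the reports iterate over metric fields only,
# so the hash is stored with each sample but never compared as a metric.
print([f.name for f in get_metric_fields(fields)])
# -> ['compile_time', 'execution_time']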