Index: docs/importing_data.rst
===================================================================
--- docs/importing_data.rst
+++ docs/importing_data.rst
@@ -20,103 +20,50 @@
   lnt importreport --machine=my-machine-name --order=1234 --testsuite=nts results.txt report.json
   lnt submit http://mylnt.com/default/submitRun --commit=1 report.json
 
+.. _json_format:
 
-Importing Data from Other Test Systems
-======================================
+LNT Report File Format
+======================
 
 The lnt importreport tool is an easy way to import data into LNTs test format.
-Or you can write your own importer.
+You can also create LNTs report data directly for additional flexibility.
 
 First, make sure you've understood the underlying :ref:`concepts` used by LNT.
 
-Given how simple it is to make your own results and send them to LNT,
-it is common to not use the LNT client application at all, and just have a
-custom script run your tests and submit the data to the LNT server. Details
-on how to do this are in :mod:`lnt.testing`
-
-If for some reason you prefer to generate the json file more directly, the
-current format looks like below. It remains recommended to use the APIs in
-:mod:`lnt.testing` to be better protected against future changes to the json
-format::
+.. code-block:: none
 
   {
-      "Machine": {
-          "Info": {
-              (_String_: _String_)* // optional extra info about the machine.
-          },
-          "Name": _String_ // machine name, mandatory
-      },
-      "Run": {
-          "End Time": "%Y-%m-%d %H:%M:%S", // mandatory
-          "Start Time": "%Y-%m-%d %H:%M:%S", // mandatory
-          "Info": {
-              "run_order": _String_, // mandatory
-              "tag": "nts" // mandatory
+      "machine": {
+          "name": _String_ // machine name, mandatory
+          (_String_: _String_)* // optional extra info
+      },
+      "run": {
+          "start_time": "%Y-%m-%d %H:%M:%S", // mandatory
+          "end_time": "%Y-%m-%d %H:%M:%S", // mandatory
          (_String_: _String_)* // optional extra info about the run.
-          }
-      },
-      "Tests": [
+          // At least one of the extra fields is used as ordering and is
+          // mandatory. For the 'nts' and 'Compile' schemas this is the
+          // 'llvm_project_revision' field.
+      },
+      "tests": [
           {
-              "Data": [ (float+) ],
-              "Info": {},
-              "Name": "nts._ProgramName_._metric_"
+              "name": _String_, // test name mandatory
+              (_String_: _Data_)* // List of metrics, _Data_ allows:
+                                  // number, string or list of numbers
           }+
       ]
-  }
-
-
-A concrete small example is::
-
-  {
-      "Machine": {
-          "Info": {
-          },
-          "Name": "LNT-AArch64-A53-O3__clang_DEV__aarch64"
-      },
-      "Run": {
-          "End Time": "2016-04-07 14:25:52",
-          "Start Time": "2016-04-07 09:33:48",
-          "Info": {
-              "run_order": "265649",
-              "tag": "nts"
-          }
-      },
-      "Tests": [
-          {
-              "Data": [
-                  0.1056,
-                  0.1055
-              ],
-              "Info": {},
-              "Name": "nts.suite1/program1.exec"
-          },
-          {
-              "Data": [
-                  0.2136
-              ],
-              "Info": {},
-              "Name": "nts.suite2/program1.exec"
-          }
-      ]
-  }
+  }
 
-Make sure that:
- * The Run.Info.tag value is "nts".
- * The test names always start with "nts.".
- * The extension of the test name indicate what kind of data is recorded.
-   Currently accepted extensions in the NTS database are:
+A concrete small example is:
 
- * ".exec": represents execution time - a lower number is better.
- * ".exec.status": a non zero value represents a program failure.
- * ".score": represent a benchmark score - a higher number is better.
- * ".code_size": represents the code size of a program.
- * ".hash": represents a hash of the binary program being executed. This is
-   used to detect if between compiler versions, the generated code has
-   changed.
- * ".compile": represents the compile time of the program. +.. literalinclude:: report-example.json + :language: json - All of these metrics are optional. +Given how simple it is to make your own results and send them to LNT, +it is common to not use the LNT client application at all, and just have a +custom script run your tests and submit the data to the LNT server. Details +on how to do this are in :mod:`lnt.testing` .. _custom_testsuites: Index: docs/report-example.json =================================================================== --- /dev/null +++ docs/report-example.json @@ -0,0 +1,25 @@ +{ + "machine": { + "name": "LNT-AArch64-A53-O3__clang_DEV__aarch64", + "hardware": "HAL 9000" + }, + "run": { + "end_time": "2016-04-07 14:25:52", + "start_time": "2016-04-07 09:33:48", + "llvm_project_revision": "265649", + "compiler_version": "clang 4.0" + }, + "tests": [ + { + "name": "benchmark1", + "execution_time": [ 0.1056, 0.1055 ], + "hash": "49333a87d501b0aea2191830b66b5eec" + }, + { + "name": "benchmark2", + "compile_time": 13.12, + "execution_time": 0.2135, + "hash": "c321727e7e0dfef279548efdb8ab2ea6" + } + ] +} Index: lnt/lnttool/import_data.py =================================================================== --- lnt/lnttool/import_data.py +++ lnt/lnttool/import_data.py @@ -35,6 +35,7 @@ action="store_true", default=False) parser.add_option("", "--no-report", dest="no_report", action="store_true", default=False) + parser.add_option("-s", "--testsuite", default='nts') (opts, args) = parser.parse_args(args) if len(args) < 2: @@ -54,8 +55,8 @@ for file in args: result = lnt.util.ImportData.import_and_report( config, opts.database, db, file, - opts.format, opts.commit, opts.show_sample_count, - opts.no_email, opts.no_report) + opts.format, opts.testsuite, opts.commit, + opts.show_sample_count, opts.no_email, opts.no_report) success &= result.get('success', False) if opts.quiet: Index: lnt/lnttool/main.py =================================================================== --- lnt/lnttool/main.py +++ lnt/lnttool/main.py @@ -129,7 +129,7 @@ """check the format of an LNT test report file""" parser = OptionParser("%s [options] files" % name) - + parser.add_option("-s", "--testsuite", default='nts') (opts, args) = parser.parse_args(args) if len(args) > 1: parser.error("incorrect number of argments") @@ -147,7 +147,7 @@ db = lnt.server.db.v4db.V4DB('sqlite:///:memory:', lnt.server.config.Config.dummy_instance()) result = lnt.util.ImportData.import_and_report( - None, None, db, input, 'json', commit = True) + None, None, db, input, 'json', opts.testsuite, commit = True) lnt.util.ImportData.print_report_result(result, sys.stdout, sys.stderr, verbose = True) Index: lnt/lnttool/viewcomparison.py =================================================================== --- lnt/lnttool/viewcomparison.py +++ lnt/lnttool/viewcomparison.py @@ -57,6 +57,7 @@ parser.add_option("", "--dry-run", dest="dry_run", help="Do a dry run through the comparison. [%default]" " [%default]", action="store_true", default=False) + parser.add_option("-s", "--testsuite", default='nts') (opts, args) = parser.parse_args(args) if len(args) != 2: @@ -97,10 +98,10 @@ with contextlib.closing(config.get_database('default')) as db: import_and_report( config, 'default', db, report_a_path, - '', commit=True) + '', opts.testsuite, commit=True) import_and_report( config, 'default', db, report_b_path, - '', commit=True) + '', opts.testsuite, commit=True) # Dispatch another thread to start the webbrowser. 
     comparison_url = '%s/v4/nts/2?compare_to=1' % (url,)
Index: lnt/server/db/migrations/upgrade_11_to_12.py
===================================================================
--- /dev/null
+++ lnt/server/db/migrations/upgrade_11_to_12.py
@@ -0,0 +1,32 @@
+# Rename 'name' field in machine parameters to 'hostname' to avoid name clash
+# in import/export file format.
+import sqlalchemy
+import json
+
+def update_testsuite(engine, session, db_key_name):
+    class Machine(object):
+        pass
+
+    meta = sqlalchemy.MetaData(bind=engine)
+
+    machine_table = sqlalchemy.Table("%s_Machine" % db_key_name, meta, autoload=True)
+    sqlalchemy.orm.mapper(Machine, machine_table)
+
+    all_machines = session.query(Machine)
+    for machine in all_machines:
+        info = dict(json.loads(machine.Parameters))
+        name = info.pop('name', None)
+        if name is not None:
+            info['hostname'] = name
+        machine.Parameters = json.dumps(sorted(info.items()))
+
+    session.flush()
+
+def upgrade(engine):
+    # Create a session.
+    session = sqlalchemy.orm.sessionmaker(engine)()
+
+    update_testsuite(engine, session, 'NT')
+    update_testsuite(engine, session, 'Compile')
+
+    session.commit()
Index: lnt/server/db/testsuitedb.py
===================================================================
--- lnt/server/db/testsuitedb.py
+++ lnt/server/db/testsuitedb.py
@@ -675,20 +675,23 @@
         #
         # FIXME: This feels inelegant, can't SA help us out here?
         query = self.query(self.Machine).\
-            filter(self.Machine.name == machine_data['Name'])
-        machine = self.Machine(machine_data['Name'])
-        machine_parameters = machine_data['Info'].copy()
+            filter(self.Machine.name == machine_data['name'])
+        machine = self.Machine(machine_data['name'])
+        machine_parameters = machine_data.copy()
+        machine_parameters.pop('name')
+        # Ignore incoming ids; we will create our own.
+        # TODO: Add some API/result so we can send a warning back to the user
+        # that we ignore the id.
+        machine_parameters.pop('id', None)
 
         # First, extract all of the specified machine fields.
         for item in self.machine_fields:
-            if item.info_key in machine_parameters:
-                value = machine_parameters.pop(item.info_key)
-            else:
-                # For now, insert empty values for any missing fields. We don't
-                # want to insert NULLs, so we should probably allow the test
-                # suite to define defaults.
-                value = ''
+            # For now, insert empty values for any missing fields. We don't
+            # want to insert NULLs, so we should probably allow the test
+            # suite to define defaults.
+            default_value = ''
+            value = machine_parameters.pop(item.name, default_value)
 
             query = query.filter(item.column == value)
             machine.set_field(item, value)
 
@@ -727,13 +730,12 @@
 
         # First, extract all of the specified order fields.
         for item in self.order_fields:
-            if item.info_key in run_parameters:
-                value = run_parameters.pop(item.info_key)
-            else:
+            value = run_parameters.pop(item.name, None)
+            if value is None:
                 # We require that all of the order fields be present.
                 raise ValueError,"""\
supplied run is missing required run parameter: %r""" % (
-                    item.info_key)
+                    item.name)
 
             query = query.filter(item.column == value)
             order.set_field(item, value)
@@ -783,17 +785,18 @@
         """
 
         # Extra the run parameters that define the order.
-        run_parameters = run_data['Info'].copy()
-
-        # The tag has already been used to dispatch to the appropriate database.
-        run_parameters.pop('tag')
+        run_parameters = run_data.copy()
+        # Ignore incoming ids; we will create our own
+        run_parameters.pop('id', None)
 
         # Find the order record.
         order,inserted = self._getOrCreateOrder(run_parameters)
 
-        start_time = datetime.datetime.strptime(run_data['Start Time'],
+        start_time = datetime.datetime.strptime(run_data['start_time'],
                                                 "%Y-%m-%d %H:%M:%S")
-        end_time = datetime.datetime.strptime(run_data['End Time'],
+        end_time = datetime.datetime.strptime(run_data['end_time'],
                                               "%Y-%m-%d %H:%M:%S")
+        run_parameters.pop('start_time')
+        run_parameters.pop('end_time')
 
         # Convert the rundata into a run record. As with Machines, we construct
         # the query to look for any existingrun at the same time as we build up
@@ -809,14 +812,12 @@
 
         # First, extract all of the specified run fields.
         for item in self.run_fields:
-            if item.info_key in run_parameters:
-                value = run_parameters.pop(item.info_key)
-            else:
-                # For now, insert empty values for any missing fields. We don't
-                # want to insert NULLs, so we should probably allow the test
-                # suite to define defaults.
-                value = ''
+            # For now, insert empty values for any missing fields. We don't
+            # want to insert NULLs, so we should probably allow the test
+            # suite to define defaults.
+            default_value = ''
+            value = run_parameters.pop(item.name, default_value)
 
             query = query.filter(item.column == value)
             run.set_field(item, value)
@@ -833,86 +834,44 @@
 
         return run,True
 
-    def _importSampleValues(self, tests_data, run, tag, commit, config):
-        # We now need to transform the old schema data (composite samples split
-        # into multiple tests with mangling) into the V4DB format where each
-        # sample is a complete record.
-        tag_dot = "%s." % tag
-        tag_dot_len = len(tag_dot)
-
+    def _importSampleValues(self, tests_data, run, commit, config):
         # Load a map of all the tests, which we will extend when we find tests
         # that need to be added.
         test_cache = dict((test.name, test)
                           for test in self.query(self.Test))
 
-        # First, we aggregate all of the samples by test name. The schema allows
-        # reporting multiple values for a test in two ways, one by multiple
-        # samples and the other by multiple test entries with the same test
-        # name. We need to handle both.
-        tests_values = {}
+        profiles = dict()
+        field_dict = dict([(f.name, f) for f in self.sample_fields])
         for test_data in tests_data:
-            if test_data['Info']:
-                raise ValueError,"""\
test parameter sets are not supported by V4DB databases"""
-
-            name = test_data['Name']
-            if not name.startswith(tag_dot):
-                raise ValueError,"""\
test %r is misnamed for reporting under schema %r""" % (
-                    name, tag)
-            name = name[tag_dot_len:]
-
-            # Add all the values.
-            values = tests_values.get(name)
-            if values is None:
-                tests_values[name] = values = []
-
-            values.extend(test_data['Data'])
-
-        # Next, build a map of test name to sample values, by scanning all the
-        # tests. This is complicated by the interchange's support of multiple
-        # values, which we cannot properly aggregate. We handle this by keying
-        # off of the test name and the sample index.
-        sample_records = {}
-        profiles = {}
-        for name,test_samples in tests_values.items():
-            # Map this reported test name into a test name and a sample field.
-            #
-            # FIXME: This is really slow.
-            if name.endswith('.profile'):
-                test_name = name[:-len('.profile')]
-                sample_field = 'profile'
-            else:
-                for item in self.sample_fields:
-                    if name.endswith(item.info_key):
-                        test_name = name[:-len(item.info_key)]
-                        sample_field = item
-                        break
-                else:
-                    # Disallow tests which do not map to a sample field.
-                    raise ValueError,"""\
test %r does not map to a sample field in the reported suite""" % (
-                        name)
-
-            # Get or create the test.
-            test = test_cache.get(test_name)
+            name = test_data['name']
+            test = test_cache.get(name)
             if test is None:
-                test_cache[test_name] = test = self.Test(test_name)
+                test = self.Test(test_data['name'])
+                test_cache[name] = test
                 self.add(test)
 
-            for i, value in enumerate(test_samples):
-                record_key = (test_name, i)
-                sample = sample_records.get(record_key)
-                if sample is None:
-                    sample_records[record_key] = sample = self.Sample(run, test)
+            samples = []
+            for key, values in test_data.items():
+                if key == 'name':
+                    continue
+                field = field_dict.get(key)
+                if field is None and key != 'profile':
+                    raise ValueError, \
+                        "test %r metric %r does not map to a sample field in the reported suite" % \
+                        (name, key)
+
+                if not isinstance(values, list):
+                    values = [values]
+                while len(samples) < len(values):
+                    sample = self.Sample(run, test)
                     self.add(sample)
-
-                if sample_field != 'profile':
-                    sample.set_field(sample_field, value)
-                else:
-                    sample.profile = profiles.get(hash(value),
-                                                  self.Profile(value, config,
-                                                               test_name))
+                    samples.append(sample)
+                for sample, value in zip(samples, values):
+                    if key == 'profile':
+                        sample.profile = profiles.get(hash(value),
+                                                      self.Profile(value, config, name))
+                    else:
+                        sample.set_field(field, value)
 
     def importDataFromDict(self, data, commit, config=None):
         """
@@ -926,20 +885,17 @@
         """
 
         # Construct the machine entry.
-        machine,inserted = self._getOrCreateMachine(data['Machine'])
+        machine,inserted = self._getOrCreateMachine(data['machine'])
 
         # Construct the run entry.
-        run,inserted = self._getOrCreateRun(data['Run'], machine)
+        run,inserted = self._getOrCreateRun(data['run'], machine)
 
-        # Get the schema tag.
-        tag = data['Run']['Info']['tag']
-
         # If we didn't construct a new run, this is a duplicate
         # submission. Return the prior Run.
         if not inserted:
             return False, run
 
-        self._importSampleValues(data['Tests'], run, tag, commit, config)
+        self._importSampleValues(data['tests'], run, commit, config)
 
         return True, run
 
Index: lnt/server/db/v4db.py
===================================================================
--- lnt/server/db/v4db.py
+++ lnt/server/db/v4db.py
@@ -178,17 +178,16 @@
         return sum([ts.query(ts.Test).count()
                     for ts in self.testsuite.values()])
 
-    def importDataFromDict(self, data, commit, config=None):
+    def importDataFromDict(self, data, commit, testsuite_schema, config=None):
         # Select the database to import into.
-        #
-        # FIXME: Promote this to a top-level field in the data.
-        db_name = data['Run']['Info'].get('tag')
-        if db_name is None:
-            raise ValueError, "unknown database target (no tag field)"
+        data_schema = data.get('schema')
+        if data_schema is not None and data_schema != testsuite_schema:
+            raise ValueError, "Tried to import '%s' data into schema '%s'" % \
+                (data_schema, testsuite_schema)
 
-        db = self.testsuite.get(db_name)
+        db = self.testsuite.get(testsuite_schema)
         if db is None:
             raise ValueError, "test suite %r not present in this database!" % (
-                db_name)
+                testsuite_schema)
 
         return db.importDataFromDict(data, commit, config)
Index: lnt/server/ui/views.py
===================================================================
--- lnt/server/ui/views.py
+++ lnt/server/ui/views.py
@@ -94,9 +94,7 @@
 
 ###
 # Database Actions
-
-@db_route('/submitRun', only_v3=False, methods=('GET', 'POST'))
-def submit_run():
+def _do_submit():
     if request.method == 'GET':
         return render_template("submit_run.html")
 
@@ -150,7 +148,8 @@
     # should at least reject overly large inputs.
     result = lnt.util.ImportData.import_and_report(
-        current_app.old_config, g.db_name, db, path, '', commit)
+        current_app.old_config, g.db_name, db, path, '',
+        ts_name=g.testsuite_name, commit=commit)
 
     # It is nice to have a full URL to the run, so fixup the request URL
     # here were we know more about the flask instance.
@@ -160,6 +159,16 @@
 
     return flask.jsonify(**result)
 
+@db_route('/submitRun', only_v3=False, methods=('GET', 'POST'))
+def submit_run():
+    """Compatibility url that hardcodes testsuite to 'nts'"""
+    g.testsuite_name = 'nts'
+    return _do_submit()
+
+
+@v4_route('/submitRun', methods=('GET', 'POST'))
+def submit_run_ts():
+    return _do_submit()
 
 ###
 # V4 Schema Viewer
Index: lnt/testing/__init__.py
===================================================================
--- lnt/testing/__init__.py
+++ lnt/testing/__init__.py
@@ -8,6 +8,7 @@
 
 import datetime
 import re
+import logging
 
 try:
     import json
@@ -36,7 +37,7 @@
         self.machine = machine
         self.run = run
         self.tests = list(tests)
-        self.report_version = current_version
+        self.report_version = '1'
         self.check()
 
     def check(self):
@@ -107,7 +108,8 @@
                          for key,value in info.items())
         if '__report_version__' in self.info:
             raise ValueError("'__report_version__' key is reserved")
-        self.info['__report_version__'] = str(current_version)
+        # TODO: Convert to version 2
+        self.info['__report_version__'] = '1'
 
     def update_endtime(self, end_time=None):
         if end_time is None:
@@ -176,7 +178,26 @@
 #
 # Version 1 -- 2012-04-12: run_order was changed to not be padded, and allow
 # non-integral values.
-current_version = 1
+#
+# Version 2 -- 2017-06: Revamped json format
+# - Directly uses lnt names (no 'info_key' names anymore)
+# - Flatten Machine.Info and Run.Info into the Machine and Run records
+# - One record for each test (not one record for test+metric) with one entry
+#   for each metric.
+def get_format_version(data):
+    # Older versions had a Run.Info.__report_version__ field
+    run = data.get('Run')
+    if run is not None:
+        info = run.get('Info')
+        if info is not None:
+            report_version = info.get('__report_version__', '0')
+            return int(report_version)
+    # Check for signs of version 2 submissions
+    if data.get('run') and data.get('tests') and not data.get('Run') and \
+            not data.get('Machine') and not data.get('Tests'):
+        return 2
+    raise ValueError("Could not determine format version")
+
 
 def upgrade_0_to_1(data):
     # We recompute the run_order here if it looks like this run_order was
@@ -222,24 +243,168 @@
     if m:
         run_info['run_order'] = run_info['inferred_run_order'] = \
             run_info['cc_src_tag'] = m.group(1)
-
-def upgrade_report(data):
-    # Get the report version.
-    report_version = int(data['Run']['Info'].get('__report_version__', 0))
-
-    # Check if the report is current.
-    if report_version == current_version:
-        return data
-
-    # Check if the version is out-of-range.
-    if report_version > current_version:
-        raise ValueError("unknown report version: %r" % (report_version,))
-
-    # Otherwise, we need to upgrade it.
-    for version in range(report_version, current_version):
-        upgrade_method = globals().get('upgrade_%d_to_%d' % (
-            version, version+1))
-        upgrade_method(data)
-        data['Run']['Info']['__report_version__'] = str(version + 1)
+    data['Run']['Info']['__report_version__'] = "1"
+    return data
+
+
+# Upgrading from version 1 to version 2 needs some schema in place
+class _UpgradeSchema(object):
+    def __init__(self, metric_rename, machine_param_rename, run_param_rename):
+        self.metric_rename = metric_rename
+        self.machine_param_rename = machine_param_rename
+        self.run_param_rename = run_param_rename
+
+_nts_upgrade = _UpgradeSchema(
+    metric_rename={
+        '.code_size': 'code_size',
+        '.compile': 'compile_time',
+        '.compile.status': 'compile_status',
+        '.exec': 'execution_time',
+        '.exec.status': 'execution_status',
+        '.hash': 'hash',
+        '.hash.status': 'hash_status',
+        '.mem': 'mem_bytes',
+        '.score': 'score',
+    }, machine_param_rename = {
+        'name': 'hostname', # Avoid name clash with actual machine name.
+    }, run_param_rename = {
+        'run_order': 'llvm_project_revision',
+    }
+)
+_compile_upgrade = _UpgradeSchema(
+    metric_rename={
+        '.mem.bytes': 'mem_bytes',
+        '.mem.status': 'mem_status',
+        '.size.bytes': 'size_bytes',
+        '.size.status': 'size_status',
+        '.sys.status': 'sys_status',
+        '.sys.time': 'sys_time',
+        '.user.status': 'user_status',
+        '.user.time': 'user_time',
+        '.wall.status': 'wall_status',
+        '.wall.time': 'wall_time',
+    }, machine_param_rename = {
+        'hw.model': 'hardware',
+        'kern.version': 'os_version',
+    }, run_param_rename = {
+        'run_order': 'llvm_project_revision',
+    }
+)
+_upgrades = {
+    'nts': _nts_upgrade,
+    'compile': _compile_upgrade
+}
+
+def upgrade_1_to_2(data, ts_name):
+    result = dict()
+
+    # Pull version and database schema to toplevel
+    result['format_version'] = '2'
+    report_version = data['Run']['Info'].pop('__report_version__', '1')
+    # We should not be in upgrade_1_to_2 for other versions
+    assert(report_version == '1')
+    tag = data['Run']['Info'].pop('tag', None)
+    if tag is not None and tag != ts_name:
+        raise ValueError("Importing '%s' data into '%s' testsuite" %
+                         (tag, ts_name))
+
+    upgrade = _upgrades.get(tag)
+    if upgrade is None:
+        logging.warn("No upgrade schema known for '%s'\n" % tag)
+        upgrade = _UpgradeSchema({}, {}, {})
+
+    # Flatten Machine.Info into machine
+    Machine = data['Machine']
+    result_machine = {'name': Machine['Name']}
+    for key, value in Machine['Info'].items():
+        newname = upgrade.machine_param_rename.get(key, key)
+        if newname in result_machine:
+            raise ValueError("Name clash for machine info '%s'" % newname)
+        result_machine[newname] = value
+    result['machine'] = result_machine
+
+    # Flatten Result.Info into result
+    Run = data['Run']
+    result_run = {
+        'end_time': Run['End Time'],
+        'start_time': Run['Start Time'],
+    }
+    for key, value in Run['Info'].items():
+        newname = upgrade.run_param_rename.get(key, key)
+        if newname in result_run:
+            raise ValueError("Name clash for run info '%s'" % newname)
+        result_run[newname] = value
+    result['run'] = result_run
+
+    # Merge tests
+    result_tests = list()
+    result_tests_dict = dict()
+    Tests = data['Tests']
+    for test in Tests:
+        test_Name = test['Name']
+
+        # Old testnames always started with 'tag.', split that part.
+        if len(test['Info']) != 0:
+            # The Info record didn't work with the v4 database anyway...
+            raise ValueError("Tests/%s: cannot convert non-empty Info record" %
+                             test_Name)
+        tag_dot = '%s.' % ts_name
+        if not test_Name.startswith(tag_dot):
+            raise ValueError("Tests/%s: test name does not start with '%s'" %
+                             (test_Name, tag_dot))
+        name_metric = test_Name[len(tag_dot):]
+
+        found_metric = False
+        for oldname, newname in upgrade.metric_rename.items():
+            assert(oldname.startswith('.'))
+            if name_metric.endswith(oldname):
+                name = name_metric[:-len(oldname)]
+                metric = newname
+                found_metric = True
+                break
+        if not found_metric:
+            # Fallback logic for unknown metrics: Assume they are '.xxxx'
+            name, dot, metric = name_metric.rpartition('.')
+            if dot != '.':
+                raise ValueError("Tests/%s: test name does not end in .metric" %
+                                 test_Name)
+            logging.warning("Found unknown metric '%s'" % metric)
+            upgrade.metric_rename['.'+metric] = '.'+metric
+
+        result_test = result_tests_dict.get(name)
+        if result_test is None:
+            result_test = {'name': name}
+            result_tests_dict[name] = result_test
+            result_tests.append(result_test)
+
+        data = test['Data']
+        if metric not in result_test:
+            # Do not construct a list for the very common case of just a
+            # single datum.
+            if len(data) == 1:
+                data = data[0]
+            result_test[metric] = data
+        elif len(data) > 0:
+            # Transform the test data into a list
+            if not isinstance(result_test[metric], list):
+                result_test[metric] = [ result_test[metric] ]
+            result_test[metric] += data
+
+    result['tests'] = result_tests
+    return result
+
+def upgrade_report(data, ts_name):
+    # Get the report version. V2 has it at the top level, older version
+    # in Run.Info.
+    format_version = get_format_version(data)
+
+    if format_version==0:
+        data = upgrade_0_to_1(data)
+        format_version=1
+    if format_version==1:
+        data = upgrade_1_to_2(data, ts_name)
+        format_version=2
+    assert(format_version==2)
+    return data
 
 __all__ = ['Report', 'Machine', 'Run', 'TestSamples']
Index: lnt/tests/builtintest.py
===================================================================
--- lnt/tests/builtintest.py
+++ lnt/tests/builtintest.py
@@ -46,7 +46,7 @@
         if output_stream is not sys.stdout:
             output_stream.close()
 
-    def submit(self, report_path, config, commit=True):
+    def submit(self, report_path, config, ts_name=None, commit=True):
         """Submit the results file to the server.  If no server was specified,
         use a local mock server.
 
@@ -59,6 +59,7 @@
         """
         assert os.path.exists(report_path), "Failed report should have" \
             " never gotten here!"
+        assert ts_name is not None
 
         server_report = None
         if config.submit_url is not None:
@@ -76,7 +77,7 @@
                 db = lnt.server.db.v4db.V4DB("sqlite:///:memory:",
                                              server_config.Config.dummy_instance())
                 server_report = ImportData.import_and_report(
-                    None, None, db, report_path, 'json', commit)
+                    None, None, db, report_path, 'json', ts_name, commit)
 
         assert server_report is not None, "Results were not submitted."
Index: lnt/tests/compile.py
===================================================================
--- lnt/tests/compile.py
+++ lnt/tests/compile.py
@@ -1075,7 +1075,7 @@
         if opts.output is not None:
             self.print_report(report, opts.output)
 
-        server_report = self.submit(lnt_report_path, opts)
+        server_report = self.submit(lnt_report_path, opts, ts_name='compile')
 
         return server_report
 
Index: lnt/tests/nt.py
===================================================================
--- lnt/tests/nt.py
+++ lnt/tests/nt.py
@@ -2013,7 +2013,7 @@
         db = lnt.server.db.v4db.V4DB("sqlite:///:memory:",
                                      lnt.server.config.Config.dummy_instance())
         result = lnt.util.ImportData.import_and_report(
-            None, None, db, report_path, 'json', commit)
+            None, None, db, report_path, 'json', 'nts', commit)
 
         if result is None:
             fatal("Results were not obtained from submission.")
Index: lnt/tests/test_suite.py
===================================================================
--- lnt/tests/test_suite.py
+++ lnt/tests/test_suite.py
@@ -522,7 +522,7 @@
             with open(csv_report_path, 'w') as fd:
                 fd.write(str_template)
 
-        return self.submit(report_path, self.opts, commit=True)
+        return self.submit(report_path, self.opts, 'nts', commit=True)
 
     def _configure_if_needed(self):
         mkdir_p(self._base_path)
Index: lnt/util/ImportData.py
===================================================================
--- lnt/util/ImportData.py
+++ lnt/util/ImportData.py
@@ -7,11 +7,11 @@
 from lnt.util import NTEmailReport
 from lnt.util import async_ops
 
-def import_and_report(config, db_name, db, file, format, commit=False,
-                      show_sample_count=False, disable_email=False,
-                      disable_report=False):
+def import_and_report(config, db_name, db, file, format, ts_name,
+                      commit=False, show_sample_count=False,
+                      disable_email=False, disable_report=False):
     """
-    import_and_report(config, db_name, db, file, format,
+    import_and_report(config, db_name, db, file, format, ts_name,
                       [commit], [show_sample_count],
                       [disable_email]) -> ... object ...
 
@@ -49,7 +49,7 @@
     result['load_time'] = time.time() - startTime
 
     # Auto-upgrade the data, if necessary.
-    lnt.testing.upgrade_report(data)
+    data = lnt.testing.upgrade_report(data, ts_name)
 
     # Find the database config, if we have a configuration object.
     if config:
@@ -72,7 +72,8 @@
 
     importStartTime = time.time()
     try:
-        success, run = db.importDataFromDict(data, commit, config=db_config)
+        success, run = db.importDataFromDict(data, commit, ts_name,
+                                             config=db_config)
     except KeyboardInterrupt:
         raise
     except:
@@ -110,7 +111,6 @@
 
     result['committed'] = commit
     result['run_id'] = run.id
-    ts_name = data['Run']['Info'].get('tag')
     if commit:
         db.commit()
         if db_config:
@@ -141,9 +141,9 @@
 
         # Perform the shadow import.
         shadow_result = import_and_report(config, shadow_name,
-                                          shadow_db, file, format, commit,
-                                          show_sample_count, disable_email,
-                                          disable_report)
+                                          shadow_db, file, format, ts_name,
+                                          commit, show_sample_count,
+                                          disable_email, disable_report)
 
         # Append the shadow result to the result.
         result['shadow_result'] = shadow_result
Index: lnt/util/ServerUtil.py
===================================================================
--- lnt/util/ServerUtil.py
+++ lnt/util/ServerUtil.py
@@ -50,7 +50,8 @@
     if db is None:
         raise ValueError("no default database in instance: %r" % (path,))
     return lnt.util.ImportData.import_and_report(
-        config, db_name, db, file, format='', commit=commit)
+        config, db_name, db, file, format='', ts_name='nts',
+        commit=commit)
 
 
 def submitFile(url, file, commit, verbose):
Index: tests/lnttool/submit.py
===================================================================
--- tests/lnttool/submit.py
+++ tests/lnttool/submit.py
@@ -29,3 +29,25 @@
 # CHECK-VERBOSE: ----------------
 # CHECK-VERBOSE: PASS : 10
 # CHECK-VERBOSE: Results available at: http://localhost:9091/db_default/v4/nts/3
+
+# RUN: rm -rf %t.instance
+# RUN: python %{shared_inputs}/create_temp_instance.py \
+# RUN:     %s %{shared_inputs}/SmallInstance %t.instance
+# RUN: %{shared_inputs}/server_wrapper.sh %t.instance 9091 \
+# RUN:     lnt submit "http://localhost:9091/db_default/submitRun" --commit=1 \
+# RUN:     %{src_root}/docs/report-example.json -v | \
+# RUN:     FileCheck %s --check-prefix=CHECK-NEWFORMAT
+#
+# CHECK-NEWFORMAT: Import succeeded.
+# CHECK-NEWFORMAT: --- Tested: 10 tests --
+#
+# CHECK-NEWFORMAT: Imported Data
+# CHECK-NEWFORMAT: -------------
+# CHECK-NEWFORMAT: Added Machines: 1
+# CHECK-NEWFORMAT: Added Runs    : 1
+# CHECK-NEWFORMAT: Added Tests   : 2
+#
+# CHECK-NEWFORMAT: Results
+# CHECK-NEWFORMAT: ----------------
+# CHECK-NEWFORMAT: PASS : 10
+# CHECK-NEWFORMAT: Results available at: http://localhost:9091/db_default/v4/nts/3
Index: tests/server/db/ImportV4TestSuiteInstance.py
===================================================================
--- tests/server/db/ImportV4TestSuiteInstance.py
+++ tests/server/db/ImportV4TestSuiteInstance.py
@@ -98,10 +98,8 @@
 assert run_b.imported_from.endswith("sample-b-small.plist")
 assert run_a.start_time == datetime.datetime(2009, 11, 17, 2, 12, 25)
 assert run_a.end_time == datetime.datetime(2009, 11, 17, 3, 44, 48)
-assert tuple(sorted(run_a.parameters.items())) == \
-    (('__report_version__', '1'), ('inferred_run_order', '1'))
-assert tuple(sorted(run_b.parameters.items())) == \
-    (('__report_version__', '1'), ('inferred_run_order', '2'))
+assert sorted(run_a.parameters.items()) == [('inferred_run_order', '1')]
+assert sorted(run_b.parameters.items()) == [('inferred_run_order', '2')]
 
 # Validate the samples.
 samples = list(ts.query(ts.Sample))
Index: tests/server/db/search.py
===================================================================
--- tests/server/db/search.py
+++ tests/server/db/search.py
@@ -44,7 +44,7 @@
 
         result = lnt.util.ImportData.import_and_report(
             None, 'default', self.db, f.name,
-            '', True, False,
+            '', 'nts', True, False,
             True, True)
         success &= result.get('success', False)
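
For reference, a minimal sketch of the custom submission script that the reworked documentation above describes: it builds a version-2 report shaped like docs/report-example.json and hands it to the same `lnt submit` invocation shown in docs/importing_data.rst. The server URL, machine name, revision, and metric values are illustrative placeholders, not values taken from this patch.

# Sketch of a custom script producing a version-2 LNT report and submitting it.
# Assumes the `lnt` client is installed; the URL and all values are placeholders.
import datetime
import json
import subprocess

now = datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")
report = {
    "machine": {"name": "example-machine", "hardware": "example-hw"},
    "run": {
        "start_time": now,
        "end_time": now,
        # Ordering field required by the 'nts' schema.
        "llvm_project_revision": "265649",
    },
    "tests": [
        {"name": "benchmark1", "execution_time": [0.1056, 0.1055]},
    ],
}

# Write the report to disk in the format documented above.
with open("report.json", "w") as f:
    json.dump(report, f)

# Same invocation as in docs/importing_data.rst; this example URL goes through
# the compatibility route that hardcodes the 'nts' suite.
subprocess.check_call(["lnt", "submit",
                       "http://mylnt.com/default/submitRun",
                       "--commit=1", "report.json"])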