Index: lnt/tests/test_suite.py =================================================================== --- lnt/tests/test_suite.py +++ lnt/tests/test_suite.py @@ -186,6 +186,10 @@ help=("autosubmit the test result to the given server" " (or local instance) [%default]"), type=str, default=None) + group.add_option("", "--just-submit", dest="just_submit", + metavar="LITOUTPUT", + help="Just submit the specified lit result file. Do " + + "not run cmake and lit", type=str, default=None) group.add_option("", "--commit", dest="commit", help=("whether the autosubmit result should be committed " "[%default]"), @@ -233,6 +237,17 @@ else: parser.error("Expected no positional arguments (got: %r)" % (args,)) + if self.opts.just_submit is not None: + # Just parse the given lit result file. + self.start_time = timestamp() + with open(self.opts.just_submit) as f: data = json.load(f) + report = self._parse_lit_output(data) + report_file = tempfile.NamedTemporaryFile(suffix='.json', + delete=False) + report_file.write(report.render()) + report_file.close() + return self.submit(report_file.name, self.opts, commit=True) + for a in ['cross_compiling', 'cross_compiling_system_name', 'llvm_arch']: if getattr(opts, a): parser.error('option "%s" is not yet implemented!' % a) @@ -546,11 +561,6 @@ 'UNRESOLVED': lnt.testing.FAIL }[code] - def _test_failed_to_compile(self, raw_name, path): - # FIXME: Do we need to add ".exe" in windows? 
- name = raw_name.rsplit('.test', 1)[0] - return not os.path.exists(os.path.join(path, name)) - def _get_target_flags(self): return shlex.split(self.opts.cppflags + self.opts.cflags) @@ -558,7 +568,7 @@ return lnt.testing.util.compilers.get_cc_info(self.opts.cc, self._get_target_flags()) - def _parse_lit_output(self, path, data, only_test=False): + def _parse_lit_output(self, data, only_test=False): LIT_METRIC_TO_LNT = { 'compile_time': 'compile', 'exec_time': 'exec', @@ -586,7 +596,8 @@ for test_data in data['tests']: raw_name = test_data['name'].split(' :: ', 1)[1] name = 'nts.' + raw_name.rsplit('.test', 1)[0] - is_pass = self._is_pass_code(test_data['code']) + code = test_data['code'] + is_pass = self._is_pass_code(code) # If --single-result is given, exit based on --single-result-predicate if self.opts.single_result and \ @@ -614,17 +625,22 @@ test_info, LIT_METRIC_CONV_FN[k])) - if self._test_failed_to_compile(raw_name, path): - test_samples.append( - lnt.testing.TestSamples(name + '.compile.status', - [lnt.testing.FAIL], - test_info)) + if code != 'NOEXE': + compile_status = lnt.testing.PASS + else: + compile_status = lnt.testing.FAIL + test_samples.append( + lnt.testing.TestSamples(name + '.compile.status', + [compile_status], test_info)) - elif not is_pass: - test_samples.append( - lnt.testing.TestSamples(name + '.exec.status', - [self._get_lnt_code(test_data['code'])], - test_info)) + if is_pass: + exec_status = lnt.testing.PASS + else: + exec_status = lnt.testing.FAIL + test_samples.append( + lnt.testing.TestSamples(name + '.exec.status', + [self._get_lnt_code(code)], + test_info)) # Now import the profiles in parallel. 
if profiles_to_import: @@ -652,8 +668,9 @@ run_info = { 'tag': 'nts' } - run_info.update(self._get_cc_info()) - run_info['run_order'] = run_info['inferred_run_order'] + if self.opts.cc is not None: + run_info.update(self._get_cc_info()) + run_info['run_order'] = run_info['inferred_run_order'] if self.opts.run_order: run_info['run_order'] = self.opts.run_order Index: tests/runtest/test_suite.py =================================================================== --- tests/runtest/test_suite.py +++ tests/runtest/test_suite.py @@ -350,3 +350,21 @@ # CHECK-USE-PERF-ALL: --param profile=perf # CHECK-USE-PERF-ALL: Importing 1 profiles with # CHECK-USE-PERF-ALL: Profile /tmp/I/Do/Not/Exist.perf_data does not exist + +# Check --just-submit mode +# RUN: lnt runtest test-suite \ +# RUN: --just-submit=%S/Inputs/test-suite-cmake/fake-results.json \ +# RUN: --run-order=123 \ +# RUN: --verbose \ +# RUN: > %t.log 2> %t.err +# RUN: FileCheck --check-prefix CHECK-RESULTFILE --check-prefix CHECK-RESULTFILE-STDOUT < %t.log %s +# RUN: FileCheck --check-prefix CHECK-RESULTFILE --check-prefix CHECK-RESULTFILE-STDERR < %t.err %s +# Should not run any cmake/make/ninja/lit commands +# CHECK-RESULTFILE-NOT: cmake +# CHECK-RESULTFILE-NOT: make +# CHECK-RESULTFILE-NOT: ninja +# CHECK-RESULTFILE-NOT: lit +# CHECK-RESULTFILE-NOT: clang +# CHECK-RESULTFILE-STDERR: submitting result to dummy instance +# CHECK-RESULTFILE-STDOUT: Import succeeded. +# CHECK-RESULTFILE-STDOUT: PASS : 4