Index: llvm/trunk/utils/lit/lit/Test.py
===================================================================
--- llvm/trunk/utils/lit/lit/Test.py
+++ llvm/trunk/utils/lit/lit/Test.py
@@ -135,6 +135,8 @@
        self.elapsed = elapsed
        # The metrics reported by this test.
        self.metrics = {}
+        # The micro-test results reported by this test.
+        self.microResults = {}

    def addMetric(self, name, value):
        """
@@ -153,6 +155,24 @@
            raise TypeError("unexpected metric value: %r" % (value,))
        self.metrics[name] = value

+    def addMicroResult(self, name, microResult):
+        """
+        addMicroResult(name, microResult)
+
+        Attach a micro-test result to the test result, under the given name.
+        It is an error to attempt to attach a micro-test with the same name
+        multiple times.
+
+        Each micro-test result must be an instance of the Result class.
+        """
+        if name in self.microResults:
+            raise ValueError("Result already includes microResult for %r" % (
+                name,))
+        if not isinstance(microResult, Result):
+            raise TypeError("unexpected MicroResult value %r" % (microResult,))
+        self.microResults[name] = microResult
+
+
# Test classes.

class TestSuite:
Index: llvm/trunk/utils/lit/lit/main.py
===================================================================
--- llvm/trunk/utils/lit/lit/main.py
+++ llvm/trunk/utils/lit/lit/main.py
@@ -81,6 +81,18 @@
                print('%s: %s ' % (metric_name, value.format()))
            print("*" * 10)

+        # Report micro-tests, if present.
+        if test.result.microResults:
+            items = sorted(test.result.microResults.items())
+            for micro_test_name, micro_test in items:
+                print("%s MICRO-TEST: %s" %
+                      ('*' * 3, micro_test_name))
+
+                if micro_test.metrics:
+                    sorted_metrics = sorted(micro_test.metrics.items())
+                    for metric_name, value in sorted_metrics:
+                        print('    %s: %s ' % (metric_name, value.format()))
+
        # Ensure the output is flushed.
        sys.stdout.flush()

@@ -113,6 +125,25 @@
        for key, value in test.result.metrics.items():
            metrics_data[key] = value.todata()

+        # Report micro-tests separately, if present.
+        if test.result.microResults:
+            for key, micro_test in test.result.microResults.items():
+                # Expand the parent test name with the micro-test name.
+                parent_name = test.getFullName()
+                micro_full_name = parent_name + ':' + key
+
+                micro_test_data = {
+                    'name' : micro_full_name,
+                    'code' : micro_test.code.name,
+                    'output' : micro_test.output,
+                    'elapsed' : micro_test.elapsed }
+                if micro_test.metrics:
+                    micro_test_data['metrics'] = micro_metrics_data = {}
+                    for key, value in micro_test.metrics.items():
+                        micro_metrics_data[key] = value.todata()
+
+                tests_data.append(micro_test_data)
+
        tests_data.append(test_data)

    # Write the output.
Index: llvm/trunk/utils/lit/tests/Inputs/test-data-micro/dummy_format.py
===================================================================
--- llvm/trunk/utils/lit/tests/Inputs/test-data-micro/dummy_format.py
+++ llvm/trunk/utils/lit/tests/Inputs/test-data-micro/dummy_format.py
@@ -0,0 +1,52 @@
+import os
+try:
+    import ConfigParser
+except ImportError:
+    import configparser as ConfigParser
+
+import lit.formats
+import lit.Test
+
+class DummyFormat(lit.formats.FileBasedTest):
+    def execute(self, test, lit_config):
+        # In this dummy format, expect that each test file is actually just a
+        # .ini format dump of the results to report.
+
+        source_path = test.getSourcePath()
+
+        cfg = ConfigParser.ConfigParser()
+        cfg.read(source_path)
+
+        # Create the basic test result.
+        result_code = cfg.get('global', 'result_code')
+        result_output = cfg.get('global', 'result_output')
+        result = lit.Test.Result(getattr(lit.Test, result_code),
+                                 result_output)
+
+        # Load additional metrics.
+        for key, value_str in cfg.items('results'):
+            value = eval(value_str)
+            if isinstance(value, int):
+                metric = lit.Test.IntMetricValue(value)
+            elif isinstance(value, float):
+                metric = lit.Test.RealMetricValue(value)
+            else:
+                raise RuntimeError("unsupported result type")
+            result.addMetric(key, metric)
+
+        # Create the micro-test results.
+        for key, micro_name in cfg.items('micro-tests'):
+            micro_result = lit.Test.Result(getattr(lit.Test, result_code), '')
+            # Load additional metrics for the micro-test.
+            for key, value_str in cfg.items('micro-results'):
+                value = eval(value_str)
+                if isinstance(value, int):
+                    metric = lit.Test.IntMetricValue(value)
+                elif isinstance(value, float):
+                    metric = lit.Test.RealMetricValue(value)
+                else:
+                    raise RuntimeError("unsupported result type")
+                micro_result.addMetric(key, metric)
+            result.addMicroResult(micro_name, micro_result)
+
+        return result
Index: llvm/trunk/utils/lit/tests/Inputs/test-data-micro/lit.cfg
===================================================================
--- llvm/trunk/utils/lit/tests/Inputs/test-data-micro/lit.cfg
+++ llvm/trunk/utils/lit/tests/Inputs/test-data-micro/lit.cfg
@@ -0,0 +1,10 @@
+import site
+site.addsitedir(os.path.dirname(__file__))
+import dummy_format
+
+config.name = 'test-data-micro'
+config.suffixes = ['.ini']
+config.test_format = dummy_format.DummyFormat()
+config.test_source_root = None
+config.test_exec_root = None
+config.target_triple = None
Index: llvm/trunk/utils/lit/tests/Inputs/test-data-micro/micro-tests.ini
===================================================================
--- llvm/trunk/utils/lit/tests/Inputs/test-data-micro/micro-tests.ini
+++ llvm/trunk/utils/lit/tests/Inputs/test-data-micro/micro-tests.ini
@@ -0,0 +1,16 @@
+[global]
+result_code = PASS
+result_output = Test passed.
+
+[results]
+value0 = 1
+value1 = 2.3456
+
+[micro-tests]
+microtest0 = test0
+microtest1 = test1
+microtest2 = test2
+
+[micro-results]
+micro_value0 = 4
+micro_value1 = 1.3
Index: llvm/trunk/utils/lit/tests/test-data-micro.py
===================================================================
--- llvm/trunk/utils/lit/tests/test-data-micro.py
+++ llvm/trunk/utils/lit/tests/test-data-micro.py
@@ -0,0 +1,21 @@
+# Test features related to formats which support reporting additional test data
+# and multiple test results.
+
+# RUN: %{lit} -j 1 -v %{inputs}/test-data-micro | FileCheck %s
+
+# CHECK: -- Testing:
+
+# CHECK: PASS: test-data-micro :: micro-tests.ini
+# CHECK-NEXT: *** TEST 'test-data-micro :: micro-tests.ini' RESULTS ***
+# CHECK-NEXT: value0: 1
+# CHECK-NEXT: value1: 2.3456
+# CHECK-NEXT: ***
+# CHECK-NEXT: *** MICRO-TEST: test0
+# CHECK-NEXT: micro_value0: 4
+# CHECK-NEXT: micro_value1: 1.3
+# CHECK-NEXT: *** MICRO-TEST: test1
+# CHECK-NEXT: micro_value0: 4
+# CHECK-NEXT: micro_value1: 1.3
+# CHECK-NEXT: *** MICRO-TEST: test2
+# CHECK-NEXT: micro_value0: 4
+# CHECK-NEXT: micro_value1: 1.3
Index: llvm/trunk/utils/lit/tests/test-output-micro.py
===================================================================
--- llvm/trunk/utils/lit/tests/test-output-micro.py
+++ llvm/trunk/utils/lit/tests/test-output-micro.py
@@ -0,0 +1,51 @@
+# RUN: %{lit} -j 1 -v %{inputs}/test-data-micro --output %t.results.out
+# RUN: FileCheck < %t.results.out %s
+# RUN: rm %t.results.out
+
+
+# CHECK: {
+# CHECK: "__version__"
+# CHECK: "elapsed"
+# CHECK-NEXT: "tests": [
+# CHECK-NEXT: {
+# CHECK-NEXT: "code": "PASS",
+# CHECK-NEXT: "elapsed": null,
+# CHECK-NEXT: "metrics": {
+# CHECK-NEXT: "micro_value0": 4,
+# CHECK-NEXT: "micro_value1": 1.3
+# CHECK-NEXT: },
+# CHECK-NEXT: "name": "test-data-micro :: micro-tests.ini:test{{[0-2]}}",
+# CHECK-NEXT: "output": ""
+# CHECK-NEXT: },
+# CHECK-NEXT: {
+# CHECK-NEXT: "code": "PASS",
+# CHECK-NEXT: "elapsed": null,
+# CHECK-NEXT: "metrics": {
+# CHECK-NEXT: "micro_value0": 4,
+# CHECK-NEXT: "micro_value1": 1.3
+# CHECK-NEXT: },
+# CHECK-NEXT: "name": "test-data-micro :: micro-tests.ini:test{{[0-2]}}",
+# CHECK-NEXT: "output": ""
+# CHECK-NEXT: },
+# CHECK-NEXT: {
+# CHECK-NEXT: "code": "PASS",
+# CHECK-NEXT: "elapsed": null,
+# CHECK-NEXT: "metrics": {
+# CHECK-NEXT: "micro_value0": 4,
+# CHECK-NEXT: "micro_value1": 1.3
+# CHECK-NEXT: },
+# CHECK-NEXT: "name": "test-data-micro :: micro-tests.ini:test{{[0-2]}}",
+# CHECK-NEXT: "output": ""
+# CHECK-NEXT: },
+# CHECK-NEXT: {
+# CHECK-NEXT: "code": "PASS",
+# CHECK-NEXT: "elapsed": {{[0-9.]+}},
+# CHECK-NEXT: "metrics": {
+# CHECK-NEXT: "value0": 1,
+# CHECK-NEXT: "value1": 2.3456
+# CHECK-NEXT: },
+# CHECK-NEXT: "name": "test-data-micro :: micro-tests.ini",
+# CHECK-NEXT: "output": "Test passed."
+# CHECK-NEXT: }
+# CHECK-NEXT: ]
+# CHECK-NEXT: }
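
Note on usage: a custom lit test format would drive the new API roughly as sketched below. This is a minimal illustration, not part of the patch; the BenchFormat class name, the benchmark names, and the timing numbers are hypothetical, and only lit.Test.Result, addMetric, addMicroResult, and the metric value classes come from the code above.

    import lit.formats
    import lit.Test

    # Hypothetical example format: one parent test that runs two benchmarks
    # and reports each benchmark as a micro-test result.
    class BenchFormat(lit.formats.FileBasedTest):
        def execute(self, test, lit_config):
            # Overall (parent) test result, with its own metrics.
            result = lit.Test.Result(lit.Test.PASS, 'ran 2 benchmarks')
            result.addMetric('total_time', lit.Test.RealMetricValue(3.5))

            # One micro-test per benchmark; each is itself a Result and may
            # carry its own metrics.
            for name, seconds in [('bench0', 1.2), ('bench1', 2.3)]:
                micro = lit.Test.Result(lit.Test.PASS, '')
                micro.addMetric('time', lit.Test.RealMetricValue(seconds))
                result.addMicroResult(name, micro)

            return result

Each micro-test then appears on the console under "*** MICRO-TEST: <name>" and in the --output JSON as a separate entry named "<parent test>:<name>", which is what the two new tests above check.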