Index: zorg/buildbot/builders/LLDBBuilder.py
===================================================================
--- zorg/buildbot/builders/LLDBBuilder.py
+++ zorg/buildbot/builders/LLDBBuilder.py
@@ -11,6 +11,7 @@
 from zorg.buildbot.commands.LitTestCommand import LitTestCommand
 from zorg.buildbot.builders.Util import getVisualStudioEnvironment
 from zorg.buildbot.builders.Util import extractSlaveEnvironment
+from zorg.buildbot.commands.LldbTestCommand import LldbTestCommand
 
 # We *must* checkout at least Clang, LLVM, and LLDB. Once we add a step to run
 # tests (e.g. ninja check-lldb), we will also need to add a step for LLD, since
@@ -298,7 +299,7 @@
                                haltOnFailure=True,
                                workdir=llvm_builddir))
     # Test
-    f.addStep(LitTestCommand(name="test lldb",
+    f.addStep(LldbTestCommand(name="test lldb",
                              command=['nice', '-n', '10',
                                       'ninja',
                                       'check-lldb'],
Index: zorg/buildbot/commands/LldbTestCommand.py
===================================================================
--- zorg/buildbot/commands/LldbTestCommand.py
+++ zorg/buildbot/commands/LldbTestCommand.py
@@ -8,10 +8,15 @@
 from buildbot.process.buildstep import LogLineObserver
 from buildbot.steps.shell import Test
 
-class LitLogObserver(LogLineObserver):
+class LldbLogObserver(LogLineObserver):
   # Regular expressions for a regular test line.
   kTestLineRE = re.compile(r'(\w+): (.*) \(.*\)')
-
+  # Regular expression for the line that marks the start of the results block.
+  kTestResultRE = re.compile(r'^Failing Tests \((\d+)\)$')
+  # When True, subsequent lines are parsed as test results;
+  # when False, they are skipped.
+  parserActive = False
+
   # Regular expressions for verbose log start and stop markers. Verbose log
   # output always immediately follow a test.
   kTestVerboseLogStartRE = re.compile(r"""\*{4,80} TEST '(.*)' .*""")
@@ -21,6 +26,7 @@
   # step results.
   failingCodes = set(['FAIL', 'XPASS', 'KPASS', 'UNRESOLVED'])
 
+
   def __init__(self, maxLogs=None):
     LogLineObserver.__init__(self)
     self.resultCounts = {}
@@ -32,7 +38,7 @@
 
     # If non-null, a list of lines in the current log.
     self.activeVerboseLog = None
-
+
   def hadFailure(self):
     for code in self.failingCodes:
       if self.resultCounts.get(code):
@@ -49,15 +55,13 @@
   def testInfoFinished(self):
     # We have finished getting information for one test, handle it.
     code, name = self.lastTestResult
-
-    # If the test failed, add a log entry for it (unless we have reached the
-    # max).
-    if code in self.failingCodes and (self.maxLogs is None or
-                                      self.numLogs < self.maxLogs):
+
+    # Add a log entry for every reported test (unless we have reached the
+    # maximum number of logs).
+    if (self.maxLogs is None or self.numLogs < self.maxLogs):
       # If a verbose log was not provided, just add a one line description.
       if self.activeVerboseLog is None:
         self.activeVerboseLog = ['%s: %s' % (code, name)]
-
       # Add the log to the build status.
       # Make the test name short, the qualified test name is in the log anyway.
       # Otherwise, we run out of the allowed name length on some hosts.
@@ -72,6 +76,7 @@
     self.activeVerboseLog = None
 
   def outLineReceived(self, line):
+
     # If we are inside a verbose log, just accumulate lines until we reach the
     # stop marker.
     if self.activeVerboseLog is not None:
@@ -96,16 +101,24 @@
     # buildbot provided us a hook for when the log is done.
     if self.lastTestResult:
       self.testInfoFinished()
-
-    # Check for a new test status line.
-    m = self.kTestLineRE.match(line.strip())
-    if m:
-      # Remember the last test result and update the result counts.
-      self.lastTestResult = (code, name) = m.groups()
-      self.resultCounts[code] = self.resultCounts.get(code, 0) + 1
-      return
-
-class LitTestCommand(Test):
+
+    mResult = self.kTestResultRE.match(line)
+    if mResult:
+      self.parserActive = True
+      if self.maxLogs is None or self.maxLogs > int(mResult.group(1)):
+        self.maxLogs = int(mResult.group(1))
+
+
+    if self.parserActive:
+      # Check for a new test status line.
+      m = self.kTestLineRE.match(line.strip())
+      if m:
+        # Remember the last test result and update the result counts.
+        self.lastTestResult = (code, name) = m.groups()
+        self.resultCounts[code] = self.resultCounts.get(code, 0) + 1
+        return
+
+class LldbTestCommand(Test):
   resultNames = {'FAIL':'unexpected failures',
                  'PASS':'expected passes',
                  'XFAIL':'expected failures',
@@ -117,15 +130,15 @@
                  'REGRESSED':'runtime performance regression',
                  'IMPROVED':'runtime performance improvement',
                  'UNSUPPORTED':'unsupported tests'}
-
+
   def __init__(self, ignore=[], flaky=[], max_logs=20, *args, **kwargs):
     Test.__init__(self, *args, **kwargs)
     self.maxLogs = int(max_logs)
-    self.logObserver = LitLogObserver(self.maxLogs)
+    self.logObserver = LldbLogObserver(self.maxLogs)
     self.addFactoryArguments(max_logs=max_logs)
     self.addLogObserver('stdio', self.logObserver)
-
+
   def evaluateCommand(self, cmd):
     # Always report failure if the command itself failed.
     if cmd.rc != 0:
@@ -163,7 +176,7 @@
 
 class TestLogObserver(unittest.TestCase):
   def parse_log(self, text):
-    observer = LitLogObserver()
+    observer = LldbLogObserver()
     observer.step = StepProxy()
     for ln in text.split('\n'):
       observer.outLineReceived(ln)
@@ -171,58 +184,18 @@
 
   def test_basic(self):
     obs = self.parse_log("""
-PASS: test-one (1 of 3)
-FAIL: test-two (2 of 3)
-PASS: test-three (3 of 3)
-""")
-
-    self.assertEqual(obs.resultCounts, { 'FAIL' : 1, 'PASS' : 2 })
-    self.assertEqual(obs.step.logs, [('test-two', 'FAIL: test-two')])
-
-  def test_verbose_logs(self):
-    obs = self.parse_log("""
+Failing Tests (3)
 FAIL: test-one (1 of 3)
-FAIL: test-two (2 of 3)
-**** TEST 'test-two' FAILED ****
-bla bla bla
-**********
+TIMEOUT: test-two (2 of 3)
 FAIL: test-three (3 of 3)
+Ninja build stopped
 """)
+    self.assertEqual(obs.resultCounts, { 'FAIL' : 2, 'TIMEOUT' : 1 })
+    self.assertEqual(obs.step.logs, [('test-one', 'FAIL: test-one'),
+                                     ('test-two', 'TIMEOUT: test-two'),
+                                     ('test-three', 'FAIL: test-three')])
+
-    self.assertEqual(obs.resultCounts, { 'FAIL' : 3 })
-    self.assertEqual(obs.step.logs, [
-      ('test-one', 'FAIL: test-one'),
-      ('test-two', """\
-**** TEST 'test-two' FAILED ****
-bla bla bla
-**********"""),
-      ('test-three', 'FAIL: test-three')])
-
-class TestCommand(unittest.TestCase):
-  def parse_log(self, text, **kwargs):
-    cmd = LitTestCommand(**kwargs)
-    cmd.logObserver.step = StepProxy()
-    for ln in text.split('\n'):
-      cmd.logObserver.outLineReceived(ln)
-    return cmd
-
-  def test_command_status(self):
-    # If the command failed, the status should always be error.
-    cmd = self.parse_log("")
-    self.assertEqual(cmd.evaluateCommand(RemoteCommandProxy(1)), FAILURE)
-
-    # If there were failing tests, the status should be an error (even if the
-    # test command didn't report as such).
-    for failing_code in ('FAIL', 'XPASS', 'KPASS', 'UNRESOLVED'):
-      cmd = self.parse_log("""%s: test-one (1 of 1)""" % (failing_code,))
-      self.assertEqual(cmd.evaluateCommand(RemoteCommandProxy(0)), FAILURE)
-
-  def test_max_logs(self):
-    cmd = self.parse_log("""
-FAIL: test-one (1 of 2)
-FAIL: test-two (2 of 2)
-""", max_logs=1)
-    self.assertEqual(cmd.logObserver.step.logs, [('test-one', 'FAIL: test-one')])
 
 if __name__ == '__main__':
   unittest.main()
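
Not part of the patch: a minimal standalone sketch of the parsing flow the new observer implements, for anyone who wants to exercise the regular expressions outside buildbot. Nothing is tallied until the "Failing Tests (N)" summary marker appears; after that, every "CODE: name (...)" line is counted. The sample output and test names below are illustrative only, not captured from a real check-lldb run.

import re

# Same patterns as LldbLogObserver in the patch.
kTestLineRE = re.compile(r'(\w+): (.*) \(.*\)')
kTestResultRE = re.compile(r'^Failing Tests \((\d+)\)$')

# Hypothetical log excerpt; real check-lldb output will differ in detail.
sample = """\
PASS: early chatter that must not be counted (1 of 400)
Failing Tests (2)
FAIL: TestExample.py (1 of 2)
TIMEOUT: TestOther.py (2 of 2)
Ninja build stopped
"""

parser_active = False
result_counts = {}
for line in sample.splitlines():
    if kTestResultRE.match(line):
        # Summary marker seen: start counting from here on.
        parser_active = True
        continue
    if parser_active:
        m = kTestLineRE.match(line.strip())
        if m:
            code, name = m.groups()
            result_counts[code] = result_counts.get(code, 0) + 1

print(result_counts)  # {'FAIL': 1, 'TIMEOUT': 1}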