Diffstat (limited to 'meta/lib/oeqa/core/runner.py')
-rw-r--r-- | meta/lib/oeqa/core/runner.py | 286 |
1 file changed, 197 insertions(+), 89 deletions(-)
diff --git a/meta/lib/oeqa/core/runner.py b/meta/lib/oeqa/core/runner.py
index 6650a63ce7..a86a706bd9 100644
--- a/meta/lib/oeqa/core/runner.py
+++ b/meta/lib/oeqa/core/runner.py
@@ -1,11 +1,16 @@
+#
 # Copyright (C) 2016 Intel Corporation
-# Released under the MIT license (see COPYING.MIT)
+#
+# SPDX-License-Identifier: MIT
+#
 
 import os
 import time
 import unittest
 import logging
 import re
+import json
+import sys
 
 from unittest import TextTestResult as _TestResult
 from unittest import TextTestRunner as _TestRunner
@@ -38,12 +43,17 @@ class OETestResult(_TestResult):
         self.starttime = {}
         self.endtime = {}
         self.progressinfo = {}
+        self.extraresults = {}
+        self.shownmsg = []
 
         # Inject into tc so that TestDepends decorator can see results
         tc.results = self
 
         self.tc = tc
 
+        # stdout and stderr for each test case
+        self.logged_output = {}
+
     def startTest(self, test):
         # May have been set by concurrencytest
         if test.id() not in self.starttime:
@@ -52,9 +62,21 @@ class OETestResult(_TestResult):
 
     def stopTest(self, test):
         self.endtime[test.id()] = time.time()
+        if self.buffer:
+            self.logged_output[test.id()] = (
+                    sys.stdout.getvalue(), sys.stderr.getvalue())
         super(OETestResult, self).stopTest(test)
         if test.id() in self.progressinfo:
-            print(self.progressinfo[test.id()])
+            self.tc.logger.info(self.progressinfo[test.id()])
+
+        # Print the errors/failures early to aid/speed debugging, its a pain
+        # to wait until selftest finishes to see them.
+        for t in ['failures', 'errors', 'skipped', 'expectedFailures']:
+            for (scase, msg) in getattr(self, t):
+                if test.id() == scase.id():
+                    self.tc.logger.info(str(msg))
+                    self.shownmsg.append(test.id())
+                    break
 
     def logSummary(self, component, context_msg=''):
         elapsed_time = self.tc._run_end_time - self.tc._run_start_time
@@ -67,80 +89,155 @@ class OETestResult(_TestResult):
             msg = "%s - OK - All required tests passed" % component
         else:
             msg = "%s - FAIL - Required tests failed" % component
-        skipped = len(self.skipped)
-        if skipped:
-            msg += " (skipped=%d)" % skipped
+        msg += " (successes=%d, skipped=%d, failures=%d, errors=%d)" % (len(self.successes), len(self.skipped), len(self.failures), len(self.errors))
         self.tc.logger.info(msg)
 
-    def _getDetailsNotPassed(self, case, type, desc):
-        found = False
-
-        for (scase, msg) in getattr(self, type):
-            if case.id() == scase.id():
-                found = True
-                break
-
-            scase_str = str(scase.id())
-
-            # When fails at module or class level the class name is passed as string
-            # so figure out to see if match
-            m = re.search("^setUpModule \((?P<module_name>.*)\)$", scase_str)
-            if m:
-                if case.__class__.__module__ == m.group('module_name'):
-                    found = True
-                    break
-
-            m = re.search("^setUpClass \((?P<class_name>.*)\)$", scase_str)
-            if m:
-                class_name = "%s.%s" % (case.__class__.__module__,
-                        case.__class__.__name__)
-
-                if class_name == m.group('class_name'):
+    def _getTestResultDetails(self, case):
+        result_types = {'failures': 'FAILED', 'errors': 'ERROR', 'skipped': 'SKIPPED',
+                'expectedFailures': 'EXPECTEDFAIL', 'successes': 'PASSED',
+                'unexpectedSuccesses' : 'PASSED'}
+
+        for rtype in result_types:
+            found = False
+            for resultclass in getattr(self, rtype):
+                # unexpectedSuccesses are just lists, not lists of tuples
+                if isinstance(resultclass, tuple):
+                    scase, msg = resultclass
+                else:
+                    scase, msg = resultclass, None
+                if case.id() == scase.id():
                     found = True
                     break
+                scase_str = str(scase.id())
+
+                # When fails at module or class level the class name is passed as string
+                # so figure out to see if match
+                m = re.search(r"^setUpModule \((?P<module_name>.*)\).*$", scase_str)
+                if m:
+                    if case.__class__.__module__ == m.group('module_name'):
+                        found = True
+                        break
+
+                m = re.search(r"^setUpClass \((?P<class_name>.*)\).*$", scase_str)
+                if m:
+                    class_name = "%s.%s" % (case.__class__.__module__,
+                            case.__class__.__name__)
+
+                    if class_name == m.group('class_name'):
+                        found = True
+                        break
+
+            if found:
+                return result_types[rtype], msg
+
+        return 'UNKNOWN', None
+
+    def extractExtraResults(self, test, details = None):
+        extraresults = None
+        if details is not None and "extraresults" in details:
+            extraresults = details.get("extraresults", {})
+        elif hasattr(test, "extraresults"):
+            extraresults = test.extraresults
+
+        if extraresults is not None:
+            for k, v in extraresults.items():
+                # handle updating already existing entries (e.g. ptestresults.sections)
+                if k in self.extraresults:
+                    self.extraresults[k].update(v)
+                else:
+                    self.extraresults[k] = v
 
-        if found:
-            return (found, msg)
+    def addError(self, test, *args, details = None):
+        self.extractExtraResults(test, details = details)
+        return super(OETestResult, self).addError(test, *args)
 
-        return (found, None)
+    def addFailure(self, test, *args, details = None):
+        self.extractExtraResults(test, details = details)
+        return super(OETestResult, self).addFailure(test, *args)
 
-    def addSuccess(self, test):
+    def addSuccess(self, test, details = None):
         #Added so we can keep track of successes too
         self.successes.append((test, None))
-        super(OETestResult, self).addSuccess(test)
+        self.extractExtraResults(test, details = details)
+        return super(OETestResult, self).addSuccess(test)
 
-    def logDetails(self):
-        self.tc.logger.info("RESULTS:")
-        for case_name in self.tc._registry['cases']:
-            case = self.tc._registry['cases'][case_name]
+    def addExpectedFailure(self, test, *args, details = None):
+        self.extractExtraResults(test, details = details)
+        return super(OETestResult, self).addExpectedFailure(test, *args)
 
-            result_types = ['failures', 'errors', 'skipped', 'expectedFailures', 'successes']
-            result_desc = ['FAILED', 'ERROR', 'SKIPPED', 'EXPECTEDFAIL', 'PASSED']
+    def addUnexpectedSuccess(self, test, details = None):
+        self.extractExtraResults(test, details = details)
+        return super(OETestResult, self).addUnexpectedSuccess(test)
 
-            fail = False
-            desc = None
-            for idx, name in enumerate(result_types):
-                (fail, msg) = self._getDetailsNotPassed(case, result_types[idx],
-                        result_desc[idx])
-                if fail:
-                    desc = result_desc[idx]
-                    break
+    def logDetails(self, json_file_dir=None, configuration=None, result_id=None,
+            dump_streams=False):
+
+        result = self.extraresults
+        logs = {}
+        if hasattr(self.tc, "extraresults"):
+            result.update(self.tc.extraresults)
+
+        for case_name in self.tc._registry['cases']:
+            case = self.tc._registry['cases'][case_name]
 
-            oeid = -1
-            if hasattr(case, 'decorators'):
-                for d in case.decorators:
-                    if hasattr(d, 'oeid'):
-                        oeid = d.oeid
+            (status, log) = self._getTestResultDetails(case)
 
             t = ""
+            duration = 0
             if case.id() in self.starttime and case.id() in self.endtime:
-                t = " (" + "{0:.2f}".format(self.endtime[case.id()] - self.starttime[case.id()]) + "s)"
+                duration = self.endtime[case.id()] - self.starttime[case.id()]
+                t = " (" + "{0:.2f}".format(duration) + "s)"
+
+            if status not in logs:
+                logs[status] = []
+            logs[status].append("RESULTS - %s: %s%s" % (case.id(), status, t))
+            report = {'status': status}
+            if log:
+                report['log'] = log
+                # Class setup failures wouldn't enter stopTest so would never display
+                if case.id() not in self.shownmsg:
+                    self.tc.logger.info("Failure (%s) for %s:\n" % (status, case.id()) + log)
+
+            if duration:
+                report['duration'] = duration
+
+            alltags = []
+            # pull tags from the case class
+            if hasattr(case, "__oeqa_testtags"):
+                alltags.extend(getattr(case, "__oeqa_testtags"))
+            # pull tags from the method itself
+            test_name = case._testMethodName
+            if hasattr(case, test_name):
+                method = getattr(case, test_name)
+                if hasattr(method, "__oeqa_testtags"):
+                    alltags.extend(getattr(method, "__oeqa_testtags"))
+            if alltags:
+                report['oetags'] = alltags
+
+            if dump_streams and case.id() in self.logged_output:
+                (stdout, stderr) = self.logged_output[case.id()]
+                report['stdout'] = stdout
+                report['stderr'] = stderr
+
+            result[case.id()] = report
 
-            if fail:
-                self.tc.logger.info("RESULTS - %s - Testcase %s: %s%s" % (case.id(),
-                    oeid, desc, t))
-            else:
-                self.tc.logger.info("RESULTS - %s - Testcase %s: %s%s" % (case.id(),
-                    oeid, 'UNKNOWN', t))
+        self.tc.logger.info("RESULTS:")
+        for i in ['PASSED', 'SKIPPED', 'EXPECTEDFAIL', 'ERROR', 'FAILED', 'UNKNOWN']:
+            if i not in logs:
+                continue
+            for l in logs[i]:
+                self.tc.logger.info(l)
+
+        if json_file_dir:
+            tresultjsonhelper = OETestResultJSONHelper()
+            tresultjsonhelper.dump_testresult_file(json_file_dir, configuration, result_id, result)
+
+    def wasSuccessful(self):
+        # Override as we unexpected successes aren't failures for us
+        return (len(self.failures) == len(self.errors) == 0)
+
+    def hasAnyFailingTest(self):
+        # Account for expected failures
+        return not self.wasSuccessful() or len(self.expectedFailures)
 
 class OEListTestsResult(object):
     def wasSuccessful(self):
@@ -169,42 +266,20 @@ class OETestRunner(_TestRunner):
             self._walked_cases = self._walked_cases + 1
 
     def _list_tests_name(self, suite):
-        from oeqa.core.decorator.oeid import OETestID
-        from oeqa.core.decorator.oetag import OETestTag
-
         self._walked_cases = 0
 
-        def _list_cases_without_id(logger, case):
-
-            found_id = False
-            if hasattr(case, 'decorators'):
-                for d in case.decorators:
-                    if isinstance(d, OETestID):
-                        found_id = True
-
-            if not found_id:
-                logger.info('oeid missing for %s' % case.id())
-
         def _list_cases(logger, case):
-            oeid = None
-            oetag = None
-
-            if hasattr(case, 'decorators'):
-                for d in case.decorators:
-                    if isinstance(d, OETestID):
-                        oeid = d.oeid
-                    elif isinstance(d, OETestTag):
-                        oetag = d.oetag
-
-            logger.info("%s\t%s\t\t%s" % (oeid, oetag, case.id()))
-
-        self.tc.logger.info("Listing test cases that don't have oeid ...")
-        self._walk_suite(suite, _list_cases_without_id)
-        self.tc.logger.info("-" * 80)
+            oetags = []
+            if hasattr(case, '__oeqa_testtags'):
+                oetags = getattr(case, '__oeqa_testtags')
+            if oetags:
+                logger.info("%s (%s)" % (case.id(), ",".join(oetags)))
+            else:
+                logger.info("%s" % (case.id()))
 
         self.tc.logger.info("Listing all available tests:")
         self._walked_cases = 0
-        self.tc.logger.info("id\ttag\t\ttest")
+        self.tc.logger.info("test (tags)")
         self.tc.logger.info("-" * 80)
         self._walk_suite(suite, _list_cases)
         self.tc.logger.info("-" * 80)
@@ -253,3 +328,36 @@ class OETestRunner(_TestRunner):
             self._list_tests_module(suite)
 
         return OEListTestsResult()
+
+class OETestResultJSONHelper(object):
+
+    testresult_filename = 'testresults.json'
+
+    def _get_existing_testresults_if_available(self, write_dir):
+        testresults = {}
+        file = os.path.join(write_dir, self.testresult_filename)
+        if os.path.exists(file):
+            with open(file, "r") as f:
+                testresults = json.load(f)
+        return testresults
+
+    def _write_file(self, write_dir, file_name, file_content):
+        file_path = os.path.join(write_dir, file_name)
+        with open(file_path, 'w') as the_file:
+            the_file.write(file_content)
+
+    def dump_testresult_file(self, write_dir, configuration, result_id, test_result):
+        try:
+            import bb
+            has_bb = True
+            bb.utils.mkdirhier(write_dir)
+            lf = bb.utils.lockfile(os.path.join(write_dir, 'jsontestresult.lock'))
+        except ImportError:
+            has_bb = False
+            os.makedirs(write_dir, exist_ok=True)
+        test_results = self._get_existing_testresults_if_available(write_dir)
+        test_results[result_id] = {'configuration': configuration, 'result': test_result}
+        json_testresults = json.dumps(test_results, sort_keys=True, indent=4)
+        self._write_file(write_dir, self.testresult_filename, json_testresults)
+        if has_bb:
+            bb.utils.unlockfile(lf)
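For context, the extractExtraResults() hook added above folds each test's extra results into a single dictionary, updating a top-level key that already exists (e.g. ptestresults.sections) in place rather than replacing it. A minimal standalone sketch of that merge behaviour; the section names and values below are hypothetical:

    # Sketch of the merge done by extractExtraResults(): values are dicts,
    # and a repeated top-level key updates the existing dict in place.
    extraresults = {}

    def merge_extraresults(new):
        for k, v in new.items():
            if k in extraresults:
                extraresults[k].update(v)   # merge into the existing entry
            else:
                extraresults[k] = v         # first occurrence of this key

    merge_extraresults({'ptestresults.sections': {'glibc': {'duration': 12}}})
    merge_extraresults({'ptestresults.sections': {'busybox': {'duration': 3}}})
    # Both sections now live under the one shared top-level key:
    assert sorted(extraresults['ptestresults.sections']) == ['busybox', 'glibc']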
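The new OETestResultJSONHelper serializes results into testresults.json in the requested directory, merging with any existing file and keying each run by result_id; under bitbake it takes a bb.utils lock, otherwise it falls back to os.makedirs. A hedged usage sketch, assuming meta/lib is on sys.path; the path, configuration and result_id values are illustrative only:

    from oeqa.core.runner import OETestResultJSONHelper

    helper = OETestResultJSONHelper()
    helper.dump_testresult_file(
        '/tmp/testresults',                   # write_dir, created if missing
        {'TEST_TYPE': 'oeselftest'},          # configuration metadata
        'example_run_1',                      # result_id keying this run
        {'selftest.example.test_foo': {'status': 'PASSED'}})
    # A later call with a different result_id merges into the same
    # testresults.json instead of overwriting it.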