author | Richard Purdie <richard.purdie@linuxfoundation.org> | 2019-02-16 18:13:00 +0000 |
---|---|---|
committer | Richard Purdie <richard.purdie@linuxfoundation.org> | 2019-02-21 12:31:50 +0000 |
commit | ff2c029b568f70aa9960dde04ddd207829812ea0 (patch) | |
tree | 5d80afe2e19d699d58cb424fe8dd97b3294f47f7 /scripts/lib/resulttool/report.py | |
parent | f24dc9e87085a8fe5410feee10c7a3591fe9d816 (diff) | |
download | openembedded-core-contrib-ff2c029b568f70aa9960dde04ddd207829812ea0.tar.gz | |
resulttool: Improvements to allow integration with the autobuilder
This is a combined patch of the various tweaks and improvements I
made to resulttool:
* Avoid subprocess.run() as it's a Python 3.6 feature and we
have autobuilder workers with 3.5 (a 3.5-compatible alternative is
sketched after the sign-off below)
* Avoid Python keywords as variable names
* Simplify dict accesses using .get()
* Rename resultsutils -> resultutils to match the resultstool ->
resulttool rename
* Formalised the handling of "file_name" into "TESTSERIES", which the code
will now add into the JSON configuration data if it's not present, based
on the directory name (a rough sketch of this follows the diff below)
* When there are no failed test cases, print a message saying so
instead of an empty table
* Tweak the table headers in the report to be more readable (reference
"Test Series" instead of file_id and ID instead of results_id)
* Improve/simplify the max string length handling
* Merge the counts and percentage data into one table in the report
since printing two reports of the same data confuses the user
* Removed the confusing header in the regression report
* Show matches, then regressions, then unmatched runs in the regression
report, and remove chatty, unneeded output
* Try harder to "pair" up matching configurations to reduce noise in
the regression report
* Abstracted the "mapping" table concept used for pairing in the
regression code into general code in resultutils
* Created multiple mappings for results analysis, results storage and
'flattening' results data in a merge
* Simplify the merge command to take a source and a destination,
letting the destination be a directory or a file, removing the need for
an output directory parameter
* Add the 'IMAGE_PKGTYPE' and 'DISTRO' config options to the regression
mappings
* Have the store command place the testresults files in a layout derived
from the mapping, making commits into the git repo for results storage more
useful for simple comparison purposes
* Set the oe-git-archive tag format appropriately for oeqa results
storage (and simplify the commit messages closer to their defaults)
* Fix oe-git-archive to use the commit/branch data from the results file
* Cleaned up the command option help to match other changes
* Follow the model of git branch/tag processing used by oe-build-perf-report
and use that to read the data using git show, avoiding the need to switch branches
* Add ptest summary to the report command
* Update the tests to match the above changes
Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
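
For context on the first bullet above, here is a minimal sketch of the kind of substitution involved. The helper name run_command and the git command are illustrative only; the real call sites live elsewhere in resulttool and oe-git-archive, not in the file diffed below.

```python
import subprocess

# Python 3.6+ style (not usable on the 3.5 autobuilder workers):
#     completed = subprocess.run(cmd, stdout=subprocess.PIPE, check=True)
#     out = completed.stdout.decode("utf-8")

def run_command(cmd):
    # check_output() exists on Python 3.5 and also raises
    # subprocess.CalledProcessError on a non-zero exit status,
    # so it covers the common check=True + stdout=PIPE case.
    return subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode("utf-8")

print(run_command(["git", "--version"]))
```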
Diffstat (limited to 'scripts/lib/resulttool/report.py')
-rw-r--r-- | scripts/lib/resulttool/report.py | 157 |
1 file changed, 89 insertions, 68 deletions
diff --git a/scripts/lib/resulttool/report.py b/scripts/lib/resulttool/report.py
index ab5de1f3a7..2f5ea308e2 100644
--- a/scripts/lib/resulttool/report.py
+++ b/scripts/lib/resulttool/report.py
@@ -1,6 +1,7 @@
 # test result tool - report text based test results
 #
 # Copyright (c) 2019, Intel Corporation.
+# Copyright (c) 2019, Linux Foundation
 #
 # This program is free software; you can redistribute it and/or modify it
 # under the terms and conditions of the GNU General Public License,
@@ -14,100 +15,120 @@
 import os
 import glob
 import json
-from resulttool.resultsutils import checkout_git_dir, load_json_file, get_dict_value, get_directory_files
+import resulttool.resultutils as resultutils
+from oeqa.utils.git import GitRepo
+import oeqa.utils.gitarchive as gitarchive
+
 class ResultsTextReport(object):
+    def __init__(self):
+        self.ptests = {}
+        self.result_types = {'passed': ['PASSED', 'passed'],
+                             'failed': ['FAILED', 'failed', 'ERROR', 'error', 'UNKNOWN'],
+                             'skipped': ['SKIPPED', 'skipped']}
+
+
+    def handle_ptest_result(self, k, status, result):
+        if k == 'ptestresult.sections':
+            return
+        _, suite, test = k.split(".", 2)
+        # Handle 'glib-2.0'
+        if suite not in result['ptestresult.sections']:
+            try:
+                _, suite, suite1, test = k.split(".", 3)
+                if suite + "." + suite1 in result['ptestresult.sections']:
+                    suite = suite + "." + suite1
+            except ValueError:
+                pass
+        if suite not in self.ptests:
+            self.ptests[suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
+        for tk in self.result_types:
+            if status in self.result_types[tk]:
+                self.ptests[suite][tk] += 1
+        if suite in result['ptestresult.sections']:
+            if 'duration' in result['ptestresult.sections'][suite]:
+                self.ptests[suite]['duration'] = result['ptestresult.sections'][suite]['duration']
+            if 'timeout' in result['ptestresult.sections'][suite]:
+                self.ptests[suite]['duration'] += " T"
 
     def get_aggregated_test_result(self, logger, testresult):
         test_count_report = {'passed': 0, 'failed': 0, 'skipped': 0, 'failed_testcases': []}
-        result_types = {'passed': ['PASSED', 'passed'],
-                        'failed': ['FAILED', 'failed', 'ERROR', 'error', 'UNKNOWN'],
-                        'skipped': ['SKIPPED', 'skipped']}
-        result = get_dict_value(logger, testresult, 'result')
+        result = testresult.get('result', [])
         for k in result:
-            test_status = get_dict_value(logger, result[k], 'status')
-            for tk in result_types:
-                if test_status in result_types[tk]:
+            test_status = result[k].get('status', [])
+            for tk in self.result_types:
+                if test_status in self.result_types[tk]:
                     test_count_report[tk] += 1
-            if test_status in result_types['failed']:
+            if test_status in self.result_types['failed']:
                 test_count_report['failed_testcases'].append(k)
+            if k.startswith("ptestresult."):
+                self.handle_ptest_result(k, test_status, result)
         return test_count_report
 
-    def get_test_result_percentage(self, test_result_count):
-        total_tested = test_result_count['passed'] + test_result_count['failed'] + test_result_count['skipped']
-        test_percent_report = {'passed': 0, 'failed': 0, 'skipped': 0}
-        for k in test_percent_report:
-            test_percent_report[k] = format(test_result_count[k] / total_tested * 100, '.2f')
-        return test_percent_report
-
-    def add_test_configurations(self, test_report, source_dir, file, result_id):
-        test_report['file_dir'] = self._get_short_file_dir(source_dir, file)
-        test_report['result_id'] = result_id
-        test_report['test_file_dir_result_id'] = '%s_%s' % (test_report['file_dir'], test_report['result_id'])
-
-    def _get_short_file_dir(self, source_dir, file):
-        file_dir = os.path.dirname(file)
-        source_dir = source_dir[:-1] if source_dir[-1] == '/' else source_dir
-        if file_dir == source_dir:
-            return 'None'
-        return file_dir.replace(source_dir, '')
-
-    def get_max_string_len(self, test_result_list, key, default_max_len):
-        max_len = default_max_len
-        for test_result in test_result_list:
-            value_len = len(test_result[key])
-            if value_len > max_len:
-                max_len = value_len
-        return max_len
-
-    def print_test_report(self, template_file_name, test_count_reports, test_percent_reports,
-                          max_len_dir, max_len_result_id):
+    def print_test_report(self, template_file_name, test_count_reports):
         from jinja2 import Environment, FileSystemLoader
         script_path = os.path.dirname(os.path.realpath(__file__))
         file_loader = FileSystemLoader(script_path + '/template')
         env = Environment(loader=file_loader, trim_blocks=True)
         template = env.get_template(template_file_name)
-        output = template.render(test_count_reports=test_count_reports,
-                                 test_percent_reports=test_percent_reports,
-                                 max_len_dir=max_len_dir,
-                                 max_len_result_id=max_len_result_id)
-        print('Printing text-based test report:')
+        havefailed = False
+        haveptest = bool(self.ptests)
+        reportvalues = []
+        cols = ['passed', 'failed', 'skipped']
+        maxlen = {'passed' : 0, 'failed' : 0, 'skipped' : 0, 'result_id': 0, 'testseries' : 0, 'ptest' : 0 }
+        for line in test_count_reports:
+            total_tested = line['passed'] + line['failed'] + line['skipped']
+            vals = {}
+            vals['result_id'] = line['result_id']
+            vals['testseries'] = line['testseries']
+            vals['sort'] = line['testseries'] + "_" + line['result_id']
+            vals['failed_testcases'] = line['failed_testcases']
+            for k in cols:
+                vals[k] = "%d (%s%%)" % (line[k], format(line[k] / total_tested * 100, '.0f'))
+            for k in maxlen:
+                if k in vals and len(vals[k]) > maxlen[k]:
+                    maxlen[k] = len(vals[k])
+            reportvalues.append(vals)
+            if line['failed_testcases']:
+                havefailed = True
+        for ptest in self.ptests:
+            if len(ptest) > maxlen['ptest']:
+                maxlen['ptest'] = len(ptest)
+        output = template.render(reportvalues=reportvalues,
+                                 havefailed=havefailed,
+                                 haveptest=haveptest,
+                                 ptests=self.ptests,
+                                 maxlen=maxlen)
         print(output)
 
-    def view_test_report(self, logger, source_dir, git_branch):
-        if git_branch:
-            checkout_git_dir(source_dir, git_branch)
+    def view_test_report(self, logger, source_dir, tag):
         test_count_reports = []
-        test_percent_reports = []
-        for file in get_directory_files(source_dir, ['.git'], 'testresults.json'):
-            logger.debug('Computing result for test result file: %s' % file)
-            testresults = load_json_file(file)
-            for k in testresults:
-                test_count_report = self.get_aggregated_test_result(logger, testresults[k])
-                test_percent_report = self.get_test_result_percentage(test_count_report)
-                self.add_test_configurations(test_count_report, source_dir, file, k)
-                self.add_test_configurations(test_percent_report, source_dir, file, k)
+        if tag:
+            repo = GitRepo(source_dir)
+            testresults = resultutils.git_get_result(repo, [tag])
+        else:
+            testresults = resultutils.load_resultsdata(source_dir)
+        for testsuite in testresults:
+            for resultid in testresults[testsuite]:
+                result = testresults[testsuite][resultid]
+                test_count_report = self.get_aggregated_test_result(logger, result)
+                test_count_report['testseries'] = result['configuration']['TESTSERIES']
+                test_count_report['result_id'] = resultid
                 test_count_reports.append(test_count_report)
-                test_percent_reports.append(test_percent_report)
-        max_len_dir = self.get_max_string_len(test_count_reports, 'file_dir', len('file_dir'))
-        max_len_result_id = self.get_max_string_len(test_count_reports, 'result_id', len('result_id'))
-        self.print_test_report('test_report_full_text.txt', test_count_reports, test_percent_reports,
-                               max_len_dir, max_len_result_id)
+        self.print_test_report('test_report_full_text.txt', test_count_reports)
 
 def report(args, logger):
     report = ResultsTextReport()
-    report.view_test_report(logger, args.source_dir, args.git_branch)
+    report.view_test_report(logger, args.source_dir, args.tag)
     return 0
 
 def register_commands(subparsers):
     """Register subcommands from this plugin"""
-    parser_build = subparsers.add_parser('report', help='report test result summary',
-                                         description='report text-based test result summary from the source directory',
+    parser_build = subparsers.add_parser('report', help='summarise test results',
+                                         description='print a text-based summary of the test results',
                                          group='analysis')
     parser_build.set_defaults(func=report)
     parser_build.add_argument('source_dir',
-                              help='source directory that contain the test result files for reporting')
-    parser_build.add_argument('-b', '--git-branch', default='',
-                              help='(optional) default assume source directory contains all available files for '
-                                   'reporting unless a git branch was provided where it will try to checkout '
-                                   'the provided git branch assuming source directory was a git repository')
+                              help='source file/directory that contain the test result files to summarise')
+    parser_build.add_argument('-t', '--tag', default='',
+                              help='source_dir is a git repository, report on the tag specified from that repository')
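
The TESTSERIES value that view_test_report() reads above is filled in by resultutils rather than by this file; the diff only consumes it. Below is a rough sketch of the idea, with a hypothetical helper name and simplified data handling, assuming an on-disk layout of one testresults.json per test run directory.

```python
import os
import json

def load_results_with_testseries(source_dir):
    """Walk source_dir for testresults.json files and make sure every result
    carries a TESTSERIES configuration value, falling back to the name of the
    directory the file was found in."""
    results = {}
    for root, _, files in os.walk(source_dir):
        if "testresults.json" not in files:
            continue
        path = os.path.join(root, "testresults.json")
        with open(path) as f:
            data = json.load(f)
        for result_id, result in data.items():
            config = result.setdefault("configuration", {})
            # Derive the series from the containing directory when absent
            config.setdefault("TESTSERIES", os.path.basename(root))
            results[result_id] = result
    return results
```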