Diffstat (limited to 'scripts/lib/resulttool')
-rw-r--r--   scripts/lib/resulttool/junit.py                            |  77
-rw-r--r--   scripts/lib/resulttool/log.py                              | 107
-rwxr-xr-x   scripts/lib/resulttool/manualexecution.py                  | 264
-rw-r--r--   scripts/lib/resulttool/merge.py                            |  89
-rw-r--r--   scripts/lib/resulttool/regression.py                       | 599
-rw-r--r--   scripts/lib/resulttool/report.py                           | 356
-rw-r--r--   scripts/lib/resulttool/resultsutils.py                     |  67
-rw-r--r--   scripts/lib/resulttool/resultutils.py                      | 228
-rw-r--r--   scripts/lib/resulttool/store.py                            | 160
-rw-r--r--   scripts/lib/resulttool/template/test_report_full_text.txt  |  68
10 files changed, 1456 insertions, 559 deletions
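
For orientation, the plugins changed below are all invoked through the resulttool wrapper script. A minimal sketch of typical invocations, assuming a sourced build environment (so BUILDDIR is set) and existing results data; the paths and the manual test case file are illustrative only, derived from the argparse definitions in the patch:

    $ resulttool merge base_results/ target_results/
    $ resulttool regression base_results/ target_results/
    $ resulttool log testresults.json --list-ptest
    $ resulttool junit testresults.json -j /tmp/junit.xml
    $ resulttool manualexecution meta/lib/oeqa/manual/build-appliance.json
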
diff --git a/scripts/lib/resulttool/junit.py b/scripts/lib/resulttool/junit.py
new file mode 100644
index 0000000000..c7a53dc550
--- /dev/null
+++ b/scripts/lib/resulttool/junit.py
@@ -0,0 +1,77 @@
+# resulttool - report test results in JUnit XML format
+#
+# Copyright (c) 2024, Siemens AG.
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import os
+import re
+import xml.etree.ElementTree as ET
+import resulttool.resultutils as resultutils
+
+def junit(args, logger):
+    testresults = resultutils.load_resultsdata(args.json_file, configmap=resultutils.store_map)
+
+    total_time = 0
+    skipped = 0
+    failures = 0
+    errors = 0
+
+    for tests in testresults.values():
+        results = tests[next(reversed(tests))].get("result", {})
+
+    for result_id, result in results.items():
+        # filter out ptestresult.rawlogs and ptestresult.sections
+        if re.search(r'\.test_', result_id):
+            total_time += result.get("duration", 0)
+
+            if result['status'] == "FAILED":
+                failures += 1
+            elif result['status'] == "ERROR":
+                errors += 1
+            elif result['status'] == "SKIPPED":
+                skipped += 1
+
+    testsuites_node = ET.Element("testsuites")
+    testsuites_node.set("time", "%s" % total_time)
+    testsuite_node = ET.SubElement(testsuites_node, "testsuite")
+    testsuite_node.set("name", "Testimage")
+    testsuite_node.set("time", "%s" % total_time)
+    testsuite_node.set("tests", "%s" % len(results))
+    testsuite_node.set("failures", "%s" % failures)
+    testsuite_node.set("errors", "%s" % errors)
+    testsuite_node.set("skipped", "%s" % skipped)
+
+    for result_id, result in results.items():
+        if re.search(r'\.test_', result_id):
+            testcase_node = ET.SubElement(testsuite_node, "testcase", {
+                "name": result_id,
+                "classname": "Testimage",
+                "time": str(result['duration'])
+            })
+            if result['status'] == "SKIPPED":
+                ET.SubElement(testcase_node, "skipped", message=result['log'])
+            elif result['status'] == "FAILED":
+                ET.SubElement(testcase_node, "failure", message=result['log'])
+            elif result['status'] == "ERROR":
+                ET.SubElement(testcase_node, "error", message=result['log'])
+
+    tree = ET.ElementTree(testsuites_node)
+
+    if args.junit_xml_path is None:
+        args.junit_xml_path = os.environ['BUILDDIR'] + '/tmp/log/oeqa/junit.xml'
+    tree.write(args.junit_xml_path, encoding='UTF-8', xml_declaration=True)
+
+    logger.info('Saved JUnit XML report as %s' % args.junit_xml_path)
+
+def register_commands(subparsers):
+    """Register subcommands from this plugin"""
+    parser_build = subparsers.add_parser('junit', help='create test report in JUnit XML format',
+                                         description='generate unit test report in JUnit XML format based on the latest test results in the testresults.json.',
+                                         group='analysis')
+    parser_build.set_defaults(func=junit)
+    parser_build.add_argument('json_file',
+                              help='json file should point to the testresults.json')
+    parser_build.add_argument('-j', '--junit_xml_path',
+                              help='junit xml path allows setting the path of the generated test report. The default location is <build_dir>/tmp/log/oeqa/junit.xml')
diff --git a/scripts/lib/resulttool/log.py b/scripts/lib/resulttool/log.py
new file mode 100644
index 0000000000..15148ca288
--- /dev/null
+++ b/scripts/lib/resulttool/log.py
@@ -0,0 +1,107 @@
+# resulttool - Show logs
+#
+# Copyright (c) 2019 Garmin International
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+import os
+import resulttool.resultutils as resultutils
+
+def show_ptest(result, ptest, logger):
+    logdata = resultutils.ptestresult_get_log(result, ptest)
+    if logdata is not None:
+        print(logdata)
+        return 0
+
+    print("ptest '%s' log not found" % ptest)
+    return 1
+
+def show_reproducible(result, reproducible, logger):
+    try:
+        print(result['reproducible'][reproducible]['diffoscope.text'])
+        return 0
+
+    except KeyError:
+        print("reproducible '%s' not found" % reproducible)
+        return 1
+
+def log(args, logger):
+    results = resultutils.load_resultsdata(args.source)
+
+    for _, run_name, _, r in resultutils.test_run_results(results):
+        if args.list_ptest:
+            print('\n'.join(sorted(r['ptestresult.sections'].keys())))
+
+        if args.dump_ptest:
+            for sectname in ['ptestresult.sections', 'ltpposixresult.sections', 'ltpresult.sections']:
+                if sectname in r:
+                    for name, ptest in r[sectname].items():
+                        logdata = resultutils.generic_get_log(sectname, r, name)
+                        if logdata is not None:
+                            dest_dir = args.dump_ptest
+                            if args.prepend_run:
+                                dest_dir = os.path.join(dest_dir, run_name)
+                            if not sectname.startswith("ptest"):
+                                dest_dir = os.path.join(dest_dir, sectname.split(".")[0])
+
+                            os.makedirs(dest_dir, exist_ok=True)
+                            dest = os.path.join(dest_dir, '%s.log' % name)
+                            if os.path.exists(dest):
+                                print("Overlapping ptest logs found, skipping %s. The '--prepend-run' option would avoid this" % name)
+                                continue
+                            print(dest)
+                            with open(dest, 'w') as f:
+                                f.write(logdata)
+
+        if args.raw_ptest:
+            found = False
+            for sectname in ['ptestresult.rawlogs', 'ltpposixresult.rawlogs', 'ltpresult.rawlogs']:
+                rawlog = resultutils.generic_get_rawlogs(sectname, r)
+                if rawlog is not None:
+                    print(rawlog)
+                    found = True
+            if not found:
+                print('Raw ptest logs not found')
+                return 1
+
+        if args.raw_reproducible:
+            if 'reproducible.rawlogs' in r:
+                print(r['reproducible.rawlogs']['log'])
+            else:
+                print('Raw reproducible logs not found')
+                return 1
+
+        for ptest in args.ptest:
+            if not show_ptest(r, ptest, logger):
+                return 1
+
+        for reproducible in args.reproducible:
+            if not show_reproducible(r, reproducible, logger):
+                return 1
+
+def register_commands(subparsers):
+    """Register subcommands from this plugin"""
+    parser = subparsers.add_parser('log', help='show logs',
+                                   description='show the logs from test results',
+                                   group='analysis')
+    parser.set_defaults(func=log)
+    parser.add_argument('source',
+            help='the results file/directory/URL to import')
+    parser.add_argument('--list-ptest', action='store_true',
+            help='list the ptest test names')
+    parser.add_argument('--ptest', action='append', default=[],
+            help='show logs for a ptest')
+    parser.add_argument('--dump-ptest', metavar='DIR',
+            help='Dump all ptest log files to the specified directory.')
+    parser.add_argument('--reproducible', action='append', default=[],
+            help='show logs for a reproducible test')
+    parser.add_argument('--prepend-run', action='store_true',
+            help='''Dump ptest results to a subdirectory named after the test run when using --dump-ptest.
+            Required if more than one test run is present in the result file''')
+    parser.add_argument('--raw', action='store_true',
+            help='show raw (ptest) logs. Deprecated. Alias for "--raw-ptest"', dest='raw_ptest')
+    parser.add_argument('--raw-ptest', action='store_true',
+            help='show raw ptest log')
+    parser.add_argument('--raw-reproducible', action='store_true',
+            help='show raw reproducible build logs')
+
diff --git a/scripts/lib/resulttool/manualexecution.py b/scripts/lib/resulttool/manualexecution.py
index 64ec581a9f..ecb27c5933 100755
--- a/scripts/lib/resulttool/manualexecution.py
+++ b/scripts/lib/resulttool/manualexecution.py
@@ -2,130 +2,220 @@
 #
 # Copyright (c) 2018, Intel Corporation.
 #
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
+# SPDX-License-Identifier: GPL-2.0-only
 #
+
 import argparse
 import json
 import os
 import sys
 import datetime
 import re
+import copy
 from oeqa.core.runner import OETestResultJSONHelper
-from resulttool.resultsutils import load_json_file
+
+
+def load_json_file(f):
+    with open(f, "r") as filedata:
+        return json.load(filedata)
+
+def write_json_file(f, json_data):
+    os.makedirs(os.path.dirname(f), exist_ok=True)
+    with open(f, 'w') as filedata:
+        filedata.write(json.dumps(json_data, sort_keys=True, indent=4))
 
 class ManualTestRunner(object):
-    def __init__(self):
-        self.jdata = ''
-        self.test_module = ''
-        self.test_suite = ''
-        self.test_cases = ''
-        self.configuration = ''
-        self.starttime = ''
-        self.result_id = ''
-        self.write_dir = ''
-
-    def _get_testcases(self, file):
-        self.jdata = load_json_file(file)
-        self.test_cases = []
-        self.test_module = self.jdata[0]['test']['@alias'].split('.', 2)[0]
-        self.test_suite = self.jdata[0]['test']['@alias'].split('.', 2)[1]
-        for i in self.jdata:
-            self.test_cases.append(i['test']['@alias'].split('.', 2)[2])
-
+
+    def _get_test_module(self, case_file):
+        return os.path.basename(case_file).split('.')[0]
+
     def _get_input(self, config):
         while True:
             output = input('{} = '.format(config))
-            if re.match('^[a-zA-Z0-9_]+$', output):
+            if re.match('^[a-z0-9-.]+$', output):
                 break
-            print('Only alphanumeric and underscore are allowed. Please try again')
+            print('Only lowercase alphanumeric, hyphen and dot are allowed. Please try again')
         return output
 
-    def _create_config(self):
-        self.configuration = {}
+    def _get_available_config_options(self, config_options, test_module, target_config):
+        avail_config_options = None
+        if test_module in config_options:
+            avail_config_options = config_options[test_module].get(target_config)
+        return avail_config_options
+
+    def _choose_config_option(self, options):
         while True:
-            try:
-                conf_total = int(input('\nPlease provide how many configuration you want to save \n'))
+            output = input('{} = '.format('Option index number'))
+            if output in options:
                 break
-            except ValueError:
-                print('Invalid input. Please provide input as a number not character.')
-        for i in range(conf_total):
-            print('---------------------------------------------')
-            print('This is configuration #%s ' % (i + 1) + '. Please provide configuration name and its value')
-            print('---------------------------------------------')
-            name_conf = self._get_input('Configuration Name')
-            value_conf = self._get_input('Configuration Value')
-            print('---------------------------------------------\n')
-            self.configuration[name_conf.upper()] = value_conf
-        current_datetime = datetime.datetime.now()
-        self.starttime = current_datetime.strftime('%Y%m%d%H%M%S')
-        self.configuration['STARTTIME'] = self.starttime
-        self.configuration['TEST_TYPE'] = self.test_module
-
-    def _create_result_id(self):
-        self.result_id = 'manual_' + self.test_module + '_' + self.starttime
-
-    def _execute_test_steps(self, test_id):
+            print('Only integer index inputs from above available configuration options are allowed. Please try again.')
+        return options[output]
+
+    def _get_config(self, config_options, test_module):
+        from oeqa.utils.metadata import get_layers
+        from oeqa.utils.commands import get_bb_var
+        from resulttool.resultutils import store_map
+
+        layers = get_layers(get_bb_var('BBLAYERS'))
+        configurations = {}
+        configurations['LAYERS'] = layers
+        configurations['STARTTIME'] = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
+        configurations['TEST_TYPE'] = 'manual'
+        configurations['TEST_MODULE'] = test_module
+
+        extra_config = set(store_map['manual']) - set(configurations)
+        for config in sorted(extra_config):
+            avail_config_options = self._get_available_config_options(config_options, test_module, config)
+            if avail_config_options:
+                print('---------------------------------------------')
+                print('These are available configuration #%s options:' % config)
+                print('---------------------------------------------')
+                for option, _ in sorted(avail_config_options.items(), key=lambda x: int(x[0])):
+                    print('%s: %s' % (option, avail_config_options[option]))
+                print('Please select configuration option, enter the integer index number.')
+                value_conf = self._choose_config_option(avail_config_options)
+                print('---------------------------------------------\n')
+            else:
+                print('---------------------------------------------')
+                print('This is configuration #%s. Please provide configuration value(use "None" if not applicable).' % config)
+                print('---------------------------------------------')
+                value_conf = self._get_input('Configuration Value')
+                print('---------------------------------------------\n')
+            configurations[config] = value_conf
+        return configurations
+
+    def _execute_test_steps(self, case):
         test_result = {}
-        testcase_id = self.test_module + '.' + self.test_suite + '.' + self.test_cases[test_id]
-        total_steps = len(self.jdata[test_id]['test']['execution'].keys())
         print('------------------------------------------------------------------------')
-        print('Executing test case:' + '' '' + self.test_cases[test_id])
+        print('Executing test case: %s' % case['test']['@alias'])
         print('------------------------------------------------------------------------')
-        print('You have total ' + str(total_steps) + ' test steps to be executed.')
+        print('You have total %s test steps to be executed.' % len(case['test']['execution']))
        print('------------------------------------------------------------------------\n')
-        for step in sorted((self.jdata[test_id]['test']['execution']).keys()):
-            print('Step %s: ' % step + self.jdata[test_id]['test']['execution']['%s' % step]['action'])
-            print('Expected output: ' + self.jdata[test_id]['test']['execution']['%s' % step]['expected_results'])
-            done = input('\nPlease press ENTER when you are done to proceed to next step.\n')
+        for step, _ in sorted(case['test']['execution'].items(), key=lambda x: int(x[0])):
+            print('Step %s: %s' % (step, case['test']['execution'][step]['action']))
+            expected_output = case['test']['execution'][step]['expected_results']
+            if expected_output:
+                print('Expected output: %s' % expected_output)
         while True:
-            done = input('\nPlease provide test results: (P)assed/(F)ailed/(B)locked/(S)kipped? \n')
-            done = done.lower()
+            done = input('\nPlease provide test results: (P)assed/(F)ailed/(B)locked/(S)kipped? \n').lower()
             result_types = {'p':'PASSED',
-                            'f':'FAILED',
-                            'b':'BLOCKED',
-                            's':'SKIPPED'}
+                    'f':'FAILED',
+                    'b':'BLOCKED',
+                    's':'SKIPPED'}
             if done in result_types:
                 for r in result_types:
                     if done == r:
                         res = result_types[r]
                         if res == 'FAILED':
                             log_input = input('\nPlease enter the error and the description of the log: (Ex:log:211 Error Bitbake)\n')
-                            test_result.update({testcase_id: {'status': '%s' % res, 'log': '%s' % log_input}})
+                            test_result.update({case['test']['@alias']: {'status': '%s' % res, 'log': '%s' % log_input}})
                         else:
-                            test_result.update({testcase_id: {'status': '%s' % res}})
+                            test_result.update({case['test']['@alias']: {'status': '%s' % res}})
                 break
             print('Invalid input!')
         return test_result
 
-    def _create_write_dir(self):
-        basepath = os.environ['BUILDDIR']
-        self.write_dir = basepath + '/tmp/log/manual/'
+    def _get_write_dir(self):
+        return os.environ['BUILDDIR'] + '/tmp/log/manual/'
 
-    def run_test(self, file):
-        self._get_testcases(file)
-        self._create_config()
-        self._create_result_id()
-        self._create_write_dir()
+    def run_test(self, case_file, config_options_file, testcase_config_file):
+        test_module = self._get_test_module(case_file)
+        cases = load_json_file(case_file)
+        config_options = {}
+        if config_options_file:
+            config_options = load_json_file(config_options_file)
+        configurations = self._get_config(config_options, test_module)
+        result_id = 'manual_%s_%s' % (test_module, configurations['STARTTIME'])
         test_results = {}
-        print('\nTotal number of test cases in this test suite: ' + '%s\n' % len(self.jdata))
-        for i in range(0, len(self.jdata)):
-            test_result = self._execute_test_steps(i)
+        if testcase_config_file:
+            test_case_config = load_json_file(testcase_config_file)
+            test_case_to_execute = test_case_config['testcases']
+            for case in copy.deepcopy(cases) :
+                if case['test']['@alias'] not in test_case_to_execute:
+                    cases.remove(case)
+
+        print('\nTotal number of test cases in this test suite: %s\n' % len(cases))
+        for c in cases:
+            test_result = self._execute_test_steps(c)
             test_results.update(test_result)
-        return self.configuration, self.result_id, self.write_dir, test_results
+        return configurations, result_id, self._get_write_dir(), test_results
+
+    def _get_true_false_input(self, input_message):
+        yes_list = ['Y', 'YES']
+        no_list = ['N', 'NO']
+        while True:
+            more_config_option = input(input_message).upper()
+            if more_config_option in yes_list or more_config_option in no_list:
+                break
+            print('Invalid input!')
+        if more_config_option in no_list:
+            return False
+        return True
+
+    def make_config_option_file(self, logger, case_file, config_options_file):
+        config_options = {}
+        if config_options_file:
+            config_options = load_json_file(config_options_file)
+        new_test_module = self._get_test_module(case_file)
+        print('Creating configuration options file for test module: %s' % new_test_module)
+        new_config_options = {}
+
+        while True:
+            config_name = input('\nPlease provide test configuration to create:\n').upper()
+            new_config_options[config_name] = {}
+            while True:
+                config_value = self._get_input('Configuration possible option value')
+                config_option_index = len(new_config_options[config_name]) + 1
+                new_config_options[config_name][config_option_index] = config_value
+                more_config_option = self._get_true_false_input('\nIs there more configuration option input: (Y)es/(N)o\n')
+                if not more_config_option:
+                    break
+            more_config = self._get_true_false_input('\nIs there more configuration to create: (Y)es/(N)o\n')
+            if not more_config:
+                break
+
+        if new_config_options:
+            config_options[new_test_module] = new_config_options
+        if not config_options_file:
+            config_options_file = os.path.join(self._get_write_dir(), 'manual_config_options.json')
+        write_json_file(config_options_file, config_options)
+        logger.info('Configuration option file created at %s' % config_options_file)
+
+    def make_testcase_config_file(self, logger, case_file, testcase_config_file):
+        if testcase_config_file:
+            if os.path.exists(testcase_config_file):
+                print('\nTest configuration file with name %s already exists. Please provide a unique file name' % (testcase_config_file))
+                return 0
+
+        if not testcase_config_file:
+            testcase_config_file = os.path.join(self._get_write_dir(), "testconfig_new.json")
+
+        testcase_config = {}
+        cases = load_json_file(case_file)
+        new_test_module = self._get_test_module(case_file)
+        new_testcase_config = {}
+        new_testcase_config['testcases'] = []
+
+        print('\nAdd testcases for this configuration file:')
+        for case in cases:
+            print('\n' + case['test']['@alias'])
+            add_tc_config = self._get_true_false_input('\nDo you want to add this test case to test configuration : (Y)es/(N)o\n')
+            if add_tc_config:
+                new_testcase_config['testcases'].append(case['test']['@alias'])
+        write_json_file(testcase_config_file, new_testcase_config)
+        logger.info('Testcase Configuration file created at %s' % testcase_config_file)
 
 def manualexecution(args, logger):
     testrunner = ManualTestRunner()
-    get_configuration, get_result_id, get_write_dir, get_test_results = testrunner.run_test(args.file)
+    if args.make_config_options_file:
+        testrunner.make_config_option_file(logger, args.file, args.config_options_file)
+        return 0
+    if args.make_testcase_config_file:
+        testrunner.make_testcase_config_file(logger, args.file, args.testcase_config_file)
+        return 0
+    configurations, result_id, write_dir, test_results = testrunner.run_test(args.file, args.config_options_file, args.testcase_config_file)
     resultjsonhelper = OETestResultJSONHelper()
-    resultjsonhelper.dump_testresult_file(get_write_dir, get_configuration, get_result_id,
-                                          get_test_results)
+    resultjsonhelper.dump_testresult_file(write_dir, configurations, result_id, test_results)
     return 0
 
 def register_commands(subparsers):
@@ -134,4 +224,12 @@ def register_commands(subparsers):
                                          description='helper script for results populating during manual test execution. You can find manual test case JSON file in meta/lib/oeqa/manual/',
                                          group='manualexecution')
     parser_build.set_defaults(func=manualexecution)
-    parser_build.add_argument('file', help='specify path to manual test case JSON file.Note: Please use \"\" to encapsulate the file path.')
\ No newline at end of file
+    parser_build.add_argument('file', help='specify path to manual test case JSON file.Note: Please use \"\" to encapsulate the file path.')
+    parser_build.add_argument('-c', '--config-options-file', default='',
+                              help='the config options file to import and used as available configuration option selection or make config option file')
+    parser_build.add_argument('-m', '--make-config-options-file', action='store_true',
+                              help='make the configuration options file based on provided inputs')
+    parser_build.add_argument('-t', '--testcase-config-file', default='',
+                              help='the testcase configuration file to enable user to run a selected set of test case or make a testcase configuration file')
+    parser_build.add_argument('-d', '--make-testcase-config-file', action='store_true',
+                              help='make the testcase configuration file to run a set of test cases based on user selection')
\ No newline at end of file
diff --git a/scripts/lib/resulttool/merge.py b/scripts/lib/resulttool/merge.py
index 1d9cfafd41..18b4825a18 100644
--- a/scripts/lib/resulttool/merge.py
+++ b/scripts/lib/resulttool/merge.py
@@ -1,71 +1,46 @@
-# test result tool - merge multiple testresults.json files
+# resulttool - merge multiple testresults.json files into a file or directory
 #
 # Copyright (c) 2019, Intel Corporation.
+# Copyright (c) 2019, Linux Foundation
 #
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
 #
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
-#
-from resulttool.resultsutils import load_json_file, get_dict_value, dump_json_data
+
 import os
 import json
-
-class ResultsMerge(object):
-
-    def get_test_results(self, logger, file, result_id):
-        results = load_json_file(file)
-        if result_id:
-            result = get_dict_value(logger, results, result_id)
-            if result:
-                return {result_id: result}
-            return result
-        return results
-
-    def merge_results(self, base_results, target_results):
-        for k in target_results:
-            base_results[k] = target_results[k]
-        return base_results
-
-    def _get_write_dir(self):
-        basepath = os.environ['BUILDDIR']
-        return basepath + '/tmp/'
-
-    def dump_merged_results(self, results, output_dir):
-        file_output_dir = output_dir if output_dir else self._get_write_dir()
-        dump_json_data(file_output_dir, 'testresults.json', results)
-        print('Successfully merged results to: %s' % os.path.join(file_output_dir, 'testresults.json'))
-
-    def run(self, logger, base_result_file, target_result_file, target_result_id, output_dir):
-        base_results = self.get_test_results(logger, base_result_file, '')
-        target_results = self.get_test_results(logger, target_result_file, target_result_id)
-        if base_results and target_results:
-            merged_results = self.merge_results(base_results, target_results)
-            self.dump_merged_results(merged_results, output_dir)
+import resulttool.resultutils as resultutils
 
 def merge(args, logger):
-    merge = ResultsMerge()
-    merge.run(logger, args.base_result_file, args.target_result_file, args.target_result_id, args.output_dir)
+    configvars = {}
+    if not args.not_add_testseries:
+        configvars = resultutils.extra_configvars.copy()
+    if args.executed_by:
+        configvars['EXECUTED_BY'] = args.executed_by
+    if resultutils.is_url(args.target_results) or os.path.isdir(args.target_results):
+        results = resultutils.load_resultsdata(args.target_results, configmap=resultutils.store_map, configvars=configvars)
+        resultutils.append_resultsdata(results, args.base_results, configmap=resultutils.store_map, configvars=configvars)
+        resultutils.save_resultsdata(results, args.target_results)
+    else:
+        results = resultutils.load_resultsdata(args.base_results, configmap=resultutils.flatten_map, configvars=configvars)
+        if os.path.exists(args.target_results):
+            resultutils.append_resultsdata(results, args.target_results, configmap=resultutils.flatten_map, configvars=configvars)
+        resultutils.save_resultsdata(results, os.path.dirname(args.target_results), fn=os.path.basename(args.target_results))
+
+    logger.info('Merged results to %s' % os.path.dirname(args.target_results))
+
     return 0
 
 def register_commands(subparsers):
     """Register subcommands from this plugin"""
-    parser_build = subparsers.add_parser('merge', help='merge test results',
-                                         description='merge results from multiple files',
+    parser_build = subparsers.add_parser('merge', help='merge test result files/directories/URLs',
+                                         description='merge the results from multiple files/directories/URLs into the target file or directory',
                                          group='setup')
     parser_build.set_defaults(func=merge)
-    parser_build.add_argument('base_result_file',
-                              help='base result file provide the base result set')
-    parser_build.add_argument('target_result_file',
-                              help='target result file provide the target result set for merging into the '
-                                   'base result set')
-    parser_build.add_argument('-t', '--target-result-id', default='',
-                              help='(optional) default merge all result sets available from target to base '
-                                   'unless specific target result id was provided')
-    parser_build.add_argument('-o', '--output-dir', default='',
-                              help='(optional) default write merged results to <poky>/build/tmp/ unless specific '
-                                   'output directory was provided')
+    parser_build.add_argument('base_results',
+                              help='the results file/directory/URL to import')
+    parser_build.add_argument('target_results',
+                              help='the target file or directory to merge the base_results with')
+    parser_build.add_argument('-t', '--not-add-testseries', action='store_true',
+                              help='do not add testseries configuration to results')
+    parser_build.add_argument('-x', '--executed-by', default='',
+                              help='add executed-by configuration to each result file')
diff --git a/scripts/lib/resulttool/regression.py b/scripts/lib/resulttool/regression.py
index bee3fb011a..10e7d13841 100644
--- a/scripts/lib/resulttool/regression.py
+++ b/scripts/lib/resulttool/regression.py
@@ -1,181 +1,435 @@
-# test result tool - regression analysis
+# resulttool - regression analysis
 #
 # Copyright (c) 2019, Intel Corporation.
+# Copyright (c) 2019, Linux Foundation
 #
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
 #
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
-#
-from resulttool.resultsutils import load_json_file, get_dict_value, pop_dict_element
-import json
-
-class ResultsRegressionSelector(object):
-
-    def get_results_unique_configurations(self, logger, results):
-        unique_configurations_map = {"oeselftest": ['TEST_TYPE', 'HOST_DISTRO', 'MACHINE'],
-                                     "runtime": ['TEST_TYPE', 'IMAGE_BASENAME', 'MACHINE'],
-                                     "sdk": ['TEST_TYPE', 'IMAGE_BASENAME', 'MACHINE', 'SDKMACHINE'],
-                                     "sdkext": ['TEST_TYPE', 'IMAGE_BASENAME', 'MACHINE', 'SDKMACHINE']}
-        results_unique_configs = {}
-        for k in results:
-            result = results[k]
-            result_configs = get_dict_value(logger, result, 'configuration')
-            result_test_type = get_dict_value(logger, result_configs, 'TEST_TYPE')
-            unique_configuration_keys = get_dict_value(logger, unique_configurations_map, result_test_type)
-            result_unique_config = {}
-            for ck in unique_configuration_keys:
-                config_value = get_dict_value(logger, result_configs, ck)
-                if config_value:
-                    result_unique_config[ck] = config_value
-            results_unique_configs[k] = result_unique_config
-        return results_unique_configs
-
-    def get_regression_base_target_pair(self, logger, base_results, target_results):
-        base_configs = self.get_results_unique_configurations(logger, base_results)
-        logger.debug('Retrieved base configuration: config=%s' % base_configs)
-        target_configs = self.get_results_unique_configurations(logger, target_results)
-        logger.debug('Retrieved target configuration: config=%s' % target_configs)
-        regression_pair = {}
-        for bk in base_configs:
-            base_config = base_configs[bk]
-            for tk in target_configs:
-                target_config = target_configs[tk]
-                if base_config == target_config:
-                    if bk in regression_pair:
-                        regression_pair[bk].append(tk)
-                    else:
-                        regression_pair[bk] = [tk]
-        return regression_pair
-
-    def run_regression_with_regression_pairing(self, logger, regression_pair, base_results, target_results):
-        regression = ResultsRegression()
-        for base in regression_pair:
-            for target in regression_pair[base]:
-                print('Getting regression for base=%s target=%s' % (base, target))
-                regression.run(logger, base_results[base], target_results[target])
-
-class ResultsRegression(object):
-
-    def print_regression_result(self, result):
-        if result:
-            print('============================Start Regression============================')
-            print('Only print regression if base status not equal target')
-            print('<test case> : <base status> -> <target status>')
-            print('========================================================================')
-            for k in result:
-                print(k, ':', result[k]['base'], '->', result[k]['target'])
-            print('==============================End Regression==============================')
-
-    def get_regression_result(self, logger, base_result, target_result):
-        base_result = get_dict_value(logger, base_result, 'result')
-        target_result = get_dict_value(logger, target_result, 'result')
-        result = {}
-        if base_result and target_result:
-            logger.debug('Getting regression result')
-            for k in base_result:
-                base_testcase = base_result[k]
-                base_status = get_dict_value(logger, base_testcase, 'status')
-                if base_status:
-                    target_testcase = get_dict_value(logger, target_result, k)
-                    target_status = get_dict_value(logger, target_testcase, 'status')
-                    if base_status != target_status:
-                        result[k] = {'base': base_status, 'target': target_status}
-                else:
-                    logger.error('Failed to retrieved base test case status: %s' % k)
-        return result
-
-    def run(self, logger, base_result, target_result):
-        if base_result and target_result:
-            result = self.get_regression_result(logger, base_result, target_result)
-            logger.debug('Retrieved regression result =%s' % result)
-            self.print_regression_result(result)
+
+import resulttool.resultutils as resultutils
+
+from oeqa.utils.git import GitRepo
+import oeqa.utils.gitarchive as gitarchive
+
+METADATA_MATCH_TABLE = {
+    "oeselftest": "OESELFTEST_METADATA"
+}
+
+OESELFTEST_METADATA_GUESS_TABLE={
+    "trigger-build-posttrigger": {
+        "run_all_tests": False,
+        "run_tests":["buildoptions.SourceMirroring.test_yocto_source_mirror"],
+        "skips": None,
+        "machine": None,
+        "select_tags":None,
+        "exclude_tags": None
+    },
+    "reproducible": {
+        "run_all_tests": False,
+        "run_tests":["reproducible"],
+        "skips": None,
+        "machine": None,
+        "select_tags":None,
+        "exclude_tags": None
+    },
+    "arch-qemu-quick": {
+        "run_all_tests": True,
+        "run_tests":None,
+        "skips": None,
+        "machine": None,
+        "select_tags":["machine"],
+        "exclude_tags": None
+    },
+    "arch-qemu-full-x86-or-x86_64": {
+        "run_all_tests": True,
+        "run_tests":None,
+        "skips": None,
+        "machine": None,
+        "select_tags":["machine", "toolchain-system"],
+        "exclude_tags": None
+    },
+    "arch-qemu-full-others": {
+        "run_all_tests": True,
+        "run_tests":None,
+        "skips": None,
+        "machine": None,
+        "select_tags":["machine", "toolchain-user"],
+        "exclude_tags": None
+    },
+    "selftest": {
+        "run_all_tests": True,
+        "run_tests":None,
+        "skips": ["distrodata.Distrodata.test_checkpkg", "buildoptions.SourceMirroring.test_yocto_source_mirror", "reproducible"],
+        "machine": None,
+        "select_tags":None,
+        "exclude_tags": ["machine", "toolchain-system", "toolchain-user"]
+    },
+    "bringup": {
+        "run_all_tests": True,
+        "run_tests":None,
+        "skips": ["distrodata.Distrodata.test_checkpkg", "buildoptions.SourceMirroring.test_yocto_source_mirror"],
+        "machine": None,
+        "select_tags":None,
+        "exclude_tags": ["machine", "toolchain-system", "toolchain-user"]
+    }
+}
+
+STATUS_STRINGS = {
+    "None": "No matching test result"
+}
+
+REGRESSIONS_DISPLAY_LIMIT=50
+
+MISSING_TESTS_BANNER =   "-------------------------- Missing tests --------------------------"
+ADDITIONAL_DATA_BANNER = "--------------------- Matches and improvements --------------------"
+
+def test_has_at_least_one_matching_tag(test, tag_list):
+    return "oetags" in test and any(oetag in tag_list for oetag in test["oetags"])
+
+def all_tests_have_at_least_one_matching_tag(results, tag_list):
+    return all(test_has_at_least_one_matching_tag(test_result, tag_list) or test_name.startswith("ptestresult") for (test_name, test_result) in results.items())
+
+def any_test_have_any_matching_tag(results, tag_list):
+    return any(test_has_at_least_one_matching_tag(test, tag_list) for test in results.values())
+
+def have_skipped_test(result, test_prefix):
+    return all( result[test]['status'] == "SKIPPED" for test in result if test.startswith(test_prefix))
+
+def have_all_tests_skipped(result, test_prefixes_list):
+    return all(have_skipped_test(result, test_prefix) for test_prefix in test_prefixes_list)
+
+def guess_oeselftest_metadata(results):
+    """
+    When an oeselftest test result is lacking OESELFTEST_METADATA, we can try to guess it based on results content.
+    Check results for specific values (absence/presence of oetags, number and name of executed tests...),
+    and if it matches one of known configuration from autobuilder configuration, apply guessed OSELFTEST_METADATA
+    to it to allow proper test filtering.
+    This guessing process is tightly coupled to config.json in autobuilder. It should trigger less and less,
+    as new tests will have OESELFTEST_METADATA properly appended at test reporting time
+    """
+
+    if len(results) == 1 and "buildoptions.SourceMirroring.test_yocto_source_mirror" in results:
+        return OESELFTEST_METADATA_GUESS_TABLE['trigger-build-posttrigger']
+    elif all(result.startswith("reproducible") for result in results):
+        return OESELFTEST_METADATA_GUESS_TABLE['reproducible']
+    elif all_tests_have_at_least_one_matching_tag(results, ["machine"]):
+        return OESELFTEST_METADATA_GUESS_TABLE['arch-qemu-quick']
+    elif all_tests_have_at_least_one_matching_tag(results, ["machine", "toolchain-system"]):
+        return OESELFTEST_METADATA_GUESS_TABLE['arch-qemu-full-x86-or-x86_64']
+    elif all_tests_have_at_least_one_matching_tag(results, ["machine", "toolchain-user"]):
+        return OESELFTEST_METADATA_GUESS_TABLE['arch-qemu-full-others']
+    elif not any_test_have_any_matching_tag(results, ["machine", "toolchain-user", "toolchain-system"]):
+        if have_all_tests_skipped(results, ["distrodata.Distrodata.test_checkpkg", "buildoptions.SourceMirroring.test_yocto_source_mirror", "reproducible"]):
+            return OESELFTEST_METADATA_GUESS_TABLE['selftest']
+        elif have_all_tests_skipped(results, ["distrodata.Distrodata.test_checkpkg", "buildoptions.SourceMirroring.test_yocto_source_mirror"]):
+            return OESELFTEST_METADATA_GUESS_TABLE['bringup']
+
+    return None
+
+
+def metadata_matches(base_configuration, target_configuration):
+    """
+    For passed base and target, check test type. If test type matches one of
+    properties described in METADATA_MATCH_TABLE, compare metadata if it is
+    present in base. Return true if metadata matches, or if base lacks some
+    data (either TEST_TYPE or the corresponding metadata)
+    """
+    test_type = base_configuration.get('TEST_TYPE')
+    if test_type not in METADATA_MATCH_TABLE:
+        return True
+
+    metadata_key = METADATA_MATCH_TABLE.get(test_type)
+    if target_configuration.get(metadata_key) != base_configuration.get(metadata_key):
+        return False
+
+    return True
+
+
+def machine_matches(base_configuration, target_configuration):
+    return base_configuration.get('MACHINE') == target_configuration.get('MACHINE')
+
+
+def can_be_compared(logger, base, target):
+    """
+    Some tests are not relevant to be compared, for example some oeselftest
+    run with different tests sets or parameters. Return true if tests can be
+    compared
+    """
+    ret = True
+    base_configuration = base['configuration']
+    target_configuration = target['configuration']
+
+    # Older test results lack proper OESELFTEST_METADATA: if not present, try to guess it based on tests results.
+ if base_configuration.get('TEST_TYPE') == 'oeselftest' and 'OESELFTEST_METADATA' not in base_configuration: + guess = guess_oeselftest_metadata(base['result']) + if guess is None: + logger.error(f"ERROR: did not manage to guess oeselftest metadata for {base_configuration['STARTTIME']}") else: - logger.error('Input data objects must not be empty (base_result=%s, target_result=%s)' % - (base_result, target_result)) + logger.debug(f"Enriching {base_configuration['STARTTIME']} with {guess}") + base_configuration['OESELFTEST_METADATA'] = guess + if target_configuration.get('TEST_TYPE') == 'oeselftest' and 'OESELFTEST_METADATA' not in target_configuration: + guess = guess_oeselftest_metadata(target['result']) + if guess is None: + logger.error(f"ERROR: did not manage to guess oeselftest metadata for {target_configuration['STARTTIME']}") + else: + logger.debug(f"Enriching {target_configuration['STARTTIME']} with {guess}") + target_configuration['OESELFTEST_METADATA'] = guess -def get_results_from_directory(logger, source_dir): - from resulttool.merge import ResultsMerge - from resulttool.resultsutils import get_directory_files - result_files = get_directory_files(source_dir, ['.git'], 'testresults.json') - base_results = {} - for file in result_files: - merge = ResultsMerge() - results = merge.get_test_results(logger, file, '') - base_results = merge.merge_results(base_results, results) - return base_results - -def remove_testcases_to_optimize_regression_runtime(logger, results): - test_case_removal = ['ptestresult.rawlogs', 'ptestresult.sections'] - for r in test_case_removal: - for k in results: - result = get_dict_value(logger, results[k], 'result') - pop_dict_element(logger, result, r) - -def regression_file(args, logger): - base_results = load_json_file(args.base_result_file) - print('Successfully loaded base test results from: %s' % args.base_result_file) - target_results = load_json_file(args.target_result_file) - print('Successfully loaded target test results from: %s' % args.target_result_file) - remove_testcases_to_optimize_regression_runtime(logger, base_results) - remove_testcases_to_optimize_regression_runtime(logger, target_results) - if args.base_result_id and args.target_result_id: - base_result = get_dict_value(logger, base_results, base_result_id) - print('Getting base test result with result_id=%s' % base_result_id) - target_result = get_dict_value(logger, target_results, target_result_id) - print('Getting target test result with result_id=%s' % target_result_id) - regression = ResultsRegression() - regression.run(logger, base_result, target_result) + # Test runs with LTP results in should only be compared with other runs with LTP tests in them + if base_configuration.get('TEST_TYPE') == 'runtime' and any(result.startswith("ltpresult") for result in base['result']): + ret = target_configuration.get('TEST_TYPE') == 'runtime' and any(result.startswith("ltpresult") for result in target['result']) + + return ret and metadata_matches(base_configuration, target_configuration) \ + and machine_matches(base_configuration, target_configuration) + +def get_status_str(raw_status): + raw_status_lower = raw_status.lower() if raw_status else "None" + return STATUS_STRINGS.get(raw_status_lower, raw_status) + +def get_additional_info_line(new_pass_count, new_tests): + result=[] + if new_tests: + result.append(f'+{new_tests} test(s) present') + if new_pass_count: + result.append(f'+{new_pass_count} test(s) now passing') + + if not result: + return "" + + return ' -> ' + ', '.join(result) + 
'\n' + +def compare_result(logger, base_name, target_name, base_result, target_result, display_limit=None): + base_result = base_result.get('result') + target_result = target_result.get('result') + result = {} + new_tests = 0 + regressions = {} + resultstring = "" + new_tests = 0 + new_pass_count = 0 + + display_limit = int(display_limit) if display_limit else REGRESSIONS_DISPLAY_LIMIT + + if base_result and target_result: + for k in base_result: + base_testcase = base_result[k] + base_status = base_testcase.get('status') + if base_status: + target_testcase = target_result.get(k, {}) + target_status = target_testcase.get('status') + if base_status != target_status: + result[k] = {'base': base_status, 'target': target_status} + else: + logger.error('Failed to retrieved base test case status: %s' % k) + + # Also count new tests that were not present in base results: it + # could be newly added tests, but it could also highlights some tests + # renames or fixed faulty ptests + for k in target_result: + if k not in base_result: + new_tests += 1 + if result: + new_pass_count = sum(test['target'] is not None and test['target'].startswith("PASS") for test in result.values()) + # Print a regression report only if at least one test has a regression status (FAIL, SKIPPED, absent...) + if new_pass_count < len(result): + resultstring = "Regression: %s\n %s\n" % (base_name, target_name) + for k in sorted(result): + if not result[k]['target'] or not result[k]['target'].startswith("PASS"): + # Differentiate each ptest kind when listing regressions + key_parts = k.split('.') + key = '.'.join(key_parts[:2]) if k.startswith('ptest') else key_parts[0] + # Append new regression to corresponding test family + regressions[key] = regressions.setdefault(key, []) + [' %s: %s -> %s\n' % (k, get_status_str(result[k]['base']), get_status_str(result[k]['target']))] + resultstring += f" Total: {sum([len(regressions[r]) for r in regressions])} new regression(s):\n" + for k in regressions: + resultstring += f" {len(regressions[k])} regression(s) for {k}\n" + count_to_print=min([display_limit, len(regressions[k])]) if display_limit > 0 else len(regressions[k]) + resultstring += ''.join(regressions[k][:count_to_print]) + if count_to_print < len(regressions[k]): + resultstring+=' [...]\n' + if new_pass_count > 0: + resultstring += f' Additionally, {new_pass_count} previously failing test(s) is/are now passing\n' + if new_tests > 0: + resultstring += f' Additionally, {new_tests} new test(s) is/are present\n' + else: + resultstring = "%s\n%s\n" % (base_name, target_name) + result = None else: - regression = ResultsRegressionSelector() - regression_pair = regression.get_regression_base_target_pair(logger, base_results, target_results) - logger.debug('Retrieved regression pair=%s' % regression_pair) - regression.run_regression_with_regression_pairing(logger, regression_pair, base_results, target_results) - return 0 + resultstring = "%s\n%s\n" % (base_name, target_name) + + if not result: + additional_info = get_additional_info_line(new_pass_count, new_tests) + if additional_info: + resultstring += additional_info -def regression_directory(args, logger): - base_results = get_results_from_directory(logger, args.base_result_directory) - target_results = get_results_from_directory(logger, args.target_result_directory) - remove_testcases_to_optimize_regression_runtime(logger, base_results) - remove_testcases_to_optimize_regression_runtime(logger, target_results) - regression = ResultsRegressionSelector() - regression_pair = 
regression.get_regression_base_target_pair(logger, base_results, target_results) - logger.debug('Retrieved regression pair=%s' % regression_pair) - regression.run_regression_with_regression_pairing(logger, regression_pair, base_results, target_results) + return result, resultstring + +def get_results(logger, source): + return resultutils.load_resultsdata(source, configmap=resultutils.regression_map) + +def regression(args, logger): + base_results = get_results(logger, args.base_result) + target_results = get_results(logger, args.target_result) + + regression_common(args, logger, base_results, target_results) + +# Some test case naming is poor and contains random strings, particularly lttng/babeltrace. +# Truncating the test names works since they contain file and line number identifiers +# which allows us to match them without the random components. +def fixup_ptest_names(results, logger): + for r in results: + for i in results[r]: + tests = list(results[r][i]['result'].keys()) + for test in tests: + new = None + if test.startswith(("ptestresult.lttng-tools.", "ptestresult.babeltrace.", "ptestresult.babeltrace2")) and "_-_" in test: + new = test.split("_-_")[0] + elif test.startswith(("ptestresult.curl.")) and "__" in test: + new = test.split("__")[0] + elif test.startswith(("ptestresult.dbus.")) and "__" in test: + new = test.split("__")[0] + elif test.startswith("ptestresult.binutils") and "build-st-" in test: + new = test.split(" ")[0] + elif test.startswith("ptestresult.gcc") and "/tmp/runtest." in test: + new = ".".join(test.split(".")[:2]) + if new: + results[r][i]['result'][new] = results[r][i]['result'][test] + del results[r][i]['result'][test] + +def regression_common(args, logger, base_results, target_results): + if args.base_result_id: + base_results = resultutils.filter_resultsdata(base_results, args.base_result_id) + if args.target_result_id: + target_results = resultutils.filter_resultsdata(target_results, args.target_result_id) + + fixup_ptest_names(base_results, logger) + fixup_ptest_names(target_results, logger) + + matches = [] + regressions = [] + notfound = [] + + for a in base_results: + if a in target_results: + base = list(base_results[a].keys()) + target = list(target_results[a].keys()) + # We may have multiple base/targets which are for different configurations. 
Start by + # removing any pairs which match + for c in base.copy(): + for b in target.copy(): + if not can_be_compared(logger, base_results[a][c], target_results[a][b]): + continue + res, resstr = compare_result(logger, c, b, base_results[a][c], target_results[a][b], args.limit) + if not res: + matches.append(resstr) + base.remove(c) + target.remove(b) + break + # Should only now see regressions, we may not be able to match multiple pairs directly + for c in base: + for b in target: + if not can_be_compared(logger, base_results[a][c], target_results[a][b]): + continue + res, resstr = compare_result(logger, c, b, base_results[a][c], target_results[a][b], args.limit) + if res: + regressions.append(resstr) + else: + notfound.append("%s not found in target" % a) + print("\n".join(sorted(regressions))) + print("\n" + MISSING_TESTS_BANNER + "\n") + print("\n".join(sorted(notfound))) + print("\n" + ADDITIONAL_DATA_BANNER + "\n") + print("\n".join(sorted(matches))) return 0 def regression_git(args, logger): - from resulttool.resultsutils import checkout_git_dir base_results = {} target_results = {} - if checkout_git_dir(args.source_dir, args.base_git_branch): - base_results = get_results_from_directory(logger, args.source_dir) - if checkout_git_dir(args.source_dir, args.target_git_branch): - target_results = get_results_from_directory(logger, args.source_dir) - if base_results and target_results: - remove_testcases_to_optimize_regression_runtime(logger, base_results) - remove_testcases_to_optimize_regression_runtime(logger, target_results) - regression = ResultsRegressionSelector() - regression_pair = regression.get_regression_base_target_pair(logger, base_results, target_results) - logger.debug('Retrieved regression pair=%s' % regression_pair) - regression.run_regression_with_regression_pairing(logger, regression_pair, base_results, target_results) + + tag_name = "{branch}/{commit_number}-g{commit}/{tag_number}" + repo = GitRepo(args.repo) + + revs = gitarchive.get_test_revs(logger, repo, tag_name, branch=args.branch) + + if args.branch2: + revs2 = gitarchive.get_test_revs(logger, repo, tag_name, branch=args.branch2) + if not len(revs2): + logger.error("No revisions found to compare against") + return 1 + if not len(revs): + logger.error("No revision to report on found") + return 1 + else: + if len(revs) < 2: + logger.error("Only %d tester revisions found, unable to generate report" % len(revs)) + return 1 + + # Pick revisions + if args.commit: + if args.commit_number: + logger.warning("Ignoring --commit-number as --commit was specified") + index1 = gitarchive.rev_find(revs, 'commit', args.commit) + elif args.commit_number: + index1 = gitarchive.rev_find(revs, 'commit_number', args.commit_number) + else: + index1 = len(revs) - 1 + + if args.branch2: + revs2.append(revs[index1]) + index1 = len(revs2) - 1 + revs = revs2 + + if args.commit2: + if args.commit_number2: + logger.warning("Ignoring --commit-number2 as --commit2 was specified") + index2 = gitarchive.rev_find(revs, 'commit', args.commit2) + elif args.commit_number2: + index2 = gitarchive.rev_find(revs, 'commit_number', args.commit_number2) + else: + if index1 > 0: + index2 = index1 - 1 + # Find the closest matching commit number for comparision + # In future we could check the commit is a common ancestor and + # continue back if not but this good enough for now + while index2 > 0 and revs[index2].commit_number > revs[index1].commit_number: + index2 = index2 - 1 + else: + logger.error("Unable to determine the other commit, use " + 
"--commit2 or --commit-number2 to specify it") + return 1 + + logger.info("Comparing:\n%s\nto\n%s\n" % (revs[index1], revs[index2])) + + base_results = resultutils.git_get_result(repo, revs[index1][2]) + target_results = resultutils.git_get_result(repo, revs[index2][2]) + + regression_common(args, logger, base_results, target_results) + return 0 def register_commands(subparsers): """Register subcommands from this plugin""" - parser_build = subparsers.add_parser('regression-file', help='regression file analysis', + + parser_build = subparsers.add_parser('regression', help='regression file/directory analysis', + description='regression analysis comparing the base set of results to the target results', + group='analysis') + parser_build.set_defaults(func=regression) + parser_build.add_argument('base_result', + help='base result file/directory/URL for the comparison') + parser_build.add_argument('target_result', + help='target result file/directory/URL to compare with') + parser_build.add_argument('-b', '--base-result-id', default='', + help='(optional) filter the base results to this result ID') + parser_build.add_argument('-t', '--target-result-id', default='', + help='(optional) filter the target results to this result ID') + + parser_build = subparsers.add_parser('regression-git', help='regression git analysis', description='regression analysis comparing base result set to target ' 'result set', group='analysis') - parser_build.set_defaults(func=regression_file) - parser_build.add_argument('base_result_file', - help='base result file provide the base result set') - parser_build.add_argument('target_result_file', - help='target result file provide the target result set for comparison with base result') + parser_build.set_defaults(func=regression_git) + parser_build.add_argument('repo', + help='the git repository containing the data') parser_build.add_argument('-b', '--base-result-id', default='', help='(optional) default select regression based on configurations unless base result ' 'id was provided') @@ -183,26 +437,11 @@ def register_commands(subparsers): help='(optional) default select regression based on configurations unless target result ' 'id was provided') - parser_build = subparsers.add_parser('regression-dir', help='regression directory analysis', - description='regression analysis comparing base result set to target ' - 'result set', - group='analysis') - parser_build.set_defaults(func=regression_directory) - parser_build.add_argument('base_result_directory', - help='base result directory provide the files for base result set') - parser_build.add_argument('target_result_directory', - help='target result file provide the files for target result set for comparison with ' - 'base result') + parser_build.add_argument('--branch', '-B', default='master', help="Branch to find commit in") + parser_build.add_argument('--branch2', help="Branch to find comparision revisions in") + parser_build.add_argument('--commit', help="Revision to search for") + parser_build.add_argument('--commit-number', help="Revision number to search for, redundant if --commit is specified") + parser_build.add_argument('--commit2', help="Revision to compare with") + parser_build.add_argument('--commit-number2', help="Revision number to compare with, redundant if --commit2 is specified") + parser_build.add_argument('-l', '--limit', default=REGRESSIONS_DISPLAY_LIMIT, help="Maximum number of changes to display per test. 
Can be set to 0 to print all changes") - parser_build = subparsers.add_parser('regression-git', help='regression git analysis', - description='regression analysis comparing base result set to target ' - 'result set', - group='analysis') - parser_build.set_defaults(func=regression_git) - parser_build.add_argument('source_dir', - help='source directory that contain the git repository with test result files') - parser_build.add_argument('base_git_branch', - help='base git branch that provide the files for base result set') - parser_build.add_argument('target_git_branch', - help='target git branch that provide the files for target result set for comparison with ' - 'base result') diff --git a/scripts/lib/resulttool/report.py b/scripts/lib/resulttool/report.py index ab5de1f3a7..a349510ab8 100644 --- a/scripts/lib/resulttool/report.py +++ b/scripts/lib/resulttool/report.py @@ -1,113 +1,315 @@ # test result tool - report text based test results # # Copyright (c) 2019, Intel Corporation. +# Copyright (c) 2019, Linux Foundation # -# This program is free software; you can redistribute it and/or modify it -# under the terms and conditions of the GNU General Public License, -# version 2, as published by the Free Software Foundation. -# -# This program is distributed in the hope it will be useful, but WITHOUT -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -# more details. +# SPDX-License-Identifier: GPL-2.0-only # + import os import glob import json -from resulttool.resultsutils import checkout_git_dir, load_json_file, get_dict_value, get_directory_files +import resulttool.resultutils as resultutils +from oeqa.utils.git import GitRepo +import oeqa.utils.gitarchive as gitarchive + class ResultsTextReport(object): + def __init__(self): + self.ptests = {} + self.ltptests = {} + self.ltpposixtests = {} + self.result_types = {'passed': ['PASSED', 'passed', 'PASS', 'XFAIL'], + 'failed': ['FAILED', 'failed', 'FAIL', 'ERROR', 'error', 'UNKNOWN', 'XPASS'], + 'skipped': ['SKIPPED', 'skipped', 'UNSUPPORTED', 'UNTESTED', 'UNRESOLVED']} + + + def handle_ptest_result(self, k, status, result, machine): + if machine not in self.ptests: + self.ptests[machine] = {} + + if k == 'ptestresult.sections': + # Ensure tests without any test results still show up on the report + for suite in result['ptestresult.sections']: + if suite not in self.ptests[machine]: + self.ptests[machine][suite] = { + 'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', + 'failed_testcases': [], "testcases": set(), + } + if 'duration' in result['ptestresult.sections'][suite]: + self.ptests[machine][suite]['duration'] = result['ptestresult.sections'][suite]['duration'] + if 'timeout' in result['ptestresult.sections'][suite]: + self.ptests[machine][suite]['duration'] += " T" + return True + + # process test result + try: + _, suite, test = k.split(".", 2) + except ValueError: + return True + + # Handle 'glib-2.0' + if 'ptestresult.sections' in result and suite not in result['ptestresult.sections']: + try: + _, suite, suite1, test = k.split(".", 3) + if suite + "." + suite1 in result['ptestresult.sections']: + suite = suite + "." 
diff --git a/scripts/lib/resulttool/report.py b/scripts/lib/resulttool/report.py
index ab5de1f3a7..a349510ab8 100644
--- a/scripts/lib/resulttool/report.py
+++ b/scripts/lib/resulttool/report.py
@@ -1,113 +1,315 @@
 # test result tool - report text based test results
 #
 # Copyright (c) 2019, Intel Corporation.
+# Copyright (c) 2019, Linux Foundation
 #
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
+# SPDX-License-Identifier: GPL-2.0-only
 #
+
 import os
 import glob
 import json
-from resulttool.resultsutils import checkout_git_dir, load_json_file, get_dict_value, get_directory_files
+import resulttool.resultutils as resultutils
+from oeqa.utils.git import GitRepo
+import oeqa.utils.gitarchive as gitarchive
+
 class ResultsTextReport(object):
+    def __init__(self):
+        self.ptests = {}
+        self.ltptests = {}
+        self.ltpposixtests = {}
+        self.result_types = {'passed': ['PASSED', 'passed', 'PASS', 'XFAIL'],
+                             'failed': ['FAILED', 'failed', 'FAIL', 'ERROR', 'error', 'UNKNOWN', 'XPASS'],
+                             'skipped': ['SKIPPED', 'skipped', 'UNSUPPORTED', 'UNTESTED', 'UNRESOLVED']}
+
+    def handle_ptest_result(self, k, status, result, machine):
+        if machine not in self.ptests:
+            self.ptests[machine] = {}
+
+        if k == 'ptestresult.sections':
+            # Ensure tests without any test results still show up on the report
+            for suite in result['ptestresult.sections']:
+                if suite not in self.ptests[machine]:
+                    self.ptests[machine][suite] = {
+                        'passed': 0, 'failed': 0, 'skipped': 0, 'duration': '-',
+                        'failed_testcases': [], "testcases": set(),
+                    }
+                if 'duration' in result['ptestresult.sections'][suite]:
+                    self.ptests[machine][suite]['duration'] = result['ptestresult.sections'][suite]['duration']
+                if 'timeout' in result['ptestresult.sections'][suite]:
+                    self.ptests[machine][suite]['duration'] += " T"
+            return True
+
+        # process test result
+        try:
+            _, suite, test = k.split(".", 2)
+        except ValueError:
+            return True
+
+        # Handle suite names containing a dot, e.g. 'glib-2.0'
+        if 'ptestresult.sections' in result and suite not in result['ptestresult.sections']:
+            try:
+                _, suite, suite1, test = k.split(".", 3)
+                if suite + "." + suite1 in result['ptestresult.sections']:
+                    suite = suite + "." + suite1
+            except ValueError:
+                pass
+
+        if suite not in self.ptests[machine]:
+            self.ptests[machine][suite] = {
+                'passed': 0, 'failed': 0, 'skipped': 0, 'duration': '-',
+                'failed_testcases': [], "testcases": set(),
+            }
 
-    def get_aggregated_test_result(self, logger, testresult):
+        # do not process duplicate results
+        if test in self.ptests[machine][suite]["testcases"]:
+            print("Warning: duplicate ptest result '{}.{}' for {}".format(suite, test, machine))
+            return False
+
+        for tk in self.result_types:
+            if status in self.result_types[tk]:
+                self.ptests[machine][suite][tk] += 1
+        self.ptests[machine][suite]["testcases"].add(test)
+        return True
+
+    def handle_ltptest_result(self, k, status, result, machine):
+        if machine not in self.ltptests:
+            self.ltptests[machine] = {}
+
+        if k == 'ltpresult.sections':
+            # Ensure tests without any test results still show up on the report
+            for suite in result['ltpresult.sections']:
+                if suite not in self.ltptests[machine]:
+                    self.ltptests[machine][suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration': '-', 'failed_testcases': []}
+                if 'duration' in result['ltpresult.sections'][suite]:
+                    self.ltptests[machine][suite]['duration'] = result['ltpresult.sections'][suite]['duration']
+                if 'timeout' in result['ltpresult.sections'][suite]:
+                    self.ltptests[machine][suite]['duration'] += " T"
+            return
+        try:
+            _, suite, test = k.split(".", 2)
+        except ValueError:
+            return
+        # Handle suite names containing a dot, e.g. 'glib-2.0'
+        if 'ltpresult.sections' in result and suite not in result['ltpresult.sections']:
+            try:
+                _, suite, suite1, test = k.split(".", 3)
+                if suite + "." + suite1 in result['ltpresult.sections']:
+                    suite = suite + "." + suite1
+            except ValueError:
+                pass
+        if suite not in self.ltptests[machine]:
+            self.ltptests[machine][suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration': '-', 'failed_testcases': []}
+        for tk in self.result_types:
+            if status in self.result_types[tk]:
+                self.ltptests[machine][suite][tk] += 1
+
+    def handle_ltpposixtest_result(self, k, status, result, machine):
+        if machine not in self.ltpposixtests:
+            self.ltpposixtests[machine] = {}
+
+        if k == 'ltpposixresult.sections':
+            # Ensure tests without any test results still show up on the report
+            for suite in result['ltpposixresult.sections']:
+                if suite not in self.ltpposixtests[machine]:
+                    self.ltpposixtests[machine][suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration': '-', 'failed_testcases': []}
+                if 'duration' in result['ltpposixresult.sections'][suite]:
+                    self.ltpposixtests[machine][suite]['duration'] = result['ltpposixresult.sections'][suite]['duration']
+            return
+        try:
+            _, suite, test = k.split(".", 2)
+        except ValueError:
+            return
+        # Handle suite names containing a dot, e.g. 'glib-2.0'
+        if 'ltpposixresult.sections' in result and suite not in result['ltpposixresult.sections']:
+            try:
+                _, suite, suite1, test = k.split(".", 3)
+                if suite + "." + suite1 in result['ltpposixresult.sections']:
+                    suite = suite + "." + suite1
+            except ValueError:
+                pass
+        if suite not in self.ltpposixtests[machine]:
+            self.ltpposixtests[machine][suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration': '-', 'failed_testcases': []}
+        for tk in self.result_types:
+            if status in self.result_types[tk]:
+                self.ltpposixtests[machine][suite][tk] += 1
+
+    def get_aggregated_test_result(self, logger, testresult, machine):
         test_count_report = {'passed': 0, 'failed': 0, 'skipped': 0, 'failed_testcases': []}
-        result_types = {'passed': ['PASSED', 'passed'],
-                        'failed': ['FAILED', 'failed', 'ERROR', 'error', 'UNKNOWN'],
-                        'skipped': ['SKIPPED', 'skipped']}
-        result = get_dict_value(logger, testresult, 'result')
+        result = testresult.get('result', [])
         for k in result:
-            test_status = get_dict_value(logger, result[k], 'status')
-            for tk in result_types:
-                if test_status in result_types[tk]:
+            test_status = result[k].get('status', [])
+            if k.startswith("ptestresult."):
+                if not self.handle_ptest_result(k, test_status, result, machine):
+                    continue
+            elif k.startswith("ltpresult."):
+                self.handle_ltptest_result(k, test_status, result, machine)
+            elif k.startswith("ltpposixresult."):
+                self.handle_ltpposixtest_result(k, test_status, result, machine)
+
+            # process result if it was not skipped by a handler
+            for tk in self.result_types:
+                if test_status in self.result_types[tk]:
                     test_count_report[tk] += 1
-            if test_status in result_types['failed']:
+            if test_status in self.result_types['failed']:
                 test_count_report['failed_testcases'].append(k)
         return test_count_report
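The widened status table above means, for example, that an expected failure counts as a pass while an unexpected pass counts as a failure. A small standalone illustration, with the dict copied from the class definition above:

    result_types = {'passed': ['PASSED', 'passed', 'PASS', 'XFAIL'],
                    'failed': ['FAILED', 'failed', 'FAIL', 'ERROR', 'error', 'UNKNOWN', 'XPASS'],
                    'skipped': ['SKIPPED', 'skipped', 'UNSUPPORTED', 'UNTESTED', 'UNRESOLVED']}

    def classify(status):
        # Map a raw status string onto one of the three report buckets.
        for bucket, statuses in result_types.items():
            if status in statuses:
                return bucket
        return None

    assert classify('XFAIL') == 'passed'   # expected failure counts as a pass
    assert classify('XPASS') == 'failed'   # unexpected pass counts as a failure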
-    def get_test_result_percentage(self, test_result_count):
-        total_tested = test_result_count['passed'] + test_result_count['failed'] + test_result_count['skipped']
-        test_percent_report = {'passed': 0, 'failed': 0, 'skipped': 0}
-        for k in test_percent_report:
-            test_percent_report[k] = format(test_result_count[k] / total_tested * 100, '.2f')
-        return test_percent_report
-
-    def add_test_configurations(self, test_report, source_dir, file, result_id):
-        test_report['file_dir'] = self._get_short_file_dir(source_dir, file)
-        test_report['result_id'] = result_id
-        test_report['test_file_dir_result_id'] = '%s_%s' % (test_report['file_dir'], test_report['result_id'])
-
-    def _get_short_file_dir(self, source_dir, file):
-        file_dir = os.path.dirname(file)
-        source_dir = source_dir[:-1] if source_dir[-1] == '/' else source_dir
-        if file_dir == source_dir:
-            return 'None'
-        return file_dir.replace(source_dir, '')
-
-    def get_max_string_len(self, test_result_list, key, default_max_len):
-        max_len = default_max_len
-        for test_result in test_result_list:
-            value_len = len(test_result[key])
-            if value_len > max_len:
-                max_len = value_len
-        return max_len
-
-    def print_test_report(self, template_file_name, test_count_reports, test_percent_reports,
-                          max_len_dir, max_len_result_id):
+    def print_test_report(self, template_file_name, test_count_reports):
         from jinja2 import Environment, FileSystemLoader
         script_path = os.path.dirname(os.path.realpath(__file__))
         file_loader = FileSystemLoader(script_path + '/template')
         env = Environment(loader=file_loader, trim_blocks=True)
         template = env.get_template(template_file_name)
-        output = template.render(test_count_reports=test_count_reports,
-                                 test_percent_reports=test_percent_reports,
-                                 max_len_dir=max_len_dir,
-                                 max_len_result_id=max_len_result_id)
-        print('Printing text-based test report:')
+        havefailed = False
+        reportvalues = []
+        machines = []
+        cols = ['passed', 'failed', 'skipped']
+        maxlen = {'passed': 0, 'failed': 0, 'skipped': 0, 'result_id': 0, 'testseries': 0, 'ptest': 0, 'ltptest': 0, 'ltpposixtest': 0}
+        for line in test_count_reports:
+            total_tested = line['passed'] + line['failed'] + line['skipped']
+            vals = {}
+            vals['result_id'] = line['result_id']
+            vals['testseries'] = line['testseries']
+            vals['sort'] = line['testseries'] + "_" + line['result_id']
+            vals['failed_testcases'] = line['failed_testcases']
+            for k in cols:
+                if total_tested:
+                    vals[k] = "%d (%s%%)" % (line[k], format(line[k] / total_tested * 100, '.0f'))
+                else:
+                    vals[k] = "0 (0%)"
+            for k in maxlen:
+                if k in vals and len(vals[k]) > maxlen[k]:
+                    maxlen[k] = len(vals[k])
+            reportvalues.append(vals)
+            if line['failed_testcases']:
+                havefailed = True
+            if line['machine'] not in machines:
+                machines.append(line['machine'])
+        reporttotalvalues = {}
+        for k in cols:
+            reporttotalvalues[k] = '%s' % sum([line[k] for line in test_count_reports])
+        reporttotalvalues['count'] = '%s' % len(test_count_reports)
+        for (machine, report) in self.ptests.items():
+            for ptest in self.ptests[machine]:
+                if len(ptest) > maxlen['ptest']:
+                    maxlen['ptest'] = len(ptest)
+        for (machine, report) in self.ltptests.items():
+            for ltptest in self.ltptests[machine]:
+                if len(ltptest) > maxlen['ltptest']:
+                    maxlen['ltptest'] = len(ltptest)
+        for (machine, report) in self.ltpposixtests.items():
+            for ltpposixtest in self.ltpposixtests[machine]:
+                if len(ltpposixtest) > maxlen['ltpposixtest']:
+                    maxlen['ltpposixtest'] = len(ltpposixtest)
+        output = template.render(reportvalues=reportvalues,
+                                 reporttotalvalues=reporttotalvalues,
+                                 havefailed=havefailed,
+                                 machines=machines,
+                                 ptests=self.ptests,
+                                 ltptests=self.ltptests,
+                                 ltpposixtests=self.ltpposixtests,
+                                 maxlen=maxlen)
         print(output)
 
-    def view_test_report(self, logger, source_dir, git_branch):
-        if git_branch:
-            checkout_git_dir(source_dir, git_branch)
+    def view_test_report(self, logger, source_dir, branch, commit, tag, use_regression_map, raw_test, selected_test_case_only):
+        def print_selected_testcase_result(testresults, selected_test_case_only):
+            for testsuite in testresults:
+                for resultid in testresults[testsuite]:
+                    result = testresults[testsuite][resultid]['result']
+                    test_case_result = result.get(selected_test_case_only, {})
+                    if test_case_result.get('status'):
+                        print('Found selected test case result for %s from %s' % (selected_test_case_only, resultid))
+                        print(test_case_result['status'])
+                    else:
+                        print('Could not find selected test case result for %s from %s' % (selected_test_case_only, resultid))
+                    if test_case_result.get('log'):
+                        print(test_case_result['log'])
         test_count_reports = []
-        test_percent_reports = []
-        for file in get_directory_files(source_dir, ['.git'], 'testresults.json'):
-            logger.debug('Computing result for test result file: %s' % file)
-            testresults = load_json_file(file)
-            for k in testresults:
-                test_count_report = self.get_aggregated_test_result(logger, testresults[k])
-                test_percent_report = self.get_test_result_percentage(test_count_report)
-                self.add_test_configurations(test_count_report, source_dir, file, k)
-                self.add_test_configurations(test_percent_report, source_dir, file, k)
+        configmap = resultutils.store_map
+        if use_regression_map:
+            configmap = resultutils.regression_map
+        if commit:
+            if tag:
+                logger.warning("Ignoring --tag as --commit was specified")
+            tag_name = "{branch}/{commit_number}-g{commit}/{tag_number}"
+            repo = GitRepo(source_dir)
+            revs = gitarchive.get_test_revs(logger, repo, tag_name, branch=branch)
+            rev_index = gitarchive.rev_find(revs, 'commit', commit)
+            testresults = resultutils.git_get_result(repo, revs[rev_index][2], configmap=configmap)
+        elif tag:
+            repo = GitRepo(source_dir)
+            testresults = resultutils.git_get_result(repo, [tag], configmap=configmap)
+        else:
+            testresults = resultutils.load_resultsdata(source_dir, configmap=configmap)
+        if raw_test:
+            raw_results = {}
+            for testsuite in testresults:
+                result = testresults[testsuite].get(raw_test, {})
+                if result:
+                    raw_results[testsuite] = {raw_test: result}
+            if raw_results:
+                if selected_test_case_only:
+                    print_selected_testcase_result(raw_results, selected_test_case_only)
+                else:
+                    print(json.dumps(raw_results, sort_keys=True, indent=4))
+            else:
+                print('Could not find raw test result for %s' % raw_test)
+            return 0
+        if selected_test_case_only:
+            print_selected_testcase_result(testresults, selected_test_case_only)
+            return 0
+        for testsuite in testresults:
+            for resultid in testresults[testsuite]:
+                skip = False
+                result = testresults[testsuite][resultid]
+                machine = result['configuration']['MACHINE']
+
+                # Check to see if there are already results for these kinds of tests for the machine
+                for key in result['result'].keys():
+                    testtype = str(key).split('.')[0]
+                    if ((machine in self.ltptests and testtype == "ltpresult" and self.ltptests[machine]) or
+                        (machine in self.ltpposixtests and testtype == "ltpposixresult" and self.ltpposixtests[machine])):
+                        print("Already have test results for %s on %s, skipping %s" % (str(key).split('.')[0], machine, resultid))
+                        skip = True
+                        break
+                if skip:
+                    break
+
+                test_count_report = self.get_aggregated_test_result(logger, result, machine)
+                test_count_report['machine'] = machine
+                test_count_report['testseries'] = result['configuration']['TESTSERIES']
+                test_count_report['result_id'] = resultid
                 test_count_reports.append(test_count_report)
-                test_percent_reports.append(test_percent_report)
-        max_len_dir = self.get_max_string_len(test_count_reports, 'file_dir', len('file_dir'))
-        max_len_result_id = self.get_max_string_len(test_count_reports, 'result_id', len('result_id'))
-        self.print_test_report('test_report_full_text.txt', test_count_reports, test_percent_reports,
-                               max_len_dir, max_len_result_id)
+        self.print_test_report('test_report_full_text.txt', test_count_reports)
 
 def report(args, logger):
     report = ResultsTextReport()
-    report.view_test_report(logger, args.source_dir, args.git_branch)
+    report.view_test_report(logger, args.source_dir, args.branch, args.commit, args.tag, args.use_regression_map,
+                            args.raw_test_only, args.selected_test_case_only)
     return 0
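For reference, a minimal driver for the rewritten report path, assuming scripts/lib is on sys.path; the results directory below is hypothetical:

    import logging
    import scriptpath
    scriptpath.add_oe_lib_path()
    from resulttool.report import ResultsTextReport

    logger = logging.getLogger('resulttool')
    # Summarise every testresults.json found under the directory: no git
    # lookup, store_map grouping, no raw or single-testcase filtering.
    ResultsTextReport().view_test_report(logger, '/path/to/testresults',
                                         branch=None, commit=None, tag=None,
                                         use_regression_map=False,
                                         raw_test='', selected_test_case_only='')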
 
 def register_commands(subparsers):
     """Register subcommands from this plugin"""
-    parser_build = subparsers.add_parser('report', help='report test result summary',
-                                         description='report text-based test result summary from the source directory',
+    parser_build = subparsers.add_parser('report', help='summarise test results',
+                                         description='print a text-based summary of the test results',
                                          group='analysis')
     parser_build.set_defaults(func=report)
     parser_build.add_argument('source_dir',
-                              help='source directory that contain the test result files for reporting')
-    parser_build.add_argument('-b', '--git-branch', default='',
-                              help='(optional) default assume source directory contains all available files for '
-                                   'reporting unless a git branch was provided where it will try to checkout '
-                                   'the provided git branch assuming source directory was a git repository')
+                              help='source file/directory/URL that contains the test result files to summarise')
+    parser_build.add_argument('--branch', '-B', default='master', help="Branch to find commit in")
+    parser_build.add_argument('--commit', help="Revision to report")
+    parser_build.add_argument('-t', '--tag', default='',
+                              help='source_dir is a git repository, report on the tag specified from that repository')
+    parser_build.add_argument('-m', '--use_regression_map', action='store_true',
+                              help='instead of the default "store_map", use the "regression_map" for report')
+    parser_build.add_argument('-r', '--raw_test_only', default='',
+                              help='output raw test result only for the user provided test result id')
+    parser_build.add_argument('-s', '--selected_test_case_only', default='',
+                              help='output selected test case result for the user provided test case id, if both test '
+                                   'result id and test case id are provided then output the selected test case result '
+                                   'from the provided test result id')
diff --git a/scripts/lib/resulttool/resultsutils.py b/scripts/lib/resulttool/resultsutils.py
deleted file mode 100644
index 368786922c..0000000000
--- a/scripts/lib/resulttool/resultsutils.py
+++ /dev/null
@@ -1,67 +0,0 @@
-# test result tool - utilities
-#
-# Copyright (c) 2019, Intel Corporation.
-#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
-#
-import os
-import json
-import scriptpath
-scriptpath.add_oe_lib_path()
-from oeqa.utils.git import GitRepo, GitError
-
-def load_json_file(file):
-    with open(file, "r") as f:
-        return json.load(f)
-
-def dump_json_data(write_dir, file_name, json_data):
-    file_content = json.dumps(json_data, sort_keys=True, indent=4)
-    file_path = os.path.join(write_dir, file_name)
-    with open(file_path, 'w') as the_file:
-        the_file.write(file_content)
-
-def get_dict_value(logger, dict, key):
-    try:
-        return dict[key]
-    except KeyError:
-        if logger:
-            logger.debug('Faced KeyError exception: dict=%s: key=%s' % (dict, key))
-        return None
-    except TypeError:
-        if logger:
-            logger.debug('Faced TypeError exception: dict=%s: key=%s' % (dict, key))
-        return None
-
-def pop_dict_element(logger, dict, key):
-    try:
-        dict.pop(key)
-    except KeyError:
-        if logger:
-            logger.debug('Faced KeyError exception: dict=%s: key=%s' % (dict, key))
-    except AttributeError:
-        if logger:
-            logger.debug('Faced AttributeError exception: dict=%s: key=%s' % (dict, key))
-
-def checkout_git_dir(git_dir, git_branch):
-    try:
-        repo = GitRepo(git_dir, is_topdir=True)
-        repo.run_cmd('checkout %s' % git_branch)
-        return True
-    except GitError:
-        return False
-
-def get_directory_files(source_dir, excludes, file):
-    files_in_dir = []
-    for root, dirs, files in os.walk(source_dir, topdown=True):
-        [dirs.remove(d) for d in list(dirs) if d in excludes]
-        for name in files:
-            if name == file:
-                files_in_dir.append(os.path.join(root, name))
-    return files_in_dir
\ No newline at end of file
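The deleted helpers above are superseded by resultutils.py below, which files each result set under a path derived from a configurable list of configuration variables. A sketch of that path computation, mirroring append_resultsdata() with the runtime entry of store_map; the configuration dict here is made up:

    # Hypothetical result configuration, shaped like the 'configuration'
    # section of a testresults.json entry.
    configuration = {'TEST_TYPE': 'runtime', 'DISTRO': 'poky',
                     'MACHINE': 'qemux86-64', 'IMAGE_BASENAME': 'core-image-sato'}

    store_map_runtime = ['TEST_TYPE', 'DISTRO', 'MACHINE', 'IMAGE_BASENAME']

    # The joined values become the storage path under which this result
    # set is filed in the results dict (and later in the git repository).
    testpath = "/".join(configuration.get(i) for i in store_map_runtime)
    print(testpath)  # runtime/poky/qemux86-64/core-image-sato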
diff --git a/scripts/lib/resulttool/resultutils.py b/scripts/lib/resulttool/resultutils.py
new file mode 100644
index 0000000000..c5521d81bd
--- /dev/null
+++ b/scripts/lib/resulttool/resultutils.py
@@ -0,0 +1,228 @@
+# resulttool - common library/utility functions
+#
+# Copyright (c) 2019, Intel Corporation.
+# Copyright (c) 2019, Linux Foundation
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import os
+import base64
+import zlib
+import json
+import scriptpath
+import copy
+import urllib.request
+import urllib.parse
+import posixpath
+scriptpath.add_oe_lib_path()
+
+flatten_map = {
+    "oeselftest": [],
+    "runtime": [],
+    "sdk": [],
+    "sdkext": [],
+    "manual": []
+}
+regression_map = {
+    "oeselftest": ['TEST_TYPE', 'MACHINE'],
+    "runtime": ['TESTSERIES', 'TEST_TYPE', 'IMAGE_BASENAME', 'MACHINE', 'IMAGE_PKGTYPE', 'DISTRO'],
+    "sdk": ['TESTSERIES', 'TEST_TYPE', 'IMAGE_BASENAME', 'MACHINE', 'SDKMACHINE'],
+    "sdkext": ['TESTSERIES', 'TEST_TYPE', 'IMAGE_BASENAME', 'MACHINE', 'SDKMACHINE'],
+    "manual": ['TEST_TYPE', 'TEST_MODULE', 'IMAGE_BASENAME', 'MACHINE']
+}
+store_map = {
+    "oeselftest": ['TEST_TYPE'],
+    "runtime": ['TEST_TYPE', 'DISTRO', 'MACHINE', 'IMAGE_BASENAME'],
+    "sdk": ['TEST_TYPE', 'MACHINE', 'SDKMACHINE', 'IMAGE_BASENAME'],
+    "sdkext": ['TEST_TYPE', 'MACHINE', 'SDKMACHINE', 'IMAGE_BASENAME'],
+    "manual": ['TEST_TYPE', 'TEST_MODULE', 'MACHINE', 'IMAGE_BASENAME']
+}
+
+def is_url(p):
+    """
+    Helper for determining if the given path is a URL
+    """
+    return p.startswith('http://') or p.startswith('https://')
+
+extra_configvars = {'TESTSERIES': ''}
+
+#
+# Load the json file and append the results data into the provided results dict
+#
+def append_resultsdata(results, f, configmap=store_map, configvars=extra_configvars):
+    if type(f) is str:
+        if is_url(f):
+            with urllib.request.urlopen(f) as response:
+                data = json.loads(response.read().decode('utf-8'))
+            url = urllib.parse.urlparse(f)
+            testseries = posixpath.basename(posixpath.dirname(url.path))
+        else:
+            with open(f, "r") as filedata:
+                try:
+                    data = json.load(filedata)
+                except json.decoder.JSONDecodeError:
+                    print("Cannot decode {}. Possible corruption. Skipping.".format(f))
Skipping.".format(f)) + data = "" + testseries = os.path.basename(os.path.dirname(f)) + else: + data = f + for res in data: + if "configuration" not in data[res] or "result" not in data[res]: + raise ValueError("Test results data without configuration or result section?") + for config in configvars: + if config == "TESTSERIES" and "TESTSERIES" not in data[res]["configuration"]: + data[res]["configuration"]["TESTSERIES"] = testseries + continue + if config not in data[res]["configuration"]: + data[res]["configuration"][config] = configvars[config] + testtype = data[res]["configuration"].get("TEST_TYPE") + if testtype not in configmap: + raise ValueError("Unknown test type %s" % testtype) + testpath = "/".join(data[res]["configuration"].get(i) for i in configmap[testtype]) + if testpath not in results: + results[testpath] = {} + results[testpath][res] = data[res] + +# +# Walk a directory and find/load results data +# or load directly from a file +# +def load_resultsdata(source, configmap=store_map, configvars=extra_configvars): + results = {} + if is_url(source) or os.path.isfile(source): + append_resultsdata(results, source, configmap, configvars) + return results + for root, dirs, files in os.walk(source): + for name in files: + f = os.path.join(root, name) + if name == "testresults.json": + append_resultsdata(results, f, configmap, configvars) + return results + +def filter_resultsdata(results, resultid): + newresults = {} + for r in results: + for i in results[r]: + if i == resultsid: + newresults[r] = {} + newresults[r][i] = results[r][i] + return newresults + +def strip_ptestresults(results): + newresults = copy.deepcopy(results) + #for a in newresults2: + # newresults = newresults2[a] + for res in newresults: + if 'result' not in newresults[res]: + continue + if 'ptestresult.rawlogs' in newresults[res]['result']: + del newresults[res]['result']['ptestresult.rawlogs'] + if 'ptestresult.sections' in newresults[res]['result']: + for i in newresults[res]['result']['ptestresult.sections']: + if 'log' in newresults[res]['result']['ptestresult.sections'][i]: + del newresults[res]['result']['ptestresult.sections'][i]['log'] + return newresults + +def decode_log(logdata): + if isinstance(logdata, str): + return logdata + elif isinstance(logdata, dict): + if "compressed" in logdata: + data = logdata.get("compressed") + data = base64.b64decode(data.encode("utf-8")) + data = zlib.decompress(data) + return data.decode("utf-8", errors='ignore') + return None + +def generic_get_log(sectionname, results, section): + if sectionname not in results: + return None + if section not in results[sectionname]: + return None + + ptest = results[sectionname][section] + if 'log' not in ptest: + return None + return decode_log(ptest['log']) + +def ptestresult_get_log(results, section): + return generic_get_log('ptestresult.sections', results, section) + +def generic_get_rawlogs(sectname, results): + if sectname not in results: + return None + if 'log' not in results[sectname]: + return None + return decode_log(results[sectname]['log']) + +def ptestresult_get_rawlogs(results): + return generic_get_rawlogs('ptestresult.rawlogs', results) + +def save_resultsdata(results, destdir, fn="testresults.json", ptestjson=False, ptestlogs=False): + for res in results: + if res: + dst = destdir + "/" + res + "/" + fn + else: + dst = destdir + "/" + fn + os.makedirs(os.path.dirname(dst), exist_ok=True) + resultsout = results[res] + if not ptestjson: + resultsout = strip_ptestresults(results[res]) + with open(dst, 'w') as f: + 
+        for res2 in results[res]:
+            if ptestlogs and 'result' in results[res][res2]:
+                seriesresults = results[res][res2]['result']
+                rawlogs = ptestresult_get_rawlogs(seriesresults)
+                if rawlogs is not None:
+                    with open(dst.replace(fn, "ptest-raw.log"), "w+") as f:
+                        f.write(rawlogs)
+                if 'ptestresult.sections' in seriesresults:
+                    for i in seriesresults['ptestresult.sections']:
+                        sectionlog = ptestresult_get_log(seriesresults, i)
+                        if sectionlog is not None:
+                            with open(dst.replace(fn, "ptest-%s.log" % i), "w+") as f:
+                                f.write(sectionlog)
+
+def git_get_result(repo, tags, configmap=store_map):
+    git_objs = []
+    for tag in tags:
+        files = repo.run_cmd(['ls-tree', "--name-only", "-r", tag]).splitlines()
+        git_objs.extend([tag + ':' + f for f in files if f.endswith("testresults.json")])
+
+    def parse_json_stream(data):
+        """Parse multiple concatenated JSON objects"""
+        objs = []
+        json_d = ""
+        for line in data.splitlines():
+            if line == '}{':
+                json_d += '}'
+                objs.append(json.loads(json_d))
+                json_d = '{'
+            else:
+                json_d += line
+        objs.append(json.loads(json_d))
+        return objs
+
+    # Optimize by reading all data with one git command
+    results = {}
+    for obj in parse_json_stream(repo.run_cmd(['show'] + git_objs + ['--'])):
+        append_resultsdata(results, obj, configmap=configmap)
+
+    return results
+
+def test_run_results(results):
+    """
+    Convenient generator function that iterates over all test runs that have a
+    result section.
+
+    Generates a tuple of:
+        (result json file path, test run name, test run (dict), test run "results" (dict))
+    for each test run that has a "result" section
+    """
+    for path in results:
+        for run_name, test_run in results[path].items():
+            if not 'result' in test_run:
+                continue
+            yield path, run_name, test_run, test_run['result']
+
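As a sanity check on the log encoding handled above, a compressed log entry round-trips like this; the function is a standalone copy of resultutils.decode_log and the payload is made up:

    import base64
    import zlib

    def decode_log(logdata):
        # Plain strings pass through; dicts with a "compressed" key are
        # base64-decoded and zlib-decompressed back into text.
        if isinstance(logdata, str):
            return logdata
        elif isinstance(logdata, dict):
            if "compressed" in logdata:
                data = logdata.get("compressed")
                data = base64.b64decode(data.encode("utf-8"))
                data = zlib.decompress(data)
                return data.decode("utf-8", errors='ignore')
        return None

    raw = "PASS: test_one\nFAIL: test_two\n"
    entry = {"compressed": base64.b64encode(zlib.compress(raw.encode("utf-8"))).decode("utf-8")}
    assert decode_log(entry) == raw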
diff --git a/scripts/lib/resulttool/store.py b/scripts/lib/resulttool/store.py
index 2c6fd8492c..e0951f0a8f 100644
--- a/scripts/lib/resulttool/store.py
+++ b/scripts/lib/resulttool/store.py
@@ -1,110 +1,104 @@
-# test result tool - store test results
+# resulttool - store test results
 #
 # Copyright (c) 2019, Intel Corporation.
+# Copyright (c) 2019, Linux Foundation
 #
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
 #
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
-#
-import datetime
+
 import tempfile
 import os
 import subprocess
+import json
+import shutil
 import scriptpath
 scriptpath.add_bitbake_lib_path()
 scriptpath.add_oe_lib_path()
-from resulttool.resultsutils import checkout_git_dir
-try:
-    import bb
-except ImportError:
-    pass
+import resulttool.resultutils as resultutils
+import oeqa.utils.gitarchive as gitarchive
 
-class ResultsGitStore(object):
-    def _get_output_dir(self):
-        basepath = os.environ['BUILDDIR']
-        return basepath + '/testresults_%s/' % datetime.datetime.now().strftime("%Y%m%d%H%M%S")
-
-    def _create_temporary_workspace_dir(self):
-        return tempfile.mkdtemp(prefix='testresults.')
+def store(args, logger):
+    tempdir = tempfile.mkdtemp(prefix='testresults.')
+    try:
+        configvars = resultutils.extra_configvars.copy()
+        if args.executed_by:
+            configvars['EXECUTED_BY'] = args.executed_by
+        if args.extra_test_env:
+            configvars['EXTRA_TEST_ENV'] = args.extra_test_env
+        results = {}
+        logger.info('Reading files from %s' % args.source)
+        if resultutils.is_url(args.source) or os.path.isfile(args.source):
+            resultutils.append_resultsdata(results, args.source, configvars=configvars)
+        else:
+            for root, dirs, files in os.walk(args.source):
+                for name in files:
+                    f = os.path.join(root, name)
+                    if name == "testresults.json":
+                        resultutils.append_resultsdata(results, f, configvars=configvars)
+                    elif args.all:
+                        dst = f.replace(args.source, tempdir + "/")
+                        os.makedirs(os.path.dirname(dst), exist_ok=True)
+                        shutil.copyfile(f, dst)
 
-    def _remove_temporary_workspace_dir(self, workspace_dir):
-        return subprocess.run(["rm", "-rf", workspace_dir])
+        revisions = {}
 
-    def _oe_copy_files(self, source_dir, destination_dir):
-        from oe.path import copytree
-        copytree(source_dir, destination_dir)
+        if not results and not args.all:
+            if args.allow_empty:
+                logger.info("No results found to store")
+                return 0
+            logger.error("No results found to store")
+            return 1
 
-    def _copy_files(self, source_dir, destination_dir, copy_ignore=None):
-        from shutil import copytree
-        copytree(source_dir, destination_dir, ignore=copy_ignore)
+        # Find the branch/commit/commit_count and ensure they all match
+        for suite in results:
+            for result in results[suite]:
+                config = results[suite][result]['configuration']['LAYERS']['meta']
+                revision = (config['commit'], config['branch'], str(config['commit_count']))
+                if revision not in revisions:
+                    revisions[revision] = {}
+                if suite not in revisions[revision]:
+                    revisions[revision][suite] = {}
+                revisions[revision][suite][result] = results[suite][result]
 
-    def _store_files_to_git(self, logger, file_dir, git_dir, git_branch, commit_msg_subject, commit_msg_body):
-        logger.debug('Storing test result into git repository (%s) and branch (%s)'
-                     % (git_dir, git_branch))
-        return subprocess.run(["oe-git-archive",
-                               file_dir,
-                               "-g", git_dir,
-                               "-b", git_branch,
-                               "--commit-msg-subject", commit_msg_subject,
-                               "--commit-msg-body", commit_msg_body])
+        logger.info("Found %d revisions to store" % len(revisions))
 
-    def store_to_existing(self, logger, source_dir, git_dir, git_branch):
-        logger.debug('Storing files to existing git repository and branch')
-        from shutil import ignore_patterns
-        dest_dir = self._create_temporary_workspace_dir()
-        dest_top_dir = os.path.join(dest_dir, 'top_dir')
-        self._copy_files(git_dir, dest_top_dir, copy_ignore=ignore_patterns('.git'))
-        self._oe_copy_files(source_dir, dest_top_dir)
-        self._store_files_to_git(logger, dest_top_dir, git_dir, git_branch,
-                                 'Store as existing git and branch', 'Store as existing git repository and branch')
-        self._remove_temporary_workspace_dir(dest_dir)
-        return git_dir
+        for r in revisions:
+            results = revisions[r]
+            keywords = {'commit': r[0], 'branch': r[1], "commit_count": r[2]}
+            subprocess.check_call(["find", tempdir, "!", "-path", "./.git/*", "-delete"])
+            resultutils.save_resultsdata(results, tempdir, ptestlogs=True)
 
-    def store_to_existing_with_new_branch(self, logger, source_dir, git_dir, git_branch):
-        logger.debug('Storing files to existing git repository with new branch')
-        self._store_files_to_git(logger, source_dir, git_dir, git_branch,
-                                 'Store as existing git with new branch',
-                                 'Store as existing git repository with new branch')
-        return git_dir
+            logger.info('Storing test result into git repository %s' % args.git_dir)
 
-    def store_to_new(self, logger, source_dir, git_branch):
-        logger.debug('Storing files to new git repository')
-        output_dir = self._get_output_dir()
-        self._store_files_to_git(logger, source_dir, output_dir, git_branch,
-                                 'Store as new', 'Store as new git repository')
-        return output_dir
+            gitarchive.gitarchive(tempdir, args.git_dir, False, False,
+                                  "Results of {branch}:{commit}", "branch: {branch}\ncommit: {commit}", "{branch}",
+                                  False, "{branch}/{commit_count}-g{commit}/{tag_number}",
+                                  'Test run #{tag_number} of {branch}:{commit}', '',
+                                  [], [], False, keywords, logger)
 
-    def store(self, logger, source_dir, git_dir, git_branch):
-        if git_dir:
-            if checkout_git_dir(git_dir, git_branch):
-                self.store_to_existing(logger, source_dir, git_dir, git_branch)
-            else:
-                self.store_to_existing_with_new_branch(logger, source_dir, git_dir, git_branch)
-        else:
-            self.store_to_new(logger, source_dir, git_branch)
+    finally:
+        subprocess.check_call(["rm", "-rf", tempdir])
 
-def store(args, logger):
-    gitstore = ResultsGitStore()
-    gitstore.store(logger, args.source_dir, args.git_dir, args.git_branch)
     return 0
 
 def register_commands(subparsers):
     """Register subcommands from this plugin"""
-    parser_build = subparsers.add_parser('store', help='store test result files and directories into git repository',
-                                         description='store the testresults.json files and related directories '
-                                                     'from the source directory into the destination git repository '
-                                                     'with the given git branch',
+    parser_build = subparsers.add_parser('store', help='store test results into a git repository',
+                                         description='takes a results file or directory of results files and stores '
+                                                     'them into the destination git repository, splitting out the results '
+                                                     'files as configured',
                                          group='setup')
     parser_build.set_defaults(func=store)
-    parser_build.add_argument('source_dir',
-                              help='source directory that contain the test result files and directories to be stored')
-    parser_build.add_argument('git_branch', help='git branch used for store')
-    parser_build.add_argument('-d', '--git-dir', default='',
-                              help='(optional) default store to new <top_dir>/<build>/<testresults_datetime> '
-                                   'directory unless provided with existing git repository as destination')
+    parser_build.add_argument('source',
+                              help='source file/directory/URL that contains the test result files to be stored')
+    parser_build.add_argument('git_dir',
+                              help='the location of the git repository to store the results in')
+    parser_build.add_argument('-a', '--all', action='store_true',
+                              help='include all files, not just testresults.json files')
+    parser_build.add_argument('-e', '--allow-empty', action='store_true',
+                              help='don\'t error if no results to store are found')
+    parser_build.add_argument('-x', '--executed-by', default='',
+                              help='add executed-by configuration to each result file')
+    parser_build.add_argument('-t', '--extra-test-env', default='',
+                              help='add extra test environment data to each result file configuration')
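The store() flow above groups results by the (commit, branch, commit_count) of the meta layer before archiving, so each build revision is committed to the results repository exactly once. A standalone sketch of that grouping; the configuration values are made up:

    results = {
        'runtime/poky/qemux86-64/core-image-sato': {
            'runtime_core-image-sato_20190219': {
                'configuration': {'LAYERS': {'meta': {
                    'commit': 'abc123', 'branch': 'master', 'commit_count': 42}}},
                'result': {}}},
    }

    revisions = {}
    for suite in results:
        for result in results[suite]:
            config = results[suite][result]['configuration']['LAYERS']['meta']
            revision = (config['commit'], config['branch'], str(config['commit_count']))
            # Two result sets from the same revision land under one key.
            revisions.setdefault(revision, {}).setdefault(suite, {})[result] = results[suite][result]

    print(len(revisions))  # 1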
diff --git a/scripts/lib/resulttool/template/test_report_full_text.txt b/scripts/lib/resulttool/template/test_report_full_text.txt
index bc4874ba4b..2efba2ef6f 100644
--- a/scripts/lib/resulttool/template/test_report_full_text.txt
+++ b/scripts/lib/resulttool/template/test_report_full_text.txt
@@ -1,35 +1,79 @@
 ==============================================================================================================
-Test Report (Count of passed, failed, skipped group by file_dir, result_id)
+Test Result Status Summary (Counts/Percentages sorted by testseries, ID)
 ==============================================================================================================
 --------------------------------------------------------------------------------------------------------------
-{{ 'file_dir'.ljust(max_len_dir) }} | {{ 'result_id'.ljust(max_len_result_id) }} | {{ 'passed'.ljust(10) }} | {{ 'failed'.ljust(10) }} | {{ 'skipped'.ljust(10) }}
+{{ 'Test Series'.ljust(maxlen['testseries']) }} | {{ 'ID'.ljust(maxlen['result_id']) }} | {{ 'Passed'.ljust(maxlen['passed']) }} | {{ 'Failed'.ljust(maxlen['failed']) }} | {{ 'Skipped'.ljust(maxlen['skipped']) }}
 --------------------------------------------------------------------------------------------------------------
-{% for report in test_count_reports |sort(attribute='test_file_dir_result_id') %}
-{{ report.file_dir.ljust(max_len_dir) }} | {{ report.result_id.ljust(max_len_result_id) }} | {{ (report.passed|string).ljust(10) }} | {{ (report.failed|string).ljust(10) }} | {{ (report.skipped|string).ljust(10) }}
+{% for report in reportvalues |sort(attribute='sort') %}
+{{ report.testseries.ljust(maxlen['testseries']) }} | {{ report.result_id.ljust(maxlen['result_id']) }} | {{ (report.passed|string).ljust(maxlen['passed']) }} | {{ (report.failed|string).ljust(maxlen['failed']) }} | {{ (report.skipped|string).ljust(maxlen['skipped']) }}
 {% endfor %}
 --------------------------------------------------------------------------------------------------------------
+{{ 'Total'.ljust(maxlen['testseries']) }} | {{ reporttotalvalues['count'].ljust(maxlen['result_id']) }} | {{ reporttotalvalues['passed'].ljust(maxlen['passed']) }} | {{ reporttotalvalues['failed'].ljust(maxlen['failed']) }} | {{ reporttotalvalues['skipped'].ljust(maxlen['skipped']) }}
+--------------------------------------------------------------------------------------------------------------
+
+{% for machine in machines %}
+{% if ptests[machine] %}
+==============================================================================================================
+{{ machine }} PTest Result Summary
+==============================================================================================================
+--------------------------------------------------------------------------------------------------------------
+{{ 'Recipe'.ljust(maxlen['ptest']) }} | {{ 'Passed'.ljust(maxlen['passed']) }} | {{ 'Failed'.ljust(maxlen['failed']) }} | {{ 'Skipped'.ljust(maxlen['skipped']) }} | {{ 'Time(s)'.ljust(10) }}
+--------------------------------------------------------------------------------------------------------------
+{% for ptest in ptests[machine] |sort %}
+{{ ptest.ljust(maxlen['ptest']) }} | {{ (ptests[machine][ptest]['passed']|string).ljust(maxlen['passed']) }} | {{ (ptests[machine][ptest]['failed']|string).ljust(maxlen['failed']) }} | {{ (ptests[machine][ptest]['skipped']|string).ljust(maxlen['skipped']) }} | {{ (ptests[machine][ptest]['duration']|string) }}
+{% endfor %}
+--------------------------------------------------------------------------------------------------------------
+
+{% endif %}
+{% endfor %}
+{% for machine in machines %}
+{% if ltptests[machine] %}
 ==============================================================================================================
-Test Report (Percent of passed, failed, skipped group by file_dir, result_id)
+{{ machine }} LTP Test Result Summary
 ==============================================================================================================
 --------------------------------------------------------------------------------------------------------------
-{{ 'file_dir'.ljust(max_len_dir) }} | {{ 'result_id'.ljust(max_len_result_id) }} | {{ 'passed_%'.ljust(10) }} | {{ 'failed_%'.ljust(10) }} | {{ 'skipped_%'.ljust(10) }}
+{{ 'Recipe'.ljust(maxlen['ltptest']) }} | {{ 'Passed'.ljust(maxlen['passed']) }} | {{ 'Failed'.ljust(maxlen['failed']) }} | {{ 'Skipped'.ljust(maxlen['skipped']) }} | {{ 'Time(s)'.ljust(10) }}
 --------------------------------------------------------------------------------------------------------------
-{% for report in test_percent_reports |sort(attribute='test_file_dir_result_id') %}
-{{ report.file_dir.ljust(max_len_dir) }} | {{ report.result_id.ljust(max_len_result_id) }} | {{ (report.passed|string).ljust(10) }} | {{ (report.failed|string).ljust(10) }} | {{ (report.skipped|string).ljust(10) }}
+{% for ltptest in ltptests[machine] |sort %}
+{{ ltptest.ljust(maxlen['ltptest']) }} | {{ (ltptests[machine][ltptest]['passed']|string).ljust(maxlen['passed']) }} | {{ (ltptests[machine][ltptest]['failed']|string).ljust(maxlen['failed']) }} | {{ (ltptests[machine][ltptest]['skipped']|string).ljust(maxlen['skipped']) }} | {{ (ltptests[machine][ltptest]['duration']|string) }}
 {% endfor %}
 --------------------------------------------------------------------------------------------------------------
+{% endif %}
+{% endfor %}
+
+{% for machine in machines %}
+{% if ltpposixtests[machine] %}
 ==============================================================================================================
-Test Report (Failed test cases group by file_dir, result_id)
+{{ machine }} LTP Posix Result Summary
 ==============================================================================================================
 --------------------------------------------------------------------------------------------------------------
-{% for report in test_count_reports |sort(attribute='test_file_dir_result_id') %}
+{{ 'Recipe'.ljust(maxlen['ltpposixtest']) }} | {{ 'Passed'.ljust(maxlen['passed']) }} | {{ 'Failed'.ljust(maxlen['failed']) }} | {{ 'Skipped'.ljust(maxlen['skipped']) }} | {{ 'Time(s)'.ljust(10) }}
+--------------------------------------------------------------------------------------------------------------
+{% for ltpposixtest in ltpposixtests[machine] |sort %}
+{{ ltpposixtest.ljust(maxlen['ltpposixtest']) }} | {{ (ltpposixtests[machine][ltpposixtest]['passed']|string).ljust(maxlen['passed']) }} | {{ (ltpposixtests[machine][ltpposixtest]['failed']|string).ljust(maxlen['failed']) }} | {{ (ltpposixtests[machine][ltpposixtest]['skipped']|string).ljust(maxlen['skipped']) }} | {{ (ltpposixtests[machine][ltpposixtest]['duration']|string) }}
+{% endfor %}
+--------------------------------------------------------------------------------------------------------------
+
+{% endif %}
+{% endfor %}
+
+
+==============================================================================================================
+Failed test cases (sorted by testseries, ID)
+==============================================================================================================
+{% if havefailed %}
+--------------------------------------------------------------------------------------------------------------
+{% for report in reportvalues |sort(attribute='sort') %}
 {% if report.failed_testcases %}
-file_dir | result_id : {{ report.file_dir }} | {{ report.result_id }}
+testseries | result_id : {{ report.testseries }} | {{ report.result_id }}
 {% for testcase in report.failed_testcases %}
 {{ testcase }}
 {% endfor %}
 {% endif %}
 {% endfor %}
---------------------------------------------------------------------------------------------------------------
\ No newline at end of file
+--------------------------------------------------------------------------------------------------------------
+{% else %}
+There were no test failures
+{% endif %}
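Finally, the reworked template is plain Jinja2, so it can be rendered outside the tool with the same loader setup print_test_report uses. A minimal render of an empty report, assuming the command is run from the top of an openembedded-core checkout; the maxlen widths here are stub values:

    from jinja2 import Environment, FileSystemLoader

    env = Environment(loader=FileSystemLoader('scripts/lib/resulttool/template'),
                      trim_blocks=True)
    template = env.get_template('test_report_full_text.txt')
    # No rows, no machines, no failures: prints the headers and the
    # "There were no test failures" branch.
    print(template.render(reportvalues=[],
                          reporttotalvalues={'count': '0', 'passed': '0',
                                             'failed': '0', 'skipped': '0'},
                          havefailed=False, machines=[],
                          ptests={}, ltptests={}, ltpposixtests={},
                          maxlen={'testseries': 11, 'result_id': 2,
                                  'passed': 6, 'failed': 6, 'skipped': 7}))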