#!/usr/bin/env python3
#
# resulttool - tool for manipulating OEQA test result JSON files
# (merge results, summarise results, perform regression analysis, generate manual test result files)
#
# To view help information:
#     $ resulttool
#
# To store test results from OEQA automated tests:
#     $ resulttool store <source_dir> <git_branch>
#
# To merge test results:
#     $ resulttool merge <base_result_file> <target_result_file>
#
# To generate a test report:
#     $ resulttool report <source_dir>
#
# To create a unit test report in JUnit XML format:
#     $ resulttool junit <json_file>
#
# To perform regression analysis between two result files:
#     $ resulttool regression-file <base_result_file> <target_result_file>
#
# To execute manual test cases:
#     $ resulttool manualexecution <manualjsonfile>
#
# By default, manualexecution stores its testresults.json in <build>/tmp/log/manual/
#
# Copyright (c) 2019, Intel Corporation.
#
# SPDX-License-Identifier: GPL-2.0-only
#

import os
import sys
import argparse
import logging
# Add this script's lib/ directory to sys.path so the bundled modules resolve
script_path = os.path.dirname(os.path.realpath(__file__))
lib_path = os.path.join(script_path, 'lib')
sys.path.append(lib_path)
import argparse_oe
import scriptutils
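# Subcommand implementations live under lib/resulttool/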
import resulttool.merge
import resulttool.store
import resulttool.regression
import resulttool.report
import resulttool.manualexecution
import resulttool.log
import resulttool.junit
logger = scriptutils.logger_create('resulttool')

def main():
    parser = argparse_oe.ArgumentParser(description="OEQA test result manipulation tool.",
                                        epilog="Use %(prog)s <subcommand> --help to get help on a specific command")
    parser.add_argument('-d', '--debug', help='enable debug output', action='store_true')
    parser.add_argument('-q', '--quiet', help='print only errors', action='store_true')
    subparsers = parser.add_subparsers(dest="subparser_name", title='subcommands', metavar='<subcommand>')
    subparsers.required = True
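    # Subcommands are grouped for help output; the numeric argument to
    # add_subparser_group() is a sort key for the group listing. Each
    # register_commands() call below adds that module's subcommands and
    # binds its handler via set_defaults(func=...), which main() later
    # invokes as args.func(args, logger).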
    subparsers.add_subparser_group('manualexecution', 'manual testcases', 300)
    resulttool.manualexecution.register_commands(subparsers)
    subparsers.add_subparser_group('setup', 'setup', 200)
    resulttool.merge.register_commands(subparsers)
    resulttool.store.register_commands(subparsers)
    subparsers.add_subparser_group('analysis', 'analysis', 100)
    resulttool.regression.register_commands(subparsers)
    resulttool.report.register_commands(subparsers)
    resulttool.log.register_commands(subparsers)
    resulttool.junit.register_commands(subparsers)

    args = parser.parse_args()
    if args.debug:
        logger.setLevel(logging.DEBUG)
    elif args.quiet:
        logger.setLevel(logging.ERROR)

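    # Dispatch to the selected subcommand's handler; usage errors are
    # routed back through the parser so they are reported against the
    # right subcommand (error_subcommand() exits the process).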
    try:
        ret = args.func(args, logger)
    except argparse_oe.ArgumentUsageError as ae:
        parser.error_subcommand(ae.message, ae.subcommand)
    return ret

if __name__ == "__main__":
    sys.exit(main())