Diffstat (limited to 'scripts/oe-build-perf-test')
-rwxr-xr-x  scripts/oe-build-perf-test  133
1 file changed, 108 insertions(+), 25 deletions(-)
diff --git a/scripts/oe-build-perf-test b/scripts/oe-build-perf-test
index 996996bc62..00e00b4ce9 100755
--- a/scripts/oe-build-perf-test
+++ b/scripts/oe-build-perf-test
@@ -1,37 +1,38 @@
-#!/usr/bin/python3
+#!/usr/bin/env python3
#
# Build performance test script
#
# Copyright (c) 2016, Intel Corporation.
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
-#
-"""Build performance test script"""
+
import argparse
import errno
import fcntl
+import json
import logging
import os
+import re
+import shutil
import sys
from datetime import datetime
sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)) + '/lib')
import scriptpath
scriptpath.add_oe_lib_path()
-from oeqa.buildperf import BuildPerfTestRunner, KernelDropCaches
+scriptpath.add_bitbake_lib_path()
+import oeqa.buildperf
+from oeqa.buildperf import (BuildPerfTestLoader, BuildPerfTestResult,
+                            BuildPerfTestRunner, KernelDropCaches)
from oeqa.utils.commands import runCmd
+from oeqa.utils.metadata import metadata_from_bb, write_metadata_file
# Set-up logging
LOG_FORMAT = '[%(asctime)s] %(levelname)s: %(message)s'
-logging.basicConfig(level=logging.INFO, format=LOG_FORMAT)
+logging.basicConfig(level=logging.INFO, format=LOG_FORMAT,
+                    datefmt='%Y-%m-%d %H:%M:%S')
log = logging.getLogger()
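
The datefmt argument added above changes how %(asctime)s renders: without it, Python's logging module appends milliseconds with a comma separator. A minimal sketch of the difference:

    import logging

    LOG_FORMAT = '[%(asctime)s] %(levelname)s: %(message)s'

    # default asctime:     [2016-06-01 12:00:00,123] INFO: hello
    # with datefmt below:  [2016-06-01 12:00:00] INFO: hello
    logging.basicConfig(level=logging.INFO, format=LOG_FORMAT,
                        datefmt='%Y-%m-%d %H:%M:%S')
    logging.getLogger().info("hello")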
@@ -63,10 +64,8 @@ def pre_run_sanity_check():
    if ret.status:
        log.error("bitbake command not found")
        return False
-
    return True
-
def setup_file_logging(log_file):
    """Setup logging to file"""
    log_dir = os.path.dirname(log_file)
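
The next hunk shows only the last line of setup_file_logging(); the rest of the function body falls between the two hunks. For orientation, a minimal sketch of the standard pattern it presumably follows (not the verbatim body, which this diff does not show):

    def setup_file_logging(log_file):
        """Setup logging to file"""
        log_dir = os.path.dirname(log_file)
        if log_dir and not os.path.exists(log_dir):
            os.makedirs(log_dir)
        handler = logging.FileHandler(log_file)
        handler.setFormatter(logging.Formatter(LOG_FORMAT))
        log.addHandler(handler)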
@@ -78,6 +77,46 @@ def setup_file_logging(log_file):
    log.addHandler(handler)
+def archive_build_conf(out_dir):
+    """Archive build/conf to test results"""
+    src_dir = os.path.join(os.environ['BUILDDIR'], 'conf')
+    tgt_dir = os.path.join(out_dir, 'build', 'conf')
+    os.makedirs(os.path.dirname(tgt_dir))
+    shutil.copytree(src_dir, tgt_dir)
+
+
+def update_globalres_file(result_obj, filename, metadata):
+    """Write results to globalres csv file"""
+    # Map test names to time and size columns in globalres
+    # The tuples represent index and length of times and sizes
+    # respectively
+    gr_map = {'test1': ((0, 1), (8, 1)),
+              'test12': ((1, 1), (None, None)),
+              'test13': ((2, 1), (9, 1)),
+              'test2': ((3, 1), (None, None)),
+              'test3': ((4, 3), (None, None)),
+              'test4': ((7, 1), (10, 2))}
+
+    values = ['0'] * 12
+    for status, test, _ in result_obj.all_results():
+        if status in ['ERROR', 'SKIPPED']:
+            continue
+        (t_ind, t_len), (s_ind, s_len) = gr_map[test.name]
+        if t_ind is not None:
+            values[t_ind:t_ind + t_len] = test.times
+        if s_ind is not None:
+            values[s_ind:s_ind + s_len] = test.sizes
+
+    log.debug("Writing globalres log to %s", filename)
+    rev_info = metadata['layers']['meta']
+    with open(filename, 'a') as fobj:
+        fobj.write('{},{}:{},{},'.format(metadata['hostname'],
+                                         rev_info['branch'],
+                                         rev_info['commit'],
+                                         rev_info['commit']))
+        fobj.write(','.join(values) + '\n')
+
+
def parse_args(argv):
    """Parse command line arguments"""
    parser = argparse.ArgumentParser(
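
The slice assignment in update_globalres_file() above is the key trick: gr_map gives each test a fixed window of the 12 globalres CSV columns, and assigning a list to values[i:i + n] overwrites exactly that window. A worked example with hypothetical timings:

    values = ['0'] * 12
    t_ind, t_len = 4, 3                # gr_map['test3']: three time columns
    values[t_ind:t_ind + t_len] = ['123.4', '12.3', '1.2']  # hypothetical test.times
    print(','.join(values))
    # -> 0,0,0,0,123.4,12.3,1.2,0,0,0,0,0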
@@ -86,12 +125,21 @@ def parse_args(argv):
    parser.add_argument('-D', '--debug', action='store_true',
                        help='Enable debug level logging')
    parser.add_argument('--globalres-file',
+                       type=os.path.abspath,
                        help="Append results to 'globalres' csv file")
    parser.add_argument('--lock-file', default='./oe-build-perf.lock',
-                       metavar='FILENAME',
+                       metavar='FILENAME', type=os.path.abspath,
                        help="Lock file to use")
    parser.add_argument('-o', '--out-dir', default='results-{date}',
+                       type=os.path.abspath,
                        help="Output directory for test results")
+    parser.add_argument('-x', '--xml', action='store_true',
+                        help='Enable JUnit xml output')
+    parser.add_argument('--log-file',
+                        default='{out_dir}/oe-build-perf-test.log',
+                        help="Log file of this script")
+    parser.add_argument('--run-tests', nargs='+', metavar='TEST',
+                        help="List of tests to run")
    return parser.parse_args(argv)
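
Passing type=os.path.abspath makes argparse resolve paths at parse time, before the tests change the working directory. Note that argparse also runs a string default through type, so the './oe-build-perf.lock' default is resolved as well:

    import argparse
    import os

    parser = argparse.ArgumentParser()
    parser.add_argument('--lock-file', default='./oe-build-perf.lock',
                        type=os.path.abspath)
    args = parser.parse_args([])
    print(args.lock_file)   # e.g. /home/user/build/oe-build-perf.lock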
@@ -100,6 +148,10 @@ def main(argv=None):
"""Script entry point"""
args = parse_args(argv)
+ # Set-up log file
+ out_dir = args.out_dir.format(date=datetime.now().strftime('%Y%m%d%H%M%S'))
+ setup_file_logging(args.log_file.format(out_dir=out_dir))
+
if args.debug:
log.setLevel(logging.DEBUG)
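
The defaults are expanded in two stages: {date} in --out-dir first, then {out_dir} in --log-file. With the default values (and a hypothetical timestamp):

    out_dir = 'results-{date}'.format(date='20160601120000')
    log_file = '{out_dir}/oe-build-perf-test.log'.format(out_dir=out_dir)
    # out_dir  -> results-20160601120000 (made absolute by type=os.path.abspath)
    # log_file -> results-20160601120000/oe-build-perf-test.log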
@@ -114,18 +166,49 @@ def main(argv=None):
    # Check our capability to drop caches and ask pass if needed
    KernelDropCaches.check()
-    # Set-up log file
-    out_dir = args.out_dir.format(date=datetime.now().strftime('%Y%m%d%H%M%S'))
-    setup_file_logging(os.path.join(out_dir, 'output.log'))
+    # Load build perf tests
+    loader = BuildPerfTestLoader()
+    if args.run_tests:
+        suite = loader.loadTestsFromNames(args.run_tests, oeqa.buildperf)
+    else:
+        suite = loader.loadTestsFromModule(oeqa.buildperf)
+
+    # Save test metadata
+    metadata = metadata_from_bb()
+    log.info("Testing Git revision branch:commit %s:%s (%s)",
+             metadata['layers']['meta']['branch'],
+             metadata['layers']['meta']['commit'],
+             metadata['layers']['meta']['commit_count'])
+    if args.xml:
+        write_metadata_file(os.path.join(out_dir, 'metadata.xml'), metadata)
+    else:
+        with open(os.path.join(out_dir, 'metadata.json'), 'w') as fobj:
+            json.dump(metadata, fobj, indent=2)
+    archive_build_conf(out_dir)
+
+    runner = BuildPerfTestRunner(out_dir, verbosity=2)
+
+    # Suppress logger output to stderr so that the output from unittest
+    # is not mixed with occasional logger output
+    log.handlers[0].setLevel(logging.CRITICAL)
    # Run actual tests
-    runner = BuildPerfTestRunner(out_dir)
-    ret = runner.run_tests()
-    if not ret:
-        if args.globalres_file:
-            runner.update_globalres_file(args.globalres_file)
-
-    return ret
+    result = runner.run(suite)
+
+    # Restore logger output to stderr
+    log.handlers[0].setLevel(log.level)
+
+    if args.xml:
+        result.write_results_xml()
+    else:
+        result.write_results_json()
+        result.write_buildstats_json()
+    if args.globalres_file:
+        update_globalres_file(result, args.globalres_file, metadata)
+    if result.wasSuccessful():
+        return 0
+
+    return 2
if __name__ == '__main__':
    sys.exit(main())
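
One pattern in main() worth noting: log.handlers[0] is the stderr StreamHandler installed by logging.basicConfig(), so raising its level to CRITICAL mutes console output during the test run while the file handler added by setup_file_logging() keeps recording everything. A standalone sketch of the idea (file name hypothetical):

    import logging

    logging.basicConfig(level=logging.INFO)
    log = logging.getLogger()
    log.addHandler(logging.FileHandler('test.log'))   # file logging continues

    stderr_handler = log.handlers[0]                  # basicConfig's StreamHandler
    stderr_handler.setLevel(logging.CRITICAL)         # mute the console
    log.info("goes to test.log only")
    # ... run the noisy unittest suite here ...
    stderr_handler.setLevel(log.level)                # restore console output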