Diffstat (limited to 'meta/lib/oeqa/buildperf/base.py')
-rw-r--r--  meta/lib/oeqa/buildperf/base.py | 650
1 file changed, 412 insertions(+), 238 deletions(-)
diff --git a/meta/lib/oeqa/buildperf/base.py b/meta/lib/oeqa/buildperf/base.py
index 527563bb0b..5d656c781a 100644
--- a/meta/lib/oeqa/buildperf/base.py
+++ b/meta/lib/oeqa/buildperf/base.py
@@ -1,32 +1,36 @@
# Copyright (c) 2016, Intel Corporation.
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
+# SPDX-License-Identifier: GPL-2.0-only
#
"""Build performance test base classes and functionality"""
-import glob
+import json
import logging
import os
import re
-import shutil
+import resource
import socket
-import tempfile
+import shutil
import time
-import traceback
+import unittest
+import xml.etree.ElementTree as ET
+from collections import OrderedDict
from datetime import datetime, timedelta
+from functools import partial
+from multiprocessing import Process
+from multiprocessing import SimpleQueue
+from xml.dom import minidom
-from oeqa.utils.commands import runCmd, get_bb_vars
+import oe.path
+from oeqa.utils.commands import CommandError, runCmd, get_bb_vars
from oeqa.utils.git import GitError, GitRepo
# Get logger for this module
log = logging.getLogger('build-perf')
+# Our own version of runCmd which does not raise AssertionError, which would
+# cause errors to be interpreted as failures
+runCmd2 = partial(runCmd, assert_error=False, limit_exc_output=40)
+
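
The runCmd2 wrapper above relies on functools.partial to pre-bind keyword
arguments. A minimal standalone sketch of the same pattern (toy stand-in
function, not the real oeqa runCmd):

    from functools import partial

    def run(cmd, assert_error=True, limit_exc_output=0):
        """Stand-in for oeqa.utils.commands.runCmd"""
        return (cmd, assert_error, limit_exc_output)

    run2 = partial(run, assert_error=False, limit_exc_output=40)
    assert run2('bitbake -m') == ('bitbake -m', False, 40)
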
class KernelDropCaches(object):
"""Container of the functions for dropping kernel caches"""
@@ -38,7 +42,7 @@ class KernelDropCaches(object):
from getpass import getpass
from locale import getdefaultlocale
cmd = ['sudo', '-k', '-n', 'tee', '/proc/sys/vm/drop_caches']
- ret = runCmd(cmd, ignore_status=True, data=b'0')
+ ret = runCmd2(cmd, ignore_status=True, data=b'0')
if ret.output.startswith('sudo:'):
pass_str = getpass(
"\nThe script requires sudo access to drop caches between "
@@ -58,273 +62,443 @@ class KernelDropCaches(object):
input_data = b''
cmd += ['tee', '/proc/sys/vm/drop_caches']
input_data += b'3'
- runCmd(cmd, data=input_data)
+ runCmd2(cmd, data=input_data)
+
+def str_to_fn(string):
+ """Convert string to a sanitized filename"""
+ return re.sub(r'(\W+)', '-', string)
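
For instance, the sanitization above collapses each run of non-word characters
into a single dash (illustrative input):

    # str_to_fn('test1: build core-image-sato') == 'test1-build-core-image-sato'
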
-def time_cmd(cmd, **kwargs):
- """Time a command"""
- with tempfile.NamedTemporaryFile(mode='w+') as tmpf:
- timecmd = ['/usr/bin/time', '-v', '-o', tmpf.name]
- if isinstance(cmd, str):
- timecmd = ' '.join(timecmd) + ' '
- timecmd += cmd
- # TODO: 'ignore_status' could/should be removed when globalres.log is
- # deprecated. The function would just raise an exception, instead
- ret = runCmd(timecmd, ignore_status=True, **kwargs)
- timedata = tmpf.file.read()
- return ret, timedata
+class ResultsJsonEncoder(json.JSONEncoder):
+ """Extended encoder for build perf test results"""
+ unix_epoch = datetime.utcfromtimestamp(0)
-class BuildPerfTestRunner(object):
+ def default(self, obj):
+ """Encoder for our types"""
+ if isinstance(obj, datetime):
+ # NOTE: we assume that all timestamps are in UTC time
+ return (obj - self.unix_epoch).total_seconds()
+ if isinstance(obj, timedelta):
+ return obj.total_seconds()
+ return json.JSONEncoder.default(self, obj)
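
Assuming the encoder above is in scope, datetime and timedelta values
serialize to plain seconds (example values are illustrative):

    import json
    from datetime import datetime, timedelta

    print(json.dumps({'start_time': datetime(2016, 1, 1),
                      'elapsed_time': timedelta(minutes=90)},
                     cls=ResultsJsonEncoder))
    # -> {"start_time": 1451606400.0, "elapsed_time": 5400.0}
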
+
+
+class BuildPerfTestResult(unittest.TextTestResult):
"""Runner class for executing the individual tests"""
# List of test cases to run
test_run_queue = []
- def __init__(self, out_dir):
- self.results = {}
- self.out_dir = os.path.abspath(out_dir)
- if not os.path.exists(self.out_dir):
- os.makedirs(self.out_dir)
+ def __init__(self, out_dir, *args, **kwargs):
+ super(BuildPerfTestResult, self).__init__(*args, **kwargs)
- # Get Git parameters
- try:
- self.repo = GitRepo('.')
- except GitError:
- self.repo = None
- self.git_rev, self.git_branch = self.get_git_revision()
- log.info("Using Git branch:revision %s:%s", self.git_branch,
- self.git_rev)
-
- def get_git_revision(self):
- """Get git branch and revision under testing"""
- rev = os.getenv('OE_BUILDPERFTEST_GIT_REVISION')
- branch = os.getenv('OE_BUILDPERFTEST_GIT_BRANCH')
- if not self.repo and (not rev or not branch):
- log.info("The current working directory doesn't seem to be a Git "
- "repository clone. You can specify branch and revision "
- "used in test results with OE_BUILDPERFTEST_GIT_REVISION "
- "and OE_BUILDPERFTEST_GIT_BRANCH environment variables")
- else:
- if not rev:
- rev = self.repo.run_cmd(['rev-parse', 'HEAD'])
- if not branch:
- try:
- # Strip 11 chars, i.e. 'refs/heads' from the beginning
- branch = self.repo.run_cmd(['symbolic-ref', 'HEAD'])[11:]
- except GitError:
- log.debug('Currently on detached HEAD')
- branch = None
- return str(rev), str(branch)
-
- def run_tests(self):
- """Method that actually runs the tests"""
- self.results['schema_version'] = 1
- self.results['git_revision'] = self.git_rev
- self.results['git_branch'] = self.git_branch
- self.results['tester_host'] = socket.gethostname()
- start_time = datetime.utcnow()
- self.results['start_time'] = start_time
- self.results['tests'] = {}
-
- self.archive_build_conf()
- for test_class in self.test_run_queue:
- log.info("Executing test %s: %s", test_class.name,
- test_class.description)
-
- test = test_class(self.out_dir)
- try:
- test.run()
- except Exception:
- # Catch all exceptions. This way e.g buggy tests won't scrap
- # the whole test run
- sep = '-' * 5 + ' TRACEBACK ' + '-' * 60 + '\n'
- tb_msg = sep + traceback.format_exc() + sep
- log.error("Test execution failed with:\n" + tb_msg)
- self.results['tests'][test.name] = test.results
-
- self.results['elapsed_time'] = datetime.utcnow() - start_time
- return 0
-
- def archive_build_conf(self):
- """Archive build/conf to test results"""
- src_dir = os.path.join(os.environ['BUILDDIR'], 'conf')
- tgt_dir = os.path.join(self.out_dir, 'build', 'conf')
- os.makedirs(os.path.dirname(tgt_dir))
- shutil.copytree(src_dir, tgt_dir)
-
- def update_globalres_file(self, filename):
- """Write results to globalres csv file"""
- if self.repo:
- git_tag_rev = self.repo.run_cmd(['describe', self.git_rev])
- else:
- git_tag_rev = self.git_rev
- times = []
- sizes = []
- for test in self.results['tests'].values():
- for measurement in test['measurements']:
- res_type = measurement['type']
- values = measurement['values']
- if res_type == BuildPerfTest.SYSRES:
- e_sec = values['elapsed_time'].total_seconds()
- times.append('{:d}:{:02d}:{:.2f}'.format(
- int(e_sec / 3600),
- int((e_sec % 3600) / 60),
- e_sec % 60))
- elif res_type == BuildPerfTest.DISKUSAGE:
- sizes.append(str(values['size']))
+ self.out_dir = out_dir
+ self.hostname = socket.gethostname()
+ self.product = os.getenv('OE_BUILDPERFTEST_PRODUCT', 'oe-core')
+ self.start_time = self.elapsed_time = None
+ self.successes = []
+
+ def addSuccess(self, test):
+ """Record results from successful tests"""
+ super(BuildPerfTestResult, self).addSuccess(test)
+ self.successes.append(test)
+
+ def addError(self, test, err):
+ """Record results from crashed test"""
+ test.err = err
+ super(BuildPerfTestResult, self).addError(test, err)
+
+ def addFailure(self, test, err):
+ """Record results from failed test"""
+ test.err = err
+ super(BuildPerfTestResult, self).addFailure(test, err)
+
+ def addExpectedFailure(self, test, err):
+ """Record results from expectedly failed test"""
+ test.err = err
+ super(BuildPerfTestResult, self).addExpectedFailure(test, err)
+
+ def startTest(self, test):
+ """Pre-test hook"""
+ test.base_dir = self.out_dir
+ log.info("Executing test %s: %s", test.name, test.shortDescription())
+ self.stream.write(datetime.now().strftime("[%Y-%m-%d %H:%M:%S] "))
+ super(BuildPerfTestResult, self).startTest(test)
+
+ def startTestRun(self):
+ """Pre-run hook"""
+ self.start_time = datetime.utcnow()
+
+ def stopTestRun(self):
+ """Post-run hook"""
+ self.elapsed_time = datetime.utcnow() - self.start_time
+
+ def all_results(self):
+ compound = [('SUCCESS', t, None) for t in self.successes] + \
+ [('FAILURE', t, m) for t, m in self.failures] + \
+ [('ERROR', t, m) for t, m in self.errors] + \
+ [('EXPECTED_FAILURE', t, m) for t, m in self.expectedFailures] + \
+ [('UNEXPECTED_SUCCESS', t, None) for t in self.unexpectedSuccesses] + \
+ [('SKIPPED', t, m) for t, m in self.skipped]
+ return sorted(compound, key=lambda info: info[1].start_time)
+
+
+ def write_buildstats_json(self):
+ """Write buildstats file"""
+ buildstats = OrderedDict()
+ for _, test, _ in self.all_results():
+ for key, val in test.buildstats.items():
+ buildstats[test.name + '.' + key] = val
+ with open(os.path.join(self.out_dir, 'buildstats.json'), 'w') as fobj:
+ json.dump(buildstats, fobj, cls=ResultsJsonEncoder)
+
+
+ def write_results_json(self):
+ """Write test results into a json-formatted file"""
+ results = OrderedDict([('tester_host', self.hostname),
+ ('start_time', self.start_time),
+ ('elapsed_time', self.elapsed_time),
+ ('tests', OrderedDict())])
+
+ for status, test, reason in self.all_results():
+ test_result = OrderedDict([('name', test.name),
+ ('description', test.shortDescription()),
+ ('status', status),
+ ('start_time', test.start_time),
+ ('elapsed_time', test.elapsed_time),
+ ('measurements', test.measurements)])
+ if status in ('ERROR', 'FAILURE', 'EXPECTED_FAILURE'):
+ test_result['message'] = str(test.err[1])
+ test_result['err_type'] = test.err[0].__name__
+ test_result['err_output'] = reason
+ elif reason:
+ test_result['message'] = reason
+
+ results['tests'][test.name] = test_result
+
+ with open(os.path.join(self.out_dir, 'results.json'), 'w') as fobj:
+ json.dump(results, fobj, indent=4,
+ cls=ResultsJsonEncoder)
+
+ def write_results_xml(self):
+ """Write test results into a JUnit XML file"""
+ top = ET.Element('testsuites')
+ suite = ET.SubElement(top, 'testsuite')
+ suite.set('name', 'oeqa.buildperf')
+ suite.set('timestamp', self.start_time.isoformat())
+ suite.set('time', str(self.elapsed_time.total_seconds()))
+ suite.set('hostname', self.hostname)
+ suite.set('failures', str(len(self.failures) + len(self.expectedFailures)))
+ suite.set('errors', str(len(self.errors)))
+ suite.set('skipped', str(len(self.skipped)))
+
+ test_cnt = 0
+ for status, test, reason in self.all_results():
+ test_cnt += 1
+ testcase = ET.SubElement(suite, 'testcase')
+ testcase.set('classname', test.__module__ + '.' + test.__class__.__name__)
+ testcase.set('name', test.name)
+ testcase.set('description', test.shortDescription())
+ testcase.set('timestamp', test.start_time.isoformat())
+ testcase.set('time', str(test.elapsed_time.total_seconds()))
+ if status in ('ERROR', 'FAILURE', 'EXPECTED_FAILURE'):
+ if status in ('FAILURE', 'EXPECTED_FAILURE'):
+ result = ET.SubElement(testcase, 'failure')
else:
- log.warning("Unable to handle '%s' values in "
- "globalres.log", res_type)
-
- log.debug("Writing globalres log to %s", filename)
- with open(filename, 'a') as fobj:
- fobj.write('{},{}:{},{},'.format(self.results['tester_host'],
- self.results['git_branch'],
- self.results['git_revision'],
- git_tag_rev))
- fobj.write(','.join(times + sizes) + '\n')
+ result = ET.SubElement(testcase, 'error')
+ result.set('message', str(test.err[1]))
+ result.set('type', test.err[0].__name__)
+ result.text = reason
+ elif status == 'SKIPPED':
+ result = ET.SubElement(testcase, 'skipped')
+ result.text = reason
+ elif status not in ('SUCCESS', 'UNEXPECTED_SUCCESS'):
+ raise TypeError("BUG: invalid test status '%s'" % status)
+
+ for data in test.measurements.values():
+ measurement = ET.SubElement(testcase, data['type'])
+ measurement.set('name', data['name'])
+ measurement.set('legend', data['legend'])
+ vals = data['values']
+ if data['type'] == BuildPerfTestCase.SYSRES:
+ ET.SubElement(measurement, 'time',
+ timestamp=vals['start_time'].isoformat()).text = \
+ str(vals['elapsed_time'].total_seconds())
+ attrib = dict((k, str(v)) for k, v in vals['iostat'].items())
+ ET.SubElement(measurement, 'iostat', attrib=attrib)
+ attrib = dict((k, str(v)) for k, v in vals['rusage'].items())
+ ET.SubElement(measurement, 'rusage', attrib=attrib)
+ elif data['type'] == BuildPerfTestCase.DISKUSAGE:
+ ET.SubElement(measurement, 'size').text = str(vals['size'])
+ else:
+ raise TypeError('BUG: unsupported measurement type')
+ suite.set('tests', str(test_cnt))
-def perf_test_case(obj):
- """Decorator for adding test classes"""
- BuildPerfTestRunner.test_run_queue.append(obj)
- return obj
+ # Use minidom for pretty-printing
+ dom_doc = minidom.parseString(ET.tostring(top, 'utf-8'))
+ with open(os.path.join(self.out_dir, 'results.xml'), 'w') as fobj:
+ dom_doc.writexml(fobj, addindent=' ', newl='\n', encoding='utf-8')
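
The ElementTree-plus-minidom round trip used above is a common idiom for
producing pretty-printed XML; a standalone sketch (element names chosen to
mirror the code above):

    import xml.etree.ElementTree as ET
    from xml.dom import minidom

    top = ET.Element('testsuites')
    suite = ET.SubElement(top, 'testsuite', name='oeqa.buildperf', tests='1')
    ET.SubElement(suite, 'testcase', classname='Demo', name='test_noop',
                  time='0.1')
    print(minidom.parseString(ET.tostring(top, 'utf-8'))
          .toprettyxml(indent='  '))
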
-class BuildPerfTest(object):
+class BuildPerfTestCase(unittest.TestCase):
"""Base class for build performance tests"""
SYSRES = 'sysres'
DISKUSAGE = 'diskusage'
-
- name = None
- description = None
-
- def __init__(self, out_dir):
- self.out_dir = out_dir
- self.results = {'name':self.name,
- 'description': self.description,
- 'status': 'NOTRUN',
- 'start_time': None,
- 'elapsed_time': None,
- 'measurements': []}
- if not os.path.exists(self.out_dir):
- os.makedirs(self.out_dir)
- if not self.name:
- self.name = self.__class__.__name__
+ build_target = None
+
+ def __init__(self, *args, **kwargs):
+ super(BuildPerfTestCase, self).__init__(*args, **kwargs)
+ self.name = self._testMethodName
+ self.base_dir = None
+ self.start_time = None
+ self.elapsed_time = None
+ self.measurements = OrderedDict()
+ self.buildstats = OrderedDict()
+ # self.err is supposed to be a tuple from sys.exc_info()
+ self.err = None
self.bb_vars = get_bb_vars()
- # TODO: remove the _failed flag when globalres.log is ditched as all
- # failures should raise an exception
- self._failed = False
- self.cmd_log = os.path.join(self.out_dir, 'commands.log')
-
- def run(self):
+ # TODO: remove 'times' and 'sizes' arrays when globalres support is
+ # removed
+ self.times = []
+ self.sizes = []
+
+ @property
+ def tmp_dir(self):
+ return os.path.join(self.base_dir, self.name + '.tmp')
+
+ def shortDescription(self):
+ return super(BuildPerfTestCase, self).shortDescription() or ""
+
+ def setUp(self):
+ """Set-up fixture for each test"""
+ if not os.path.isdir(self.tmp_dir):
+ os.mkdir(self.tmp_dir)
+ if self.build_target:
+ self.run_cmd(['bitbake', self.build_target, '--runall=fetch'])
+
+ def tearDown(self):
+ """Tear-down fixture for each test"""
+ if os.path.isdir(self.tmp_dir):
+ shutil.rmtree(self.tmp_dir)
+
+ def run(self, *args, **kwargs):
"""Run test"""
- self.results['status'] = 'FAILED'
- self.results['start_time'] = datetime.now()
- self._run()
- self.results['elapsed_time'] = (datetime.now() -
- self.results['start_time'])
- # Test is regarded as completed if it doesn't raise an exception
- if not self._failed:
- self.results['status'] = 'COMPLETED'
-
- def _run(self):
- """Actual test payload"""
- raise NotImplementedError
-
- def log_cmd_output(self, cmd):
- """Run a command and log it's output"""
- with open(self.cmd_log, 'a') as fobj:
- runCmd(cmd, stdout=fobj)
-
- def measure_cmd_resources(self, cmd, name, legend):
+ self.start_time = datetime.now()
+ super(BuildPerfTestCase, self).run(*args, **kwargs)
+ self.elapsed_time = datetime.now() - self.start_time
+
+ def run_cmd(self, cmd):
+ """Convenience method for running a command"""
+ cmd_str = cmd if isinstance(cmd, str) else ' '.join(cmd)
+ log.info("Logging command: %s", cmd_str)
+ try:
+ runCmd2(cmd)
+ except CommandError as err:
+ log.error("Command failed: %s", err.retcode)
+ raise
+
+ def _append_measurement(self, measurement):
+ """Simple helper for adding measurement results"""
+ if measurement['name'] in self.measurements:
+ raise ValueError('BUG: two measurements with the same name in {}'.format(
+ self.__class__.__name__))
+ self.measurements[measurement['name']] = measurement
+
+ def measure_cmd_resources(self, cmd, name, legend, save_bs=False):
"""Measure system resource usage of a command"""
- def str_time_to_timedelta(strtime):
- """Convert time string from the time utility to timedelta"""
- split = strtime.split(':')
- hours = int(split[0]) if len(split) > 2 else 0
- mins = int(split[-2])
- secs, frac = split[-1].split('.')
- secs = int(secs)
- microsecs = int(float('0.' + frac) * pow(10, 6))
- return timedelta(0, hours*3600 + mins*60 + secs, microsecs)
+ def _worker(data_q, cmd, **kwargs):
+ """Worker process for measuring resources"""
+ try:
+ start_time = datetime.now()
+ ret = runCmd2(cmd, **kwargs)
+ etime = datetime.now() - start_time
+ rusage_struct = resource.getrusage(resource.RUSAGE_CHILDREN)
+ iostat = OrderedDict()
+ with open('/proc/{}/io'.format(os.getpid())) as fobj:
+ for line in fobj.readlines():
+ key, val = line.split(':')
+ iostat[key] = int(val)
+ rusage = OrderedDict()
+ # Skip unused fields, (i.e. 'ru_ixrss', 'ru_idrss', 'ru_isrss',
+ # 'ru_nswap', 'ru_msgsnd', 'ru_msgrcv' and 'ru_nsignals')
+ for key in ['ru_utime', 'ru_stime', 'ru_maxrss', 'ru_minflt',
+ 'ru_majflt', 'ru_inblock', 'ru_oublock',
+ 'ru_nvcsw', 'ru_nivcsw']:
+ rusage[key] = getattr(rusage_struct, key)
+ data_q.put({'ret': ret,
+ 'start_time': start_time,
+ 'elapsed_time': etime,
+ 'rusage': rusage,
+ 'iostat': iostat})
+ except Exception as err:
+ data_q.put(err)
cmd_str = cmd if isinstance(cmd, str) else ' '.join(cmd)
log.info("Timing command: %s", cmd_str)
- with open(self.cmd_log, 'a') as fobj:
- ret, timedata = time_cmd(cmd, stdout=fobj)
- if ret.status:
- log.error("Time will be reported as 0. Command failed: %s",
- ret.status)
- etime = timedelta(0)
- self._failed = True
- else:
- match = re.search(r'.*wall clock.*: (?P<etime>.*)\n', timedata)
- etime = str_time_to_timedelta(match.group('etime'))
-
- measurement = {'type': self.SYSRES,
- 'name': name,
- 'legend': legend}
- measurement['values'] = {'elapsed_time': etime}
- self.results['measurements'].append(measurement)
- nlogs = len(glob.glob(self.out_dir + '/results.log*'))
- results_log = os.path.join(self.out_dir,
- 'results.log.{}'.format(nlogs + 1))
- with open(results_log, 'w') as fobj:
- fobj.write(timedata)
-
- def measure_disk_usage(self, path, name, legend):
+ data_q = SimpleQueue()
+ try:
+ proc = Process(target=_worker, args=(data_q, cmd,))
+ proc.start()
+ data = data_q.get()
+ proc.join()
+ if isinstance(data, Exception):
+ raise data
+ except CommandError:
+ log.error("Command '%s' failed", cmd_str)
+ raise
+ etime = data['elapsed_time']
+
+ measurement = OrderedDict([('type', self.SYSRES),
+ ('name', name),
+ ('legend', legend)])
+ measurement['values'] = OrderedDict([('start_time', data['start_time']),
+ ('elapsed_time', etime),
+ ('rusage', data['rusage']),
+ ('iostat', data['iostat'])])
+ if save_bs:
+ self.save_buildstats(name)
+
+ self._append_measurement(measurement)
+
+ # Append to 'times' array for globalres log
+ e_sec = etime.total_seconds()
+ self.times.append('{:d}:{:02d}:{:05.2f}'.format(int(e_sec / 3600),
+ int((e_sec % 3600) / 60),
+ e_sec % 60))
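
A standalone sketch of the fork-and-measure pattern above: the workload runs
in a child process so that getrusage() there covers only that work, and either
the data or any raised exception travels back over the queue (toy workload in
place of runCmd2; assumes the 'fork' start method, the Linux default, like the
code above):

    import resource
    from multiprocessing import Process, SimpleQueue

    def _worker(data_q):
        try:
            sum(range(10 ** 7))  # toy workload
            data_q.put(resource.getrusage(resource.RUSAGE_SELF).ru_utime)
        except Exception as err:
            data_q.put(err)

    data_q = SimpleQueue()
    proc = Process(target=_worker, args=(data_q,))
    proc.start()
    data = data_q.get()
    proc.join()
    if isinstance(data, Exception):
        raise data
    print('user time: %.3fs' % data)
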
+
+ def measure_disk_usage(self, path, name, legend, apparent_size=False):
"""Estimate disk usage of a file or directory"""
- # TODO: 'ignore_status' could/should be removed when globalres.log is
- # deprecated. The function would just raise an exception, instead
- ret = runCmd(['du', '-s', path], ignore_status=True)
- if ret.status:
- log.error("du failed, disk usage will be reported as 0")
- size = 0
- self._failed = True
- else:
- size = int(ret.output.split()[0])
- log.debug("Size of %s path is %s", path, size)
- measurement = {'type': self.DISKUSAGE,
- 'name': name,
- 'legend': legend}
- measurement['values'] = {'size': size}
- self.results['measurements'].append(measurement)
-
- def save_buildstats(self):
+ cmd = ['du', '-s', '--block-size', '1024']
+ if apparent_size:
+ cmd.append('--apparent-size')
+ cmd.append(path)
+
+ ret = runCmd2(cmd)
+ size = int(ret.output.split()[0])
+ log.debug("Size of %s path is %s", path, size)
+ measurement = OrderedDict([('type', self.DISKUSAGE),
+ ('name', name),
+ ('legend', legend)])
+ measurement['values'] = OrderedDict([('size', size)])
+ self._append_measurement(measurement)
+ # Append to 'sizes' array for globalres log
+ self.sizes.append(str(size))
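
The du invocation above prints the usage in KiB followed by the path, so the
first whitespace-separated field is the size. Reproduced standalone (the path
is illustrative):

    import subprocess

    out = subprocess.check_output(['du', '-s', '--block-size', '1024', '/tmp'])
    size_kib = int(out.split()[0])
    print('%d KiB' % size_kib)
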
+
+ def save_buildstats(self, measurement_name):
"""Save buildstats"""
- shutil.move(self.bb_vars['BUILDSTATS_BASE'],
- os.path.join(self.out_dir, 'buildstats-' + self.name))
-
- @staticmethod
- def force_rm(path):
- """Equivalent of 'rm -rf'"""
- if os.path.isfile(path) or os.path.islink(path):
- os.unlink(path)
- elif os.path.isdir(path):
- shutil.rmtree(path)
+ def split_nevr(nevr):
+ """Split name and version information from recipe "nevr" string"""
+ n_e_v, revision = nevr.rsplit('-', 1)
+ match = re.match(r'^(?P<name>\S+)-((?P<epoch>[0-9]{1,5})_)?(?P<version>[0-9]\S*)$',
+ n_e_v)
+ if not match:
+ # If we're not able to parse a version starting with a number, just
+ # take the part after last dash
+ match = re.match(r'^(?P<name>\S+)-((?P<epoch>[0-9]{1,5})_)?(?P<version>[^-]+)$',
+ n_e_v)
+ name = match.group('name')
+ version = match.group('version')
+ epoch = match.group('epoch')
+ return name, epoch, version, revision
+
+ def bs_to_json(filename):
+ """Convert (task) buildstats file into json format"""
+ bs_json = OrderedDict()
+ iostat = OrderedDict()
+ rusage = OrderedDict()
+ with open(filename) as fobj:
+ for line in fobj.readlines():
+ key, val = line.split(':', 1)
+ val = val.strip()
+ if key == 'Started':
+ start_time = datetime.utcfromtimestamp(float(val))
+ bs_json['start_time'] = start_time
+ elif key == 'Ended':
+ end_time = datetime.utcfromtimestamp(float(val))
+ elif key.startswith('IO '):
+ split = key.split()
+ iostat[split[1]] = int(val)
+ elif key.find('rusage') >= 0:
+ split = key.split()
+ ru_key = split[-1]
+ if ru_key in ('ru_stime', 'ru_utime'):
+ val = float(val)
+ else:
+ val = int(val)
+ rusage[ru_key] = rusage.get(ru_key, 0) + val
+ elif key == 'Status':
+ bs_json['status'] = val
+ bs_json['elapsed_time'] = end_time - start_time
+ bs_json['rusage'] = rusage
+ bs_json['iostat'] = iostat
+ return bs_json
+
+ log.info('Saving buildstats in JSON format')
+ bs_dirs = sorted(os.listdir(self.bb_vars['BUILDSTATS_BASE']))
+ if len(bs_dirs) > 1:
+ log.warning("Multiple buildstats found for test %s, only "
+ "archiving the last one", self.name)
+ bs_dir = os.path.join(self.bb_vars['BUILDSTATS_BASE'], bs_dirs[-1])
+
+ buildstats = []
+ for fname in os.listdir(bs_dir):
+ recipe_dir = os.path.join(bs_dir, fname)
+ if not os.path.isdir(recipe_dir) or fname == "reduced_proc_pressure":
+ continue
+ name, epoch, version, revision = split_nevr(fname)
+ recipe_bs = OrderedDict((('name', name),
+ ('epoch', epoch),
+ ('version', version),
+ ('revision', revision),
+ ('tasks', OrderedDict())))
+ for task in os.listdir(recipe_dir):
+ recipe_bs['tasks'][task] = bs_to_json(os.path.join(recipe_dir,
+ task))
+ buildstats.append(recipe_bs)
+
+ self.buildstats[measurement_name] = buildstats
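
For reference, the nested split_nevr() helper parses buildstats directory
names as follows (inputs are illustrative):

    # split_nevr('glibc-2.31-r0') -> ('glibc', None, '2.31', 'r0')
    # split_nevr('bash-1_5.0-r0') -> ('bash', '1', '5.0', 'r0')
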
def rm_tmp(self):
"""Cleanup temporary/intermediate files and directories"""
log.debug("Removing temporary and cache files")
- for name in ['bitbake.lock', 'conf/sanity_info',
+ for name in ['bitbake.lock', 'cache/sanity_info',
self.bb_vars['TMPDIR']]:
- self.force_rm(name)
+ oe.path.remove(name, recurse=True)
def rm_sstate(self):
"""Remove sstate directory"""
log.debug("Removing sstate-cache")
- self.force_rm(self.bb_vars['SSTATE_DIR'])
+ oe.path.remove(self.bb_vars['SSTATE_DIR'], recurse=True)
def rm_cache(self):
"""Drop bitbake caches"""
- self.force_rm(self.bb_vars['PERSISTENT_DIR'])
+ oe.path.remove(self.bb_vars['PERSISTENT_DIR'], recurse=True)
@staticmethod
def sync():
"""Sync and drop kernel caches"""
+ runCmd2('bitbake -m', ignore_status=True)
log.debug("Syncing and dropping kernel caches""")
KernelDropCaches.drop()
os.sync()
# Wait a bit for all the dirty blocks to be written onto disk
time.sleep(3)
+
+
+class BuildPerfTestLoader(unittest.TestLoader):
+ """Test loader for build performance tests"""
+ sortTestMethodsUsing = None
+
+
+class BuildPerfTestRunner(unittest.TextTestRunner):
+ """Test runner for build performance tests"""
+
+ def __init__(self, out_dir, *args, **kwargs):
+ super(BuildPerfTestRunner, self).__init__(*args, **kwargs)
+ self.out_dir = out_dir
+
+ def _makeResult(self):
+ return BuildPerfTestResult(self.out_dir, self.stream, self.descriptions,
+ self.verbosity)
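
Taken together, the loader, runner and result classes plug into the stock
unittest machinery. A hypothetical wiring sketch (the output directory and
test module name are made up):

    import sys

    loader = BuildPerfTestLoader()
    suite = loader.loadTestsFromName('oeqa.buildperf.test_basic')  # assumed
    runner = BuildPerfTestRunner('/tmp/buildperf-results', stream=sys.stderr,
                                 verbosity=2)
    result = runner.run(suite)
    result.write_results_json()
    result.write_results_xml()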