diff options
Diffstat (limited to 'scripts')
44 files changed, 609 insertions, 334 deletions
diff --git a/scripts/bitbake-whatchanged b/scripts/bitbake-whatchanged index 3095dafa46..6f4b268119 100755 --- a/scripts/bitbake-whatchanged +++ b/scripts/bitbake-whatchanged @@ -217,7 +217,7 @@ print what will be done between the current and last builds, for example: # Edit the recipes $ bitbake-whatchanged core-image-sato -The changes will be printed" +The changes will be printed. Note: The amount of tasks is not accurate when the task is "do_build" since diff --git a/scripts/buildhistory-diff b/scripts/buildhistory-diff index 833f7c33a5..02eedafd6e 100755 --- a/scripts/buildhistory-diff +++ b/scripts/buildhistory-diff @@ -11,7 +11,6 @@ import sys import os import argparse -from distutils.version import LooseVersion # Ensure PythonGit is installed (buildhistory_analysis needs it) try: @@ -71,10 +70,6 @@ def main(): parser = get_args_parser() args = parser.parse_args() - if LooseVersion(git.__version__) < '0.3.1': - sys.stderr.write("Version of GitPython is too old, please install GitPython (python-git) 0.3.1 or later in order to use this script\n") - sys.exit(1) - if len(args.revisions) > 2: sys.stderr.write('Invalid argument(s) specified: %s\n\n' % ' '.join(args.revisions[2:])) parser.print_help() diff --git a/scripts/contrib/build-perf-test-wrapper.sh b/scripts/contrib/build-perf-test-wrapper.sh index fa71d4a2e9..0a85e6e708 100755 --- a/scripts/contrib/build-perf-test-wrapper.sh +++ b/scripts/contrib/build-perf-test-wrapper.sh @@ -87,21 +87,10 @@ if [ $# -ne 0 ]; then exit 1 fi -if [ -n "$email_to" ]; then - if ! [ -x "$(command -v phantomjs)" ]; then - echo "ERROR: Sending email needs phantomjs." - exit 1 - fi - if ! [ -x "$(command -v optipng)" ]; then - echo "ERROR: Sending email needs optipng." - exit 1 - fi -fi - # Open a file descriptor for flock and acquire lock LOCK_FILE="/tmp/oe-build-perf-test-wrapper.lock" if ! exec 3> "$LOCK_FILE"; then - echo "ERROR: Unable to open lock file" + echo "ERROR: Unable to open lock file" exit 1 fi if ! 
flock -n 3; then @@ -226,7 +215,7 @@ if [ -n "$results_repo" ]; then if [ -n "$email_to" ]; then echo "Emailing test report" os_name=`get_os_release_var PRETTY_NAME` - "$script_dir"/oe-build-perf-report-email.py --to "$email_to" --subject "Build Perf Test Report for $os_name" --text $report_txt --html $report_html "${OE_BUILD_PERF_REPORT_EMAIL_EXTRA_ARGS[@]}" + "$script_dir"/oe-build-perf-report-email.py --to "$email_to" --subject "Build Perf Test Report for $os_name" --text $report_txt "${OE_BUILD_PERF_REPORT_EMAIL_EXTRA_ARGS[@]}" fi # Upload report files, unless we're on detached head diff --git a/scripts/contrib/convert-srcuri.py b/scripts/contrib/convert-srcuri.py new file mode 100755 index 0000000000..5b362ea2e8 --- /dev/null +++ b/scripts/contrib/convert-srcuri.py @@ -0,0 +1,77 @@ +#!/usr/bin/env python3 +# +# Conversion script to update SRC_URI to add branch to git urls +# +# Copyright (C) 2021 Richard Purdie +# +# SPDX-License-Identifier: GPL-2.0-only +# + +import re +import os +import sys +import tempfile +import shutil +import mimetypes + +if len(sys.argv) < 2: + print("Please specify a directory to run the conversion script against.") + sys.exit(1) + +def processfile(fn): + def matchline(line): + if "MIRROR" in line or ".*" in line or "GNOME_GIT" in line: + return False + return True + print("processing file '%s'" % fn) + try: + if "distro_alias.inc" in fn or "linux-yocto-custom.bb" in fn: + return + fh, abs_path = tempfile.mkstemp() + modified = False + with os.fdopen(fh, 'w') as new_file: + with open(fn, "r") as old_file: + for line in old_file: + if ("git://" in line or "gitsm://" in line) and "branch=" not in line and matchline(line): + if line.endswith('"\n'): + line = line.replace('"\n', ';branch=master"\n') + elif line.endswith(" \\\n"): + line = line.replace(' \\\n', ';branch=master \\\n') + modified = True + if ("git://" in line or "gitsm://" in line) and "github.com" in line and "protocol=https" not in line and matchline(line): + if 
"protocol=git" in line: + line = line.replace('protocol=git', 'protocol=https') + elif line.endswith('"\n'): + line = line.replace('"\n', ';protocol=https"\n') + elif line.endswith(" \\\n"): + line = line.replace(' \\\n', ';protocol=https \\\n') + modified = True + new_file.write(line) + if modified: + shutil.copymode(fn, abs_path) + os.remove(fn) + shutil.move(abs_path, fn) + except UnicodeDecodeError: + pass + +ourname = os.path.basename(sys.argv[0]) +ourversion = "0.1" + +if os.path.isfile(sys.argv[1]): + processfile(sys.argv[1]) + sys.exit(0) + +for targetdir in sys.argv[1:]: + print("processing directory '%s'" % targetdir) + for root, dirs, files in os.walk(targetdir): + for name in files: + if name == ourname: + continue + fn = os.path.join(root, name) + if os.path.islink(fn): + continue + if "/.git/" in fn or fn.endswith(".html") or fn.endswith(".patch") or fn.endswith(".m4") or fn.endswith(".diff"): + continue + processfile(fn) + +print("All files processed with version %s" % ourversion) diff --git a/scripts/contrib/documentation-audit.sh b/scripts/contrib/documentation-audit.sh index 1191f57a8e..f436f9bae0 100755 --- a/scripts/contrib/documentation-audit.sh +++ b/scripts/contrib/documentation-audit.sh @@ -27,7 +27,7 @@ fi echo "REMINDER: you need to build for MACHINE=qemux86 or you won't get useful results" echo "REMINDER: you need to set LICENSE_FLAGS_WHITELIST appropriately in local.conf or " -echo " you'll get false positives. For example, LICENSE_FLAGS_WHITELIST = \"Commercial\"" +echo " you'll get false positives. 
For example, LICENSE_FLAGS_WHITELIST = \"commercial\"" for pkg in `bitbake -s | awk '{ print \$1 }'`; do if [[ "$pkg" == "Loading" || "$pkg" == "Loaded" || diff --git a/scripts/contrib/oe-build-perf-report-email.py b/scripts/contrib/oe-build-perf-report-email.py index de3862c897..7192113c28 100755 --- a/scripts/contrib/oe-build-perf-report-email.py +++ b/scripts/contrib/oe-build-perf-report-email.py @@ -19,8 +19,6 @@ import socket import subprocess import sys import tempfile -from email.mime.image import MIMEImage -from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText @@ -29,30 +27,6 @@ logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s") log = logging.getLogger('oe-build-perf-report') -# Find js scaper script -SCRAPE_JS = os.path.join(os.path.dirname(__file__), '..', 'lib', 'build_perf', - 'scrape-html-report.js') -if not os.path.isfile(SCRAPE_JS): - log.error("Unableto find oe-build-perf-report-scrape.js") - sys.exit(1) - - -class ReportError(Exception): - """Local errors""" - pass - - -def check_utils(): - """Check that all needed utils are installed in the system""" - missing = [] - for cmd in ('phantomjs', 'optipng'): - if not shutil.which(cmd): - missing.append(cmd) - if missing: - log.error("The following tools are missing: %s", ' '.join(missing)) - sys.exit(1) - - def parse_args(argv): """Parse command line arguments""" description = """Email build perf test report""" @@ -77,137 +51,19 @@ def parse_args(argv): "the email parts") parser.add_argument('--text', help="Plain text message") - parser.add_argument('--html', - help="HTML peport generated by oe-build-perf-report") - parser.add_argument('--phantomjs-args', action='append', - help="Extra command line arguments passed to PhantomJS") args = parser.parse_args(argv) - if not args.html and not args.text: - parser.error("Please specify --html and/or --text") + if not args.text: + parser.error("Please specify --text") return args -def decode_png(infile, 
outfile): - """Parse/decode/optimize png data from a html element""" - with open(infile) as f: - raw_data = f.read() - - # Grab raw base64 data - b64_data = re.sub('^.*href="data:image/png;base64,', '', raw_data, 1) - b64_data = re.sub('">.+$', '', b64_data, 1) - - # Replace file with proper decoded png - with open(outfile, 'wb') as f: - f.write(base64.b64decode(b64_data)) - - subprocess.check_output(['optipng', outfile], stderr=subprocess.STDOUT) - - -def mangle_html_report(infile, outfile, pngs): - """Mangle html file into a email compatible format""" - paste = True - png_dir = os.path.dirname(outfile) - with open(infile) as f_in: - with open(outfile, 'w') as f_out: - for line in f_in.readlines(): - stripped = line.strip() - # Strip out scripts - if stripped == '<!--START-OF-SCRIPTS-->': - paste = False - elif stripped == '<!--END-OF-SCRIPTS-->': - paste = True - elif paste: - if re.match('^.+href="data:image/png;base64', stripped): - # Strip out encoded pngs (as they're huge in size) - continue - elif 'www.gstatic.com' in stripped: - # HACK: drop references to external static pages - continue - - # Replace charts with <img> elements - match = re.match('<div id="(?P<id>\w+)"', stripped) - if match and match.group('id') in pngs: - f_out.write('<img src="cid:{}"\n'.format(match.group('id'))) - else: - f_out.write(line) - - -def scrape_html_report(report, outdir, phantomjs_extra_args=None): - """Scrape html report into a format sendable by email""" - tmpdir = tempfile.mkdtemp(dir='.') - log.debug("Using tmpdir %s for phantomjs output", tmpdir) - - if not os.path.isdir(outdir): - os.mkdir(outdir) - if os.path.splitext(report)[1] not in ('.html', '.htm'): - raise ReportError("Invalid file extension for report, needs to be " - "'.html' or '.htm'") - - try: - log.info("Scraping HTML report with PhangomJS") - extra_args = phantomjs_extra_args if phantomjs_extra_args else [] - subprocess.check_output(['phantomjs', '--debug=true'] + extra_args + - [SCRAPE_JS, report, 
tmpdir], - stderr=subprocess.STDOUT) - - pngs = [] - images = [] - for fname in os.listdir(tmpdir): - base, ext = os.path.splitext(fname) - if ext == '.png': - log.debug("Decoding %s", fname) - decode_png(os.path.join(tmpdir, fname), - os.path.join(outdir, fname)) - pngs.append(base) - images.append(fname) - elif ext in ('.html', '.htm'): - report_file = fname - else: - log.warning("Unknown file extension: '%s'", ext) - #shutil.move(os.path.join(tmpdir, fname), outdir) - - log.debug("Mangling html report file %s", report_file) - mangle_html_report(os.path.join(tmpdir, report_file), - os.path.join(outdir, report_file), pngs) - return (os.path.join(outdir, report_file), - [os.path.join(outdir, i) for i in images]) - finally: - shutil.rmtree(tmpdir) - -def send_email(text_fn, html_fn, image_fns, subject, recipients, copy=[], - blind_copy=[]): - """Send email""" +def send_email(text_fn, subject, recipients, copy=[], blind_copy=[]): # Generate email message - text_msg = html_msg = None - if text_fn: - with open(text_fn) as f: - text_msg = MIMEText("Yocto build performance test report.\n" + - f.read(), 'plain') - if html_fn: - html_msg = msg = MIMEMultipart('related') - with open(html_fn) as f: - html_msg.attach(MIMEText(f.read(), 'html')) - for img_fn in image_fns: - # Expect that content id is same as the filename - cid = os.path.splitext(os.path.basename(img_fn))[0] - with open(img_fn, 'rb') as f: - image_msg = MIMEImage(f.read()) - image_msg['Content-ID'] = '<{}>'.format(cid) - html_msg.attach(image_msg) - - if text_msg and html_msg: - msg = MIMEMultipart('alternative') - msg.attach(text_msg) - msg.attach(html_msg) - elif text_msg: - msg = text_msg - elif html_msg: - msg = html_msg - else: - raise ReportError("Neither plain text nor html body specified") + with open(text_fn) as f: + msg = MIMEText("Yocto build performance test report.\n" + f.read(), 'plain') pw_data = pwd.getpwuid(os.getuid()) full_name = pw_data.pw_gecos.split(',')[0] @@ -234,8 +90,6 @@ def 
main(argv=None): if args.debug: log.setLevel(logging.DEBUG) - check_utils() - if args.outdir: outdir = args.outdir if not os.path.exists(outdir): @@ -245,25 +99,16 @@ def main(argv=None): try: log.debug("Storing email parts in %s", outdir) - html_report = images = None - if args.html: - html_report, images = scrape_html_report(args.html, outdir, - args.phantomjs_args) - if args.to: log.info("Sending email to %s", ', '.join(args.to)) if args.cc: log.info("Copying to %s", ', '.join(args.cc)) if args.bcc: log.info("Blind copying to %s", ', '.join(args.bcc)) - send_email(args.text, html_report, images, args.subject, - args.to, args.cc, args.bcc) + send_email(args.text, args.subject, args.to, args.cc, args.bcc) except subprocess.CalledProcessError as err: log.error("%s, with output:\n%s", str(err), err.output.decode()) return 1 - except ReportError as err: - log.error(err) - return 1 finally: if not args.outdir: log.debug("Wiping %s", outdir) diff --git a/scripts/create-pull-request b/scripts/create-pull-request index 8eefcf63a5..2f91a355b0 100755 --- a/scripts/create-pull-request +++ b/scripts/create-pull-request @@ -128,7 +128,7 @@ PROTO_RE="[a-z][a-z+]*://" GIT_RE="\(^\($PROTO_RE\)\?\)\($USER_RE@\)\?\([^:/]*\)[:/]\(.*\)" REMOTE_URL=${REMOTE_URL%.git} REMOTE_REPO=$(echo $REMOTE_URL | sed "s#$GIT_RE#\5#") -REMOTE_URL=$(echo $REMOTE_URL | sed "s#$GIT_RE#git://\4/\5#") +REMOTE_URL=$(echo $REMOTE_URL | sed "s#$GIT_RE#https://\4/\5#") if [ -z "$BRANCH" ]; then BRANCH=$(git branch | grep -e "^\* " | cut -d' ' -f2) diff --git a/scripts/git b/scripts/git new file mode 100755 index 0000000000..644055e540 --- /dev/null +++ b/scripts/git @@ -0,0 +1,26 @@ +#!/usr/bin/env python3 +# +# Wrapper around 'git' that doesn't think we are root + +import os +import shutil +import sys + +os.environ['PSEUDO_UNLOAD'] = '1' + +# calculate path to the real 'git' +path = os.environ['PATH'] +# we need to remove our path but also any other copy of this script which +# may be present, e.g. eSDK. 
+replacements = [os.path.dirname(sys.argv[0])] +for p in path.split(":"): + if p.endswith("/scripts"): + replacements.append(p) +for r in replacements: + path = path.replace(r, '/ignoreme') +real_git = shutil.which('git', path=path) + +if len(sys.argv) == 1: + os.execl(real_git, 'git') + +os.execv(real_git, sys.argv) diff --git a/scripts/lib/buildstats.py b/scripts/lib/buildstats.py index c69b5bf4d7..3b76286ba5 100644 --- a/scripts/lib/buildstats.py +++ b/scripts/lib/buildstats.py @@ -8,7 +8,7 @@ import json import logging import os import re -from collections import namedtuple,OrderedDict +from collections import namedtuple from statistics import mean @@ -238,7 +238,7 @@ class BuildStats(dict): subdirs = os.listdir(path) for dirname in subdirs: recipe_dir = os.path.join(path, dirname) - if not os.path.isdir(recipe_dir): + if dirname == "reduced_proc_pressure" or not os.path.isdir(recipe_dir): continue name, epoch, version, revision = cls.split_nevr(dirname) bsrecipe = BSRecipe(name, epoch, version, revision) diff --git a/scripts/lib/checklayer/__init__.py b/scripts/lib/checklayer/__init__.py index fe545607bb..e69a10f452 100644 --- a/scripts/lib/checklayer/__init__.py +++ b/scripts/lib/checklayer/__init__.py @@ -146,7 +146,7 @@ def detect_layers(layer_directories, no_auto): return layers -def _find_layer_depends(depend, layers): +def _find_layer(depend, layers): for layer in layers: if 'collections' not in layer: continue @@ -156,7 +156,7 @@ def _find_layer_depends(depend, layers): return layer return None -def add_layer_dependencies(bblayersconf, layer, layers, logger): +def get_layer_dependencies(layer, layers, logger): def recurse_dependencies(depends, layer, layers, logger, ret = []): logger.debug('Processing dependencies %s for layer %s.' 
% \ (depends, layer['name'])) @@ -166,7 +166,7 @@ def add_layer_dependencies(bblayersconf, layer, layers, logger): if depend == 'core': continue - layer_depend = _find_layer_depends(depend, layers) + layer_depend = _find_layer(depend, layers) if not layer_depend: logger.error('Layer %s depends on %s and isn\'t found.' % \ (layer['name'], depend)) @@ -203,6 +203,11 @@ def add_layer_dependencies(bblayersconf, layer, layers, logger): layer_depends = recurse_dependencies(depends, layer, layers, logger, layer_depends) # Note: [] (empty) is allowed, None is not! + return layer_depends + +def add_layer_dependencies(bblayersconf, layer, layers, logger): + + layer_depends = get_layer_dependencies(layer, layers, logger) if layer_depends is None: return False else: diff --git a/scripts/lib/checklayer/cases/common.py b/scripts/lib/checklayer/cases/common.py index b82304e361..4495f71b24 100644 --- a/scripts/lib/checklayer/cases/common.py +++ b/scripts/lib/checklayer/cases/common.py @@ -14,7 +14,7 @@ class CommonCheckLayer(OECheckLayerTestCase): # The top-level README file may have a suffix (like README.rst or README.txt). readme_files = glob.glob(os.path.join(self.tc.layer['path'], '[Rr][Ee][Aa][Dd][Mm][Ee]*')) self.assertTrue(len(readme_files) > 0, - msg="Layer doesn't contains README file.") + msg="Layer doesn't contain a README file.") # There might be more than one file matching the file pattern above # (for example, README.rst and README-COPYING.rst). 
The one with the shortest diff --git a/scripts/lib/devtool/deploy.py b/scripts/lib/devtool/deploy.py index aaa25dda08..b4f9fbfe45 100644 --- a/scripts/lib/devtool/deploy.py +++ b/scripts/lib/devtool/deploy.py @@ -168,9 +168,9 @@ def deploy(args, config, basepath, workspace): if args.strip and not args.dry_run: # Fakeroot copy to new destination srcdir = recipe_outdir - recipe_outdir = os.path.join(rd.getVar('WORKDIR'), 'deploy-target-stripped') + recipe_outdir = os.path.join(rd.getVar('WORKDIR'), 'devtool-deploy-target-stripped') if os.path.isdir(recipe_outdir): - bb.utils.remove(recipe_outdir, True) + exec_fakeroot(rd, "rm -rf %s" % recipe_outdir, shell=True) exec_fakeroot(rd, "cp -af %s %s" % (os.path.join(srcdir, '.'), recipe_outdir), shell=True) os.environ['PATH'] = ':'.join([os.environ['PATH'], rd.getVar('PATH') or '']) oe.package.strip_execs(args.recipename, recipe_outdir, rd.getVar('STRIP'), rd.getVar('libdir'), @@ -201,9 +201,9 @@ def deploy(args, config, basepath, workspace): print(' %s' % item) return 0 - extraoptions = '' + extraoptions = '-o HostKeyAlgorithms=+ssh-rsa' if args.no_host_check: - extraoptions += '-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no' + extraoptions += ' -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no' if not args.show_status: extraoptions += ' -q' @@ -274,9 +274,9 @@ def undeploy(args, config, basepath, workspace): elif not args.recipename and not args.all: raise argparse_oe.ArgumentUsageError('If you don\'t specify a recipe, you must specify -a/--all', 'undeploy-target') - extraoptions = '' + extraoptions = '-o HostKeyAlgorithms=+ssh-rsa' if args.no_host_check: - extraoptions += '-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no' + extraoptions += ' -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no' if not args.show_status: extraoptions += ' -q' diff --git a/scripts/lib/devtool/menuconfig.py b/scripts/lib/devtool/menuconfig.py index 95384c5333..ff9227035d 100644 --- 
a/scripts/lib/devtool/menuconfig.py +++ b/scripts/lib/devtool/menuconfig.py @@ -43,7 +43,7 @@ def menuconfig(args, config, basepath, workspace): return 1 check_workspace_recipe(workspace, args.component) - pn = rd.getVar('PN', True) + pn = rd.getVar('PN') if not rd.getVarFlag('do_menuconfig','task'): raise DevtoolError("This recipe does not support menuconfig option") diff --git a/scripts/lib/devtool/standard.py b/scripts/lib/devtool/standard.py index d140b97de1..cfa88616af 100644 --- a/scripts/lib/devtool/standard.py +++ b/scripts/lib/devtool/standard.py @@ -357,7 +357,7 @@ def _move_file(src, dst, dry_run_outdir=None, base_outdir=None): bb.utils.mkdirhier(dst_d) shutil.move(src, dst) -def _copy_file(src, dst, dry_run_outdir=None): +def _copy_file(src, dst, dry_run_outdir=None, base_outdir=None): """Copy a file. Creates all the directory components of destination path.""" dry_run_suffix = ' (dry-run)' if dry_run_outdir else '' logger.debug('Copying %s to %s%s' % (src, dst, dry_run_suffix)) @@ -474,7 +474,11 @@ def symlink_oelocal_files_srctree(rd,srctree): destpth = os.path.join(srctree, relpth, fn) if os.path.exists(destpth): os.unlink(destpth) - os.symlink('oe-local-files/%s' % fn, destpth) + if relpth != '.': + back_relpth = os.path.relpath(local_files_dir, root) + os.symlink('%s/oe-local-files/%s/%s' % (back_relpth, relpth, fn), destpth) + else: + os.symlink('oe-local-files/%s' % fn, destpth) addfiles.append(os.path.join(relpth, fn)) if addfiles: bb.process.run('git add %s' % ' '.join(addfiles), cwd=srctree) @@ -531,7 +535,6 @@ def _extract_source(srctree, keep_temp, devbranch, sync, config, basepath, works initial_rev = None - appendexisted = False recipefile = d.getVar('FILE') appendfile = recipe_to_append(recipefile, config) is_kernel_yocto = bb.data.inherits_class('kernel-yocto', d) @@ -590,6 +593,16 @@ def _extract_source(srctree, keep_temp, devbranch, sync, config, basepath, works else: task = 'do_patch' + if 'noexec' in (d.getVarFlags(task, False) or 
[]) or 'task' not in (d.getVarFlags(task, False) or []): + logger.info('The %s recipe has %s disabled. Running only ' + 'do_configure task dependencies' % (pn, task)) + + if 'depends' in d.getVarFlags('do_configure', False): + pn = d.getVarFlags('do_configure', False)['depends'] + pn = pn.replace('${PV}', d.getVar('PV')) + pn = pn.replace('${COMPILERDEP}', d.getVar('COMPILERDEP')) + task = None + # Run the fetch + unpack tasks res = tinfoil.build_targets(pn, task, @@ -601,6 +614,17 @@ def _extract_source(srctree, keep_temp, devbranch, sync, config, basepath, works if not res: raise DevtoolError('Extracting source for %s failed' % pn) + if not is_kernel_yocto and ('noexec' in (d.getVarFlags('do_patch', False) or []) or 'task' not in (d.getVarFlags('do_patch', False) or [])): + workshareddir = d.getVar('S') + if os.path.islink(srctree): + os.unlink(srctree) + + os.symlink(workshareddir, srctree) + + # The initial_rev file is created in devtool_post_unpack function that will not be executed if + # do_unpack/do_patch tasks are disabled so we have to directly say that source extraction was successful + return True, True + try: with open(os.path.join(tempdir, 'initial_rev'), 'r') as f: initial_rev = f.read() @@ -848,10 +872,11 @@ def modify(args, config, basepath, workspace): if not initial_rev: return 1 logger.info('Source tree extracted to %s' % srctree) - # Get list of commits since this revision - (stdout, _) = bb.process.run('git rev-list --reverse %s..HEAD' % initial_rev, cwd=srctree) - commits = stdout.split() - check_commits = True + if os.path.exists(os.path.join(srctree, '.git')): + # Get list of commits since this revision + (stdout, _) = bb.process.run('git rev-list --reverse %s..HEAD' % initial_rev, cwd=srctree) + commits = stdout.split() + check_commits = True else: if os.path.exists(os.path.join(srctree, '.git')): # Check if it's a tree previously extracted by us. 
This is done @@ -928,12 +953,17 @@ def modify(args, config, basepath, workspace): if bb.data.inherits_class('kernel', rd): f.write('SRCTREECOVEREDTASKS = "do_validate_branches do_kernel_checkout ' - 'do_fetch do_unpack do_kernel_configme do_kernel_configcheck"\n') + 'do_fetch do_unpack do_kernel_configcheck"\n') f.write('\ndo_patch[noexec] = "1"\n') f.write('\ndo_configure_append() {\n' ' cp ${B}/.config ${S}/.config.baseline\n' ' ln -sfT ${B}/.config ${S}/.config.new\n' '}\n') + f.write('\ndo_kernel_configme_prepend() {\n' + ' if [ -e ${S}/.config ]; then\n' + ' mv ${S}/.config ${S}/.config.old\n' + ' fi\n' + '}\n') if rd.getVarFlag('do_menuconfig','task'): f.write('\ndo_configure_append() {\n' ' if [ ! ${DEVTOOL_DISABLE_MENUCONFIG} ]; then\n' diff --git a/scripts/lib/recipetool/create.py b/scripts/lib/recipetool/create.py index 566c75369a..a2c6d052a6 100644 --- a/scripts/lib/recipetool/create.py +++ b/scripts/lib/recipetool/create.py @@ -435,7 +435,7 @@ def create_recipe(args): if args.binary: # Assume the archive contains the directory structure verbatim # so we need to extract to a subdirectory - fetchuri += ';subdir=${BP}' + fetchuri += ';subdir=${BPN}' srcuri = fetchuri rev_re = re.compile(';rev=([^;]+)') res = rev_re.search(srcuri) @@ -478,6 +478,9 @@ def create_recipe(args): storeTagName = params['tag'] params['nobranch'] = '1' del params['tag'] + # Assume 'master' branch if not set + if scheme in ['git', 'gitsm'] and 'branch' not in params and 'nobranch' not in params: + params['branch'] = 'master' fetchuri = bb.fetch2.encodeurl((scheme, network, path, user, passwd, params)) tmpparent = tinfoil.config_data.getVar('BASE_WORKDIR') @@ -527,10 +530,9 @@ def create_recipe(args): # Remove HEAD reference point and drop remote prefix get_branch = [x.split('/', 1)[1] for x in get_branch if not x.startswith('origin/HEAD')] if 'master' in get_branch: - # If it is master, we do not need to append 'branch=master' as this is default. 
# Even with the case where get_branch has multiple objects, if 'master' is one # of them, we should default take from 'master' - srcbranch = '' + srcbranch = 'master' elif len(get_branch) == 1: # If 'master' isn't in get_branch and get_branch contains only ONE object, then store result into 'srcbranch' srcbranch = get_branch[0] @@ -543,8 +545,8 @@ def create_recipe(args): # Since we might have a value in srcbranch, we need to # recontruct the srcuri to include 'branch' in params. scheme, network, path, user, passwd, params = bb.fetch2.decodeurl(srcuri) - if srcbranch: - params['branch'] = srcbranch + if scheme in ['git', 'gitsm']: + params['branch'] = srcbranch or 'master' if storeTagName and scheme in ['git', 'gitsm']: # Check srcrev using tag and check validity of the tag @@ -603,7 +605,7 @@ def create_recipe(args): splitline = line.split() if len(splitline) > 1: if splitline[0] == 'origin' and scriptutils.is_src_url(splitline[1]): - srcuri = reformat_git_uri(splitline[1]) + srcuri = reformat_git_uri(splitline[1]) + ';branch=master' srcsubdir = 'git' break @@ -743,6 +745,10 @@ def create_recipe(args): for handler in handlers: handler.process(srctree_use, classes, lines_before, lines_after, handled, extravalues) + # native and nativesdk classes are special and must be inherited last + # If present, put them at the end of the classes list + classes.sort(key=lambda c: c in ("native", "nativesdk")) + extrafiles = extravalues.pop('extrafiles', {}) extra_pn = extravalues.pop('PN', None) extra_pv = extravalues.pop('PV', None) diff --git a/scripts/lib/resulttool/report.py b/scripts/lib/resulttool/report.py index f0ca50ebe2..a349510ab8 100644 --- a/scripts/lib/resulttool/report.py +++ b/scripts/lib/resulttool/report.py @@ -176,7 +176,10 @@ class ResultsTextReport(object): vals['sort'] = line['testseries'] + "_" + line['result_id'] vals['failed_testcases'] = line['failed_testcases'] for k in cols: - vals[k] = "%d (%s%%)" % (line[k], format(line[k] / total_tested * 100, 
'.0f')) + if total_tested: + vals[k] = "%d (%s%%)" % (line[k], format(line[k] / total_tested * 100, '.0f')) + else: + vals[k] = "0 (0%)" for k in maxlen: if k in vals and len(vals[k]) > maxlen[k]: maxlen[k] = len(vals[k]) diff --git a/scripts/lib/resulttool/resultutils.py b/scripts/lib/resulttool/resultutils.py index 8917022d36..c5521d81bd 100644 --- a/scripts/lib/resulttool/resultutils.py +++ b/scripts/lib/resulttool/resultutils.py @@ -58,7 +58,11 @@ def append_resultsdata(results, f, configmap=store_map, configvars=extra_configv testseries = posixpath.basename(posixpath.dirname(url.path)) else: with open(f, "r") as filedata: - data = json.load(filedata) + try: + data = json.load(filedata) + except json.decoder.JSONDecodeError: + print("Cannot decode {}. Possible corruption. Skipping.".format(f)) + data = "" testseries = os.path.basename(os.path.dirname(f)) else: data = f @@ -142,7 +146,7 @@ def generic_get_log(sectionname, results, section): return decode_log(ptest['log']) def ptestresult_get_log(results, section): - return generic_get_log('ptestresuls.sections', results, section) + return generic_get_log('ptestresult.sections', results, section) def generic_get_rawlogs(sectname, results): if sectname not in results: diff --git a/scripts/lib/scriptutils.py b/scripts/lib/scriptutils.py index f92255d8dc..47a08194d0 100644 --- a/scripts/lib/scriptutils.py +++ b/scripts/lib/scriptutils.py @@ -18,7 +18,8 @@ import sys import tempfile import threading import importlib -from importlib import machinery +import importlib.machinery +import importlib.util class KeepAliveStreamHandler(logging.StreamHandler): def __init__(self, keepalive=True, **kwargs): @@ -82,7 +83,9 @@ def load_plugins(logger, plugins, pluginpath): logger.debug('Loading plugin %s' % name) spec = importlib.machinery.PathFinder.find_spec(name, path=[pluginpath] ) if spec: - return spec.loader.load_module() + mod = importlib.util.module_from_spec(spec) + spec.loader.exec_module(mod) + return mod def 
plugin_name(filename): return os.path.splitext(os.path.basename(filename))[0] @@ -215,7 +218,8 @@ def fetch_url(tinfoil, srcuri, srcrev, destdir, logger, preserve_tmp=False, mirr pathvars = ['T', 'RECIPE_SYSROOT', 'RECIPE_SYSROOT_NATIVE'] for pathvar in pathvars: path = rd.getVar(pathvar) - shutil.rmtree(path) + if os.path.exists(path): + shutil.rmtree(path) finally: if fetchrecipe: try: diff --git a/scripts/lib/wic/engine.py b/scripts/lib/wic/engine.py index 9ff4394757..7dbde85696 100644 --- a/scripts/lib/wic/engine.py +++ b/scripts/lib/wic/engine.py @@ -19,10 +19,10 @@ import os import tempfile import json import subprocess +import shutil import re from collections import namedtuple, OrderedDict -from distutils.spawn import find_executable from wic import WicError from wic.filemap import sparse_copy @@ -245,7 +245,7 @@ class Disk: for path in pathlist.split(':'): self.paths = "%s%s:%s" % (native_sysroot, path, self.paths) - self.parted = find_executable("parted", self.paths) + self.parted = shutil.which("parted", path=self.paths) if not self.parted: raise WicError("Can't find executable parted") @@ -283,7 +283,7 @@ class Disk: "resize2fs", "mkswap", "mkdosfs", "debugfs"): aname = "_%s" % name if aname not in self.__dict__: - setattr(self, aname, find_executable(name, self.paths)) + setattr(self, aname, shutil.which(name, path=self.paths)) if aname not in self.__dict__ or self.__dict__[aname] is None: raise WicError("Can't find executable '{}'".format(name)) return self.__dict__[aname] diff --git a/scripts/lib/wic/help.py b/scripts/lib/wic/help.py index 1e3d06a87b..fcace95ff4 100644 --- a/scripts/lib/wic/help.py +++ b/scripts/lib/wic/help.py @@ -840,8 +840,8 @@ DESCRIPTION meanings. The commands are based on the Fedora kickstart documentation but with modifications to reflect wic capabilities. 
- http://fedoraproject.org/wiki/Anaconda/Kickstart#part_or_partition - http://fedoraproject.org/wiki/Anaconda/Kickstart#bootloader + https://pykickstart.readthedocs.io/en/latest/kickstart-docs.html#part-or-partition + https://pykickstart.readthedocs.io/en/latest/kickstart-docs.html#bootloader Commands @@ -980,6 +980,12 @@ DESCRIPTION copies. This option only has an effect with the rootfs source plugin. + --change-directory: This option is specific to wic. It changes to the + given directory before copying the files. This + option is useful when we want to split a rootfs in + multiple partitions and we want to keep the right + permissions and usernames in all the partitions. + --extra-space: This option is specific to wic. It adds extra space after the space filled by the content of the partition. The final size can go diff --git a/scripts/lib/wic/ksparser.py b/scripts/lib/wic/ksparser.py index 76cc55b848..452a160232 100644 --- a/scripts/lib/wic/ksparser.py +++ b/scripts/lib/wic/ksparser.py @@ -152,6 +152,7 @@ class KickStart(): part.add_argument('--offset', type=sizetype("K", True)) part.add_argument('--exclude-path', nargs='+') part.add_argument('--include-path', nargs='+') + part.add_argument('--change-directory') part.add_argument("--extra-space", type=sizetype("M")) part.add_argument('--fsoptions', dest='fsopts') part.add_argument('--fstype', default='vfat', @@ -228,6 +229,23 @@ class KickStart(): err = "%s:%d: SquashFS does not support LABEL" \ % (confpath, lineno) raise KickStartError(err) + if parsed.fstype == 'msdos' or parsed.fstype == 'vfat': + if parsed.fsuuid: + if parsed.fsuuid.upper().startswith('0X'): + if len(parsed.fsuuid) > 10: + err = "%s:%d: fsuuid %s given in wks kickstart file " \ + "exceeds the length limit for %s filesystem. " \ + "It should be in the form of a 32 bit hexadecimal" \ + "number (for example, 0xABCD1234)." 
\ + % (confpath, lineno, parsed.fsuuid, parsed.fstype) + raise KickStartError(err) + elif len(parsed.fsuuid) > 8: + err = "%s:%d: fsuuid %s given in wks kickstart file " \ + "exceeds the length limit for %s filesystem. " \ + "It should be in the form of a 32 bit hexadecimal" \ + "number (for example, 0xABCD1234)." \ + % (confpath, lineno, parsed.fsuuid, parsed.fstype) + raise KickStartError(err) if parsed.use_label and not parsed.label: err = "%s:%d: Must set the label with --label" \ % (confpath, lineno) diff --git a/scripts/lib/wic/misc.py b/scripts/lib/wic/misc.py index fe4abe8115..3e11822996 100644 --- a/scripts/lib/wic/misc.py +++ b/scripts/lib/wic/misc.py @@ -16,9 +16,9 @@ import logging import os import re import subprocess +import shutil from collections import defaultdict -from distutils import spawn from wic import WicError @@ -26,6 +26,7 @@ logger = logging.getLogger('wic') # executable -> recipe pairs for exec_native_cmd NATIVE_RECIPES = {"bmaptool": "bmap-tools", + "dumpe2fs": "e2fsprogs", "grub-mkimage": "grub-efi", "isohybrid": "syslinux", "mcopy": "mtools", @@ -45,7 +46,8 @@ NATIVE_RECIPES = {"bmaptool": "bmap-tools", "parted": "parted", "sfdisk": "util-linux", "sgdisk": "gptfdisk", - "syslinux": "syslinux" + "syslinux": "syslinux", + "tar": "tar" } def runtool(cmdln_or_args): @@ -112,6 +114,15 @@ def exec_cmd(cmd_and_args, as_shell=False): """ return _exec_cmd(cmd_and_args, as_shell)[1] +def find_executable(cmd, paths): + recipe = cmd + if recipe in NATIVE_RECIPES: + recipe = NATIVE_RECIPES[recipe] + provided = get_bitbake_var("ASSUME_PROVIDED") + if provided and "%s-native" % recipe in provided: + return True + + return shutil.which(cmd, path=paths) def exec_native_cmd(cmd_and_args, native_sysroot, pseudo=""): """ @@ -128,16 +139,19 @@ def exec_native_cmd(cmd_and_args, native_sysroot, pseudo=""): if pseudo: cmd_and_args = pseudo + cmd_and_args - native_paths = "%s/sbin:%s/usr/sbin:%s/usr/bin:%s/bin" % \ + hosttools_dir = 
get_bitbake_var("HOSTTOOLS_DIR") + + native_paths = "%s/sbin:%s/usr/sbin:%s/usr/bin:%s/bin:%s" % \ (native_sysroot, native_sysroot, - native_sysroot, native_sysroot) + native_sysroot, native_sysroot, + hosttools_dir) native_cmd_and_args = "export PATH=%s:$PATH;%s" % \ (native_paths, cmd_and_args) logger.debug("exec_native_cmd: %s", native_cmd_and_args) # If the command isn't in the native sysroot say we failed. - if spawn.find_executable(args[0], native_paths): + if find_executable(args[0], native_paths): ret, out = _exec_cmd(native_cmd_and_args, True) else: ret = 127 diff --git a/scripts/lib/wic/partition.py b/scripts/lib/wic/partition.py index 3490b4e75d..792bb3dcd3 100644 --- a/scripts/lib/wic/partition.py +++ b/scripts/lib/wic/partition.py @@ -31,6 +31,7 @@ class Partition(): self.extra_space = args.extra_space self.exclude_path = args.exclude_path self.include_path = args.include_path + self.change_directory = args.change_directory self.fsopts = args.fsopts self.fstype = args.fstype self.label = args.label @@ -53,6 +54,9 @@ class Partition(): self.uuid = args.uuid self.fsuuid = args.fsuuid self.type = args.type + self.updated_fstab_path = None + self.has_fstab = False + self.update_fstab_in_rootfs = False self.lineno = lineno self.source_file = "" @@ -100,7 +104,7 @@ class Partition(): extra_blocks = self.extra_space rootfs_size = actual_rootfs_size + extra_blocks - rootfs_size *= self.overhead_factor + rootfs_size = int(rootfs_size * self.overhead_factor) logger.debug("Added %d extra blocks to %s to get to %d total blocks", extra_blocks, self.mountpoint, rootfs_size) @@ -117,11 +121,15 @@ class Partition(): return self.fixed_size if self.fixed_size else self.size def prepare(self, creator, cr_workdir, oe_builddir, rootfs_dir, - bootimg_dir, kernel_dir, native_sysroot): + bootimg_dir, kernel_dir, native_sysroot, updated_fstab_path): """ Prepare content for individual partitions, depending on partition command parameters. 
""" + self.updated_fstab_path = updated_fstab_path + if self.updated_fstab_path and not (self.fstype.startswith("ext") or self.fstype == "msdos"): + self.update_fstab_in_rootfs = True + if not self.source: if not self.size and not self.fixed_size: raise WicError("The %s partition has a size of zero. Please " @@ -191,29 +199,40 @@ class Partition(): (self.mountpoint, self.size, self.fixed_size)) def prepare_rootfs(self, cr_workdir, oe_builddir, rootfs_dir, - native_sysroot, real_rootfs = True): + native_sysroot, real_rootfs = True, pseudo_dir = None): """ Prepare content for a rootfs partition i.e. create a partition and fill it from a /rootfs dir. Currently handles ext2/3/4, btrfs, vfat and squashfs. """ - p_prefix = os.environ.get("PSEUDO_PREFIX", "%s/usr" % native_sysroot) - p_localstatedir = os.environ.get("PSEUDO_LOCALSTATEDIR", - "%s/../pseudo" % rootfs_dir) - p_passwd = os.environ.get("PSEUDO_PASSWD", rootfs_dir) - p_nosymlinkexp = os.environ.get("PSEUDO_NOSYMLINKEXP", "1") - pseudo = "export PSEUDO_PREFIX=%s;" % p_prefix - pseudo += "export PSEUDO_LOCALSTATEDIR=%s;" % p_localstatedir - pseudo += "export PSEUDO_PASSWD=%s;" % p_passwd - pseudo += "export PSEUDO_NOSYMLINKEXP=%s;" % p_nosymlinkexp - pseudo += "%s " % get_bitbake_var("FAKEROOTCMD") rootfs = "%s/rootfs_%s.%s.%s" % (cr_workdir, self.label, self.lineno, self.fstype) if os.path.isfile(rootfs): os.remove(rootfs) + p_prefix = os.environ.get("PSEUDO_PREFIX", "%s/usr" % native_sysroot) + if (pseudo_dir): + # Canonicalize the ignore paths. This corresponds to + # calling oe.path.canonicalize(), which is used in bitbake.conf. 
+ ignore_paths = [rootfs] + (get_bitbake_var("PSEUDO_IGNORE_PATHS") or "").split(",") + canonical_paths = [] + for path in ignore_paths: + if "$" not in path: + trailing_slash = path.endswith("/") and "/" or "" + canonical_paths.append(os.path.realpath(path) + trailing_slash) + ignore_paths = ",".join(canonical_paths) + + pseudo = "export PSEUDO_PREFIX=%s;" % p_prefix + pseudo += "export PSEUDO_LOCALSTATEDIR=%s;" % pseudo_dir + pseudo += "export PSEUDO_PASSWD=%s;" % rootfs_dir + pseudo += "export PSEUDO_NOSYMLINKEXP=1;" + pseudo += "export PSEUDO_IGNORE_PATHS=%s;" % ignore_paths + pseudo += "%s " % get_bitbake_var("FAKEROOTCMD") + else: + pseudo = None + if not self.size and real_rootfs: # The rootfs size is not set in .ks file so try to get it # from bitbake variable @@ -235,7 +254,7 @@ class Partition(): prefix = "ext" if self.fstype.startswith("ext") else self.fstype method = getattr(self, "prepare_rootfs_" + prefix) - method(rootfs, oe_builddir, rootfs_dir, native_sysroot, pseudo) + method(rootfs, cr_workdir, oe_builddir, rootfs_dir, native_sysroot, pseudo) self.source_file = rootfs # get the rootfs size in the right units for kickstart (kB) @@ -243,7 +262,7 @@ class Partition(): out = exec_cmd(du_cmd) self.size = int(out.split()[0]) - def prepare_rootfs_ext(self, rootfs, oe_builddir, rootfs_dir, + def prepare_rootfs_ext(self, rootfs, cr_workdir, oe_builddir, rootfs_dir, native_sysroot, pseudo): """ Prepare content for an ext2/3/4 rootfs partition. 
@@ -267,10 +286,21 @@ class Partition(): (self.fstype, extraopts, rootfs, label_str, self.fsuuid, rootfs_dir) exec_native_cmd(mkfs_cmd, native_sysroot, pseudo=pseudo) + if self.updated_fstab_path and self.has_fstab: + debugfs_script_path = os.path.join(cr_workdir, "debugfs_script") + with open(debugfs_script_path, "w") as f: + f.write("cd etc\n") + f.write("rm fstab\n") + f.write("write %s fstab\n" % (self.updated_fstab_path)) + debugfs_cmd = "debugfs -w -f %s %s" % (debugfs_script_path, rootfs) + exec_native_cmd(debugfs_cmd, native_sysroot) + mkfs_cmd = "fsck.%s -pvfD %s" % (self.fstype, rootfs) exec_native_cmd(mkfs_cmd, native_sysroot, pseudo=pseudo) - def prepare_rootfs_btrfs(self, rootfs, oe_builddir, rootfs_dir, + self.check_for_Y2038_problem(rootfs, native_sysroot) + + def prepare_rootfs_btrfs(self, rootfs, cr_workdir, oe_builddir, rootfs_dir, native_sysroot, pseudo): """ Prepare content for a btrfs rootfs partition. @@ -293,7 +323,7 @@ class Partition(): self.mkfs_extraopts, self.fsuuid, rootfs) exec_native_cmd(mkfs_cmd, native_sysroot, pseudo=pseudo) - def prepare_rootfs_msdos(self, rootfs, oe_builddir, rootfs_dir, + def prepare_rootfs_msdos(self, rootfs, cr_workdir, oe_builddir, rootfs_dir, native_sysroot, pseudo): """ Prepare content for a msdos/vfat rootfs partition. @@ -322,12 +352,16 @@ class Partition(): mcopy_cmd = "mcopy -i %s -s %s/* ::/" % (rootfs, rootfs_dir) exec_native_cmd(mcopy_cmd, native_sysroot) + if self.updated_fstab_path and self.has_fstab: + mcopy_cmd = "mcopy -i %s %s ::/etc/fstab" % (rootfs, self.updated_fstab_path) + exec_native_cmd(mcopy_cmd, native_sysroot) + chmod_cmd = "chmod 644 %s" % rootfs exec_cmd(chmod_cmd) prepare_rootfs_vfat = prepare_rootfs_msdos - def prepare_rootfs_squashfs(self, rootfs, oe_builddir, rootfs_dir, + def prepare_rootfs_squashfs(self, rootfs, cr_workdir, oe_builddir, rootfs_dir, native_sysroot, pseudo): """ Prepare content for a squashfs rootfs partition. 
@@ -356,6 +390,8 @@ class Partition(): (self.fstype, extraopts, label_str, self.fsuuid, rootfs) exec_native_cmd(mkfs_cmd, native_sysroot) + self.check_for_Y2038_problem(rootfs, native_sysroot) + def prepare_empty_partition_btrfs(self, rootfs, oe_builddir, native_sysroot): """ @@ -417,3 +453,37 @@ class Partition(): mkswap_cmd = "mkswap %s -U %s %s" % (label_str, self.fsuuid, path) exec_native_cmd(mkswap_cmd, native_sysroot) + + def check_for_Y2038_problem(self, rootfs, native_sysroot): + """ + Check if the filesystem is affected by the Y2038 problem + (Y2038 problem = 32 bit time_t overflow in January 2038) + """ + def get_err_str(part): + err = "The {} filesystem {} has no Y2038 support." + if part.mountpoint: + args = [part.fstype, "mounted at %s" % part.mountpoint] + elif part.label: + args = [part.fstype, "labeled '%s'" % part.label] + elif part.part_name: + args = [part.fstype, "in partition '%s'" % part.part_name] + else: + args = [part.fstype, "in partition %s" % part.num] + return err.format(*args) + + # ext2 and ext3 are always affected by the Y2038 problem + if self.fstype in ["ext2", "ext3"]: + logger.warn(get_err_str(self)) + return + + ret, out = exec_native_cmd("dumpe2fs %s" % rootfs, native_sysroot) + + # if ext4 is affected by the Y2038 problem depends on the inode size + for line in out.splitlines(): + if line.startswith("Inode size:"): + size = int(line.split(":")[1].strip()) + if size < 256: + logger.warn("%s Inodes (of size %d) are too small." 
% + (get_err_str(self), size)) + break + diff --git a/scripts/lib/wic/pluginbase.py b/scripts/lib/wic/pluginbase.py index d9b4e57747..b64568339b 100644 --- a/scripts/lib/wic/pluginbase.py +++ b/scripts/lib/wic/pluginbase.py @@ -9,9 +9,11 @@ __all__ = ['ImagerPlugin', 'SourcePlugin'] import os import logging +import types from collections import defaultdict -from importlib.machinery import SourceFileLoader +import importlib +import importlib.util from wic import WicError from wic.misc import get_bitbake_var @@ -54,7 +56,9 @@ class PluginMgr: mname = fname[:-3] mpath = os.path.join(ppath, fname) logger.debug("loading plugin module %s", mpath) - SourceFileLoader(mname, mpath).load_module() + spec = importlib.util.spec_from_file_location(mname, mpath) + module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(module) return PLUGINS.get(ptype) diff --git a/scripts/lib/wic/plugins/imager/direct.py b/scripts/lib/wic/plugins/imager/direct.py index 55db826e93..42704d1e10 100644 --- a/scripts/lib/wic/plugins/imager/direct.py +++ b/scripts/lib/wic/plugins/imager/direct.py @@ -58,11 +58,11 @@ class DirectPlugin(ImagerPlugin): self.compressor = options.compressor self.bmap = options.bmap self.no_fstab_update = options.no_fstab_update - self.original_fstab = None + self.updated_fstab_path = None self.name = "%s-%s" % (os.path.splitext(os.path.basename(wks_file))[0], strftime("%Y%m%d%H%M")) - self.workdir = tempfile.mkdtemp(dir=self.outdir, prefix='tmp.wic.') + self.workdir = self.setup_workdir(options.workdir) self._image = None self.ptable_format = self.ks.bootloader.ptable self.parts = self.ks.partitions @@ -78,6 +78,16 @@ class DirectPlugin(ImagerPlugin): self._image = PartitionedImage(image_path, self.ptable_format, self.parts, self.native_sysroot) + def setup_workdir(self, workdir): + if workdir: + if os.path.exists(workdir): + raise WicError("Internal workdir '%s' specified in wic arguments already exists!" 
% (workdir)) + + os.makedirs(workdir) + return workdir + else: + return tempfile.mkdtemp(dir=self.outdir, prefix='tmp.wic.') + def do_create(self): """ Plugin entry point. @@ -90,11 +100,8 @@ class DirectPlugin(ImagerPlugin): finally: self.cleanup() - def _write_fstab(self, image_rootfs): - """overriden to generate fstab (temporarily) in rootfs. This is called - from _create, make sure it doesn't get called from - BaseImage.create() - """ + def update_fstab(self, image_rootfs): + """Assume partition order same as in wks""" if not image_rootfs: return @@ -104,20 +111,11 @@ class DirectPlugin(ImagerPlugin): with open(fstab_path) as fstab: fstab_lines = fstab.readlines() - self.original_fstab = fstab_lines.copy() - - if self._update_fstab(fstab_lines, self.parts): - with open(fstab_path, "w") as fstab: - fstab.writelines(fstab_lines) - else: - self.original_fstab = None - def _update_fstab(self, fstab_lines, parts): - """Assume partition order same as in wks""" updated = False - for part in parts: + for part in self.parts: if not part.realnum or not part.mountpoint \ - or part.mountpoint == "/": + or part.mountpoint == "/" or not (part.mountpoint.startswith('/') or part.mountpoint == "swap"): continue if part.use_uuid: @@ -144,7 +142,10 @@ class DirectPlugin(ImagerPlugin): fstab_lines.append(line) updated = True - return updated + if updated: + self.updated_fstab_path = os.path.join(self.workdir, "fstab") + with open(self.updated_fstab_path, "w") as f: + f.writelines(fstab_lines) def _full_path(self, path, name, extention): """ Construct full file path to a file we generate. """ @@ -160,7 +161,7 @@ class DirectPlugin(ImagerPlugin): a partitioned image. 
""" if not self.no_fstab_update: - self._write_fstab(self.rootfs_dir.get("ROOTFS_DIR")) + self.update_fstab(self.rootfs_dir.get("ROOTFS_DIR")) for part in self.parts: # get rootfs size from bitbake variable if it's not set in .ks file @@ -273,12 +274,6 @@ class DirectPlugin(ImagerPlugin): if os.path.isfile(path): shutil.move(path, os.path.join(self.outdir, fname)) - #Restore original fstab - if self.original_fstab: - fstab_path = self.rootfs_dir.get("ROOTFS_DIR") + "/etc/fstab" - with open(fstab_path, "w") as fstab: - fstab.writelines(self.original_fstab) - # remove work directory shutil.rmtree(self.workdir, ignore_errors=True) @@ -343,6 +338,13 @@ class PartitionedImage(): part.fsuuid = '0x' + str(uuid.uuid4())[:8].upper() else: part.fsuuid = str(uuid.uuid4()) + else: + #make sure the fsuuid for vfat/msdos align with format 0xYYYYYYYY + if part.fstype == 'vfat' or part.fstype == 'msdos': + if part.fsuuid.upper().startswith("0X"): + part.fsuuid = '0x' + part.fsuuid.upper()[2:].rjust(8,"0") + else: + part.fsuuid = '0x' + part.fsuuid.upper().rjust(8,"0") def prepare(self, imager): """Prepare an image. Call prepare method of all image partitions.""" @@ -351,7 +353,8 @@ class PartitionedImage(): # sizes before we can add them and do the layout. 
part.prepare(imager, imager.workdir, imager.oe_builddir, imager.rootfs_dir, imager.bootimg_dir, - imager.kernel_dir, imager.native_sysroot) + imager.kernel_dir, imager.native_sysroot, + imager.updated_fstab_path) # Converting kB to sectors for parted part.size_sec = part.disk_size * 1024 // self.sector_size diff --git a/scripts/lib/wic/plugins/source/bootimg-efi.py b/scripts/lib/wic/plugins/source/bootimg-efi.py index 2cfdc10ecd..05e8471116 100644 --- a/scripts/lib/wic/plugins/source/bootimg-efi.py +++ b/scripts/lib/wic/plugins/source/bootimg-efi.py @@ -277,6 +277,13 @@ class BootimgEFIPlugin(SourcePlugin): logger.debug("Added %d extra blocks to %s to get to %d total blocks", extra_blocks, part.mountpoint, blocks) + # required for compatibility with certain devices expecting file system + # block count to be equal to partition block count + if blocks < part.fixed_size: + blocks = part.fixed_size + logger.debug("Overriding %s to %d total blocks for compatibility", + part.mountpoint, blocks) + # dosfs image, created by mkdosfs bootimg = "%s/boot.img" % cr_workdir diff --git a/scripts/lib/wic/plugins/source/bootimg-partition.py b/scripts/lib/wic/plugins/source/bootimg-partition.py index 138986a71e..5dbe2558d2 100644 --- a/scripts/lib/wic/plugins/source/bootimg-partition.py +++ b/scripts/lib/wic/plugins/source/bootimg-partition.py @@ -141,7 +141,7 @@ class BootimgPartitionPlugin(SourcePlugin): break if not kernel_name: - raise WicError('No kernel file founded') + raise WicError('No kernel file found') # Compose the extlinux.conf extlinux_conf = "default Yocto\n" diff --git a/scripts/lib/wic/plugins/source/bootimg-pcbios.py b/scripts/lib/wic/plugins/source/bootimg-pcbios.py index f2639e7004..32e47f1831 100644 --- a/scripts/lib/wic/plugins/source/bootimg-pcbios.py +++ b/scripts/lib/wic/plugins/source/bootimg-pcbios.py @@ -186,8 +186,10 @@ class BootimgPcbiosPlugin(SourcePlugin): # dosfs image, created by mkdosfs bootimg = "%s/boot%s.img" % (cr_workdir, part.lineno) - 
dosfs_cmd = "mkdosfs -n boot -i %s -S 512 -C %s %d" % \ - (part.fsuuid, bootimg, blocks) + label = part.label if part.label else "boot" + + dosfs_cmd = "mkdosfs -n %s -i %s -S 512 -C %s %d" % \ + (label, part.fsuuid, bootimg, blocks) exec_native_cmd(dosfs_cmd, native_sysroot) mcopy_cmd = "mcopy -i %s -s %s/* ::/" % (bootimg, hdddir) diff --git a/scripts/lib/wic/plugins/source/rootfs.py b/scripts/lib/wic/plugins/source/rootfs.py index 705aeb5563..c8c1c0f58f 100644 --- a/scripts/lib/wic/plugins/source/rootfs.py +++ b/scripts/lib/wic/plugins/source/rootfs.py @@ -20,7 +20,7 @@ from oe.path import copyhardlinktree from wic import WicError from wic.pluginbase import SourcePlugin -from wic.misc import get_bitbake_var +from wic.misc import get_bitbake_var, exec_native_cmd logger = logging.getLogger('wic') @@ -44,6 +44,15 @@ class RootfsPlugin(SourcePlugin): return os.path.realpath(image_rootfs_dir) + @staticmethod + def __get_pseudo(native_sysroot, rootfs, pseudo_dir): + pseudo = "export PSEUDO_PREFIX=%s/usr;" % native_sysroot + pseudo += "export PSEUDO_LOCALSTATEDIR=%s;" % pseudo_dir + pseudo += "export PSEUDO_PASSWD=%s;" % rootfs + pseudo += "export PSEUDO_NOSYMLINKEXP=1;" + pseudo += "%s " % get_bitbake_var("FAKEROOTCMD") + return pseudo + @classmethod def do_prepare_partition(cls, part, source_params, cr, cr_workdir, oe_builddir, bootimg_dir, kernel_dir, @@ -68,18 +77,55 @@ class RootfsPlugin(SourcePlugin): "it is not a valid path, exiting" % part.rootfs_dir) part.rootfs_dir = cls.__get_rootfs_dir(rootfs_dir) + part.has_fstab = os.path.exists(os.path.join(part.rootfs_dir, "etc/fstab")) + pseudo_dir = os.path.join(part.rootfs_dir, "../pseudo") + if not os.path.lexists(pseudo_dir): + logger.warn("%s folder does not exist. " + "Usernames and permissions will be invalid " % pseudo_dir) + pseudo_dir = None new_rootfs = None + new_pseudo = None # Handle excluded paths. - if part.exclude_path or part.include_path: - # We need a new rootfs directory we can delete files from. 
Copy to - # workdir. + if part.exclude_path or part.include_path or part.change_directory or part.update_fstab_in_rootfs: + # We need a new rootfs directory we can safely modify without + # interfering with other tasks. Copy to workdir. new_rootfs = os.path.realpath(os.path.join(cr_workdir, "rootfs%d" % part.lineno)) if os.path.lexists(new_rootfs): shutil.rmtree(os.path.join(new_rootfs)) - copyhardlinktree(part.rootfs_dir, new_rootfs) + if part.change_directory: + cd = part.change_directory + if cd[-1] == '/': + cd = cd[:-1] + if os.path.isabs(cd): + logger.error("Must be relative: --change-directory=%s" % cd) + sys.exit(1) + orig_dir = os.path.realpath(os.path.join(part.rootfs_dir, cd)) + if not orig_dir.startswith(part.rootfs_dir): + logger.error("'%s' points to a path outside the rootfs" % orig_dir) + sys.exit(1) + + else: + orig_dir = part.rootfs_dir + copyhardlinktree(orig_dir, new_rootfs) + + # Convert the pseudo directory to its new location + if (pseudo_dir): + new_pseudo = os.path.realpath( + os.path.join(cr_workdir, "pseudo%d" % part.lineno)) + if os.path.lexists(new_pseudo): + shutil.rmtree(new_pseudo) + os.mkdir(new_pseudo) + shutil.copy(os.path.join(pseudo_dir, "files.db"), + os.path.join(new_pseudo, "files.db")) + + pseudo_cmd = "%s -B -m %s -M %s" % (cls.__get_pseudo(native_sysroot, + new_rootfs, + new_pseudo), + orig_dir, new_rootfs) + exec_native_cmd(pseudo_cmd, native_sysroot) for path in part.include_path or []: copyhardlinktree(path, new_rootfs) @@ -99,17 +145,34 @@ class RootfsPlugin(SourcePlugin): logger.error("'%s' points to a path outside the rootfs" % orig_path) sys.exit(1) + if new_pseudo: + pseudo = cls.__get_pseudo(native_sysroot, new_rootfs, new_pseudo) + else: + pseudo = None if path.endswith(os.sep): # Delete content only. 
for entry in os.listdir(full_path): full_entry = os.path.join(full_path, entry) - if os.path.isdir(full_entry) and not os.path.islink(full_entry): - shutil.rmtree(full_entry) - else: - os.remove(full_entry) + rm_cmd = "rm -rf %s" % (full_entry) + exec_native_cmd(rm_cmd, native_sysroot, pseudo) else: # Delete whole directory. - shutil.rmtree(full_path) + rm_cmd = "rm -rf %s" % (full_path) + exec_native_cmd(rm_cmd, native_sysroot, pseudo) + + # Update part.has_fstab here as fstab may have been added or + # removed by the above modifications. + part.has_fstab = os.path.exists(os.path.join(new_rootfs, "etc/fstab")) + if part.update_fstab_in_rootfs and part.has_fstab: + fstab_path = os.path.join(new_rootfs, "etc/fstab") + # Assume that fstab should always be owned by root with fixed permissions + install_cmd = "install -m 0644 %s %s" % (part.updated_fstab_path, fstab_path) + if new_pseudo: + pseudo = cls.__get_pseudo(native_sysroot, new_rootfs, new_pseudo) + else: + pseudo = None + exec_native_cmd(install_cmd, native_sysroot, pseudo) part.prepare_rootfs(cr_workdir, oe_builddir, - new_rootfs or part.rootfs_dir, native_sysroot) + new_rootfs or part.rootfs_dir, native_sysroot, + pseudo_dir = new_pseudo or pseudo_dir) diff --git a/scripts/nativesdk-intercept/chgrp b/scripts/nativesdk-intercept/chgrp new file mode 100755 index 0000000000..30cc417d3a --- /dev/null +++ b/scripts/nativesdk-intercept/chgrp @@ -0,0 +1,27 @@ +#!/usr/bin/env python3 +# +# Wrapper around 'chgrp' that redirects to root in all cases + +import os +import shutil +import sys + +# calculate path to the real 'chgrp' +path = os.environ['PATH'] +path = path.replace(os.path.dirname(sys.argv[0]), '') +real_chgrp = shutil.which('chgrp', path=path) + +args = list() + +found = False +for i in sys.argv: + if i.startswith("-"): + args.append(i) + continue + if not found: + args.append("root") + found = True + else: + args.append(i) + +os.execv(real_chgrp, args) diff --git a/scripts/nativesdk-intercept/chown 
b/scripts/nativesdk-intercept/chown new file mode 100755 index 0000000000..3914b3e384 --- /dev/null +++ b/scripts/nativesdk-intercept/chown @@ -0,0 +1,27 @@ +#!/usr/bin/env python3 +# +# Wrapper around 'chown' that redirects to root in all cases + +import os +import shutil +import sys + +# calculate path to the real 'chown' +path = os.environ['PATH'] +path = path.replace(os.path.dirname(sys.argv[0]), '') +real_chown = shutil.which('chown', path=path) + +args = list() + +found = False +for i in sys.argv: + if i.startswith("-"): + args.append(i) + continue + if not found: + args.append("root:root") + found = True + else: + args.append(i) + +os.execv(real_chown, args) diff --git a/scripts/oe-depends-dot b/scripts/oe-depends-dot index 5eb3e12769..1c2d51c6ec 100755 --- a/scripts/oe-depends-dot +++ b/scripts/oe-depends-dot @@ -15,7 +15,7 @@ class Dot(object): def __init__(self): parser = argparse.ArgumentParser( description="Analyse recipe-depends.dot generated by bitbake -g", - epilog="Use %(prog)s --help to get help") + formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument("dotfile", help = "Specify the dotfile", nargs = 1, action='store', default='') parser.add_argument("-k", "--key", @@ -32,6 +32,21 @@ class Dot(object): " For example, A->B, B->C, A->C, then A->C can be removed.", action="store_true", default=False) + parser.epilog = """ +Examples: +First generate the .dot file: + bitbake -g core-image-minimal + +To find out why a package is being built: + %(prog)s -k <package> -w ./task-depends.dot + +To find out what a package depends on: + %(prog)s -k <package> -d ./task-depends.dot + +Reduce the .dot file packages only, no tasks: + %(prog)s -r ./task-depends.dot +""" + self.args = parser.parse_args() if len(sys.argv) != 3 and len(sys.argv) < 5: @@ -99,6 +114,10 @@ class Dot(object): if key == "meta-world-pkgdata": continue dep = m.group(2) + key = key.split('.')[0] + dep = dep.split('.')[0] + if key == dep: + continue if key in depends: if not 
key in depends[key]: depends[key].add(dep) diff --git a/scripts/oe-pkgdata-browser b/scripts/oe-pkgdata-browser index 8d223185a4..65a6ee956e 100755 --- a/scripts/oe-pkgdata-browser +++ b/scripts/oe-pkgdata-browser @@ -236,6 +236,8 @@ class PkgUi(): update_deps("RPROVIDES", "Provides: ", self.provides_label, clickable=False) def load_recipes(self): + if not os.path.exists(pkgdata): + sys.exit("Error: Please ensure %s exists by generating packages before using this tool." % pkgdata) for recipe in sorted(os.listdir(pkgdata)): if os.path.isfile(os.path.join(pkgdata, recipe)): self.recipe_iters[recipe] = self.recipe_store.append([recipe]) diff --git a/scripts/oe-pkgdata-util b/scripts/oe-pkgdata-util index 93220e3617..75dd23efa3 100755 --- a/scripts/oe-pkgdata-util +++ b/scripts/oe-pkgdata-util @@ -598,6 +598,9 @@ def main(): logger.error("Unable to find bitbake by searching parent directory of this script or PATH") sys.exit(1) logger.debug('Found bitbake path: %s' % bitbakepath) + if not os.environ.get('BUILDDIR', ''): + logger.error("This script can only be run after initialising the build environment (e.g. 
by using oe-init-build-env)") + sys.exit(1) tinfoil = tinfoil_init() try: args.pkgdata_dir = tinfoil.config_data.getVar('PKGDATA_DIR') diff --git a/scripts/oe-run-native b/scripts/oe-run-native index 4e63e69cc4..22958d97e7 100755 --- a/scripts/oe-run-native +++ b/scripts/oe-run-native @@ -43,7 +43,7 @@ fi OLD_PATH=$PATH # look for a tool only in native sysroot -PATH=$OECORE_NATIVE_SYSROOT/usr/bin:$OECORE_NATIVE_SYSROOT/bin:$OECORE_NATIVE_SYSROOT/usr/sbin:$OECORE_NATIVE_SYSROOT/sbin$(find $OECORE_NATIVE_SYSROOT/usr/bin/*-native -maxdepth 1 -type d -printf ":%p") +PATH=$OECORE_NATIVE_SYSROOT/usr/bin:$OECORE_NATIVE_SYSROOT/bin:$OECORE_NATIVE_SYSROOT/usr/sbin:$OECORE_NATIVE_SYSROOT/sbin$(find $OECORE_NATIVE_SYSROOT/usr/bin -maxdepth 1 -name "*-native" -type d -printf ":%p") tool_find=`/usr/bin/which $tool 2>/dev/null` if [ -n "$tool_find" ] ; then diff --git a/scripts/oe-setup-builddir b/scripts/oe-setup-builddir index 30eaa8efbe..5a51fa793f 100755 --- a/scripts/oe-setup-builddir +++ b/scripts/oe-setup-builddir @@ -113,10 +113,10 @@ if [ ! 
-z "$SHOWYPDOC" ]; then cat <<EOM The Yocto Project has extensive documentation about OE including a reference manual which can be found at: - http://yoctoproject.org/documentation + https://docs.yoctoproject.org For more information about OpenEmbedded see their website: - http://www.openembedded.org/ + https://www.openembedded.org/ EOM # unset SHOWYPDOC diff --git a/scripts/postinst-intercepts/update_font_cache b/scripts/postinst-intercepts/update_font_cache index 46bdb8c572..900db042d6 100644 --- a/scripts/postinst-intercepts/update_font_cache +++ b/scripts/postinst-intercepts/update_font_cache @@ -5,6 +5,8 @@ set -e +rm -f $D${fontconfigcachedir}/CACHEDIR.TAG + PSEUDO_UNLOAD=1 ${binprefix}qemuwrapper -L $D -E ${fontconfigcacheenv} $D${libexecdir}/${binprefix}fc-cache --sysroot=$D --system-only ${fontconfigcacheparams} chown -R root:root $D${fontconfigcachedir} diff --git a/scripts/pybootchartgui/pybootchartgui/draw.py b/scripts/pybootchartgui/pybootchartgui/draw.py index 53324b9f8b..fc708b55c3 100644 --- a/scripts/pybootchartgui/pybootchartgui/draw.py +++ b/scripts/pybootchartgui/pybootchartgui/draw.py @@ -267,11 +267,14 @@ def draw_chart(ctx, color, fill, chart_bounds, data, proc_tree, data_range): # avoid divide by zero if max_y == 0: max_y = 1.0 - xscale = float (chart_bounds[2]) / (max_x - x_shift) + if (max_x - x_shift): + xscale = float (chart_bounds[2]) / (max_x - x_shift) + else: + xscale = float (chart_bounds[2]) # If data_range is given, scale the chart so that the value range in # data_range matches the chart bounds exactly. # Otherwise, scale so that the actual data matches the chart bounds. 
- if data_range: + if data_range and (data_range[1] - data_range[0]): yscale = float(chart_bounds[3]) / (data_range[1] - data_range[0]) ybase = data_range[0] else: diff --git a/scripts/pybootchartgui/pybootchartgui/parsing.py b/scripts/pybootchartgui/pybootchartgui/parsing.py index b42dac6b88..9d6787ec5a 100644 --- a/scripts/pybootchartgui/pybootchartgui/parsing.py +++ b/scripts/pybootchartgui/pybootchartgui/parsing.py @@ -128,7 +128,7 @@ class Trace: def compile(self, writer): def find_parent_id_for(pid): - if pid is 0: + if pid == 0: return 0 ppid = self.parent_map.get(pid) if ppid: diff --git a/scripts/relocate_sdk.py b/scripts/relocate_sdk.py index 8c0fdb986a..8079d13750 100755 --- a/scripts/relocate_sdk.py +++ b/scripts/relocate_sdk.py @@ -97,11 +97,12 @@ def change_interpreter(elf_file_name): if (len(new_dl_path) >= p_filesz): print("ERROR: could not relocate %s, interp size = %i and %i is needed." \ % (elf_file_name, p_memsz, len(new_dl_path) + 1)) - break + return False dl_path = new_dl_path + b("\0") * (p_filesz - len(new_dl_path)) f.seek(p_offset) f.write(dl_path) break + return True def change_dl_sysdirs(elf_file_name): if arch == 32: @@ -215,6 +216,7 @@ else: executables_list = sys.argv[3:] +errors = False for e in executables_list: perms = os.stat(e)[stat.ST_MODE] if os.access(e, os.W_OK|os.R_OK): @@ -240,7 +242,8 @@ for e in executables_list: arch = get_arch() if arch: parse_elf_header() - change_interpreter(e) + if not change_interpreter(e): + errors = True change_dl_sysdirs(e) """ change permissions back """ @@ -253,3 +256,6 @@ for e in executables_list: print("New file size for %s is different. 
Looks like a relocation error!", e) sys.exit(-1) +if errors: + print("Relocation of one or more executables failed.") + sys.exit(-1) diff --git a/scripts/runqemu b/scripts/runqemu index cc87ea871a..4dfc0e2d38 100755 --- a/scripts/runqemu +++ b/scripts/runqemu @@ -764,7 +764,7 @@ class BaseConfig(object): raise RunQemuError('BIOS not found: %s' % bios_match_name) if not os.path.exists(self.bios): - raise RunQemuError("KERNEL %s not found" % self.bios) + raise RunQemuError("BIOS %s not found" % self.bios) def check_mem(self): @@ -974,17 +974,14 @@ class BaseConfig(object): else: self.nfs_server = '192.168.7.1' - # Figure out a new nfs_instance to allow multiple qemus running. - ps = subprocess.check_output(("ps", "auxww")).decode('utf-8') - pattern = '/bin/unfsd .* -i .*\.pid -e .*/exports([0-9]+) ' - all_instances = re.findall(pattern, ps, re.M) - if all_instances: - all_instances.sort(key=int) - self.nfs_instance = int(all_instances.pop()) + 1 - - nfsd_port = 3049 + 2 * self.nfs_instance - mountd_port = 3048 + 2 * self.nfs_instance + nfsd_port = 3048 + self.nfs_instance + lockdir = "/tmp/qemu-port-locks" + self.make_lock_dir(lockdir) + while not self.check_free_port('localhost', nfsd_port, lockdir): + self.nfs_instance += 1 + nfsd_port += 1 + mountd_port = nfsd_port # Export vars for runqemu-export-rootfs export_dict = { 'NFS_INSTANCE': self.nfs_instance, @@ -1034,6 +1031,17 @@ class BaseConfig(object): self.set('NETWORK_CMD', '-netdev bridge,br=%s,id=net0,helper=%s -device virtio-net-pci,netdev=net0 ' % ( self.net_bridge, os.path.join(self.bindir_native, 'qemu-oe-bridge-helper'))) + def make_lock_dir(self, lockdir): + if not os.path.exists(lockdir): + # There might be a race issue when multi runqemu processess are + # running at the same time. 
+ try: + os.mkdir(lockdir) + os.chmod(lockdir, 0o777) + except FileExistsError: + pass + return + def setup_slirp(self): """Setup user networking""" @@ -1052,14 +1060,7 @@ class BaseConfig(object): mac = 2 lockdir = "/tmp/qemu-port-locks" - if not os.path.exists(lockdir): - # There might be a race issue when multi runqemu processess are - # running at the same time. - try: - os.mkdir(lockdir) - os.chmod(lockdir, 0o777) - except FileExistsError: - pass + self.make_lock_dir(lockdir) # Find a free port to avoid conflicts for p in ports[:]: @@ -1099,14 +1100,7 @@ class BaseConfig(object): logger.error("ip: %s" % ip) raise OEPathError("runqemu-ifup, runqemu-ifdown or ip not found") - if not os.path.exists(lockdir): - # There might be a race issue when multi runqemu processess are - # running at the same time. - try: - os.mkdir(lockdir) - os.chmod(lockdir, 0o777) - except FileExistsError: - pass + self.make_lock_dir(lockdir) cmd = (ip, 'link') logger.debug('Running %s...' % str(cmd)) @@ -1328,6 +1322,8 @@ class BaseConfig(object): for ovmf in self.ovmf_bios: format = ovmf.rsplit('.', 1)[-1] + if format == "bin": + format = "raw" self.qemu_opt += ' -drive if=pflash,format=%s,file=%s' % (format, ovmf) self.qemu_opt += ' ' + self.qemu_opt_script @@ -1421,13 +1417,13 @@ class BaseConfig(object): logger.debug('Running %s' % str(cmd)) subprocess.check_call(cmd) self.release_taplock() - self.release_portlock() if self.nfs_running: logger.info("Shutting down the userspace NFS server...") cmd = ("runqemu-export-rootfs", "stop", self.rootfs) logger.debug('Running %s' % str(cmd)) subprocess.check_call(cmd) + self.release_portlock() if self.saved_stty: subprocess.check_call(("stty", self.saved_stty)) @@ -1514,7 +1510,8 @@ def main(): def sigterm_handler(signum, frame): logger.info("SIGTERM received") - os.kill(config.qemupid, signal.SIGTERM) + if config.qemupid: + os.kill(config.qemupid, signal.SIGTERM) config.cleanup() # Deliberately ignore the return code of 'tput smam'. 
subprocess.call(["tput", "smam"]) diff --git a/scripts/verify-bashisms b/scripts/verify-bashisms index fb0cc719ea..14d8c298e9 100755 --- a/scripts/verify-bashisms +++ b/scripts/verify-bashisms @@ -100,7 +100,7 @@ if __name__=='__main__': args = parser.parse_args() if shutil.which("checkbashisms.pl") is None: - print("Cannot find checkbashisms.pl on $PATH, get it from https://anonscm.debian.org/cgit/collab-maint/devscripts.git/plain/scripts/checkbashisms.pl") + print("Cannot find checkbashisms.pl on $PATH, get it from https://salsa.debian.org/debian/devscripts/raw/master/scripts/checkbashisms.pl") sys.exit(1) # The order of defining the worker function, diff --git a/scripts/wic b/scripts/wic index 24700f380f..99a8a97ccb 100755 --- a/scripts/wic +++ b/scripts/wic @@ -22,9 +22,9 @@ import sys import argparse import logging import subprocess +import shutil from collections import namedtuple -from distutils import spawn # External modules scripts_path = os.path.dirname(os.path.realpath(__file__)) @@ -47,7 +47,7 @@ if os.environ.get('SDKTARGETSYSROOT'): break sdkroot = os.path.dirname(sdkroot) -bitbake_exe = spawn.find_executable('bitbake') +bitbake_exe = shutil.which('bitbake') if bitbake_exe: bitbake_path = scriptpath.add_bitbake_lib_path() import bb @@ -206,7 +206,7 @@ def wic_create_subcommand(options, usage_str): logger.info(" (Please check that the build artifacts for the machine") logger.info(" selected in local.conf actually exist and that they") logger.info(" are the correct artifacts for the image (.wks file)).\n") - raise WicError("The artifact that couldn't be found was %s:\n %s", not_found, not_found_dir) + raise WicError("The artifact that couldn't be found was %s:\n %s" % (not_found, not_found_dir)) krootfs_dir = options.rootfs_dir if krootfs_dir is None: @@ -312,6 +312,8 @@ def wic_init_parser_create(subparser): subparser.add_argument("-o", "--outdir", dest="outdir", default='.', help="name of directory to create image in") + subparser.add_argument("-w", 
"--workdir", + help="temporary workdir to use for intermediate files") subparser.add_argument("-e", "--image-name", dest="image_name", help="name of the image to use the artifacts from " "e.g. core-image-sato") diff --git a/scripts/yocto-check-layer b/scripts/yocto-check-layer index b7c83c8b54..dd930cdddd 100755 --- a/scripts/yocto-check-layer +++ b/scripts/yocto-check-layer @@ -24,7 +24,7 @@ import scriptpath scriptpath.add_oe_lib_path() scriptpath.add_bitbake_lib_path() -from checklayer import LayerType, detect_layers, add_layers, add_layer_dependencies, get_signatures, check_bblayers +from checklayer import LayerType, detect_layers, add_layers, add_layer_dependencies, get_layer_dependencies, get_signatures, check_bblayers from oeqa.utils.commands import get_bb_vars PROGNAME = 'yocto-check-layer' @@ -51,6 +51,8 @@ def main(): help='File to output log (optional)', action='store') parser.add_argument('--dependency', nargs="+", help='Layers to process for dependencies', action='store') + parser.add_argument('--no-auto-dependency', help='Disable automatic testing of dependencies', + action='store_true') parser.add_argument('--machines', nargs="+", help='List of MACHINEs to be used during testing', action='store') parser.add_argument('--additional-layers', nargs="+", @@ -121,6 +123,21 @@ def main(): if not layers: return 1 + # Find all dependencies, and get them checked too + if not args.no_auto_dependency: + depends = [] + for layer in layers: + layer_depends = get_layer_dependencies(layer, dep_layers, logger) + if layer_depends: + for d in layer_depends: + if d not in depends: + depends.append(d) + + for d in depends: + if d not in layers: + logger.info("Adding %s to the list of layers to test, as a dependency", d['name']) + layers.append(d) + shutil.copyfile(bblayersconf, bblayersconf + '.backup') def cleanup_bblayers(signum, frame): shutil.copyfile(bblayersconf + '.backup', bblayersconf) @@ -138,6 +155,9 @@ def main(): layer['type'] == LayerType.ERROR_BSP_DISTRO: 
continue + # Reset to a clean backup copy for each run + shutil.copyfile(bblayersconf + '.backup', bblayersconf) + if check_bblayers(bblayersconf, layer['path'], logger): logger.info("%s already in %s. To capture initial signatures, layer under test should not present " "in BBLAYERS. Please remove %s from BBLAYERS." % (layer['name'], bblayersconf, layer['name'])) @@ -149,17 +169,13 @@ def main(): logger.info("Setting up for %s(%s), %s" % (layer['name'], layer['type'], layer['path'])) - shutil.copyfile(bblayersconf + '.backup', bblayersconf) - missing_dependencies = not add_layer_dependencies(bblayersconf, layer, dep_layers, logger) if not missing_dependencies: for additional_layer in additional_layers: if not add_layer_dependencies(bblayersconf, additional_layer, dep_layers, logger): missing_dependencies = True break - if not add_layer_dependencies(bblayersconf, layer, dep_layers, logger) or \ - any(map(lambda additional_layer: not add_layer_dependencies(bblayersconf, additional_layer, dep_layers, logger), - additional_layers)): + if missing_dependencies: logger.info('Skipping %s due to missing dependencies.' % layer['name']) results[layer['name']] = None results_status[layer['name']] = 'SKIPPED (Missing dependencies)' |