Diffstat (limited to 'scripts')
36 files changed, 479 insertions, 305 deletions
diff --git a/scripts/combo-layer b/scripts/combo-layer
index 045de65642..19ad32660d 100755
--- a/scripts/combo-layer
+++ b/scripts/combo-layer
@@ -21,7 +21,6 @@
 import re
 import copy
 import pipes
 import shutil
-from collections import OrderedDict
 from string import Template
 from functools import reduce
@@ -192,6 +191,23 @@ def runcmd(cmd,destdir=None,printerr=True,out=None,env=None):
     logger.debug("output: %s" % output.replace(chr(0), '\\0'))
     return output
 
+def action_sync_revs(conf, args):
+    """
+    Update the last_revision config option for each repo with the latest
+    revision in the remote's branch. Useful if multiple people are using
+    combo-layer.
+    """
+    repos = get_repos(conf, args[1:])
+
+    for name in repos:
+        repo = conf.repos[name]
+        ldir = repo['local_repo_dir']
+        branch = repo.get('branch', "master")
+        runcmd("git fetch", ldir)
+        lastrev = runcmd('git rev-parse origin/%s' % branch, ldir).strip()
+        print("Updating %s to %s" % (name, lastrev))
+        conf.update(name, "last_revision", lastrev)
+
 def action_init(conf, args):
     """
     Clone component repositories
@@ -467,7 +483,7 @@ def check_repo_clean(repodir):
         exit if repo is dirty
     """
     output=runcmd("git status --porcelain", repodir)
-    r = re.compile('\?\? patch-.*/')
+    r = re.compile(r'\?\? patch-.*/')
     dirtyout = [item for item in output.splitlines() if not r.match(item)]
     if dirtyout:
         logger.error("git repo %s is dirty, please fix it first", repodir)
@@ -508,7 +524,7 @@ def check_patch(patchfile):
     f.close()
     if of:
         of.close()
-        bb.utils.rename(patchfile + '.tmp', patchfile)
+        os.rename(of.name, patchfile)
 
 def drop_to_shell(workdir=None):
     if not sys.stdin.isatty():
@@ -1302,6 +1318,7 @@ actions = {
     "update": action_update,
     "pull": action_pull,
     "splitpatch": action_splitpatch,
+    "sync-revs": action_sync_revs,
 }
 
 def main():
@@ -1312,10 +1329,11 @@ def main():
 Create and update a combination layer repository from multiple component repositories.
Action: - init initialise the combo layer repo - update [components] get patches from component repos and apply them to the combo repo - pull [components] just pull component repos only - splitpatch [commit] generate commit patch and split per component, default commit is HEAD""") + init initialise the combo layer repo + update [components] get patches from component repos and apply them to the combo repo + pull [components] just pull component repos only + sync-revs [components] update the config file's last_revision for each repository + splitpatch [commit] generate commit patch and split per component, default commit is HEAD""") parser.add_option("-c", "--conf", help = "specify the config file (conf/combo-layer.conf is the default).", action = "store", dest = "conffile", default = "conf/combo-layer.conf") diff --git a/scripts/contrib/bbvars.py b/scripts/contrib/bbvars.py index 090133600b..a9cdf082ab 100755 --- a/scripts/contrib/bbvars.py +++ b/scripts/contrib/bbvars.py @@ -36,8 +36,8 @@ def bbvar_is_documented(var, documented_vars): def collect_documented_vars(docfiles): ''' Walk the docfiles and collect the documented variables ''' documented_vars = [] - prog = re.compile(".*($|[^A-Z_])<glossentry id=\'var-") - var_prog = re.compile('<glossentry id=\'var-(.*)\'>') + prog = re.compile(r".*($|[^A-Z_])<glossentry id=\'var-") + var_prog = re.compile(r'<glossentry id=\'var-(.*)\'>') for d in docfiles: with open(d) as f: documented_vars += var_prog.findall(f.read()) @@ -45,7 +45,7 @@ def collect_documented_vars(docfiles): return documented_vars def bbvar_doctag(var, docconf): - prog = re.compile('^%s\[doc\] *= *"(.*)"' % (var)) + prog = re.compile(r'^%s\[doc\] *= *"(.*)"' % (var)) if docconf == "": return "?" diff --git a/scripts/contrib/convert-overrides.py b/scripts/contrib/convert-overrides.py index 4d41a4c475..c69acb4095 100755 --- a/scripts/contrib/convert-overrides.py +++ b/scripts/contrib/convert-overrides.py @@ -22,66 +22,78 @@ import sys import tempfile import shutil import mimetypes +import argparse -if len(sys.argv) < 2: - print("Please specify a directory to run the conversion script against.") - sys.exit(1) +parser = argparse.ArgumentParser(description="Convert override syntax") +parser.add_argument("--override", "-o", action="append", default=[], help="Add additional strings to consider as an override (e.g. custom machines/distros") +parser.add_argument("--skip", "-s", action="append", default=[], help="Add additional string to skip and not consider an override") +parser.add_argument("--skip-ext", "-e", action="append", default=[], help="Additional file suffixes to skip when processing (e.g. 
'.foo')") +parser.add_argument("--package-vars", action="append", default=[], help="Additional variables to treat as package variables") +parser.add_argument("--image-vars", action="append", default=[], help="Additional variables to treat as image variables") +parser.add_argument("--short-override", action="append", default=[], help="Additional strings to treat as short overrides") +parser.add_argument("path", nargs="+", help="Paths to convert") + +args = parser.parse_args() # List of strings to treat as overrides -vars = ["append", "prepend", "remove"] -vars = vars + ["qemuarm", "qemux86", "qemumips", "qemuppc", "qemuriscv", "qemuall"] -vars = vars + ["genericx86", "edgerouter", "beaglebone-yocto"] -vars = vars + ["armeb", "arm", "armv5", "armv6", "armv4", "powerpc64", "aarch64", "riscv32", "riscv64", "x86", "mips64", "powerpc"] -vars = vars + ["mipsarch", "x86-x32", "mips16e", "microblaze", "e5500-64b", "mipsisa32", "mipsisa64"] -vars = vars + ["class-native", "class-target", "class-cross-canadian", "class-cross", "class-devupstream"] -vars = vars + ["tune-", "pn-", "forcevariable"] -vars = vars + ["libc-musl", "libc-glibc", "libc-newlib","libc-baremetal"] -vars = vars + ["task-configure", "task-compile", "task-install", "task-clean", "task-image-qa", "task-rm_work", "task-image-complete", "task-populate-sdk"] -vars = vars + ["toolchain-clang", "mydistro", "nios2", "sdkmingw32", "overrideone", "overridetwo"] -vars = vars + ["linux-gnux32", "linux-muslx32", "linux-gnun32", "mingw32", "poky", "darwin", "linuxstdbase"] -vars = vars + ["linux-gnueabi", "eabi"] -vars = vars + ["virtclass-multilib", "virtclass-mcextend"] +vars = args.override +vars += ["append", "prepend", "remove"] +vars += ["qemuarm", "qemux86", "qemumips", "qemuppc", "qemuriscv", "qemuall"] +vars += ["genericx86", "edgerouter", "beaglebone-yocto"] +vars += ["armeb", "arm", "armv5", "armv6", "armv4", "powerpc64", "aarch64", "riscv32", "riscv64", "x86", "mips64", "powerpc"] +vars += ["mipsarch", "x86-x32", "mips16e", "microblaze", "e5500-64b", "mipsisa32", "mipsisa64"] +vars += ["class-native", "class-target", "class-cross-canadian", "class-cross", "class-devupstream"] +vars += ["tune-", "pn-", "forcevariable"] +vars += ["libc-musl", "libc-glibc", "libc-newlib","libc-baremetal"] +vars += ["task-configure", "task-compile", "task-install", "task-clean", "task-image-qa", "task-rm_work", "task-image-complete", "task-populate-sdk"] +vars += ["toolchain-clang", "mydistro", "nios2", "sdkmingw32", "overrideone", "overridetwo"] +vars += ["linux-gnux32", "linux-muslx32", "linux-gnun32", "mingw32", "poky", "darwin", "linuxstdbase"] +vars += ["linux-gnueabi", "eabi"] +vars += ["virtclass-multilib", "virtclass-mcextend"] # List of strings to treat as overrides but only with whitespace following or another override (more restricted matching). # Handles issues with arc matching arch. 
-shortvars = ["arc", "mips", "mipsel", "sh4"] +shortvars = ["arc", "mips", "mipsel", "sh4"] + args.short_override # Variables which take packagenames as an override packagevars = ["FILES", "RDEPENDS", "RRECOMMENDS", "SUMMARY", "DESCRIPTION", "RSUGGESTS", "RPROVIDES", "RCONFLICTS", "PKG", "ALLOW_EMPTY", "pkg_postrm", "pkg_postinst_ontarget", "pkg_postinst", "INITSCRIPT_NAME", "INITSCRIPT_PARAMS", "DEBIAN_NOAUTONAME", "ALTERNATIVE", "PKGE", "PKGV", "PKGR", "USERADD_PARAM", "GROUPADD_PARAM", "CONFFILES", "SYSTEMD_SERVICE", "LICENSE", "SECTION", "pkg_preinst", "pkg_prerm", "RREPLACES", "GROUPMEMS_PARAM", "SYSTEMD_AUTO_ENABLE", "SKIP_FILEDEPS", "PRIVATE_LIBS", "PACKAGE_ADD_METADATA", - "INSANE_SKIP", "DEBIANNAME", "SYSTEMD_SERVICE_ESCAPED"] + "INSANE_SKIP", "DEBIANNAME", "SYSTEMD_SERVICE_ESCAPED"] + args.package_vars # Expressions to skip if encountered, these are not overrides -skips = ["parser_append", "recipe_to_append", "extra_append", "to_remove", "show_appends", "applied_appends", "file_appends", "handle_remove"] -skips = skips + ["expanded_removes", "color_remove", "test_remove", "empty_remove", "toaster_prepend", "num_removed", "licfiles_append", "_write_append"] -skips = skips + ["no_report_remove", "test_prepend", "test_append", "multiple_append", "test_remove", "shallow_remove", "do_remove_layer", "first_append"] -skips = skips + ["parser_remove", "to_append", "no_remove", "bblayers_add_remove", "bblayers_remove", "apply_append", "is_x86", "base_dep_prepend"] -skips = skips + ["autotools_dep_prepend", "go_map_arm", "alt_remove_links", "systemd_append_file", "file_append", "process_file_darwin"] -skips = skips + ["run_loaddata_poky", "determine_if_poky_env", "do_populate_poky_src", "libc_cv_include_x86_isa_level", "test_rpm_remove", "do_install_armmultilib"] -skips = skips + ["get_appends_for_files", "test_doubleref_remove", "test_bitbakelayers_add_remove", "elf32_x86_64", "colour_remove", "revmap_remove"] -skips = skips + ["test_rpm_remove", "test_bitbakelayers_add_remove", "recipe_append_file", "log_data_removed", "recipe_append", "systemd_machine_unit_append"] -skips = skips + ["recipetool_append", "changetype_remove", "try_appendfile_wc", "test_qemux86_directdisk", "test_layer_appends", "tgz_removed"] - -imagevars = ["IMAGE_CMD", "EXTRA_IMAGECMD", "IMAGE_TYPEDEP", "CONVERSION_CMD", "COMPRESS_CMD"] -packagevars = packagevars + imagevars +skips = args.skip +skips += ["parser_append", "recipe_to_append", "extra_append", "to_remove", "show_appends", "applied_appends", "file_appends", "handle_remove"] +skips += ["expanded_removes", "color_remove", "test_remove", "empty_remove", "toaster_prepend", "num_removed", "licfiles_append", "_write_append"] +skips += ["no_report_remove", "test_prepend", "test_append", "multiple_append", "test_remove", "shallow_remove", "do_remove_layer", "first_append"] +skips += ["parser_remove", "to_append", "no_remove", "bblayers_add_remove", "bblayers_remove", "apply_append", "is_x86", "base_dep_prepend"] +skips += ["autotools_dep_prepend", "go_map_arm", "alt_remove_links", "systemd_append_file", "file_append", "process_file_darwin"] +skips += ["run_loaddata_poky", "determine_if_poky_env", "do_populate_poky_src", "libc_cv_include_x86_isa_level", "test_rpm_remove", "do_install_armmultilib"] +skips += ["get_appends_for_files", "test_doubleref_remove", "test_bitbakelayers_add_remove", "elf32_x86_64", "colour_remove", "revmap_remove"] +skips += ["test_rpm_remove", "test_bitbakelayers_add_remove", "recipe_append_file", "log_data_removed", "recipe_append", 
"systemd_machine_unit_append"] +skips += ["recipetool_append", "changetype_remove", "try_appendfile_wc", "test_qemux86_directdisk", "test_layer_appends", "tgz_removed"] + +imagevars = ["IMAGE_CMD", "EXTRA_IMAGECMD", "IMAGE_TYPEDEP", "CONVERSION_CMD", "COMPRESS_CMD"] + args.image_vars +packagevars += imagevars + +skip_ext = [".html", ".patch", ".m4", ".diff"] + args.skip_ext vars_re = {} for exp in vars: - vars_re[exp] = (re.compile('((^|[#\'"\s\-\+])[A-Za-z0-9_\-:${}\.]+)_' + exp), r"\1:" + exp) + vars_re[exp] = (re.compile(r'((^|[#\'"\s\-\+])[A-Za-z0-9_\-:${}\.]+)_' + exp), r"\1:" + exp) shortvars_re = {} for exp in shortvars: - shortvars_re[exp] = (re.compile('((^|[#\'"\s\-\+])[A-Za-z0-9_\-:${}\.]+)_' + exp + '([\(\'"\s:])'), r"\1:" + exp + r"\3") + shortvars_re[exp] = (re.compile(r'((^|[#\'"\s\-\+])[A-Za-z0-9_\-:${}\.]+)_' + exp + r'([\(\'"\s:])'), r"\1:" + exp + r"\3") package_re = {} for exp in packagevars: - package_re[exp] = (re.compile('(^|[#\'"\s\-\+]+)' + exp + '_' + '([$a-z"\'\s%\[<{\\\*].)'), r"\1" + exp + r":\2") + package_re[exp] = (re.compile(r'(^|[#\'"\s\-\+]+)' + exp + r'_' + r'([$a-z"\'\s%\[<{\\\*].)'), r"\1" + exp + r":\2") # Other substitutions to make subs = { - 'r = re.compile("([^:]+):\s*(.*)")' : 'r = re.compile("(^.+?):\s+(.*)")', + 'r = re.compile(r"([^:]+):\s*(.*)")' : 'r = re.compile(r"(^.+?):\s+(.*)")', "val = d.getVar('%s_%s' % (var, pkg))" : "val = d.getVar('%s:%s' % (var, pkg))", "f.write('%s_%s: %s\\n' % (var, pkg, encode(val)))" : "f.write('%s:%s: %s\\n' % (var, pkg, encode(val)))", "d.getVar('%s_%s' % (scriptlet_name, pkg))" : "d.getVar('%s:%s' % (scriptlet_name, pkg))", @@ -124,21 +136,20 @@ def processfile(fn): ourname = os.path.basename(sys.argv[0]) ourversion = "0.9.3" -if os.path.isfile(sys.argv[1]): - processfile(sys.argv[1]) - sys.exit(0) - -for targetdir in sys.argv[1:]: - print("processing directory '%s'" % targetdir) - for root, dirs, files in os.walk(targetdir): - for name in files: - if name == ourname: - continue - fn = os.path.join(root, name) - if os.path.islink(fn): - continue - if "/.git/" in fn or fn.endswith(".html") or fn.endswith(".patch") or fn.endswith(".m4") or fn.endswith(".diff"): - continue - processfile(fn) +for p in args.path: + if os.path.isfile(p): + processfile(p) + else: + print("processing directory '%s'" % p) + for root, dirs, files in os.walk(p): + for name in files: + if name == ourname: + continue + fn = os.path.join(root, name) + if os.path.islink(fn): + continue + if "/.git/" in fn or any(fn.endswith(ext) for ext in skip_ext): + continue + processfile(fn) print("All files processed with version %s" % ourversion) diff --git a/scripts/contrib/image-manifest b/scripts/contrib/image-manifest index 3c07a73a4e..4d65a99258 100755 --- a/scripts/contrib/image-manifest +++ b/scripts/contrib/image-manifest @@ -392,7 +392,7 @@ def export_manifest_info(args): for key in rd.getVarFlags('PACKAGECONFIG').keys(): if key == 'doc': continue - rvalues[pn]['packageconfig_opts'][key] = rd.getVarFlag('PACKAGECONFIG', key, True) + rvalues[pn]['packageconfig_opts'][key] = rd.getVarFlag('PACKAGECONFIG', key) if config['patches'] == 'yes': patches = oe.recipeutils.get_recipe_patches(rd) diff --git a/scripts/create-pull-request b/scripts/create-pull-request index 8eefcf63a5..885105fab3 100755 --- a/scripts/create-pull-request +++ b/scripts/create-pull-request @@ -128,7 +128,7 @@ PROTO_RE="[a-z][a-z+]*://" GIT_RE="\(^\($PROTO_RE\)\?\)\($USER_RE@\)\?\([^:/]*\)[:/]\(.*\)" REMOTE_URL=${REMOTE_URL%.git} REMOTE_REPO=$(echo $REMOTE_URL | sed 
"s#$GIT_RE#\5#") -REMOTE_URL=$(echo $REMOTE_URL | sed "s#$GIT_RE#git://\4/\5#") +REMOTE_URL=$(echo $REMOTE_URL | sed "s#$GIT_RE#https://\4/\5#") if [ -z "$BRANCH" ]; then BRANCH=$(git branch | grep -e "^\* " | cut -d' ' -f2) @@ -149,13 +149,10 @@ fi WEB_URL="" case "$REMOTE_URL" in *git.yoctoproject.org*) - WEB_URL="http://git.yoctoproject.org/cgit.cgi/$REMOTE_REPO/log/?h=$BRANCH" - ;; - *git.pokylinux.org*) - WEB_URL="http://git.pokylinux.org/cgit.cgi/$REMOTE_REPO/log/?h=$BRANCH" + WEB_URL="https://git.yoctoproject.org/$REMOTE_REPO/log/?h=$BRANCH" ;; *git.openembedded.org*) - WEB_URL="http://cgit.openembedded.org/$REMOTE_REPO/log/?h=$BRANCH" + WEB_URL="https://git.openembedded.org/$REMOTE_REPO/log/?h=$BRANCH" ;; *github.com*) WEB_URL="https://github.com/$REMOTE_REPO/tree/$BRANCH" diff --git a/scripts/devtool b/scripts/devtool index af4811b922..20d785c7f7 100755 --- a/scripts/devtool +++ b/scripts/devtool @@ -104,6 +104,7 @@ def read_workspace(): for fn in glob.glob(os.path.join(config.workspace_path, 'appends', '*.bbappend')): with open(fn, 'r') as f: pnvalues = {} + pn = None for line in f: res = externalsrc_re.match(line.rstrip()) if res: @@ -123,6 +124,9 @@ def read_workspace(): elif line.startswith('# srctreebase: '): pnvalues['srctreebase'] = line.split(':', 1)[1].strip() if pnvalues: + if not pn: + raise DevtoolError("Found *.bbappend in %s, but could not determine EXTERNALSRC:pn-*. " + "Maybe still using old syntax?" % config.workspace_path) if not pnvalues.get('srctreebase', None): pnvalues['srctreebase'] = pnvalues['srctree'] logger.debug('Found recipe %s' % pnvalues) @@ -314,10 +318,10 @@ def main(): args = parser.parse_args(unparsed_args, namespace=global_args) - if not getattr(args, 'no_workspace', False): - read_workspace() - try: + if not getattr(args, 'no_workspace', False): + read_workspace() + ret = args.func(args, config, basepath, workspace) except DevtoolError as err: if str(err): diff --git a/scripts/lib/buildstats.py b/scripts/lib/buildstats.py index c69b5bf4d7..6db60d5bcf 100644 --- a/scripts/lib/buildstats.py +++ b/scripts/lib/buildstats.py @@ -8,7 +8,7 @@ import json import logging import os import re -from collections import namedtuple,OrderedDict +from collections import namedtuple from statistics import mean @@ -79,8 +79,8 @@ class BSTask(dict): return self['rusage']['ru_oublock'] @classmethod - def from_file(cls, buildstat_file): - """Read buildstat text file""" + def from_file(cls, buildstat_file, fallback_end=0): + """Read buildstat text file. fallback_end is an optional end time for tasks that are not recorded as finishing.""" bs_task = cls() log.debug("Reading task buildstats from %s", buildstat_file) end_time = None @@ -108,7 +108,10 @@ class BSTask(dict): bs_task[ru_type][ru_key] = val elif key == 'Status': bs_task['status'] = val - if end_time is not None and start_time is not None: + # If the task didn't finish, fill in the fallback end time if specified + if start_time and not end_time and fallback_end: + end_time = fallback_end + if start_time and end_time: bs_task['elapsed_time'] = end_time - start_time else: raise BSError("{} looks like a invalid buildstats file".format(buildstat_file)) @@ -226,25 +229,44 @@ class BuildStats(dict): epoch = match.group('epoch') return name, epoch, version, revision + @staticmethod + def parse_top_build_stats(path): + """ + Parse the top-level build_stats file for build-wide start and duration. 
+ """ + start = elapsed = 0 + with open(path) as fobj: + for line in fobj.readlines(): + key, val = line.split(':', 1) + val = val.strip() + if key == 'Build Started': + start = float(val) + elif key == "Elapsed time": + elapsed = float(val.split()[0]) + return start, elapsed + @classmethod def from_dir(cls, path): """Load buildstats from a buildstats directory""" - if not os.path.isfile(os.path.join(path, 'build_stats')): + top_stats = os.path.join(path, 'build_stats') + if not os.path.isfile(top_stats): raise BSError("{} does not look like a buildstats directory".format(path)) log.debug("Reading buildstats directory %s", path) - buildstats = cls() + build_started, build_elapsed = buildstats.parse_top_build_stats(top_stats) + build_end = build_started + build_elapsed + subdirs = os.listdir(path) for dirname in subdirs: recipe_dir = os.path.join(path, dirname) - if not os.path.isdir(recipe_dir): + if dirname == "reduced_proc_pressure" or not os.path.isdir(recipe_dir): continue name, epoch, version, revision = cls.split_nevr(dirname) bsrecipe = BSRecipe(name, epoch, version, revision) for task in os.listdir(recipe_dir): bsrecipe.tasks[task] = BSTask.from_file( - os.path.join(recipe_dir, task)) + os.path.join(recipe_dir, task), build_end) if name in buildstats: raise BSError("Cannot handle multiple versions of the same " "package ({})".format(name)) diff --git a/scripts/lib/checklayer/__init__.py b/scripts/lib/checklayer/__init__.py index aa946f3036..53f99dce1e 100644 --- a/scripts/lib/checklayer/__init__.py +++ b/scripts/lib/checklayer/__init__.py @@ -16,6 +16,7 @@ class LayerType(Enum): BSP = 0 DISTRO = 1 SOFTWARE = 2 + CORE = 3 ERROR_NO_LAYER_CONF = 98 ERROR_BSP_DISTRO = 99 @@ -106,7 +107,13 @@ def _detect_layer(layer_path): if distros: is_distro = True - if is_bsp and is_distro: + layer['collections'] = _get_layer_collections(layer['path']) + + if layer_name == "meta" and "core" in layer['collections']: + layer['type'] = LayerType.CORE + layer['conf']['machines'] = machines + layer['conf']['distros'] = distros + elif is_bsp and is_distro: layer['type'] = LayerType.ERROR_BSP_DISTRO elif is_bsp: layer['type'] = LayerType.BSP @@ -117,8 +124,6 @@ def _detect_layer(layer_path): else: layer['type'] = LayerType.SOFTWARE - layer['collections'] = _get_layer_collections(layer['path']) - return layer def detect_layers(layer_directories, no_auto): @@ -319,8 +324,8 @@ def get_signatures(builddir, failsafe=False, machine=None, extravars=None): else: raise - sig_regex = re.compile("^(?P<task>.*:.*):(?P<hash>.*) .$") - tune_regex = re.compile("(^|\s)SIGGEN_LOCKEDSIGS_t-(?P<tune>\S*)\s*=\s*") + sig_regex = re.compile(r"^(?P<task>.*:.*):(?P<hash>.*) .$") + tune_regex = re.compile(r"(^|\s)SIGGEN_LOCKEDSIGS_t-(?P<tune>\S*)\s*=\s*") current_tune = None with open(sigs_file, 'r') as f: for line in f.readlines(): diff --git a/scripts/lib/checklayer/cases/bsp.py b/scripts/lib/checklayer/cases/bsp.py index a80a5844da..b76163fb56 100644 --- a/scripts/lib/checklayer/cases/bsp.py +++ b/scripts/lib/checklayer/cases/bsp.py @@ -11,7 +11,7 @@ from checklayer.case import OECheckLayerTestCase class BSPCheckLayer(OECheckLayerTestCase): @classmethod def setUpClass(self): - if self.tc.layer['type'] != LayerType.BSP: + if self.tc.layer['type'] not in (LayerType.BSP, LayerType.CORE): raise unittest.SkipTest("BSPCheckLayer: Layer %s isn't BSP one." 
%\ self.tc.layer['name']) diff --git a/scripts/lib/checklayer/cases/common.py b/scripts/lib/checklayer/cases/common.py index 491a13953c..722d3cf638 100644 --- a/scripts/lib/checklayer/cases/common.py +++ b/scripts/lib/checklayer/cases/common.py @@ -12,6 +12,9 @@ from checklayer.case import OECheckLayerTestCase class CommonCheckLayer(OECheckLayerTestCase): def test_readme(self): + if self.tc.layer['type'] == LayerType.CORE: + raise unittest.SkipTest("Core layer's README is top level") + # The top-level README file may have a suffix (like README.rst or README.txt). readme_files = glob.glob(os.path.join(self.tc.layer['path'], '[Rr][Ee][Aa][Dd][Mm][Ee]*')) self.assertTrue(len(readme_files) > 0, diff --git a/scripts/lib/checklayer/cases/distro.py b/scripts/lib/checklayer/cases/distro.py index f0bee5493c..a35332451c 100644 --- a/scripts/lib/checklayer/cases/distro.py +++ b/scripts/lib/checklayer/cases/distro.py @@ -11,7 +11,7 @@ from checklayer.case import OECheckLayerTestCase class DistroCheckLayer(OECheckLayerTestCase): @classmethod def setUpClass(self): - if self.tc.layer['type'] != LayerType.DISTRO: + if self.tc.layer['type'] not in (LayerType.DISTRO, LayerType.CORE): raise unittest.SkipTest("DistroCheckLayer: Layer %s isn't Distro one." %\ self.tc.layer['name']) diff --git a/scripts/lib/devtool/menuconfig.py b/scripts/lib/devtool/menuconfig.py index 95384c5333..ff9227035d 100644 --- a/scripts/lib/devtool/menuconfig.py +++ b/scripts/lib/devtool/menuconfig.py @@ -43,7 +43,7 @@ def menuconfig(args, config, basepath, workspace): return 1 check_workspace_recipe(workspace, args.component) - pn = rd.getVar('PN', True) + pn = rd.getVar('PN') if not rd.getVarFlag('do_menuconfig','task'): raise DevtoolError("This recipe does not support menuconfig option") diff --git a/scripts/lib/devtool/standard.py b/scripts/lib/devtool/standard.py index 4b50e3c63b..7a005c9010 100644 --- a/scripts/lib/devtool/standard.py +++ b/scripts/lib/devtool/standard.py @@ -520,7 +520,9 @@ def _extract_source(srctree, keep_temp, devbranch, sync, config, basepath, works for event in history: if not 'flag' in event: if event['op'].startswith((':append[', ':prepend[')): - extra_overrides.append(event['op'].split('[')[1].split(']')[0]) + override = event['op'].split('[')[1].split(']')[0] + if not override.startswith('pn-'): + extra_overrides.append(override) # We want to remove duplicate overrides. If a recipe had multiple # SRC_URI_override += values it would cause mulitple instances of # overrides. 
This doesn't play nicely with things like creating a @@ -763,6 +765,16 @@ def get_staging_kbranch(srcdir): staging_kbranch = "".join(branch.split('\n')[0]) return staging_kbranch +def get_real_srctree(srctree, s, workdir): + # Check that recipe isn't using a shared workdir + s = os.path.abspath(s) + workdir = os.path.abspath(workdir) + if s.startswith(workdir) and s != workdir and os.path.dirname(s) != workdir: + # Handle if S is set to a subdirectory of the source + srcsubdir = os.path.relpath(s, workdir).split(os.sep, 1)[1] + srctree = os.path.join(srctree, srcsubdir) + return srctree + def modify(args, config, basepath, workspace): """Entry point for the devtool 'modify' subcommand""" import bb @@ -921,14 +933,7 @@ def modify(args, config, basepath, workspace): # Need to grab this here in case the source is within a subdirectory srctreebase = srctree - - # Check that recipe isn't using a shared workdir - s = os.path.abspath(rd.getVar('S')) - workdir = os.path.abspath(rd.getVar('WORKDIR')) - if s.startswith(workdir) and s != workdir and os.path.dirname(s) != workdir: - # Handle if S is set to a subdirectory of the source - srcsubdir = os.path.relpath(s, workdir).split(os.sep, 1)[1] - srctree = os.path.join(srctree, srcsubdir) + srctree = get_real_srctree(srctree, rd.getVar('S'), rd.getVar('WORKDIR')) bb.utils.mkdirhier(os.path.dirname(appendfile)) with open(appendfile, 'w') as f: @@ -1404,6 +1409,18 @@ def _export_local_files(srctree, rd, destdir, srctreebase): updated = OrderedDict() added = OrderedDict() removed = OrderedDict() + + # Get current branch and return early with empty lists + # if on one of the override branches + # (local files are provided only for the main branch and processing + # them against lists from recipe overrides will result in mismatches + # and broken modifications to recipes). 
+ stdout, _ = bb.process.run('git rev-parse --abbrev-ref HEAD', + cwd=srctree) + branchname = stdout.rstrip() + if branchname.startswith(override_branch_prefix): + return (updated, added, removed) + local_files_dir = os.path.join(srctreebase, 'oe-local-files') git_files = _git_ls_tree(srctree) if 'oe-local-files' in git_files: @@ -1604,6 +1621,19 @@ def _update_recipe_patch(recipename, workspace, srctree, rd, appendlayerdir, wil if not os.path.exists(append): raise DevtoolError('unable to find workspace bbappend for recipe %s' % recipename) + srctreebase = workspace[recipename]['srctreebase'] + relpatchdir = os.path.relpath(srctreebase, srctree) + if relpatchdir == '.': + patchdir_params = {} + else: + patchdir_params = {'patchdir': relpatchdir} + + def srcuri_entry(basepath): + if patchdir_params: + paramstr = ';' + ';'.join('%s=%s' % (k,v) for k,v in patchdir_params.items()) + else: + paramstr = '' + return 'file://%s%s' % (basepath, paramstr) initial_rev, update_rev, changed_revs, filter_patches = _get_patchset_revs(srctree, append, initial_rev, force_patch_refresh) if not initial_rev: @@ -1620,32 +1650,25 @@ def _update_recipe_patch(recipename, workspace, srctree, rd, appendlayerdir, wil tempdir = tempfile.mkdtemp(prefix='devtool') try: local_files_dir = tempfile.mkdtemp(dir=tempdir) - if filter_patches: - upd_f = {} - new_f = {} - del_f = {} - else: - srctreebase = workspace[recipename]['srctreebase'] - upd_f, new_f, del_f = _export_local_files(srctree, rd, local_files_dir, srctreebase) - - remove_files = [] - if not no_remove: - # Get all patches from source tree and check if any should be removed - all_patches_dir = tempfile.mkdtemp(dir=tempdir) - _, _, del_p = _export_patches(srctree, rd, initial_rev, - all_patches_dir) - # Remove deleted local files and patches - remove_files = list(del_f.values()) + list(del_p.values()) + upd_f, new_f, del_f = _export_local_files(srctree, rd, local_files_dir, srctreebase) # Get updated patches from source tree patches_dir = tempfile.mkdtemp(dir=tempdir) upd_p, new_p, _ = _export_patches(srctree, rd, update_rev, patches_dir, changed_revs) + # Get all patches from source tree and check if any should be removed + all_patches_dir = tempfile.mkdtemp(dir=tempdir) + _, _, del_p = _export_patches(srctree, rd, initial_rev, + all_patches_dir) logger.debug('Pre-filtering: update: %s, new: %s' % (dict(upd_p), dict(new_p))) if filter_patches: new_p = OrderedDict() upd_p = OrderedDict((k,v) for k,v in upd_p.items() if k in filter_patches) - remove_files = [f for f in remove_files if f in filter_patches] + del_p = OrderedDict((k,v) for k,v in del_p.items() if k in filter_patches) + remove_files = [] + if not no_remove: + # Remove deleted local files and patches + remove_files = list(del_f.values()) + list(del_p.values()) updatefiles = False updaterecipe = False destpath = None @@ -1661,14 +1684,15 @@ def _update_recipe_patch(recipename, workspace, srctree, rd, appendlayerdir, wil removedentries, remaining = _remove_file_entries( srcuri, remove_files) if removedentries or remaining: - remaining = ['file://' + os.path.basename(item) for + remaining = [srcuri_entry(os.path.basename(item)) for item in remaining] removevalues = {'SRC_URI': removedentries + remaining} appendfile, destpath = oe.recipeutils.bbappend_recipe( rd, appendlayerdir, files, wildcardver=wildcard_version, removevalues=removevalues, - redirect_output=dry_run_outdir) + redirect_output=dry_run_outdir, + params=[patchdir_params] * len(files)) else: logger.info('No patches or local source files 
needed updating') else: @@ -1692,7 +1716,7 @@ def _update_recipe_patch(recipename, workspace, srctree, rd, appendlayerdir, wil # replace the entry in SRC_URI with our local version logger.info('Replacing remote patch %s with updated local version' % basepath) path = os.path.join(files_dir, basepath) - _replace_srcuri_entry(srcuri, basepath, 'file://%s' % basepath) + _replace_srcuri_entry(srcuri, basepath, srcuri_entry(basepath)) updaterecipe = True else: logger.info('Updating patch %s%s' % (basepath, dry_run_suffix)) @@ -1706,7 +1730,7 @@ def _update_recipe_patch(recipename, workspace, srctree, rd, appendlayerdir, wil os.path.join(files_dir, basepath), dry_run_outdir=dry_run_outdir, base_outdir=recipedir) - srcuri.append('file://%s' % basepath) + srcuri.append(srcuri_entry(basepath)) updaterecipe = True for basepath, path in new_p.items(): logger.info('Adding new patch %s%s' % (basepath, dry_run_suffix)) @@ -1714,7 +1738,7 @@ def _update_recipe_patch(recipename, workspace, srctree, rd, appendlayerdir, wil os.path.join(files_dir, basepath), dry_run_outdir=dry_run_outdir, base_outdir=recipedir) - srcuri.append('file://%s' % basepath) + srcuri.append(srcuri_entry(basepath)) updaterecipe = True # Update recipe, if needed if _remove_file_entries(srcuri, remove_files)[0]: diff --git a/scripts/lib/devtool/upgrade.py b/scripts/lib/devtool/upgrade.py index 0357ec07bf..6c4a62b558 100644 --- a/scripts/lib/devtool/upgrade.py +++ b/scripts/lib/devtool/upgrade.py @@ -88,7 +88,7 @@ def _rename_recipe_files(oldrecipe, bpn, oldpv, newpv, path): _rename_recipe_dirs(oldpv, newpv, path) return _rename_recipe_file(oldrecipe, bpn, oldpv, newpv, path) -def _write_append(rc, srctree, same_dir, no_same_dir, rev, copied, workspace, d): +def _write_append(rc, srctreebase, srctree, same_dir, no_same_dir, rev, copied, workspace, d): """Writes an append file""" if not os.path.exists(rc): raise DevtoolError("bbappend not created because %s does not exist" % rc) @@ -104,6 +104,11 @@ def _write_append(rc, srctree, same_dir, no_same_dir, rev, copied, workspace, d) af = os.path.join(appendpath, '%s.bbappend' % brf) with open(af, 'w') as f: f.write('FILESEXTRAPATHS:prepend := "${THISDIR}/${PN}:"\n\n') + # Local files can be modified/tracked in separate subdir under srctree + # Mostly useful for packages with S != WORKDIR + f.write('FILESPATH:prepend := "%s:"\n' % + os.path.join(srctreebase, 'oe-local-files')) + f.write('# srctreebase: %s\n' % srctreebase) f.write('inherit externalsrc\n') f.write(('# NOTE: We use pn- overrides here to avoid affecting' 'multiple variants in the case where the recipe uses BBCLASSEXTEND\n')) @@ -119,20 +124,16 @@ def _write_append(rc, srctree, same_dir, no_same_dir, rev, copied, workspace, d) f.write('# original_files: %s\n' % ' '.join(copied)) return af -def _cleanup_on_error(rf, srctree): - rfp = os.path.split(rf)[0] # recipe folder - rfpp = os.path.split(rfp)[0] # recipes folder - if os.path.exists(rfp): - shutil.rmtree(rfp) - if not len(os.listdir(rfpp)): - os.rmdir(rfpp) +def _cleanup_on_error(rd, srctree): + if os.path.exists(rd): + shutil.rmtree(rd) srctree = os.path.abspath(srctree) if os.path.exists(srctree): shutil.rmtree(srctree) -def _upgrade_error(e, rf, srctree, keep_failure=False, extramsg=None): - if rf and not keep_failure: - _cleanup_on_error(rf, srctree) +def _upgrade_error(e, rd, srctree, keep_failure=False, extramsg=None): + if not keep_failure: + _cleanup_on_error(rd, srctree) logger.error(e) if extramsg: logger.error(extramsg) @@ -337,7 +338,10 @@ def 
_create_new_recipe(newpv, md5, sha256, srcrev, srcbranch, srcsubdir_old, src replacing = True new_src_uri = [] for entry in src_uri: - scheme, network, path, user, passwd, params = bb.fetch2.decodeurl(entry) + try: + scheme, network, path, user, passwd, params = bb.fetch2.decodeurl(entry) + except bb.fetch2.MalformedUrl as e: + raise DevtoolError("Could not decode SRC_URI: {}".format(e)) if replacing and scheme in ['git', 'gitsm']: branch = params.get('branch', 'master') if rd.expand(branch) != srcbranch: @@ -426,7 +430,7 @@ def _create_new_recipe(newpv, md5, sha256, srcrev, srcbranch, srcsubdir_old, src try: rd = tinfoil.parse_recipe_file(fullpath, False) except bb.tinfoil.TinfoilCommandFailed as e: - _upgrade_error(e, fullpath, srctree, keep_failure, 'Parsing of upgraded recipe failed') + _upgrade_error(e, os.path.dirname(fullpath), srctree, keep_failure, 'Parsing of upgraded recipe failed') oe.recipeutils.patch_recipe(rd, fullpath, newvalues) return fullpath, copied @@ -522,14 +526,7 @@ def upgrade(args, config, basepath, workspace): else: srctree = standard.get_default_srctree(config, pn) - # Check that recipe isn't using a shared workdir - s = os.path.abspath(rd.getVar('S')) - workdir = os.path.abspath(rd.getVar('WORKDIR')) - srctree_s = srctree - if s.startswith(workdir) and s != workdir and os.path.dirname(s) != workdir: - # Handle if S is set to a subdirectory of the source - srcsubdir = os.path.relpath(s, workdir).split(os.sep, 1)[1] - srctree_s = os.path.join(srctree, srcsubdir) + srctree_s = standard.get_real_srctree(srctree, rd.getVar('S'), rd.getVar('WORKDIR')) # try to automatically discover latest version and revision if not provided on command line if not args.version and not args.srcrev: @@ -568,13 +565,12 @@ def upgrade(args, config, basepath, workspace): new_licenses = _extract_licenses(srctree_s, (rd.getVar('LIC_FILES_CHKSUM') or "")) license_diff = _generate_license_diff(old_licenses, new_licenses) rf, copied = _create_new_recipe(args.version, md5, sha256, args.srcrev, srcbranch, srcsubdir1, srcsubdir2, config.workspace_path, tinfoil, rd, license_diff, new_licenses, srctree, args.keep_failure) - except bb.process.CmdError as e: - _upgrade_error(e, rf, srctree, args.keep_failure) - except DevtoolError as e: - _upgrade_error(e, rf, srctree, args.keep_failure) + except (bb.process.CmdError, DevtoolError) as e: + recipedir = os.path.join(config.workspace_path, 'recipes', rd.getVar('BPN')) + _upgrade_error(e, recipedir, srctree, args.keep_failure) standard._add_md5(config, pn, os.path.dirname(rf)) - af = _write_append(rf, srctree_s, args.same_dir, args.no_same_dir, rev2, + af = _write_append(rf, srctree, srctree_s, args.same_dir, args.no_same_dir, rev2, copied, config.workspace_path, rd) standard._add_md5(config, pn, af) diff --git a/scripts/lib/recipetool/create.py b/scripts/lib/recipetool/create.py index 220465ed2f..7b4c501456 100644 --- a/scripts/lib/recipetool/create.py +++ b/scripts/lib/recipetool/create.py @@ -745,6 +745,10 @@ def create_recipe(args): for handler in handlers: handler.process(srctree_use, classes, lines_before, lines_after, handled, extravalues) + # native and nativesdk classes are special and must be inherited last + # If present, put them at the end of the classes list + classes.sort(key=lambda c: c in ("native", "nativesdk")) + extrafiles = extravalues.pop('extrafiles', {}) extra_pn = extravalues.pop('PN', None) extra_pv = extravalues.pop('PV', None) @@ -1069,12 +1073,12 @@ def crunch_license(licfile): # Note: these are carefully constructed! 
license_title_re = re.compile(r'^#*\(? *(This is )?([Tt]he )?.{0,15} ?[Ll]icen[sc]e( \(.{1,10}\))?\)?[:\.]? ?#*$') license_statement_re = re.compile(r'^((This (project|software)|.{1,10}) is( free software)? (released|licen[sc]ed)|(Released|Licen[cs]ed)) under the .{1,10} [Ll]icen[sc]e:?$') - copyright_re = re.compile('^ *[#\*]* *(Modified work |MIT LICENSED )?Copyright ?(\([cC]\))? .*$') - disclaimer_re = re.compile('^ *\*? ?All [Rr]ights [Rr]eserved\.$') - email_re = re.compile('^.*<[\w\.-]*@[\w\.\-]*>$') - header_re = re.compile('^(\/\**!?)? ?[\-=\*]* ?(\*\/)?$') - tag_re = re.compile('^ *@?\(?([Ll]icense|MIT)\)?$') - url_re = re.compile('^ *[#\*]* *https?:\/\/[\w\.\/\-]+$') + copyright_re = re.compile(r'^ *[#\*]* *(Modified work |MIT LICENSED )?Copyright ?(\([cC]\))? .*$') + disclaimer_re = re.compile(r'^ *\*? ?All [Rr]ights [Rr]eserved\.$') + email_re = re.compile(r'^.*<[\w\.-]*@[\w\.\-]*>$') + header_re = re.compile(r'^(\/\**!?)? ?[\-=\*]* ?(\*\/)?$') + tag_re = re.compile(r'^ *@?\(?([Ll]icense|MIT)\)?$') + url_re = re.compile(r'^ *[#\*]* *https?:\/\/[\w\.\/\-]+$') crunched_md5sums = {} diff --git a/scripts/lib/recipetool/create_buildsys.py b/scripts/lib/recipetool/create_buildsys.py index 5015634476..bc4fb14a20 100644 --- a/scripts/lib/recipetool/create_buildsys.py +++ b/scripts/lib/recipetool/create_buildsys.py @@ -137,15 +137,15 @@ class CmakeRecipeHandler(RecipeHandler): deps = [] unmappedpkgs = [] - proj_re = re.compile('project\s*\(([^)]*)\)', re.IGNORECASE) - pkgcm_re = re.compile('pkg_check_modules\s*\(\s*[a-zA-Z0-9-_]+\s*(REQUIRED)?\s+([^)\s]+)\s*\)', re.IGNORECASE) - pkgsm_re = re.compile('pkg_search_module\s*\(\s*[a-zA-Z0-9-_]+\s*(REQUIRED)?((\s+[^)\s]+)+)\s*\)', re.IGNORECASE) - findpackage_re = re.compile('find_package\s*\(\s*([a-zA-Z0-9-_]+)\s*.*', re.IGNORECASE) - findlibrary_re = re.compile('find_library\s*\(\s*[a-zA-Z0-9-_]+\s*(NAMES\s+)?([a-zA-Z0-9-_ ]+)\s*.*') - checklib_re = re.compile('check_library_exists\s*\(\s*([^\s)]+)\s*.*', re.IGNORECASE) - include_re = re.compile('include\s*\(\s*([^)\s]*)\s*\)', re.IGNORECASE) - subdir_re = re.compile('add_subdirectory\s*\(\s*([^)\s]*)\s*([^)\s]*)\s*\)', re.IGNORECASE) - dep_re = re.compile('([^ ><=]+)( *[<>=]+ *[^ ><=]+)?') + proj_re = re.compile(r'project\s*\(([^)]*)\)', re.IGNORECASE) + pkgcm_re = re.compile(r'pkg_check_modules\s*\(\s*[a-zA-Z0-9-_]+\s*(REQUIRED)?\s+([^)\s]+)\s*\)', re.IGNORECASE) + pkgsm_re = re.compile(r'pkg_search_module\s*\(\s*[a-zA-Z0-9-_]+\s*(REQUIRED)?((\s+[^)\s]+)+)\s*\)', re.IGNORECASE) + findpackage_re = re.compile(r'find_package\s*\(\s*([a-zA-Z0-9-_]+)\s*.*', re.IGNORECASE) + findlibrary_re = re.compile(r'find_library\s*\(\s*[a-zA-Z0-9-_]+\s*(NAMES\s+)?([a-zA-Z0-9-_ ]+)\s*.*') + checklib_re = re.compile(r'check_library_exists\s*\(\s*([^\s)]+)\s*.*', re.IGNORECASE) + include_re = re.compile(r'include\s*\(\s*([^)\s]*)\s*\)', re.IGNORECASE) + subdir_re = re.compile(r'add_subdirectory\s*\(\s*([^)\s]*)\s*([^)\s]*)\s*\)', re.IGNORECASE) + dep_re = re.compile(r'([^ ><=]+)( *[<>=]+ *[^ ><=]+)?') def find_cmake_package(pkg): RecipeHandler.load_devel_filemap(tinfoil.config_data) @@ -423,16 +423,16 @@ class AutotoolsRecipeHandler(RecipeHandler): 'makeinfo': 'texinfo', } - pkg_re = re.compile('PKG_CHECK_MODULES\(\s*\[?[a-zA-Z0-9_]*\]?,\s*\[?([^,\]]*)\]?[),].*') - pkgce_re = re.compile('PKG_CHECK_EXISTS\(\s*\[?([^,\]]*)\]?[),].*') - lib_re = re.compile('AC_CHECK_LIB\(\s*\[?([^,\]]*)\]?,.*') - libx_re = re.compile('AX_CHECK_LIBRARY\(\s*\[?[^,\]]*\]?,\s*\[?([^,\]]*)\]?,\s*\[?([a-zA-Z0-9-]*)\]?,.*') - progs_re 
= re.compile('_PROGS?\(\s*\[?[a-zA-Z0-9_]*\]?,\s*\[?([^,\]]*)\]?[),].*') - dep_re = re.compile('([^ ><=]+)( [<>=]+ [^ ><=]+)?') - ac_init_re = re.compile('AC_INIT\(\s*([^,]+),\s*([^,]+)[,)].*') - am_init_re = re.compile('AM_INIT_AUTOMAKE\(\s*([^,]+),\s*([^,]+)[,)].*') - define_re = re.compile('\s*(m4_)?define\(\s*([^,]+),\s*([^,]+)\)') - version_re = re.compile('([0-9.]+)') + pkg_re = re.compile(r'PKG_CHECK_MODULES\(\s*\[?[a-zA-Z0-9_]*\]?,\s*\[?([^,\]]*)\]?[),].*') + pkgce_re = re.compile(r'PKG_CHECK_EXISTS\(\s*\[?([^,\]]*)\]?[),].*') + lib_re = re.compile(r'AC_CHECK_LIB\(\s*\[?([^,\]]*)\]?,.*') + libx_re = re.compile(r'AX_CHECK_LIBRARY\(\s*\[?[^,\]]*\]?,\s*\[?([^,\]]*)\]?,\s*\[?([a-zA-Z0-9-]*)\]?,.*') + progs_re = re.compile(r'_PROGS?\(\s*\[?[a-zA-Z0-9_]*\]?,\s*\[?([^,\]]*)\]?[),].*') + dep_re = re.compile(r'([^ ><=]+)( [<>=]+ [^ ><=]+)?') + ac_init_re = re.compile(r'AC_INIT\(\s*([^,]+),\s*([^,]+)[,)].*') + am_init_re = re.compile(r'AM_INIT_AUTOMAKE\(\s*([^,]+),\s*([^,]+)[,)].*') + define_re = re.compile(r'\s*(m4_)?define\(\s*([^,]+),\s*([^,]+)\)') + version_re = re.compile(r'([0-9.]+)') defines = {} def subst_defines(value): diff --git a/scripts/lib/recipetool/create_buildsys_python.py b/scripts/lib/recipetool/create_buildsys_python.py index 5686a62d3f..a7eed3256f 100644 --- a/scripts/lib/recipetool/create_buildsys_python.py +++ b/scripts/lib/recipetool/create_buildsys_python.py @@ -10,7 +10,7 @@ import codecs import collections import setuptools.command.build_py import email -import imp +import importlib import glob import itertools import logging @@ -561,7 +561,6 @@ class PythonRecipeHandler(RecipeHandler): return deps def parse_pkgdata_for_python_packages(self): - suffixes = [t[0] for t in imp.get_suffixes()] pkgdata_dir = tinfoil.config_data.getVar('PKGDATA_DIR') ldata = tinfoil.config_data.createCopy() @@ -585,7 +584,7 @@ class PythonRecipeHandler(RecipeHandler): continue for fn in files_info: - for suffix in suffixes: + for suffix in importlib.machinery.all_suffixes(): if fn.endswith(suffix): break else: diff --git a/scripts/lib/resulttool/report.py b/scripts/lib/resulttool/report.py index f0ca50ebe2..a349510ab8 100644 --- a/scripts/lib/resulttool/report.py +++ b/scripts/lib/resulttool/report.py @@ -176,7 +176,10 @@ class ResultsTextReport(object): vals['sort'] = line['testseries'] + "_" + line['result_id'] vals['failed_testcases'] = line['failed_testcases'] for k in cols: - vals[k] = "%d (%s%%)" % (line[k], format(line[k] / total_tested * 100, '.0f')) + if total_tested: + vals[k] = "%d (%s%%)" % (line[k], format(line[k] / total_tested * 100, '.0f')) + else: + vals[k] = "0 (0%)" for k in maxlen: if k in vals and len(vals[k]) > maxlen[k]: maxlen[k] = len(vals[k]) diff --git a/scripts/lib/resulttool/resultutils.py b/scripts/lib/resulttool/resultutils.py index 8917022d36..c5521d81bd 100644 --- a/scripts/lib/resulttool/resultutils.py +++ b/scripts/lib/resulttool/resultutils.py @@ -58,7 +58,11 @@ def append_resultsdata(results, f, configmap=store_map, configvars=extra_configv testseries = posixpath.basename(posixpath.dirname(url.path)) else: with open(f, "r") as filedata: - data = json.load(filedata) + try: + data = json.load(filedata) + except json.decoder.JSONDecodeError: + print("Cannot decode {}. Possible corruption. 
Skipping.".format(f)) + data = "" testseries = os.path.basename(os.path.dirname(f)) else: data = f @@ -142,7 +146,7 @@ def generic_get_log(sectionname, results, section): return decode_log(ptest['log']) def ptestresult_get_log(results, section): - return generic_get_log('ptestresuls.sections', results, section) + return generic_get_log('ptestresult.sections', results, section) def generic_get_rawlogs(sectname, results): if sectname not in results: diff --git a/scripts/lib/wic/misc.py b/scripts/lib/wic/misc.py index 3e11822996..2b90821b30 100644 --- a/scripts/lib/wic/misc.py +++ b/scripts/lib/wic/misc.py @@ -36,6 +36,7 @@ NATIVE_RECIPES = {"bmaptool": "bmap-tools", "mkdosfs": "dosfstools", "mkisofs": "cdrtools", "mkfs.btrfs": "btrfs-tools", + "mkfs.erofs": "erofs-utils", "mkfs.ext2": "e2fsprogs", "mkfs.ext3": "e2fsprogs", "mkfs.ext4": "e2fsprogs", @@ -140,11 +141,12 @@ def exec_native_cmd(cmd_and_args, native_sysroot, pseudo=""): cmd_and_args = pseudo + cmd_and_args hosttools_dir = get_bitbake_var("HOSTTOOLS_DIR") + target_sys = get_bitbake_var("TARGET_SYS") - native_paths = "%s/sbin:%s/usr/sbin:%s/usr/bin:%s/bin:%s" % \ + native_paths = "%s/sbin:%s/usr/sbin:%s/usr/bin:%s/usr/bin/%s:%s/bin:%s" % \ (native_sysroot, native_sysroot, - native_sysroot, native_sysroot, - hosttools_dir) + native_sysroot, native_sysroot, target_sys, + native_sysroot, hosttools_dir) native_cmd_and_args = "export PATH=%s:$PATH;%s" % \ (native_paths, cmd_and_args) diff --git a/scripts/lib/wic/partition.py b/scripts/lib/wic/partition.py index 09e491dd49..5275da6ed3 100644 --- a/scripts/lib/wic/partition.py +++ b/scripts/lib/wic/partition.py @@ -132,6 +132,8 @@ class Partition(): self.update_fstab_in_rootfs = True if not self.source: + if self.fstype == "none" or self.no_table: + return if not self.size and not self.fixed_size: raise WicError("The %s partition has a size of zero. 
Please " "specify a non-zero --size/--fixed-size for that " @@ -299,6 +301,30 @@ class Partition(): mkfs_cmd = "fsck.%s -pvfD %s" % (self.fstype, rootfs) exec_native_cmd(mkfs_cmd, native_sysroot, pseudo=pseudo) + if os.getenv('SOURCE_DATE_EPOCH'): + sde_time = hex(int(os.getenv('SOURCE_DATE_EPOCH'))) + debugfs_script_path = os.path.join(cr_workdir, "debugfs_script") + files = [] + for root, dirs, others in os.walk(rootfs_dir): + base = root.replace(rootfs_dir, "").rstrip(os.sep) + files += [ "/" if base == "" else base ] + files += [ base + "/" + n for n in dirs + others ] + with open(debugfs_script_path, "w") as f: + f.write("set_current_time %s\n" % (sde_time)) + if self.updated_fstab_path and self.has_fstab and not self.no_fstab_update: + f.write("set_inode_field /etc/fstab mtime %s\n" % (sde_time)) + f.write("set_inode_field /etc/fstab mtime_extra 0\n") + for file in set(files): + for time in ["atime", "ctime", "crtime"]: + f.write("set_inode_field \"%s\" %s %s\n" % (file, time, sde_time)) + f.write("set_inode_field \"%s\" %s_extra 0\n" % (file, time)) + for time in ["wtime", "mkfs_time", "lastcheck"]: + f.write("set_super_value %s %s\n" % (time, sde_time)) + for time in ["mtime", "first_error_time", "last_error_time"]: + f.write("set_super_value %s 0\n" % (time)) + debugfs_cmd = "debugfs -w -f %s %s" % (debugfs_script_path, rootfs) + exec_native_cmd(debugfs_cmd, native_sysroot) + self.check_for_Y2038_problem(rootfs, native_sysroot) def prepare_rootfs_btrfs(self, rootfs, cr_workdir, oe_builddir, rootfs_dir, @@ -352,7 +378,7 @@ class Partition(): exec_native_cmd(mcopy_cmd, native_sysroot) if self.updated_fstab_path and self.has_fstab and not self.no_fstab_update: - mcopy_cmd = "mcopy -i %s %s ::/etc/fstab" % (rootfs, self.updated_fstab_path) + mcopy_cmd = "mcopy -m -i %s %s ::/etc/fstab" % (rootfs, self.updated_fstab_path) exec_native_cmd(mcopy_cmd, native_sysroot) chmod_cmd = "chmod 644 %s" % rootfs @@ -380,6 +406,9 @@ class Partition(): (extraopts, self.fsuuid, rootfs, rootfs_dir) exec_native_cmd(erofs_cmd, native_sysroot, pseudo=pseudo) + def prepare_empty_partition_none(self, rootfs, oe_builddir, native_sysroot): + pass + def prepare_empty_partition_ext(self, rootfs, oe_builddir, native_sysroot): """ diff --git a/scripts/lib/wic/plugins/imager/direct.py b/scripts/lib/wic/plugins/imager/direct.py index 4d0b836ef6..165fc2979f 100644 --- a/scripts/lib/wic/plugins/imager/direct.py +++ b/scripts/lib/wic/plugins/imager/direct.py @@ -117,7 +117,7 @@ class DirectPlugin(ImagerPlugin): updated = False for part in self.parts: if not part.realnum or not part.mountpoint \ - or part.mountpoint == "/" or not part.mountpoint.startswith('/'): + or part.mountpoint == "/" or not (part.mountpoint.startswith('/') or part.mountpoint == "swap"): continue if part.use_uuid: @@ -148,6 +148,9 @@ class DirectPlugin(ImagerPlugin): self.updated_fstab_path = os.path.join(self.workdir, "fstab") with open(self.updated_fstab_path, "w") as f: f.writelines(fstab_lines) + if os.getenv('SOURCE_DATE_EPOCH'): + fstab_time = int(os.getenv('SOURCE_DATE_EPOCH')) + os.utime(self.updated_fstab_path, (fstab_time, fstab_time)) def _full_path(self, path, name, extention): """ Construct full file path to a file we generate. 
""" diff --git a/scripts/lib/wic/plugins/source/bootimg-efi.py b/scripts/lib/wic/plugins/source/bootimg-efi.py index 0391aebdc8..a2b9f4c893 100644 --- a/scripts/lib/wic/plugins/source/bootimg-efi.py +++ b/scripts/lib/wic/plugins/source/bootimg-efi.py @@ -326,21 +326,22 @@ class BootimgEFIPlugin(SourcePlugin): exec_cmd(install_cmd) staging_dir_host = get_bitbake_var("STAGING_DIR_HOST") + target_sys = get_bitbake_var("TARGET_SYS") # https://www.freedesktop.org/software/systemd/man/systemd-stub.html - objcopy_cmd = "objcopy \ - --add-section .osrel=%s --change-section-vma .osrel=0x20000 \ - --add-section .cmdline=%s --change-section-vma .cmdline=0x30000 \ - --add-section .linux=%s --change-section-vma .linux=0x2000000 \ - --add-section .initrd=%s --change-section-vma .initrd=0x3000000 \ - %s %s" % \ - ("%s/usr/lib/os-release" % staging_dir_host, - cmdline.name, - "%s/%s" % (staging_kernel_dir, kernel), - initrd.name, - efi_stub, - "%s/EFI/Linux/linux.efi" % hdddir) - exec_cmd(objcopy_cmd) + objcopy_cmd = "%s-objcopy" % target_sys + objcopy_cmd += " --enable-deterministic-archives" + objcopy_cmd += " --preserve-dates" + objcopy_cmd += " --add-section .osrel=%s/usr/lib/os-release" % staging_dir_host + objcopy_cmd += " --change-section-vma .osrel=0x20000" + objcopy_cmd += " --add-section .cmdline=%s" % cmdline.name + objcopy_cmd += " --change-section-vma .cmdline=0x30000" + objcopy_cmd += " --add-section .linux=%s/%s" % (staging_kernel_dir, kernel) + objcopy_cmd += " --change-section-vma .linux=0x2000000" + objcopy_cmd += " --add-section .initrd=%s" % initrd.name + objcopy_cmd += " --change-section-vma .initrd=0x3000000" + objcopy_cmd += " %s %s/EFI/Linux/linux.efi" % (efi_stub, hdddir) + exec_native_cmd(objcopy_cmd, native_sysroot) else: install_cmd = "install -m 0644 %s/%s %s/%s" % \ (staging_kernel_dir, kernel, hdddir, kernel) @@ -391,6 +392,13 @@ class BootimgEFIPlugin(SourcePlugin): logger.debug("Added %d extra blocks to %s to get to %d total blocks", extra_blocks, part.mountpoint, blocks) + # required for compatibility with certain devices expecting file system + # block count to be equal to partition block count + if blocks < part.fixed_size: + blocks = part.fixed_size + logger.debug("Overriding %s to %d total blocks for compatibility", + part.mountpoint, blocks) + # dosfs image, created by mkdosfs bootimg = "%s/boot.img" % cr_workdir diff --git a/scripts/lib/wic/plugins/source/rootfs.py b/scripts/lib/wic/plugins/source/rootfs.py index 25bb41dd70..e29f3a4c2f 100644 --- a/scripts/lib/wic/plugins/source/rootfs.py +++ b/scripts/lib/wic/plugins/source/rootfs.py @@ -35,7 +35,7 @@ class RootfsPlugin(SourcePlugin): @staticmethod def __validate_path(cmd, rootfs_dir, path): if os.path.isabs(path): - logger.error("%s: Must be relative: %s" % (cmd, orig_path)) + logger.error("%s: Must be relative: %s" % (cmd, path)) sys.exit(1) # Disallow climbing outside of parent directory using '..', @@ -224,7 +224,7 @@ class RootfsPlugin(SourcePlugin): if part.update_fstab_in_rootfs and part.has_fstab and not part.no_fstab_update: fstab_path = os.path.join(new_rootfs, "etc/fstab") # Assume that fstab should always be owned by root with fixed permissions - install_cmd = "install -m 0644 %s %s" % (part.updated_fstab_path, fstab_path) + install_cmd = "install -m 0644 -p %s %s" % (part.updated_fstab_path, fstab_path) if new_pseudo: pseudo = cls.__get_pseudo(native_sysroot, new_rootfs, new_pseudo) else: diff --git a/scripts/oe-check-sstate b/scripts/oe-check-sstate index f4cc5869de..0d171c4463 100755 --- 
a/scripts/oe-check-sstate +++ b/scripts/oe-check-sstate @@ -18,7 +18,6 @@ import re scripts_path = os.path.dirname(os.path.realpath(__file__)) lib_path = scripts_path + '/lib' sys.path = sys.path + [lib_path] -import scriptutils import scriptpath scriptpath.add_bitbake_lib_path() import argparse_oe @@ -51,13 +50,10 @@ def check(args): env['TMPDIR:forcevariable'] = tmpdir try: - output = subprocess.check_output( - 'bitbake -n %s' % ' '.join(args.target), - stderr=subprocess.STDOUT, - env=env, - shell=True) + cmd = ['bitbake', '--dry-run', '--runall=build'] + args.target + output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, env=env) - task_re = re.compile('NOTE: Running setscene task [0-9]+ of [0-9]+ \(([^)]+)\)') + task_re = re.compile(r'NOTE: Running setscene task [0-9]+ of [0-9]+ \(([^)]+)\)') tasks = [] for line in output.decode('utf-8').splitlines(): res = task_re.match(line) diff --git a/scripts/oe-depends-dot b/scripts/oe-depends-dot index 5eb3e12769..1c2d51c6ec 100755 --- a/scripts/oe-depends-dot +++ b/scripts/oe-depends-dot @@ -15,7 +15,7 @@ class Dot(object): def __init__(self): parser = argparse.ArgumentParser( description="Analyse recipe-depends.dot generated by bitbake -g", - epilog="Use %(prog)s --help to get help") + formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument("dotfile", help = "Specify the dotfile", nargs = 1, action='store', default='') parser.add_argument("-k", "--key", @@ -32,6 +32,21 @@ class Dot(object): " For example, A->B, B->C, A->C, then A->C can be removed.", action="store_true", default=False) + parser.epilog = """ +Examples: +First generate the .dot file: + bitbake -g core-image-minimal + +To find out why a package is being built: + %(prog)s -k <package> -w ./task-depends.dot + +To find out what a package depends on: + %(prog)s -k <package> -d ./task-depends.dot + +Reduce the .dot file packages only, no tasks: + %(prog)s -r ./task-depends.dot +""" + self.args = parser.parse_args() if len(sys.argv) != 3 and len(sys.argv) < 5: @@ -99,6 +114,10 @@ class Dot(object): if key == "meta-world-pkgdata": continue dep = m.group(2) + key = key.split('.')[0] + dep = dep.split('.')[0] + if key == dep: + continue if key in depends: if not key in depends[key]: depends[key].add(dep) diff --git a/scripts/oe-pkgdata-util b/scripts/oe-pkgdata-util index 7412cc1f47..44ae40549a 100755 --- a/scripts/oe-pkgdata-util +++ b/scripts/oe-pkgdata-util @@ -296,7 +296,7 @@ def package_info(args): extra = '' for line in f: for var in vars: - m = re.match(var + '(?::\S+)?:\s*(.+?)\s*$', line) + m = re.match(var + r'(?::\S+)?:\s*(.+?)\s*$', line) if m: vals[var] = m.group(1) pkg_version = vals['PKGV'] or '' diff --git a/scripts/oe-setup-builddir b/scripts/oe-setup-builddir index 54048e62ec..5d644168cb 100755 --- a/scripts/oe-setup-builddir +++ b/scripts/oe-setup-builddir @@ -74,9 +74,10 @@ fi if [ ! -r "$BUILDDIR/conf/local.conf" ]; then cat <<EOM You had no conf/local.conf file. This configuration file has therefore been -created for you with some default values. You may wish to edit it to, for -example, select a different MACHINE (target hardware). See conf/local.conf -for more information as common configuration options are commented. +created for you from $OECORELOCALCONF +You may wish to edit it to, for example, select a different MACHINE (target +hardware). See conf/local.conf for more information as common configuration +options are commented. EOM cp -f "$OECORELOCALCONF" "$BUILDDIR/conf/local.conf" @@ -89,8 +90,9 @@ fi if [ ! 
-r "$BUILDDIR/conf/bblayers.conf" ]; then cat <<EOM You had no conf/bblayers.conf file. This configuration file has therefore been -created for you with some default values. To add additional metadata layers -into your configuration please add entries to conf/bblayers.conf. +created for you from $OECORELAYERCONF +To add additional metadata layers into your configuration please add entries +to conf/bblayers.conf. EOM diff --git a/scripts/opkg-query-helper.py b/scripts/opkg-query-helper.py index bc3ab43823..084d9ef684 100755 --- a/scripts/opkg-query-helper.py +++ b/scripts/opkg-query-helper.py @@ -29,7 +29,7 @@ for arg in sys.argv[1:]: args.append(arg) # Regex for removing version specs after dependency items -verregex = re.compile(' \([=<>]* [^ )]*\)') +verregex = re.compile(r' \([=<>]* [^ )]*\)') pkg = "" ver = "" diff --git a/scripts/pybootchartgui/pybootchartgui/draw.py b/scripts/pybootchartgui/pybootchartgui/draw.py index fc708b55c3..ce3af74e2b 100644 --- a/scripts/pybootchartgui/pybootchartgui/draw.py +++ b/scripts/pybootchartgui/pybootchartgui/draw.py @@ -558,6 +558,11 @@ def render_processes_chart(ctx, options, trace, curr_y, w, h, sec_w): draw_rect(ctx, PROC_BORDER_COLOR, (x, y, w, proc_h)) draw_label_in_box(ctx, PROC_TEXT_COLOR, process, x, y + proc_h - 4, w, proc_h) + + # Show elapsed time for each task + elapsed_time = f"{trace.processes[process][1] - start}s" + draw_text(ctx, elapsed_time, PROC_TEXT_COLOR, x + w + 4, y + proc_h - 4) + y = y + proc_h return curr_y @@ -698,7 +703,7 @@ def draw_processes_recursively(ctx, proc, proc_tree, y, proc_h, rect, clip) : cmdString = proc.cmd else: cmdString = '' - if (OPTIONS.show_pid or OPTIONS.show_all) and ipid is not 0: + if (OPTIONS.show_pid or OPTIONS.show_all) and ipid != 0: cmdString = cmdString + " [" + str(ipid // 1000) + "]" if OPTIONS.show_all: if proc.args: @@ -796,7 +801,7 @@ class CumlSample: if self.color is None: i = self.next() % HSV_MAX_MOD h = 0.0 - if i is not 0: + if i != 0: h = (1.0 * i) / HSV_MAX_MOD s = 0.5 v = 1.0 diff --git a/scripts/pybootchartgui/pybootchartgui/parsing.py b/scripts/pybootchartgui/pybootchartgui/parsing.py index b42dac6b88..9d6787ec5a 100644 --- a/scripts/pybootchartgui/pybootchartgui/parsing.py +++ b/scripts/pybootchartgui/pybootchartgui/parsing.py @@ -128,7 +128,7 @@ class Trace: def compile(self, writer): def find_parent_id_for(pid): - if pid is 0: + if pid == 0: return 0 ppid = self.parent_map.get(pid) if ppid: diff --git a/scripts/relocate_sdk.py b/scripts/relocate_sdk.py index 4ed8bfc0d1..8a728720ba 100755 --- a/scripts/relocate_sdk.py +++ b/scripts/relocate_sdk.py @@ -104,11 +104,12 @@ def change_interpreter(elf_file_name): if (len(new_dl_path) >= p_filesz): print("ERROR: could not relocate %s, interp size = %i and %i is needed." \ % (elf_file_name, p_memsz, len(new_dl_path) + 1)) - break + return False dl_path = new_dl_path + b("\0") * (p_filesz - len(new_dl_path)) f.seek(p_offset) f.write(dl_path) break + return True def change_dl_sysdirs(elf_file_name): if arch == 32: @@ -222,6 +223,7 @@ else: executables_list = sys.argv[3:] +errors = False for e in executables_list: perms = os.stat(e)[stat.ST_MODE] if os.access(e, os.W_OK|os.R_OK): @@ -247,7 +249,8 @@ for e in executables_list: arch = get_arch() if arch: parse_elf_header() - change_interpreter(e) + if not change_interpreter(e): + errors = True change_dl_sysdirs(e) """ change permissions back """ @@ -260,3 +263,6 @@ for e in executables_list: print("New file size for %s is different. 
Looks like a relocation error!", e) sys.exit(-1) +if errors: + print("Relocation of one or more executables failed.") + sys.exit(-1) diff --git a/scripts/rpm2cpio.sh b/scripts/rpm2cpio.sh index 7cd771bbe7..8199b43784 100755 --- a/scripts/rpm2cpio.sh +++ b/scripts/rpm2cpio.sh @@ -7,7 +7,7 @@ fatal() { } pkg="$1" -[ -n "$pkg" -a -e "$pkg" ] || +[ -n "$pkg" ] && [ -e "$pkg" ] || fatal "No package supplied" _dd() { @@ -16,14 +16,23 @@ _dd() { } calcsize() { + + case "$(_dd $1 bs=4 count=1 | tr -d '\0')" in + "$(printf '\216\255\350')"*) ;; # '\x8e\xad\xe8' + *) fatal "File doesn't look like rpm: $pkg" ;; + esac + offset=$(($1 + 8)) local i b b0 b1 b2 b3 b4 b5 b6 b7 i=0 while [ $i -lt 8 ]; do - b=$(_dd $(($offset + $i)) bs=1 count=1; echo X) - b=${b%X} + # add . to not loose \n + # strip \0 as it gets dropped with warning otherwise + b="$(_dd $(($offset + $i)) bs=1 count=1 | tr -d '\0' ; echo .)" + b=${b%.} # strip . again + [ -z "$b" ] && b="0" || b="$(exec printf '%u\n' "'$b")" @@ -35,7 +44,7 @@ calcsize() { offset=$(($offset + $rsize)) } -case "$(_dd 0 bs=8 count=1)" in +case "$(_dd 0 bs=4 count=1 | tr -d '\0')" in "$(printf '\355\253\356\333')"*) ;; # '\xed\xab\xee\xdb' *) fatal "File doesn't look like rpm: $pkg" ;; esac @@ -46,10 +55,11 @@ sigsize=$rsize calcsize $(($offset + (8 - ($sigsize % 8)) % 8)) hdrsize=$rsize -case "$(_dd $offset bs=3 count=1)" in - "$(printf '\102\132')"*) _dd $offset | bunzip2 ;; # '\x42\x5a' - "$(printf '\037\213')"*) _dd $offset | gunzip ;; # '\x1f\x8b' - "$(printf '\375\067')"*) _dd $offset | xzcat ;; # '\xfd\x37' - "$(printf '\135\000')"*) _dd $offset | unlzma ;; # '\x5d\x00' - *) fatal "Unrecognized rpm file: $pkg" ;; +case "$(_dd $offset bs=2 count=1 | tr -d '\0')" in + "$(printf '\102\132')") _dd $offset | bunzip2 ;; # '\x42\x5a' + "$(printf '\037\213')") _dd $offset | gunzip ;; # '\x1f\x8b' + "$(printf '\375\067')") _dd $offset | xzcat ;; # '\xfd\x37' + "$(printf '\135')") _dd $offset | unlzma ;; # '\x5d\x00' + "$(printf '\050\265')") _dd $offset | unzstd ;; # '\x28\xb5' + *) fatal "Unrecognized payload compression format in rpm file: $pkg" ;; esac diff --git a/scripts/runqemu b/scripts/runqemu index 6e1f073ed2..ba7c1b2461 100755 --- a/scripts/runqemu +++ b/scripts/runqemu @@ -82,6 +82,7 @@ of the following environment variables (in any order): kvm-vhost - enable KVM with vhost when running x86/x86_64 (VT-capable CPU required) publicvnc - enable a VNC server open to all hosts audio - enable audio + qmp=<path> - create a QMP socket (defaults to unix:qmp.sock if unspecified) [*/]ovmf* - OVMF firmware file or base name for booting with UEFI tcpserial=<port> - specify tcp serial port number qemuparams=<xyz> - specify custom parameters to QEMU @@ -210,11 +211,13 @@ class BaseConfig(object): self.mac_tap = "52:54:00:12:34:" self.mac_slirp = "52:54:00:12:35:" # pid of the actual qemu process - self.qemupid = None + self.qemu_environ = os.environ.copy() + self.qemuprocess = None # avoid cleanup twice self.cleaned = False # Files to cleanup after run self.cleanup_files = [] + self.qmp = None def acquire_taplock(self, error=True): logger.debug("Acquiring lockfile %s..." 
% self.taplock) @@ -361,7 +364,7 @@ class BaseConfig(object): if p.endswith('.qemuboot.conf'): self.qemuboot = p self.qbconfload = True - elif re.search('\.bin$', p) or re.search('bzImage', p) or \ + elif re.search('\\.bin$', p) or re.search('bzImage', p) or \ re.search('zImage', p) or re.search('vmlinux', p) or \ re.search('fitImage', p) or re.search('uImage', p): self.kernel = p @@ -375,13 +378,13 @@ class BaseConfig(object): fst = t break if not fst: - m = re.search('.*\.(.*)$', self.rootfs) + m = re.search('.*\\.(.*)$', self.rootfs) if m: fst = m.group(1) if fst: self.check_arg_fstype(fst) - qb = re.sub('\.' + fst + "$", '', self.rootfs) - qb = '%s%s' % (re.sub('\.rootfs$', '', qb), '.qemuboot.conf') + qb = re.sub('\\.' + fst + "$", '', self.rootfs) + qb = '%s%s' % (re.sub('\\.rootfs$', '', qb), '.qemuboot.conf') if os.path.exists(qb): self.qemuboot = qb self.qbconfload = True @@ -446,29 +449,16 @@ class BaseConfig(object): self.set("MACHINE", arg) def set_dri_path(self): - # As runqemu can be run within bitbake (when using testimage, for example), - # we need to ensure that we run host pkg-config, and that it does not - # get mis-directed to native build paths set by bitbake. - try: - del os.environ['PKG_CONFIG_PATH'] - del os.environ['PKG_CONFIG_DIR'] - del os.environ['PKG_CONFIG_LIBDIR'] - del os.environ['PKG_CONFIG_SYSROOT_DIR'] - except KeyError: - pass - try: - dripath = subprocess.check_output("PATH=/bin:/usr/bin:$PATH pkg-config --variable=dridriverdir dri", shell=True) - except subprocess.CalledProcessError as e: - raise RunQemuError("Could not determine the path to dri drivers on the host via pkg-config.\nPlease install Mesa development files (particularly, dri.pc) on the host machine.") - os.environ['LIBGL_DRIVERS_PATH'] = dripath.decode('utf-8').strip() - - # This preloads uninative libc pieces and therefore ensures that RPATH/RUNPATH - # in host mesa drivers doesn't trick uninative into loading host libc. - preload_items = ['libdl.so.2', 'librt.so.1', 'libpthread.so.0'] - uninative_path = os.path.dirname(self.get("UNINATIVE_LOADER")) - if os.path.exists(uninative_path): - preload_paths = [os.path.join(uninative_path, i) for i in preload_items] - os.environ['LD_PRELOAD'] = " ".join(preload_paths) + drivers_path = os.path.join(self.bindir_native, '../lib/dri') + if not os.path.exists(drivers_path) or not os.listdir(drivers_path): + raise RunQemuError(""" +qemu has been built without opengl support and accelerated graphics support is not available. +To enable it, add: +DISTRO_FEATURES_NATIVE:append = " opengl" +DISTRO_FEATURES_NATIVESDK:append = " opengl" +to your build configuration. 
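As a side note on the set_dri_path hunk above, here is a minimal standalone sketch of how the reworked DRI lookup behaves: it resolves the drivers shipped in the qemu-system-native sysroot instead of asking the host's pkg-config, and it prepares a copied environment rather than mutating os.environ. Only bindir_native and LIBGL_DRIVERS_PATH come from the patch; the helper name and the abridged error text are illustrative.

import os

def resolve_native_dri(bindir_native):
    # dri drivers bundled next to qemu-system-native, not the host's Mesa
    drivers_path = os.path.join(bindir_native, '../lib/dri')
    if not os.path.exists(drivers_path) or not os.listdir(drivers_path):
        raise RuntimeError("qemu was built without opengl support; add 'opengl' "
                           "to DISTRO_FEATURES_NATIVE and DISTRO_FEATURES_NATIVESDK")
    env = os.environ.copy()              # modify a copy, keep the caller's environment intact
    env['LIBGL_DRIVERS_PATH'] = drivers_path
    return env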
+""") + self.qemu_environ['LIBGL_DRIVERS_PATH'] = drivers_path def check_args(self): for debug in ("-d", "--debug"): @@ -482,8 +472,8 @@ class BaseConfig(object): sys.argv.remove(quiet) if 'gl' not in sys.argv[1:] and 'gl-es' not in sys.argv[1:]: - os.environ['SDL_RENDER_DRIVER'] = 'software' - os.environ['SDL_FRAMEBUFFER_ACCELERATION'] = 'false' + self.qemu_environ['SDL_RENDER_DRIVER'] = 'software' + self.qemu_environ['SDL_FRAMEBUFFER_ACCELERATION'] = 'false' unknown_arg = "" for arg in sys.argv[1:]: @@ -497,7 +487,7 @@ class BaseConfig(object): self.gtk = True elif arg == 'gl': self.gl = True - elif 'gl-es' in sys.argv[1:]: + elif arg == 'gl-es': self.gl_es = True elif arg == 'egl-headless': self.egl_headless = True @@ -524,6 +514,10 @@ class BaseConfig(object): elif arg == 'publicvnc': self.publicvnc = True self.qemu_opt_script += ' -vnc :0' + elif arg == "qmp": + self.qmp = "unix:qmp.sock" + elif arg.startswith("qmp="): + self.qmp = arg[len('qmp='):] elif arg.startswith('tcpserial='): self.tcpserial_portnum = '%s' % arg[len('tcpserial='):] elif arg.startswith('qemuparams='): @@ -999,17 +993,14 @@ class BaseConfig(object): else: self.nfs_server = '192.168.7.1' - # Figure out a new nfs_instance to allow multiple qemus running. - ps = subprocess.check_output(("ps", "auxww")).decode('utf-8') - pattern = '/bin/unfsd .* -i .*\.pid -e .*/exports([0-9]+) ' - all_instances = re.findall(pattern, ps, re.M) - if all_instances: - all_instances.sort(key=int) - self.nfs_instance = int(all_instances.pop()) + 1 - - nfsd_port = 3049 + 2 * self.nfs_instance - mountd_port = 3048 + 2 * self.nfs_instance + nfsd_port = 3048 + self.nfs_instance + lockdir = "/tmp/qemu-port-locks" + self.make_lock_dir(lockdir) + while not self.check_free_port('localhost', nfsd_port, lockdir): + self.nfs_instance += 1 + nfsd_port += 1 + mountd_port = nfsd_port # Export vars for runqemu-export-rootfs export_dict = { 'NFS_INSTANCE': self.nfs_instance, @@ -1060,6 +1051,17 @@ class BaseConfig(object): self.set('NETWORK_CMD', '-netdev bridge,br=%s,id=net0,helper=%s -device virtio-net-pci,netdev=net0 ' % ( self.net_bridge, os.path.join(self.bindir_native, 'qemu-oe-bridge-helper'))) + def make_lock_dir(self, lockdir): + if not os.path.exists(lockdir): + # There might be a race issue when multi runqemu processess are + # running at the same time. + try: + os.mkdir(lockdir) + os.chmod(lockdir, 0o777) + except FileExistsError: + pass + return + def setup_slirp(self): """Setup user networking""" @@ -1078,14 +1080,7 @@ class BaseConfig(object): mac = 2 lockdir = "/tmp/qemu-port-locks" - if not os.path.exists(lockdir): - # There might be a race issue when multi runqemu processess are - # running at the same time. - try: - os.mkdir(lockdir) - os.chmod(lockdir, 0o777) - except FileExistsError: - pass + self.make_lock_dir(lockdir) # Find a free port to avoid conflicts for p in ports[:]: @@ -1125,14 +1120,7 @@ class BaseConfig(object): logger.error("ip: %s" % ip) raise OEPathError("runqemu-ifup, runqemu-ifdown or ip not found") - if not os.path.exists(lockdir): - # There might be a race issue when multi runqemu processess are - # running at the same time. - try: - os.mkdir(lockdir) - os.chmod(lockdir, 0o777) - except FileExistsError: - pass + self.make_lock_dir(lockdir) cmd = (ip, 'link') logger.debug('Running %s...' 
% str(cmd)) @@ -1350,6 +1338,10 @@ class BaseConfig(object): raise RunQemuError("Failed to boot, QB_SYSTEM_NAME is NULL!") self.qemu_system = qemu_system + def setup_qmp(self): + if self.qmp: + self.qemu_opt += " -qmp %s,server,nowait" % self.qmp + def setup_vga(self): if self.nographic == True: if self.sdl == True: @@ -1369,13 +1361,13 @@ class BaseConfig(object): # need our font setup and show-cusor below so we need to see what qemu --help says # is supported so we can pass our correct config in. if not self.nographic and not self.sdl and not self.gtk and not self.publicvnc and not self.egl_headless == True: - output = subprocess.check_output([self.qemu_bin, "--help"], universal_newlines=True) + output = subprocess.check_output([self.qemu_bin, "--help"], universal_newlines=True, env=self.qemu_environ) if "-display gtk" in output: self.gtk = True elif "-display sdl" in output: self.sdl = True else: - self.qemu_opt += '-display none' + self.qemu_opt += ' -display none' if self.sdl == True or self.gtk == True or self.egl_headless == True: @@ -1393,7 +1385,7 @@ class BaseConfig(object): if self.sdl == True: self.qemu_opt += 'sdl,' elif self.gtk == True: - os.environ['FONTCONFIG_PATH'] = '/etc/fonts' + self.qemu_environ['FONTCONFIG_PATH'] = '/etc/fonts' self.qemu_opt += 'gtk,' if self.gl == True: @@ -1480,6 +1472,7 @@ class BaseConfig(object): if self.snapshot: self.qemu_opt += " -snapshot" + self.setup_qmp() self.setup_serial() self.setup_vga() @@ -1500,14 +1493,17 @@ class BaseConfig(object): cmd = "%s %s" % (self.qemu_opt, kernel_opts) cmds = shlex.split(cmd) logger.info('Running %s\n' % cmd) + with open('/proc/uptime', 'r') as f: + uptime_seconds = f.readline().split()[0] + logger.info('Host uptime: %s\n' % uptime_seconds) pass_fds = [] if self.taplock_descriptor: pass_fds = [self.taplock_descriptor.fileno()] if len(self.portlocks): for descriptor in self.portlocks.values(): pass_fds.append(descriptor.fileno()) - process = subprocess.Popen(cmds, stderr=subprocess.PIPE, pass_fds=pass_fds) - self.qemupid = process.pid + process = subprocess.Popen(cmds, stderr=subprocess.PIPE, pass_fds=pass_fds, env=self.qemu_environ) + self.qemuprocess = process retcode = process.wait() if retcode: if retcode == -signal.SIGTERM: @@ -1523,18 +1519,30 @@ class BaseConfig(object): signal.signal(signal.SIGTERM, signal.SIG_IGN) logger.info("Cleaning up") + + if self.qemuprocess: + try: + # give it some time to shut down, ignore return values and output + self.qemuprocess.send_signal(signal.SIGTERM) + self.qemuprocess.communicate(timeout=5) + except subprocess.TimeoutExpired: + self.qemuprocess.kill() + + with open('/proc/uptime', 'r') as f: + uptime_seconds = f.readline().split()[0] + logger.info('Host uptime: %s\n' % uptime_seconds) if self.cleantap: cmd = ('sudo', self.qemuifdown, self.tap, self.bindir_native) logger.debug('Running %s' % str(cmd)) subprocess.check_call(cmd) self.release_taplock() - self.release_portlock() if self.nfs_running: logger.info("Shutting down the userspace NFS server...") cmd = ("runqemu-export-rootfs", "stop", self.rootfs) logger.debug('Running %s' % str(cmd)) subprocess.check_call(cmd) + self.release_portlock() if self.saved_stty: subprocess.check_call(("stty", self.saved_stty)) @@ -1547,6 +1555,9 @@ class BaseConfig(object): else: shutil.rmtree(ent) + # Deliberately ignore the return code of 'tput smam'. 
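For the new qmp option above (runqemu ... qmp, defaulting to unix:qmp.sock), a hypothetical client could talk to that socket with nothing more than the standard library. The socket path and the query-status command here are examples; only the option itself and its default come from the patch.

import json
import socket

def qmp_query_status(path="qmp.sock"):
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    sock.connect(path)
    f = sock.makefile("rw")
    json.loads(f.readline())                                  # QMP greeting banner
    f.write(json.dumps({"execute": "qmp_capabilities"}) + "\n")
    f.flush()
    json.loads(f.readline())                                  # {"return": {}}
    f.write(json.dumps({"execute": "query-status"}) + "\n")
    f.flush()
    return json.loads(f.readline())                           # VM run state, e.g. "running"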
+ subprocess.call(["tput", "smam"]) + self.cleaned = True def run_bitbake_env(self, mach=None): @@ -1623,12 +1634,8 @@ def main(): subprocess.check_call([renice, str(os.getpid())]) def sigterm_handler(signum, frame): - logger.info("SIGTERM received") - if config.qemupid: - os.kill(config.qemupid, signal.SIGTERM) + logger.info("Received signal: %s" % (signum)) config.cleanup() - # Deliberately ignore the return code of 'tput smam'. - subprocess.call(["tput", "smam"]) signal.signal(signal.SIGTERM, sigterm_handler) config.check_args() @@ -1650,8 +1657,6 @@ def main(): return 1 finally: config.cleanup() - # Deliberately ignore the return code of 'tput smam'. - subprocess.call(["tput", "smam"]) if __name__ == "__main__": sys.exit(main()) diff --git a/scripts/runqemu.README b/scripts/runqemu.README index da9abd7dfb..e5f4b4634c 100644 --- a/scripts/runqemu.README +++ b/scripts/runqemu.README @@ -1,12 +1,12 @@ Using OE images with QEMU ========================= -OE-Core can generate qemu bootable kernels and images with can be used +OE-Core can generate qemu bootable kernels and images which can be used on a desktop system. The scripts currently support booting ARM, MIPS, PowerPC -and x86 (32 and 64 bit) images. The scripts can be used within the OE build -system or externaly. +and x86 (32 and 64 bit) images. The scripts can be used within the OE build +system or externally. -The runqemu script is run as: +The runqemu script is run as: runqemu <machine> <zimage> <filesystem> @@ -15,13 +15,13 @@ where: <machine> is the machine/architecture to use (qemuarm/qemumips/qemuppc/qemux86/qemux86-64) <zimage> is the path to a kernel (e.g. zimage-qemuarm.bin) <filesystem> is the path to an ext2 image (e.g. filesystem-qemuarm.ext2) or an nfs directory - -If <machine> isn't specified, the script will try to detect the machine name + +If <machine> isn't specified, the script will try to detect the machine name from the name of the <zimage> file. If <filesystem> isn't specified, nfs booting will be assumed. -When used within the build system, it will default to qemuarm, ext2 and the last kernel and +When used within the build system, it will default to qemuarm, ext2 and the last kernel and core-image-sato-sdk image built by the build system. If an sdk image isn't present it will look for sato and minimal images. @@ -31,7 +31,7 @@ Full usage instructions can be seen by running the command with no options speci Notes ===== - - The scripts run qemu using sudo. Change perms on /dev/net/tun to + - The scripts run qemu using sudo. Change perms on /dev/net/tun to run as non root. The runqemu-gen-tapdevs script can also be used by root to prepopulate the appropriate network devices. - You can access the host computer at 192.168.7.1 within the image. diff --git a/scripts/yocto-check-layer b/scripts/yocto-check-layer index 0e5b75b1f7..67cc71950f 100755 --- a/scripts/yocto-check-layer +++ b/scripts/yocto-check-layer @@ -168,14 +168,13 @@ def main(): layers_tested = 0 for layer in layers: - if layer['type'] == LayerType.ERROR_NO_LAYER_CONF or \ - layer['type'] == LayerType.ERROR_BSP_DISTRO: + if layer['type'] in (LayerType.ERROR_NO_LAYER_CONF, LayerType.ERROR_BSP_DISTRO): continue # Reset to a clean backup copy for each run shutil.copyfile(bblayersconf + '.backup', bblayersconf) - if check_bblayers(bblayersconf, layer['path'], logger): + if layer['type'] not in (LayerType.CORE, ) and check_bblayers(bblayersconf, layer['path'], logger): logger.info("%s already in %s. 
To capture initial signatures, layer under test should not be present " "in BBLAYERS. Please remove %s from BBLAYERS." % (layer['name'], bblayersconf, layer['name'])) results[layer['name']] = None
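Taken together, the runqemu hunks above converge on a simpler shutdown flow: the SIGTERM handler only logs and calls cleanup(), and cleanup() owns terminating the qemu Popen object with a timeout. A condensed sketch, assuming the attribute names from the patch (qemuprocess, cleanup); the standalone function names are invented for illustration.

import signal
import subprocess

def install_sigterm_handler(config, logger):
    # The handler no longer kills qemu directly; process teardown lives in cleanup().
    def sigterm_handler(signum, frame):
        logger.info("Received signal: %s" % signum)
        config.cleanup()
    signal.signal(signal.SIGTERM, sigterm_handler)

def stop_qemu(qemuprocess):
    # Ask qemu to exit, then force-kill if it is still around after 5 seconds.
    if qemuprocess:
        try:
            qemuprocess.send_signal(signal.SIGTERM)
            qemuprocess.communicate(timeout=5)
        except subprocess.TimeoutExpired:
            qemuprocess.kill()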