Diffstat (limited to 'scripts')
-rw-r--r--  scripts/.oe-layers.json | 7
-rwxr-xr-x  scripts/autobuilder-worker-prereq-tests | 28
-rwxr-xr-x  scripts/bblock | 184
-rwxr-xr-x  scripts/bitbake-prserv-tool | 9
-rwxr-xr-x  scripts/bitbake-whatchanged | 320
-rwxr-xr-x  scripts/buildhistory-collect-srcrevs | 6
-rwxr-xr-x  scripts/buildhistory-diff | 5
-rwxr-xr-x  scripts/buildstats-diff | 2
-rwxr-xr-x  scripts/buildstats-summary | 126
-rwxr-xr-x  scripts/combo-layer | 38
-rwxr-xr-x  scripts/combo-layer-hook-default.sh | 2
-rwxr-xr-x  scripts/contrib/bb-perf/bb-matrix-plot.sh | 4
-rwxr-xr-x  scripts/contrib/bbvars.py | 6
-rwxr-xr-x  scripts/contrib/build-perf-test-wrapper.sh | 15
-rwxr-xr-x  scripts/contrib/convert-overrides.py | 155
-rwxr-xr-x  scripts/contrib/convert-spdx-licenses.py | 145
-rwxr-xr-x  scripts/contrib/convert-srcuri.py | 77
-rwxr-xr-x  scripts/contrib/convert-variable-renames.py | 116
-rwxr-xr-x  scripts/contrib/ddimage | 2
-rwxr-xr-x  scripts/contrib/dialog-power-control | 2
-rwxr-xr-x  scripts/contrib/documentation-audit.sh | 6
-rwxr-xr-x  scripts/contrib/image-manifest | 523
-rwxr-xr-x  scripts/contrib/list-packageconfig-flags.py | 2
-rwxr-xr-x  scripts/contrib/oe-build-perf-report-email.py | 167
-rwxr-xr-x  scripts/contrib/patchreview.py | 71
-rwxr-xr-x  scripts/contrib/test_build_time.sh | 2
-rwxr-xr-x  scripts/contrib/test_build_time_worker.sh | 2
-rwxr-xr-x  scripts/contrib/verify-homepage.py | 2
-rwxr-xr-x  scripts/cp-noerror | 2
-rwxr-xr-x  scripts/create-pull-request | 9
l---------  scripts/cross-intercept/ar | 1
-rwxr-xr-x  scripts/crosstap | 8
-rwxr-xr-x  scripts/devtool | 34
l---------  scripts/esdk-tools/devtool | 1
l---------  scripts/esdk-tools/oe-find-native-sysroot | 1
l---------  scripts/esdk-tools/recipetool | 1
l---------  scripts/esdk-tools/runqemu | 1
l---------  scripts/esdk-tools/runqemu-addptable2image | 1
l---------  scripts/esdk-tools/runqemu-export-rootfs | 1
l---------  scripts/esdk-tools/runqemu-extract-sdk | 1
l---------  scripts/esdk-tools/runqemu-gen-tapdevs | 1
l---------  scripts/esdk-tools/runqemu-ifdown | 1
l---------  scripts/esdk-tools/runqemu-ifup | 1
l---------  scripts/esdk-tools/wic | 1
-rwxr-xr-x  scripts/gen-lockedsig-cache | 6
-rwxr-xr-x  scripts/git | 30
-rwxr-xr-x  scripts/install-buildtools | 22
-rw-r--r--  scripts/lib/argparse_oe.py | 2
-rw-r--r--  scripts/lib/build_perf/report.py | 3
-rw-r--r--  scripts/lib/buildstats.py | 38
-rw-r--r--  scripts/lib/checklayer/__init__.py | 58
-rw-r--r--  scripts/lib/checklayer/cases/bsp.py | 4
-rw-r--r--  scripts/lib/checklayer/cases/common.py | 46
-rw-r--r--  scripts/lib/checklayer/cases/distro.py | 2
-rw-r--r--  scripts/lib/devtool/__init__.py | 27
-rw-r--r--  scripts/lib/devtool/build_image.py | 2
-rw-r--r--  scripts/lib/devtool/build_sdk.py | 2
-rw-r--r--  scripts/lib/devtool/deploy.py | 240
-rw-r--r--  scripts/lib/devtool/ide_plugins/__init__.py | 282
-rw-r--r--  scripts/lib/devtool/ide_plugins/ide_code.py | 463
-rw-r--r--  scripts/lib/devtool/ide_plugins/ide_none.py | 53
-rwxr-xr-x  scripts/lib/devtool/ide_sdk.py | 1070
-rw-r--r--  scripts/lib/devtool/menuconfig.py | 4
-rw-r--r--  scripts/lib/devtool/sdk.py | 5
-rw-r--r--  scripts/lib/devtool/search.py | 5
-rw-r--r--  scripts/lib/devtool/standard.py | 534
-rw-r--r--  scripts/lib/devtool/upgrade.py | 198
-rw-r--r--  scripts/lib/recipetool/append.py | 78
-rw-r--r--  scripts/lib/recipetool/create.py | 360
-rw-r--r--  scripts/lib/recipetool/create_buildsys.py | 43
-rw-r--r--  scripts/lib/recipetool/create_buildsys_python.py | 1100
-rw-r--r--  scripts/lib/recipetool/create_go.py | 779
-rw-r--r--  scripts/lib/recipetool/create_kmod.py | 2
-rw-r--r--  scripts/lib/recipetool/create_npm.py | 107
-rw-r--r--  scripts/lib/recipetool/licenses.csv | 37
-rw-r--r--  scripts/lib/recipetool/setvar.py | 1
-rw-r--r--  scripts/lib/resulttool/log.py | 13
-rw-r--r--  scripts/lib/resulttool/regression.py | 281
-rw-r--r--  scripts/lib/resulttool/report.py | 5
-rw-r--r--  scripts/lib/resulttool/resultutils.py | 8
-rw-r--r--  scripts/lib/scriptutils.py | 25
-rw-r--r--  scripts/lib/wic/canned-wks/efi-bootdisk.wks.in | 2
-rw-r--r--  scripts/lib/wic/canned-wks/qemuloongarch.wks | 3
-rw-r--r--  scripts/lib/wic/canned-wks/qemux86-directdisk.wks | 2
-rw-r--r--  scripts/lib/wic/engine.py | 6
-rw-r--r--  scripts/lib/wic/filemap.py | 7
-rw-r--r--  scripts/lib/wic/help.py | 18
-rw-r--r--  scripts/lib/wic/ksparser.py | 30
-rw-r--r--  scripts/lib/wic/misc.py | 16
-rw-r--r--  scripts/lib/wic/partition.py | 153
-rw-r--r--  scripts/lib/wic/pluginbase.py | 8
-rw-r--r--  scripts/lib/wic/plugins/imager/direct.py | 195
-rw-r--r--  scripts/lib/wic/plugins/source/bootimg-efi.py | 211
-rw-r--r--  scripts/lib/wic/plugins/source/bootimg-partition.py | 9
-rw-r--r--  scripts/lib/wic/plugins/source/bootimg-pcbios.py | 12
-rw-r--r--  scripts/lib/wic/plugins/source/empty.py | 89
-rw-r--r--  scripts/lib/wic/plugins/source/isoimage-isohybrid.py | 16
-rw-r--r--  scripts/lib/wic/plugins/source/rawcopy.py | 42
-rw-r--r--  scripts/lib/wic/plugins/source/rootfs.py | 40
-rwxr-xr-x  scripts/lnr | 24
-rwxr-xr-x  scripts/native-intercept/ar | 32
-rwxr-xr-x  scripts/nativesdk-intercept/chgrp | 30
-rwxr-xr-x  scripts/nativesdk-intercept/chown | 30
-rwxr-xr-x  scripts/oe-buildenv-internal | 35
-rwxr-xr-x  scripts/oe-check-sstate | 14
-rwxr-xr-x  scripts/oe-debuginfod | 28
-rwxr-xr-x  scripts/oe-depends-dot | 34
-rwxr-xr-x  scripts/oe-find-native-sysroot | 15
-rwxr-xr-x  scripts/oe-gnome-terminal-phonehome | 2
-rwxr-xr-x  scripts/oe-pkgdata-browser | 13
-rwxr-xr-x  scripts/oe-pkgdata-util | 50
-rwxr-xr-x  scripts/oe-publish-sdk | 4
-rwxr-xr-x  scripts/oe-pylint | 2
-rwxr-xr-x  scripts/oe-run-native | 2
-rwxr-xr-x  scripts/oe-setup-build | 122
-rwxr-xr-x  scripts/oe-setup-builddir | 106
-rwxr-xr-x  scripts/oe-setup-layers | 146
-rwxr-xr-x  scripts/oe-setup-vscode | 93
-rwxr-xr-x  scripts/oe-time-dd-test.sh | 106
-rwxr-xr-x  scripts/oe-trim-schemas | 2
-rwxr-xr-x  scripts/oepydevshell-internal.py | 4
-rwxr-xr-x  scripts/opkg-query-helper.py | 2
-rwxr-xr-x  scripts/patchtest | 232
-rwxr-xr-x  scripts/patchtest-get-branch | 81
-rwxr-xr-x  scripts/patchtest-get-series | 115
-rwxr-xr-x  scripts/patchtest-send-results | 110
-rwxr-xr-x  scripts/patchtest-setup-sharedir | 83
-rw-r--r--  scripts/patchtest.README | 153
-rw-r--r--  scripts/postinst-intercepts/update_gtk_icon_cache | 6
-rw-r--r--  scripts/postinst-intercepts/update_mandb | 18
-rw-r--r--  scripts/postinst-intercepts/update_udev_hwdb | 6
-rw-r--r--  scripts/pybootchartgui/pybootchartgui/draw.py | 158
-rw-r--r--  scripts/pybootchartgui/pybootchartgui/parsing.py | 35
-rw-r--r--  scripts/pybootchartgui/pybootchartgui/samples.py | 25
-rwxr-xr-x  scripts/pythondeps | 2
-rwxr-xr-x  scripts/relocate_sdk.py | 35
-rwxr-xr-x  scripts/rpm2cpio.sh | 30
-rwxr-xr-x  scripts/runqemu | 634
-rwxr-xr-x  scripts/runqemu-addptable2image | 2
-rwxr-xr-x  scripts/runqemu-export-rootfs | 25
-rwxr-xr-x  scripts/runqemu-extract-sdk | 2
-rwxr-xr-x  scripts/runqemu-gen-tapdevs | 120
-rwxr-xr-x  scripts/runqemu-ifdown | 42
-rwxr-xr-x  scripts/runqemu-ifup | 65
-rw-r--r--  scripts/runqemu.README | 16
-rwxr-xr-x  scripts/sstate-cache-management.py | 329
-rwxr-xr-x  scripts/sstate-cache-management.sh | 458
-rwxr-xr-x  scripts/sstate-diff-machines.sh | 6
-rwxr-xr-x  scripts/sstate-sysroot-cruft.sh | 14
-rwxr-xr-x  scripts/sysroot-relativelinks.py | 2
-rwxr-xr-x  scripts/task-time | 2
-rwxr-xr-x  scripts/verify-bashisms | 14
-rwxr-xr-x  scripts/wic | 13
-rwxr-xr-x  scripts/yocto-check-layer | 52
-rwxr-xr-x  scripts/yocto_testresults_query.py | 131
155 files changed, 10075 insertions, 2720 deletions
diff --git a/scripts/.oe-layers.json b/scripts/.oe-layers.json
new file mode 100644
index 0000000000..1b00a84b54
--- /dev/null
+++ b/scripts/.oe-layers.json
@@ -0,0 +1,7 @@
+{
+ "layers": [
+ "../meta-poky",
+ "../meta"
+ ],
+ "version": "1.0"
+}
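
The new scripts/.oe-layers.json is a small machine-readable index of layer paths, relative to the scripts directory, plus a format version. A minimal sketch of resolving those entries follows (an illustrative reader only; the consuming tool is not shown in this hunk):

#!/usr/bin/env python3
# Illustrative reader for scripts/.oe-layers.json: entries are paths
# relative to the directory containing the index file itself.
import json
import os

def load_layers(index_path):
    with open(index_path) as f:
        data = json.load(f)
    if data.get("version") != "1.0":
        raise ValueError("unexpected .oe-layers.json version: %r" % data.get("version"))
    base = os.path.dirname(os.path.abspath(index_path))
    # "../meta" next to scripts/ resolves to the top-level meta layer
    return [os.path.normpath(os.path.join(base, p)) for p in data["layers"]]

if __name__ == "__main__":
    print(load_layers("scripts/.oe-layers.json"))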
diff --git a/scripts/autobuilder-worker-prereq-tests b/scripts/autobuilder-worker-prereq-tests
index 5d7e6e2601..54fd3c1004 100755
--- a/scripts/autobuilder-worker-prereq-tests
+++ b/scripts/autobuilder-worker-prereq-tests
@@ -1,5 +1,7 @@
#!/bin/bash
#
+# Copyright OpenEmbedded Contributors
+#
# Script which can be run on new autobuilder workers to check all needed configuration is present.
# Designed to be run in a repo where bitbake/oe-core are already present.
#
@@ -35,6 +37,11 @@ if [ "$?" != "0" ]; then
echo "Please set git config --global user.email"
exit 1
fi
+python3 -c "import jinja2"
+if [ "$?" != "0" ]; then
+ echo "Please ensure jinja2 is available"
+ exit 1
+fi
bitbake -p
if [ "$?" != "0" ]; then
echo "Bitbake parsing failed"
@@ -46,16 +53,31 @@ if (( $WATCHES < 65000 )); then
echo 'Need to increase watches (echo fs.inotify.max_user_watches=65536 | sudo tee -a /etc/sysctl.conf)'
exit 1
fi
+OPEN_FILES=$(ulimit -n)
+if (( $OPEN_FILES < 65535 )); then
+ echo 'Increase maximum open files in /etc/security/limits.conf'
+ echo '* soft nofile 131072'
+ echo '* hard nofile 131072'
+ exit 1
+fi
+MAX_PROCESSES=$(ulimit -u)
+if (( $MAX_PROCESSES < 514542 )); then
+ echo 'Increase maximum user processes in /etc/security/limits.conf'
+ echo '* hard nproc 515294'
+ echo '* soft nproc 514543'
+ exit 1
+fi
+
mkdir -p tmp/deploy/images/qemux86-64
pushd tmp/deploy/images/qemux86-64
if [ ! -e core-image-minimal-qemux86-64.ext4 ]; then
- wget http://downloads.yoctoproject.org/releases/yocto/yocto-2.5.1/machines/qemu/qemux86-64/core-image-minimal-qemux86-64.ext4
+ wget http://downloads.yoctoproject.org/releases/yocto/yocto-4.0/machines/qemu/qemux86-64/core-image-minimal-qemux86-64.ext4
fi
if [ ! -e core-image-minimal-qemux86-64.qemuboot.conf ]; then
- wget http://downloads.yoctoproject.org/releases/yocto/yocto-2.5.1/machines/qemu/qemux86-64/core-image-minimal-qemux86-64.qemuboot.conf
+ wget http://downloads.yoctoproject.org/releases/yocto/yocto-4.0/machines/qemu/qemux86-64/core-image-minimal-qemux86-64.qemuboot.conf
fi
if [ ! -e bzImage-qemux86-64.bin ]; then
- wget http://downloads.yoctoproject.org/releases/yocto/yocto-2.5.1/machines/qemu/qemux86-64/bzImage-qemux86-64.bin
+ wget http://downloads.yoctoproject.org/releases/yocto/yocto-4.0/machines/qemu/qemux86-64/bzImage-qemux86-64.bin
fi
popd
bitbake qemu-helper-native
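
The new limit checks above use ulimit from bash. For reference, a rough Python equivalent (a sketch, not part of the script) can query the same soft limits through the standard resource module:

#!/usr/bin/env python3
# Sketch: query the soft limits the bash checks above test, via the
# standard 'resource' module (RLIMIT_NPROC is Linux/BSD-specific).
import resource

def check(limit, minimum, name):
    soft, _hard = resource.getrlimit(limit)
    # RLIM_INFINITY means "unlimited", which always satisfies the minimum
    if soft != resource.RLIM_INFINITY and soft < minimum:
        print("%s too low: %d < %d" % (name, soft, minimum))
        return False
    return True

ok = check(resource.RLIMIT_NOFILE, 65535, "max open files (ulimit -n)")
ok = check(resource.RLIMIT_NPROC, 514542, "max user processes (ulimit -u)") and ok
raise SystemExit(0 if ok else 1)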
diff --git a/scripts/bblock b/scripts/bblock
new file mode 100755
index 0000000000..0082059af8
--- /dev/null
+++ b/scripts/bblock
@@ -0,0 +1,184 @@
+#!/usr/bin/env python3
+# bblock
+# lock/unlock task to latest signature
+#
+# Copyright (c) 2023 BayLibre, SAS
+# Author: Julien Stephan <jstephan@baylibre.com>
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import os
+import sys
+import logging
+
+scripts_path = os.path.dirname(os.path.realpath(__file__))
+lib_path = scripts_path + "/lib"
+sys.path = sys.path + [lib_path]
+
+import scriptpath
+
+scriptpath.add_bitbake_lib_path()
+
+import bb.tinfoil
+import bb.msg
+
+import argparse_oe
+
+myname = os.path.basename(sys.argv[0])
+logger = bb.msg.logger_create(myname)
+
+
+def getTaskSignatures(tinfoil, pn, tasks):
+ tinfoil.set_event_mask(
+ [
+ "bb.event.GetTaskSignatureResult",
+ "logging.LogRecord",
+ "bb.command.CommandCompleted",
+ "bb.command.CommandFailed",
+ ]
+ )
+ ret = tinfoil.run_command("getTaskSignatures", pn, tasks)
+ if ret:
+ while True:
+ event = tinfoil.wait_event(1)
+ if event:
+ if isinstance(event, bb.command.CommandCompleted):
+ break
+ elif isinstance(event, bb.command.CommandFailed):
+ logger.error(str(event))
+ sys.exit(2)
+ elif isinstance(event, bb.event.GetTaskSignatureResult):
+ sig = event.sig
+ elif isinstance(event, logging.LogRecord):
+ logger.handle(event)
+ else:
+ logger.error("No result returned from getTaskSignatures command")
+ sys.exit(2)
+ return sig
+
+
+def parseRecipe(tinfoil, recipe):
+ try:
+ tinfoil.parse_recipes()
+ d = tinfoil.parse_recipe(recipe)
+ except Exception:
+ logger.error("Failed to get recipe info for: %s" % recipe)
+ sys.exit(1)
+ return d
+
+
+def bblockDump(lockfile):
+ try:
+ with open(lockfile, "r") as lockfile:
+ for line in lockfile:
+ print(line.strip())
+ except IOError:
+ return 1
+ return 0
+
+
+def bblockReset(lockfile, pns, package_archs, tasks):
+ if not pns:
+ logger.info("Unlocking all recipes")
+ try:
+ os.remove(lockfile)
+ except FileNotFoundError:
+ pass
+ else:
+ logger.info("Unlocking {pns}".format(pns=pns))
+ tmp_lockfile = lockfile + ".tmp"
+ with open(lockfile, "r") as infile, open(tmp_lockfile, "w") as outfile:
+ for line in infile:
+ if not (
+ any(element in line for element in pns)
+ and any(element in line for element in package_archs.split())
+ ):
+ outfile.write(line)
+ else:
+ if tasks and not any(element in line for element in tasks):
+ outfile.write(line)
+ os.remove(lockfile)
+ os.rename(tmp_lockfile, lockfile)
+
+
+def main():
+ parser = argparse_oe.ArgumentParser(description="Lock and unlock a recipe")
+ parser.add_argument("pn", nargs="*", help="Space separated list of recipe to lock")
+ parser.add_argument(
+ "-t",
+ "--tasks",
+ help="Comma separated list of tasks",
+ type=lambda s: [
+ task if task.startswith("do_") else "do_" + task for task in s.split(",")
+ ],
+ )
+ parser.add_argument(
+ "-r",
+ "--reset",
+ action="store_true",
+ help="Unlock pn recipes, or all recipes if pn is empty",
+ )
+ parser.add_argument(
+ "-d",
+ "--dump",
+ action="store_true",
+ help="Dump generated bblock.conf file",
+ )
+
+ global_args, unparsed_args = parser.parse_known_args()
+
+ with bb.tinfoil.Tinfoil() as tinfoil:
+ tinfoil.prepare(config_only=True)
+
+ package_archs = tinfoil.config_data.getVar("PACKAGE_ARCHS")
+ builddir = tinfoil.config_data.getVar("TOPDIR")
+ lockfile = "{builddir}/conf/bblock.conf".format(builddir=builddir)
+
+ if global_args.dump:
+ bblockDump(lockfile)
+ return 0
+
+ if global_args.reset:
+ bblockReset(lockfile, global_args.pn, package_archs, global_args.tasks)
+ return 0
+
+ with open(lockfile, "a") as lockfile:
+ s = ""
+ if lockfile.tell() == 0:
+ s = "# Generated by bblock\n"
+ s += 'SIGGEN_LOCKEDSIGS_TASKSIG_CHECK = "info"\n'
+ s += 'SIGGEN_LOCKEDSIGS_TYPES += "${PACKAGE_ARCHS}"\n'
+ s += "\n"
+
+ for pn in global_args.pn:
+ d = parseRecipe(tinfoil, pn)
+ package_arch = d.getVar("PACKAGE_ARCH")
+ siggen_locked_sigs_package_arch = d.getVar(
+ "SIGGEN_LOCKEDSIGS_{package_arch}".format(package_arch=package_arch)
+ )
+ sigs = getTaskSignatures(tinfoil, [pn], global_args.tasks)
+ for sig in sigs:
+ new_entry = "{pn}:{taskname}:{sig}".format(
+ pn=sig[0], taskname=sig[1], sig=sig[2]
+ )
+            if (
+                not siggen_locked_sigs_package_arch
+                or new_entry not in siggen_locked_sigs_package_arch
+            ):
+ s += 'SIGGEN_LOCKEDSIGS_{package_arch} += "{new_entry}"\n'.format(
+ package_arch=package_arch, new_entry=new_entry
+ )
+ lockfile.write(s)
+ return 0
+
+
+if __name__ == "__main__":
+ try:
+ ret = main()
+ except Exception:
+ ret = 1
+ import traceback
+
+ traceback.print_exc()
+ sys.exit(ret)
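
Each locked entry bblock appends has the form SIGGEN_LOCKEDSIGS_<package-arch> += "<pn>:<task>:<hash>". A sketch of parsing such entries back out of conf/bblock.conf (illustrative only; bblock itself only appends entries, and -d dumps the raw file):

# Sketch (not part of bblock): parse the pn:task:hash entries appended to
# conf/bblock.conf, e.g.
#   SIGGEN_LOCKEDSIGS_core2-64 += "zlib:do_compile:3f7a..."  (made-up values)
import re

ENTRY_RE = re.compile(
    r'^SIGGEN_LOCKEDSIGS_(?P<arch>\S+) \+= "(?P<pn>[^:]+):(?P<task>[^:]+):(?P<sig>[0-9a-f]+)"$'
)

def parse_bblock_conf(path):
    locked = []
    with open(path) as f:
        for line in f:
            m = ENTRY_RE.match(line.strip())
            if m:
                locked.append(m.groupdict())
    return locked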
diff --git a/scripts/bitbake-prserv-tool b/scripts/bitbake-prserv-tool
index e55d98c72e..80028342b1 100755
--- a/scripts/bitbake-prserv-tool
+++ b/scripts/bitbake-prserv-tool
@@ -1,5 +1,7 @@
#!/usr/bin/env bash
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
@@ -15,8 +17,11 @@ help ()
clean_cache()
{
s=`bitbake -e | grep ^CACHE= | cut -f2 -d\"`
+ # Stop any active memory resident server
+ bitbake -m
+ # Remove cache entries since we want to trigger a full reparse
if [ "x${s}" != "x" ]; then
- rm -rf ${s}
+ rm -f ${s}/bb_cache*.dat.*
fi
}
@@ -60,7 +65,7 @@ do_migrate_localcount ()
return 1
fi
- rm -rf $df
+ rm -f $df
clean_cache
echo "Exporting LOCALCOUNT to AUTOINCs..."
bitbake -R conf/migrate_localcount.conf -p
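
The narrowed clean_cache() now stops any memory-resident server and deletes only the parser cache files, which is enough to force a full reparse without discarding the rest of the cache directory. Roughly equivalent Python for the removal step (the directory below is an assumption; the script reads the real CACHE value from bitbake -e):

# Sketch: delete only bitbake's parser cache files to force a full reparse.
import glob
import os

cache_dir = "tmp/cache"  # assumed; clean_cache() extracts CACHE from 'bitbake -e'
for path in glob.glob(os.path.join(cache_dir, "bb_cache*.dat.*")):
    os.remove(path)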
diff --git a/scripts/bitbake-whatchanged b/scripts/bitbake-whatchanged
deleted file mode 100755
index 3095dafa46..0000000000
--- a/scripts/bitbake-whatchanged
+++ /dev/null
@@ -1,320 +0,0 @@
-#!/usr/bin/env python3
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
-
-# Copyright (c) 2013 Wind River Systems, Inc.
-#
-# SPDX-License-Identifier: GPL-2.0-only
-#
-
-import os
-import sys
-import getopt
-import shutil
-import re
-import warnings
-import subprocess
-import argparse
-
-scripts_path = os.path.abspath(os.path.dirname(os.path.abspath(sys.argv[0])))
-lib_path = scripts_path + '/lib'
-sys.path = sys.path + [lib_path]
-
-import scriptpath
-
-# Figure out where is the bitbake/lib/bb since we need bb.siggen and bb.process
-bitbakepath = scriptpath.add_bitbake_lib_path()
-if not bitbakepath:
- sys.stderr.write("Unable to find bitbake by searching parent directory of this script or PATH\n")
- sys.exit(1)
-scriptpath.add_oe_lib_path()
-import argparse_oe
-
-import bb.siggen
-import bb.process
-
-# Match the stamp's filename
-# group(1): PE_PV (may no PE)
-# group(2): PR
-# group(3): TASK
-# group(4): HASH
-stamp_re = re.compile("(?P<pv>.*)-(?P<pr>r\d+)\.(?P<task>do_\w+)\.(?P<hash>[^\.]*)")
-sigdata_re = re.compile(".*\.sigdata\..*")
-
-def gen_dict(stamps):
- """
- Generate the dict from the stamps dir.
- The output dict format is:
- {fake_f: {pn: PN, pv: PV, pr: PR, task: TASK, path: PATH}}
- Where:
- fake_f: pv + task + hash
- path: the path to the stamp file
- """
- # The member of the sub dict (A "path" will be appended below)
- sub_mem = ("pv", "pr", "task")
- d = {}
- for dirpath, _, files in os.walk(stamps):
- for f in files:
- # The "bitbake -S" would generate ".sigdata", but no "_setscene".
- fake_f = re.sub('_setscene.', '.', f)
- fake_f = re.sub('.sigdata', '', fake_f)
- subdict = {}
- tmp = stamp_re.match(fake_f)
- if tmp:
- for i in sub_mem:
- subdict[i] = tmp.group(i)
- if len(subdict) != 0:
- pn = os.path.basename(dirpath)
- subdict['pn'] = pn
- # The path will be used by os.stat() and bb.siggen
- subdict['path'] = dirpath + "/" + f
- fake_f = tmp.group('pv') + tmp.group('task') + tmp.group('hash')
- d[fake_f] = subdict
- return d
-
-# Re-construct the dict
-def recon_dict(dict_in):
- """
- The output dict format is:
- {pn_task: {pv: PV, pr: PR, path: PATH}}
- """
- dict_out = {}
- for k in dict_in.keys():
- subdict = {}
- # The key
- pn_task = "%s_%s" % (dict_in.get(k).get('pn'), dict_in.get(k).get('task'))
- # If more than one stamps are found, use the latest one.
- if pn_task in dict_out:
- full_path_pre = dict_out.get(pn_task).get('path')
- full_path_cur = dict_in.get(k).get('path')
- if os.stat(full_path_pre).st_mtime > os.stat(full_path_cur).st_mtime:
- continue
- subdict['pv'] = dict_in.get(k).get('pv')
- subdict['pr'] = dict_in.get(k).get('pr')
- subdict['path'] = dict_in.get(k).get('path')
- dict_out[pn_task] = subdict
-
- return dict_out
-
-def split_pntask(s):
- """
- Split the pn_task in to (pn, task) and return it
- """
- tmp = re.match("(.*)_(do_.*)", s)
- return (tmp.group(1), tmp.group(2))
-
-
-def print_added(d_new = None, d_old = None):
- """
- Print the newly added tasks
- """
- added = {}
- for k in list(d_new.keys()):
- if k not in d_old:
- # Add the new one to added dict, and remove it from
- # d_new, so the remaining ones are the changed ones
- added[k] = d_new.get(k)
- del(d_new[k])
-
- if not added:
- return 0
-
- # Format the output, the dict format is:
- # {pn: task1, task2 ...}
- added_format = {}
- counter = 0
- for k in added.keys():
- pn, task = split_pntask(k)
- if pn in added_format:
- # Append the value
- added_format[pn] = "%s %s" % (added_format.get(pn), task)
- else:
- added_format[pn] = task
- counter += 1
- print("=== Newly added tasks: (%s tasks)" % counter)
- for k in added_format.keys():
- print(" %s: %s" % (k, added_format.get(k)))
-
- return counter
-
-def print_vrchanged(d_new = None, d_old = None, vr = None):
- """
- Print the pv or pr changed tasks.
- The arg "vr" is "pv" or "pr"
- """
- pvchanged = {}
- counter = 0
- for k in list(d_new.keys()):
- if d_new.get(k).get(vr) != d_old.get(k).get(vr):
- counter += 1
- pn, task = split_pntask(k)
- if pn not in pvchanged:
- # Format the output, we only print pn (no task) since
- # all the tasks would be changed when pn or pr changed,
- # the dict format is:
- # {pn: pv/pr_old -> pv/pr_new}
- pvchanged[pn] = "%s -> %s" % (d_old.get(k).get(vr), d_new.get(k).get(vr))
- del(d_new[k])
-
- if not pvchanged:
- return 0
-
- print("\n=== %s changed: (%s tasks)" % (vr.upper(), counter))
- for k in pvchanged.keys():
- print(" %s: %s" % (k, pvchanged.get(k)))
-
- return counter
-
-def print_depchanged(d_new = None, d_old = None, verbose = False):
- """
- Print the dependency changes
- """
- depchanged = {}
- counter = 0
- for k in d_new.keys():
- counter += 1
- pn, task = split_pntask(k)
- if (verbose):
- full_path_old = d_old.get(k).get("path")
- full_path_new = d_new.get(k).get("path")
- # No counter since it is not ready here
- if sigdata_re.match(full_path_old) and sigdata_re.match(full_path_new):
- output = bb.siggen.compare_sigfiles(full_path_old, full_path_new)
- if output:
- print("\n=== The verbose changes of %s.%s:" % (pn, task))
- print('\n'.join(output))
- else:
- # Format the output, the format is:
- # {pn: task1, task2, ...}
- if pn in depchanged:
- depchanged[pn] = "%s %s" % (depchanged.get(pn), task)
- else:
- depchanged[pn] = task
-
- if len(depchanged) > 0:
- print("\n=== Dependencies changed: (%s tasks)" % counter)
- for k in depchanged.keys():
- print(" %s: %s" % (k, depchanged[k]))
-
- return counter
-
-
-def main():
- """
- Print what will be done between the current and last builds:
- 1) Run "STAMPS_DIR=<path> bitbake -S recipe" to re-generate the stamps
- 2) Figure out what are newly added and changed, can't figure out
- what are removed since we can't know the previous stamps
- clearly, for example, if there are several builds, we can't know
- which stamps the last build has used exactly.
- 3) Use bb.siggen.compare_sigfiles to diff the old and new stamps
- """
-
- parser = argparse_oe.ArgumentParser(usage = """%(prog)s [options] [package ...]
-print what will be done between the current and last builds, for example:
-
- $ bitbake core-image-sato
- # Edit the recipes
- $ bitbake-whatchanged core-image-sato
-
-The changes will be printed"
-
-Note:
- The amount of tasks is not accurate when the task is "do_build" since
- it usually depends on other tasks.
- The "nostamp" task is not included.
-"""
-)
- parser.add_argument("recipe", help="recipe to check")
- parser.add_argument("-v", "--verbose", help = "print the verbose changes", action = "store_true")
- args = parser.parse_args()
-
- # Get the STAMPS_DIR
- print("Figuring out the STAMPS_DIR ...")
- cmdline = "bitbake -e | sed -ne 's/^STAMPS_DIR=\"\(.*\)\"/\\1/p'"
- try:
- stampsdir, err = bb.process.run(cmdline)
- except:
- raise
- if not stampsdir:
- print("ERROR: No STAMPS_DIR found for '%s'" % args.recipe, file=sys.stderr)
- return 2
- stampsdir = stampsdir.rstrip("\n")
- if not os.path.isdir(stampsdir):
- print("ERROR: stamps directory \"%s\" not found!" % stampsdir, file=sys.stderr)
- return 2
-
- # The new stamps dir
- new_stampsdir = stampsdir + ".bbs"
- if os.path.exists(new_stampsdir):
- print("ERROR: %s already exists!" % new_stampsdir, file=sys.stderr)
- return 2
-
- try:
- # Generate the new stamps dir
- print("Generating the new stamps ... (need several minutes)")
- cmdline = "STAMPS_DIR=%s bitbake -S none %s" % (new_stampsdir, args.recipe)
- # FIXME
- # The "bitbake -S" may fail, not fatal error, the stamps will still
- # be generated, this might be a bug of "bitbake -S".
- try:
- bb.process.run(cmdline)
- except Exception as exc:
- print(exc)
-
- # The dict for the new and old stamps.
- old_dict = gen_dict(stampsdir)
- new_dict = gen_dict(new_stampsdir)
-
- # Remove the same one from both stamps.
- cnt_unchanged = 0
- for k in list(new_dict.keys()):
- if k in old_dict:
- cnt_unchanged += 1
- del(new_dict[k])
- del(old_dict[k])
-
- # Re-construct the dict to easily find out what is added or changed.
- # The dict format is:
- # {pn_task: {pv: PV, pr: PR, path: PATH}}
- new_recon = recon_dict(new_dict)
- old_recon = recon_dict(old_dict)
-
- del new_dict
- del old_dict
-
- # Figure out what are changed, the new_recon would be changed
- # by the print_xxx function.
- # Newly added
- cnt_added = print_added(new_recon, old_recon)
-
- # PV (including PE) and PR changed
- # Let the bb.siggen handle them if verbose
- cnt_rv = {}
- if not args.verbose:
- for i in ('pv', 'pr'):
- cnt_rv[i] = print_vrchanged(new_recon, old_recon, i)
-
- # Dependencies changed (use bitbake-diffsigs)
- cnt_dep = print_depchanged(new_recon, old_recon, args.verbose)
-
- total_changed = cnt_added + (cnt_rv.get('pv') or 0) + (cnt_rv.get('pr') or 0) + cnt_dep
-
- print("\n=== Summary: (%s changed, %s unchanged)" % (total_changed, cnt_unchanged))
- if args.verbose:
- print("Newly added: %s\nDependencies changed: %s\n" % \
- (cnt_added, cnt_dep))
- else:
- print("Newly added: %s\nPV changed: %s\nPR changed: %s\nDependencies changed: %s\n" % \
- (cnt_added, cnt_rv.get('pv') or 0, cnt_rv.get('pr') or 0, cnt_dep))
- except:
- print("ERROR occurred!")
- raise
- finally:
- # Remove the newly generated stamps dir
- if os.path.exists(new_stampsdir):
- print("Removing the newly generated stamps dir ...")
- shutil.rmtree(new_stampsdir)
-
-if __name__ == "__main__":
- sys.exit(main())
diff --git a/scripts/buildhistory-collect-srcrevs b/scripts/buildhistory-collect-srcrevs
index 340bee78bb..c937e49c2a 100755
--- a/scripts/buildhistory-collect-srcrevs
+++ b/scripts/buildhistory-collect-srcrevs
@@ -53,7 +53,7 @@ def main():
sys.exit(1)
if options.forcevariable:
- forcevariable = '_forcevariable'
+ forcevariable = ':forcevariable'
else:
forcevariable = ''
@@ -99,9 +99,9 @@ def main():
print('# %s' % curdir)
for pn, name, srcrev in srcrevs:
if name:
- print('SRCREV_%s_pn-%s%s = "%s"' % (name, pn, forcevariable, srcrev))
+ print('SRCREV_%s:pn-%s%s = "%s"' % (name, pn, forcevariable, srcrev))
else:
- print('SRCREV_pn-%s%s = "%s"' % (pn, forcevariable, srcrev))
+ print('SRCREV:pn-%s%s = "%s"' % (pn, forcevariable, srcrev))
if __name__ == "__main__":
diff --git a/scripts/buildhistory-diff b/scripts/buildhistory-diff
index 3bd40a2a1e..a6e785aa23 100755
--- a/scripts/buildhistory-diff
+++ b/scripts/buildhistory-diff
@@ -11,7 +11,6 @@
import sys
import os
import argparse
-from distutils.version import LooseVersion
# Ensure PythonGit is installed (buildhistory_analysis needs it)
try:
@@ -73,10 +72,6 @@ def main():
parser = get_args_parser()
args = parser.parse_args()
- if LooseVersion(git.__version__) < '0.3.1':
- sys.stderr.write("Version of GitPython is too old, please install GitPython (python-git) 0.3.1 or later in order to use this script\n")
- sys.exit(1)
-
if len(args.revisions) > 2:
sys.stderr.write('Invalid argument(s) specified: %s\n\n' % ' '.join(args.revisions[2:]))
parser.print_help()
diff --git a/scripts/buildstats-diff b/scripts/buildstats-diff
index 2f6498ab67..c9aa76a8fa 100755
--- a/scripts/buildstats-diff
+++ b/scripts/buildstats-diff
@@ -1,4 +1,4 @@
-#!/usr/bin/python3
+#!/usr/bin/env python3
#
# Script for comparing buildstats from two different builds
#
diff --git a/scripts/buildstats-summary b/scripts/buildstats-summary
new file mode 100755
index 0000000000..b10c671b29
--- /dev/null
+++ b/scripts/buildstats-summary
@@ -0,0 +1,126 @@
+#!/usr/bin/env python3
+#
+# Dump a summary of the specified buildstats to the terminal, filtering and
+# sorting by walltime.
+#
+# SPDX-License-Identifier: GPL-2.0-only
+
+import argparse
+import dataclasses
+import datetime
+import enum
+import os
+import pathlib
+import sys
+
+scripts_path = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(scripts_path, "lib"))
+import buildstats
+
+
+@dataclasses.dataclass
+class Task:
+ recipe: str
+ task: str
+ start: datetime.datetime
+ duration: datetime.timedelta
+
+
+class Sorting(enum.Enum):
+ start = 1
+ duration = 2
+
+ # argparse integration
+ def __str__(self) -> str:
+ return self.name
+
+ def __repr__(self) -> str:
+ return self.name
+
+ @staticmethod
+ def from_string(s: str):
+ try:
+ return Sorting[s]
+ except KeyError:
+ return s
+
+
+def read_buildstats(path: pathlib.Path) -> buildstats.BuildStats:
+ if not path.exists():
+ raise Exception(f"No such file or directory: {path}")
+ if path.is_file():
+ return buildstats.BuildStats.from_file_json(path)
+ if (path / "build_stats").is_file():
+ return buildstats.BuildStats.from_dir(path)
+ raise Exception(f"Cannot find buildstats in {path}")
+
+
+def dump_buildstats(args, bs: buildstats.BuildStats):
+ tasks = []
+ for recipe in bs.values():
+ for task, stats in recipe.tasks.items():
+ t = Task(
+ recipe.name,
+ task,
+ datetime.datetime.fromtimestamp(stats["start_time"]),
+ datetime.timedelta(seconds=int(stats.walltime)),
+ )
+ tasks.append(t)
+
+ tasks.sort(key=lambda t: getattr(t, args.sort.name))
+
+ minimum = datetime.timedelta(seconds=args.shortest)
+ highlight = datetime.timedelta(seconds=args.highlight)
+
+ for t in tasks:
+ if t.duration >= minimum:
+ line = f"{t.duration} {t.recipe}:{t.task}"
+ if args.highlight and t.duration >= highlight:
+ print(f"\033[1m{line}\033[0m")
+ else:
+ print(line)
+
+
+def main(argv=None) -> int:
+ parser = argparse.ArgumentParser(
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter
+ )
+
+ parser.add_argument(
+ "buildstats", metavar="BUILDSTATS", help="Buildstats file", type=pathlib.Path
+ )
+ parser.add_argument(
+ "--sort",
+ "-s",
+ type=Sorting.from_string,
+ choices=list(Sorting),
+ default=Sorting.start,
+ help="Sort tasks",
+ )
+ parser.add_argument(
+ "--shortest",
+ "-t",
+ type=int,
+ default=1,
+ metavar="SECS",
+ help="Hide tasks shorter than SECS seconds",
+ )
+ parser.add_argument(
+ "--highlight",
+ "-g",
+ type=int,
+ default=60,
+ metavar="SECS",
+ help="Highlight tasks longer than SECS seconds (0 disabled)",
+ )
+
+ args = parser.parse_args(argv)
+
+ bs = read_buildstats(args.buildstats)
+ dump_buildstats(args, bs)
+
+ return 0
+
+
+if __name__ == "__main__":
+ sys.exit(main())
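
Because main() accepts an argv list, the summary can also be driven programmatically. A hedged example (the script and buildstats paths below are assumptions about the working directory):

# Sketch: load the extensionless script as a module and call its main().
import importlib.util
import sys
from importlib.machinery import SourceFileLoader

loader = SourceFileLoader("bs_summary", "scripts/buildstats-summary")
spec = importlib.util.spec_from_loader("bs_summary", loader)
mod = importlib.util.module_from_spec(spec)
loader.exec_module(mod)

# Sort by duration and hide tasks shorter than five seconds
sys.exit(mod.main(["--sort", "duration", "--shortest", "5",
                   "tmp/buildstats/20240101000000"]))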
diff --git a/scripts/combo-layer b/scripts/combo-layer
index a634dd69d2..4a715914af 100755
--- a/scripts/combo-layer
+++ b/scripts/combo-layer
@@ -19,9 +19,8 @@ import tempfile
import configparser
import re
import copy
-import pipes
+import shlex
import shutil
-from collections import OrderedDict
from string import Template
from functools import reduce
@@ -192,6 +191,23 @@ def runcmd(cmd,destdir=None,printerr=True,out=None,env=None):
logger.debug("output: %s" % output.replace(chr(0), '\\0'))
return output
+def action_sync_revs(conf, args):
+ """
+ Update the last_revision config option for each repo with the latest
+ revision in the remote's branch. Useful if multiple people are using
+ combo-layer.
+ """
+ repos = get_repos(conf, args[1:])
+
+ for name in repos:
+ repo = conf.repos[name]
+ ldir = repo['local_repo_dir']
+ branch = repo.get('branch', "master")
+ runcmd("git fetch", ldir)
+ lastrev = runcmd('git rev-parse origin/%s' % branch, ldir).strip()
+ print("Updating %s to %s" % (name, lastrev))
+ conf.update(name, "last_revision", lastrev)
+
def action_init(conf, args):
"""
Clone component repositories
@@ -467,7 +483,7 @@ def check_repo_clean(repodir):
exit if repo is dirty
"""
output=runcmd("git status --porcelain", repodir)
- r = re.compile('\?\? patch-.*/')
+ r = re.compile(r'\?\? patch-.*/')
dirtyout = [item for item in output.splitlines() if not r.match(item)]
if dirtyout:
logger.error("git repo %s is dirty, please fix it first", repodir)
@@ -508,7 +524,7 @@ def check_patch(patchfile):
f.close()
if of:
of.close()
- os.rename(patchfile + '.tmp', patchfile)
+ os.rename(of.name, patchfile)
def drop_to_shell(workdir=None):
if not sys.stdin.isatty():
@@ -1259,7 +1275,7 @@ def apply_commit(parent, rev, largs, wargs, dest_dir, file_filter=None):
target = os.path.join(wargs["destdir"], dest_dir)
if not os.path.isdir(target):
os.makedirs(target)
- quoted_target = pipes.quote(target)
+ quoted_target = shlex.quote(target)
# os.sysconf('SC_ARG_MAX') is lying: running a command with
# string length 629343 already failed with "Argument list too
# long" although SC_ARG_MAX = 2097152. "man execve" explains
@@ -1271,7 +1287,7 @@ def apply_commit(parent, rev, largs, wargs, dest_dir, file_filter=None):
unquoted_args = []
cmdsize = 100 + len(quoted_target)
while update:
- quoted_next = pipes.quote(update[0])
+ quoted_next = shlex.quote(update[0])
size_next = len(quoted_next) + len(dest_dir) + 1
logger.debug('cmdline length %d + %d < %d?' % (cmdsize, size_next, os.sysconf('SC_ARG_MAX')))
if cmdsize + size_next < max_cmdsize:
@@ -1302,6 +1318,7 @@ actions = {
"update": action_update,
"pull": action_pull,
"splitpatch": action_splitpatch,
+ "sync-revs": action_sync_revs,
}
def main():
@@ -1312,10 +1329,11 @@ def main():
Create and update a combination layer repository from multiple component repositories.
Action:
- init initialise the combo layer repo
- update [components] get patches from component repos and apply them to the combo repo
- pull [components] just pull component repos only
- splitpatch [commit] generate commit patch and split per component, default commit is HEAD""")
+ init initialise the combo layer repo
+ update [components] get patches from component repos and apply them to the combo repo
+ pull [components] just pull component repos only
+ sync-revs [components] update the config file's last_revision for each repository
+ splitpatch [commit] generate commit patch and split per component, default commit is HEAD""")
parser.add_option("-c", "--conf", help = "specify the config file (conf/combo-layer.conf is the default).",
action = "store", dest = "conffile", default = "conf/combo-layer.conf")
diff --git a/scripts/combo-layer-hook-default.sh b/scripts/combo-layer-hook-default.sh
index 11547a9826..fb9651b31f 100755
--- a/scripts/combo-layer-hook-default.sh
+++ b/scripts/combo-layer-hook-default.sh
@@ -1,5 +1,7 @@
#!/bin/sh
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
# Hook to add source component/revision info to commit message
diff --git a/scripts/contrib/bb-perf/bb-matrix-plot.sh b/scripts/contrib/bb-perf/bb-matrix-plot.sh
index e7bd129e9e..6672189c95 100755
--- a/scripts/contrib/bb-perf/bb-matrix-plot.sh
+++ b/scripts/contrib/bb-perf/bb-matrix-plot.sh
@@ -16,8 +16,8 @@
# Setup the defaults
DATFILE="bb-matrix.dat"
-XLABEL="BB_NUMBER_THREADS"
-YLABEL="PARALLEL_MAKE"
+XLABEL="BB\\\\_NUMBER\\\\_THREADS"
+YLABEL="PARALLEL\\\\_MAKE"
FIELD=3
DEF_TITLE="Elapsed Time (seconds)"
PM3D_FRAGMENT="unset surface; set pm3d at s hidden3d 100"
diff --git a/scripts/contrib/bbvars.py b/scripts/contrib/bbvars.py
index 090133600b..a9cdf082ab 100755
--- a/scripts/contrib/bbvars.py
+++ b/scripts/contrib/bbvars.py
@@ -36,8 +36,8 @@ def bbvar_is_documented(var, documented_vars):
def collect_documented_vars(docfiles):
''' Walk the docfiles and collect the documented variables '''
documented_vars = []
- prog = re.compile(".*($|[^A-Z_])<glossentry id=\'var-")
- var_prog = re.compile('<glossentry id=\'var-(.*)\'>')
+ prog = re.compile(r".*($|[^A-Z_])<glossentry id=\'var-")
+ var_prog = re.compile(r'<glossentry id=\'var-(.*)\'>')
for d in docfiles:
with open(d) as f:
documented_vars += var_prog.findall(f.read())
@@ -45,7 +45,7 @@ def collect_documented_vars(docfiles):
return documented_vars
def bbvar_doctag(var, docconf):
- prog = re.compile('^%s\[doc\] *= *"(.*)"' % (var))
+ prog = re.compile(r'^%s\[doc\] *= *"(.*)"' % (var))
if docconf == "":
return "?"
diff --git a/scripts/contrib/build-perf-test-wrapper.sh b/scripts/contrib/build-perf-test-wrapper.sh
index fa71d4a2e9..0a85e6e708 100755
--- a/scripts/contrib/build-perf-test-wrapper.sh
+++ b/scripts/contrib/build-perf-test-wrapper.sh
@@ -87,21 +87,10 @@ if [ $# -ne 0 ]; then
exit 1
fi
-if [ -n "$email_to" ]; then
- if ! [ -x "$(command -v phantomjs)" ]; then
- echo "ERROR: Sending email needs phantomjs."
- exit 1
- fi
- if ! [ -x "$(command -v optipng)" ]; then
- echo "ERROR: Sending email needs optipng."
- exit 1
- fi
-fi
-
# Open a file descriptor for flock and acquire lock
LOCK_FILE="/tmp/oe-build-perf-test-wrapper.lock"
if ! exec 3> "$LOCK_FILE"; then
- echo "ERROR: Unable to open lock file"
+ echo "ERROR: Unable to open loemack file"
exit 1
fi
if ! flock -n 3; then
@@ -226,7 +215,7 @@ if [ -n "$results_repo" ]; then
if [ -n "$email_to" ]; then
echo "Emailing test report"
os_name=`get_os_release_var PRETTY_NAME`
- "$script_dir"/oe-build-perf-report-email.py --to "$email_to" --subject "Build Perf Test Report for $os_name" --text $report_txt --html $report_html "${OE_BUILD_PERF_REPORT_EMAIL_EXTRA_ARGS[@]}"
+ "$script_dir"/oe-build-perf-report-email.py --to "$email_to" --subject "Build Perf Test Report for $os_name" --text $report_txt "${OE_BUILD_PERF_REPORT_EMAIL_EXTRA_ARGS[@]}"
fi
# Upload report files, unless we're on detached head
diff --git a/scripts/contrib/convert-overrides.py b/scripts/contrib/convert-overrides.py
new file mode 100755
index 0000000000..c69acb4095
--- /dev/null
+++ b/scripts/contrib/convert-overrides.py
@@ -0,0 +1,155 @@
+#!/usr/bin/env python3
+#
+# Conversion script to add new override syntax to existing bitbake metadata
+#
+# Copyright (C) 2021 Richard Purdie
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+#
+# To use this script on a new layer you need to list the overrides the
+# layer is known to use in the list below.
+#
+# Known constraint: Matching is 'loose' and in particular will find variable
+# and function names with "_append" and "_remove" in them. Those need to be
+# filtered out manually or in the skip list below.
+#
+
+import re
+import os
+import sys
+import tempfile
+import shutil
+import mimetypes
+import argparse
+
+parser = argparse.ArgumentParser(description="Convert override syntax")
+parser.add_argument("--override", "-o", action="append", default=[], help="Add additional strings to consider as an override (e.g. custom machines/distros")
+parser.add_argument("--skip", "-s", action="append", default=[], help="Add additional string to skip and not consider an override")
+parser.add_argument("--skip-ext", "-e", action="append", default=[], help="Additional file suffixes to skip when processing (e.g. '.foo')")
+parser.add_argument("--package-vars", action="append", default=[], help="Additional variables to treat as package variables")
+parser.add_argument("--image-vars", action="append", default=[], help="Additional variables to treat as image variables")
+parser.add_argument("--short-override", action="append", default=[], help="Additional strings to treat as short overrides")
+parser.add_argument("path", nargs="+", help="Paths to convert")
+
+args = parser.parse_args()
+
+# List of strings to treat as overrides
+vars = args.override
+vars += ["append", "prepend", "remove"]
+vars += ["qemuarm", "qemux86", "qemumips", "qemuppc", "qemuriscv", "qemuall"]
+vars += ["genericx86", "edgerouter", "beaglebone-yocto"]
+vars += ["armeb", "arm", "armv5", "armv6", "armv4", "powerpc64", "aarch64", "riscv32", "riscv64", "x86", "mips64", "powerpc"]
+vars += ["mipsarch", "x86-x32", "mips16e", "microblaze", "e5500-64b", "mipsisa32", "mipsisa64"]
+vars += ["class-native", "class-target", "class-cross-canadian", "class-cross", "class-devupstream"]
+vars += ["tune-", "pn-", "forcevariable"]
+vars += ["libc-musl", "libc-glibc", "libc-newlib","libc-baremetal"]
+vars += ["task-configure", "task-compile", "task-install", "task-clean", "task-image-qa", "task-rm_work", "task-image-complete", "task-populate-sdk"]
+vars += ["toolchain-clang", "mydistro", "nios2", "sdkmingw32", "overrideone", "overridetwo"]
+vars += ["linux-gnux32", "linux-muslx32", "linux-gnun32", "mingw32", "poky", "darwin", "linuxstdbase"]
+vars += ["linux-gnueabi", "eabi"]
+vars += ["virtclass-multilib", "virtclass-mcextend"]
+
+# List of strings to treat as overrides but only with whitespace following or another override (more restricted matching).
+# Handles issues with arc matching arch.
+shortvars = ["arc", "mips", "mipsel", "sh4"] + args.short_override
+
+# Variables which take packagenames as an override
+packagevars = ["FILES", "RDEPENDS", "RRECOMMENDS", "SUMMARY", "DESCRIPTION", "RSUGGESTS", "RPROVIDES", "RCONFLICTS", "PKG", "ALLOW_EMPTY",
+ "pkg_postrm", "pkg_postinst_ontarget", "pkg_postinst", "INITSCRIPT_NAME", "INITSCRIPT_PARAMS", "DEBIAN_NOAUTONAME", "ALTERNATIVE",
+ "PKGE", "PKGV", "PKGR", "USERADD_PARAM", "GROUPADD_PARAM", "CONFFILES", "SYSTEMD_SERVICE", "LICENSE", "SECTION", "pkg_preinst",
+ "pkg_prerm", "RREPLACES", "GROUPMEMS_PARAM", "SYSTEMD_AUTO_ENABLE", "SKIP_FILEDEPS", "PRIVATE_LIBS", "PACKAGE_ADD_METADATA",
+ "INSANE_SKIP", "DEBIANNAME", "SYSTEMD_SERVICE_ESCAPED"] + args.package_vars
+
+# Expressions to skip if encountered, these are not overrides
+skips = args.skip
+skips += ["parser_append", "recipe_to_append", "extra_append", "to_remove", "show_appends", "applied_appends", "file_appends", "handle_remove"]
+skips += ["expanded_removes", "color_remove", "test_remove", "empty_remove", "toaster_prepend", "num_removed", "licfiles_append", "_write_append"]
+skips += ["no_report_remove", "test_prepend", "test_append", "multiple_append", "test_remove", "shallow_remove", "do_remove_layer", "first_append"]
+skips += ["parser_remove", "to_append", "no_remove", "bblayers_add_remove", "bblayers_remove", "apply_append", "is_x86", "base_dep_prepend"]
+skips += ["autotools_dep_prepend", "go_map_arm", "alt_remove_links", "systemd_append_file", "file_append", "process_file_darwin"]
+skips += ["run_loaddata_poky", "determine_if_poky_env", "do_populate_poky_src", "libc_cv_include_x86_isa_level", "test_rpm_remove", "do_install_armmultilib"]
+skips += ["get_appends_for_files", "test_doubleref_remove", "test_bitbakelayers_add_remove", "elf32_x86_64", "colour_remove", "revmap_remove"]
+skips += ["test_rpm_remove", "test_bitbakelayers_add_remove", "recipe_append_file", "log_data_removed", "recipe_append", "systemd_machine_unit_append"]
+skips += ["recipetool_append", "changetype_remove", "try_appendfile_wc", "test_qemux86_directdisk", "test_layer_appends", "tgz_removed"]
+
+imagevars = ["IMAGE_CMD", "EXTRA_IMAGECMD", "IMAGE_TYPEDEP", "CONVERSION_CMD", "COMPRESS_CMD"] + args.image_vars
+packagevars += imagevars
+
+skip_ext = [".html", ".patch", ".m4", ".diff"] + args.skip_ext
+
+vars_re = {}
+for exp in vars:
+ vars_re[exp] = (re.compile(r'((^|[#\'"\s\-\+])[A-Za-z0-9_\-:${}\.]+)_' + exp), r"\1:" + exp)
+
+shortvars_re = {}
+for exp in shortvars:
+ shortvars_re[exp] = (re.compile(r'((^|[#\'"\s\-\+])[A-Za-z0-9_\-:${}\.]+)_' + exp + r'([\(\'"\s:])'), r"\1:" + exp + r"\3")
+
+package_re = {}
+for exp in packagevars:
+ package_re[exp] = (re.compile(r'(^|[#\'"\s\-\+]+)' + exp + r'_' + r'([$a-z"\'\s%\[<{\\\*].)'), r"\1" + exp + r":\2")
+
+# Other substitutions to make
+subs = {
+ 'r = re.compile(r"([^:]+):\s*(.*)")' : 'r = re.compile(r"(^.+?):\s+(.*)")',
+ "val = d.getVar('%s_%s' % (var, pkg))" : "val = d.getVar('%s:%s' % (var, pkg))",
+ "f.write('%s_%s: %s\\n' % (var, pkg, encode(val)))" : "f.write('%s:%s: %s\\n' % (var, pkg, encode(val)))",
+ "d.getVar('%s_%s' % (scriptlet_name, pkg))" : "d.getVar('%s:%s' % (scriptlet_name, pkg))",
+ 'ret.append(v + "_" + p)' : 'ret.append(v + ":" + p)',
+}
+
+def processfile(fn):
+ print("processing file '%s'" % fn)
+ try:
+ fh, abs_path = tempfile.mkstemp()
+ with os.fdopen(fh, 'w') as new_file:
+ with open(fn, "r") as old_file:
+ for line in old_file:
+ skip = False
+ for s in skips:
+ if s in line:
+ skip = True
+ if "ptest_append" in line or "ptest_remove" in line or "ptest_prepend" in line:
+ skip = False
+ for sub in subs:
+ if sub in line:
+ line = line.replace(sub, subs[sub])
+ skip = True
+ if not skip:
+ for pvar in packagevars:
+ line = package_re[pvar][0].sub(package_re[pvar][1], line)
+ for var in vars:
+ line = vars_re[var][0].sub(vars_re[var][1], line)
+ for shortvar in shortvars:
+ line = shortvars_re[shortvar][0].sub(shortvars_re[shortvar][1], line)
+ if "pkg_postinst:ontarget" in line:
+ line = line.replace("pkg_postinst:ontarget", "pkg_postinst_ontarget")
+ new_file.write(line)
+ shutil.copymode(fn, abs_path)
+ os.remove(fn)
+ shutil.move(abs_path, fn)
+ except UnicodeDecodeError:
+ pass
+
+ourname = os.path.basename(sys.argv[0])
+ourversion = "0.9.3"
+
+for p in args.path:
+ if os.path.isfile(p):
+ processfile(p)
+ else:
+ print("processing directory '%s'" % p)
+ for root, dirs, files in os.walk(p):
+ for name in files:
+ if name == ourname:
+ continue
+ fn = os.path.join(root, name)
+ if os.path.islink(fn):
+ continue
+ if "/.git/" in fn or any(fn.endswith(ext) for ext in skip_ext):
+ continue
+ processfile(fn)
+
+print("All files processed with version %s" % ourversion)
diff --git a/scripts/contrib/convert-spdx-licenses.py b/scripts/contrib/convert-spdx-licenses.py
new file mode 100755
index 0000000000..4e194dee3f
--- /dev/null
+++ b/scripts/contrib/convert-spdx-licenses.py
@@ -0,0 +1,145 @@
+#!/usr/bin/env python3
+#
+# Conversion script to change LICENSE entries to SPDX identifiers
+#
+# Copyright (C) 2021-2022 Richard Purdie
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import re
+import os
+import sys
+import tempfile
+import shutil
+import mimetypes
+
+if len(sys.argv) < 2:
+ print("Please specify a directory to run the conversion script against.")
+ sys.exit(1)
+
+license_map = {
+"AGPL-3" : "AGPL-3.0-only",
+"AGPL-3+" : "AGPL-3.0-or-later",
+"AGPLv3" : "AGPL-3.0-only",
+"AGPLv3+" : "AGPL-3.0-or-later",
+"AGPLv3.0" : "AGPL-3.0-only",
+"AGPLv3.0+" : "AGPL-3.0-or-later",
+"AGPL-3.0" : "AGPL-3.0-only",
+"AGPL-3.0+" : "AGPL-3.0-or-later",
+"BSD-0-Clause" : "0BSD",
+"GPL-1" : "GPL-1.0-only",
+"GPL-1+" : "GPL-1.0-or-later",
+"GPLv1" : "GPL-1.0-only",
+"GPLv1+" : "GPL-1.0-or-later",
+"GPLv1.0" : "GPL-1.0-only",
+"GPLv1.0+" : "GPL-1.0-or-later",
+"GPL-1.0" : "GPL-1.0-only",
+"GPL-1.0+" : "GPL-1.0-or-later",
+"GPL-2" : "GPL-2.0-only",
+"GPL-2+" : "GPL-2.0-or-later",
+"GPLv2" : "GPL-2.0-only",
+"GPLv2+" : "GPL-2.0-or-later",
+"GPLv2.0" : "GPL-2.0-only",
+"GPLv2.0+" : "GPL-2.0-or-later",
+"GPL-2.0" : "GPL-2.0-only",
+"GPL-2.0+" : "GPL-2.0-or-later",
+"GPL-3" : "GPL-3.0-only",
+"GPL-3+" : "GPL-3.0-or-later",
+"GPLv3" : "GPL-3.0-only",
+"GPLv3+" : "GPL-3.0-or-later",
+"GPLv3.0" : "GPL-3.0-only",
+"GPLv3.0+" : "GPL-3.0-or-later",
+"GPL-3.0" : "GPL-3.0-only",
+"GPL-3.0+" : "GPL-3.0-or-later",
+"LGPLv2" : "LGPL-2.0-only",
+"LGPLv2+" : "LGPL-2.0-or-later",
+"LGPLv2.0" : "LGPL-2.0-only",
+"LGPLv2.0+" : "LGPL-2.0-or-later",
+"LGPL-2.0" : "LGPL-2.0-only",
+"LGPL-2.0+" : "LGPL-2.0-or-later",
+"LGPL2.1" : "LGPL-2.1-only",
+"LGPL2.1+" : "LGPL-2.1-or-later",
+"LGPLv2.1" : "LGPL-2.1-only",
+"LGPLv2.1+" : "LGPL-2.1-or-later",
+"LGPL-2.1" : "LGPL-2.1-only",
+"LGPL-2.1+" : "LGPL-2.1-or-later",
+"LGPLv3" : "LGPL-3.0-only",
+"LGPLv3+" : "LGPL-3.0-or-later",
+"LGPL-3.0" : "LGPL-3.0-only",
+"LGPL-3.0+" : "LGPL-3.0-or-later",
+"MPL-1" : "MPL-1.0",
+"MPLv1" : "MPL-1.0",
+"MPLv1.1" : "MPL-1.1",
+"MPLv2" : "MPL-2.0",
+"MIT-X" : "MIT",
+"MIT-style" : "MIT",
+"openssl" : "OpenSSL",
+"PSF" : "PSF-2.0",
+"PSFv2" : "PSF-2.0",
+"Python-2" : "Python-2.0",
+"Apachev2" : "Apache-2.0",
+"Apache-2" : "Apache-2.0",
+"Artisticv1" : "Artistic-1.0",
+"Artistic-1" : "Artistic-1.0",
+"AFL-2" : "AFL-2.0",
+"AFL-1" : "AFL-1.2",
+"AFLv2" : "AFL-2.0",
+"AFLv1" : "AFL-1.2",
+"CDDLv1" : "CDDL-1.0",
+"CDDL-1" : "CDDL-1.0",
+"EPLv1.0" : "EPL-1.0",
+"FreeType" : "FTL",
+"Nauman" : "Naumen",
+"tcl" : "TCL",
+"vim" : "Vim",
+"SGIv1" : "SGI-1",
+}
+
+def processfile(fn):
+ print("processing file '%s'" % fn)
+ try:
+ fh, abs_path = tempfile.mkstemp()
+ modified = False
+ with os.fdopen(fh, 'w') as new_file:
+ with open(fn, "r") as old_file:
+ for line in old_file:
+ if not line.startswith("LICENSE"):
+ new_file.write(line)
+ continue
+ orig = line
+ for license in sorted(license_map, key=len, reverse=True):
+ for ending in ['"', "'", " ", ")"]:
+ line = line.replace(license + ending, license_map[license] + ending)
+ if orig != line:
+ modified = True
+ new_file.write(line)
+ new_file.close()
+ if modified:
+ shutil.copymode(fn, abs_path)
+ os.remove(fn)
+ shutil.move(abs_path, fn)
+ except UnicodeDecodeError:
+ pass
+
+ourname = os.path.basename(sys.argv[0])
+ourversion = "0.01"
+
+if os.path.isfile(sys.argv[1]):
+ processfile(sys.argv[1])
+ sys.exit(0)
+
+for targetdir in sys.argv[1:]:
+ print("processing directory '%s'" % targetdir)
+ for root, dirs, files in os.walk(targetdir):
+ for name in files:
+ if name == ourname:
+ continue
+ fn = os.path.join(root, name)
+ if os.path.islink(fn):
+ continue
+ if "/.git/" in fn or fn.endswith(".html") or fn.endswith(".patch") or fn.endswith(".m4") or fn.endswith(".diff") or fn.endswith(".orig"):
+ continue
+ processfile(fn)
+
+print("All files processed with version %s" % ourversion)
diff --git a/scripts/contrib/convert-srcuri.py b/scripts/contrib/convert-srcuri.py
new file mode 100755
index 0000000000..587392334f
--- /dev/null
+++ b/scripts/contrib/convert-srcuri.py
@@ -0,0 +1,77 @@
+#!/usr/bin/env python3
+#
+# Conversion script to update SRC_URI to add branch to git urls
+#
+# Copyright (C) 2021 Richard Purdie
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import re
+import os
+import sys
+import tempfile
+import shutil
+import mimetypes
+
+if len(sys.argv) < 2:
+ print("Please specify a directory to run the conversion script against.")
+ sys.exit(1)
+
+def processfile(fn):
+ def matchline(line):
+ if "MIRROR" in line or ".*" in line or "GNOME_GIT" in line:
+ return False
+ return True
+ print("processing file '%s'" % fn)
+ try:
+ if "distro_alias.inc" in fn or "linux-yocto-custom.bb" in fn:
+ return
+ fh, abs_path = tempfile.mkstemp()
+ modified = False
+ with os.fdopen(fh, 'w') as new_file:
+ with open(fn, "r") as old_file:
+ for line in old_file:
+ if ("git://" in line or "gitsm://" in line) and "branch=" not in line and matchline(line):
+ if line.endswith('"\n'):
+ line = line.replace('"\n', ';branch=master"\n')
+ elif re.search('\s*\\\\$', line):
+ line = re.sub('\s*\\\\$', ';branch=master \\\\', line)
+ modified = True
+ if ("git://" in line or "gitsm://" in line) and "github.com" in line and "protocol=https" not in line and matchline(line):
+ if "protocol=git" in line:
+ line = line.replace('protocol=git', 'protocol=https')
+ elif line.endswith('"\n'):
+ line = line.replace('"\n', ';protocol=https"\n')
+ elif re.search('\s*\\\\$', line):
+ line = re.sub('\s*\\\\$', ';protocol=https \\\\', line)
+ modified = True
+ new_file.write(line)
+ if modified:
+ shutil.copymode(fn, abs_path)
+ os.remove(fn)
+ shutil.move(abs_path, fn)
+ except UnicodeDecodeError:
+ pass
+
+ourname = os.path.basename(sys.argv[0])
+ourversion = "0.1"
+
+if os.path.isfile(sys.argv[1]):
+ processfile(sys.argv[1])
+ sys.exit(0)
+
+for targetdir in sys.argv[1:]:
+ print("processing directory '%s'" % targetdir)
+ for root, dirs, files in os.walk(targetdir):
+ for name in files:
+ if name == ourname:
+ continue
+ fn = os.path.join(root, name)
+ if os.path.islink(fn):
+ continue
+ if "/.git/" in fn or fn.endswith(".html") or fn.endswith(".patch") or fn.endswith(".m4") or fn.endswith(".diff"):
+ continue
+ processfile(fn)
+
+print("All files processed with version %s" % ourversion)
diff --git a/scripts/contrib/convert-variable-renames.py b/scripts/contrib/convert-variable-renames.py
new file mode 100755
index 0000000000..eded90ca61
--- /dev/null
+++ b/scripts/contrib/convert-variable-renames.py
@@ -0,0 +1,116 @@
+#!/usr/bin/env python3
+#
+# Conversion script to rename variables to versions with improved terminology.
+# Also highlights potentially problematic language and removed variables.
+#
+# Copyright (C) 2021 Richard Purdie
+# Copyright (C) 2022 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import re
+import os
+import sys
+import tempfile
+import shutil
+import mimetypes
+
+if len(sys.argv) < 2:
+ print("Please specify a directory to run the conversion script against.")
+ sys.exit(1)
+
+renames = {
+"BB_ENV_WHITELIST" : "BB_ENV_PASSTHROUGH",
+"BB_ENV_EXTRAWHITE" : "BB_ENV_PASSTHROUGH_ADDITIONS",
+"BB_HASHCONFIG_WHITELIST" : "BB_HASHCONFIG_IGNORE_VARS",
+"BB_SETSCENE_ENFORCE_WHITELIST" : "BB_SETSCENE_ENFORCE_IGNORE_TASKS",
+"BB_HASHBASE_WHITELIST" : "BB_BASEHASH_IGNORE_VARS",
+"BB_HASHTASK_WHITELIST" : "BB_TASKHASH_IGNORE_TASKS",
+"CVE_CHECK_PN_WHITELIST" : "CVE_CHECK_SKIP_RECIPE",
+"CVE_CHECK_WHITELIST" : "CVE_CHECK_IGNORE",
+"MULTI_PROVIDER_WHITELIST" : "BB_MULTI_PROVIDER_ALLOWED",
+"PNBLACKLIST" : "SKIP_RECIPE",
+"SDK_LOCAL_CONF_BLACKLIST" : "ESDK_LOCALCONF_REMOVE",
+"SDK_LOCAL_CONF_WHITELIST" : "ESDK_LOCALCONF_ALLOW",
+"SDK_INHERIT_BLACKLIST" : "ESDK_CLASS_INHERIT_DISABLE",
+"SSTATE_DUPWHITELIST" : "SSTATE_ALLOW_OVERLAP_FILES",
+"SYSROOT_DIRS_BLACKLIST" : "SYSROOT_DIRS_IGNORE",
+"UNKNOWN_CONFIGURE_WHITELIST" : "UNKNOWN_CONFIGURE_OPT_IGNORE",
+"ICECC_USER_CLASS_BL" : "ICECC_CLASS_DISABLE",
+"ICECC_SYSTEM_CLASS_BL" : "ICECC_CLASS_DISABLE",
+"ICECC_USER_PACKAGE_WL" : "ICECC_RECIPE_ENABLE",
+"ICECC_USER_PACKAGE_BL" : "ICECC_RECIPE_DISABLE",
+"ICECC_SYSTEM_PACKAGE_BL" : "ICECC_RECIPE_DISABLE",
+"LICENSE_FLAGS_WHITELIST" : "LICENSE_FLAGS_ACCEPTED",
+}
+
+removed_list = [
+"BB_STAMP_WHITELIST",
+"BB_STAMP_POLICY",
+"INHERIT_BLACKLIST",
+"TUNEABI_WHITELIST",
+]
+
+context_check_list = [
+"blacklist",
+"whitelist",
+"abort",
+]
+
+def processfile(fn):
+
+ print("processing file '%s'" % fn)
+ try:
+ fh, abs_path = tempfile.mkstemp()
+ modified = False
+ with os.fdopen(fh, 'w') as new_file:
+ with open(fn, "r") as old_file:
+ lineno = 0
+ for line in old_file:
+ lineno += 1
+ if not line or "BB_RENAMED_VARIABLE" in line:
+ continue
+ # Do the renames
+ for old_name, new_name in renames.items():
+ if old_name in line:
+ line = line.replace(old_name, new_name)
+ modified = True
+ # Find removed names
+ for removed_name in removed_list:
+ if removed_name in line:
+ print("%s needs further work at line %s because %s has been deprecated" % (fn, lineno, removed_name))
+ for check_word in context_check_list:
+ if re.search(check_word, line, re.IGNORECASE):
+ print("%s needs further work at line %s since it contains %s"% (fn, lineno, check_word))
+ new_file.write(line)
+ new_file.close()
+ if modified:
+ print("*** Modified file '%s'" % (fn))
+ shutil.copymode(fn, abs_path)
+ os.remove(fn)
+ shutil.move(abs_path, fn)
+ except UnicodeDecodeError:
+ pass
+
+ourname = os.path.basename(sys.argv[0])
+ourversion = "0.1"
+
+if os.path.isfile(sys.argv[1]):
+ processfile(sys.argv[1])
+ sys.exit(0)
+
+for targetdir in sys.argv[1:]:
+ print("processing directory '%s'" % targetdir)
+ for root, dirs, files in os.walk(targetdir):
+ for name in files:
+ if name == ourname:
+ continue
+ fn = os.path.join(root, name)
+ if os.path.islink(fn):
+ continue
+ if "ChangeLog" in fn or "/.git/" in fn or fn.endswith(".html") or fn.endswith(".patch") or fn.endswith(".m4") or fn.endswith(".diff") or fn.endswith(".orig"):
+ continue
+ processfile(fn)
+
+print("All files processed with version %s" % ourversion)
diff --git a/scripts/contrib/ddimage b/scripts/contrib/ddimage
index 7f2ad112a6..70eee8ebea 100755
--- a/scripts/contrib/ddimage
+++ b/scripts/contrib/ddimage
@@ -1,5 +1,7 @@
#!/bin/sh
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
diff --git a/scripts/contrib/dialog-power-control b/scripts/contrib/dialog-power-control
index ad6070c369..82c84baa1d 100755
--- a/scripts/contrib/dialog-power-control
+++ b/scripts/contrib/dialog-power-control
@@ -1,5 +1,7 @@
#!/bin/sh
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
# Simple script to show a manual power prompt for when you want to use
diff --git a/scripts/contrib/documentation-audit.sh b/scripts/contrib/documentation-audit.sh
index 1191f57a8e..7197a2fcea 100755
--- a/scripts/contrib/documentation-audit.sh
+++ b/scripts/contrib/documentation-audit.sh
@@ -1,5 +1,7 @@
#!/bin/bash
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
# Perform an audit of which packages provide documentation and which
@@ -26,8 +28,8 @@ if [ -z "$BITBAKE" ]; then
fi
echo "REMINDER: you need to build for MACHINE=qemux86 or you won't get useful results"
-echo "REMINDER: you need to set LICENSE_FLAGS_WHITELIST appropriately in local.conf or "
-echo " you'll get false positives. For example, LICENSE_FLAGS_WHITELIST = \"Commercial\""
+echo "REMINDER: you need to set LICENSE_FLAGS_ACCEPTED appropriately in local.conf or "
+echo " you'll get false positives. For example, LICENSE_FLAGS_ACCEPTED = \"commercial\""
for pkg in `bitbake -s | awk '{ print \$1 }'`; do
if [[ "$pkg" == "Loading" || "$pkg" == "Loaded" ||
diff --git a/scripts/contrib/image-manifest b/scripts/contrib/image-manifest
new file mode 100755
index 0000000000..4d65a99258
--- /dev/null
+++ b/scripts/contrib/image-manifest
@@ -0,0 +1,523 @@
+#!/usr/bin/env python3
+
+# Script to extract information from image manifests
+#
+# Copyright (C) 2018 Intel Corporation
+# Copyright (C) 2021 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import sys
+import os
+import argparse
+import logging
+import json
+import shutil
+import tempfile
+import tarfile
+from collections import OrderedDict
+
+scripts_path = os.path.dirname(__file__)
+lib_path = scripts_path + '/../lib'
+sys.path = sys.path + [lib_path]
+
+import scriptutils
+logger = scriptutils.logger_create(os.path.basename(__file__))
+
+import argparse_oe
+import scriptpath
+bitbakepath = scriptpath.add_bitbake_lib_path()
+if not bitbakepath:
+ logger.error("Unable to find bitbake by searching parent directory of this script or PATH")
+ sys.exit(1)
+logger.debug('Using standard bitbake path %s' % bitbakepath)
+scriptpath.add_oe_lib_path()
+
+import bb.tinfoil
+import bb.utils
+import oe.utils
+import oe.recipeutils
+
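+# Read an image manifest ("name arch version" lines) or a build dependency
+# file (one package per line) and return the sorted package names.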
+def get_pkg_list(manifest):
+ pkglist = []
+ with open(manifest, 'r') as f:
+ for line in f:
+ linesplit = line.split()
+ if len(linesplit) == 3:
+ # manifest file
+ pkglist.append(linesplit[0])
+ elif len(linesplit) == 1:
+ # build dependency file
+ pkglist.append(linesplit[0])
+ return sorted(pkglist)
+
+def list_packages(args):
+ pkglist = get_pkg_list(args.manifest)
+ for pkg in pkglist:
+ print('%s' % pkg)
+
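+# Map a runtime package name back to the recipe (PN) that built it, using the
+# pkgdata runtime-reverse data; native packages are deliberately skipped.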
+def pkg2recipe(tinfoil, pkg):
+ if "-native" in pkg:
+ logger.info('skipping %s' % pkg)
+ return None
+
+ pkgdata_dir = tinfoil.config_data.getVar('PKGDATA_DIR')
+ pkgdatafile = os.path.join(pkgdata_dir, 'runtime-reverse', pkg)
+ logger.debug('pkgdatafile %s' % pkgdatafile)
+ try:
+ with open(pkgdatafile, 'r') as f:
+ for line in f:
+ if line.startswith('PN:'):
+ recipe = line.split(':', 1)[1].strip()
+ return recipe
+ except Exception:
+ logger.warning('%s is missing' % pkgdatafile)
+ return None
+
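+# Resolve each package in the manifest to its recipe, dropping duplicates.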
+def get_recipe_list(manifest, tinfoil):
+ pkglist = get_pkg_list(manifest)
+ recipelist = []
+ for pkg in pkglist:
+ recipe = pkg2recipe(tinfoil, pkg)
+ if recipe:
+ if recipe not in recipelist:
+ recipelist.append(recipe)
+
+ return sorted(recipelist)
+
+def list_recipes(args):
+ import bb.tinfoil
+ with bb.tinfoil.Tinfoil() as tinfoil:
+ tinfoil.logger.setLevel(logger.getEffectiveLevel())
+ tinfoil.prepare(config_only=True)
+ recipelist = get_recipe_list(args.manifest, tinfoil)
+ for recipe in sorted(recipelist):
+ print('%s' % recipe)
+
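+# Emit JSON describing each layer in BBLAYERS: repository URL, remote branch,
+# subdirectory within the repository and HEAD commit, where git metadata exists.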
+def list_layers(args):
+
+ def find_git_repo(pth):
+ checkpth = pth
+ while checkpth != os.sep:
+ if os.path.exists(os.path.join(checkpth, '.git')):
+ return checkpth
+ checkpth = os.path.dirname(checkpth)
+ return None
+
+ def get_git_remote_branch(repodir):
+ try:
+ stdout, _ = bb.process.run(['git', 'rev-parse', '--abbrev-ref', '--symbolic-full-name', '@{u}'], cwd=repodir)
+ except bb.process.ExecutionError as e:
+ stdout = None
+ if stdout:
+ return stdout.strip()
+ else:
+ return None
+
+ def get_git_head_commit(repodir):
+ try:
+ stdout, _ = bb.process.run(['git', 'rev-parse', 'HEAD'], cwd=repodir)
+ except bb.process.ExecutionError as e:
+ stdout = None
+ if stdout:
+ return stdout.strip()
+ else:
+ return None
+
+ def get_git_repo_url(repodir, remote='origin'):
+ import bb.process
+ # Try to get upstream repo location from origin remote
+ try:
+ stdout, _ = bb.process.run(['git', 'remote', '-v'], cwd=repodir)
+ except bb.process.ExecutionError as e:
+ stdout = None
+ if stdout:
+ for line in stdout.splitlines():
+ splitline = line.split()
+ if len(splitline) > 1:
+ if splitline[0] == remote and scriptutils.is_src_url(splitline[1]):
+ return splitline[1]
+ return None
+
+ with bb.tinfoil.Tinfoil() as tinfoil:
+ tinfoil.logger.setLevel(logger.getEffectiveLevel())
+ tinfoil.prepare(config_only=False)
+ layers = OrderedDict()
+ for layerdir in tinfoil.config_data.getVar('BBLAYERS').split():
+ layerdata = OrderedDict()
+ layername = os.path.basename(layerdir)
+ logger.debug('layername %s, layerdir %s' % (layername, layerdir))
+ if layername in layers:
+ logger.warning('layername %s is not unique in configuration' % layername)
+ layername = os.path.basename(os.path.dirname(layerdir)) + '_' + os.path.basename(layerdir)
+ logger.debug('trying layername %s' % layername)
+ if layername in layers:
+ logger.error('Layer name %s is not unique in configuration' % layername)
+ sys.exit(2)
+ repodir = find_git_repo(layerdir)
+ if repodir:
+ remotebranch = get_git_remote_branch(repodir)
+ remote = 'origin'
+ if remotebranch and '/' in remotebranch:
+ rbsplit = remotebranch.split('/', 1)
+ layerdata['actual_branch'] = rbsplit[1]
+ remote = rbsplit[0]
+ layerdata['vcs_url'] = get_git_repo_url(repodir, remote)
+ if os.path.abspath(repodir) != os.path.abspath(layerdir):
+ layerdata['vcs_subdir'] = os.path.relpath(layerdir, repodir)
+ commit = get_git_head_commit(repodir)
+ if commit:
+ layerdata['vcs_commit'] = commit
+ layers[layername] = layerdata
+
+ json.dump(layers, args.output, indent=2)
+
+def get_recipe(args):
+ with bb.tinfoil.Tinfoil() as tinfoil:
+ tinfoil.logger.setLevel(logger.getEffectiveLevel())
+ tinfoil.prepare(config_only=True)
+
+ recipe = pkg2recipe(tinfoil, args.package)
+ print(' %s package provided by %s' % (args.package, recipe))
+
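+# Recursively walk DEPENDS from the recipe providing the given package and
+# print every recipe required to build it (native/cross only with --native).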
+def pkg_dependencies(args):
+ def get_recipe_info(tinfoil, recipe):
+ try:
+ info = tinfoil.get_recipe_info(recipe)
+ except Exception:
+ logger.error('Failed to get recipe info for: %s' % recipe)
+ sys.exit(1)
+ if not info:
+ logger.warning('No recipe info found for: %s' % recipe)
+ sys.exit(1)
+ append_files = tinfoil.get_file_appends(info.fn)
+ appends = True
+ data = tinfoil.parse_recipe_file(info.fn, appends, append_files)
+ data.pn = info.pn
+ data.pv = info.pv
+ return data
+
+ def find_dependencies(tinfoil, assume_provided, recipe_info, packages, rn, order):
+ spaces = ' ' * order
+ data = recipe_info[rn]
+ if args.native:
+ logger.debug('%s- %s' % (spaces, data.pn))
+ elif "-native" not in data.pn:
+ if "cross" not in data.pn:
+ logger.debug('%s- %s' % (spaces, data.pn))
+
+ depends = []
+ for dep in data.depends:
+ if dep not in assume_provided:
+ depends.append(dep)
+
+ # First find all dependencies not in package list.
+ for dep in depends:
+ if dep not in packages:
+ packages.append(dep)
+ dep_data = get_recipe_info(tinfoil, dep)
+ # Do this once now to reduce the number of bitbake calls.
+ dep_data.depends = dep_data.getVar('DEPENDS').split()
+ recipe_info[dep] = dep_data
+
+ # Then recursively analyze all of the dependencies for the current recipe.
+ for dep in depends:
+ find_dependencies(tinfoil, assume_provided, recipe_info, packages, dep, order + 1)
+
+ with bb.tinfoil.Tinfoil() as tinfoil:
+ tinfoil.logger.setLevel(logger.getEffectiveLevel())
+ tinfoil.prepare()
+
+ assume_provided = tinfoil.config_data.getVar('ASSUME_PROVIDED').split()
+ logger.debug('assumed provided:')
+ for ap in sorted(assume_provided):
+ logger.debug(' - %s' % ap)
+
+ recipe = pkg2recipe(tinfoil, args.package)
+ data = get_recipe_info(tinfoil, recipe)
+ data.depends = []
+ depends = data.getVar('DEPENDS').split()
+ for dep in depends:
+ if dep not in assume_provided:
+ data.depends.append(dep)
+
+ recipe_info = dict([(recipe, data)])
+ packages = []
+ find_dependencies(tinfoil, assume_provided, recipe_info, packages, recipe, order=1)
+
+ print('\nThe following packages are required to build %s' % recipe)
+ for p in sorted(packages):
+ data = recipe_info[p]
+ if "-native" not in data.pn:
+ if "cross" not in data.pn:
+ print(" %s (%s)" % (data.pn,p))
+
+ if args.native:
+ print('\nThe following native packages are required to build %s' % recipe)
+ for p in sorted(packages):
+ data = recipe_info[p]
+ if "-native" in data.pn:
+ print(" %s(%s)" % (data.pn,p))
+ if "cross" in data.pn:
+ print(" %s(%s)" % (data.pn,p))
+
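+# Default export configuration: 'yes'/'no' per recipe variable controls what
+# manifest-info records; the remaining keys toggle extra data such as patches.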
+def default_config():
+ vlist = OrderedDict()
+ vlist['PV'] = 'yes'
+ vlist['SUMMARY'] = 'no'
+ vlist['DESCRIPTION'] = 'no'
+ vlist['SECTION'] = 'no'
+ vlist['LICENSE'] = 'yes'
+ vlist['HOMEPAGE'] = 'no'
+ vlist['BUGTRACKER'] = 'no'
+ vlist['PROVIDES'] = 'no'
+ vlist['BBCLASSEXTEND'] = 'no'
+ vlist['DEPENDS'] = 'no'
+ vlist['PACKAGECONFIG'] = 'no'
+ vlist['SRC_URI'] = 'yes'
+ vlist['SRCREV'] = 'yes'
+ vlist['EXTRA_OECONF'] = 'no'
+ vlist['EXTRA_OESCONS'] = 'no'
+ vlist['EXTRA_OECMAKE'] = 'no'
+ vlist['EXTRA_OEMESON'] = 'no'
+
+ clist = OrderedDict()
+ clist['variables'] = vlist
+ clist['filepath'] = 'no'
+ clist['sha256sum'] = 'no'
+ clist['layerdir'] = 'no'
+ clist['layer'] = 'no'
+ clist['inherits'] = 'no'
+ clist['source_urls'] = 'no'
+ clist['packageconfig_opts'] = 'no'
+ clist['patches'] = 'no'
+ clist['packagedir'] = 'no'
+ return clist
+
+def dump_config(args):
+ config = default_config()
+ with open('default_config.json', 'w') as f:
+ json.dump(config, f, indent=2)
+ logger.info('Default config list dumped to default_config.json')
+
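+# Export the configured recipe information for everything in the manifest and
+# pack it, along with the manifest and package/recipe lists, into a .tar.gz.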
+def export_manifest_info(args):
+
+ def handle_value(value):
+ if value:
+ return oe.utils.squashspaces(value)
+ else:
+ return value
+
+ if args.config:
+ logger.debug('config: %s' % args.config)
+ with open(args.config, 'r') as f:
+ config = json.load(f, object_pairs_hook=OrderedDict)
+ else:
+ config = default_config()
+ if logger.isEnabledFor(logging.DEBUG):
+ print('Configuration:')
+ json.dump(config, sys.stdout, indent=2)
+ print('')
+
+ tmpoutdir = tempfile.mkdtemp(prefix=os.path.basename(__file__)+'-')
+ logger.debug('tmp dir: %s' % tmpoutdir)
+
+ # export manifest
+ shutil.copy2(args.manifest,os.path.join(tmpoutdir, "manifest"))
+
+ with bb.tinfoil.Tinfoil(tracking=True) as tinfoil:
+ tinfoil.logger.setLevel(logger.getEffectiveLevel())
+ tinfoil.prepare(config_only=False)
+
+ pkglist = get_pkg_list(args.manifest)
+ # export pkg list
+ f = open(os.path.join(tmpoutdir, "pkgs"), 'w')
+ for pkg in pkglist:
+ f.write('%s\n' % pkg)
+ f.close()
+
+ recipelist = []
+ for pkg in pkglist:
+ recipe = pkg2recipe(tinfoil, pkg)
+ if recipe:
+ if recipe not in recipelist:
+ recipelist.append(recipe)
+ recipelist.sort()
+ # export recipe list
+ f = open(os.path.join(tmpoutdir, "recipes"), 'w')
+ for recipe in recipelist:
+ f.write('%s\n' % recipe)
+ f.close()
+
+ try:
+ rvalues = OrderedDict()
+ for pn in sorted(recipelist):
+ logger.debug('Package: %s' % pn)
+ rd = tinfoil.parse_recipe(pn)
+
+ rvalues[pn] = OrderedDict()
+
+ for varname in config['variables']:
+ if config['variables'][varname] == 'yes':
+ rvalues[pn][varname] = handle_value(rd.getVar(varname))
+
+ fpth = rd.getVar('FILE')
+ layerdir = oe.recipeutils.find_layerdir(fpth)
+ if config['filepath'] == 'yes':
+ rvalues[pn]['filepath'] = os.path.relpath(fpth, layerdir)
+ if config['sha256sum'] == 'yes':
+ rvalues[pn]['sha256sum'] = bb.utils.sha256_file(fpth)
+
+ if config['layerdir'] == 'yes':
+ rvalues[pn]['layerdir'] = layerdir
+
+ if config['layer'] == 'yes':
+ rvalues[pn]['layer'] = os.path.basename(layerdir)
+
+ if config['inherits'] == 'yes':
+ gr = set(tinfoil.config_data.getVar("__inherit_cache") or [])
+ lr = set(rd.getVar("__inherit_cache") or [])
+ rvalues[pn]['inherits'] = sorted({os.path.splitext(os.path.basename(r))[0] for r in lr if r not in gr})
+
+ if config['source_urls'] == 'yes':
+ rvalues[pn]['source_urls'] = []
+ for url in (rd.getVar('SRC_URI') or '').split():
+ if not url.startswith('file://'):
+ url = url.split(';')[0]
+ rvalues[pn]['source_urls'].append(url)
+
+ if config['packageconfig_opts'] == 'yes':
+ rvalues[pn]['packageconfig_opts'] = OrderedDict()
+ for key in rd.getVarFlags('PACKAGECONFIG').keys():
+ if key == 'doc':
+ continue
+ rvalues[pn]['packageconfig_opts'][key] = rd.getVarFlag('PACKAGECONFIG', key)
+
+ if config['patches'] == 'yes':
+ patches = oe.recipeutils.get_recipe_patches(rd)
+ rvalues[pn]['patches'] = []
+ if patches:
+ recipeoutdir = os.path.join(tmpoutdir, pn, 'patches')
+ bb.utils.mkdirhier(recipeoutdir)
+ for patch in patches:
+ # Patches may be in other layers too
+ patchlayerdir = oe.recipeutils.find_layerdir(patch)
+ # patchlayerdir will be None for remote patches, which we ignore
+ # (since currently they are considered as part of sources)
+ if patchlayerdir:
+ rvalues[pn]['patches'].append((os.path.basename(patchlayerdir), os.path.relpath(patch, patchlayerdir)))
+ shutil.copy(patch, recipeoutdir)
+
+ if config['packagedir'] == 'yes':
+ pn_dir = os.path.join(tmpoutdir, pn)
+ bb.utils.mkdirhier(pn_dir)
+ f = open(os.path.join(pn_dir, 'recipe.json'), 'w')
+ json.dump(rvalues[pn], f, indent=2)
+ f.close()
+
+ with open(os.path.join(tmpoutdir, 'recipes.json'), 'w') as f:
+ json.dump(rvalues, f, indent=2)
+
+ if args.output:
+ outname = os.path.basename(args.output)
+ else:
+ outname = os.path.splitext(os.path.basename(args.manifest))[0]
+ if outname.endswith('.tar.gz'):
+ outname = outname[:-7]
+ elif outname.endswith('.tgz'):
+ outname = outname[:-4]
+
+ tarfn = outname
+ if tarfn.endswith(os.sep):
+ tarfn = tarfn[:-1]
+ if not tarfn.endswith(('.tar.gz', '.tgz')):
+ tarfn += '.tar.gz'
+ with open(tarfn, 'wb') as f:
+ with tarfile.open(None, "w:gz", f) as tar:
+ tar.add(tmpoutdir, outname)
+ finally:
+ shutil.rmtree(tmpoutdir)
+
+
+def main():
+ parser = argparse_oe.ArgumentParser(description="Image manifest utility",
+ epilog="Use %(prog)s <subcommand> --help to get help on a specific command")
+ parser.add_argument('-d', '--debug', help='Enable debug output', action='store_true')
+ parser.add_argument('-q', '--quiet', help='Print only errors', action='store_true')
+ subparsers = parser.add_subparsers(dest="subparser_name", title='subcommands', metavar='<subcommand>')
+ subparsers.required = True
+
+ # get recipe info
+ parser_get_recipes = subparsers.add_parser('recipe-info',
+ help='Get recipe info',
+ description='Get recipe information for a package')
+ parser_get_recipes.add_argument('package', help='Package name')
+ parser_get_recipes.set_defaults(func=get_recipe)
+
+ # list build dependencies
+ parser_pkg_dep = subparsers.add_parser('list-depends',
+ help='List dependencies',
+ description='List dependencies required to build the package')
+ parser_pkg_dep.add_argument('--native', help='also print native and cross packages', action='store_true')
+ parser_pkg_dep.add_argument('package', help='Package name')
+ parser_pkg_dep.set_defaults(func=pkg_dependencies)
+
+ # list recipes
+ parser_recipes = subparsers.add_parser('list-recipes',
+ help='List recipes producing packages within an image',
+ description='Lists recipes producing the packages that went into an image, using the manifest and pkgdata')
+ parser_recipes.add_argument('manifest', help='Manifest file')
+ parser_recipes.set_defaults(func=list_recipes)
+
+ # list packages
+ parser_packages = subparsers.add_parser('list-packages',
+ help='List packages within an image',
+ description='Lists packages that went into an image, using the manifest')
+ parser_packages.add_argument('manifest', help='Manifest file')
+ parser_packages.set_defaults(func=list_packages)
+
+ # list layers
+ parser_layers = subparsers.add_parser('list-layers',
+ help='List included layers',
+ description='Lists included layers')
+ parser_layers.add_argument('-o', '--output', help='Output file - defaults to stdout if not specified',
+ default=sys.stdout, type=argparse.FileType('w'))
+ parser_layers.set_defaults(func=list_layers)
+
+ # dump default configuration file
+ parser_dconfig = subparsers.add_parser('dump-config',
+ help='Dump default config',
+ description='Dump default config to default_config.json')
+ parser_dconfig.set_defaults(func=dump_config)
+
+ # export recipe info for packages in manifest
+ parser_export = subparsers.add_parser('manifest-info',
+ help='Export recipe info for a manifest',
+ description='Export recipe information using the manifest')
+ parser_export.add_argument('-c', '--config', help='Load config from JSON file')
+ parser_export.add_argument('-o', '--output', help='Output file (tarball) - defaults to manifest name if not specified')
+ parser_export.add_argument('manifest', help='Manifest file')
+ parser_export.set_defaults(func=export_manifest_info)
+
+ args = parser.parse_args()
+
+ if args.debug:
+ logger.setLevel(logging.DEBUG)
+ logger.debug("Debug Enabled")
+ elif args.quiet:
+ logger.setLevel(logging.ERROR)
+
+ ret = args.func(args)
+
+ return ret
+
+
+if __name__ == "__main__":
+ try:
+ ret = main()
+ except Exception:
+ ret = 1
+ import traceback
+ traceback.print_exc()
+ sys.exit(ret)
diff --git a/scripts/contrib/list-packageconfig-flags.py b/scripts/contrib/list-packageconfig-flags.py
index d6de4dc84d..bb288e9099 100755
--- a/scripts/contrib/list-packageconfig-flags.py
+++ b/scripts/contrib/list-packageconfig-flags.py
@@ -33,7 +33,7 @@ import bb.tinfoil
def get_fnlist(bbhandler, pkg_pn, preferred):
''' Get all recipe file names '''
if preferred:
- (latest_versions, preferred_versions) = bb.providers.findProviders(bbhandler.config_data, bbhandler.cooker.recipecaches[''], pkg_pn)
+ (latest_versions, preferred_versions, required_versions) = bb.providers.findProviders(bbhandler.config_data, bbhandler.cooker.recipecaches[''], pkg_pn)
fn_list = []
for pn in sorted(pkg_pn):
diff --git a/scripts/contrib/oe-build-perf-report-email.py b/scripts/contrib/oe-build-perf-report-email.py
index de3862c897..7192113c28 100755
--- a/scripts/contrib/oe-build-perf-report-email.py
+++ b/scripts/contrib/oe-build-perf-report-email.py
@@ -19,8 +19,6 @@ import socket
import subprocess
import sys
import tempfile
-from email.mime.image import MIMEImage
-from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
@@ -29,30 +27,6 @@ logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s")
log = logging.getLogger('oe-build-perf-report')
-# Find js scaper script
-SCRAPE_JS = os.path.join(os.path.dirname(__file__), '..', 'lib', 'build_perf',
- 'scrape-html-report.js')
-if not os.path.isfile(SCRAPE_JS):
- log.error("Unableto find oe-build-perf-report-scrape.js")
- sys.exit(1)
-
-
-class ReportError(Exception):
- """Local errors"""
- pass
-
-
-def check_utils():
- """Check that all needed utils are installed in the system"""
- missing = []
- for cmd in ('phantomjs', 'optipng'):
- if not shutil.which(cmd):
- missing.append(cmd)
- if missing:
- log.error("The following tools are missing: %s", ' '.join(missing))
- sys.exit(1)
-
-
def parse_args(argv):
"""Parse command line arguments"""
description = """Email build perf test report"""
@@ -77,137 +51,19 @@ def parse_args(argv):
"the email parts")
parser.add_argument('--text',
help="Plain text message")
- parser.add_argument('--html',
- help="HTML peport generated by oe-build-perf-report")
- parser.add_argument('--phantomjs-args', action='append',
- help="Extra command line arguments passed to PhantomJS")
args = parser.parse_args(argv)
- if not args.html and not args.text:
- parser.error("Please specify --html and/or --text")
+ if not args.text:
+ parser.error("Please specify --text")
return args
-def decode_png(infile, outfile):
- """Parse/decode/optimize png data from a html element"""
- with open(infile) as f:
- raw_data = f.read()
-
- # Grab raw base64 data
- b64_data = re.sub('^.*href="data:image/png;base64,', '', raw_data, 1)
- b64_data = re.sub('">.+$', '', b64_data, 1)
-
- # Replace file with proper decoded png
- with open(outfile, 'wb') as f:
- f.write(base64.b64decode(b64_data))
-
- subprocess.check_output(['optipng', outfile], stderr=subprocess.STDOUT)
-
-
-def mangle_html_report(infile, outfile, pngs):
- """Mangle html file into a email compatible format"""
- paste = True
- png_dir = os.path.dirname(outfile)
- with open(infile) as f_in:
- with open(outfile, 'w') as f_out:
- for line in f_in.readlines():
- stripped = line.strip()
- # Strip out scripts
- if stripped == '<!--START-OF-SCRIPTS-->':
- paste = False
- elif stripped == '<!--END-OF-SCRIPTS-->':
- paste = True
- elif paste:
- if re.match('^.+href="data:image/png;base64', stripped):
- # Strip out encoded pngs (as they're huge in size)
- continue
- elif 'www.gstatic.com' in stripped:
- # HACK: drop references to external static pages
- continue
-
- # Replace charts with <img> elements
- match = re.match('<div id="(?P<id>\w+)"', stripped)
- if match and match.group('id') in pngs:
- f_out.write('<img src="cid:{}"\n'.format(match.group('id')))
- else:
- f_out.write(line)
-
-
-def scrape_html_report(report, outdir, phantomjs_extra_args=None):
- """Scrape html report into a format sendable by email"""
- tmpdir = tempfile.mkdtemp(dir='.')
- log.debug("Using tmpdir %s for phantomjs output", tmpdir)
-
- if not os.path.isdir(outdir):
- os.mkdir(outdir)
- if os.path.splitext(report)[1] not in ('.html', '.htm'):
- raise ReportError("Invalid file extension for report, needs to be "
- "'.html' or '.htm'")
-
- try:
- log.info("Scraping HTML report with PhangomJS")
- extra_args = phantomjs_extra_args if phantomjs_extra_args else []
- subprocess.check_output(['phantomjs', '--debug=true'] + extra_args +
- [SCRAPE_JS, report, tmpdir],
- stderr=subprocess.STDOUT)
-
- pngs = []
- images = []
- for fname in os.listdir(tmpdir):
- base, ext = os.path.splitext(fname)
- if ext == '.png':
- log.debug("Decoding %s", fname)
- decode_png(os.path.join(tmpdir, fname),
- os.path.join(outdir, fname))
- pngs.append(base)
- images.append(fname)
- elif ext in ('.html', '.htm'):
- report_file = fname
- else:
- log.warning("Unknown file extension: '%s'", ext)
- #shutil.move(os.path.join(tmpdir, fname), outdir)
-
- log.debug("Mangling html report file %s", report_file)
- mangle_html_report(os.path.join(tmpdir, report_file),
- os.path.join(outdir, report_file), pngs)
- return (os.path.join(outdir, report_file),
- [os.path.join(outdir, i) for i in images])
- finally:
- shutil.rmtree(tmpdir)
-
-def send_email(text_fn, html_fn, image_fns, subject, recipients, copy=[],
- blind_copy=[]):
- """Send email"""
+def send_email(text_fn, subject, recipients, copy=[], blind_copy=[]):
# Generate email message
- text_msg = html_msg = None
- if text_fn:
- with open(text_fn) as f:
- text_msg = MIMEText("Yocto build performance test report.\n" +
- f.read(), 'plain')
- if html_fn:
- html_msg = msg = MIMEMultipart('related')
- with open(html_fn) as f:
- html_msg.attach(MIMEText(f.read(), 'html'))
- for img_fn in image_fns:
- # Expect that content id is same as the filename
- cid = os.path.splitext(os.path.basename(img_fn))[0]
- with open(img_fn, 'rb') as f:
- image_msg = MIMEImage(f.read())
- image_msg['Content-ID'] = '<{}>'.format(cid)
- html_msg.attach(image_msg)
-
- if text_msg and html_msg:
- msg = MIMEMultipart('alternative')
- msg.attach(text_msg)
- msg.attach(html_msg)
- elif text_msg:
- msg = text_msg
- elif html_msg:
- msg = html_msg
- else:
- raise ReportError("Neither plain text nor html body specified")
+ with open(text_fn) as f:
+ msg = MIMEText("Yocto build performance test report.\n" + f.read(), 'plain')
pw_data = pwd.getpwuid(os.getuid())
full_name = pw_data.pw_gecos.split(',')[0]
@@ -234,8 +90,6 @@ def main(argv=None):
if args.debug:
log.setLevel(logging.DEBUG)
- check_utils()
-
if args.outdir:
outdir = args.outdir
if not os.path.exists(outdir):
@@ -245,25 +99,16 @@ def main(argv=None):
try:
log.debug("Storing email parts in %s", outdir)
- html_report = images = None
- if args.html:
- html_report, images = scrape_html_report(args.html, outdir,
- args.phantomjs_args)
-
if args.to:
log.info("Sending email to %s", ', '.join(args.to))
if args.cc:
log.info("Copying to %s", ', '.join(args.cc))
if args.bcc:
log.info("Blind copying to %s", ', '.join(args.bcc))
- send_email(args.text, html_report, images, args.subject,
- args.to, args.cc, args.bcc)
+ send_email(args.text, args.subject, args.to, args.cc, args.bcc)
except subprocess.CalledProcessError as err:
log.error("%s, with output:\n%s", str(err), err.output.decode())
return 1
- except ReportError as err:
- log.error(err)
- return 1
finally:
if not args.outdir:
log.debug("Wiping %s", outdir)
diff --git a/scripts/contrib/patchreview.py b/scripts/contrib/patchreview.py
index 62c509f51c..bceae06561 100755
--- a/scripts/contrib/patchreview.py
+++ b/scripts/contrib/patchreview.py
@@ -1,14 +1,25 @@
#! /usr/bin/env python3
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
+import argparse
+import collections
+import json
+import os
+import os.path
+import pathlib
+import re
+import subprocess
+
# TODO
# - option to just list all broken files
# - test suite
# - validate signed-off-by
-status_values = ("accepted", "pending", "inappropriate", "backport", "submitted", "denied")
+status_values = ("accepted", "pending", "inappropriate", "backport", "submitted", "denied", "inactive-upstream")
class Result:
# Whether the patch has an Upstream-Status or not
@@ -33,20 +44,18 @@ def blame_patch(patch):
From a patch filename, return a list of "commit summary (author name <author
email>)" strings representing the history.
"""
- import subprocess
return subprocess.check_output(("git", "log",
"--follow", "--find-renames", "--diff-filter=A",
"--format=%s (%aN <%aE>)",
"--", patch)).decode("utf-8").splitlines()
-def patchreview(path, patches):
- import re, os.path
+def patchreview(patches):
# General pattern: start of line, optional whitespace, tag with optional
# hyphen or spaces, maybe a colon, some whitespace, then the value, all case
# insensitive.
sob_re = re.compile(r"^[\t ]*(Signed[-_ ]off[-_ ]by:?)[\t ]*(.+)", re.IGNORECASE | re.MULTILINE)
- status_re = re.compile(r"^[\t ]*(Upstream[-_ ]Status:?)[\t ]*(\w*)", re.IGNORECASE | re.MULTILINE)
+ status_re = re.compile(r"^[\t ]*(Upstream[-_ ]Status:?)[\t ]*([\w-]*)", re.IGNORECASE | re.MULTILINE)
cve_tag_re = re.compile(r"^[\t ]*(CVE:)[\t ]*(.*)", re.IGNORECASE | re.MULTILINE)
cve_re = re.compile(r"cve-[0-9]{4}-[0-9]{4,6}", re.IGNORECASE)
@@ -54,11 +63,10 @@ def patchreview(path, patches):
for patch in patches:
- fullpath = os.path.join(path, patch)
result = Result()
- results[fullpath] = result
+ results[patch] = result
- content = open(fullpath, encoding='ascii', errors='ignore').read()
+ content = open(patch, encoding='ascii', errors='ignore').read()
# Find the Signed-off-by tag
match = sob_re.search(content)
@@ -191,29 +199,56 @@ Patches in Pending state: %s""" % (total_patches,
def histogram(results):
from toolz import recipes, dicttoolz
import math
+
counts = recipes.countby(lambda r: r.upstream_status, results.values())
bars = dicttoolz.valmap(lambda v: "#" * int(math.ceil(float(v) / len(results) * 100)), counts)
for k in bars:
print("%-20s %s (%d)" % (k.capitalize() if k else "No status", bars[k], counts[k]))
+def find_layers(candidate):
+ # candidate can either be the path to a layer directly (eg meta-intel), or a
+ # repository that contains other layers (meta-arm). We can determine which by
+ # looking for a conf/layer.conf file. If that file exists then it's a layer,
+ # otherwise it's a repository of layers and we can assume they're called
+ # meta-*.
+
+ if (candidate / "conf" / "layer.conf").exists():
+ return [candidate.absolute()]
+ else:
+ return [d.absolute() for d in candidate.iterdir() if d.is_dir() and (d.name == "meta" or d.name.startswith("meta-"))]
+
+# TODO these don't actually handle dynamic-layers/
+
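+# Collect the tracked .patch/.diff files under each layer's recipes-* trees;
+# git ls-files means only files known to git are considered.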
+def gather_patches(layers):
+ patches = []
+ for directory in layers:
+ filenames = subprocess.check_output(("git", "-C", directory, "ls-files", "recipes-*/**/*.patch", "recipes-*/**/*.diff"), universal_newlines=True).split()
+ patches += [os.path.join(directory, f) for f in filenames]
+ return patches
+
+def count_recipes(layers):
+ count = 0
+ for directory in layers:
+ output = subprocess.check_output(["git", "-C", directory, "ls-files", "recipes-*/**/*.bb"], universal_newlines=True)
+ count += len(output.splitlines())
+ return count
if __name__ == "__main__":
- import argparse, subprocess, os
-
args = argparse.ArgumentParser(description="Patch Review Tool")
args.add_argument("-b", "--blame", action="store_true", help="show blame for malformed patches")
args.add_argument("-v", "--verbose", action="store_true", help="show per-patch results")
args.add_argument("-g", "--histogram", action="store_true", help="show patch histogram")
args.add_argument("-j", "--json", help="update JSON")
- args.add_argument("directory", help="directory to scan")
+ args.add_argument("directory", type=pathlib.Path, metavar="DIRECTORY", help="directory to scan (layer, or repository of layers)")
args = args.parse_args()
- patches = subprocess.check_output(("git", "-C", args.directory, "ls-files", "recipes-*/**/*.patch", "recipes-*/**/*.diff")).decode("utf-8").split()
- results = patchreview(args.directory, patches)
+ layers = find_layers(args.directory)
+ print(f"Found layers {' '.join((d.name for d in layers))}")
+ patches = gather_patches(layers)
+ results = patchreview(patches)
analyse(results, want_blame=args.blame, verbose=args.verbose)
if args.json:
- import json, os.path, collections
if os.path.isfile(args.json):
data = json.load(open(args.json))
else:
@@ -221,7 +256,11 @@ if __name__ == "__main__":
row = collections.Counter()
row["total"] = len(results)
- row["date"] = subprocess.check_output(["git", "-C", args.directory, "show", "-s", "--pretty=format:%cd", "--date=format:%s"]).decode("utf-8").strip()
+ row["date"] = subprocess.check_output(["git", "-C", args.directory, "show", "-s", "--pretty=format:%cd", "--date=format:%s"], universal_newlines=True).strip()
+ row["commit"] = subprocess.check_output(["git", "-C", args.directory, "rev-parse", "HEAD"], universal_newlines=True).strip()
+ row['commit_count'] = subprocess.check_output(["git", "-C", args.directory, "rev-list", "--count", "HEAD"], universal_newlines=True).strip()
+ row['recipe_count'] = count_recipes(layers)
+
for r in results.values():
if r.upstream_status in status_values:
row[r.upstream_status] += 1
@@ -231,7 +270,7 @@ if __name__ == "__main__":
row['malformed-sob'] += 1
data.append(row)
- json.dump(data, open(args.json, "w"))
+ json.dump(data, open(args.json, "w"), sort_keys=True, indent="\t")
if args.histogram:
print()
diff --git a/scripts/contrib/test_build_time.sh b/scripts/contrib/test_build_time.sh
index 23f238adf6..4012ac7ba7 100755
--- a/scripts/contrib/test_build_time.sh
+++ b/scripts/contrib/test_build_time.sh
@@ -97,7 +97,7 @@ if [ $? != 0 ] ; then
exit 251
fi
-if [ "$BB_ENV_EXTRAWHITE" != "" ] ; then
+if [ "BB_ENV_PASSTHROUGH_ADDITIONS" != "" ] ; then
echo "WARNING: you are running after sourcing the build environment script, this is not recommended"
fi
diff --git a/scripts/contrib/test_build_time_worker.sh b/scripts/contrib/test_build_time_worker.sh
index 478e8b0d03..a2879d2336 100755
--- a/scripts/contrib/test_build_time_worker.sh
+++ b/scripts/contrib/test_build_time_worker.sh
@@ -1,5 +1,7 @@
#!/bin/bash
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
# This is an example script to be used in conjunction with test_build_time.sh
diff --git a/scripts/contrib/verify-homepage.py b/scripts/contrib/verify-homepage.py
index 7bffa78e23..a90b5010bc 100755
--- a/scripts/contrib/verify-homepage.py
+++ b/scripts/contrib/verify-homepage.py
@@ -1,5 +1,7 @@
#!/usr/bin/env python3
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
# This script can be used to verify HOMEPAGE values for all recipes in
diff --git a/scripts/cp-noerror b/scripts/cp-noerror
index ab617c5d35..13a098eee0 100755
--- a/scripts/cp-noerror
+++ b/scripts/cp-noerror
@@ -1,5 +1,7 @@
#!/usr/bin/env python3
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
# Allow copying of $1 to $2 but if files in $1 disappear during the copy operation,
diff --git a/scripts/create-pull-request b/scripts/create-pull-request
index 8eefcf63a5..885105fab3 100755
--- a/scripts/create-pull-request
+++ b/scripts/create-pull-request
@@ -128,7 +128,7 @@ PROTO_RE="[a-z][a-z+]*://"
GIT_RE="\(^\($PROTO_RE\)\?\)\($USER_RE@\)\?\([^:/]*\)[:/]\(.*\)"
REMOTE_URL=${REMOTE_URL%.git}
REMOTE_REPO=$(echo $REMOTE_URL | sed "s#$GIT_RE#\5#")
-REMOTE_URL=$(echo $REMOTE_URL | sed "s#$GIT_RE#git://\4/\5#")
+REMOTE_URL=$(echo $REMOTE_URL | sed "s#$GIT_RE#https://\4/\5#")
if [ -z "$BRANCH" ]; then
BRANCH=$(git branch | grep -e "^\* " | cut -d' ' -f2)
@@ -149,13 +149,10 @@ fi
WEB_URL=""
case "$REMOTE_URL" in
*git.yoctoproject.org*)
- WEB_URL="http://git.yoctoproject.org/cgit.cgi/$REMOTE_REPO/log/?h=$BRANCH"
- ;;
- *git.pokylinux.org*)
- WEB_URL="http://git.pokylinux.org/cgit.cgi/$REMOTE_REPO/log/?h=$BRANCH"
+ WEB_URL="https://git.yoctoproject.org/$REMOTE_REPO/log/?h=$BRANCH"
;;
*git.openembedded.org*)
- WEB_URL="http://cgit.openembedded.org/$REMOTE_REPO/log/?h=$BRANCH"
+ WEB_URL="https://git.openembedded.org/$REMOTE_REPO/log/?h=$BRANCH"
;;
*github.com*)
WEB_URL="https://github.com/$REMOTE_REPO/tree/$BRANCH"
diff --git a/scripts/cross-intercept/ar b/scripts/cross-intercept/ar
new file mode 120000
index 0000000000..bc68ffd7a2
--- /dev/null
+++ b/scripts/cross-intercept/ar
@@ -0,0 +1 @@
+../native-intercept/ar \ No newline at end of file
diff --git a/scripts/crosstap b/scripts/crosstap
index 40856bc208..5aa72f14d4 100755
--- a/scripts/crosstap
+++ b/scripts/crosstap
@@ -353,7 +353,7 @@ bitbake workspace.
Anything after -- option is passed directly to stap.
-Legacy script invocation style supported but depreciated:
+Legacy script invocation style supported but deprecated:
%prog <user@hostname> <sytemtap-script> [systemtap options]
To enable most out of systemtap the following site.conf or local.conf
@@ -365,13 +365,13 @@ IMAGE_FSTYPES_DEBUGFS = "tar.bz2"
USER_CLASSES += "image-combined-dbg"
# enables kernel debug symbols
-KERNEL_EXTRA_FEATURES_append = " features/debug/debug-kernel.scc"
+KERNEL_EXTRA_FEATURES:append = " features/debug/debug-kernel.scc"
# minimal, just run-time systemtap configuration in target image
-PACKAGECONFIG_pn-systemtap = "monitor"
+PACKAGECONFIG:pn-systemtap = "monitor"
# add systemtap run-time into target image if it is not there yet
-IMAGE_INSTALL_append = " systemtap"
+IMAGE_INSTALL:append = " systemtap"
"""
option_parser = optparse.OptionParser(usage=usage)
diff --git a/scripts/devtool b/scripts/devtool
index 8a4f41bc37..60ea3e8298 100755
--- a/scripts/devtool
+++ b/scripts/devtool
@@ -100,10 +100,11 @@ def read_workspace():
_enable_workspace_layer(config.workspace_path, config, basepath)
logger.debug('Reading workspace in %s' % config.workspace_path)
- externalsrc_re = re.compile(r'^EXTERNALSRC(_pn-([^ =]+))? *= *"([^"]*)"$')
+ externalsrc_re = re.compile(r'^EXTERNALSRC(:pn-([^ =]+))? *= *"([^"]*)"$')
for fn in glob.glob(os.path.join(config.workspace_path, 'appends', '*.bbappend')):
with open(fn, 'r') as f:
pnvalues = {}
+ pn = None
for line in f:
res = externalsrc_re.match(line.rstrip())
if res:
@@ -123,6 +124,9 @@ def read_workspace():
elif line.startswith('# srctreebase: '):
pnvalues['srctreebase'] = line.split(':', 1)[1].strip()
if pnvalues:
+ if not pn:
+ raise DevtoolError("Found *.bbappend in %s, but could not determine EXTERNALSRC:pn-*. "
+ "Maybe still using old syntax?" % config.workspace_path)
if not pnvalues.get('srctreebase', None):
pnvalues['srctreebase'] = pnvalues['srctree']
logger.debug('Found recipe %s' % pnvalues)
@@ -133,17 +137,27 @@ def create_workspace(args, config, basepath, workspace):
workspacedir = os.path.abspath(args.layerpath)
else:
workspacedir = os.path.abspath(os.path.join(basepath, 'workspace'))
- _create_workspace(workspacedir, config, basepath)
+ layerseries = None
+ if args.layerseries:
+ layerseries = args.layerseries
+ _create_workspace(workspacedir, config, basepath, layerseries)
if not args.create_only:
_enable_workspace_layer(workspacedir, config, basepath)
-def _create_workspace(workspacedir, config, basepath):
+def _create_workspace(workspacedir, config, basepath, layerseries=None):
import bb
confdir = os.path.join(workspacedir, 'conf')
if os.path.exists(os.path.join(confdir, 'layer.conf')):
logger.info('Specified workspace already set up, leaving as-is')
else:
+ if not layerseries:
+ tinfoil = setup_tinfoil(config_only=True, basepath=basepath)
+ try:
+ layerseries = tinfoil.config_data.getVar('LAYERSERIES_CORENAMES')
+ finally:
+ tinfoil.shutdown()
+
# Add a config file
bb.utils.mkdirhier(confdir)
with open(os.path.join(confdir, 'layer.conf'), 'w') as f:
@@ -155,7 +169,7 @@ def _create_workspace(workspacedir, config, basepath):
f.write('BBFILE_PATTERN_workspacelayer = "^$' + '{LAYERDIR}/"\n')
f.write('BBFILE_PATTERN_IGNORE_EMPTY_workspacelayer = "1"\n')
f.write('BBFILE_PRIORITY_workspacelayer = "99"\n')
- f.write('LAYERSERIES_COMPAT_workspacelayer = "${LAYERSERIES_COMPAT_core}"\n')
+ f.write('LAYERSERIES_COMPAT_workspacelayer = "%s"\n' % layerseries)
# Add a README file
with open(os.path.join(workspacedir, 'README'), 'w') as f:
f.write('This layer was created by the OpenEmbedded devtool utility in order to\n')
@@ -285,8 +299,9 @@ def main():
return 2
# Search BBPATH first to allow layers to override plugins in scripts_path
- for path in global_args.bbpath.split(':') + [scripts_path]:
- pluginpath = os.path.join(path, 'lib', 'devtool')
+ pluginpaths = [os.path.join(path, 'lib', 'devtool') for path in global_args.bbpath.split(':') + [scripts_path]]
+ context.pluginpaths = pluginpaths
+ for pluginpath in pluginpaths:
scriptutils.load_plugins(logger, plugins, pluginpath)
subparsers = parser.add_subparsers(dest="subparser_name", title='subcommands', metavar='<subcommand>')
@@ -305,6 +320,7 @@ def main():
description='Sets up a new workspace. NOTE: other devtool subcommands will create a workspace automatically as needed, so you only need to use %(prog)s if you want to specify where the workspace should be located.',
group='advanced')
parser_create_workspace.add_argument('layerpath', nargs='?', help='Path in which the workspace layer should be created')
+ parser_create_workspace.add_argument('--layerseries', help='Layer series the workspace should be set to be compatible with')
parser_create_workspace.add_argument('--create-only', action="store_true", help='Only create the workspace layer, do not alter configuration')
parser_create_workspace.set_defaults(func=create_workspace, no_workspace=True)
@@ -314,10 +330,10 @@ def main():
args = parser.parse_args(unparsed_args, namespace=global_args)
- if not getattr(args, 'no_workspace', False):
- read_workspace()
-
try:
+ if not getattr(args, 'no_workspace', False):
+ read_workspace()
+
ret = args.func(args, config, basepath, workspace)
except DevtoolError as err:
if str(err):
diff --git a/scripts/esdk-tools/devtool b/scripts/esdk-tools/devtool
new file mode 120000
index 0000000000..176a01ca68
--- /dev/null
+++ b/scripts/esdk-tools/devtool
@@ -0,0 +1 @@
+../devtool \ No newline at end of file
diff --git a/scripts/esdk-tools/oe-find-native-sysroot b/scripts/esdk-tools/oe-find-native-sysroot
new file mode 120000
index 0000000000..d3493f3310
--- /dev/null
+++ b/scripts/esdk-tools/oe-find-native-sysroot
@@ -0,0 +1 @@
+../oe-find-native-sysroot \ No newline at end of file
diff --git a/scripts/esdk-tools/recipetool b/scripts/esdk-tools/recipetool
new file mode 120000
index 0000000000..60a95dd936
--- /dev/null
+++ b/scripts/esdk-tools/recipetool
@@ -0,0 +1 @@
+../recipetool \ No newline at end of file
diff --git a/scripts/esdk-tools/runqemu b/scripts/esdk-tools/runqemu
new file mode 120000
index 0000000000..ae7e7ad7c2
--- /dev/null
+++ b/scripts/esdk-tools/runqemu
@@ -0,0 +1 @@
+../runqemu \ No newline at end of file
diff --git a/scripts/esdk-tools/runqemu-addptable2image b/scripts/esdk-tools/runqemu-addptable2image
new file mode 120000
index 0000000000..afcd00e79d
--- /dev/null
+++ b/scripts/esdk-tools/runqemu-addptable2image
@@ -0,0 +1 @@
+../runqemu-addptable2image \ No newline at end of file
diff --git a/scripts/esdk-tools/runqemu-export-rootfs b/scripts/esdk-tools/runqemu-export-rootfs
new file mode 120000
index 0000000000..a26fcf6110
--- /dev/null
+++ b/scripts/esdk-tools/runqemu-export-rootfs
@@ -0,0 +1 @@
+../runqemu-export-rootfs \ No newline at end of file
diff --git a/scripts/esdk-tools/runqemu-extract-sdk b/scripts/esdk-tools/runqemu-extract-sdk
new file mode 120000
index 0000000000..cc858aaad5
--- /dev/null
+++ b/scripts/esdk-tools/runqemu-extract-sdk
@@ -0,0 +1 @@
+../runqemu-extract-sdk \ No newline at end of file
diff --git a/scripts/esdk-tools/runqemu-gen-tapdevs b/scripts/esdk-tools/runqemu-gen-tapdevs
new file mode 120000
index 0000000000..dbdf79134c
--- /dev/null
+++ b/scripts/esdk-tools/runqemu-gen-tapdevs
@@ -0,0 +1 @@
+../runqemu-gen-tapdevs \ No newline at end of file
diff --git a/scripts/esdk-tools/runqemu-ifdown b/scripts/esdk-tools/runqemu-ifdown
new file mode 120000
index 0000000000..0097693ca3
--- /dev/null
+++ b/scripts/esdk-tools/runqemu-ifdown
@@ -0,0 +1 @@
+../runqemu-ifdown \ No newline at end of file
diff --git a/scripts/esdk-tools/runqemu-ifup b/scripts/esdk-tools/runqemu-ifup
new file mode 120000
index 0000000000..41026d2c0a
--- /dev/null
+++ b/scripts/esdk-tools/runqemu-ifup
@@ -0,0 +1 @@
+../runqemu-ifup \ No newline at end of file
diff --git a/scripts/esdk-tools/wic b/scripts/esdk-tools/wic
new file mode 120000
index 0000000000..a9d908aa25
--- /dev/null
+++ b/scripts/esdk-tools/wic
@@ -0,0 +1 @@
+../wic \ No newline at end of file
diff --git a/scripts/gen-lockedsig-cache b/scripts/gen-lockedsig-cache
index cd8f9a4356..023015ec41 100755
--- a/scripts/gen-lockedsig-cache
+++ b/scripts/gen-lockedsig-cache
@@ -1,5 +1,8 @@
#!/usr/bin/env python3
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
@@ -94,8 +97,7 @@ print("Gathering file list took %.1fs" % elapsed)
print('Processing files')
for f in files:
sys.stdout.write('Processing %s... ' % f)
- _, ext = os.path.splitext(f)
- if not ext in ['.tgz', '.siginfo', '.sig']:
+ if not f.endswith(('.tar.zst', '.siginfo', '.sig')):
# Most likely a temp file, skip it
print('skipping')
continue
diff --git a/scripts/git b/scripts/git
new file mode 100755
index 0000000000..689adbf9dd
--- /dev/null
+++ b/scripts/git
@@ -0,0 +1,30 @@
+#!/usr/bin/env python3
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+# Wrapper around 'git' that doesn't think we are root
+
+import os
+import shutil
+import sys
+
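+# ask pseudo to unload itself so git runs outside the fake-root environment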
+os.environ['PSEUDO_UNLOAD'] = '1'
+
+# calculate path to the real 'git'
+path = os.environ['PATH']
+# we need to remove our path but also any other copy of this script which
+# may be present, e.g. eSDK.
+replacements = [os.path.dirname(sys.argv[0])]
+for p in path.split(":"):
+ if p.endswith("/scripts"):
+ replacements.append(p)
+for r in replacements:
+ path = path.replace(r, '/ignoreme')
+real_git = shutil.which('git', path=path)
+
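+# hand over to the real git, replacing this process; a bare invocation gets a
+# clean argv, otherwise the original arguments pass through unchanged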
+if len(sys.argv) == 1:
+ os.execl(real_git, 'git')
+
+os.execv(real_git, sys.argv)
diff --git a/scripts/install-buildtools b/scripts/install-buildtools
index 8554a5db67..2218f3ffac 100755
--- a/scripts/install-buildtools
+++ b/scripts/install-buildtools
@@ -57,9 +57,9 @@ logger = scriptutils.logger_create(PROGNAME, stream=sys.stdout)
DEFAULT_INSTALL_DIR = os.path.join(os.path.split(scripts_path)[0],'buildtools')
DEFAULT_BASE_URL = 'http://downloads.yoctoproject.org/releases/yocto'
-DEFAULT_RELEASE = 'yocto-3.2_M3'
-DEFAULT_INSTALLER_VERSION = '3.1+snapshot'
-DEFAULT_BUILDDATE = '20200923'
+DEFAULT_RELEASE = 'yocto-4.1'
+DEFAULT_INSTALLER_VERSION = '4.1'
+DEFAULT_BUILDDATE = '202110XX'
# Python version sanity check
if not (sys.version_info.major == 3 and sys.version_info.minor >= 4):
@@ -154,6 +154,8 @@ def main():
group.add_argument('--without-extended-buildtools', action='store_false',
dest='with_extended_buildtools',
help='disable extended buildtools (traditional buildtools tarball)')
+ group.add_argument('--make-only', action='store_true',
+ help='only install make tarball')
group = parser.add_mutually_exclusive_group()
group.add_argument('-c', '--check', help='enable checksum validation',
default=True, action='store_true')
@@ -170,6 +172,9 @@ def main():
args = parser.parse_args()
+ if args.make_only:
+ args.with_extended_buildtools = False
+
if args.debug:
logger.setLevel(logging.DEBUG)
elif args.quiet:
@@ -197,7 +202,10 @@ def main():
if not args.build_date:
logger.error("Milestone installers require --build-date")
else:
- if args.with_extended_buildtools:
+ if args.make_only:
+ filename = "%s-buildtools-make-nativesdk-standalone-%s-%s.sh" % (
+ arch, args.installer_version, args.build_date)
+ elif args.with_extended_buildtools:
filename = "%s-buildtools-extended-nativesdk-standalone-%s-%s.sh" % (
arch, args.installer_version, args.build_date)
else:
@@ -207,6 +215,8 @@ def main():
buildtools_url = "%s/milestones/%s/buildtools/%s" % (base_url, args.release, safe_filename)
# regular release SDK
else:
+ if args.make_only:
+ filename = "%s-buildtools-make-nativesdk-standalone-%s.sh" % (arch, args.installer_version)
- if args.with_extended_buildtools:
+ elif args.with_extended_buildtools:
filename = "%s-buildtools-extended-nativesdk-standalone-%s.sh" % (arch, args.installer_version)
else:
@@ -303,7 +313,9 @@ def main():
if args.with_extended_buildtools and not m:
logger.info("Ignoring --with-extended-buildtools as filename "
"does not contain 'extended'")
- if args.with_extended_buildtools and m:
+ if args.make_only:
+ tool = 'make'
+ elif args.with_extended_buildtools and m:
tool = 'gcc'
else:
tool = 'tar'
diff --git a/scripts/lib/argparse_oe.py b/scripts/lib/argparse_oe.py
index 94a4ac5011..176b732bbc 100644
--- a/scripts/lib/argparse_oe.py
+++ b/scripts/lib/argparse_oe.py
@@ -1,4 +1,6 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
diff --git a/scripts/lib/build_perf/report.py b/scripts/lib/build_perf/report.py
index 4e8e2a8a93..ab77424cc7 100644
--- a/scripts/lib/build_perf/report.py
+++ b/scripts/lib/build_perf/report.py
@@ -4,7 +4,8 @@
# SPDX-License-Identifier: GPL-2.0-only
#
"""Handling of build perf test reports"""
-from collections import OrderedDict, Mapping, namedtuple
+from collections import OrderedDict, namedtuple
+from collections.abc import Mapping
from datetime import datetime, timezone
from numbers import Number
from statistics import mean, stdev, variance
diff --git a/scripts/lib/buildstats.py b/scripts/lib/buildstats.py
index c69b5bf4d7..6db60d5bcf 100644
--- a/scripts/lib/buildstats.py
+++ b/scripts/lib/buildstats.py
@@ -8,7 +8,7 @@ import json
import logging
import os
import re
-from collections import namedtuple,OrderedDict
+from collections import namedtuple
from statistics import mean
@@ -79,8 +79,8 @@ class BSTask(dict):
return self['rusage']['ru_oublock']
@classmethod
- def from_file(cls, buildstat_file):
- """Read buildstat text file"""
+ def from_file(cls, buildstat_file, fallback_end=0):
+ """Read buildstat text file. fallback_end is an optional end time for tasks that are not recorded as finishing."""
bs_task = cls()
log.debug("Reading task buildstats from %s", buildstat_file)
end_time = None
@@ -108,7 +108,10 @@ class BSTask(dict):
bs_task[ru_type][ru_key] = val
elif key == 'Status':
bs_task['status'] = val
- if end_time is not None and start_time is not None:
+ # If the task didn't finish, fill in the fallback end time if specified
+ if start_time and not end_time and fallback_end:
+ end_time = fallback_end
+ if start_time and end_time:
bs_task['elapsed_time'] = end_time - start_time
else:
raise BSError("{} looks like a invalid buildstats file".format(buildstat_file))
@@ -226,25 +229,44 @@ class BuildStats(dict):
epoch = match.group('epoch')
return name, epoch, version, revision
+ @staticmethod
+ def parse_top_build_stats(path):
+ """
+ Parse the top-level build_stats file for build-wide start and duration.
+ """
+ start = elapsed = 0
+ with open(path) as fobj:
+ for line in fobj.readlines():
+ key, val = line.split(':', 1)
+ val = val.strip()
+ if key == 'Build Started':
+ start = float(val)
+ elif key == "Elapsed time":
+ elapsed = float(val.split()[0])
+ return start, elapsed
+
@classmethod
def from_dir(cls, path):
"""Load buildstats from a buildstats directory"""
- if not os.path.isfile(os.path.join(path, 'build_stats')):
+ top_stats = os.path.join(path, 'build_stats')
+ if not os.path.isfile(top_stats):
raise BSError("{} does not look like a buildstats directory".format(path))
log.debug("Reading buildstats directory %s", path)
-
buildstats = cls()
+ build_started, build_elapsed = buildstats.parse_top_build_stats(top_stats)
+ build_end = build_started + build_elapsed
+
subdirs = os.listdir(path)
for dirname in subdirs:
recipe_dir = os.path.join(path, dirname)
- if not os.path.isdir(recipe_dir):
+ if dirname == "reduced_proc_pressure" or not os.path.isdir(recipe_dir):
continue
name, epoch, version, revision = cls.split_nevr(dirname)
bsrecipe = BSRecipe(name, epoch, version, revision)
for task in os.listdir(recipe_dir):
bsrecipe.tasks[task] = BSTask.from_file(
- os.path.join(recipe_dir, task))
+ os.path.join(recipe_dir, task), build_end)
if name in buildstats:
raise BSError("Cannot handle multiple versions of the same "
"package ({})".format(name))
diff --git a/scripts/lib/checklayer/__init__.py b/scripts/lib/checklayer/__init__.py
index fe545607bb..62ecdfe390 100644
--- a/scripts/lib/checklayer/__init__.py
+++ b/scripts/lib/checklayer/__init__.py
@@ -16,6 +16,7 @@ class LayerType(Enum):
BSP = 0
DISTRO = 1
SOFTWARE = 2
+ CORE = 3
ERROR_NO_LAYER_CONF = 98
ERROR_BSP_DISTRO = 99
@@ -43,7 +44,7 @@ def _get_layer_collections(layer_path, lconf=None, data=None):
ldata.setVar('LAYERDIR', layer_path)
try:
- ldata = bb.parse.handle(lconf, ldata, include=True)
+ ldata = bb.parse.handle(lconf, ldata, include=True, baseconfig=True)
except:
raise RuntimeError("Parsing of layer.conf from layer: %s failed" % layer_path)
ldata.expandVarref('LAYERDIR')
@@ -106,7 +107,13 @@ def _detect_layer(layer_path):
if distros:
is_distro = True
- if is_bsp and is_distro:
+ layer['collections'] = _get_layer_collections(layer['path'])
+
+ if layer_name == "meta" and "core" in layer['collections']:
+ layer['type'] = LayerType.CORE
+ layer['conf']['machines'] = machines
+ layer['conf']['distros'] = distros
+ elif is_bsp and is_distro:
layer['type'] = LayerType.ERROR_BSP_DISTRO
elif is_bsp:
layer['type'] = LayerType.BSP
@@ -117,8 +124,6 @@ def _detect_layer(layer_path):
else:
layer['type'] = LayerType.SOFTWARE
- layer['collections'] = _get_layer_collections(layer['path'])
-
return layer
def detect_layers(layer_directories, no_auto):
@@ -146,7 +151,7 @@ def detect_layers(layer_directories, no_auto):
return layers
-def _find_layer_depends(depend, layers):
+def _find_layer(depend, layers):
for layer in layers:
if 'collections' not in layer:
continue
@@ -156,7 +161,28 @@ def _find_layer_depends(depend, layers):
return layer
return None
-def add_layer_dependencies(bblayersconf, layer, layers, logger):
+def sanity_check_layers(layers, logger):
+ """
+ Check that we didn't find duplicate collection names, as the layer that will
+ be used is non-deterministic. The precise check is duplicate collections
+ with different patterns, as the same pattern being repeated won't cause
+ problems.
+ """
+ import collections
+
+ passed = True
+ seen = collections.defaultdict(set)
+ for layer in layers:
+ for name, data in layer.get("collections", {}).items():
+ seen[name].add(data["pattern"])
+
+ for name, patterns in seen.items():
+ if len(patterns) > 1:
+ passed = False
+ logger.error("Collection %s found multiple times: %s" % (name, ", ".join(patterns)))
+ return passed
+
+def get_layer_dependencies(layer, layers, logger):
def recurse_dependencies(depends, layer, layers, logger, ret = []):
logger.debug('Processing dependencies %s for layer %s.' % \
(depends, layer['name']))
@@ -166,7 +192,7 @@ def add_layer_dependencies(bblayersconf, layer, layers, logger):
if depend == 'core':
continue
- layer_depend = _find_layer_depends(depend, layers)
+ layer_depend = _find_layer(depend, layers)
if not layer_depend:
logger.error('Layer %s depends on %s and isn\'t found.' % \
(layer['name'], depend))
@@ -203,6 +229,11 @@ def add_layer_dependencies(bblayersconf, layer, layers, logger):
layer_depends = recurse_dependencies(depends, layer, layers, logger, layer_depends)
# Note: [] (empty) is allowed, None is not!
+ return layer_depends
+
+def add_layer_dependencies(bblayersconf, layer, layers, logger):
+
+ layer_depends = get_layer_dependencies(layer, layers, logger)
if layer_depends is None:
return False
else:
@@ -256,7 +287,7 @@ def check_command(error_msg, cmd, cwd=None):
raise RuntimeError(msg)
return output
-def get_signatures(builddir, failsafe=False, machine=None):
+def get_signatures(builddir, failsafe=False, machine=None, extravars=None):
import re
# some recipes needs to be excluded like meta-world-pkgdata
@@ -267,13 +298,16 @@ def get_signatures(builddir, failsafe=False, machine=None):
sigs = {}
tune2tasks = {}
- cmd = 'BB_ENV_EXTRAWHITE="$BB_ENV_EXTRAWHITE BB_SIGNATURE_HANDLER" BB_SIGNATURE_HANDLER="OEBasicHash" '
+ cmd = 'BB_ENV_PASSTHROUGH_ADDITIONS="$BB_ENV_PASSTHROUGH_ADDITIONS BB_SIGNATURE_HANDLER" BB_SIGNATURE_HANDLER="OEBasicHash" '
+ if extravars:
+ cmd += extravars
+ cmd += ' '
if machine:
cmd += 'MACHINE=%s ' % machine
cmd += 'bitbake '
if failsafe:
cmd += '-k '
- cmd += '-S none world'
+ cmd += '-S lockedsigs world'
sigs_file = os.path.join(builddir, 'locked-sigs.inc')
if os.path.exists(sigs_file):
os.unlink(sigs_file)
@@ -290,8 +324,8 @@ def get_signatures(builddir, failsafe=False, machine=None):
else:
raise
- sig_regex = re.compile("^(?P<task>.*:.*):(?P<hash>.*) .$")
- tune_regex = re.compile("(^|\s)SIGGEN_LOCKEDSIGS_t-(?P<tune>\S*)\s*=\s*")
+ sig_regex = re.compile(r"^(?P<task>.*:.*):(?P<hash>.*) .$")
+ tune_regex = re.compile(r"(^|\s)SIGGEN_LOCKEDSIGS_t-(?P<tune>\S*)\s*=\s*")
current_tune = None
with open(sigs_file, 'r') as f:
for line in f.readlines():
diff --git a/scripts/lib/checklayer/cases/bsp.py b/scripts/lib/checklayer/cases/bsp.py
index 7fd56f5d36..b76163fb56 100644
--- a/scripts/lib/checklayer/cases/bsp.py
+++ b/scripts/lib/checklayer/cases/bsp.py
@@ -11,7 +11,7 @@ from checklayer.case import OECheckLayerTestCase
class BSPCheckLayer(OECheckLayerTestCase):
@classmethod
def setUpClass(self):
- if self.tc.layer['type'] != LayerType.BSP:
+ if self.tc.layer['type'] not in (LayerType.BSP, LayerType.CORE):
raise unittest.SkipTest("BSPCheckLayer: Layer %s isn't BSP one." %\
self.tc.layer['name'])
@@ -153,7 +153,7 @@ class BSPCheckLayer(OECheckLayerTestCase):
# do_build can be ignored: it is know to have
# different signatures in some cases, for example in
# the allarch ca-certificates due to RDEPENDS=openssl.
- # That particular dependency is whitelisted via
+ # That particular dependency is marked via
# SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS, but still shows up
# in the sstate signature hash because filtering it
# out would be hard and running do_build multiple
diff --git a/scripts/lib/checklayer/cases/common.py b/scripts/lib/checklayer/cases/common.py
index b82304e361..97b16f78c8 100644
--- a/scripts/lib/checklayer/cases/common.py
+++ b/scripts/lib/checklayer/cases/common.py
@@ -6,15 +6,19 @@
import glob
import os
import unittest
+import re
from checklayer import get_signatures, LayerType, check_command, get_depgraph, compare_signatures
from checklayer.case import OECheckLayerTestCase
class CommonCheckLayer(OECheckLayerTestCase):
def test_readme(self):
+ if self.tc.layer['type'] == LayerType.CORE:
+ raise unittest.SkipTest("Core layer's README is top level")
+
# The top-level README file may have a suffix (like README.rst or README.txt).
readme_files = glob.glob(os.path.join(self.tc.layer['path'], '[Rr][Ee][Aa][Dd][Mm][Ee]*'))
self.assertTrue(len(readme_files) > 0,
- msg="Layer doesn't contains README file.")
+ msg="Layer doesn't contain a README file.")
# There might be more than one file matching the file pattern above
# (for example, README.rst and README-COPYING.rst). The one with the shortest
@@ -26,6 +30,16 @@ class CommonCheckLayer(OECheckLayerTestCase):
self.assertTrue(data,
msg="Layer contains a README file but it is empty.")
+ # If a layer's README references another README, then the checks below are not valid
+ if re.search('README', data, re.IGNORECASE):
+ return
+
+ self.assertIn('maintainer', data.lower())
+ self.assertIn('patch', data.lower())
+ # Check that there is an email address in the README
+ email_regex = re.compile(r"[^@]+@[^@]+")
+ self.assertTrue(email_regex.match(data))
+
def test_parse(self):
check_command('Layer %s failed to parse.' % self.tc.layer['name'],
'bitbake -p')
@@ -43,6 +57,36 @@ class CommonCheckLayer(OECheckLayerTestCase):
'''
get_signatures(self.td['builddir'], failsafe=False)
+ def test_world_inherit_class(self):
+ '''
+ This also does "bitbake -S none world" along with inheriting "yocto-check-layer"
+ class, which can do additional per-recipe test cases.
+ '''
+ msg = []
+ try:
+ get_signatures(self.td['builddir'], failsafe=False, machine=None, extravars='BB_ENV_PASSTHROUGH_ADDITIONS="$BB_ENV_PASSTHROUGH_ADDITIONS INHERIT" INHERIT="yocto-check-layer"')
+ except RuntimeError as ex:
+ msg.append(str(ex))
+ if msg:
+ msg.insert(0, 'Layer %s failed additional checks from yocto-check-layer.bbclass\nSee the log below for specific recipe parsing errors:\n' % \
+ self.tc.layer['name'])
+ self.fail('\n'.join(msg))
+
+ @unittest.expectedFailure
+ def test_patches_upstream_status(self):
+ import sys
+ sys.path.append(os.path.join(sys.path[0], '../../../../meta/lib/'))
+ import oe.qa
+ patches = []
+ for dirpath, dirs, files in os.walk(self.tc.layer['path']):
+ for filename in files:
+ if filename.endswith(".patch"):
+ ppath = os.path.join(dirpath, filename)
+ if oe.qa.check_upstream_status(ppath):
+ patches.append(ppath)
+ self.assertEqual(len(patches), 0, \
+ msg="Found the following patches with malformed or missing upstream status:\n%s" % '\n'.join([str(patch) for patch in patches]))
+
def test_signatures(self):
if self.tc.layer['type'] == LayerType.SOFTWARE and \
not self.tc.test_software_layer_signatures:
diff --git a/scripts/lib/checklayer/cases/distro.py b/scripts/lib/checklayer/cases/distro.py
index f0bee5493c..a35332451c 100644
--- a/scripts/lib/checklayer/cases/distro.py
+++ b/scripts/lib/checklayer/cases/distro.py
@@ -11,7 +11,7 @@ from checklayer.case import OECheckLayerTestCase
class DistroCheckLayer(OECheckLayerTestCase):
@classmethod
def setUpClass(self):
- if self.tc.layer['type'] != LayerType.DISTRO:
+ if self.tc.layer['type'] not in (LayerType.DISTRO, LayerType.CORE):
raise unittest.SkipTest("DistroCheckLayer: Layer %s isn't Distro one." %\
self.tc.layer['name'])
diff --git a/scripts/lib/devtool/__init__.py b/scripts/lib/devtool/__init__.py
index 702db669de..6133c1c5b4 100644
--- a/scripts/lib/devtool/__init__.py
+++ b/scripts/lib/devtool/__init__.py
@@ -78,12 +78,15 @@ def exec_fakeroot(d, cmd, **kwargs):
"""Run a command under fakeroot (pseudo, in fact) so that it picks up the appropriate file permissions"""
# Grab the command and check it actually exists
fakerootcmd = d.getVar('FAKEROOTCMD')
+ fakerootenv = d.getVar('FAKEROOTENV')
+ return exec_fakeroot_no_d(fakerootcmd, fakerootenv, cmd, **kwargs)
+
+def exec_fakeroot_no_d(fakerootcmd, fakerootenv, cmd, **kwargs):
if not os.path.exists(fakerootcmd):
logger.error('pseudo executable %s could not be found - have you run a build yet? pseudo-native should install this and if you have run any build then that should have been built' % fakerootcmd)
return 2
# Set up the appropriate environment
newenv = dict(os.environ)
- fakerootenv = d.getVar('FAKEROOTENV')
for varvalue in fakerootenv.split():
if '=' in varvalue:
splitval = varvalue.split('=', 1)
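A minimal sketch of what this loop does with FAKEROOTENV, which is a space-separated list of VAR=value assignments (the values below are illustrative):

    import os
    fakerootenv = 'PSEUDO_PREFIX=/sysroot/usr PSEUDO_NOSYMLINKEXP=1'
    newenv = dict(os.environ)
    for varvalue in fakerootenv.split():
        if '=' in varvalue:
            var, value = varvalue.split('=', 1)
            newenv[var] = value  # pseudo settings now visible to the child process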
@@ -233,6 +236,28 @@ def setup_git_repo(repodir, version, devbranch, basetag='devtool-base', d=None):
bb.process.run('git checkout -b %s' % devbranch, cwd=repodir)
bb.process.run('git tag -f %s' % basetag, cwd=repodir)
+ # if recipe unpacks another git repo inside S, we need to declare it as a regular git submodule now,
+ # so we will be able to tag branches on it and extract patches when doing finish/update on the recipe
+ stdout, _ = bb.process.run("git status --porcelain", cwd=repodir)
+ found = False
+ for line in stdout.splitlines():
+ if line.endswith("/"):
+ new_dir = line.split()[1]
+ for root, dirs, files in os.walk(os.path.join(repodir, new_dir)):
+ if ".git" in dirs + files:
+ (stdout, _) = bb.process.run('git remote', cwd=root)
+ remote = stdout.splitlines()[0]
+ (stdout, _) = bb.process.run('git remote get-url %s' % remote, cwd=root)
+ remote_url = stdout.splitlines()[0]
+ bb.process.run('git submodule add %s %s' % (remote_url, os.path.relpath(root, os.path.join(root, ".."))), cwd=os.path.join(root, ".."))
+ found = True
+ if found:
+ oe.patch.GitApplyTree.commitIgnored("Add additional submodule from SRC_URI", dir=os.path.join(root, ".."), d=d)
+ found = False
+ if os.path.exists(os.path.join(repodir, '.gitmodules')):
+ bb.process.run('git submodule foreach --recursive "git tag -f %s"' % basetag, cwd=repodir)
+
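The loop above keys off `git status --porcelain` output: untracked directories appear as lines ending in '/', e.g. (hypothetical repository):

    ?? vendored-lib/

line.split()[1] then yields 'vendored-lib/', the candidate directory that is walked in search of a nested .git.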
def recipe_to_append(recipefile, config, wildcard=False):
"""
Convert a recipe file to a bbappend file path within the workspace.
diff --git a/scripts/lib/devtool/build_image.py b/scripts/lib/devtool/build_image.py
index 9388abbacf..980f90ddd6 100644
--- a/scripts/lib/devtool/build_image.py
+++ b/scripts/lib/devtool/build_image.py
@@ -113,7 +113,7 @@ def build_image_task(config, basepath, workspace, image, add_packages=None, task
with open(appendfile, 'w') as afile:
if packages:
# include packages from workspace recipes into the image
- afile.write('IMAGE_INSTALL_append = " %s"\n' % ' '.join(packages))
+ afile.write('IMAGE_INSTALL:append = " %s"\n' % ' '.join(packages))
if not task:
logger.info('Building image %s with the following '
'additional packages: %s', image, ' '.join(packages))
diff --git a/scripts/lib/devtool/build_sdk.py b/scripts/lib/devtool/build_sdk.py
index 6fe02fff2a..1cd4831d2b 100644
--- a/scripts/lib/devtool/build_sdk.py
+++ b/scripts/lib/devtool/build_sdk.py
@@ -13,7 +13,7 @@ import shutil
import errno
import sys
import tempfile
-from devtool import exec_build_env_command, setup_tinfoil, parse_recipe, DevtoolError
+from devtool import DevtoolError
from devtool import build_image
logger = logging.getLogger('devtool')
diff --git a/scripts/lib/devtool/deploy.py b/scripts/lib/devtool/deploy.py
index e5af2c95ae..b5ca8f2c2f 100644
--- a/scripts/lib/devtool/deploy.py
+++ b/scripts/lib/devtool/deploy.py
@@ -16,7 +16,7 @@ import bb.utils
import argparse_oe
import oe.types
-from devtool import exec_fakeroot, setup_tinfoil, check_workspace_recipe, DevtoolError
+from devtool import exec_fakeroot_no_d, setup_tinfoil, check_workspace_recipe, DevtoolError
logger = logging.getLogger('devtool')
@@ -133,16 +133,38 @@ def _prepare_remote_script(deploy, verbose=False, dryrun=False, undeployall=Fals
return '\n'.join(lines)
-
-
def deploy(args, config, basepath, workspace):
"""Entry point for the devtool 'deploy' subcommand"""
- import math
- import oe.recipeutils
- import oe.package
+ import oe.utils
check_workspace_recipe(workspace, args.recipename, checksrc=False)
+ tinfoil = setup_tinfoil(basepath=basepath)
+ try:
+ try:
+ rd = tinfoil.parse_recipe(args.recipename)
+ except Exception as e:
+ raise DevtoolError('Exception parsing recipe %s: %s' %
+ (args.recipename, e))
+
+ srcdir = rd.getVar('D')
+ workdir = rd.getVar('WORKDIR')
+ path = rd.getVar('PATH')
+ strip_cmd = rd.getVar('STRIP')
+ libdir = rd.getVar('libdir')
+ base_libdir = rd.getVar('base_libdir')
+ max_process = oe.utils.get_bb_number_threads(rd)
+ fakerootcmd = rd.getVar('FAKEROOTCMD')
+ fakerootenv = rd.getVar('FAKEROOTENV')
+ finally:
+ tinfoil.shutdown()
+
+ return deploy_no_d(srcdir, workdir, path, strip_cmd, libdir, base_libdir, max_process, fakerootcmd, fakerootenv, args)
+
+def deploy_no_d(srcdir, workdir, path, strip_cmd, libdir, base_libdir, max_process, fakerootcmd, fakerootenv, args):
+ import math
+ import oe.package
+
try:
host, destdir = args.target.split(':')
except ValueError:
@@ -152,118 +174,108 @@ def deploy(args, config, basepath, workspace):
if not destdir.endswith('/'):
destdir += '/'
- tinfoil = setup_tinfoil(basepath=basepath)
- try:
- try:
- rd = tinfoil.parse_recipe(args.recipename)
- except Exception as e:
- raise DevtoolError('Exception parsing recipe %s: %s' %
- (args.recipename, e))
- recipe_outdir = rd.getVar('D')
- if not os.path.exists(recipe_outdir) or not os.listdir(recipe_outdir):
- raise DevtoolError('No files to deploy - have you built the %s '
- 'recipe? If so, the install step has not installed '
- 'any files.' % args.recipename)
-
- if args.strip and not args.dry_run:
- # Fakeroot copy to new destination
- srcdir = recipe_outdir
- recipe_outdir = os.path.join(rd.getVar('WORKDIR'), 'deploy-target-stripped')
- if os.path.isdir(recipe_outdir):
- bb.utils.remove(recipe_outdir, True)
- exec_fakeroot(rd, "cp -af %s %s" % (os.path.join(srcdir, '.'), recipe_outdir), shell=True)
- os.environ['PATH'] = ':'.join([os.environ['PATH'], rd.getVar('PATH') or ''])
- oe.package.strip_execs(args.recipename, recipe_outdir, rd.getVar('STRIP'), rd.getVar('libdir'),
- rd.getVar('base_libdir'), rd)
-
- filelist = []
- inodes = set({})
- ftotalsize = 0
- for root, _, files in os.walk(recipe_outdir):
- for fn in files:
- fstat = os.lstat(os.path.join(root, fn))
- # Get the size in kiB (since we'll be comparing it to the output of du -k)
- # MUST use lstat() here not stat() or getfilesize() since we don't want to
- # dereference symlinks
- if fstat.st_ino in inodes:
- fsize = 0
- else:
- fsize = int(math.ceil(float(fstat.st_size)/1024))
- inodes.add(fstat.st_ino)
- ftotalsize += fsize
- # The path as it would appear on the target
- fpath = os.path.join(destdir, os.path.relpath(root, recipe_outdir), fn)
- filelist.append((fpath, fsize))
-
- if args.dry_run:
- print('Files to be deployed for %s on target %s:' % (args.recipename, args.target))
- for item, _ in filelist:
- print(' %s' % item)
- return 0
-
- extraoptions = ''
- if args.no_host_check:
- extraoptions += '-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'
- if not args.show_status:
- extraoptions += ' -q'
-
- scp_sshexec = ''
- ssh_sshexec = 'ssh'
- if args.ssh_exec:
- scp_sshexec = "-S %s" % args.ssh_exec
- ssh_sshexec = args.ssh_exec
- scp_port = ''
- ssh_port = ''
- if args.port:
- scp_port = "-P %s" % args.port
- ssh_port = "-p %s" % args.port
-
- if args.key:
- extraoptions += ' -i %s' % args.key
-
- # In order to delete previously deployed files and have the manifest file on
- # the target, we write out a shell script and then copy it to the target
- # so we can then run it (piping tar output to it).
- # (We cannot use scp here, because it doesn't preserve symlinks.)
- tmpdir = tempfile.mkdtemp(prefix='devtool')
- try:
- tmpscript = '/tmp/devtool_deploy.sh'
- tmpfilelist = os.path.join(os.path.dirname(tmpscript), 'devtool_deploy.list')
- shellscript = _prepare_remote_script(deploy=True,
- verbose=args.show_status,
- nopreserve=args.no_preserve,
- nocheckspace=args.no_check_space)
- # Write out the script to a file
- with open(os.path.join(tmpdir, os.path.basename(tmpscript)), 'w') as f:
- f.write(shellscript)
- # Write out the file list
- with open(os.path.join(tmpdir, os.path.basename(tmpfilelist)), 'w') as f:
- f.write('%d\n' % ftotalsize)
- for fpath, fsize in filelist:
- f.write('%s %d\n' % (fpath, fsize))
- # Copy them to the target
- ret = subprocess.call("scp %s %s %s %s/* %s:%s" % (scp_sshexec, scp_port, extraoptions, tmpdir, args.target, os.path.dirname(tmpscript)), shell=True)
- if ret != 0:
- raise DevtoolError('Failed to copy script to %s - rerun with -s to '
- 'get a complete error message' % args.target)
- finally:
- shutil.rmtree(tmpdir)
+ recipe_outdir = srcdir
+ if not os.path.exists(recipe_outdir) or not os.listdir(recipe_outdir):
+ raise DevtoolError('No files to deploy - have you built the %s '
+ 'recipe? If so, the install step has not installed '
+ 'any files.' % args.recipename)
+
+ if args.strip and not args.dry_run:
+ # Fakeroot copy to new destination
+ srcdir = recipe_outdir
+ recipe_outdir = os.path.join(workdir, 'devtool-deploy-target-stripped')
+ if os.path.isdir(recipe_outdir):
+ exec_fakeroot_no_d(fakerootcmd, fakerootenv, "rm -rf %s" % recipe_outdir, shell=True)
+ exec_fakeroot_no_d(fakerootcmd, fakerootenv, "cp -af %s %s" % (os.path.join(srcdir, '.'), recipe_outdir), shell=True)
+ os.environ['PATH'] = ':'.join([os.environ['PATH'], path or ''])
+ oe.package.strip_execs(args.recipename, recipe_outdir, strip_cmd, libdir, base_libdir, max_process)
+
+ filelist = []
+ inodes = set({})
+ ftotalsize = 0
+ for root, _, files in os.walk(recipe_outdir):
+ for fn in files:
+ fstat = os.lstat(os.path.join(root, fn))
+ # Get the size in kiB (since we'll be comparing it to the output of du -k)
+ # MUST use lstat() here not stat() or getfilesize() since we don't want to
+ # dereference symlinks
+ if fstat.st_ino in inodes:
+ fsize = 0
+ else:
+ fsize = int(math.ceil(float(fstat.st_size)/1024))
+ inodes.add(fstat.st_ino)
+ ftotalsize += fsize
+ # The path as it would appear on the target
+ fpath = os.path.join(destdir, os.path.relpath(root, recipe_outdir), fn)
+ filelist.append((fpath, fsize))
+
+ if args.dry_run:
+ print('Files to be deployed for %s on target %s:' % (args.recipename, args.target))
+ for item, _ in filelist:
+ print(' %s' % item)
+ return 0
- # Now run the script
- ret = exec_fakeroot(rd, 'tar cf - . | %s %s %s %s \'sh %s %s %s %s\'' % (ssh_sshexec, ssh_port, extraoptions, args.target, tmpscript, args.recipename, destdir, tmpfilelist), cwd=recipe_outdir, shell=True)
- if ret != 0:
- raise DevtoolError('Deploy failed - rerun with -s to get a complete '
- 'error message')
+ extraoptions = ''
+ if args.no_host_check:
+ extraoptions += '-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'
+ if not args.show_status:
+ extraoptions += ' -q'
- logger.info('Successfully deployed %s' % recipe_outdir)
+ scp_sshexec = ''
+ ssh_sshexec = 'ssh'
+ if args.ssh_exec:
+ scp_sshexec = "-S %s" % args.ssh_exec
+ ssh_sshexec = args.ssh_exec
+ scp_port = ''
+ ssh_port = ''
+ if args.port:
+ scp_port = "-P %s" % args.port
+ ssh_port = "-p %s" % args.port
+
+ if args.key:
+ extraoptions += ' -i %s' % args.key
- files_list = []
- for root, _, files in os.walk(recipe_outdir):
- for filename in files:
- filename = os.path.relpath(os.path.join(root, filename), recipe_outdir)
- files_list.append(os.path.join(destdir, filename))
+ # In order to delete previously deployed files and have the manifest file on
+ # the target, we write out a shell script and then copy it to the target
+ # so we can then run it (piping tar output to it).
+ # (We cannot use scp here, because it doesn't preserve symlinks.)
+ tmpdir = tempfile.mkdtemp(prefix='devtool')
+ try:
+ tmpscript = '/tmp/devtool_deploy.sh'
+ tmpfilelist = os.path.join(os.path.dirname(tmpscript), 'devtool_deploy.list')
+ shellscript = _prepare_remote_script(deploy=True,
+ verbose=args.show_status,
+ nopreserve=args.no_preserve,
+ nocheckspace=args.no_check_space)
+ # Write out the script to a file
+ with open(os.path.join(tmpdir, os.path.basename(tmpscript)), 'w') as f:
+ f.write(shellscript)
+ # Write out the file list
+ with open(os.path.join(tmpdir, os.path.basename(tmpfilelist)), 'w') as f:
+ f.write('%d\n' % ftotalsize)
+ for fpath, fsize in filelist:
+ f.write('%s %d\n' % (fpath, fsize))
+ # Copy them to the target
+ ret = subprocess.call("scp %s %s %s %s/* %s:%s" % (scp_sshexec, scp_port, extraoptions, tmpdir, args.target, os.path.dirname(tmpscript)), shell=True)
+ if ret != 0:
+ raise DevtoolError('Failed to copy script to %s - rerun with -s to '
+ 'get a complete error message' % args.target)
finally:
- tinfoil.shutdown()
+ shutil.rmtree(tmpdir)
+
+ # Now run the script
+ ret = exec_fakeroot_no_d(fakerootcmd, fakerootenv, 'tar cf - . | %s %s %s %s \'sh %s %s %s %s\'' % (ssh_sshexec, ssh_port, extraoptions, args.target, tmpscript, args.recipename, destdir, tmpfilelist), cwd=recipe_outdir, shell=True)
+ if ret != 0:
+ raise DevtoolError('Deploy failed - rerun with -s to get a complete '
+ 'error message')
+
+ logger.info('Successfully deployed %s' % recipe_outdir)
+
+ files_list = []
+ for root, _, files in os.walk(recipe_outdir):
+ for filename in files:
+ filename = os.path.relpath(os.path.join(root, filename), recipe_outdir)
+ files_list.append(os.path.join(destdir, filename))
return 0
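A self-contained sketch of the size accounting used by deploy_no_d above (the helper name is ours): lstat() so symlinks are not dereferenced, plus an inode set so hard links are only counted once, matching what `du -k` reports on the target:

    import math
    import os

    def total_kib(topdir):
        seen, total = set(), 0
        for root, _, files in os.walk(topdir):
            for fn in files:
                st = os.lstat(os.path.join(root, fn))
                if st.st_ino not in seen:  # count each hard link group once
                    seen.add(st.st_ino)
                    total += int(math.ceil(st.st_size / 1024))
        return total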
diff --git a/scripts/lib/devtool/ide_plugins/__init__.py b/scripts/lib/devtool/ide_plugins/__init__.py
new file mode 100644
index 0000000000..19c2f61c5f
--- /dev/null
+++ b/scripts/lib/devtool/ide_plugins/__init__.py
@@ -0,0 +1,282 @@
+#
+# Copyright (C) 2023-2024 Siemens AG
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+"""Devtool ide-sdk IDE plugin interface definition and helper functions"""
+
+import errno
+import json
+import logging
+import os
+import stat
+from enum import Enum, auto
+from devtool import DevtoolError
+from bb.utils import mkdirhier
+
+logger = logging.getLogger('devtool')
+
+
+class BuildTool(Enum):
+ UNDEFINED = auto()
+ CMAKE = auto()
+ MESON = auto()
+
+ @property
+ def is_c_cpp(self):
+ if self is BuildTool.CMAKE:
+ return True
+ if self is BuildTool.MESON:
+ return True
+ return False
+
+
+class GdbCrossConfig:
+ """Base class defining the GDB configuration generator interface
+
+ Generate a GDB configuration for a binary on the target device.
+ Only one instance per binary is allowed. This makes it possible to assign
+ a unique port number to each gdbserver instance.
+ """
+ _gdbserver_port_next = 1234
+ _binaries = []
+
+ def __init__(self, image_recipe, modified_recipe, binary, gdbserver_multi=True):
+ self.image_recipe = image_recipe
+ self.modified_recipe = modified_recipe
+ self.gdb_cross = modified_recipe.gdb_cross
+ self.binary = binary
+ if binary in GdbCrossConfig._binaries:
+ raise DevtoolError(
+ "gdbserver config for binary %s is already generated" % binary)
+ GdbCrossConfig._binaries.append(binary)
+ self.script_dir = modified_recipe.ide_sdk_scripts_dir
+ self.gdbinit_dir = os.path.join(self.script_dir, 'gdbinit')
+ self.gdbserver_multi = gdbserver_multi
+ self.binary_pretty = self.binary.replace(os.sep, '-').lstrip('-')
+ self.gdbserver_port = GdbCrossConfig._gdbserver_port_next
+ GdbCrossConfig._gdbserver_port_next += 1
+ self.id_pretty = "%d_%s" % (self.gdbserver_port, self.binary_pretty)
+ # gdbserver start script
+ gdbserver_script_file = 'gdbserver_' + self.id_pretty
+ if self.gdbserver_multi:
+ gdbserver_script_file += "_m"
+ self.gdbserver_script = os.path.join(
+ self.script_dir, gdbserver_script_file)
+ # gdbinit file
+ self.gdbinit = os.path.join(
+ self.gdbinit_dir, 'gdbinit_' + self.id_pretty)
+ # gdb start script
+ self.gdb_script = os.path.join(
+ self.script_dir, 'gdb_' + self.id_pretty)
+
+ def _gen_gdbserver_start_script(self):
+ """Generate a shell command starting the gdbserver on the remote device via ssh
+
+ GDB supports two modes:
+ multi: gdbserver remains running over several debug sessions
+ once: gdbserver terminates after the debugged process terminates
+ """
+ cmd_lines = ['#!/bin/sh']
+ if self.gdbserver_multi:
+ temp_dir = "TEMP_DIR=/tmp/gdbserver_%s; " % self.id_pretty
+ gdbserver_cmd_start = temp_dir
+ gdbserver_cmd_start += "test -f \\$TEMP_DIR/pid && exit 0; "
+ gdbserver_cmd_start += "mkdir -p \\$TEMP_DIR; "
+ gdbserver_cmd_start += "%s --multi :%s > \\$TEMP_DIR/log 2>&1 & " % (
+ self.gdb_cross.gdbserver_path, self.gdbserver_port)
+ gdbserver_cmd_start += "echo \\$! > \\$TEMP_DIR/pid;"
+
+ gdbserver_cmd_stop = temp_dir
+ gdbserver_cmd_stop += "test -f \\$TEMP_DIR/pid && kill \\$(cat \\$TEMP_DIR/pid); "
+ gdbserver_cmd_stop += "rm -rf \\$TEMP_DIR; "
+
+ gdbserver_cmd_l = []
+ gdbserver_cmd_l.append('if [ "$1" = "stop" ]; then')
+ gdbserver_cmd_l.append(' shift')
+ gdbserver_cmd_l.append(" %s %s %s %s 'sh -c \"%s\"'" % (
+ self.gdb_cross.target_device.ssh_sshexec, self.gdb_cross.target_device.ssh_port, self.gdb_cross.target_device.extraoptions, self.gdb_cross.target_device.target, gdbserver_cmd_stop))
+ gdbserver_cmd_l.append('else')
+ gdbserver_cmd_l.append(" %s %s %s %s 'sh -c \"%s\"'" % (
+ self.gdb_cross.target_device.ssh_sshexec, self.gdb_cross.target_device.ssh_port, self.gdb_cross.target_device.extraoptions, self.gdb_cross.target_device.target, gdbserver_cmd_start))
+ gdbserver_cmd_l.append('fi')
+ gdbserver_cmd = os.linesep.join(gdbserver_cmd_l)
+ else:
+ gdbserver_cmd_start = "%s --once :%s %s" % (
+ self.gdb_cross.gdbserver_path, self.gdbserver_port, self.binary)
+ gdbserver_cmd = "%s %s %s %s 'sh -c \"%s\"'" % (
+ self.gdb_cross.target_device.ssh_sshexec, self.gdb_cross.target_device.ssh_port, self.gdb_cross.target_device.extraoptions, self.gdb_cross.target_device.target, gdbserver_cmd_start)
+ cmd_lines.append(gdbserver_cmd)
+ GdbCrossConfig.write_file(self.gdbserver_script, cmd_lines, True)
+
+ def _gen_gdbinit_config(self):
+ """Generate a gdbinit file for this binary and the corresponding gdbserver configuration"""
+ gdbinit_lines = ['# This file is generated by devtool ide-sdk']
+ if self.gdbserver_multi:
+ target_help = '# gdbserver --multi :%d' % self.gdbserver_port
+ remote_cmd = 'target extended-remote'
+ else:
+ target_help = '# gdbserver :%d %s' % (
+ self.gdbserver_port, self.binary)
+ remote_cmd = 'target remote'
+ gdbinit_lines.append('# On the remote target:')
+ gdbinit_lines.append(target_help)
+ gdbinit_lines.append('# On the build machine:')
+ gdbinit_lines.append('# cd ' + self.modified_recipe.real_srctree)
+ gdbinit_lines.append(
+ '# ' + self.gdb_cross.gdb + ' -ix ' + self.gdbinit)
+
+ gdbinit_lines.append('set sysroot ' + self.modified_recipe.d)
+ gdbinit_lines.append('set substitute-path "/usr/include" "' +
+ os.path.join(self.modified_recipe.recipe_sysroot, 'usr', 'include') + '"')
+ # Disable debuginfod for now, the IDE configuration uses rootfs-dbg from the image workdir.
+ gdbinit_lines.append('set debuginfod enabled off')
+ if self.image_recipe.rootfs_dbg:
+ gdbinit_lines.append(
+ 'set solib-search-path "' + self.modified_recipe.solib_search_path_str(self.image_recipe) + '"')
+ # First: Search for sources of this recipe in the workspace folder
+ if self.modified_recipe.pn in self.modified_recipe.target_dbgsrc_dir:
+ gdbinit_lines.append('set substitute-path "%s" "%s"' %
+ (self.modified_recipe.target_dbgsrc_dir, self.modified_recipe.real_srctree))
+ else:
+ logger.error(
+ "TARGET_DBGSRC_DIR must contain the recipe name PN.")
+ # Second: Search for sources of other recipes in the rootfs-dbg
+ if self.modified_recipe.target_dbgsrc_dir.startswith("/usr/src/debug"):
+ gdbinit_lines.append('set substitute-path "/usr/src/debug" "%s"' % os.path.join(
+ self.image_recipe.rootfs_dbg, "usr", "src", "debug"))
+ else:
+ logger.error(
+ "TARGET_DBGSRC_DIR must start with /usr/src/debug.")
+ else:
+ logger.warning(
+ "Cannot setup debug symbols configuration for GDB. IMAGE_GEN_DEBUGFS is not enabled.")
+ gdbinit_lines.append(
+ '%s %s:%d' % (remote_cmd, self.gdb_cross.host, self.gdbserver_port))
+ gdbinit_lines.append('set remote exec-file ' + self.binary)
+ gdbinit_lines.append(
+ 'run ' + os.path.join(self.modified_recipe.d, self.binary))
+
+ GdbCrossConfig.write_file(self.gdbinit, gdbinit_lines)
+
+ def _gen_gdb_start_script(self):
+ """Generate a script starting GDB with the corresponding gdbinit configuration."""
+ cmd_lines = ['#!/bin/sh']
+ cmd_lines.append('cd ' + self.modified_recipe.real_srctree)
+ cmd_lines.append(self.gdb_cross.gdb + ' -ix ' +
+ self.gdbinit + ' "$@"')
+ GdbCrossConfig.write_file(self.gdb_script, cmd_lines, True)
+
+ def initialize(self):
+ self._gen_gdbserver_start_script()
+ self._gen_gdbinit_config()
+ self._gen_gdb_start_script()
+
+ @staticmethod
+ def write_file(script_file, cmd_lines, executable=False):
+ script_dir = os.path.dirname(script_file)
+ mkdirhier(script_dir)
+ with open(script_file, 'w') as script_f:
+ script_f.write(os.linesep.join(cmd_lines))
+ script_f.write(os.linesep)
+ if executable:
+ st = os.stat(script_file)
+ os.chmod(script_file, st.st_mode | stat.S_IEXEC)
+ logger.info("Created: %s" % script_file)
+
+
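Usage of the write_file helper is straightforward; a minimal, hypothetical example creating an executable start script:

    GdbCrossConfig.write_file('/tmp/ide-sdk-demo/gdbserver_1234_demo',
                              ['#!/bin/sh', 'echo starting gdbserver'],
                              executable=True)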
+class IdeBase:
+ """Base class defining the interface for IDE plugins"""
+
+ def __init__(self):
+ self.ide_name = 'undefined'
+ self.gdb_cross_configs = []
+
+ @classmethod
+ def ide_plugin_priority(cls):
+ """Used to find the default ide handler if --ide is not passed"""
+ return 10
+
+ def setup_shared_sysroots(self, shared_env):
+ logger.warn("Shared sysroot mode is not supported for IDE %s" %
+ self.ide_name)
+
+ def setup_modified_recipe(self, args, image_recipe, modified_recipe):
+ logger.warn("Modified recipe mode is not supported for IDE %s" %
+ self.ide_name)
+
+ def initialize_gdb_cross_configs(self, image_recipe, modified_recipe, gdb_cross_config_class=GdbCrossConfig):
+ binaries = modified_recipe.find_installed_binaries()
+ for binary in binaries:
+ gdb_cross_config = gdb_cross_config_class(
+ image_recipe, modified_recipe, binary)
+ gdb_cross_config.initialize()
+ self.gdb_cross_configs.append(gdb_cross_config)
+
+ @staticmethod
+ def gen_oe_scripts_sym_link(modified_recipe):
+ # create a sym-link from sources to the scripts directory
+ if os.path.isdir(modified_recipe.ide_sdk_scripts_dir):
+ IdeBase.symlink_force(modified_recipe.ide_sdk_scripts_dir,
+ os.path.join(modified_recipe.real_srctree, 'oe-scripts'))
+
+ @staticmethod
+ def update_json_file(json_dir, json_file, update_dict):
+ """Update a json file
+
+ The new values are merged into the existing content with dict.update,
+ replacing top-level keys that already exist.
+ """
+ json_path = os.path.join(json_dir, json_file)
+ logger.info("Updating IDE config file: %s (%s)" %
+ (json_file, json_path))
+ if not os.path.exists(json_dir):
+ os.makedirs(json_dir)
+ try:
+ with open(json_path) as f:
+ orig_dict = json.load(f)
+ except json.decoder.JSONDecodeError:
+ logger.info(
+ "Decoding %s failed. Probably because of comments in the json file" % json_path)
+ orig_dict = {}
+ except FileNotFoundError:
+ orig_dict = {}
+ orig_dict.update(update_dict)
+ with open(json_path, 'w') as f:
+ json.dump(orig_dict, f, indent=4)
+
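A hypothetical call merging one setting into a VSCode settings.json; top-level keys already present in the file are kept unless the update overwrites them:

    IdeBase.update_json_file('/work/src/myapp/.vscode', 'settings.json',
                             {'cmake.configureOnOpen': True})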
+ @staticmethod
+ def symlink_force(tgt, dst):
+ try:
+ os.symlink(tgt, dst)
+ except OSError as err:
+ if err.errno == errno.EEXIST:
+ if os.readlink(dst) != tgt:
+ os.remove(dst)
+ os.symlink(tgt, dst)
+ else:
+ raise err
+
+
+def get_devtool_deploy_opts(args):
+ """Filter args for devtool deploy-target args"""
+ if not args.target:
+ return None
+ devtool_deploy_opts = [args.target]
+ if args.no_host_check:
+ devtool_deploy_opts += ["-c"]
+ if args.show_status:
+ devtool_deploy_opts += ["-s"]
+ if args.no_preserve:
+ devtool_deploy_opts += ["-p"]
+ if args.no_check_space:
+ devtool_deploy_opts += ["--no-check-space"]
+ if args.ssh_exec:
+ devtool_deploy_opts += ["-e", args.ssh.exec]
+ if args.port:
+ devtool_deploy_opts += ["-P", args.port]
+ if args.key:
+ devtool_deploy_opts += ["-I", args.key]
+ if args.strip is False:
+ devtool_deploy_opts += ["--no-strip"]
+ return devtool_deploy_opts
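A sketch of the resulting mapping, assuming an argparse.Namespace shaped like the devtool deploy-target options (all values illustrative):

    from argparse import Namespace
    args = Namespace(target='root@192.168.7.2', no_host_check=True,
                     show_status=False, no_preserve=False,
                     no_check_space=False, ssh_exec=None, port='2222',
                     key=None, strip=True)
    get_devtool_deploy_opts(args)
    # -> ['root@192.168.7.2', '-c', '-P', '2222']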
diff --git a/scripts/lib/devtool/ide_plugins/ide_code.py b/scripts/lib/devtool/ide_plugins/ide_code.py
new file mode 100644
index 0000000000..a62b93224e
--- /dev/null
+++ b/scripts/lib/devtool/ide_plugins/ide_code.py
@@ -0,0 +1,463 @@
+#
+# Copyright (C) 2023-2024 Siemens AG
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+"""Devtool ide-sdk IDE plugin for VSCode and VSCodium"""
+
+import json
+import logging
+import os
+import shutil
+from devtool.ide_plugins import BuildTool, IdeBase, GdbCrossConfig, get_devtool_deploy_opts
+
+logger = logging.getLogger('devtool')
+
+
+class GdbCrossConfigVSCode(GdbCrossConfig):
+ def __init__(self, image_recipe, modified_recipe, binary):
+ super().__init__(image_recipe, modified_recipe, binary, False)
+
+ def initialize(self):
+ self._gen_gdbserver_start_script()
+
+
+class IdeVSCode(IdeBase):
+ """Manage IDE configurations for VSCode
+
+ Modified recipe mode:
+ - cmake: use the cmake-preset generated by devtool ide-sdk
+ - meson: meson is called via a wrapper script generated by devtool ide-sdk
+
+ Shared sysroot mode:
+ In shared sysroot mode, the cross tool-chain is exported to the user's global configuration.
+ A workspace cannot be created because there is no recipe that defines how a workspace could
+ be set up.
+ - cmake: adds a cmake-kit to .local/share/CMakeTools/cmake-tools-kits.json
+ The cmake-kit uses the environment script and the tool-chain file
+ generated by meta-ide-support.
+ - meson: Meson needs manual workspace configuration.
+ """
+
+ @classmethod
+ def ide_plugin_priority(cls):
+ """If --ide is not passed this is the default plugin"""
+ if shutil.which('code'):
+ return 100
+ return 0
+
+ def setup_shared_sysroots(self, shared_env):
+ """Expose the toolchain of the shared sysroots SDK"""
+ datadir = shared_env.ide_support.datadir
+ deploy_dir_image = shared_env.ide_support.deploy_dir_image
+ real_multimach_target_sys = shared_env.ide_support.real_multimach_target_sys
+ standalone_sysroot_native = shared_env.build_sysroots.standalone_sysroot_native
+ vscode_ws_path = os.path.join(
+ os.environ['HOME'], '.local', 'share', 'CMakeTools')
+ cmake_kits_path = os.path.join(vscode_ws_path, 'cmake-tools-kits.json')
+ oecmake_generator = "Ninja"
+ env_script = os.path.join(
+ deploy_dir_image, 'environment-setup-' + real_multimach_target_sys)
+
+ if not os.path.isdir(vscode_ws_path):
+ os.makedirs(vscode_ws_path)
+ cmake_kits_old = []
+ if os.path.exists(cmake_kits_path):
+ with open(cmake_kits_path, 'r', encoding='utf-8') as cmake_kits_file:
+ cmake_kits_old = json.load(cmake_kits_file)
+ cmake_kits = cmake_kits_old.copy()
+
+ cmake_kit_new = {
+ "name": "OE " + real_multimach_target_sys,
+ "environmentSetupScript": env_script,
+ "toolchainFile": standalone_sysroot_native + datadir + "/cmake/OEToolchainConfig.cmake",
+ "preferredGenerator": {
+ "name": oecmake_generator
+ }
+ }
+
+ def merge_kit(cmake_kits, cmake_kit_new):
+ i = 0
+ while i < len(cmake_kits):
+ if 'environmentSetupScript' in cmake_kits[i] and \
+ cmake_kits[i]['environmentSetupScript'] == cmake_kit_new['environmentSetupScript']:
+ cmake_kits[i] = cmake_kit_new
+ return
+ i += 1
+ cmake_kits.append(cmake_kit_new)
+ merge_kit(cmake_kits, cmake_kit_new)
+
+ if cmake_kits != cmake_kits_old:
+ logger.info("Updating: %s" % cmake_kits_path)
+ with open(cmake_kits_path, 'w', encoding='utf-8') as cmake_kits_file:
+ json.dump(cmake_kits, cmake_kits_file, indent=4)
+ else:
+ logger.info("Already up to date: %s" % cmake_kits_path)
+
+ cmake_native = os.path.join(
+ shared_env.build_sysroots.standalone_sysroot_native, 'usr', 'bin', 'cmake')
+ if os.path.isfile(cmake_native):
+ logger.info('By default, cmake-kits invoke the cmake found in PATH. If the cmake provided by this SDK should be used, please add the following line to the ".vscode/settings.json" file: "cmake.cmakePath": "%s"' % cmake_native)
+ else:
+ logger.error("Cannot find cmake native at: %s" % cmake_native)
+
+ def dot_code_dir(self, modified_recipe):
+ return os.path.join(modified_recipe.srctree, '.vscode')
+
+ def __vscode_settings_meson(self, settings_dict, modified_recipe):
+ if modified_recipe.build_tool is not BuildTool.MESON:
+ return
+ settings_dict["mesonbuild.mesonPath"] = modified_recipe.meson_wrapper
+
+ confopts = modified_recipe.mesonopts.split()
+ confopts += modified_recipe.meson_cross_file.split()
+ confopts += modified_recipe.extra_oemeson.split()
+ settings_dict["mesonbuild.configureOptions"] = confopts
+ settings_dict["mesonbuild.buildFolder"] = modified_recipe.b
+
+ def __vscode_settings_cmake(self, settings_dict, modified_recipe):
+ """Add cmake specific settings to settings.json.
+
+ Note: most settings are passed to the cmake preset.
+ """
+ if modified_recipe.build_tool is not BuildTool.CMAKE:
+ return
+ settings_dict["cmake.configureOnOpen"] = True
+ settings_dict["cmake.sourceDirectory"] = modified_recipe.real_srctree
+
+ def vscode_settings(self, modified_recipe, image_recipe):
+ files_excludes = {
+ "**/.git/**": True,
+ "**/oe-logs/**": True,
+ "**/oe-workdir/**": True,
+ "**/source-date-epoch/**": True
+ }
+ python_exclude = [
+ "**/.git/**",
+ "**/oe-logs/**",
+ "**/oe-workdir/**",
+ "**/source-date-epoch/**"
+ ]
+ files_readonly = {
+ modified_recipe.recipe_sysroot + '/**': True,
+ modified_recipe.recipe_sysroot_native + '/**': True,
+ }
+ if image_recipe.rootfs_dbg is not None:
+ files_readonly[image_recipe.rootfs_dbg + '/**'] = True
+ settings_dict = {
+ "files.watcherExclude": files_excludes,
+ "files.exclude": files_excludes,
+ "files.readonlyInclude": files_readonly,
+ "python.analysis.exclude": python_exclude
+ }
+ self.__vscode_settings_cmake(settings_dict, modified_recipe)
+ self.__vscode_settings_meson(settings_dict, modified_recipe)
+
+ settings_file = 'settings.json'
+ IdeBase.update_json_file(
+ self.dot_code_dir(modified_recipe), settings_file, settings_dict)
+
+ def __vscode_extensions_cmake(self, modified_recipe, recommendations):
+ if modified_recipe.build_tool is not BuildTool.CMAKE:
+ return
+ recommendations += [
+ "twxs.cmake",
+ "ms-vscode.cmake-tools",
+ "ms-vscode.cpptools",
+ "ms-vscode.cpptools-extension-pack",
+ "ms-vscode.cpptools-themes"
+ ]
+
+ def __vscode_extensions_meson(self, modified_recipe, recommendations):
+ if modified_recipe.build_tool is not BuildTool.MESON:
+ return
+ recommendations += [
+ 'mesonbuild.mesonbuild',
+ "ms-vscode.cpptools",
+ "ms-vscode.cpptools-extension-pack",
+ "ms-vscode.cpptools-themes"
+ ]
+
+ def vscode_extensions(self, modified_recipe):
+ recommendations = []
+ self.__vscode_extensions_cmake(modified_recipe, recommendations)
+ self.__vscode_extensions_meson(modified_recipe, recommendations)
+ extensions_file = 'extensions.json'
+ IdeBase.update_json_file(
+ self.dot_code_dir(modified_recipe), extensions_file, {"recommendations": recommendations})
+
+ def vscode_c_cpp_properties(self, modified_recipe):
+ properties_dict = {
+ "name": modified_recipe.recipe_id_pretty,
+ }
+ if modified_recipe.build_tool is BuildTool.CMAKE:
+ properties_dict["configurationProvider"] = "ms-vscode.cmake-tools"
+ elif modified_recipe.build_tool is BuildTool.MESON:
+ properties_dict["configurationProvider"] = "mesonbuild.mesonbuild"
+ properties_dict["compilerPath"] = os.path.join(modified_recipe.staging_bindir_toolchain, modified_recipe.cxx.split()[0])
+ else: # no C/C++ build
+ return
+
+ properties_dicts = {
+ "configurations": [
+ properties_dict
+ ],
+ "version": 4
+ }
+ prop_file = 'c_cpp_properties.json'
+ IdeBase.update_json_file(
+ self.dot_code_dir(modified_recipe), prop_file, properties_dicts)
+
+ def vscode_launch_bin_dbg(self, gdb_cross_config):
+ modified_recipe = gdb_cross_config.modified_recipe
+
+ launch_config = {
+ "name": gdb_cross_config.id_pretty,
+ "type": "cppdbg",
+ "request": "launch",
+ "program": os.path.join(modified_recipe.d, gdb_cross_config.binary.lstrip('/')),
+ "stopAtEntry": True,
+ "cwd": "${workspaceFolder}",
+ "environment": [],
+ "externalConsole": False,
+ "MIMode": "gdb",
+ "preLaunchTask": gdb_cross_config.id_pretty,
+ "miDebuggerPath": modified_recipe.gdb_cross.gdb,
+ "miDebuggerServerAddress": "%s:%d" % (modified_recipe.gdb_cross.host, gdb_cross_config.gdbserver_port)
+ }
+
+ # Search for header files in recipe-sysroot.
+ src_file_map = {
+ "/usr/include": os.path.join(modified_recipe.recipe_sysroot, "usr", "include")
+ }
+ # First of all, search for unstripped binaries in the image folder.
+ # These binaries are copied (and optionally stripped) by deploy-target
+ setup_commands = [
+ {
+ "description": "sysroot",
+ "text": "set sysroot " + modified_recipe.d
+ }
+ ]
+
+ if gdb_cross_config.image_recipe.rootfs_dbg:
+ launch_config['additionalSOLibSearchPath'] = modified_recipe.solib_search_path_str(
+ gdb_cross_config.image_recipe)
+ # First: Search for sources of this recipe in the workspace folder
+ if modified_recipe.pn in modified_recipe.target_dbgsrc_dir:
+ src_file_map[modified_recipe.target_dbgsrc_dir] = "${workspaceFolder}"
+ else:
+ logger.error(
+ "TARGET_DBGSRC_DIR must contain the recipe name PN.")
+ # Second: Search for sources of other recipes in the rootfs-dbg
+ if modified_recipe.target_dbgsrc_dir.startswith("/usr/src/debug"):
+ src_file_map["/usr/src/debug"] = os.path.join(
+ gdb_cross_config.image_recipe.rootfs_dbg, "usr", "src", "debug")
+ else:
+ logger.error(
+ "TARGET_DBGSRC_DIR must start with /usr/src/debug.")
+ else:
+ logger.warning(
+ "Cannot setup debug symbols configuration for GDB. IMAGE_GEN_DEBUGFS is not enabled.")
+
+ launch_config['sourceFileMap'] = src_file_map
+ launch_config['setupCommands'] = setup_commands
+ return launch_config
+
+ def vscode_launch(self, modified_recipe):
+ """GDB Launch configuration for binaries (elf files)"""
+
+ configurations = []
+ for gdb_cross_config in self.gdb_cross_configs:
+ if gdb_cross_config.modified_recipe is modified_recipe:
+ configurations.append(self.vscode_launch_bin_dbg(gdb_cross_config))
+ launch_dict = {
+ "version": "0.2.0",
+ "configurations": configurations
+ }
+ launch_file = 'launch.json'
+ IdeBase.update_json_file(
+ self.dot_code_dir(modified_recipe), launch_file, launch_dict)
+
+ def vscode_tasks_cpp(self, args, modified_recipe):
+ run_install_deploy = modified_recipe.gen_install_deploy_script(args)
+ install_task_name = "install && deploy-target %s" % modified_recipe.recipe_id_pretty
+ tasks_dict = {
+ "version": "2.0.0",
+ "tasks": [
+ {
+ "label": install_task_name,
+ "type": "shell",
+ "command": run_install_deploy,
+ "problemMatcher": []
+ }
+ ]
+ }
+ for gdb_cross_config in self.gdb_cross_configs:
+ if gdb_cross_config.modified_recipe is not modified_recipe:
+ continue
+ tasks_dict['tasks'].append(
+ {
+ "label": gdb_cross_config.id_pretty,
+ "type": "shell",
+ "isBackground": True,
+ "dependsOn": [
+ install_task_name
+ ],
+ "command": gdb_cross_config.gdbserver_script,
+ "problemMatcher": [
+ {
+ "pattern": [
+ {
+ "regexp": ".",
+ "file": 1,
+ "location": 2,
+ "message": 3
+ }
+ ],
+ "background": {
+ "activeOnStart": True,
+ "beginsPattern": ".",
+ "endsPattern": ".",
+ }
+ }
+ ]
+ })
+ tasks_file = 'tasks.json'
+ IdeBase.update_json_file(
+ self.dot_code_dir(modified_recipe), tasks_file, tasks_dict)
+
+ def vscode_tasks_fallback(self, args, modified_recipe):
+ oe_init_dir = modified_recipe.oe_init_dir
+ oe_init = ". %s %s > /dev/null && " % (modified_recipe.oe_init_build_env, modified_recipe.topdir)
+ dt_build = "devtool build "
+ dt_build_label = dt_build + modified_recipe.recipe_id_pretty
+ dt_build_cmd = dt_build + modified_recipe.bpn
+ clean_opt = " --clean"
+ dt_build_clean_label = dt_build + modified_recipe.recipe_id_pretty + clean_opt
+ dt_build_clean_cmd = dt_build + modified_recipe.bpn + clean_opt
+ dt_deploy = "devtool deploy-target "
+ dt_deploy_label = dt_deploy + modified_recipe.recipe_id_pretty
+ dt_deploy_cmd = dt_deploy + modified_recipe.bpn
+ dt_build_deploy_label = "devtool build & deploy-target %s" % modified_recipe.recipe_id_pretty
+ deploy_opts = ' '.join(get_devtool_deploy_opts(args))
+ tasks_dict = {
+ "version": "2.0.0",
+ "tasks": [
+ {
+ "label": dt_build_label,
+ "type": "shell",
+ "command": "bash",
+ "linux": {
+ "options": {
+ "cwd": oe_init_dir
+ }
+ },
+ "args": [
+ "--login",
+ "-c",
+ "%s%s" % (oe_init, dt_build_cmd)
+ ],
+ "problemMatcher": []
+ },
+ {
+ "label": dt_deploy_label,
+ "type": "shell",
+ "command": "bash",
+ "linux": {
+ "options": {
+ "cwd": oe_init_dir
+ }
+ },
+ "args": [
+ "--login",
+ "-c",
+ "%s%s %s" % (
+ oe_init, dt_deploy_cmd, deploy_opts)
+ ],
+ "problemMatcher": []
+ },
+ {
+ "label": dt_build_deploy_label,
+ "dependsOrder": "sequence",
+ "dependsOn": [
+ dt_build_label,
+ dt_deploy_label
+ ],
+ "problemMatcher": [],
+ "group": {
+ "kind": "build",
+ "isDefault": True
+ }
+ },
+ {
+ "label": dt_build_clean_label,
+ "type": "shell",
+ "command": "bash",
+ "linux": {
+ "options": {
+ "cwd": oe_init_dir
+ }
+ },
+ "args": [
+ "--login",
+ "-c",
+ "%s%s" % (oe_init, dt_build_clean_cmd)
+ ],
+ "problemMatcher": []
+ }
+ ]
+ }
+ if modified_recipe.gdb_cross:
+ for gdb_cross_config in self.gdb_cross_configs:
+ if gdb_cross_config.modified_recipe is not modified_recipe:
+ continue
+ tasks_dict['tasks'].append(
+ {
+ "label": gdb_cross_config.id_pretty,
+ "type": "shell",
+ "isBackground": True,
+ "dependsOn": [
+ dt_build_deploy_label
+ ],
+ "command": gdb_cross_config.gdbserver_script,
+ "problemMatcher": [
+ {
+ "pattern": [
+ {
+ "regexp": ".",
+ "file": 1,
+ "location": 2,
+ "message": 3
+ }
+ ],
+ "background": {
+ "activeOnStart": True,
+ "beginsPattern": ".",
+ "endsPattern": ".",
+ }
+ }
+ ]
+ })
+ tasks_file = 'tasks.json'
+ IdeBase.update_json_file(
+ self.dot_code_dir(modified_recipe), tasks_file, tasks_dict)
+
+ def vscode_tasks(self, args, modified_recipe):
+ if modified_recipe.build_tool.is_c_cpp:
+ self.vscode_tasks_cpp(args, modified_recipe)
+ else:
+ self.vscode_tasks_fallback(args, modified_recipe)
+
+ def setup_modified_recipe(self, args, image_recipe, modified_recipe):
+ self.vscode_settings(modified_recipe, image_recipe)
+ self.vscode_extensions(modified_recipe)
+ self.vscode_c_cpp_properties(modified_recipe)
+ if args.target:
+ self.initialize_gdb_cross_configs(
+ image_recipe, modified_recipe, gdb_cross_config_class=GdbCrossConfigVSCode)
+ self.vscode_launch(modified_recipe)
+ self.vscode_tasks(args, modified_recipe)
+
+
+def register_ide_plugin(ide_plugins):
+ ide_plugins['code'] = IdeVSCode
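How the registry is consumed is not shown in this hunk; a plausible sketch (an assumption, not the plugin's code) is to pick the registered class with the highest ide_plugin_priority(), so 'code' wins whenever the code binary is on PATH:

    ide_plugins = {}
    register_ide_plugin(ide_plugins)
    default_ide_cls = max(ide_plugins.values(),
                          key=lambda cls: cls.ide_plugin_priority())
    ide = default_ide_cls()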
diff --git a/scripts/lib/devtool/ide_plugins/ide_none.py b/scripts/lib/devtool/ide_plugins/ide_none.py
new file mode 100644
index 0000000000..f106c5a026
--- /dev/null
+++ b/scripts/lib/devtool/ide_plugins/ide_none.py
@@ -0,0 +1,53 @@
+#
+# Copyright (C) 2023-2024 Siemens AG
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+"""Devtool ide-sdk generic IDE plugin"""
+
+import os
+import logging
+from devtool.ide_plugins import IdeBase, GdbCrossConfig
+
+logger = logging.getLogger('devtool')
+
+
+class IdeNone(IdeBase):
+ """Generate some generic helpers for other IDEs
+
+ Modified recipe mode:
+ Generate some helper scripts for remote debugging with GDB
+
+ Shared sysroot mode:
+ A wrapper for bitbake meta-ide-support and bitbake build-sysroots
+ """
+
+ def __init__(self):
+ super().__init__()
+
+ def setup_shared_sysroots(self, shared_env):
+ real_multimach_target_sys = shared_env.ide_support.real_multimach_target_sys
+ deploy_dir_image = shared_env.ide_support.deploy_dir_image
+ env_script = os.path.join(
+ deploy_dir_image, 'environment-setup-' + real_multimach_target_sys)
+ logger.info(
+ "To use this SDK please source this: %s" % env_script)
+
+ def setup_modified_recipe(self, args, image_recipe, modified_recipe):
+ """generate some helper scripts and config files
+
+ - Execute the do_install task
+ - Execute devtool deploy-target
+ - Generate a gdbinit file per executable
+ - Generate the oe-scripts sym-link
+ """
+ script_path = modified_recipe.gen_install_deploy_script(args)
+ logger.info("Created: %s" % script_path)
+
+ self.initialize_gdb_cross_configs(image_recipe, modified_recipe)
+
+ IdeBase.gen_oe_scripts_sym_link(modified_recipe)
+
+
+def register_ide_plugin(ide_plugins):
+ ide_plugins['none'] = IdeNone
diff --git a/scripts/lib/devtool/ide_sdk.py b/scripts/lib/devtool/ide_sdk.py
new file mode 100755
index 0000000000..7807b322b3
--- /dev/null
+++ b/scripts/lib/devtool/ide_sdk.py
@@ -0,0 +1,1070 @@
+# Development tool - ide-sdk command plugin
+#
+# Copyright (C) 2023-2024 Siemens AG
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+"""Devtool ide-sdk plugin"""
+
+import json
+import logging
+import os
+import re
+import shutil
+import stat
+import subprocess
+import sys
+from argparse import RawTextHelpFormatter
+from enum import Enum
+
+import scriptutils
+import bb
+from devtool import exec_build_env_command, setup_tinfoil, check_workspace_recipe, DevtoolError, parse_recipe
+from devtool.standard import get_real_srctree
+from devtool.ide_plugins import BuildTool
+
+
+logger = logging.getLogger('devtool')
+
+# dict of classes derived from IdeBase
+ide_plugins = {}
+
+
+class DevtoolIdeMode(Enum):
+ """Different modes are supported by the ide-sdk plugin.
+
+ The enum might be extended by more advanced modes in the future. Some ideas:
+ - auto: modified if all recipes are modified, shared if none of the recipes is modified.
+ - mixed: modified mode for modified recipes, shared mode for all other recipes.
+ """
+
+ modified = 'modified'
+ shared = 'shared'
+
+
+class TargetDevice:
+ """SSH remote login parameters"""
+
+ def __init__(self, args):
+ self.extraoptions = ''
+ if args.no_host_check:
+ self.extraoptions += '-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'
+ self.ssh_sshexec = 'ssh'
+ if args.ssh_exec:
+ self.ssh_sshexec = args.ssh_exec
+ self.ssh_port = ''
+ if args.port:
+ self.ssh_port = "-p %s" % args.port
+ if args.key:
+ self.extraoptions += ' -i %s' % args.key
+
+ self.target = args.target
+ target_sp = args.target.split('@')
+ if len(target_sp) == 1:
+ self.login = ""
+ self.host = target_sp[0]
+ elif len(target_sp) == 2:
+ self.login = target_sp[0]
+ self.host = target_sp[1]
+ else:
+ logger.error("Invalid target argument: %s" % args.target)
+
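The target string handling accepts both 'user@host' and a bare 'host'; a minimal check (the Namespace fields mirror the ide-sdk command line options):

    from argparse import Namespace
    dev = TargetDevice(Namespace(no_host_check=False, ssh_exec=None,
                                 port=None, key=None,
                                 target='root@192.168.7.2'))
    assert (dev.login, dev.host) == ('root', '192.168.7.2')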
+
+class RecipeNative:
+ """Base class for calling bitbake to provide a -native recipe"""
+
+ def __init__(self, name, target_arch=None):
+ self.name = name
+ self.target_arch = target_arch
+ self.bootstrap_tasks = [self.name + ':do_addto_recipe_sysroot']
+ self.staging_bindir_native = None
+ self.target_sys = None
+ self.__native_bin = None
+
+ def _initialize(self, config, workspace, tinfoil):
+ """Get the parsed recipe"""
+ recipe_d = parse_recipe(
+ config, tinfoil, self.name, appends=True, filter_workspace=False)
+ if not recipe_d:
+ raise DevtoolError("Parsing %s recipe failed" % self.name)
+ self.staging_bindir_native = os.path.realpath(
+ recipe_d.getVar('STAGING_BINDIR_NATIVE'))
+ self.target_sys = recipe_d.getVar('TARGET_SYS')
+ return recipe_d
+
+ def initialize(self, config, workspace, tinfoil):
+ """Basic initialization that can be overridden by a derived class"""
+ self._initialize(config, workspace, tinfoil)
+
+ @property
+ def native_bin(self):
+ if not self.__native_bin:
+ raise DevtoolError("native binary name is not defined.")
+ return self.__native_bin
+
+
+class RecipeGdbCross(RecipeNative):
+ """Handle handle gdb-cross on the host and the gdbserver on the target device"""
+
+ def __init__(self, args, target_arch, target_device):
+ super().__init__('gdb-cross-' + target_arch, target_arch)
+ self.target_device = target_device
+ self.gdb = None
+ self.gdbserver_port_next = int(args.gdbserver_port_start)
+ self.config_db = {}
+
+ def __find_gdbserver(self, config, tinfoil):
+ """Absolute path of the gdbserver"""
+ recipe_d_gdb = parse_recipe(
+ config, tinfoil, 'gdb', appends=True, filter_workspace=False)
+ if not recipe_d_gdb:
+ raise DevtoolError("Parsing gdb recipe failed")
+ return os.path.join(recipe_d_gdb.getVar('bindir'), 'gdbserver')
+
+ def initialize(self, config, workspace, tinfoil):
+ super()._initialize(config, workspace, tinfoil)
+ gdb_bin = self.target_sys + '-gdb'
+ gdb_path = os.path.join(
+ self.staging_bindir_native, self.target_sys, gdb_bin)
+ self.gdb = gdb_path
+ self.gdbserver_path = self.__find_gdbserver(config, tinfoil)
+
+ @property
+ def host(self):
+ return self.target_device.host
+
+
+class RecipeImage:
+ """Handle some image recipe related properties
+
+ Most workflows require firmware that runs on the target device.
+ This firmware must be consistent with the setup of the host system.
+ In particular, the debug symbols must be compatible. For this, the
+ rootfs must be created as part of the SDK.
+ """
+
+ def __init__(self, name):
+ self.combine_dbg_image = False
+ self.gdbserver_missing = False
+ self.name = name
+ self.rootfs = None
+ self.__rootfs_dbg = None
+ self.bootstrap_tasks = [self.name + ':do_build']
+
+ def initialize(self, config, tinfoil):
+ image_d = parse_recipe(
+ config, tinfoil, self.name, appends=True, filter_workspace=False)
+ if not image_d:
+ raise DevtoolError(
+ "Parsing image recipe %s failed" % self.name)
+
+ self.combine_dbg_image = bb.data.inherits_class(
+ 'image-combined-dbg', image_d)
+
+ workdir = image_d.getVar('WORKDIR')
+ self.rootfs = os.path.join(workdir, 'rootfs')
+ if image_d.getVar('IMAGE_GEN_DEBUGFS') == "1":
+ self.__rootfs_dbg = os.path.join(workdir, 'rootfs-dbg')
+
+ self.gdbserver_missing = 'gdbserver' not in image_d.getVar(
+ 'IMAGE_INSTALL')
+
+ @property
+ def debug_support(self):
+ return bool(self.rootfs_dbg)
+
+ @property
+ def rootfs_dbg(self):
+ if self.__rootfs_dbg and os.path.isdir(self.__rootfs_dbg):
+ return self.__rootfs_dbg
+ return None
+
+
+class RecipeMetaIdeSupport:
+ """For the shared sysroots mode meta-ide-support is needed
+
+ For use cases where just a cross tool-chain is required but
+ no recipe is used, devtool ide-sdk abstracts calling bitbake meta-ide-support
+ and bitbake build-sysroots. This also makes it possible to expose the cross-toolchains
+ to IDEs. For example, VSCode supports different tool-chains via cmake-kits.
+ """
+
+ def __init__(self):
+ self.bootstrap_tasks = ['meta-ide-support:do_build']
+ self.topdir = None
+ self.datadir = None
+ self.deploy_dir_image = None
+ self.build_sys = None
+ # From toolchain-scripts
+ self.real_multimach_target_sys = None
+
+ def initialize(self, config, tinfoil):
+ meta_ide_support_d = parse_recipe(
+ config, tinfoil, 'meta-ide-support', appends=True, filter_workspace=False)
+ if not meta_ide_support_d:
+ raise DevtoolError("Parsing meta-ide-support recipe failed")
+
+ self.topdir = meta_ide_support_d.getVar('TOPDIR')
+ self.datadir = meta_ide_support_d.getVar('datadir')
+ self.deploy_dir_image = meta_ide_support_d.getVar(
+ 'DEPLOY_DIR_IMAGE')
+ self.build_sys = meta_ide_support_d.getVar('BUILD_SYS')
+ self.real_multimach_target_sys = meta_ide_support_d.getVar(
+ 'REAL_MULTIMACH_TARGET_SYS')
+
+
+class RecipeBuildSysroots:
+ """For the shared sysroots mode build-sysroots is needed"""
+
+ def __init__(self):
+ self.standalone_sysroot = None
+ self.standalone_sysroot_native = None
+ self.bootstrap_tasks = [
+ 'build-sysroots:do_build_target_sysroot',
+ 'build-sysroots:do_build_native_sysroot'
+ ]
+
+ def initialize(self, config, tinfoil):
+ build_sysroots_d = parse_recipe(
+ config, tinfoil, 'build-sysroots', appends=True, filter_workspace=False)
+ if not build_sysroots_d:
+ raise DevtoolError("Parsing build-sysroots recipe failed")
+ self.standalone_sysroot = build_sysroots_d.getVar(
+ 'STANDALONE_SYSROOT')
+ self.standalone_sysroot_native = build_sysroots_d.getVar(
+ 'STANDALONE_SYSROOT_NATIVE')
+
+
+class SharedSysrootsEnv:
+ """Handle the shared sysroots based workflow
+
+ Support the workflow with just a tool-chain without a recipe.
+ It's basically like:
+ bitbake some-dependencies
+ bitbake meta-ide-support
+ bitbake build-sysroots
+ Use the environment-* file found in the deploy folder
+ """
+
+ def __init__(self):
+ self.ide_support = None
+ self.build_sysroots = None
+
+ def initialize(self, ide_support, build_sysroots):
+ self.ide_support = ide_support
+ self.build_sysroots = build_sysroots
+
+ def setup_ide(self, ide):
+ ide.setup(self)
+
+
+class RecipeNotModified:
+ """Handling of recipes added to the Direct DSK shared sysroots."""
+
+ def __init__(self, name):
+ self.name = name
+ self.bootstrap_tasks = [name + ':do_populate_sysroot']
+
+
+class RecipeModified:
+ """Handling of recipes in the workspace created by devtool modify"""
+ OE_INIT_BUILD_ENV = 'oe-init-build-env'
+
+ VALID_BASH_ENV_NAME_CHARS = re.compile(r"^[a-zA-Z0-9_]*$")
+
+ def __init__(self, name):
+ self.name = name
+ self.bootstrap_tasks = [name + ':do_install']
+ self.gdb_cross = None
+ # workspace
+ self.real_srctree = None
+ self.srctree = None
+ self.ide_sdk_dir = None
+ self.ide_sdk_scripts_dir = None
+ self.bbappend = None
+ # recipe variables from d.getVar
+ self.b = None
+ self.base_libdir = None
+ self.bblayers = None
+ self.bpn = None
+ self.d = None
+ self.fakerootcmd = None
+ self.fakerootenv = None
+ self.libdir = None
+ self.max_process = None
+ self.package_arch = None
+ self.package_debug_split_style = None
+ self.path = None
+ self.pn = None
+ self.recipe_sysroot = None
+ self.recipe_sysroot_native = None
+ self.staging_incdir = None
+ self.strip_cmd = None
+ self.target_arch = None
+ self.target_dbgsrc_dir = None
+ self.topdir = None
+ self.workdir = None
+ self.recipe_id = None
+ # replicate bitbake build environment
+ self.exported_vars = None
+ self.cmd_compile = None
+ self.__oe_init_dir = None
+ # main build tool used by this recipe
+ self.build_tool = BuildTool.UNDEFINED
+ # build_tool = cmake
+ self.oecmake_generator = None
+ self.cmake_cache_vars = None
+ # build_tool = meson
+ self.meson_buildtype = None
+ self.meson_wrapper = None
+ self.mesonopts = None
+ self.extra_oemeson = None
+ self.meson_cross_file = None
+
+ def initialize(self, config, workspace, tinfoil):
+ recipe_d = parse_recipe(
+ config, tinfoil, self.name, appends=True, filter_workspace=False)
+ if not recipe_d:
+ raise DevtoolError("Parsing %s recipe failed" % self.name)
+
+ # Verify this recipe is built as externalsrc setup by devtool modify
+ workspacepn = check_workspace_recipe(
+ workspace, self.name, bbclassextend=True)
+ self.srctree = workspace[workspacepn]['srctree']
+ # Need to grab this here in case the source is within a subdirectory
+ self.real_srctree = get_real_srctree(
+ self.srctree, recipe_d.getVar('S'), recipe_d.getVar('WORKDIR'))
+ self.bbappend = workspace[workspacepn]['bbappend']
+
+ self.ide_sdk_dir = os.path.join(
+ config.workspace_path, 'ide-sdk', self.name)
+ if os.path.exists(self.ide_sdk_dir):
+ shutil.rmtree(self.ide_sdk_dir)
+ self.ide_sdk_scripts_dir = os.path.join(self.ide_sdk_dir, 'scripts')
+
+ self.b = recipe_d.getVar('B')
+ self.base_libdir = recipe_d.getVar('base_libdir')
+ self.bblayers = recipe_d.getVar('BBLAYERS').split()
+ self.bpn = recipe_d.getVar('BPN')
+ self.cxx = recipe_d.getVar('CXX')
+ self.d = recipe_d.getVar('D')
+ self.fakerootcmd = recipe_d.getVar('FAKEROOTCMD')
+ self.fakerootenv = recipe_d.getVar('FAKEROOTENV')
+ self.libdir = recipe_d.getVar('libdir')
+ self.max_process = int(recipe_d.getVar(
+ "BB_NUMBER_THREADS") or os.cpu_count() or 1)
+ self.package_arch = recipe_d.getVar('PACKAGE_ARCH')
+ self.package_debug_split_style = recipe_d.getVar(
+ 'PACKAGE_DEBUG_SPLIT_STYLE')
+ self.path = recipe_d.getVar('PATH')
+ self.pn = recipe_d.getVar('PN')
+ self.recipe_sysroot = os.path.realpath(
+ recipe_d.getVar('RECIPE_SYSROOT'))
+ self.recipe_sysroot_native = os.path.realpath(
+ recipe_d.getVar('RECIPE_SYSROOT_NATIVE'))
+ self.staging_bindir_toolchain = os.path.realpath(
+ recipe_d.getVar('STAGING_BINDIR_TOOLCHAIN'))
+ self.staging_incdir = os.path.realpath(
+ recipe_d.getVar('STAGING_INCDIR'))
+ self.strip_cmd = recipe_d.getVar('STRIP')
+ self.target_arch = recipe_d.getVar('TARGET_ARCH')
+ self.target_dbgsrc_dir = recipe_d.getVar('TARGET_DBGSRC_DIR')
+ self.topdir = recipe_d.getVar('TOPDIR')
+ self.workdir = os.path.realpath(recipe_d.getVar('WORKDIR'))
+
+ self.__init_exported_variables(recipe_d)
+
+ if bb.data.inherits_class('cmake', recipe_d):
+ self.oecmake_generator = recipe_d.getVar('OECMAKE_GENERATOR')
+ self.__init_cmake_preset_cache(recipe_d)
+ self.build_tool = BuildTool.CMAKE
+ elif bb.data.inherits_class('meson', recipe_d):
+ self.meson_buildtype = recipe_d.getVar('MESON_BUILDTYPE')
+ self.mesonopts = recipe_d.getVar('MESONOPTS')
+ self.extra_oemeson = recipe_d.getVar('EXTRA_OEMESON')
+ self.meson_cross_file = recipe_d.getVar('MESON_CROSS_FILE')
+ self.build_tool = BuildTool.MESON
+
+ # Recipe ID is the identifier for IDE config sections
+ self.recipe_id = self.bpn + "-" + self.package_arch
+ self.recipe_id_pretty = self.bpn + ": " + self.package_arch
+
+ def append_to_bbappend(self, append_text):
+ with open(self.bbappend, 'a') as bbap:
+ bbap.write(append_text)
+
+ def remove_from_bbappend(self, append_text):
+ with open(self.bbappend, 'r') as bbap:
+ text = bbap.read()
+ new_text = text.replace(append_text, '')
+ with open(self.bbappend, 'w') as bbap:
+ bbap.write(new_text)
+
+ @staticmethod
+ def is_valid_shell_variable(var):
+ """Skip strange shell variables like systemd
+
+ prevent from strange bugs because of strange variables which
+ are not used in this context but break various tools.
+ """
+ if RecipeModified.VALID_BASH_ENV_NAME_CHARS.match(var):
+ bb.debug(1, "ignoring variable: %s" % var)
+ return True
+ return False
+
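Examples of what the regex accepts: plain POSIX-style names pass, while names containing other characters (such as exported bash functions) are dropped:

    assert RecipeModified.is_valid_shell_variable('PATH')
    assert not RecipeModified.is_valid_shell_variable('BASH_FUNC_foo%%')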
+ def debug_build_config(self, args):
+ """Explicitely set for example CMAKE_BUILD_TYPE to Debug if not defined otherwise"""
+ if self.build_tool is BuildTool.CMAKE:
+ append_text = os.linesep + \
+ 'OECMAKE_ARGS:append = " -DCMAKE_BUILD_TYPE:STRING=Debug"' + os.linesep
+ if args.debug_build_config and not 'CMAKE_BUILD_TYPE' in self.cmake_cache_vars:
+ self.cmake_cache_vars['CMAKE_BUILD_TYPE'] = {
+ "type": "STRING",
+ "value": "Debug",
+ }
+ self.append_to_bbappend(append_text)
+ elif 'CMAKE_BUILD_TYPE' in self.cmake_cache_vars:
+ del self.cmake_cache_vars['CMAKE_BUILD_TYPE']
+ self.remove_from_bbappend(append_text)
+ elif self.build_tool is BuildTool.MESON:
+ append_text = os.linesep + 'MESON_BUILDTYPE = "debug"' + os.linesep
+ if args.debug_build_config and self.meson_buildtype != "debug":
+ self.mesonopts = self.mesonopts.replace(
+ '--buildtype ' + self.meson_buildtype, '--buildtype debug')
+ self.append_to_bbappend(append_text)
+ elif self.meson_buildtype == "debug":
+ self.mesonopts = self.mesonopts.replace(
+ '--buildtype debug', '--buildtype plain')
+ self.remove_from_bbappend(append_text)
+ elif args.debug_build_config:
+ logger.warning(
+ "--debug-build-config is not implemented for this build tool yet.")
+
+ def solib_search_path(self, image):
+ """Search for debug symbols in the rootfs and rootfs-dbg
+
+ The debug symbols of shared libraries which are provided by other packages
+ are grabbed from the -dbg packages in the rootfs-dbg.
+
+ But most cross debugging tools like gdb, perf, and systemtap need to find
+ the executable/library first and, via its debuglink note, locate the
+ corresponding symbols file. Therefore the library paths from the rootfs are added as well.
+
+ Note: For the devtool modified recipe compiled from the IDE, the debug
+ symbols are taken from the unstripped binaries in the image folder.
+ Also, devtool deploy-target takes the files from the image folder.
+ Debug symbols in the image folder refer to the corresponding source files
+ with absolute paths of the build machine. Debug symbols found in the
+ rootfs-dbg are relocated and contain paths which refer to the source files
+ installed on the target device e.g. /usr/src/...
+ """
+ base_libdir = self.base_libdir.lstrip('/')
+ libdir = self.libdir.lstrip('/')
+ so_paths = [
+ # debug symbols for package_debug_split_style: debug-with-srcpkg or .debug
+ os.path.join(image.rootfs_dbg, base_libdir, ".debug"),
+ os.path.join(image.rootfs_dbg, libdir, ".debug"),
+ # debug symbols for package_debug_split_style: debug-file-directory
+ os.path.join(image.rootfs_dbg, "usr", "lib", "debug"),
+
+            # The binaries are required as well; the debug packages alone are not enough.
+            # With image-combined-dbg.bbclass the binaries are copied into rootfs-dbg.
+ os.path.join(image.rootfs_dbg, base_libdir),
+ os.path.join(image.rootfs_dbg, libdir),
+ # Without image-combined-dbg.bbclass the binaries are only in rootfs.
+ # Note: Stepping into source files located in rootfs-dbg does not
+ # work without image-combined-dbg.bbclass yet.
+ os.path.join(image.rootfs, base_libdir),
+ os.path.join(image.rootfs, libdir)
+ ]
+ return so_paths
+
+ def solib_search_path_str(self, image):
+ """Return a : separated list of paths usable by GDB's set solib-search-path"""
+ return ':'.join(self.solib_search_path(image))
+
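+    # Hedged usage sketch for solib_search_path_str() (paths illustrative):
+    #   (gdb) set solib-search-path /.../rootfs-dbg/lib/.debug:/.../rootfs/lib
+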
+ def __init_exported_variables(self, d):
+ """Find all variables with export flag set.
+
+        This allows generating IDE configurations which compile with the same
+        environment as bitbake does, which is at least a reasonable default.
+ """
+ exported_vars = {}
+
+ vars = (key for key in d.keys() if not key.startswith(
+ "__") and not d.getVarFlag(key, "func", False))
+ for var in vars:
+ func = d.getVarFlag(var, "func", False)
+ if d.getVarFlag(var, 'python', False) and func:
+ continue
+ export = d.getVarFlag(var, "export", False)
+ unexport = d.getVarFlag(var, "unexport", False)
+ if not export and not unexport and not func:
+ continue
+ if unexport:
+ continue
+
+ val = d.getVar(var)
+ if val is None:
+ continue
+ if set(var) & set("-.{}+"):
+            logger.warning(
+                "Found invalid character in variable name %s", str(var))
+ continue
+ varExpanded = d.expand(var)
+ val = str(val)
+
+ if not RecipeModified.is_valid_shell_variable(varExpanded):
+ continue
+
+ if func:
+ code_line = "line: {0}, file: {1}\n".format(
+ d.getVarFlag(var, "lineno", False),
+ d.getVarFlag(var, "filename", False))
+ val = val.rstrip('\n')
+                logger.warning("exported shell function %s() is not supported (%s)" %
+                               (varExpanded, code_line))
+ continue
+
+ if export:
+ exported_vars[varExpanded] = val.strip()
+ continue
+
+ self.exported_vars = exported_vars
+
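+    # Sketch of the result of __init_exported_variables() (values are
+    # hypothetical): exported_vars is a plain name -> value mapping, e.g.
+    #   {'CC': 'aarch64-poky-linux-gcc ...', 'PKG_CONFIG_PATH': '...'}
+    # It is reused verbatim as the "environment" of the cmake presets and in
+    # the meson wrapper below.
+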
+ def __init_cmake_preset_cache(self, d):
+ """Get the arguments passed to cmake
+
+        Replicate the cmake configure arguments in full detail so that the
+        build folder can be shared between bitbake and the SDK.
+ """
+ site_file = os.path.join(self.workdir, 'site-file.cmake')
+ if os.path.exists(site_file):
+            logger.warning("site-file.cmake is not supported")
+
+ cache_vars = {}
+ oecmake_args = d.getVar('OECMAKE_ARGS').split()
+ extra_oecmake = d.getVar('EXTRA_OECMAKE').split()
+ for param in oecmake_args + extra_oecmake:
+ d_pref = "-D"
+ if param.startswith(d_pref):
+ param = param[len(d_pref):]
+ else:
+                logger.error("expected a -D prefix, got: %s" % param)
+ param_s = param.split('=', 1)
+ param_nt = param_s[0].split(':', 1)
+
+ def handle_undefined_variable(var):
+ if var.startswith('${') and var.endswith('}'):
+ return ''
+ else:
+ return var
+ # Example: FOO=ON
+ if len(param_nt) == 1:
+ cache_vars[param_s[0]] = handle_undefined_variable(param_s[1])
+ # Example: FOO:PATH=/tmp
+ elif len(param_nt) == 2:
+ cache_vars[param_nt[0]] = {
+ "type": param_nt[1],
+ "value": handle_undefined_variable(param_s[1]),
+ }
+ else:
+                logger.error("cannot parse %s" % param)
+ self.cmake_cache_vars = cache_vars
+
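+    # Rough sketch of the parsing in __init_cmake_preset_cache() (values are
+    # hypothetical):
+    #   EXTRA_OECMAKE = "-DFOO=ON -DBAR:PATH=/tmp"
+    # yields
+    #   cache_vars == {'FOO': 'ON', 'BAR': {'type': 'PATH', 'value': '/tmp'}}
+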
+ def cmake_preset(self):
+ """Create a preset for cmake that mimics how bitbake calls cmake"""
+ toolchain_file = os.path.join(self.workdir, 'toolchain.cmake')
+ cmake_executable = os.path.join(
+ self.recipe_sysroot_native, 'usr', 'bin', 'cmake')
+ self.cmd_compile = cmake_executable + " --build --preset " + self.recipe_id
+
+ preset_dict_configure = {
+ "name": self.recipe_id,
+ "displayName": self.recipe_id_pretty,
+ "description": "Bitbake build environment for the recipe %s compiled for %s" % (self.bpn, self.package_arch),
+ "binaryDir": self.b,
+ "generator": self.oecmake_generator,
+ "toolchainFile": toolchain_file,
+ "cacheVariables": self.cmake_cache_vars,
+ "environment": self.exported_vars,
+ "cmakeExecutable": cmake_executable
+ }
+
+ preset_dict_build = {
+ "name": self.recipe_id,
+ "displayName": self.recipe_id_pretty,
+ "description": "Bitbake build environment for the recipe %s compiled for %s" % (self.bpn, self.package_arch),
+ "configurePreset": self.recipe_id,
+ "inheritConfigureEnvironment": True
+ }
+
+ preset_dict_test = {
+ "name": self.recipe_id,
+ "displayName": self.recipe_id_pretty,
+ "description": "Bitbake build environment for the recipe %s compiled for %s" % (self.bpn, self.package_arch),
+ "configurePreset": self.recipe_id,
+ "inheritConfigureEnvironment": True
+ }
+
+ preset_dict = {
+ "version": 3, # cmake 3.21, backward compatible with kirkstone
+ "configurePresets": [preset_dict_configure],
+ "buildPresets": [preset_dict_build],
+ "testPresets": [preset_dict_test]
+ }
+
+ # Finally write the json file
+ json_file = 'CMakeUserPresets.json'
+ json_path = os.path.join(self.real_srctree, json_file)
+ logger.info("Updating CMake preset: %s (%s)" % (json_file, json_path))
+ if not os.path.exists(self.real_srctree):
+ os.makedirs(self.real_srctree)
+ try:
+ with open(json_path) as f:
+ orig_dict = json.load(f)
+ except json.decoder.JSONDecodeError:
+ logger.info(
+ "Decoding %s failed. Probably because of comments in the json file" % json_path)
+ orig_dict = {}
+ except FileNotFoundError:
+ orig_dict = {}
+
+ # Add or update the presets for the recipe and keep other presets
+ for k, v in preset_dict.items():
+ if isinstance(v, list):
+ update_preset = v[0]
+ preset_added = False
+ if k in orig_dict:
+ for index, orig_preset in enumerate(orig_dict[k]):
+ if 'name' in orig_preset:
+ if orig_preset['name'] == update_preset['name']:
+ logger.debug("Updating preset: %s" %
+ orig_preset['name'])
+ orig_dict[k][index] = update_preset
+ preset_added = True
+ break
+ else:
+ logger.debug("keeping preset: %s" %
+ orig_preset['name'])
+ else:
+                        logger.warning("preset without a name found")
+                if not preset_added:
+                    if k not in orig_dict:
+ orig_dict[k] = []
+ orig_dict[k].append(update_preset)
+ logger.debug("Added preset: %s" %
+ update_preset['name'])
+ else:
+ orig_dict[k] = v
+
+ with open(json_path, 'w') as f:
+ json.dump(orig_dict, f, indent=4)
+
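+    # For orientation, a trimmed sketch of the CMakeUserPresets.json written
+    # by cmake_preset() (the recipe_id "cmake-example-cortexa57" is
+    # hypothetical):
+    #   {
+    #       "version": 3,
+    #       "configurePresets": [{"name": "cmake-example-cortexa57", ...}],
+    #       "buildPresets": [{"name": "cmake-example-cortexa57", ...}],
+    #       "testPresets": [{"name": "cmake-example-cortexa57", ...}]
+    #   }
+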
+ def gen_meson_wrapper(self):
+ """Generate a wrapper script to call meson with the cross environment"""
+ bb.utils.mkdirhier(self.ide_sdk_scripts_dir)
+ meson_wrapper = os.path.join(self.ide_sdk_scripts_dir, 'meson')
+ meson_real = os.path.join(
+ self.recipe_sysroot_native, 'usr', 'bin', 'meson.real')
+ with open(meson_wrapper, 'w') as mwrap:
+ mwrap.write("#!/bin/sh" + os.linesep)
+ for var, val in self.exported_vars.items():
+ mwrap.write('export %s="%s"' % (var, val) + os.linesep)
+ mwrap.write("unset CC CXX CPP LD AR NM STRIP" + os.linesep)
+ private_temp = os.path.join(self.b, "meson-private", "tmp")
+ mwrap.write('mkdir -p "%s"' % private_temp + os.linesep)
+ mwrap.write('export TMPDIR="%s"' % private_temp + os.linesep)
+ mwrap.write('exec "%s" "$@"' % meson_real + os.linesep)
+ st = os.stat(meson_wrapper)
+ os.chmod(meson_wrapper, st.st_mode | stat.S_IEXEC)
+ self.meson_wrapper = meson_wrapper
+ self.cmd_compile = meson_wrapper + " compile -C " + self.b
+
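+    # The wrapper written by gen_meson_wrapper() is a small shell script
+    # along these lines (variables and paths are illustrative):
+    #   #!/bin/sh
+    #   export PKG_CONFIG_PATH="..."
+    #   unset CC CXX CPP LD AR NM STRIP
+    #   mkdir -p ".../build/meson-private/tmp"
+    #   export TMPDIR=".../build/meson-private/tmp"
+    #   exec ".../recipe-sysroot-native/usr/bin/meson.real" "$@"
+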
+ def which(self, executable):
+ bin_path = shutil.which(executable, path=self.path)
+ if not bin_path:
+ raise DevtoolError(
+ 'Cannot find %s. Probably the recipe %s is not built yet.' % (executable, self.bpn))
+ return bin_path
+
+ @staticmethod
+ def is_elf_file(file_path):
+ with open(file_path, "rb") as f:
+ data = f.read(4)
+ if data == b'\x7fELF':
+ return True
+ return False
+
+ def find_installed_binaries(self):
+        """Find all executable ELF files in the image directory"""
+ binaries = []
+ d_len = len(self.d)
+ re_so = re.compile(r'.*\.so[.0-9]*$')
+ for root, _, files in os.walk(self.d, followlinks=False):
+ for file in files:
+                abs_name = os.path.join(root, file)
+                # Check the full path; os.walk yields bare file names which
+                # are not relative to the current working directory
+                if os.path.islink(abs_name):
+                    continue
+                if re_so.match(file):
+                    continue
+                if os.access(abs_name, os.X_OK) and RecipeModified.is_elf_file(abs_name):
+ binaries.append(abs_name[d_len:])
+ return sorted(binaries)
+
+ def gen_delete_package_dirs(self):
+        """Delete the folders created by the package tasks
+
+        This is a workaround for an issue with recipes having their sources
+        downloaded as file://
+        This likely breaks pseudo like:
+          path mismatch [3 links]: ino 79147802 db
+          .../build/tmp/.../cmake-example/1.0/package/usr/src/debug/
+          cmake-example/1.0-r0/oe-local-files/cpp-example-lib.cpp
+          .../build/workspace/sources/cmake-example/oe-local-files/cpp-example-lib.cpp
+        Since the files are outdated anyway, let's delete them (also from
+        pseudo's db) to work around this issue.
+ """
+ cmd_lines = ['#!/bin/sh']
+
+ # Set up the appropriate environment
+ newenv = dict(os.environ)
+ for varvalue in self.fakerootenv.split():
+ if '=' in varvalue:
+ splitval = varvalue.split('=', 1)
+ newenv[splitval[0]] = splitval[1]
+
+ # Replicate the environment variables from bitbake
+ for var, val in newenv.items():
+ if not RecipeModified.is_valid_shell_variable(var):
+ continue
+ cmd_lines.append('%s="%s"' % (var, val))
+ cmd_lines.append('export %s' % var)
+
+ # Delete the folders
+ pkg_dirs = ' '.join([os.path.join(self.workdir, d) for d in [
+ "package", "packages-split", "pkgdata", "sstate-install-package", "debugsources.list", "*.spec"]])
+ cmd = "%s rm -rf %s" % (self.fakerootcmd, pkg_dirs)
+        cmd_lines.append('%s || { echo "%s failed"; exit 1; }' % (cmd, cmd))
+
+ return self.write_script(cmd_lines, 'delete_package_dirs')
+
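+    # The helper written by gen_delete_package_dirs() is a plain shell script
+    # roughly like this (environment variables and paths are illustrative):
+    #   #!/bin/sh
+    #   PSEUDO_LOCALSTATEDIR="..."
+    #   export PSEUDO_LOCALSTATEDIR
+    #   .../pseudo rm -rf .../package .../packages-split ... || { echo "rm -rf ... failed"; exit 1; }
+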
+ def gen_deploy_target_script(self, args):
+ """Generate a script which does what devtool deploy-target does
+
+        This script is much quicker than devtool deploy-target because it
+        does not need to start a bitbake server. All information from tinfoil
+        is hard-coded in the generated script.
+ """
+ cmd_lines = ['#!%s' % str(sys.executable)]
+ cmd_lines.append('import sys')
+ cmd_lines.append('devtool_sys_path = %s' % str(sys.path))
+ cmd_lines.append('devtool_sys_path.reverse()')
+ cmd_lines.append('for p in devtool_sys_path:')
+ cmd_lines.append(' if p not in sys.path:')
+ cmd_lines.append(' sys.path.insert(0, p)')
+ cmd_lines.append('from devtool.deploy import deploy_no_d')
+ args_filter = ['debug', 'dry_run', 'key', 'no_check_space', 'no_host_check',
+ 'no_preserve', 'port', 'show_status', 'ssh_exec', 'strip', 'target']
+ filtered_args_dict = {key: value for key, value in vars(
+ args).items() if key in args_filter}
+ cmd_lines.append('filtered_args_dict = %s' % str(filtered_args_dict))
+ cmd_lines.append('class Dict2Class(object):')
+ cmd_lines.append(' def __init__(self, my_dict):')
+ cmd_lines.append(' for key in my_dict:')
+ cmd_lines.append(' setattr(self, key, my_dict[key])')
+ cmd_lines.append('filtered_args = Dict2Class(filtered_args_dict)')
+ cmd_lines.append(
+ 'setattr(filtered_args, "recipename", "%s")' % self.bpn)
+ cmd_lines.append('deploy_no_d("%s", "%s", "%s", "%s", "%s", "%s", %d, "%s", "%s", filtered_args)' %
+ (self.d, self.workdir, self.path, self.strip_cmd,
+ self.libdir, self.base_libdir, self.max_process,
+ self.fakerootcmd, self.fakerootenv))
+ return self.write_script(cmd_lines, 'deploy_target')
+
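+    # Design note on gen_deploy_target_script(): the script freezes sys.path
+    # and the filtered argparse namespace at generation time, so it can later
+    # call deploy_no_d() without starting a bitbake server, e.g.
+    # (hypothetical path):
+    #   $ .../ide-sdk/scripts/deploy_target_cmake-example-cortexa57
+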
+ def gen_install_deploy_script(self, args):
+ """Generate a script which does install and deploy"""
+ cmd_lines = ['#!/bin/bash']
+
+ cmd_lines.append(self.gen_delete_package_dirs())
+
+ # . oe-init-build-env $BUILDDIR
+ # Note: Sourcing scripts with arguments requires bash
+ cmd_lines.append('cd "%s" || { echo "cd %s failed"; exit 1; }' % (
+ self.oe_init_dir, self.oe_init_dir))
+ cmd_lines.append('. "%s" "%s" || { echo ". %s %s failed"; exit 1; }' % (
+ self.oe_init_build_env, self.topdir, self.oe_init_build_env, self.topdir))
+
+ # bitbake -c install
+ cmd_lines.append(
+ 'bitbake %s -c install --force || { echo "bitbake %s -c install --force failed"; exit 1; }' % (self.bpn, self.bpn))
+
+ # Self contained devtool deploy-target
+ cmd_lines.append(self.gen_deploy_target_script(args))
+
+ return self.write_script(cmd_lines, 'install_and_deploy')
+
+ def write_script(self, cmd_lines, script_name):
+ bb.utils.mkdirhier(self.ide_sdk_scripts_dir)
+ script_name_arch = script_name + '_' + self.recipe_id
+ script_file = os.path.join(self.ide_sdk_scripts_dir, script_name_arch)
+ with open(script_file, 'w') as script_f:
+ script_f.write(os.linesep.join(cmd_lines))
+ st = os.stat(script_file)
+ os.chmod(script_file, st.st_mode | stat.S_IEXEC)
+ return script_file
+
+ @property
+ def oe_init_build_env(self):
+ """Find the oe-init-build-env used for this setup"""
+ oe_init_dir = self.oe_init_dir
+ if oe_init_dir:
+ return os.path.join(oe_init_dir, RecipeModified.OE_INIT_BUILD_ENV)
+ return None
+
+ @property
+ def oe_init_dir(self):
+ """Find the directory where the oe-init-build-env is located
+
+ Assumption: There might be a layer with higher priority than poky
+        which provides the oe-init-build-env script in its top-level folder.
+ """
+ if not self.__oe_init_dir:
+ for layer in reversed(self.bblayers):
+ result = subprocess.run(
+ ['git', 'rev-parse', '--show-toplevel'], cwd=layer, capture_output=True)
+ if result.returncode == 0:
+ oe_init_dir = result.stdout.decode('utf-8').strip()
+ oe_init_path = os.path.join(
+ oe_init_dir, RecipeModified.OE_INIT_BUILD_ENV)
+ if os.path.exists(oe_init_path):
+ logger.debug("Using %s from: %s" % (
+ RecipeModified.OE_INIT_BUILD_ENV, oe_init_path))
+ self.__oe_init_dir = oe_init_dir
+ break
+ if not self.__oe_init_dir:
+ logger.error("Cannot find the bitbake top level folder")
+ return self.__oe_init_dir
+
+
+def ide_setup(args, config, basepath, workspace):
+ """Generate the IDE configuration for the workspace"""
+
+    # Explicitly passing some special recipes does not make sense
+ for recipe in args.recipenames:
+ if recipe in ['meta-ide-support', 'build-sysroots']:
+ raise DevtoolError("Invalid recipe: %s." % recipe)
+
+ # Collect information about tasks which need to be bitbaked
+ bootstrap_tasks = []
+ bootstrap_tasks_late = []
+ tinfoil = setup_tinfoil(config_only=False, basepath=basepath)
+ try:
+ # define mode depending on recipes which need to be processed
+ recipes_image_names = []
+ recipes_modified_names = []
+ recipes_other_names = []
+ for recipe in args.recipenames:
+ try:
+ check_workspace_recipe(
+ workspace, recipe, bbclassextend=True)
+ recipes_modified_names.append(recipe)
+ except DevtoolError:
+ recipe_d = parse_recipe(
+ config, tinfoil, recipe, appends=True, filter_workspace=False)
+ if not recipe_d:
+ raise DevtoolError("Parsing recipe %s failed" % recipe)
+ if bb.data.inherits_class('image', recipe_d):
+ recipes_image_names.append(recipe)
+ else:
+ recipes_other_names.append(recipe)
+
+ invalid_params = False
+ if args.mode == DevtoolIdeMode.shared:
+        if recipes_modified_names:
+            logger.error("Modified recipes %s cannot be handled in shared sysroots mode." % str(
+                recipes_modified_names))
+ invalid_params = True
+ if args.mode == DevtoolIdeMode.modified:
+        if recipes_other_names:
+            logger.error("Recipes %s which are not modified can only be handled in shared sysroots mode." % str(
+                recipes_other_names))
+ invalid_params = True
+ if len(recipes_image_names) != 1:
+ logger.error(
+ "One image recipe is required as the rootfs for the remote development.")
+ invalid_params = True
+ for modified_recipe_name in recipes_modified_names:
+ if modified_recipe_name.startswith('nativesdk-') or modified_recipe_name.endswith('-native'):
+ logger.error(
+                "Only cross-compiled recipes are supported. %s is not cross-compiled." % modified_recipe_name)
+ invalid_params = True
+
+ if invalid_params:
+        raise DevtoolError("Invalid parameters were passed.")
+
+ # For the shared sysroots mode, add all dependencies of all the images to the sysroots
+ # For the modified mode provide one rootfs and the corresponding debug symbols via rootfs-dbg
+ recipes_images = []
+ for recipes_image_name in recipes_image_names:
+ logger.info("Using image: %s" % recipes_image_name)
+ recipe_image = RecipeImage(recipes_image_name)
+ recipe_image.initialize(config, tinfoil)
+ bootstrap_tasks += recipe_image.bootstrap_tasks
+ recipes_images.append(recipe_image)
+
+ # Provide a Direct SDK with shared sysroots
+ recipes_not_modified = []
+ if args.mode == DevtoolIdeMode.shared:
+ ide_support = RecipeMetaIdeSupport()
+ ide_support.initialize(config, tinfoil)
+ bootstrap_tasks += ide_support.bootstrap_tasks
+
+ logger.info("Adding %s to the Direct SDK sysroots." %
+ str(recipes_other_names))
+ for recipe_name in recipes_other_names:
+ recipe_not_modified = RecipeNotModified(recipe_name)
+ bootstrap_tasks += recipe_not_modified.bootstrap_tasks
+ recipes_not_modified.append(recipe_not_modified)
+
+ build_sysroots = RecipeBuildSysroots()
+ build_sysroots.initialize(config, tinfoil)
+ bootstrap_tasks_late += build_sysroots.bootstrap_tasks
+ shared_env = SharedSysrootsEnv()
+ shared_env.initialize(ide_support, build_sysroots)
+
+ recipes_modified = []
+ if args.mode == DevtoolIdeMode.modified:
+        logger.info("Setting up workspaces for modified recipes: %s" %
+ str(recipes_modified_names))
+ gdbs_cross = {}
+ for recipe_name in recipes_modified_names:
+ recipe_modified = RecipeModified(recipe_name)
+ recipe_modified.initialize(config, workspace, tinfoil)
+ bootstrap_tasks += recipe_modified.bootstrap_tasks
+ recipes_modified.append(recipe_modified)
+
+ if recipe_modified.target_arch not in gdbs_cross:
+ target_device = TargetDevice(args)
+ gdb_cross = RecipeGdbCross(
+ args, recipe_modified.target_arch, target_device)
+ gdb_cross.initialize(config, workspace, tinfoil)
+ bootstrap_tasks += gdb_cross.bootstrap_tasks
+ gdbs_cross[recipe_modified.target_arch] = gdb_cross
+ recipe_modified.gdb_cross = gdbs_cross[recipe_modified.target_arch]
+
+ finally:
+ tinfoil.shutdown()
+
+ if not args.skip_bitbake:
+ bb_cmd = 'bitbake '
+ if args.bitbake_k:
+ bb_cmd += "-k "
+ bb_cmd_early = bb_cmd + ' '.join(bootstrap_tasks)
+ exec_build_env_command(
+ config.init_path, basepath, bb_cmd_early, watch=True)
+ if bootstrap_tasks_late:
+ bb_cmd_late = bb_cmd + ' '.join(bootstrap_tasks_late)
+ exec_build_env_command(
+ config.init_path, basepath, bb_cmd_late, watch=True)
+
+ for recipe_image in recipes_images:
+        if recipe_image.gdbserver_missing:
+ logger.warning(
+ "gdbserver not installed in image %s. Remote debugging will not be available" % recipe_image)
+
+ if recipe_image.combine_dbg_image is False:
+ logger.warning(
+ 'IMAGE_CLASSES += "image-combined-dbg" is missing for image %s. Remote debugging will not find debug symbols from rootfs-dbg.' % recipe_image)
+
+ # Instantiate the active IDE plugin
+ ide = ide_plugins[args.ide]()
+ if args.mode == DevtoolIdeMode.shared:
+ ide.setup_shared_sysroots(shared_env)
+ elif args.mode == DevtoolIdeMode.modified:
+ for recipe_modified in recipes_modified:
+ if recipe_modified.build_tool is BuildTool.CMAKE:
+ recipe_modified.cmake_preset()
+ if recipe_modified.build_tool is BuildTool.MESON:
+ recipe_modified.gen_meson_wrapper()
+ ide.setup_modified_recipe(
+ args, recipe_image, recipe_modified)
+ else:
+ raise DevtoolError("Must not end up here.")
+
+
+def register_commands(subparsers, context):
+ """Register devtool subcommands from this plugin"""
+
+ global ide_plugins
+
+    # Search for IDE plugins in all sub-folders named ide_plugins where devtool searches for plugins.
+ pluginpaths = [os.path.join(path, 'ide_plugins')
+ for path in context.pluginpaths]
+ ide_plugin_modules = []
+ for pluginpath in pluginpaths:
+ scriptutils.load_plugins(logger, ide_plugin_modules, pluginpath)
+
+ for ide_plugin_module in ide_plugin_modules:
+ if hasattr(ide_plugin_module, 'register_ide_plugin'):
+ ide_plugin_module.register_ide_plugin(ide_plugins)
+ # Sort plugins according to their priority. The first entry is the default IDE plugin.
+ ide_plugins = dict(sorted(ide_plugins.items(),
+ key=lambda p: p[1].ide_plugin_priority(), reverse=True))
+
+ parser_ide_sdk = subparsers.add_parser('ide-sdk', group='working', order=50, formatter_class=RawTextHelpFormatter,
+ help='Setup the SDK and configure the IDE')
+ parser_ide_sdk.add_argument(
+ 'recipenames', nargs='+', help='Generate an IDE configuration suitable to work on the given recipes.\n'
+        'Depending on the --mode parameter different types of SDKs and IDE configurations are generated.')
+ parser_ide_sdk.add_argument(
+ '-m', '--mode', type=DevtoolIdeMode, default=DevtoolIdeMode.modified,
+ help='Different SDK types are supported:\n'
+ '- "' + DevtoolIdeMode.modified.name + '" (default):\n'
+ ' devtool modify creates a workspace to work on the source code of a recipe.\n'
+        '  devtool ide-sdk builds the SDK and generates the IDE configuration(s) in the workspace directory(ies)\n'
+ ' Usage example:\n'
+ ' devtool modify cmake-example\n'
+ ' devtool ide-sdk cmake-example core-image-minimal\n'
+ ' Start the IDE in the workspace folder\n'
+ ' At least one devtool modified recipe plus one image recipe are required:\n'
+ ' The image recipe is used to generate the target image and the remote debug configuration.\n'
+ '- "' + DevtoolIdeMode.shared.name + '":\n'
+ ' Usage example:\n'
+ ' devtool ide-sdk -m ' + DevtoolIdeMode.shared.name + ' recipe(s)\n'
+ ' This command generates a cross-toolchain as well as the corresponding shared sysroot directories.\n'
+        '  To use this toolchain the environment-* file found in the deploy..image folder needs to be sourced into a shell.\n'
+        '  In case of VSCode and cmake the toolchain is also exposed as a cmake-kit')
+ default_ide = list(ide_plugins.keys())[0]
+ parser_ide_sdk.add_argument(
+ '-i', '--ide', choices=ide_plugins.keys(), default=default_ide,
+ help='Setup the configuration for this IDE (default: %s)' % default_ide)
+ parser_ide_sdk.add_argument(
+ '-t', '--target', default='root@192.168.7.2',
+ help='Live target machine running an ssh server: user@hostname.')
+ parser_ide_sdk.add_argument(
+        '-G', '--gdbserver-port-start', default="1234", help='Port where gdbserver is listening.')
+ parser_ide_sdk.add_argument(
+ '-c', '--no-host-check', help='Disable ssh host key checking', action='store_true')
+ parser_ide_sdk.add_argument(
+ '-e', '--ssh-exec', help='Executable to use in place of ssh')
+ parser_ide_sdk.add_argument(
+ '-P', '--port', help='Specify ssh port to use for connection to the target')
+ parser_ide_sdk.add_argument(
+ '-I', '--key', help='Specify ssh private key for connection to the target')
+ parser_ide_sdk.add_argument(
+        '--skip-bitbake', help='Generate IDE configuration but skip calling bitbake to update the SDK.', action='store_true')
+ parser_ide_sdk.add_argument(
+ '-k', '--bitbake-k', help='Pass -k parameter to bitbake', action='store_true')
+ parser_ide_sdk.add_argument(
+ '--no-strip', help='Do not strip executables prior to deploy', dest='strip', action='store_false')
+ parser_ide_sdk.add_argument(
+ '-n', '--dry-run', help='List files to be undeployed only', action='store_true')
+ parser_ide_sdk.add_argument(
+ '-s', '--show-status', help='Show progress/status output', action='store_true')
+ parser_ide_sdk.add_argument(
+ '-p', '--no-preserve', help='Do not preserve existing files', action='store_true')
+ parser_ide_sdk.add_argument(
+ '--no-check-space', help='Do not check for available space before deploying', action='store_true')
+ parser_ide_sdk.add_argument(
+ '--debug-build-config', help='Use debug build flags, for example set CMAKE_BUILD_TYPE=Debug', action='store_true')
+ parser_ide_sdk.set_defaults(func=ide_setup)
diff --git a/scripts/lib/devtool/menuconfig.py b/scripts/lib/devtool/menuconfig.py
index 95384c5333..18daef30c3 100644
--- a/scripts/lib/devtool/menuconfig.py
+++ b/scripts/lib/devtool/menuconfig.py
@@ -3,6 +3,8 @@
# Copyright (C) 2018 Xilinx
# Written by: Chandana Kalluri <ckalluri@xilinx.com>
#
+# SPDX-License-Identifier: MIT
+#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
@@ -43,7 +45,7 @@ def menuconfig(args, config, basepath, workspace):
return 1
check_workspace_recipe(workspace, args.component)
- pn = rd.getVar('PN', True)
+ pn = rd.getVar('PN')
if not rd.getVarFlag('do_menuconfig','task'):
raise DevtoolError("This recipe does not support menuconfig option")
diff --git a/scripts/lib/devtool/sdk.py b/scripts/lib/devtool/sdk.py
index 3aa42a1466..9aefd7e354 100644
--- a/scripts/lib/devtool/sdk.py
+++ b/scripts/lib/devtool/sdk.py
@@ -207,7 +207,7 @@ def sdk_update(args, config, basepath, workspace):
if not sstate_mirrors:
with open(os.path.join(conf_dir, 'site.conf'), 'a') as f:
f.write('SCONF_VERSION = "%s"\n' % site_conf_version)
- f.write('SSTATE_MIRRORS_append = " file://.* %s/sstate-cache/PATH \\n "\n' % updateserver)
+ f.write('SSTATE_MIRRORS:append = " file://.* %s/sstate-cache/PATH"\n' % updateserver)
finally:
shutil.rmtree(tmpsdk_dir)
@@ -300,7 +300,8 @@ def sdk_install(args, config, basepath, workspace):
return 2
try:
- exec_build_env_command(config.init_path, basepath, 'bitbake build-sysroots', watch=True)
+ exec_build_env_command(config.init_path, basepath, 'bitbake build-sysroots -c build_native_sysroot', watch=True)
+ exec_build_env_command(config.init_path, basepath, 'bitbake build-sysroots -c build_target_sysroot', watch=True)
except bb.process.ExecutionError as e:
raise DevtoolError('Failed to bitbake build-sysroots:\n%s' % (str(e)))
diff --git a/scripts/lib/devtool/search.py b/scripts/lib/devtool/search.py
index d24040df37..70b81cac5e 100644
--- a/scripts/lib/devtool/search.py
+++ b/scripts/lib/devtool/search.py
@@ -62,10 +62,11 @@ def search(args, config, basepath, workspace):
with open(os.path.join(pkgdata_dir, 'runtime', pkg), 'r') as f:
for line in f:
if ': ' in line:
- splitline = line.split(':', 1)
+ splitline = line.split(': ', 1)
key = splitline[0]
value = splitline[1].strip()
- if key in ['PKG_%s' % pkg, 'DESCRIPTION', 'FILES_INFO'] or key.startswith('FILERPROVIDES_'):
+ key = key.replace(":" + pkg, "")
+ if key in ['PKG', 'DESCRIPTION', 'FILES_INFO', 'FILERPROVIDES']:
if keyword_rc.search(value):
match = True
break
diff --git a/scripts/lib/devtool/standard.py b/scripts/lib/devtool/standard.py
index 261d642d4a..6674e67267 100644
--- a/scripts/lib/devtool/standard.py
+++ b/scripts/lib/devtool/standard.py
@@ -147,6 +147,8 @@ def add(args, config, basepath, workspace):
extracmdopts += ' -a'
if args.npm_dev:
extracmdopts += ' --npm-dev'
+ if args.no_pypi:
+ extracmdopts += ' --no-pypi'
if args.mirrors:
extracmdopts += ' --mirrors'
if args.srcrev:
@@ -234,10 +236,14 @@ def add(args, config, basepath, workspace):
if args.fetchuri and not args.no_git:
setup_git_repo(srctree, args.version, 'devtool', d=tinfoil.config_data)
- initial_rev = None
+ initial_rev = {}
if os.path.exists(os.path.join(srctree, '.git')):
(stdout, _) = bb.process.run('git rev-parse HEAD', cwd=srctree)
- initial_rev = stdout.rstrip()
+ initial_rev["."] = stdout.rstrip()
+ (stdout, _) = bb.process.run('git submodule --quiet foreach --recursive \'echo `git rev-parse HEAD` $PWD\'', cwd=srctree)
+ for line in stdout.splitlines():
+ (rev, submodule) = line.split()
+ initial_rev[os.path.relpath(submodule, srctree)] = rev
if args.src_subdir:
srctree = os.path.join(srctree, args.src_subdir)
@@ -251,16 +257,17 @@ def add(args, config, basepath, workspace):
if b_is_s:
f.write('EXTERNALSRC_BUILD = "%s"\n' % srctree)
if initial_rev:
- f.write('\n# initial_rev: %s\n' % initial_rev)
+ for key, value in initial_rev.items():
+ f.write('\n# initial_rev %s: %s\n' % (key, value))
if args.binary:
- f.write('do_install_append() {\n')
+ f.write('do_install:append() {\n')
f.write(' rm -rf ${D}/.git\n')
f.write(' rm -f ${D}/singletask.lock\n')
f.write('}\n')
if bb.data.inherits_class('npm', rd):
- f.write('python do_configure_append() {\n')
+ f.write('python do_configure:append() {\n')
f.write(' pkgdir = d.getVar("NPM_PACKAGE")\n')
f.write(' lockfile = os.path.join(pkgdir, "singletask.lock")\n')
f.write(' bb.utils.remove(lockfile)\n')
@@ -318,10 +325,6 @@ def _check_compatible_recipe(pn, d):
raise DevtoolError("The %s recipe is a packagegroup, and therefore is "
"not supported by this tool" % pn, 4)
- if bb.data.inherits_class('meta', d):
- raise DevtoolError("The %s recipe is a meta-recipe, and therefore is "
- "not supported by this tool" % pn, 4)
-
if bb.data.inherits_class('externalsrc', d) and d.getVar('EXTERNALSRC'):
# Not an incompatibility error per se, so we don't pass the error code
raise DevtoolError("externalsrc is currently enabled for the %s "
@@ -357,7 +360,7 @@ def _move_file(src, dst, dry_run_outdir=None, base_outdir=None):
bb.utils.mkdirhier(dst_d)
shutil.move(src, dst)
-def _copy_file(src, dst, dry_run_outdir=None):
+def _copy_file(src, dst, dry_run_outdir=None, base_outdir=None):
"""Copy a file. Creates all the directory components of destination path."""
dry_run_suffix = ' (dry-run)' if dry_run_outdir else ''
logger.debug('Copying %s to %s%s' % (src, dst, dry_run_suffix))
@@ -457,7 +460,7 @@ def sync(args, config, basepath, workspace):
finally:
tinfoil.shutdown()
-def symlink_oelocal_files_srctree(rd,srctree):
+def symlink_oelocal_files_srctree(rd, srctree):
import oe.patch
if os.path.abspath(rd.getVar('S')) == os.path.abspath(rd.getVar('WORKDIR')):
# If recipe extracts to ${WORKDIR}, symlink the files into the srctree
@@ -474,14 +477,14 @@ def symlink_oelocal_files_srctree(rd,srctree):
destpth = os.path.join(srctree, relpth, fn)
if os.path.exists(destpth):
os.unlink(destpth)
- os.symlink('oe-local-files/%s' % fn, destpth)
+ if relpth != '.':
+ back_relpth = os.path.relpath(local_files_dir, root)
+ os.symlink('%s/oe-local-files/%s/%s' % (back_relpth, relpth, fn), destpth)
+ else:
+ os.symlink('oe-local-files/%s' % fn, destpth)
addfiles.append(os.path.join(relpth, fn))
if addfiles:
- bb.process.run('git add %s' % ' '.join(addfiles), cwd=srctree)
- useroptions = []
- oe.patch.GitApplyTree.gitCommandUserOptions(useroptions, d=rd)
- bb.process.run('git %s commit -m "Committing local file symlinks\n\n%s"' % (' '.join(useroptions), oe.patch.GitApplyTree.ignore_commit_prefix), cwd=srctree)
-
+ oe.patch.GitApplyTree.commitIgnored("Add local file symlinks", dir=srctree, files=addfiles, d=rd)
def _extract_source(srctree, keep_temp, devbranch, sync, config, basepath, workspace, fixed_setup, d, tinfoil, no_overrides=False):
"""Extract sources of a recipe"""
@@ -519,8 +522,10 @@ def _extract_source(srctree, keep_temp, devbranch, sync, config, basepath, works
history = d.varhistory.variable('SRC_URI')
for event in history:
if not 'flag' in event:
- if event['op'].startswith(('_append[', '_prepend[')):
- extra_overrides.append(event['op'].split('[')[1].split(']')[0])
+ if event['op'].startswith((':append[', ':prepend[')):
+ override = event['op'].split('[')[1].split(']')[0]
+ if not override.startswith('pn-'):
+ extra_overrides.append(override)
# We want to remove duplicate overrides. If a recipe had multiple
# SRC_URI_override += values it would cause mulitple instances of
# overrides. This doesn't play nicely with things like creating a
@@ -565,6 +570,9 @@ def _extract_source(srctree, keep_temp, devbranch, sync, config, basepath, works
logger.debug('writing append file %s' % appendfile)
with open(appendfile, 'a') as f:
f.write('###--- _extract_source\n')
+ f.write('deltask do_recipe_qa\n')
+ f.write('deltask do_recipe_qa_setscene\n')
+ f.write('ERROR_QA:remove = "patch-fuzz"\n')
f.write('DEVTOOL_TEMPDIR = "%s"\n' % tempdir)
f.write('DEVTOOL_DEVBRANCH = "%s"\n' % devbranch)
if not is_kernel_yocto:
@@ -582,6 +590,7 @@ def _extract_source(srctree, keep_temp, devbranch, sync, config, basepath, works
preservestampfile = os.path.join(sstate_manifests, 'preserve-stamps')
with open(preservestampfile, 'w') as f:
f.write(d.getVar('STAMP'))
+ tinfoil.modified_files()
try:
if is_kernel_yocto:
# We need to generate the kernel config
@@ -589,6 +598,16 @@ def _extract_source(srctree, keep_temp, devbranch, sync, config, basepath, works
else:
task = 'do_patch'
+ if 'noexec' in (d.getVarFlags(task, False) or []) or 'task' not in (d.getVarFlags(task, False) or []):
+ logger.info('The %s recipe has %s disabled. Running only '
+ 'do_configure task dependencies' % (pn, task))
+
+ if 'depends' in d.getVarFlags('do_configure', False):
+ pn = d.getVarFlags('do_configure', False)['depends']
+ pn = pn.replace('${PV}', d.getVar('PV'))
+ pn = pn.replace('${COMPILERDEP}', d.getVar('COMPILERDEP'))
+ task = None
+
# Run the fetch + unpack tasks
res = tinfoil.build_targets(pn,
task,
@@ -600,6 +619,17 @@ def _extract_source(srctree, keep_temp, devbranch, sync, config, basepath, works
if not res:
raise DevtoolError('Extracting source for %s failed' % pn)
+ if not is_kernel_yocto and ('noexec' in (d.getVarFlags('do_patch', False) or []) or 'task' not in (d.getVarFlags('do_patch', False) or [])):
+ workshareddir = d.getVar('S')
+ if os.path.islink(srctree):
+ os.unlink(srctree)
+
+ os.symlink(workshareddir, srctree)
+
+ # The initial_rev file is created in devtool_post_unpack function that will not be executed if
+ # do_unpack/do_patch tasks are disabled so we have to directly say that source extraction was successful
+ return True, True
+
try:
with open(os.path.join(tempdir, 'initial_rev'), 'r') as f:
initial_rev = f.read()
@@ -623,9 +653,9 @@ def _extract_source(srctree, keep_temp, devbranch, sync, config, basepath, works
if os.path.exists(workshareddir) and (not os.listdir(workshareddir) or kernelVersion != staging_kerVer):
shutil.rmtree(workshareddir)
- oe.path.copyhardlinktree(srcsubdir,workshareddir)
+ oe.path.copyhardlinktree(srcsubdir, workshareddir)
elif not os.path.exists(workshareddir):
- oe.path.copyhardlinktree(srcsubdir,workshareddir)
+ oe.path.copyhardlinktree(srcsubdir, workshareddir)
tempdir_localdir = os.path.join(tempdir, 'oe-local-files')
srctree_localdir = os.path.join(srctree, 'oe-local-files')
@@ -633,13 +663,13 @@ def _extract_source(srctree, keep_temp, devbranch, sync, config, basepath, works
if sync:
bb.process.run('git fetch file://' + srcsubdir + ' ' + devbranch + ':' + devbranch, cwd=srctree)
- # Move oe-local-files directory to srctree
- # As the oe-local-files is not part of the constructed git tree,
- # remove them directly during the synchrounizating might surprise
- # the users. Instead, we move it to oe-local-files.bak and remind
- # user in the log message.
+ # Move the oe-local-files directory to srctree.
+ # As oe-local-files is not part of the constructed git tree,
+ # removing it directly during the synchronization might surprise
+ # the user. Instead, we move it to oe-local-files.bak and remind
+ # the user in the log message.
if os.path.exists(srctree_localdir + '.bak'):
- shutil.rmtree(srctree_localdir, srctree_localdir + '.bak')
+ shutil.rmtree(srctree_localdir + '.bak')
if os.path.exists(srctree_localdir):
logger.info('Backing up current local file directory %s' % srctree_localdir)
@@ -655,7 +685,7 @@ def _extract_source(srctree, keep_temp, devbranch, sync, config, basepath, works
shutil.move(tempdir_localdir, srcsubdir)
shutil.move(srcsubdir, srctree)
- symlink_oelocal_files_srctree(d,srctree)
+ symlink_oelocal_files_srctree(d, srctree)
if is_kernel_yocto:
logger.info('Copying kernel config to srctree')
@@ -721,14 +751,14 @@ def _check_preserve(config, recipename):
os.remove(removefile)
else:
tf.write(line)
- os.rename(newfile, origfile)
+ bb.utils.rename(newfile, origfile)
def get_staging_kver(srcdir):
# Kernel version from work-shared
kerver = []
staging_kerVer=""
if os.path.exists(srcdir) and os.listdir(srcdir):
- with open(os.path.join(srcdir,"Makefile")) as f:
+ with open(os.path.join(srcdir, "Makefile")) as f:
version = [next(f) for x in range(5)][1:4]
for word in version:
kerver.append(word.split('= ')[1].split('\n')[0])
@@ -738,10 +768,20 @@ def get_staging_kver(srcdir):
def get_staging_kbranch(srcdir):
staging_kbranch = ""
if os.path.exists(srcdir) and os.listdir(srcdir):
- (branch, _) = bb.process.run('git branch | grep \* | cut -d \' \' -f2', cwd=srcdir)
+ (branch, _) = bb.process.run('git branch | grep \\* | cut -d \' \' -f2', cwd=srcdir)
staging_kbranch = "".join(branch.split('\n')[0])
return staging_kbranch
+def get_real_srctree(srctree, s, workdir):
+ # Check that recipe isn't using a shared workdir
+ s = os.path.abspath(s)
+ workdir = os.path.abspath(workdir)
+ if s.startswith(workdir) and s != workdir and os.path.dirname(s) != workdir:
+ # Handle if S is set to a subdirectory of the source
+ srcsubdir = os.path.relpath(s, workdir).split(os.sep, 1)[1]
+ srctree = os.path.join(srctree, srcsubdir)
+ return srctree
+
def modify(args, config, basepath, workspace):
"""Entry point for the devtool 'modify' subcommand"""
import bb
@@ -786,8 +826,8 @@ def modify(args, config, basepath, workspace):
_check_compatible_recipe(pn, rd)
- initial_rev = None
- commits = []
+ initial_revs = {}
+ commits = {}
check_commits = False
if bb.data.inherits_class('kernel-yocto', rd):
@@ -799,10 +839,10 @@ def modify(args, config, basepath, workspace):
staging_kerVer = get_staging_kver(srcdir)
staging_kbranch = get_staging_kbranch(srcdir)
if (os.path.exists(srcdir) and os.listdir(srcdir)) and (kernelVersion in staging_kerVer and staging_kbranch == kbranch):
- oe.path.copyhardlinktree(srcdir,srctree)
+ oe.path.copyhardlinktree(srcdir, srctree)
workdir = rd.getVar('WORKDIR')
srcsubdir = rd.getVar('S')
- localfilesdir = os.path.join(srctree,'oe-local-files')
+ localfilesdir = os.path.join(srctree, 'oe-local-files')
# Move local source files into separate subdir
recipe_patches = [os.path.basename(patch) for patch in oe.recipeutils.get_recipe_patches(rd)]
local_files = oe.recipeutils.get_recipe_local_files(rd)
@@ -826,9 +866,9 @@ def modify(args, config, basepath, workspace):
for fname in local_files:
_move_file(os.path.join(workdir, fname), os.path.join(srctree, 'oe-local-files', fname))
with open(os.path.join(srctree, 'oe-local-files', '.gitignore'), 'w') as f:
- f.write('# Ignore local files, by default. Remove this file ''if you want to commit the directory to Git\n*\n')
+ f.write('# Ignore local files, by default. Remove this file if you want to commit the directory to Git\n*\n')
- symlink_oelocal_files_srctree(rd,srctree)
+ symlink_oelocal_files_srctree(rd, srctree)
task = 'do_configure'
res = tinfoil.build_targets(pn, task, handle_events=True)
@@ -836,21 +876,30 @@ def modify(args, config, basepath, workspace):
# Copy .config to workspace
kconfpath = rd.getVar('B')
logger.info('Copying kernel config to workspace')
- shutil.copy2(os.path.join(kconfpath, '.config'),srctree)
+ shutil.copy2(os.path.join(kconfpath, '.config'), srctree)
# Set this to true, we still need to get initial_rev
# by parsing the git repo
args.no_extract = True
if not args.no_extract:
- initial_rev, _ = _extract_source(srctree, args.keep_temp, args.branch, False, config, basepath, workspace, args.fixed_setup, rd, tinfoil, no_overrides=args.no_overrides)
- if not initial_rev:
+ initial_revs["."], _ = _extract_source(srctree, args.keep_temp, args.branch, False, config, basepath, workspace, args.fixed_setup, rd, tinfoil, no_overrides=args.no_overrides)
+ if not initial_revs["."]:
return 1
logger.info('Source tree extracted to %s' % srctree)
- # Get list of commits since this revision
- (stdout, _) = bb.process.run('git rev-list --reverse %s..HEAD' % initial_rev, cwd=srctree)
- commits = stdout.split()
- check_commits = True
+
+ if os.path.exists(os.path.join(srctree, '.git')):
+ # Get list of commits since this revision
+ (stdout, _) = bb.process.run('git rev-list --reverse %s..HEAD' % initial_revs["."], cwd=srctree)
+ commits["."] = stdout.split()
+ check_commits = True
+ (stdout, _) = bb.process.run('git submodule --quiet foreach --recursive \'echo `git rev-parse devtool-base` $PWD\'', cwd=srctree)
+ for line in stdout.splitlines():
+ (rev, submodule_path) = line.split()
+ submodule = os.path.relpath(submodule_path, srctree)
+ initial_revs[submodule] = rev
+ (stdout, _) = bb.process.run('git rev-list --reverse devtool-base..HEAD', cwd=submodule_path)
+ commits[submodule] = stdout.split()
else:
if os.path.exists(os.path.join(srctree, '.git')):
# Check if it's a tree previously extracted by us. This is done
@@ -867,11 +916,11 @@ def modify(args, config, basepath, workspace):
for line in stdout.splitlines():
if line.startswith('*'):
(stdout, _) = bb.process.run('git rev-parse devtool-base', cwd=srctree)
- initial_rev = stdout.rstrip()
- if not initial_rev:
+ initial_revs["."] = stdout.rstrip()
+ if "." not in initial_revs:
# Otherwise, just grab the head revision
(stdout, _) = bb.process.run('git rev-parse HEAD', cwd=srctree)
- initial_rev = stdout.rstrip()
+ initial_revs["."] = stdout.rstrip()
branch_patches = {}
if check_commits:
@@ -888,62 +937,81 @@ def modify(args, config, basepath, workspace):
seen_patches = []
for branch in branches:
branch_patches[branch] = []
- (stdout, _) = bb.process.run('git log devtool-base..%s' % branch, cwd=srctree)
- for line in stdout.splitlines():
- line = line.strip()
- if line.startswith(oe.patch.GitApplyTree.patch_line_prefix):
- origpatch = line[len(oe.patch.GitApplyTree.patch_line_prefix):].split(':', 1)[-1].strip()
- if not origpatch in seen_patches:
- seen_patches.append(origpatch)
- branch_patches[branch].append(origpatch)
+ (stdout, _) = bb.process.run('git rev-list devtool-base..%s' % branch, cwd=srctree)
+ for sha1 in stdout.splitlines():
+ notes = oe.patch.GitApplyTree.getNotes(srctree, sha1.strip())
+ origpatch = notes.get(oe.patch.GitApplyTree.original_patch)
+ if origpatch and origpatch not in seen_patches:
+ seen_patches.append(origpatch)
+ branch_patches[branch].append(origpatch)
# Need to grab this here in case the source is within a subdirectory
srctreebase = srctree
-
- # Check that recipe isn't using a shared workdir
- s = os.path.abspath(rd.getVar('S'))
- workdir = os.path.abspath(rd.getVar('WORKDIR'))
- if s.startswith(workdir) and s != workdir and os.path.dirname(s) != workdir:
- # Handle if S is set to a subdirectory of the source
- srcsubdir = os.path.relpath(s, workdir).split(os.sep, 1)[1]
- srctree = os.path.join(srctree, srcsubdir)
+ srctree = get_real_srctree(srctree, rd.getVar('S'), rd.getVar('WORKDIR'))
bb.utils.mkdirhier(os.path.dirname(appendfile))
with open(appendfile, 'w') as f:
- f.write('FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n')
+        # If not present, add type=git-dependency to the secondary sources
+        # (non-local files) so they can be extracted correctly when building
+        # a recipe after doing a devtool modify on it.
+ src_uri = rd.getVar('SRC_URI').split()
+ src_uri_append = []
+ src_uri_remove = []
+
+ # Assume first entry is main source extracted in ${S} so skip it
+ src_uri = src_uri[1::]
+
+ # Add "type=git-dependency" to all non local sources
+ for url in src_uri:
+ if not url.startswith('file://') and not 'type=' in url:
+ src_uri_remove.append(url)
+ src_uri_append.append('%s;type=git-dependency' % url)
+
+ if src_uri_remove:
+ f.write('SRC_URI:remove = "%s"\n' % ' '.join(src_uri_remove))
+ f.write('SRC_URI:append = " %s"\n\n' % ' '.join(src_uri_append))
+
+ f.write('FILESEXTRAPATHS:prepend := "${THISDIR}/${PN}:"\n')
# Local files can be modified/tracked in separate subdir under srctree
# Mostly useful for packages with S != WORKDIR
- f.write('FILESPATH_prepend := "%s:"\n' %
+ f.write('FILESPATH:prepend := "%s:"\n' %
os.path.join(srctreebase, 'oe-local-files'))
f.write('# srctreebase: %s\n' % srctreebase)
f.write('\ninherit externalsrc\n')
f.write('# NOTE: We use pn- overrides here to avoid affecting multiple variants in the case where the recipe uses BBCLASSEXTEND\n')
- f.write('EXTERNALSRC_pn-%s = "%s"\n' % (pn, srctree))
+ f.write('EXTERNALSRC:pn-%s = "%s"\n' % (pn, srctree))
b_is_s = use_external_build(args.same_dir, args.no_same_dir, rd)
if b_is_s:
- f.write('EXTERNALSRC_BUILD_pn-%s = "%s"\n' % (pn, srctree))
+ f.write('EXTERNALSRC_BUILD:pn-%s = "%s"\n' % (pn, srctree))
if bb.data.inherits_class('kernel', rd):
f.write('SRCTREECOVEREDTASKS = "do_validate_branches do_kernel_checkout '
- 'do_fetch do_unpack do_kernel_configme do_kernel_configcheck"\n')
+ 'do_fetch do_unpack do_kernel_configcheck"\n')
f.write('\ndo_patch[noexec] = "1"\n')
- f.write('\ndo_configure_append() {\n'
+ f.write('\ndo_configure:append() {\n'
' cp ${B}/.config ${S}/.config.baseline\n'
' ln -sfT ${B}/.config ${S}/.config.new\n'
'}\n')
- if rd.getVarFlag('do_menuconfig','task'):
- f.write('\ndo_configure_append() {\n'
- ' if [ ! ${DEVTOOL_DISABLE_MENUCONFIG} ]; then\n'
- ' cp ${B}/.config ${S}/.config.baseline\n'
- ' ln -sfT ${B}/.config ${S}/.config.new\n'
+ f.write('\ndo_kernel_configme:prepend() {\n'
+ ' if [ -e ${S}/.config ]; then\n'
+ ' mv ${S}/.config ${S}/.config.old\n'
+ ' fi\n'
+ '}\n')
+ if rd.getVarFlag('do_menuconfig', 'task'):
+ f.write('\ndo_configure:append() {\n'
+ ' if [ ${@oe.types.boolean(d.getVar("KCONFIG_CONFIG_ENABLE_MENUCONFIG"))} = True ]; then\n'
+ ' cp ${KCONFIG_CONFIG_ROOTDIR}/.config ${S}/.config.baseline\n'
+ ' ln -sfT ${KCONFIG_CONFIG_ROOTDIR}/.config ${S}/.config.new\n'
' fi\n'
'}\n')
- if initial_rev:
- f.write('\n# initial_rev: %s\n' % initial_rev)
- for commit in commits:
- f.write('# commit: %s\n' % commit)
+ if initial_revs:
+ for name, rev in initial_revs.items():
+ f.write('\n# initial_rev %s: %s\n' % (name, rev))
+ if name in commits:
+ for commit in commits[name]:
+ f.write('# commit %s: %s\n' % (name, commit))
if branch_patches:
for branch in branch_patches:
if branch == args.branch:
@@ -1063,10 +1131,10 @@ def rename(args, config, basepath, workspace):
# Rename bbappend
logger.info('Renaming %s to %s' % (append, newappend))
- os.rename(append, newappend)
+ bb.utils.rename(append, newappend)
# Rename recipe file
logger.info('Renaming %s to %s' % (recipefile, newfile))
- os.rename(recipefile, newfile)
+ bb.utils.rename(recipefile, newfile)
# Rename source tree if it's the default path
appendmd5 = None
@@ -1166,44 +1234,56 @@ def _get_patchset_revs(srctree, recipe_path, initial_rev=None, force_patch_refre
branchname = stdout.rstrip()
# Parse initial rev from recipe if not specified
- commits = []
+ commits = {}
patches = []
+ initial_revs = {}
with open(recipe_path, 'r') as f:
for line in f:
- if line.startswith('# initial_rev:'):
- if not initial_rev:
- initial_rev = line.split(':')[-1].strip()
- elif line.startswith('# commit:') and not force_patch_refresh:
- commits.append(line.split(':')[-1].strip())
- elif line.startswith('# patches_%s:' % branchname):
- patches = line.split(':')[-1].strip().split(',')
-
- update_rev = initial_rev
- changed_revs = None
- if initial_rev:
+ pattern = r'^#\s.*\s(.*):\s([0-9a-fA-F]+)$'
+ match = re.search(pattern, line)
+ if match:
+ name = match.group(1)
+ rev = match.group(2)
+ if line.startswith('# initial_rev'):
+ if not (name == "." and initial_rev):
+ initial_revs[name] = rev
+ elif line.startswith('# commit') and not force_patch_refresh:
+ if name not in commits:
+ commits[name] = [rev]
+ else:
+ commits[name].append(rev)
+ elif line.startswith('# patches_%s:' % branchname):
+ patches = line.split(':')[-1].strip().split(',')
+
+ update_revs = dict(initial_revs)
+ changed_revs = {}
+ for name, rev in initial_revs.items():
# Find first actually changed revision
stdout, _ = bb.process.run('git rev-list --reverse %s..HEAD' %
- initial_rev, cwd=srctree)
+ rev, cwd=os.path.join(srctree, name))
newcommits = stdout.split()
- for i in range(min(len(commits), len(newcommits))):
- if newcommits[i] == commits[i]:
- update_rev = commits[i]
+ if name in commits:
+ for i in range(min(len(commits[name]), len(newcommits))):
+ if newcommits[i] == commits[name][i]:
+ update_revs[name] = commits[name][i]
try:
stdout, _ = bb.process.run('git cherry devtool-patched',
- cwd=srctree)
+ cwd=os.path.join(srctree, name))
except bb.process.ExecutionError as err:
stdout = None
if stdout is not None and not force_patch_refresh:
- changed_revs = []
for line in stdout.splitlines():
if line.startswith('+ '):
rev = line.split()[1]
if rev in newcommits:
- changed_revs.append(rev)
+ if name not in changed_revs:
+ changed_revs[name] = [rev]
+ else:
+ changed_revs[name].append(rev)
- return initial_rev, update_rev, changed_revs, patches
+ return initial_revs, update_revs, changed_revs, patches
def _remove_file_entries(srcuri, filelist):
"""Remove file:// entries from SRC_URI"""
@@ -1258,14 +1338,17 @@ def _remove_source_files(append, files, destpath, no_report_remove=False, dry_ru
raise
-def _export_patches(srctree, rd, start_rev, destdir, changed_revs=None):
+def _export_patches(srctree, rd, start_revs, destdir, changed_revs=None):
"""Export patches from srctree to given location.
Returns three-tuple of dicts:
1. updated - patches that already exist in SRCURI
2. added - new patches that don't exist in SRCURI
3 removed - patches that exist in SRCURI but not in exported patches
- In each dict the key is the 'basepath' of the URI and value is the
- absolute path to the existing file in recipe space (if any).
+ In each dict the key is the 'basepath' of the URI and value is:
+        - for updated and added dicts, a dict with two optional keys:
+            - 'path': the absolute path to the existing file in recipe space (if any)
+            - 'patchdir': the directory in which the patch should be applied (if any)
+ - for removed dict, the absolute path to the existing file in recipe space
"""
import oe.recipeutils
from oe.patch import GitApplyTree
@@ -1279,54 +1362,60 @@ def _export_patches(srctree, rd, start_rev, destdir, changed_revs=None):
# Generate patches from Git, exclude local files directory
patch_pathspec = _git_exclude_path(srctree, 'oe-local-files')
- GitApplyTree.extractPatches(srctree, start_rev, destdir, patch_pathspec)
-
- new_patches = sorted(os.listdir(destdir))
- for new_patch in new_patches:
- # Strip numbering from patch names. If it's a git sequence named patch,
- # the numbers might not match up since we are starting from a different
- # revision This does assume that people are using unique shortlog
- # values, but they ought to be anyway...
- new_basename = seqpatch_re.match(new_patch).group(2)
- match_name = None
- for old_patch in existing_patches:
- old_basename = seqpatch_re.match(old_patch).group(2)
- old_basename_splitext = os.path.splitext(old_basename)
- if old_basename.endswith(('.gz', '.bz2', '.Z')) and old_basename_splitext[0] == new_basename:
- old_patch_noext = os.path.splitext(old_patch)[0]
- match_name = old_patch_noext
- break
- elif new_basename == old_basename:
- match_name = old_patch
- break
- if match_name:
- # Rename patch files
- if new_patch != match_name:
- os.rename(os.path.join(destdir, new_patch),
- os.path.join(destdir, match_name))
- # Need to pop it off the list now before checking changed_revs
- oldpath = existing_patches.pop(old_patch)
- if changed_revs is not None:
- # Avoid updating patches that have not actually changed
- with open(os.path.join(destdir, match_name), 'r') as f:
- firstlineitems = f.readline().split()
- # Looking for "From <hash>" line
- if len(firstlineitems) > 1 and len(firstlineitems[1]) == 40:
- if not firstlineitems[1] in changed_revs:
- continue
- # Recompress if necessary
- if oldpath.endswith(('.gz', '.Z')):
- bb.process.run(['gzip', match_name], cwd=destdir)
- if oldpath.endswith('.gz'):
- match_name += '.gz'
- else:
- match_name += '.Z'
- elif oldpath.endswith('.bz2'):
- bb.process.run(['bzip2', match_name], cwd=destdir)
- match_name += '.bz2'
- updated[match_name] = oldpath
- else:
- added[new_patch] = None
+ GitApplyTree.extractPatches(srctree, start_revs, destdir, patch_pathspec)
+ for dirpath, dirnames, filenames in os.walk(destdir):
+ new_patches = filenames
+ reldirpath = os.path.relpath(dirpath, destdir)
+ for new_patch in new_patches:
+ # Strip numbering from patch names. If it's a git sequence named patch,
+ # the numbers might not match up since we are starting from a different
+ # revision This does assume that people are using unique shortlog
+ # values, but they ought to be anyway...
+ new_basename = seqpatch_re.match(new_patch).group(2)
+ match_name = None
+ for old_patch in existing_patches:
+ old_basename = seqpatch_re.match(old_patch).group(2)
+ old_basename_splitext = os.path.splitext(old_basename)
+ if old_basename.endswith(('.gz', '.bz2', '.Z')) and old_basename_splitext[0] == new_basename:
+ old_patch_noext = os.path.splitext(old_patch)[0]
+ match_name = old_patch_noext
+ break
+ elif new_basename == old_basename:
+ match_name = old_patch
+ break
+ if match_name:
+ # Rename patch files
+ if new_patch != match_name:
+ bb.utils.rename(os.path.join(destdir, new_patch),
+ os.path.join(destdir, match_name))
+ # Need to pop it off the list now before checking changed_revs
+ oldpath = existing_patches.pop(old_patch)
+ if changed_revs is not None and dirpath in changed_revs:
+ # Avoid updating patches that have not actually changed
+ with open(os.path.join(dirpath, match_name), 'r') as f:
+ firstlineitems = f.readline().split()
+ # Looking for "From <hash>" line
+ if len(firstlineitems) > 1 and len(firstlineitems[1]) == 40:
+ if not firstlineitems[1] in changed_revs[dirpath]:
+ continue
+ # Recompress if necessary
+ if oldpath.endswith(('.gz', '.Z')):
+ bb.process.run(['gzip', match_name], cwd=destdir)
+ if oldpath.endswith('.gz'):
+ match_name += '.gz'
+ else:
+ match_name += '.Z'
+ elif oldpath.endswith('.bz2'):
+ bb.process.run(['bzip2', match_name], cwd=destdir)
+ match_name += '.bz2'
+ updated[match_name] = {'path' : oldpath}
+ if reldirpath != ".":
+ updated[match_name]['patchdir'] = reldirpath
+ else:
+ added[new_patch] = {}
+ if reldirpath != ".":
+ added[new_patch]['patchdir'] = reldirpath
+
return (updated, added, existing_patches)
@@ -1377,6 +1466,18 @@ def _export_local_files(srctree, rd, destdir, srctreebase):
updated = OrderedDict()
added = OrderedDict()
removed = OrderedDict()
+
+ # Get current branch and return early with empty lists
+ # if on one of the override branches
+ # (local files are provided only for the main branch and processing
+ # them against lists from recipe overrides will result in mismatches
+ # and broken modifications to recipes).
+ stdout, _ = bb.process.run('git rev-parse --abbrev-ref HEAD',
+ cwd=srctree)
+ branchname = stdout.rstrip()
+ if branchname.startswith(override_branch_prefix):
+ return (updated, added, removed)
+
local_files_dir = os.path.join(srctreebase, 'oe-local-files')
git_files = _git_ls_tree(srctree)
if 'oe-local-files' in git_files:
@@ -1482,6 +1583,12 @@ def _update_recipe_srcrev(recipename, workspace, srctree, rd, appendlayerdir, wi
recipedir = os.path.basename(recipefile)
logger.info('Updating SRCREV in recipe %s%s' % (recipedir, dry_run_suffix))
+ # Get original SRCREV
+ old_srcrev = rd.getVar('SRCREV') or ''
+ if old_srcrev == "INVALID":
+ raise DevtoolError('Update mode srcrev is only valid for recipe fetched from an SCM repository')
+ old_srcrev = {'.': old_srcrev}
+
# Get HEAD revision
try:
stdout, _ = bb.process.run('git rev-parse HEAD', cwd=srctree)
@@ -1508,13 +1615,12 @@ def _update_recipe_srcrev(recipename, workspace, srctree, rd, appendlayerdir, wi
if not no_remove:
# Find list of existing patches in recipe file
patches_dir = tempfile.mkdtemp(dir=tempdir)
- old_srcrev = rd.getVar('SRCREV') or ''
upd_p, new_p, del_p = _export_patches(srctree, rd, old_srcrev,
patches_dir)
logger.debug('Patches: update %s, new %s, delete %s' % (dict(upd_p), dict(new_p), dict(del_p)))
# Remove deleted local files and "overlapping" patches
- remove_files = list(del_f.values()) + list(upd_p.values()) + list(del_p.values())
+ remove_files = list(del_f.values()) + [value["path"] for value in upd_p.values() if "path" in value] + [value["path"] for value in del_p.values() if "path" in value]
if remove_files:
removedentries = _remove_file_entries(srcuri, remove_files)[0]
update_srcuri = True
@@ -1528,11 +1634,10 @@ def _update_recipe_srcrev(recipename, workspace, srctree, rd, appendlayerdir, wi
patchfields['SRC_URI'] = '\\\n '.join(srcuri)
if dry_run_outdir:
logger.info('Creating bbappend (dry-run)')
- else:
- appendfile, destpath = oe.recipeutils.bbappend_recipe(
- rd, appendlayerdir, files, wildcardver=wildcard_version,
- extralines=patchfields, removevalues=removevalues,
- redirect_output=dry_run_outdir)
+ appendfile, destpath = oe.recipeutils.bbappend_recipe(
+ rd, appendlayerdir, files, wildcardver=wildcard_version,
+ extralines=patchfields, removevalues=removevalues,
+ redirect_output=dry_run_outdir)
else:
files_dir = _determine_files_dir(rd)
for basepath, path in upd_f.items():
@@ -1577,9 +1682,22 @@ def _update_recipe_patch(recipename, workspace, srctree, rd, appendlayerdir, wil
if not os.path.exists(append):
raise DevtoolError('unable to find workspace bbappend for recipe %s' %
recipename)
+ srctreebase = workspace[recipename]['srctreebase']
+ relpatchdir = os.path.relpath(srctreebase, srctree)
+ if relpatchdir == '.':
+ patchdir_params = {}
+ else:
+ patchdir_params = {'patchdir': relpatchdir}
- initial_rev, update_rev, changed_revs, filter_patches = _get_patchset_revs(srctree, append, initial_rev, force_patch_refresh)
- if not initial_rev:
+ def srcuri_entry(basepath, patchdir_params):
+ if patchdir_params:
+ paramstr = ';' + ';'.join('%s=%s' % (k,v) for k,v in patchdir_params.items())
+ else:
+ paramstr = ''
+ return 'file://%s%s' % (basepath, paramstr)
+
+ initial_revs, update_revs, changed_revs, filter_patches = _get_patchset_revs(srctree, append, initial_rev, force_patch_refresh)
+ if not initial_revs:
raise DevtoolError('Unable to find initial revision - please specify '
'it with --initial-rev')
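For illustration, srcuri_entry() simply appends each parameter as ';key=value'; a minimal standalone sketch with hypothetical inputs:

    def srcuri_entry(basepath, patchdir_params):
        # Join optional params onto the file:// entry
        if patchdir_params:
            paramstr = ';' + ';'.join('%s=%s' % (k, v) for k, v in patchdir_params.items())
        else:
            paramstr = ''
        return 'file://%s%s' % (basepath, paramstr)

    srcuri_entry('fix-build.patch', {})                       # 'file://fix-build.patch'
    srcuri_entry('fix-build.patch', {'patchdir': 'sub/dir'})  # 'file://fix-build.patch;patchdir=sub/dir'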
@@ -1593,55 +1711,62 @@ def _update_recipe_patch(recipename, workspace, srctree, rd, appendlayerdir, wil
tempdir = tempfile.mkdtemp(prefix='devtool')
try:
local_files_dir = tempfile.mkdtemp(dir=tempdir)
- if filter_patches:
- upd_f = {}
- new_f = {}
- del_f = {}
- else:
- srctreebase = workspace[recipename]['srctreebase']
- upd_f, new_f, del_f = _export_local_files(srctree, rd, local_files_dir, srctreebase)
-
- remove_files = []
- if not no_remove:
- # Get all patches from source tree and check if any should be removed
- all_patches_dir = tempfile.mkdtemp(dir=tempdir)
- _, _, del_p = _export_patches(srctree, rd, initial_rev,
- all_patches_dir)
- # Remove deleted local files and patches
- remove_files = list(del_f.values()) + list(del_p.values())
+ upd_f, new_f, del_f = _export_local_files(srctree, rd, local_files_dir, srctreebase)
# Get updated patches from source tree
patches_dir = tempfile.mkdtemp(dir=tempdir)
- upd_p, new_p, _ = _export_patches(srctree, rd, update_rev,
+ upd_p, new_p, _ = _export_patches(srctree, rd, update_revs,
patches_dir, changed_revs)
+ # Get all patches from source tree and check if any should be removed
+ all_patches_dir = tempfile.mkdtemp(dir=tempdir)
+ _, _, del_p = _export_patches(srctree, rd, initial_revs,
+ all_patches_dir)
logger.debug('Pre-filtering: update: %s, new: %s' % (dict(upd_p), dict(new_p)))
if filter_patches:
new_p = OrderedDict()
upd_p = OrderedDict((k,v) for k,v in upd_p.items() if k in filter_patches)
- remove_files = [f for f in remove_files if f in filter_patches]
+ del_p = OrderedDict((k,v) for k,v in del_p.items() if k in filter_patches)
+ remove_files = []
+ if not no_remove:
+ # Remove deleted local files and patches
+ remove_files = list(del_f.values()) + list(del_p.values())
updatefiles = False
updaterecipe = False
destpath = None
srcuri = (rd.getVar('SRC_URI', False) or '').split()
+
if appendlayerdir:
files = OrderedDict((os.path.join(local_files_dir, key), val) for
key, val in list(upd_f.items()) + list(new_f.items()))
files.update(OrderedDict((os.path.join(patches_dir, key), val) for
key, val in list(upd_p.items()) + list(new_p.items())))
+
+ params = []
+ for file, param in files.items():
+ patchdir_param = dict(patchdir_params)
+ patchdir = param.get('patchdir', ".")
+ if patchdir != "." :
+ if patchdir_param:
+ patchdir_param['patchdir'] += patchdir
+ else:
+ patchdir_param['patchdir'] = patchdir
+ params.append(patchdir_param)
+
if files or remove_files:
removevalues = None
if remove_files:
removedentries, remaining = _remove_file_entries(
srcuri, remove_files)
if removedentries or remaining:
- remaining = ['file://' + os.path.basename(item) for
+ remaining = [srcuri_entry(os.path.basename(item), patchdir_params) for
item in remaining]
removevalues = {'SRC_URI': removedentries + remaining}
appendfile, destpath = oe.recipeutils.bbappend_recipe(
rd, appendlayerdir, files,
wildcardver=wildcard_version,
removevalues=removevalues,
- redirect_output=dry_run_outdir)
+ redirect_output=dry_run_outdir,
+ params=params)
else:
logger.info('No patches or local source files needed updating')
else:
@@ -1658,14 +1783,22 @@ def _update_recipe_patch(recipename, workspace, srctree, rd, appendlayerdir, wil
_move_file(os.path.join(local_files_dir, basepath), path,
dry_run_outdir=dry_run_outdir, base_outdir=recipedir)
updatefiles = True
- for basepath, path in upd_p.items():
- patchfn = os.path.join(patches_dir, basepath)
+ for basepath, param in upd_p.items():
+ path = param['path']
+ patchdir = param.get('patchdir', ".")
+ if patchdir != "." :
+ patchdir_param = dict(patchdir_params)
+ if patchdir_param:
+ patchdir_param['patchdir'] += patchdir
+ else:
+ patchdir_param['patchdir'] = patchdir
+ patchfn = os.path.join(patches_dir, patchdir, basepath)
if os.path.dirname(path) + '/' == dl_dir:
# This is a downloaded patch file - we now need to
# replace the entry in SRC_URI with our local version
logger.info('Replacing remote patch %s with updated local version' % basepath)
path = os.path.join(files_dir, basepath)
- _replace_srcuri_entry(srcuri, basepath, 'file://%s' % basepath)
+ _replace_srcuri_entry(srcuri, basepath, srcuri_entry(basepath, patchdir_param))
updaterecipe = True
else:
logger.info('Updating patch %s%s' % (basepath, dry_run_suffix))
@@ -1679,15 +1812,23 @@ def _update_recipe_patch(recipename, workspace, srctree, rd, appendlayerdir, wil
os.path.join(files_dir, basepath),
dry_run_outdir=dry_run_outdir,
base_outdir=recipedir)
- srcuri.append('file://%s' % basepath)
+ srcuri.append(srcuri_entry(basepath, patchdir_params))
updaterecipe = True
- for basepath, path in new_p.items():
+ for basepath, param in new_p.items():
+ patchdir = param.get('patchdir', ".")
logger.info('Adding new patch %s%s' % (basepath, dry_run_suffix))
- _move_file(os.path.join(patches_dir, basepath),
+ _move_file(os.path.join(patches_dir, patchdir, basepath),
os.path.join(files_dir, basepath),
dry_run_outdir=dry_run_outdir,
base_outdir=recipedir)
- srcuri.append('file://%s' % basepath)
+ params = dict(patchdir_params)
+ if patchdir != "." :
+ if params:
+ params['patchdir'] += patchdir
+ else:
+ params['patchdir'] = patchdir
+
+ srcuri.append(srcuri_entry(basepath, params))
updaterecipe = True
# Update recipe, if needed
if _remove_file_entries(srcuri, remove_files)[0]:
@@ -1744,6 +1885,8 @@ def _update_recipe(recipename, workspace, rd, mode, appendlayerdir, wildcard_ver
for line in stdout.splitlines():
branchname = line[2:]
if line.startswith('* '):
+ if 'HEAD' in line:
+ raise DevtoolError('Detached HEAD - please check out a branch, e.g., "devtool"')
startbranch = branchname
if branchname.startswith(override_branch_prefix):
override_branches.append(branchname)
@@ -1933,9 +2076,19 @@ def _reset(recipes, no_clean, remove_work, config, basepath, workspace):
shutil.rmtree(srctreebase)
else:
# We don't want to risk wiping out any work in progress
- logger.info('Leaving source tree %s as-is; if you no '
- 'longer need it then please delete it manually'
- % srctreebase)
+ if srctreebase.startswith(os.path.join(config.workspace_path, 'sources')):
+ from datetime import datetime
+ preservesrc = os.path.join(config.workspace_path, 'attic', 'sources', "{}.{}".format(pn, datetime.now().strftime("%Y%m%d%H%M%S")))
+ logger.info('Preserving source tree in %s\nIf you no '
+ 'longer need it then please delete it manually.\n'
+ 'It is also possible to reuse it via the devtool source tree argument.'
+ % preservesrc)
+ bb.utils.mkdirhier(os.path.dirname(preservesrc))
+ shutil.move(srctreebase, preservesrc)
+ else:
+ logger.info('Leaving source tree %s as-is; if you no '
+ 'longer need it then please delete it manually'
+ % srctreebase)
else:
# This is unlikely, but if it's empty we can just remove it
os.rmdir(srctreebase)
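A sketch of the resulting attic layout (workspace path and recipe name hypothetical):

    import os
    from datetime import datetime

    workspace_path = '/work/build/workspace'   # hypothetical config.workspace_path
    pn = 'foo'                                 # hypothetical recipe name
    preservesrc = os.path.join(workspace_path, 'attic', 'sources',
                               "{}.{}".format(pn, datetime.now().strftime("%Y%m%d%H%M%S")))
    # e.g. /work/build/workspace/attic/sources/foo.20240101120000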
@@ -2195,6 +2348,7 @@ def register_commands(subparsers, context):
group.add_argument('--no-same-dir', help='Force build in a separate build directory', action="store_true")
parser_add.add_argument('--fetch', '-f', help='Fetch the specified URI and extract it to create the source tree (deprecated - pass as positional argument instead)', metavar='URI')
parser_add.add_argument('--npm-dev', help='For npm, also fetch devDependencies', action="store_true")
+ parser_add.add_argument('--no-pypi', help='Do not inherit pypi class', action="store_true")
parser_add.add_argument('--version', '-V', help='Version to use within recipe (PV)')
parser_add.add_argument('--no-git', '-g', help='If fetching source, do not set up source tree as a git repository', action="store_true")
group = parser_add.add_mutually_exclusive_group()
diff --git a/scripts/lib/devtool/upgrade.py b/scripts/lib/devtool/upgrade.py
index 0c1de8cdc7..fa5b8ef3c7 100644
--- a/scripts/lib/devtool/upgrade.py
+++ b/scripts/lib/devtool/upgrade.py
@@ -35,6 +35,8 @@ def _get_srctree(tmpdir):
dirs = scriptutils.filter_src_subdirs(tmpdir)
if len(dirs) == 1:
srctree = os.path.join(tmpdir, dirs[0])
+ else:
+ raise DevtoolError("Cannot determine where the source tree is after unpacking in {}: {}".format(tmpdir,dirs))
return srctree
def _copy_source_code(orig, dest):
@@ -71,7 +73,8 @@ def _rename_recipe_dirs(oldpv, newpv, path):
if oldfile.find(oldpv) != -1:
newfile = oldfile.replace(oldpv, newpv)
if oldfile != newfile:
- os.rename(os.path.join(path, oldfile), os.path.join(path, newfile))
+ bb.utils.rename(os.path.join(path, oldfile),
+ os.path.join(path, newfile))
def _rename_recipe_file(oldrecipe, bpn, oldpv, newpv, path):
oldrecipe = os.path.basename(oldrecipe)
@@ -87,7 +90,7 @@ def _rename_recipe_files(oldrecipe, bpn, oldpv, newpv, path):
_rename_recipe_dirs(oldpv, newpv, path)
return _rename_recipe_file(oldrecipe, bpn, oldpv, newpv, path)
-def _write_append(rc, srctree, same_dir, no_same_dir, rev, copied, workspace, d):
+def _write_append(rc, srctreebase, srctree, same_dir, no_same_dir, revs, copied, workspace, d):
"""Writes an append file"""
if not os.path.exists(rc):
raise DevtoolError("bbappend not created because %s does not exist" % rc)
@@ -102,36 +105,38 @@ def _write_append(rc, srctree, same_dir, no_same_dir, rev, copied, workspace, d)
pn = d.getVar('PN')
af = os.path.join(appendpath, '%s.bbappend' % brf)
with open(af, 'w') as f:
- f.write('FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n\n')
+ f.write('FILESEXTRAPATHS:prepend := "${THISDIR}/${PN}:"\n\n')
+ # Local files can be modified/tracked in a separate subdir under srctree
+ # Mostly useful for packages with S != WORKDIR
+ f.write('FILESPATH:prepend := "%s:"\n' %
+ os.path.join(srctreebase, 'oe-local-files'))
+ f.write('# srctreebase: %s\n' % srctreebase)
f.write('inherit externalsrc\n')
f.write(('# NOTE: We use pn- overrides here to avoid affecting '
'multiple variants in the case where the recipe uses BBCLASSEXTEND\n'))
- f.write('EXTERNALSRC_pn-%s = "%s"\n' % (pn, srctree))
+ f.write('EXTERNALSRC:pn-%s = "%s"\n' % (pn, srctree))
b_is_s = use_external_build(same_dir, no_same_dir, d)
if b_is_s:
- f.write('EXTERNALSRC_BUILD_pn-%s = "%s"\n' % (pn, srctree))
+ f.write('EXTERNALSRC_BUILD:pn-%s = "%s"\n' % (pn, srctree))
f.write('\n')
- if rev:
- f.write('# initial_rev: %s\n' % rev)
+ if revs:
+ for name, rev in revs.items():
+ f.write('# initial_rev %s: %s\n' % (name, rev))
if copied:
f.write('# original_path: %s\n' % os.path.dirname(d.getVar('FILE')))
f.write('# original_files: %s\n' % ' '.join(copied))
return af
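For a recipe 'foo' with a single initial revision, the append written above would look roughly like this (paths and revision hypothetical):

    FILESEXTRAPATHS:prepend := "${THISDIR}/${PN}:"

    FILESPATH:prepend := "/work/sources/foo/oe-local-files:"
    # srctreebase: /work/sources/foo
    inherit externalsrc
    # NOTE: We use pn- overrides here to avoid affecting multiple variants in the case where the recipe uses BBCLASSEXTEND
    EXTERNALSRC:pn-foo = "/work/sources/foo"

    # initial_rev .: 0123456789abcdef0123456789abcdef01234567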
-def _cleanup_on_error(rf, srctree):
- rfp = os.path.split(rf)[0] # recipe folder
- rfpp = os.path.split(rfp)[0] # recipes folder
- if os.path.exists(rfp):
- shutil.rmtree(rfp)
- if not len(os.listdir(rfpp)):
- os.rmdir(rfpp)
+def _cleanup_on_error(rd, srctree):
+ if os.path.exists(rd):
+ shutil.rmtree(rd)
srctree = os.path.abspath(srctree)
if os.path.exists(srctree):
shutil.rmtree(srctree)
-def _upgrade_error(e, rf, srctree, keep_failure=False, extramsg=None):
- if rf and not keep_failure:
- _cleanup_on_error(rf, srctree)
+def _upgrade_error(e, rd, srctree, keep_failure=False, extramsg=None):
+ if not keep_failure:
+ _cleanup_on_error(rd, srctree)
logger.error(e)
if extramsg:
logger.error(extramsg)
@@ -178,12 +183,16 @@ def _extract_new_source(newpv, srctree, no_patch, srcrev, srcbranch, branch, kee
uri, rev = _get_uri(crd)
if srcrev:
rev = srcrev
- if uri.startswith('git://'):
+ paths = [srctree]
+ if uri.startswith('git://') or uri.startswith('gitsm://'):
__run('git fetch')
__run('git checkout %s' % rev)
__run('git tag -f devtool-base-new')
- md5 = None
- sha256 = None
+ __run('git submodule update --recursive')
+ __run('git submodule foreach \'git tag -f devtool-base-new\'')
+ (stdout, _) = __run('git submodule --quiet foreach \'echo $sm_path\'')
+ paths += [os.path.join(srctree, p) for p in stdout.splitlines()]
+ checksums = {}
_, _, _, _, _, params = bb.fetch2.decodeurl(uri)
srcsubdir_rel = params.get('destsuffix', 'git')
if not srcbranch:
@@ -191,14 +200,15 @@ def _extract_new_source(newpv, srctree, no_patch, srcrev, srcbranch, branch, kee
get_branch = [x.strip() for x in check_branch.splitlines()]
# Remove HEAD reference point and drop remote prefix
get_branch = [x.split('/', 1)[1] for x in get_branch if not x.startswith('origin/HEAD')]
- if 'master' in get_branch:
- # If it is master, we do not need to append 'branch=master' as this is default.
- # Even with the case where get_branch has multiple objects, if 'master' is one
- # of them, we should default take from 'master'
- srcbranch = ''
- elif len(get_branch) == 1:
- # If 'master' isn't in get_branch and get_branch contains only ONE object, then store result into 'srcbranch'
+ if len(get_branch) == 1:
+ # If srcrev is on only ONE branch, then use that branch
srcbranch = get_branch[0]
+ elif 'main' in get_branch:
+ # If srcrev is on multiple branches, then choose 'main' if it is one of them
+ srcbranch = 'main'
+ elif 'master' in get_branch:
+ # Otherwise choose 'master' if it is one of the branches
+ srcbranch = 'master'
else:
# If get_branch contains more than one object, then display an error and exit.
mbrch = '\n ' + '\n '.join(get_branch)
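A condensed sketch of the selection order above, as a standalone helper (hypothetical, mirroring the branch logic):

    def pick_srcbranch(get_branch):
        # get_branch lists the remote branches containing srcrev
        if len(get_branch) == 1:
            return get_branch[0]      # unambiguous
        if 'main' in get_branch:
            return 'main'             # prefer 'main' when ambiguous
        if 'master' in get_branch:
            return 'master'           # then fall back to 'master'
        raise ValueError('ambiguous branches: %s' % ', '.join(get_branch))

    pick_srcbranch(['main', 'release-1.2'])   # -> 'main'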
@@ -215,9 +225,6 @@ def _extract_new_source(newpv, srctree, no_patch, srcrev, srcbranch, branch, kee
if ftmpdir and keep_temp:
logger.info('Fetch temp directory is %s' % ftmpdir)
- md5 = checksums['md5sum']
- sha256 = checksums['sha256sum']
-
tmpsrctree = _get_srctree(tmpdir)
srctree = os.path.abspath(srctree)
srcsubdir_rel = os.path.relpath(tmpsrctree, tmpdir)
@@ -251,30 +258,50 @@ def _extract_new_source(newpv, srctree, no_patch, srcrev, srcbranch, branch, kee
__run('git %s commit -q -m "Commit of upstream changes at version %s" --allow-empty' % (' '.join(useroptions), newpv))
__run('git tag -f devtool-base-%s' % newpv)
- (stdout, _) = __run('git rev-parse HEAD')
- rev = stdout.rstrip()
+ revs = {}
+ for path in paths:
+ (stdout, _) = _run('git rev-parse HEAD', cwd=path)
+ revs[os.path.relpath(path, srctree)] = stdout.rstrip()
if no_patch:
patches = oe.recipeutils.get_recipe_patches(crd)
if patches:
logger.warning('By user choice, the following patches will NOT be applied to the new source tree:\n %s' % '\n '.join([os.path.basename(patch) for patch in patches]))
else:
- __run('git checkout devtool-patched -b %s' % branch)
- skiptag = False
- try:
- __run('git rebase %s' % rev)
- except bb.process.ExecutionError as e:
- skiptag = True
- if 'conflict' in e.stdout:
- logger.warning('Command \'%s\' failed:\n%s\n\nYou will need to resolve conflicts in order to complete the upgrade.' % (e.command, e.stdout.rstrip()))
- else:
- logger.warning('Command \'%s\' failed:\n%s' % (e.command, e.stdout))
- if not skiptag:
- if uri.startswith('git://'):
- suffix = 'new'
- else:
- suffix = newpv
- __run('git tag -f devtool-patched-%s' % suffix)
+ for path in paths:
+ _run('git checkout devtool-patched -b %s' % branch, cwd=path)
+ (stdout, _) = _run('git branch --list devtool-override-*', cwd=path)
+ branches_to_rebase = [branch] + stdout.split()
+ target_branch = revs[os.path.relpath(path, srctree)]
+
+ # There is a bug (or feature?) in git rebase where if a commit with
+ # a note is fully rebased away by being part of an old commit, the
+ # note is still attached to the old commit. Avoid this by making
+ # sure all old devtool related commits have a note attached to them
+ # (this assumes git config notes.rewriteMode is set to ignore).
+ (stdout, _) = __run('git rev-list devtool-base..%s' % target_branch)
+ for rev in stdout.splitlines():
+ if not oe.patch.GitApplyTree.getNotes(path, rev):
+ oe.patch.GitApplyTree.addNote(path, rev, "dummy")
+
+ for b in branches_to_rebase:
+ logger.info("Rebasing {} onto {}".format(b, target_branch))
+ _run('git checkout %s' % b, cwd=path)
+ try:
+ _run('git rebase %s' % target_branch, cwd=path)
+ except bb.process.ExecutionError as e:
+ if 'conflict' in e.stdout:
+ logger.warning('Command \'%s\' failed:\n%s\n\nYou will need to resolve conflicts in order to complete the upgrade.' % (e.command, e.stdout.rstrip()))
+ _run('git rebase --abort', cwd=path)
+ else:
+ logger.warning('Command \'%s\' failed:\n%s' % (e.command, e.stdout))
+
+ # Remove any dummy notes added above.
+ (stdout, _) = __run('git rev-list devtool-base..%s' % target_branch)
+ for rev in stdout.splitlines():
+ oe.patch.GitApplyTree.removeNote(path, rev, "dummy")
+
+ _run('git checkout %s' % branch, cwd=path)
if tmpsrctree:
if keep_temp:
@@ -284,7 +311,7 @@ def _extract_new_source(newpv, srctree, no_patch, srcrev, srcbranch, branch, kee
if tmpdir != tmpsrctree:
shutil.rmtree(tmpdir)
- return (rev, md5, sha256, srcbranch, srcsubdir_rel)
+ return (revs, checksums, srcbranch, srcsubdir_rel)
def _add_license_diff_to_recipe(path, diff):
notice_text = """# FIXME: the LIC_FILES_CHKSUM values have been updated by 'devtool upgrade'.
@@ -305,7 +332,7 @@ def _add_license_diff_to_recipe(path, diff):
f.write("\n#\n\n".encode())
f.write(orig_content)
-def _create_new_recipe(newpv, md5, sha256, srcrev, srcbranch, srcsubdir_old, srcsubdir_new, workspace, tinfoil, rd, license_diff, new_licenses, srctree, keep_failure):
+def _create_new_recipe(newpv, checksums, srcrev, srcbranch, srcsubdir_old, srcsubdir_new, workspace, tinfoil, rd, license_diff, new_licenses, srctree, keep_failure):
"""Creates the new recipe under workspace"""
bpn = rd.getVar('BPN')
@@ -336,7 +363,10 @@ def _create_new_recipe(newpv, md5, sha256, srcrev, srcbranch, srcsubdir_old, src
replacing = True
new_src_uri = []
for entry in src_uri:
- scheme, network, path, user, passwd, params = bb.fetch2.decodeurl(entry)
+ try:
+ scheme, network, path, user, passwd, params = bb.fetch2.decodeurl(entry)
+ except bb.fetch2.MalformedUrl as e:
+ raise DevtoolError("Could not decode SRC_URI: {}".format(e))
if replacing and scheme in ['git', 'gitsm']:
branch = params.get('branch', 'master')
if rd.expand(branch) != srcbranch:
@@ -374,30 +404,39 @@ def _create_new_recipe(newpv, md5, sha256, srcrev, srcbranch, srcsubdir_old, src
addnames.append(params['name'])
# Find what's been set in the original recipe
oldnames = []
+ oldsums = []
noname = False
for varflag in rd.getVarFlags('SRC_URI'):
- if varflag.endswith(('.md5sum', '.sha256sum')):
- name = varflag.rsplit('.', 1)[0]
- if name not in oldnames:
- oldnames.append(name)
- elif varflag in ['md5sum', 'sha256sum']:
- noname = True
+ for checksum in checksums:
+ if varflag.endswith('.' + checksum):
+ name = varflag.rsplit('.', 1)[0]
+ if name not in oldnames:
+ oldnames.append(name)
+ oldsums.append(checksum)
+ elif varflag == checksum:
+ noname = True
+ oldsums.append(checksum)
# Even if SRC_URI has named entries it doesn't have to actually use the name
if noname and addnames and addnames[0] not in oldnames:
addnames = []
# Drop any old names (the name actually might include ${PV})
for name in oldnames:
if name not in newnames:
- newvalues['SRC_URI[%s.md5sum]' % name] = None
- newvalues['SRC_URI[%s.sha256sum]' % name] = None
+ for checksum in oldsums:
+ newvalues['SRC_URI[%s.%s]' % (name, checksum)] = None
- if sha256:
- if addnames:
- nameprefix = '%s.' % addnames[0]
- else:
- nameprefix = ''
+ nameprefix = '%s.' % addnames[0] if addnames else ''
+
+ # md5sum is deprecated, remove any traces of it. If it was the only old
+ # checksum, then replace it with the default checksums.
+ if 'md5sum' in oldsums:
newvalues['SRC_URI[%smd5sum]' % nameprefix] = None
- newvalues['SRC_URI[%ssha256sum]' % nameprefix] = sha256
+ oldsums.remove('md5sum')
+ if not oldsums:
+ oldsums = ["%ssum" % s for s in bb.fetch2.SHOWN_CHECKSUM_LIST]
+
+ for checksum in oldsums:
+ newvalues['SRC_URI[%s%s]' % (nameprefix, checksum)] = checksums[checksum]
if srcsubdir_new != srcsubdir_old:
s_subdir_old = os.path.relpath(os.path.abspath(rd.getVar('S')), rd.getVar('WORKDIR'))
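A worked illustration of the checksum rewrite above (values hypothetical; with BitBake's SHOWN_CHECKSUM_LIST being ["sha256"], the default shown flag is sha256sum):

    newvalues = {}
    checksums = {'sha256sum': 'deadbeef...'}    # hypothetical fetch result
    oldsums = ['md5sum']                        # old recipe carried only md5sum
    if 'md5sum' in oldsums:
        newvalues['SRC_URI[md5sum]'] = None     # drop the deprecated flag
        oldsums.remove('md5sum')
    if not oldsums:
        oldsums = ['sha256sum']                 # default shown checksums
    for checksum in oldsums:
        newvalues['SRC_URI[%s]' % checksum] = checksums[checksum]
    # newvalues == {'SRC_URI[md5sum]': None, 'SRC_URI[sha256sum]': 'deadbeef...'}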
@@ -422,10 +461,11 @@ def _create_new_recipe(newpv, md5, sha256, srcrev, srcbranch, srcsubdir_old, src
newvalues["LIC_FILES_CHKSUM"] = newlicchksum
_add_license_diff_to_recipe(fullpath, license_diff)
+ tinfoil.modified_files()
try:
rd = tinfoil.parse_recipe_file(fullpath, False)
except bb.tinfoil.TinfoilCommandFailed as e:
- _upgrade_error(e, fullpath, srctree, keep_failure, 'Parsing of upgraded recipe failed')
+ _upgrade_error(e, os.path.dirname(fullpath), srctree, keep_failure, 'Parsing of upgraded recipe failed')
oe.recipeutils.patch_recipe(rd, fullpath, newvalues)
return fullpath, copied
@@ -434,7 +474,7 @@ def _create_new_recipe(newpv, md5, sha256, srcrev, srcbranch, srcsubdir_old, src
def _check_git_config():
def getconfig(name):
try:
- value = bb.process.run('git config --global %s' % name)[0].strip()
+ value = bb.process.run('git config %s' % name)[0].strip()
except bb.process.ExecutionError as e:
if e.exitcode == 1:
value = None
@@ -521,6 +561,8 @@ def upgrade(args, config, basepath, workspace):
else:
srctree = standard.get_default_srctree(config, pn)
+ srctree_s = standard.get_real_srctree(srctree, rd.getVar('S'), rd.getVar('WORKDIR'))
+
# try to automatically discover latest version and revision if not provided on command line
if not args.version and not args.srcrev:
version_info = oe.recipeutils.get_recipe_upstream_version(rd)
@@ -550,21 +592,20 @@ def upgrade(args, config, basepath, workspace):
try:
logger.info('Extracting current version source...')
rev1, srcsubdir1 = standard._extract_source(srctree, False, 'devtool-orig', False, config, basepath, workspace, args.fixed_setup, rd, tinfoil, no_overrides=args.no_overrides)
- old_licenses = _extract_licenses(srctree, (rd.getVar('LIC_FILES_CHKSUM') or ""))
+ old_licenses = _extract_licenses(srctree_s, (rd.getVar('LIC_FILES_CHKSUM') or ""))
logger.info('Extracting upgraded version source...')
- rev2, md5, sha256, srcbranch, srcsubdir2 = _extract_new_source(args.version, srctree, args.no_patch,
+ rev2, checksums, srcbranch, srcsubdir2 = _extract_new_source(args.version, srctree, args.no_patch,
args.srcrev, args.srcbranch, args.branch, args.keep_temp,
tinfoil, rd)
- new_licenses = _extract_licenses(srctree, (rd.getVar('LIC_FILES_CHKSUM') or ""))
+ new_licenses = _extract_licenses(srctree_s, (rd.getVar('LIC_FILES_CHKSUM') or ""))
license_diff = _generate_license_diff(old_licenses, new_licenses)
- rf, copied = _create_new_recipe(args.version, md5, sha256, args.srcrev, srcbranch, srcsubdir1, srcsubdir2, config.workspace_path, tinfoil, rd, license_diff, new_licenses, srctree, args.keep_failure)
- except bb.process.CmdError as e:
- _upgrade_error(e, rf, srctree, args.keep_failure)
- except DevtoolError as e:
- _upgrade_error(e, rf, srctree, args.keep_failure)
+ rf, copied = _create_new_recipe(args.version, checksums, args.srcrev, srcbranch, srcsubdir1, srcsubdir2, config.workspace_path, tinfoil, rd, license_diff, new_licenses, srctree, args.keep_failure)
+ except (bb.process.CmdError, DevtoolError) as e:
+ recipedir = os.path.join(config.workspace_path, 'recipes', rd.getVar('BPN'))
+ _upgrade_error(e, recipedir, srctree, args.keep_failure)
standard._add_md5(config, pn, os.path.dirname(rf))
- af = _write_append(rf, srctree, args.same_dir, args.no_same_dir, rev2,
+ af = _write_append(rf, srctree, srctree_s, args.same_dir, args.no_same_dir, rev2,
copied, config.workspace_path, rd)
standard._add_md5(config, pn, af)
@@ -574,6 +615,9 @@ def upgrade(args, config, basepath, workspace):
logger.info('New recipe is %s' % rf)
if license_diff:
logger.info('License checksums have been updated in the new recipe; please refer to it for the difference between the old and the new license texts.')
+ preferred_version = rd.getVar('PREFERRED_VERSION_%s' % rd.getVar('PN'))
+ if preferred_version:
+ logger.warning('Version is pinned to %s via PREFERRED_VERSION; it may need adjustment to match the new version before any further steps are taken' % preferred_version)
finally:
tinfoil.shutdown()
return 0
@@ -605,7 +649,7 @@ def check_upgrade_status(args, config, basepath, workspace):
for result in results:
# pn, update_status, current, latest, maintainer, latest_commit, no_update_reason
if args.all or result[1] != 'MATCH':
- logger.info("{:25} {:15} {:15} {} {} {}".format( result[0],
+ print("{:25} {:15} {:15} {} {} {}".format( result[0],
result[2],
result[1] if result[1] != 'UPDATE' else (result[3] if not result[3].endswith("new-commits-available") else "new commits"),
result[4],
diff --git a/scripts/lib/recipetool/append.py b/scripts/lib/recipetool/append.py
index e9d52bb67b..341e893305 100644
--- a/scripts/lib/recipetool/append.py
+++ b/scripts/lib/recipetool/append.py
@@ -18,6 +18,7 @@ import shutil
import scriptutils
import errno
from collections import defaultdict
+import difflib
logger = logging.getLogger('recipetool')
@@ -49,7 +50,7 @@ def find_target_file(targetpath, d, pkglist=None):
'/etc/group': '/etc/group should be managed through the useradd and extrausers classes',
'/etc/shadow': '/etc/shadow should be managed through the useradd and extrausers classes',
'/etc/gshadow': '/etc/gshadow should be managed through the useradd and extrausers classes',
- '${sysconfdir}/hostname': '${sysconfdir}/hostname contents should be set by setting hostname_pn-base-files = "value" in configuration',}
+ '${sysconfdir}/hostname': '${sysconfdir}/hostname contents should be set by setting hostname:pn-base-files = "value" in configuration',}
for pthspec, message in invalidtargets.items():
if fnmatch.fnmatchcase(targetpath, d.expand(pthspec)):
@@ -72,15 +73,15 @@ def find_target_file(targetpath, d, pkglist=None):
# This does assume that PN comes before other values, but that's a fairly safe assumption
for line in f:
if line.startswith('PN:'):
- pn = line.split(':', 1)[1].strip()
- elif line.startswith('FILES_INFO:'):
- val = line.split(':', 1)[1].strip()
+ pn = line.split(': ', 1)[1].strip()
+ elif line.startswith('FILES_INFO'):
+ val = line.split(': ', 1)[1].strip()
dictval = json.loads(val)
for fullpth in dictval.keys():
if fnmatch.fnmatchcase(fullpth, targetpath):
recipes[targetpath].append(pn)
- elif line.startswith('pkg_preinst_') or line.startswith('pkg_postinst_'):
- scriptval = line.split(':', 1)[1].strip().encode('utf-8').decode('unicode_escape')
+ elif line.startswith('pkg_preinst:') or line.startswith('pkg_postinst:'):
+ scriptval = line.split(': ', 1)[1].strip().encode('utf-8').decode('unicode_escape')
if 'update-alternatives --install %s ' % targetpath in scriptval:
recipes[targetpath].append('?%s' % pn)
elif targetpath_re.search(scriptval):
@@ -299,7 +300,10 @@ def appendfile(args):
if st.st_mode & stat.S_IXUSR:
perms = '0755'
install = {args.newfile: (args.targetpath, perms)}
- oe.recipeutils.bbappend_recipe(rd, args.destlayer, {args.newfile: sourcepath}, install, wildcardver=args.wildcard_version, machine=args.machine)
+ if sourcepath:
+ sourcepath = os.path.basename(sourcepath)
+ oe.recipeutils.bbappend_recipe(rd, args.destlayer, {args.newfile: {'newname': sourcepath}}, install, wildcardver=args.wildcard_version, machine=args.machine)
+ tinfoil.modified_files()
return 0
else:
if alternative_pns:
@@ -327,6 +331,7 @@ def appendsrc(args, files, rd, extralines=None):
copyfiles = {}
extralines = extralines or []
+ params = []
for newfile, srcfile in files.items():
src_destdir = os.path.dirname(srcfile)
if not args.use_workdir:
@@ -337,25 +342,46 @@ def appendsrc(args, files, rd, extralines=None):
src_destdir = os.path.join(os.path.relpath(srcdir, workdir), src_destdir)
src_destdir = os.path.normpath(src_destdir)
- source_uri = 'file://{0}'.format(os.path.basename(srcfile))
if src_destdir and src_destdir != '.':
- source_uri += ';subdir={0}'.format(src_destdir)
-
- simple = bb.fetch.URI(source_uri)
- simple.params = {}
- simple_str = str(simple)
- if simple_str in simplified:
- existing = simplified[simple_str]
- if source_uri != existing:
- logger.warning('{0!r} is already in SRC_URI, with different parameters: {1!r}, not adding'.format(source_uri, existing))
- else:
- logger.warning('{0!r} is already in SRC_URI, not adding'.format(source_uri))
+ params.append({'subdir': src_destdir})
else:
- extralines.append('SRC_URI += {0}'.format(source_uri))
- copyfiles[newfile] = srcfile
-
- oe.recipeutils.bbappend_recipe(rd, args.destlayer, copyfiles, None, wildcardver=args.wildcard_version, machine=args.machine, extralines=extralines)
-
+ params.append({})
+
+ copyfiles[newfile] = {'newname': os.path.basename(srcfile)}
+
+ dry_run_output = None
+ dry_run_outdir = None
+ if args.dry_run:
+ import tempfile
+ dry_run_output = tempfile.TemporaryDirectory(prefix='devtool')
+ dry_run_outdir = dry_run_output.name
+
+ appendfile, _ = oe.recipeutils.bbappend_recipe(rd, args.destlayer, copyfiles, None, wildcardver=args.wildcard_version, machine=args.machine, extralines=extralines, params=params,
+ redirect_output=dry_run_outdir, update_original_recipe=args.update_recipe)
+ if not appendfile:
+ return
+ if args.dry_run:
+ output = ''
+ appendfilename = os.path.basename(appendfile)
+ newappendfile = appendfile
+ if appendfile and os.path.exists(appendfile):
+ with open(appendfile, 'r') as f:
+ oldlines = f.readlines()
+ else:
+ appendfile = '/dev/null'
+ oldlines = []
+
+ with open(os.path.join(dry_run_outdir, appendfilename), 'r') as f:
+ newlines = f.readlines()
+ diff = difflib.unified_diff(oldlines, newlines, appendfile, newappendfile)
+ difflines = list(diff)
+ if difflines:
+ output += ''.join(difflines)
+ if output:
+ logger.info('Diff of changed files:\n%s' % output)
+ else:
+ logger.info('No changed files')
+ tinfoil.modified_files()
def appendsrcfiles(parser, args):
recipedata = _parse_recipe(args.recipe, tinfoil)
@@ -435,6 +461,8 @@ def register_commands(subparsers):
help='Create/update a bbappend to add or replace source files',
description='Creates a bbappend (or updates an existing one) to add or replace the specified files in the recipe sources, either those in WORKDIR or those in the source tree. This command lets you specify multiple files with a destination directory, so you cannot specify the destination filename. See the `appendsrcfile` command for the other behavior.')
parser.add_argument('-D', '--destdir', help='Destination directory (relative to S or WORKDIR, defaults to ".")', default='', type=destination_path)
+ parser.add_argument('-u', '--update-recipe', help='Update recipe instead of creating (or updating) a bbappend file. DESTLAYER must contain the recipe to update', action='store_true')
+ parser.add_argument('-n', '--dry-run', help='Dry run mode', action='store_true')
parser.add_argument('files', nargs='+', metavar='FILE', help='File(s) to be added to the recipe sources (WORKDIR or S)', type=existing_path)
parser.set_defaults(func=lambda a: appendsrcfiles(parser, a), parserecipes=True)
@@ -442,6 +470,8 @@ def register_commands(subparsers):
parents=[common_src],
help='Create/update a bbappend to add or replace a source file',
description='Creates a bbappend (or updates an existing one) to add or replace the specified file in the recipe sources, either those in WORKDIR or those in the source tree. This command lets you specify the destination filename, not just the destination directory, but only works for one file. See the `appendsrcfiles` command for the other behavior.')
+ parser.add_argument('-u', '--update-recipe', help='Update recipe instead of creating (or updating) a bbappend file. DESTLAYER must contain the recipe to update', action='store_true')
+ parser.add_argument('-n', '--dry-run', help='Dry run mode', action='store_true')
parser.add_argument('file', metavar='FILE', help='File to be added to the recipe sources (WORKDIR or S)', type=existing_path)
parser.add_argument('destfile', metavar='DESTFILE', nargs='?', help='Destination path (relative to S or WORKDIR, optional)', type=destination_path)
parser.set_defaults(func=lambda a: appendsrcfile(parser, a), parserecipes=True)
diff --git a/scripts/lib/recipetool/create.py b/scripts/lib/recipetool/create.py
index 566c75369a..8e9ff38db6 100644
--- a/scripts/lib/recipetool/create.py
+++ b/scripts/lib/recipetool/create.py
@@ -115,8 +115,8 @@ class RecipeHandler(object):
for line in f:
if line.startswith('PN:'):
pn = line.split(':', 1)[-1].strip()
- elif line.startswith('FILES_INFO:'):
- val = line.split(':', 1)[1].strip()
+ elif line.startswith('FILES_INFO:%s:' % pkg):
+ val = line.split(': ', 1)[1].strip()
dictval = json.loads(val)
for fullpth in sorted(dictval):
if fullpth.startswith(includedir) and fullpth.endswith('.h'):
@@ -366,7 +366,7 @@ def supports_srcrev(uri):
def reformat_git_uri(uri):
'''Convert any http[s]://....git URI into git://...;protocol=http[s]'''
checkuri = uri.split(';', 1)[0]
- if checkuri.endswith('.git') or '/git/' in checkuri or re.match('https?://github.com/[^/]+/[^/]+/?$', checkuri):
+ if checkuri.endswith('.git') or '/git/' in checkuri or re.match('https?://git(hub|lab).com/[^/]+/[^/]+/?$', checkuri):
# Appends scheme if the scheme is missing
if not '://' in uri:
uri = 'git://' + uri
@@ -423,6 +423,36 @@ def create_recipe(args):
storeTagName = ''
pv_srcpv = False
+ handled = []
+ classes = []
+
+ # Find all plugins that want to register handlers
+ logger.debug('Loading recipe handlers')
+ raw_handlers = []
+ for plugin in plugins:
+ if hasattr(plugin, 'register_recipe_handlers'):
+ plugin.register_recipe_handlers(raw_handlers)
+ # Sort handlers by priority
+ handlers = []
+ for i, handler in enumerate(raw_handlers):
+ if isinstance(handler, tuple):
+ handlers.append((handler[0], handler[1], i))
+ else:
+ handlers.append((handler, 0, i))
+ handlers.sort(key=lambda item: (item[1], -item[2]), reverse=True)
+ for handler, priority, _ in handlers:
+ logger.debug('Handler: %s (priority %d)' % (handler.__class__.__name__, priority))
+ setattr(handler, '_devtool', args.devtool)
+ handlers = [item[0] for item in handlers]
+
+ fetchuri = None
+ for handler in handlers:
+ if hasattr(handler, 'process_url'):
+ ret = handler.process_url(args, classes, handled, extravalues)
+ if 'url' in handled and ret:
+ fetchuri = ret
+ break
+
if os.path.isfile(source):
source = 'file://%s' % os.path.abspath(source)
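The sort key above puts higher-priority handlers first and breaks ties by registration order; a small sketch:

    raw = [('A', 0, 0), ('B', 10, 1), ('C', 0, 2)]   # (handler, priority, index)
    raw.sort(key=lambda item: (item[1], -item[2]), reverse=True)
    # -> [('B', 10, 1), ('A', 0, 0), ('C', 0, 2)]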
@@ -431,11 +461,12 @@ def create_recipe(args):
if re.match(r'https?://github.com/[^/]+/[^/]+/archive/.+(\.tar\..*|\.zip)$', source):
logger.warning('github archive files are not guaranteed to be stable and may be re-generated over time. If the latter occurs, the checksums will likely change and the recipe will fail at do_fetch. It is recommended that you point to an actual commit or tag in the repository instead (using the repository URL in conjunction with the -S/--srcrev option).')
# Fetch a URL
- fetchuri = reformat_git_uri(urldefrag(source)[0])
+ if not fetchuri:
+ fetchuri = reformat_git_uri(urldefrag(source)[0])
if args.binary:
# Assume the archive contains the directory structure verbatim
# so we need to extract to a subdirectory
- fetchuri += ';subdir=${BP}'
+ fetchuri += ';subdir=${BPN}'
srcuri = fetchuri
rev_re = re.compile(';rev=([^;]+)')
res = rev_re.search(srcuri)
@@ -478,6 +509,9 @@ def create_recipe(args):
storeTagName = params['tag']
params['nobranch'] = '1'
del params['tag']
+ # Assume 'master' branch if not set
+ if scheme in ['git', 'gitsm'] and 'branch' not in params and 'nobranch' not in params:
+ params['branch'] = 'master'
fetchuri = bb.fetch2.encodeurl((scheme, network, path, user, passwd, params))
tmpparent = tinfoil.config_data.getVar('BASE_WORKDIR')
@@ -527,10 +561,9 @@ def create_recipe(args):
# Remove HEAD reference point and drop remote prefix
get_branch = [x.split('/', 1)[1] for x in get_branch if not x.startswith('origin/HEAD')]
if 'master' in get_branch:
- # If it is master, we do not need to append 'branch=master' as this is default.
# Even with the case where get_branch has multiple objects, if 'master' is one
# of them, we should default take from 'master'
- srcbranch = ''
+ srcbranch = 'master'
elif len(get_branch) == 1:
# If 'master' isn't in get_branch and get_branch contains only ONE object, then store result into 'srcbranch'
srcbranch = get_branch[0]
@@ -543,8 +576,8 @@ def create_recipe(args):
# Since we might have a value in srcbranch, we need to
# reconstruct the srcuri to include 'branch' in params.
scheme, network, path, user, passwd, params = bb.fetch2.decodeurl(srcuri)
- if srcbranch:
- params['branch'] = srcbranch
+ if scheme in ['git', 'gitsm']:
+ params['branch'] = srcbranch or 'master'
if storeTagName and scheme in ['git', 'gitsm']:
# Check srcrev using tag and check validity of the tag
@@ -603,7 +636,7 @@ def create_recipe(args):
splitline = line.split()
if len(splitline) > 1:
if splitline[0] == 'origin' and scriptutils.is_src_url(splitline[1]):
- srcuri = reformat_git_uri(splitline[1])
+ srcuri = reformat_git_uri(splitline[1]) + ';branch=master'
srcsubdir = 'git'
break
@@ -636,8 +669,6 @@ def create_recipe(args):
# We'll come back and replace this later in handle_license_vars()
lines_before.append('##LICENSE_PLACEHOLDER##')
- handled = []
- classes = []
# FIXME This is kind of a hack, we probably ought to be using bitbake to do this
pn = None
@@ -675,8 +706,10 @@ def create_recipe(args):
if not srcuri:
lines_before.append('# No information for SRC_URI yet (only an external source tree was specified)')
lines_before.append('SRC_URI = "%s"' % srcuri)
+ shown_checksums = ["%ssum" % s for s in bb.fetch2.SHOWN_CHECKSUM_LIST]
for key, value in sorted(checksums.items()):
- lines_before.append('SRC_URI[%s] = "%s"' % (key, value))
+ if key in shown_checksums:
+ lines_before.append('SRC_URI[%s] = "%s"' % (key, value))
if srcuri and supports_srcrev(srcuri):
lines_before.append('')
lines_before.append('# Modify these as desired')
@@ -688,7 +721,7 @@ def create_recipe(args):
srcpvprefix = 'svnr'
else:
srcpvprefix = scheme
- lines_before.append('PV = "%s+%s${SRCPV}"' % (realpv or '1.0', srcpvprefix))
+ lines_before.append('PV = "%s+%s"' % (realpv or '1.0', srcpvprefix))
pv_srcpv = True
if not args.autorev and srcrev == '${AUTOREV}':
if os.path.exists(os.path.join(srctree, '.git')):
@@ -710,31 +743,12 @@ def create_recipe(args):
lines_after.append('')
if args.binary:
- lines_after.append('INSANE_SKIP_${PN} += "already-stripped"')
+ lines_after.append('INSANE_SKIP:${PN} += "already-stripped"')
lines_after.append('')
if args.npm_dev:
extravalues['NPM_INSTALL_DEV'] = 1
- # Find all plugins that want to register handlers
- logger.debug('Loading recipe handlers')
- raw_handlers = []
- for plugin in plugins:
- if hasattr(plugin, 'register_recipe_handlers'):
- plugin.register_recipe_handlers(raw_handlers)
- # Sort handlers by priority
- handlers = []
- for i, handler in enumerate(raw_handlers):
- if isinstance(handler, tuple):
- handlers.append((handler[0], handler[1], i))
- else:
- handlers.append((handler, 0, i))
- handlers.sort(key=lambda item: (item[1], -item[2]), reverse=True)
- for handler, priority, _ in handlers:
- logger.debug('Handler: %s (priority %d)' % (handler.__class__.__name__, priority))
- setattr(handler, '_devtool', args.devtool)
- handlers = [item[0] for item in handlers]
-
# Apply the handlers
if args.binary:
classes.append('bin_package')
@@ -743,6 +757,10 @@ def create_recipe(args):
for handler in handlers:
handler.process(srctree_use, classes, lines_before, lines_after, handled, extravalues)
+ # native and nativesdk classes are special and must be inherited last
+ # If present, put them at the end of the classes list
+ classes.sort(key=lambda c: c in ("native", "nativesdk"))
+
extrafiles = extravalues.pop('extrafiles', {})
extra_pn = extravalues.pop('PN', None)
extra_pv = extravalues.pop('PV', None)
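The class sort above relies on False ordering before True, so native/nativesdk sink to the end while the stable sort preserves the rest; a sketch:

    classes = ['native', 'cmake', 'pkgconfig']
    classes.sort(key=lambda c: c in ("native", "nativesdk"))
    # -> ['cmake', 'pkgconfig', 'native']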
@@ -867,8 +885,10 @@ def create_recipe(args):
outlines.append('')
outlines.extend(lines_after)
+ outlines = [line.rstrip('\n') + "\n" for line in outlines]
+
if extravalues:
- _, outlines = oe.recipeutils.patch_recipe_lines(outlines, extravalues, trailing_newline=False)
+ _, outlines = oe.recipeutils.patch_recipe_lines(outlines, extravalues, trailing_newline=True)
if args.extract_to:
scriptutils.git_convert_standalone_clone(srctree)
@@ -884,7 +904,7 @@ def create_recipe(args):
log_info_cond('Source extracted to %s' % args.extract_to, args.devtool)
if outfile == '-':
- sys.stdout.write('\n'.join(outlines) + '\n')
+ sys.stdout.write(''.join(outlines) + '\n')
else:
with open(outfile, 'w') as f:
lastline = None
@@ -892,9 +912,10 @@ def create_recipe(args):
if not lastline and not line:
# Skip extra blank lines
continue
- f.write('%s\n' % line)
+ f.write('%s' % line)
lastline = line
log_info_cond('Recipe %s has been created; further editing may be required to make it fully functional' % outfile, args.devtool)
+ tinfoil.modified_files()
if tempsrc:
if args.keep_temp:
@@ -917,6 +938,22 @@ def split_value(value):
else:
return value
+def fixup_license(value):
+ # Ensure licenses containing an OR start and end with brackets
+ if '|' in value:
+ return '(' + value + ')'
+ return value
+
+def tidy_licenses(value):
+ """Flat, split and sort licenses"""
+ from oe.license import flattened_licenses
+ def _choose(a, b):
+ str_a, str_b = sorted((" & ".join(a), " & ".join(b)), key=str.casefold)
+ return ["(%s | %s)" % (str_a, str_b)]
+ if not isinstance(value, str):
+ value = " & ".join(value)
+ return sorted(list(set(flattened_licenses(value, _choose))), key=str.casefold)
+
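Usage sketch for the two helpers above (expected results, assuming oe.license's flattening semantics):

    fixup_license('MIT | Apache-2.0')           # -> '(MIT | Apache-2.0)'
    tidy_licenses('MIT & BSD-3-Clause & MIT')   # -> ['BSD-3-Clause', 'MIT']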
def handle_license_vars(srctree, lines_before, handled, extravalues, d):
lichandled = [x for x in handled if x[0] == 'license']
if lichandled:
@@ -930,10 +967,13 @@ def handle_license_vars(srctree, lines_before, handled, extravalues, d):
lines = []
if licvalues:
for licvalue in licvalues:
- if not licvalue[0] in licenses:
- licenses.append(licvalue[0])
+ license = licvalue[0]
+ lics = tidy_licenses(fixup_license(license))
+ lics = [lic for lic in lics if lic not in licenses]
+ if lics:
+ licenses.extend(lics)
lic_files_chksum.append('file://%s;md5=%s' % (licvalue[1], licvalue[2]))
- if licvalue[0] == 'Unknown':
+ if license == 'Unknown':
lic_unknown.append(licvalue[1])
if lic_unknown:
lines.append('#')
@@ -942,9 +982,7 @@ def handle_license_vars(srctree, lines_before, handled, extravalues, d):
for licfile in lic_unknown:
lines.append('# %s' % licfile)
- extra_license = split_value(extravalues.pop('LICENSE', []))
- if '&' in extra_license:
- extra_license.remove('&')
+ extra_license = tidy_licenses(extravalues.pop('LICENSE', ''))
if extra_license:
if licenses == ['Unknown']:
licenses = extra_license
@@ -985,7 +1023,7 @@ def handle_license_vars(srctree, lines_before, handled, extravalues, d):
lines.append('# instead of &. If there is any doubt, check the accompanying documentation')
lines.append('# to determine which situation is applicable.')
- lines.append('LICENSE = "%s"' % ' & '.join(licenses))
+ lines.append('LICENSE = "%s"' % ' & '.join(sorted(licenses, key=str.casefold)))
lines.append('LIC_FILES_CHKSUM = "%s"' % ' \\\n '.join(lic_files_chksum))
lines.append('')
@@ -1002,118 +1040,170 @@ def handle_license_vars(srctree, lines_before, handled, extravalues, d):
handled.append(('license', licvalues))
return licvalues
-def get_license_md5sums(d, static_only=False):
+def get_license_md5sums(d, static_only=False, linenumbers=False):
import bb.utils
+ import csv
md5sums = {}
- if not static_only:
+ if not static_only and not linenumbers:
# Gather md5sums of license files in common license dir
commonlicdir = d.getVar('COMMON_LICENSE_DIR')
for fn in os.listdir(commonlicdir):
md5value = bb.utils.md5_file(os.path.join(commonlicdir, fn))
md5sums[md5value] = fn
+
# The following were extracted from common values in various recipes
# (double checking the license against the license file itself, not just
# the LICENSE value in the recipe)
- md5sums['94d55d512a9ba36caa9b7df079bae19f'] = 'GPLv2'
- md5sums['b234ee4d69f5fce4486a80fdaf4a4263'] = 'GPLv2'
- md5sums['59530bdf33659b29e73d4adb9f9f6552'] = 'GPLv2'
- md5sums['0636e73ff0215e8d672dc4c32c317bb3'] = 'GPLv2'
- md5sums['eb723b61539feef013de476e68b5c50a'] = 'GPLv2'
- md5sums['751419260aa954499f7abaabaa882bbe'] = 'GPLv2'
- md5sums['393a5ca445f6965873eca0259a17f833'] = 'GPLv2'
- md5sums['12f884d2ae1ff87c09e5b7ccc2c4ca7e'] = 'GPLv2'
- md5sums['8ca43cbc842c2336e835926c2166c28b'] = 'GPLv2'
- md5sums['ebb5c50ab7cab4baeffba14977030c07'] = 'GPLv2'
- md5sums['c93c0550bd3173f4504b2cbd8991e50b'] = 'GPLv2'
- md5sums['9ac2e7cff1ddaf48b6eab6028f23ef88'] = 'GPLv2'
- md5sums['4325afd396febcb659c36b49533135d4'] = 'GPLv2'
- md5sums['18810669f13b87348459e611d31ab760'] = 'GPLv2'
- md5sums['d7810fab7487fb0aad327b76f1be7cd7'] = 'GPLv2' # the Linux kernel's COPYING file
- md5sums['bbb461211a33b134d42ed5ee802b37ff'] = 'LGPLv2.1'
- md5sums['7fbc338309ac38fefcd64b04bb903e34'] = 'LGPLv2.1'
- md5sums['4fbd65380cdd255951079008b364516c'] = 'LGPLv2.1'
- md5sums['2d5025d4aa3495befef8f17206a5b0a1'] = 'LGPLv2.1'
- md5sums['fbc093901857fcd118f065f900982c24'] = 'LGPLv2.1'
- md5sums['a6f89e2100d9b6cdffcea4f398e37343'] = 'LGPLv2.1'
- md5sums['d8045f3b8f929c1cb29a1e3fd737b499'] = 'LGPLv2.1'
- md5sums['fad9b3332be894bab9bc501572864b29'] = 'LGPLv2.1'
- md5sums['3bf50002aefd002f49e7bb854063f7e7'] = 'LGPLv2'
- md5sums['9f604d8a4f8e74f4f5140845a21b6674'] = 'LGPLv2'
- md5sums['5f30f0716dfdd0d91eb439ebec522ec2'] = 'LGPLv2'
- md5sums['55ca817ccb7d5b5b66355690e9abc605'] = 'LGPLv2'
- md5sums['252890d9eee26aab7b432e8b8a616475'] = 'LGPLv2'
- md5sums['3214f080875748938ba060314b4f727d'] = 'LGPLv2'
- md5sums['db979804f025cf55aabec7129cb671ed'] = 'LGPLv2'
- md5sums['d32239bcb673463ab874e80d47fae504'] = 'GPLv3'
- md5sums['f27defe1e96c2e1ecd4e0c9be8967949'] = 'GPLv3'
- md5sums['6a6a8e020838b23406c81b19c1d46df6'] = 'LGPLv3'
- md5sums['3b83ef96387f14655fc854ddc3c6bd57'] = 'Apache-2.0'
- md5sums['385c55653886acac3821999a3ccd17b3'] = 'Artistic-1.0 | GPL-2.0' # some perl modules
- md5sums['54c7042be62e169199200bc6477f04d1'] = 'BSD-3-Clause'
- md5sums['bfe1f75d606912a4111c90743d6c7325'] = 'MPL-1.1'
+
+ # Read license md5sums from csv file
+ scripts_path = os.path.dirname(os.path.realpath(__file__))
+ for path in (d.getVar('BBPATH').split(':')
+ + [os.path.join(scripts_path, '..', '..')]):
+ csv_path = os.path.join(path, 'lib', 'recipetool', 'licenses.csv')
+ if os.path.isfile(csv_path):
+ with open(csv_path, newline='') as csv_file:
+ fieldnames = ['md5sum', 'license', 'beginline', 'endline', 'md5']
+ reader = csv.DictReader(csv_file, delimiter=',', fieldnames=fieldnames)
+ for row in reader:
+ if linenumbers:
+ md5sums[row['md5sum']] = (
+ row['license'], row['beginline'], row['endline'], row['md5'])
+ else:
+ md5sums[row['md5sum']] = row['license']
+
return md5sums
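The CSV columns follow the fieldnames hard-coded above; a hypothetical row (values illustrative only, not from the shipped licenses.csv):

    # md5sum,license,beginline,endline,md5
    # 0123456789abcdef0123456789abcdef,MIT,1,22,fedcba9876543210fedcba9876543210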
-def crunch_license(licfile):
+def crunch_known_licenses(d):
'''
- Remove non-material text from a license file and then check
- its md5sum against a known list. This works well for licenses
- which contain a copyright statement, but is also a useful way
- to handle people's insistence upon reformatting the license text
- slightly (with no material difference to the text of the
- license).
+ Calculate the MD5 checksums for the crunched versions of all common
+ licenses. Also add additional known checksums.
'''
+
+ crunched_md5sums = {}
- import oe.utils
+ # common licenses
+ crunched_md5sums['ad4e9d34a2e966dfe9837f18de03266d'] = 'GFDL-1.1-only'
+ crunched_md5sums['d014fb11a34eb67dc717fdcfc97e60ed'] = 'GFDL-1.2-only'
+ crunched_md5sums['e020ca655b06c112def28e597ab844f1'] = 'GFDL-1.3-only'
- # Note: these are carefully constructed!
- license_title_re = re.compile(r'^\(?(#+ *)?(The )?.{1,10} [Ll]icen[sc]e( \(.{1,10}\))?\)?:?$')
- license_statement_re = re.compile(r'^(This (project|software) is( free software)? (released|licen[sc]ed)|(Released|Licen[cs]ed)) under the .{1,10} [Ll]icen[sc]e:?$')
- copyright_re = re.compile('^(#+)? *Copyright .*$')
-
- crunched_md5sums = {}
# The following two were gleaned from the "forever" npm package
crunched_md5sums['0a97f8e4cbaf889d6fa51f84b89a79f6'] = 'ISC'
- crunched_md5sums['eecf6429523cbc9693547cf2db790b5c'] = 'MIT'
- # https://github.com/vasi/pixz/blob/master/LICENSE
- crunched_md5sums['2f03392b40bbe663597b5bd3cc5ebdb9'] = 'BSD-2-Clause'
# https://github.com/waffle-gl/waffle/blob/master/LICENSE.txt
- crunched_md5sums['e72e5dfef0b1a4ca8a3d26a60587db66'] = 'BSD-2-Clause'
+ crunched_md5sums['50fab24ce589d69af8964fdbfe414c60'] = 'BSD-2-Clause'
# https://github.com/spigwitmer/fakeds1963s/blob/master/LICENSE
- crunched_md5sums['8be76ac6d191671f347ee4916baa637e'] = 'GPLv2'
- # https://github.com/datto/dattobd/blob/master/COPYING
- # http://git.savannah.gnu.org/cgit/freetype/freetype2.git/tree/docs/GPLv2.TXT
- crunched_md5sums['1d65c5ad4bf6489f85f4812bf08ae73d'] = 'GPLv2'
+ crunched_md5sums['88a4355858a1433fea99fae34a44da88'] = 'GPL-2.0-only'
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
- # http://git.neil.brown.name/?p=mdadm.git;a=blob;f=COPYING;h=d159169d1050894d3ea3b98e1c965c4058208fe1;hb=HEAD
- crunched_md5sums['fb530f66a7a89ce920f0e912b5b66d4b'] = 'GPLv2'
- # https://github.com/gkos/nrf24/blob/master/COPYING
- crunched_md5sums['7b6aaa4daeafdfa6ed5443fd2684581b'] = 'GPLv2'
- # https://github.com/josch09/resetusb/blob/master/COPYING
- crunched_md5sums['8b8ac1d631a4d220342e83bcf1a1fbc3'] = 'GPLv3'
+ crunched_md5sums['063b5c3ebb5f3aa4c85a2ed18a31fbe7'] = 'GPL-2.0-only'
# https://github.com/FFmpeg/FFmpeg/blob/master/COPYING.LGPLv2.1
- crunched_md5sums['2ea316ed973ae176e502e2297b574bb3'] = 'LGPLv2.1'
+ crunched_md5sums['7f5202f4d44ed15dcd4915f5210417d8'] = 'LGPL-2.1-only'
# unixODBC-2.3.4 COPYING
- crunched_md5sums['1daebd9491d1e8426900b4fa5a422814'] = 'LGPLv2.1'
+ crunched_md5sums['3debde09238a8c8e1f6a847e1ec9055b'] = 'LGPL-2.1-only'
# https://github.com/FFmpeg/FFmpeg/blob/master/COPYING.LGPLv3
- crunched_md5sums['2ebfb3bb49b9a48a075cc1425e7f4129'] = 'LGPLv3'
+ crunched_md5sums['f90c613c51aa35da4d79dd55fc724ceb'] = 'LGPL-3.0-only'
# https://raw.githubusercontent.com/eclipse/mosquitto/v1.4.14/epl-v10
crunched_md5sums['efe2cb9a35826992b9df68224e3c2628'] = 'EPL-1.0'
- # https://raw.githubusercontent.com/eclipse/mosquitto/v1.4.14/edl-v10
- crunched_md5sums['0a9c78c0a398d1bbce4a166757d60387'] = 'EDL-1.0'
+
+ # https://raw.githubusercontent.com/jquery/esprima/3.1.3/LICENSE.BSD
+ crunched_md5sums['80fa7b56a28e8c902e6af194003220a5'] = 'BSD-2-Clause'
+ # https://raw.githubusercontent.com/npm/npm-install-checks/master/LICENSE
+ crunched_md5sums['e659f77bfd9002659e112d0d3d59b2c1'] = 'BSD-2-Clause'
+ # https://raw.githubusercontent.com/silverwind/default-gateway/4.2.0/LICENSE
+ crunched_md5sums['4c641f2d995c47f5cb08bdb4b5b6ea05'] = 'BSD-2-Clause'
+ # https://raw.githubusercontent.com/tad-lispy/node-damerau-levenshtein/v1.0.5/LICENSE
+ crunched_md5sums['2b8c039b2b9a25f0feb4410c4542d346'] = 'BSD-2-Clause'
+ # https://raw.githubusercontent.com/terser/terser/v3.17.0/LICENSE
+ crunched_md5sums['8bd23871802951c9ad63855151204c2c'] = 'BSD-2-Clause'
+ # https://raw.githubusercontent.com/alexei/sprintf.js/1.0.3/LICENSE
+ crunched_md5sums['008c22318c8ea65928bf730ddd0273e3'] = 'BSD-3-Clause'
+ # https://raw.githubusercontent.com/Caligatio/jsSHA/v3.2.0/LICENSE
+ crunched_md5sums['0e46634a01bfef056892949acaea85b1'] = 'BSD-3-Clause'
+ # https://raw.githubusercontent.com/d3/d3-path/v1.0.9/LICENSE
+ crunched_md5sums['b5f72aef53d3b2b432702c30b0215666'] = 'BSD-3-Clause'
+ # https://raw.githubusercontent.com/feross/ieee754/v1.1.13/LICENSE
+ crunched_md5sums['a39327c997c20da0937955192d86232d'] = 'BSD-3-Clause'
+ # https://raw.githubusercontent.com/joyent/node-extsprintf/v1.3.0/LICENSE
+ crunched_md5sums['721f23a96ff4161ca3a5f071bbe18108'] = 'MIT'
+ # https://raw.githubusercontent.com/pvorb/clone/v0.2.0/LICENSE
+ crunched_md5sums['b376d29a53c9573006b9970709231431'] = 'MIT'
+ # https://raw.githubusercontent.com/andris9/encoding/v0.1.12/LICENSE
+ crunched_md5sums['85d8a977ee9d7c5ab4ac03c9b95431c4'] = 'MIT-0'
+ # https://raw.githubusercontent.com/faye/websocket-driver-node/0.7.3/LICENSE.md
+ crunched_md5sums['b66384e7137e41a9b1904ef4d39703b6'] = 'Apache-2.0'
+ # https://raw.githubusercontent.com/less/less.js/v4.1.1/LICENSE
+ crunched_md5sums['b27575459e02221ccef97ec0bfd457ae'] = 'Apache-2.0'
+ # https://raw.githubusercontent.com/microsoft/TypeScript/v3.5.3/LICENSE.txt
+ crunched_md5sums['a54a1a6a39e7f9dbb4a23a42f5c7fd1c'] = 'Apache-2.0'
+ # https://raw.githubusercontent.com/request/request/v2.87.0/LICENSE
+ crunched_md5sums['1034431802e57486b393d00c5d262b8a'] = 'Apache-2.0'
+ # https://raw.githubusercontent.com/dchest/tweetnacl-js/v0.14.5/LICENSE
+ crunched_md5sums['75605e6bdd564791ab698fca65c94a4f'] = 'Unlicense'
+ # https://raw.githubusercontent.com/stackgl/gl-mat3/v2.0.0/LICENSE.md
+ crunched_md5sums['75512892d6f59dddb6d1c7e191957e9c'] = 'Zlib'
+
+ commonlicdir = d.getVar('COMMON_LICENSE_DIR')
+ for fn in sorted(os.listdir(commonlicdir)):
+ md5value, lictext = crunch_license(os.path.join(commonlicdir, fn))
+ if md5value not in crunched_md5sums:
+ crunched_md5sums[md5value] = fn
+ elif fn != crunched_md5sums[md5value]:
+ bb.debug(2, "crunched_md5sums['%s'] is already set to '%s' rather than '%s'" % (md5value, crunched_md5sums[md5value], fn))
+ else:
+ bb.debug(2, "crunched_md5sums['%s'] is already set to '%s'" % (md5value, crunched_md5sums[md5value]))
+
+ return crunched_md5sums
+
+def crunch_license(licfile):
+ '''
+ Remove non-material text from a license file and then calculate its
+ md5sum. This works well for licenses that contain a copyright statement,
+ but is also a useful way to handle people's insistence upon reformatting
+ the license text slightly (with no material difference to the text of the
+ license).
+ '''
+
+ import oe.utils
+
+ # Note: these are carefully constructed!
+ license_title_re = re.compile(r'^#*\(? *(This is )?([Tt]he )?.{0,15} ?[Ll]icen[sc]e( \(.{1,10}\))?\)?[:\.]? ?#*$')
+ license_statement_re = re.compile(r'^((This (project|software)|.{1,10}) is( free software)? (released|licen[sc]ed)|(Released|Licen[cs]ed)) under the .{1,10} [Ll]icen[sc]e:?$')
+ copyright_re = re.compile(r'^ *[#\*]* *(Modified work |MIT LICENSED )?Copyright ?(\([cC]\))? .*$')
+ disclaimer_re = re.compile(r'^ *\*? ?All [Rr]ights [Rr]eserved\.$')
+ email_re = re.compile(r'^.*<[\w\.-]*@[\w\.\-]*>$')
+ header_re = re.compile(r'^(\/\**!?)? ?[\-=\*]* ?(\*\/)?$')
+ tag_re = re.compile(r'^ *@?\(?([Ll]icense|MIT)\)?$')
+ url_re = re.compile(r'^ *[#\*]* *https?:\/\/[\w\.\/\-]+$')
+
lictext = []
with open(licfile, 'r', errors='surrogateescape') as f:
for line in f:
# Drop opening statements
if copyright_re.match(line):
continue
+ elif disclaimer_re.match(line):
+ continue
+ elif email_re.match(line):
+ continue
+ elif header_re.match(line):
+ continue
+ elif tag_re.match(line):
+ continue
+ elif url_re.match(line):
+ continue
elif license_title_re.match(line):
continue
elif license_statement_re.match(line):
continue
- # Squash spaces, and replace smart quotes, double quotes
- # and backticks with single quotes
+ # Strip comment symbols
+ line = line.replace('*', '') \
+ .replace('#', '')
+ # Unify spelling
+ line = line.replace('sub-license', 'sublicense')
+ # Squash spaces
line = oe.utils.squashspaces(line.strip())
+ # Replace smart quotes, double quotes and backticks with single quotes
line = line.replace(u"\u2018", "'").replace(u"\u2019", "'").replace(u"\u201c","'").replace(u"\u201d", "'").replace('"', '\'').replace('`', '\'')
+ # Unify brackets
+ line = line.replace("{", "[").replace("}", "]")
if line:
lictext.append(line)
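A short sketch of the normalization steps above on one line (squashspaces behaves like re.sub(r'\s+', ' ', ...)):

    import re
    line = '**  Permission  is hereby granted, free of charge'
    line = line.replace('*', '').replace('#', '')      # strip comment symbols
    line = line.replace('sub-license', 'sublicense')   # unify spelling
    line = re.sub(r'\s+', ' ', line.strip())           # squash spaces
    # -> 'Permission is hereby granted, free of charge'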
@@ -1124,31 +1214,40 @@ def crunch_license(licfile):
except UnicodeEncodeError:
md5val = None
lictext = ''
- license = crunched_md5sums.get(md5val, None)
- return license, md5val, lictext
+ return md5val, lictext
def guess_license(srctree, d):
import bb
md5sums = get_license_md5sums(d)
+ crunched_md5sums = crunch_known_licenses(d)
+
licenses = []
licspecs = ['*LICEN[CS]E*', 'COPYING*', '*[Ll]icense*', 'LEGAL*', '[Ll]egal*', '*GPL*', 'README.lic*', 'COPYRIGHT*', '[Cc]opyright*', 'e[dp]l-v10']
+ skip_extensions = (".html", ".js", ".json", ".svg", ".ts", ".go")
licfiles = []
for root, dirs, files in os.walk(srctree):
for fn in files:
+ if fn.endswith(skip_extensions):
+ continue
for spec in licspecs:
if fnmatch.fnmatch(fn, spec):
fullpath = os.path.join(root, fn)
if not fullpath in licfiles:
licfiles.append(fullpath)
- for licfile in licfiles:
+ for licfile in sorted(licfiles):
md5value = bb.utils.md5_file(licfile)
license = md5sums.get(md5value, None)
if not license:
- license, crunched_md5, lictext = crunch_license(licfile)
- if not license:
+ crunched_md5, lictext = crunch_license(licfile)
+ license = crunched_md5sums.get(crunched_md5, None)
+ if lictext and not license:
license = 'Unknown'
- licenses.append((license, os.path.relpath(licfile, srctree), md5value))
+ logger.info("Please add the following line for '%s' to a 'lib/recipetool/licenses.csv' " \
+ "and replace `Unknown` with the license:\n" \
+ "%s,Unknown" % (os.path.relpath(licfile, srctree), md5value))
+ if license:
+ licenses.append((license, os.path.relpath(licfile, srctree), md5value))
# FIXME should we grab at least one source file with a license header and add that too?
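# Illustrative sketch (not part of the patch), assuming the two-column
# "<md5>,<license>" layout that the message above implies for
# scripts/lib/recipetool/licenses.csv:
import csv
with open('lib/recipetool/licenses.csv', newline='') as f:
    known = {md5: lic for md5, lic in csv.reader(f)}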
@@ -1162,6 +1261,7 @@ def split_pkg_licenses(licvalues, packages, outlines, fallback_licenses=None, pn
"""
pkglicenses = {pn: []}
for license, licpath, _ in licvalues:
+ license = fixup_license(license)
for pkgname, pkgpath in packages.items():
if licpath.startswith(pkgpath + '/'):
if pkgname in pkglicenses:
@@ -1174,11 +1274,14 @@ def split_pkg_licenses(licvalues, packages, outlines, fallback_licenses=None, pn
pkglicenses[pn].append(license)
outlicenses = {}
for pkgname in packages:
- license = ' '.join(list(set(pkglicenses.get(pkgname, ['Unknown'])))) or 'Unknown'
- if license == 'Unknown' and pkgname in fallback_licenses:
+ # Assume AND operator between license files
+ license = ' & '.join(list(set(pkglicenses.get(pkgname, ['Unknown'])))) or 'Unknown'
+ if license == 'Unknown' and fallback_licenses and pkgname in fallback_licenses:
license = fallback_licenses[pkgname]
- outlines.append('LICENSE_%s = "%s"' % (pkgname, license))
- outlicenses[pkgname] = license.split()
+ licenses = tidy_licenses(license)
+ license = ' & '.join(licenses)
+ outlines.append('LICENSE:%s = "%s"' % (pkgname, license))
+ outlicenses[pkgname] = licenses
return outlicenses
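# Illustrative sketch (not part of the patch): the AND-join above assumes all
# license files of a package apply together (sorted here only to make the
# assert deterministic):
pkglicenses = {'libfoo': ['MIT', 'BSD-3-Clause', 'MIT']}
joined = ' & '.join(sorted(set(pkglicenses.get('libfoo', ['Unknown']))))
assert joined == 'BSD-3-Clause & MIT'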
def read_pkgconfig_provides(d):
@@ -1311,6 +1414,7 @@ def register_commands(subparsers):
parser_create.add_argument('-B', '--srcbranch', help='Branch in source repository if fetching from an SCM such as git (default master)')
parser_create.add_argument('--keep-temp', action="store_true", help='Keep temporary directory (for debugging)')
parser_create.add_argument('--npm-dev', action="store_true", help='For npm, also fetch devDependencies')
+ parser_create.add_argument('--no-pypi', action="store_true", help='Do not inherit pypi class')
parser_create.add_argument('--devtool', action="store_true", help=argparse.SUPPRESS)
parser_create.add_argument('--mirrors', action="store_true", help='Enable PREMIRRORS and MIRRORS for source tree fetching (disabled by default).')
parser_create.set_defaults(func=create_recipe)
diff --git a/scripts/lib/recipetool/create_buildsys.py b/scripts/lib/recipetool/create_buildsys.py
index 35a97c9345..ec9d510e23 100644
--- a/scripts/lib/recipetool/create_buildsys.py
+++ b/scripts/lib/recipetool/create_buildsys.py
@@ -5,9 +5,9 @@
# SPDX-License-Identifier: GPL-2.0-only
#
+import os
import re
import logging
-import glob
from recipetool.create import RecipeHandler, validate_pv
logger = logging.getLogger('recipetool')
@@ -137,15 +137,15 @@ class CmakeRecipeHandler(RecipeHandler):
deps = []
unmappedpkgs = []
- proj_re = re.compile('project\s*\(([^)]*)\)', re.IGNORECASE)
- pkgcm_re = re.compile('pkg_check_modules\s*\(\s*[a-zA-Z0-9-_]+\s*(REQUIRED)?\s+([^)\s]+)\s*\)', re.IGNORECASE)
- pkgsm_re = re.compile('pkg_search_module\s*\(\s*[a-zA-Z0-9-_]+\s*(REQUIRED)?((\s+[^)\s]+)+)\s*\)', re.IGNORECASE)
- findpackage_re = re.compile('find_package\s*\(\s*([a-zA-Z0-9-_]+)\s*.*', re.IGNORECASE)
- findlibrary_re = re.compile('find_library\s*\(\s*[a-zA-Z0-9-_]+\s*(NAMES\s+)?([a-zA-Z0-9-_ ]+)\s*.*')
- checklib_re = re.compile('check_library_exists\s*\(\s*([^\s)]+)\s*.*', re.IGNORECASE)
- include_re = re.compile('include\s*\(\s*([^)\s]*)\s*\)', re.IGNORECASE)
- subdir_re = re.compile('add_subdirectory\s*\(\s*([^)\s]*)\s*([^)\s]*)\s*\)', re.IGNORECASE)
- dep_re = re.compile('([^ ><=]+)( *[<>=]+ *[^ ><=]+)?')
+ proj_re = re.compile(r'project\s*\(([^)]*)\)', re.IGNORECASE)
+ pkgcm_re = re.compile(r'pkg_check_modules\s*\(\s*[a-zA-Z0-9-_]+\s*(REQUIRED)?\s+([^)\s]+)\s*\)', re.IGNORECASE)
+ pkgsm_re = re.compile(r'pkg_search_module\s*\(\s*[a-zA-Z0-9-_]+\s*(REQUIRED)?((\s+[^)\s]+)+)\s*\)', re.IGNORECASE)
+ findpackage_re = re.compile(r'find_package\s*\(\s*([a-zA-Z0-9-_]+)\s*.*', re.IGNORECASE)
+ findlibrary_re = re.compile(r'find_library\s*\(\s*[a-zA-Z0-9-_]+\s*(NAMES\s+)?([a-zA-Z0-9-_ ]+)\s*.*')
+ checklib_re = re.compile(r'check_library_exists\s*\(\s*([^\s)]+)\s*.*', re.IGNORECASE)
+ include_re = re.compile(r'include\s*\(\s*([^)\s]*)\s*\)', re.IGNORECASE)
+ subdir_re = re.compile(r'add_subdirectory\s*\(\s*([^)\s]*)\s*([^)\s]*)\s*\)', re.IGNORECASE)
+ dep_re = re.compile(r'([^ ><=]+)( *[<>=]+ *[^ ><=]+)?')
def find_cmake_package(pkg):
RecipeHandler.load_devel_filemap(tinfoil.config_data)
@@ -423,16 +423,16 @@ class AutotoolsRecipeHandler(RecipeHandler):
'makeinfo': 'texinfo',
}
- pkg_re = re.compile('PKG_CHECK_MODULES\(\s*\[?[a-zA-Z0-9_]*\]?,\s*\[?([^,\]]*)\]?[),].*')
- pkgce_re = re.compile('PKG_CHECK_EXISTS\(\s*\[?([^,\]]*)\]?[),].*')
- lib_re = re.compile('AC_CHECK_LIB\(\s*\[?([^,\]]*)\]?,.*')
- libx_re = re.compile('AX_CHECK_LIBRARY\(\s*\[?[^,\]]*\]?,\s*\[?([^,\]]*)\]?,\s*\[?([a-zA-Z0-9-]*)\]?,.*')
- progs_re = re.compile('_PROGS?\(\s*\[?[a-zA-Z0-9_]*\]?,\s*\[?([^,\]]*)\]?[),].*')
- dep_re = re.compile('([^ ><=]+)( [<>=]+ [^ ><=]+)?')
- ac_init_re = re.compile('AC_INIT\(\s*([^,]+),\s*([^,]+)[,)].*')
- am_init_re = re.compile('AM_INIT_AUTOMAKE\(\s*([^,]+),\s*([^,]+)[,)].*')
- define_re = re.compile('\s*(m4_)?define\(\s*([^,]+),\s*([^,]+)\)')
- version_re = re.compile('([0-9.]+)')
+ pkg_re = re.compile(r'PKG_CHECK_MODULES\(\s*\[?[a-zA-Z0-9_]*\]?,\s*\[?([^,\]]*)\]?[),].*')
+ pkgce_re = re.compile(r'PKG_CHECK_EXISTS\(\s*\[?([^,\]]*)\]?[),].*')
+ lib_re = re.compile(r'AC_CHECK_LIB\(\s*\[?([^,\]]*)\]?,.*')
+ libx_re = re.compile(r'AX_CHECK_LIBRARY\(\s*\[?[^,\]]*\]?,\s*\[?([^,\]]*)\]?,\s*\[?([a-zA-Z0-9-]*)\]?,.*')
+ progs_re = re.compile(r'_PROGS?\(\s*\[?[a-zA-Z0-9_]*\]?,\s*\[?([^,\]]*)\]?[),].*')
+ dep_re = re.compile(r'([^ ><=]+)( [<>=]+ [^ ><=]+)?')
+ ac_init_re = re.compile(r'AC_INIT\(\s*([^,]+),\s*([^,]+)[,)].*')
+ am_init_re = re.compile(r'AM_INIT_AUTOMAKE\(\s*([^,]+),\s*([^,]+)[,)].*')
+ define_re = re.compile(r'\s*(m4_)?define\(\s*([^,]+),\s*([^,]+)\)')
+ version_re = re.compile(r'([0-9.]+)')
defines = {}
def subst_defines(value):
@@ -545,7 +545,7 @@ class AutotoolsRecipeHandler(RecipeHandler):
deps.append('zlib')
elif keyword in ('AX_CHECK_OPENSSL', 'AX_LIB_CRYPTO'):
deps.append('openssl')
- elif keyword == 'AX_LIB_CURL':
+ elif keyword in ('AX_LIB_CURL', 'LIBCURL_CHECK_CONFIG'):
deps.append('curl')
elif keyword == 'AX_LIB_BEECRYPT':
deps.append('beecrypt')
@@ -624,6 +624,7 @@ class AutotoolsRecipeHandler(RecipeHandler):
'AX_CHECK_OPENSSL',
'AX_LIB_CRYPTO',
'AX_LIB_CURL',
+ 'LIBCURL_CHECK_CONFIG',
'AX_LIB_BEECRYPT',
'AX_LIB_EXPAT',
'AX_LIB_GCRYPT',
diff --git a/scripts/lib/recipetool/create_buildsys_python.py b/scripts/lib/recipetool/create_buildsys_python.py
index adfa377956..a807dafae5 100644
--- a/scripts/lib/recipetool/create_buildsys_python.py
+++ b/scripts/lib/recipetool/create_buildsys_python.py
@@ -8,9 +8,9 @@
import ast
import codecs
import collections
-import distutils.command.build_py
+import setuptools.command.build_py
import email
-import imp
+import importlib
import glob
import itertools
import logging
@@ -18,7 +18,11 @@ import os
import re
import sys
import subprocess
+import json
+import urllib.request
from recipetool.create import RecipeHandler
+from urllib.parse import urldefrag
+from recipetool.create import determine_from_url
logger = logging.getLogger('recipetool')
@@ -37,7 +41,334 @@ class PythonRecipeHandler(RecipeHandler):
assume_provided = ['builtins', 'os.path']
# Assumes that the host python3 builtin_module_names is sane for target too
assume_provided = assume_provided + list(sys.builtin_module_names)
+ excluded_fields = []
+
+ classifier_license_map = {
+ 'License :: OSI Approved :: Academic Free License (AFL)': 'AFL',
+ 'License :: OSI Approved :: Apache Software License': 'Apache',
+ 'License :: OSI Approved :: Apple Public Source License': 'APSL',
+ 'License :: OSI Approved :: Artistic License': 'Artistic',
+ 'License :: OSI Approved :: Attribution Assurance License': 'AAL',
+ 'License :: OSI Approved :: BSD License': 'BSD-3-Clause',
+ 'License :: OSI Approved :: Boost Software License 1.0 (BSL-1.0)': 'BSL-1.0',
+ 'License :: OSI Approved :: CEA CNRS Inria Logiciel Libre License, version 2.1 (CeCILL-2.1)': 'CECILL-2.1',
+ 'License :: OSI Approved :: Common Development and Distribution License 1.0 (CDDL-1.0)': 'CDDL-1.0',
+ 'License :: OSI Approved :: Common Public License': 'CPL',
+ 'License :: OSI Approved :: Eclipse Public License 1.0 (EPL-1.0)': 'EPL-1.0',
+ 'License :: OSI Approved :: Eclipse Public License 2.0 (EPL-2.0)': 'EPL-2.0',
+ 'License :: OSI Approved :: Eiffel Forum License': 'EFL',
+ 'License :: OSI Approved :: European Union Public Licence 1.0 (EUPL 1.0)': 'EUPL-1.0',
+ 'License :: OSI Approved :: European Union Public Licence 1.1 (EUPL 1.1)': 'EUPL-1.1',
+ 'License :: OSI Approved :: European Union Public Licence 1.2 (EUPL 1.2)': 'EUPL-1.2',
+ 'License :: OSI Approved :: GNU Affero General Public License v3': 'AGPL-3.0-only',
+ 'License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)': 'AGPL-3.0-or-later',
+ 'License :: OSI Approved :: GNU Free Documentation License (FDL)': 'GFDL',
+ 'License :: OSI Approved :: GNU General Public License (GPL)': 'GPL',
+ 'License :: OSI Approved :: GNU General Public License v2 (GPLv2)': 'GPL-2.0-only',
+ 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)': 'GPL-2.0-or-later',
+ 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)': 'GPL-3.0-only',
+ 'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)': 'GPL-3.0-or-later',
+ 'License :: OSI Approved :: GNU Lesser General Public License v2 (LGPLv2)': 'LGPL-2.0-only',
+ 'License :: OSI Approved :: GNU Lesser General Public License v2 or later (LGPLv2+)': 'LGPL-2.0-or-later',
+ 'License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)': 'LGPL-3.0-only',
+ 'License :: OSI Approved :: GNU Lesser General Public License v3 or later (LGPLv3+)': 'LGPL-3.0-or-later',
+ 'License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)': 'LGPL',
+ 'License :: OSI Approved :: Historical Permission Notice and Disclaimer (HPND)': 'HPND',
+ 'License :: OSI Approved :: IBM Public License': 'IPL',
+ 'License :: OSI Approved :: ISC License (ISCL)': 'ISC',
+ 'License :: OSI Approved :: Intel Open Source License': 'Intel',
+ 'License :: OSI Approved :: Jabber Open Source License': 'Jabber',
+ 'License :: OSI Approved :: MIT License': 'MIT',
+ 'License :: OSI Approved :: MIT No Attribution License (MIT-0)': 'MIT-0',
+ 'License :: OSI Approved :: MITRE Collaborative Virtual Workspace License (CVW)': 'CVWL',
+ 'License :: OSI Approved :: MirOS License (MirOS)': 'MirOS',
+ 'License :: OSI Approved :: Motosoto License': 'Motosoto',
+ 'License :: OSI Approved :: Mozilla Public License 1.0 (MPL)': 'MPL-1.0',
+ 'License :: OSI Approved :: Mozilla Public License 1.1 (MPL 1.1)': 'MPL-1.1',
+ 'License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)': 'MPL-2.0',
+ 'License :: OSI Approved :: Nethack General Public License': 'NGPL',
+ 'License :: OSI Approved :: Nokia Open Source License': 'Nokia',
+ 'License :: OSI Approved :: Open Group Test Suite License': 'OGTSL',
+ 'License :: OSI Approved :: Open Software License 3.0 (OSL-3.0)': 'OSL-3.0',
+ 'License :: OSI Approved :: PostgreSQL License': 'PostgreSQL',
+ 'License :: OSI Approved :: Python License (CNRI Python License)': 'CNRI-Python',
+ 'License :: OSI Approved :: Python Software Foundation License': 'PSF-2.0',
+ 'License :: OSI Approved :: Qt Public License (QPL)': 'QPL',
+ 'License :: OSI Approved :: Ricoh Source Code Public License': 'RSCPL',
+ 'License :: OSI Approved :: SIL Open Font License 1.1 (OFL-1.1)': 'OFL-1.1',
+ 'License :: OSI Approved :: Sleepycat License': 'Sleepycat',
+ 'License :: OSI Approved :: Sun Industry Standards Source License (SISSL)': 'SISSL',
+ 'License :: OSI Approved :: Sun Public License': 'SPL',
+ 'License :: OSI Approved :: The Unlicense (Unlicense)': 'Unlicense',
+ 'License :: OSI Approved :: Universal Permissive License (UPL)': 'UPL-1.0',
+ 'License :: OSI Approved :: University of Illinois/NCSA Open Source License': 'NCSA',
+ 'License :: OSI Approved :: Vovida Software License 1.0': 'VSL-1.0',
+ 'License :: OSI Approved :: W3C License': 'W3C',
+ 'License :: OSI Approved :: X.Net License': 'Xnet',
+ 'License :: OSI Approved :: Zope Public License': 'ZPL',
+ 'License :: OSI Approved :: zlib/libpng License': 'Zlib',
+ 'License :: Other/Proprietary License': 'Proprietary',
+ 'License :: Public Domain': 'PD',
+ }
+
+ def __init__(self):
+ pass
+
+ def process_url(self, args, classes, handled, extravalues):
+ """
+ Convert any pypi url https://pypi.org/project/<package>/<version> into https://files.pythonhosted.org/packages/source/...
+ which corresponds to the archive location, and add pypi class
+ """
+
+ if 'url' in handled:
+ return None
+
+ fetch_uri = None
+ source = args.source
+ required_version = args.version if args.version else None
+ match = re.match(r'https?://pypi.org/project/([^/]+)(?:/([^/]+))?/?$', urldefrag(source)[0])
+ if match:
+ package = match.group(1)
+ version = match.group(2) if match.group(2) else required_version
+
+ json_url = f"https://pypi.org/pypi/%s/json" % package
+ response = urllib.request.urlopen(json_url)
+ if response.status == 200:
+ data = json.loads(response.read())
+ if not version:
+ # grab latest version
+ version = data["info"]["version"]
+ pypi_package = data["info"]["name"]
+ for release in reversed(data["releases"][version]):
+ if release["packagetype"] == "sdist":
+ fetch_uri = release["url"]
+ break
+ else:
+ logger.warning("Cannot handle pypi url %s: cannot fetch package information using %s", source, json_url)
+ return None
+ else:
+ match = re.match(r'^https?://files.pythonhosted.org/packages.*/(.*)-.*$', source)
+ if match:
+ fetch_uri = source
+ pypi_package = match.group(1)
+ _, version = determine_from_url(fetch_uri)
+
+ if match and not args.no_pypi:
+ if required_version and version != required_version:
+ raise Exception("Version specified using --version/-V (%s) and version specified in the url (%s) do not match" % (required_version, version))
+ # This is optional if BPN looks like "python-<pypi_package>" or "python3-<pypi_package>" (see pypi.bbclass),
+ # but at this point we cannot know, because the user can specify the output name of the recipe on the command line
+ extravalues["PYPI_PACKAGE"] = pypi_package
+ # If the tarball extension is not 'tar.gz' (the default value in pypi.bbclass) we should set PYPI_PACKAGE_EXT in the recipe
+ pypi_package_ext = re.match(r'.*%s-%s\.(.*)$' % (pypi_package, version), fetch_uri)
+ if pypi_package_ext:
+ pypi_package_ext = pypi_package_ext.group(1)
+ if pypi_package_ext != "tar.gz":
+ extravalues["PYPI_PACKAGE_EXT"] = pypi_package_ext
+
+ # Pypi class will handle S and SRC_URI variables, so remove them
+ # TODO: allow oe.recipeutils.patch_recipe_lines() to accept regexp so we can simplify the following to:
+ # extravalues['SRC_URI(?:\[.*?\])?'] = None
+ extravalues['S'] = None
+ extravalues['SRC_URI'] = None
+
+ classes.append('pypi')
+
+ handled.append('url')
+ return fetch_uri
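# Illustrative sketch (not part of the patch): deriving PYPI_PACKAGE_EXT from
# an sdist URL as above (package name, version and URL are made-up examples):
import re
fetch_uri = 'https://files.pythonhosted.org/packages/source/r/requests/requests-2.31.0.tar.gz'
m = re.match(r'.*%s-%s\.(.*)$' % ('requests', '2.31.0'), fetch_uri)
assert m and m.group(1) == 'tar.gz'  # the pypi.bbclass default, so no override needed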
+
+ def handle_classifier_license(self, classifiers, existing_licenses=""):
+
+ licenses = []
+ for classifier in classifiers:
+ if classifier in self.classifier_license_map:
+ license = self.classifier_license_map[classifier]
+ if license == 'Apache' and 'Apache-2.0' in existing_licenses:
+ license = 'Apache-2.0'
+ elif license == 'GPL':
+ if 'GPL-2.0' in existing_licenses or 'GPLv2' in existing_licenses:
+ license = 'GPL-2.0'
+ elif 'GPL-3.0' in existing_licenses or 'GPLv3' in existing_licenses:
+ license = 'GPL-3.0'
+ elif license == 'LGPL':
+ if 'LGPL-2.1' in existing_licenses or 'LGPLv2.1' in existing_licenses:
+ license = 'LGPL-2.1'
+ elif 'LGPL-2.0' in existing_licenses or 'LGPLv2' in existing_licenses:
+ license = 'LGPL-2.0'
+ elif 'LGPL-3.0' in existing_licenses or 'LGPLv3' in existing_licenses:
+ license = 'LGPL-3.0'
+ licenses.append(license)
+
+ if licenses:
+ return ' & '.join(licenses)
+
+ return None
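# Illustrative sketch (not part of the patch): disambiguating a bare GPL
# classifier against the free-form License metadata, as the method above does:
handler = PythonRecipeHandler()
assert handler.handle_classifier_license(
    ['License :: OSI Approved :: GNU General Public License (GPL)'],
    existing_licenses='GPLv2') == 'GPL-2.0'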
+
+ def map_info_to_bbvar(self, info, extravalues):
+
+ # Map PKG-INFO & setup.py fields to bitbake variables
+ for field, values in info.items():
+ if field in self.excluded_fields:
+ continue
+
+ if field not in self.bbvar_map:
+ continue
+
+ if isinstance(values, str):
+ value = values
+ else:
+ value = ' '.join(str(v) for v in values if v)
+
+ bbvar = self.bbvar_map[field]
+ if bbvar == "PN":
+ # by convention python recipes start with "python3-"
+ if not value.startswith('python'):
+ value = 'python3-' + value
+
+ if bbvar not in extravalues and value:
+ extravalues[bbvar] = value
+
+ def apply_info_replacements(self, info):
+ if not self.replacements:
+ return
+
+ for variable, search, replace in self.replacements:
+ if variable not in info:
+ continue
+
+ def replace_value(search, replace, value):
+ if replace is None:
+ if re.search(search, value):
+ return None
+ else:
+ new_value = re.sub(search, replace, value)
+ if value != new_value:
+ return new_value
+ return value
+
+ value = info[variable]
+ if isinstance(value, str):
+ new_value = replace_value(search, replace, value)
+ if new_value is None:
+ del info[variable]
+ elif new_value != value:
+ info[variable] = new_value
+ elif hasattr(value, 'items'):
+ for dkey, dvalue in list(value.items()):
+ new_list = []
+ for pos, a_value in enumerate(dvalue):
+ new_value = replace_value(search, replace, a_value)
+ if new_value is not None and new_value != value:
+ new_list.append(new_value)
+
+ if value != new_list:
+ value[dkey] = new_list
+ else:
+ new_list = []
+ for pos, a_value in enumerate(value):
+ new_value = replace_value(search, replace, a_value)
+ if new_value is not None and new_value != value:
+ new_list.append(new_value)
+
+ if value != new_list:
+ info[variable] = new_list
+
+
+ def scan_python_dependencies(self, paths):
+ deps = set()
+ try:
+ dep_output = self.run_command(['pythondeps', '-d'] + paths)
+ except (OSError, subprocess.CalledProcessError):
+ pass
+ else:
+ for line in dep_output.splitlines():
+ line = line.rstrip()
+ dep, filename = line.split('\t', 1)
+ if filename.endswith('/setup.py'):
+ continue
+ deps.add(dep)
+
+ try:
+ provides_output = self.run_command(['pythondeps', '-p'] + paths)
+ except (OSError, subprocess.CalledProcessError):
+ pass
+ else:
+ provides_lines = (l.rstrip() for l in provides_output.splitlines())
+ provides = set(l for l in provides_lines if l and l != 'setup')
+ deps -= provides
+
+ return deps
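# Illustrative sketch (not part of the patch): the 'pythondeps -d' output
# parsed above is assumed to be one "<module>\t<file>" pair per line, with
# entries coming from setup.py itself dropped:
sample = "requests\t/src/foo/cli.py\nsetuptools\t/src/foo/setup.py\n"
deps = set()
for line in sample.splitlines():
    dep, filename = line.rstrip().split('\t', 1)
    if not filename.endswith('/setup.py'):
        deps.add(dep)
assert deps == {'requests'}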
+
+ def parse_pkgdata_for_python_packages(self):
+ pkgdata_dir = tinfoil.config_data.getVar('PKGDATA_DIR')
+
+ ldata = tinfoil.config_data.createCopy()
+ bb.parse.handle('classes-recipe/python3-dir.bbclass', ldata, True)
+ python_sitedir = ldata.getVar('PYTHON_SITEPACKAGES_DIR')
+
+ dynload_dir = os.path.join(os.path.dirname(python_sitedir), 'lib-dynload')
+ python_dirs = [python_sitedir + os.sep,
+ os.path.join(os.path.dirname(python_sitedir), 'dist-packages') + os.sep,
+ os.path.dirname(python_sitedir) + os.sep]
+ packages = {}
+ for pkgdatafile in glob.glob('{}/runtime/*'.format(pkgdata_dir)):
+ files_info = None
+ with open(pkgdatafile, 'r') as f:
+ for line in f.readlines():
+ field, value = line.split(': ', 1)
+ if field.startswith('FILES_INFO'):
+ files_info = ast.literal_eval(value)
+ break
+ else:
+ continue
+
+ for fn in files_info:
+ for suffix in importlib.machinery.all_suffixes():
+ if fn.endswith(suffix):
+ break
+ else:
+ continue
+
+ if fn.startswith(dynload_dir + os.sep):
+ if '/.debug/' in fn:
+ continue
+ base = os.path.basename(fn)
+ provided = base.split('.', 1)[0]
+ packages[provided] = os.path.basename(pkgdatafile)
+ continue
+
+ for python_dir in python_dirs:
+ if fn.startswith(python_dir):
+ relpath = fn[len(python_dir):]
+ relstart, _, relremaining = relpath.partition(os.sep)
+ if relstart.endswith('.egg'):
+ relpath = relremaining
+ base, _ = os.path.splitext(relpath)
+
+ if '/.debug/' in base:
+ continue
+ if os.path.basename(base) == '__init__':
+ base = os.path.dirname(base)
+ base = base.replace(os.sep + os.sep, os.sep)
+ provided = base.replace(os.sep, '.')
+ packages[provided] = os.path.basename(pkgdatafile)
+ return packages
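# Illustrative sketch (not part of the patch): how a packaged file path is
# mapped back to an importable module name above (the site-packages path is a
# made-up example):
import os
python_dir = '/usr/lib/python3.12/site-packages/'
fn = python_dir + 'requests/__init__.py'
base, _ = os.path.splitext(fn[len(python_dir):])  # 'requests/__init__'
if os.path.basename(base) == '__init__':
    base = os.path.dirname(base)                  # 'requests'
assert base.replace(os.sep, '.') == 'requests'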
+
+ @classmethod
+ def run_command(cls, cmd, **popenargs):
+ if 'stderr' not in popenargs:
+ popenargs['stderr'] = subprocess.STDOUT
+ try:
+ return subprocess.check_output(cmd, **popenargs).decode('utf-8')
+ except OSError as exc:
+ logger.error('Unable to run `%s`: %s', ' '.join(cmd), exc)
+ raise
+ except subprocess.CalledProcessError as exc:
+ logger.error('Unable to run `%s`: %s', ' '.join(cmd), exc.output)
+ raise
+
+class PythonSetupPyRecipeHandler(PythonRecipeHandler):
bbvar_map = {
'Name': 'PN',
'Version': 'PV',
@@ -45,9 +376,9 @@ class PythonRecipeHandler(RecipeHandler):
'Summary': 'SUMMARY',
'Description': 'DESCRIPTION',
'License': 'LICENSE',
- 'Requires': 'RDEPENDS_${PN}',
- 'Provides': 'RPROVIDES_${PN}',
- 'Obsoletes': 'RREPLACES_${PN}',
+ 'Requires': 'RDEPENDS:${PN}',
+ 'Provides': 'RPROVIDES:${PN}',
+ 'Obsoletes': 'RREPLACES:${PN}',
}
# PN/PV are already set by recipetool core & desc can be extremely long
excluded_fields = [
@@ -75,6 +406,7 @@ class PythonRecipeHandler(RecipeHandler):
'Supported-Platform',
]
setuparg_multi_line_values = ['Description']
+
replacements = [
('License', r' +$', ''),
('License', r'^ +', ''),
@@ -95,71 +427,161 @@ class PythonRecipeHandler(RecipeHandler):
('Install-requires', r'\[[^\]]+\]$', ''),
]
- classifier_license_map = {
- 'License :: OSI Approved :: Academic Free License (AFL)': 'AFL',
- 'License :: OSI Approved :: Apache Software License': 'Apache',
- 'License :: OSI Approved :: Apple Public Source License': 'APSL',
- 'License :: OSI Approved :: Artistic License': 'Artistic',
- 'License :: OSI Approved :: Attribution Assurance License': 'AAL',
- 'License :: OSI Approved :: BSD License': 'BSD',
- 'License :: OSI Approved :: Common Public License': 'CPL',
- 'License :: OSI Approved :: Eiffel Forum License': 'EFL',
- 'License :: OSI Approved :: European Union Public Licence 1.0 (EUPL 1.0)': 'EUPL-1.0',
- 'License :: OSI Approved :: European Union Public Licence 1.1 (EUPL 1.1)': 'EUPL-1.1',
- 'License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)': 'AGPL-3.0+',
- 'License :: OSI Approved :: GNU Affero General Public License v3': 'AGPL-3.0',
- 'License :: OSI Approved :: GNU Free Documentation License (FDL)': 'GFDL',
- 'License :: OSI Approved :: GNU General Public License (GPL)': 'GPL',
- 'License :: OSI Approved :: GNU General Public License v2 (GPLv2)': 'GPL-2.0',
- 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)': 'GPL-2.0+',
- 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)': 'GPL-3.0',
- 'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)': 'GPL-3.0+',
- 'License :: OSI Approved :: GNU Lesser General Public License v2 (LGPLv2)': 'LGPL-2.0',
- 'License :: OSI Approved :: GNU Lesser General Public License v2 or later (LGPLv2+)': 'LGPL-2.0+',
- 'License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)': 'LGPL-3.0',
- 'License :: OSI Approved :: GNU Lesser General Public License v3 or later (LGPLv3+)': 'LGPL-3.0+',
- 'License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)': 'LGPL',
- 'License :: OSI Approved :: IBM Public License': 'IPL',
- 'License :: OSI Approved :: ISC License (ISCL)': 'ISC',
- 'License :: OSI Approved :: Intel Open Source License': 'Intel',
- 'License :: OSI Approved :: Jabber Open Source License': 'Jabber',
- 'License :: OSI Approved :: MIT License': 'MIT',
- 'License :: OSI Approved :: MITRE Collaborative Virtual Workspace License (CVW)': 'CVWL',
- 'License :: OSI Approved :: Motosoto License': 'Motosoto',
- 'License :: OSI Approved :: Mozilla Public License 1.0 (MPL)': 'MPL-1.0',
- 'License :: OSI Approved :: Mozilla Public License 1.1 (MPL 1.1)': 'MPL-1.1',
- 'License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)': 'MPL-2.0',
- 'License :: OSI Approved :: Nethack General Public License': 'NGPL',
- 'License :: OSI Approved :: Nokia Open Source License': 'Nokia',
- 'License :: OSI Approved :: Open Group Test Suite License': 'OGTSL',
- 'License :: OSI Approved :: Python License (CNRI Python License)': 'CNRI-Python',
- 'License :: OSI Approved :: Python Software Foundation License': 'PSF',
- 'License :: OSI Approved :: Qt Public License (QPL)': 'QPL',
- 'License :: OSI Approved :: Ricoh Source Code Public License': 'RSCPL',
- 'License :: OSI Approved :: Sleepycat License': 'Sleepycat',
- 'License :: OSI Approved :: Sun Industry Standards Source License (SISSL)': '-- Sun Industry Standards Source License (SISSL)',
- 'License :: OSI Approved :: Sun Public License': 'SPL',
- 'License :: OSI Approved :: University of Illinois/NCSA Open Source License': 'NCSA',
- 'License :: OSI Approved :: Vovida Software License 1.0': 'VSL-1.0',
- 'License :: OSI Approved :: W3C License': 'W3C',
- 'License :: OSI Approved :: X.Net License': 'Xnet',
- 'License :: OSI Approved :: Zope Public License': 'ZPL',
- 'License :: OSI Approved :: zlib/libpng License': 'Zlib',
- }
-
def __init__(self):
pass
+ def parse_setup_py(self, setupscript='./setup.py'):
+ with codecs.open(setupscript) as f:
+ info, imported_modules, non_literals, extensions = gather_setup_info(f)
+
+ def _map(key):
+ key = key.replace('_', '-')
+ key = key[0].upper() + key[1:]
+ if key in self.setup_parse_map:
+ key = self.setup_parse_map[key]
+ return key
+
+ # Naive mapping of setup() arguments to PKG-INFO field names
+ for d in [info, non_literals]:
+ for key, value in list(d.items()):
+ if key is None:
+ continue
+ new_key = _map(key)
+ if new_key != key:
+ del d[key]
+ d[new_key] = value
+
+ return info, 'setuptools' in imported_modules, non_literals, extensions
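# Illustrative sketch (not part of the patch): _map() turns setup() keyword
# names into PKG-INFO style field names:
key = 'install_requires'
key = key.replace('_', '-')
assert key[0].upper() + key[1:] == 'Install-requires'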
+
+ def get_setup_args_info(self, setupscript='./setup.py'):
+ cmd = ['python3', setupscript]
+ info = {}
+ keys = set(self.bbvar_map.keys())
+ keys |= set(self.setuparg_list_fields)
+ keys |= set(self.setuparg_multi_line_values)
+ grouped_keys = itertools.groupby(keys, lambda k: (k in self.setuparg_list_fields, k in self.setuparg_multi_line_values))
+ for index, keys in grouped_keys:
+ if index == (True, False):
+ # Splitlines output for each arg as a list value
+ for key in keys:
+ arg = self.setuparg_map.get(key, key.lower())
+ try:
+ arg_info = self.run_command(cmd + ['--' + arg], cwd=os.path.dirname(setupscript))
+ except (OSError, subprocess.CalledProcessError):
+ pass
+ else:
+ info[key] = [l.rstrip() for l in arg_info.splitlines()]
+ elif index == (False, True):
+ # Entire output for each arg
+ for key in keys:
+ arg = self.setuparg_map.get(key, key.lower())
+ try:
+ arg_info = self.run_command(cmd + ['--' + arg], cwd=os.path.dirname(setupscript))
+ except (OSError, subprocess.CalledProcessError):
+ pass
+ else:
+ info[key] = arg_info
+ else:
+ info.update(self.get_setup_byline(list(keys), setupscript))
+ return info
+
+ def get_setup_byline(self, fields, setupscript='./setup.py'):
+ info = {}
+
+ cmd = ['python3', setupscript]
+ cmd.extend('--' + self.setuparg_map.get(f, f.lower()) for f in fields)
+ try:
+ info_lines = self.run_command(cmd, cwd=os.path.dirname(setupscript)).splitlines()
+ except (OSError, subprocess.CalledProcessError):
+ pass
+ else:
+ if len(fields) != len(info_lines):
+ logger.error('Mismatch between setup.py output lines and number of fields')
+ sys.exit(1)
+
+ for lineno, line in enumerate(info_lines):
+ line = line.rstrip()
+ info[fields[lineno]] = line
+ return info
+
+ def get_pkginfo(self, pkginfo_fn):
+ msg = email.message_from_file(open(pkginfo_fn, 'r'))
+ msginfo = {}
+ for field in msg.keys():
+ values = msg.get_all(field)
+ if len(values) == 1:
+ msginfo[field] = values[0]
+ else:
+ msginfo[field] = values
+ return msginfo
+
+ def scan_setup_python_deps(self, srctree, setup_info, setup_non_literals):
+ if 'Package-dir' in setup_info:
+ package_dir = setup_info['Package-dir']
+ else:
+ package_dir = {}
+
+ dist = setuptools.Distribution()
+
+ class PackageDir(setuptools.command.build_py.build_py):
+ def __init__(self, package_dir):
+ self.package_dir = package_dir
+ self.dist = dist
+ super().__init__(self.dist)
+
+ pd = PackageDir(package_dir)
+ to_scan = []
+ if not any(v in setup_non_literals for v in ['Py-modules', 'Scripts', 'Packages']):
+ if 'Py-modules' in setup_info:
+ for module in setup_info['Py-modules']:
+ try:
+ package, module = module.rsplit('.', 1)
+ except ValueError:
+ package, module = '.', module
+ module_path = os.path.join(pd.get_package_dir(package), module + '.py')
+ to_scan.append(module_path)
+
+ if 'Packages' in setup_info:
+ for package in setup_info['Packages']:
+ to_scan.append(pd.get_package_dir(package))
+
+ if 'Scripts' in setup_info:
+ to_scan.extend(setup_info['Scripts'])
+ else:
+ logger.info("Scanning the entire source tree, as one or more of the following setup keywords are non-literal: py_modules, scripts, packages.")
+
+ if not to_scan:
+ to_scan = ['.']
+
+ logger.info("Scanning paths for packages & dependencies: %s", ', '.join(to_scan))
+
+ provided_packages = self.parse_pkgdata_for_python_packages()
+ scanned_deps = self.scan_python_dependencies([os.path.join(srctree, p) for p in to_scan])
+ mapped_deps, unmapped_deps = set(self.base_pkgdeps), set()
+ for dep in scanned_deps:
+ mapped = provided_packages.get(dep)
+ if mapped:
+ logger.debug('Mapped %s to %s' % (dep, mapped))
+ mapped_deps.add(mapped)
+ else:
+ logger.debug('Could not map %s' % dep)
+ unmapped_deps.add(dep)
+ return mapped_deps, unmapped_deps
+
def process(self, srctree, classes, lines_before, lines_after, handled, extravalues):
+
if 'buildsystem' in handled:
return False
+ logger.debug("Trying setup.py parser")
+
# Check for non-zero size setup.py files
setupfiles = RecipeHandler.checkfiles(srctree, ['setup.py'])
for fn in setupfiles:
if os.path.getsize(fn):
break
else:
+ logger.debug("No setup.py found")
return False
# setup.py is always parsed to get at certain required information, such as
@@ -193,6 +615,18 @@ class PythonRecipeHandler(RecipeHandler):
continue
if line.startswith('['):
+ # PACKAGECONFIG must not contain expressions or whitespace
+ line = line.replace(" ", "")
+ line = line.replace(':', "")
+ line = line.replace('.', "-dot-")
+ line = line.replace('"', "")
+ line = line.replace('<', "-smaller-")
+ line = line.replace('>', "-bigger-")
+ line = line.replace('_', "-")
+ line = line.replace('(', "")
+ line = line.replace(')', "")
+ line = line.replace('!', "-not-")
+ line = line.replace('=', "-equals-")
current_feature = line[1:-1]
elif current_feature:
extras_req[current_feature].append(line)
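# Illustrative sketch (not part of the patch): a requires.txt section header
# such as [socks:python_version<"3.9"] becomes a PACKAGECONFIG-safe name:
line = '[socks:python_version<"3.9"]'
for old, new in ((' ', ''), (':', ''), ('.', '-dot-'), ('"', ''),
                 ('<', '-smaller-'), ('>', '-bigger-'), ('_', '-'),
                 ('(', ''), (')', ''), ('!', '-not-'), ('=', '-equals-')):
    line = line.replace(old, new)
assert line[1:-1] == 'sockspython-version-smaller-3-dot-9'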
@@ -226,51 +660,16 @@ class PythonRecipeHandler(RecipeHandler):
if license_str:
for i, line in enumerate(lines_before):
- if line.startswith('LICENSE = '):
+ if line.startswith('##LICENSE_PLACEHOLDER##'):
lines_before.insert(i, '# NOTE: License in setup.py/PKGINFO is: %s' % license_str)
break
if 'Classifier' in info:
- existing_licenses = info.get('License', '')
- licenses = []
- for classifier in info['Classifier']:
- if classifier in self.classifier_license_map:
- license = self.classifier_license_map[classifier]
- if license == 'Apache' and 'Apache-2.0' in existing_licenses:
- license = 'Apache-2.0'
- elif license == 'GPL':
- if 'GPL-2.0' in existing_licenses or 'GPLv2' in existing_licenses:
- license = 'GPL-2.0'
- elif 'GPL-3.0' in existing_licenses or 'GPLv3' in existing_licenses:
- license = 'GPL-3.0'
- elif license == 'LGPL':
- if 'LGPL-2.1' in existing_licenses or 'LGPLv2.1' in existing_licenses:
- license = 'LGPL-2.1'
- elif 'LGPL-2.0' in existing_licenses or 'LGPLv2' in existing_licenses:
- license = 'LGPL-2.0'
- elif 'LGPL-3.0' in existing_licenses or 'LGPLv3' in existing_licenses:
- license = 'LGPL-3.0'
- licenses.append(license)
-
- if licenses:
- info['License'] = ' & '.join(licenses)
+ license = self.handle_classifier_license(info['Classifier'], info.get('License', ''))
+ if license:
+ info['License'] = license
- # Map PKG-INFO & setup.py fields to bitbake variables
- for field, values in info.items():
- if field in self.excluded_fields:
- continue
-
- if field not in self.bbvar_map:
- continue
-
- if isinstance(values, str):
- value = values
- else:
- value = ' '.join(str(v) for v in values if v)
-
- bbvar = self.bbvar_map[field]
- if bbvar not in extravalues and value:
- extravalues[bbvar] = value
+ self.map_info_to_bbvar(info, extravalues)
mapped_deps, unmapped_deps = self.scan_setup_python_deps(srctree, setup_info, setup_non_literals)
@@ -281,6 +680,7 @@ class PythonRecipeHandler(RecipeHandler):
lines_after.append('# The following configs & dependencies are from setuptools extras_require.')
lines_after.append('# These dependencies are optional, hence can be controlled via PACKAGECONFIG.')
lines_after.append('# The upstream names may not correspond exactly to bitbake package names.')
+ lines_after.append('# The configs might not be correct, since PACKAGECONFIG does not support expressions as may be used in requires.txt - they are just replaced by text.')
lines_after.append('#')
lines_after.append('# Uncomment this line to enable all the optional features.')
lines_after.append('#PACKAGECONFIG ?= "{}"'.format(' '.join(k.lower() for k in extras_req)))
@@ -301,7 +701,7 @@ class PythonRecipeHandler(RecipeHandler):
inst_req_deps = ('python3-' + r.replace('.', '-').lower() for r in sorted(inst_reqs))
lines_after.append('# WARNING: the following rdepends are from setuptools install_requires. These')
lines_after.append('# upstream names may not correspond exactly to bitbake package names.')
- lines_after.append('RDEPENDS_${{PN}} += "{}"'.format(' '.join(inst_req_deps)))
+ lines_after.append('RDEPENDS:${{PN}} += "{}"'.format(' '.join(inst_req_deps)))
if mapped_deps:
name = info.get('Name')
@@ -313,7 +713,7 @@ class PythonRecipeHandler(RecipeHandler):
lines_after.append('')
lines_after.append('# WARNING: the following rdepends are determined through basic analysis of the')
lines_after.append('# python sources, and might not be 100% accurate.')
- lines_after.append('RDEPENDS_${{PN}} += "{}"'.format(' '.join(sorted(mapped_deps))))
+ lines_after.append('RDEPENDS:${{PN}} += "{}"'.format(' '.join(sorted(mapped_deps))))
unmapped_deps -= set(extensions)
unmapped_deps -= set(self.assume_provided)
@@ -326,275 +726,283 @@ class PythonRecipeHandler(RecipeHandler):
handled.append('buildsystem')
- def get_pkginfo(self, pkginfo_fn):
- msg = email.message_from_file(open(pkginfo_fn, 'r'))
- msginfo = {}
- for field in msg.keys():
- values = msg.get_all(field)
- if len(values) == 1:
- msginfo[field] = values[0]
- else:
- msginfo[field] = values
- return msginfo
+class PythonPyprojectTomlRecipeHandler(PythonRecipeHandler):
+ """Base class to support PEP517 and PEP518
+
+ PEP517 https://peps.python.org/pep-0517/#source-trees
+ PEP518 https://peps.python.org/pep-0518/#build-system-table
+ """
+ # bitbake currently supports the following build backends
+ build_backend_map = {
+ "setuptools.build_meta": "python_setuptools_build_meta",
+ "poetry.core.masonry.api": "python_poetry_core",
+ "flit_core.buildapi": "python_flit_core",
+ "hatchling.build": "python_hatchling",
+ "maturin": "python_maturin",
+ "mesonpy": "python_mesonpy",
+ }
- def parse_setup_py(self, setupscript='./setup.py'):
- with codecs.open(setupscript) as f:
- info, imported_modules, non_literals, extensions = gather_setup_info(f)
+ # setuptools.build_meta and flit declare project metadata in the "project" section of pyproject.toml,
+ # according to PEP-621: https://packaging.python.org/en/latest/specifications/declaring-project-metadata/#declaring-project-metadata
+ # while poetry uses the "tool.poetry" section, according to its official documentation: https://python-poetry.org/docs/pyproject/
+ # The keys of the "project" and "tool.poetry" sections are almost the same, except for the homepage:
+ # it is "homepage" for tool.poetry and "Homepage" for the "project" section. So keep both.
+ bbvar_map = {
+ "name": "PN",
+ "version": "PV",
+ "Homepage": "HOMEPAGE",
+ "homepage": "HOMEPAGE",
+ "description": "SUMMARY",
+ "license": "LICENSE",
+ "dependencies": "RDEPENDS:${PN}",
+ "requires": "DEPENDS",
+ }
- def _map(key):
- key = key.replace('_', '-')
- key = key[0].upper() + key[1:]
- if key in self.setup_parse_map:
- key = self.setup_parse_map[key]
- return key
+ replacements = [
+ ("license", r" +$", ""),
+ ("license", r"^ +", ""),
+ ("license", r" ", "-"),
+ ("license", r"^GNU-", ""),
+ ("license", r"-[Ll]icen[cs]e(,?-[Vv]ersion)?", ""),
+ ("license", r"^UNKNOWN$", ""),
+ # Remove currently unhandled version numbers from these variables
+ ("requires", r"\[[^\]]+\]$", ""),
+ ("requires", r"^([^><= ]+).*", r"\1"),
+ ("dependencies", r"\[[^\]]+\]$", ""),
+ ("dependencies", r"^([^><= ]+).*", r"\1"),
+ ]
- # Naive mapping of setup() arguments to PKG-INFO field names
- for d in [info, non_literals]:
- for key, value in list(d.items()):
- if key is None:
- continue
- new_key = _map(key)
- if new_key != key:
- del d[key]
- d[new_key] = value
+ excluded_native_pkgdeps = [
+ # already provided by python_setuptools_build_meta.bbclass
+ "python3-setuptools-native",
+ "python3-wheel-native",
+ # already provided by python_poetry_core.bbclass
+ "python3-poetry-core-native",
+ # already provided by python_flit_core.bbclass
+ "python3-flit-core-native",
+ # already provided by python_mesonpy
+ "python3-meson-python-native",
+ ]
- return info, 'setuptools' in imported_modules, non_literals, extensions
+ # add here a list of known and often used packages and the corresponding bitbake package
+ known_deps_map = {
+ "setuptools": "python3-setuptools",
+ "wheel": "python3-wheel",
+ "poetry-core": "python3-poetry-core",
+ "flit_core": "python3-flit-core",
+ "setuptools-scm": "python3-setuptools-scm",
+ "hatchling": "python3-hatchling",
+ "hatch-vcs": "python3-hatch-vcs",
+ "meson-python" : "python3-meson-python",
+ }
- def get_setup_args_info(self, setupscript='./setup.py'):
- cmd = ['python3', setupscript]
- info = {}
- keys = set(self.bbvar_map.keys())
- keys |= set(self.setuparg_list_fields)
- keys |= set(self.setuparg_multi_line_values)
- grouped_keys = itertools.groupby(keys, lambda k: (k in self.setuparg_list_fields, k in self.setuparg_multi_line_values))
- for index, keys in grouped_keys:
- if index == (True, False):
- # Splitlines output for each arg as a list value
- for key in keys:
- arg = self.setuparg_map.get(key, key.lower())
- try:
- arg_info = self.run_command(cmd + ['--' + arg], cwd=os.path.dirname(setupscript))
- except (OSError, subprocess.CalledProcessError):
- pass
- else:
- info[key] = [l.rstrip() for l in arg_info.splitlines()]
- elif index == (False, True):
- # Entire output for each arg
- for key in keys:
- arg = self.setuparg_map.get(key, key.lower())
- try:
- arg_info = self.run_command(cmd + ['--' + arg], cwd=os.path.dirname(setupscript))
- except (OSError, subprocess.CalledProcessError):
- pass
- else:
- info[key] = arg_info
- else:
- info.update(self.get_setup_byline(list(keys), setupscript))
- return info
+ def __init__(self):
+ pass
- def get_setup_byline(self, fields, setupscript='./setup.py'):
+ def process(self, srctree, classes, lines_before, lines_after, handled, extravalues):
info = {}
+ metadata = {}
- cmd = ['python3', setupscript]
- cmd.extend('--' + self.setuparg_map.get(f, f.lower()) for f in fields)
- try:
- info_lines = self.run_command(cmd, cwd=os.path.dirname(setupscript)).splitlines()
- except (OSError, subprocess.CalledProcessError):
- pass
- else:
- if len(fields) != len(info_lines):
- logger.error('Mismatch between setup.py output lines and number of fields')
- sys.exit(1)
-
- for lineno, line in enumerate(info_lines):
- line = line.rstrip()
- info[fields[lineno]] = line
- return info
-
- def apply_info_replacements(self, info):
- for variable, search, replace in self.replacements:
- if variable not in info:
- continue
-
- def replace_value(search, replace, value):
- if replace is None:
- if re.search(search, value):
- return None
- else:
- new_value = re.sub(search, replace, value)
- if value != new_value:
- return new_value
- return value
-
- value = info[variable]
- if isinstance(value, str):
- new_value = replace_value(search, replace, value)
- if new_value is None:
- del info[variable]
- elif new_value != value:
- info[variable] = new_value
- elif hasattr(value, 'items'):
- for dkey, dvalue in list(value.items()):
- new_list = []
- for pos, a_value in enumerate(dvalue):
- new_value = replace_value(search, replace, a_value)
- if new_value is not None and new_value != value:
- new_list.append(new_value)
-
- if value != new_list:
- value[dkey] = new_list
- else:
- new_list = []
- for pos, a_value in enumerate(value):
- new_value = replace_value(search, replace, a_value)
- if new_value is not None and new_value != value:
- new_list.append(new_value)
-
- if value != new_list:
- info[variable] = new_list
-
- def scan_setup_python_deps(self, srctree, setup_info, setup_non_literals):
- if 'Package-dir' in setup_info:
- package_dir = setup_info['Package-dir']
- else:
- package_dir = {}
-
- class PackageDir(distutils.command.build_py.build_py):
- def __init__(self, package_dir):
- self.package_dir = package_dir
-
- pd = PackageDir(package_dir)
- to_scan = []
- if not any(v in setup_non_literals for v in ['Py-modules', 'Scripts', 'Packages']):
- if 'Py-modules' in setup_info:
- for module in setup_info['Py-modules']:
- try:
- package, module = module.rsplit('.', 1)
- except ValueError:
- package, module = '.', module
- module_path = os.path.join(pd.get_package_dir(package), module + '.py')
- to_scan.append(module_path)
+ if 'buildsystem' in handled:
+ return False
- if 'Packages' in setup_info:
- for package in setup_info['Packages']:
- to_scan.append(pd.get_package_dir(package))
+ logger.debug("Trying pyproject.toml parser")
- if 'Scripts' in setup_info:
- to_scan.extend(setup_info['Scripts'])
+ # Check for non-zero size pyproject.toml files
+ setupfiles = RecipeHandler.checkfiles(srctree, ["pyproject.toml"])
+ for fn in setupfiles:
+ if os.path.getsize(fn):
+ break
else:
- logger.info("Scanning the entire source tree, as one or more of the following setup keywords are non-literal: py_modules, scripts, packages.")
-
- if not to_scan:
- to_scan = ['.']
-
- logger.info("Scanning paths for packages & dependencies: %s", ', '.join(to_scan))
+ logger.debug("No pyproject.toml found")
+ return False
- provided_packages = self.parse_pkgdata_for_python_packages()
- scanned_deps = self.scan_python_dependencies([os.path.join(srctree, p) for p in to_scan])
- mapped_deps, unmapped_deps = set(self.base_pkgdeps), set()
- for dep in scanned_deps:
- mapped = provided_packages.get(dep)
- if mapped:
- logger.debug('Mapped %s to %s' % (dep, mapped))
- mapped_deps.add(mapped)
- else:
- logger.debug('Could not map %s' % dep)
- unmapped_deps.add(dep)
- return mapped_deps, unmapped_deps
+ setupscript = os.path.join(srctree, "pyproject.toml")
- def scan_python_dependencies(self, paths):
- deps = set()
try:
- dep_output = self.run_command(['pythondeps', '-d'] + paths)
- except (OSError, subprocess.CalledProcessError):
- pass
- else:
- for line in dep_output.splitlines():
- line = line.rstrip()
- dep, filename = line.split('\t', 1)
- if filename.endswith('/setup.py'):
- continue
- deps.add(dep)
+ try:
+ import tomllib
+ except ImportError:
+ try:
+ import tomli as tomllib
+ except ImportError:
+ logger.error("Neither 'tomllib' nor 'tomli' could be imported, cannot scan pyproject.toml.")
+ return False
+
+ try:
+ with open(setupscript, "rb") as f:
+ config = tomllib.load(f)
+ except Exception:
+ logger.exception("Failed to parse pyproject.toml")
+ return False
+
+ build_backend = config["build-system"]["build-backend"]
+ if build_backend in self.build_backend_map:
+ classes.append(self.build_backend_map[build_backend])
+ else:
+ logger.error(
+ "Unsupported build-backend: %s, cannot use pyproject.toml. Will try to use legacy setup.py"
+ % build_backend
+ )
+ return False
- try:
- provides_output = self.run_command(['pythondeps', '-p'] + paths)
- except (OSError, subprocess.CalledProcessError):
- pass
- else:
- provides_lines = (l.rstrip() for l in provides_output.splitlines())
- provides = set(l for l in provides_lines if l and l != 'setup')
- deps -= provides
+ licfile = ""
- return deps
+ if build_backend == "poetry.core.masonry.api":
+ if "tool" in config and "poetry" in config["tool"]:
+ metadata = config["tool"]["poetry"]
+ else:
+ if "project" in config:
+ metadata = config["project"]
+
+ if metadata:
+ for field, values in metadata.items():
+ if field == "license":
+ # For setuptools.build_meta and flit, license is a table,
+ # but for poetry license is a string;
+ # for hatchling, both table (jsonschema) and string (iniconfig) have been used
+ if build_backend == "poetry.core.masonry.api":
+ value = values
+ else:
+ value = values.get("text", "")
+ if not value:
+ licfile = values.get("file", "")
+ continue
+ elif field == "dependencies" and build_backend == "poetry.core.masonry.api":
+ # For poetry backend, "dependencies" section looks like:
+ # [tool.poetry.dependencies]
+ # requests = "^2.13.0"
+ # requests = { version = "^2.13.0", source = "private" }
+ # See https://python-poetry.org/docs/master/pyproject/#dependencies-and-dependency-groups for more details
+ # This class doesn't handle versions anyway, so we just get the dependencies name here and construct a list
+ value = []
+ for k in values.keys():
+ value.append(k)
+ elif isinstance(values, dict):
+ for k, v in values.items():
+ info[k] = v
+ continue
+ else:
+ value = values
- def parse_pkgdata_for_python_packages(self):
- suffixes = [t[0] for t in imp.get_suffixes()]
- pkgdata_dir = tinfoil.config_data.getVar('PKGDATA_DIR')
+ info[field] = value
- ldata = tinfoil.config_data.createCopy()
- bb.parse.handle('classes/python3-dir.bbclass', ldata, True)
- python_sitedir = ldata.getVar('PYTHON_SITEPACKAGES_DIR')
+ # Grab the license value before applying replacements
+ license_str = info.get("license", "").strip()
- dynload_dir = os.path.join(os.path.dirname(python_sitedir), 'lib-dynload')
- python_dirs = [python_sitedir + os.sep,
- os.path.join(os.path.dirname(python_sitedir), 'dist-packages') + os.sep,
- os.path.dirname(python_sitedir) + os.sep]
- packages = {}
- for pkgdatafile in glob.glob('{}/runtime/*'.format(pkgdata_dir)):
- files_info = None
- with open(pkgdatafile, 'r') as f:
- for line in f.readlines():
- field, value = line.split(': ', 1)
- if field == 'FILES_INFO':
- files_info = ast.literal_eval(value)
+ if license_str:
+ for i, line in enumerate(lines_before):
+ if line.startswith("##LICENSE_PLACEHOLDER##"):
+ lines_before.insert(
+ i, "# NOTE: License in pyproject.toml is: %s" % license_str
+ )
break
- else:
- continue
- for fn in files_info:
- for suffix in suffixes:
- if fn.endswith(suffix):
- break
- else:
- continue
+ info["requires"] = config["build-system"]["requires"]
+
+ self.apply_info_replacements(info)
+
+ if "classifiers" in info:
+ license = self.handle_classifier_license(
+ info["classifiers"], info.get("license", "")
+ )
+ if license:
+ if licfile:
+ lines = []
+ md5value = bb.utils.md5_file(os.path.join(srctree, licfile))
+ lines.append('LICENSE = "%s"' % license)
+ lines.append(
+ 'LIC_FILES_CHKSUM = "file://%s;md5=%s"'
+ % (licfile, md5value)
+ )
+ lines.append("")
+
+ # Replace the placeholder so we get the values in the right place in the recipe file
+ try:
+ pos = lines_before.index("##LICENSE_PLACEHOLDER##")
+ except ValueError:
+ pos = -1
+ if pos == -1:
+ lines_before.extend(lines)
+ else:
+ lines_before[pos : pos + 1] = lines
- if fn.startswith(dynload_dir + os.sep):
- if '/.debug/' in fn:
- continue
- base = os.path.basename(fn)
- provided = base.split('.', 1)[0]
- packages[provided] = os.path.basename(pkgdatafile)
- continue
+ handled.append(("license", [license, licfile, md5value]))
+ else:
+ info["license"] = license
- for python_dir in python_dirs:
- if fn.startswith(python_dir):
- relpath = fn[len(python_dir):]
- relstart, _, relremaining = relpath.partition(os.sep)
- if relstart.endswith('.egg'):
- relpath = relremaining
- base, _ = os.path.splitext(relpath)
+ provided_packages = self.parse_pkgdata_for_python_packages()
+ provided_packages.update(self.known_deps_map)
+ native_mapped_deps, native_unmapped_deps = set(), set()
+ mapped_deps, unmapped_deps = set(), set()
- if '/.debug/' in base:
- continue
- if os.path.basename(base) == '__init__':
- base = os.path.dirname(base)
- base = base.replace(os.sep + os.sep, os.sep)
- provided = base.replace(os.sep, '.')
- packages[provided] = os.path.basename(pkgdatafile)
- return packages
+ if "requires" in info:
+ for require in info["requires"]:
+ mapped = provided_packages.get(require)
- @classmethod
- def run_command(cls, cmd, **popenargs):
- if 'stderr' not in popenargs:
- popenargs['stderr'] = subprocess.STDOUT
- try:
- return subprocess.check_output(cmd, **popenargs).decode('utf-8')
- except OSError as exc:
- logger.error('Unable to run `{}`: {}', ' '.join(cmd), exc)
- raise
- except subprocess.CalledProcessError as exc:
- logger.error('Unable to run `{}`: {}', ' '.join(cmd), exc.output)
- raise
+ if mapped:
+ logger.debug("Mapped %s to %s" % (require, mapped))
+ native_mapped_deps.add(mapped)
+ else:
+ logger.debug("Could not map %s" % require)
+ native_unmapped_deps.add(require)
+
+ info.pop("requires")
+
+ if native_mapped_deps != set():
+ native_mapped_deps = {
+ item + "-native" for item in native_mapped_deps
+ }
+ native_mapped_deps -= set(self.excluded_native_pkgdeps)
+ if native_mapped_deps != set():
+ info["requires"] = " ".join(sorted(native_mapped_deps))
+
+ if native_unmapped_deps:
+ lines_after.append("")
+ lines_after.append(
+ "# WARNING: We were unable to map the following python package/module"
+ )
+ lines_after.append(
+ "# dependencies to the bitbake packages which include them:"
+ )
+ lines_after.extend(
+ "# {}".format(d) for d in sorted(native_unmapped_deps)
+ )
+
+ if "dependencies" in info:
+ for dependency in info["dependencies"]:
+ mapped = provided_packages.get(dependency)
+ if mapped:
+ logger.debug("Mapped %s to %s" % (dependency, mapped))
+ mapped_deps.add(mapped)
+ else:
+ logger.debug("Could not map %s" % dependency)
+ unmapped_deps.add(dependency)
+
+ info.pop("dependencies")
+
+ if mapped_deps != set():
+ info["dependencies"] = " ".join(sorted(mapped_deps))
+
+ if unmapped_deps:
+ lines_after.append("")
+ lines_after.append(
+ "# WARNING: We were unable to map the following python package/module"
+ )
+ lines_after.append(
+ "# runtime dependencies to the bitbake packages which include them:"
+ )
+ lines_after.extend(
+ "# {}".format(d) for d in sorted(unmapped_deps)
+ )
+
+ self.map_info_to_bbvar(info, extravalues)
+
+ handled.append("buildsystem")
+ except Exception:
+ logger.exception("Failed to correctly handle pyproject.toml, falling back to another method")
+ return False
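# Illustrative sketch (not part of the patch): loading the build-backend the
# way process() does, with the same tomllib/tomli fallback (assumes a
# pyproject.toml in the current directory):
try:
    import tomllib
except ImportError:
    import tomli as tomllib
with open('pyproject.toml', 'rb') as f:
    config = tomllib.load(f)
backend = config['build-system']['build-backend']
# e.g. 'flit_core.buildapi' selects the python_flit_core bbclass above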
def gather_setup_info(fileobj):
@@ -710,5 +1118,7 @@ def has_non_literals(value):
def register_recipe_handlers(handlers):
- # We need to make sure this is ahead of the makefile fallback handler
- handlers.append((PythonRecipeHandler(), 70))
+ # We need to make sure these are ahead of the makefile fallback handler,
+ # and that the pyproject.toml handler is ahead of the setup.py handler
+ handlers.append((PythonPyprojectTomlRecipeHandler(), 75))
+ handlers.append((PythonSetupPyRecipeHandler(), 70))
diff --git a/scripts/lib/recipetool/create_go.py b/scripts/lib/recipetool/create_go.py
new file mode 100644
index 0000000000..c560831442
--- /dev/null
+++ b/scripts/lib/recipetool/create_go.py
@@ -0,0 +1,779 @@
+# Recipe creation tool - go support plugin
+#
+# The code is based on golang internals. See the affected
+# methods for further reference and information.
+#
+# Copyright (C) 2023 Weidmueller GmbH & Co KG
+# Author: Lukas Funke <lukas.funke@weidmueller.com>
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+
+from collections import namedtuple
+from enum import Enum
+from html.parser import HTMLParser
+from recipetool.create import RecipeHandler, handle_license_vars
+from recipetool.create import guess_license, tidy_licenses, fixup_license
+from recipetool.create import determine_from_url
+from urllib.error import URLError
+
+import bb.utils
+import json
+import logging
+import os
+import re
+import subprocess
+import sys
+import shutil
+import tempfile
+import urllib.parse
+import urllib.request
+
+
+GoImport = namedtuple('GoImport', 'root vcs url suffix')
+logger = logging.getLogger('recipetool')
+CodeRepo = namedtuple(
+ 'CodeRepo', 'path codeRoot codeDir pathMajor pathPrefix pseudoMajor')
+
+tinfoil = None
+
+# Regular expression to parse pseudo semantic version
+# see https://go.dev/ref/mod#pseudo-versions
+re_pseudo_semver = re.compile(
+ r"^v[0-9]+\.(0\.0-|\d+\.\d+-([^+]*\.)?0\.)(?P<utc>\d{14})-(?P<commithash>[A-Za-z0-9]+)(\+[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*)?$")
+# Regular expression to parse semantic version
+re_semver = re.compile(
+ r"^v(?P<major>0|[1-9]\d*)\.(?P<minor>0|[1-9]\d*)\.(?P<patch>0|[1-9]\d*)(?:-(?P<prerelease>(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+(?P<buildmetadata>[0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$")
+
+
+def tinfoil_init(instance):
+ global tinfoil
+ tinfoil = instance
+
+
+class GoRecipeHandler(RecipeHandler):
+ """Class to handle the go recipe creation"""
+
+ @staticmethod
+ def __ensure_go():
+ """Check if the 'go' command is available in the recipes"""
+ recipe = "go-native"
+ if not tinfoil.recipes_parsed:
+ tinfoil.parse_recipes()
+ try:
+ rd = tinfoil.parse_recipe(recipe)
+ except bb.providers.NoProvider:
+ bb.error(
+ "Nothing provides '%s' which is required for the build" % (recipe))
+ bb.note(
+ "You will likely need to add a layer that provides '%s'" % (recipe))
+ return None
+
+ bindir = rd.getVar('STAGING_BINDIR_NATIVE')
+ gopath = os.path.join(bindir, 'go')
+
+ if not os.path.exists(gopath):
+ tinfoil.build_targets(recipe, 'addto_recipe_sysroot')
+
+ if not os.path.exists(gopath):
+ logger.error(
+ '%s required to process specified source, but %s did not seem to populate it' % ('go', recipe))
+ return None
+
+ return bindir
+
+ def __resolve_repository_static(self, modulepath):
+ """Resolve the repository in a static manner
+
+ The method is based on the go implementation of
+ `repoRootFromVCSPaths` in
+ https://github.com/golang/go/blob/master/src/cmd/go/internal/vcs/vcs.go
+ """
+
+ url = urllib.parse.urlparse("https://" + modulepath)
+ req = urllib.request.Request(url.geturl())
+
+ try:
+ resp = urllib.request.urlopen(req)
+ # Some module paths are just redirects to github (or some other
+ # vcs hoster). Therefore, we check if this module path redirects
+ # somewhere else
+ if resp.geturl() != url.geturl():
+ bb.debug(1, "%s is redirected to %s" %
+ (url.geturl(), resp.geturl()))
+ url = urllib.parse.urlparse(resp.geturl())
+ modulepath = url.netloc + url.path
+
+ except URLError as url_err:
+ # This is probably because the module path
+ # contains the subdir and major path. Thus,
+ # we ignore this error for now
+ logger.debug(
+ "Failed to fetch page from [%s]: %s" % (url, str(url_err)))
+
+ host, _, _ = modulepath.partition('/')
+
+ class vcs(Enum):
+ pathprefix = "pathprefix"
+ regexp = "regexp"
+ type = "type"
+ repo = "repo"
+ check = "check"
+ schemelessRepo = "schemelessRepo"
+
+ # GitHub
+ vcsGitHub = {}
+ vcsGitHub[vcs.pathprefix] = "github.com"
+ vcsGitHub[vcs.regexp] = re.compile(
+ r'^(?P<root>github\.com/[A-Za-z0-9_.\-]+/[A-Za-z0-9_.\-]+)(/(?P<suffix>[A-Za-z0-9_.\-]+))*$')
+ vcsGitHub[vcs.type] = "git"
+ vcsGitHub[vcs.repo] = "https://\\g<root>"
+
+ # Bitbucket
+ vcsBitbucket = {}
+ vcsBitbucket[vcs.pathprefix] = "bitbucket.org"
+ vcsBitbucket[vcs.regexp] = re.compile(
+ r'^(?P<root>bitbucket\.org/(?P<bitname>[A-Za-z0-9_.\-]+/[A-Za-z0-9_.\-]+))(/(?P<suffix>[A-Za-z0-9_.\-]+))*$')
+ vcsBitbucket[vcs.type] = "git"
+ vcsBitbucket[vcs.repo] = "https://\\g<root>"
+
+ # IBM DevOps Services (JazzHub)
+ vcsIBMDevOps = {}
+ vcsIBMDevOps[vcs.pathprefix] = "hub.jazz.net/git"
+ vcsIBMDevOps[vcs.regexp] = re.compile(
+ r'^(?P<root>hub\.jazz\.net/git/[a-z0-9]+/[A-Za-z0-9_.\-]+)(/(?P<suffix>[A-Za-z0-9_.\-]+))*$')
+ vcsIBMDevOps[vcs.type] = "git"
+ vcsIBMDevOps[vcs.repo] = "https://\\g<root>"
+
+ # Git at Apache
+ vcsApacheGit = {}
+ vcsApacheGit[vcs.pathprefix] = "git.apache.org"
+ vcsApacheGit[vcs.regexp] = re.compile(
+ r'^(?P<root>git\.apache\.org/[a-z0-9_.\-]+\.git)(/(?P<suffix>[A-Za-z0-9_.\-]+))*$')
+ vcsApacheGit[vcs.type] = "git"
+ vcsApacheGit[vcs.repo] = "https://\\g<root>"
+
+ # Git at OpenStack
+ vcsOpenStackGit = {}
+ vcsOpenStackGit[vcs.pathprefix] = "git.openstack.org"
+ vcsOpenStackGit[vcs.regexp] = re.compile(
+ r'^(?P<root>git\.openstack\.org/[A-Za-z0-9_.\-]+/[A-Za-z0-9_.\-]+)(\.git)?(/(?P<suffix>[A-Za-z0-9_.\-]+))*$')
+ vcsOpenStackGit[vcs.type] = "git"
+ vcsOpenStackGit[vcs.repo] = "https://\\g<root>"
+
+ # chiselapp.com for fossil
+ vcsChiselapp = {}
+ vcsChiselapp[vcs.pathprefix] = "chiselapp.com"
+ vcsChiselapp[vcs.regexp] = re.compile(
+ r'^(?P<root>chiselapp\.com/user/[A-Za-z0-9]+/repository/[A-Za-z0-9_.\-]+)$')
+ vcsChiselapp[vcs.type] = "fossil"
+ vcsChiselapp[vcs.repo] = "https://\\g<root>"
+
+ # General syntax for any server.
+ # Must be last.
+ vcsGeneralServer = {}
+ vcsGeneralServer[vcs.regexp] = re.compile(
+ "(?P<root>(?P<repo>([a-z0-9.\\-]+\\.)+[a-z0-9.\\-]+(:[0-9]+)?(/~?[A-Za-z0-9_.\\-]+)+?)\\.(?P<vcs>bzr|fossil|git|hg|svn))(/~?(?P<suffix>[A-Za-z0-9_.\\-]+))*$")
+ vcsGeneralServer[vcs.schemelessRepo] = True
+
+ vcsPaths = [vcsGitHub, vcsBitbucket, vcsIBMDevOps,
+ vcsApacheGit, vcsOpenStackGit, vcsChiselapp,
+ vcsGeneralServer]
+
+ if modulepath.startswith("example.net") or modulepath == "rsc.io":
+ logger.warning("Suspicious module path %s" % modulepath)
+ return None
+ if modulepath.startswith("http:") or modulepath.startswith("https:"):
+ logger.warning("Import path should not start with %s %s" %
+ ("http", "https"))
+ return None
+
+ rootpath = None
+ vcstype = None
+ repourl = None
+ suffix = None
+
+ for srv in vcsPaths:
+ m = srv[vcs.regexp].match(modulepath)
+ if vcs.pathprefix in srv:
+ if m and host == srv[vcs.pathprefix]:
+ rootpath = m.group('root')
+ vcstype = srv[vcs.type]
+ repourl = m.expand(srv[vcs.repo])
+ suffix = m.group('suffix')
+ break
+ elif m and srv[vcs.schemelessRepo]:
+ rootpath = m.group('root')
+ vcstype = m.group('vcs')
+ repourl = m.group('repo')
+ suffix = m.group('suffix')
+ break
+
+ return GoImport(rootpath, vcstype, repourl, suffix)
+
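+ # Illustrative example (not part of the original change) of the static
+ # resolution above for a well-known host:
+ #
+ #   modulepath "github.com/pkg/errors/subpkg" matches vcsGitHub, giving
+ #   GoImport(root="github.com/pkg/errors", vcs="git",
+ #            url="https://github.com/pkg/errors", suffix="subpkg")
+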
+ def __resolve_repository_dynamic(self, modulepath):
+ """Resolve the repository root in a dynamic manner.
+
+ The method is based on the go implementation of
+ `repoRootForImportDynamic` in
+ https://github.com/golang/go/blob/master/src/cmd/go/internal/vcs/vcs.go
+ """
+ url = urllib.parse.urlparse("https://" + modulepath)
+
+ class GoImportHTMLParser(HTMLParser):
+
+ def __init__(self):
+ super().__init__()
+ self.__srv = []
+
+ def handle_starttag(self, tag, attrs):
+ if tag == 'meta' and list(
+ filter(lambda a: (a[0] == 'name' and a[1] == 'go-import'), attrs)):
+ content = list(
+ filter(lambda a: (a[0] == 'content'), attrs))
+ if content:
+ self.__srv = content[0][1].split()
+
+ @property
+ def import_prefix(self):
+ return self.__srv[0] if len(self.__srv) else None
+
+ @property
+ def vcs(self):
+ return self.__srv[1] if len(self.__srv) else None
+
+ @property
+ def repourl(self):
+ return self.__srv[2] if len(self.__srv) else None
+
+ url = url.geturl() + "?go-get=1"
+ req = urllib.request.Request(url)
+
+ try:
+ resp = urllib.request.urlopen(req)
+
+ except URLError as url_err:
+ logger.warning(
+ "Failed to fetch page from [%s]: %s", url, str(url_err))
+ return None
+
+ parser = GoImportHTMLParser()
+ parser.feed(resp.read().decode('utf-8'))
+ parser.close()
+
+ return GoImport(parser.import_prefix, parser.vcs, parser.repourl, None)
+
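+ # For reference: a server answering the "?go-get=1" request above returns
+ # HTML whose <head> carries a go-import meta tag, e.g.
+ #
+ #   <meta name="go-import"
+ #         content="golang.org/x/net git https://go.googlesource.com/net">
+ #
+ # which GoImportHTMLParser splits into (import_prefix, vcs, repourl).
+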
+ def __resolve_from_golang_proxy(self, modulepath, version):
+ """
+ Resolves repository data from golang proxy
+ """
+ url = urllib.parse.urlparse("https://proxy.golang.org/"
+ + modulepath
+ + "/@v/"
+ + version
+ + ".info")
+
+ # Transform url to lower case, golang proxy doesn't like mixed case
+ req = urllib.request.Request(url.geturl().lower())
+
+ try:
+ resp = urllib.request.urlopen(req)
+ except URLError as url_err:
+ logger.warning(
+ "Failed to fetch page from [%s]: %s", url, str(url_err))
+ return None
+
+ golang_proxy_res = resp.read().decode('utf-8')
+ modinfo = json.loads(golang_proxy_res)
+
+ if modinfo and 'Origin' in modinfo:
+ origin = modinfo['Origin']
+ _root_url = urllib.parse.urlparse(origin['URL'])
+
+ # The module path may contain a subdir within the repository;
+ # strip it to obtain the repository root
+ _subdir = origin['Subdir'] if 'Subdir' in origin else None
+ _root, _, _ = self.__split_path_version(modulepath)
+ if _subdir:
+ _root = _root[:-len(_subdir)].strip('/')
+
+ _commit = origin['Hash']
+ _vcs = origin['VCS']
+ return (GoImport(_root, _vcs, _root_url.geturl(), None), _commit)
+
+ return None
+
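+ # Illustrative proxy response (shape assumed from proxy.golang.org; the
+ # 'Origin' block is only present for some modules):
+ #
+ #   GET https://proxy.golang.org/github.com/pkg/errors/@v/v0.9.1.info
+ #   {"Version": "v0.9.1", "Time": "...",
+ #    "Origin": {"VCS": "git", "URL": "https://github.com/pkg/errors",
+ #               "Hash": "<full sha1>"}}
+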
+ def __resolve_repository(self, modulepath):
+ """
+ Resolves src uri from go module-path
+ """
+ repodata = self.__resolve_repository_static(modulepath)
+ if not repodata or not repodata.url:
+ repodata = self.__resolve_repository_dynamic(modulepath)
+ if not repodata or not repodata.url:
+ logger.error(
+ "Could not resolve repository for module path '%s'" % modulepath)
+ # There is no way to recover from this
+ sys.exit(14)
+ if repodata:
+ logger.debug(1, "Resolved download path for import '%s' => %s" % (
+ modulepath, repodata.url))
+ return repodata
+
+ def __split_path_version(self, path):
+ i = len(path)
+ dot = False
+ for j in range(i, 0, -1):
+ if path[j - 1] < '0' or path[j - 1] > '9':
+ break
+ if path[j - 1] == '.':
+ dot = True
+ break
+ i = j - 1
+
+ if i <= 1 or i == len(
+ path) or path[i - 1] != 'v' or path[i - 2] != '/':
+ return path, "", True
+
+ prefix, pathMajor = path[:i - 2], path[i - 2:]
+ if dot or len(
+ pathMajor) <= 2 or pathMajor[2] == '0' or pathMajor == "/v1":
+ return path, "", False
+
+ return prefix, pathMajor, True
+
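+ # Examples (illustrative, mirroring Go's splitPathVersion):
+ #
+ #   "example.com/mod/v2" -> ("example.com/mod", "/v2", True)
+ #   "example.com/mod"    -> ("example.com/mod", "", True)
+ #   "example.com/mod/v1" -> ("example.com/mod/v1", "", False)
+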
+ def __get_path_major(self, pathMajor):
+ if not pathMajor:
+ return ""
+
+ if pathMajor[0] != '/' and pathMajor[0] != '.':
+ logger.error(
+ "pathMajor suffix %s passed to PathMajorPrefix lacks separator", pathMajor)
+
+ if pathMajor.startswith(".v") and pathMajor.endswith("-unstable"):
+ pathMajor = pathMajor[:len("-unstable") - 2]
+
+ return pathMajor[1:]
+
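+ # Examples (illustrative): "/v2" -> "v2", ".v3" -> "v3"; for a
+ # gopkg.in-style ".v2-unstable" the "-unstable" suffix is trimmed first,
+ # also yielding "v2".
+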
+ def __build_coderepo(self, repo, path):
+ codedir = ""
+ pathprefix, pathMajor, _ = self.__split_path_version(path)
+ if repo.root == path:
+ pathprefix = path
+ elif path.startswith(repo.root):
+ codedir = pathprefix[len(repo.root):].strip('/')
+
+ pseudoMajor = self.__get_path_major(pathMajor)
+
+ logger.debug("root='%s', codedir='%s', prefix='%s', pathMajor='%s', pseudoMajor='%s'",
+ repo.root, codedir, pathprefix, pathMajor, pseudoMajor)
+
+ return CodeRepo(path, repo.root, codedir,
+ pathMajor, pathprefix, pseudoMajor)
+
+ def __resolve_version(self, repo, path, version):
+ hash = None
+ coderoot = self.__build_coderepo(repo, path)
+
+ def vcs_fetch_all():
+ tmpdir = tempfile.mkdtemp()
+ clone_cmd = "%s clone --bare %s %s" % ('git', repo.url, tmpdir)
+ bb.process.run(clone_cmd)
+ log_cmd = "git log --all --pretty='%H %d' --decorate=short"
+ output, _ = bb.process.run(
+ log_cmd, shell=True, stderr=subprocess.PIPE, cwd=tmpdir)
+ bb.utils.prunedir(tmpdir)
+ return output.strip().split('\n')
+
+ def vcs_fetch_remote(tag):
+ # add * to grab ^{}
+ refs = {}
+ ls_remote_cmd = "git ls-remote -q --tags {} {}*".format(
+ repo.url, tag)
+ output, _ = bb.process.run(ls_remote_cmd)
+ output = output.strip().split('\n')
+ for line in output:
+ f = line.split(maxsplit=1)
+ if len(f) != 2:
+ continue
+
+ for prefix in ["HEAD", "refs/heads/", "refs/tags/"]:
+ if f[1].startswith(prefix):
+ refs[f[1][len(prefix):]] = f[0]
+
+ for key, hash in list(refs.items()):
+ if key.endswith(r"^{}"):
+ refs[key[:-3]] = hash
+
+ return refs.get(tag)
+
+ m_pseudo_semver = re_pseudo_semver.match(version)
+
+ if m_pseudo_semver:
+ remote_refs = vcs_fetch_all()
+ short_commit = m_pseudo_semver.group('commithash')
+ for line in remote_refs:
+ r = line.split(maxsplit=1)
+ sha1 = r[0] if len(r) else None
+ if not sha1:
+ logger.error(
+ "Oops: could not resolve abbreviated commit for %s" % short_commit)
+
+ elif sha1.startswith(short_commit):
+ hash = sha1
+ break
+ else:
+ m_semver = re_semver.match(version)
+ if m_semver:
+
+ def get_sha1_remote(pattern):
+ rsha1 = None
+ for line in remote_refs:
+ # Split lines of the following format:
+ # 22e90d9b964610628c10f673ca5f85b8c2a2ca9a (tag: sometag)
+ lineparts = line.split(maxsplit=1)
+ sha1 = lineparts[0] if len(lineparts) else None
+ refstring = lineparts[1] if len(
+ lineparts) == 2 else None
+ if refstring:
+ # Normalize tag string and split in case of multiple
+ # refs e.g. (tag: speech/v1.10.0, tag: orchestration/v1.5.0 ...)
+ refs = refstring.strip('(), ').split(',')
+ for ref in refs:
+ if pattern.match(ref.strip()):
+ rsha1 = sha1
+ return rsha1
+
+ semver = "v" + m_semver.group('major') + "."\
+ + m_semver.group('minor') + "."\
+ + m_semver.group('patch') \
+ + (("-" + m_semver.group('prerelease'))
+ if m_semver.group('prerelease') else "")
+
+ tag = os.path.join(
+ coderoot.codeDir, semver) if coderoot.codeDir else semver
+
+ # probe tag using 'ls-remote', which is faster than fetching
+ # complete history
+ hash = vcs_fetch_remote(tag)
+ if not hash:
+ # backup: fetch complete history
+ remote_refs = vcs_fetch_all()
+ hash = get_sha1_remote(
+ re.compile(fr"(tag:|HEAD ->) ({tag})"))
+
+ logger.debug(
+ "Resolving commit for tag '%s' -> '%s'", tag, hash)
+ return hash
+
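+ # For reference: Go pseudo-versions embed a timestamp and an abbreviated
+ # commit hash, e.g. "v0.0.0-20200116001909-b77594299b42", so the branch
+ # above only needs to expand the short hash to the full sha1; plain semver
+ # tags such as "v1.4.0" are resolved via 'git ls-remote' instead.
+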
+ def __generate_srcuri_inline_fcn(self, path, version, replaces=None):
+ """Generate SRC_URI functions for go imports"""
+
+ logger.info("Resolving repository for module %s", path)
+ # First try to resolve repo and commit from golang proxy
+ # Most info is already there and we don't have to go through the
+ # repository or even perform the version resolve magic
+ golang_proxy_info = self.__resolve_from_golang_proxy(path, version)
+ if golang_proxy_info:
+ repo = golang_proxy_info[0]
+ commit = golang_proxy_info[1]
+ else:
+ # Fallback
+ # Resolve repository by 'hand'
+ repo = self.__resolve_repository(path)
+ commit = self.__resolve_version(repo, path, version)
+
+ url = urllib.parse.urlparse(repo.url)
+ repo_url = url.netloc + url.path
+
+ coderoot = self.__build_coderepo(repo, path)
+
+ inline_fcn = "${@go_src_uri("
+ inline_fcn += f"'{repo_url}','{version}'"
+ if repo_url != path:
+ inline_fcn += f",path='{path}'"
+ if coderoot.codeDir:
+ inline_fcn += f",subdir='{coderoot.codeDir}'"
+ if repo.vcs != 'git':
+ inline_fcn += f",vcs='{repo.vcs}'"
+ if replaces:
+ inline_fcn += f",replaces='{replaces}'"
+ if coderoot.pathMajor:
+ inline_fcn += f",pathmajor='{coderoot.pathMajor}'"
+ inline_fcn += ")}"
+
+ return inline_fcn, commit
+
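+ # Illustrative output of the method above (assumed module and version):
+ #
+ #   inline_fcn = "${@go_src_uri('github.com/pkg/errors','v0.9.1')}"
+ #   commit     = "<full sha1 of the v0.9.1 tag>"
+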
+ def __go_handle_dependencies(self, go_mod, srctree, localfilesdir, extravalues, d):
+
+ import re
+ src_uris = []
+ src_revs = []
+
+ def generate_src_rev(path, version, commithash):
+ src_rev = f"# {path}@{version} => {commithash}\n"
+ # Oops... maybe someone manipulated the source repository and the
+ # version or commit could not be resolved. This is a sign that
+ # a) the supply chain was manipulated (bad), or
+ # b) the version-resolving implementation no longer works
+ # (less bad)
+ if not commithash:
+ src_rev += "#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
+ src_rev += "#!!! Could not resolve version !!!\n"
+ src_rev += "#!!! Possible supply chain attack !!!\n"
+ src_rev += "#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
+ src_rev += f"SRCREV_{path.replace('/', '.')} = \"{commithash}\""
+
+ return src_rev
+
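+ # Example (illustrative) of a generated entry:
+ #
+ #   # github.com/pkg/errors@v0.9.1 => <sha1>
+ #   SRCREV_github.com.pkg.errors = "<sha1>"
+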
+ # We first go over the replacement list because we are essentially
+ # interested only in the replaced paths
+ if go_mod.get('Replace'):
+ for replacement in go_mod['Replace']:
+ oldpath = replacement['Old']['Path']
+ path = replacement['New']['Path']
+ version = ''
+ if 'Version' in replacement['New']:
+ version = replacement['New']['Version']
+
+ if os.path.exists(os.path.join(srctree, path)):
+ # The module refers to a local path; remove it from the
+ # requirement list because it's a local module
+ go_mod['Require'][:] = [v for v in go_mod['Require'] if v.get('Path') != oldpath]
+ else:
+ # Replace the path and the version so we don't need to iterate the replacement list again
+ for require in go_mod['Require']:
+ if require['Path'] == oldpath:
+ require.update({'Path': path, 'Version': version})
+ break
+
+ for require in go_mod.get('Require') or []:
+ path = require['Path']
+ version = require['Version']
+
+ inline_fcn, commithash = self.__generate_srcuri_inline_fcn(
+ path, version)
+ src_uris.append(inline_fcn)
+ src_revs.append(generate_src_rev(path, version, commithash))
+
+ # strip version part from module URL /vXX
+ baseurl = re.sub(r'/v(\d+)$', '', go_mod['Module']['Path'])
+ pn, _ = determine_from_url(baseurl)
+ go_mods_basename = "%s-modules.inc" % pn
+
+ go_mods_filename = os.path.join(localfilesdir, go_mods_basename)
+ with open(go_mods_filename, "w") as f:
+ # We introduce this indirection to make the tests a little easier
+ f.write("SRC_URI += \"${GO_DEPENDENCIES_SRC_URI}\"\n")
+ f.write("GO_DEPENDENCIES_SRC_URI = \"\\\n")
+ for uri in src_uris:
+ f.write(" " + uri + " \\\n")
+ f.write("\"\n\n")
+ for rev in src_revs:
+ f.write(rev + "\n")
+
+ extravalues['extrafiles'][go_mods_basename] = go_mods_filename
+
+ def __go_run_cmd(self, cmd, cwd, d):
+ return bb.process.run(cmd, env=dict(os.environ, PATH=d.getVar('PATH')),
+ shell=True, cwd=cwd)
+
+ def __go_native_version(self, d):
+ stdout, _ = self.__go_run_cmd("go version", None, d)
+ m = re.match(r".*\sgo((\d+).(\d+).(\d+))\s([\w\/]*)", stdout)
+ major = int(m.group(2))
+ minor = int(m.group(3))
+ patch = int(m.group(4))
+
+ return major, minor, patch
+
+ def __go_mod_patch(self, srctree, localfilesdir, extravalues, d):
+
+ patchfilename = "go.mod.patch"
+ go_native_version_major, go_native_version_minor, _ = self.__go_native_version(
+ d)
+ self.__go_run_cmd("go mod tidy -go=%d.%d" %
+ (go_native_version_major, go_native_version_minor), srctree, d)
+ stdout, _ = self.__go_run_cmd("go mod edit -json", srctree, d)
+
+ # Create patch in order to upgrade go version
+ self.__go_run_cmd("git diff go.mod > %s" % (patchfilename), srctree, d)
+ # Restore original state
+ self.__go_run_cmd("git checkout HEAD go.mod go.sum", srctree, d)
+
+ go_mod = json.loads(stdout)
+ tmpfile = os.path.join(localfilesdir, patchfilename)
+ shutil.move(os.path.join(srctree, patchfilename), tmpfile)
+
+ extravalues['extrafiles'][patchfilename] = tmpfile
+
+ return go_mod, patchfilename
+
+ def __go_mod_vendor(self, go_mod, srctree, localfilesdir, extravalues, d):
+ # Perform vendoring to retrieve the correct modules.txt
+ tmp_vendor_dir = tempfile.mkdtemp()
+
+ # -v causes go to print modules.txt to stderr
+ _, stderr = self.__go_run_cmd(
+ "go mod vendor -v -o %s" % (tmp_vendor_dir), srctree, d)
+
+ modules_txt_basename = "modules.txt"
+ modules_txt_filename = os.path.join(localfilesdir, modules_txt_basename)
+ with open(modules_txt_filename, "w") as f:
+ f.write(stderr)
+
+ extravalues['extrafiles'][modules_txt_basename] = modules_txt_filename
+
+ licenses = []
+ lic_files_chksum = []
+ licvalues = guess_license(tmp_vendor_dir, d)
+ shutil.rmtree(tmp_vendor_dir)
+
+ if licvalues:
+ for licvalue in licvalues:
+ license = licvalue[0]
+ lics = tidy_licenses(fixup_license(license))
+ lics = [lic for lic in lics if lic not in licenses]
+ if len(lics):
+ licenses.extend(lics)
+ lic_files_chksum.append(
+ 'file://src/${GO_IMPORT}/vendor/%s;md5=%s' % (licvalue[1], licvalue[2]))
+
+ # strip version part from module URL /vXX
+ baseurl = re.sub(r'/v(\d+)$', '', go_mod['Module']['Path'])
+ pn, _ = determine_from_url(baseurl)
+ licenses_basename = "%s-licenses.inc" % pn
+
+ licenses_filename = os.path.join(localfilesdir, licenses_basename)
+ with open(licenses_filename, "w") as f:
+ f.write("GO_MOD_LICENSES = \"%s\"\n\n" %
+ ' & '.join(sorted(licenses, key=str.casefold)))
+ # We introduce this indirection to make the tests a little easier
+ f.write("LIC_FILES_CHKSUM += \"${VENDORED_LIC_FILES_CHKSUM}\"\n")
+ f.write("VENDORED_LIC_FILES_CHKSUM = \"\\\n")
+ for lic in lic_files_chksum:
+ f.write(" " + lic + " \\\n")
+ f.write("\"\n")
+
+ extravalues['extrafiles'][licenses_basename] = licenses_filename
+
+ def process(self, srctree, classes, lines_before,
+ lines_after, handled, extravalues):
+
+ if 'buildsystem' in handled:
+ return False
+
+ files = RecipeHandler.checkfiles(srctree, ['go.mod'])
+ if not files:
+ return False
+
+ d = bb.data.createCopy(tinfoil.config_data)
+ go_bindir = self.__ensure_go()
+ if not go_bindir:
+ sys.exit(14)
+
+ d.prependVar('PATH', '%s:' % go_bindir)
+ handled.append('buildsystem')
+ classes.append("go-vendor")
+
+ stdout, _ = self.__go_run_cmd("go mod edit -json", srctree, d)
+
+ go_mod = json.loads(stdout)
+ go_import = go_mod['Module']['Path']
+ go_version_match = re.match("([0-9]+).([0-9]+)", go_mod['Go'])
+ go_version_major = int(go_version_match.group(1))
+ go_version_minor = int(go_version_match.group(2))
+ src_uris = []
+
+ localfilesdir = tempfile.mkdtemp(prefix='recipetool-go-')
+ extravalues.setdefault('extrafiles', {})
+
+ # Use an explicit name determined from the module name because it
+ # might differ from the actual URL for replaced modules
+ # strip version part from module URL /vXX
+ baseurl = re.sub(r'/v(\d+)$', '', go_mod['Module']['Path'])
+ pn, _ = determine_from_url(baseurl)
+
+ # go.mod files with version < 1.17 may not include all indirect
+ # dependencies. Thus, we have to upgrade the go version.
+ if go_version_major == 1 and go_version_minor < 17:
+ logger.warning(
+ "go.mod files generated by Go < 1.17 might have incomplete indirect dependencies.")
+ go_mod, patchfilename = self.__go_mod_patch(srctree, localfilesdir,
+ extravalues, d)
+ src_uris.append(
+ "file://%s;patchdir=src/${GO_IMPORT}" % (patchfilename))
+
+ # Check whether the module is vendored. If so, we have nothing to do.
+ # Otherwise we gather all dependencies and add them to the recipe
+ if not os.path.exists(os.path.join(srctree, "vendor")):
+
+ # Write additional $BPN-modules.inc file
+ self.__go_mod_vendor(go_mod, srctree, localfilesdir, extravalues, d)
+ lines_before.append("LICENSE += \" & ${GO_MOD_LICENSES}\"")
+ lines_before.append("require %s-licenses.inc" % (pn))
+
+ self.__rewrite_src_uri(lines_before, ["file://modules.txt"])
+
+ self.__go_handle_dependencies(go_mod, srctree, localfilesdir, extravalues, d)
+ lines_before.append("require %s-modules.inc" % (pn))
+
+ # Do generic license handling
+ handle_license_vars(srctree, lines_before, handled, extravalues, d)
+ self.__rewrite_lic_uri(lines_before)
+
+ lines_before.append("GO_IMPORT = \"{}\"".format(baseurl))
+ lines_before.append("SRCREV_FORMAT = \"${BPN}\"")
+
+ def __update_lines_before(self, updated, newlines, lines_before):
+ if updated:
+ del lines_before[:]
+ for line in newlines:
+ # Hack to avoid newlines that edit_metadata inserts
+ if line.endswith('\n'):
+ line = line[:-1]
+ lines_before.append(line)
+ return updated
+
+ def __rewrite_lic_uri(self, lines_before):
+
+ def varfunc(varname, origvalue, op, newlines):
+ if varname == 'LIC_FILES_CHKSUM':
+ new_licenses = []
+ licenses = origvalue.split('\\')
+ for license in licenses:
+ if not license:
+ logger.warning("No license file was detected for the main module!")
+ # The license list of the main recipe is empty in this case;
+ # this can happen e.g. for a CLOSED license.
+ # Fall through to complete recipe generation.
+ continue
+ license = license.strip()
+ uri, chksum = license.split(';', 1)
+ url = urllib.parse.urlparse(uri)
+ new_uri = os.path.join(
+ url.scheme + "://", "src", "${GO_IMPORT}", url.netloc + url.path) + ";" + chksum
+ new_licenses.append(new_uri)
+
+ return new_licenses, None, -1, True
+ return origvalue, None, 0, True
+
+ updated, newlines = bb.utils.edit_metadata(
+ lines_before, ['LIC_FILES_CHKSUM'], varfunc)
+ return self.__update_lines_before(updated, newlines, lines_before)
+
+ def __rewrite_src_uri(self, lines_before, additional_uris=None):
+
+ def varfunc(varname, origvalue, op, newlines):
+ if varname == 'SRC_URI':
+ src_uri = ["git://${GO_IMPORT};destsuffix=git/src/${GO_IMPORT};nobranch=1;name=${BPN};protocol=https"]
+ src_uri.extend(additional_uris or [])
+ return src_uri, None, -1, True
+ return origvalue, None, 0, True
+
+ updated, newlines = bb.utils.edit_metadata(lines_before, ['SRC_URI'], varfunc)
+ return self.__update_lines_before(updated, newlines, lines_before)
+
+
+def register_recipe_handlers(handlers):
+ handlers.append((GoRecipeHandler(), 60))
diff --git a/scripts/lib/recipetool/create_kmod.py b/scripts/lib/recipetool/create_kmod.py
index 85b5c48e53..cc00106961 100644
--- a/scripts/lib/recipetool/create_kmod.py
+++ b/scripts/lib/recipetool/create_kmod.py
@@ -113,7 +113,7 @@ class KernelModuleRecipeHandler(RecipeHandler):
kdirpath, _ = check_target(compile_lines, install=False)
if manual_install or not install_lines:
- lines_after.append('EXTRA_OEMAKE_append_task-install = " -C ${STAGING_KERNEL_DIR} M=${S}"')
+ lines_after.append('EXTRA_OEMAKE:append:task-install = " -C ${STAGING_KERNEL_DIR} M=${S}"')
elif install_target and install_target != 'modules_install':
lines_after.append('MODULES_INSTALL_TARGET = "install"')
diff --git a/scripts/lib/recipetool/create_npm.py b/scripts/lib/recipetool/create_npm.py
index 579b7ae48a..113a89f6a6 100644
--- a/scripts/lib/recipetool/create_npm.py
+++ b/scripts/lib/recipetool/create_npm.py
@@ -6,16 +6,20 @@
"""Recipe creation tool - npm module support plugin"""
import json
+import logging
import os
import re
import sys
import tempfile
import bb
from bb.fetch2.npm import NpmEnvironment
+from bb.fetch2.npm import npm_package
from bb.fetch2.npmsw import foreach_dependencies
from recipetool.create import RecipeHandler
+from recipetool.create import get_license_md5sums
from recipetool.create import guess_license
from recipetool.create import split_pkg_licenses
+logger = logging.getLogger('recipetool')
TINFOIL = None
@@ -28,15 +32,6 @@ class NpmRecipeHandler(RecipeHandler):
"""Class to handle the npm recipe creation"""
@staticmethod
- def _npm_name(name):
- """Generate a Yocto friendly npm name"""
- name = re.sub("/", "-", name)
- name = name.lower()
- name = re.sub(r"[^\-a-z0-9]", "", name)
- name = name.strip("-")
- return name
-
- @staticmethod
def _get_registry(lines):
"""Get the registry value from the 'npm://registry' url"""
registry = None
@@ -118,23 +113,32 @@ class NpmRecipeHandler(RecipeHandler):
licfiles = []
packages = {}
- def _licfiles_append(licfile):
- """Append 'licfile' to the license files list"""
- licfilepath = os.path.join(srctree, licfile)
- licmd5 = bb.utils.md5_file(licfilepath)
- licfiles.append("file://%s;md5=%s" % (licfile, licmd5))
-
# Handle the parent package
- _licfiles_append("package.json")
packages["${PN}"] = ""
+ def _licfiles_append_fallback_readme_files(destdir):
+ """Append README files as fallback to license files if a license files is missing"""
+
+ fallback = True
+ readmes = []
+ basedir = os.path.join(srctree, destdir)
+ for fn in os.listdir(basedir):
+ upper = fn.upper()
+ if upper.startswith("README"):
+ fullpath = os.path.join(basedir, fn)
+ readmes.append(fullpath)
+ if upper.startswith("COPYING") or "LICENCE" in upper or "LICENSE" in upper:
+ fallback = False
+ if fallback:
+ for readme in readmes:
+ licfiles.append(os.path.relpath(readme, srctree))
+
# Handle the dependencies
- def _handle_dependency(name, params, deptree):
- suffix = "-".join([self._npm_name(dep) for dep in deptree])
- destdirs = [os.path.join("node_modules", dep) for dep in deptree]
- destdir = os.path.join(*destdirs)
- _licfiles_append(os.path.join(destdir, "package.json"))
- packages["${PN}-" + suffix] = destdir
+ def _handle_dependency(name, params, destdir):
+ deptree = destdir.split('node_modules/')
+ suffix = "-".join([npm_package(dep) for dep in deptree])
+ packages["${PN}" + suffix] = destdir
+ _licfiles_append_fallback_readme_files(destdir)
with open(shrinkwrap_file, "r") as f:
shrinkwrap = json.load(f)
@@ -142,6 +146,23 @@ class NpmRecipeHandler(RecipeHandler):
foreach_dependencies(shrinkwrap, _handle_dependency, dev)
return licfiles, packages
+
+ # Handle the peer dependencies
+ def _handle_peer_dependency(self, shrinkwrap_file):
+ """Check if package has peer dependencies and show warning if it is the case"""
+ with open(shrinkwrap_file, "r") as f:
+ shrinkwrap = json.load(f)
+
+ packages = shrinkwrap.get("packages", {})
+ peer_deps = packages.get("", {}).get("peerDependencies", {})
+
+ for peer_dep in peer_deps:
+ peer_dep_yocto_name = npm_package(peer_dep)
+ bb.warn(peer_dep + " is a peer dependencie of the actual package. " +
+ "Please add this peer dependencie to the RDEPENDS variable as %s and generate its recipe with devtool"
+ % peer_dep_yocto_name)
+
+
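+ # Illustrative shrinkwrap fragment (assumed shape) that would trigger the
+ # warning above:
+ #
+ #   "packages": { "": { "peerDependencies": { "react": "^18.0.0" } } }
+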
def process(self, srctree, classes, lines_before, lines_after, handled, extravalues):
"""Handle the npm recipe creation"""
@@ -160,7 +181,7 @@ class NpmRecipeHandler(RecipeHandler):
if "name" not in data or "version" not in data:
return False
- extravalues["PN"] = self._npm_name(data["name"])
+ extravalues["PN"] = npm_package(data["name"])
extravalues["PV"] = data["version"]
if "description" in data:
@@ -204,6 +225,9 @@ class NpmRecipeHandler(RecipeHandler):
self._run_npm_install(d, srctree, registry, dev)
shrinkwrap_file = self._generate_shrinkwrap(d, srctree, dev)
+ with open(shrinkwrap_file, "r") as f:
+ shrinkwrap = json.load(f)
+
if os.path.exists(lock_copy):
bb.utils.movefile(lock_copy, lock_file)
@@ -226,7 +250,8 @@ class NpmRecipeHandler(RecipeHandler):
value = origvalue.replace("version=" + data["version"], "version=${PV}")
value = value.replace("version=latest", "version=${PV}")
values = [line.strip() for line in value.strip('\n').splitlines()]
- values.append(url_recipe)
+ if "dependencies" in shrinkwrap.get("packages", {}).get("", {}):
+ values.append(url_recipe)
return values, None, 4, False
(_, newlines) = bb.utils.edit_metadata(lines_before, ["SRC_URI"], _handle_srcuri)
@@ -242,12 +267,42 @@ class NpmRecipeHandler(RecipeHandler):
bb.note("Handling licences ...")
(licfiles, packages) = self._handle_licenses(srctree, shrinkwrap_file, dev)
- extravalues["LIC_FILES_CHKSUM"] = licfiles
- split_pkg_licenses(guess_license(srctree, d), packages, lines_after, [])
+
+ def _guess_odd_license(licfiles):
+ md5sums = get_license_md5sums(d, linenumbers=True)
+
+ chksums = []
+ licenses = []
+ for licfile in licfiles:
+ f = os.path.join(srctree, licfile)
+ md5value = bb.utils.md5_file(f)
+ (license, beginline, endline, md5) = md5sums.get(md5value,
+ (None, "", "", ""))
+ if not license:
+ license = "Unknown"
+ logger.info("Please add the following line for '%s' to a "
+ "'lib/recipetool/licenses.csv' and replace `Unknown`, "
+ "`X`, `Y` and `MD5` with the license, begin line, "
+ "end line and partial MD5 checksum:\n" \
+ "%s,Unknown,X,Y,MD5" % (licfile, md5value))
+ chksums.append("file://%s%s%s;md5=%s" % (licfile,
+ ";beginline=%s" % (beginline) if beginline else "",
+ ";endline=%s" % (endline) if endline else "",
+ md5 if md5 else md5value))
+ licenses.append((license, licfile, md5value))
+ return (licenses, chksums)
+
+ (licenses, extravalues["LIC_FILES_CHKSUM"]) = _guess_odd_license(licfiles)
+ split_pkg_licenses([*licenses, *guess_license(srctree, d)], packages, lines_after)
classes.append("npm")
handled.append("buildsystem")
+ # Check if package has peer dependencies and inform the user
+ self._handle_peer_dependency(shrinkwrap_file)
+
return True
def register_recipe_handlers(handlers):
diff --git a/scripts/lib/recipetool/licenses.csv b/scripts/lib/recipetool/licenses.csv
new file mode 100644
index 0000000000..80851111b3
--- /dev/null
+++ b/scripts/lib/recipetool/licenses.csv
@@ -0,0 +1,37 @@
+0636e73ff0215e8d672dc4c32c317bb3,GPL-2.0-only
+12f884d2ae1ff87c09e5b7ccc2c4ca7e,GPL-2.0-only
+18810669f13b87348459e611d31ab760,GPL-2.0-only
+252890d9eee26aab7b432e8b8a616475,LGPL-2.0-only
+2d5025d4aa3495befef8f17206a5b0a1,LGPL-2.1-only
+3214f080875748938ba060314b4f727d,LGPL-2.0-only
+385c55653886acac3821999a3ccd17b3,Artistic-1.0 | GPL-2.0-only
+393a5ca445f6965873eca0259a17f833,GPL-2.0-only
+3b83ef96387f14655fc854ddc3c6bd57,Apache-2.0
+3bf50002aefd002f49e7bb854063f7e7,LGPL-2.0-only
+4325afd396febcb659c36b49533135d4,GPL-2.0-only
+4fbd65380cdd255951079008b364516c,LGPL-2.1-only
+54c7042be62e169199200bc6477f04d1,BSD-3-Clause
+55ca817ccb7d5b5b66355690e9abc605,LGPL-2.0-only
+59530bdf33659b29e73d4adb9f9f6552,GPL-2.0-only
+5f30f0716dfdd0d91eb439ebec522ec2,LGPL-2.0-only
+6a6a8e020838b23406c81b19c1d46df6,LGPL-3.0-only
+751419260aa954499f7abaabaa882bbe,GPL-2.0-only
+7fbc338309ac38fefcd64b04bb903e34,LGPL-2.1-only
+8ca43cbc842c2336e835926c2166c28b,GPL-2.0-only
+94d55d512a9ba36caa9b7df079bae19f,GPL-2.0-only
+9ac2e7cff1ddaf48b6eab6028f23ef88,GPL-2.0-only
+9f604d8a4f8e74f4f5140845a21b6674,LGPL-2.0-only
+a6f89e2100d9b6cdffcea4f398e37343,LGPL-2.1-only
+b234ee4d69f5fce4486a80fdaf4a4263,GPL-2.0-only
+bbb461211a33b134d42ed5ee802b37ff,LGPL-2.1-only
+bfe1f75d606912a4111c90743d6c7325,MPL-1.1-only
+c93c0550bd3173f4504b2cbd8991e50b,GPL-2.0-only
+d32239bcb673463ab874e80d47fae504,GPL-3.0-only
+d7810fab7487fb0aad327b76f1be7cd7,GPL-2.0-only
+d8045f3b8f929c1cb29a1e3fd737b499,LGPL-2.1-only
+db979804f025cf55aabec7129cb671ed,LGPL-2.0-only
+eb723b61539feef013de476e68b5c50a,GPL-2.0-only
+ebb5c50ab7cab4baeffba14977030c07,GPL-2.0-only
+f27defe1e96c2e1ecd4e0c9be8967949,GPL-3.0-only
+fad9b3332be894bab9bc501572864b29,LGPL-2.1-only
+fbc093901857fcd118f065f900982c24,LGPL-2.1-only
diff --git a/scripts/lib/recipetool/setvar.py b/scripts/lib/recipetool/setvar.py
index f8e2ee75fb..b5ad335cae 100644
--- a/scripts/lib/recipetool/setvar.py
+++ b/scripts/lib/recipetool/setvar.py
@@ -49,6 +49,7 @@ def setvar(args):
for patch in patches:
for line in patch:
sys.stdout.write(line)
+ tinfoil.modified_files()
return 0
diff --git a/scripts/lib/resulttool/log.py b/scripts/lib/resulttool/log.py
index eb3927ec82..15148ca288 100644
--- a/scripts/lib/resulttool/log.py
+++ b/scripts/lib/resulttool/log.py
@@ -28,12 +28,10 @@ def show_reproducible(result, reproducible, logger):
def log(args, logger):
results = resultutils.load_resultsdata(args.source)
- ptest_count = sum(1 for _, _, _, r in resultutils.test_run_results(results) if 'ptestresult.sections' in r)
- if ptest_count > 1 and not args.prepend_run:
- print("%i ptest sections found. '--prepend-run' is required" % ptest_count)
- return 1
-
for _, run_name, _, r in resultutils.test_run_results(results):
+ if args.list_ptest and 'ptestresult.sections' in r:
+ print('\n'.join(sorted(r['ptestresult.sections'].keys())))
+
if args.dump_ptest:
for sectname in ['ptestresult.sections', 'ltpposixresult.sections', 'ltpresult.sections']:
if sectname in r:
@@ -48,6 +46,9 @@ def log(args, logger):
os.makedirs(dest_dir, exist_ok=True)
dest = os.path.join(dest_dir, '%s.log' % name)
+ if os.path.exists(dest):
+ print("Overlapping ptest logs found, skipping %s. The '--prepend-run' option would avoid this" % name)
+ continue
print(dest)
with open(dest, 'w') as f:
f.write(logdata)
@@ -86,6 +87,8 @@ def register_commands(subparsers):
parser.set_defaults(func=log)
parser.add_argument('source',
help='the results file/directory/URL to import')
+ parser.add_argument('--list-ptest', action='store_true',
+ help='list the ptest test names')
parser.add_argument('--ptest', action='append', default=[],
help='show logs for a ptest')
parser.add_argument('--dump-ptest', metavar='DIR',
diff --git a/scripts/lib/resulttool/regression.py b/scripts/lib/resulttool/regression.py
index 9f952951b3..10e7d13841 100644
--- a/scripts/lib/resulttool/regression.py
+++ b/scripts/lib/resulttool/regression.py
@@ -7,15 +7,209 @@
#
import resulttool.resultutils as resultutils
-import json
from oeqa.utils.git import GitRepo
import oeqa.utils.gitarchive as gitarchive
-def compare_result(logger, base_name, target_name, base_result, target_result):
+METADATA_MATCH_TABLE = {
+ "oeselftest": "OESELFTEST_METADATA"
+}
+
+OESELFTEST_METADATA_GUESS_TABLE = {
+ "trigger-build-posttrigger": {
+ "run_all_tests": False,
+ "run_tests": ["buildoptions.SourceMirroring.test_yocto_source_mirror"],
+ "skips": None,
+ "machine": None,
+ "select_tags": None,
+ "exclude_tags": None
+ },
+ "reproducible": {
+ "run_all_tests": False,
+ "run_tests": ["reproducible"],
+ "skips": None,
+ "machine": None,
+ "select_tags": None,
+ "exclude_tags": None
+ },
+ "arch-qemu-quick": {
+ "run_all_tests": True,
+ "run_tests": None,
+ "skips": None,
+ "machine": None,
+ "select_tags": ["machine"],
+ "exclude_tags": None
+ },
+ "arch-qemu-full-x86-or-x86_64": {
+ "run_all_tests": True,
+ "run_tests": None,
+ "skips": None,
+ "machine": None,
+ "select_tags": ["machine", "toolchain-system"],
+ "exclude_tags": None
+ },
+ "arch-qemu-full-others": {
+ "run_all_tests": True,
+ "run_tests": None,
+ "skips": None,
+ "machine": None,
+ "select_tags": ["machine", "toolchain-user"],
+ "exclude_tags": None
+ },
+ "selftest": {
+ "run_all_tests": True,
+ "run_tests": None,
+ "skips": ["distrodata.Distrodata.test_checkpkg", "buildoptions.SourceMirroring.test_yocto_source_mirror", "reproducible"],
+ "machine": None,
+ "select_tags": None,
+ "exclude_tags": ["machine", "toolchain-system", "toolchain-user"]
+ },
+ "bringup": {
+ "run_all_tests": True,
+ "run_tests": None,
+ "skips": ["distrodata.Distrodata.test_checkpkg", "buildoptions.SourceMirroring.test_yocto_source_mirror"],
+ "machine": None,
+ "select_tags": None,
+ "exclude_tags": ["machine", "toolchain-system", "toolchain-user"]
+ }
+}
+
+STATUS_STRINGS = {
+ "None": "No matching test result"
+}
+
+REGRESSIONS_DISPLAY_LIMIT = 50
+
+MISSING_TESTS_BANNER = "-------------------------- Missing tests --------------------------"
+ADDITIONAL_DATA_BANNER = "--------------------- Matches and improvements --------------------"
+
+def test_has_at_least_one_matching_tag(test, tag_list):
+ return "oetags" in test and any(oetag in tag_list for oetag in test["oetags"])
+
+def all_tests_have_at_least_one_matching_tag(results, tag_list):
+ return all(test_has_at_least_one_matching_tag(test_result, tag_list) or test_name.startswith("ptestresult") for (test_name, test_result) in results.items())
+
+def any_test_have_any_matching_tag(results, tag_list):
+ return any(test_has_at_least_one_matching_tag(test, tag_list) for test in results.values())
+
+def have_skipped_test(result, test_prefix):
+ return all(result[test]['status'] == "SKIPPED" for test in result if test.startswith(test_prefix))
+
+def have_all_tests_skipped(result, test_prefixes_list):
+ return all(have_skipped_test(result, test_prefix) for test_prefix in test_prefixes_list)
+
+def guess_oeselftest_metadata(results):
+ """
+ When an oeselftest test result lacks OESELFTEST_METADATA, we can try to guess it from the results' content.
+ Check the results for specific values (absence/presence of oetags, number and names of executed tests...);
+ if they match one of the known configurations from the autobuilder, apply the guessed OESELFTEST_METADATA
+ to allow proper test filtering.
+ This guessing process is tightly coupled to the autobuilder's config.json. It should trigger less and less,
+ as new tests will have OESELFTEST_METADATA properly appended at test reporting time
+ """
+
+ if len(results) == 1 and "buildoptions.SourceMirroring.test_yocto_source_mirror" in results:
+ return OESELFTEST_METADATA_GUESS_TABLE['trigger-build-posttrigger']
+ elif all(result.startswith("reproducible") for result in results):
+ return OESELFTEST_METADATA_GUESS_TABLE['reproducible']
+ elif all_tests_have_at_least_one_matching_tag(results, ["machine"]):
+ return OESELFTEST_METADATA_GUESS_TABLE['arch-qemu-quick']
+ elif all_tests_have_at_least_one_matching_tag(results, ["machine", "toolchain-system"]):
+ return OESELFTEST_METADATA_GUESS_TABLE['arch-qemu-full-x86-or-x86_64']
+ elif all_tests_have_at_least_one_matching_tag(results, ["machine", "toolchain-user"]):
+ return OESELFTEST_METADATA_GUESS_TABLE['arch-qemu-full-others']
+ elif not any_test_have_any_matching_tag(results, ["machine", "toolchain-user", "toolchain-system"]):
+ if have_all_tests_skipped(results, ["distrodata.Distrodata.test_checkpkg", "buildoptions.SourceMirroring.test_yocto_source_mirror", "reproducible"]):
+ return OESELFTEST_METADATA_GUESS_TABLE['selftest']
+ elif have_all_tests_skipped(results, ["distrodata.Distrodata.test_checkpkg", "buildoptions.SourceMirroring.test_yocto_source_mirror"]):
+ return OESELFTEST_METADATA_GUESS_TABLE['bringup']
+
+ return None
+
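+# Illustrative call (assumed input shape: results maps test names to result
+# dicts):
+#
+#   guess_oeselftest_metadata(
+#       {"reproducible.ReproducibleTests.test_reproducible_builds": {...}})
+#
+# returns the 'reproducible' entry of OESELFTEST_METADATA_GUESS_TABLE, since
+# every test name starts with "reproducible".
+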
+
+def metadata_matches(base_configuration, target_configuration):
+ """
+ For passed base and target, check test type. If test type matches one of
+ properties described in METADATA_MATCH_TABLE, compare metadata if it is
+ present in base. Return true if metadata matches, or if base lacks some
+ data (either TEST_TYPE or the corresponding metadata)
+ """
+ test_type = base_configuration.get('TEST_TYPE')
+ if test_type not in METADATA_MATCH_TABLE:
+ return True
+
+ metadata_key = METADATA_MATCH_TABLE.get(test_type)
+ if target_configuration.get(metadata_key) != base_configuration.get(metadata_key):
+ return False
+
+ return True
+
+
+def machine_matches(base_configuration, target_configuration):
+ return base_configuration.get('MACHINE') == target_configuration.get('MACHINE')
+
+
+def can_be_compared(logger, base, target):
+ """
+ Some test results are not relevant for comparison, for example oeselftest
+ runs with different test sets or parameters. Return True if the tests can
+ be compared
+ """
+ ret = True
+ base_configuration = base['configuration']
+ target_configuration = target['configuration']
+
+ # Older test results lack proper OESELFTEST_METADATA: if not present, try to guess it based on tests results.
+ if base_configuration.get('TEST_TYPE') == 'oeselftest' and 'OESELFTEST_METADATA' not in base_configuration:
+ guess = guess_oeselftest_metadata(base['result'])
+ if guess is None:
+ logger.error(f"ERROR: did not manage to guess oeselftest metadata for {base_configuration['STARTTIME']}")
+ else:
+ logger.debug(f"Enriching {base_configuration['STARTTIME']} with {guess}")
+ base_configuration['OESELFTEST_METADATA'] = guess
+ if target_configuration.get('TEST_TYPE') == 'oeselftest' and 'OESELFTEST_METADATA' not in target_configuration:
+ guess = guess_oeselftest_metadata(target['result'])
+ if guess is None:
+ logger.error(f"ERROR: did not manage to guess oeselftest metadata for {target_configuration['STARTTIME']}")
+ else:
+ logger.debug(f"Enriching {target_configuration['STARTTIME']} with {guess}")
+ target_configuration['OESELFTEST_METADATA'] = guess
+
+ # Test runs with LTP results should only be compared with other runs containing LTP tests
+ if base_configuration.get('TEST_TYPE') == 'runtime' and any(result.startswith("ltpresult") for result in base['result']):
+ ret = target_configuration.get('TEST_TYPE') == 'runtime' and any(result.startswith("ltpresult") for result in target['result'])
+
+ return ret and metadata_matches(base_configuration, target_configuration) \
+ and machine_matches(base_configuration, target_configuration)
+
+def get_status_str(raw_status):
+ raw_status_lower = raw_status.lower() if raw_status else "None"
+ return STATUS_STRINGS.get(raw_status_lower, raw_status)
+
+def get_additional_info_line(new_pass_count, new_tests):
+ result = []
+ if new_tests:
+ result.append(f'+{new_tests} test(s) present')
+ if new_pass_count:
+ result.append(f'+{new_pass_count} test(s) now passing')
+
+ if not result:
+ return ""
+
+ return ' -> ' + ', '.join(result) + '\n'
+
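+# Example (illustrative):
+#
+#   >>> get_additional_info_line(new_pass_count=1, new_tests=2)
+#   ' -> +2 test(s) present, +1 test(s) now passing\n'
+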
+def compare_result(logger, base_name, target_name, base_result, target_result, display_limit=None):
base_result = base_result.get('result')
target_result = target_result.get('result')
result = {}
+ new_tests = 0
+ regressions = {}
+ resultstring = ""
+ new_pass_count = 0
+
+ display_limit = int(display_limit) if display_limit else REGRESSIONS_DISPLAY_LIMIT
+
if base_result and target_result:
for k in base_result:
base_testcase = base_result[k]
@@ -27,12 +221,47 @@ def compare_result(logger, base_name, target_name, base_result, target_result):
result[k] = {'base': base_status, 'target': target_status}
else:
logger.error('Failed to retrieve base test case status: %s' % k)
+
+ # Also count new tests that were not present in the base results: these
+ # could be newly added tests, but could also highlight test renames or
+ # fixed faulty ptests
+ for k in target_result:
+ if k not in base_result:
+ new_tests += 1
if result:
- resultstring = "Regression: %s\n %s\n" % (base_name, target_name)
- for k in sorted(result):
- resultstring += ' %s: %s -> %s\n' % (k, result[k]['base'], result[k]['target'])
+ new_pass_count = sum(test['target'] is not None and test['target'].startswith("PASS") for test in result.values())
+ # Print a regression report only if at least one test has a regression status (FAIL, SKIPPED, absent...)
+ if new_pass_count < len(result):
+ resultstring = "Regression: %s\n %s\n" % (base_name, target_name)
+ for k in sorted(result):
+ if not result[k]['target'] or not result[k]['target'].startswith("PASS"):
+ # Differentiate each ptest kind when listing regressions
+ key_parts = k.split('.')
+ key = '.'.join(key_parts[:2]) if k.startswith('ptest') else key_parts[0]
+ # Append new regression to corresponding test family
+ regressions[key] = regressions.setdefault(key, []) + [' %s: %s -> %s\n' % (k, get_status_str(result[k]['base']), get_status_str(result[k]['target']))]
+ resultstring += f" Total: {sum([len(regressions[r]) for r in regressions])} new regression(s):\n"
+ for k in regressions:
+ resultstring += f" {len(regressions[k])} regression(s) for {k}\n"
+ count_to_print=min([display_limit, len(regressions[k])]) if display_limit > 0 else len(regressions[k])
+ resultstring += ''.join(regressions[k][:count_to_print])
+ if count_to_print < len(regressions[k]):
+ resultstring+=' [...]\n'
+ if new_pass_count > 0:
+ resultstring += f' Additionally, {new_pass_count} previously failing test(s) is/are now passing\n'
+ if new_tests > 0:
+ resultstring += f' Additionally, {new_tests} new test(s) is/are present\n'
+ else:
+ resultstring = "%s\n%s\n" % (base_name, target_name)
+ result = None
else:
- resultstring = "Match: %s\n %s" % (base_name, target_name)
+ resultstring = "%s\n%s\n" % (base_name, target_name)
+
+ if not result:
+ additional_info = get_additional_info_line(new_pass_count, new_tests)
+ if additional_info:
+ resultstring += additional_info
+
return result, resultstring
def get_results(logger, source):
@@ -44,12 +273,38 @@ def regression(args, logger):
regression_common(args, logger, base_results, target_results)
+# Some test case naming is poor and contains random strings, particularly lttng/babeltrace.
+# Truncating the test names works since they contain file and line number identifiers
+# which allow us to match them without the random components.
+def fixup_ptest_names(results, logger):
+ for r in results:
+ for i in results[r]:
+ tests = list(results[r][i]['result'].keys())
+ for test in tests:
+ new = None
+ if test.startswith(("ptestresult.lttng-tools.", "ptestresult.babeltrace.", "ptestresult.babeltrace2")) and "_-_" in test:
+ new = test.split("_-_")[0]
+ elif test.startswith(("ptestresult.curl.")) and "__" in test:
+ new = test.split("__")[0]
+ elif test.startswith(("ptestresult.dbus.")) and "__" in test:
+ new = test.split("__")[0]
+ elif test.startswith("ptestresult.binutils") and "build-st-" in test:
+ new = test.split(" ")[0]
+ elif test.startswith("ptestresult.gcc") and "/tmp/runtest." in test:
+ new = ".".join(test.split(".")[:2])
+ if new:
+ results[r][i]['result'][new] = results[r][i]['result'][test]
+ del results[r][i]['result'][test]
+
def regression_common(args, logger, base_results, target_results):
if args.base_result_id:
base_results = resultutils.filter_resultsdata(base_results, args.base_result_id)
if args.target_result_id:
target_results = resultutils.filter_resultsdata(target_results, args.target_result_id)
+ fixup_ptest_names(base_results, logger)
+ fixup_ptest_names(target_results, logger)
+
matches = []
regressions = []
notfound = []
@@ -62,7 +317,9 @@ def regression_common(args, logger, base_results, target_results):
# removing any pairs which match
for c in base.copy():
for b in target.copy():
- res, resstr = compare_result(logger, c, b, base_results[a][c], target_results[a][b])
+ if not can_be_compared(logger, base_results[a][c], target_results[a][b]):
+ continue
+ res, resstr = compare_result(logger, c, b, base_results[a][c], target_results[a][b], args.limit)
if not res:
matches.append(resstr)
base.remove(c)
@@ -71,15 +328,18 @@ def regression_common(args, logger, base_results, target_results):
# Should only now see regressions, we may not be able to match multiple pairs directly
for c in base:
for b in target:
- res, resstr = compare_result(logger, c, b, base_results[a][c], target_results[a][b])
+ if not can_be_compared(logger, base_results[a][c], target_results[a][b]):
+ continue
+ res, resstr = compare_result(logger, c, b, base_results[a][c], target_results[a][b], args.limit)
if res:
regressions.append(resstr)
else:
notfound.append("%s not found in target" % a)
- print("\n".join(sorted(matches)))
print("\n".join(sorted(regressions)))
+ print("\n" + MISSING_TESTS_BANNER + "\n")
print("\n".join(sorted(notfound)))
-
+ print("\n" + ADDITIONAL_DATA_BANNER + "\n")
+ print("\n".join(sorted(matches)))
return 0
def regression_git(args, logger):
@@ -183,4 +443,5 @@ def register_commands(subparsers):
parser_build.add_argument('--commit-number', help="Revision number to search for, redundant if --commit is specified")
parser_build.add_argument('--commit2', help="Revision to compare with")
parser_build.add_argument('--commit-number2', help="Revision number to compare with, redundant if --commit2 is specified")
+ parser_build.add_argument('-l', '--limit', default=REGRESSIONS_DISPLAY_LIMIT, help="Maximum number of changes to display per test. Can be set to 0 to print all changes")
diff --git a/scripts/lib/resulttool/report.py b/scripts/lib/resulttool/report.py
index f0ca50ebe2..a349510ab8 100644
--- a/scripts/lib/resulttool/report.py
+++ b/scripts/lib/resulttool/report.py
@@ -176,7 +176,10 @@ class ResultsTextReport(object):
vals['sort'] = line['testseries'] + "_" + line['result_id']
vals['failed_testcases'] = line['failed_testcases']
for k in cols:
- vals[k] = "%d (%s%%)" % (line[k], format(line[k] / total_tested * 100, '.0f'))
+ if total_tested:
+ vals[k] = "%d (%s%%)" % (line[k], format(line[k] / total_tested * 100, '.0f'))
+ else:
+ vals[k] = "0 (0%)"
for k in maxlen:
if k in vals and len(vals[k]) > maxlen[k]:
maxlen[k] = len(vals[k])
diff --git a/scripts/lib/resulttool/resultutils.py b/scripts/lib/resulttool/resultutils.py
index 8917022d36..c5521d81bd 100644
--- a/scripts/lib/resulttool/resultutils.py
+++ b/scripts/lib/resulttool/resultutils.py
@@ -58,7 +58,11 @@ def append_resultsdata(results, f, configmap=store_map, configvars=extra_configv
testseries = posixpath.basename(posixpath.dirname(url.path))
else:
with open(f, "r") as filedata:
- data = json.load(filedata)
+ try:
+ data = json.load(filedata)
+ except json.decoder.JSONDecodeError:
+ print("Cannot decode {}. Possible corruption. Skipping.".format(f))
+ data = ""
testseries = os.path.basename(os.path.dirname(f))
else:
data = f
@@ -142,7 +146,7 @@ def generic_get_log(sectionname, results, section):
return decode_log(ptest['log'])
def ptestresult_get_log(results, section):
- return generic_get_log('ptestresuls.sections', results, section)
+ return generic_get_log('ptestresult.sections', results, section)
def generic_get_rawlogs(sectname, results):
if sectname not in results:
diff --git a/scripts/lib/scriptutils.py b/scripts/lib/scriptutils.py
index f92255d8dc..f23e53cba9 100644
--- a/scripts/lib/scriptutils.py
+++ b/scripts/lib/scriptutils.py
@@ -5,7 +5,6 @@
# SPDX-License-Identifier: GPL-2.0-only
#
-import argparse
import glob
import logging
import os
@@ -18,13 +17,14 @@ import sys
import tempfile
import threading
import importlib
-from importlib import machinery
+import importlib.machinery
+import importlib.util
class KeepAliveStreamHandler(logging.StreamHandler):
def __init__(self, keepalive=True, **kwargs):
super().__init__(**kwargs)
if keepalive is True:
- keepalive = 5000 # default timeout
+ keepalive = 5000 # default timeout
self._timeout = threading.Condition()
self._stop = False
@@ -35,9 +35,9 @@ class KeepAliveStreamHandler(logging.StreamHandler):
with self._timeout:
if not self._timeout.wait(keepalive):
self.emit(logging.LogRecord("keepalive", logging.INFO,
- None, None, "Keepalive message", None, None))
+ None, None, "Keepalive message", None, None))
- self._thread = threading.Thread(target = thread, daemon = True)
+ self._thread = threading.Thread(target=thread, daemon=True)
self._thread.start()
def close(self):
@@ -71,18 +71,19 @@ def logger_setup_color(logger, color='auto'):
for handler in logger.handlers:
if (isinstance(handler, logging.StreamHandler) and
- isinstance(handler.formatter, BBLogFormatter)):
+ isinstance(handler.formatter, BBLogFormatter)):
if color == 'always' or (color == 'auto' and handler.stream.isatty()):
handler.formatter.enable_color()
def load_plugins(logger, plugins, pluginpath):
-
def load_plugin(name):
logger.debug('Loading plugin %s' % name)
- spec = importlib.machinery.PathFinder.find_spec(name, path=[pluginpath] )
+ spec = importlib.machinery.PathFinder.find_spec(name, path=[pluginpath])
if spec:
- return spec.loader.load_module()
+ mod = importlib.util.module_from_spec(spec)
+ spec.loader.exec_module(mod)
+ return mod
def plugin_name(filename):
return os.path.splitext(os.path.basename(filename))[0]
@@ -176,6 +177,7 @@ def fetch_url(tinfoil, srcuri, srcrev, destdir, logger, preserve_tmp=False, mirr
f.write('BB_STRICT_CHECKSUM = "ignore"\n')
f.write('SRC_URI = "%s"\n' % srcuri)
f.write('SRCREV = "%s"\n' % srcrev)
+ f.write('PV = "0.0+"\n')
f.write('WORKDIR = "%s"\n' % tmpworkdir)
# Set S out of the way so it doesn't get created under the workdir
f.write('S = "%s"\n' % os.path.join(tmpdir, 'emptysrc'))
@@ -215,7 +217,8 @@ def fetch_url(tinfoil, srcuri, srcrev, destdir, logger, preserve_tmp=False, mirr
pathvars = ['T', 'RECIPE_SYSROOT', 'RECIPE_SYSROOT_NATIVE']
for pathvar in pathvars:
path = rd.getVar(pathvar)
- shutil.rmtree(path)
+ if os.path.exists(path):
+ shutil.rmtree(path)
finally:
if fetchrecipe:
try:
@@ -274,6 +277,6 @@ def filter_src_subdirs(pth):
Used by devtool and recipetool.
"""
dirlist = os.listdir(pth)
- filterout = ['git.indirectionsymlink', 'source-date-epoch']
+ filterout = ['git.indirectionsymlink', 'source-date-epoch', 'sstate-install-recipe_qa']
dirlist = [x for x in dirlist if x not in filterout]
return dirlist
diff --git a/scripts/lib/wic/canned-wks/efi-bootdisk.wks.in b/scripts/lib/wic/canned-wks/efi-bootdisk.wks.in
index 7300e65e32..2fd286ff98 100644
--- a/scripts/lib/wic/canned-wks/efi-bootdisk.wks.in
+++ b/scripts/lib/wic/canned-wks/efi-bootdisk.wks.in
@@ -1,3 +1,3 @@
bootloader --ptable gpt
-part /boot --source rootfs --rootfs-dir=${IMAGE_ROOTFS}/boot --fstype=vfat --label boot --active --align 1024 --use-uuid --overhead-factor 1.0
+part /boot --source rootfs --rootfs-dir=${IMAGE_ROOTFS}/boot --fstype=vfat --label boot --active --align 1024 --use-uuid --overhead-factor 1.1
part / --source rootfs --fstype=ext4 --label root --align 1024 --exclude-path boot/
diff --git a/scripts/lib/wic/canned-wks/qemuloongarch.wks b/scripts/lib/wic/canned-wks/qemuloongarch.wks
new file mode 100644
index 0000000000..8465c7a8c0
--- /dev/null
+++ b/scripts/lib/wic/canned-wks/qemuloongarch.wks
@@ -0,0 +1,3 @@
+# short-description: Create qcow2 image for LoongArch QEMU machines
+
+part / --source rootfs --fstype=ext4 --label root --align 4096 --size 5G
diff --git a/scripts/lib/wic/canned-wks/qemux86-directdisk.wks b/scripts/lib/wic/canned-wks/qemux86-directdisk.wks
index 22b45217f1..808997611a 100644
--- a/scripts/lib/wic/canned-wks/qemux86-directdisk.wks
+++ b/scripts/lib/wic/canned-wks/qemux86-directdisk.wks
@@ -4,5 +4,5 @@
include common.wks.inc
-bootloader --timeout=0 --append="rw oprofile.timer=1 rootfstype=ext4 "
+bootloader --timeout=0 --append="rw oprofile.timer=1 rootfstype=ext4 console=tty console=ttyS0 "
diff --git a/scripts/lib/wic/engine.py b/scripts/lib/wic/engine.py
index 018815b966..674ccfc244 100644
--- a/scripts/lib/wic/engine.py
+++ b/scripts/lib/wic/engine.py
@@ -19,10 +19,10 @@ import os
import tempfile
import json
import subprocess
+import shutil
import re
from collections import namedtuple, OrderedDict
-from distutils.spawn import find_executable
from wic import WicError
from wic.filemap import sparse_copy
@@ -245,7 +245,7 @@ class Disk:
for path in pathlist.split(':'):
self.paths = "%s%s:%s" % (native_sysroot, path, self.paths)
- self.parted = find_executable("parted", self.paths)
+ self.parted = shutil.which("parted", path=self.paths)
if not self.parted:
raise WicError("Can't find executable parted")
@@ -283,7 +283,7 @@ class Disk:
"resize2fs", "mkswap", "mkdosfs", "debugfs","blkid"):
aname = "_%s" % name
if aname not in self.__dict__:
- setattr(self, aname, find_executable(name, self.paths))
+ setattr(self, aname, shutil.which(name, path=self.paths))
if aname not in self.__dict__ or self.__dict__[aname] is None:
raise WicError("Can't find executable '{}'".format(name))
return self.__dict__[aname]
diff --git a/scripts/lib/wic/filemap.py b/scripts/lib/wic/filemap.py
index 4d9da28172..85b39d5d74 100644
--- a/scripts/lib/wic/filemap.py
+++ b/scripts/lib/wic/filemap.py
@@ -46,6 +46,13 @@ def get_block_size(file_obj):
bsize = stat.st_blksize
else:
raise IOError("Unable to determine block size")
+
+ # The logic in this script only supports a maximum of a 4KB
+ # block size
+ max_block_size = 4 * 1024
+ if bsize > max_block_size:
+ bsize = max_block_size
+
return bsize
class ErrorNotSupp(Exception):
diff --git a/scripts/lib/wic/help.py b/scripts/lib/wic/help.py
index bd3a2b97df..163535e431 100644
--- a/scripts/lib/wic/help.py
+++ b/scripts/lib/wic/help.py
@@ -637,7 +637,7 @@ DESCRIPTION
oe-core: directdisk.bbclass and mkefidisk.sh. The difference
between wic and those examples is that with wic the functionality
of those scripts is implemented by a general-purpose partitioning
- 'language' based on Redhat kickstart syntax).
+ 'language' based on Red Hat kickstart syntax).
The initial motivation and design considerations that lead to the
current tool are described exhaustively in Yocto Bug #3847
@@ -840,8 +840,8 @@ DESCRIPTION
meanings. The commands are based on the Fedora kickstart
documentation but with modifications to reflect wic capabilities.
- http://fedoraproject.org/wiki/Anaconda/Kickstart#part_or_partition
- http://fedoraproject.org/wiki/Anaconda/Kickstart#bootloader
+ https://pykickstart.readthedocs.io/en/latest/kickstart-docs.html#part-or-partition
+ https://pykickstart.readthedocs.io/en/latest/kickstart-docs.html#bootloader
Commands
@@ -930,6 +930,7 @@ DESCRIPTION
ext4
btrfs
squashfs
+ erofs
swap
--fsoptions: Specifies a free-form string of options to be
@@ -939,6 +940,12 @@ DESCRIPTION
quotes. If not specified, the default string is
"defaults".
+ --fspassno: Specifies the order in which filesystem checks are done
+ at boot time by fsck. See fs_passno parameter of
+ fstab(5). This parameter will be copied into the
+ /etc/fstab file of the installed system. If not
+ specified the default value of "0" will be used.
+
--label label: Specifies the label to give to the filesystem
to be made on the partition. If the given
label is already in use by another filesystem,
@@ -990,6 +997,9 @@ DESCRIPTION
multiple partitions and we want to keep the right
permissions and usernames in all the partitions.
+ --no-fstab-update: This option is specific to wic. It prevents wic from
+ updating the stock '/etc/fstab' file for the given
+ partition.
+
--extra-space: This option is specific to wic. It adds extra
space after the space filled by the content
of the partition. The final size can go
@@ -1108,7 +1118,7 @@ COMMAND:
TOPIC:
overview - Presents an overall overview of Wic
plugins - Presents an overview and API for Wic plugins
- kickstart - Presents a Wic kicstart file reference
+ kickstart - Presents a Wic kickstart file reference
Examples:
diff --git a/scripts/lib/wic/ksparser.py b/scripts/lib/wic/ksparser.py
index 913e3283dc..7ef3dc83dd 100644
--- a/scripts/lib/wic/ksparser.py
+++ b/scripts/lib/wic/ksparser.py
@@ -155,9 +155,11 @@ class KickStart():
part.add_argument('--change-directory')
part.add_argument("--extra-space", type=sizetype("M"))
part.add_argument('--fsoptions', dest='fsopts')
+ part.add_argument('--fspassno', dest='fspassno')
part.add_argument('--fstype', default='vfat',
choices=('ext2', 'ext3', 'ext4', 'btrfs',
- 'squashfs', 'vfat', 'msdos', 'swap'))
+ 'squashfs', 'vfat', 'msdos', 'erofs',
+ 'swap', 'none'))
part.add_argument('--mkfs-extraopts', default='')
part.add_argument('--label')
part.add_argument('--use-label', action='store_true')
@@ -169,6 +171,7 @@ class KickStart():
part.add_argument('--rootfs-dir')
part.add_argument('--type', default='primary',
choices = ('primary', 'logical'))
+ part.add_argument('--hidden', action='store_true')
# --size and --fixed-size cannot be specified together; options
# ----extra-space and --overhead-factor should also raise a parser
@@ -184,11 +187,13 @@ class KickStart():
part.add_argument('--use-uuid', action='store_true')
part.add_argument('--uuid')
part.add_argument('--fsuuid')
+ part.add_argument('--no-fstab-update', action='store_true')
+ part.add_argument('--mbr', action='store_true')
bootloader = subparsers.add_parser('bootloader')
bootloader.add_argument('--append')
bootloader.add_argument('--configfile')
- bootloader.add_argument('--ptable', choices=('msdos', 'gpt'),
+ bootloader.add_argument('--ptable', choices=('msdos', 'gpt', 'gpt-hybrid'),
default='msdos')
bootloader.add_argument('--timeout', type=int)
bootloader.add_argument('--source')
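(Editor's note: a minimal, standalone sketch of the argparse-subparser technique the KickStart class uses above; the names and the example line are illustrative, not wic's actual code.)

    # Parse a kickstart-style line by dispatching on its first word.
    import argparse
    import shlex

    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers(dest='command')
    part = subparsers.add_parser('part')
    part.add_argument('mountpoint', nargs='?')
    part.add_argument('--fstype', default='vfat')
    part.add_argument('--fspassno')
    part.add_argument('--hidden', action='store_true')

    line = "part /boot --fstype vfat --hidden"
    parsed = parser.parse_args(shlex.split(line))
    print(parsed.command, parsed.mountpoint, parsed.fstype, parsed.hidden)
    # part /boot vfat True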
@@ -229,6 +234,27 @@ class KickStart():
err = "%s:%d: SquashFS does not support LABEL" \
% (confpath, lineno)
raise KickStartError(err)
+ # erofs does not support filesystem labels
+ if parsed.fstype == 'erofs' and parsed.label:
+ err = "%s:%d: erofs does not support LABEL" % (confpath, lineno)
+ raise KickStartError(err)
+ if parsed.fstype == 'msdos' or parsed.fstype == 'vfat':
+ if parsed.fsuuid:
+ if parsed.fsuuid.upper().startswith('0X'):
+ if len(parsed.fsuuid) > 10:
+ err = "%s:%d: fsuuid %s given in wks kickstart file " \
+ "exceeds the length limit for %s filesystem. " \
+ "It should be in the form of a 32 bit hexadecimal" \
+ "number (for example, 0xABCD1234)." \
+ % (confpath, lineno, parsed.fsuuid, parsed.fstype)
+ raise KickStartError(err)
+ elif len(parsed.fsuuid) > 8:
+ err = "%s:%d: fsuuid %s given in wks kickstart file " \
+ "exceeds the length limit for %s filesystem. " \
+ "It should be in the form of a 32 bit hexadecimal" \
+ "number (for example, 0xABCD1234)." \
+ % (confpath, lineno, parsed.fsuuid, parsed.fstype)
+ raise KickStartError(err)
if parsed.use_label and not parsed.label:
err = "%s:%d: Must set the label with --label" \
% (confpath, lineno)
diff --git a/scripts/lib/wic/misc.py b/scripts/lib/wic/misc.py
index 4b08d649c6..1a7c140fa6 100644
--- a/scripts/lib/wic/misc.py
+++ b/scripts/lib/wic/misc.py
@@ -16,16 +16,17 @@ import logging
import os
import re
import subprocess
+import shutil
from collections import defaultdict
-from distutils import spawn
from wic import WicError
logger = logging.getLogger('wic')
# executable -> recipe pairs for exec_native_cmd
-NATIVE_RECIPES = {"bmaptool": "bmap-tools",
+NATIVE_RECIPES = {"bmaptool": "bmaptool",
+ "dumpe2fs": "e2fsprogs",
"grub-mkimage": "grub-efi",
"isohybrid": "syslinux",
"mcopy": "mtools",
@@ -35,6 +36,7 @@ NATIVE_RECIPES = {"bmaptool": "bmap-tools",
"mkdosfs": "dosfstools",
"mkisofs": "cdrtools",
"mkfs.btrfs": "btrfs-tools",
+ "mkfs.erofs": "erofs-utils",
"mkfs.ext2": "e2fsprogs",
"mkfs.ext3": "e2fsprogs",
"mkfs.ext4": "e2fsprogs",
@@ -121,7 +123,7 @@ def find_executable(cmd, paths):
if provided and "%s-native" % recipe in provided:
return True
- return spawn.find_executable(cmd, paths)
+ return shutil.which(cmd, path=paths)
def exec_native_cmd(cmd_and_args, native_sysroot, pseudo=""):
"""
@@ -138,9 +140,13 @@ def exec_native_cmd(cmd_and_args, native_sysroot, pseudo=""):
if pseudo:
cmd_and_args = pseudo + cmd_and_args
- native_paths = "%s/sbin:%s/usr/sbin:%s/usr/bin:%s/bin" % \
+ hosttools_dir = get_bitbake_var("HOSTTOOLS_DIR")
+ target_sys = get_bitbake_var("TARGET_SYS")
+
+ native_paths = "%s/sbin:%s/usr/sbin:%s/usr/bin:%s/usr/bin/%s:%s/bin:%s" % \
(native_sysroot, native_sysroot,
- native_sysroot, native_sysroot)
+ native_sysroot, native_sysroot, target_sys,
+ native_sysroot, hosttools_dir)
native_cmd_and_args = "export PATH=%s:$PATH;%s" % \
(native_paths, cmd_and_args)
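(Editor's note: the find_executable() change above is forward-looking because distutils was deprecated in Python 3.10 and removed in 3.12; shutil.which() is a drop-in replacement for this use. A small illustration with an assumed sysroot path:)

    # shutil.which() accepts an explicit search path, like the removed
    # distutils.spawn.find_executable(cmd, paths) did.
    import shutil

    native_sysroot = "/path/to/native-sysroot"   # illustrative value
    paths = ":".join("%s%s" % (native_sysroot, d)
                     for d in ("/sbin", "/usr/sbin", "/usr/bin", "/bin"))
    print(shutil.which("mkfs.ext4", path=paths))  # full path, or None if absent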
diff --git a/scripts/lib/wic/partition.py b/scripts/lib/wic/partition.py
index ebe250b00d..795707ec5d 100644
--- a/scripts/lib/wic/partition.py
+++ b/scripts/lib/wic/partition.py
@@ -33,6 +33,7 @@ class Partition():
self.include_path = args.include_path
self.change_directory = args.change_directory
self.fsopts = args.fsopts
+ self.fspassno = args.fspassno
self.fstype = args.fstype
self.label = args.label
self.use_label = args.use_label
@@ -54,6 +55,12 @@ class Partition():
self.uuid = args.uuid
self.fsuuid = args.fsuuid
self.type = args.type
+ self.no_fstab_update = args.no_fstab_update
+ self.updated_fstab_path = None
+ self.has_fstab = False
+ self.update_fstab_in_rootfs = False
+ self.hidden = args.hidden
+ self.mbr = args.mbr
self.lineno = lineno
self.source_file = ""
@@ -101,7 +108,7 @@ class Partition():
extra_blocks = self.extra_space
rootfs_size = actual_rootfs_size + extra_blocks
- rootfs_size *= self.overhead_factor
+ rootfs_size = int(rootfs_size * self.overhead_factor)
logger.debug("Added %d extra blocks to %s to get to %d total blocks",
extra_blocks, self.mountpoint, rootfs_size)
@@ -118,12 +125,18 @@ class Partition():
return self.fixed_size if self.fixed_size else self.size
def prepare(self, creator, cr_workdir, oe_builddir, rootfs_dir,
- bootimg_dir, kernel_dir, native_sysroot):
+ bootimg_dir, kernel_dir, native_sysroot, updated_fstab_path):
"""
Prepare content for individual partitions, depending on
partition command parameters.
"""
+ self.updated_fstab_path = updated_fstab_path
+ if self.updated_fstab_path and not (self.fstype.startswith("ext") or self.fstype == "msdos"):
+ self.update_fstab_in_rootfs = True
+
if not self.source:
+ if self.fstype == "none" or self.no_table:
+ return
if not self.size and not self.fixed_size:
raise WicError("The %s partition has a size of zero. Please "
"specify a non-zero --size/--fixed-size for that "
@@ -134,9 +147,9 @@ class Partition():
native_sysroot)
self.source_file = "%s/fs.%s" % (cr_workdir, self.fstype)
else:
- if self.fstype == 'squashfs':
- raise WicError("It's not possible to create empty squashfs "
- "partition '%s'" % (self.mountpoint))
+ if self.fstype in ('squashfs', 'erofs'):
+ raise WicError("It's not possible to create empty %s "
+ "partition '%s'" % (self.fstype, self.mountpoint))
rootfs = "%s/fs_%s.%s.%s" % (cr_workdir, self.label,
self.lineno, self.fstype)
@@ -163,7 +176,7 @@ class Partition():
# Split sourceparams string of the form key1=val1[,key2=val2,...]
# into a dict. Also accepts valueless keys i.e. without =
splitted = self.sourceparams.split(',')
- srcparams_dict = dict(par.split('=', 1) for par in splitted if par)
+ srcparams_dict = dict((par.split('=', 1) + [None])[:2] for par in splitted if par)
plugin = PluginMgr.get_plugins('source')[self.source]
plugin.do_configure_partition(self, srcparams_dict, creator,
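(Editor's note: the `(par.split('=', 1) + [None])[:2]` idiom above is what lets valueless keys parse cleanly; a standalone sketch with illustrative input:)

    # Valueless keys ('fill') map to None instead of raising; '=' inside a
    # value survives because split('=', 1) splits only on the first '='.
    sourceparams = "loader=systemd-boot,create-unified-kernel-image=true,fill"
    splitted = sourceparams.split(',')
    srcparams_dict = dict((par.split('=', 1) + [None])[:2] for par in splitted if par)
    print(srcparams_dict)
    # {'loader': 'systemd-boot', 'create-unified-kernel-image': 'true', 'fill': None}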
@@ -207,11 +220,21 @@ class Partition():
p_prefix = os.environ.get("PSEUDO_PREFIX", "%s/usr" % native_sysroot)
if (pseudo_dir):
+ # Canonicalize the ignore paths. This corresponds to
+ # calling oe.path.canonicalize(), which is used in bitbake.conf.
+ ignore_paths = [rootfs] + (get_bitbake_var("PSEUDO_IGNORE_PATHS") or "").split(",")
+ canonical_paths = []
+ for path in ignore_paths:
+ if "$" not in path:
+ trailing_slash = path.endswith("/") and "/" or ""
+ canonical_paths.append(os.path.realpath(path) + trailing_slash)
+ ignore_paths = ",".join(canonical_paths)
+
pseudo = "export PSEUDO_PREFIX=%s;" % p_prefix
pseudo += "export PSEUDO_LOCALSTATEDIR=%s;" % pseudo_dir
pseudo += "export PSEUDO_PASSWD=%s;" % rootfs_dir
pseudo += "export PSEUDO_NOSYMLINKEXP=1;"
- pseudo += "export PSEUDO_IGNORE_PATHS=%s;" % (rootfs + "," + (get_bitbake_var("PSEUDO_IGNORE_PATHS") or ""))
+ pseudo += "export PSEUDO_IGNORE_PATHS=%s;" % ignore_paths
pseudo += "%s " % get_bitbake_var("FAKEROOTCMD")
else:
pseudo = None
@@ -237,7 +260,7 @@ class Partition():
prefix = "ext" if self.fstype.startswith("ext") else self.fstype
method = getattr(self, "prepare_rootfs_" + prefix)
- method(rootfs, oe_builddir, rootfs_dir, native_sysroot, pseudo)
+ method(rootfs, cr_workdir, oe_builddir, rootfs_dir, native_sysroot, pseudo)
self.source_file = rootfs
# get the rootfs size in the right units for kickstart (kB)
@@ -245,7 +268,7 @@ class Partition():
out = exec_cmd(du_cmd)
self.size = int(out.split()[0])
- def prepare_rootfs_ext(self, rootfs, oe_builddir, rootfs_dir,
+ def prepare_rootfs_ext(self, rootfs, cr_workdir, oe_builddir, rootfs_dir,
native_sysroot, pseudo):
"""
Prepare content for an ext2/3/4 rootfs partition.
@@ -261,6 +284,20 @@ class Partition():
extraopts = self.mkfs_extraopts or "-F -i 8192"
+ if os.getenv('SOURCE_DATE_EPOCH'):
+ sde_time = int(os.getenv('SOURCE_DATE_EPOCH'))
+ if pseudo:
+ pseudo = "export E2FSPROGS_FAKE_TIME=%s;%s " % (sde_time, pseudo)
+ else:
+ pseudo = "export E2FSPROGS_FAKE_TIME=%s; " % sde_time
+
+ # Set hash_seed to generate deterministic directory indexes
+ namespace = uuid.UUID("e7429877-e7b3-4a68-a5c9-2f2fdf33d460")
+ if self.fsuuid:
+ namespace = uuid.UUID(self.fsuuid)
+ hash_seed = str(uuid.uuid5(namespace, str(sde_time)))
+ extraopts += " -E hash_seed=%s" % hash_seed
+
label_str = ""
if self.label:
label_str = "-L %s" % self.label
@@ -269,10 +306,45 @@ class Partition():
(self.fstype, extraopts, rootfs, label_str, self.fsuuid, rootfs_dir)
exec_native_cmd(mkfs_cmd, native_sysroot, pseudo=pseudo)
+ if self.updated_fstab_path and self.has_fstab and not self.no_fstab_update:
+ debugfs_script_path = os.path.join(cr_workdir, "debugfs_script")
+ with open(debugfs_script_path, "w") as f:
+ f.write("cd etc\n")
+ f.write("rm fstab\n")
+ f.write("write %s fstab\n" % (self.updated_fstab_path))
+ debugfs_cmd = "debugfs -w -f %s %s" % (debugfs_script_path, rootfs)
+ exec_native_cmd(debugfs_cmd, native_sysroot)
+
mkfs_cmd = "fsck.%s -pvfD %s" % (self.fstype, rootfs)
exec_native_cmd(mkfs_cmd, native_sysroot, pseudo=pseudo)
- def prepare_rootfs_btrfs(self, rootfs, oe_builddir, rootfs_dir,
+ if os.getenv('SOURCE_DATE_EPOCH'):
+ sde_time = hex(int(os.getenv('SOURCE_DATE_EPOCH')))
+ debugfs_script_path = os.path.join(cr_workdir, "debugfs_script")
+ files = []
+ for root, dirs, others in os.walk(rootfs_dir):
+ base = root.replace(rootfs_dir, "").rstrip(os.sep)
+ files += [ "/" if base == "" else base ]
+ files += [ base + "/" + n for n in dirs + others ]
+ with open(debugfs_script_path, "w") as f:
+ f.write("set_current_time %s\n" % (sde_time))
+ if self.updated_fstab_path and self.has_fstab and not self.no_fstab_update:
+ f.write("set_inode_field /etc/fstab mtime %s\n" % (sde_time))
+ f.write("set_inode_field /etc/fstab mtime_extra 0\n")
+ for file in set(files):
+ for time in ["atime", "ctime", "crtime"]:
+ f.write("set_inode_field \"%s\" %s %s\n" % (file, time, sde_time))
+ f.write("set_inode_field \"%s\" %s_extra 0\n" % (file, time))
+ for time in ["wtime", "mkfs_time", "lastcheck"]:
+ f.write("set_super_value %s %s\n" % (time, sde_time))
+ for time in ["mtime", "first_error_time", "last_error_time"]:
+ f.write("set_super_value %s 0\n" % (time))
+ debugfs_cmd = "debugfs -w -f %s %s" % (debugfs_script_path, rootfs)
+ exec_native_cmd(debugfs_cmd, native_sysroot)
+
+ self.check_for_Y2038_problem(rootfs, native_sysroot)
+
+ def prepare_rootfs_btrfs(self, rootfs, cr_workdir, oe_builddir, rootfs_dir,
native_sysroot, pseudo):
"""
Prepare content for a btrfs rootfs partition.
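(Editor's note: the hash_seed derivation in the ext hunk above makes directory indexes reproducible: the same SOURCE_DATE_EPOCH and namespace always yield the same seed. A standalone sketch of the idea, with an illustrative timestamp:)

    # uuid5 is a deterministic, namespaced SHA-1 UUID: identical inputs give
    # an identical seed, so mkfs -E hash_seed=... produces stable dir hashes.
    import uuid

    sde_time = 1700000000                       # illustrative SOURCE_DATE_EPOCH
    namespace = uuid.UUID("e7429877-e7b3-4a68-a5c9-2f2fdf33d460")
    hash_seed = str(uuid.uuid5(namespace, str(sde_time)))
    assert hash_seed == str(uuid.uuid5(namespace, str(sde_time)))  # reproducible
    print("-E hash_seed=%s" % hash_seed)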
@@ -295,7 +367,7 @@ class Partition():
self.mkfs_extraopts, self.fsuuid, rootfs)
exec_native_cmd(mkfs_cmd, native_sysroot, pseudo=pseudo)
- def prepare_rootfs_msdos(self, rootfs, oe_builddir, rootfs_dir,
+ def prepare_rootfs_msdos(self, rootfs, cr_workdir, oe_builddir, rootfs_dir,
native_sysroot, pseudo):
"""
Prepare content for a msdos/vfat rootfs partition.
@@ -311,8 +383,6 @@ class Partition():
label_str = "-n %s" % self.label
size_str = ""
- if self.fstype == 'msdos':
- size_str = "-F 16" # FAT 16
extraopts = self.mkfs_extraopts or '-S 512'
@@ -324,12 +394,16 @@ class Partition():
mcopy_cmd = "mcopy -i %s -s %s/* ::/" % (rootfs, rootfs_dir)
exec_native_cmd(mcopy_cmd, native_sysroot)
+ if self.updated_fstab_path and self.has_fstab and not self.no_fstab_update:
+ mcopy_cmd = "mcopy -m -i %s %s ::/etc/fstab" % (rootfs, self.updated_fstab_path)
+ exec_native_cmd(mcopy_cmd, native_sysroot)
+
chmod_cmd = "chmod 644 %s" % rootfs
exec_cmd(chmod_cmd)
prepare_rootfs_vfat = prepare_rootfs_msdos
- def prepare_rootfs_squashfs(self, rootfs, oe_builddir, rootfs_dir,
+ def prepare_rootfs_squashfs(self, rootfs, cr_workdir, oe_builddir, rootfs_dir,
native_sysroot, pseudo):
"""
Prepare content for a squashfs rootfs partition.
@@ -339,6 +413,19 @@ class Partition():
(rootfs_dir, rootfs, extraopts)
exec_native_cmd(squashfs_cmd, native_sysroot, pseudo=pseudo)
+ def prepare_rootfs_erofs(self, rootfs, cr_workdir, oe_builddir, rootfs_dir,
+ native_sysroot, pseudo):
+ """
+ Prepare content for an erofs rootfs partition.
+ """
+ extraopts = self.mkfs_extraopts or ''
+ erofs_cmd = "mkfs.erofs %s -U %s %s %s" % \
+ (extraopts, self.fsuuid, rootfs, rootfs_dir)
+ exec_native_cmd(erofs_cmd, native_sysroot, pseudo=pseudo)
+
+ def prepare_empty_partition_none(self, rootfs, oe_builddir, native_sysroot):
+ pass
+
def prepare_empty_partition_ext(self, rootfs, oe_builddir,
native_sysroot):
"""
@@ -358,6 +445,8 @@ class Partition():
(self.fstype, extraopts, label_str, self.fsuuid, rootfs)
exec_native_cmd(mkfs_cmd, native_sysroot)
+ self.check_for_Y2038_problem(rootfs, native_sysroot)
+
def prepare_empty_partition_btrfs(self, rootfs, oe_builddir,
native_sysroot):
"""
@@ -388,8 +477,6 @@ class Partition():
label_str = "-n %s" % self.label
size_str = ""
- if self.fstype == 'msdos':
- size_str = "-F 16" # FAT 16
extraopts = self.mkfs_extraopts or '-S 512'
@@ -419,3 +506,37 @@ class Partition():
mkswap_cmd = "mkswap %s -U %s %s" % (label_str, self.fsuuid, path)
exec_native_cmd(mkswap_cmd, native_sysroot)
+
+ def check_for_Y2038_problem(self, rootfs, native_sysroot):
+ """
+ Check if the filesystem is affected by the Y2038 problem
+ (Y2038 problem = 32 bit time_t overflow in January 2038)
+ """
+ def get_err_str(part):
+ err = "The {} filesystem {} has no Y2038 support."
+ if part.mountpoint:
+ args = [part.fstype, "mounted at %s" % part.mountpoint]
+ elif part.label:
+ args = [part.fstype, "labeled '%s'" % part.label]
+ elif part.part_name:
+ args = [part.fstype, "in partition '%s'" % part.part_name]
+ else:
+ args = [part.fstype, "in partition %s" % part.num]
+ return err.format(*args)
+
+ # ext2 and ext3 are always affected by the Y2038 problem
+ if self.fstype in ["ext2", "ext3"]:
+ logger.warn(get_err_str(self))
+ return
+
+ ret, out = exec_native_cmd("dumpe2fs %s" % rootfs, native_sysroot)
+
+ # whether ext4 is affected by the Y2038 problem depends on the inode size
+ for line in out.splitlines():
+ if line.startswith("Inode size:"):
+ size = int(line.split(":")[1].strip())
+ if size < 256:
+ logger.warn("%s Inodes (of size %d) are too small." %
+ (get_err_str(self), size))
+ break
+
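(Editor's note: for reference, the overflow instant the check above guards against, a 32-bit signed time_t running out, can be computed directly:)

    # A 32-bit signed time_t overflows at 2**31 - 1 seconds past the epoch.
    import datetime

    limit = datetime.datetime.fromtimestamp(2**31 - 1, datetime.timezone.utc)
    print(limit)  # 2038-01-19 03:14:07+00:00
    # ext4 escapes this only with inodes >= 256 bytes, which carry extra
    # timestamp bits; ext2/ext3 inodes have no such extension.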
diff --git a/scripts/lib/wic/pluginbase.py b/scripts/lib/wic/pluginbase.py
index d9b4e57747..b64568339b 100644
--- a/scripts/lib/wic/pluginbase.py
+++ b/scripts/lib/wic/pluginbase.py
@@ -9,9 +9,11 @@ __all__ = ['ImagerPlugin', 'SourcePlugin']
import os
import logging
+import types
from collections import defaultdict
-from importlib.machinery import SourceFileLoader
+import importlib
+import importlib.util
from wic import WicError
from wic.misc import get_bitbake_var
@@ -54,7 +56,9 @@ class PluginMgr:
mname = fname[:-3]
mpath = os.path.join(ppath, fname)
logger.debug("loading plugin module %s", mpath)
- SourceFileLoader(mname, mpath).load_module()
+ spec = importlib.util.spec_from_file_location(mname, mpath)
+ module = importlib.util.module_from_spec(spec)
+ spec.loader.exec_module(module)
return PLUGINS.get(ptype)
diff --git a/scripts/lib/wic/plugins/imager/direct.py b/scripts/lib/wic/plugins/imager/direct.py
index 55db826e93..a1d152659b 100644
--- a/scripts/lib/wic/plugins/imager/direct.py
+++ b/scripts/lib/wic/plugins/imager/direct.py
@@ -54,15 +54,16 @@ class DirectPlugin(ImagerPlugin):
self.native_sysroot = native_sysroot
self.oe_builddir = oe_builddir
+ self.debug = options.debug
self.outdir = options.outdir
self.compressor = options.compressor
self.bmap = options.bmap
self.no_fstab_update = options.no_fstab_update
- self.original_fstab = None
+ self.updated_fstab_path = None
self.name = "%s-%s" % (os.path.splitext(os.path.basename(wks_file))[0],
strftime("%Y%m%d%H%M"))
- self.workdir = tempfile.mkdtemp(dir=self.outdir, prefix='tmp.wic.')
+ self.workdir = self.setup_workdir(options.workdir)
self._image = None
self.ptable_format = self.ks.bootloader.ptable
self.parts = self.ks.partitions
@@ -76,7 +77,18 @@ class DirectPlugin(ImagerPlugin):
image_path = self._full_path(self.workdir, self.parts[0].disk, "direct")
self._image = PartitionedImage(image_path, self.ptable_format,
- self.parts, self.native_sysroot)
+ self.parts, self.native_sysroot,
+ options.extra_space)
+
+ def setup_workdir(self, workdir):
+ if workdir:
+ if os.path.exists(workdir):
+ raise WicError("Internal workdir '%s' specified in wic arguments already exists!" % (workdir))
+
+ os.makedirs(workdir)
+ return workdir
+ else:
+ return tempfile.mkdtemp(dir=self.outdir, prefix='tmp.wic.')
def do_create(self):
"""
@@ -90,11 +102,8 @@ class DirectPlugin(ImagerPlugin):
finally:
self.cleanup()
-    def _write_fstab(self, image_rootfs):
-        """overriden to generate fstab (temporarily) in rootfs. This is called
-        from _create, make sure it doesn't get called from
-        BaseImage.create()
-        """
+    def update_fstab(self, image_rootfs):
+        """Assume the partition order is the same as in the wks file"""
if not image_rootfs:
return
@@ -104,20 +113,11 @@ class DirectPlugin(ImagerPlugin):
with open(fstab_path) as fstab:
fstab_lines = fstab.readlines()
- self.original_fstab = fstab_lines.copy()
-
- if self._update_fstab(fstab_lines, self.parts):
- with open(fstab_path, "w") as fstab:
- fstab.writelines(fstab_lines)
- else:
- self.original_fstab = None
- def _update_fstab(self, fstab_lines, parts):
- """Assume partition order same as in wks"""
updated = False
- for part in parts:
+ for part in self.parts:
if not part.realnum or not part.mountpoint \
- or part.mountpoint == "/":
+ or part.mountpoint == "/" or not (part.mountpoint.startswith('/') or part.mountpoint == "swap"):
continue
if part.use_uuid:
@@ -138,13 +138,20 @@ class DirectPlugin(ImagerPlugin):
device_name = "/dev/%s%s%d" % (part.disk, prefix, part.realnum)
opts = part.fsopts if part.fsopts else "defaults"
+ passno = part.fspassno if part.fspassno else "0"
line = "\t".join([device_name, part.mountpoint, part.fstype,
- opts, "0", "0"]) + "\n"
+ opts, "0", passno]) + "\n"
fstab_lines.append(line)
updated = True
- return updated
+ if updated:
+ self.updated_fstab_path = os.path.join(self.workdir, "fstab")
+ with open(self.updated_fstab_path, "w") as f:
+ f.writelines(fstab_lines)
+ if os.getenv('SOURCE_DATE_EPOCH'):
+ fstab_time = int(os.getenv('SOURCE_DATE_EPOCH'))
+ os.utime(self.updated_fstab_path, (fstab_time, fstab_time))
def _full_path(self, path, name, extention):
""" Construct full file path to a file we generate. """
@@ -160,7 +167,7 @@ class DirectPlugin(ImagerPlugin):
a partitioned image.
"""
if not self.no_fstab_update:
- self._write_fstab(self.rootfs_dir.get("ROOTFS_DIR"))
+ self.update_fstab(self.rootfs_dir.get("ROOTFS_DIR"))
for part in self.parts:
# get rootfs size from bitbake variable if it's not set in .ks file
@@ -256,6 +263,8 @@ class DirectPlugin(ImagerPlugin):
if part.mountpoint == "/":
if part.uuid:
return "PARTUUID=%s" % part.uuid
+ elif part.label and self.ptable_format != 'msdos':
+ return "PARTLABEL=%s" % part.label
else:
suffix = 'p' if part.disk.startswith('mmcblk') else ''
return "/dev/%s%s%-d" % (part.disk, suffix, part.realnum)
@@ -273,14 +282,9 @@ class DirectPlugin(ImagerPlugin):
if os.path.isfile(path):
shutil.move(path, os.path.join(self.outdir, fname))
- #Restore original fstab
- if self.original_fstab:
- fstab_path = self.rootfs_dir.get("ROOTFS_DIR") + "/etc/fstab"
- with open(fstab_path, "w") as fstab:
- fstab.writelines(self.original_fstab)
-
- # remove work directory
- shutil.rmtree(self.workdir, ignore_errors=True)
+ # remove work directory when it is not in debugging mode
+ if not self.debug:
+ shutil.rmtree(self.workdir, ignore_errors=True)
# Overhead of the MBR partitioning scheme (just one sector)
MBR_OVERHEAD = 1
@@ -296,7 +300,7 @@ class PartitionedImage():
Partitioned image in a file.
"""
- def __init__(self, path, ptable_format, partitions, native_sysroot=None):
+ def __init__(self, path, ptable_format, partitions, native_sysroot=None, extra_space=0):
self.path = path # Path to the image file
self.numpart = 0 # Number of allocated partitions
self.realpart = 0 # Number of partitions in the partition table
@@ -309,7 +313,10 @@ class PartitionedImage():
# all partitions (in bytes)
self.ptable_format = ptable_format # Partition table format
# Disk system identifier
- self.identifier = random.SystemRandom().randint(1, 0xffffffff)
+ if os.getenv('SOURCE_DATE_EPOCH'):
+ self.identifier = random.Random(int(os.getenv('SOURCE_DATE_EPOCH'))).randint(1, 0xffffffff)
+ else:
+ self.identifier = random.SystemRandom().randint(1, 0xffffffff)
self.partitions = partitions
self.partimages = []
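(Editor's note: seeding random.Random() with SOURCE_DATE_EPOCH, as above, pins the disk identifier for reproducible builds, while SystemRandom remains the default otherwise. A minimal illustration with an assumed timestamp:)

    # The same seed always yields the same 32-bit identifier.
    import random

    sde = 1700000000                 # illustrative SOURCE_DATE_EPOCH
    a = random.Random(sde).randint(1, 0xffffffff)
    b = random.Random(sde).randint(1, 0xffffffff)
    assert a == b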
@@ -317,6 +324,7 @@ class PartitionedImage():
self.sector_size = SECTOR_SIZE
self.native_sysroot = native_sysroot
num_real_partitions = len([p for p in self.partitions if not p.no_table])
+ self.extra_space = extra_space
# calculate the real partition number, accounting for partitions not
# in the partition table and logical partitions
@@ -334,7 +342,7 @@ class PartitionedImage():
# generate parition and filesystem UUIDs
for part in self.partitions:
if not part.uuid and part.use_uuid:
- if self.ptable_format == 'gpt':
+ if self.ptable_format in ('gpt', 'gpt-hybrid'):
part.uuid = str(uuid.uuid4())
else: # msdos partition table
part.uuid = '%08x-%02d' % (self.identifier, part.realnum)
@@ -343,6 +351,13 @@ class PartitionedImage():
part.fsuuid = '0x' + str(uuid.uuid4())[:8].upper()
else:
part.fsuuid = str(uuid.uuid4())
+ else:
+ # make sure the fsuuid for vfat/msdos aligns with the 0xYYYYYYYY format
+ if part.fstype == 'vfat' or part.fstype == 'msdos':
+ if part.fsuuid.upper().startswith("0X"):
+ part.fsuuid = '0x' + part.fsuuid.upper()[2:].rjust(8,"0")
+ else:
+ part.fsuuid = '0x' + part.fsuuid.upper().rjust(8,"0")
def prepare(self, imager):
"""Prepare an image. Call prepare method of all image partitions."""
@@ -351,7 +366,8 @@ class PartitionedImage():
# sizes before we can add them and do the layout.
part.prepare(imager, imager.workdir, imager.oe_builddir,
imager.rootfs_dir, imager.bootimg_dir,
- imager.kernel_dir, imager.native_sysroot)
+ imager.kernel_dir, imager.native_sysroot,
+ imager.updated_fstab_path)
# Converting kB to sectors for parted
part.size_sec = part.disk_size * 1024 // self.sector_size
@@ -382,6 +398,10 @@ class PartitionedImage():
raise WicError("setting custom partition type is not " \
"implemented for msdos partitions")
+ if part.mbr and self.ptable_format != 'gpt-hybrid':
+ raise WicError("Partition may only be included in MBR with " \
+ "a gpt-hybrid partition table")
+
# Get the disk where the partition is located
self.numpart += 1
if not part.no_table:
@@ -390,7 +410,7 @@ class PartitionedImage():
if self.numpart == 1:
if self.ptable_format == "msdos":
overhead = MBR_OVERHEAD
- elif self.ptable_format == "gpt":
+ elif self.ptable_format in ("gpt", "gpt-hybrid"):
overhead = GPT_OVERHEAD
# Skip one sector required for the partitioning scheme overhead
@@ -474,10 +494,11 @@ class PartitionedImage():
# Once all the partitions have been laid out, we can calculate the
# minimum disk size
self.min_size = self.offset
- if self.ptable_format == "gpt":
+ if self.ptable_format in ("gpt", "gpt-hybrid"):
self.min_size += GPT_OVERHEAD
self.min_size *= self.sector_size
+ self.min_size += self.extra_space
def _create_partition(self, device, parttype, fstype, start, size):
""" Create a partition on an image described by the 'device' object. """
@@ -494,22 +515,49 @@ class PartitionedImage():
return exec_native_cmd(cmd, self.native_sysroot)
+ def _write_identifier(self, device, identifier):
+ logger.debug("Set disk identifier %x", identifier)
+ with open(device, 'r+b') as img:
+ img.seek(0x1B8)
+ img.write(identifier.to_bytes(4, 'little'))
+
+ def _make_disk(self, device, ptable_format, min_size):
+ logger.debug("Creating sparse file %s", device)
+ with open(device, 'w') as sparse:
+ os.ftruncate(sparse.fileno(), min_size)
+
+ logger.debug("Initializing partition table for %s", device)
+ exec_native_cmd("parted -s %s mklabel %s" % (device, ptable_format),
+ self.native_sysroot)
+
+ def _write_disk_guid(self):
+ if self.ptable_format in ('gpt', 'gpt-hybrid'):
+ if os.getenv('SOURCE_DATE_EPOCH'):
+ self.disk_guid = uuid.UUID(int=int(os.getenv('SOURCE_DATE_EPOCH')))
+ else:
+ self.disk_guid = uuid.uuid4()
+
+ logger.debug("Set disk guid %s", self.disk_guid)
+ sfdisk_cmd = "sfdisk --disk-id %s %s" % (self.path, self.disk_guid)
+ exec_native_cmd(sfdisk_cmd, self.native_sysroot)
+
def create(self):
- logger.debug("Creating sparse file %s", self.path)
- with open(self.path, 'w') as sparse:
- os.ftruncate(sparse.fileno(), self.min_size)
+ self._make_disk(self.path,
+ "gpt" if self.ptable_format == "gpt-hybrid" else self.ptable_format,
+ self.min_size)
- logger.debug("Initializing partition table for %s", self.path)
- exec_native_cmd("parted -s %s mklabel %s" %
- (self.path, self.ptable_format), self.native_sysroot)
+ self._write_identifier(self.path, self.identifier)
+ self._write_disk_guid()
- logger.debug("Set disk identifier %x", self.identifier)
- with open(self.path, 'r+b') as img:
- img.seek(0x1B8)
- img.write(self.identifier.to_bytes(4, 'little'))
+ if self.ptable_format == "gpt-hybrid":
+ mbr_path = self.path + ".mbr"
+ self._make_disk(mbr_path, "msdos", self.min_size)
+ self._write_identifier(mbr_path, self.identifier)
logger.debug("Creating partitions")
+ hybrid_mbr_part_num = 0
+
for part in self.partitions:
if part.num == 0:
continue
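(Editor's note: the identifier written by _write_identifier() above lives at byte offset 0x1B8 of the MBR, stored little-endian; a sketch of reading it back the same way, with a hypothetical helper:)

    # Read the 32-bit disk signature back from offset 0x1B8.
    def read_identifier(device):
        with open(device, 'rb') as img:
            img.seek(0x1B8)
            return int.from_bytes(img.read(4), 'little')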
@@ -554,11 +602,19 @@ class PartitionedImage():
self._create_partition(self.path, part.type,
parted_fs_type, part.start, part.size_sec)
- if part.part_name:
+ if self.ptable_format == "gpt-hybrid" and part.mbr:
+ hybrid_mbr_part_num += 1
+ if hybrid_mbr_part_num > 4:
+ raise WicError("Extended MBR partitions are not supported in hybrid MBR")
+ self._create_partition(mbr_path, "primary",
+ parted_fs_type, part.start, part.size_sec)
+
+ if self.ptable_format in ("gpt", "gpt-hybrid") and (part.part_name or part.label):
+ partition_label = part.part_name if part.part_name else part.label
logger.debug("partition %d: set name to %s",
- part.num, part.part_name)
+ part.num, partition_label)
exec_native_cmd("sgdisk --change-name=%d:%s %s" % \
- (part.num, part.part_name,
+ (part.num, partition_label,
self.path), self.native_sysroot)
if part.part_type:
@@ -568,32 +624,55 @@ class PartitionedImage():
(part.num, part.part_type,
self.path), self.native_sysroot)
- if part.uuid and self.ptable_format == "gpt":
+ if part.uuid and self.ptable_format in ("gpt", "gpt-hybrid"):
logger.debug("partition %d: set UUID to %s",
part.num, part.uuid)
exec_native_cmd("sgdisk --partition-guid=%d:%s %s" % \
(part.num, part.uuid, self.path),
self.native_sysroot)
- if part.label and self.ptable_format == "gpt":
- logger.debug("partition %d: set name to %s",
- part.num, part.label)
- exec_native_cmd("parted -s %s name %d %s" % \
- (self.path, part.num, part.label),
- self.native_sysroot)
-
if part.active:
- flag_name = "legacy_boot" if self.ptable_format == 'gpt' else "boot"
+ flag_name = "legacy_boot" if self.ptable_format in ('gpt', 'gpt-hybrid') else "boot"
logger.debug("Set '%s' flag for partition '%s' on disk '%s'",
flag_name, part.num, self.path)
exec_native_cmd("parted -s %s set %d %s on" % \
(self.path, part.num, flag_name),
self.native_sysroot)
+ if self.ptable_format == 'gpt-hybrid' and part.mbr:
+ exec_native_cmd("parted -s %s set %d %s on" % \
+ (mbr_path, hybrid_mbr_part_num, "boot"),
+ self.native_sysroot)
if part.system_id:
exec_native_cmd("sfdisk --part-type %s %s %s" % \
(self.path, part.num, part.system_id),
self.native_sysroot)
+ if part.hidden and self.ptable_format == "gpt":
+ logger.debug("Set hidden attribute for partition '%s' on disk '%s'",
+ part.num, self.path)
+ exec_native_cmd("sfdisk --part-attrs %s %s RequiredPartition" % \
+ (self.path, part.num),
+ self.native_sysroot)
+
+ if self.ptable_format == "gpt-hybrid":
+ # Write a protective GPT partition
+ hybrid_mbr_part_num += 1
+ if hybrid_mbr_part_num > 4:
+ raise WicError("Extended MBR partitions are not supported in hybrid MBR")
+
+ # parted cannot directly create a protective GPT partition, so
+ # create with an arbitrary type, then change it to the correct type
+ # with sfdisk
+ self._create_partition(mbr_path, "primary", "fat32", 1, GPT_OVERHEAD)
+ exec_native_cmd("sfdisk --part-type %s %d 0xee" % (mbr_path, hybrid_mbr_part_num),
+ self.native_sysroot)
+
+ # Copy hybrid MBR
+ with open(mbr_path, "rb") as mbr_file:
+ with open(self.path, "r+b") as image_file:
+ mbr = mbr_file.read(512)
+ image_file.write(mbr)
+
def cleanup(self):
pass
diff --git a/scripts/lib/wic/plugins/source/bootimg-efi.py b/scripts/lib/wic/plugins/source/bootimg-efi.py
index cdc72543c2..13a9cddf4e 100644
--- a/scripts/lib/wic/plugins/source/bootimg-efi.py
+++ b/scripts/lib/wic/plugins/source/bootimg-efi.py
@@ -12,6 +12,7 @@
import logging
import os
+import tempfile
import shutil
import re
@@ -34,6 +35,26 @@ class BootimgEFIPlugin(SourcePlugin):
name = 'bootimg-efi'
@classmethod
+ def _copy_additional_files(cls, hdddir, initrd, dtb):
+ bootimg_dir = get_bitbake_var("DEPLOY_DIR_IMAGE")
+ if not bootimg_dir:
+ raise WicError("Couldn't find DEPLOY_DIR_IMAGE, exiting")
+
+ if initrd:
+ initrds = initrd.split(';')
+ for rd in initrds:
+ cp_cmd = "cp %s/%s %s" % (bootimg_dir, rd, hdddir)
+ exec_cmd(cp_cmd, True)
+ else:
+ logger.debug("Ignoring missing initrd")
+
+ if dtb:
+ if ';' in dtb:
+ raise WicError("Only one DTB supported, exiting")
+ cp_cmd = "cp %s/%s %s" % (bootimg_dir, dtb, hdddir)
+ exec_cmd(cp_cmd, True)
+
+ @classmethod
def do_configure_grubefi(cls, hdddir, creator, cr_workdir, source_params):
"""
Create loader-specific (grub-efi) config
@@ -52,18 +73,9 @@ class BootimgEFIPlugin(SourcePlugin):
"get it from %s." % configfile)
initrd = source_params.get('initrd')
+ dtb = source_params.get('dtb')
- if initrd:
- bootimg_dir = get_bitbake_var("DEPLOY_DIR_IMAGE")
- if not bootimg_dir:
- raise WicError("Couldn't find DEPLOY_DIR_IMAGE, exiting")
-
- initrds = initrd.split(';')
- for rd in initrds:
- cp_cmd = "cp %s/%s %s" % (bootimg_dir, rd, hdddir)
- exec_cmd(cp_cmd, True)
- else:
- logger.debug("Ignoring missing initrd")
+ cls._copy_additional_files(hdddir, initrd, dtb)
if not custom_cfg:
# Create grub configuration using parameters from wks file
@@ -97,6 +109,9 @@ class BootimgEFIPlugin(SourcePlugin):
grubefi_conf += " /%s" % rd
grubefi_conf += "\n"
+ if dtb:
+ grubefi_conf += "devicetree /%s\n" % dtb
+
grubefi_conf += "}\n"
logger.debug("Writing grubefi config %s/hdd/boot/EFI/BOOT/grub.cfg",
@@ -118,24 +133,18 @@ class BootimgEFIPlugin(SourcePlugin):
bootloader = creator.ks.bootloader
+ unified_image = source_params.get('create-unified-kernel-image') == "true"
+
loader_conf = ""
- loader_conf += "default boot\n"
+ if not unified_image:
+ loader_conf += "default boot\n"
loader_conf += "timeout %d\n" % bootloader.timeout
initrd = source_params.get('initrd')
+ dtb = source_params.get('dtb')
- if initrd:
- # obviously we need to have a common common deploy var
- bootimg_dir = get_bitbake_var("DEPLOY_DIR_IMAGE")
- if not bootimg_dir:
- raise WicError("Couldn't find DEPLOY_DIR_IMAGE, exiting")
-
- initrds = initrd.split(';')
- for rd in initrds:
- cp_cmd = "cp %s/%s %s" % (bootimg_dir, rd, hdddir)
- exec_cmd(cp_cmd, True)
- else:
- logger.debug("Ignoring missing initrd")
+ if not unified_image:
+ cls._copy_additional_files(hdddir, initrd, dtb)
logger.debug("Writing systemd-boot config "
"%s/hdd/boot/loader/loader.conf", cr_workdir)
@@ -183,11 +192,15 @@ class BootimgEFIPlugin(SourcePlugin):
for rd in initrds:
boot_conf += "initrd /%s\n" % rd
- logger.debug("Writing systemd-boot config "
- "%s/hdd/boot/loader/entries/boot.conf", cr_workdir)
- cfg = open("%s/hdd/boot/loader/entries/boot.conf" % cr_workdir, "w")
- cfg.write(boot_conf)
- cfg.close()
+ if dtb:
+ boot_conf += "devicetree /%s\n" % dtb
+
+ if not unified_image:
+ logger.debug("Writing systemd-boot config "
+ "%s/hdd/boot/loader/entries/boot.conf", cr_workdir)
+ cfg = open("%s/hdd/boot/loader/entries/boot.conf" % cr_workdir, "w")
+ cfg.write(boot_conf)
+ cfg.close()
@classmethod
@@ -207,6 +220,8 @@ class BootimgEFIPlugin(SourcePlugin):
cls.do_configure_grubefi(hdddir, creator, cr_workdir, source_params)
elif source_params['loader'] == 'systemd-boot':
cls.do_configure_systemdboot(hdddir, creator, cr_workdir, source_params)
+ elif source_params['loader'] == 'uefi-kernel':
+ pass
else:
raise WicError("unrecognized bootimg-efi loader: %s" % source_params['loader'])
except KeyError:
@@ -288,9 +303,107 @@ class BootimgEFIPlugin(SourcePlugin):
kernel = "%s-%s.bin" % \
(get_bitbake_var("KERNEL_IMAGETYPE"), get_bitbake_var("INITRAMFS_LINK_NAME"))
- install_cmd = "install -m 0644 %s/%s %s/%s" % \
- (staging_kernel_dir, kernel, hdddir, kernel)
- exec_cmd(install_cmd)
+ if source_params.get('create-unified-kernel-image') == "true":
+ initrd = source_params.get('initrd')
+ if not initrd:
+ raise WicError("initrd= must be specified when create-unified-kernel-image=true, exiting")
+
+ deploy_dir = get_bitbake_var("DEPLOY_DIR_IMAGE")
+ efi_stub = glob("%s/%s" % (deploy_dir, "linux*.efi.stub"))
+ if len(efi_stub) == 0:
+ raise WicError("Unified Kernel Image EFI stub not found, exiting")
+ efi_stub = efi_stub[0]
+
+ with tempfile.TemporaryDirectory() as tmp_dir:
+ label = source_params.get('label')
+ label_conf = "root=%s" % creator.rootdev
+ if label:
+ label_conf = "LABEL=%s" % label
+
+ bootloader = creator.ks.bootloader
+ cmdline = open("%s/cmdline" % tmp_dir, "w")
+ cmdline.write("%s %s" % (label_conf, bootloader.append))
+ cmdline.close()
+
+ initrds = initrd.split(';')
+ initrd = open("%s/initrd" % tmp_dir, "wb")
+ for f in initrds:
+ with open("%s/%s" % (deploy_dir, f), 'rb') as in_file:
+ shutil.copyfileobj(in_file, initrd)
+ initrd.close()
+
+ # Searched by systemd-boot:
+ # https://systemd.io/BOOT_LOADER_SPECIFICATION/#type-2-efi-unified-kernel-images
+ install_cmd = "install -d %s/EFI/Linux" % hdddir
+ exec_cmd(install_cmd)
+
+ staging_dir_host = get_bitbake_var("STAGING_DIR_HOST")
+ target_sys = get_bitbake_var("TARGET_SYS")
+
+ objdump_cmd = "%s-objdump" % target_sys
+ objdump_cmd += " -p %s" % efi_stub
+ objdump_cmd += " | awk '{ if ($1 == \"SectionAlignment\"){print $2} }'"
+
+ ret, align_str = exec_native_cmd(objdump_cmd, native_sysroot)
+ align = int(align_str, 16)
+
+ objdump_cmd = "%s-objdump" % target_sys
+ objdump_cmd += " -h %s | tail -2" % efi_stub
+ ret, output = exec_native_cmd(objdump_cmd, native_sysroot)
+
+ offset = int(output.split()[2], 16) + int(output.split()[3], 16)
+
+ osrel_off = offset + align - offset % align
+ osrel_path = "%s/usr/lib/os-release" % staging_dir_host
+ osrel_sz = os.stat(osrel_path).st_size
+
+ cmdline_off = osrel_off + osrel_sz
+ cmdline_off = cmdline_off + align - cmdline_off % align
+ cmdline_sz = os.stat(cmdline.name).st_size
+
+ dtb_off = cmdline_off + cmdline_sz
+ dtb_off = dtb_off + align - dtb_off % align
+
+ dtb = source_params.get('dtb')
+ if dtb:
+ if ';' in dtb:
+ raise WicError("Only one DTB supported, exiting")
+ dtb_path = "%s/%s" % (deploy_dir, dtb)
+ dtb_params = '--add-section .dtb=%s --change-section-vma .dtb=0x%x' % \
+ (dtb_path, dtb_off)
+ linux_off = dtb_off + os.stat(dtb_path).st_size
+ linux_off = linux_off + align - linux_off % align
+ else:
+ dtb_params = ''
+ linux_off = dtb_off
+
+ linux_path = "%s/%s" % (staging_kernel_dir, kernel)
+ linux_sz = os.stat(linux_path).st_size
+
+ initrd_off = linux_off + linux_sz
+ initrd_off = initrd_off + align - initrd_off % align
+
+ # https://www.freedesktop.org/software/systemd/man/systemd-stub.html
+ objcopy_cmd = "%s-objcopy" % target_sys
+ objcopy_cmd += " --enable-deterministic-archives"
+ objcopy_cmd += " --preserve-dates"
+ objcopy_cmd += " --add-section .osrel=%s" % osrel_path
+ objcopy_cmd += " --change-section-vma .osrel=0x%x" % osrel_off
+ objcopy_cmd += " --add-section .cmdline=%s" % cmdline.name
+ objcopy_cmd += " --change-section-vma .cmdline=0x%x" % cmdline_off
+ objcopy_cmd += dtb_params
+ objcopy_cmd += " --add-section .linux=%s" % linux_path
+ objcopy_cmd += " --change-section-vma .linux=0x%x" % linux_off
+ objcopy_cmd += " --add-section .initrd=%s" % initrd.name
+ objcopy_cmd += " --change-section-vma .initrd=0x%x" % initrd_off
+ objcopy_cmd += " %s %s/EFI/Linux/linux.efi" % (efi_stub, hdddir)
+
+ exec_native_cmd(objcopy_cmd, native_sysroot)
+ else:
+ if source_params.get('install-kernel-into-boot-dir') != 'false':
+ install_cmd = "install -m 0644 %s/%s %s/%s" % \
+ (staging_kernel_dir, kernel, hdddir, kernel)
+ exec_cmd(install_cmd)
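(Editor's note: the offset arithmetic above rounds each PE section start up to the stub's SectionAlignment. Note that `off + align - off % align` always advances, even when `off` is already aligned; the conventional align-up that leaves aligned offsets untouched looks like this, as a sketch rather than the code's exact behaviour:)

    def align_up(off, align):
        # Round up to the next multiple of align; aligned values stay put.
        return (off + align - 1) // align * align

    assert align_up(0x1234, 0x1000) == 0x2000
    assert align_up(0x2000, 0x1000) == 0x2000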
if get_bitbake_var("IMAGE_EFI_BOOT_FILES"):
for src_path, dst_path in cls.install_task:
@@ -312,6 +425,28 @@ class BootimgEFIPlugin(SourcePlugin):
for mod in [x for x in os.listdir(kernel_dir) if x.startswith("systemd-")]:
cp_cmd = "cp %s/%s %s/EFI/BOOT/%s" % (kernel_dir, mod, hdddir, mod[8:])
exec_cmd(cp_cmd, True)
+ elif source_params['loader'] == 'uefi-kernel':
+ kernel = get_bitbake_var("KERNEL_IMAGETYPE")
+ if not kernel:
+ raise WicError("Empty KERNEL_IMAGETYPE %s\n" % target)
+ target = get_bitbake_var("TARGET_SYS")
+ if not target:
+ raise WicError("Unknown arch (TARGET_SYS) %s\n" % target)
+
+ if re.match("x86_64", target):
+ kernel_efi_image = "bootx64.efi"
+ elif re.match('i.86', target):
+ kernel_efi_image = "bootia32.efi"
+ elif re.match('aarch64', target):
+ kernel_efi_image = "bootaa64.efi"
+ elif re.match('arm', target):
+ kernel_efi_image = "bootarm.efi"
+ else:
+ raise WicError("UEFI stub kernel is incompatible with target %s" % target)
+
+ for mod in [x for x in os.listdir(kernel_dir) if x.startswith(kernel)]:
+ cp_cmd = "cp %s/%s %s/EFI/BOOT/%s" % (kernel_dir, mod, hdddir, kernel_efi_image)
+ exec_cmd(cp_cmd, True)
else:
raise WicError("unrecognized bootimg-efi loader: %s" %
source_params['loader'])
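(Editor's note: the architecture checks above implement the removable-media fallback naming from the UEFI spec (EFI/BOOT/boot*.efi); a table-driven equivalent as a sketch, names illustrative:)

    # Fallback boot file names for the UEFI removable media path EFI/BOOT/.
    import re

    EFI_IMAGE_BY_ARCH = [
        ("x86_64", "bootx64.efi"),
        ("i.86", "bootia32.efi"),
        ("aarch64", "bootaa64.efi"),
        ("arm", "bootarm.efi"),
    ]

    def efi_image_for(target_sys):
        for pattern, image in EFI_IMAGE_BY_ARCH:
            if re.match(pattern, target_sys):
                return image
        raise ValueError("no UEFI stub name for %s" % target_sys)

    print(efi_image_for("aarch64-poky-linux"))  # bootaa64.efi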
@@ -323,6 +458,11 @@ class BootimgEFIPlugin(SourcePlugin):
cp_cmd = "cp %s %s/" % (startup, hdddir)
exec_cmd(cp_cmd, True)
+ for paths in part.include_path or []:
+ for path in paths:
+ cp_cmd = "cp -r %s %s/" % (path, hdddir)
+ exec_cmd(cp_cmd, True)
+
du_cmd = "du -bks %s" % hdddir
out = exec_cmd(du_cmd)
blocks = int(out.split()[0])
@@ -337,6 +477,13 @@ class BootimgEFIPlugin(SourcePlugin):
logger.debug("Added %d extra blocks to %s to get to %d total blocks",
extra_blocks, part.mountpoint, blocks)
+ # required for compatibility with certain devices expecting file system
+ # block count to be equal to partition block count
+ if blocks < part.fixed_size:
+ blocks = part.fixed_size
+ logger.debug("Overriding %s to %d total blocks for compatibility",
+ part.mountpoint, blocks)
+
# dosfs image, created by mkdosfs
bootimg = "%s/boot.img" % cr_workdir
diff --git a/scripts/lib/wic/plugins/source/bootimg-partition.py b/scripts/lib/wic/plugins/source/bootimg-partition.py
index 5dbe2558d2..1071d1af3f 100644
--- a/scripts/lib/wic/plugins/source/bootimg-partition.py
+++ b/scripts/lib/wic/plugins/source/bootimg-partition.py
@@ -1,4 +1,6 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
# DESCRIPTION
@@ -30,6 +32,7 @@ class BootimgPartitionPlugin(SourcePlugin):
"""
name = 'bootimg-partition'
+ image_boot_files_var_name = 'IMAGE_BOOT_FILES'
@classmethod
def do_configure_partition(cls, part, source_params, cr, cr_workdir,
@@ -54,12 +57,12 @@ class BootimgPartitionPlugin(SourcePlugin):
else:
var = ""
- boot_files = get_bitbake_var("IMAGE_BOOT_FILES" + var)
+ boot_files = get_bitbake_var(cls.image_boot_files_var_name + var)
if boot_files is not None:
break
if boot_files is None:
- raise WicError('No boot files defined, IMAGE_BOOT_FILES unset for entry #%d' % part.lineno)
+ raise WicError('No boot files defined, %s unset for entry #%d' % (cls.image_boot_files_var_name, part.lineno))
logger.debug('Boot files: %s', boot_files)
@@ -110,7 +113,7 @@ class BootimgPartitionPlugin(SourcePlugin):
# Use a custom configuration for extlinux.conf
extlinux_conf = custom_cfg
logger.debug("Using custom configuration file "
- "%s for extlinux.cfg", configfile)
+ "%s for extlinux.conf", configfile)
else:
raise WicError("configfile is specified but failed to "
"get it from %s." % configfile)
diff --git a/scripts/lib/wic/plugins/source/bootimg-pcbios.py b/scripts/lib/wic/plugins/source/bootimg-pcbios.py
index f2639e7004..a207a83530 100644
--- a/scripts/lib/wic/plugins/source/bootimg-pcbios.py
+++ b/scripts/lib/wic/plugins/source/bootimg-pcbios.py
@@ -122,7 +122,7 @@ class BootimgPcbiosPlugin(SourcePlugin):
syslinux_conf += "DEFAULT boot\n"
syslinux_conf += "LABEL boot\n"
- kernel = "/vmlinuz"
+ kernel = "/" + get_bitbake_var("KERNEL_IMAGETYPE")
syslinux_conf += "KERNEL " + kernel + "\n"
syslinux_conf += "APPEND label=boot root=%s %s\n" % \
@@ -155,8 +155,8 @@ class BootimgPcbiosPlugin(SourcePlugin):
kernel = "%s-%s.bin" % \
(get_bitbake_var("KERNEL_IMAGETYPE"), get_bitbake_var("INITRAMFS_LINK_NAME"))
- cmds = ("install -m 0644 %s/%s %s/vmlinuz" %
- (staging_kernel_dir, kernel, hdddir),
+ cmds = ("install -m 0644 %s/%s %s/%s" %
+ (staging_kernel_dir, kernel, hdddir, get_bitbake_var("KERNEL_IMAGETYPE")),
"install -m 444 %s/syslinux/ldlinux.sys %s/ldlinux.sys" %
(bootimg_dir, hdddir),
"install -m 0644 %s/syslinux/vesamenu.c32 %s/vesamenu.c32" %
@@ -186,8 +186,10 @@ class BootimgPcbiosPlugin(SourcePlugin):
# dosfs image, created by mkdosfs
bootimg = "%s/boot%s.img" % (cr_workdir, part.lineno)
- dosfs_cmd = "mkdosfs -n boot -i %s -S 512 -C %s %d" % \
- (part.fsuuid, bootimg, blocks)
+ label = part.label if part.label else "boot"
+
+ dosfs_cmd = "mkdosfs -n %s -i %s -S 512 -C %s %d" % \
+ (label, part.fsuuid, bootimg, blocks)
exec_native_cmd(dosfs_cmd, native_sysroot)
mcopy_cmd = "mcopy -i %s -s %s/* ::/" % (bootimg, hdddir)
diff --git a/scripts/lib/wic/plugins/source/empty.py b/scripts/lib/wic/plugins/source/empty.py
new file mode 100644
index 0000000000..4178912377
--- /dev/null
+++ b/scripts/lib/wic/plugins/source/empty.py
@@ -0,0 +1,89 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
+# The empty wic plugin is used to create unformatted empty partitions for wic
+# images.
+# To use it you must pass "empty" as argument for the "--source" parameter in
+# the wks file. For example:
+# part foo --source empty --ondisk sda --size="1024" --align 1024
+#
+# The plugin supports writing zeros to the start of the
+# partition. This is useful to overwrite old content like
+# filesystem signatures which may be re-recognized otherwise.
+# This feature can be enabled with
+# '--sourceparams="[fill|size=<N>[S|s|K|k|M|G]][,][bs=<N>[S|s|K|k|M|G]]"'
+# Conflicting or missing options throw errors.
+
+import logging
+import os
+
+from wic import WicError
+from wic.ksparser import sizetype
+from wic.pluginbase import SourcePlugin
+
+logger = logging.getLogger('wic')
+
+class EmptyPartitionPlugin(SourcePlugin):
+ """
+ Populate unformatted empty partition.
+
+ The following sourceparams are supported:
+ - fill
+ Fill the entire partition with zeros. Requires '--fixed-size' option
+ to be set.
+ - size=<N>[S|s|K|k|M|G]
+ Set the first N bytes of the partition to zero. Default unit is 'K'.
+ - bs=<N>[S|s|K|k|M|G]
+ Write at most N bytes at a time during source file creation.
+ Defaults to '1M'. Default unit is 'K'.
+ """
+
+ name = 'empty'
+
+ @classmethod
+ def do_prepare_partition(cls, part, source_params, cr, cr_workdir,
+ oe_builddir, bootimg_dir, kernel_dir,
+ rootfs_dir, native_sysroot):
+ """
+ Called to do the actual content population for a partition i.e. it
+ 'prepares' the partition to be incorporated into the image.
+ """
+ get_byte_count = sizetype('K', True)
+ size = 0
+
+ if 'fill' in source_params and 'size' in source_params:
+ raise WicError("Conflicting source parameters 'fill' and 'size' specified, exiting.")
+
+ # Set the size of the zeros to be written to the partition
+ if 'fill' in source_params:
+ if part.fixed_size == 0:
+ raise WicError("Source parameter 'fill' only works with the '--fixed-size' option, exiting.")
+ size = get_byte_count(part.fixed_size)
+ elif 'size' in source_params:
+ size = get_byte_count(source_params['size'])
+
+ if size == 0:
+ # Nothing to do, create empty partition
+ return
+
+ if 'bs' in source_params:
+ bs = get_byte_count(source_params['bs'])
+ else:
+ bs = get_byte_count('1M')
+
+ # Create a binary file of the requested size filled with zeros
+ source_file = os.path.join(cr_workdir, 'empty-plugin-zeros%s.bin' % part.lineno)
+ if not os.path.exists(os.path.dirname(source_file)):
+ os.makedirs(os.path.dirname(source_file))
+
+ quotient, remainder = divmod(size, bs)
+ with open(source_file, 'wb') as file:
+ for _ in range(quotient):
+ file.write(bytearray(bs))
+ file.write(bytearray(remainder))
+
+ part.size = (size + 1024 - 1) // 1024 # size in KB rounded up
+ part.source_file = source_file
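(Editor's note: illustrative .wks usage of the new plugin, following the convention of the example in the file header above: the first partition is created untouched, the second gets its first 16 KiB zeroed to wipe stale filesystem signatures.)

    part foo --source empty --ondisk sda --size 64M --align 1024
    part bar --source empty --sourceparams="size=16K,bs=4K" --ondisk sda --fixed-size 128M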
diff --git a/scripts/lib/wic/plugins/source/isoimage-isohybrid.py b/scripts/lib/wic/plugins/source/isoimage-isohybrid.py
index 11326a274b..607356ad13 100644
--- a/scripts/lib/wic/plugins/source/isoimage-isohybrid.py
+++ b/scripts/lib/wic/plugins/source/isoimage-isohybrid.py
@@ -1,4 +1,6 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
# DESCRIPTION
@@ -216,6 +218,18 @@ class IsoImagePlugin(SourcePlugin):
creator.name = source_params['image_name'].strip()
logger.debug("The name of the image is: %s", creator.name)
+ @staticmethod
+ def _install_payload(source_params, iso_dir):
+ """
+ Copies contents of payload directory (as specified in 'payload_dir' param) into iso_dir
+ """
+
+ if source_params.get('payload_dir'):
+ payload_dir = source_params['payload_dir']
+
+ logger.debug("Payload directory: %s", payload_dir)
+ shutil.copytree(payload_dir, iso_dir, symlinks=True, dirs_exist_ok=True)
+
@classmethod
def do_prepare_partition(cls, part, source_params, creator, cr_workdir,
oe_builddir, bootimg_dir, kernel_dir,
@@ -228,6 +242,8 @@ class IsoImagePlugin(SourcePlugin):
isodir = "%s/ISO" % cr_workdir
+ cls._install_payload(source_params, isodir)
+
if part.rootfs_dir is None:
if not 'ROOTFS_DIR' in rootfs_dir:
raise WicError("Couldn't find --rootfs-dir, exiting.")
diff --git a/scripts/lib/wic/plugins/source/rawcopy.py b/scripts/lib/wic/plugins/source/rawcopy.py
index 3c4997d8ba..21903c2f23 100644
--- a/scripts/lib/wic/plugins/source/rawcopy.py
+++ b/scripts/lib/wic/plugins/source/rawcopy.py
@@ -1,9 +1,13 @@
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
import logging
import os
+import signal
+import subprocess
from wic import WicError
from wic.pluginbase import SourcePlugin
@@ -21,6 +25,10 @@ class RawCopyPlugin(SourcePlugin):
@staticmethod
def do_image_label(fstype, dst, label):
+ # don't create label when fstype is none
+ if fstype == 'none':
+ return
+
if fstype.startswith('ext'):
cmd = 'tune2fs -L %s %s' % (label, dst)
elif fstype in ('msdos', 'vfat'):
@@ -29,15 +37,35 @@ class RawCopyPlugin(SourcePlugin):
cmd = 'btrfs filesystem label %s %s' % (dst, label)
elif fstype == 'swap':
cmd = 'mkswap -L %s %s' % (label, dst)
- elif fstype == 'squashfs':
- raise WicError("It's not possible to update a squashfs "
- "filesystem label '%s'" % (label))
+ elif fstype in ('squashfs', 'erofs'):
+ raise WicError("It's not possible to update a %s "
+ "filesystem label '%s'" % (fstype, label))
else:
raise WicError("Cannot update filesystem label: "
"Unknown fstype: '%s'" % (fstype))
exec_cmd(cmd)
+ @staticmethod
+ def do_image_uncompression(src, dst, workdir):
+ def subprocess_setup():
+ # Python installs a SIGPIPE handler by default. This is usually not what
+ # non-Python subprocesses expect.
+ # SIGPIPE errors are known issues with gzip/bash
+ signal.signal(signal.SIGPIPE, signal.SIG_DFL)
+
+ extension = os.path.splitext(src)[1]
+ decompressor = {
+ ".bz2": "bzip2",
+ ".gz": "gzip",
+ ".xz": "xz",
+ ".zst": "zstd -f",
+ }.get(extension)
+ if not decompressor:
+ raise WicError("Not supported compressor filename extension: %s" % extension)
+ cmd = "%s -dc %s > %s" % (decompressor, src, dst)
+ subprocess.call(cmd, preexec_fn=subprocess_setup, shell=True, cwd=workdir)
+
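(Editor's note: the preexec_fn above matters because CPython sets SIGPIPE to ignored, which makes shell pipelines such as 'gzip -dc ... > ...' report spurious write errors in child processes; restoring the default disposition in the child avoids that. A minimal illustration:)

    # Restore default SIGPIPE handling in the child so pipeline stages
    # terminate cleanly instead of failing with broken-pipe write errors.
    import signal
    import subprocess

    def subprocess_setup():
        signal.signal(signal.SIGPIPE, signal.SIG_DFL)

    subprocess.call("yes | head -n 1", shell=True, preexec_fn=subprocess_setup)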
@classmethod
def do_prepare_partition(cls, part, source_params, cr, cr_workdir,
oe_builddir, bootimg_dir, kernel_dir,
@@ -56,7 +84,13 @@ class RawCopyPlugin(SourcePlugin):
if 'file' not in source_params:
raise WicError("No file specified")
- src = os.path.join(kernel_dir, source_params['file'])
+ if 'unpack' in source_params:
+ img = os.path.join(kernel_dir, source_params['file'])
+ src = os.path.join(cr_workdir, os.path.splitext(source_params['file'])[0])
+ RawCopyPlugin.do_image_uncompression(img, src, cr_workdir)
+ else:
+ src = os.path.join(kernel_dir, source_params['file'])
+
dst = os.path.join(cr_workdir, "%s.%s" % (os.path.basename(source_params['file']), part.lineno))
if not os.path.exists(os.path.dirname(dst)):
diff --git a/scripts/lib/wic/plugins/source/rootfs.py b/scripts/lib/wic/plugins/source/rootfs.py
index f1db83f8a1..e29f3a4c2f 100644
--- a/scripts/lib/wic/plugins/source/rootfs.py
+++ b/scripts/lib/wic/plugins/source/rootfs.py
@@ -35,7 +35,7 @@ class RootfsPlugin(SourcePlugin):
@staticmethod
def __validate_path(cmd, rootfs_dir, path):
if os.path.isabs(path):
- logger.error("%s: Must be relative: %s" % (cmd, orig_path))
+ logger.error("%s: Must be relative: %s" % (cmd, path))
sys.exit(1)
# Disallow climbing outside of parent directory using '..',
@@ -50,7 +50,7 @@ class RootfsPlugin(SourcePlugin):
@staticmethod
def __get_rootfs_dir(rootfs_dir):
- if os.path.isdir(rootfs_dir):
+ if rootfs_dir and os.path.isdir(rootfs_dir):
return os.path.realpath(rootfs_dir)
image_rootfs_dir = get_bitbake_var("IMAGE_ROOTFS", rootfs_dir)
@@ -94,8 +94,12 @@ class RootfsPlugin(SourcePlugin):
"it is not a valid path, exiting" % part.rootfs_dir)
part.rootfs_dir = cls.__get_rootfs_dir(rootfs_dir)
+ part.has_fstab = os.path.exists(os.path.join(part.rootfs_dir, "etc/fstab"))
pseudo_dir = os.path.join(part.rootfs_dir, "../pseudo")
if not os.path.lexists(pseudo_dir):
+ pseudo_dir = os.path.join(cls.__get_rootfs_dir(None), '../pseudo')
+
+ if not os.path.lexists(pseudo_dir):
logger.warn("%s folder does not exist. "
"Usernames and permissions will be invalid " % pseudo_dir)
pseudo_dir = None
@@ -103,9 +107,9 @@ class RootfsPlugin(SourcePlugin):
new_rootfs = None
new_pseudo = None
# Handle excluded paths.
- if part.exclude_path or part.include_path or part.change_directory:
- # We need a new rootfs directory we can delete files from. Copy to
- # workdir.
+ if part.exclude_path or part.include_path or part.change_directory or part.update_fstab_in_rootfs:
+ # We need a new rootfs directory we can safely modify without
+ # interfering with other tasks. Copy to workdir.
new_rootfs = os.path.realpath(os.path.join(cr_workdir, "rootfs%d" % part.lineno))
if os.path.lexists(new_rootfs):
@@ -199,17 +203,33 @@ class RootfsPlugin(SourcePlugin):
if not os.path.lexists(full_path):
continue
+ if new_pseudo:
+ pseudo = cls.__get_pseudo(native_sysroot, new_rootfs, new_pseudo)
+ else:
+ pseudo = None
if path.endswith(os.sep):
# Delete content only.
for entry in os.listdir(full_path):
full_entry = os.path.join(full_path, entry)
- if os.path.isdir(full_entry) and not os.path.islink(full_entry):
- shutil.rmtree(full_entry)
- else:
- os.remove(full_entry)
+ rm_cmd = "rm -rf %s" % (full_entry)
+ exec_native_cmd(rm_cmd, native_sysroot, pseudo)
else:
# Delete whole directory.
- shutil.rmtree(full_path)
+ rm_cmd = "rm -rf %s" % (full_path)
+ exec_native_cmd(rm_cmd, native_sysroot, pseudo)
+
+ # Update part.has_fstab here as fstab may have been added or
+ # removed by the above modifications.
+ part.has_fstab = os.path.exists(os.path.join(new_rootfs, "etc/fstab"))
+ if part.update_fstab_in_rootfs and part.has_fstab and not part.no_fstab_update:
+ fstab_path = os.path.join(new_rootfs, "etc/fstab")
+ # Assume that fstab should always be owned by root with fixed permissions
+ install_cmd = "install -m 0644 -p %s %s" % (part.updated_fstab_path, fstab_path)
+ if new_pseudo:
+ pseudo = cls.__get_pseudo(native_sysroot, new_rootfs, new_pseudo)
+ else:
+ pseudo = None
+ exec_native_cmd(install_cmd, native_sysroot, pseudo)
part.prepare_rootfs(cr_workdir, oe_builddir,
new_rootfs or part.rootfs_dir, native_sysroot,
diff --git a/scripts/lnr b/scripts/lnr
deleted file mode 100755
index a2ac4fec0f..0000000000
--- a/scripts/lnr
+++ /dev/null
@@ -1,24 +0,0 @@
-#! /usr/bin/env python3
-#
-# SPDX-License-Identifier: GPL-2.0-only
-#
-
-# Create a *relative* symlink, just like ln --relative does but without needing
-# coreutils 8.16.
-
-import sys, os
-
-if len(sys.argv) != 3:
- print("$ lnr TARGET LINK_NAME")
- sys.exit(1)
-
-target = sys.argv[1]
-linkname = sys.argv[2]
-
-if os.path.isabs(target):
- if not os.path.isabs(linkname):
- linkname = os.path.abspath(linkname)
- start = os.path.dirname(linkname)
- target = os.path.relpath(target, start)
-
-os.symlink(target, linkname)
diff --git a/scripts/native-intercept/ar b/scripts/native-intercept/ar
new file mode 100755
index 0000000000..dcc623e3ed
--- /dev/null
+++ b/scripts/native-intercept/ar
@@ -0,0 +1,32 @@
+#!/usr/bin/env python3
+#
+# Wrapper around 'ar' that defaults to deterministic archives
+
+import os
+import shutil
+import sys
+
+# calculate path to the real 'ar'
+path = os.environ['PATH']
+path = path.replace(os.path.dirname(sys.argv[0]), '')
+real_ar = shutil.which('ar', path=path)
+
+if len(sys.argv) == 1:
+ os.execl(real_ar, 'ar')
+
+# modify args to mimic 'ar' configured with --default-deterministic-archives
+argv = sys.argv
+if argv[1].startswith('--'):
+    # No modifier given
+    pass
+else:
+ # remove the optional '-'
+ if argv[1][0] == '-':
+ argv[1] = argv[1][1:]
+ if 'U' in argv[1]:
+ sys.stderr.write("ar: non-deterministic mode requested\n")
+ else:
+ argv[1] = argv[1].replace('u', '')
+ argv[1] = 'D' + argv[1]
+
+os.execv(real_ar, argv)
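(Editor's note: a worked example of what the wrapper above does to a typical invocation: the 'u' (update) modifier is dropped and 'D' (deterministic) prepended, mimicking binutils built with --enable-deterministic-archives.)

    # 'ar rcu libfoo.a foo.o' is rewritten to 'ar Drc libfoo.a foo.o'.
    argv = ['ar', 'rcu', 'libfoo.a', 'foo.o']
    mod = argv[1].lstrip('-').replace('u', '')
    print([argv[0], 'D' + mod] + argv[2:])
    # ['ar', 'Drc', 'libfoo.a', 'foo.o']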
diff --git a/scripts/nativesdk-intercept/chgrp b/scripts/nativesdk-intercept/chgrp
new file mode 100755
index 0000000000..f8ae84b8b3
--- /dev/null
+++ b/scripts/nativesdk-intercept/chgrp
@@ -0,0 +1,30 @@
+#!/usr/bin/env python3
+#
+# Wrapper around 'chgrp' that redirects to root in all cases
+
+import os
+import shutil
+import sys
+
+# calculate path to the real 'chgrp'
+path = os.environ['PATH']
+path = path.replace(os.path.dirname(sys.argv[0]), '')
+real_chgrp = shutil.which('chgrp', path=path)
+
+args = list()
+
+found = False
+
+args.append(real_chgrp)
+
+for i in sys.argv[1:]:
+ if i.startswith("-"):
+ args.append(i)
+ continue
+ if not found:
+ args.append("root")
+ found = True
+ else:
+ args.append(i)
+
+os.execv(real_chgrp, args)
diff --git a/scripts/nativesdk-intercept/chown b/scripts/nativesdk-intercept/chown
new file mode 100755
index 0000000000..0805ceb70a
--- /dev/null
+++ b/scripts/nativesdk-intercept/chown
@@ -0,0 +1,30 @@
+#!/usr/bin/env python3
+#
+# Wrapper around 'chown' that redirects to root in all cases
+
+import os
+import shutil
+import sys
+
+# calculate path to the real 'chown'
+path = os.environ['PATH']
+path = path.replace(os.path.dirname(sys.argv[0]), '')
+real_chown = shutil.which('chown', path=path)
+
+args = list()
+
+found = False
+
+args.append(real_chown)
+
+for i in sys.argv[1:]:
+ if i.startswith("-"):
+ args.append(i)
+ continue
+ if not found:
+ args.append("root:root")
+ found = True
+ else:
+ args.append(i)
+
+os.execv(real_chown, args)
diff --git a/scripts/oe-buildenv-internal b/scripts/oe-buildenv-internal
index ba0a9b44d6..2fdb19565a 100755
--- a/scripts/oe-buildenv-internal
+++ b/scripts/oe-buildenv-internal
@@ -32,12 +32,12 @@ fi
# We potentially have code that doesn't parse correctly with older versions
# of Python, and rather than fixing that and being eternally vigilant for
# any other new feature use, just check the version here.
-py_v35_check=$(python3 -c 'import sys; print(sys.version_info >= (3,5,0))')
-if [ "$py_v35_check" != "True" ]; then
- echo >&2 "BitBake requires Python 3.5.0 or later as 'python3 (scripts/install-buildtools can be used if needed)'"
+py_v38_check=$(python3 -c 'import sys; print(sys.version_info >= (3,8,0))')
+if [ "$py_v38_check" != "True" ]; then
+ echo >&2 "BitBake requires Python 3.8.0 or later as 'python3' (scripts/install-buildtools can be used if needed)"
return 1
fi
-unset py_v35_check
+unset py_v38_check
if [ -z "$BDIR" ]; then
if [ -z "$1" ]; then
@@ -88,27 +88,32 @@ if [ ! -d "$BITBAKEDIR" ]; then
return 1
fi
+# Add BitBake's library to PYTHONPATH
+PYTHONPATH=$BITBAKEDIR/lib:$PYTHONPATH
+export PYTHONPATH
+
+# Remove any paths added by sourcing this script before
+[ -n "$OE_ADDED_PATHS" ] && PATH=$(echo $PATH | sed -e "s#$OE_ADDED_PATHS##") ||
+ PATH=$(echo $PATH | sed -e "s#$OEROOT/scripts:$BITBAKEDIR/bin:##")
+
# Make sure our paths are at the beginning of $PATH
-for newpath in "$BITBAKEDIR/bin" "$OEROOT/scripts"; do
- # Remove any existences of $newpath from $PATH
- PATH=$(echo $PATH | sed -re "s#(^|:)$newpath(:|$)#\2#g;s#^:##")
+OE_ADDED_PATHS="$OEROOT/scripts:$BITBAKEDIR/bin:"
+PATH="$OE_ADDED_PATHS$PATH"
+export OE_ADDED_PATHS
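+# (OE_ADDED_PATHS is exported so that re-sourcing this script in the same
+# shell can strip exactly the entries it added above)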
- # Add $newpath to $PATH
- PATH="$newpath:$PATH"
-done
-unset BITBAKEDIR newpath
+# This is not needed anymore
+unset BITBAKEDIR
# Used by the runqemu script
export BUILDDIR
-export PATH
-BB_ENV_EXTRAWHITE_OE="MACHINE DISTRO TCMODE TCLIBC HTTP_PROXY http_proxy \
+BB_ENV_PASSTHROUGH_ADDITIONS_OE="MACHINE DISTRO TCMODE TCLIBC HTTP_PROXY http_proxy \
HTTPS_PROXY https_proxy FTP_PROXY ftp_proxy FTPS_PROXY ftps_proxy ALL_PROXY \
all_proxy NO_PROXY no_proxy SSH_AGENT_PID SSH_AUTH_SOCK BB_SRCREV_POLICY \
SDKMACHINE BB_NUMBER_THREADS BB_NO_NETWORK PARALLEL_MAKE GIT_PROXY_COMMAND \
SOCKS5_PASSWD SOCKS5_USER SCREENDIR STAMPS_DIR BBPATH_EXTRA BB_SETSCENE_ENFORCE \
BB_LOGCONFIG"
-BB_ENV_EXTRAWHITE="$(echo $BB_ENV_EXTRAWHITE $BB_ENV_EXTRAWHITE_OE | tr ' ' '\n' | LC_ALL=C sort --unique | tr '\n' ' ')"
+BB_ENV_PASSTHROUGH_ADDITIONS="$(echo $BB_ENV_PASSTHROUGH_ADDITIONS $BB_ENV_PASSTHROUGH_ADDITIONS_OE | tr ' ' '\n' | LC_ALL=C sort --unique | tr '\n' ' ')"
-export BB_ENV_EXTRAWHITE
+export BB_ENV_PASSTHROUGH_ADDITIONS
diff --git a/scripts/oe-check-sstate b/scripts/oe-check-sstate
index ca249ca67b..0d171c4463 100755
--- a/scripts/oe-check-sstate
+++ b/scripts/oe-check-sstate
@@ -18,7 +18,6 @@ import re
scripts_path = os.path.dirname(os.path.realpath(__file__))
lib_path = scripts_path + '/lib'
sys.path = sys.path + [lib_path]
-import scriptutils
import scriptpath
scriptpath.add_bitbake_lib_path()
import argparse_oe
@@ -47,17 +46,14 @@ def check(args):
try:
env = os.environ.copy()
if not args.same_tmpdir:
- env['BB_ENV_EXTRAWHITE'] = env.get('BB_ENV_EXTRAWHITE', '') + ' TMPDIR_forcevariable'
- env['TMPDIR_forcevariable'] = tmpdir
+ env['BB_ENV_PASSTHROUGH_ADDITIONS'] = env.get('BB_ENV_PASSTHROUGH_ADDITIONS', '') + ' TMPDIR:forcevariable'
+ env['TMPDIR:forcevariable'] = tmpdir
try:
- output = subprocess.check_output(
- 'bitbake -n %s' % ' '.join(args.target),
- stderr=subprocess.STDOUT,
- env=env,
- shell=True)
+ cmd = ['bitbake', '--dry-run', '--runall=build'] + args.target
+ output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, env=env)
- task_re = re.compile('NOTE: Running setscene task [0-9]+ of [0-9]+ \(([^)]+)\)')
+ task_re = re.compile(r'NOTE: Running setscene task [0-9]+ of [0-9]+ \(([^)]+)\)')
tasks = []
for line in output.decode('utf-8').splitlines():
res = task_re.match(line)
diff --git a/scripts/oe-debuginfod b/scripts/oe-debuginfod
new file mode 100755
index 0000000000..b525310225
--- /dev/null
+++ b/scripts/oe-debuginfod
@@ -0,0 +1,28 @@
+#!/usr/bin/env python3
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
+import os
+import sys
+scripts_path = os.path.dirname(os.path.realpath(__file__))
+lib_path = scripts_path + "/lib"
+sys.path.insert(0, lib_path)
+import scriptpath
+scriptpath.add_bitbake_lib_path()
+
+import bb.tinfoil
+import subprocess
+
+if __name__ == "__main__":
+ with bb.tinfoil.Tinfoil() as tinfoil:
+ tinfoil.prepare(config_only=True)
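+ # map the first entry of PACKAGE_CLASSES (e.g. "package_rpm") to the
+ # corresponding deploy directory variable (e.g. "DEPLOY_DIR_RPM"), which
+ # is the package feed directory that debuginfod will serve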
+ package_classes_var = "DEPLOY_DIR_" + tinfoil.config_data.getVar("PACKAGE_CLASSES").split()[0].replace("package_", "").upper()
+ feed_dir = tinfoil.config_data.getVar(package_classes_var, expand=True)
+
+ subprocess.call(['bitbake', '-c', 'addto_recipe_sysroot', 'elfutils-native'])
+
+ subprocess.call(['oe-run-native', 'elfutils-native', 'debuginfod', '--verbose', '-R', '-U', feed_dir])
+ print("\nTo use the debuginfod server please ensure that this variable PACKAGECONFIG:pn-elfutils-native = \"debuginfod libdebuginfod\" is set in the local.conf")
diff --git a/scripts/oe-depends-dot b/scripts/oe-depends-dot
index 5eb3e12769..d02ee455f6 100755
--- a/scripts/oe-depends-dot
+++ b/scripts/oe-depends-dot
@@ -14,8 +14,8 @@ import re
class Dot(object):
def __init__(self):
parser = argparse.ArgumentParser(
- description="Analyse recipe-depends.dot generated by bitbake -g",
- epilog="Use %(prog)s --help to get help")
+ description="Analyse task-depends.dot generated by bitbake -g",
+ formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("dotfile",
help = "Specify the dotfile", nargs = 1, action='store', default='')
parser.add_argument("-k", "--key",
@@ -32,6 +32,21 @@ class Dot(object):
" For example, A->B, B->C, A->C, then A->C can be removed.",
action="store_true", default=False)
+ parser.epilog = """
+Examples:
+First generate the .dot file:
+ bitbake -g core-image-minimal
+
+To find out why a package is being built:
+ %(prog)s -k <package> -w ./task-depends.dot
+
+To find out what a package depends on:
+ %(prog)s -k <package> -d ./task-depends.dot
+
+Reduce the .dot file to packages only, no tasks:
+ %(prog)s -r ./task-depends.dot
+"""
+
self.args = parser.parse_args()
if len(sys.argv) != 3 and len(sys.argv) < 5:
@@ -99,6 +114,10 @@ class Dot(object):
if key == "meta-world-pkgdata":
continue
dep = m.group(2)
+ key = key.split('.')[0]
+ dep = dep.split('.')[0]
+ if key == dep:
+ continue
if key in depends:
if not key in depends[key]:
depends[key].add(dep)
@@ -140,9 +159,14 @@ class Dot(object):
reverse_deps = []
if self.args.why:
- for k, v in depends.items():
- if self.args.key in v and not k in reverse_deps:
- reverse_deps.append(k)
+ key_list = [self.args.key]
+ current_key = self.args.key
+ while (len(key_list) != 0):
+ current_key = key_list.pop()
+ for k, v in depends.items():
+ if current_key in v and not k in reverse_deps:
+ reverse_deps.append(k)
+ key_list.append(k)
print('Because: %s' % ' '.join(reverse_deps))
Dot.print_dep_chains(self.args.key, reverse_deps, depends)
diff --git a/scripts/oe-find-native-sysroot b/scripts/oe-find-native-sysroot
index 5146bbf999..6228efcbee 100755
--- a/scripts/oe-find-native-sysroot
+++ b/scripts/oe-find-native-sysroot
@@ -36,20 +36,9 @@ if [ "$1" = '--help' -o "$1" = '-h' -o $# -ne 1 ] ; then
fi
# Global vars
-BITBAKE_E=""
set_oe_native_sysroot(){
- echo "Running bitbake -e $1"
- BITBAKE_E="`bitbake -e $1`"
- OECORE_NATIVE_SYSROOT=`echo "$BITBAKE_E" | grep ^STAGING_DIR_NATIVE= | cut -d '"' -f2`
-
- if [ "x$OECORE_NATIVE_SYSROOT" = "x" ]; then
- # This indicates that there was an error running bitbake -e that
- # the user needs to be informed of
- echo "There was an error running bitbake to determine STAGING_DIR_NATIVE"
- echo "Here is the output from bitbake -e $1"
- echo $BITBAKE_E
- exit 1
- fi
+ echo "Getting sysroot..."
+ OECORE_NATIVE_SYSROOT=$(bitbake-getvar -r $1 --value STAGING_DIR_NATIVE)
}
if [ "x$OECORE_NATIVE_SYSROOT" = "x" ]; then
diff --git a/scripts/oe-gnome-terminal-phonehome b/scripts/oe-gnome-terminal-phonehome
index b6b9a3867b..1352a9872b 100755
--- a/scripts/oe-gnome-terminal-phonehome
+++ b/scripts/oe-gnome-terminal-phonehome
@@ -1,5 +1,7 @@
#!/bin/sh
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
# Gnome terminal won't tell us which PID a given command is run as
diff --git a/scripts/oe-pkgdata-browser b/scripts/oe-pkgdata-browser
index 8d223185a4..c152c82b25 100755
--- a/scripts/oe-pkgdata-browser
+++ b/scripts/oe-pkgdata-browser
@@ -1,4 +1,9 @@
#! /usr/bin/env python3
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
import os, sys, enum, ast
@@ -49,11 +54,11 @@ def load(filename, suffix=None):
from configparser import ConfigParser
from itertools import chain
- parser = ConfigParser()
+ parser = ConfigParser(delimiters=('='))
if suffix:
- parser.optionxform = lambda option: option.replace("_" + suffix, "")
+ parser.optionxform = lambda option: option.replace(":" + suffix, "")
with open(filename) as lines:
- lines = chain(("[fake]",), lines)
+ lines = chain(("[fake]",), (line.replace(": ", " = ", 1) for line in lines))
parser.read_file(lines)
# TODO extract the data and put it into a real dict so we can transform some
@@ -236,6 +241,8 @@ class PkgUi():
update_deps("RPROVIDES", "Provides: ", self.provides_label, clickable=False)
def load_recipes(self):
+ if not os.path.exists(pkgdata):
+ sys.exit("Error: Please ensure %s exists by generating packages before using this tool." % pkgdata)
for recipe in sorted(os.listdir(pkgdata)):
if os.path.isfile(os.path.join(pkgdata, recipe)):
self.recipe_iters[recipe] = self.recipe_store.append([recipe])
diff --git a/scripts/oe-pkgdata-util b/scripts/oe-pkgdata-util
index 93220e3617..44ae40549a 100755
--- a/scripts/oe-pkgdata-util
+++ b/scripts/oe-pkgdata-util
@@ -96,7 +96,7 @@ def glob(args):
pn = os.path.basename(pkgdata_file)
with open(pkgdata_file, 'r') as f:
for line in f:
- if line.startswith("PKG_%s:" % pn):
+ if line.startswith("PKG:%s:" % pn):
renamed = line.split(': ')[1].rstrip()
return renamed
@@ -171,7 +171,7 @@ def read_value(args):
val = line.split(': ', 1)[1].rstrip()
return val
- logger.debug("read-value('%s', '%s' '%s')" % (args.pkgdata_dir, args.valuename, packages))
+ logger.debug("read-value('%s', '%s' '%s')" % (args.pkgdata_dir, args.valuenames, packages))
for package in packages:
pkg_split = package.split('_')
pkg_name = pkg_split[0]
@@ -180,20 +180,29 @@ def read_value(args):
logger.debug(revlink)
if os.path.exists(revlink):
mappedpkg = os.path.basename(os.readlink(revlink))
- qvar = args.valuename
- value = readvar(revlink, qvar, mappedpkg)
- if qvar == "PKGSIZE":
- # PKGSIZE is now in bytes, but we we want it in KB
- pkgsize = (int(value) + 1024 // 2) // 1024
- value = "%d" % pkgsize
- if args.unescape:
- import codecs
- # escape_decode() unescapes backslash encodings in byte streams
- value = codecs.escape_decode(bytes(value, "utf-8"))[0].decode("utf-8")
+ qvars = args.valuenames
+ val_names = qvars.split(',')
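+ # read each requested variable for this package; the values are
+ # joined into a single space-separated line before printing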
+ values = []
+ for qvar in val_names:
+ if qvar == "PACKAGE":
+ value = mappedpkg
+ else:
+ value = readvar(revlink, qvar, mappedpkg)
+ if qvar == "PKGSIZE":
+                    # PKGSIZE is now in bytes, but we want it in KB
+ pkgsize = (int(value) + 1024 // 2) // 1024
+ value = "%d" % pkgsize
+ if args.unescape:
+ import codecs
+ # escape_decode() unescapes backslash encodings in byte streams
+ value = codecs.escape_decode(bytes(value, "utf-8"))[0].decode("utf-8")
+ values.append(value)
+
+ values_str = ' '.join(values)
if args.prefix_name:
- print('%s %s' % (pkg_name, value))
+ print('%s %s' % (pkg_name, values_str))
else:
- print(value)
+ print(values_str)
else:
logger.debug("revlink %s does not exist", revlink)
@@ -213,7 +222,7 @@ def lookup_pkglist(pkgs, pkgdata_dir, reverse):
with open(pkgfile, 'r') as f:
for line in f:
fields = line.rstrip().split(': ')
- if fields[0] == 'PKG_%s' % pkg:
+ if fields[0] == 'PKG:%s' % pkg:
mappings[pkg].append(fields[1])
break
return mappings
@@ -287,7 +296,7 @@ def package_info(args):
extra = ''
for line in f:
for var in vars:
- m = re.match(var + '(?:_\S+)?:\s*(.+?)\s*$', line)
+ m = re.match(var + r'(?::\S+)?:\s*(.+?)\s*$', line)
if m:
vals[var] = m.group(1)
pkg_version = vals['PKGV'] or ''
@@ -431,7 +440,7 @@ def list_pkg_files(args):
for line in f:
if line.startswith('FILES_INFO:'):
found = True
- val = line.split(':', 1)[1].strip()
+ val = line.split(': ', 1)[1].strip()
dictval = json.loads(val)
if long:
width = max(map(len, dictval), default=0)
@@ -500,7 +509,7 @@ def find_path(args):
with open(os.path.join(root,fn)) as f:
for line in f:
if line.startswith('FILES_INFO:'):
- val = line.split(':', 1)[1].strip()
+ val = line.split(': ', 1)[1].strip()
dictval = json.loads(val)
for fullpth in dictval.keys():
if fnmatch.fnmatchcase(fullpth, args.targetpath):
@@ -570,7 +579,7 @@ def main():
parser_read_value = subparsers.add_parser('read-value',
help='Read any pkgdata value for one or more packages',
description='Reads the named value from the pkgdata files for the specified packages')
- parser_read_value.add_argument('valuename', help='Name of the value to look up')
+ parser_read_value.add_argument('valuenames', help='Name of the value/s to look up (separated by commas, no spaces)')
parser_read_value.add_argument('pkg', nargs='*', help='Runtime package name to look up')
parser_read_value.add_argument('-f', '--file', help='Read package names from the specified file (one per line, first field only)')
parser_read_value.add_argument('-n', '--prefix-name', help='Prefix output with package name', action='store_true')
@@ -598,6 +607,9 @@ def main():
logger.error("Unable to find bitbake by searching parent directory of this script or PATH")
sys.exit(1)
logger.debug('Found bitbake path: %s' % bitbakepath)
+ if not os.environ.get('BUILDDIR', ''):
+ logger.error("This script can only be run after initialising the build environment (e.g. by using oe-init-build-env)")
+ sys.exit(1)
tinfoil = tinfoil_init()
try:
args.pkgdata_dir = tinfoil.config_data.getVar('PKGDATA_DIR')
diff --git a/scripts/oe-publish-sdk b/scripts/oe-publish-sdk
index deb8ae1807..b8a652e47f 100755
--- a/scripts/oe-publish-sdk
+++ b/scripts/oe-publish-sdk
@@ -107,9 +107,9 @@ def publish(args):
# Setting up the git repo
if not is_remote:
- cmd = 'set -e; mkdir -p %s/layers; cd %s/layers; if [ ! -e .git ]; then git init .; cp .git/hooks/post-update.sample .git/hooks/post-commit; echo "*.pyc\n*.pyo\npyshtables.py" > .gitignore; fi; git add -A .; git config user.email "oe@oe.oe" && git config user.name "OE" && git commit -q -m "init repo" || true' % (destination, destination)
+ cmd = 'set -e; mkdir -p %s/layers; cd %s/layers; if [ ! -e .git ]; then git init .; cp .git/hooks/post-update.sample .git/hooks/post-commit; echo "*.pyc\n*.pyo\npyshtables.py" > .gitignore; fi; git config gc.auto 0; git add -A .; git config user.email "oe@oe.oe" && git config user.name "OE" && git commit -q -m "init repo" || true' % (destination, destination)
else:
- cmd = "ssh %s 'set -e; mkdir -p %s/layers; cd %s/layers; if [ ! -e .git ]; then git init .; cp .git/hooks/post-update.sample .git/hooks/post-commit; echo '*.pyc' > .gitignore; echo '*.pyo' >> .gitignore; echo 'pyshtables.py' >> .gitignore; fi; git add -A .; git config user.email 'oe@oe.oe' && git config user.name 'OE' && git commit -q -m \"init repo\" || true'" % (host, destdir, destdir)
+ cmd = "ssh %s 'set -e; mkdir -p %s/layers; cd %s/layers; if [ ! -e .git ]; then git init .; cp .git/hooks/post-update.sample .git/hooks/post-commit; echo '*.pyc' > .gitignore; echo '*.pyo' >> .gitignore; echo 'pyshtables.py' >> .gitignore; fi; git config gc.auto 0; git add -A .; git config user.email 'oe@oe.oe' && git config user.name 'OE' && git commit -q -m \"init repo\" || true'" % (host, destdir, destdir)
ret = subprocess.call(cmd, shell=True)
if ret == 0:
logger.info('SDK published successfully')
diff --git a/scripts/oe-pylint b/scripts/oe-pylint
index 7cc1ccb010..5ad72838e9 100755
--- a/scripts/oe-pylint
+++ b/scripts/oe-pylint
@@ -1,5 +1,7 @@
#!/bin/bash
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
# Run the pylint3 against our common python module spaces and print a report of potential issues
diff --git a/scripts/oe-run-native b/scripts/oe-run-native
index 4e63e69cc4..22958d97e7 100755
--- a/scripts/oe-run-native
+++ b/scripts/oe-run-native
@@ -43,7 +43,7 @@ fi
OLD_PATH=$PATH
# look for a tool only in native sysroot
-PATH=$OECORE_NATIVE_SYSROOT/usr/bin:$OECORE_NATIVE_SYSROOT/bin:$OECORE_NATIVE_SYSROOT/usr/sbin:$OECORE_NATIVE_SYSROOT/sbin$(find $OECORE_NATIVE_SYSROOT/usr/bin/*-native -maxdepth 1 -type d -printf ":%p")
+PATH=$OECORE_NATIVE_SYSROOT/usr/bin:$OECORE_NATIVE_SYSROOT/bin:$OECORE_NATIVE_SYSROOT/usr/sbin:$OECORE_NATIVE_SYSROOT/sbin$(find $OECORE_NATIVE_SYSROOT/usr/bin -maxdepth 1 -name "*-native" -type d -printf ":%p")
tool_find=`/usr/bin/which $tool 2>/dev/null`
if [ -n "$tool_find" ] ; then
diff --git a/scripts/oe-setup-build b/scripts/oe-setup-build
new file mode 100755
index 0000000000..5364f2b481
--- /dev/null
+++ b/scripts/oe-setup-build
@@ -0,0 +1,122 @@
+#!/usr/bin/env python3
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
+import argparse
+import json
+import os
+import subprocess
+
+def defaultlayers():
+ return os.path.abspath(os.path.join(os.path.dirname(__file__), '.oe-layers.json'))
+
+def makebuildpath(topdir, template):
+ return os.path.join(topdir, "build-{}".format(template))
+
+def discover_templates(layers_file):
+ if not os.path.exists(layers_file):
+ print("List of layers {} does not exist; were the layers set up using the setup-layers script?".format(layers_file))
+ return None
+
+ templates = []
+ layers_list = json.load(open(layers_file))["layers"]
+ for layer in layers_list:
+ template_dir = os.path.join(os.path.dirname(layers_file), layer, 'conf','templates')
+ if os.path.exists(template_dir):
+ for d in sorted(os.listdir(template_dir)):
+ templatepath = os.path.join(template_dir,d)
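+ # only directories that ship a local.conf.sample are valid templates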
+ if not os.path.isfile(os.path.join(templatepath,'local.conf.sample')):
+ continue
+ layer_base = os.path.basename(layer)
+ templatename = "{}-{}".format(layer_base[5:] if layer_base.startswith("meta-") else layer_base, d)
+ buildpath = makebuildpath(os.getcwd(), templatename)
+ notespath = os.path.join(template_dir, d, 'conf-notes.txt')
+ try: notes = open(notespath).read()
+ except: notes = None
+ try: summary = open(os.path.join(template_dir, d, 'conf-summary.txt')).read()
+ except: summary = None
+ templates.append({"templatename":templatename,"templatepath":templatepath,"buildpath":buildpath,"notespath":notespath,"notes":notes,"summary":summary})
+
+ return templates
+
+def print_templates(templates, verbose):
+ print("Available build configurations:\n")
+
+ for i in range(len(templates)):
+ t = templates[i]
+ print("{}. {}".format(i+1, t["templatename"]))
+ print("{}".format(t["summary"].strip() if t["summary"] else "This configuration does not have a summary."))
+ if verbose:
+ print("Configuration template path:", t["templatepath"])
+ print("Build path:", t["buildpath"])
+ print("Usage notes:", t["notespath"] if t["notes"] else "This configuration does not have usage notes.")
+ print("")
+ if not verbose:
+ print("Re-run with 'list -v' to see additional information.")
+
+def list_templates(args):
+ templates = discover_templates(args.layerlist)
+ if not templates:
+ return
+
+ verbose = args.v
+ print_templates(templates, verbose)
+
+def find_template(template_name, templates):
+ print_templates(templates, False)
+ if not template_name:
+ n_s = input("Please choose a configuration by its number: ")
+ try: return templates[int(n_s) - 1]
+ except:
+ print("Invalid selection, please try again.")
+ return None
+ else:
+ for t in templates:
+ if t["templatename"] == template_name:
+ return t
+ print("Configuration {} is not one of {}, please try again.".format(tempalte_name, [t["templatename"] for t in templates]))
+ return None
+
+def setup_build_env(args):
+ templates = discover_templates(args.layerlist)
+ if not templates:
+ return
+
+ template = find_template(args.c, templates)
+ if not template:
+ return
+ builddir = args.b if args.b else template["buildpath"]
+ no_shell = args.no_shell
+ coredir = os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))
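+    # source oe-init-build-env with TEMPLATECONF pointing at the chosen
+    # template; unless --no-shell was given, hand over to an interactive
+    # shell running in the configured build environment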
+ cmd = "TEMPLATECONF={} . {} {}".format(template["templatepath"], os.path.join(coredir, 'oe-init-build-env'), builddir)
+ if not no_shell:
+ cmd = cmd + " && {}".format(os.environ['SHELL'])
+ print("Running:", cmd)
+ subprocess.run(cmd, shell=True, executable=os.environ['SHELL'])
+
+parser = argparse.ArgumentParser(description="A script that discovers available build configurations and sets up a build environment based on one of them. Run without arguments to choose one interactively.")
+parser.add_argument("--layerlist", default=defaultlayers(), help='Where to look for available layers (as written out by setup-layers script) (default is {}).'.format(defaultlayers()))
+
+subparsers = parser.add_subparsers()
+parser_list_templates = subparsers.add_parser('list', help='List available configurations')
+parser_list_templates.add_argument('-v', action='store_true',
+ help='Print detailed information and usage notes for each available build configuration.')
+parser_list_templates.set_defaults(func=list_templates)
+
+parser_setup_env = subparsers.add_parser('setup', help='Set up a build environment and open a shell session with it, ready to run builds.')
+parser_setup_env.add_argument('-c', metavar='configuration_name', help="Use a build configuration configuration_name to set up a build environment (run this script with 'list' to see what is available)")
+parser_setup_env.add_argument('-b', metavar='build_path', help="Set up a build directory in build_path (run this script with 'list -v' to see where it would be by default)")
+parser_setup_env.add_argument('--no-shell', action='store_true',
+ help='Create a build directory but do not start a shell session with the build environment from it.')
+parser_setup_env.set_defaults(func=setup_build_env)
+
+args = parser.parse_args()
+
+if 'func' in args:
+ args.func(args)
+else:
+ from argparse import Namespace
+ setup_build_env(Namespace(layerlist=args.layerlist, c=None, b=None, no_shell=False))
diff --git a/scripts/oe-setup-builddir b/scripts/oe-setup-builddir
index 30eaa8efbe..dcb384c33a 100755
--- a/scripts/oe-setup-builddir
+++ b/scripts/oe-setup-builddir
@@ -7,12 +7,14 @@
# SPDX-License-Identifier: GPL-2.0-or-later
#
-if [ -z "$BUILDDIR" ]; then
- echo >&2 "Error: The build directory (BUILDDIR) must be set!"
+die() {
+ echo Error: "$@" >&2
exit 1
-fi
+}
+
+[ -n "$BUILDDIR" ] || die "The build directory (BUILDDIR) must be set!"
-if [ "$1" = '--help' -o "$1" = '-h' ]; then
+if [ "$1" = '--help' ] || [ "$1" = '-h' ]; then
echo 'Usage: oe-setup-builddir'
echo ''
echo "OpenEmbedded setup-builddir - setup build directory $BUILDDIR"
@@ -22,33 +24,22 @@ fi
mkdir -p "$BUILDDIR/conf"
-if [ ! -d "$BUILDDIR" ]; then
- echo >&2 "Error: The builddir ($BUILDDIR) does not exist!"
- exit 1
-fi
-
-if [ ! -w "$BUILDDIR" ]; then
- echo >&2 "Error: Cannot write to $BUILDDIR, perhaps try sourcing with a writable path? i.e. . oe-init-build-env ~/my-build"
- exit 1
-fi
+[ -d "$BUILDDIR" ] || die "The build directory ($BUILDDIR) does not exist!"
+[ -w "$BUILDDIR" ] ||
+ die "Cannot write to $BUILDDIR, perhaps try sourcing with a writable path? i.e. . oe-init-build-env ~/my-build"
# Attempting removal of sticky,setuid bits from BUILDDIR, BUILDDIR/conf
chmod -st "$BUILDDIR" 2>/dev/null || echo "WARNING: unable to chmod $BUILDDIR"
chmod -st "$BUILDDIR/conf" 2>/dev/null || echo "WARNING: unable to chmod $BUILDDIR/conf"
-cd "$BUILDDIR"
+cd "$BUILDDIR" || die "Failed to change directory to $BUILDDIR!"
-if [ -f "$BUILDDIR/conf/templateconf.cfg" ]; then
- TEMPLATECONF=$(cat "$BUILDDIR/conf/templateconf.cfg")
-fi
-
-. $OEROOT/.templateconf
+. "$OEROOT/.templateconf"
-if [ ! -f "$BUILDDIR/conf/templateconf.cfg" ]; then
- echo "$TEMPLATECONF" >"$BUILDDIR/conf/templateconf.cfg"
-fi
+# Keep the original TEMPLATECONF before possibly prefixing it with $OEROOT below.
+ORG_TEMPLATECONF=$TEMPLATECONF
-#
+#
# $TEMPLATECONF can point to a directory for the template local.conf & bblayers.conf
#
if [ -n "$TEMPLATECONF" ]; then
@@ -57,73 +48,94 @@ if [ -n "$TEMPLATECONF" ]; then
if [ -d "$OEROOT/$TEMPLATECONF" ]; then
TEMPLATECONF="$OEROOT/$TEMPLATECONF"
fi
- if [ ! -d "$TEMPLATECONF" ]; then
- echo >&2 "Error: TEMPLATECONF value points to nonexistent directory '$TEMPLATECONF'"
- exit 1
- fi
+ [ -d "$TEMPLATECONF" ] ||
+ die "TEMPLATECONF value points to nonexistent directory '$TEMPLATECONF'"
+ fi
+ templatesdir=$(python3 -c "import sys; print(sys.argv[1].strip('/').split('/')[-2])" "$TEMPLATECONF")
+ if [ "$templatesdir" != templates ] || [ ! -f "$TEMPLATECONF/../../layer.conf" ]; then
+ die "TEMPLATECONF value (which is $TEMPLATECONF) must point to meta-some-layer/conf/templates/template-name"
fi
OECORELAYERCONF="$TEMPLATECONF/bblayers.conf.sample"
OECORELOCALCONF="$TEMPLATECONF/local.conf.sample"
+ OECORESUMMARYCONF="$TEMPLATECONF/conf-summary.txt"
OECORENOTESCONF="$TEMPLATECONF/conf-notes.txt"
fi
unset SHOWYPDOC
if [ -z "$OECORELOCALCONF" ]; then
- OECORELOCALCONF="$OEROOT/meta/conf/local.conf.sample"
+ OECORELOCALCONF="$OEROOT/meta/conf/templates/default/local.conf.sample"
fi
if [ ! -r "$BUILDDIR/conf/local.conf" ]; then
cat <<EOM
You had no conf/local.conf file. This configuration file has therefore been
-created for you with some default values. You may wish to edit it to, for
-example, select a different MACHINE (target hardware). See conf/local.conf
-for more information as common configuration options are commented.
+created for you from $OECORELOCALCONF
+You may wish to edit it to, for example, select a different MACHINE (target
+hardware).
EOM
- cp -f $OECORELOCALCONF "$BUILDDIR/conf/local.conf"
+ cp -f "$OECORELOCALCONF" "$BUILDDIR/conf/local.conf"
SHOWYPDOC=yes
fi
if [ -z "$OECORELAYERCONF" ]; then
- OECORELAYERCONF="$OEROOT/meta/conf/bblayers.conf.sample"
+ OECORELAYERCONF="$OEROOT/meta/conf/templates/default/bblayers.conf.sample"
fi
if [ ! -r "$BUILDDIR/conf/bblayers.conf" ]; then
cat <<EOM
You had no conf/bblayers.conf file. This configuration file has therefore been
-created for you with some default values. To add additional metadata layers
-into your configuration please add entries to conf/bblayers.conf.
+created for you from $OECORELAYERCONF
+To add additional metadata layers into your configuration please add entries
+to conf/bblayers.conf.
EOM
- # Put the abosolute path to the layers in bblayers.conf so we can run
- # bitbake without the init script after the first run
- # ##COREBASE## is deprecated as it's meaning was inconsistent, but continue
+ # Put the absolute path to the layers in bblayers.conf so we can run
+ # bitbake without the init script after the first run.
+ # ##COREBASE## is deprecated as its meaning was inconsistent, but continue
# to replace it for compatibility.
sed -e "s|##OEROOT##|$OEROOT|g" \
-e "s|##COREBASE##|$OEROOT|g" \
- $OECORELAYERCONF > "$BUILDDIR/conf/bblayers.conf"
+ "$OECORELAYERCONF" > "$BUILDDIR/conf/bblayers.conf"
SHOWYPDOC=yes
fi
+if [ -z "$OECORESUMMARYCONF" ]; then
+ OECORESUMMARYCONF="$OEROOT/meta/conf/templates/default/conf-summary.txt"
+fi
+if [ ! -r "$BUILDDIR/conf/conf-summary.txt" ]; then
+ [ ! -r "$OECORESUMMARYCONF" ] || cp "$OECORESUMMARYCONF" "$BUILDDIR/conf/conf-summary.txt"
+fi
+
+if [ -z "$OECORENOTESCONF" ]; then
+ OECORENOTESCONF="$OEROOT/meta/conf/templates/default/conf-notes.txt"
+fi
+if [ ! -r "$BUILDDIR/conf/conf-notes.txt" ]; then
+ [ ! -r "$OECORENOTESCONF" ] || cp "$OECORENOTESCONF" "$BUILDDIR/conf/conf-notes.txt"
+fi
+
# Prevent disturbing a new GIT clone in same console
unset OECORELOCALCONF
unset OECORELAYERCONF
+unset OECORESUMMARYCONF
+unset OECORENOTESCONF
# Ending the first-time run message. Show the YP Documentation banner.
-if [ ! -z "$SHOWYPDOC" ]; then
+if [ -n "$SHOWYPDOC" ]; then
cat <<EOM
The Yocto Project has extensive documentation about OE including a reference
manual which can be found at:
- http://yoctoproject.org/documentation
+ https://docs.yoctoproject.org
-For more information about OpenEmbedded see their website:
- http://www.openembedded.org/
+For more information about OpenEmbedded see the website:
+ https://www.openembedded.org/
EOM
# unset SHOWYPDOC
fi
-if [ -z "$OECORENOTESCONF" ]; then
- OECORENOTESCONF="$OEROOT/meta/conf/conf-notes.txt"
+[ ! -r "$BUILDDIR/conf/conf-summary.txt" ] || cat "$BUILDDIR/conf/conf-summary.txt"
+[ ! -r "$BUILDDIR/conf/conf-notes.txt" ] || cat "$BUILDDIR/conf/conf-notes.txt"
+
+if [ ! -f "$BUILDDIR/conf/templateconf.cfg" ]; then
+ echo "$ORG_TEMPLATECONF" >"$BUILDDIR/conf/templateconf.cfg"
fi
-[ ! -r "$OECORENOTESCONF" ] || cat $OECORENOTESCONF
-unset OECORENOTESCONF
diff --git a/scripts/oe-setup-layers b/scripts/oe-setup-layers
new file mode 100755
index 0000000000..6fbfefd656
--- /dev/null
+++ b/scripts/oe-setup-layers
@@ -0,0 +1,146 @@
+#!/usr/bin/env python3
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
+# This file was copied from poky(or oe-core)/scripts/oe-setup-layers by running
+#
+# bitbake-layers create-layers-setup destdir
+#
+# It is recommended that you do not modify this file directly, but rather re-run the above command to get the freshest upstream copy.
+#
+# This script is idempotent. Subsequent runs only change what is necessary to
+# ensure your layers match your configuration.
+
+import argparse
+import json
+import os
+import subprocess
+
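+# The _is_repo_* helpers below make the checkout idempotent: each git
+# operation is only performed when the repository is not already in the
+# desired state.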
+def _is_repo_git_repo(repodir):
+ try:
+ curr_toplevel = subprocess.check_output("git -C %s rev-parse --show-toplevel" % repodir, shell=True, stderr=subprocess.DEVNULL)
+ if curr_toplevel.strip().decode("utf-8") == repodir:
+ return True
+ except subprocess.CalledProcessError:
+ pass
+ return False
+
+def _is_repo_at_rev(repodir, rev):
+ try:
+ curr_rev = subprocess.check_output("git -C %s rev-parse HEAD" % repodir, shell=True, stderr=subprocess.DEVNULL)
+ if curr_rev.strip().decode("utf-8") == rev:
+ return True
+ except subprocess.CalledProcessError:
+ pass
+ return False
+
+def _is_repo_at_remote_uri(repodir, remote, uri):
+ try:
+ curr_uri = subprocess.check_output("git -C %s remote get-url %s" % (repodir, remote), shell=True, stderr=subprocess.DEVNULL)
+ if curr_uri.strip().decode("utf-8") == uri:
+ return True
+ except subprocess.CalledProcessError:
+ pass
+ return False
+
+def _contains_submodules(repodir):
+ return os.path.exists(os.path.join(repodir,".gitmodules"))
+
+def _write_layer_list(dest, repodirs):
+ layers = []
+ for r in repodirs:
+ for root, dirs, files in os.walk(r):
+ if os.path.basename(root) == 'conf' and 'layer.conf' in files:
+ layers.append(os.path.relpath(os.path.dirname(root), dest))
+ layers_f = os.path.join(dest, ".oe-layers.json")
+ print("Writing list of layers into {}".format(layers_f))
+ with open(layers_f, 'w') as f:
+ json.dump({"version":"1.0","layers":layers}, f, sort_keys=True, indent=4)
+
+def _do_checkout(args, json):
+ repos = json['sources']
+ repodirs = []
+ oesetupbuild = None
+ for r_name in repos:
+ r_data = repos[r_name]
+ repodir = os.path.abspath(os.path.join(args['destdir'], r_data['path']))
+ repodirs.append(repodir)
+
+ if 'contains_this_file' in r_data.keys():
+ force_arg = 'force_bootstraplayer_checkout'
+ if not args[force_arg]:
+ print('Note: not checking out source {repo}, use {repoflag} to override.'.format(repo=r_name, repoflag='--force-bootstraplayer-checkout'))
+ continue
+ r_remote = r_data['git-remote']
+ rev = r_remote['rev']
+ desc = r_remote['describe']
+ if not desc:
+ desc = rev[:10]
+ branch = r_remote['branch']
+ remotes = r_remote['remotes']
+
+ print('\nSetting up source {}, revision {}, branch {}'.format(r_name, desc, branch))
+ if not _is_repo_git_repo(repodir):
+ cmd = 'git init -q {}'.format(repodir)
+ print("Running '{}'".format(cmd))
+ subprocess.check_output(cmd, shell=True)
+
+ for remote in remotes:
+ if not _is_repo_at_remote_uri(repodir, remote, remotes[remote]['uri']):
+ cmd = "git remote remove {} > /dev/null 2>&1; git remote add {} {}".format(remote, remote, remotes[remote]['uri'])
+ print("Running '{}' in {}".format(cmd, repodir))
+ subprocess.check_output(cmd, shell=True, cwd=repodir)
+
+ cmd = "git fetch -q {} || true".format(remote)
+ print("Running '{}' in {}".format(cmd, repodir))
+ subprocess.check_output(cmd, shell=True, cwd=repodir)
+
+ if not _is_repo_at_rev(repodir, rev):
+ cmd = "git fetch -q --all || true"
+ print("Running '{}' in {}".format(cmd, repodir))
+ subprocess.check_output(cmd, shell=True, cwd=repodir)
+
+ cmd = 'git checkout -q {}'.format(rev)
+ print("Running '{}' in {}".format(cmd, repodir))
+ subprocess.check_output(cmd, shell=True, cwd=repodir)
+
+ if _contains_submodules(repodir):
+ print("Repo {} contains submodules, use 'git submodule update' to ensure they are up to date".format(repodir))
+ if os.path.exists(os.path.join(repodir, 'scripts/oe-setup-build')):
+ oesetupbuild = os.path.join(repodir, 'scripts/oe-setup-build')
+
+ _write_layer_list(args['destdir'], repodirs)
+
+ if oesetupbuild:
+ oesetupbuild_symlink = os.path.join(args['destdir'], 'setup-build')
+ if os.path.exists(oesetupbuild_symlink):
+ os.remove(oesetupbuild_symlink)
+ os.symlink(os.path.relpath(oesetupbuild,args['destdir']),oesetupbuild_symlink)
+ print("\nRun '{}' to list available build configuration templates and set up a build from one of them.".format(oesetupbuild_symlink))
+
+parser = argparse.ArgumentParser(description="A self contained python script that fetches all the needed layers and sets them to correct revisions using data in a json format from a separate file. The json data can be created from an active build directory with 'bitbake-layers create-layers-setup destdir' and there's a sample file and a schema in meta/files/")
+
+parser.add_argument('--force-bootstraplayer-checkout', action='store_true',
+ help='Force the checkout of the layer containing this file (by default it is presumed that as this script is in it, the layer is already in place).')
+
+try:
+ defaultdest = os.path.dirname(subprocess.check_output('git rev-parse --show-toplevel', universal_newlines=True, shell=True, cwd=os.path.dirname(__file__)))
+except subprocess.CalledProcessError as e:
+ defaultdest = os.path.abspath(".")
+
+parser.add_argument('--destdir', default=defaultdest, help='Where to check out the layers (default is {defaultdest}).'.format(defaultdest=defaultdest))
+parser.add_argument('--jsondata', default=__file__+".json", help='File containing the layer data in json format (default is {defaultjson}).'.format(defaultjson=__file__+".json"))
+
+args = parser.parse_args()
+
+with open(args.jsondata) as f:
+ json_f = json.load(f)
+
+supported_versions = ["1.0"]
+if json_f["version"] not in supported_versions:
+ raise Exception("File {} has version {}, which is not in supported versions: {}".format(args.jsondata, json_f["version"], supported_versions))
+
+_do_checkout(vars(args), json_f)
diff --git a/scripts/oe-setup-vscode b/scripts/oe-setup-vscode
new file mode 100755
index 0000000000..b8642780d5
--- /dev/null
+++ b/scripts/oe-setup-vscode
@@ -0,0 +1,93 @@
+#!/bin/sh
+
+usage() {
+ echo "$0 <OEINIT> <BUILDDIR>"
+ echo " OEINIT: path to directory where the .vscode folder is"
+ echo " BUILDDIR: directory passed to the oe-init-setup-env script"
+}
+
+if [ "$#" -ne 2 ]; then
+ usage
+ exit 1
+fi
+
+OEINIT=$(readlink -f "$1")
+BUILDDIR=$(readlink -f "$2")
+VSCODEDIR=$OEINIT/.vscode
+
+if [ ! -d "$OEINIT" ] || [ ! -d "$BUILDDIR" ]; then
+ echo "$OEINIT and/or $BUILDDIR directories are not present."
+ exit 1
+fi
+
+VSCODE_SETTINGS=$VSCODEDIR/settings.json
+ws_builddir="$(echo "$BUILDDIR" | sed -e "s|$OEINIT|\${workspaceFolder}|g")"
+
+# If BUILDDIR is in scope of VSCode ensure VSCode does not try to index the build folder.
+# This would lead to a busy CPU and finally to an OOM exception.
+mkdir -p "$VSCODEDIR"
+cat <<EOMsettings > "$VSCODE_SETTINGS"
+{
+ "bitbake.pathToBitbakeFolder": "\${workspaceFolder}/bitbake",
+ "bitbake.pathToEnvScript": "\${workspaceFolder}/oe-init-build-env",
+ "bitbake.pathToBuildFolder": "$ws_builddir",
+ "bitbake.commandWrapper": "",
+ "bitbake.workingDirectory": "\${workspaceFolder}",
+ "files.exclude": {
+ "**/.git/**": true,
+ "**/_build/**": true,
+ "**/buildhistory/**": true,
+ "**/cache/**": true,
+ "**/downloads/**": true,
+ "**/node_modules/**": true,
+ "**/oe-logs/**": true,
+ "**/oe-workdir/**": true,
+ "**/sstate-cache/**": true,
+ "**/tmp*/**": true,
+ "**/workspace/attic/**": true,
+ "**/workspace/sources/**": true
+ },
+ "files.watcherExclude": {
+ "**/.git/**": true,
+ "**/_build/**": true,
+ "**/buildhistory/**": true,
+ "**/cache/**": true,
+ "**/downloads/**": true,
+ "**/node_modules/**": true,
+ "**/oe-logs/**": true,
+ "**/oe-workdir/**": true,
+ "**/sstate-cache/**": true,
+ "**/tmp*/**": true,
+ "**/workspace/attic/**": true,
+ "**/workspace/sources/**": true
+ },
+ "python.analysis.exclude": [
+ "**/_build/**",
+ "**/.git/**",
+ "**/buildhistory/**",
+ "**/cache/**",
+ "**/downloads/**",
+ "**/node_modules/**",
+ "**/oe-logs/**",
+ "**/oe-workdir/**",
+ "**/sstate-cache/**",
+ "**/tmp*/**",
+ "**/workspace/attic/**",
+ "**/workspace/sources/**"
+ ]
+}
+EOMsettings
+
+
+# Ask the user if the yocto-bitbake extension should be installed
+VSCODE_EXTENSIONS=$VSCODEDIR/extensions.json
+cat <<EOMextensions > "$VSCODE_EXTENSIONS"
+{
+ "recommendations": [
+ "yocto-project.yocto-bitbake"
+ ]
+}
+EOMextensions
+
+echo "You had no $VSCODEDIR configuration."
+echo "These configuration files have therefore been created for you."
diff --git a/scripts/oe-time-dd-test.sh b/scripts/oe-time-dd-test.sh
new file mode 100755
index 0000000000..81748b8c9e
--- /dev/null
+++ b/scripts/oe-time-dd-test.sh
@@ -0,0 +1,106 @@
+#!/bin/bash
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+# oe-time-dd-test records how much time it takes to
+# write <count> number of kilobytes to the filesystem.
+# It also records the number of processes that are in
+# running (R), uninterruptible sleep (D) and interruptible
+# sleep (S) state from the output of "top" command.
+# The purpose of this script is to find which part of
+# the build system puts stress on the filesystem I/O and
+# to log all the processes.
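+#
+# Example (an illustrative invocation, not output from a real run):
+# oe-time-dd-test.sh -c 1024 -t 2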
+usage() {
+ echo "$0 is used to detect i/o latency and runs commands to display host information."
+ echo "The following commands are run in order:"
+ echo "1) top -c -b -n1 -w 512"
+ echo "2) iostat -y -z -x 5 1"
+ echo "3) tail -30 tmp*/log/cooker/*/console-latest.log to gather cooker log."
+ echo " "
+ echo "Options:"
+ echo "-c | --count <amount> dd (transfer) <amount> KiB of data within specified timeout to detect latency."
+ echo " Must enable -t option."
+ echo "-t | --timeout <time> timeout in seconds for the <count> amount of data to be transferred."
+ echo "-l | --log-only run the commands without performing the data transfer."
+ echo "-h | --help show help"
+
+}
+
+run_cmds() {
+ echo "start: top output"
+ top -c -b -n1 -w 512
+ echo "end: top output"
+ echo "start: iostat"
+ iostat -y -z -x 5 1
+ echo "end: iostat"
+ echo "start: cooker log"
+ tail -30 tmp*/log/cooker/*/console-latest.log
+ echo "end: cooker log"
+}
+
+if [ $# -lt 1 ]; then
+ usage
+ exit 1
+fi
+
+re_c='^[0-9]+$'
+#re_t='^[0-9]+([.][0-9]+)?$'
+
+while [[ $# -gt 0 ]]; do
+ key="$1"
+
+ case $key in
+ -c|--count)
+ COUNT=$2
+ shift
+ shift
+ if ! [[ $COUNT =~ $re_c ]] || [[ $COUNT -le 0 ]] ; then
+ usage
+ exit 1
+ fi
+ ;;
+ -t|--timeout)
+ TIMEOUT=$2
+ shift
+ shift
+ if ! [[ $TIMEOUT =~ $re_c ]] || [[ $TIMEOUT -le 0 ]] ; then
+ usage
+ exit 1
+ fi
+ ;;
+ -l|--log-only)
+ LOG_ONLY="true"
+            shift # --log-only takes no value, so only shift past the flag itself
+ ;;
+ -h|--help)
+ usage
+ exit 0
+ ;;
+ *)
+ usage
+ exit 1
+ ;;
+ esac
+done
+
+
+if [ "$LOG_ONLY" = "true" ] ; then
+ uptime
+ run_cmds
+ exit
+fi
+
+if [ -z "${TIMEOUT+x}" ] || [ -z "${COUNT+x}" ] ; then
+ usage
+ exit 1
+fi
+
+uptime
+echo "Timeout used: ${TIMEOUT}"
+timeout ${TIMEOUT} dd if=/dev/zero of=oe-time-dd-test.dat bs=1024 count=${COUNT} conv=fsync
+if [ $? -ne 0 ]; then
+ run_cmds
+fi
diff --git a/scripts/oe-trim-schemas b/scripts/oe-trim-schemas
index bf77c8cf64..e3b26e273e 100755
--- a/scripts/oe-trim-schemas
+++ b/scripts/oe-trim-schemas
@@ -1,5 +1,7 @@
#! /usr/bin/env python3
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
diff --git a/scripts/oepydevshell-internal.py b/scripts/oepydevshell-internal.py
index 96c078ef3d..3bf7df1114 100755
--- a/scripts/oepydevshell-internal.py
+++ b/scripts/oepydevshell-internal.py
@@ -1,5 +1,7 @@
#!/usr/bin/env python3
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
@@ -43,7 +45,7 @@ nonblockingfd(pty)
nonblockingfd(sys.stdin)
-histfile = os.path.expanduser("~/.oedevpyshell-history")
+histfile = os.path.expanduser("~/.oepydevshell-history")
readline.parse_and_bind("tab: complete")
try:
readline.read_history_file(histfile)
diff --git a/scripts/opkg-query-helper.py b/scripts/opkg-query-helper.py
index bc3ab43823..084d9ef684 100755
--- a/scripts/opkg-query-helper.py
+++ b/scripts/opkg-query-helper.py
@@ -29,7 +29,7 @@ for arg in sys.argv[1:]:
args.append(arg)
# Regex for removing version specs after dependency items
-verregex = re.compile(' \([=<>]* [^ )]*\)')
+verregex = re.compile(r' \([=<>]* [^ )]*\)')
pkg = ""
ver = ""
diff --git a/scripts/patchtest b/scripts/patchtest
new file mode 100755
index 0000000000..0be7062dc2
--- /dev/null
+++ b/scripts/patchtest
@@ -0,0 +1,232 @@
+#!/usr/bin/env python3
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# patchtest: execute all unittest test cases discovered for a single patch
+#
+# Copyright (C) 2016 Intel Corporation
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import sys
+import os
+import unittest
+import logging
+import traceback
+import json
+
+# Include current path so test cases can see it
+sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)))
+
+# Include patchtest library
+sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), '../meta/lib/patchtest'))
+
+from data import PatchTestInput
+from repo import PatchTestRepo
+
+import utils
+logger = utils.logger_create('patchtest')
+info = logger.info
+error = logger.error
+
+import repo
+
+def getResult(patch, mergepatch, logfile=None):
+
+ class PatchTestResult(unittest.TextTestResult):
+ """ Patchtest TextTestResult """
+ shouldStop = True
+ longMessage = False
+
+ success = 'PASS'
+ fail = 'FAIL'
+ skip = 'SKIP'
+
+ def startTestRun(self):
+ # let's create the repo already, it can be used later on
+ repoargs = {
+ 'repodir': PatchTestInput.repodir,
+ 'commit' : PatchTestInput.basecommit,
+ 'branch' : PatchTestInput.basebranch,
+ 'patch' : patch,
+ }
+
+ self.repo_error = False
+ self.test_error = False
+ self.test_failure = False
+
+ try:
+ self.repo = PatchTestInput.repo = PatchTestRepo(**repoargs)
+ except:
+ logger.error(traceback.print_exc())
+ self.repo_error = True
+ self.stop()
+ return
+
+ if mergepatch:
+ self.repo.merge()
+
+ def addError(self, test, err):
+ self.test_error = True
+ (ty, va, trace) = err
+ logger.error(traceback.print_exc())
+
+ def addFailure(self, test, err):
+ test_description = test.id().split('.')[-1].replace('_', ' ').replace("cve", "CVE").replace("signed off by",
+ "Signed-off-by").replace("upstream status",
+ "Upstream-Status").replace("non auh",
+ "non-AUH").replace("presence format", "presence")
+ self.test_failure = True
+ fail_str = '{}: {}: {} ({})'.format(self.fail,
+ test_description, json.loads(str(err[1]))["issue"],
+ test.id())
+ print(fail_str)
+ if logfile:
+ with open(logfile, "a") as f:
+ f.write(fail_str + "\n")
+
+ def addSuccess(self, test):
+ test_description = test.id().split('.')[-1].replace('_', ' ').replace("cve", "CVE").replace("signed off by",
+ "Signed-off-by").replace("upstream status",
+ "Upstream-Status").replace("non auh",
+ "non-AUH").replace("presence format", "presence")
+ success_str = '{}: {} ({})'.format(self.success,
+ test_description, test.id())
+ print(success_str)
+ if logfile:
+ with open(logfile, "a") as f:
+ f.write(success_str + "\n")
+
+ def addSkip(self, test, reason):
+ test_description = test.id().split('.')[-1].replace('_', ' ').replace("cve", "CVE").replace("signed off by",
+ "Signed-off-by").replace("upstream status",
+ "Upstream-Status").replace("non auh",
+ "non-AUH").replace("presence format", "presence")
+ skip_str = '{}: {}: {} ({})'.format(self.skip,
+ test_description, json.loads(str(reason))["issue"],
+ test.id())
+ print(skip_str)
+ if logfile:
+ with open(logfile, "a") as f:
+ f.write(skip_str + "\n")
+
+ def stopTestRun(self):
+
+ # in case there was an error on repo object creation, just return
+ if self.repo_error:
+ return
+
+ self.repo.clean()
+
+ return PatchTestResult
+
+def _runner(resultklass, prefix=None):
+ # load test with the corresponding prefix
+ loader = unittest.TestLoader()
+ if prefix:
+ loader.testMethodPrefix = prefix
+
+ # create the suite with discovered tests and the corresponding runner
+ suite = loader.discover(start_dir=PatchTestInput.testdir, pattern=PatchTestInput.pattern, top_level_dir=PatchTestInput.topdir)
+ ntc = suite.countTestCases()
+
+ # if there are no test cases, just quit
+ if not ntc:
+ return 2
+ runner = unittest.TextTestRunner(resultclass=resultklass, verbosity=0)
+
+ try:
+ result = runner.run(suite)
+ except:
+ logger.error(traceback.print_exc())
+ logger.error('patchtest: something went wrong')
+ return 1
+ if result.test_failure or result.test_error:
+ return 1
+
+ return 0
+
+def run(patch, logfile=None):
+ """ Load, setup and run pre and post-merge tests """
+ # Get the result class and install the control-c handler
+ unittest.installHandler()
+
+ # run pre-merge tests, meaning those methods with 'pretest' as prefix
+ premerge_resultklass = getResult(patch, False, logfile)
+ premerge_result = _runner(premerge_resultklass, 'pretest')
+
+ # run post-merge tests, meaning those methods with 'test' as prefix
+ postmerge_resultklass = getResult(patch, True, logfile)
+ postmerge_result = _runner(postmerge_resultklass, 'test')
+
+ print('----------------------------------------------------------------------\n')
+ if premerge_result == 2 and postmerge_result == 2:
+ logger.error('patchtest: No test cases found - did you specify the correct suite directory?')
+ if premerge_result == 1 or postmerge_result == 1:
+ logger.error('WARNING: patchtest: At least one patchtest caused a failure or an error - please check https://wiki.yoctoproject.org/wiki/Patchtest for further guidance')
+ else:
+ logger.info('OK: patchtest: All patchtests passed')
+ print('----------------------------------------------------------------------\n')
+ return premerge_result or postmerge_result
+
+def main():
+ tmp_patch = False
+ patch_path = PatchTestInput.patch_path
+ log_results = PatchTestInput.log_results
+ log_path = None
+ patch_list = None
+
+ git_status = os.popen("(cd %s && git status)" % PatchTestInput.repodir).read()
+ status_matches = ["Changes not staged for commit", "Changes to be committed"]
+ if any([match in git_status for match in status_matches]):
+ logger.error("patchtest: there are uncommitted changes in the target repo that would be overwritten. Please commit or restore them before running patchtest")
+ return 1
+
+ if os.path.isdir(patch_path):
+ patch_list = [os.path.join(patch_path, filename) for filename in sorted(os.listdir(patch_path))]
+ else:
+ patch_list = [patch_path]
+
+ for patch in patch_list:
+ if os.path.getsize(patch) == 0:
+ logger.error('patchtest: patch is empty')
+ return 1
+
+ logger.info('Testing patch %s' % patch)
+
+ if log_results:
+ log_path = patch + ".testresult"
+ with open(log_path, "a") as f:
+ f.write("Patchtest results for patch '%s':\n\n" % patch)
+
+ try:
+ if log_path:
+ run(patch, log_path)
+ else:
+ run(patch)
+ finally:
+ if tmp_patch:
+ os.remove(patch)
+
+if __name__ == '__main__':
+ ret = 1
+
+ # Parse the command line arguments and store it on the PatchTestInput namespace
+ PatchTestInput.set_namespace()
+
+ # set debugging level
+ if PatchTestInput.debug:
+ logger.setLevel(logging.DEBUG)
+
+ # if topdir not define, default it to testdir
+ if not PatchTestInput.topdir:
+ PatchTestInput.topdir = PatchTestInput.testdir
+
+ try:
+ ret = main()
+ except Exception:
+ import traceback
+ traceback.print_exc(5)
+
+ sys.exit(ret)
diff --git a/scripts/patchtest-get-branch b/scripts/patchtest-get-branch
new file mode 100755
index 0000000000..c6e242f8b6
--- /dev/null
+++ b/scripts/patchtest-get-branch
@@ -0,0 +1,81 @@
+#!/usr/bin/env python3
+
+# Get target branch from the corresponding mbox
+#
+# NOTE: this script was based on patches coming to the openembedded-core
+# where target branch is defined inside brackets as subject prefix
+# i.e. [master], [rocko], etc.
+#
+# Copyright (C) 2016 Intel Corporation
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import mailbox
+import argparse
+import re
+import git
+
+re_prefix = re.compile(r"(\[.*\])", re.DOTALL)
+
+def get_branch(filepath_repo, filepath_mbox, default_branch):
+ branch = None
+
+ # get all remotes branches
+ gitbranches = git.Git(filepath_repo).branch('-a').splitlines()
+
+ # from gitbranches, just get the names
+ branches = [b.split('/')[-1] for b in gitbranches]
+
+ subject = ' '.join(mailbox.mbox(filepath_mbox)[0]['subject'].splitlines())
+
+ # we expect that patches will have somewhere between one and three
+ # consecutive sets of square brackets with tokens inside, e.g.:
+ # 1. [PATCH]
+ # 2. [OE-core][PATCH]
+ # 3. [OE-core][kirkstone][PATCH]
+ # Some of them may also be part of a series, in which case the PATCH
+ # token will be formatted like:
+ # [PATCH 1/4]
+ # or they will be revisions to previous patches, where it will be:
+ # [PATCH v2]
+ # Or they may contain both:
+ # [PATCH v2 3/4]
+ # In any case, we want mprefix to contain all of these tokens so
+ # that we can search for branch names within them.
+ mprefix = re.findall(r'\[.*?\]', subject)
+ found_branch = None
+ if mprefix:
+ # Iterate over the tokens and compare against the branch list to
+ # figure out which one the patch is targeting
+ for token in mprefix:
+ stripped = token.lower().strip('[]')
+ if default_branch in stripped:
+ found_branch = default_branch
+ break
+ else:
+ for branch in branches:
+ # ignore branches named "core"
+ if branch != "core" and stripped.rfind(branch) != -1:
+ found_branch = token.split(' ')[0].strip('[]')
+ break
+
+ # if there's no mprefix content or no known branches were found in
+ # the tokens, assume the target is master
+ if found_branch is None:
+ found_branch = "master"
+
+ return (subject, found_branch)
+
+if __name__ == '__main__':
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument('repo', metavar='REPO', help='Main repository')
+ parser.add_argument('mbox', metavar='MBOX', help='mbox filename')
+ parser.add_argument('--default-branch', metavar='DEFAULT_BRANCH', default='master', help='Use this branch if no one is found')
+ parser.add_argument('--separator', '-s', metavar='SEPARATOR', default=' ', help='Char separator for output data')
+ args = parser.parse_args()
+
+ subject, branch = get_branch(args.repo, args.mbox, args.default_branch)
+ print("branch: %s" % branch)
+
diff --git a/scripts/patchtest-get-series b/scripts/patchtest-get-series
new file mode 100755
index 0000000000..908442089f
--- /dev/null
+++ b/scripts/patchtest-get-series
@@ -0,0 +1,115 @@
+#!/bin/bash -e
+#
+# patchtest-get-series: Download latest patch series from Patchwork
+#
+# Copyright (C) 2023 BayLibre Inc.
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+# the interval into the past which we want to check for new series, in minutes
+INTERVAL_MINUTES=30
+
+# Maximum number of series to retrieve. the Patchwork API can support up to 250
+# at once
+SERIES_LIMIT=250
+
+# Location to save patches
+DOWNLOAD_PATH="."
+
+# Name of the file to use/check as a log of previously-tested series IDs
+SERIES_TEST_LOG=".series_test.log"
+
+# Patchwork project to pull series patches from
+PROJECT="oe-core"
+
+# The Patchwork server to pull from
+SERVER="https://patchwork.yoctoproject.org/api/1.2/"
+
+help()
+{
+ echo "Usage: get-latest-series [ -i | --interval MINUTES ]
+ [ -d | --directory DIRECTORY ]
+ [ -l | --limit COUNT ]
+ [ -h | --help ]
+ [ -t | --tested-series LOGFILE]
+ [ -p | --project PROJECT ]
+ [ -s | --server SERVER ]"
+ exit 2
+}
+
+while [ "$1" != "" ]; do
+ case $1 in
+ -i|--interval)
+ INTERVAL_MINUTES=$2
+ shift 2
+ ;;
+ -l|--limit)
+ SERIES_LIMIT=$2
+ shift 2
+ ;;
+ -d|--directory)
+ DOWNLOAD_PATH=$2
+ shift 2
+ ;;
+ -p|--project)
+ PROJECT=$2
+ shift 2
+ ;;
+ -s|--server)
+ SERVER=$2
+ shift 2
+ ;;
+ -t|--tested-series)
+ SERIES_TEST_LOG=$2
+ shift 2
+ ;;
+ -h|--help)
+ help
+ ;;
+ *)
+ echo "Unknown option $1"
+ help
+ ;;
+ esac
+done
+
+# The time this script is running at
+START_TIME=$(date --date "now" +"%Y-%m-%dT%H:%M:%S")
+
+# the corresponding timestamp we want to check against for new patch series
+SERIES_CHECK_LIMIT=$(date --date "now - ${INTERVAL_MINUTES} minutes" +"%Y-%m-%dT%H:%M:%S")
+
+echo "Start time is $START_TIME"
+echo "Series check limit is $SERIES_CHECK_LIMIT"
+
+# Create DOWNLOAD_PATH if it doesn't exist
+if [ ! -d "$DOWNLOAD_PATH" ]; then
+ mkdir "${DOWNLOAD_PATH}"
+fi
+
+# Create SERIES_TEST_LOG if it doesn't exist
+if [ ! -f "$SERIES_TEST_LOG" ]; then
+ touch "${SERIES_TEST_LOG}"
+fi
+
+# Retrieve a list of series IDs from the 'git-pw series list' output. The API
+# supports a maximum of 250 results, so make sure we allow that when required
+SERIES_LIST=$(git-pw --project "${PROJECT}" --server "${SERVER}" series list --since "${SERIES_CHECK_LIMIT}" --limit "${SERIES_LIMIT}" | awk '{print $2}' | xargs | sed -e 's/[^0-9 ]//g')
+
+if [ -z "$SERIES_LIST" ]; then
+ echo "No new series for project ${PROJECT} since ${SERIES_CHECK_LIMIT}"
+ exit 0
+fi
+
+# Check each series ID
+for SERIES in $SERIES_LIST; do
+ # Download the series only if it's not found in the SERIES_TEST_LOG
+ if ! grep -w --quiet "${SERIES}" "${SERIES_TEST_LOG}"; then
+ echo "Downloading $SERIES..."
+ git-pw series download --separate "${SERIES}" "${DOWNLOAD_PATH}"
+ echo "${SERIES}" >> "${SERIES_TEST_LOG}"
+ else
+ echo "Already tested ${SERIES}. Skipping..."
+ fi
+done
diff --git a/scripts/patchtest-send-results b/scripts/patchtest-send-results
new file mode 100755
index 0000000000..8a3dadbd11
--- /dev/null
+++ b/scripts/patchtest-send-results
@@ -0,0 +1,110 @@
+#!/usr/bin/env python3
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# patchtest: execute all unittest test cases discovered for a single patch
+# Note that this script is currently under development and has been
+# hard-coded with default values for testing purposes. This script
+# should not be used without changing the default recipient, at minimum.
+#
+# Copyright (C) 2023 BayLibre Inc.
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import argparse
+import boto3
+import configparser
+import mailbox
+import os
+import re
+import sys
+
+greeting = """Thank you for your submission. Patchtest identified one
+or more issues with the patch. Please see the log below for
+more information:\n\n---\n"""
+
+suggestions = """\n---\n\nPlease address the issues identified and
+submit a new revision of the patch, or alternatively, reply to this
+email with an explanation of why the patch should be accepted. If you
+believe these results are due to an error in patchtest, please submit a
+bug at https://bugzilla.yoctoproject.org/ (use the 'Patchtest' category
+under 'Yocto Project Subprojects'). For more information on specific
+failures, see: https://wiki.yoctoproject.org/wiki/Patchtest. Thank
+you!"""
+
+def has_a_failed_test(raw_results):
+ return any(raw_result.split(':')[0] == "FAIL" for raw_result in raw_results.splitlines())
+
+parser = argparse.ArgumentParser(description="Send patchtest results to a submitter for a given patch")
+parser.add_argument("-p", "--patch", dest="patch", required=True, help="The patch file to summarize")
+parser.add_argument("-d", "--debug", dest="debug", required=False, action='store_true', help="Print raw email headers and content, but don't actually send it")
+args = parser.parse_args()
+
+if not os.path.exists(args.patch):
+ print(f"Patch '{args.patch}' not found - did you provide the right path?")
+ sys.exit(1)
+elif not os.path.exists(args.patch + ".testresult"):
+ print(f"Found patch '{args.patch}' but '{args.patch}.testresult' was not present. Have you run patchtest on the patch?")
+ sys.exit(1)
+
+result_file = args.patch + ".testresult"
+testresult = None
+
+with open(result_file, "r") as f:
+ testresult = f.read()
+
+# we know these patch files will only contain a single patch, so only
+# worry about the first element for getting the subject
+mbox = mailbox.mbox(args.patch)
+mbox_subject = mbox[0]['subject']
+subject_line = f"Patchtest results for {mbox_subject}"
+
+# extract the submitter email address and use it as the reply address
+# for the results
+reply_address = mbox[0]['from']
+
+# extract the message ID and use that as the in-reply-to address
+# TODO: This will need to change again when patchtest can handle a whole
+# series at once
+in_reply_to = mbox[0]['Message-ID']
+
+# the address the results email is sent from
+from_address = "patchtest@automation.yoctoproject.org"
+
+# mailing list to CC
+cc_address = "openembedded-core@lists.openembedded.org"
+
+if has_a_failed_test(testresult):
+ reply_contents = None
+ if len(max(open(result_file, 'r'), key=len)) > 220:
+ warning = "Tests failed for the patch, but the results log could not be processed due to excessive result line length."
+ reply_contents = greeting + warning + suggestions
+ else:
+ reply_contents = greeting + testresult + suggestions
+
+ ses_client = boto3.client('ses', region_name='us-west-2')
+
+ # Construct the headers for the email. We only want to reply
+ # directly to the tested patch, so make In-Reply-To and References
+ # the same value.
+ raw_data = 'From: ' + from_address + '\nTo: ' + reply_address + \
+ '\nCC: ' + cc_address + '\nSubject: ' + subject_line + \
+ '\nIn-Reply-To: ' + in_reply_to + \
+ '\nReferences: ' + in_reply_to + \
+ '\nMIME-Version: 1.0' + \
+ '\nContent-type: Multipart/Mixed;boundary="NextPart"\n\n--NextPart\nContent-Type: text/plain\n\n' + \
+ reply_contents + '\n\n--NextPart--'
+
+ if args.debug:
+ print(f"RawMessage: \n\n{raw_data}")
+ else:
+ response = ses_client.send_raw_email(
+ Source="patchtest@automation.yoctoproject.org",
+ RawMessage={
+ "Data": raw_data,
+ },
+ )
+
+else:
+ print(f"No failures identified for {args.patch}.")
diff --git a/scripts/patchtest-setup-sharedir b/scripts/patchtest-setup-sharedir
new file mode 100755
index 0000000000..277677e527
--- /dev/null
+++ b/scripts/patchtest-setup-sharedir
@@ -0,0 +1,83 @@
+#!/bin/bash -e
+#
+# patchtest-setup-sharedir: Setup a directory for storing mboxes and
+# repositories to be shared with the guest machine, including updates to
+# the repos if the directory already exists
+#
+# Copyright (C) 2023 BayLibre Inc.
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+# poky repository
+POKY_REPO="https://git.yoctoproject.org/poky"
+
+# patchtest repository
+PATCHTEST_REPO="https://git.yoctoproject.org/patchtest"
+
+# the name of the directory
+SHAREDIR="patchtest_share"
+
+help()
+{
+ echo "Usage: patchtest-setup-sharedir [ -d | --directory SHAREDIR ]
+ [ -p | --patchtest PATCHTEST_REPO ]
+ [ -y | --poky POKY_REPO ]"
+ exit 2
+}
+
+while [ "$1" != "" ]; do
+ case $1 in
+ -d|--directory)
+ SHAREDIR=$2
+ shift 2
+ ;;
+ -p|--patchtest)
+ PATCHTEST_REPO=$2
+ shift 2
+ ;;
+ -y|--poky)
+ POKY_REPO=$2
+ shift 2
+ ;;
+ -h|--help)
+ help
+ ;;
+ *)
+ echo "Unknown option $1"
+ help
+ ;;
+ esac
+done
+
+# define MBOX_DIR where the patch series will be stored by
+# get-latest-series
+MBOX_DIR="${SHAREDIR}/mboxes"
+
+# Create SHAREDIR if it doesn't exist
+if [ ! -d "$SHAREDIR" ]; then
+ mkdir -p "${SHAREDIR}"
+ echo "Created ${SHAREDIR}"
+fi
+
+# Create the mboxes directory if it doesn't exist
+if [ ! -d "$MBOX_DIR" ]; then
+ mkdir -p "${MBOX_DIR}"
+ echo "Created ${MBOX_DIR}"
+fi
+
+# clone poky if it's not already present; otherwise, update it
+if [ ! -d "$POKY_REPO" ]; then
+ BASENAME=$(basename ${POKY_REPO})
+ git clone "${POKY_REPO}" "${SHAREDIR}/${BASENAME}"
+else
+ (cd "${SHAREDIR}/$BASENAME" && git pull)
+fi
+
+# clone patchtest if it's not already present; otherwise, update it
+if [ ! -d "$PATCHTEST_REPO" ]; then
+ BASENAME=$(basename ${PATCHTEST_REPO})
+ git clone "${PATCHTEST_REPO}" "${SHAREDIR}/${BASENAME}"
+else
+ (cd "${SHAREDIR}/$BASENAME" && git pull)
+fi
diff --git a/scripts/patchtest.README b/scripts/patchtest.README
new file mode 100644
index 0000000000..76b5fcdb6d
--- /dev/null
+++ b/scripts/patchtest.README
@@ -0,0 +1,153 @@
+# Patchtest
+
+## Introduction
+
+Patchtest is a test framework for community patches based on the standard
+unittest Python module. As input, it needs three elements to work properly:
+a patch in mbox format (either created with `git format-patch` or fetched
+from Patchwork), a test suite and a target repository.
+
+The first test suite intended for use with patchtest is found in the
+openembedded-core repository [1] and targets patches submitted to the
+openembedded-core mailing list [2]. This suite is also intended as a
+baseline for development of similar suites for other layers as needed.
+
+Patchtest can run on either a host or a guest machine, depending on the
+environment in which execution needs to happen. If you plan to test your own
+patches (a good practice before they are sent to the mailing list), the easiest
+way is to install and execute patchtest on your local host; on the other hand,
+if automated testing is intended, the guest method is strongly recommended. The
+guest method requires the use of the patchtest layer, in addition to the tools
+available in oe-core: https://git.yoctoproject.org/patchtest/
+
+## Installation
+
+As patchtest is a tool for use with the Yocto Project, the [quick start guide](https://docs.yoctoproject.org/brief-yoctoprojectqs/index.html)
+describes the necessary prerequisites for a basic project. In addition,
+patchtest relies on the following Python modules:
+
+- boto3 (for sending automated results emails only)
+- git-pw>=2.5.0
+- jinja2
+- pylint
+- pyparsing>=3.0.9
+- unidiff
+
+These can be installed by running `pip install -r
+meta/lib/patchtest/requirements.txt`. Note that git-pw is not
+automatically added to the user's PATH; by default, it is installed at
+~/.local/bin/git-pw.
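+
+If needed, add that directory to your PATH, e.g.:
+
+ export PATH="$HOME/.local/bin:$PATH"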
+
+For git-pw (and therefore scripts such as patchtest-get-series) to work, you need
+to provide a Patchwork instance in your user's .gitconfig, like so (the project
+can be specified using the --project argument):
+
+ git config --global pw.server "https://patchwork.yoctoproject.org/api/1.2/"
+
+To work with patchtest, you should have the following repositories cloned:
+
+1. https://git.openembedded.org/openembedded-core/ (or https://git.yoctoproject.org/poky/)
+2. https://git.openembedded.org/bitbake/ (if not using poky)
+3. https://git.yoctoproject.org/patchtest (if using guest mode)
+
+## Usage
+
+### Obtaining Patches
+
+Patch files can be obtained directly from cloned repositories using `git
+format-patch -N` (where N is the number of patches starting from HEAD to
+generate). git-pw can also be used with filters for users, patch/series IDs,
+and timeboxes if specific patches are desired. For more information, see the
+git-pw [documentation](https://patchwork.readthedocs.io/projects/git-pw/en/latest/).
+
+Alternatively, `scripts/patchtest-get-series` can be used to pull mbox files from
+the Patchwork instance configured previously in .gitconfig. It uses a log file
+called ".series_test.log" to store and compare series IDs so that the same
+versions of a patch are not tested multiple times unintentionally. By default,
+it will pull up to five patch series from the last 30 minutes using oe-core as
+the target project, but these parameters can be configured using the `--limit`,
+`--interval`, and `--project` arguments respectively. For more information, run
+`patchtest-get-series -h`.
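+
+For example, to pull up to ten series from the last hour for oe-core
+(the values here are illustrative):
+
+ patchtest-get-series --limit 10 --interval 60 --project oe-core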
+
+### Host Mode
+
+To run patchtest on the host, do the following:
+
+1. In openembedded-core/poky, do `source oe-init-build-env`
+2. Generate patch files from the target repository by doing `git format-patch -N`,
+ where N is the number of patches starting at HEAD, or by using git-pw
+ or patchtest-get-series
+3. Run patchtest on a patch file by doing the following:
+
+ patchtest --patch /path/to/patch/file
+
+ or, if you have stored the patch files in a directory, do:
+
+ patchtest --directory /path/to/patch/directory
+
+ For example, to test `master-gcc-Fix--fstack-protector-issue-on-aarch64.patch` against the oe-core test suite:
+
+ patchtest --patch master-gcc-Fix--fstack-protector-issue-on-aarch64.patch
+
+ If you want to use a different test suite or target repository, you can use the --testdir and --repodir flags:
+
+ patchtest --patch /path/to/patch/file --repodir /path/to/repo --testdir /path/to/test/dir
+
+### Guest Mode
+
+Patchtest's guest mode has been refactored to more closely mirror the
+typical Yocto Project image build workflow, but there are still some key
+differences to keep in mind. The primary objective is to provide a level
+of isolation from the host when testing patches pulled automatically
+from the mailing lists. When executed this way, the test process is
+essentially running arbitrary code from the internet, which could be
+catastrophic if malicious changes or poorly-handled edge cases aren't
+protected against. In order to use this mode, the
+https://git.yoctoproject.org/patchtest/ repository must be cloned and
+the meta-patchtest layer added to bblayers.conf.
+
+The general flow of guest mode is:
+
+1. Run patchtest-setup-sharedir --directory <dirname> to create a
+ directory for mounting
+2. Collect patches via patchtest-get-series (or other manual step) into the
+ <dirname>/mboxes path
+3. Ensure that a user with ID 1200 has appropriate read/write
+ permissions to <dirname> and <dirname>/mboxes, so that the
+ "patchtest" user in the core-image-patchtest image can function
+ (see the example after this list)
+4. Build the core-image-patchtest image
+5. Run the core-image-patchtest image with the mounted sharedir, like
+ so:
+ `runqemu kvm nographic qemuparams="-snapshot -fsdev
+ local,id=test_mount,path=/workspace/yocto/poky/build/patchtestdir,security_model=mapped
+ -device virtio-9p-pci,fsdev=test_mount,mount_tag=test_mount -smp 4 -m
+ 2048"`
+
+Patchtest runs as an initscript for the core-image-patchtest image and
+shuts down after completion, so there is no input required from a user
+during operation. Unlike in host mode, the guest is designed to
+automatically generate test result files, in the same directory as the
+targeted patch files but with .testresult as an extension. These contain
+the entire output of the patchtest run for each respective pass,
+including the PASS, FAIL, and SKIP indicators for each test run.
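+
+Failed runs can then be spotted with something like:
+
+ grep "^FAIL" <dirname>/mboxes/*.testresult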
+
+## Contributing
+
+The openembedded-core mailing list (openembedded-core@lists.openembedded.org) is
+used for questions, comments and patch review. It is subscriber only, so please
+register before posting.
+
+When sending single patches, please use something like:
+
+ git send-email -M -1 --to=openembedded-core@lists.openembedded.org --subject-prefix=OE-core][PATCH
+
+## Maintenance
+
+Maintainers:
+ Trevor Gamblin <tgamblin@baylibre.com>
+
+## Links
+[1] https://git.openembedded.org/openembedded-core/
+[2] https://www.yoctoproject.org/community/mailing-lists/
diff --git a/scripts/postinst-intercepts/update_gtk_icon_cache b/scripts/postinst-intercepts/update_gtk_icon_cache
index 99367a2855..a92bd840c6 100644
--- a/scripts/postinst-intercepts/update_gtk_icon_cache
+++ b/scripts/postinst-intercepts/update_gtk_icon_cache
@@ -11,7 +11,11 @@ $STAGING_DIR_NATIVE/${libdir_native}/gdk-pixbuf-2.0/gdk-pixbuf-query-loaders --u
for icondir in $D/usr/share/icons/*/ ; do
if [ -d $icondir ] ; then
- gtk-update-icon-cache -fqt $icondir
+ for gtkuic_cmd in gtk-update-icon-cache gtk4-update-icon-cache ; do
+ if [ -n "$(which $gtkuic_cmd)" ]; then
+ $gtkuic_cmd -fqt $icondir
+ fi
+ done
fi
done
diff --git a/scripts/postinst-intercepts/update_mandb b/scripts/postinst-intercepts/update_mandb
new file mode 100644
index 0000000000..f91bafdb11
--- /dev/null
+++ b/scripts/postinst-intercepts/update_mandb
@@ -0,0 +1,18 @@
+#!/bin/sh
+#
+# SPDX-License-Identifier: MIT
+#
+
+set -eu
+
+# Create a temporary man_db.conf with paths to the rootfs, as mandb needs absolute paths
+CONFIG=$(mktemp --tmpdir update-mandb.XXXXX)
+sed "s:\(\s\)/:\1$D/:g" $D${sysconfdir}/man_db.conf > $CONFIG
+
+mkdir -p $D${localstatedir}/cache/man/
+
+PSEUDO_UNLOAD=1 ${binprefix}qemuwrapper -L $D $D${bindir}/mandb --config-file $CONFIG --create
+
+rm -f $CONFIG
+
+chown -R man:man $D${localstatedir}/cache/man/
diff --git a/scripts/postinst-intercepts/update_udev_hwdb b/scripts/postinst-intercepts/update_udev_hwdb
index 102e99b947..8b3f5de791 100644
--- a/scripts/postinst-intercepts/update_udev_hwdb
+++ b/scripts/postinst-intercepts/update_udev_hwdb
@@ -9,13 +9,17 @@ case "${PREFERRED_PROVIDER_udev}" in
systemd)
UDEV_EXTRA_ARGS="--usr"
UDEVLIBDIR="${rootlibexecdir}"
+ UDEVADM="${base_bindir}/udevadm"
;;
*)
UDEV_EXTRA_ARGS=""
UDEVLIBDIR="${sysconfdir}"
+ UDEVADM="${bindir}/udevadm"
;;
esac
-PSEUDO_UNLOAD=1 ${binprefix}qemuwrapper -L $D $D${libexecdir}/${binprefix}udevadm hwdb --update --root $D ${UDEV_EXTRA_ARGS}
+rm -f $D${UDEVLIBDIR}/udev/hwdb.bin
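+# try the prefixed qemuwrapper first, falling back to the host's plain
+# qemuwrapper if the ${binprefix} variant is unavailable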
+PSEUDO_UNLOAD=1 ${binprefix}qemuwrapper -L $D $D${UDEVADM} hwdb --update --root $D ${UDEV_EXTRA_ARGS} ||
+ PSEUDO_UNLOAD=1 qemuwrapper -L $D $D${UDEVADM} hwdb --update --root $D ${UDEV_EXTRA_ARGS}
chown root:root $D${UDEVLIBDIR}/udev/hwdb.bin
diff --git a/scripts/pybootchartgui/pybootchartgui/draw.py b/scripts/pybootchartgui/pybootchartgui/draw.py
index 53324b9f8b..c6e67833ab 100644
--- a/scripts/pybootchartgui/pybootchartgui/draw.py
+++ b/scripts/pybootchartgui/pybootchartgui/draw.py
@@ -80,6 +80,22 @@ MEM_BUFFERS_COLOR = (0.4, 0.4, 0.4, 0.3)
# Swap color
MEM_SWAP_COLOR = DISK_TPUT_COLOR
+# avg10 CPU pressure color
+CPU_PRESSURE_AVG10_COLOR = (0.0, 0.0, 0.0, 1.0)
+# delta total CPU pressure color
+CPU_PRESSURE_TOTAL_COLOR = CPU_COLOR
+# avg10 IO pressure color
+IO_PRESSURE_AVG10_COLOR = (0.0, 0.0, 0.0, 1.0)
+# delta total IO pressure color
+IO_PRESSURE_TOTAL_COLOR = IO_COLOR
+# avg10 memory pressure color
+MEM_PRESSURE_AVG10_COLOR = (0.0, 0.0, 0.0, 1.0)
+# delta total memory pressure color
+MEM_PRESSURE_TOTAL_COLOR = DISK_TPUT_COLOR
+
+
+
+
# Process border color.
PROC_BORDER_COLOR = (0.71, 0.71, 0.71, 1.0)
# Waiting process color.
@@ -267,11 +283,14 @@ def draw_chart(ctx, color, fill, chart_bounds, data, proc_tree, data_range):
# avoid divide by zero
if max_y == 0:
max_y = 1.0
- xscale = float (chart_bounds[2]) / (max_x - x_shift)
+ if (max_x - x_shift):
+ xscale = float (chart_bounds[2]) / (max_x - x_shift)
+ else:
+ xscale = float (chart_bounds[2])
# If data_range is given, scale the chart so that the value range in
# data_range matches the chart bounds exactly.
# Otherwise, scale so that the actual data matches the chart bounds.
- if data_range:
+ if data_range and (data_range[1] - data_range[0]):
yscale = float(chart_bounds[3]) / (data_range[1] - data_range[0])
ybase = data_range[0]
else:
@@ -337,6 +356,12 @@ def extents(options, xscale, trace):
h += 30 + bar_h
if trace.disk_stats:
h += 30 + bar_h
+ if trace.cpu_pressure:
+ h += 30 + bar_h
+ if trace.io_pressure:
+ h += 30 + bar_h
+ if trace.mem_pressure:
+ h += 30 + bar_h
if trace.monitor_disk:
h += 30 + bar_h
if trace.mem_stats:
@@ -412,6 +437,108 @@ def render_charts(ctx, options, clip, trace, curr_y, w, h, sec_w):
curr_y = curr_y + 30 + bar_h
+ # render CPU pressure chart
+ if trace.cpu_pressure:
+ max_sample_avg = max (trace.cpu_pressure, key = lambda s: s.avg10)
+ max_sample_total = max (trace.cpu_pressure, key = lambda s: s.deltaTotal)
+ draw_legend_line(ctx, "avg10 CPU Pressure (max %d%%)" % (max_sample_avg.avg10), CPU_PRESSURE_AVG10_COLOR, off_x, curr_y+20, leg_s)
+ draw_legend_box(ctx, "delta total CPU Pressure (max %d)" % (max_sample_total.deltaTotal), CPU_PRESSURE_TOTAL_COLOR, off_x + 240, curr_y+20, leg_s)
+
+ # render delta total cpu
+ chart_rect = (off_x, curr_y+30, w, bar_h)
+ if clip_visible (clip, chart_rect):
+ draw_box_ticks (ctx, chart_rect, sec_w)
+ draw_annotations (ctx, proc_tree, trace.times, chart_rect)
+ draw_chart (ctx, CPU_PRESSURE_TOTAL_COLOR, True, chart_rect, \
+ [(sample.time, sample.deltaTotal) for sample in trace.cpu_pressure], \
+ proc_tree, None)
+
+ # render avg10 cpu
+ if clip_visible (clip, chart_rect):
+ draw_chart (ctx, CPU_PRESSURE_AVG10_COLOR, False, chart_rect, \
+ [(sample.time, sample.avg10) for sample in trace.cpu_pressure], \
+ proc_tree, None)
+
+ pos_x = off_x + ((max_sample_avg.time - proc_tree.start_time) * w / proc_tree.duration)
+
+ shift_x, shift_y = -20, 20
+ if (pos_x < off_x + 245):
+ shift_x, shift_y = 5, 40
+
+
+ label = "%d%%" % (max_sample_avg.avg10)
+ draw_text (ctx, label, CPU_PRESSURE_AVG10_COLOR, pos_x + shift_x, curr_y + shift_y)
+
+ curr_y = curr_y + 30 + bar_h
+
+ # render I/O pressure chart
+ if trace.io_pressure:
+ max_sample_avg = max (trace.io_pressure, key = lambda s: s.avg10)
+ max_sample_total = max (trace.io_pressure, key = lambda s: s.deltaTotal)
+ draw_legend_line(ctx, "avg10 I/O Pressure (max %d%%)" % (max_sample_avg.avg10), IO_PRESSURE_AVG10_COLOR, off_x, curr_y+20, leg_s)
+ draw_legend_box(ctx, "delta total I/O Pressure (max %d)" % (max_sample_total.deltaTotal), IO_PRESSURE_TOTAL_COLOR, off_x + 240, curr_y+20, leg_s)
+
+ # render delta total io
+ chart_rect = (off_x, curr_y+30, w, bar_h)
+ if clip_visible (clip, chart_rect):
+ draw_box_ticks (ctx, chart_rect, sec_w)
+ draw_annotations (ctx, proc_tree, trace.times, chart_rect)
+ draw_chart (ctx, IO_PRESSURE_TOTAL_COLOR, True, chart_rect, \
+ [(sample.time, sample.deltaTotal) for sample in trace.io_pressure], \
+ proc_tree, None)
+
+ # render avg10 io
+ if clip_visible (clip, chart_rect):
+ draw_chart (ctx, IO_PRESSURE_AVG10_COLOR, False, chart_rect, \
+ [(sample.time, sample.avg10) for sample in trace.io_pressure], \
+ proc_tree, None)
+
+ pos_x = off_x + ((max_sample_avg.time - proc_tree.start_time) * w / proc_tree.duration)
+
+ shift_x, shift_y = -20, 20
+ if (pos_x < off_x + 245):
+ shift_x, shift_y = 5, 40
+
+
+ label = "%d%%" % (max_sample_avg.avg10)
+ draw_text (ctx, label, IO_PRESSURE_AVG10_COLOR, pos_x + shift_x, curr_y + shift_y)
+
+ curr_y = curr_y + 30 + bar_h
+
+ # render MEM pressure chart
+ if trace.mem_pressure:
+ max_sample_avg = max (trace.mem_pressure, key = lambda s: s.avg10)
+ max_sample_total = max (trace.mem_pressure, key = lambda s: s.deltaTotal)
+ draw_legend_line(ctx, "avg10 MEM Pressure (max %d%%)" % (max_sample_avg.avg10), MEM_PRESSURE_AVG10_COLOR, off_x, curr_y+20, leg_s)
+ draw_legend_box(ctx, "delta total MEM Pressure (max %d)" % (max_sample_total.deltaTotal), MEM_PRESSURE_TOTAL_COLOR, off_x + 240, curr_y+20, leg_s)
+
+ # render delta total mem
+ chart_rect = (off_x, curr_y+30, w, bar_h)
+ if clip_visible (clip, chart_rect):
+ draw_box_ticks (ctx, chart_rect, sec_w)
+ draw_annotations (ctx, proc_tree, trace.times, chart_rect)
+ draw_chart (ctx, MEM_PRESSURE_TOTAL_COLOR, True, chart_rect, \
+ [(sample.time, sample.deltaTotal) for sample in trace.mem_pressure], \
+ proc_tree, None)
+
+ # render avg10 mem
+ if clip_visible (clip, chart_rect):
+ draw_chart (ctx, MEM_PRESSURE_AVG10_COLOR, False, chart_rect, \
+ [(sample.time, sample.avg10) for sample in trace.mem_pressure], \
+ proc_tree, None)
+
+ pos_x = off_x + ((max_sample_avg.time - proc_tree.start_time) * w / proc_tree.duration)
+
+ shift_x, shift_y = -20, 20
+ if (pos_x < off_x + 245):
+ shift_x, shift_y = 5, 40
+
+
+ label = "%d%%" % (max_sample_avg.avg10)
+ draw_text (ctx, label, MEM_PRESSURE_AVG10_COLOR, pos_x + shift_x, curr_y + shift_y)
+
+ curr_y = curr_y + 30 + bar_h
+
# render disk space usage
#
# Draws the amount of disk space used on each volume relative to the
@@ -493,8 +620,8 @@ def render_charts(ctx, options, clip, trace, curr_y, w, h, sec_w):
return curr_y
-def render_processes_chart(ctx, options, trace, curr_y, w, h, sec_w):
- chart_rect = [off_x, curr_y+header_h, w, h - curr_y - 1 * off_y - header_h ]
+def render_processes_chart(ctx, options, trace, curr_y, width, h, sec_w):
+ chart_rect = [off_x, curr_y+header_h, width, h - curr_y - 1 * off_y - header_h ]
draw_legend_box (ctx, "Configure", \
TASK_COLOR_CONFIGURE, off_x , curr_y + 45, leg_s)
@@ -519,8 +646,9 @@ def render_processes_chart(ctx, options, trace, curr_y, w, h, sec_w):
offset = trace.min or min(trace.start.keys())
for start in sorted(trace.start.keys()):
for process in sorted(trace.start[start]):
+ elapsed_time = trace.processes[process][1] - start
if not options.app_options.show_all and \
- trace.processes[process][1] - start < options.app_options.mintime:
+ elapsed_time < options.app_options.mintime:
continue
task = process.split(":")[1]
@@ -529,14 +657,23 @@ def render_processes_chart(ctx, options, trace, curr_y, w, h, sec_w):
#print(s)
x = chart_rect[0] + (start - offset) * sec_w
- w = ((trace.processes[process][1] - start) * sec_w)
+ w = elapsed_time * sec_w
+
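+ # helper: return a copy of an RGBA color tuple with its alpha replaced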
+ def set_alfa(color, alfa):
+ clist = list(color)
+ clist[-1] = alfa
+ return tuple(clist)
#print("proc at %s %s %s %s" % (x, y, w, proc_h))
col = None
if task == "do_compile":
col = TASK_COLOR_COMPILE
+ elif "do_compile" in task:
+ col = set_alfa(TASK_COLOR_COMPILE, 0.25)
elif task == "do_configure":
col = TASK_COLOR_CONFIGURE
+ elif "do_configure" in task:
+ col = set_alfa(TASK_COLOR_CONFIGURE, 0.25)
elif task == "do_install":
col = TASK_COLOR_INSTALL
elif task == "do_populate_sysroot":
@@ -554,7 +691,10 @@ def render_processes_chart(ctx, options, trace, curr_y, w, h, sec_w):
draw_fill_rect(ctx, col, (x, y, w, proc_h))
draw_rect(ctx, PROC_BORDER_COLOR, (x, y, w, proc_h))
- draw_label_in_box(ctx, PROC_TEXT_COLOR, process, x, y + proc_h - 4, w, proc_h)
+ # Show elapsed time for each task
+ process = "%ds %s" % (elapsed_time, process)
+ draw_label_in_box(ctx, PROC_TEXT_COLOR, process, x, y + proc_h - 4, w, width)
+
y = y + proc_h
return curr_y
@@ -695,7 +835,7 @@ def draw_processes_recursively(ctx, proc, proc_tree, y, proc_h, rect, clip) :
cmdString = proc.cmd
else:
cmdString = ''
- if (OPTIONS.show_pid or OPTIONS.show_all) and ipid is not 0:
+ if (OPTIONS.show_pid or OPTIONS.show_all) and ipid != 0:
cmdString = cmdString + " [" + str(ipid // 1000) + "]"
if OPTIONS.show_all:
if proc.args:
@@ -793,7 +933,7 @@ class CumlSample:
if self.color is None:
i = self.next() % HSV_MAX_MOD
h = 0.0
- if i is not 0:
+ if i != 0:
h = (1.0 * i) / HSV_MAX_MOD
s = 0.5
v = 1.0
diff --git a/scripts/pybootchartgui/pybootchartgui/parsing.py b/scripts/pybootchartgui/pybootchartgui/parsing.py
index b42dac6b88..63a53b6b88 100644
--- a/scripts/pybootchartgui/pybootchartgui/parsing.py
+++ b/scripts/pybootchartgui/pybootchartgui/parsing.py
@@ -49,6 +49,9 @@ class Trace:
self.parent_map = None
self.mem_stats = []
self.monitor_disk = None
+ self.cpu_pressure = []
+ self.io_pressure = []
+ self.mem_pressure = []
self.times = [] # Always empty, but expected by draw.py when drawing system charts.
if len(paths):
@@ -128,7 +131,7 @@ class Trace:
def compile(self, writer):
def find_parent_id_for(pid):
- if pid is 0:
+ if pid == 0:
return 0
ppid = self.parent_map.get(pid)
if ppid:
@@ -554,6 +557,29 @@ def _parse_monitor_disk_log(file):
return disk_stats
+def _parse_pressure_logs(file, filename):
+ """
+ Parse a file for "some" pressure with 'avg10', 'avg60', 'avg300' and delta total
+ values (in that order) stored directly on one line, for CPU, IO and memory, based on the filename.
+ """
+ pressure_stats = []
+ if filename == "cpu.log":
+ SamplingClass = CPUPressureSample
+ elif filename == "memory.log":
+ SamplingClass = MemPressureSample
+ else:
+ SamplingClass = IOPressureSample
+ for time, lines in _parse_timed_blocks(file):
+ for line in lines:
+ if not line: continue
+ tokens = line.split()
+ avg10 = float(tokens[0])
+ avg60 = float(tokens[1])
+ avg300 = float(tokens[2])
+ delta = float(tokens[3])
+ pressure_stats.append(SamplingClass(time, avg10, avg60, avg300, delta))
+
+ return pressure_stats
# if we boot the kernel with: initcall_debug printk.time=1 we can
# get all manner of interesting data from the dmesg output
@@ -741,6 +767,13 @@ def _do_parse(writer, state, filename, file):
state.cmdline = _parse_cmdline_log(writer, file)
elif name == "monitor_disk.log":
state.monitor_disk = _parse_monitor_disk_log(file)
+ #pressure logs are in a subdirectory
+ elif name == "cpu.log":
+ state.cpu_pressure = _parse_pressure_logs(file, name)
+ elif name == "io.log":
+ state.io_pressure = _parse_pressure_logs(file, name)
+ elif name == "memory.log":
+ state.mem_pressure = _parse_pressure_logs(file, name)
elif not filename.endswith('.log'):
_parse_bitbake_buildstats(writer, state, filename, file)
t2 = time.process_time()
diff --git a/scripts/pybootchartgui/pybootchartgui/samples.py b/scripts/pybootchartgui/pybootchartgui/samples.py
index 9fc309b3ab..a70d8a5a28 100644
--- a/scripts/pybootchartgui/pybootchartgui/samples.py
+++ b/scripts/pybootchartgui/pybootchartgui/samples.py
@@ -37,6 +37,31 @@ class CPUSample:
return str(self.time) + "\t" + str(self.user) + "\t" + \
str(self.sys) + "\t" + str(self.io) + "\t" + str (self.swap)
+class CPUPressureSample:
+ def __init__(self, time, avg10, avg60, avg300, deltaTotal):
+ self.time = time
+ self.avg10 = avg10
+ self.avg60 = avg60
+ self.avg300 = avg300
+ self.deltaTotal = deltaTotal
+
+class IOPressureSample:
+ def __init__(self, time, avg10, avg60, avg300, deltaTotal):
+ self.time = time
+ self.avg10 = avg10
+ self.avg60 = avg60
+ self.avg300 = avg300
+ self.deltaTotal = deltaTotal
+
+class MemPressureSample:
+ def __init__(self, time, avg10, avg60, avg300, deltaTotal):
+ self.time = time
+ self.avg10 = avg10
+ self.avg60 = avg60
+ self.avg300 = avg300
+ self.deltaTotal = deltaTotal
+
+
class MemSample:
used_values = ('MemTotal', 'MemFree', 'Buffers', 'Cached', 'SwapTotal', 'SwapFree',)
diff --git a/scripts/pythondeps b/scripts/pythondeps
index be21dd84eb..48277ec28a 100755
--- a/scripts/pythondeps
+++ b/scripts/pythondeps
@@ -1,5 +1,7 @@
#!/usr/bin/env python3
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
# Determine dependencies of python scripts or available python modules in a search path.
diff --git a/scripts/relocate_sdk.py b/scripts/relocate_sdk.py
index 8c0fdb986a..8a728720ba 100755
--- a/scripts/relocate_sdk.py
+++ b/scripts/relocate_sdk.py
@@ -30,9 +30,16 @@ else:
old_prefix = re.compile(b("##DEFAULT_INSTALL_DIR##"))
def get_arch():
+ global endian_prefix
f.seek(0)
e_ident =f.read(16)
- ei_mag0,ei_mag1_3,ei_class = struct.unpack("<B3sB11x", e_ident)
+ ei_mag0,ei_mag1_3,ei_class,ei_data,ei_version = struct.unpack("<B3sBBB9x", e_ident)
+
+ # ei_data is 1 for little-endian and 2 for big-endian
+ if ei_data == 1:
+ endian_prefix = '<'
+ else:
+ endian_prefix = '>'
if (ei_mag0 != 0x7f and ei_mag1_3 != "ELF") or ei_class == 0:
return 0
@@ -51,11 +58,11 @@ def parse_elf_header():
if arch == 32:
# 32bit
- hdr_fmt = "<HHILLLIHHHHHH"
+ hdr_fmt = endian_prefix + "HHILLLIHHHHHH"
hdr_size = 52
else:
# 64bit
- hdr_fmt = "<HHIQQQIHHHHHH"
+ hdr_fmt = endian_prefix + "HHIQQQIHHHHHH"
hdr_size = 64
e_type, e_machine, e_version, e_entry, e_phoff, e_shoff, e_flags,\
@@ -64,9 +71,9 @@ def parse_elf_header():
def change_interpreter(elf_file_name):
if arch == 32:
- ph_fmt = "<IIIIIIII"
+ ph_fmt = endian_prefix + "IIIIIIII"
else:
- ph_fmt = "<IIQQQQQQ"
+ ph_fmt = endian_prefix + "IIQQQQQQ"
""" look for PT_INTERP section """
for i in range(0,e_phnum):
@@ -97,25 +104,26 @@ def change_interpreter(elf_file_name):
if (len(new_dl_path) >= p_filesz):
print("ERROR: could not relocate %s, interp size = %i and %i is needed." \
% (elf_file_name, p_memsz, len(new_dl_path) + 1))
- break
+ return False
dl_path = new_dl_path + b("\0") * (p_filesz - len(new_dl_path))
f.seek(p_offset)
f.write(dl_path)
break
+ return True
def change_dl_sysdirs(elf_file_name):
if arch == 32:
- sh_fmt = "<IIIIIIIIII"
+ sh_fmt = endian_prefix + "IIIIIIIIII"
else:
- sh_fmt = "<IIQQQQIIQQ"
+ sh_fmt = endian_prefix + "IIQQQQIIQQ"
""" read section string table """
f.seek(e_shoff + e_shstrndx * e_shentsize)
sh_hdr = f.read(e_shentsize)
if arch == 32:
- sh_offset, sh_size = struct.unpack("<16xII16x", sh_hdr)
+ sh_offset, sh_size = struct.unpack(endian_prefix + "16xII16x", sh_hdr)
else:
- sh_offset, sh_size = struct.unpack("<24xQQ24x", sh_hdr)
+ sh_offset, sh_size = struct.unpack(endian_prefix + "24xQQ24x", sh_hdr)
f.seek(sh_offset)
sh_strtab = f.read(sh_size)
@@ -215,6 +223,7 @@ else:
executables_list = sys.argv[3:]
+errors = False
for e in executables_list:
perms = os.stat(e)[stat.ST_MODE]
if os.access(e, os.W_OK|os.R_OK):
@@ -240,7 +249,8 @@ for e in executables_list:
arch = get_arch()
if arch:
parse_elf_header()
- change_interpreter(e)
+ if not change_interpreter(e):
+ errors = True
change_dl_sysdirs(e)
""" change permissions back """
@@ -253,3 +263,6 @@ for e in executables_list:
print("New file size for %s is different. Looks like a relocation error!", e)
sys.exit(-1)
+if errors:
+ print("Relocation of one or more executables failed.")
+ sys.exit(-1)
diff --git a/scripts/rpm2cpio.sh b/scripts/rpm2cpio.sh
index 7cd771bbe7..8199b43784 100755
--- a/scripts/rpm2cpio.sh
+++ b/scripts/rpm2cpio.sh
@@ -7,7 +7,7 @@ fatal() {
}
pkg="$1"
-[ -n "$pkg" -a -e "$pkg" ] ||
+[ -n "$pkg" ] && [ -e "$pkg" ] ||
fatal "No package supplied"
_dd() {
@@ -16,14 +16,23 @@ _dd() {
}
calcsize() {
+
+ case "$(_dd $1 bs=4 count=1 | tr -d '\0')" in
+ "$(printf '\216\255\350')"*) ;; # '\x8e\xad\xe8'
+ *) fatal "File doesn't look like rpm: $pkg" ;;
+ esac
+
offset=$(($1 + 8))
local i b b0 b1 b2 b3 b4 b5 b6 b7
i=0
while [ $i -lt 8 ]; do
- b=$(_dd $(($offset + $i)) bs=1 count=1; echo X)
- b=${b%X}
+ # add . so we don't lose a trailing \n
+ # strip \0 as it gets dropped with warning otherwise
+ b="$(_dd $(($offset + $i)) bs=1 count=1 | tr -d '\0' ; echo .)"
+ b=${b%.} # strip . again
+
[ -z "$b" ] &&
b="0" ||
b="$(exec printf '%u\n' "'$b")"
@@ -35,7 +44,7 @@ calcsize() {
offset=$(($offset + $rsize))
}
-case "$(_dd 0 bs=8 count=1)" in
+case "$(_dd 0 bs=4 count=1 | tr -d '\0')" in
"$(printf '\355\253\356\333')"*) ;; # '\xed\xab\xee\xdb'
*) fatal "File doesn't look like rpm: $pkg" ;;
esac
@@ -46,10 +55,11 @@ sigsize=$rsize
calcsize $(($offset + (8 - ($sigsize % 8)) % 8))
hdrsize=$rsize
-case "$(_dd $offset bs=3 count=1)" in
- "$(printf '\102\132')"*) _dd $offset | bunzip2 ;; # '\x42\x5a'
- "$(printf '\037\213')"*) _dd $offset | gunzip ;; # '\x1f\x8b'
- "$(printf '\375\067')"*) _dd $offset | xzcat ;; # '\xfd\x37'
- "$(printf '\135\000')"*) _dd $offset | unlzma ;; # '\x5d\x00'
- *) fatal "Unrecognized rpm file: $pkg" ;;
+case "$(_dd $offset bs=2 count=1 | tr -d '\0')" in
+ "$(printf '\102\132')") _dd $offset | bunzip2 ;; # '\x42\x5a'
+ "$(printf '\037\213')") _dd $offset | gunzip ;; # '\x1f\x8b'
+ "$(printf '\375\067')") _dd $offset | xzcat ;; # '\xfd\x37'
+ "$(printf '\135')") _dd $offset | unlzma ;; # '\x5d\x00'
+ "$(printf '\050\265')") _dd $offset | unzstd ;; # '\x28\xb5'
+ *) fatal "Unrecognized payload compression format in rpm file: $pkg" ;;
esac
diff --git a/scripts/runqemu b/scripts/runqemu
index e5e66f3453..69cd44864e 100755
--- a/scripts/runqemu
+++ b/scripts/runqemu
@@ -18,6 +18,7 @@ import shutil
import glob
import configparser
import signal
+import time
class RunQemuError(Exception):
"""Custom exception to raise on known errors."""
@@ -65,19 +66,25 @@ of the following environment variables (in any order):
MACHINE - the machine name (optional, autodetected from KERNEL filename if unspecified)
Simplified QEMU command-line options can be passed with:
nographic - disable video console
+ nonetwork - disable network connectivity
+ novga - disable VGA emulation completely
sdl - choose the SDL UI frontend
gtk - choose the Gtk UI frontend
gl - enable virgl-based GL acceleration (also needs gtk or sdl options)
gl-es - enable virgl-based GL acceleration, using OpenGL ES (also needs gtk or sdl options)
egl-headless - enable headless EGL output; use vnc (via publicvnc option) or spice to see it
+ (hint: if /dev/dri/renderD* is absent due to lack of suitable GPU, 'modprobe vgem' will create
+ one suitable for mesa llvmpipe software renderer)
serial - enable a serial console on /dev/ttyS0
serialstdio - enable a serial console on the console (regardless of graphics mode)
- slirp - enable user networking, no root privileges is required
- snapshot - don't write changes to back to images
+ slirp - enable user networking, no root privilege is required
+ snapshot - don't write changes back to images
kvm - enable KVM when running x86/x86_64 (VT-capable CPU required)
kvm-vhost - enable KVM with vhost when running x86/x86_64 (VT-capable CPU required)
publicvnc - enable a VNC server open to all hosts
audio - enable audio
+ guestagent - enable guest agent communication
+ qmp=<path> - create a QMP socket (defaults to unix:qmp.sock if unspecified)
[*/]ovmf* - OVMF firmware file or base name for booting with UEFI
tcpserial=<port> - specify tcp serial port number
qemuparams=<xyz> - specify custom parameters to QEMU
@@ -94,11 +101,13 @@ Examples:
runqemu qemux86-64 core-image-sato ext4
runqemu qemux86-64 wic-image-minimal wic
runqemu path/to/bzImage-qemux86.bin path/to/nfsrootdir/ serial
- runqemu qemux86 iso/hddimg/wic.vmdk/wic.qcow2/wic.vdi/ramfs/cpio.gz...
+ runqemu qemux86 iso/hddimg/wic.vmdk/wic.vhd/wic.vhdx/wic.qcow2/wic.vdi/ramfs/cpio.gz...
runqemu qemux86 qemuparams="-m 256"
runqemu qemux86 bootparams="psplash=false"
runqemu path/to/<image>-<machine>.wic
runqemu path/to/<image>-<machine>.wic.vmdk
+ runqemu path/to/<image>-<machine>.wic.vhdx
+ runqemu path/to/<image>-<machine>.wic.vhd
""")
def check_tun():
@@ -110,10 +119,10 @@ def check_tun():
if not os.access(dev_tun, os.W_OK):
raise RunQemuError("TUN control device %s is not writable, please fix (e.g. sudo chmod 666 %s)" % (dev_tun, dev_tun))
-def get_first_file(cmds):
- """Return first file found in wildcard cmds"""
- for cmd in cmds:
- all_files = glob.glob(cmd)
+def get_first_file(globs):
+ """Return first file found in wildcard globs"""
+ for g in globs:
+ all_files = glob.glob(g)
if all_files:
for f in all_files:
if not os.path.isdir(f):
@@ -136,12 +145,12 @@ class BaseConfig(object):
'OE_TMPDIR',
'OECORE_NATIVE_SYSROOT',
'MULTICONFIG',
+ 'SERIAL_CONSOLES',
)
self.qemu_opt = ''
self.qemu_opt_script = ''
self.qemuparams = ''
- self.clean_nfs_dir = False
self.nfs_server = ''
self.rootfs = ''
# File name(s) of a OVMF firmware file or variable store,
@@ -170,6 +179,15 @@ class BaseConfig(object):
self.nfs_running = False
self.serialconsole = False
self.serialstdio = False
+ self.nographic = False
+ self.nonetwork = False
+ self.sdl = False
+ self.gtk = False
+ self.gl = False
+ self.gl_es = False
+ self.egl_headless = False
+ self.publicvnc = False
+ self.novga = False
self.cleantap = False
self.saved_stty = ''
self.audio_enabled = False
@@ -179,14 +197,16 @@ class BaseConfig(object):
self.portlocks = {}
self.bitbake_e = ''
self.snapshot = False
- self.wictypes = ('wic', 'wic.vmdk', 'wic.qcow2', 'wic.vdi')
+ self.wictypes = ('wic', 'wic.vmdk', 'wic.qcow2', 'wic.vdi', "wic.vhd", "wic.vhdx")
self.fstypes = ('ext2', 'ext3', 'ext4', 'jffs2', 'nfs', 'btrfs',
- 'cpio.gz', 'cpio', 'ramfs', 'tar.bz2', 'tar.gz')
+ 'cpio.gz', 'cpio', 'ramfs', 'tar.bz2', 'tar.gz',
+ 'squashfs', 'squashfs-xz', 'squashfs-lzo',
+ 'squashfs-lz4', 'squashfs-zst')
self.vmtypes = ('hddimg', 'iso')
self.fsinfo = {}
self.network_device = "-device e1000,netdev=net0,mac=@MAC@"
self.cmdline_ip_slirp = "ip=dhcp"
- self.cmdline_ip_tap = "ip=192.168.7.@CLIENT@::192.168.7.@GATEWAY@:255.255.255.0"
+ self.cmdline_ip_tap = "ip=192.168.7.@CLIENT@::192.168.7.@GATEWAY@:255.255.255.0::eth0:off:8.8.8.8 net.ifnames=0"
# Use different mac section for tap and slirp to avoid
# conflicts, e.g., when one is running with tap, the other is
# running with slirp.
@@ -196,9 +216,15 @@ class BaseConfig(object):
self.mac_tap = "52:54:00:12:34:"
self.mac_slirp = "52:54:00:12:35:"
# pid of the actual qemu process
- self.qemupid = None
+ self.qemu_environ = os.environ.copy()
+ self.qemuprocess = None
# avoid cleanup twice
self.cleaned = False
+ # Files to cleanup after run
+ self.cleanup_files = []
+ self.qmp = None
+ self.guest_agent = False
+ self.guest_agent_sockpath = '/tmp/qga.sock'
def acquire_taplock(self, error=True):
logger.debug("Acquiring lockfile %s..." % self.taplock)
@@ -220,9 +246,12 @@ class BaseConfig(object):
def release_taplock(self):
if self.taplock_descriptor:
logger.debug("Releasing lockfile for tap device '%s'" % self.tap)
- fcntl.flock(self.taplock_descriptor, fcntl.LOCK_UN)
+ # We pass the fd to the qemu process and if we unlock here, it would unlock for
+ # that too. Therefore don't unlock, just close
+ # fcntl.flock(self.taplock_descriptor, fcntl.LOCK_UN)
self.taplock_descriptor.close()
- os.remove(self.taplock)
+ # Removing the file is a potential race, don't do that either
+ # os.remove(self.taplock)
self.taplock_descriptor = None
def check_free_port(self, host, port, lockdir):
@@ -260,17 +289,23 @@ class BaseConfig(object):
def release_portlock(self, lockfile=None):
if lockfile != None:
- logger.debug("Releasing lockfile '%s'" % lockfile)
- fcntl.flock(self.portlocks[lockfile], fcntl.LOCK_UN)
- self.portlocks[lockfile].close()
- os.remove(lockfile)
- del self.portlocks[lockfile]
+ logger.debug("Releasing lockfile '%s'" % lockfile)
+ # We pass the fd to the qemu process and if we unlock here, it would unlock for
+ # that too. Therefore don't unlock, just close
+ # fcntl.flock(self.portlocks[lockfile], fcntl.LOCK_UN)
+ self.portlocks[lockfile].close()
+ # Removing the file is a potential race, don't do that either
+ # os.remove(lockfile)
+ del self.portlocks[lockfile]
elif len(self.portlocks):
for lockfile, descriptor in self.portlocks.items():
logger.debug("Releasing lockfile '%s'" % lockfile)
- fcntl.flock(descriptor, fcntl.LOCK_UN)
+ # We pass the fd to the qemu process and if we unlock here, it would unlock for
+ # that too. Therefore don't unlock, just close
+ # fcntl.flock(descriptor, fcntl.LOCK_UN)
descriptor.close()
- os.remove(lockfile)
+ # Removing the file is a potential race, don't do that either
+ # os.remove(lockfile)
self.portlocks = {}
def get(self, key):
@@ -328,21 +363,21 @@ class BaseConfig(object):
def check_arg_path(self, p):
"""
- Check whether it is <image>.qemuboot.conf or contains <image>.qemuboot.conf
- - Check whether is a kernel file
- - Check whether is a image file
- - Check whether it is a nfs dir
- - Check whether it is a OVMF flash file
+ - Check whether it is a kernel file
+ - Check whether it is an image file
+ - Check whether it is an NFS dir
+ - Check whether it is an OVMF flash file
"""
if p.endswith('.qemuboot.conf'):
self.qemuboot = p
self.qbconfload = True
- elif re.search('\.bin$', p) or re.search('bzImage', p) or \
+ elif re.search('\\.bin$', p) or re.search('bzImage', p) or \
re.search('zImage', p) or re.search('vmlinux', p) or \
re.search('fitImage', p) or re.search('uImage', p):
self.kernel = p
- elif os.path.exists(p) and (not os.path.isdir(p)) and '-image-' in os.path.basename(p):
+ elif os.path.isfile(p) and ('-image-' in os.path.basename(p) or '.rootfs.' in os.path.basename(p)):
self.rootfs = p
- # Check filename against self.fstypes can hanlde <file>.cpio.gz,
+ # Check filename against self.fstypes, which can handle <file>.cpio.gz,
# otherwise, its type would be "gz", which is incorrect.
fst = ""
for t in self.fstypes:
@@ -350,18 +385,24 @@ class BaseConfig(object):
fst = t
break
if not fst:
- m = re.search('.*\.(.*)$', self.rootfs)
+ m = re.search('.*\\.(.*)$', self.rootfs)
if m:
fst = m.group(1)
if fst:
self.check_arg_fstype(fst)
- qb = re.sub('\.' + fst + "$", '', self.rootfs)
- qb = '%s%s' % (re.sub('\.rootfs$', '', qb), '.qemuboot.conf')
+ qb = re.sub('\\.' + fst + "$", '.qemuboot.conf', self.rootfs)
if os.path.exists(qb):
self.qemuboot = qb
self.qbconfload = True
else:
- logger.warning("%s doesn't exist" % qb)
+ logger.warning("%s doesn't exist, will try to remove '.rootfs' from filename" % qb)
+ # Try to remove .rootfs (IMAGE_NAME_SUFFIX) as well
+ qb = re.sub('\\.rootfs.qemuboot.conf$', '.qemuboot.conf', qb)
+ if os.path.exists(qb):
+ self.qemuboot = qb
+ self.qbconfload = True
+ else:
+ logger.warning("%s doesn't exist" % qb)
else:
raise RunQemuError("Can't find FSTYPE from: %s" % p)
@@ -395,6 +436,7 @@ class BaseConfig(object):
# are there other scenarios in which we need to support being
# invoked by bitbake?
deploy = self.get('DEPLOY_DIR_IMAGE')
+ image_link_name = self.get('IMAGE_LINK_NAME')
bbchild = deploy and self.get('OE_TMPDIR')
if bbchild:
self.set_machine_deploy_dir(arg, deploy)
@@ -419,23 +461,24 @@ class BaseConfig(object):
else:
logger.error("%s not a directory valid DEPLOY_DIR_IMAGE" % deploy_dir_image)
self.set("MACHINE", arg)
+ if not image_link_name:
+ s = re.search('^IMAGE_LINK_NAME="(.*)"', self.bitbake_e, re.M)
+ if s:
+ image_link_name = s.group(1)
+ self.set("IMAGE_LINK_NAME", image_link_name)
+ logger.debug('Using IMAGE_LINK_NAME = "%s"' % image_link_name)
def set_dri_path(self):
- # As runqemu can be run within bitbake (when using testimage, for example),
- # we need to ensure that we run host pkg-config, and that it does not
- # get mis-directed to native build paths set by bitbake.
- try:
- del os.environ['PKG_CONFIG_PATH']
- del os.environ['PKG_CONFIG_DIR']
- del os.environ['PKG_CONFIG_LIBDIR']
- del os.environ['PKG_CONFIG_SYSROOT_DIR']
- except KeyError:
- pass
- try:
- dripath = subprocess.check_output("PATH=/bin:/usr/bin:$PATH pkg-config --variable=dridriverdir dri", shell=True)
- except subprocess.CalledProcessError as e:
- raise RunQemuError("Could not determine the path to dri drivers on the host via pkg-config.\nPlease install Mesa development files (particularly, dri.pc) on the host machine.")
- os.environ['LIBGL_DRIVERS_PATH'] = dripath.decode('utf-8').strip()
+ drivers_path = os.path.join(self.bindir_native, '../lib/dri')
+ if not os.path.exists(drivers_path) or not os.listdir(drivers_path):
+ raise RunQemuError("""
+qemu has been built without opengl support and accelerated graphics support is not available.
+To enable it, add:
+DISTRO_FEATURES_NATIVE:append = " opengl"
+DISTRO_FEATURES_NATIVESDK:append = " opengl"
+to your build configuration.
+""")
+ self.qemu_environ['LIBGL_DRIVERS_PATH'] = drivers_path
def check_args(self):
for debug in ("-d", "--debug"):
@@ -449,49 +492,32 @@ class BaseConfig(object):
sys.argv.remove(quiet)
if 'gl' not in sys.argv[1:] and 'gl-es' not in sys.argv[1:]:
- os.environ['SDL_RENDER_DRIVER'] = 'software'
+ self.qemu_environ['SDL_RENDER_DRIVER'] = 'software'
+ self.qemu_environ['SDL_FRAMEBUFFER_ACCELERATION'] = 'false'
unknown_arg = ""
for arg in sys.argv[1:]:
if arg in self.fstypes + self.vmtypes + self.wictypes:
self.check_arg_fstype(arg)
elif arg == 'nographic':
- if ('sdl' in sys.argv):
- raise RunQemuError('Option nographic makes no sense alongside the sdl option.' % (arg))
- if ('gtk' in sys.argv):
- raise RunQemuError('Option nographic makes no sense alongside the gtk option.' % (arg))
- self.qemu_opt_script += ' -nographic'
- self.kernel_cmdline_script += ' console=ttyS0'
+ self.nographic = True
+ elif arg == "nonetwork":
+ self.nonetwork = True
elif arg == 'sdl':
- if 'gl' in sys.argv[1:]:
- self.set_dri_path()
- self.qemu_opt_script += ' -vga virtio -display sdl,gl=on,show-cursor=on'
- elif 'gl-es' in sys.argv[1:]:
- self.set_dri_path()
- self.qemu_opt_script += ' -vga virtio -display sdl,gl=es,show-cursor=on'
- else:
- self.qemu_opt_script += ' -display sdl,show-cursor=on'
+ self.sdl = True
elif arg == 'gtk':
- if 'gl' in sys.argv[1:]:
- self.set_dri_path()
- self.qemu_opt_script += ' -vga virtio -display gtk,gl=on,show-cursor=on'
- elif 'gl-es' in sys.argv[1:]:
- self.set_dri_path()
- self.qemu_opt_script += ' -vga virtio -display gtk,gl=es,show-cursor=on'
- else:
- self.qemu_opt_script += ' -display gtk,show-cursor=on'
- elif arg == 'gl' or arg == 'gl-es':
- # These args are handled inside sdl or gtk blocks above
- if ('gtk' not in sys.argv) and ('sdl' not in sys.argv):
- raise RunQemuError('Option %s also needs gtk or sdl option.' % (arg))
+ self.gtk = True
+ elif arg == 'gl':
+ self.gl = True
+ elif arg == 'gl-es':
+ self.gl_es = True
elif arg == 'egl-headless':
- self.set_dri_path()
- self.qemu_opt_script += ' -vga virtio -display egl-headless,show-cursor=on'
+ self.egl_headless = True
+ elif arg == 'novga':
+ self.novga = True
elif arg == 'serial':
- self.kernel_cmdline_script += ' console=ttyS0'
self.serialconsole = True
elif arg == "serialstdio":
- self.kernel_cmdline_script += ' console=ttyS0'
self.serialstdio = True
elif arg == 'audio':
logger.info("Enabling audio in qemu")
@@ -508,7 +534,16 @@ class BaseConfig(object):
elif arg == 'snapshot':
self.snapshot = True
elif arg == 'publicvnc':
+ self.publicvnc = True
self.qemu_opt_script += ' -vnc :0'
+ elif arg == 'guestagent':
+ self.guest_agent = True
+ elif arg == "qmp":
+ self.qmp = "unix:qmp.sock"
+ elif arg.startswith("qmp="):
+ self.qmp = arg[len('qmp='):]
+ elif arg.startswith('guestagent-sockpath='):
+ self.guest_agent_sockpath = '%s' % arg[len('guestagent-sockpath='):]
elif arg.startswith('tcpserial='):
self.tcpserial_portnum = '%s' % arg[len('tcpserial='):]
elif arg.startswith('qemuparams='):
@@ -540,21 +575,28 @@ class BaseConfig(object):
self.check_arg_machine(unknown_arg)
if not (self.get('DEPLOY_DIR_IMAGE') or self.qbconfload):
- self.load_bitbake_env()
+ self.load_bitbake_env(target=self.rootfs)
s = re.search('^DEPLOY_DIR_IMAGE="(.*)"', self.bitbake_e, re.M)
if s:
self.set("DEPLOY_DIR_IMAGE", s.group(1))
+ if not self.get('IMAGE_LINK_NAME') and self.rootfs:
+ s = re.search('^IMAGE_LINK_NAME="(.*)"', self.bitbake_e, re.M)
+ if s:
+ image_link_name = s.group(1)
+ self.set("IMAGE_LINK_NAME", image_link_name)
+ logger.debug('Using IMAGE_LINK_NAME = "%s"' % image_link_name)
+
def check_kvm(self):
"""Check kvm and kvm-host"""
if not (self.kvm_enabled or self.vhost_enabled):
- self.qemu_opt_script += ' %s %s' % (self.get('QB_MACHINE'), self.get('QB_CPU'))
+ self.qemu_opt_script += ' %s %s %s' % (self.get('QB_MACHINE'), self.get('QB_CPU'), self.get('QB_SMP'))
return
if not self.get('QB_CPU_KVM'):
raise RunQemuError("QB_CPU_KVM is NULL, this board doesn't support kvm")
- self.qemu_opt_script += ' %s %s' % (self.get('QB_MACHINE'), self.get('QB_CPU_KVM'))
+ self.qemu_opt_script += ' %s %s %s' % (self.get('QB_MACHINE'), self.get('QB_CPU_KVM'), self.get('QB_SMP'))
yocto_kvm_wiki = "https://wiki.yoctoproject.org/wiki/How_to_enable_KVM_for_Poky_qemu"
yocto_paravirt_kvm_wiki = "https://wiki.yoctoproject.org/wiki/Running_an_x86_Yocto_Linux_image_under_QEMU_KVM"
dev_kvm = '/dev/kvm'
@@ -574,11 +616,6 @@ class BaseConfig(object):
if os.access(dev_kvm, os.W_OK|os.R_OK):
self.qemu_opt_script += ' -enable-kvm'
- if self.get('MACHINE') == "qemux86":
- # Workaround for broken APIC window on pre 4.15 host kernels which causes boot hangs
- # See YOCTO #12301
- # On 64 bit we use x2apic
- self.kernel_cmdline_script += " clocksource=kvm-clock hpet=disable noapic nolapic"
else:
logger.error("You have no read or write permission on /dev/kvm.")
logger.error("Please change the ownership of this file as described at:")
@@ -619,10 +656,10 @@ class BaseConfig(object):
elif fsflag == 'kernel-in-fs':
wic_fs = False
else:
- logger.warn('Unknown flag "%s:%s" in QB_FSINFO', fstype, fsflag)
+ logger.warning('Unknown flag "%s:%s" in QB_FSINFO', fstype, fsflag)
continue
else:
- logger.warn('QB_FSINFO is not supported for image type "%s"', fstype)
+ logger.warning('QB_FSINFO is not supported for image type "%s"', fstype)
continue
if fstype in self.fsinfo:
@@ -655,16 +692,16 @@ class BaseConfig(object):
if self.rootfs and not os.path.exists(self.rootfs):
# Lazy rootfs
- self.rootfs = "%s/%s-%s.%s" % (self.get('DEPLOY_DIR_IMAGE'),
- self.rootfs, self.get('MACHINE'),
+ self.rootfs = "%s/%s.%s" % (self.get('DEPLOY_DIR_IMAGE'),
+ self.get('IMAGE_LINK_NAME'),
self.fstype)
elif not self.rootfs:
- cmd_name = '%s/%s*.%s' % (self.get('DEPLOY_DIR_IMAGE'), self.get('IMAGE_NAME'), self.fstype)
- cmd_link = '%s/%s*.%s' % (self.get('DEPLOY_DIR_IMAGE'), self.get('IMAGE_LINK_NAME'), self.fstype)
- cmds = (cmd_name, cmd_link)
- self.rootfs = get_first_file(cmds)
+ glob_name = '%s/%s*.%s' % (self.get('DEPLOY_DIR_IMAGE'), self.get('IMAGE_NAME'), self.fstype)
+ glob_link = '%s/%s*.%s' % (self.get('DEPLOY_DIR_IMAGE'), self.get('IMAGE_LINK_NAME'), self.fstype)
+ globs = (glob_name, glob_link)
+ self.rootfs = get_first_file(globs)
if not self.rootfs:
- raise RunQemuError("Failed to find rootfs: %s or %s" % cmds)
+ raise RunQemuError("Failed to find rootfs: %s or %s" % globs)
if not os.path.exists(self.rootfs):
raise RunQemuError("Can't find rootfs: %s" % self.rootfs)
@@ -724,10 +761,10 @@ class BaseConfig(object):
kernel_match_name = "%s/%s" % (deploy_dir_image, kernel_name)
kernel_match_link = "%s/%s" % (deploy_dir_image, self.get('KERNEL_IMAGETYPE'))
kernel_startswith = "%s/%s*" % (deploy_dir_image, self.get('KERNEL_IMAGETYPE'))
- cmds = (kernel_match_name, kernel_match_link, kernel_startswith)
- self.kernel = get_first_file(cmds)
+ globs = (kernel_match_name, kernel_match_link, kernel_startswith)
+ self.kernel = get_first_file(globs)
if not self.kernel:
- raise RunQemuError('KERNEL not found: %s, %s or %s' % cmds)
+ raise RunQemuError('KERNEL not found: %s, %s or %s' % globs)
if not os.path.exists(self.kernel):
raise RunQemuError("KERNEL %s not found" % self.kernel)
@@ -744,13 +781,13 @@ class BaseConfig(object):
dtb = self.get('QB_DTB')
if dtb:
deploy_dir_image = self.get('DEPLOY_DIR_IMAGE')
- cmd_match = "%s/%s" % (deploy_dir_image, dtb)
- cmd_startswith = "%s/%s*" % (deploy_dir_image, dtb)
- cmd_wild = "%s/*.dtb" % deploy_dir_image
- cmds = (cmd_match, cmd_startswith, cmd_wild)
- self.dtb = get_first_file(cmds)
+ glob_match = "%s/%s" % (deploy_dir_image, dtb)
+ glob_startswith = "%s/%s*" % (deploy_dir_image, dtb)
+ glob_wild = "%s/*.dtb" % deploy_dir_image
+ globs = (glob_match, glob_startswith, glob_wild)
+ self.dtb = get_first_file(globs)
if not os.path.exists(self.dtb):
- raise RunQemuError('DTB not found: %s, %s or %s' % cmds)
+ raise RunQemuError('DTB not found: %s, %s or %s' % globs)
def check_bios(self):
"""Check and set bios"""
@@ -774,7 +811,7 @@ class BaseConfig(object):
raise RunQemuError('BIOS not found: %s' % bios_match_name)
if not os.path.exists(self.bios):
- raise RunQemuError("KERNEL %s not found" % self.bios)
+ raise RunQemuError("BIOS %s not found" % self.bios)
def check_mem(self):
@@ -801,7 +838,7 @@ class BaseConfig(object):
self.set('QB_MEM', qb_mem)
mach = self.get('MACHINE')
- if not mach.startswith('qemumips'):
+ if not mach.startswith(('qemumips', 'qemux86', 'qemuloongarch64')):
self.kernel_cmdline_script += ' mem=%s' % self.get('QB_MEM').replace('-m','').strip() + 'M'
self.qemu_opt_script += ' %s' % self.get('QB_MEM')
@@ -813,11 +850,11 @@ class BaseConfig(object):
if self.get('QB_TCPSERIAL_OPT'):
self.qemu_opt_script += ' ' + self.get('QB_TCPSERIAL_OPT').replace('@PORT@', port)
else:
- self.qemu_opt_script += ' -serial tcp:127.0.0.1:%s' % port
+ self.qemu_opt_script += ' -serial tcp:127.0.0.1:%s,nodelay=on' % port
if len(ports) > 1:
for port in ports[1:]:
- self.qemu_opt_script += ' -serial tcp:127.0.0.1:%s' % port
+ self.qemu_opt_script += ' -serial tcp:127.0.0.1:%s,nodelay=on' % port
def check_and_set(self):
"""Check configs sanity and set when needed"""
@@ -860,8 +897,10 @@ class BaseConfig(object):
machine = self.get('MACHINE')
if not machine:
machine = os.path.basename(deploy_dir_image)
- self.qemuboot = "%s/%s-%s.qemuboot.conf" % (deploy_dir_image,
- self.rootfs, machine)
+ if not self.get('IMAGE_LINK_NAME'):
+ raise RunQemuError("IMAGE_LINK_NAME wasn't set to find corresponding .qemuboot.conf file")
+ self.qemuboot = "%s/%s.qemuboot.conf" % (deploy_dir_image,
+ self.get('IMAGE_LINK_NAME'))
else:
cmd = 'ls -t %s/*.qemuboot.conf' % deploy_dir_image
logger.debug('Running %s...' % cmd)
@@ -982,19 +1021,16 @@ class BaseConfig(object):
if self.slirp_enabled:
self.nfs_server = '10.0.2.2'
else:
- self.nfs_server = '192.168.7.1'
+ self.nfs_server = '192.168.7.@GATEWAY@'
- # Figure out a new nfs_instance to allow multiple qemus running.
- ps = subprocess.check_output(("ps", "auxww")).decode('utf-8')
- pattern = '/bin/unfsd .* -i .*\.pid -e .*/exports([0-9]+) '
- all_instances = re.findall(pattern, ps, re.M)
- if all_instances:
- all_instances.sort(key=int)
- self.nfs_instance = int(all_instances.pop()) + 1
-
- nfsd_port = 3049 + 2 * self.nfs_instance
- mountd_port = 3048 + 2 * self.nfs_instance
+ nfsd_port = 3048 + self.nfs_instance
+ lockdir = "/tmp/qemu-port-locks"
+ self.make_lock_dir(lockdir)
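+ # scan upwards from the base port until a free one is found, so that
+ # multiple qemu/NFS instances can run concurrently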
+ while not self.check_free_port('localhost', nfsd_port, lockdir):
+ self.nfs_instance += 1
+ nfsd_port += 1
+ mountd_port = nfsd_port
# Export vars for runqemu-export-rootfs
export_dict = {
'NFS_INSTANCE': self.nfs_instance,
@@ -1005,7 +1041,11 @@ class BaseConfig(object):
# Use '%s' since they are integers
os.putenv(k, '%s' % v)
- self.unfs_opts="nfsvers=3,port=%s,tcp,mountport=%s" % (nfsd_port, mountd_port)
+ qb_nfsrootfs_extra_opt = self.get("QB_NFSROOTFS_EXTRA_OPT")
+ if qb_nfsrootfs_extra_opt and not qb_nfsrootfs_extra_opt.startswith(","):
+ qb_nfsrootfs_extra_opt = "," + qb_nfsrootfs_extra_opt
+
+ self.unfs_opts="nfsvers=3,port=%s,tcp,mountport=%s%s" % (nfsd_port, mountd_port, qb_nfsrootfs_extra_opt)
# Extract .tar.bz2 or .tar.bz if no nfs dir
if not (self.rootfs and os.path.isdir(self.rootfs)):
@@ -1028,22 +1068,41 @@ class BaseConfig(object):
cmd = ('runqemu-extract-sdk', src, dest)
logger.info('Running %s...' % str(cmd))
if subprocess.call(cmd) != 0:
- raise RunQemuError('Failed to run %s' % cmd)
- self.clean_nfs_dir = True
+ raise RunQemuError('Failed to run %s' % str(cmd))
self.rootfs = dest
+ self.cleanup_files.append(self.rootfs)
+ self.cleanup_files.append('%s.pseudo_state' % self.rootfs)
# Start the userspace NFS server
cmd = ('runqemu-export-rootfs', 'start', self.rootfs)
logger.info('Running %s...' % str(cmd))
if subprocess.call(cmd) != 0:
- raise RunQemuError('Failed to run %s' % cmd)
+ raise RunQemuError('Failed to run %s' % str(cmd))
self.nfs_running = True
+ def setup_cmd(self):
+ cmd = self.get('QB_SETUP_CMD')
+ if cmd != '':
+ logger.info('Running setup command %s' % str(cmd))
+ if subprocess.call(cmd, shell=True) != 0:
+ raise RunQemuError('Failed to run %s' % str(cmd))
+
def setup_net_bridge(self):
self.set('NETWORK_CMD', '-netdev bridge,br=%s,id=net0,helper=%s -device virtio-net-pci,netdev=net0 ' % (
self.net_bridge, os.path.join(self.bindir_native, 'qemu-oe-bridge-helper')))
+ def make_lock_dir(self, lockdir):
+ if not os.path.exists(lockdir):
+ # There might be a race when multiple runqemu processes are
+ # running at the same time.
+ try:
+ os.mkdir(lockdir)
+ os.chmod(lockdir, 0o777)
+ except FileExistsError:
+ pass
+ return
+
def setup_slirp(self):
"""Setup user networking"""
@@ -1053,7 +1112,7 @@ class BaseConfig(object):
logger.info("Network configuration:%s", netconf)
self.kernel_cmdline_script += netconf
# Port mapping
- hostfwd = ",hostfwd=tcp::2222-:22,hostfwd=tcp::2323-:23"
+ hostfwd = ",hostfwd=tcp:127.0.0.1:2222-:22,hostfwd=tcp:127.0.0.1:2323-:23"
qb_slirp_opt_default = "-netdev user,id=net0%s,tftp=%s" % (hostfwd, self.get('DEPLOY_DIR_IMAGE'))
qb_slirp_opt = self.get('QB_SLIRP_OPT') or qb_slirp_opt_default
# Figure out the port
@@ -1062,14 +1121,7 @@ class BaseConfig(object):
mac = 2
lockdir = "/tmp/qemu-port-locks"
- if not os.path.exists(lockdir):
- # There might be a race issue when multi runqemu processess are
- # running at the same time.
- try:
- os.mkdir(lockdir)
- os.chmod(lockdir, 0o777)
- except FileExistsError:
- pass
+ self.make_lock_dir(lockdir)
# Find a free port to avoid conflicts
for p in ports[:]:
@@ -1109,20 +1161,17 @@ class BaseConfig(object):
logger.error("ip: %s" % ip)
raise OEPathError("runqemu-ifup, runqemu-ifdown or ip not found")
- if not os.path.exists(lockdir):
- # There might be a race issue when multi runqemu processess are
- # running at the same time.
- try:
- os.mkdir(lockdir)
- os.chmod(lockdir, 0o777)
- except FileExistsError:
- pass
+ self.make_lock_dir(lockdir)
cmd = (ip, 'link')
logger.debug('Running %s...' % str(cmd))
ip_link = subprocess.check_output(cmd).decode('utf-8')
# Matches line like: 6: tap0: <foo>
- possibles = re.findall('^[0-9]+: +(tap[0-9]+): <.*', ip_link, re.M)
+ oe_tap_name = 'tap'
+ if 'OE_TAP_NAME' in os.environ:
+ oe_tap_name = os.environ['OE_TAP_NAME']
+ tap_re = '^[0-9]+: +(' + oe_tap_name + '[0-9]+): <.*'
+ possibles = re.findall(tap_re, ip_link, re.M)
tap = ""
for p in possibles:
lockfile = os.path.join(lockdir, p)
@@ -1145,7 +1194,7 @@ class BaseConfig(object):
gid = os.getgid()
uid = os.getuid()
logger.info("Setting up tap interface under sudo")
- cmd = ('sudo', self.qemuifup, str(uid), str(gid), self.bindir_native)
+ cmd = ('sudo', self.qemuifup, str(gid))
try:
tap = subprocess.check_output(cmd).decode('utf-8').strip()
except subprocess.CalledProcessError as e:
@@ -1161,7 +1210,7 @@ class BaseConfig(object):
logger.error("Failed to setup tap device. Run runqemu-gen-tapdevs to manually create.")
sys.exit(1)
self.tap = tap
- tapnum = int(tap[3:])
+ tapnum = int(tap[len(oe_tap_name):])
gateway = tapnum * 2 + 1
client = gateway + 1
if self.fstype == 'nfs':
@@ -1169,6 +1218,7 @@ class BaseConfig(object):
netconf = " " + self.cmdline_ip_tap
netconf = netconf.replace('@CLIENT@', str(client))
netconf = netconf.replace('@GATEWAY@', str(gateway))
+ self.nfs_server = self.nfs_server.replace('@GATEWAY@', str(gateway))
logger.info("Network configuration:%s", netconf)
self.kernel_cmdline_script += netconf
mac = "%s%02x" % (self.mac_tap, client)
@@ -1184,7 +1234,8 @@ class BaseConfig(object):
self.set('NETWORK_CMD', '%s %s' % (self.network_device.replace('@MAC@', mac), qemu_tap_opt))
def setup_network(self):
- if self.get('QB_NET') == 'none':
+ if self.nonetwork or self.get('QB_NET') == 'none':
+ self.set('NETWORK_CMD', '-nic none')
return
if sys.stdin.isatty():
self.saved_stty = subprocess.check_output(("stty", "-g")).decode('utf-8').strip()
@@ -1203,7 +1254,19 @@ class BaseConfig(object):
return
if 'wic.' in self.fstype:
self.fstype = self.fstype[4:]
- rootfs_format = self.fstype if self.fstype in ('vmdk', 'qcow2', 'vdi') else 'raw'
+ rootfs_format = self.fstype if self.fstype in ('vmdk', 'vhd', 'vhdx', 'qcow2', 'vdi') else 'raw'
+
+ tmpfsdir = os.environ.get("RUNQEMU_TMPFS_DIR", None)
+ if self.snapshot and tmpfsdir:
+ newrootfs = os.path.join(tmpfsdir, os.path.basename(self.rootfs)) + "." + str(os.getpid())
+ logger.info("Copying rootfs to %s" % newrootfs)
+ copy_start = time.time()
+ shutil.copyfile(self.rootfs, newrootfs)
+ logger.info("Copy done in %s seconds" % (time.time() - copy_start))
+ self.rootfs = newrootfs
+ # Don't need a second copy now!
+ self.snapshot = False
+ self.cleanup_files.append(newrootfs)
qb_rootfs_opt = self.get('QB_ROOTFS_OPT')
if qb_rootfs_opt:
@@ -1249,7 +1312,13 @@ class BaseConfig(object):
self.rootfs_options = vm_drive
if not self.fstype in self.vmtypes:
self.rootfs_options += ' -no-reboot'
- self.kernel_cmdline = 'root=%s rw' % (self.get('QB_KERNEL_ROOT'))
+
+ # By default, ' rw' is appended to QB_KERNEL_ROOT unless either ro or rw is explicitly passed.
+ qb_kernel_root = self.get('QB_KERNEL_ROOT')
+ qb_kernel_root_l = qb_kernel_root.split()
+ if not ('ro' in qb_kernel_root_l or 'rw' in qb_kernel_root_l):
+ qb_kernel_root += ' rw'
+ self.kernel_cmdline = 'root=%s' % qb_kernel_root
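
[Editor's note: in isolation, the new defaulting rule behaves as in this sketch; the device paths are examples only:

    def kernel_root(qb_kernel_root):
        # Append ' rw' only when neither 'ro' nor 'rw' was given explicitly.
        words = qb_kernel_root.split()
        if not ('ro' in words or 'rw' in words):
            qb_kernel_root += ' rw'
        return 'root=%s' % qb_kernel_root

    print(kernel_root('/dev/vda'))     # root=/dev/vda rw
    print(kernel_root('/dev/vda ro'))  # root=/dev/vda ro
]
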
if self.fstype == 'nfs':
self.rootfs_options = ''
@@ -1265,7 +1334,7 @@ class BaseConfig(object):
"""attempt to determine the appropriate qemu-system binary"""
mach = self.get('MACHINE')
if not mach:
- search = '.*(qemux86-64|qemux86|qemuarm64|qemuarm|qemumips64|qemumips64el|qemumipsel|qemumips|qemuppc).*'
+ search = '.*(qemux86-64|qemux86|qemuarm64|qemuarm|qemuloongarch64|qemumips64|qemumips64el|qemumipsel|qemumips|qemuppc).*'
if self.rootfs:
match = re.match(search, self.rootfs)
if match:
@@ -1288,6 +1357,8 @@ class BaseConfig(object):
qbsys = 'x86_64'
elif mach == 'qemuppc':
qbsys = 'ppc'
+ elif mach == 'qemuloongarch64':
+ qbsys = 'loongarch64'
elif mach == 'qemumips':
qbsys = 'mips'
elif mach == 'qemumips64':
@@ -1316,7 +1387,127 @@ class BaseConfig(object):
raise RunQemuError("Failed to boot, QB_SYSTEM_NAME is NULL!")
self.qemu_system = qemu_system
- def setup_final(self):
+ def check_render_nodes(self):
+ render_hint = """If /dev/dri/renderD* is absent due to lack of suitable GPU, 'modprobe vgem' will create one suitable for mesa llvmpipe software renderer."""
+ try:
+ content = os.listdir("/dev/dri")
+ nodes = [i for i in content if i.startswith('renderD')]
+ if len(nodes) == 0:
+ raise RunQemuError("No render nodes found in /dev/dri/: %s. %s" %(content, render_hint))
+ for n in nodes:
+ try:
+ with open(os.path.join("/dev/dri", n), "w") as f:
+ f.close()
+ break
+ except IOError:
+ pass
+ else:
+ raise RunQemuError("None of the render nodes in /dev/dri/ are accessible: %s; you may need to add yourself to 'render' group or otherwise ensure you have read-write permissions on one of them." %(nodes))
+ except FileNotFoundError:
+ raise RunQemuError("/dev/dri directory does not exist; no render nodes available on this machine. %s" %(render_hint))
+
+ def setup_guest_agent(self):
+ if self.guest_agent == True:
+ self.qemu_opt += ' -chardev socket,path=' + self.guest_agent_sockpath + ',server,nowait,id=qga0 '
+ self.qemu_opt += ' -device virtio-serial '
+ self.qemu_opt += ' -device virtserialport,chardev=qga0,name=org.qemu.guest_agent.0 '
+
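
[Editor's note: with the guest agent wired up this way and qemu-guest-agent running in the image, the host-side socket speaks line-delimited JSON. A hedged sketch of pinging it; the socket path is an assumption, substitute whatever guest_agent_sockpath points at:

    import json, socket

    s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    s.connect("qga.sock")  # hypothetical guest_agent_sockpath value
    s.sendall(json.dumps({"execute": "guest-ping"}).encode() + b"\n")
    print(s.recv(4096))    # a live agent replies with {"return": {}}
]
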
+ def setup_qmp(self):
+ if self.qmp:
+ self.qemu_opt += " -qmp %s,server,nowait" % self.qmp
+
+ def setup_vga(self):
+ if self.nographic == True:
+ if self.sdl == True:
+ raise RunQemuError('Option nographic makes no sense alongside the sdl option.')
+ if self.gtk == True:
+ raise RunQemuError('Option nographic makes no sense alongside the gtk option.')
+ self.qemu_opt += ' -nographic'
+
+ if self.novga == True:
+ self.qemu_opt += ' -vga none'
+ return
+
+ if (self.gl_es == True or self.gl == True) and (self.sdl == False and self.gtk == False):
+ raise RunQemuError('Option gl/gl-es needs gtk or sdl option.')
+
+ # If we have no display option, we autodetect based upon what qemu supports. We
+ # need our font setup and show-cursor below so we need to see what qemu --help says
+ # is supported so we can pass our correct config in.
+ if not self.nographic and not self.sdl and not self.gtk and not self.publicvnc and not self.egl_headless == True:
+ output = subprocess.check_output([self.qemu_bin, "--help"], universal_newlines=True, env=self.qemu_environ)
+ if "-display gtk" in output:
+ self.gtk = True
+ elif "-display sdl" in output:
+ self.sdl = True
+ else:
+ self.qemu_opt += ' -display none'
+
+ if self.sdl == True or self.gtk == True or self.egl_headless == True:
+
+ if self.qemu_system.endswith(('i386', 'x86_64')):
+ if self.gl or self.gl_es or self.egl_headless:
+ self.qemu_opt += ' -device virtio-vga-gl '
+ else:
+ self.qemu_opt += ' -device virtio-vga '
+
+ self.qemu_opt += ' -display '
+ if self.egl_headless == True:
+ self.check_render_nodes()
+ self.set_dri_path()
+ self.qemu_opt += 'egl-headless,'
+ else:
+ if self.sdl == True:
+ self.qemu_opt += 'sdl,'
+ elif self.gtk == True:
+ self.qemu_environ['FONTCONFIG_PATH'] = '/etc/fonts'
+ self.qemu_opt += 'gtk,'
+
+ if self.gl == True:
+ self.set_dri_path()
+ self.qemu_opt += 'gl=on,'
+ elif self.gl_es == True:
+ self.set_dri_path()
+ self.qemu_opt += 'gl=es,'
+ self.qemu_opt += 'show-cursor=on'
+
+ self.qemu_opt += ' %s' %self.get('QB_GRAPHICS')
+
+ def setup_serial(self):
+ # Setup correct kernel command line for serial
+ if self.get('SERIAL_CONSOLES') and (self.serialstdio == True or self.serialconsole == True or self.nographic == True or self.tcpserial_portnum):
+ for entry in self.get('SERIAL_CONSOLES').split(' '):
+ self.kernel_cmdline_script += ' console=%s' %entry.split(';')[1]
+
+ # We always want ttyS0 and ttyS1 in qemu machines (see SERIAL_CONSOLES).
+ # If no serial or serialtcp options were specified, only ttyS0 is created
+ # and sysvinit shows an error trying to enable ttyS1:
+ # INIT: Id "S1" respawning too fast: disabled for 5 minutes
+ serial_num = len(re.findall("-serial", self.qemu_opt))
+
+ # Assume if the user passed serial options, they know what they want
+ # and pad to two devices
+ if serial_num == 1:
+ self.qemu_opt += " -serial null"
+ elif serial_num >= 2:
+ return
+
+ if self.serialstdio == True or self.nographic == True:
+ self.qemu_opt += " -serial mon:stdio"
+ else:
+ self.qemu_opt += " -serial mon:vc"
+ if self.serialconsole:
+ if sys.stdin.isatty():
+ subprocess.check_call(("stty", "intr", "^]"))
+ logger.info("Interrupt character is '^]'")
+
+ self.qemu_opt += " %s" % self.get("QB_SERIAL_OPT")
+
+ serial_num = len(re.findall("-serial", self.qemu_opt))
+ if serial_num < 2:
+ self.qemu_opt += " -serial null"
+
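
[Editor's note: the final padding rule above can be seen in isolation in this sketch; it guarantees the guest a ttyS1 so sysvinit stays quiet:

    import re

    def pad_serial(qemu_opt):
        # Ensure at least two -serial devices end up on the command line.
        if len(re.findall("-serial", qemu_opt)) < 2:
            qemu_opt += " -serial null"
        return qemu_opt

    print(pad_serial("-serial mon:stdio"))  # -serial mon:stdio -serial null
]
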
+ def find_qemu(self):
qemu_bin = os.path.join(self.bindir_native, self.qemu_system)
# It is possible to have qemu-native in ASSUME_PROVIDED, and it won't
@@ -1335,11 +1526,18 @@ class BaseConfig(object):
if not os.access(qemu_bin, os.X_OK):
raise OEPathError("No QEMU binary '%s' could be found" % qemu_bin)
+ self.qemu_bin = qemu_bin
- self.qemu_opt = "%s %s %s %s %s" % (qemu_bin, self.get('NETWORK_CMD'), self.get('QB_RNG'), self.get('ROOTFS_OPTIONS'), self.get('QB_OPT_APPEND'))
+ def setup_final(self):
+
+ self.find_qemu()
+
+ self.qemu_opt = "%s %s %s %s %s" % (self.qemu_bin, self.get('NETWORK_CMD'), self.get('QB_RNG'), self.get('ROOTFS_OPTIONS'), self.get('QB_OPT_APPEND').replace('@DEPLOY_DIR_IMAGE@', self.get('DEPLOY_DIR_IMAGE')))
for ovmf in self.ovmf_bios:
format = ovmf.rsplit('.', 1)[-1]
+ if format == "bin":
+ format = "raw"
self.qemu_opt += ' -drive if=pflash,format=%s,file=%s' % (format, ovmf)
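
[Editor's note: the extension-derived pflash format now special-cases '.bin' images, since qemu's -drive option has no 'bin' format; sketched with made-up firmware names:

    for ovmf in ("OVMF.qcow2", "OVMF.bin"):
        fmt = ovmf.rsplit('.', 1)[-1]
        if fmt == "bin":
            fmt = "raw"
        print('-drive if=pflash,format=%s,file=%s' % (fmt, ovmf))
]
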
self.qemu_opt += ' ' + self.qemu_opt_script
@@ -1358,61 +1556,44 @@ class BaseConfig(object):
if self.snapshot:
self.qemu_opt += " -snapshot"
- if self.serialconsole:
- if sys.stdin.isatty():
- subprocess.check_call(("stty", "intr", "^]"))
- logger.info("Interrupt character is '^]'")
-
- first_serial = ""
- if not re.search("-nographic", self.qemu_opt):
- first_serial = "-serial mon:vc"
- # We always want a ttyS1. Since qemu by default adds a serial
- # port when nodefaults is not specified, it seems that all that
- # would be needed is to make sure a "-serial" is there. However,
- # it appears that when "-serial" is specified, it ignores the
- # default serial port that is normally added. So here we make
- # sure to add two -serial if there are none. And only one if
- # there is one -serial already.
- serial_num = len(re.findall("-serial", self.qemu_opt))
- if serial_num == 0:
- self.qemu_opt += " %s %s" % (first_serial, self.get("QB_SERIAL_OPT"))
- elif serial_num == 1:
- self.qemu_opt += " %s" % self.get("QB_SERIAL_OPT")
-
- # We always wants ttyS0 and ttyS1 in qemu machines (see SERIAL_CONSOLES),
- # if not serial or serialtcp options was specified only ttyS0 is created
- # and sysvinit shows an error trying to enable ttyS1:
- # INIT: Id "S1" respawning too fast: disabled for 5 minutes
- serial_num = len(re.findall("-serial", self.qemu_opt))
- if serial_num == 0:
- if re.search("-nographic", self.qemu_opt) or self.serialstdio:
- self.qemu_opt += " -serial mon:stdio -serial null"
- else:
- self.qemu_opt += " -serial mon:vc -serial null"
+ self.setup_guest_agent()
+ self.setup_qmp()
+ self.setup_serial()
+ self.setup_vga()
def start_qemu(self):
import shlex
if self.kernel:
- kernel_opts = "-kernel %s -append '%s %s %s %s'" % (self.kernel, self.kernel_cmdline,
+ kernel_opts = "-kernel %s" % (self.kernel)
+ if self.get('QB_KERNEL_CMDLINE') == "none":
+ if self.bootparams:
+ kernel_opts += " -append '%s'" % (self.bootparams)
+ else:
+ kernel_opts += " -append '%s %s %s %s'" % (self.kernel_cmdline,
self.kernel_cmdline_script, self.get('QB_KERNEL_CMDLINE_APPEND'),
self.bootparams)
- if self.bios:
- kernel_opts += " -bios %s" % self.bios
if self.dtb:
kernel_opts += " -dtb %s" % self.dtb
else:
kernel_opts = ""
+
+ if self.bios:
+ self.qemu_opt += " -bios %s" % self.bios
+
cmd = "%s %s" % (self.qemu_opt, kernel_opts)
cmds = shlex.split(cmd)
logger.info('Running %s\n' % cmd)
+ with open('/proc/uptime', 'r') as f:
+ uptime_seconds = f.readline().split()[0]
+ logger.info('Host uptime: %s\n' % uptime_seconds)
pass_fds = []
if self.taplock_descriptor:
pass_fds = [self.taplock_descriptor.fileno()]
if len(self.portlocks):
for descriptor in self.portlocks.values():
pass_fds.append(descriptor.fileno())
- process = subprocess.Popen(cmds, stderr=subprocess.PIPE, pass_fds=pass_fds)
- self.qemupid = process.pid
+ process = subprocess.Popen(cmds, stderr=subprocess.PIPE, pass_fds=pass_fds, env=self.qemu_environ)
+ self.qemuprocess = process
retcode = process.wait()
if retcode:
if retcode == -signal.SIGTERM:
@@ -1420,6 +1601,13 @@ class BaseConfig(object):
else:
logger.error("Failed to run qemu: %s", process.stderr.read().decode())
+ def cleanup_cmd(self):
+ cmd = self.get('QB_CLEANUP_CMD')
+ if cmd != '':
+ logger.info('Running cleanup command %s' % str(cmd))
+ if subprocess.call(cmd, shell=True) != 0:
+ raise RunQemuError('Failed to run %s' % str(cmd))
+
def cleanup(self):
if self.cleaned:
return
@@ -1428,30 +1616,48 @@ class BaseConfig(object):
signal.signal(signal.SIGTERM, signal.SIG_IGN)
logger.info("Cleaning up")
+
+ if self.qemuprocess:
+ try:
+ # give it some time to shut down, ignore return values and output
+ self.qemuprocess.send_signal(signal.SIGTERM)
+ self.qemuprocess.communicate(timeout=5)
+ except subprocess.TimeoutExpired:
+ self.qemuprocess.kill()
+
+ with open('/proc/uptime', 'r') as f:
+ uptime_seconds = f.readline().split()[0]
+ logger.info('Host uptime: %s\n' % uptime_seconds)
if self.cleantap:
- cmd = ('sudo', self.qemuifdown, self.tap, self.bindir_native)
+ cmd = ('sudo', self.qemuifdown, self.tap)
logger.debug('Running %s' % str(cmd))
subprocess.check_call(cmd)
self.release_taplock()
- self.release_portlock()
if self.nfs_running:
logger.info("Shutting down the userspace NFS server...")
cmd = ("runqemu-export-rootfs", "stop", self.rootfs)
logger.debug('Running %s' % str(cmd))
subprocess.check_call(cmd)
+ self.release_portlock()
if self.saved_stty:
subprocess.check_call(("stty", self.saved_stty))
- if self.clean_nfs_dir:
- logger.info('Removing %s' % self.rootfs)
- shutil.rmtree(self.rootfs)
- shutil.rmtree('%s.pseudo_state' % self.rootfs)
+ if self.cleanup_files:
+ for ent in self.cleanup_files:
+ logger.info('Removing %s' % ent)
+ if os.path.isfile(ent):
+ os.remove(ent)
+ else:
+ shutil.rmtree(ent)
+
+ # Deliberately ignore the return code of 'tput smam'.
+ subprocess.call(["tput", "smam"])
self.cleaned = True
- def run_bitbake_env(self, mach=None):
+ def run_bitbake_env(self, mach=None, target=''):
bitbake = shutil.which('bitbake')
if not bitbake:
return
@@ -1464,22 +1670,33 @@ class BaseConfig(object):
multiconfig = "mc:%s" % multiconfig
if mach:
- cmd = 'MACHINE=%s bitbake -e %s' % (mach, multiconfig)
+ cmd = 'MACHINE=%s bitbake -e %s %s' % (mach, multiconfig, target)
else:
- cmd = 'bitbake -e %s' % multiconfig
+ cmd = 'bitbake -e %s %s' % (multiconfig, target)
logger.info('Running %s...' % cmd)
- return subprocess.check_output(cmd, shell=True).decode('utf-8')
+ try:
+ return subprocess.check_output(cmd, shell=True).decode('utf-8')
+ except subprocess.CalledProcessError as err:
+ logger.warning("Couldn't run '%s' to gather environment information, maybe the target wasn't an image name, will retry with virtual/kernel as a target:\n%s" % (cmd, err.output.decode('utf-8')))
+ # need something with IMAGE_NAME_SUFFIX/IMAGE_LINK_NAME defined (kernel also inherits image-artifact-names.bbclass)
+ target = 'virtual/kernel'
+ if mach:
+ cmd = 'MACHINE=%s bitbake -e %s %s' % (mach, multiconfig, target)
+ else:
+ cmd = 'bitbake -e %s %s' % (multiconfig, target)
+ try:
+ return subprocess.check_output(cmd, shell=True).decode('utf-8')
+ except subprocess.CalledProcessError as err:
+ logger.warning("Couldn't run '%s' to gather environment information, giving up with 'bitbake -e':\n%s" % (cmd, err.output.decode('utf-8')))
+ return ''
+
- def load_bitbake_env(self, mach=None):
+ def load_bitbake_env(self, mach=None, target=None):
if self.bitbake_e:
return
- try:
- self.bitbake_e = self.run_bitbake_env(mach=mach)
- except subprocess.CalledProcessError as err:
- self.bitbake_e = ''
- logger.warning("Couldn't run 'bitbake -e' to gather environment information:\n%s" % err.output.decode('utf-8'))
+ self.bitbake_e = self.run_bitbake_env(mach=mach, target=target)
def validate_combos(self):
if (self.fstype in self.vmtypes) and self.kernel:
@@ -1509,7 +1726,7 @@ class BaseConfig(object):
return result
raise RunQemuError("Native sysroot directory %s doesn't exist" % result)
else:
- raise RunQemuError("Can't find STAGING_BINDIR_NATIVE in '%s' output" % cmd)
+ raise RunQemuError("Can't find STAGING_BINDIR_NATIVE in '%s' output" % str(cmd))
def main():
@@ -1525,11 +1742,8 @@ def main():
subprocess.check_call([renice, str(os.getpid())])
def sigterm_handler(signum, frame):
- logger.info("SIGTERM received")
- os.kill(config.qemupid, signal.SIGTERM)
+ logger.info("Received signal: %s" % (signum))
config.cleanup()
- # Deliberately ignore the return code of 'tput smam'.
- subprocess.call(["tput", "smam"])
signal.signal(signal.SIGTERM, sigterm_handler)
config.check_args()
@@ -1541,6 +1755,7 @@ def main():
config.setup_network()
config.setup_rootfs()
config.setup_final()
+ config.setup_cmd()
config.start_qemu()
except RunQemuError as err:
logger.error(err)
@@ -1550,9 +1765,8 @@ def main():
traceback.print_exc()
return 1
finally:
+ config.cleanup_cmd()
config.cleanup()
- # Deliberately ignore the return code of 'tput smam'.
- subprocess.call(["tput", "smam"])
if __name__ == "__main__":
sys.exit(main())
diff --git a/scripts/runqemu-addptable2image b/scripts/runqemu-addptable2image
index ca29427258..87a8da3a63 100755
--- a/scripts/runqemu-addptable2image
+++ b/scripts/runqemu-addptable2image
@@ -1,6 +1,6 @@
#!/bin/sh
-# Add a partion table to an ext2 image file
+# Add a partition table to an ext2 image file
#
# Copyright (C) 2006-2007 OpenedHand Ltd.
#
diff --git a/scripts/runqemu-export-rootfs b/scripts/runqemu-export-rootfs
index 384c091713..6a8acd0d5a 100755
--- a/scripts/runqemu-export-rootfs
+++ b/scripts/runqemu-export-rootfs
@@ -34,16 +34,12 @@ if [ -z "$SYSROOT_SETUP_SCRIPT" ]; then
echo "Did you forget to source your build environment setup script?"
exit 1
fi
-. $SYSROOT_SETUP_SCRIPT meta-ide-support
+. $SYSROOT_SETUP_SCRIPT qemu-helper-native
if [ ! -e "$OECORE_NATIVE_SYSROOT/usr/bin/unfsd" ]; then
echo "Error: Unable to find unfsd binary in $OECORE_NATIVE_SYSROOT/usr/bin/"
- if [ "x$OECORE_DISTRO_VERSION" = "x" ]; then
- echo "Have you run 'bitbake meta-ide-support'?"
- else
- echo "This shouldn't happen - something is missing from your toolchain installation"
- fi
+ echo "This shouldn't happen - something is missing from your toolchain installation"
exit 1
fi
@@ -74,26 +70,11 @@ MOUNTD_PORT=${MOUNTD_PORT:=$[ 3048 + 2 * $NFS_INSTANCE ]}
## For debugging you would additionally add
## --debug all
-UNFSD_OPTS="-p -N -i $NFSPID -e $EXPORTS -n $NFSD_PORT -m $MOUNTD_PORT"
+UNFSD_OPTS="-p -i $NFSPID -e $EXPORTS -n $NFSD_PORT -m $MOUNTD_PORT"
# See how we were called.
case "$1" in
start)
- PORTMAP_RUNNING=`ps -ef | grep portmap | grep -v grep`
- RPCBIND_RUNNING=`ps -ef | grep rpcbind | grep -v grep`
- if [[ "x$PORTMAP_RUNNING" = "x" && "x$RPCBIND_RUNNING" = "x" ]]; then
- echo "======================================================="
- echo "Error: neither rpcbind nor portmap appear to be running"
- echo "Please install and start one of these services first"
- echo "======================================================="
- echo "Tip: for recent Ubuntu hosts, run:"
- echo " sudo apt-get install rpcbind"
- echo "Then add OPTIONS=\"-i -w\" to /etc/default/rpcbind and run"
- echo " sudo service portmap restart"
-
- exit 1
- fi
-
echo "Creating exports file..."
echo "$NFS_EXPORT_DIR (rw,no_root_squash,no_all_squash,insecure)" > $EXPORTS
diff --git a/scripts/runqemu-extract-sdk b/scripts/runqemu-extract-sdk
index 9bc0c07fb8..db05da25f2 100755
--- a/scripts/runqemu-extract-sdk
+++ b/scripts/runqemu-extract-sdk
@@ -25,7 +25,7 @@ if [ -z "$SYSROOT_SETUP_SCRIPT" ]; then
echo "Did you forget to source your build system environment setup script?"
exit 1
fi
-. $SYSROOT_SETUP_SCRIPT meta-ide-support
+. $SYSROOT_SETUP_SCRIPT qemu-helper-native
PSEUDO_OPTS="-P $OECORE_NATIVE_SYSROOT/usr"
ROOTFS_TARBALL=$1
diff --git a/scripts/runqemu-gen-tapdevs b/scripts/runqemu-gen-tapdevs
index a6ee4517da..a00c79c442 100755
--- a/scripts/runqemu-gen-tapdevs
+++ b/scripts/runqemu-gen-tapdevs
@@ -1,53 +1,58 @@
#!/bin/bash
#
# Create a "bank" of tap network devices that can be used by the
-# runqemu script. This script needs to be run as root, and will
-# use the tunctl binary from the build system sysroot. Note: many Linux
-# distros these days still use an older version of tunctl which does not
-# support the group permissions option, hence the need to use the build
-# system provided version.
+# runqemu script. This script needs to be run as root
#
# Copyright (C) 2010 Intel Corp.
#
# SPDX-License-Identifier: GPL-2.0-only
#
-uid=`id -u`
gid=`id -g`
-if [ -n "$SUDO_UID" ]; then
- uid=$SUDO_UID
-fi
if [ -n "$SUDO_GID" ]; then
gid=$SUDO_GID
fi
usage() {
- echo "Usage: sudo $0 <uid> <gid> <num> <staging_bindir_native>"
- echo "Where <uid> is the numeric user id the tap devices will be owned by"
+ echo "Usage: sudo $0 <gid> <num>"
echo "Where <gid> is the numeric group id the tap devices will be owned by"
echo "<num> is the number of tap devices to create (0 to remove all)"
- echo "<native-sysroot-basedir> is the path to the build system's native sysroot"
echo "For example:"
echo "$ bitbake qemu-helper-native"
- echo "$ sudo $0 $uid $gid 4 tmp/sysroots-components/x86_64/qemu-helper-native/usr/bin"
+ echo "$ sudo $0 $gid 4"
echo ""
exit 1
}
-if [ $# -ne 4 ]; then
+# Allow passing 4 arguments for backward compatibility with warning
+if [ $# -gt 4 ]; then
+ echo "Error: Incorrect number of arguments"
+ usage
+fi
+if [ $# -gt 3 ]; then
+ echo "Warning: Ignoring the <native-sysroot-basedir> parameter. It is no longer needed."
+fi
+if [ $# -gt 2 ]; then
+ echo "Warning: Ignoring the <uid> parameter. It is no longer needed."
+ GID=$2
+ COUNT=$3
+elif [ $# -eq 2 ]; then
+ GID=$1
+ COUNT=$2
+else
echo "Error: Incorrect number of arguments"
usage
fi
-TUID=$1
-GID=$2
-COUNT=$3
-STAGING_BINDIR_NATIVE=$4
-TUNCTL=$STAGING_BINDIR_NATIVE/tunctl
-if [[ ! -x "$TUNCTL" || -d "$TUNCTL" ]]; then
- echo "Error: $TUNCTL is not an executable"
- usage
+if [ -z "$OE_TAP_NAME" ]; then
+ OE_TAP_NAME=tap
+fi
+
+# check if COUNT is a number and >= 0
+if ! [ $COUNT -ge 0 ]; then
+ echo "Error: Incorrect count: $COUNT"
+ exit 1
fi
if [ $EUID -ne 0 ]; then
@@ -62,48 +67,41 @@ if [ ! -x "$RUNQEMU_IFUP" ]; then
exit 1
fi
-IFCONFIG=`which ip 2> /dev/null`
-if [ -z "$IFCONFIG" ]; then
- # Is it ever anywhere else?
- IFCONFIG=/sbin/ip
-fi
-if [ ! -x "$IFCONFIG" ]; then
- echo "$IFCONFIG cannot be executed"
- exit 1
-fi
-
-if [ $COUNT -ge 0 ]; then
- # Ensure we start with a clean slate
- for tap in `$IFCONFIG link | grep tap | awk '{ print \$2 }' | sed s/://`; do
- echo "Note: Destroying pre-existing tap interface $tap..."
- $TUNCTL -d $tap
- done
- rm -f /etc/runqemu-nosudo
+if interfaces=`ip tuntap list` 2>/dev/null; then
+ interfaces=`echo "$interfaces" |cut -f1 -d: |grep -E "^$OE_TAP_NAME.*"`
else
- echo "Error: Incorrect count: $COUNT"
+ echo "Failed to call 'ip tuntap list'" >&2
exit 1
fi
-if [ $COUNT -gt 0 ]; then
- echo "Creating $COUNT tap devices for UID: $TUID GID: $GID..."
- for ((index=0; index < $COUNT; index++)); do
- echo "Creating tap$index"
- ifup=`$RUNQEMU_IFUP $TUID $GID $STAGING_BINDIR_NATIVE 2>&1`
- if [ $? -ne 0 ]; then
- echo "Error running tunctl: $ifup"
- exit 1
- fi
- done
+# Ensure we start with a clean slate
+for tap in $interfaces; do
+ echo "Note: Destroying pre-existing tap interface $tap..."
+ ip tuntap del $tap mode tap
+done
+rm -f /etc/runqemu-nosudo
- echo "Note: For systems running NetworkManager, it's recommended"
- echo "Note: that the tap devices be set as unmanaged in the"
- echo "Note: NetworkManager.conf file. Add the following lines to"
- echo "Note: /etc/NetworkManager/NetworkManager.conf"
- echo "[keyfile]"
- echo "unmanaged-devices=interface-name:tap*"
-
- # The runqemu script will check for this file, and if it exists,
- # will use the existing bank of tap devices without creating
- # additional ones via sudo.
- touch /etc/runqemu-nosudo
+if [ $COUNT -eq 0 ]; then
+ exit 0
fi
+
+echo "Creating $COUNT tap devices for GID: $GID..."
+for ((index=0; index < $COUNT; index++)); do
+ echo "Creating $OE_TAP_NAME$index"
+ if ! ifup=`$RUNQEMU_IFUP $GID 2>&1`; then
+ echo "Error bringing up interface: $ifup"
+ exit 1
+ fi
+done
+
+echo "Note: For systems running NetworkManager, it's recommended"
+echo "Note: that the tap devices be set as unmanaged in the"
+echo "Note: NetworkManager.conf file. Add the following lines to"
+echo "Note: /etc/NetworkManager/NetworkManager.conf"
+echo "[keyfile]"
+echo "unmanaged-devices=interface-name:$OE_TAP_NAME*"
+
+# The runqemu script will check for this file, and if it exists,
+# will use the existing bank of tap devices without creating
+# additional ones via sudo.
+touch /etc/runqemu-nosudo
diff --git a/scripts/runqemu-ifdown b/scripts/runqemu-ifdown
index a104c37bf8..822a2a39b9 100755
--- a/scripts/runqemu-ifdown
+++ b/scripts/runqemu-ifdown
@@ -1,8 +1,7 @@
#!/bin/bash
#
# QEMU network configuration script to bring down tap devices. This
-# utility needs to be run as root, and will use the tunctl binary
-# from the native sysroot.
+# utility needs to be run as root, and will use the ip utility
#
# If you find yourself calling this script a lot, you can add the
# following to your /etc/sudoers file to be able to run this
@@ -17,7 +16,7 @@
#
usage() {
- echo "sudo $(basename $0) <tap-dev> <native-sysroot-basedir>"
+ echo "sudo $(basename $0) <tap-dev>"
}
if [ $EUID -ne 0 ]; then
@@ -25,30 +24,31 @@ if [ $EUID -ne 0 ]; then
exit 1
fi
-if [ $# -ne 2 ]; then
+if [ $# -gt 2 ] || [ $# -lt 1 ]; then
usage
exit 1
fi
+# backward compatibility
+if [ $# -eq 2 ] ; then
+ echo "Warning: native-sysroot-basedir parameter is ignored. It is no longer needed." >&2
+fi
+
TAP=$1
-STAGING_BINDIR_NATIVE=$2
-TUNCTL=$STAGING_BINDIR_NATIVE/tunctl
-if [ ! -e "$TUNCTL" ]; then
- echo "Error: Unable to find tunctl binary in '$STAGING_BINDIR_NATIVE', please bitbake qemu-helper-native"
+if ! ip tuntap del $TAP mode tap 2>/dev/null; then
+ echo "Error: Unable to run up tuntap del"
exit 1
fi
-$TUNCTL -d $TAP
-
-IFCONFIG=`which ip 2> /dev/null`
-if [ "x$IFCONFIG" = "x" ]; then
+IPTOOL=`which ip 2> /dev/null`
+if [ "x$IPTOOL" = "x" ]; then
# better than nothing...
- IFCONFIG=/sbin/ip
+ IPTOOL=/sbin/ip
fi
-if [ -x "$IFCONFIG" ]; then
- if `$IFCONFIG link show $TAP > /dev/null 2>&1`; then
- $IFCONFIG link del $TAP
+if [ -x "$IPTOOL" ]; then
+ if `$IPTOOL link show $TAP > /dev/null 2>&1`; then
+ $IPTOOL link del $TAP
fi
fi
# cleanup the remaining iptables rules
@@ -60,7 +60,13 @@ if [ ! -x "$IPTABLES" ]; then
echo "$IPTABLES cannot be executed"
exit 1
fi
-n=$[ (`echo $TAP | sed 's/tap//'` * 2) + 1 ]
-dest=$[ (`echo $TAP | sed 's/tap//'` * 2) + 2 ]
+
+if [ -z "$OE_TAP_NAME" ]; then
+ OE_TAP_NAME=tap
+fi
+
+n=$[ (`echo $TAP | sed "s/$OE_TAP_NAME//"` * 2) + 1 ]
+dest=$[ (`echo $TAP | sed "s/$OE_TAP_NAME//"` * 2) + 2 ]
$IPTABLES -D POSTROUTING -t nat -j MASQUERADE -s 192.168.7.$n/32
$IPTABLES -D POSTROUTING -t nat -j MASQUERADE -s 192.168.7.$dest/32
+true
diff --git a/scripts/runqemu-ifup b/scripts/runqemu-ifup
index bb661740c5..05c9325b6b 100755
--- a/scripts/runqemu-ifup
+++ b/scripts/runqemu-ifup
@@ -1,10 +1,7 @@
#!/bin/bash
#
# QEMU network interface configuration script. This utility needs to
-# be run as root, and will use the tunctl binary from a native sysroot.
-# Note: many Linux distros these days still use an older version of
-# tunctl which does not support the group permissions option, hence
-# the need to use build system's version.
+# be run as root, and will use the ip utility
#
# If you find yourself calling this script a lot, you can add the
# following to your /etc/sudoers file to be able to run this
@@ -24,7 +21,7 @@
#
usage() {
- echo "sudo $(basename $0) <uid> <gid> <native-sysroot-basedir>"
+ echo "sudo $(basename $0) <gid>"
}
if [ $EUID -ne 0 ]; then
@@ -32,41 +29,43 @@ if [ $EUID -ne 0 ]; then
exit 1
fi
-if [ $# -ne 3 ]; then
+if [ $# -eq 2 ]; then
+ echo "Warning: uid parameter is ignored. It is no longer needed." >&2
+ GROUP="$2"
+elif [ $# -eq 1 ]; then
+ GROUP="$1"
+else
usage
exit 1
fi
-USERID="-u $1"
-GROUP="-g $2"
-STAGING_BINDIR_NATIVE=$3
-TUNCTL=$STAGING_BINDIR_NATIVE/tunctl
-if [ ! -x "$TUNCTL" ]; then
- echo "Error: Unable to find tunctl binary in '$STAGING_BINDIR_NATIVE', please bitbake qemu-helper-native"
- exit 1
+if [ -z "$OE_TAP_NAME" ]; then
+ OE_TAP_NAME=tap
fi
-TAP=`$TUNCTL -b $GROUP 2>&1`
-STATUS=$?
-if [ $STATUS -ne 0 ]; then
-# If tunctl -g fails, try using tunctl -u, for older host kernels
-# which do not support the TUNSETGROUP ioctl
- TAP=`$TUNCTL -b $USERID 2>&1`
- STATUS=$?
- if [ $STATUS -ne 0 ]; then
- echo "tunctl failed:"
- exit 1
+if taps=$(ip tuntap list 2>/dev/null); then
+ tap_no_last=$(echo "$taps" |cut -f 1 -d ":" |grep -E "^$OE_TAP_NAME.*" |sed "s/$OE_TAP_NAME//g" | sort -rn | head -n 1)
+ if [ -z "$tap_no_last" ]; then
+ tap_no=0
+ else
+ tap_no=$(("$tap_no_last" + 1))
fi
+ ip tuntap add "$OE_TAP_NAME$tap_no" mode tap group "$GROUP" && TAP=$OE_TAP_NAME$tap_no
+fi
+
+if [ -z "$TAP" ]; then
+ echo "Error: Unable to find a tap device to use"
+ exit 1
fi
-IFCONFIG=`which ip 2> /dev/null`
-if [ "x$IFCONFIG" = "x" ]; then
+IPTOOL=`which ip 2> /dev/null`
+if [ "x$IPTOOL" = "x" ]; then
# better than nothing...
- IFCONFIG=/sbin/ip
+ IPTOOL=/sbin/ip
fi
-if [ ! -x "$IFCONFIG" ]; then
- echo "$IFCONFIG cannot be executed"
+if [ ! -x "$IPTOOL" ]; then
+ echo "$IPTOOL cannot be executed"
exit 1
fi
@@ -79,22 +78,22 @@ if [ ! -x "$IPTABLES" ]; then
exit 1
fi
-n=$[ (`echo $TAP | sed 's/tap//'` * 2) + 1 ]
-$IFCONFIG addr add 192.168.7.$n/32 broadcast 192.168.7.255 dev $TAP
+n=$[ (`echo $TAP | sed "s/$OE_TAP_NAME//"` * 2) + 1 ]
+$IPTOOL addr add 192.168.7.$n/32 broadcast 192.168.7.255 dev $TAP
STATUS=$?
if [ $STATUS -ne 0 ]; then
echo "Failed to set up IP addressing on $TAP"
exit 1
fi
-$IFCONFIG link set dev $TAP up
+$IPTOOL link set dev $TAP up
STATUS=$?
if [ $STATUS -ne 0 ]; then
echo "Failed to bring up $TAP"
exit 1
fi
-dest=$[ (`echo $TAP | sed 's/tap//'` * 2) + 2 ]
-$IFCONFIG route add to 192.168.7.$dest dev $TAP
+dest=$[ (`echo $TAP | sed "s/$OE_TAP_NAME//"` * 2) + 2 ]
+$IPTOOL route add to 192.168.7.$dest dev $TAP
STATUS=$?
if [ $STATUS -ne 0 ]; then
echo "Failed to add route to 192.168.7.$dest using $TAP"
diff --git a/scripts/runqemu.README b/scripts/runqemu.README
index da9abd7dfb..e5f4b4634c 100644
--- a/scripts/runqemu.README
+++ b/scripts/runqemu.README
@@ -1,12 +1,12 @@
Using OE images with QEMU
=========================
-OE-Core can generate qemu bootable kernels and images with can be used
+OE-Core can generate qemu bootable kernels and images which can be used
on a desktop system. The scripts currently support booting ARM, MIPS, PowerPC
-and x86 (32 and 64 bit) images. The scripts can be used within the OE build
-system or externaly.
+and x86 (32 and 64 bit) images. The scripts can be used within the OE build
+system or externally.
-The runqemu script is run as:
+The runqemu script is run as:
runqemu <machine> <zimage> <filesystem>
@@ -15,13 +15,13 @@ where:
<machine> is the machine/architecture to use (qemuarm/qemumips/qemuppc/qemux86/qemux86-64)
<zimage> is the path to a kernel (e.g. zimage-qemuarm.bin)
<filesystem> is the path to an ext2 image (e.g. filesystem-qemuarm.ext2) or an nfs directory
-
-If <machine> isn't specified, the script will try to detect the machine name
+
+If <machine> isn't specified, the script will try to detect the machine name
from the name of the <zimage> file.
If <filesystem> isn't specified, nfs booting will be assumed.
-When used within the build system, it will default to qemuarm, ext2 and the last kernel and
+When used within the build system, it will default to qemuarm, ext2 and the last kernel and
core-image-sato-sdk image built by the build system. If an sdk image isn't present it will look
for sato and minimal images.
@@ -31,7 +31,7 @@ Full usage instructions can be seen by running the command with no options speci
Notes
=====
- - The scripts run qemu using sudo. Change perms on /dev/net/tun to
+ - The scripts run qemu using sudo. Change perms on /dev/net/tun to
run as non root. The runqemu-gen-tapdevs script can also be used by
root to prepopulate the appropriate network devices.
- You can access the host computer at 192.168.7.1 within the image.
diff --git a/scripts/sstate-cache-management.py b/scripts/sstate-cache-management.py
new file mode 100755
index 0000000000..d3f600bd28
--- /dev/null
+++ b/scripts/sstate-cache-management.py
@@ -0,0 +1,329 @@
+#!/usr/bin/env python3
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
+import argparse
+import os
+import re
+import sys
+
+from collections import defaultdict
+from concurrent.futures import ThreadPoolExecutor
+from dataclasses import dataclass
+from pathlib import Path
+
+if sys.version_info < (3, 8, 0):
+ raise RuntimeError("Sorry, python 3.8.0 or later is required for this script.")
+
+SSTATE_PREFIX = "sstate:"
+SSTATE_EXTENSION = ".tar.zst"
+# SSTATE_EXTENSION = ".tgz"
+# .siginfo.done files are mentioned in the original script?
+SSTATE_SUFFIXES = (
+ SSTATE_EXTENSION,
+ f"{SSTATE_EXTENSION}.siginfo",
+ f"{SSTATE_EXTENSION}.done",
+)
+
+RE_SSTATE_PKGSPEC = re.compile(
+ rf"""sstate:(?P<pn>[^:]*):
+ (?P<package_target>[^:]*):
+ (?P<pv>[^:]*):
+ (?P<pr>[^:]*):
+ (?P<sstate_pkgarch>[^:]*):
+ (?P<sstate_version>[^_]*):
+ (?P<bb_unihash>[^_]*)_
+ (?P<bb_task>[^:]*)
+ (?P<ext>({"|".join([re.escape(s) for s in SSTATE_SUFFIXES])}))$""",
+ re.X,
+)
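
[Editor's note: as a quick sanity check, a fabricated but well-formed cache entry decomposes as expected when matched against the pattern above:

    # Assumes RE_SSTATE_PKGSPEC from this script is in scope; the filename
    # is invented but follows the SSTATE_PKGSPEC layout.
    name = ("sstate:zlib:core2-64-poky-linux:1.3:r0:core2-64:10:"
            "0123456789abcdef_populate_sysroot.tar.zst")
    m = RE_SSTATE_PKGSPEC.match(name)
    print(m.group("pn"), m.group("sstate_pkgarch"), m.group("bb_task"))
    # zlib core2-64 populate_sysroot
]
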
+
+
+# Really we'd like something like a Path subclass which implements a stat
+# cache here, unfortunately there's no good way to do that transparently
+# (yet); see:
+#
+# https://github.com/python/cpython/issues/70219
+# https://discuss.python.org/t/make-pathlib-extensible/3428/77
+@dataclass
+class SstateEntry:
+ """Class for keeping track of an entry in sstate-cache."""
+
+ path: Path
+ match: re.Match
+ stat_result: os.stat_result = None
+
+ def __hash__(self):
+ return self.path.__hash__()
+
+ def __getattr__(self, name):
+ return self.match.group(name)
+
+
+# this is what's in the original script; as far as I can tell, it's an
+# implementation artefact which we don't need?
+def find_archs():
+ # all_archs
+ builder_arch = os.uname().machine
+
+ # FIXME
+ layer_paths = [Path("../..")]
+
+ tune_archs = set()
+ re_tune = re.compile(r'AVAILTUNES .*=.*"(.*)"')
+ for path in layer_paths:
+ for tunefile in [
+ p for p in path.glob("meta*/conf/machine/include/**/*") if p.is_file()
+ ]:
+ with open(tunefile) as f:
+ for line in f:
+ m = re_tune.match(line)
+ if m:
+ tune_archs.update(m.group(1).split())
+
+ # all_machines
+ machine_archs = set()
+ for path in layer_paths:
+ for machine_file in path.glob("meta*/conf/machine/*.conf"):
+ machine_archs.add(machine_file.parts[-1][:-5])
+
+ extra_archs = set()
+ all_archs = (
+ set(
+ arch.replace("-", "_")
+ for arch in machine_archs | tune_archs | set(["allarch", builder_arch])
+ )
+ | extra_archs
+ )
+
+ print(all_archs)
+
+
+# again, not needed?
+def find_tasks():
+ print(set([p.bb_task for p in paths]))
+
+
+def collect_sstate_paths(args):
+ def scandir(path, paths):
+ # Assume everything is a directory; by not checking we avoid needing an
+ # additional stat which is potentially a synchronous roundtrip over NFS
+ try:
+ for p in path.iterdir():
+ filename = p.parts[-1]
+ if filename.startswith(SSTATE_PREFIX):
+ if filename.endswith(SSTATE_SUFFIXES):
+ m = RE_SSTATE_PKGSPEC.match(p.parts[-1])
+ assert m
+ paths.add(SstateEntry(p, m))
+ # ignore other things (includes things like lockfiles)
+ else:
+ scandir(p, paths)
+
+ except NotADirectoryError:
+ pass
+
+ paths = set()
+ # TODO: parallelise scandir
+ scandir(Path(args.cache_dir), paths)
+
+ def path_stat(p):
+ p.stat_result = p.path.lstat()
+
+ if args.remove_duplicated:
+ # This is probably slightly performance negative on a local filesystem
+ # when we interact with the GIL; over NFS it's a massive win.
+ with ThreadPoolExecutor(max_workers=args.jobs) as executor:
+ executor.map(path_stat, paths)
+
+ return paths
+
+
+def remove_by_stamps(args, paths):
+ all_sums = set()
+ for stamps_dir in args.stamps_dir:
+ stamps_path = Path(stamps_dir)
+ assert stamps_path.is_dir()
+ re_sigdata = re.compile(r"do_.*\.sigdata\.([^.]*)")
+ all_sums |= set(
+ [
+ re_sigdata.search(x.parts[-1]).group(1)
+ for x in stamps_path.glob("*/*/*.do_*.sigdata.*")
+ ]
+ )
+ re_setscene = re.compile(r"do_.*_setscene\.([^.]*)")
+ all_sums |= set(
+ [
+ re_setscene.search(x.parts[-1]).group(1)
+ for x in stamps_path.glob("*/*/*.do_*_setscene.*")
+ ]
+ )
+ return [p for p in paths if p.bb_unihash not in all_sums]
+
+
+def remove_duplicated(args, paths):
+ # Skip populate_lic as it produces duplicates in a normal build
+ #
+ # 9ae16469e707 sstate-cache-management: skip populate_lic archives when removing duplicates
+ valid_paths = [p for p in paths if p.bb_task != "populate_lic"]
+
+ keep = dict()
+ remove = list()
+ for p in valid_paths:
+ sstate_sig = ":".join([p.pn, p.sstate_pkgarch, p.bb_task, p.ext])
+ if sstate_sig not in keep:
+ keep[sstate_sig] = p
+ elif p.stat_result.st_mtime > keep[sstate_sig].stat_result.st_mtime:
+ remove.append(keep[sstate_sig])
+ keep[sstate_sig] = p
+ else:
+ remove.append(p)
+
+ return remove
+
+
+def remove_orphans(args, paths):
+ remove = list()
+ pathsigs = defaultdict(list)
+ for p in paths:
+ sstate_sig = ":".join([p.pn, p.sstate_pkgarch, p.bb_task])
+ pathsigs[sstate_sig].append(p)
+ for k, v in pathsigs.items():
+ if len([p for p in v if p.ext == SSTATE_EXTENSION]) == 0:
+ remove.extend(v)
+ return remove
+
+
+def parse_arguments():
+ parser = argparse.ArgumentParser(description="sstate cache management utility.")
+
+ parser.add_argument(
+ "--cache-dir",
+ default=os.environ.get("SSTATE_CACHE_DIR"),
+ help="""Specify sstate cache directory, will use the environment
+ variable SSTATE_CACHE_DIR if it is not specified.""",
+ )
+
+ # parser.add_argument(
+ # "--extra-archs",
+ # help="""Specify list of architectures which should be tested, this list
+ # will be extended with native arch, allarch and empty arch. The
+ # script won't be trying to generate list of available archs from
+ # AVAILTUNES in tune files.""",
+ # )
+
+ # parser.add_argument(
+ # "--extra-layer",
+ # help="""Specify the layer which will be used for searching the archs,
+ # it will search the meta and meta-* layers in the top dir by
+ # default, and will search meta, meta-*, <layer1>, <layer2>,
+ # ...<layern> when specified. Use "," as the separator.
+ #
+ # This is useless for --stamps-dir or when --extra-archs is used.""",
+ # )
+
+ parser.add_argument(
+ "-d",
+ "--remove-duplicated",
+ action="store_true",
+ help="""Remove the duplicated sstate cache files of one package, only
+ the newest one will be kept. The duplicated sstate cache files
+ of one package must have the same arch, which means sstate cache
+ files with multiple archs are not considered duplicate.
+
+ Conflicts with --stamps-dir.""",
+ )
+
+ parser.add_argument(
+ "--remove-orphans",
+ action="store_true",
+ help=f"""Remove orphan siginfo files from the sstate cache, i.e. those
+ where there is no {SSTATE_EXTENSION} file but there are associated
+ tracking files.""",
+ )
+
+ parser.add_argument(
+ "--stamps-dir",
+ action="append",
+ help="""Specify the build directory's stamps directories, the sstate
+ cache file which IS USED by these build directories will be KEPT,
+ other sstate cache files in cache-dir will be removed. Can be
+ specified multiple times for several directories.
+
+ Conflicts with --remove-duplicated.""",
+ )
+
+ parser.add_argument(
+ "-j", "--jobs", default=8, type=int, help="Run JOBS jobs in parallel."
+ )
+
+ # parser.add_argument(
+ # "-L",
+ # "--follow-symlink",
+ # action="store_true",
+ # help="Remove both the symbol link and the destination file, default: no.",
+ # )
+
+ parser.add_argument(
+ "-y",
+ "--yes",
+ action="store_true",
+ help="""Automatic yes to prompts; assume "yes" as answer to all prompts
+ and run non-interactively.""",
+ )
+
+ parser.add_argument(
+ "-v", "--verbose", action="store_true", help="Explain what is being done."
+ )
+
+ parser.add_argument(
+ "-D",
+ "--debug",
+ action="count",
+ default=0,
+ help="Show debug info, repeat for more debug info.",
+ )
+
+ args = parser.parse_args()
+ if args.cache_dir is None or (
+ not args.remove_duplicated and not args.stamps_dir and not args.remove_orphans
+ ):
+ parser.print_usage()
+ sys.exit(1)
+
+ return args
+
+
+def main():
+ args = parse_arguments()
+
+ paths = collect_sstate_paths(args)
+ if args.remove_duplicated:
+ remove = remove_duplicated(args, paths)
+ elif args.stamps_dir:
+ remove = remove_by_stamps(args, paths)
+ else:
+ remove = list()
+
+ if args.remove_orphans:
+ remove = set(remove) | set(remove_orphans(args, paths))
+
+ if args.debug >= 1:
+ print("\n".join([str(p.path) for p in remove]))
+ print(f"{len(remove)} out of {len(paths)} files will be removed!")
+ if not args.yes:
+ print("Do you want to continue (y/n)?")
+ confirm = input() in ("y", "Y")
+ else:
+ confirm = True
+ if confirm:
+ # TODO: parallelise remove
+ for p in remove:
+ p.path.unlink()
+
+
+if __name__ == "__main__":
+ main()
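
[Editor's note: typical unattended use might look like the following sketch; the cache path is an example, and -D makes the script print the removal candidates before acting:

    import subprocess

    subprocess.run(["scripts/sstate-cache-management.py",
                    "--cache-dir", "/srv/sstate-cache",
                    "--remove-duplicated", "--remove-orphans",
                    "-D", "-y"], check=True)
]
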
diff --git a/scripts/sstate-cache-management.sh b/scripts/sstate-cache-management.sh
deleted file mode 100755
index f1706a2229..0000000000
--- a/scripts/sstate-cache-management.sh
+++ /dev/null
@@ -1,458 +0,0 @@
-#!/bin/bash
-
-# Copyright (c) 2012 Wind River Systems, Inc.
-#
-# SPDX-License-Identifier: GPL-2.0-only
-#
-
-# Global vars
-cache_dir=
-confirm=
-fsym=
-total_deleted=0
-verbose=
-debug=0
-
-usage () {
- cat << EOF
-Welcome to sstate cache management utilities.
-sstate-cache-management.sh <OPTION>
-
-Options:
- -h, --help
- Display this help and exit.
-
- --cache-dir=<sstate cache dir>
- Specify sstate cache directory, will use the environment
- variable SSTATE_CACHE_DIR if it is not specified.
-
- --extra-archs=<arch1>,<arch2>...<archn>
- Specify list of architectures which should be tested, this list
- will be extended with native arch, allarch and empty arch. The
- script won't be trying to generate list of available archs from
- AVAILTUNES in tune files.
-
- --extra-layer=<layer1>,<layer2>...<layern>
- Specify the layer which will be used for searching the archs,
- it will search the meta and meta-* layers in the top dir by
- default, and will search meta, meta-*, <layer1>, <layer2>,
- ...<layern> when specified. Use "," as the separator.
-
- This is useless for --stamps-dir or when --extra-archs is used.
-
- -d, --remove-duplicated
- Remove the duplicated sstate cache files of one package, only
- the newest one will be kept. The duplicated sstate cache files
- of one package must have the same arch, which means sstate cache
- files with multiple archs are not considered duplicate.
-
- Conflicts with --stamps-dir.
-
- --stamps-dir=<dir1>,<dir2>...<dirn>
- Specify the build directory's stamps directories, the sstate
- cache file which IS USED by these build diretories will be KEPT,
- other sstate cache files in cache-dir will be removed. Use ","
- as the separator. For example:
- --stamps-dir=build1/tmp/stamps,build2/tmp/stamps
-
- Conflicts with --remove-duplicated.
-
- -L, --follow-symlink
- Remove both the symbol link and the destination file, default: no.
-
- -y, --yes
- Automatic yes to prompts; assume "yes" as answer to all prompts
- and run non-interactively.
-
- -v, --verbose
- Explain what is being done.
-
- -D, --debug
- Show debug info, repeat for more debug info.
-
-EOF
-}
-
-if [ $# -lt 1 ]; then
- usage
- exit 0
-fi
-
-# Echo no files to remove
-no_files () {
- echo No files to remove
-}
-
-# Echo nothing to do
-do_nothing () {
- echo Nothing to do
-}
-
-# Read the input "y"
-read_confirm () {
- echo "$total_deleted out of $total_files files will be removed! "
- if [ "$confirm" != "y" ]; then
- echo "Do you want to continue (y/n)? "
- while read confirm; do
- [ "$confirm" = "Y" -o "$confirm" = "y" -o "$confirm" = "n" \
- -o "$confirm" = "N" ] && break
- echo "Invalid input \"$confirm\", please input 'y' or 'n': "
- done
- else
- echo
- fi
-}
-
-# Print error information and exit.
-echo_error () {
- echo "ERROR: $1" >&2
- exit 1
-}
-
-# Generate the remove list:
-#
-# * Add .done/.siginfo to the remove list
-# * Add destination of symlink to the remove list
-#
-# $1: output file, others: sstate cache file (.tgz)
-gen_rmlist (){
- local rmlist_file="$1"
- shift
- local files="$@"
- for i in $files; do
- echo $i >> $rmlist_file
- # Add the ".siginfo"
- if [ -e $i.siginfo ]; then
- echo $i.siginfo >> $rmlist_file
- fi
- # Add the destination of symlink
- if [ -L "$i" ]; then
- if [ "$fsym" = "y" ]; then
- dest="`readlink -e $i`"
- if [ -n "$dest" ]; then
- echo $dest >> $rmlist_file
- # Remove the .siginfo when .tgz is removed
- if [ -f "$dest.siginfo" ]; then
- echo $dest.siginfo >> $rmlist_file
- fi
- fi
- fi
- # Add the ".tgz.done" and ".siginfo.done" (may exist in the future)
- base_fn="${i##/*/}"
- t_fn="$base_fn.done"
- s_fn="$base_fn.siginfo.done"
- for d in $t_fn $s_fn; do
- if [ -f $cache_dir/$d ]; then
- echo $cache_dir/$d >> $rmlist_file
- fi
- done
- fi
- done
-}
-
-# Remove the duplicated cache files for the pkg, keep the newest one
-remove_duplicated () {
-
- local topdir
- local oe_core_dir
- local tunedirs
- local all_archs
- local all_machines
- local ava_archs
- local arch
- local file_names
- local sstate_files_list
- local fn_tmp
- local list_suffix=`mktemp` || exit 1
-
- if [ -z "$extra_archs" ] ; then
- # Find out the archs in all the layers
- echo "Figuring out the archs in the layers ... "
- oe_core_dir=$(dirname $(dirname $(readlink -e $0)))
- topdir=$(dirname $oe_core_dir)
- tunedirs="`find $topdir/meta* ${oe_core_dir}/meta* $layers -path '*/meta*/conf/machine/include' 2>/dev/null`"
- [ -n "$tunedirs" ] || echo_error "Can't find the tune directory"
- all_machines="`find $topdir/meta* ${oe_core_dir}/meta* $layers -path '*/meta*/conf/machine/*' -name '*.conf' 2>/dev/null | sed -e 's/.*\///' -e 's/.conf$//'`"
- all_archs=`grep -r -h "^AVAILTUNES .*=" $tunedirs | sed -e 's/.*=//' -e 's/\"//g'`
- fi
-
- # Use the "_" to substitute "-", e.g., x86-64 to x86_64, but not for extra_archs which can be something like cortexa9t2-vfp-neon
- # Sort to remove the duplicated ones
- # Add allarch and builder arch (native)
- builder_arch=$(uname -m)
- all_archs="$(echo allarch $all_archs $all_machines $builder_arch \
- | sed -e 's/-/_/g' -e 's/ /\n/g' | sort -u) $extra_archs"
- echo "Done"
-
- # Total number of files including sstate-, .siginfo and .done files
- total_files=`find $cache_dir -name 'sstate*' | wc -l`
- # Save all the sstate files in a file
- sstate_files_list=`mktemp` || exit 1
- find $cache_dir -name 'sstate:*:*:*:*:*:*:*.tgz*' >$sstate_files_list
-
- echo "Figuring out the suffixes in the sstate cache dir ... "
- sstate_suffixes="`sed 's%.*/sstate:[^:]*:[^:]*:[^:]*:[^:]*:[^:]*:[^:]*:[^_]*_\([^:]*\)\.tgz.*%\1%g' $sstate_files_list | sort -u`"
- echo "Done"
- echo "The following suffixes have been found in the cache dir:"
- echo $sstate_suffixes
-
- echo "Figuring out the archs in the sstate cache dir ... "
- # Using this SSTATE_PKGSPEC definition it's 6th colon separated field
- # SSTATE_PKGSPEC = "sstate:${PN}:${PACKAGE_ARCH}${TARGET_VENDOR}-${TARGET_OS}:${PV}:${PR}:${SSTATE_PKGARCH}:${SSTATE_VERSION}:"
- for arch in $all_archs; do
- grep -q ".*/sstate:[^:]*:[^:]*:[^:]*:[^:]*:$arch:[^:]*:[^:]*\.tgz$" $sstate_files_list
- [ $? -eq 0 ] && ava_archs="$ava_archs $arch"
- # ${builder_arch}_$arch used by toolchain sstate
- grep -q ".*/sstate:[^:]*:[^:]*:[^:]*:[^:]*:${builder_arch}_$arch:[^:]*:[^:]*\.tgz$" $sstate_files_list
- [ $? -eq 0 ] && ava_archs="$ava_archs ${builder_arch}_$arch"
- done
- echo "Done"
- echo "The following archs have been found in the cache dir:"
- echo $ava_archs
- echo ""
-
- # Save the file list which needs to be removed
- local remove_listdir=`mktemp -d` || exit 1
- for suffix in $sstate_suffixes; do
- if [ "$suffix" = "populate_lic" ] ; then
- echo "Skipping populate_lic, because removing duplicates doesn't work correctly for them (use --stamps-dir instead)"
- continue
- fi
- # Total number of files including .siginfo and .done files
- total_files_suffix=`grep ".*/sstate:[^:]*:[^:]*:[^:]*:[^:]*:[^:]*:[^:]*:[^:_]*_$suffix\.tgz.*" $sstate_files_list | wc -l 2>/dev/null`
- total_tgz_suffix=`grep ".*/sstate:[^:]*:[^:]*:[^:]*:[^:]*:[^:]*:[^:]*:[^:_]*_$suffix\.tgz$" $sstate_files_list | wc -l 2>/dev/null`
- # Save the file list to a file, some suffix's file may not exist
- grep ".*/sstate:[^:]*:[^:]*:[^:]*:[^:]*:[^:]*:[^:]*:[^:_]*_$suffix\.tgz.*" $sstate_files_list >$list_suffix 2>/dev/null
- local deleted_tgz=0
- local deleted_files=0
- for ext in tgz tgz.siginfo tgz.done; do
- echo "Figuring out the sstate:xxx_$suffix.$ext ... "
- # Uniq BPNs
- file_names=`for arch in $ava_archs ""; do
- sed -ne "s%.*/sstate:\([^:]*\):[^:]*:[^:]*:[^:]*:$arch:[^:]*:[^:]*\.${ext}$%\1%p" $list_suffix
- done | sort -u`
-
- fn_tmp=`mktemp` || exit 1
- rm_list="$remove_listdir/sstate:xxx_$suffix"
- for fn in $file_names; do
- [ -z "$verbose" ] || echo "Analyzing sstate:$fn-xxx_$suffix.${ext}"
- for arch in $ava_archs ""; do
- grep -h ".*/sstate:$fn:[^:]*:[^:]*:[^:]*:$arch:[^:]*:[^:]*\.${ext}$" $list_suffix >$fn_tmp
- if [ -s $fn_tmp ] ; then
- [ $debug -gt 1 ] && echo "Available files for $fn-$arch- with suffix $suffix.${ext}:" && cat $fn_tmp
- # Use the modification time
- to_del=$(ls -t $(cat $fn_tmp) | sed -n '1!p')
- [ $debug -gt 2 ] && echo "Considering to delete: $to_del"
- # The sstate file which is downloaded from the SSTATE_MIRROR is
- # put in SSTATE_DIR, and there is a symlink in SSTATE_DIR/??/ to
- # it, so filter it out from the remove list if it should not be
- # removed.
- to_keep=$(ls -t $(cat $fn_tmp) | sed -n '1p')
- [ $debug -gt 2 ] && echo "Considering to keep: $to_keep"
- for k in $to_keep; do
- if [ -L "$k" ]; then
- # The symlink's destination
- k_dest="`readlink -e $k`"
- # Maybe it is the one in cache_dir
- k_maybe="$cache_dir/${k##/*/}"
- # Remove it from the remove list if they are the same.
- if [ "$k_dest" = "$k_maybe" ]; then
- to_del="`echo $to_del | sed 's#'\"$k_maybe\"'##g'`"
- fi
- fi
- done
- rm -f $fn_tmp
- [ $debug -gt 2 ] && echo "Decided to delete: $to_del"
- gen_rmlist $rm_list.$ext "$to_del"
- fi
- done
- done
- done
- deleted_tgz=`cat $rm_list.* 2>/dev/null | grep ".tgz$" | wc -l`
- deleted_files=`cat $rm_list.* 2>/dev/null | wc -l`
- [ "$deleted_files" -gt 0 -a $debug -gt 0 ] && cat $rm_list.*
- echo "($deleted_tgz out of $total_tgz_suffix .tgz files for $suffix suffix will be removed or $deleted_files out of $total_files_suffix when counting also .siginfo and .done files)"
- let total_deleted=$total_deleted+$deleted_files
- done
- deleted_tgz=0
- rm_old_list=$remove_listdir/sstate-old-filenames
- find $cache_dir -name 'sstate-*.tgz' >$rm_old_list
- [ -s "$rm_old_list" ] && deleted_tgz=`cat $rm_old_list | grep ".tgz$" | wc -l`
- [ -s "$rm_old_list" ] && deleted_files=`cat $rm_old_list | wc -l`
- [ -s "$rm_old_list" -a $debug -gt 0 ] && cat $rm_old_list
- echo "($deleted_tgz .tgz files with old sstate-* filenames will be removed or $deleted_files when counting also .siginfo and .done files)"
- let total_deleted=$total_deleted+$deleted_files
-
- rm -f $list_suffix
- rm -f $sstate_files_list
- if [ $total_deleted -gt 0 ]; then
- read_confirm
- if [ "$confirm" = "y" -o "$confirm" = "Y" ]; then
- for list in `ls $remove_listdir/`; do
- echo "Removing $list.tgz (`cat $remove_listdir/$list | wc -w` files) ... "
- # Remove them one by one to avoid the argument list too long error
- for i in `cat $remove_listdir/$list`; do
- rm -f $verbose $i
- done
- echo "Done"
- done
- echo "$total_deleted files have been removed!"
- else
- do_nothing
- fi
- else
- no_files
- fi
- [ -d $remove_listdir ] && rm -fr $remove_listdir
-}
-
-# Remove the sstate file by stamps dir, the file not used by the stamps dir
-# will be removed.
-rm_by_stamps (){
-
- local cache_list=`mktemp` || exit 1
- local keep_list=`mktemp` || exit 1
- local rm_list=`mktemp` || exit 1
- local sums
- local all_sums
-
- # Total number of files including sstate-, .siginfo and .done files
- total_files=`find $cache_dir -type f -name 'sstate*' | wc -l`
- # Save all the state file list to a file
- find $cache_dir -type f -name 'sstate*' | sort -u -o $cache_list
-
- echo "Figuring out the suffixes in the sstate cache dir ... "
- local sstate_suffixes="`sed 's%.*/sstate:[^:]*:[^:]*:[^:]*:[^:]*:[^:]*:[^:]*:[^_]*_\([^:]*\)\.tgz.*%\1%g' $cache_list | sort -u`"
- echo "Done"
- echo "The following suffixes have been found in the cache dir:"
- echo $sstate_suffixes
-
- # Figure out all the md5sums in the stamps dir.
- echo "Figuring out all the md5sums in stamps dir ... "
- for i in $sstate_suffixes; do
- # There is no "\.sigdata" but "_setcene" when it is mirrored
- # from the SSTATE_MIRRORS, use them to figure out the sum.
- sums=`find $stamps -maxdepth 3 -name "*.do_$i.*" \
- -o -name "*.do_${i}_setscene.*" | \
- sed -ne 's#.*_setscene\.##p' -e 's#.*\.sigdata\.##p' | \
- sed -e 's#\..*##' | sort -u`
- all_sums="$all_sums $sums"
- done
- echo "Done"
-
- echo "Figuring out the files which will be removed ... "
- for i in $all_sums; do
- grep ".*/sstate:[^:]*:[^:]*:[^:]*:[^:]*:[^:]*:[^:]*:${i}_.*" $cache_list >>$keep_list
- done
- echo "Done"
-
- if [ -s $keep_list ]; then
- sort -u $keep_list -o $keep_list
- to_del=`comm -1 -3 $keep_list $cache_list`
- gen_rmlist $rm_list "$to_del"
- let total_deleted=`cat $rm_list | sort -u | wc -w`
- if [ $total_deleted -gt 0 ]; then
- [ $debug -gt 0 ] && cat $rm_list | sort -u
- read_confirm
- if [ "$confirm" = "y" -o "$confirm" = "Y" ]; then
- echo "Removing sstate cache files ... ($total_deleted files)"
- # Remove them one by one to avoid the argument list too long error
- for i in `cat $rm_list | sort -u`; do
- rm -f $verbose $i
- done
- echo "$total_deleted files have been removed"
- else
- do_nothing
- fi
- else
- no_files
- fi
- else
- echo_error "All files in cache dir will be removed! Abort!"
- fi
-
- rm -f $cache_list
- rm -f $keep_list
- rm -f $rm_list
-}
-
-# Parse arguments
-while [ -n "$1" ]; do
- case $1 in
- --cache-dir=*)
- cache_dir=`echo $1 | sed -e 's#^--cache-dir=##' | xargs readlink -e`
- [ -d "$cache_dir" ] || echo_error "Invalid argument to --cache-dir"
- shift
- ;;
- --remove-duplicated|-d)
- rm_duplicated="y"
- shift
- ;;
- --yes|-y)
- confirm="y"
- shift
- ;;
- --follow-symlink|-L)
- fsym="y"
- shift
- ;;
- --extra-archs=*)
- extra_archs=`echo $1 | sed -e 's#^--extra-archs=##' -e 's#,# #g'`
- [ -n "$extra_archs" ] || echo_error "Invalid extra arch parameter"
- shift
- ;;
- --extra-layer=*)
- extra_layers=`echo $1 | sed -e 's#^--extra-layer=##' -e 's#,# #g'`
- [ -n "$extra_layers" ] || echo_error "Invalid extra layer parameter"
- for i in $extra_layers; do
- l=`readlink -e $i`
- if [ -d "$l" ]; then
- layers="$layers $l"
- else
- echo_error "Can't find layer $i"
- fi
- done
- shift
- ;;
- --stamps-dir=*)
- stamps=`echo $1 | sed -e 's#^--stamps-dir=##' -e 's#,# #g'`
- [ -n "$stamps" ] || echo_error "Invalid stamps dir $i"
- for i in $stamps; do
- [ -d "$i" ] || echo_error "Invalid stamps dir $i"
- done
- shift
- ;;
- --verbose|-v)
- verbose="-v"
- shift
- ;;
- --debug|-D)
- debug=`expr $debug + 1`
- echo "Debug level $debug"
- shift
- ;;
- --help|-h)
- usage
- exit 0
- ;;
- *)
- echo "Invalid arguments $*"
- echo_error "Try 'sstate-cache-management.sh -h' for more information."
- ;;
- esac
-done
-
-# sstate cache directory, use environment variable SSTATE_CACHE_DIR
-# if it was not specified, otherwise, error.
-[ -n "$cache_dir" ] || cache_dir=$SSTATE_CACHE_DIR
-[ -n "$cache_dir" ] || echo_error "No cache dir found!"
-[ -d "$cache_dir" ] || echo_error "Invalid cache directory \"$cache_dir\""
-
-[ -n "$rm_duplicated" -a -n "$stamps" ] && \
- echo_error "Can not use both --remove-duplicated and --stamps-dir"
-
-[ "$rm_duplicated" = "y" ] && remove_duplicated
-[ -n "$stamps" ] && rm_by_stamps
-[ -z "$rm_duplicated" -a -z "$stamps" ] && \
- echo "What do you want to do?"
-exit 0
diff --git a/scripts/sstate-diff-machines.sh b/scripts/sstate-diff-machines.sh
index 1d721eb87d..5ed413b2ee 100755
--- a/scripts/sstate-diff-machines.sh
+++ b/scripts/sstate-diff-machines.sh
@@ -1,5 +1,7 @@
#!/bin/bash
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
# Used to compare sstate checksums between MACHINES.
@@ -127,6 +129,8 @@ for M in ${machines}; do
fi
done
+COMPARE_TASKS="do_configure.sigdata do_populate_sysroot.sigdata do_package_write_ipk.sigdata do_package_write_rpm.sigdata do_package_write_deb.sigdata do_package_write_tar.sigdata"
+
function compareSignatures() {
MACHINE1=$1
MACHINE2=$2
@@ -134,7 +138,7 @@ function compareSignatures() {
PRE_PATTERN=""
[ -n "${PATTERN}" ] || PRE_PATTERN="-v"
[ -n "${PATTERN}" ] || PATTERN="MACHINE"
- for TASK in do_configure.sigdata do_populate_sysroot.sigdata do_package_write_ipk.sigdata; do
+ for TASK in $COMPARE_TASKS; do
printf "\n\n === Comparing signatures for task ${TASK} between ${MACHINE1} and ${MACHINE2} ===\n" | tee -a ${OUTPUT}/signatures.${MACHINE2}.${TASK}.log
diff ${OUTPUT}/${MACHINE1}/list.M ${OUTPUT}/${MACHINE2}/list.M | grep ${PRE_PATTERN} "${PATTERN}" | grep ${TASK} > ${OUTPUT}/signatures.${MACHINE2}.${TASK}
for i in `cat ${OUTPUT}/signatures.${MACHINE2}.${TASK} | sed 's#[^/]*/\([^/]*\)/.*#\1#g' | sort -u | xargs`; do
diff --git a/scripts/sstate-sysroot-cruft.sh b/scripts/sstate-sysroot-cruft.sh
index fbf1ca3c43..b2002badfb 100755
--- a/scripts/sstate-sysroot-cruft.sh
+++ b/scripts/sstate-sysroot-cruft.sh
@@ -1,5 +1,7 @@
#!/bin/sh
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
# Used to find files installed in sysroot which are not tracked by sstate manifest
@@ -145,18 +147,6 @@ WHITELIST="${WHITELIST} \
.*/var/cache/fontconfig/ \
"
-# created by oe.utils.write_ld_so_conf which is used from few bbclasses and recipes:
-# meta/classes/image-prelink.bbclass: oe.utils.write_ld_so_conf(d)
-# meta/classes/insane.bbclass: oe.utils.write_ld_so_conf(d)
-# meta/classes/insane.bbclass: oe.utils.write_ld_so_conf(d)
-# meta/recipes-gnome/gobject-introspection/gobject-introspection_1.48.0.bb: oe.utils.write_ld_so_conf(d)
-# meta/recipes-gnome/gobject-introspection/gobject-introspection_1.48.0.bb: oe.utils.write_ld_so_conf(d)
-# introduced in oe-core commit 7fd1d7e639c2ed7e0699937a5cb245c187b7c811
-# and more visible since added to gobject-introspection in 10e0c1a3a452baa05d160a92a54b2e33cf0fd061
-WHITELIST="${WHITELIST} \
- [^/]*/etc/ld.so.conf \
-"
-
SYSROOTS="`readlink -f ${tmpdir}`/sysroots/"
mkdir ${OUTPUT}
diff --git a/scripts/sysroot-relativelinks.py b/scripts/sysroot-relativelinks.py
index 56e36f3ad5..ccb3c867f0 100755
--- a/scripts/sysroot-relativelinks.py
+++ b/scripts/sysroot-relativelinks.py
@@ -1,5 +1,7 @@
#!/usr/bin/env python3
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
diff --git a/scripts/task-time b/scripts/task-time
index bcd1e25817..8f71b29b77 100755
--- a/scripts/task-time
+++ b/scripts/task-time
@@ -1,5 +1,7 @@
#!/usr/bin/env python3
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
diff --git a/scripts/verify-bashisms b/scripts/verify-bashisms
index fb0cc719ea..fc3677c6ed 100755
--- a/scripts/verify-bashisms
+++ b/scripts/verify-bashisms
@@ -1,11 +1,13 @@
#!/usr/bin/env python3
#
+# Copyright OpenEmbedded Contributors
+#
# SPDX-License-Identifier: GPL-2.0-only
#
import sys, os, subprocess, re, shutil
-whitelist = (
+allowed = (
# type is supported by dash
'if type systemctl >/dev/null 2>/dev/null; then',
'if type systemd-tmpfiles >/dev/null 2>/dev/null; then',
@@ -19,8 +21,8 @@ whitelist = (
'. $target_sdk_dir/${oe_init_build_env_path} $target_sdk_dir >> $LOGFILE'
)
-def is_whitelisted(s):
- for w in whitelist:
+def is_allowed(s):
+ for w in allowed:
if w in s:
return True
return False
@@ -49,7 +51,7 @@ def process(filename, function, lineno, script):
output = e.output.replace(fn.name, function)
if not output or not output.startswith('possible bashism'):
# Probably starts with or contains only warnings. Dump verbatim
- # with one space indention. Can't do the splitting and whitelist
+ # with one space indentation. Can't do the splitting and allowed-list
# checking below.
return '\n'.join([filename,
' Unexpected output from checkbashisms.pl'] +
@@ -65,7 +67,7 @@ def process(filename, function, lineno, script):
# ...
# ...
result = []
- # Check the results against the whitelist
+ # Check the results against the allowed list
for message, source in zip(output[0::2], output[1::2]):
- if not is_whitelisted(source):
+ if not is_allowed(source):
if lineno is not None:
@@ -100,7 +102,7 @@ if __name__=='__main__':
args = parser.parse_args()
if shutil.which("checkbashisms.pl") is None:
- print("Cannot find checkbashisms.pl on $PATH, get it from https://anonscm.debian.org/cgit/collab-maint/devscripts.git/plain/scripts/checkbashisms.pl")
+ print("Cannot find checkbashisms.pl on $PATH, get it from https://salsa.debian.org/debian/devscripts/raw/master/scripts/checkbashisms.pl")
sys.exit(1)
# The order of defining the worker function,
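
The process() helper above runs checkbashisms.pl over each extracted shell
fragment and treats the checker's paired output lines as (message, offending
source line). A hedged sketch of that round-trip; the function name and
temp-file handling are illustrative, not the script's own:

    import subprocess, tempfile

    def check_fragment(script_text, allowed=()):
        with tempfile.NamedTemporaryFile(mode="w", suffix=".sh") as fn:
            fn.write(script_text)
            fn.flush()
            try:
                # checkbashisms.pl exits non-zero when it finds a bashism
                subprocess.check_output(["checkbashisms.pl", fn.name],
                                        universal_newlines=True)
                return []
            except subprocess.CalledProcessError as e:
                lines = e.output.splitlines()
        # pair up message/source lines, dropping allowed constructs
        return [(m, s) for m, s in zip(lines[0::2], lines[1::2])
                if not any(w in s for w in allowed)]
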
diff --git a/scripts/wic b/scripts/wic
index 24700f380f..06e0b48db0 100755
--- a/scripts/wic
+++ b/scripts/wic
@@ -22,9 +22,9 @@ import sys
import argparse
import logging
import subprocess
+import shutil
from collections import namedtuple
-from distutils import spawn
# External modules
scripts_path = os.path.dirname(os.path.realpath(__file__))
@@ -47,7 +47,7 @@ if os.environ.get('SDKTARGETSYSROOT'):
break
sdkroot = os.path.dirname(sdkroot)
-bitbake_exe = spawn.find_executable('bitbake')
+bitbake_exe = shutil.which('bitbake')
if bitbake_exe:
bitbake_path = scriptpath.add_bitbake_lib_path()
import bb
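
distutils.spawn.find_executable was deprecated along with the rest of
distutils (PEP 632) and removed in Python 3.12; shutil.which is the stdlib
replacement with the same contract. A minimal sketch:

    import shutil

    # Returns the absolute path of the executable, or None if it is not on
    # PATH, just as distutils.spawn.find_executable did.
    bitbake_exe = shutil.which("bitbake")
    if bitbake_exe is None:
        raise SystemExit("bitbake not found in PATH")
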
@@ -159,6 +159,9 @@ def wic_create_subcommand(options, usage_str):
"(Use -e/--image-name to specify it)")
native_sysroot = options.native_sysroot
+ if options.kernel_dir:
+ kernel_dir = options.kernel_dir
+
if not options.vars_dir and (not native_sysroot or not os.path.isdir(native_sysroot)):
logger.info("Building wic-tools...\n")
subprocess.check_call(["bitbake", "wic-tools"])
@@ -206,7 +209,7 @@ def wic_create_subcommand(options, usage_str):
logger.info(" (Please check that the build artifacts for the machine")
logger.info(" selected in local.conf actually exist and that they")
logger.info(" are the correct artifacts for the image (.wks file)).\n")
- raise WicError("The artifact that couldn't be found was %s:\n %s", not_found, not_found_dir)
+ raise WicError("The artifact that couldn't be found was %s:\n %s" % (not_found, not_found_dir))
krootfs_dir = options.rootfs_dir
if krootfs_dir is None:
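
The WicError change above fixes a formatting bug: lazy "%s"-plus-arguments
formatting is a logging-API convention, whereas a plain exception constructor
only stores its arguments, so the old raise surfaced the uninterpolated
template. A small sketch of the difference (the WicError stub is illustrative):

    class WicError(Exception):
        pass

    # Broken: the arguments are stored, never interpolated into the template.
    str(WicError("artifact %s missing:\n %s", "ROOTFS_DIR", "/tmp"))
    # -> "('artifact %s missing:\\n %s', 'ROOTFS_DIR', '/tmp')"

    # Fixed: interpolate eagerly with %, then construct the exception.
    str(WicError("artifact %s missing:\n %s" % ("ROOTFS_DIR", "/tmp")))
    # -> "artifact ROOTFS_DIR missing:\n /tmp"
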
@@ -312,6 +315,8 @@ def wic_init_parser_create(subparser):
subparser.add_argument("-o", "--outdir", dest="outdir", default='.',
help="name of directory to create image in")
+ subparser.add_argument("-w", "--workdir",
+ help="temporary workdir to use for intermediate files")
subparser.add_argument("-e", "--image-name", dest="image_name",
help="name of the image to use the artifacts from "
"e.g. core-image-sato")
@@ -344,6 +349,8 @@ def wic_init_parser_create(subparser):
default=False, help="output debug information")
subparser.add_argument("-i", "--imager", dest="imager",
default="direct", help="the wic imager plugin")
+ subparser.add_argument("--extra-space", type=int, dest="extra_space",
+ default=0, help="additional free disk space to add to the image")
return
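
Both new options use the standard argparse plumbing; a standalone, runnable
sketch of just these two additions (defaults mirror the diff):

    import argparse

    p = argparse.ArgumentParser()
    p.add_argument("-w", "--workdir",
                   help="temporary workdir to use for intermediate files")
    p.add_argument("--extra-space", type=int, dest="extra_space", default=0,
                   help="additional free disk space to add to the image")
    opts = p.parse_args(["-w", "/tmp/wic-work", "--extra-space", "512"])
    assert opts.workdir == "/tmp/wic-work" and opts.extra_space == 512
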
diff --git a/scripts/yocto-check-layer b/scripts/yocto-check-layer
index b7c83c8b54..67cc71950f 100755
--- a/scripts/yocto-check-layer
+++ b/scripts/yocto-check-layer
@@ -24,7 +24,7 @@ import scriptpath
scriptpath.add_oe_lib_path()
scriptpath.add_bitbake_lib_path()
-from checklayer import LayerType, detect_layers, add_layers, add_layer_dependencies, get_signatures, check_bblayers
+from checklayer import LayerType, detect_layers, add_layers, add_layer_dependencies, get_layer_dependencies, get_signatures, check_bblayers, sanity_check_layers
from oeqa.utils.commands import get_bb_vars
PROGNAME = 'yocto-check-layer'
@@ -41,6 +41,12 @@ def test_layer(td, layer, test_software_layer_signatures):
tc.loadTests(CASES_PATHS)
return tc.runTests()
+def dump_layer_debug(layer):
+ logger.debug("Found layer %s (%s)" % (layer["name"], layer["path"]))
+ collections = layer.get("collections", {})
+ if collections:
+ logger.debug("%s collections: %s" % (layer["name"], ", ".join(collections)))
+
def main():
parser = argparse.ArgumentParser(
description="Yocto Project layer checking tool",
@@ -51,6 +57,8 @@ def main():
help='File to output log (optional)', action='store')
parser.add_argument('--dependency', nargs="+",
help='Layers to process for dependencies', action='store')
+ parser.add_argument('--no-auto-dependency', help='Disable automatic testing of dependencies',
+ action='store_true')
parser.add_argument('--machines', nargs="+",
help='List of MACHINEs to be used during testing', action='store')
parser.add_argument('--additional-layers', nargs="+",
@@ -104,6 +112,17 @@ def main():
else:
dep_layers = layers
+ logger.debug("Found additional layers:")
+ for l in additional_layers:
+ dump_layer_debug(l)
+ logger.debug("Found dependency layers:")
+ for l in dep_layers:
+ dump_layer_debug(l)
+
+ if not sanity_check_layers(additional_layers + dep_layers, logger):
+ logger.error("Failed layer validation")
+ return 1
+
logger.info("Detected layers:")
for layer in layers:
if layer['type'] == LayerType.ERROR_BSP_DISTRO:
@@ -112,7 +131,7 @@ def main():
% layer['name'])
layers.remove(layer)
elif layer['type'] == LayerType.ERROR_NO_LAYER_CONF:
- logger.error("%s: Don't have conf/layer.conf file."\
+ logger.info("%s: Doesn't have conf/layer.conf file, so ignoring"\
% layer['name'])
layers.remove(layer)
else:
@@ -121,6 +140,21 @@ def main():
if not layers:
return 1
+ # Find all dependencies, and get them checked too
+ if not args.no_auto_dependency:
+ depends = []
+ for layer in layers:
+ layer_depends = get_layer_dependencies(layer, dep_layers, logger)
+ if layer_depends:
+ for d in layer_depends:
+ if d not in depends:
+ depends.append(d)
+
+ for d in depends:
+ if d not in layers:
+ logger.info("Adding %s to the list of layers to test, as a dependency", d['name'])
+ layers.append(d)
+
shutil.copyfile(bblayersconf, bblayersconf + '.backup')
def cleanup_bblayers(signum, frame):
shutil.copyfile(bblayersconf + '.backup', bblayersconf)
@@ -134,11 +168,13 @@ def main():
layers_tested = 0
for layer in layers:
- if layer['type'] == LayerType.ERROR_NO_LAYER_CONF or \
- layer['type'] == LayerType.ERROR_BSP_DISTRO:
+ if layer['type'] in (LayerType.ERROR_NO_LAYER_CONF, LayerType.ERROR_BSP_DISTRO):
continue
- if check_bblayers(bblayersconf, layer['path'], logger):
+ # Reset to a clean backup copy for each run
+ shutil.copyfile(bblayersconf + '.backup', bblayersconf)
+
+ if layer['type'] not in (LayerType.CORE, ) and check_bblayers(bblayersconf, layer['path'], logger):
logger.info("%s already in %s. To capture initial signatures, layer under test should not present "
"in BBLAYERS. Please remove %s from BBLAYERS." % (layer['name'], bblayersconf, layer['name']))
results[layer['name']] = None
@@ -149,17 +185,13 @@ def main():
logger.info("Setting up for %s(%s), %s" % (layer['name'], layer['type'],
layer['path']))
- shutil.copyfile(bblayersconf + '.backup', bblayersconf)
-
missing_dependencies = not add_layer_dependencies(bblayersconf, layer, dep_layers, logger)
if not missing_dependencies:
for additional_layer in additional_layers:
if not add_layer_dependencies(bblayersconf, additional_layer, dep_layers, logger):
missing_dependencies = True
break
- if not add_layer_dependencies(bblayersconf, layer, dep_layers, logger) or \
- any(map(lambda additional_layer: not add_layer_dependencies(bblayersconf, additional_layer, dep_layers, logger),
- additional_layers)):
+ if missing_dependencies:
logger.info('Skipping %s due to missing dependencies.' % layer['name'])
results[layer['name']] = None
results_status[layer['name']] = 'SKIPPED (Missing dependencies)'
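
The auto-dependency pass above accumulates each layer's dependencies exactly
once while preserving discovery order, then schedules any that are not
already queued for testing. A hedged sketch of that ordered de-duplication,
where get_deps stands in for get_layer_dependencies(layer, dep_layers, logger):

    def collect_extra_layers(layers, get_deps):
        depends = []
        for layer in layers:
            for d in get_deps(layer) or []:
                if d not in depends:
                    depends.append(d)
        # only dependencies not already scheduled need to be appended
        return [d for d in depends if d not in layers]
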
diff --git a/scripts/yocto_testresults_query.py b/scripts/yocto_testresults_query.py
new file mode 100755
index 0000000000..521ead8473
--- /dev/null
+++ b/scripts/yocto_testresults_query.py
@@ -0,0 +1,131 @@
+#!/usr/bin/env python3
+
+# Yocto Project test results management tool
+# This script is a thin layer over resulttool to manage test results and regression reports.
+# Its main feature is to translate tags or branch names to revision SHA-1s, and then to run resulttool
+# with those computed revisions
+#
+# Copyright (C) 2023 OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
+import sys
+import os
+import argparse
+import subprocess
+import tempfile
+import lib.scriptutils as scriptutils
+
+script_path = os.path.dirname(os.path.realpath(__file__))
+poky_path = os.path.abspath(os.path.join(script_path, ".."))
+resulttool = os.path.abspath(os.path.join(script_path, "resulttool"))
+logger = scriptutils.logger_create(sys.argv[0])
+testresults_default_url = "git://git.yoctoproject.org/yocto-testresults"
+
+def create_workdir():
+ workdir = tempfile.mkdtemp(prefix='yocto-testresults-query.')
+ logger.info(f"Shallow-cloning testresults in {workdir}")
+ subprocess.check_call(["git", "clone", testresults_default_url, workdir, "--depth", "1"])
+ return workdir
+
+def get_sha1(pokydir, revision):
+ try:
+ rev = subprocess.check_output(["git", "rev-list", "-n", "1", revision], cwd=pokydir).decode('utf-8').strip()
+ logger.info(f"SHA-1 revision for {revision} in {pokydir} is {rev}")
+ return rev
+ except subprocess.CalledProcessError:
+ logger.error(f"Can not find SHA-1 for {revision} in {pokydir}")
+ return None
+
+def get_branch(tag):
+ # The tags in the test results repository, as returned by git ls-remote, have the following form:
+ # refs/tags/<branch>/<count>-g<sha1>/<num>
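+ # Illustrative example (hypothetical tag):
+ #   get_branch("refs/tags/kirkstone/5934-g09dbe600/1") -> "kirkstone"
+ # Branch names containing "/" survive, since only the first two and the
+ # last two path components are stripped.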
+ return '/'.join(tag.split("/")[2:-2])
+
+def fetch_testresults(workdir, sha1):
+ logger.info(f"Fetching test results for {sha1} in {workdir}")
+ rawtags = subprocess.check_output(["git", "ls-remote", "--refs", "--tags", "origin", f"*{sha1}*"], cwd=workdir).decode('utf-8').strip()
+ if not rawtags:
+ raise Exception(f"No reference found for commit {sha1} in {workdir}")
+ branch = ""
+ for rev in [rawtag.split()[1] for rawtag in rawtags.splitlines()]:
+ if not branch:
+ branch = get_branch(rev)
+ logger.info(f"Fetching matching revision: {rev}")
+ subprocess.check_call(["git", "fetch", "--depth", "1", "origin", f"{rev}:{rev}"], cwd=workdir)
+ return branch
+
+def compute_regression_report(workdir, basebranch, baserevision, targetbranch, targetrevision, args):
+ logger.info(f"Running resulttool regression between SHA1 {baserevision} and {targetrevision}")
+ command = [resulttool, "regression-git", "--branch", basebranch, "--commit", baserevision, "--branch2", targetbranch, "--commit2", targetrevision, workdir]
+ if args.limit:
+ command.extend(["-l", args.limit])
+ report = subprocess.check_output(command).decode("utf-8")
+ return report
+
+def print_report_with_header(report, baseversion, baserevision, targetversion, targetrevision):
+ print("========================== Regression report ==============================")
+ print(f'{"=> Target:": <16}{targetversion: <16}({targetrevision})')
+ print(f'{"=> Base:": <16}{baseversion: <16}({baserevision})')
+ print("===========================================================================\n")
+ print(report, end='')
+
+def regression(args):
+ logger.info(f"Compute regression report between {args.base} and {args.target}")
+ if args.testresultsdir:
+ workdir = args.testresultsdir
+ else:
+ workdir = create_workdir()
+
+ try:
+ baserevision = get_sha1(poky_path, args.base)
+ targetrevision = get_sha1(poky_path, args.target)
+ if not baserevision or not targetrevision:
+ logger.error("One or more revision(s) missing. You might be targeting nonexistant tags/branches, or are in wrong repository (you must use Poky and not oe-core)")
+ if not args.testresultsdir:
+ subprocess.check_call(["rm", "-rf", workdir])
+ sys.exit(1)
+ basebranch = fetch_testresults(workdir, baserevision)
+ targetbranch = fetch_testresults(workdir, targetrevision)
+ report = compute_regression_report(workdir, basebranch, baserevision, targetbranch, targetrevision, args)
+ print_report_with_header(report, args.base, baserevision, args.target, targetrevision)
+ finally:
+ if not args.testresultsdir:
+ subprocess.check_call(["rm", "-rf", workdir])
+
+def main():
+ parser = argparse.ArgumentParser(description="Yocto Project test results helper")
+ subparsers = parser.add_subparsers(
+ help="Supported commands for test results helper",
+ required=True)
+ parser_regression_report = subparsers.add_parser(
+ "regression-report",
+ help="Generate regression report between two fixed revisions. Revisions can be branch name or tag")
+ parser_regression_report.add_argument(
+ 'base',
+ help="Revision or tag against which to compare results (i.e: the older)")
+ parser_regression_report.add_argument(
+ 'target',
+ help="Revision or tag to compare against the base (i.e: the newer)")
+ parser_regression_report.add_argument(
+ '-t',
+ '--testresultsdir',
+ help=f"An existing test results directory. {sys.argv[0]} will automatically clone it and use default branch if not provided")
+ parser_regression_report.add_argument(
+ '-l',
+ '--limit',
+ help=f"Maximum number of changes to display per test. Can be set to 0 to print all changes")
+ parser_regression_report.set_defaults(func=regression)
+
+ args = parser.parse_args()
+ args.func(args)
+
+if __name__ == '__main__':
+ try:
+ ret = main()
+ except Exception:
+ ret = 1
+ import traceback
+ traceback.print_exc()
+ sys.exit(ret)
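
End to end, the helper resolves both refs to SHA-1s in the enclosing poky
checkout, shallow-fetches the matching tags from yocto-testresults, and hands
everything to resulttool regression-git. A hypothetical invocation (the tag
names are examples):

    $ ./scripts/yocto_testresults_query.py regression-report yocto-4.2 yocto-4.3 -l 25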