Diffstat (limited to 'scripts')
-rw-r--r-- scripts/.oe-layers.json | 7
-rwxr-xr-x scripts/autobuilder-worker-prereq-tests | 93
-rwxr-xr-x scripts/bblock | 184
-rwxr-xr-x scripts/bitbake-prserv-tool | 12
-rwxr-xr-x scripts/bitbake-whatchanged | 330
-rwxr-xr-x scripts/buildall-qemu | 120
-rwxr-xr-x scripts/buildhistory-collect-srcrevs | 19
-rwxr-xr-x scripts/buildhistory-diff | 17
-rwxr-xr-x scripts/buildstats-diff | 26
-rwxr-xr-x scripts/buildstats-summary | 126
-rwxr-xr-x scripts/combo-layer | 54
-rwxr-xr-x scripts/combo-layer-hook-default.sh | 5
-rwxr-xr-x scripts/contrib/bb-perf/bb-matrix-plot.sh | 19
-rwxr-xr-x scripts/contrib/bb-perf/bb-matrix.sh | 15
-rwxr-xr-x scripts/contrib/bb-perf/buildstats-plot.sh | 93
-rwxr-xr-x scripts/contrib/bb-perf/buildstats.sh | 154
-rwxr-xr-x scripts/contrib/bbvars.py | 20
-rwxr-xr-x scripts/contrib/build-perf-test-wrapper.sh | 40
-rwxr-xr-x scripts/contrib/build-perf-test.sh | 400
-rwxr-xr-x scripts/contrib/convert-overrides.py | 155
-rwxr-xr-x scripts/contrib/convert-spdx-licenses.py | 145
-rwxr-xr-x scripts/contrib/convert-srcuri.py | 77
-rwxr-xr-x scripts/contrib/convert-variable-renames.py | 116
-rwxr-xr-x scripts/contrib/ddimage | 94
-rwxr-xr-x scripts/contrib/devtool-stress.py | 13
-rwxr-xr-x scripts/contrib/dialog-power-control | 4
-rwxr-xr-x scripts/contrib/documentation-audit.sh | 9
-rwxr-xr-x scripts/contrib/graph-tool | 113
-rwxr-xr-x scripts/contrib/image-manifest | 523
-rwxr-xr-x scripts/contrib/list-packageconfig-flags.py | 19
-rwxr-xr-x scripts/contrib/mkefidisk.sh | 464
-rwxr-xr-x scripts/contrib/oe-build-perf-report-email.py | 177
-rwxr-xr-x scripts/contrib/patchreview.py | 88
-rwxr-xr-x scripts/contrib/patchtest.sh | 18
-rwxr-xr-x scripts/contrib/serdevtry | 3
-rwxr-xr-x scripts/contrib/test_build_time.sh | 18
-rwxr-xr-x scripts/contrib/test_build_time_worker.sh | 6
-rwxr-xr-x scripts/contrib/uncovered | 15
-rwxr-xr-x scripts/contrib/verify-homepage.py | 8
-rwxr-xr-x scripts/cp-noerror | 4
-rwxr-xr-x scripts/create-pull-request | 44
l--------- scripts/cross-intercept/ar | 1
-rwxr-xr-x scripts/crosstap | 21
-rwxr-xr-x scripts/devtool | 48
-rwxr-xr-x scripts/distro/build-recipe-list.py | 129
-rwxr-xr-x scripts/distro/distrocompare.sh | 123
l--------- scripts/esdk-tools/devtool | 1
l--------- scripts/esdk-tools/oe-find-native-sysroot | 1
l--------- scripts/esdk-tools/recipetool | 1
l--------- scripts/esdk-tools/runqemu | 1
l--------- scripts/esdk-tools/runqemu-addptable2image | 1
l--------- scripts/esdk-tools/runqemu-export-rootfs | 1
l--------- scripts/esdk-tools/runqemu-extract-sdk | 1
l--------- scripts/esdk-tools/runqemu-gen-tapdevs | 1
l--------- scripts/esdk-tools/runqemu-ifdown | 1
l--------- scripts/esdk-tools/runqemu-ifup | 1
l--------- scripts/esdk-tools/wic | 1
-rwxr-xr-x scripts/gen-lockedsig-cache | 62
-rwxr-xr-x scripts/gen-site-config | 12
-rwxr-xr-x scripts/git | 30
-rwxr-xr-x scripts/install-buildtools | 357
-rw-r--r-- scripts/lib/argparse_oe.py | 6
-rw-r--r-- scripts/lib/build_perf/__init__.py | 9
-rw-r--r-- scripts/lib/build_perf/html.py | 9
-rw-r--r-- scripts/lib/build_perf/report.py | 12
-rw-r--r-- scripts/lib/buildstats.py | 55
-rw-r--r-- scripts/lib/checklayer/__init__.py | 128
-rw-r--r-- scripts/lib/checklayer/case.py | 4
-rw-r--r-- scripts/lib/checklayer/cases/bsp.py | 8
-rw-r--r-- scripts/lib/checklayer/cases/common.py | 52
-rw-r--r-- scripts/lib/checklayer/cases/distro.py | 6
-rw-r--r-- scripts/lib/checklayer/context.py | 4
-rw-r--r-- scripts/lib/devtool/__init__.py | 49
-rw-r--r-- scripts/lib/devtool/build.py | 32
-rw-r--r-- scripts/lib/devtool/build_image.py | 14
-rw-r--r-- scripts/lib/devtool/build_sdk.py | 14
-rw-r--r-- scripts/lib/devtool/deploy.py | 254
-rw-r--r-- scripts/lib/devtool/export.py | 14
-rw-r--r-- scripts/lib/devtool/ide_plugins/__init__.py | 282
-rw-r--r-- scripts/lib/devtool/ide_plugins/ide_code.py | 463
-rw-r--r-- scripts/lib/devtool/ide_plugins/ide_none.py | 53
-rwxr-xr-x scripts/lib/devtool/ide_sdk.py | 1070
-rw-r--r-- scripts/lib/devtool/import.py | 20
-rw-r--r-- scripts/lib/devtool/menuconfig.py | 81
-rw-r--r-- scripts/lib/devtool/package.py | 12
-rw-r--r-- scripts/lib/devtool/runqemu.py | 12
-rw-r--r-- scripts/lib/devtool/sdk.py | 17
-rw-r--r-- scripts/lib/devtool/search.py | 17
-rw-r--r-- scripts/lib/devtool/standard.py | 806
-rw-r--r-- scripts/lib/devtool/upgrade.py | 252
-rw-r--r-- scripts/lib/devtool/utilcmds.py | 12
-rw-r--r-- scripts/lib/recipetool/append.py | 100
-rw-r--r-- scripts/lib/recipetool/create.py | 402
-rw-r--r-- scripts/lib/recipetool/create_buildsys.py | 72
-rw-r--r-- scripts/lib/recipetool/create_buildsys_python.py | 1131
-rw-r--r-- scripts/lib/recipetool/create_go.py | 777
-rw-r--r-- scripts/lib/recipetool/create_kernel.py | 12
-rw-r--r-- scripts/lib/recipetool/create_kmod.py | 16
-rw-r--r-- scripts/lib/recipetool/create_npm.py | 582
-rw-r--r-- scripts/lib/recipetool/edit.py | 14
-rw-r--r-- scripts/lib/recipetool/licenses.csv | 37
-rw-r--r-- scripts/lib/recipetool/newappend.py | 16
-rw-r--r-- scripts/lib/recipetool/setvar.py | 13
-rw-r--r-- scripts/lib/resulttool/__init__.py | 0
-rw-r--r-- scripts/lib/resulttool/log.py | 107
-rwxr-xr-x scripts/lib/resulttool/manualexecution.py | 235
-rw-r--r-- scripts/lib/resulttool/merge.py | 46
-rw-r--r-- scripts/lib/resulttool/regression.py | 447
-rw-r--r-- scripts/lib/resulttool/report.py | 315
-rw-r--r-- scripts/lib/resulttool/resultutils.py | 228
-rw-r--r-- scripts/lib/resulttool/store.py | 104
-rw-r--r-- scripts/lib/resulttool/template/test_report_full_text.txt | 79
-rw-r--r-- scripts/lib/scriptpath.py | 12
-rw-r--r-- scripts/lib/scriptutils.py | 99
-rw-r--r-- scripts/lib/wic/__init__.py | 14
-rw-r--r-- scripts/lib/wic/canned-wks/efi-bootdisk.wks.in | 2
-rw-r--r-- scripts/lib/wic/canned-wks/qemuloongarch.wks | 3
-rw-r--r-- scripts/lib/wic/canned-wks/qemuriscv.wks | 3
-rw-r--r-- scripts/lib/wic/canned-wks/qemux86-directdisk.wks | 2
-rw-r--r-- scripts/lib/wic/canned-wks/sdimage-bootpart.wks | 4
-rw-r--r-- scripts/lib/wic/engine.py | 152
-rw-r--r-- scripts/lib/wic/filemap.py | 109
-rw-r--r-- scripts/lib/wic/help.py | 167
-rw-r--r-- scripts/lib/wic/ksparser.py | 143
-rw-r--r-- scripts/lib/wic/misc.py | 45
-rw-r--r-- scripts/lib/wic/partition.py | 223
-rw-r--r-- scripts/lib/wic/pluginbase.py | 33
-rw-r--r-- scripts/lib/wic/plugins/imager/direct.py | 291
-rw-r--r-- scripts/lib/wic/plugins/source/bootimg-biosplusefi.py | 213
-rw-r--r-- scripts/lib/wic/plugins/source/bootimg-efi.py | 347
-rw-r--r-- scripts/lib/wic/plugins/source/bootimg-partition.py | 141
-rw-r--r-- scripts/lib/wic/plugins/source/bootimg-pcbios.py | 44
-rw-r--r-- scripts/lib/wic/plugins/source/empty.py | 89
-rw-r--r-- scripts/lib/wic/plugins/source/isoimage-isohybrid.py | 78
-rw-r--r-- scripts/lib/wic/plugins/source/rawcopy.py | 60
-rw-r--r-- scripts/lib/wic/plugins/source/rootfs.py | 186
-rwxr-xr-x scripts/lnr | 21
-rw-r--r-- scripts/multilib_header_wrapper.h | 36
-rwxr-xr-x scripts/native-intercept/ar | 32
-rwxr-xr-x scripts/native-intercept/chgrp | 5
-rwxr-xr-x scripts/native-intercept/chown | 3
-rwxr-xr-x scripts/nativesdk-intercept/chgrp | 30
-rwxr-xr-x scripts/nativesdk-intercept/chown | 30
-rwxr-xr-x scripts/oe-build-perf-report | 158
-rwxr-xr-x scripts/oe-build-perf-test | 13
-rwxr-xr-x scripts/oe-buildenv-internal | 67
-rwxr-xr-x scripts/oe-check-sstate | 26
-rwxr-xr-x scripts/oe-debuginfod | 28
-rwxr-xr-x scripts/oe-depends-dot | 46
-rwxr-xr-x scripts/oe-find-native-sysroot | 29
-rwxr-xr-x scripts/oe-git-archive | 199
-rwxr-xr-x scripts/oe-git-proxy | 22
-rwxr-xr-x scripts/oe-gnome-terminal-phonehome | 4
-rwxr-xr-x scripts/oe-pkgdata-browser | 260
-rw-r--r-- scripts/oe-pkgdata-browser.glade | 337
-rwxr-xr-x scripts/oe-pkgdata-util | 93
-rwxr-xr-x scripts/oe-publish-sdk | 26
-rwxr-xr-x scripts/oe-pylint | 15
-rwxr-xr-x scripts/oe-run-native | 20
-rwxr-xr-x scripts/oe-selftest | 14
-rwxr-xr-x scripts/oe-setup-build | 122
-rwxr-xr-x scripts/oe-setup-builddir | 119
-rwxr-xr-x scripts/oe-setup-layers | 146
-rwxr-xr-x scripts/oe-setup-vscode | 93
-rwxr-xr-x scripts/oe-test | 4
-rwxr-xr-x scripts/oe-time-dd-test.sh | 106
-rwxr-xr-x scripts/oe-trim-schemas | 5
-rwxr-xr-x scripts/oepydevshell-internal.py | 11
-rwxr-xr-x scripts/opkg-query-helper.py | 17
-rwxr-xr-x scripts/patchtest | 232
-rwxr-xr-x scripts/patchtest-get-branch | 81
-rwxr-xr-x scripts/patchtest-get-series | 115
-rwxr-xr-x scripts/patchtest-send-results | 110
-rwxr-xr-x scripts/patchtest-setup-sharedir | 83
-rw-r--r-- scripts/patchtest.README | 153
-rw-r--r-- scripts/postinst-intercepts/delay_to_first_boot | 4
-rwxr-xr-x scripts/postinst-intercepts/postinst_intercept | 2
-rw-r--r-- scripts/postinst-intercepts/update_desktop_database | 8
-rw-r--r-- scripts/postinst-intercepts/update_font_cache | 9
-rw-r--r-- scripts/postinst-intercepts/update_gio_module_cache | 3
-rw-r--r-- scripts/postinst-intercepts/update_gtk_icon_cache | 21
-rw-r--r-- scripts/postinst-intercepts/update_gtk_immodules_cache | 11
-rw-r--r-- scripts/postinst-intercepts/update_icon_cache | 13
-rw-r--r-- scripts/postinst-intercepts/update_mandb | 18
-rw-r--r-- scripts/postinst-intercepts/update_mime_database | 9
-rw-r--r-- scripts/postinst-intercepts/update_pixbuf_cache | 3
-rw-r--r-- scripts/postinst-intercepts/update_udev_hwdb | 25
-rwxr-xr-x scripts/pybootchartgui/pybootchartgui.py | 2
-rw-r--r-- scripts/pybootchartgui/pybootchartgui/draw.py | 1529
-rw-r--r-- scripts/pybootchartgui/pybootchartgui/gui.py | 208
-rw-r--r-- scripts/pybootchartgui/pybootchartgui/parsing.py | 53
-rw-r--r-- scripts/pybootchartgui/pybootchartgui/samples.py | 25
-rwxr-xr-x scripts/pythondeps | 12
-rwxr-xr-x scripts/recipetool | 12
-rwxr-xr-x scripts/relocate_sdk.py | 48
-rwxr-xr-x scripts/resulttool | 78
-rwxr-xr-x scripts/rpm2cpio.sh | 30
-rwxr-xr-x scripts/runqemu | 1097
-rwxr-xr-x scripts/runqemu-addptable2image | 16
-rwxr-xr-x scripts/runqemu-export-rootfs | 37
-rwxr-xr-x scripts/runqemu-extract-sdk | 16
-rwxr-xr-x scripts/runqemu-gen-tapdevs | 145
-rwxr-xr-x scripts/runqemu-ifdown | 54
-rwxr-xr-x scripts/runqemu-ifup | 77
-rw-r--r-- scripts/runqemu.README | 16
-rwxr-xr-x scripts/send-error-report | 14
-rwxr-xr-x scripts/send-pull-request | 15
-rwxr-xr-x scripts/sstate-cache-management.py | 329
-rwxr-xr-x scripts/sstate-cache-management.sh | 469
-rwxr-xr-x scripts/sstate-diff-machines.sh | 10
-rwxr-xr-x scripts/sstate-sysroot-cruft.sh | 18
-rwxr-xr-x scripts/sysroot-relativelinks.py | 6
-rwxr-xr-x scripts/task-time | 5
-rwxr-xr-x scripts/test-reexec | 15
-rwxr-xr-x scripts/test-remote-image | 14
-rwxr-xr-x scripts/tiny/dirsize.py | 16
-rwxr-xr-x scripts/tiny/ksize.py | 28
-rwxr-xr-x scripts/tiny/ksum.py | 60
-rwxr-xr-x scripts/verify-bashisms | 17
-rwxr-xr-x scripts/wic | 81
-rwxr-xr-x scripts/yocto-check-layer | 80
-rwxr-xr-x scripts/yocto-check-layer-wrapper | 8
-rwxr-xr-x scripts/yocto_testresults_query.py | 131
223 files changed, 16330 insertions, 7079 deletions
diff --git a/scripts/.oe-layers.json b/scripts/.oe-layers.json
new file mode 100644
index 0000000000..1b00a84b54
--- /dev/null
+++ b/scripts/.oe-layers.json
@@ -0,0 +1,7 @@
+{
+ "layers": [
+ "../meta-poky",
+ "../meta"
+ ],
+ "version": "1.0"
+}
diff --git a/scripts/autobuilder-worker-prereq-tests b/scripts/autobuilder-worker-prereq-tests
new file mode 100755
index 0000000000..54fd3c1004
--- /dev/null
+++ b/scripts/autobuilder-worker-prereq-tests
@@ -0,0 +1,93 @@
+#!/bin/bash
+#
+# Copyright OpenEmbedded Contributors
+#
+# Script which can be run on new autobuilder workers to check that all needed configuration is present.
+# Designed to be run in a repo where bitbake/oe-core are already present.
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Todo
+# Add testtools/subunit import test
+# Add python3-git test
+# Add pigz test
+# vnc tests/checkvnc?
+# test sendmail works (for QA email notification)
+# test error report submission works
+# test buildhistory git repo works?
+#
+
+if [ ! -x $HOME/yocto-autobuilder-helper/scripts/checkvnc ]; then
+ echo "$HOME/yocto-autobuilder-helper should be created."
+ exit 1
+fi
+$HOME/yocto-autobuilder-helper/scripts/checkvnc
+
+. ./oe-init-build-env > /dev/null
+if [ "$?" != "0" ]; then
+ exit 1
+fi
+git config --global user.name > /dev/null
+if [ "$?" != "0" ]; then
+ echo "Please set git config --global user.name"
+ exit 1
+fi
+git config --global user.email > /dev/null
+if [ "$?" != "0" ]; then
+ echo "Please set git config --global user.email"
+ exit 1
+fi
+python3 -c "import jinja2"
+if [ "$?" != "0" ]; then
+ echo "Please ensure jinja2 is available"
+ exit 1
+fi
+bitbake -p
+if [ "$?" != "0" ]; then
+ echo "Bitbake parsing failed"
+ exit 1
+fi
+
+WATCHES=$(PATH="/sbin:/usr/sbin:$PATH" sysctl fs.inotify.max_user_watches -n)
+if (( $WATCHES < 65000 )); then
+ echo 'Need to increase watches (echo fs.inotify.max_user_watches=65536 | sudo tee -a /etc/sysctl.conf)'
+ exit 1
+fi
+OPEN_FILES=$(ulimit -n)
+if (( $OPEN_FILES < 65535 )); then
+ echo 'Increase maximum open files in /etc/security/limits.conf'
+ echo '* soft nofile 131072'
+ echo '* hard nofile 131072'
+ exit 1
+fi
+MAX_PROCESSES=$(ulimit -u)
+if (( $MAX_PROCESSES < 514542 )); then
+ echo 'Increase maximum user processes in /etc/security/limits.conf'
+ echo '* hard nproc 515294'
+ echo '* soft nproc 514543'
+ exit 1
+fi
+
+mkdir -p tmp/deploy/images/qemux86-64
+pushd tmp/deploy/images/qemux86-64
+if [ ! -e core-image-minimal-qemux86-64.ext4 ]; then
+ wget http://downloads.yoctoproject.org/releases/yocto/yocto-4.0/machines/qemu/qemux86-64/core-image-minimal-qemux86-64.ext4
+fi
+if [ ! -e core-image-minimal-qemux86-64.qemuboot.conf ]; then
+ wget http://downloads.yoctoproject.org/releases/yocto/yocto-4.0/machines/qemu/qemux86-64/core-image-minimal-qemux86-64.qemuboot.conf
+fi
+if [ ! -e bzImage-qemux86-64.bin ]; then
+ wget http://downloads.yoctoproject.org/releases/yocto/yocto-4.0/machines/qemu/qemux86-64/bzImage-qemux86-64.bin
+fi
+popd
+bitbake qemu-helper-native
+DISPLAY=:1 runqemu serialstdio qemux86-64
+if [ "$?" != "0" ]; then
+ echo "Unable to use runqemu"
+ exit 1
+fi
+DISPLAY=:1 runqemu serialstdio qemux86-64 kvm
+if [ "$?" != "0" ]; then
+ echo "Unable to use runqemu with kvm"
+ exit 1
+fi
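
For reference, a minimal smoke test of this checker on a fresh worker could look like the lines below, assuming a poky-style checkout where bitbake and oe-core are already present (the path is illustrative):

    $ cd /path/to/poky
    $ ./scripts/autobuilder-worker-prereq-tests && echo "worker looks ready"
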
diff --git a/scripts/bblock b/scripts/bblock
new file mode 100755
index 0000000000..0082059af8
--- /dev/null
+++ b/scripts/bblock
@@ -0,0 +1,184 @@
+#!/usr/bin/env python3
+# bblock
+# lock/unlock task to latest signature
+#
+# Copyright (c) 2023 BayLibre, SAS
+# Author: Julien Stepahn <jstephan@baylibre.com>
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import os
+import sys
+import logging
+
+scripts_path = os.path.dirname(os.path.realpath(__file__))
+lib_path = scripts_path + "/lib"
+sys.path = sys.path + [lib_path]
+
+import scriptpath
+
+scriptpath.add_bitbake_lib_path()
+
+import bb.tinfoil
+import bb.msg
+
+import argparse_oe
+
+myname = os.path.basename(sys.argv[0])
+logger = bb.msg.logger_create(myname)
+
+
+def getTaskSignatures(tinfoil, pn, tasks):
+ tinfoil.set_event_mask(
+ [
+ "bb.event.GetTaskSignatureResult",
+ "logging.LogRecord",
+ "bb.command.CommandCompleted",
+ "bb.command.CommandFailed",
+ ]
+ )
+ ret = tinfoil.run_command("getTaskSignatures", pn, tasks)
+ if ret:
+ while True:
+ event = tinfoil.wait_event(1)
+ if event:
+ if isinstance(event, bb.command.CommandCompleted):
+ break
+ elif isinstance(event, bb.command.CommandFailed):
+ logger.error(str(event))
+ sys.exit(2)
+ elif isinstance(event, bb.event.GetTaskSignatureResult):
+ sig = event.sig
+ elif isinstance(event, logging.LogRecord):
+ logger.handle(event)
+ else:
+ logger.error("No result returned from getTaskSignatures command")
+ sys.exit(2)
+ return sig
+
+
+def parseRecipe(tinfoil, recipe):
+ try:
+ tinfoil.parse_recipes()
+ d = tinfoil.parse_recipe(recipe)
+ except Exception:
+ logger.error("Failed to get recipe info for: %s" % recipe)
+ sys.exit(1)
+ return d
+
+
+def bblockDump(lockfile):
+ try:
+ with open(lockfile, "r") as lockfile:
+ for line in lockfile:
+ print(line.strip())
+ except IOError:
+ return 1
+ return 0
+
+
+def bblockReset(lockfile, pns, package_archs, tasks):
+ if not pns:
+ logger.info("Unlocking all recipes")
+ try:
+ os.remove(lockfile)
+ except FileNotFoundError:
+ pass
+ else:
+ logger.info("Unlocking {pns}".format(pns=pns))
+ tmp_lockfile = lockfile + ".tmp"
+ with open(lockfile, "r") as infile, open(tmp_lockfile, "w") as outfile:
+ for line in infile:
+ if not (
+ any(element in line for element in pns)
+ and any(element in line for element in package_archs.split())
+ ):
+ outfile.write(line)
+ else:
+ if tasks and not any(element in line for element in tasks):
+ outfile.write(line)
+ os.remove(lockfile)
+ os.rename(tmp_lockfile, lockfile)
+
+
+def main():
+ parser = argparse_oe.ArgumentParser(description="Lock and unlock a recipe")
+ parser.add_argument("pn", nargs="*", help="Space separated list of recipe to lock")
+ parser.add_argument(
+ "-t",
+ "--tasks",
+ help="Comma separated list of tasks",
+ type=lambda s: [
+ task if task.startswith("do_") else "do_" + task for task in s.split(",")
+ ],
+ )
+ parser.add_argument(
+ "-r",
+ "--reset",
+ action="store_true",
+ help="Unlock pn recipes, or all recipes if pn is empty",
+ )
+ parser.add_argument(
+ "-d",
+ "--dump",
+ action="store_true",
+ help="Dump generated bblock.conf file",
+ )
+
+ global_args, unparsed_args = parser.parse_known_args()
+
+ with bb.tinfoil.Tinfoil() as tinfoil:
+ tinfoil.prepare(config_only=True)
+
+ package_archs = tinfoil.config_data.getVar("PACKAGE_ARCHS")
+ builddir = tinfoil.config_data.getVar("TOPDIR")
+ lockfile = "{builddir}/conf/bblock.conf".format(builddir=builddir)
+
+ if global_args.dump:
+ bblockDump(lockfile)
+ return 0
+
+ if global_args.reset:
+ bblockReset(lockfile, global_args.pn, package_archs, global_args.tasks)
+ return 0
+
+ with open(lockfile, "a") as lockfile:
+ s = ""
+ if lockfile.tell() == 0:
+ s = "# Generated by bblock\n"
+ s += 'SIGGEN_LOCKEDSIGS_TASKSIG_CHECK = "info"\n'
+ s += 'SIGGEN_LOCKEDSIGS_TYPES += "${PACKAGE_ARCHS}"\n'
+ s += "\n"
+
+ for pn in global_args.pn:
+ d = parseRecipe(tinfoil, pn)
+ package_arch = d.getVar("PACKAGE_ARCH")
+ siggen_locked_sigs_package_arch = d.getVar(
+ "SIGGEN_LOCKEDSIGS_{package_arch}".format(package_arch=package_arch)
+ )
+ sigs = getTaskSignatures(tinfoil, [pn], global_args.tasks)
+ for sig in sigs:
+ new_entry = "{pn}:{taskname}:{sig}".format(
+ pn=sig[0], taskname=sig[1], sig=sig[2]
+ )
+ if (
+ siggen_locked_sigs_package_arch
+ and new_entry not in siggen_locked_sigs_package_arch
+ ) or not siggen_locked_sigs_package_arch:
+ s += 'SIGGEN_LOCKEDSIGS_{package_arch} += "{new_entry}"\n'.format(
+ package_arch=package_arch, new_entry=new_entry
+ )
+ lockfile.write(s)
+ return 0
+
+
+if __name__ == "__main__":
+ try:
+ ret = main()
+ except Exception:
+ ret = 1
+ import traceback
+
+ traceback.print_exc()
+ sys.exit(ret)
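
As a rough usage sketch of the new tool (the recipe and task names below are purely illustrative), locking and unlocking task signatures from an initialized build directory might look like:

    $ bblock zlib                     # lock all task signatures for zlib
    $ bblock -t compile,install zlib  # lock only do_compile/do_install ("do_" is prepended automatically)
    $ bblock -d                       # dump the generated conf/bblock.conf
    $ bblock -r zlib                  # unlock zlib again
    $ bblock -r                       # unlock all recipes
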
diff --git a/scripts/bitbake-prserv-tool b/scripts/bitbake-prserv-tool
index fa31b52584..80028342b1 100755
--- a/scripts/bitbake-prserv-tool
+++ b/scripts/bitbake-prserv-tool
@@ -1,4 +1,9 @@
#!/usr/bin/env bash
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
help ()
{
@@ -12,8 +17,11 @@ help ()
clean_cache()
{
s=`bitbake -e | grep ^CACHE= | cut -f2 -d\"`
+ # Stop any active memory resident server
+ bitbake -m
+ # Remove cache entries since we want to trigger a full reparse
if [ "x${s}" != "x" ]; then
- rm -rf ${s}
+ rm -f ${s}/bb_cache*.dat.*
fi
}
@@ -57,7 +65,7 @@ do_migrate_localcount ()
return 1
fi
- rm -rf $df
+ rm -f $df
clean_cache
echo "Exporting LOCALCOUNT to AUTOINCs..."
bitbake -R conf/migrate_localcount.conf -p
diff --git a/scripts/bitbake-whatchanged b/scripts/bitbake-whatchanged
deleted file mode 100755
index 0207777e63..0000000000
--- a/scripts/bitbake-whatchanged
+++ /dev/null
@@ -1,330 +0,0 @@
-#!/usr/bin/env python3
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
-
-# Copyright (c) 2013 Wind River Systems, Inc.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-# See the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-import os
-import sys
-import getopt
-import shutil
-import re
-import warnings
-import subprocess
-import argparse
-
-scripts_path = os.path.abspath(os.path.dirname(os.path.abspath(sys.argv[0])))
-lib_path = scripts_path + '/lib'
-sys.path = sys.path + [lib_path]
-
-import scriptpath
-
-# Figure out where is the bitbake/lib/bb since we need bb.siggen and bb.process
-bitbakepath = scriptpath.add_bitbake_lib_path()
-if not bitbakepath:
- sys.stderr.write("Unable to find bitbake by searching parent directory of this script or PATH\n")
- sys.exit(1)
-scriptpath.add_oe_lib_path()
-import argparse_oe
-
-import bb.siggen
-import bb.process
-
-# Match the stamp's filename
-# group(1): PE_PV (may no PE)
-# group(2): PR
-# group(3): TASK
-# group(4): HASH
-stamp_re = re.compile("(?P<pv>.*)-(?P<pr>r\d+)\.(?P<task>do_\w+)\.(?P<hash>[^\.]*)")
-sigdata_re = re.compile(".*\.sigdata\..*")
-
-def gen_dict(stamps):
- """
- Generate the dict from the stamps dir.
- The output dict format is:
- {fake_f: {pn: PN, pv: PV, pr: PR, task: TASK, path: PATH}}
- Where:
- fake_f: pv + task + hash
- path: the path to the stamp file
- """
- # The member of the sub dict (A "path" will be appended below)
- sub_mem = ("pv", "pr", "task")
- d = {}
- for dirpath, _, files in os.walk(stamps):
- for f in files:
- # The "bitbake -S" would generate ".sigdata", but no "_setscene".
- fake_f = re.sub('_setscene.', '.', f)
- fake_f = re.sub('.sigdata', '', fake_f)
- subdict = {}
- tmp = stamp_re.match(fake_f)
- if tmp:
- for i in sub_mem:
- subdict[i] = tmp.group(i)
- if len(subdict) != 0:
- pn = os.path.basename(dirpath)
- subdict['pn'] = pn
- # The path will be used by os.stat() and bb.siggen
- subdict['path'] = dirpath + "/" + f
- fake_f = tmp.group('pv') + tmp.group('task') + tmp.group('hash')
- d[fake_f] = subdict
- return d
-
-# Re-construct the dict
-def recon_dict(dict_in):
- """
- The output dict format is:
- {pn_task: {pv: PV, pr: PR, path: PATH}}
- """
- dict_out = {}
- for k in dict_in.keys():
- subdict = {}
- # The key
- pn_task = "%s_%s" % (dict_in.get(k).get('pn'), dict_in.get(k).get('task'))
- # If more than one stamps are found, use the latest one.
- if pn_task in dict_out:
- full_path_pre = dict_out.get(pn_task).get('path')
- full_path_cur = dict_in.get(k).get('path')
- if os.stat(full_path_pre).st_mtime > os.stat(full_path_cur).st_mtime:
- continue
- subdict['pv'] = dict_in.get(k).get('pv')
- subdict['pr'] = dict_in.get(k).get('pr')
- subdict['path'] = dict_in.get(k).get('path')
- dict_out[pn_task] = subdict
-
- return dict_out
-
-def split_pntask(s):
- """
- Split the pn_task in to (pn, task) and return it
- """
- tmp = re.match("(.*)_(do_.*)", s)
- return (tmp.group(1), tmp.group(2))
-
-
-def print_added(d_new = None, d_old = None):
- """
- Print the newly added tasks
- """
- added = {}
- for k in list(d_new.keys()):
- if k not in d_old:
- # Add the new one to added dict, and remove it from
- # d_new, so the remaining ones are the changed ones
- added[k] = d_new.get(k)
- del(d_new[k])
-
- if not added:
- return 0
-
- # Format the output, the dict format is:
- # {pn: task1, task2 ...}
- added_format = {}
- counter = 0
- for k in added.keys():
- pn, task = split_pntask(k)
- if pn in added_format:
- # Append the value
- added_format[pn] = "%s %s" % (added_format.get(pn), task)
- else:
- added_format[pn] = task
- counter += 1
- print("=== Newly added tasks: (%s tasks)" % counter)
- for k in added_format.keys():
- print(" %s: %s" % (k, added_format.get(k)))
-
- return counter
-
-def print_vrchanged(d_new = None, d_old = None, vr = None):
- """
- Print the pv or pr changed tasks.
- The arg "vr" is "pv" or "pr"
- """
- pvchanged = {}
- counter = 0
- for k in list(d_new.keys()):
- if d_new.get(k).get(vr) != d_old.get(k).get(vr):
- counter += 1
- pn, task = split_pntask(k)
- if pn not in pvchanged:
- # Format the output, we only print pn (no task) since
- # all the tasks would be changed when pn or pr changed,
- # the dict format is:
- # {pn: pv/pr_old -> pv/pr_new}
- pvchanged[pn] = "%s -> %s" % (d_old.get(k).get(vr), d_new.get(k).get(vr))
- del(d_new[k])
-
- if not pvchanged:
- return 0
-
- print("\n=== %s changed: (%s tasks)" % (vr.upper(), counter))
- for k in pvchanged.keys():
- print(" %s: %s" % (k, pvchanged.get(k)))
-
- return counter
-
-def print_depchanged(d_new = None, d_old = None, verbose = False):
- """
- Print the dependency changes
- """
- depchanged = {}
- counter = 0
- for k in d_new.keys():
- counter += 1
- pn, task = split_pntask(k)
- if (verbose):
- full_path_old = d_old.get(k).get("path")
- full_path_new = d_new.get(k).get("path")
- # No counter since it is not ready here
- if sigdata_re.match(full_path_old) and sigdata_re.match(full_path_new):
- output = bb.siggen.compare_sigfiles(full_path_old, full_path_new)
- if output:
- print("\n=== The verbose changes of %s.%s:" % (pn, task))
- print('\n'.join(output))
- else:
- # Format the output, the format is:
- # {pn: task1, task2, ...}
- if pn in depchanged:
- depchanged[pn] = "%s %s" % (depchanged.get(pn), task)
- else:
- depchanged[pn] = task
-
- if len(depchanged) > 0:
- print("\n=== Dependencies changed: (%s tasks)" % counter)
- for k in depchanged.keys():
- print(" %s: %s" % (k, depchanged[k]))
-
- return counter
-
-
-def main():
- """
- Print what will be done between the current and last builds:
- 1) Run "STAMPS_DIR=<path> bitbake -S recipe" to re-generate the stamps
- 2) Figure out what are newly added and changed, can't figure out
- what are removed since we can't know the previous stamps
- clearly, for example, if there are several builds, we can't know
- which stamps the last build has used exactly.
- 3) Use bb.siggen.compare_sigfiles to diff the old and new stamps
- """
-
- parser = argparse_oe.ArgumentParser(usage = """%(prog)s [options] [package ...]
-print what will be done between the current and last builds, for example:
-
- $ bitbake core-image-sato
- # Edit the recipes
- $ bitbake-whatchanged core-image-sato
-
-The changes will be printed"
-
-Note:
- The amount of tasks is not accurate when the task is "do_build" since
- it usually depends on other tasks.
- The "nostamp" task is not included.
-"""
-)
- parser.add_argument("recipe", help="recipe to check")
- parser.add_argument("-v", "--verbose", help = "print the verbose changes", action = "store_true")
- args = parser.parse_args()
-
- # Get the STAMPS_DIR
- print("Figuring out the STAMPS_DIR ...")
- cmdline = "bitbake -e | sed -ne 's/^STAMPS_DIR=\"\(.*\)\"/\\1/p'"
- try:
- stampsdir, err = bb.process.run(cmdline)
- except:
- raise
- if not stampsdir:
- print("ERROR: No STAMPS_DIR found for '%s'" % args.recipe, file=sys.stderr)
- return 2
- stampsdir = stampsdir.rstrip("\n")
- if not os.path.isdir(stampsdir):
- print("ERROR: stamps directory \"%s\" not found!" % stampsdir, file=sys.stderr)
- return 2
-
- # The new stamps dir
- new_stampsdir = stampsdir + ".bbs"
- if os.path.exists(new_stampsdir):
- print("ERROR: %s already exists!" % new_stampsdir, file=sys.stderr)
- return 2
-
- try:
- # Generate the new stamps dir
- print("Generating the new stamps ... (need several minutes)")
- cmdline = "STAMPS_DIR=%s bitbake -S none %s" % (new_stampsdir, args.recipe)
- # FIXME
- # The "bitbake -S" may fail, not fatal error, the stamps will still
- # be generated, this might be a bug of "bitbake -S".
- try:
- bb.process.run(cmdline)
- except Exception as exc:
- print(exc)
-
- # The dict for the new and old stamps.
- old_dict = gen_dict(stampsdir)
- new_dict = gen_dict(new_stampsdir)
-
- # Remove the same one from both stamps.
- cnt_unchanged = 0
- for k in list(new_dict.keys()):
- if k in old_dict:
- cnt_unchanged += 1
- del(new_dict[k])
- del(old_dict[k])
-
- # Re-construct the dict to easily find out what is added or changed.
- # The dict format is:
- # {pn_task: {pv: PV, pr: PR, path: PATH}}
- new_recon = recon_dict(new_dict)
- old_recon = recon_dict(old_dict)
-
- del new_dict
- del old_dict
-
- # Figure out what are changed, the new_recon would be changed
- # by the print_xxx function.
- # Newly added
- cnt_added = print_added(new_recon, old_recon)
-
- # PV (including PE) and PR changed
- # Let the bb.siggen handle them if verbose
- cnt_rv = {}
- if not args.verbose:
- for i in ('pv', 'pr'):
- cnt_rv[i] = print_vrchanged(new_recon, old_recon, i)
-
- # Dependencies changed (use bitbake-diffsigs)
- cnt_dep = print_depchanged(new_recon, old_recon, args.verbose)
-
- total_changed = cnt_added + (cnt_rv.get('pv') or 0) + (cnt_rv.get('pr') or 0) + cnt_dep
-
- print("\n=== Summary: (%s changed, %s unchanged)" % (total_changed, cnt_unchanged))
- if args.verbose:
- print("Newly added: %s\nDependencies changed: %s\n" % \
- (cnt_added, cnt_dep))
- else:
- print("Newly added: %s\nPV changed: %s\nPR changed: %s\nDependencies changed: %s\n" % \
- (cnt_added, cnt_rv.get('pv') or 0, cnt_rv.get('pr') or 0, cnt_dep))
- except:
- print("ERROR occurred!")
- raise
- finally:
- # Remove the newly generated stamps dir
- if os.path.exists(new_stampsdir):
- print("Removing the newly generated stamps dir ...")
- shutil.rmtree(new_stampsdir)
-
-if __name__ == "__main__":
- sys.exit(main())
diff --git a/scripts/buildall-qemu b/scripts/buildall-qemu
new file mode 100755
index 0000000000..ca9aafadf7
--- /dev/null
+++ b/scripts/buildall-qemu
@@ -0,0 +1,120 @@
+#!/bin/sh
+# Copyright (c) 2020 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# buildall-qemu: a tool for automating build testing of recipes
+# TODO: Add support for selecting which qemu architectures to build
+# TODO: Add support for queueing up multiple recipe builds
+# TODO: Add more logging options (e.g. local.conf info, bitbake env info)
+
+usage ()
+{
+ base=$(basename "$0")
+ echo "Usage: $base [options] [recipename/target]"
+ echo "Executes a build of a given target for selected LIBCs. With no options, default to both libc and musl."
+ echo "Options:"
+ echo "-l, --libc Specify one of \"glibc\" or \"musl\""
+}
+
+
+buildall ()
+{
+ # Get path to oe-core directory. Since oe-init-build-env prepends $PATH with
+ # the path to the scripts directory, get it from there
+ SCRIPTS_PATH="$(echo "$PATH" | cut -d ":" -f 1)"
+ OE_CORE_PATH=$(echo "$SCRIPTS_PATH" | sed 's|\(.*\)/.*|\1|')
+
+ # Get target list and host machine information
+ TARGET_LIST=$(find "$OE_CORE_PATH"/meta/conf/machine -maxdepth 1 -type f | grep qemu | sed 's|.*/||' | sed -e 's/\.conf//')
+
+ # Set LIBC value to use for the builds based on options provided by the user
+ if [ -n "$2" ]
+ then
+ LIBC_LIST="$2"
+ echo "$LIBC_LIST"
+ else
+ LIBC_LIST="glibc musl"
+ echo "$LIBC_LIST"
+ fi
+
+ START_TIME=$(date "+%Y-%m-%d_%H:%M:%S")
+ LOG_FILE="$1-buildall.log"
+ OS_INFO=$(grep "PRETTY_NAME=" /etc/os-release | awk -F "=" '{print $2}' | sed -e 's/^"//' -e 's/"$//')
+
+ # Move any existing log file for this build aside with a .old suffix
+ if [ -f "${LOG_FILE}" ]
+ then
+ mv "${LOG_FILE}" "${LOG_FILE}.old"
+ else
+ touch "${LOG_FILE}"
+ fi
+
+ # Fill the log file with build and host info
+ echo "BUILDALL-QEMU LOG FOR $1" >> "${LOG_FILE}"
+ echo "START TIME: ${START_TIME}" >> "${LOG_FILE}"
+ echo "HOSTNAME: $(uname -n)" >> "${LOG_FILE}"
+ echo "HOST OS: ${OS_INFO}" >> "${LOG_FILE}"
+ echo "HOST KERNEL: $(uname -r)" >> "${LOG_FILE}"
+ echo "===============" >> "${LOG_FILE}"
+ echo "BUILD RESULTS:" >> "${LOG_FILE}"
+
+ # start the builds for each MACHINE and TCLIBC
+ for j in ${LIBC_LIST}
+ do
+ echo "[$j]" >> "${LOG_FILE}"
+ for i in ${TARGET_LIST}
+ do
+ echo "$i" "$j"; \
+ TCLIBC=$j MACHINE=$i bitbake "$1" && echo "PASS: $i" >> "${LOG_FILE}" || echo "FAIL: $i" >> "${LOG_FILE}"
+ done
+ done
+
+ # Get pass/fail totals and add them to the end of the log
+ PASSED=$(grep "PASS:" "${LOG_FILE}" | wc -l)
+ FAILED=$(grep "FAIL:" "${LOG_FILE}" | wc -l)
+
+ echo "===============" >> "${LOG_FILE}"
+ echo "PASSED: ${PASSED}" >> "${LOG_FILE}"
+ echo "FAILED: ${FAILED}" >> "${LOG_FILE}"
+}
+
+
+# fail entire script if any command fails
+set -e
+
+# print usage and exit if not enough args given
+[ $# -eq 0 ] && usage && exit 1
+
+# handle arguments
+RECIPE=
+while [ $# -gt 0 ]
+do
+ arg=$1
+ case $arg in
+ -l|--libc)
+ if [ "$2" = "glibc" ] || [ "$2" = "musl" ]
+ then
+ LIBC_LIST="$2"
+ else
+ echo "Unrecognized libc option."
+ usage && exit 1
+ fi
+ shift
+ shift
+ ;;
+ *)
+ RECIPE="$1"
+ shift
+ ;;
+ esac
+done
+
+set -- "$RECIPE"
+
+# run buildall for the given recipe and LIBC
+if [ -n "$1" ]
+then
+ buildall "$1" "$LIBC_LIST"
+fi
+
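
A hypothetical invocation from an initialized build directory (the recipe name is illustrative):

    $ buildall-qemu -l musl curl   # build curl for every qemu machine against musl
    $ buildall-qemu curl           # without -l, both glibc and musl are built

Pass/fail results are appended to curl-buildall.log in the current directory.
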
diff --git a/scripts/buildhistory-collect-srcrevs b/scripts/buildhistory-collect-srcrevs
index d375b045d8..c937e49c2a 100755
--- a/scripts/buildhistory-collect-srcrevs
+++ b/scripts/buildhistory-collect-srcrevs
@@ -5,18 +5,8 @@
# Copyright 2013 Intel Corporation
# Authored-by: Paul Eggleton <paul.eggleton@intel.com>
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import collections
import os
@@ -63,12 +53,13 @@ def main():
sys.exit(1)
if options.forcevariable:
- forcevariable = '_forcevariable'
+ forcevariable = ':forcevariable'
else:
forcevariable = ''
all_srcrevs = collections.defaultdict(list)
for root, dirs, files in os.walk(options.buildhistory_dir):
+ dirs.sort()
if '.git' in dirs:
dirs.remove('.git')
for fn in files:
@@ -108,9 +99,9 @@ def main():
print('# %s' % curdir)
for pn, name, srcrev in srcrevs:
if name:
- print('SRCREV_%s_pn-%s%s = "%s"' % (name, pn, forcevariable, srcrev))
+ print('SRCREV_%s:pn-%s%s = "%s"' % (name, pn, forcevariable, srcrev))
else:
- print('SRCREV_pn-%s%s = "%s"' % (pn, forcevariable, srcrev))
+ print('SRCREV:pn-%s%s = "%s"' % (pn, forcevariable, srcrev))
if __name__ == "__main__":
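
With the override syntax updated above, the emitted assignments now take this shape (recipe names and revisions invented for illustration):

    SRCREV:pn-curl = "0f8ad2f2..."
    SRCREV_machine:pn-linux-yocto = "2c7a5b1d..."

and passing --forcevariable appends the :forcevariable override to each assignment.
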
diff --git a/scripts/buildhistory-diff b/scripts/buildhistory-diff
index 70805b0678..a6e785aa23 100755
--- a/scripts/buildhistory-diff
+++ b/scripts/buildhistory-diff
@@ -4,11 +4,13 @@
#
# Copyright (C) 2013 Intel Corporation
# Author: Paul Eggleton <paul.eggleton@linux.intel.com>
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
import sys
import os
import argparse
-from distutils.version import LooseVersion
# Ensure GitPython is installed (buildhistory_analysis needs it)
try:
@@ -25,10 +27,12 @@ def get_args_parser():
%(prog)s [options] [from-revision [to-revision]]
(if not specified, from-revision defaults to build-minus-1, and to-revision defaults to HEAD)""")
+ default_dir = os.path.join(os.environ.get('BUILDDIR', '.'), 'buildhistory')
+
parser.add_argument('-p', '--buildhistory-dir',
action='store',
dest='buildhistory_dir',
- default='buildhistory/',
+ default=default_dir,
help="Specify path to buildhistory directory (defaults to buildhistory/ under cwd)")
parser.add_argument('-v', '--report-version',
action='store_true',
@@ -68,20 +72,11 @@ def main():
parser = get_args_parser()
args = parser.parse_args()
- if LooseVersion(git.__version__) < '0.3.1':
- sys.stderr.write("Version of GitPython is too old, please install GitPython (python-git) 0.3.1 or later in order to use this script\n")
- sys.exit(1)
-
if len(args.revisions) > 2:
sys.stderr.write('Invalid argument(s) specified: %s\n\n' % ' '.join(args.revisions[2:]))
parser.print_help()
sys.exit(1)
- if not os.path.exists(args.buildhistory_dir):
- if args.buildhistory_dir == 'buildhistory/':
- cwd = os.getcwd()
- if os.path.basename(cwd) == 'buildhistory':
- args.buildhistory_dir = cwd
if not os.path.exists(args.buildhistory_dir):
sys.stderr.write('Buildhistory directory "%s" does not exist\n\n' % args.buildhistory_dir)
diff --git a/scripts/buildstats-diff b/scripts/buildstats-diff
index a128dd324f..c9aa76a8fa 100755
--- a/scripts/buildstats-diff
+++ b/scripts/buildstats-diff
@@ -1,18 +1,12 @@
-#!/usr/bin/python3
+#!/usr/bin/env python3
#
# Script for comparing buildstats from two different builds
#
# Copyright (c) 2016, Intel Corporation.
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
+# SPDX-License-Identifier: GPL-2.0-only
#
+
import argparse
import glob
import logging
@@ -120,7 +114,7 @@ def print_ver_diff(bs1, bs2):
print(fmt_str.format(name, field1, field2, maxlen=maxlen))
-def print_task_diff(bs1, bs2, val_type, min_val=0, min_absdiff=0, sort_by=('absdiff',)):
+def print_task_diff(bs1, bs2, val_type, min_val=0, min_absdiff=0, sort_by=('absdiff',), only_tasks=[]):
"""Diff task execution times"""
def val_to_str(val, human_readable=False):
"""Convert raw value to printable string"""
@@ -157,8 +151,9 @@ def print_task_diff(bs1, bs2, val_type, min_val=0, min_absdiff=0, sort_by=('absd
"""Get cumulative sum of all tasks"""
total = 0.0
for recipe_data in buildstats.values():
- for bs_task in recipe_data.tasks.values():
- total += getattr(bs_task, val_type)
+ for name, bs_task in recipe_data.tasks.items():
+ if not only_tasks or name in only_tasks:
+ total += getattr(bs_task, val_type)
return total
if min_val:
@@ -169,7 +164,7 @@ def print_task_diff(bs1, bs2, val_type, min_val=0, min_absdiff=0, sort_by=('absd
val_to_str(min_absdiff, True), val_to_str(min_absdiff)))
# Prepare the data
- tasks_diff = diff_buildstats(bs1, bs2, val_type, min_val, min_absdiff)
+ tasks_diff = diff_buildstats(bs1, bs2, val_type, min_val, min_absdiff, only_tasks)
# Sort our list
for field in reversed(sort_by):
@@ -254,6 +249,8 @@ Script for comparing buildstats of two separate builds."""
parser.add_argument('--multi', action='store_true',
help="Read all buildstats from the given paths and "
"average over them")
+ parser.add_argument('--only-task', dest='only_tasks', metavar='TASK', action='append', default=[],
+ help="Only include TASK in report. May be specified multiple times")
parser.add_argument('buildstats1', metavar='BUILDSTATS1', help="'Left' buildstat")
parser.add_argument('buildstats2', metavar='BUILDSTATS2', help="'Right' buildstat")
@@ -272,7 +269,6 @@ Script for comparing buildstats of two separate builds."""
return args
-
def main(argv=None):
"""Script entry point"""
args = parse_args(argv)
@@ -296,7 +292,7 @@ def main(argv=None):
print_ver_diff(bs1, bs2)
else:
print_task_diff(bs1, bs2, args.diff_attr, args.min_val,
- args.min_absdiff, sort_by)
+ args.min_absdiff, sort_by, args.only_tasks)
except ScriptError as err:
log.error(str(err))
return 1
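
For example, limiting the comparison to particular tasks might look like this sketch (the buildstats paths are illustrative):

    $ buildstats-diff --only-task do_compile --only-task do_configure \
          tmp/buildstats/before tmp/buildstats/after
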
diff --git a/scripts/buildstats-summary b/scripts/buildstats-summary
new file mode 100755
index 0000000000..b10c671b29
--- /dev/null
+++ b/scripts/buildstats-summary
@@ -0,0 +1,126 @@
+#!/usr/bin/env python3
+#
+# Dump a summary of the specified buildstats to the terminal, filtering and
+# sorting by walltime.
+#
+# SPDX-License-Identifier: GPL-2.0-only
+
+import argparse
+import dataclasses
+import datetime
+import enum
+import os
+import pathlib
+import sys
+
+scripts_path = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(scripts_path, "lib"))
+import buildstats
+
+
+@dataclasses.dataclass
+class Task:
+ recipe: str
+ task: str
+ start: datetime.datetime
+ duration: datetime.timedelta
+
+
+class Sorting(enum.Enum):
+ start = 1
+ duration = 2
+
+ # argparse integration
+ def __str__(self) -> str:
+ return self.name
+
+ def __repr__(self) -> str:
+ return self.name
+
+ @staticmethod
+ def from_string(s: str):
+ try:
+ return Sorting[s]
+ except KeyError:
+ return s
+
+
+def read_buildstats(path: pathlib.Path) -> buildstats.BuildStats:
+ if not path.exists():
+ raise Exception(f"No such file or directory: {path}")
+ if path.is_file():
+ return buildstats.BuildStats.from_file_json(path)
+ if (path / "build_stats").is_file():
+ return buildstats.BuildStats.from_dir(path)
+ raise Exception(f"Cannot find buildstats in {path}")
+
+
+def dump_buildstats(args, bs: buildstats.BuildStats):
+ tasks = []
+ for recipe in bs.values():
+ for task, stats in recipe.tasks.items():
+ t = Task(
+ recipe.name,
+ task,
+ datetime.datetime.fromtimestamp(stats["start_time"]),
+ datetime.timedelta(seconds=int(stats.walltime)),
+ )
+ tasks.append(t)
+
+ tasks.sort(key=lambda t: getattr(t, args.sort.name))
+
+ minimum = datetime.timedelta(seconds=args.shortest)
+ highlight = datetime.timedelta(seconds=args.highlight)
+
+ for t in tasks:
+ if t.duration >= minimum:
+ line = f"{t.duration} {t.recipe}:{t.task}"
+ if args.highlight and t.duration >= highlight:
+ print(f"\033[1m{line}\033[0m")
+ else:
+ print(line)
+
+
+def main(argv=None) -> int:
+ parser = argparse.ArgumentParser(
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter
+ )
+
+ parser.add_argument(
+ "buildstats", metavar="BUILDSTATS", help="Buildstats file", type=pathlib.Path
+ )
+ parser.add_argument(
+ "--sort",
+ "-s",
+ type=Sorting.from_string,
+ choices=list(Sorting),
+ default=Sorting.start,
+ help="Sort tasks",
+ )
+ parser.add_argument(
+ "--shortest",
+ "-t",
+ type=int,
+ default=1,
+ metavar="SECS",
+ help="Hide tasks shorter than SECS seconds",
+ )
+ parser.add_argument(
+ "--highlight",
+ "-g",
+ type=int,
+ default=60,
+ metavar="SECS",
+ help="Highlight tasks longer than SECS seconds (0 disabled)",
+ )
+
+ args = parser.parse_args(argv)
+
+ bs = read_buildstats(args.buildstats)
+ dump_buildstats(args, bs)
+
+ return 0
+
+
+if __name__ == "__main__":
+ sys.exit(main())
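
A sketch of typical use against a buildstats directory (the timestamped path is illustrative):

    $ buildstats-summary tmp/buildstats/20240101010101
    $ buildstats-summary --sort duration --shortest 30 tmp/buildstats/20240101010101

Tasks whose walltime reaches the --highlight threshold (60 seconds by default) are printed in bold.
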
diff --git a/scripts/combo-layer b/scripts/combo-layer
index d04d88b070..4a715914af 100755
--- a/scripts/combo-layer
+++ b/scripts/combo-layer
@@ -7,18 +7,8 @@
# Paul Eggleton <paul.eggleton@intel.com>
# Richard Purdie <richard.purdie@intel.com>
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import fnmatch
import os, sys
@@ -29,9 +19,8 @@ import tempfile
import configparser
import re
import copy
-import pipes
+import shlex
import shutil
-from collections import OrderedDict
from string import Template
from functools import reduce
@@ -90,7 +79,7 @@ class Configuration(object):
logger.debug("Loading config file %s" % self.conffile)
self.parser = configparser.ConfigParser()
with open(self.conffile) as f:
- self.parser.readfp(f)
+ self.parser.read_file(f)
# initialize default values
self.commit_msg_template = "Automatic commit to update last_revision"
@@ -202,6 +191,23 @@ def runcmd(cmd,destdir=None,printerr=True,out=None,env=None):
logger.debug("output: %s" % output.replace(chr(0), '\\0'))
return output
+def action_sync_revs(conf, args):
+ """
+ Update the last_revision config option for each repo with the latest
+ revision in the remote's branch. Useful if multiple people are using
+ combo-layer.
+ """
+ repos = get_repos(conf, args[1:])
+
+ for name in repos:
+ repo = conf.repos[name]
+ ldir = repo['local_repo_dir']
+ branch = repo.get('branch', "master")
+ runcmd("git fetch", ldir)
+ lastrev = runcmd('git rev-parse origin/%s' % branch, ldir).strip()
+ print("Updating %s to %s" % (name, lastrev))
+ conf.update(name, "last_revision", lastrev)
+
def action_init(conf, args):
"""
Clone component repositories
@@ -246,7 +252,7 @@ def action_init(conf, args):
# traditional behavior from "git archive" (preserved
# here) it to choose the first one. This might not be
# intended, so at least warn about it.
- logger.warn("%s: initial revision '%s' not unique, picking result of rev-parse = %s" %
+ logger.warning("%s: initial revision '%s' not unique, picking result of rev-parse = %s" %
(name, initialrev, refs[0]))
initialrev = rev
except:
@@ -477,7 +483,7 @@ def check_repo_clean(repodir):
exit if repo is dirty
"""
output=runcmd("git status --porcelain", repodir)
- r = re.compile('\?\? patch-.*/')
+ r = re.compile(r'\?\? patch-.*/')
dirtyout = [item for item in output.splitlines() if not r.match(item)]
if dirtyout:
logger.error("git repo %s is dirty, please fix it first", repodir)
@@ -518,7 +524,7 @@ def check_patch(patchfile):
f.close()
if of:
of.close()
- os.rename(patchfile + '.tmp', patchfile)
+ os.rename(of.name, patchfile)
def drop_to_shell(workdir=None):
if not sys.stdin.isatty():
@@ -1269,7 +1275,7 @@ def apply_commit(parent, rev, largs, wargs, dest_dir, file_filter=None):
target = os.path.join(wargs["destdir"], dest_dir)
if not os.path.isdir(target):
os.makedirs(target)
- quoted_target = pipes.quote(target)
+ quoted_target = shlex.quote(target)
# os.sysconf('SC_ARG_MAX') is lying: running a command with
# string length 629343 already failed with "Argument list too
# long" although SC_ARG_MAX = 2097152. "man execve" explains
@@ -1281,7 +1287,7 @@ def apply_commit(parent, rev, largs, wargs, dest_dir, file_filter=None):
unquoted_args = []
cmdsize = 100 + len(quoted_target)
while update:
- quoted_next = pipes.quote(update[0])
+ quoted_next = shlex.quote(update[0])
size_next = len(quoted_next) + len(dest_dir) + 1
logger.debug('cmdline length %d + %d < %d?' % (cmdsize, size_next, os.sysconf('SC_ARG_MAX')))
if cmdsize + size_next < max_cmdsize:
@@ -1312,6 +1318,7 @@ actions = {
"update": action_update,
"pull": action_pull,
"splitpatch": action_splitpatch,
+ "sync-revs": action_sync_revs,
}
def main():
@@ -1322,10 +1329,11 @@ def main():
Create and update a combination layer repository from multiple component repositories.
Action:
- init initialise the combo layer repo
- update [components] get patches from component repos and apply them to the combo repo
- pull [components] just pull component repos only
- splitpatch [commit] generate commit patch and split per component, default commit is HEAD""")
+ init initialise the combo layer repo
+ update [components] get patches from component repos and apply them to the combo repo
+ pull [components] just pull component repos only
+ sync-revs [components] update the config file's last_revision for each repository
+ splitpatch [commit] generate commit patch and split per component, default commit is HEAD""")
parser.add_option("-c", "--conf", help = "specify the config file (conf/combo-layer.conf is the default).",
action = "store", dest = "conffile", default = "conf/combo-layer.conf")
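
The new action slots in alongside the existing ones, for instance (component names are illustrative):

    $ combo-layer sync-revs           # update last_revision for every configured repo
    $ combo-layer sync-revs bitbake   # update only the bitbake component
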
diff --git a/scripts/combo-layer-hook-default.sh b/scripts/combo-layer-hook-default.sh
index 1e3a3b9bc8..fb9651b31f 100755
--- a/scripts/combo-layer-hook-default.sh
+++ b/scripts/combo-layer-hook-default.sh
@@ -1,4 +1,9 @@
#!/bin/sh
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
# Hook to add source component/revision info to commit message
# Parameter:
# $1 patch-file
diff --git a/scripts/contrib/bb-perf/bb-matrix-plot.sh b/scripts/contrib/bb-perf/bb-matrix-plot.sh
index 136a25570d..6672189c95 100755
--- a/scripts/contrib/bb-perf/bb-matrix-plot.sh
+++ b/scripts/contrib/bb-perf/bb-matrix-plot.sh
@@ -1,21 +1,8 @@
#!/bin/bash
#
# Copyright (c) 2011, Intel Corporation.
-# All rights reserved.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+# SPDX-License-Identifier: GPL-2.0-or-later
#
# DESCRIPTION
# This script operates on the .dat file generated by bb-matrix.sh. It tolerates
@@ -29,8 +16,8 @@
# Setup the defaults
DATFILE="bb-matrix.dat"
-XLABEL="BB_NUMBER_THREADS"
-YLABEL="PARALLEL_MAKE"
+XLABEL="BB\\\\_NUMBER\\\\_THREADS"
+YLABEL="PARALLEL\\\\_MAKE"
FIELD=3
DEF_TITLE="Elapsed Time (seconds)"
PM3D_FRAGMENT="unset surface; set pm3d at s hidden3d 100"
diff --git a/scripts/contrib/bb-perf/bb-matrix.sh b/scripts/contrib/bb-perf/bb-matrix.sh
index 106456584d..b1fff0f344 100755
--- a/scripts/contrib/bb-perf/bb-matrix.sh
+++ b/scripts/contrib/bb-perf/bb-matrix.sh
@@ -1,21 +1,8 @@
#!/bin/bash
#
# Copyright (c) 2011, Intel Corporation.
-# All rights reserved.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+# SPDX-License-Identifier: GPL-2.0-or-later
#
# DESCRIPTION
# This script runs BB_CMD (typically building core-image-sato) for all
diff --git a/scripts/contrib/bb-perf/buildstats-plot.sh b/scripts/contrib/bb-perf/buildstats-plot.sh
index 7e8ae0410e..45c27d0b97 100755
--- a/scripts/contrib/bb-perf/buildstats-plot.sh
+++ b/scripts/contrib/bb-perf/buildstats-plot.sh
@@ -1,21 +1,8 @@
#!/usr/bin/env bash
#
# Copyright (c) 2011, Intel Corporation.
-# All rights reserved.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+# SPDX-License-Identifier: GPL-2.0-or-later
#
# DESCRIPTION
#
@@ -52,7 +39,10 @@ set -o errexit
BS_DIR="tmp/buildstats"
N=10
+RECIPE=""
+TASKS="compile:configure:fetch:install:patch:populate_lic:populate_sysroot:unpack"
STATS="utime"
+ACCUMULATE=""
SUM=""
OUTDATA_FILE="$PWD/buildstats-plot.out"
@@ -64,11 +54,15 @@ Usage: $CMD [-b buildstats_dir] [-t do_task]
(default: "$BS_DIR")
-n N Top N recipes to display. Ignored if -S is present
(default: "$N")
+ -r recipe The recipe mask to be searched
+ -t tasks The tasks to be computed
+ (default: "$TASKS")
-s stats The stats to be matched. If more than one stat, units
should be the same because data is plotted as a histogram.
(see buildstats.sh -h for all options) or any other defined
(build)stat separated by colons, i.e. stime:utime
(default: "$STATS")
+ -a Accumulate all stats values for found recipes
-S Sum values for a particular stat for found recipes
-o Output data file.
(default: "$OUTDATA_FILE")
@@ -77,32 +71,41 @@ EOM
}
# Parse and validate arguments
-while getopts "b:n:s:o:Sh" OPT; do
- case $OPT in
- b)
- BS_DIR="$OPTARG"
- ;;
- n)
- N="$OPTARG"
- ;;
- s)
- STATS="$OPTARG"
- ;;
- S)
- SUM="y"
- ;;
- o)
- OUTDATA_FILE="$OPTARG"
- ;;
- h)
- usage
- exit 0
- ;;
- *)
- usage
- exit 1
- ;;
- esac
+while getopts "b:n:r:t:s:o:aSh" OPT; do
+ case $OPT in
+ b)
+ BS_DIR="$OPTARG"
+ ;;
+ n)
+ N="$OPTARG"
+ ;;
+ r)
+ RECIPE="-r $OPTARG"
+ ;;
+ t)
+ TASKS="$OPTARG"
+ ;;
+ s)
+ STATS="$OPTARG"
+ ;;
+ a)
+ ACCUMULATE="-a"
+ ;;
+ S)
+ SUM="y"
+ ;;
+ o)
+ OUTDATA_FILE="$OPTARG"
+ ;;
+ h)
+ usage
+ exit 0
+ ;;
+ *)
+ usage
+ exit 1
+ ;;
+ esac
done
# Get number of stats
@@ -114,10 +117,10 @@ CD=$(dirname $0)
# Parse buildstats recipes to produce a single table
OUTBUILDSTATS="$PWD/buildstats.log"
-$CD/buildstats.sh -H -s "$STATS" -H > $OUTBUILDSTATS
+$CD/buildstats.sh -b "$BS_DIR" -s "$STATS" -t "$TASKS" $RECIPE $ACCUMULATE -H > $OUTBUILDSTATS
# Get headers
-HEADERS=$(cat $OUTBUILDSTATS | sed -n -e '1s/ /-/g' -e '1s/:/ /gp')
+HEADERS=$(cat $OUTBUILDSTATS | sed -n -e 's/\(.*\)/"\1"/' -e '1s/ /\\\\\\\\ /g' -e 's/_/\\\\\\\\_/g' -e '1s/:/" "/gp')
echo -e "set boxwidth 0.9 relative"
echo -e "set style data histograms"
@@ -126,7 +129,7 @@ echo -e "set xtics rotate by 45 right"
# Get output data
if [ -z "$SUM" ]; then
- cat $OUTBUILDSTATS | sed -e '1d' | sort -k3 -n -r | head -$N > $OUTDATA_FILE
+ cat $OUTBUILDSTATS | sed -e '1d' -e 's/_/\\\\_/g' | sort -k3 -n -r | head -$N > $OUTDATA_FILE
# include task at recipe column
sed -i -e "1i\
${HEADERS}" $OUTDATA_FILE
@@ -138,8 +141,8 @@ else
declare -a sumargs
j=0
for i in `seq $nstats`; do
- sumargs[j]=sum; j=$(( $j + 1 ))
- sumargs[j]=`expr 3 + $i - 1`; j=$(( $j + 1 ))
+ sumargs[j]=sum; j=$(( $j + 1 ))
+ sumargs[j]=`expr 3 + $i - 1`; j=$(( $j + 1 ))
done
# Do the processing with datamash
diff --git a/scripts/contrib/bb-perf/buildstats.sh b/scripts/contrib/bb-perf/buildstats.sh
index 8d7e2488f0..e45cfc146d 100755
--- a/scripts/contrib/bb-perf/buildstats.sh
+++ b/scripts/contrib/bb-perf/buildstats.sh
@@ -1,21 +1,8 @@
#!/bin/bash
#
# Copyright (c) 2011, Intel Corporation.
-# All rights reserved.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+# SPDX-License-Identifier: GPL-2.0-or-later
#
# DESCRIPTION
# Given 'buildstats' data (generate by bitbake when setting
@@ -49,8 +36,10 @@ Child rusage ru_majflt:Child rusage ru_inblock:Child rusage ru_oublock:Child rus
Child rusage ru_nivcsw"
BS_DIR="tmp/buildstats"
+RECIPE=""
TASKS="compile:configure:fetch:install:patch:populate_lic:populate_sysroot:unpack"
STATS="$TIME"
+ACCUMULATE=""
HEADER="" # No header by default
function usage {
@@ -59,6 +48,7 @@ cat <<EOM
Usage: $CMD [-b buildstats_dir] [-t do_task]
-b buildstats The path where the folder resides
(default: "$BS_DIR")
+ -r recipe The recipe to be computed
-t tasks The tasks to be computed
(default: "$TASKS")
-s stats The stats to be matched. Options: TIME, IO, RUSAGE, CHILD_RUSAGE
@@ -69,87 +59,109 @@ Usage: $CMD [-b buildstats_dir] [-t do_task]
IO=$IO
RUSAGE=$RUSAGE
CHILD_RUSAGE=$CHILD_RUSAGE
+ -a Accumulate all stats values for found recipes
-h Display this help message
EOM
}
# Parse and validate arguments
-while getopts "b:t:s:Hh" OPT; do
- case $OPT in
- b)
- BS_DIR="$OPTARG"
- ;;
- t)
- TASKS="$OPTARG"
- ;;
- s)
- STATS="$OPTARG"
- ;;
- H)
- HEADER="y"
- ;;
- h)
- usage
- exit 0
- ;;
- *)
- usage
- exit 1
- ;;
- esac
+while getopts "b:r:t:s:aHh" OPT; do
+ case $OPT in
+ b)
+ BS_DIR="$OPTARG"
+ ;;
+ r)
+ RECIPE="$OPTARG"
+ ;;
+ t)
+ TASKS="$OPTARG"
+ ;;
+ s)
+ STATS="$OPTARG"
+ ;;
+ a)
+ ACCUMULATE="y"
+ ;;
+ H)
+ HEADER="y"
+ ;;
+ h)
+ usage
+ exit 0
+ ;;
+ *)
+ usage
+ exit 1
+ ;;
+ esac
done
# Ensure the buildstats folder exists
if [ ! -d "$BS_DIR" ]; then
- echo "ERROR: $BS_DIR does not exist"
- usage
- exit 1
+ echo "ERROR: $BS_DIR does not exist"
+ usage
+ exit 1
fi
stats=""
IFS=":"
for stat in ${STATS}; do
- case $stat in
- TIME)
- stats="${stats}:${TIME}"
- ;;
- IO)
- stats="${stats}:${IO}"
- ;;
- RUSAGE)
- stats="${stats}:${RUSAGE}"
- ;;
- CHILD_RUSAGE)
- stats="${stats}:${CHILD_RUSAGE}"
- ;;
- *)
- stats="${STATS}"
- esac
+ case $stat in
+ TIME)
+ stats="${stats}:${TIME}"
+ ;;
+ IO)
+ stats="${stats}:${IO}"
+ ;;
+ RUSAGE)
+ stats="${stats}:${RUSAGE}"
+ ;;
+ CHILD_RUSAGE)
+ stats="${stats}:${CHILD_RUSAGE}"
+ ;;
+ *)
+ stats="${STATS}"
+ ;;
+ esac
done
# remove possible colon at the beginning
stats="$(echo "$stats" | sed -e 's/^://1')"
# Provide a header if required by the user
-[ -n "$HEADER" ] && { echo "task:recipe:$stats"; }
+if [ -n "$HEADER" ] ; then
+ if [ -n "$ACCUMULATE" ]; then
+ echo "task:recipe:accumulated(${stats//:/;})"
+ else
+ echo "task:recipe:$stats"
+ fi
+fi
for task in ${TASKS}; do
task="do_${task}"
- for file in $(find ${BS_DIR} -type f -name ${task} | awk 'BEGIN{ ORS=""; OFS=":" } { print $0,"" }'); do
+ for file in $(find ${BS_DIR} -type f -path *${RECIPE}*/${task} | awk 'BEGIN{ ORS=""; OFS=":" } { print $0,"" }'); do
recipe="$(basename $(dirname $file))"
- times=""
- for stat in ${stats}; do
- [ -z "$stat" ] && { echo "empty stats"; }
- time=$(sed -n -e "s/^\($stat\): \\(.*\\)/\\2/p" $file)
- # in case the stat is not present, set the value as NA
- [ -z "$time" ] && { time="NA"; }
- # Append it to times
- if [ -z "$times" ]; then
- times="${time}"
- else
- times="${times} ${time}"
- fi
- done
+ times=""
+ for stat in ${stats}; do
+ [ -z "$stat" ] && { echo "empty stats"; }
+ time=$(sed -n -e "s/^\($stat\): \\(.*\\)/\\2/p" $file)
+ # in case the stat is not present, set the value as NA
+ [ -z "$time" ] && { time="NA"; }
+ # Append it to times
+ if [ -z "$times" ]; then
+ times="${time}"
+ else
+ times="${times} ${time}"
+ fi
+ done
+ if [ -n "$ACCUMULATE" ]; then
+ IFS=' '; valuesarray=(${times}); IFS=':'
+ times=0
+ for value in "${valuesarray[@]}"; do
+ [ "$value" == "NA" ] && { echo "ERROR: stat is not present."; usage; exit 1; }
+ times=$(( $times + $value ))
+ done
+ fi
echo "${task} ${recipe} ${times}"
done
done
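With the new -r and -a options, the computation can be restricted to recipes whose buildstats path matches the given name, and the per-stat values summed for each task/recipe pair. A hypothetical invocation against the default buildstats location (recipe name illustrative; note that -a needs integer-valued stats such as IO, since the accumulation uses shell arithmetic):

    # sum all IO counters for every do_compile of recipes matching "gcc"
    ./scripts/contrib/bb-perf/buildstats.sh -b tmp/buildstats -r gcc -t compile -s IO -a -H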
diff --git a/scripts/contrib/bbvars.py b/scripts/contrib/bbvars.py
index 286b5a9405..a9cdf082ab 100755
--- a/scripts/contrib/bbvars.py
+++ b/scripts/contrib/bbvars.py
@@ -1,18 +1,6 @@
#!/usr/bin/env python3
-
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+# SPDX-License-Identifier: GPL-2.0-or-later
#
# Copyright (C) Darren Hart <dvhart@linux.intel.com>, 2010
@@ -48,8 +36,8 @@ def bbvar_is_documented(var, documented_vars):
def collect_documented_vars(docfiles):
''' Walk the docfiles and collect the documented variables '''
documented_vars = []
- prog = re.compile(".*($|[^A-Z_])<glossentry id=\'var-")
- var_prog = re.compile('<glossentry id=\'var-(.*)\'>')
+ prog = re.compile(r".*($|[^A-Z_])<glossentry id=\'var-")
+ var_prog = re.compile(r'<glossentry id=\'var-(.*)\'>')
for d in docfiles:
with open(d) as f:
documented_vars += var_prog.findall(f.read())
@@ -57,7 +45,7 @@ def collect_documented_vars(docfiles):
return documented_vars
def bbvar_doctag(var, docconf):
- prog = re.compile('^%s\[doc\] *= *"(.*)"' % (var))
+ prog = re.compile(r'^%s\[doc\] *= *"(.*)"' % (var))
if docconf == "":
return "?"
diff --git a/scripts/contrib/build-perf-test-wrapper.sh b/scripts/contrib/build-perf-test-wrapper.sh
index 19bee1dd03..0a85e6e708 100755
--- a/scripts/contrib/build-perf-test-wrapper.sh
+++ b/scripts/contrib/build-perf-test-wrapper.sh
@@ -4,15 +4,7 @@
#
# Copyright (c) 2016, Intel Corporation.
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
-#
+# SPDX-License-Identifier: GPL-2.0-only
#
# This script is a simple wrapper around the actual build performance tester
# script. This script initializes the build environment, runs
@@ -33,7 +25,9 @@ Optional arguments:
-c COMMITISH test (checkout) this commit, <branch>:<commit> can be
specified to test specific commit of certain branch
-C GIT_REPO commit results into Git
+ -d DOWNLOAD_DIR directory to store downloaded sources in
-E EMAIL_ADDR send email report
+ -g GLOBALRES_DIR where to place the globalres file
-P GIT_REMOTE push results to a remote Git repository
-R DEST rsync reports to a remote destination
-w WORK_DIR work dir for this script
@@ -51,19 +45,26 @@ get_os_release_var () {
commitish=""
oe_build_perf_test_extra_opts=()
oe_git_archive_extra_opts=()
-while getopts "ha:c:C:E:P:R:w:x" opt; do
+while getopts "ha:c:C:d:E:g:P:R:w:x" opt; do
case $opt in
h) usage
exit 0
;;
- a) archive_dir=`realpath -s "$OPTARG"`
+ a) mkdir -p "$OPTARG"
+ archive_dir=`realpath -s "$OPTARG"`
;;
c) commitish=$OPTARG
;;
- C) results_repo=`realpath -s "$OPTARG"`
+ C) mkdir -p "$OPTARG"
+ results_repo=`realpath -s "$OPTARG"`
+ ;;
+ d) download_dir=`realpath -s "$OPTARG"`
;;
E) email_to="$OPTARG"
;;
+ g) mkdir -p "$OPTARG"
+ globalres_dir=`realpath -s "$OPTARG"`
+ ;;
P) oe_git_archive_extra_opts+=("--push" "$OPTARG")
;;
R) rsync_dst="$OPTARG"
@@ -89,7 +90,7 @@ fi
# Open a file descriptor for flock and acquire lock
LOCK_FILE="/tmp/oe-build-perf-test-wrapper.lock"
if ! exec 3> "$LOCK_FILE"; then
- echo "ERROR: Unable to open lock file"
+ echo "ERROR: Unable to open loemack file"
exit 1
fi
if ! flock -n 3; then
@@ -146,11 +147,18 @@ if [ -z "$base_dir" ]; then
fi
echo "Using working dir $base_dir"
+if [ -z "$download_dir" ]; then
+ download_dir="$base_dir/downloads"
+fi
+if [ -z "$globalres_dir" ]; then
+ globalres_dir="$base_dir"
+fi
+
timestamp=`date "+%Y%m%d%H%M%S"`
git_rev=$(git rev-parse --short HEAD) || exit 1
build_dir="$base_dir/build-$git_rev-$timestamp"
results_dir="$base_dir/results-$git_rev-$timestamp"
-globalres_log="$base_dir/globalres.log"
+globalres_log="$globalres_dir/globalres.log"
machine="qemux86"
mkdir -p "$base_dir"
@@ -161,7 +169,7 @@ auto_conf="$build_dir/conf/auto.conf"
echo "MACHINE = \"$machine\"" > "$auto_conf"
echo 'BB_NUMBER_THREADS = "8"' >> "$auto_conf"
echo 'PARALLEL_MAKE = "-j 8"' >> "$auto_conf"
-echo "DL_DIR = \"$base_dir/downloads\"" >> "$auto_conf"
+echo "DL_DIR = \"$download_dir\"" >> "$auto_conf"
# Disabling network sanity check slightly reduces the variance of timing results
echo 'CONNECTIVITY_CHECK_URIS = ""' >> "$auto_conf"
# Possibility to define extra settings
@@ -207,7 +215,7 @@ if [ -n "$results_repo" ]; then
if [ -n "$email_to" ]; then
echo "Emailing test report"
os_name=`get_os_release_var PRETTY_NAME`
- "$script_dir"/oe-build-perf-report-email.py --to "$email_to" --subject "Build Perf Test Report for $os_name" --text $report_txt --html $report_html "${OE_BUILD_PERF_REPORT_EMAIL_EXTRA_ARGS[@]}"
+ "$script_dir"/oe-build-perf-report-email.py --to "$email_to" --subject "Build Perf Test Report for $os_name" --text $report_txt "${OE_BUILD_PERF_REPORT_EMAIL_EXTRA_ARGS[@]}"
fi
# Upload report files, unless we're on detached head
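The new -d and -g options decouple the shared download cache and the location of globalres.log from the per-run work directory, so downloads can be reused and results collected across runs. A hypothetical invocation (all paths illustrative):

    # reuse a shared DL_DIR and collect globalres.log outside the work dir
    ./scripts/contrib/build-perf-test-wrapper.sh \
        -w /home/builder/perf-work \
        -d /srv/oe/downloads \
        -g /srv/oe/perf-results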
diff --git a/scripts/contrib/build-perf-test.sh b/scripts/contrib/build-perf-test.sh
deleted file mode 100755
index 9a091edb0a..0000000000
--- a/scripts/contrib/build-perf-test.sh
+++ /dev/null
@@ -1,400 +0,0 @@
-#!/bin/bash
-#
-# This script runs a series of tests (with and without sstate) and reports build time (and tmp/ size)
-#
-# Build performance test script
-#
-# Copyright 2013 Intel Corporation
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-#
-#
-# AUTHORS:
-# Stefan Stanacar <stefanx.stanacar@intel.com>
-
-
-ME=$(basename $0)
-
-#
-# usage and setup
-#
-
-usage () {
-cat << EOT
-Usage: $ME [-h]
- $ME [-c <commit>] [-v] [-m <val>] [-j <val>] [-t <val>] [-i <image-name>] [-d <path>]
-Options:
- -h
- Display this help and exit.
- -c <commit>
- git checkout <commit> before anything else
- -v
- Show bitbake output, don't redirect it to a log.
- -m <machine>
- Value for MACHINE. Default is qemux86.
- -j <val>
- Value for PARALLEL_MAKE. Default is 8.
- -t <val>
- Value for BB_NUMBER_THREADS. Default is 8.
- -i <image-name>
- Instead of timing against core-image-sato, use <image-name>
- -d <path>
- Use <path> as DL_DIR
- -p <githash>
- Cherry pick githash onto the commit
-
-Note: current working directory must be inside a poky git clone.
-
-EOT
-}
-
-
-if clonedir=$(git rev-parse --show-toplevel); then
- cd $clonedir
-else
- echo "The current working dir doesn't seem to be a poky git clone. Please cd there before running $ME"
- exit 1
-fi
-
-IMAGE="core-image-sato"
-verbose=0
-dldir=
-commit=
-pmake=
-cherrypicks=
-while getopts "hvc:m:j:t:i:d:p:" opt; do
- case $opt in
- h) usage
- exit 0
- ;;
- v) verbose=1
- ;;
- c) commit=$OPTARG
- ;;
- m) export MACHINE=$OPTARG
- ;;
- j) pmake=$OPTARG
- ;;
- t) export BB_NUMBER_THREADS=$OPTARG
- ;;
- i) IMAGE=$OPTARG
- ;;
- d) dldir=$OPTARG
- ;;
- p) cherrypicks="$cherrypicks $OPTARG"
- ;;
- *) usage
- exit 1
- ;;
- esac
-done
-
-
-#drop cached credentials and test for sudo access without a password
-sudo -k -n ls > /dev/null 2>&1
-reqpass=$?
-if [ $reqpass -ne 0 ]; then
- echo "The script requires sudo access to drop caches between builds (echo 3 > /proc/sys/vm/drop_caches)"
- read -s -p "Please enter your sudo password: " pass
- echo
-fi
-
-if [ -n "$commit" ]; then
- echo "git checkout -f $commit"
- git pull > /dev/null 2>&1
- git checkout -f $commit || exit 1
- git pull > /dev/null 2>&1
-fi
-
-if [ -n "$cherrypicks" ]; then
- for c in $cherrypicks; do
- git cherry-pick $c
- done
-fi
-
-rev=$(git rev-parse --short HEAD) || exit 1
-OUTDIR="$clonedir/build-perf-test/results-$rev-`date "+%Y%m%d%H%M%S"`"
-BUILDDIR="$OUTDIR/build"
-resultsfile="$OUTDIR/results.log"
-cmdoutput="$OUTDIR/commands.log"
-myoutput="$OUTDIR/output.log"
-globalres="$clonedir/build-perf-test/globalres.log"
-
-mkdir -p $OUTDIR || exit 1
-
-log () {
- local msg="$1"
- echo "`date`: $msg" | tee -a $myoutput
-}
-
-
-#
-# Config stuff
-#
-
-branch=`git branch 2>&1 | grep "^* " | tr -d "* "`
-gitcommit=$(git rev-parse HEAD) || exit 1
-log "Running on $branch:$gitcommit"
-
-source ./oe-init-build-env $OUTDIR/build >/dev/null || exit 1
-cd $OUTDIR/build
-
-[ -n "$MACHINE" ] || export MACHINE="qemux86"
-[ -n "$BB_NUMBER_THREADS" ] || export BB_NUMBER_THREADS="8"
-
-if [ -n "$pmake" ]; then
- export PARALLEL_MAKE="-j $pmake"
-else
- export PARALLEL_MAKE="-j 8"
-fi
-
-if [ -n "$dldir" ]; then
- echo "DL_DIR = \"$dldir\"" >> conf/local.conf
-else
- echo "DL_DIR = \"$clonedir/build-perf-test/downloads\"" >> conf/local.conf
-fi
-
-# Sometimes I've noticed big differences in timings for the same commit, on the same machine
-# Disabling the network sanity check helps a bit (because of my crappy network connection and/or proxy)
-echo "CONNECTIVITY_CHECK_URIS =\"\"" >> conf/local.conf
-
-
-#
-# Functions
-#
-
-declare -a TIMES
-time_count=0
-declare -a SIZES
-size_count=0
-
-time_cmd () {
- log " Timing: $*"
-
- if [ $verbose -eq 0 ]; then
- /usr/bin/time -v -o $resultsfile "$@" >> $cmdoutput
- else
- /usr/bin/time -v -o $resultsfile "$@"
- fi
- ret=$?
- if [ $ret -eq 0 ]; then
- t=`grep wall $resultsfile | sed 's/.*m:ss): //'`
- log " TIME: $t"
- TIMES[(( time_count++ ))]="$t"
- else
- log "ERROR: exit status was non-zero, will report time as 0."
- TIMES[(( time_count++ ))]="0"
- fi
-
- #time by default overwrites the output file and we want to keep the results
- #it has an append option but I don't want to clobber the results in the same file
- i=`ls $OUTDIR/results.log* |wc -l`
- mv $resultsfile "${resultsfile}.${i}"
- log "More stats can be found in ${resultsfile}.${i}"
-}
-
-bbtime () {
- time_cmd bitbake "$@"
-}
-
-#we don't time bitbake here
-bbnotime () {
- local arg="$@"
- log " Running: bitbake ${arg}"
- if [ $verbose -eq 0 ]; then
- bitbake ${arg} >> $cmdoutput
- else
- bitbake ${arg}
- fi
- ret=$?
- if [ $ret -eq 0 ]; then
- log " Finished bitbake ${arg}"
- else
- log "ERROR: exit status was non-zero. Exit.."
- exit $ret
- fi
-
-}
-
-do_rmtmp() {
- log " Removing tmp"
- rm -rf bitbake.lock pseudodone conf/sanity_info cache tmp
-}
-do_rmsstate () {
- log " Removing sstate-cache"
- rm -rf sstate-cache
-}
-do_sync () {
- log " Syncing and dropping caches"
- sync; sync
- if [ $reqpass -eq 0 ]; then
- sudo sh -c "echo 3 > /proc/sys/vm/drop_caches"
- else
- echo "$pass" | sudo -S sh -c "echo 3 > /proc/sys/vm/drop_caches"
- echo
- fi
- sleep 3
-}
-
-write_results() {
- echo -n "`uname -n`,$branch:$gitcommit,`git describe`," >> $globalres
- for i in "${TIMES[@]}"; do
- echo -n "$i," >> $globalres
- done
- for i in "${SIZES[@]}"; do
- echo -n "$i," >> $globalres
- done
- echo >> $globalres
- sed -i '$ s/,$//' $globalres
-}
-
-####
-
-#
-# Test 1
-# Measure: Wall clock of "bitbake core-image-sato" and size of tmp/dir (w/o rm_work and w/ rm_work)
-# Pre: Downloaded sources, no sstate
-# Steps:
-# Part1:
-# - fetchall
-# - clean build dir
-# - time bitbake core-image-sato
-# - collect data
-# Part2:
-# - bitbake virtual/kernel -c cleansstate
-# - time bitbake virtual/kernel
-# Part3:
-# - add INHERIT to local.conf
-# - clean build dir
-# - build
-# - report size, remove INHERIT
-
-test1_p1 () {
- log "Running Test 1, part 1/3: Measure wall clock of bitbake $IMAGE and size of tmp/ dir"
- bbnotime $IMAGE --runall=fetch
- do_rmtmp
- do_rmsstate
- do_sync
- bbtime $IMAGE
- s=`du -s tmp | sed 's/tmp//' | sed 's/[ \t]*$//'`
- SIZES[(( size_count++ ))]="$s"
- log "SIZE of tmp dir is: $s"
- log "Buildstats are saved in $OUTDIR/buildstats-test1"
- mv tmp/buildstats $OUTDIR/buildstats-test1
-}
-
-
-test1_p2 () {
- log "Running Test 1, part 2/3: bitbake virtual/kernel -c cleansstate and time bitbake virtual/kernel"
- bbnotime virtual/kernel -c cleansstate
- do_sync
- bbtime virtual/kernel
-}
-
-test1_p3 () {
- log "Running Test 1, part 3/3: Build $IMAGE w/o sstate and report size of tmp/dir with rm_work enabled"
- echo "INHERIT += \"rm_work\"" >> conf/local.conf
- do_rmtmp
- do_rmsstate
- do_sync
- bbtime $IMAGE
- sed -i 's/INHERIT += \"rm_work\"//' conf/local.conf
- s=`du -s tmp | sed 's/tmp//' | sed 's/[ \t]*$//'`
- SIZES[(( size_count++ ))]="$s"
- log "SIZE of tmp dir is: $s"
- log "Buildstats are saved in $OUTDIR/buildstats-test13"
- mv tmp/buildstats $OUTDIR/buildstats-test13
-}
-
-
-#
-# Test 2
-# Measure: Wall clock of "bitbake core-image-sato" and size of tmp/dir
-# Pre: populated sstate cache
-
-test2 () {
- # Assuming test 1 has run
- log "Running Test 2: Measure wall clock of bitbake $IMAGE -c rootfs with sstate"
- do_rmtmp
- do_sync
- bbtime $IMAGE -c rootfs
-}
-
-
-# Test 3
-# parsing time metrics
-#
-# Start with
-# i) "rm -rf tmp/cache; time bitbake -p"
-# ii) "rm -rf tmp/cache/default-glibc/; time bitbake -p"
-# iii) "time bitbake -p"
-
-
-test3 () {
- log "Running Test 3: Parsing time metrics (bitbake -p)"
- log " Removing tmp/cache && cache"
- rm -rf tmp/cache cache
- bbtime -p
- log " Removing tmp/cache/default-glibc/"
- rm -rf tmp/cache/default-glibc/
- bbtime -p
- bbtime -p
-}
-
-#
-# Test 4 - eSDK
-# Measure: eSDK size and installation time
-test4 () {
- log "Running Test 4: eSDK size and installation time"
- bbnotime $IMAGE -c do_populate_sdk_ext
-
- esdk_installer=(tmp/deploy/sdk/*-toolchain-ext-*.sh)
-
- if [ ${#esdk_installer[*]} -eq 1 ]; then
- s=$((`stat -c %s "$esdk_installer"` / 1024))
- SIZES[(( size_count++ ))]="$s"
- log "Download SIZE of eSDK is: $s kB"
-
- do_sync
- time_cmd "$esdk_installer" -y -d "tmp/esdk-deploy"
-
- s=$((`du -sb "tmp/esdk-deploy" | cut -f1` / 1024))
- SIZES[(( size_count++ ))]="$s"
- log "Install SIZE of eSDK is: $s kB"
- else
- log "ERROR: other than one sdk found (${esdk_installer[*]}), reporting size and time as 0."
- SIZES[(( size_count++ ))]="0"
- TIMES[(( time_count++ ))]="0"
- fi
-
-}
-
-
-# RUN!
-
-test1_p1
-test1_p2
-test1_p3
-test2
-test3
-test4
-
-# if we got til here write to global results
-write_results
-
-log "All done, cleaning up..."
-
-do_rmtmp
-do_rmsstate
diff --git a/scripts/contrib/convert-overrides.py b/scripts/contrib/convert-overrides.py
new file mode 100755
index 0000000000..c69acb4095
--- /dev/null
+++ b/scripts/contrib/convert-overrides.py
@@ -0,0 +1,155 @@
+#!/usr/bin/env python3
+#
+# Conversion script to add new override syntax to existing bitbake metadata
+#
+# Copyright (C) 2021 Richard Purdie
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+#
+# To use this script on a new layer you need to list the overrides the
+# layer is known to use in the list below.
+#
+# Known constraint: Matching is 'loose' and in particular will find variable
+# and function names with "_append" and "_remove" in them. Those need to be
+# filtered out manually or in the skip list below.
+#
+
+import re
+import os
+import sys
+import tempfile
+import shutil
+import mimetypes
+import argparse
+
+parser = argparse.ArgumentParser(description="Convert override syntax")
+parser.add_argument("--override", "-o", action="append", default=[], help="Add additional strings to consider as an override (e.g. custom machines/distros")
+parser.add_argument("--skip", "-s", action="append", default=[], help="Add additional string to skip and not consider an override")
+parser.add_argument("--skip-ext", "-e", action="append", default=[], help="Additional file suffixes to skip when processing (e.g. '.foo')")
+parser.add_argument("--package-vars", action="append", default=[], help="Additional variables to treat as package variables")
+parser.add_argument("--image-vars", action="append", default=[], help="Additional variables to treat as image variables")
+parser.add_argument("--short-override", action="append", default=[], help="Additional strings to treat as short overrides")
+parser.add_argument("path", nargs="+", help="Paths to convert")
+
+args = parser.parse_args()
+
+# List of strings to treat as overrides
+vars = args.override
+vars += ["append", "prepend", "remove"]
+vars += ["qemuarm", "qemux86", "qemumips", "qemuppc", "qemuriscv", "qemuall"]
+vars += ["genericx86", "edgerouter", "beaglebone-yocto"]
+vars += ["armeb", "arm", "armv5", "armv6", "armv4", "powerpc64", "aarch64", "riscv32", "riscv64", "x86", "mips64", "powerpc"]
+vars += ["mipsarch", "x86-x32", "mips16e", "microblaze", "e5500-64b", "mipsisa32", "mipsisa64"]
+vars += ["class-native", "class-target", "class-cross-canadian", "class-cross", "class-devupstream"]
+vars += ["tune-", "pn-", "forcevariable"]
+vars += ["libc-musl", "libc-glibc", "libc-newlib","libc-baremetal"]
+vars += ["task-configure", "task-compile", "task-install", "task-clean", "task-image-qa", "task-rm_work", "task-image-complete", "task-populate-sdk"]
+vars += ["toolchain-clang", "mydistro", "nios2", "sdkmingw32", "overrideone", "overridetwo"]
+vars += ["linux-gnux32", "linux-muslx32", "linux-gnun32", "mingw32", "poky", "darwin", "linuxstdbase"]
+vars += ["linux-gnueabi", "eabi"]
+vars += ["virtclass-multilib", "virtclass-mcextend"]
+
+# List of strings to treat as overrides but only with whitespace following or another override (more restricted matching).
+# Handles issues with arc matching arch.
+shortvars = ["arc", "mips", "mipsel", "sh4"] + args.short_override
+
+# Variables which take packagenames as an override
+packagevars = ["FILES", "RDEPENDS", "RRECOMMENDS", "SUMMARY", "DESCRIPTION", "RSUGGESTS", "RPROVIDES", "RCONFLICTS", "PKG", "ALLOW_EMPTY",
+ "pkg_postrm", "pkg_postinst_ontarget", "pkg_postinst", "INITSCRIPT_NAME", "INITSCRIPT_PARAMS", "DEBIAN_NOAUTONAME", "ALTERNATIVE",
+ "PKGE", "PKGV", "PKGR", "USERADD_PARAM", "GROUPADD_PARAM", "CONFFILES", "SYSTEMD_SERVICE", "LICENSE", "SECTION", "pkg_preinst",
+ "pkg_prerm", "RREPLACES", "GROUPMEMS_PARAM", "SYSTEMD_AUTO_ENABLE", "SKIP_FILEDEPS", "PRIVATE_LIBS", "PACKAGE_ADD_METADATA",
+ "INSANE_SKIP", "DEBIANNAME", "SYSTEMD_SERVICE_ESCAPED"] + args.package_vars
+
+# Expressions to skip if encountered, these are not overrides
+skips = args.skip
+skips += ["parser_append", "recipe_to_append", "extra_append", "to_remove", "show_appends", "applied_appends", "file_appends", "handle_remove"]
+skips += ["expanded_removes", "color_remove", "test_remove", "empty_remove", "toaster_prepend", "num_removed", "licfiles_append", "_write_append"]
+skips += ["no_report_remove", "test_prepend", "test_append", "multiple_append", "test_remove", "shallow_remove", "do_remove_layer", "first_append"]
+skips += ["parser_remove", "to_append", "no_remove", "bblayers_add_remove", "bblayers_remove", "apply_append", "is_x86", "base_dep_prepend"]
+skips += ["autotools_dep_prepend", "go_map_arm", "alt_remove_links", "systemd_append_file", "file_append", "process_file_darwin"]
+skips += ["run_loaddata_poky", "determine_if_poky_env", "do_populate_poky_src", "libc_cv_include_x86_isa_level", "test_rpm_remove", "do_install_armmultilib"]
+skips += ["get_appends_for_files", "test_doubleref_remove", "test_bitbakelayers_add_remove", "elf32_x86_64", "colour_remove", "revmap_remove"]
+skips += ["test_rpm_remove", "test_bitbakelayers_add_remove", "recipe_append_file", "log_data_removed", "recipe_append", "systemd_machine_unit_append"]
+skips += ["recipetool_append", "changetype_remove", "try_appendfile_wc", "test_qemux86_directdisk", "test_layer_appends", "tgz_removed"]
+
+imagevars = ["IMAGE_CMD", "EXTRA_IMAGECMD", "IMAGE_TYPEDEP", "CONVERSION_CMD", "COMPRESS_CMD"] + args.image_vars
+packagevars += imagevars
+
+skip_ext = [".html", ".patch", ".m4", ".diff"] + args.skip_ext
+
+vars_re = {}
+for exp in vars:
+ vars_re[exp] = (re.compile(r'((^|[#\'"\s\-\+])[A-Za-z0-9_\-:${}\.]+)_' + exp), r"\1:" + exp)
+
+shortvars_re = {}
+for exp in shortvars:
+ shortvars_re[exp] = (re.compile(r'((^|[#\'"\s\-\+])[A-Za-z0-9_\-:${}\.]+)_' + exp + r'([\(\'"\s:])'), r"\1:" + exp + r"\3")
+
+package_re = {}
+for exp in packagevars:
+ package_re[exp] = (re.compile(r'(^|[#\'"\s\-\+]+)' + exp + r'_' + r'([$a-z"\'\s%\[<{\\\*].)'), r"\1" + exp + r":\2")
+
+# Other substitutions to make
+subs = {
+ 'r = re.compile(r"([^:]+):\s*(.*)")' : 'r = re.compile(r"(^.+?):\s+(.*)")',
+ "val = d.getVar('%s_%s' % (var, pkg))" : "val = d.getVar('%s:%s' % (var, pkg))",
+ "f.write('%s_%s: %s\\n' % (var, pkg, encode(val)))" : "f.write('%s:%s: %s\\n' % (var, pkg, encode(val)))",
+ "d.getVar('%s_%s' % (scriptlet_name, pkg))" : "d.getVar('%s:%s' % (scriptlet_name, pkg))",
+ 'ret.append(v + "_" + p)' : 'ret.append(v + ":" + p)',
+}
+
+def processfile(fn):
+ print("processing file '%s'" % fn)
+ try:
+ fh, abs_path = tempfile.mkstemp()
+ with os.fdopen(fh, 'w') as new_file:
+ with open(fn, "r") as old_file:
+ for line in old_file:
+ skip = False
+ for s in skips:
+ if s in line:
+ skip = True
+ if "ptest_append" in line or "ptest_remove" in line or "ptest_prepend" in line:
+ skip = False
+ for sub in subs:
+ if sub in line:
+ line = line.replace(sub, subs[sub])
+ skip = True
+ if not skip:
+ for pvar in packagevars:
+ line = package_re[pvar][0].sub(package_re[pvar][1], line)
+ for var in vars:
+ line = vars_re[var][0].sub(vars_re[var][1], line)
+ for shortvar in shortvars:
+ line = shortvars_re[shortvar][0].sub(shortvars_re[shortvar][1], line)
+ if "pkg_postinst:ontarget" in line:
+ line = line.replace("pkg_postinst:ontarget", "pkg_postinst_ontarget")
+ new_file.write(line)
+ shutil.copymode(fn, abs_path)
+ os.remove(fn)
+ shutil.move(abs_path, fn)
+ except UnicodeDecodeError:
+ pass
+
+ourname = os.path.basename(sys.argv[0])
+ourversion = "0.9.3"
+
+for p in args.path:
+ if os.path.isfile(p):
+ processfile(p)
+ else:
+ print("processing directory '%s'" % p)
+ for root, dirs, files in os.walk(p):
+ for name in files:
+ if name == ourname:
+ continue
+ fn = os.path.join(root, name)
+ if os.path.islink(fn):
+ continue
+ if "/.git/" in fn or any(fn.endswith(ext) for ext in skip_ext):
+ continue
+ processfile(fn)
+
+print("All files processed with version %s" % ourversion)
diff --git a/scripts/contrib/convert-spdx-licenses.py b/scripts/contrib/convert-spdx-licenses.py
new file mode 100755
index 0000000000..4e194dee3f
--- /dev/null
+++ b/scripts/contrib/convert-spdx-licenses.py
@@ -0,0 +1,145 @@
+#!/usr/bin/env python3
+#
+# Conversion script to change LICENSE entries to SPDX identifiers
+#
+# Copyright (C) 2021-2022 Richard Purdie
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import re
+import os
+import sys
+import tempfile
+import shutil
+import mimetypes
+
+if len(sys.argv) < 2:
+ print("Please specify a directory to run the conversion script against.")
+ sys.exit(1)
+
+license_map = {
+"AGPL-3" : "AGPL-3.0-only",
+"AGPL-3+" : "AGPL-3.0-or-later",
+"AGPLv3" : "AGPL-3.0-only",
+"AGPLv3+" : "AGPL-3.0-or-later",
+"AGPLv3.0" : "AGPL-3.0-only",
+"AGPLv3.0+" : "AGPL-3.0-or-later",
+"AGPL-3.0" : "AGPL-3.0-only",
+"AGPL-3.0+" : "AGPL-3.0-or-later",
+"BSD-0-Clause" : "0BSD",
+"GPL-1" : "GPL-1.0-only",
+"GPL-1+" : "GPL-1.0-or-later",
+"GPLv1" : "GPL-1.0-only",
+"GPLv1+" : "GPL-1.0-or-later",
+"GPLv1.0" : "GPL-1.0-only",
+"GPLv1.0+" : "GPL-1.0-or-later",
+"GPL-1.0" : "GPL-1.0-only",
+"GPL-1.0+" : "GPL-1.0-or-later",
+"GPL-2" : "GPL-2.0-only",
+"GPL-2+" : "GPL-2.0-or-later",
+"GPLv2" : "GPL-2.0-only",
+"GPLv2+" : "GPL-2.0-or-later",
+"GPLv2.0" : "GPL-2.0-only",
+"GPLv2.0+" : "GPL-2.0-or-later",
+"GPL-2.0" : "GPL-2.0-only",
+"GPL-2.0+" : "GPL-2.0-or-later",
+"GPL-3" : "GPL-3.0-only",
+"GPL-3+" : "GPL-3.0-or-later",
+"GPLv3" : "GPL-3.0-only",
+"GPLv3+" : "GPL-3.0-or-later",
+"GPLv3.0" : "GPL-3.0-only",
+"GPLv3.0+" : "GPL-3.0-or-later",
+"GPL-3.0" : "GPL-3.0-only",
+"GPL-3.0+" : "GPL-3.0-or-later",
+"LGPLv2" : "LGPL-2.0-only",
+"LGPLv2+" : "LGPL-2.0-or-later",
+"LGPLv2.0" : "LGPL-2.0-only",
+"LGPLv2.0+" : "LGPL-2.0-or-later",
+"LGPL-2.0" : "LGPL-2.0-only",
+"LGPL-2.0+" : "LGPL-2.0-or-later",
+"LGPL2.1" : "LGPL-2.1-only",
+"LGPL2.1+" : "LGPL-2.1-or-later",
+"LGPLv2.1" : "LGPL-2.1-only",
+"LGPLv2.1+" : "LGPL-2.1-or-later",
+"LGPL-2.1" : "LGPL-2.1-only",
+"LGPL-2.1+" : "LGPL-2.1-or-later",
+"LGPLv3" : "LGPL-3.0-only",
+"LGPLv3+" : "LGPL-3.0-or-later",
+"LGPL-3.0" : "LGPL-3.0-only",
+"LGPL-3.0+" : "LGPL-3.0-or-later",
+"MPL-1" : "MPL-1.0",
+"MPLv1" : "MPL-1.0",
+"MPLv1.1" : "MPL-1.1",
+"MPLv2" : "MPL-2.0",
+"MIT-X" : "MIT",
+"MIT-style" : "MIT",
+"openssl" : "OpenSSL",
+"PSF" : "PSF-2.0",
+"PSFv2" : "PSF-2.0",
+"Python-2" : "Python-2.0",
+"Apachev2" : "Apache-2.0",
+"Apache-2" : "Apache-2.0",
+"Artisticv1" : "Artistic-1.0",
+"Artistic-1" : "Artistic-1.0",
+"AFL-2" : "AFL-2.0",
+"AFL-1" : "AFL-1.2",
+"AFLv2" : "AFL-2.0",
+"AFLv1" : "AFL-1.2",
+"CDDLv1" : "CDDL-1.0",
+"CDDL-1" : "CDDL-1.0",
+"EPLv1.0" : "EPL-1.0",
+"FreeType" : "FTL",
+"Nauman" : "Naumen",
+"tcl" : "TCL",
+"vim" : "Vim",
+"SGIv1" : "SGI-1",
+}
+
+def processfile(fn):
+ print("processing file '%s'" % fn)
+ try:
+ fh, abs_path = tempfile.mkstemp()
+ modified = False
+ with os.fdopen(fh, 'w') as new_file:
+ with open(fn, "r") as old_file:
+ for line in old_file:
+ if not line.startswith("LICENSE"):
+ new_file.write(line)
+ continue
+ orig = line
+ for license in sorted(license_map, key=len, reverse=True):
+ for ending in ['"', "'", " ", ")"]:
+ line = line.replace(license + ending, license_map[license] + ending)
+ if orig != line:
+ modified = True
+ new_file.write(line)
+ new_file.close()
+ if modified:
+ shutil.copymode(fn, abs_path)
+ os.remove(fn)
+ shutil.move(abs_path, fn)
+ except UnicodeDecodeError:
+ pass
+
+ourname = os.path.basename(sys.argv[0])
+ourversion = "0.01"
+
+if os.path.isfile(sys.argv[1]):
+ processfile(sys.argv[1])
+ sys.exit(0)
+
+for targetdir in sys.argv[1:]:
+ print("processing directory '%s'" % targetdir)
+ for root, dirs, files in os.walk(targetdir):
+ for name in files:
+ if name == ourname:
+ continue
+ fn = os.path.join(root, name)
+ if os.path.islink(fn):
+ continue
+ if "/.git/" in fn or fn.endswith(".html") or fn.endswith(".patch") or fn.endswith(".m4") or fn.endswith(".diff") or fn.endswith(".orig"):
+ continue
+ processfile(fn)
+
+print("All files processed with version %s" % ourversion)
diff --git a/scripts/contrib/convert-srcuri.py b/scripts/contrib/convert-srcuri.py
new file mode 100755
index 0000000000..587392334f
--- /dev/null
+++ b/scripts/contrib/convert-srcuri.py
@@ -0,0 +1,77 @@
+#!/usr/bin/env python3
+#
+# Conversion script to update SRC_URI to add branch to git urls
+#
+# Copyright (C) 2021 Richard Purdie
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import re
+import os
+import sys
+import tempfile
+import shutil
+import mimetypes
+
+if len(sys.argv) < 2:
+ print("Please specify a directory to run the conversion script against.")
+ sys.exit(1)
+
+def processfile(fn):
+ def matchline(line):
+ if "MIRROR" in line or ".*" in line or "GNOME_GIT" in line:
+ return False
+ return True
+ print("processing file '%s'" % fn)
+ try:
+ if "distro_alias.inc" in fn or "linux-yocto-custom.bb" in fn:
+ return
+ fh, abs_path = tempfile.mkstemp()
+ modified = False
+ with os.fdopen(fh, 'w') as new_file:
+ with open(fn, "r") as old_file:
+ for line in old_file:
+ if ("git://" in line or "gitsm://" in line) and "branch=" not in line and matchline(line):
+ if line.endswith('"\n'):
+ line = line.replace('"\n', ';branch=master"\n')
+ elif re.search('\s*\\\\$', line):
+ line = re.sub('\s*\\\\$', ';branch=master \\\\', line)
+ modified = True
+ if ("git://" in line or "gitsm://" in line) and "github.com" in line and "protocol=https" not in line and matchline(line):
+ if "protocol=git" in line:
+ line = line.replace('protocol=git', 'protocol=https')
+ elif line.endswith('"\n'):
+ line = line.replace('"\n', ';protocol=https"\n')
+ elif re.search('\s*\\\\$', line):
+ line = re.sub('\s*\\\\$', ';protocol=https \\\\', line)
+ modified = True
+ new_file.write(line)
+ if modified:
+ shutil.copymode(fn, abs_path)
+ os.remove(fn)
+ shutil.move(abs_path, fn)
+ except UnicodeDecodeError:
+ pass
+
+ourname = os.path.basename(sys.argv[0])
+ourversion = "0.1"
+
+if os.path.isfile(sys.argv[1]):
+ processfile(sys.argv[1])
+ sys.exit(0)
+
+for targetdir in sys.argv[1:]:
+ print("processing directory '%s'" % targetdir)
+ for root, dirs, files in os.walk(targetdir):
+ for name in files:
+ if name == ourname:
+ continue
+ fn = os.path.join(root, name)
+ if os.path.islink(fn):
+ continue
+ if "/.git/" in fn or fn.endswith(".html") or fn.endswith(".patch") or fn.endswith(".m4") or fn.endswith(".diff"):
+ continue
+ processfile(fn)
+
+print("All files processed with version %s" % ourversion)
diff --git a/scripts/contrib/convert-variable-renames.py b/scripts/contrib/convert-variable-renames.py
new file mode 100755
index 0000000000..eded90ca61
--- /dev/null
+++ b/scripts/contrib/convert-variable-renames.py
@@ -0,0 +1,116 @@
+#!/usr/bin/env python3
+#
+# Conversion script to rename variables to versions with improved terminology.
+# Also highlights potentially problematic language and removed variables.
+#
+# Copyright (C) 2021 Richard Purdie
+# Copyright (C) 2022 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import re
+import os
+import sys
+import tempfile
+import shutil
+import mimetypes
+
+if len(sys.argv) < 2:
+ print("Please specify a directory to run the conversion script against.")
+ sys.exit(1)
+
+renames = {
+"BB_ENV_WHITELIST" : "BB_ENV_PASSTHROUGH",
+"BB_ENV_EXTRAWHITE" : "BB_ENV_PASSTHROUGH_ADDITIONS",
+"BB_HASHCONFIG_WHITELIST" : "BB_HASHCONFIG_IGNORE_VARS",
+"BB_SETSCENE_ENFORCE_WHITELIST" : "BB_SETSCENE_ENFORCE_IGNORE_TASKS",
+"BB_HASHBASE_WHITELIST" : "BB_BASEHASH_IGNORE_VARS",
+"BB_HASHTASK_WHITELIST" : "BB_TASKHASH_IGNORE_TASKS",
+"CVE_CHECK_PN_WHITELIST" : "CVE_CHECK_SKIP_RECIPE",
+"CVE_CHECK_WHITELIST" : "CVE_CHECK_IGNORE",
+"MULTI_PROVIDER_WHITELIST" : "BB_MULTI_PROVIDER_ALLOWED",
+"PNBLACKLIST" : "SKIP_RECIPE",
+"SDK_LOCAL_CONF_BLACKLIST" : "ESDK_LOCALCONF_REMOVE",
+"SDK_LOCAL_CONF_WHITELIST" : "ESDK_LOCALCONF_ALLOW",
+"SDK_INHERIT_BLACKLIST" : "ESDK_CLASS_INHERIT_DISABLE",
+"SSTATE_DUPWHITELIST" : "SSTATE_ALLOW_OVERLAP_FILES",
+"SYSROOT_DIRS_BLACKLIST" : "SYSROOT_DIRS_IGNORE",
+"UNKNOWN_CONFIGURE_WHITELIST" : "UNKNOWN_CONFIGURE_OPT_IGNORE",
+"ICECC_USER_CLASS_BL" : "ICECC_CLASS_DISABLE",
+"ICECC_SYSTEM_CLASS_BL" : "ICECC_CLASS_DISABLE",
+"ICECC_USER_PACKAGE_WL" : "ICECC_RECIPE_ENABLE",
+"ICECC_USER_PACKAGE_BL" : "ICECC_RECIPE_DISABLE",
+"ICECC_SYSTEM_PACKAGE_BL" : "ICECC_RECIPE_DISABLE",
+"LICENSE_FLAGS_WHITELIST" : "LICENSE_FLAGS_ACCEPTED",
+}
+
+removed_list = [
+"BB_STAMP_WHITELIST",
+"BB_STAMP_POLICY",
+"INHERIT_BLACKLIST",
+"TUNEABI_WHITELIST",
+]
+
+context_check_list = [
+"blacklist",
+"whitelist",
+"abort",
+]
+
+def processfile(fn):
+
+ print("processing file '%s'" % fn)
+ try:
+ fh, abs_path = tempfile.mkstemp()
+ modified = False
+ with os.fdopen(fh, 'w') as new_file:
+ with open(fn, "r") as old_file:
+ lineno = 0
+ for line in old_file:
+ lineno += 1
+ if not line or "BB_RENAMED_VARIABLE" in line:
+ continue
+ # Do the renames
+ for old_name, new_name in renames.items():
+ if old_name in line:
+ line = line.replace(old_name, new_name)
+ modified = True
+ # Find removed names
+ for removed_name in removed_list:
+ if removed_name in line:
+ print("%s needs further work at line %s because %s has been deprecated" % (fn, lineno, removed_name))
+ for check_word in context_check_list:
+ if re.search(check_word, line, re.IGNORECASE):
+ print("%s needs further work at line %s since it contains %s"% (fn, lineno, check_word))
+ new_file.write(line)
+ new_file.close()
+ if modified:
+ print("*** Modified file '%s'" % (fn))
+ shutil.copymode(fn, abs_path)
+ os.remove(fn)
+ shutil.move(abs_path, fn)
+ except UnicodeDecodeError:
+ pass
+
+ourname = os.path.basename(sys.argv[0])
+ourversion = "0.1"
+
+if os.path.isfile(sys.argv[1]):
+ processfile(sys.argv[1])
+ sys.exit(0)
+
+for targetdir in sys.argv[1:]:
+ print("processing directory '%s'" % targetdir)
+ for root, dirs, files in os.walk(targetdir):
+ for name in files:
+ if name == ourname:
+ continue
+ fn = os.path.join(root, name)
+ if os.path.islink(fn):
+ continue
+ if "ChangeLog" in fn or "/.git/" in fn or fn.endswith(".html") or fn.endswith(".patch") or fn.endswith(".m4") or fn.endswith(".diff") or fn.endswith(".orig"):
+ continue
+ processfile(fn)
+
+print("All files processed with version %s" % ourversion)
diff --git a/scripts/contrib/ddimage b/scripts/contrib/ddimage
index ab929957a5..70eee8ebea 100755
--- a/scripts/contrib/ddimage
+++ b/scripts/contrib/ddimage
@@ -1,8 +1,9 @@
#!/bin/sh
-
-# Default to avoiding the first two disks on typical Linux and Mac OS installs
-# Better safe than sorry :-)
-BLACKLIST_DEVICES="/dev/sda /dev/sdb /dev/disk1 /dev/disk2"
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
# 1MB blocksize
BLOCKSIZE=1048576
@@ -29,7 +30,6 @@ image_details() {
}
device_details() {
- DEV=$1
BLOCK_SIZE=512
echo "Device details"
@@ -42,11 +42,17 @@ device_details() {
fi
# Default / Linux information collection
- echo " device: $DEVICE"
+ ACTUAL_DEVICE=`readlink -f $DEVICE`
+ DEV=`basename $ACTUAL_DEVICE`
+ if [ "$ACTUAL_DEVICE" != "$DEVICE" ] ; then
+ echo " device: $DEVICE -> $ACTUAL_DEVICE"
+ else
+ echo " device: $DEVICE"
+ fi
if [ -f "/sys/class/block/$DEV/device/vendor" ]; then
echo " vendor: $(cat /sys/class/block/$DEV/device/vendor)"
else
- echo " vendor: UNKOWN"
+ echo " vendor: UNKNOWN"
fi
if [ -f "/sys/class/block/$DEV/device/model" ]; then
echo " model: $(cat /sys/class/block/$DEV/device/model)"
@@ -61,6 +67,49 @@ device_details() {
echo ""
}
+check_mount_device() {
+ if cat /proc/self/mounts | awk '{ print $1 }' | grep /dev/ | grep -q -E "^$1$" ; then
+ return 0
+ fi
+ return 1
+}
+
+is_mounted() {
+ if [ "$(uname)" = "Darwin" ]; then
+ if df | awk '{ print $1 }' | grep /dev/ | grep -q -E "^$1(s[0-9]+)?$" ; then
+ return 0
+ fi
+ else
+ if check_mount_device $1 ; then
+ return 0
+ fi
+ DEV=`basename $1`
+ if [ -d /sys/class/block/$DEV/ ] ; then
+ PARENT_BLKDEV=`basename $(readlink -f "/sys/class/block/$DEV/..")`
+ if [ "$PARENT_BLKDEV" != "block" ] ; then
+ if check_mount_device $PARENT_BLKDEV ; then
+ return 0
+ fi
+ fi
+ for CHILD_BLKDEV in `find /sys/class/block/$DEV/ -mindepth 1 -maxdepth 1 -name "$DEV*" -type d`
+ do
+ if check_mount_device /dev/`basename $CHILD_BLKDEV` ; then
+ return 0
+ fi
+ done
+ fi
+ fi
+ return 1
+}
+
+is_inuse() {
+ HOLDERS_DIR="/sys/class/block/`basename $1`/holders"
+ if [ -d $HOLDERS_DIR ] && [ `ls -A $HOLDERS_DIR` ] ; then
+ return 0
+ fi
+ return 1
+}
+
if [ $# -ne 2 ]; then
usage
exit 1
@@ -75,22 +124,37 @@ if [ ! -e "$IMAGE" ]; then
exit 1
fi
+if [ ! -e "$DEVICE" ]; then
+ echo "ERROR: Device $DEVICE does not exist"
+ usage
+ exit 1
+fi
-for i in ${BLACKLIST_DEVICES}; do
- if [ "$i" = "$DEVICE" ]; then
- echo "ERROR: Device $DEVICE is blacklisted"
- exit 1
- fi
-done
+if [ "$(uname)" = "Darwin" ]; then
+ # readlink doesn't support -f on MacOS, just assume it isn't a symlink
+ ACTUAL_DEVICE=$DEVICE
+else
+ ACTUAL_DEVICE=`readlink -f $DEVICE`
+fi
+if is_mounted $ACTUAL_DEVICE ; then
+ echo "ERROR: Device $DEVICE is currently mounted - check if this is the right device, and unmount it first if so"
+ device_details
+ exit 1
+fi
+if is_inuse $ACTUAL_DEVICE ; then
+ echo "ERROR: Device $DEVICE is currently in use (possibly part of LVM) - check if this is the right device!"
+ device_details
+ exit 1
+fi
if [ ! -w "$DEVICE" ]; then
- echo "ERROR: Device $DEVICE does not exist or is not writable"
+ echo "ERROR: Device $DEVICE is not writable - possibly use sudo?"
usage
exit 1
fi
image_details $IMAGE
-device_details $(basename $DEVICE)
+device_details
printf "Write $IMAGE to $DEVICE [y/N]? "
read RESPONSE
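The mount and holders checks above replace the old hard-coded device blacklist: a target is refused if it, its parent, or any child block device appears in /proc/self/mounts, or if it has entries under /sys/class/block/<dev>/holders (e.g. LVM members). A hypothetical session (device name illustrative):

    ./scripts/contrib/ddimage core-image-minimal.wic /dev/sdc
    # ERROR: Device /dev/sdc is currently mounted - check if this is the
    # right device, and unmount it first if so
    sudo umount /dev/sdc1
    sudo ./scripts/contrib/ddimage core-image-minimal.wic /dev/sdc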
diff --git a/scripts/contrib/devtool-stress.py b/scripts/contrib/devtool-stress.py
index d555c51a65..81046ecf49 100755
--- a/scripts/contrib/devtool-stress.py
+++ b/scripts/contrib/devtool-stress.py
@@ -6,18 +6,7 @@
#
# Copyright 2015 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# SPDX-License-Identifier: GPL-2.0-only
#
import sys
diff --git a/scripts/contrib/dialog-power-control b/scripts/contrib/dialog-power-control
index 7550ea53be..82c84baa1d 100755
--- a/scripts/contrib/dialog-power-control
+++ b/scripts/contrib/dialog-power-control
@@ -1,5 +1,9 @@
#!/bin/sh
#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
# Simple script to show a manual power prompt for when you want to use
# automated hardware testing with testimage.bbclass but you don't have a
# web-enabled power strip or similar to do the power on/off/cycle.
diff --git a/scripts/contrib/documentation-audit.sh b/scripts/contrib/documentation-audit.sh
index 2144aac936..7197a2fcea 100755
--- a/scripts/contrib/documentation-audit.sh
+++ b/scripts/contrib/documentation-audit.sh
@@ -1,5 +1,9 @@
#!/bin/bash
#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
# Perform an audit of which packages provide documentation and which
# are missing -doc packages.
#
@@ -7,7 +11,6 @@
# this script after source'ing the build environment script, so you're
# running it from build/ directory.
#
-# Maintainer: Scott Garman <scott.a.garman@intel.com>
REPORT_DOC_SIMPLE="documentation_exists.txt"
REPORT_DOC_DETAIL="documentation_exists_detail.txt"
@@ -25,8 +28,8 @@ if [ -z "$BITBAKE" ]; then
fi
echo "REMINDER: you need to build for MACHINE=qemux86 or you won't get useful results"
-echo "REMINDER: you need to set LICENSE_FLAGS_WHITELIST appropriately in local.conf or "
-echo " you'll get false positives. For example, LICENSE_FLAGS_WHITELIST = \"Commercial\""
+echo "REMINDER: you need to set LICENSE_FLAGS_ACCEPTED appropriately in local.conf or "
+echo " you'll get false positives. For example, LICENSE_FLAGS_ACCEPTED = \"commercial\""
for pkg in `bitbake -s | awk '{ print \$1 }'`; do
if [[ "$pkg" == "Loading" || "$pkg" == "Loaded" ||
diff --git a/scripts/contrib/graph-tool b/scripts/contrib/graph-tool
index 1df5b8c345..26488930e0 100755
--- a/scripts/contrib/graph-tool
+++ b/scripts/contrib/graph-tool
@@ -7,21 +7,17 @@
#
# Copyright 2013 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# SPDX-License-Identifier: GPL-2.0-only
#
import sys
+import os
+import argparse
+
+scripts_lib_path = os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'lib'))
+sys.path.insert(0, scripts_lib_path)
+import argparse_oe
+
def get_path_networkx(dotfile, fromnode, tonode):
try:
@@ -45,47 +41,78 @@ def get_path_networkx(dotfile, fromnode, tonode):
return networkx.all_simple_paths(graph, source=fromnode, target=tonode)
-def find_paths(args, usage):
- if len(args) < 3:
- usage()
- sys.exit(1)
-
- fromnode = args[1]
- tonode = args[2]
-
+def find_paths(args):
path = None
- for path in get_path_networkx(args[0], fromnode, tonode):
+ for path in get_path_networkx(args.dotfile, args.fromnode, args.tonode):
print(" -> ".join(map(str, path)))
if not path:
- print("ERROR: no path from %s to %s in graph" % (fromnode, tonode))
- sys.exit(1)
+ print("ERROR: no path from %s to %s in graph" % (args.fromnode, args.tonode))
+ return 1
+
+
+def filter_graph(args):
+ import fnmatch
+
+ exclude_tasks = []
+ if args.exclude_tasks:
+ for task in args.exclude_tasks.split(','):
+ if not task.startswith('do_'):
+ task = 'do_%s' % task
+ exclude_tasks.append(task)
+
+ def checkref(strval):
+ strval = strval.strip().strip('"')
+ target, taskname = strval.rsplit('.', 1)
+ if exclude_tasks:
+ for extask in exclude_tasks:
+ if fnmatch.fnmatch(taskname, extask):
+ return False
+ if strval in args.ref or target in args.ref:
+ return True
+ return False
+
+ with open(args.infile, 'r') as f:
+ for line in f:
+ line = line.rstrip()
+ if line.startswith(('digraph', '}')):
+ print(line)
+ elif '->' in line:
+ linesplit = line.split('->')
+ if checkref(linesplit[0]) and checkref(linesplit[1]):
+ print(line)
+ elif (not args.no_nodes) and checkref(line.split()[0]):
+ print(line)
+
def main():
- import optparse
- parser = optparse.OptionParser(
- usage = '''%prog [options] <command> <arguments>
+ parser = argparse_oe.ArgumentParser(description='Small utility for working with .dot graph files')
-Available commands:
- find-paths <dotfile> <from> <to>
- Find all of the paths between two nodes in a dot graph''')
+ subparsers = parser.add_subparsers(title='subcommands', metavar='<subcommand>')
+ subparsers.required = True
- #parser.add_option("-d", "--debug",
- # help = "Report all SRCREV values, not just ones where AUTOREV has been used",
- # action="store_true", dest="debug", default=False)
+ parser_find_paths = subparsers.add_parser('find-paths',
+ help='Find all of the paths between two nodes in a dot graph',
+ description='Finds all of the paths between two nodes in a dot graph')
+ parser_find_paths.add_argument('dotfile', help='.dot graph to search in')
+ parser_find_paths.add_argument('fromnode', help='starting node name')
+ parser_find_paths.add_argument('tonode', help='ending node name')
+ parser_find_paths.set_defaults(func=find_paths)
- options, args = parser.parse_args(sys.argv)
- args = args[1:]
+ parser_filter = subparsers.add_parser('filter',
+ help='Pare down a task graph to contain only the specified references',
+ description='Pares down a task-depends.dot graph produced by bitbake -g to contain only the specified references')
+ parser_filter.add_argument('infile', help='Input file')
+ parser_filter.add_argument('ref', nargs='+', help='Reference to include (either recipe/target name or full target.taskname specification)')
+ parser_filter.add_argument('-n', '--no-nodes', action='store_true', help='Skip node formatting lines')
+ parser_filter.add_argument('-x', '--exclude-tasks', help='Comma-separated list of tasks to exclude (do_ prefix optional, wildcards allowed)')
+ parser_filter.set_defaults(func=filter_graph)
- if len(args) < 1:
- parser.print_help()
- sys.exit(1)
+ args = parser.parse_args()
- if args[0] == "find-paths":
- find_paths(args[1:], parser.print_help)
- else:
- parser.print_help()
- sys.exit(1)
+ ret = args.func(args)
+ return ret
if __name__ == "__main__":
- main()
+ ret = main()
+ sys.exit(ret)
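With the move to argparse the tool gains a second subcommand, filter, next to the original find-paths. Hypothetical invocations against a task graph produced by bitbake -g (target and task names illustrative):

    bitbake -g core-image-minimal
    # all paths between two task nodes
    ./scripts/contrib/graph-tool find-paths task-depends.dot \
        busybox.do_compile quilt-native.do_install
    # pare the graph down to two recipes, dropping fetch/unpack tasks
    ./scripts/contrib/graph-tool filter task-depends.dot busybox quilt-native \
        -x fetch,unpack > filtered.dot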
diff --git a/scripts/contrib/image-manifest b/scripts/contrib/image-manifest
new file mode 100755
index 0000000000..4d65a99258
--- /dev/null
+++ b/scripts/contrib/image-manifest
@@ -0,0 +1,523 @@
+#!/usr/bin/env python3
+
+# Script to extract information from image manifests
+#
+# Copyright (C) 2018 Intel Corporation
+# Copyright (C) 2021 Wind River Systems, Inc.
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import sys
+import os
+import argparse
+import logging
+import json
+import shutil
+import tempfile
+import tarfile
+from collections import OrderedDict
+
+scripts_path = os.path.dirname(__file__)
+lib_path = scripts_path + '/../lib'
+sys.path = sys.path + [lib_path]
+
+import scriptutils
+logger = scriptutils.logger_create(os.path.basename(__file__))
+
+import argparse_oe
+import scriptpath
+bitbakepath = scriptpath.add_bitbake_lib_path()
+if not bitbakepath:
+ logger.error("Unable to find bitbake by searching parent directory of this script or PATH")
+ sys.exit(1)
+logger.debug('Using standard bitbake path %s' % bitbakepath)
+scriptpath.add_oe_lib_path()
+
+import bb.tinfoil
+import bb.utils
+import oe.utils
+import oe.recipeutils
+
+def get_pkg_list(manifest):
+ pkglist = []
+ with open(manifest, 'r') as f:
+ for line in f:
+ linesplit = line.split()
+ if len(linesplit) == 3:
+ # manifest file
+ pkglist.append(linesplit[0])
+ elif len(linesplit) == 1:
+ # build dependency file
+ pkglist.append(linesplit[0])
+ return sorted(pkglist)
+
+def list_packages(args):
+ pkglist = get_pkg_list(args.manifest)
+ for pkg in pkglist:
+ print('%s' % pkg)
+
+def pkg2recipe(tinfoil, pkg):
+ if "-native" in pkg:
+ logger.info('skipping %s' % pkg)
+ return None
+
+ pkgdata_dir = tinfoil.config_data.getVar('PKGDATA_DIR')
+ pkgdatafile = os.path.join(pkgdata_dir, 'runtime-reverse', pkg)
+ logger.debug('pkgdatafile %s' % pkgdatafile)
+ try:
+ f = open(pkgdatafile, 'r')
+ for line in f:
+ if line.startswith('PN:'):
+ recipe = line.split(':', 1)[1].strip()
+ return recipe
+ except Exception:
+ logger.warning('%s is missing' % pkgdatafile)
+ return None
+
+def get_recipe_list(manifest, tinfoil):
+ pkglist = get_pkg_list(manifest)
+ recipelist = []
+ for pkg in pkglist:
+ recipe = pkg2recipe(tinfoil,pkg)
+ if recipe:
+ if not recipe in recipelist:
+ recipelist.append(recipe)
+
+ return sorted(recipelist)
+
+def list_recipes(args):
+ import bb.tinfoil
+ with bb.tinfoil.Tinfoil() as tinfoil:
+ tinfoil.logger.setLevel(logger.getEffectiveLevel())
+ tinfoil.prepare(config_only=True)
+ recipelist = get_recipe_list(args.manifest, tinfoil)
+ for recipe in sorted(recipelist):
+ print('%s' % recipe)
+
+def list_layers(args):
+
+ def find_git_repo(pth):
+ checkpth = pth
+ while checkpth != os.sep:
+ if os.path.exists(os.path.join(checkpth, '.git')):
+ return checkpth
+ checkpth = os.path.dirname(checkpth)
+ return None
+
+ def get_git_remote_branch(repodir):
+ try:
+ stdout, _ = bb.process.run(['git', 'rev-parse', '--abbrev-ref', '--symbolic-full-name', '@{u}'], cwd=repodir)
+ except bb.process.ExecutionError as e:
+ stdout = None
+ if stdout:
+ return stdout.strip()
+ else:
+ return None
+
+ def get_git_head_commit(repodir):
+ try:
+ stdout, _ = bb.process.run(['git', 'rev-parse', 'HEAD'], cwd=repodir)
+ except bb.process.ExecutionError as e:
+ stdout = None
+ if stdout:
+ return stdout.strip()
+ else:
+ return None
+
+ def get_git_repo_url(repodir, remote='origin'):
+ import bb.process
+ # Try to get upstream repo location from origin remote
+ try:
+ stdout, _ = bb.process.run(['git', 'remote', '-v'], cwd=repodir)
+ except bb.process.ExecutionError as e:
+ stdout = None
+ if stdout:
+ for line in stdout.splitlines():
+ splitline = line.split()
+ if len(splitline) > 1:
+ if splitline[0] == remote and scriptutils.is_src_url(splitline[1]):
+ return splitline[1]
+ return None
+
+ with bb.tinfoil.Tinfoil() as tinfoil:
+ tinfoil.logger.setLevel(logger.getEffectiveLevel())
+ tinfoil.prepare(config_only=False)
+ layers = OrderedDict()
+ for layerdir in tinfoil.config_data.getVar('BBLAYERS').split():
+ layerdata = OrderedDict()
+ layername = os.path.basename(layerdir)
+ logger.debug('layername %s, layerdir %s' % (layername, layerdir))
+ if layername in layers:
+ logger.warning('layername %s is not unique in configuration' % layername)
+ layername = os.path.basename(os.path.dirname(layerdir)) + '_' + os.path.basename(layerdir)
+ logger.debug('trying layername %s' % layername)
+ if layername in layers:
+ logger.error('Layer name %s is not unique in configuration' % layername)
+ sys.exit(2)
+ repodir = find_git_repo(layerdir)
+ if repodir:
+ remotebranch = get_git_remote_branch(repodir)
+ remote = 'origin'
+ if remotebranch and '/' in remotebranch:
+ rbsplit = remotebranch.split('/', 1)
+ layerdata['actual_branch'] = rbsplit[1]
+ remote = rbsplit[0]
+ layerdata['vcs_url'] = get_git_repo_url(repodir, remote)
+ if os.path.abspath(repodir) != os.path.abspath(layerdir):
+ layerdata['vcs_subdir'] = os.path.relpath(layerdir, repodir)
+ commit = get_git_head_commit(repodir)
+ if commit:
+ layerdata['vcs_commit'] = commit
+ layers[layername] = layerdata
+
+ json.dump(layers, args.output, indent=2)
+
+def get_recipe(args):
+ with bb.tinfoil.Tinfoil() as tinfoil:
+ tinfoil.logger.setLevel(logger.getEffectiveLevel())
+ tinfoil.prepare(config_only=True)
+
+ recipe = pkg2recipe(tinfoil, args.package)
+ print(' %s package provided by %s' % (args.package, recipe))
+
+def pkg_dependencies(args):
+ def get_recipe_info(tinfoil, recipe):
+ try:
+ info = tinfoil.get_recipe_info(recipe)
+ except Exception:
+ logger.error('Failed to get recipe info for: %s' % recipe)
+ sys.exit(1)
+ if not info:
+ logger.warning('No recipe info found for: %s' % recipe)
+ sys.exit(1)
+ append_files = tinfoil.get_file_appends(info.fn)
+ appends = True
+ data = tinfoil.parse_recipe_file(info.fn, appends, append_files)
+ data.pn = info.pn
+ data.pv = info.pv
+ return data
+
+ def find_dependencies(tinfoil, assume_provided, recipe_info, packages, rn, order):
+ spaces = ' ' * order
+ data = recipe_info[rn]
+ if args.native:
+ logger.debug('%s- %s' % (spaces, data.pn))
+ elif "-native" not in data.pn:
+ if "cross" not in data.pn:
+ logger.debug('%s- %s' % (spaces, data.pn))
+
+ depends = []
+ for dep in data.depends:
+ if dep not in assume_provided:
+ depends.append(dep)
+
+ # First find all dependencies not in package list.
+ for dep in depends:
+ if dep not in packages:
+ packages.append(dep)
+ dep_data = get_recipe_info(tinfoil, dep)
+ # Do this once now to reduce the number of bitbake calls.
+ dep_data.depends = dep_data.getVar('DEPENDS').split()
+ recipe_info[dep] = dep_data
+
+ # Then recursively analyze all of the dependencies for the current recipe.
+ for dep in depends:
+ find_dependencies(tinfoil, assume_provided, recipe_info, packages, dep, order + 1)
+
+ with bb.tinfoil.Tinfoil() as tinfoil:
+ tinfoil.logger.setLevel(logger.getEffectiveLevel())
+ tinfoil.prepare()
+
+ assume_provided = tinfoil.config_data.getVar('ASSUME_PROVIDED').split()
+ logger.debug('assumed provided:')
+ for ap in sorted(assume_provided):
+ logger.debug(' - %s' % ap)
+
+ recipe = pkg2recipe(tinfoil, args.package)
+ data = get_recipe_info(tinfoil, recipe)
+ data.depends = []
+ depends = data.getVar('DEPENDS').split()
+ for dep in depends:
+ if dep not in assume_provided:
+ data.depends.append(dep)
+
+ recipe_info = dict([(recipe, data)])
+ packages = []
+ find_dependencies(tinfoil, assume_provided, recipe_info, packages, recipe, order=1)
+
+ print('\nThe following packages are required to build %s' % recipe)
+ for p in sorted(packages):
+ data = recipe_info[p]
+ if "-native" not in data.pn:
+ if "cross" not in data.pn:
+ print(" %s (%s)" % (data.pn,p))
+
+ if args.native:
+ print('\nThe following native packages are required to build %s' % recipe)
+ for p in sorted(packages):
+ data = recipe_info[p]
+ if "-native" in data.pn:
+ print(" %s(%s)" % (data.pn,p))
+ if "cross" in data.pn:
+ print(" %s(%s)" % (data.pn,p))
+
+def default_config():
+ vlist = OrderedDict()
+ vlist['PV'] = 'yes'
+ vlist['SUMMARY'] = 'no'
+ vlist['DESCRIPTION'] = 'no'
+ vlist['SECTION'] = 'no'
+ vlist['LICENSE'] = 'yes'
+ vlist['HOMEPAGE'] = 'no'
+ vlist['BUGTRACKER'] = 'no'
+ vlist['PROVIDES'] = 'no'
+ vlist['BBCLASSEXTEND'] = 'no'
+ vlist['DEPENDS'] = 'no'
+ vlist['PACKAGECONFIG'] = 'no'
+ vlist['SRC_URI'] = 'yes'
+ vlist['SRCREV'] = 'yes'
+ vlist['EXTRA_OECONF'] = 'no'
+ vlist['EXTRA_OESCONS'] = 'no'
+ vlist['EXTRA_OECMAKE'] = 'no'
+ vlist['EXTRA_OEMESON'] = 'no'
+
+ clist = OrderedDict()
+ clist['variables'] = vlist
+ clist['filepath'] = 'no'
+ clist['sha256sum'] = 'no'
+ clist['layerdir'] = 'no'
+ clist['layer'] = 'no'
+ clist['inherits'] = 'no'
+ clist['source_urls'] = 'no'
+ clist['packageconfig_opts'] = 'no'
+ clist['patches'] = 'no'
+ clist['packagedir'] = 'no'
+ return clist
+
+def dump_config(args):
+ config = default_config()
+ f = open('default_config.json', 'w')
+ json.dump(config, f, indent=2)
+ logger.info('Default config list dumped to default_config.json')
+
+def export_manifest_info(args):
+
+ def handle_value(value):
+ if value:
+ return oe.utils.squashspaces(value)
+ else:
+ return value
+
+ if args.config:
+ logger.debug('config: %s' % args.config)
+ f = open(args.config, 'r')
+ config = json.load(f, object_pairs_hook=OrderedDict)
+ else:
+ config = default_config()
+ if logger.isEnabledFor(logging.DEBUG):
+ print('Configuration:')
+ json.dump(config, sys.stdout, indent=2)
+ print('')
+
+ tmpoutdir = tempfile.mkdtemp(prefix=os.path.basename(__file__)+'-')
+ logger.debug('tmp dir: %s' % tmpoutdir)
+
+ # export manifest
+ shutil.copy2(args.manifest,os.path.join(tmpoutdir, "manifest"))
+
+ with bb.tinfoil.Tinfoil(tracking=True) as tinfoil:
+ tinfoil.logger.setLevel(logger.getEffectiveLevel())
+ tinfoil.prepare(config_only=False)
+
+ pkglist = get_pkg_list(args.manifest)
+ # export pkg list
+ f = open(os.path.join(tmpoutdir, "pkgs"), 'w')
+ for pkg in pkglist:
+ f.write('%s\n' % pkg)
+ f.close()
+
+ recipelist = []
+ for pkg in pkglist:
+ recipe = pkg2recipe(tinfoil,pkg)
+ if recipe:
+ if not recipe in recipelist:
+ recipelist.append(recipe)
+ recipelist.sort()
+ # export recipe list
+ f = open(os.path.join(tmpoutdir, "recipes"), 'w')
+ for recipe in recipelist:
+ f.write('%s\n' % recipe)
+ f.close()
+
+ try:
+ rvalues = OrderedDict()
+ for pn in sorted(recipelist):
+ logger.debug('Package: %s' % pn)
+ rd = tinfoil.parse_recipe(pn)
+
+ rvalues[pn] = OrderedDict()
+
+ for varname in config['variables']:
+ if config['variables'][varname] == 'yes':
+ rvalues[pn][varname] = handle_value(rd.getVar(varname))
+
+ fpth = rd.getVar('FILE')
+ layerdir = oe.recipeutils.find_layerdir(fpth)
+ if config['filepath'] == 'yes':
+ rvalues[pn]['filepath'] = os.path.relpath(fpth, layerdir)
+ if config['sha256sum'] == 'yes':
+ rvalues[pn]['sha256sum'] = bb.utils.sha256_file(fpth)
+
+ if config['layerdir'] == 'yes':
+ rvalues[pn]['layerdir'] = layerdir
+
+ if config['layer'] == 'yes':
+ rvalues[pn]['layer'] = os.path.basename(layerdir)
+
+ if config['inherits'] == 'yes':
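+ # __inherit_cache records every class parsed into a datastore; removing
+ # the global configuration's entries leaves only the classes inherited
+ # by the recipe itself.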
+ gr = set(tinfoil.config_data.getVar("__inherit_cache") or [])
+ lr = set(rd.getVar("__inherit_cache") or [])
+ rvalues[pn]['inherits'] = sorted({os.path.splitext(os.path.basename(r))[0] for r in lr if r not in gr})
+
+ if config['source_urls'] == 'yes':
+ rvalues[pn]['source_urls'] = []
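+ # Local file:// entries are kept verbatim; remote URIs are trimmed to
+ # the fetch URL by dropping any ;key=value parameters.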
+ for url in (rd.getVar('SRC_URI') or '').split():
+ if not url.startswith('file://'):
+ url = url.split(';')[0]
+ rvalues[pn]['source_urls'].append(url)
+
+ if config['packageconfig_opts'] == 'yes':
+ rvalues[pn]['packageconfig_opts'] = OrderedDict()
+ for key in rd.getVarFlags('PACKAGECONFIG').keys():
+ if key == 'doc':
+ continue
+ rvalues[pn]['packageconfig_opts'][key] = rd.getVarFlag('PACKAGECONFIG', key)
+
+ if config['patches'] == 'yes':
+ patches = oe.recipeutils.get_recipe_patches(rd)
+ rvalues[pn]['patches'] = []
+ if patches:
+ recipeoutdir = os.path.join(tmpoutdir, pn, 'patches')
+ bb.utils.mkdirhier(recipeoutdir)
+ for patch in patches:
+ # Patches may be in other layers too
+ patchlayerdir = oe.recipeutils.find_layerdir(patch)
+ # patchlayerdir will be None for remote patches, which we ignore
+ # (since currently they are considered as part of sources)
+ if patchlayerdir:
+ rvalues[pn]['patches'].append((os.path.basename(patchlayerdir), os.path.relpath(patch, patchlayerdir)))
+ shutil.copy(patch, recipeoutdir)
+
+ if config['packagedir'] == 'yes':
+ pn_dir = os.path.join(tmpoutdir, pn)
+ bb.utils.mkdirhier(pn_dir)
+ with open(os.path.join(pn_dir, 'recipe.json'), 'w') as f:
+ json.dump(rvalues[pn], f, indent=2)
+
+ with open(os.path.join(tmpoutdir, 'recipes.json'), 'w') as f:
+ json.dump(rvalues, f, indent=2)
+
+ if args.output:
+ outname = os.path.basename(args.output)
+ else:
+ outname = os.path.splitext(os.path.basename(args.manifest))[0]
+ if outname.endswith('.tar.gz'):
+ outname = outname[:-7]
+ elif outname.endswith('.tgz'):
+ outname = outname[:-4]
+
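+ # Normalize the chosen name into a .tar.gz path: strip a trailing
+ # separator and append a compression suffix if one is missing.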
+ tarfn = outname
+ if tarfn.endswith(os.sep):
+ tarfn = tarfn[:-1]
+ if not tarfn.endswith(('.tar.gz', '.tgz')):
+ tarfn += '.tar.gz'
+ with open(tarfn, 'wb') as f:
+ with tarfile.open(None, "w:gz", f) as tar:
+ tar.add(tmpoutdir, outname)
+ finally:
+ shutil.rmtree(tmpoutdir)
+
+
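+# Example invocations (recipe/package names are illustrative):
+#   image-manifest recipe-info busybox
+#   image-manifest list-depends --native busybox
+#   image-manifest list-packages core-image-minimal-qemux86-64.manifest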
+def main():
+ parser = argparse_oe.ArgumentParser(description="Image manifest utility",
+ epilog="Use %(prog)s <subcommand> --help to get help on a specific command")
+ parser.add_argument('-d', '--debug', help='Enable debug output', action='store_true')
+ parser.add_argument('-q', '--quiet', help='Print only errors', action='store_true')
+ subparsers = parser.add_subparsers(dest="subparser_name", title='subcommands', metavar='<subcommand>')
+ subparsers.required = True
+
+ # get recipe info
+ parser_get_recipes = subparsers.add_parser('recipe-info',
+ help='Get recipe info',
+ description='Get recipe information for a package')
+ parser_get_recipes.add_argument('package', help='Package name')
+ parser_get_recipes.set_defaults(func=get_recipe)
+
+ # list build dependencies
+ parser_pkg_dep = subparsers.add_parser('list-depends',
+ help='List dependencies',
+ description='List dependencies required to build the package')
+ parser_pkg_dep.add_argument('--native', help='Also print native and cross packages', action='store_true')
+ parser_pkg_dep.add_argument('package', help='Package name')
+ parser_pkg_dep.set_defaults(func=pkg_dependencies)
+
+ # list recipes
+ parser_recipes = subparsers.add_parser('list-recipes',
+ help='List recipes producing packages within an image',
+ description='Lists recipes producing the packages that went into an image, using the manifest and pkgdata')
+ parser_recipes.add_argument('manifest', help='Manifest file')
+ parser_recipes.set_defaults(func=list_recipes)
+
+ # list packages
+ parser_packages = subparsers.add_parser('list-packages',
+ help='List packages within an image',
+ description='Lists packages that went into an image, using the manifest')
+ parser_packages.add_argument('manifest', help='Manifest file')
+ parser_packages.set_defaults(func=list_packages)
+
+ # list layers
+ parser_layers = subparsers.add_parser('list-layers',
+ help='List included layers',
+ description='Lists included layers')
+ parser_layers.add_argument('-o', '--output', help='Output file - defaults to stdout if not specified',
+ default=sys.stdout, type=argparse.FileType('w'))
+ parser_layers.set_defaults(func=list_layers)
+
+ # dump default configuration file
+ parser_dconfig = subparsers.add_parser('dump-config',
+ help='Dump default config',
+ description='Dump default config to default_config.json')
+ parser_dconfig.set_defaults(func=dump_config)
+
+ # export recipe info for packages in manifest
+ parser_export = subparsers.add_parser('manifest-info',
+ help='Export recipe info for a manifest',
+ description='Export recipe information using the manifest')
+ parser_export.add_argument('-c', '--config', help='Load config from JSON file')
+ parser_export.add_argument('-o', '--output', help='Output file (tarball) - defaults to manifest name if not specified')
+ parser_export.add_argument('manifest', help='Manifest file')
+ parser_export.set_defaults(func=export_manifest_info)
+
+ args = parser.parse_args()
+
+ if args.debug:
+ logger.setLevel(logging.DEBUG)
+ logger.debug("Debug Enabled")
+ elif args.quiet:
+ logger.setLevel(logging.ERROR)
+
+ ret = args.func(args)
+
+ return ret
+
+
+if __name__ == "__main__":
+ try:
+ ret = main()
+ except Exception:
+ ret = 1
+ import traceback
+ traceback.print_exc()
+ sys.exit(ret)
diff --git a/scripts/contrib/list-packageconfig-flags.py b/scripts/contrib/list-packageconfig-flags.py
index 7ce718624a..bb288e9099 100755
--- a/scripts/contrib/list-packageconfig-flags.py
+++ b/scripts/contrib/list-packageconfig-flags.py
@@ -1,21 +1,10 @@
#!/usr/bin/env python3
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software Foundation.
-#
# Copyright (C) 2013 Wind River Systems, Inc.
# Copyright (C) 2014 Intel Corporation
#
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
# - list available recipes which have PACKAGECONFIG flags
# - list available PACKAGECONFIG flags and all affected recipes
# - list all recipes and PACKAGECONFIG information
@@ -44,7 +33,7 @@ import bb.tinfoil
def get_fnlist(bbhandler, pkg_pn, preferred):
''' Get all recipe file names '''
if preferred:
- (latest_versions, preferred_versions) = bb.providers.findProviders(bbhandler.config_data, bbhandler.cooker.recipecaches[''], pkg_pn)
+ (latest_versions, preferred_versions, required_versions) = bb.providers.findProviders(bbhandler.config_data, bbhandler.cooker.recipecaches[''], pkg_pn)
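+ # findProviders() now also returns required versions; they are unused here.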
fn_list = []
for pn in sorted(pkg_pn):
@@ -76,7 +65,7 @@ def collect_pkgs(data_dict):
for fn in data_dict:
pkgconfigflags = data_dict[fn].getVarFlags("PACKAGECONFIG")
pkgconfigflags.pop('doc', None)
- pkgname = data_dict[fn].getVar("P")
+ pkgname = data_dict[fn].getVar("PN")
pkg_dict[pkgname] = sorted(pkgconfigflags.keys())
return pkg_dict
diff --git a/scripts/contrib/mkefidisk.sh b/scripts/contrib/mkefidisk.sh
deleted file mode 100755
index ac4ec9c7fb..0000000000
--- a/scripts/contrib/mkefidisk.sh
+++ /dev/null
@@ -1,464 +0,0 @@
-#!/bin/sh
-#
-# Copyright (c) 2012, Intel Corporation.
-# All rights reserved.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
-# the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-#
-
-LANG=C
-
-echo
-echo "WARNING: This script is deprecated and will be removed soon."
-echo "Please consider using wic EFI images instead."
-echo
-
-# Set to 1 to enable additional output
-DEBUG=0
-OUT="/dev/null"
-
-#
-# Defaults
-#
-# 20 Mb for the boot partition
-BOOT_SIZE=20
-# 5% for swap
-SWAP_RATIO=5
-
-# Cleanup after die()
-cleanup() {
- debug "Syncing and unmounting devices"
- # Unmount anything we mounted
- unmount $ROOTFS_MNT || error "Failed to unmount $ROOTFS_MNT"
- unmount $BOOTFS_MNT || error "Failed to unmount $BOOTFS_MNT"
- unmount $HDDIMG_ROOTFS_MNT || error "Failed to unmount $HDDIMG_ROOTFS_MNT"
- unmount $HDDIMG_MNT || error "Failed to unmount $HDDIMG_MNT"
-
- # Remove the TMPDIR
- debug "Removing temporary files"
- if [ -d "$TMPDIR" ]; then
- rm -rf $TMPDIR || error "Failed to remove $TMPDIR"
- fi
-}
-
-trap 'die "Signal Received, Aborting..."' HUP INT TERM
-
-# Logging routines
-WARNINGS=0
-ERRORS=0
-CLEAR="$(tput sgr0)"
-INFO="$(tput bold)"
-RED="$(tput setaf 1)$(tput bold)"
-GREEN="$(tput setaf 2)$(tput bold)"
-YELLOW="$(tput setaf 3)$(tput bold)"
-info() {
- echo "${INFO}$1${CLEAR}"
-}
-error() {
- ERRORS=$((ERRORS+1))
- echo "${RED}$1${CLEAR}"
-}
-warn() {
- WARNINGS=$((WARNINGS+1))
- echo "${YELLOW}$1${CLEAR}"
-}
-success() {
- echo "${GREEN}$1${CLEAR}"
-}
-die() {
- error "$1"
- cleanup
- exit 1
-}
-debug() {
- if [ $DEBUG -eq 1 ]; then
- echo "$1"
- fi
-}
-
-usage() {
- echo "Usage: $(basename $0) [-v] DEVICE HDDIMG TARGET_DEVICE"
- echo " -v: Verbose debug"
- echo " DEVICE: The device to write the image to, e.g. /dev/sdh"
- echo " HDDIMG: The hddimg file to generate the efi disk from"
- echo " TARGET_DEVICE: The device the target will boot from, e.g. /dev/mmcblk0"
-}
-
-image_details() {
- IMG=$1
- info "Image details"
- echo " image: $(stat --printf '%N\n' $IMG)"
- echo " size: $(stat -L --printf '%s bytes\n' $IMG)"
- echo " modified: $(stat -L --printf '%y\n' $IMG)"
- echo " type: $(file -L -b $IMG)"
- echo ""
-}
-
-device_details() {
- DEV=$1
- BLOCK_SIZE=512
-
- info "Device details"
- echo " device: $DEVICE"
- if [ -f "/sys/class/block/$DEV/device/vendor" ]; then
- echo " vendor: $(cat /sys/class/block/$DEV/device/vendor)"
- else
- echo " vendor: UNKOWN"
- fi
- if [ -f "/sys/class/block/$DEV/device/model" ]; then
- echo " model: $(cat /sys/class/block/$DEV/device/model)"
- else
- echo " model: UNKNOWN"
- fi
- if [ -f "/sys/class/block/$DEV/size" ]; then
- echo " size: $(($(cat /sys/class/block/$DEV/size) * $BLOCK_SIZE)) bytes"
- else
- echo " size: UNKNOWN"
- fi
- echo ""
-}
-
-unmount_device() {
- grep -q $DEVICE /proc/mounts
- if [ $? -eq 0 ]; then
- warn "$DEVICE listed in /proc/mounts, attempting to unmount"
- umount $DEVICE* 2>/dev/null
- return $?
- fi
- return 0
-}
-
-unmount() {
- if [ "$1" = "" ] ; then
- return 0
- fi
- grep -q $1 /proc/mounts
- if [ $? -eq 0 ]; then
- debug "Unmounting $1"
- umount $1
- return $?
- fi
- return 0
-}
-
-#
-# Parse and validate arguments
-#
-if [ $# -lt 3 ] || [ $# -gt 4 ]; then
- if [ $# -eq 1 ]; then
- AVAILABLE_DISK=`lsblk | grep "disk" | cut -f 1 -d " "`
- X=0
- for disk in `echo $AVAILABLE_DISK`; do
- mounted=`lsblk /dev/$disk | awk {'print $7'} | sed "s/MOUNTPOINT//"`
- if [ -z "$mounted" ]; then
- UNMOUNTED_AVAILABLES="$UNMOUNTED_AVAILABLES /dev/$disk"
- info "$X - /dev/$disk"
- X=`expr $X + 1`
- fi
- done
- if [ $X -eq 0 ]; then
- die "No unmounted device found."
- fi
- read -p "Choose unmounted device number: " DISK_NUMBER
- X=0
- for line in `echo $UNMOUNTED_AVAILABLES`; do
- if [ $DISK_NUMBER -eq $X ]; then
- DISK_TO_BE_FLASHED=$line
- break
- else
- X=`expr $X + 1`
- fi
- done
- if [ -z "$DISK_TO_BE_FLASHED" ]; then
- die "Option \"$DISK_NUMBER\" is invalid. Choose a valid option"
- else
- if [ -z `echo $DISK_TO_BE_FLASHED | grep "mmc"` ]; then
- TARGET_TO_BE_BOOT="/dev/sda"
- else
- TARGET_TO_BE_BOOT="/dev/mmcblk0"
- fi
- fi
- echo ""
- echo "Choose a name of the device that will be boot from"
- echo -n "Recommended name is: "
- info "$TARGET_TO_BE_BOOT"
- read -p "Is target device okay? [y/N]: " RESPONSE
- if [ "$RESPONSE" != "y" ]; then
- read -p "Choose target device name: " TARGET_TO_BE_BOOT
- fi
- echo ""
- if [ -z "$TARGET_TO_BE_BOOT" ]; then
- die "Error: choose a valid target name"
- fi
- else
- usage
- exit 1
- fi
-fi
-
-if [ "$1" = "-v" ]; then
- DEBUG=1
- OUT="1"
- shift
-fi
-
-if [ -z "$AVAILABLE_DISK" ]; then
- DEVICE=$1
- HDDIMG=$2
- TARGET_DEVICE=$3
-else
- DEVICE=$DISK_TO_BE_FLASHED
- HDDIMG=$1
- TARGET_DEVICE=$TARGET_TO_BE_BOOT
-fi
-
-LINK=$(readlink $DEVICE)
-if [ $? -eq 0 ]; then
- DEVICE="$LINK"
-fi
-
-if [ ! -w "$DEVICE" ]; then
- usage
- if [ ! -e "${DEVICE}" ] ; then
- die "Device $DEVICE cannot be found"
- else
- die "Device $DEVICE is not writable (need to run under sudo?)"
- fi
-fi
-
-if [ ! -e "$HDDIMG" ]; then
- usage
- die "HDDIMG $HDDIMG does not exist"
-fi
-
-#
-# Ensure the hddimg is not mounted
-#
-unmount "$HDDIMG" || die "Failed to unmount $HDDIMG"
-
-#
-# Check if any $DEVICE partitions are mounted
-#
-unmount_device || die "Failed to unmount $DEVICE"
-
-#
-# Confirm device with user
-#
-image_details $HDDIMG
-device_details $(basename $DEVICE)
-echo -n "${INFO}Prepare EFI image on $DEVICE [y/N]?${CLEAR} "
-read RESPONSE
-if [ "$RESPONSE" != "y" ]; then
- echo "Image creation aborted"
- exit 0
-fi
-
-
-#
-# Prepare the temporary working space
-#
-TMPDIR=$(mktemp -d mkefidisk-XXX) || die "Failed to create temporary mounting directory."
-HDDIMG_MNT=$TMPDIR/hddimg
-HDDIMG_ROOTFS_MNT=$TMPDIR/hddimg_rootfs
-ROOTFS_MNT=$TMPDIR/rootfs
-BOOTFS_MNT=$TMPDIR/bootfs
-mkdir $HDDIMG_MNT || die "Failed to create $HDDIMG_MNT"
-mkdir $HDDIMG_ROOTFS_MNT || die "Failed to create $HDDIMG_ROOTFS_MNT"
-mkdir $ROOTFS_MNT || die "Failed to create $ROOTFS_MNT"
-mkdir $BOOTFS_MNT || die "Failed to create $BOOTFS_MNT"
-
-
-#
-# Partition $DEVICE
-#
-DEVICE_SIZE=$(parted -s $DEVICE unit mb print | grep ^Disk | cut -d" " -f 3 | sed -e "s/MB//")
-# If the device size is not reported there may not be a valid label
-if [ "$DEVICE_SIZE" = "" ] ; then
- parted -s $DEVICE mklabel msdos || die "Failed to create MSDOS partition table"
- DEVICE_SIZE=$(parted -s $DEVICE unit mb print | grep ^Disk | cut -d" " -f 3 | sed -e "s/MB//")
-fi
-SWAP_SIZE=$((DEVICE_SIZE*SWAP_RATIO/100))
-ROOTFS_SIZE=$((DEVICE_SIZE-BOOT_SIZE-SWAP_SIZE))
-ROOTFS_START=$((BOOT_SIZE))
-ROOTFS_END=$((ROOTFS_START+ROOTFS_SIZE))
-SWAP_START=$((ROOTFS_END))
-
-# MMC devices use a partition prefix character 'p'
-PART_PREFIX=""
-if [ ! "${DEVICE#/dev/mmcblk}" = "${DEVICE}" ] || [ ! "${DEVICE#/dev/loop}" = "${DEVICE}" ]; then
- PART_PREFIX="p"
-fi
-BOOTFS=$DEVICE${PART_PREFIX}1
-ROOTFS=$DEVICE${PART_PREFIX}2
-SWAP=$DEVICE${PART_PREFIX}3
-
-TARGET_PART_PREFIX=""
-if [ ! "${TARGET_DEVICE#/dev/mmcblk}" = "${TARGET_DEVICE}" ]; then
- TARGET_PART_PREFIX="p"
-fi
-TARGET_ROOTFS=$TARGET_DEVICE${TARGET_PART_PREFIX}2
-TARGET_SWAP=$TARGET_DEVICE${TARGET_PART_PREFIX}3
-
-echo ""
-info "Boot partition size: $BOOT_SIZE MB ($BOOTFS)"
-info "ROOTFS partition size: $ROOTFS_SIZE MB ($ROOTFS)"
-info "Swap partition size: $SWAP_SIZE MB ($SWAP)"
-echo ""
-
-# Use MSDOS by default as GPT cannot be reliably distributed in disk image form
-# as it requires the backup table to be on the last block of the device, which
-# of course varies from device to device.
-
-info "Partitioning installation media ($DEVICE)"
-
-debug "Deleting partition table on $DEVICE"
-dd if=/dev/zero of=$DEVICE bs=512 count=2 >$OUT 2>&1 || die "Failed to zero beginning of $DEVICE"
-
-debug "Creating new partition table (MSDOS) on $DEVICE"
-parted -s $DEVICE mklabel msdos >$OUT 2>&1 || die "Failed to create MSDOS partition table"
-
-debug "Creating boot partition on $BOOTFS"
-parted -s $DEVICE mkpart primary 0% $BOOT_SIZE >$OUT 2>&1 || die "Failed to create BOOT partition"
-
-debug "Enabling boot flag on $BOOTFS"
-parted -s $DEVICE set 1 boot on >$OUT 2>&1 || die "Failed to enable boot flag"
-
-debug "Creating ROOTFS partition on $ROOTFS"
-parted -s $DEVICE mkpart primary $ROOTFS_START $ROOTFS_END >$OUT 2>&1 || die "Failed to create ROOTFS partition"
-
-debug "Creating swap partition on $SWAP"
-parted -s $DEVICE mkpart primary $SWAP_START 100% >$OUT 2>&1 || die "Failed to create SWAP partition"
-
-if [ $DEBUG -eq 1 ]; then
- parted -s $DEVICE print
-fi
-
-
-#
-# Check if any $DEVICE partitions are mounted after partitioning
-#
-unmount_device || die "Failed to unmount $DEVICE partitions"
-
-
-#
-# Format $DEVICE partitions
-#
-info "Formatting partitions"
-debug "Formatting $BOOTFS as vfat"
-if [ ! "${DEVICE#/dev/loop}" = "${DEVICE}" ]; then
- mkfs.vfat -I $BOOTFS -n "EFI" >$OUT 2>&1 || die "Failed to format $BOOTFS"
-else
- mkfs.vfat $BOOTFS -n "EFI" >$OUT 2>&1 || die "Failed to format $BOOTFS"
-fi
-
-debug "Formatting $ROOTFS as ext3"
-mkfs.ext3 -F $ROOTFS -L "ROOT" >$OUT 2>&1 || die "Failed to format $ROOTFS"
-
-debug "Formatting swap partition ($SWAP)"
-mkswap $SWAP >$OUT 2>&1 || die "Failed to prepare swap"
-
-
-#
-# Installing to $DEVICE
-#
-debug "Mounting images and device in preparation for installation"
-mount -o ro,loop $HDDIMG $HDDIMG_MNT >$OUT 2>&1 || error "Failed to mount $HDDIMG"
-mount -o ro,loop $HDDIMG_MNT/rootfs.img $HDDIMG_ROOTFS_MNT >$OUT 2>&1 || error "Failed to mount rootfs.img"
-mount $ROOTFS $ROOTFS_MNT >$OUT 2>&1 || error "Failed to mount $ROOTFS on $ROOTFS_MNT"
-mount $BOOTFS $BOOTFS_MNT >$OUT 2>&1 || error "Failed to mount $BOOTFS on $BOOTFS_MNT"
-
-info "Preparing boot partition"
-EFIDIR="$BOOTFS_MNT/EFI/BOOT"
-cp $HDDIMG_MNT/vmlinuz $BOOTFS_MNT >$OUT 2>&1 || error "Failed to copy vmlinuz"
-# Copy the efi loader and configs (booti*.efi and grub.cfg if it exists)
-cp -r $HDDIMG_MNT/EFI $BOOTFS_MNT >$OUT 2>&1 || error "Failed to copy EFI dir"
-# Silently ignore a missing systemd-boot loader dir (we might just be a GRUB image)
-cp -r $HDDIMG_MNT/loader $BOOTFS_MNT >$OUT 2>&1
-
-# Update the boot loaders configurations for an installed image
-# Remove any existing root= kernel parameters and:
-# o Add a root= parameter with the target rootfs
-# o Specify ro so fsck can be run during boot
-# o Specify rootwait in case the target media is an asyncronous block device
-# such as MMC or USB disks
-# o Specify "quiet" to minimize boot time when using slow serial consoles
-
-# Look for a GRUB installation
-GRUB_CFG="$EFIDIR/grub.cfg"
-if [ -e "$GRUB_CFG" ]; then
- info "Configuring GRUB"
- # Delete the install entry
- sed -i "/menuentry 'install'/,/^}/d" $GRUB_CFG
- # Delete the initrd lines
- sed -i "/initrd /d" $GRUB_CFG
- # Delete any LABEL= strings
- sed -i "s/ LABEL=[^ ]*/ /" $GRUB_CFG
-
- sed -i "s@ root=[^ ]*@ @" $GRUB_CFG
- sed -i "s@vmlinuz @vmlinuz root=$TARGET_ROOTFS ro rootwait console=ttyS0 console=tty0 @" $GRUB_CFG
-fi
-
-# Look for a systemd-boot installation
-SYSTEMD_BOOT_ENTRIES="$BOOTFS_MNT/loader/entries"
-SYSTEMD_BOOT_CFG="$SYSTEMD_BOOT_ENTRIES/boot.conf"
-if [ -d "$SYSTEMD_BOOT_ENTRIES" ]; then
- info "Configuring SystemD-boot"
- # remove the install target if it exists
- rm $SYSTEMD_BOOT_ENTRIES/install.conf >$OUT 2>&1
-
- if [ ! -e "$SYSTEMD_BOOT_CFG" ]; then
- echo "ERROR: $SYSTEMD_BOOT_CFG not found"
- fi
-
- sed -i "/initrd /d" $SYSTEMD_BOOT_CFG
- sed -i "s@ root=[^ ]*@ @" $SYSTEMD_BOOT_CFG
- sed -i "s@options *LABEL=boot @options LABEL=Boot root=$TARGET_ROOTFS ro rootwait console=ttyS0 console=tty0 @" $SYSTEMD_BOOT_CFG
-fi
-
-# Ensure we have at least one EFI bootloader configured
-if [ ! -e $GRUB_CFG ] && [ ! -e $SYSTEMD_BOOT_CFG ]; then
- die "No EFI bootloader configuration found"
-fi
-
-
-info "Copying ROOTFS files (this may take a while)"
-cp -a $HDDIMG_ROOTFS_MNT/* $ROOTFS_MNT >$OUT 2>&1 || die "Root FS copy failed"
-
-echo "$TARGET_SWAP swap swap defaults 0 0" >> $ROOTFS_MNT/etc/fstab
-
-# We dont want udev to mount our root device while we're booting...
-if [ -d $ROOTFS_MNT/etc/udev/ ] ; then
- echo "$TARGET_DEVICE" >> $ROOTFS_MNT/etc/udev/mount.blacklist
-fi
-
-# Add startup.nsh script for automated boot
-printf "fs0:\%s\BOOT\%s\n" "EFI" "bootx64.efi" > $BOOTFS_MNT/startup.nsh
-
-
-# Call cleanup to unmount devices and images and remove the TMPDIR
-cleanup
-
-echo ""
-if [ $WARNINGS -ne 0 ] && [ $ERRORS -eq 0 ]; then
- echo "${YELLOW}Installation completed with warnings${CLEAR}"
- echo "${YELLOW}Warnings: $WARNINGS${CLEAR}"
-elif [ $ERRORS -ne 0 ]; then
- echo "${RED}Installation encountered errors${CLEAR}"
- echo "${RED}Errors: $ERRORS${CLEAR}"
- echo "${YELLOW}Warnings: $WARNINGS${CLEAR}"
-else
- success "Installation completed successfully"
-fi
-echo ""
diff --git a/scripts/contrib/oe-build-perf-report-email.py b/scripts/contrib/oe-build-perf-report-email.py
index 913847bbed..7192113c28 100755
--- a/scripts/contrib/oe-build-perf-report-email.py
+++ b/scripts/contrib/oe-build-perf-report-email.py
@@ -4,15 +4,9 @@
#
# Copyright (c) 2017, Intel Corporation.
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
+# SPDX-License-Identifier: GPL-2.0-only
#
+
import argparse
import base64
import logging
@@ -25,8 +19,6 @@ import socket
import subprocess
import sys
import tempfile
-from email.mime.image import MIMEImage
-from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
@@ -35,30 +27,6 @@ logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s")
log = logging.getLogger('oe-build-perf-report')
-# Find js scaper script
-SCRAPE_JS = os.path.join(os.path.dirname(__file__), '..', 'lib', 'build_perf',
- 'scrape-html-report.js')
-if not os.path.isfile(SCRAPE_JS):
- log.error("Unableto find oe-build-perf-report-scrape.js")
- sys.exit(1)
-
-
-class ReportError(Exception):
- """Local errors"""
- pass
-
-
-def check_utils():
- """Check that all needed utils are installed in the system"""
- missing = []
- for cmd in ('phantomjs', 'optipng'):
- if not shutil.which(cmd):
- missing.append(cmd)
- if missing:
- log.error("The following tools are missing: %s", ' '.join(missing))
- sys.exit(1)
-
-
def parse_args(argv):
"""Parse command line arguments"""
description = """Email build perf test report"""
@@ -83,137 +51,19 @@ def parse_args(argv):
"the email parts")
parser.add_argument('--text',
help="Plain text message")
- parser.add_argument('--html',
- help="HTML peport generated by oe-build-perf-report")
- parser.add_argument('--phantomjs-args', action='append',
- help="Extra command line arguments passed to PhantomJS")
args = parser.parse_args(argv)
- if not args.html and not args.text:
- parser.error("Please specify --html and/or --text")
+ if not args.text:
+ parser.error("Please specify --text")
return args
-def decode_png(infile, outfile):
- """Parse/decode/optimize png data from a html element"""
- with open(infile) as f:
- raw_data = f.read()
-
- # Grab raw base64 data
- b64_data = re.sub('^.*href="data:image/png;base64,', '', raw_data, 1)
- b64_data = re.sub('">.+$', '', b64_data, 1)
-
- # Replace file with proper decoded png
- with open(outfile, 'wb') as f:
- f.write(base64.b64decode(b64_data))
-
- subprocess.check_output(['optipng', outfile], stderr=subprocess.STDOUT)
-
-
-def mangle_html_report(infile, outfile, pngs):
- """Mangle html file into a email compatible format"""
- paste = True
- png_dir = os.path.dirname(outfile)
- with open(infile) as f_in:
- with open(outfile, 'w') as f_out:
- for line in f_in.readlines():
- stripped = line.strip()
- # Strip out scripts
- if stripped == '<!--START-OF-SCRIPTS-->':
- paste = False
- elif stripped == '<!--END-OF-SCRIPTS-->':
- paste = True
- elif paste:
- if re.match('^.+href="data:image/png;base64', stripped):
- # Strip out encoded pngs (as they're huge in size)
- continue
- elif 'www.gstatic.com' in stripped:
- # HACK: drop references to external static pages
- continue
-
- # Replace charts with <img> elements
- match = re.match('<div id="(?P<id>\w+)"', stripped)
- if match and match.group('id') in pngs:
- f_out.write('<img src="cid:{}"\n'.format(match.group('id')))
- else:
- f_out.write(line)
-
-
-def scrape_html_report(report, outdir, phantomjs_extra_args=None):
- """Scrape html report into a format sendable by email"""
- tmpdir = tempfile.mkdtemp(dir='.')
- log.debug("Using tmpdir %s for phantomjs output", tmpdir)
-
- if not os.path.isdir(outdir):
- os.mkdir(outdir)
- if os.path.splitext(report)[1] not in ('.html', '.htm'):
- raise ReportError("Invalid file extension for report, needs to be "
- "'.html' or '.htm'")
-
- try:
- log.info("Scraping HTML report with PhangomJS")
- extra_args = phantomjs_extra_args if phantomjs_extra_args else []
- subprocess.check_output(['phantomjs', '--debug=true'] + extra_args +
- [SCRAPE_JS, report, tmpdir],
- stderr=subprocess.STDOUT)
-
- pngs = []
- images = []
- for fname in os.listdir(tmpdir):
- base, ext = os.path.splitext(fname)
- if ext == '.png':
- log.debug("Decoding %s", fname)
- decode_png(os.path.join(tmpdir, fname),
- os.path.join(outdir, fname))
- pngs.append(base)
- images.append(fname)
- elif ext in ('.html', '.htm'):
- report_file = fname
- else:
- log.warning("Unknown file extension: '%s'", ext)
- #shutil.move(os.path.join(tmpdir, fname), outdir)
-
- log.debug("Mangling html report file %s", report_file)
- mangle_html_report(os.path.join(tmpdir, report_file),
- os.path.join(outdir, report_file), pngs)
- return (os.path.join(outdir, report_file),
- [os.path.join(outdir, i) for i in images])
- finally:
- shutil.rmtree(tmpdir)
-
-def send_email(text_fn, html_fn, image_fns, subject, recipients, copy=[],
- blind_copy=[]):
- """Send email"""
+def send_email(text_fn, subject, recipients, copy=[], blind_copy=[]):
# Generate email message
- text_msg = html_msg = None
- if text_fn:
- with open(text_fn) as f:
- text_msg = MIMEText("Yocto build performance test report.\n" +
- f.read(), 'plain')
- if html_fn:
- html_msg = msg = MIMEMultipart('related')
- with open(html_fn) as f:
- html_msg.attach(MIMEText(f.read(), 'html'))
- for img_fn in image_fns:
- # Expect that content id is same as the filename
- cid = os.path.splitext(os.path.basename(img_fn))[0]
- with open(img_fn, 'rb') as f:
- image_msg = MIMEImage(f.read())
- image_msg['Content-ID'] = '<{}>'.format(cid)
- html_msg.attach(image_msg)
-
- if text_msg and html_msg:
- msg = MIMEMultipart('alternative')
- msg.attach(text_msg)
- msg.attach(html_msg)
- elif text_msg:
- msg = text_msg
- elif html_msg:
- msg = html_msg
- else:
- raise ReportError("Neither plain text nor html body specified")
+ with open(text_fn) as f:
+ msg = MIMEText("Yocto build performance test report.\n" + f.read(), 'plain')
pw_data = pwd.getpwuid(os.getuid())
full_name = pw_data.pw_gecos.split(',')[0]
@@ -240,8 +90,6 @@ def main(argv=None):
if args.debug:
log.setLevel(logging.DEBUG)
- check_utils()
-
if args.outdir:
outdir = args.outdir
if not os.path.exists(outdir):
@@ -251,25 +99,16 @@ def main(argv=None):
try:
log.debug("Storing email parts in %s", outdir)
- html_report = images = None
- if args.html:
- html_report, images = scrape_html_report(args.html, outdir,
- args.phantomjs_args)
-
if args.to:
log.info("Sending email to %s", ', '.join(args.to))
if args.cc:
log.info("Copying to %s", ', '.join(args.cc))
if args.bcc:
log.info("Blind copying to %s", ', '.join(args.bcc))
- send_email(args.text, html_report, images, args.subject,
- args.to, args.cc, args.bcc)
+ send_email(args.text, args.subject, args.to, args.cc, args.bcc)
except subprocess.CalledProcessError as err:
log.error("%s, with output:\n%s", str(err), err.output.decode())
return 1
- except ReportError as err:
- log.error(err)
- return 1
finally:
if not args.outdir:
log.debug("Wiping %s", outdir)
diff --git a/scripts/contrib/patchreview.py b/scripts/contrib/patchreview.py
index 1086c95f67..bceae06561 100755
--- a/scripts/contrib/patchreview.py
+++ b/scripts/contrib/patchreview.py
@@ -1,10 +1,25 @@
#! /usr/bin/env python3
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import argparse
+import collections
+import json
+import os
+import os.path
+import pathlib
+import re
+import subprocess
# TODO
# - option to just list all broken files
# - test suite
# - validate signed-off-by
+status_values = ("accepted", "pending", "inappropriate", "backport", "submitted", "denied", "inactive-upstream")
class Result:
# Whether the patch has an Upstream-Status or not
@@ -29,27 +44,25 @@ def blame_patch(patch):
From a patch filename, return a list of "commit summary (author name <author
email>)" strings representing the history.
"""
- import subprocess
return subprocess.check_output(("git", "log",
"--follow", "--find-renames", "--diff-filter=A",
"--format=%s (%aN <%aE>)",
"--", patch)).decode("utf-8").splitlines()
def patchreview(patches):
- import re
# General pattern: start of line, optional whitespace, tag with optional
# hyphen or spaces, maybe a colon, some whitespace, then the value, all case
# insensitive.
sob_re = re.compile(r"^[\t ]*(Signed[-_ ]off[-_ ]by:?)[\t ]*(.+)", re.IGNORECASE | re.MULTILINE)
- status_re = re.compile(r"^[\t ]*(Upstream[-_ ]Status:?)[\t ]*(\w*)", re.IGNORECASE | re.MULTILINE)
- status_values = ("accepted", "pending", "inappropriate", "backport", "submitted", "denied")
+ status_re = re.compile(r"^[\t ]*(Upstream[-_ ]Status:?)[\t ]*([\w-]*)", re.IGNORECASE | re.MULTILINE)
cve_tag_re = re.compile(r"^[\t ]*(CVE:)[\t ]*(.*)", re.IGNORECASE | re.MULTILINE)
cve_re = re.compile(r"cve-[0-9]{4}-[0-9]{4,6}", re.IGNORECASE)
results = {}
for patch in patches:
+
result = Result()
results[patch] = result
@@ -122,6 +135,8 @@ def analyse(results, want_blame=False, verbose=True):
missing_status += 1
if r.malformed_upstream_status or r.unknown_upstream_status:
malformed_status += 1
+ # Count patches with malformed or unknown status as pending
+ pending_patches += 1
if r.missing_cve:
missing_cve += 1
if r.upstream_status == "pending":
@@ -132,7 +147,6 @@ def analyse(results, want_blame=False, verbose=True):
need_blame = True
if verbose:
print("Missing Signed-off-by tag (%s)" % patch)
-
if r.malformed_sob:
need_blame = True
if verbose:
@@ -185,27 +199,79 @@ Patches in Pending state: %s""" % (total_patches,
def histogram(results):
from toolz import recipes, dicttoolz
import math
+
counts = recipes.countby(lambda r: r.upstream_status, results.values())
bars = dicttoolz.valmap(lambda v: "#" * int(math.ceil(float(v) / len(results) * 100)), counts)
for k in bars:
print("%-20s %s (%d)" % (k.capitalize() if k else "No status", bars[k], counts[k]))
+def find_layers(candidate):
+ # candidate can either be the path to a layer directly (e.g. meta-intel), or a
+ # repository that contains other layers (e.g. meta-arm). We can determine which by
+ # looking for a conf/layer.conf file. If that file exists then it's a layer;
+ # otherwise it's a repository of layers and we can assume they're called
+ # meta-*.
+
+ if (candidate / "conf" / "layer.conf").exists():
+ return [candidate.absolute()]
+ else:
+ return [d.absolute() for d in candidate.iterdir() if d.is_dir() and (d.name == "meta" or d.name.startswith("meta-"))]
+
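+# For example, find_layers(pathlib.Path("meta-openembedded")) returns the
+# contained meta-* directories, while a path to a single layer returns just
+# that layer (illustrative).
+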
+# TODO these don't actually handle dynamic-layers/
+
+def gather_patches(layers):
+ patches = []
+ for directory in layers:
+ filenames = subprocess.check_output(("git", "-C", directory, "ls-files", "recipes-*/**/*.patch", "recipes-*/**/*.diff"), universal_newlines=True).split()
+ patches += [os.path.join(directory, f) for f in filenames]
+ return patches
+
+def count_recipes(layers):
+ count = 0
+ for directory in layers:
+ output = subprocess.check_output(["git", "-C", directory, "ls-files", "recipes-*/**/*.bb"], universal_newlines=True)
+ count += len(output.splitlines())
+ return count
if __name__ == "__main__":
- import argparse, subprocess, os
-
args = argparse.ArgumentParser(description="Patch Review Tool")
args.add_argument("-b", "--blame", action="store_true", help="show blame for malformed patches")
args.add_argument("-v", "--verbose", action="store_true", help="show per-patch results")
args.add_argument("-g", "--histogram", action="store_true", help="show patch histogram")
- args.add_argument("directory", nargs="?", help="directory to scan")
+ args.add_argument("-j", "--json", help="update JSON")
+ args.add_argument("directory", type=pathlib.Path, metavar="DIRECTORY", help="directory to scan (layer, or repository of layers)")
args = args.parse_args()
- if args.directory:
- os.chdir(args.directory)
- patches = subprocess.check_output(("git", "ls-files", "*.patch", "*.diff")).decode("utf-8").split()
+ layers = find_layers(args.directory)
+ print(f"Found layers {' '.join((d.name for d in layers))}")
+ patches = gather_patches(layers)
results = patchreview(patches)
analyse(results, want_blame=args.blame, verbose=args.verbose)
+
+ if args.json:
+ if os.path.isfile(args.json):
+ with open(args.json) as f:
+ data = json.load(f)
+ else:
+ data = []
+
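+ # Each run appends one Counter row (missing keys count as 0), so the
+ # JSON file accumulates a per-commit time series across invocations.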
+ row = collections.Counter()
+ row["total"] = len(results)
+ row["date"] = subprocess.check_output(["git", "-C", args.directory, "show", "-s", "--pretty=format:%cd", "--date=format:%s"], universal_newlines=True).strip()
+ row["commit"] = subprocess.check_output(["git", "-C", args.directory, "rev-parse", "HEAD"], universal_newlines=True).strip()
+ row['commit_count'] = subprocess.check_output(["git", "-C", args.directory, "rev-list", "--count", "HEAD"], universal_newlines=True).strip()
+ row['recipe_count'] = count_recipes(layers)
+
+ for r in results.values():
+ if r.upstream_status in status_values:
+ row[r.upstream_status] += 1
+ if r.malformed_upstream_status or r.missing_upstream_status:
+ row['malformed-upstream-status'] += 1
+ if r.malformed_sob or r.missing_sob:
+ row['malformed-sob'] += 1
+
+ data.append(row)
+ with open(args.json, "w") as f:
+ json.dump(data, f, sort_keys=True, indent="\t")
+
if args.histogram:
print()
histogram(results)
diff --git a/scripts/contrib/patchtest.sh b/scripts/contrib/patchtest.sh
index 7fe566666e..b1e1ea334b 100755
--- a/scripts/contrib/patchtest.sh
+++ b/scripts/contrib/patchtest.sh
@@ -1,26 +1,12 @@
#!/bin/bash
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# patchtest: Run patchtest on commits starting at master
#
# Copyright (c) 2017, Intel Corporation.
-# All rights reserved.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+# SPDX-License-Identifier: GPL-2.0-or-later
#
+
set -o errexit
# Default values
diff --git a/scripts/contrib/serdevtry b/scripts/contrib/serdevtry
index 74bd7b7161..9144730e7e 100755
--- a/scripts/contrib/serdevtry
+++ b/scripts/contrib/serdevtry
@@ -2,7 +2,8 @@
# Copyright (C) 2014 Intel Corporation
#
-# Released under the MIT license (see COPYING.MIT)
+# SPDX-License-Identifier: MIT
+#
if [ "$1" = "" -o "$1" = "--help" ] ; then
echo "Usage: $0 <serial terminal command>"
diff --git a/scripts/contrib/test_build_time.sh b/scripts/contrib/test_build_time.sh
index 9e5725ae54..4012ac7ba7 100755
--- a/scripts/contrib/test_build_time.sh
+++ b/scripts/contrib/test_build_time.sh
@@ -3,22 +3,8 @@
# Build performance regression test script
#
# Copyright 2011 Intel Corporation
-# All rights reserved.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
+# SPDX-License-Identifier: GPL-2.0-or-later
#
# DESCRIPTION
# This script is intended to be used in conjunction with "git bisect run"
@@ -111,7 +97,7 @@ if [ $? != 0 ] ; then
exit 251
fi
-if [ "$BB_ENV_EXTRAWHITE" != "" ] ; then
+if [ "BB_ENV_PASSTHROUGH_ADDITIONS" != "" ] ; then
echo "WARNING: you are running after sourcing the build environment script, this is not recommended"
fi
diff --git a/scripts/contrib/test_build_time_worker.sh b/scripts/contrib/test_build_time_worker.sh
index 8e20a9ea7d..a2879d2336 100755
--- a/scripts/contrib/test_build_time_worker.sh
+++ b/scripts/contrib/test_build_time_worker.sh
@@ -1,5 +1,9 @@
#!/bin/bash
-
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
# This is an example script to be used in conjunction with test_build_time.sh
if [ "$TEST_BUILDDIR" = "" ] ; then
diff --git a/scripts/contrib/uncovered b/scripts/contrib/uncovered
index a8399ad170..f16128cb7a 100755
--- a/scripts/contrib/uncovered
+++ b/scripts/contrib/uncovered
@@ -1,23 +1,10 @@
#!/bin/bash -eur
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# Find python modules uncovered by oe-seltest
#
# Copyright (c) 2016, Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# SPDX-License-Identifier: GPL-2.0-only
#
# Author: Ed Bartosh <ed.bartosh@linux.intel.com>
#
diff --git a/scripts/contrib/verify-homepage.py b/scripts/contrib/verify-homepage.py
index 76f1749cfa..a90b5010bc 100755
--- a/scripts/contrib/verify-homepage.py
+++ b/scripts/contrib/verify-homepage.py
@@ -1,5 +1,9 @@
#!/usr/bin/env python3
-
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
# This script can be used to verify HOMEPAGE values for all recipes in
# the current configuration.
# The result is influenced by network environment, since the timeout of connect url is 5 seconds as default.
@@ -27,7 +31,7 @@ logger = scriptutils.logger_create('verify_homepage')
def wgetHomepage(pn, homepage):
result = subprocess.call('wget ' + '-q -T 5 -t 1 --spider ' + homepage, shell = True)
if result:
- logger.warn("%s: failed to verify HOMEPAGE: %s " % (pn, homepage))
+ logger.warning("%s: failed to verify HOMEPAGE: %s " % (pn, homepage))
return 1
else:
return 0
diff --git a/scripts/cp-noerror b/scripts/cp-noerror
index 35eb211be3..13a098eee0 100755
--- a/scripts/cp-noerror
+++ b/scripts/cp-noerror
@@ -1,5 +1,9 @@
#!/usr/bin/env python3
#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
# Allow copying of $1 to $2 but if files in $1 disappear during the copy operation,
# don't error.
# Also don't error if $1 disappears.
diff --git a/scripts/create-pull-request b/scripts/create-pull-request
index 280880b3f7..885105fab3 100755
--- a/scripts/create-pull-request
+++ b/scripts/create-pull-request
@@ -1,21 +1,8 @@
#!/bin/sh
#
# Copyright (c) 2010-2013, Intel Corporation.
-# All Rights Reserved
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
-# the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+# SPDX-License-Identifier: GPL-2.0-or-later
#
#
@@ -136,20 +123,12 @@ fi
# Rewrite private URLs to public URLs
# Determine the repository name for use in the WEB_URL later
-case "$REMOTE_URL" in
-*@*)
- USER_RE="[A-Za-z0-9_.@][A-Za-z0-9_.@-]*\$\?"
- PROTO_RE="[a-z][a-z+]*://"
- GIT_RE="\(^\($PROTO_RE\)\?$USER_RE@\)\([^:/]*\)[:/]\(.*\)"
- REMOTE_URL=${REMOTE_URL%.git}
- REMOTE_REPO=$(echo $REMOTE_URL | sed "s#$GIT_RE#\4#")
- REMOTE_URL=$(echo $REMOTE_URL | sed "s#$GIT_RE#git://\3/\4#")
- ;;
-*)
- echo "WARNING: Unrecognized remote URL: $REMOTE_URL"
- echo " The pull and browse URLs will likely be incorrect"
- ;;
-esac
+USER_RE="[A-Za-z0-9_.@][A-Za-z0-9_.@-]*\$\?"
+PROTO_RE="[a-z][a-z+]*://"
+GIT_RE="\(^\($PROTO_RE\)\?\)\($USER_RE@\)\?\([^:/]*\)[:/]\(.*\)"
+REMOTE_URL=${REMOTE_URL%.git}
+REMOTE_REPO=$(echo $REMOTE_URL | sed "s#$GIT_RE#\5#")
+REMOTE_URL=$(echo $REMOTE_URL | sed "s#$GIT_RE#https://\4/\5#")
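+# e.g. ssh://git@push.example.com/repo.git becomes https://push.example.com/repo
+# (illustrative; optional user@ prefixes and .git suffixes are stripped)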
if [ -z "$BRANCH" ]; then
BRANCH=$(git branch | grep -e "^\* " | cut -d' ' -f2)
@@ -170,13 +149,10 @@ fi
WEB_URL=""
case "$REMOTE_URL" in
*git.yoctoproject.org*)
- WEB_URL="http://git.yoctoproject.org/cgit.cgi/$REMOTE_REPO/log/?h=$BRANCH"
- ;;
- *git.pokylinux.org*)
- WEB_URL="http://git.pokylinux.org/cgit.cgi/$REMOTE_REPO/log/?h=$BRANCH"
+ WEB_URL="https://git.yoctoproject.org/$REMOTE_REPO/log/?h=$BRANCH"
;;
*git.openembedded.org*)
- WEB_URL="http://cgit.openembedded.org/$REMOTE_REPO/log/?h=$BRANCH"
+ WEB_URL="https://git.openembedded.org/$REMOTE_REPO/log/?h=$BRANCH"
;;
*github.com*)
WEB_URL="https://github.com/$REMOTE_REPO/tree/$BRANCH"
@@ -278,7 +254,7 @@ fi
# Replace the SUBJECT token with it.
if [ -n "$SUBJECT" ]; then
- sed -i -e "s/\*\*\* SUBJECT HERE \*\*\*/$SUBJECT/" "$CL"
+ sed -i -e "s\`\*\*\* SUBJECT HERE \*\*\*\`$SUBJECT\`" "$CL"
fi
diff --git a/scripts/cross-intercept/ar b/scripts/cross-intercept/ar
new file mode 120000
index 0000000000..bc68ffd7a2
--- /dev/null
+++ b/scripts/cross-intercept/ar
@@ -0,0 +1 @@
+../native-intercept/ar
\ No newline at end of file
diff --git a/scripts/crosstap b/scripts/crosstap
index e33fa4ad46..5aa72f14d4 100755
--- a/scripts/crosstap
+++ b/scripts/crosstap
@@ -17,20 +17,9 @@
# related to kernel.
#
# Copyright (c) 2018, Cisco Systems.
-# All rights reserved.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-# See the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import sys
import re
@@ -364,7 +353,7 @@ bitbake workspace.
Anything after -- option is passed directly to stap.
-Legacy script invocation style supported but depreciated:
+Legacy script invocation style supported but deprecated:
%prog <user@hostname> <sytemtap-script> [systemtap options]
To enable most out of systemtap the following site.conf or local.conf
@@ -376,13 +365,13 @@ IMAGE_FSTYPES_DEBUGFS = "tar.bz2"
USER_CLASSES += "image-combined-dbg"
# enables kernel debug symbols
-KERNEL_EXTRA_FEATURES_append = " features/debug/debug-kernel.scc"
+KERNEL_EXTRA_FEATURES:append = " features/debug/debug-kernel.scc"
# minimal, just run-time systemtap configuration in target image
-PACKAGECONFIG_pn-systemtap = "monitor"
+PACKAGECONFIG:pn-systemtap = "monitor"
# add systemtap run-time into target image if it is not there yet
-IMAGE_INSTALL_append = " systemtap"
+IMAGE_INSTALL:append = " systemtap"
"""
option_parser = optparse.OptionParser(usage=usage)
diff --git a/scripts/devtool b/scripts/devtool
index d681a1929a..60ea3e8298 100755
--- a/scripts/devtool
+++ b/scripts/devtool
@@ -4,18 +4,8 @@
#
# Copyright (C) 2014-2015 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import sys
import os
@@ -51,7 +41,7 @@ class ConfigHandler(object):
def __init__(self, filename):
self.config_file = filename
- self.config_obj = configparser.SafeConfigParser()
+ self.config_obj = configparser.ConfigParser()
def get(self, section, option, default=None):
try:
@@ -110,10 +100,11 @@ def read_workspace():
_enable_workspace_layer(config.workspace_path, config, basepath)
logger.debug('Reading workspace in %s' % config.workspace_path)
- externalsrc_re = re.compile(r'^EXTERNALSRC(_pn-([^ =]+))? *= *"([^"]*)"$')
+ externalsrc_re = re.compile(r'^EXTERNALSRC(:pn-([^ =]+))? *= *"([^"]*)"$')
for fn in glob.glob(os.path.join(config.workspace_path, 'appends', '*.bbappend')):
with open(fn, 'r') as f:
pnvalues = {}
+ pn = None
for line in f:
res = externalsrc_re.match(line.rstrip())
if res:
@@ -133,6 +124,9 @@ def read_workspace():
elif line.startswith('# srctreebase: '):
pnvalues['srctreebase'] = line.split(':', 1)[1].strip()
if pnvalues:
+ if not pn:
+ raise DevtoolError("Found *.bbappend in %s, but could not determine EXTERNALSRC:pn-*. "
+ "Maybe still using old syntax?" % config.workspace_path)
if not pnvalues.get('srctreebase', None):
pnvalues['srctreebase'] = pnvalues['srctree']
logger.debug('Found recipe %s' % pnvalues)
@@ -143,17 +137,27 @@ def create_workspace(args, config, basepath, workspace):
workspacedir = os.path.abspath(args.layerpath)
else:
workspacedir = os.path.abspath(os.path.join(basepath, 'workspace'))
- _create_workspace(workspacedir, config, basepath)
+ layerseries = None
+ if args.layerseries:
+ layerseries = args.layerseries
+ _create_workspace(workspacedir, config, basepath, layerseries)
if not args.create_only:
_enable_workspace_layer(workspacedir, config, basepath)
-def _create_workspace(workspacedir, config, basepath):
+def _create_workspace(workspacedir, config, basepath, layerseries=None):
import bb
confdir = os.path.join(workspacedir, 'conf')
if os.path.exists(os.path.join(confdir, 'layer.conf')):
logger.info('Specified workspace already set up, leaving as-is')
else:
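+ # Fall back to the layer series the current build declares so the
+ # workspace layer's LAYERSERIES_COMPAT matches the running release.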
+ if not layerseries:
+ tinfoil = setup_tinfoil(config_only=True, basepath=basepath)
+ try:
+ layerseries = tinfoil.config_data.getVar('LAYERSERIES_CORENAMES')
+ finally:
+ tinfoil.shutdown()
+
# Add a config file
bb.utils.mkdirhier(confdir)
with open(os.path.join(confdir, 'layer.conf'), 'w') as f:
@@ -165,7 +169,7 @@ def _create_workspace(workspacedir, config, basepath):
f.write('BBFILE_PATTERN_workspacelayer = "^$' + '{LAYERDIR}/"\n')
f.write('BBFILE_PATTERN_IGNORE_EMPTY_workspacelayer = "1"\n')
f.write('BBFILE_PRIORITY_workspacelayer = "99"\n')
- f.write('LAYERSERIES_COMPAT_workspacelayer = "${LAYERSERIES_COMPAT_core}"\n')
+ f.write('LAYERSERIES_COMPAT_workspacelayer = "%s"\n' % layerseries)
# Add a README file
with open(os.path.join(workspacedir, 'README'), 'w') as f:
f.write('This layer was created by the OpenEmbedded devtool utility in order to\n')
@@ -295,8 +299,9 @@ def main():
return 2
# Search BBPATH first to allow layers to override plugins in scripts_path
- for path in global_args.bbpath.split(':') + [scripts_path]:
- pluginpath = os.path.join(path, 'lib', 'devtool')
+ pluginpaths = [os.path.join(path, 'lib', 'devtool') for path in global_args.bbpath.split(':') + [scripts_path]]
+ context.pluginpaths = pluginpaths
+ for pluginpath in pluginpaths:
scriptutils.load_plugins(logger, plugins, pluginpath)
subparsers = parser.add_subparsers(dest="subparser_name", title='subcommands', metavar='<subcommand>')
@@ -315,6 +320,7 @@ def main():
description='Sets up a new workspace. NOTE: other devtool subcommands will create a workspace automatically as needed, so you only need to use %(prog)s if you want to specify where the workspace should be located.',
group='advanced')
parser_create_workspace.add_argument('layerpath', nargs='?', help='Path in which the workspace layer should be created')
+ parser_create_workspace.add_argument('--layerseries', help='Layer series the workspace should be set to be compatible with')
parser_create_workspace.add_argument('--create-only', action="store_true", help='Only create the workspace layer, do not alter configuration')
parser_create_workspace.set_defaults(func=create_workspace, no_workspace=True)
@@ -324,10 +330,10 @@ def main():
args = parser.parse_args(unparsed_args, namespace=global_args)
- if not getattr(args, 'no_workspace', False):
- read_workspace()
-
try:
+ if not getattr(args, 'no_workspace', False):
+ read_workspace()
+
ret = args.func(args, config, basepath, workspace)
except DevtoolError as err:
if str(err):
diff --git a/scripts/distro/build-recipe-list.py b/scripts/distro/build-recipe-list.py
deleted file mode 100755
index 2162764850..0000000000
--- a/scripts/distro/build-recipe-list.py
+++ /dev/null
@@ -1,129 +0,0 @@
-#!/usr/bin/env python3
-#
-# Copyright (c) 2017, Intel Corporation.
-#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
-#
-
-import os
-import shutil
-import csv
-import sys
-import argparse
-
-__version__ = "0.1.0"
-
-# set of BPNs
-recipenames = set()
-# map of recipe -> data
-allrecipes = {}
-
-def make_bpn(recipe):
- prefixes = ("nativesdk-",)
- suffixes = ("-native", "-cross", "-initial", "-intermediate", "-crosssdk", "-cross-canadian")
- for ix in prefixes + suffixes:
- if ix in recipe:
- recipe = recipe.replace(ix, "")
- return recipe
-
-def gather_recipes(rows):
- for row in rows:
- recipe = row[0]
- bpn = make_bpn(recipe)
- if bpn not in recipenames:
- recipenames.add(bpn)
- if recipe not in allrecipes:
- allrecipes[recipe] = row
-
-def generate_recipe_list():
- # machine list
- machine_list = ( "qemuarm64", "qemuarm", "qemumips64", "qemumips", "qemuppc", "qemux86-64", "qemux86" )
- # set filename format
- fnformat = 'distrodata.%s.csv'
-
- # store all data files in distrodata
- datadir = 'distrodata'
-
- # create the directory if it does not exists
- if not os.path.exists(datadir):
- os.mkdir(datadir)
-
- # doing bitbake distrodata
- for machine in machine_list:
- os.system('MACHINE='+ machine + ' bitbake -k universe -c distrodata')
- shutil.copy('tmp/log/distrodata.csv', 'distrodata/' + fnformat % machine)
-
- for machine in machine_list:
- with open('distrodata/' + fnformat % machine) as f:
- reader = csv.reader(f)
- rows = reader.__iter__()
- gather_recipes(rows)
-
- with open('recipe-list.txt', 'w') as f:
- for recipe in sorted(recipenames):
- f.write("%s\n" % recipe)
- print("file : recipe-list.txt is created with %d entries." % len(recipenames))
-
- with open('all-recipe-list.txt', 'w') as f:
- for recipe, row in sorted(allrecipes.items()):
- f.write("%s\n" % ','.join(row))
-
-
-def diff_for_new_recipes(recipe1, recipe2):
- prev_recipe_path = recipe1 + '/'
- curr_recipe_path = recipe2 + '/'
- if not os.path.isfile(prev_recipe_path + 'recipe-list.txt') or not os.path.isfile(curr_recipe_path + 'recipe-list.txt'):
- print("recipe files do not exists. please verify that the file exists.")
- exit(1)
-
- import csv
-
- prev = []
- new = []
-
- with open(prev_recipe_path + 'recipe-list.txt') as f:
- prev = f.readlines()
-
- with open(curr_recipe_path + 'recipe-list.txt') as f:
- new = f.readlines()
-
- updates = []
- for pn in new:
- if not pn in prev:
- updates.append(pn.rstrip())
-
- allrecipe = []
- with open(recipe1 + '_' + recipe2 + '_new_recipe_list.txt','w') as dr:
- with open(curr_recipe_path + 'all-recipe-list.txt') as f:
- reader = csv.reader(f, delimiter=',')
- for row in reader:
- if row[0] in updates:
- dr.write("%s,%s,%s" % (row[0], row[3], row[5]))
- if len(row[9:]) > 0:
- dr.write(",%s" % ','.join(row[9:]))
- dr.write("\n")
-
-def main(argv):
- if argv[0] == "generate_recipe_list":
- generate_recipe_list()
- elif argv[0] == "compare_recipe":
- diff_for_new_recipes(argv[1], argv[2])
- else:
- print("no such option. choose either 'generate_recipe_list' or 'compare_recipe'")
-
- exit(0)
-
-if __name__ == "__main__":
- try:
- sys.exit(main(sys.argv[1:]))
- except Exception as e:
- print("Exception :", e)
- sys.exit(1)
-
diff --git a/scripts/distro/distrocompare.sh b/scripts/distro/distrocompare.sh
deleted file mode 100755
index 908760c235..0000000000
--- a/scripts/distro/distrocompare.sh
+++ /dev/null
@@ -1,123 +0,0 @@
-#!/usr/bin/env bash
-#
-# Copyright (c) 2017, Intel Corporation.
-# All rights reserved.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-#
-# distrocompare.sh : provides capability to get a list of new packages
-# based on two distinct branches. This script takes
-# 2 parameters; either a commit-ish or a branch name
-#
-# To run : distrocompare.sh <older hash> <newer hash>
-# E.g. distrocompare.sh morty 92aa0e7
-# E.g. distrocompare.sh morty pyro
-#
-
-# get input as version
-previous_version=$1
-current_version=$2
-
-# set previous and current version
-if [ -z "$2" ]; then
- previous_version=$1
- current_version="current"
-fi
-
-# get script location. That's where the source supposedly located as well.
-scriptdir="$( realpath $(dirname "${BASH_SOURCE[0]}" ))"
-sourcedir="$( realpath $scriptdir/../.. )"
-
-# create working directory
-workdir=$(mktemp -d)
-
-# prepare to rollback to the branch if not similar
-branch=`cd $sourcedir; git branch | grep \* | cut -d ' ' -f2`
-
-# set current workdir to store final result
-currentworkdir=`pwd`
-
-# persists the file after local repo change
-cp $scriptdir/build-recipe-list.py $workdir
-
-#==================================================================
-
-function bake_distrodata {
- # get to source directory of the git
- cd $sourcedir
-
- # change the branch / commit. Do not change if input is current
- if [ "$1" != "current" ]; then
- output=$(git checkout $1 2>&1)
-
- # exit if git fails
- if [[ $output == *"error"* ]]; then
- echo "git error : $output"
- echo "exiting ... "
- rm -rf $workdir
- exit
- fi
- fi
-
- # make tmp as workdir
- cd $workdir
-
- # source oe-init to generate a new build folder
- source $sourcedir/oe-init-build-env $1
-
- # if file already exists with distrodata, do not append
- if ! grep -q "distrodata" "conf/local.conf"; then
- # add inherit distrodata to local.conf to enable distrodata feature
- echo 'INHERIT += "distrodata"' >> conf/local.conf
- fi
-
- # use from tmp
- $workdir/build-recipe-list.py generate_recipe_list
-}
-
-bake_distrodata $previous_version
-bake_distrodata $current_version
-
-#==================================================================
-
-cd $workdir
-
-# compare the 2 generated recipe-list.txt
-$workdir/build-recipe-list.py compare_recipe $previous_version $current_version
-
-# copy final result to current working directory
-cp $workdir/*_new_recipe_list.txt $currentworkdir
-
-if [ $? -ne 0 ]; then
- rm -rf $workdir/$previous_version
- rm -rf $workdir/$current_version
- rm $workdir/build-recipe-list.py
- # preserve the result in /tmp/distrodata if fail to copy the result over
- exit
-fi
-
-# cleanup
-rm -rf $workdir
-
-# perform rollback branch
-cd $sourcedir
-currentbranch=`git branch | grep \* | cut -d ' ' -f2`
-if [ "$currentbranch" != "$branch" ]; then
- git checkout $branch
-fi
-
-cd $currentworkdir
-
-#==================================================================
diff --git a/scripts/esdk-tools/devtool b/scripts/esdk-tools/devtool
new file mode 120000
index 0000000000..176a01ca68
--- /dev/null
+++ b/scripts/esdk-tools/devtool
@@ -0,0 +1 @@
+../devtool \ No newline at end of file
diff --git a/scripts/esdk-tools/oe-find-native-sysroot b/scripts/esdk-tools/oe-find-native-sysroot
new file mode 120000
index 0000000000..d3493f3310
--- /dev/null
+++ b/scripts/esdk-tools/oe-find-native-sysroot
@@ -0,0 +1 @@
+../oe-find-native-sysroot \ No newline at end of file
diff --git a/scripts/esdk-tools/recipetool b/scripts/esdk-tools/recipetool
new file mode 120000
index 0000000000..60a95dd936
--- /dev/null
+++ b/scripts/esdk-tools/recipetool
@@ -0,0 +1 @@
+../recipetool \ No newline at end of file
diff --git a/scripts/esdk-tools/runqemu b/scripts/esdk-tools/runqemu
new file mode 120000
index 0000000000..ae7e7ad7c2
--- /dev/null
+++ b/scripts/esdk-tools/runqemu
@@ -0,0 +1 @@
+../runqemu \ No newline at end of file
diff --git a/scripts/esdk-tools/runqemu-addptable2image b/scripts/esdk-tools/runqemu-addptable2image
new file mode 120000
index 0000000000..afcd00e79d
--- /dev/null
+++ b/scripts/esdk-tools/runqemu-addptable2image
@@ -0,0 +1 @@
+../runqemu-addptable2image \ No newline at end of file
diff --git a/scripts/esdk-tools/runqemu-export-rootfs b/scripts/esdk-tools/runqemu-export-rootfs
new file mode 120000
index 0000000000..a26fcf6110
--- /dev/null
+++ b/scripts/esdk-tools/runqemu-export-rootfs
@@ -0,0 +1 @@
+../runqemu-export-rootfs \ No newline at end of file
diff --git a/scripts/esdk-tools/runqemu-extract-sdk b/scripts/esdk-tools/runqemu-extract-sdk
new file mode 120000
index 0000000000..cc858aaad5
--- /dev/null
+++ b/scripts/esdk-tools/runqemu-extract-sdk
@@ -0,0 +1 @@
+../runqemu-extract-sdk \ No newline at end of file
diff --git a/scripts/esdk-tools/runqemu-gen-tapdevs b/scripts/esdk-tools/runqemu-gen-tapdevs
new file mode 120000
index 0000000000..dbdf79134c
--- /dev/null
+++ b/scripts/esdk-tools/runqemu-gen-tapdevs
@@ -0,0 +1 @@
+../runqemu-gen-tapdevs \ No newline at end of file
diff --git a/scripts/esdk-tools/runqemu-ifdown b/scripts/esdk-tools/runqemu-ifdown
new file mode 120000
index 0000000000..0097693ca3
--- /dev/null
+++ b/scripts/esdk-tools/runqemu-ifdown
@@ -0,0 +1 @@
+../runqemu-ifdown \ No newline at end of file
diff --git a/scripts/esdk-tools/runqemu-ifup b/scripts/esdk-tools/runqemu-ifup
new file mode 120000
index 0000000000..41026d2c0a
--- /dev/null
+++ b/scripts/esdk-tools/runqemu-ifup
@@ -0,0 +1 @@
+../runqemu-ifup \ No newline at end of file
diff --git a/scripts/esdk-tools/wic b/scripts/esdk-tools/wic
new file mode 120000
index 0000000000..a9d908aa25
--- /dev/null
+++ b/scripts/esdk-tools/wic
@@ -0,0 +1 @@
+../wic \ No newline at end of file
diff --git a/scripts/gen-lockedsig-cache b/scripts/gen-lockedsig-cache
index 6765891d19..023015ec41 100755
--- a/scripts/gen-lockedsig-cache
+++ b/scripts/gen-lockedsig-cache
@@ -1,10 +1,16 @@
#!/usr/bin/env python3
+#
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
import os
import sys
-import glob
import shutil
import errno
+import time
def mkdir(d):
try:
@@ -13,6 +19,38 @@ def mkdir(d):
if e.errno != errno.EEXIST:
raise e
+# extract the hash from the filename: the field past the last colon, up to the first underscore
+def extract_sha(filename):
+ return filename.split(':')[7].split('_')[0]
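+#
+# Illustrative example (hypothetical field layout and hash value):
+#   "sstate:zlib:core2-64-poky-linux:1.3:r0:core2-64:3:ab12cd34ef56_populate_sysroot.tar.zst"
+# split(':')[7] selects "ab12cd34ef56_populate_sysroot.tar.zst" and
+# split('_')[0] trims that to the bare hash "ab12cd34ef56".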
+
+# get all files in a directory, extract hash and make
+# a map from hash to the list of files with that hash
+def map_sha_to_files(dir_, prefix, sha_map):
+ sstate_prefix_path = dir_ + '/' + prefix + '/'
+ if not os.path.exists(sstate_prefix_path):
+ return
+ sstate_files = os.listdir(sstate_prefix_path)
+ for f in sstate_files:
+ try:
+ sha = extract_sha(f)
+ if sha not in sha_map:
+ sha_map[sha] = []
+ sha_map[sha].append(sstate_prefix_path + f)
+ except IndexError:
+ continue
+
+# given a prefix, build a map from hash to the list of files
+def build_sha_cache(prefix):
+ sha_map = {}
+
+ sstate_dir = sys.argv[2]
+ map_sha_to_files(sstate_dir, prefix, sha_map)
+
+ native_sstate_dir = sys.argv[2] + '/' + sys.argv[4]
+ map_sha_to_files(native_sstate_dir, prefix, sha_map)
+
+ return sha_map
+
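+# Illustrative use (hypothetical prefix and hash values): a signature lookup
+# then becomes a plain dict access instead of a glob over the sstate tree:
+#   sha_map = build_sha_cache("ab/cd")
+#   files_for_hash = sha_map.get("abcd1234ef", [])
+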
if len(sys.argv) < 5:
print("Incorrect number of arguments specified")
print("syntax: gen-lockedsig-cache <locked-sigs.inc> <input-cachedir> <output-cachedir> <nativelsbstring> [filterfile]")
@@ -38,18 +76,28 @@ with open(sys.argv[1]) as f:
sigs.append(sig)
print('Gathering file list')
+start_time = time.perf_counter()
files = set()
+sstate_content_cache = {}
for s in sigs:
- p = sys.argv[2] + "/" + s[:2] + "/*" + s + "*"
- files |= set(glob.glob(p))
- p = sys.argv[2] + "/%s/" % sys.argv[4] + s[:2] + "/*" + s + "*"
- files |= set(glob.glob(p))
+ prefix = s[:2]
+ prefix2 = s[2:4]
+ if prefix not in sstate_content_cache:
+ sstate_content_cache[prefix] = {}
+ if prefix2 not in sstate_content_cache[prefix]:
+ sstate_content_cache[prefix][prefix2] = build_sha_cache(prefix + "/" + prefix2)
+
+ if s in sstate_content_cache[prefix][prefix2]:
+ for f in sstate_content_cache[prefix][prefix2][s]:
+ files.add(f)
+
+elapsed = time.perf_counter() - start_time
+print("Gathering file list took %.1fs" % elapsed)
print('Processing files')
for f in files:
sys.stdout.write('Processing %s... ' % f)
- _, ext = os.path.splitext(f)
- if not ext in ['.tgz', '.siginfo', '.sig']:
+ if not f.endswith(('.tar.zst', '.siginfo', '.sig')):
# Most likely a temp file, skip it
print('skipping')
continue
diff --git a/scripts/gen-site-config b/scripts/gen-site-config
index 7da7a0bd8a..727b809c0f 100755
--- a/scripts/gen-site-config
+++ b/scripts/gen-site-config
@@ -1,18 +1,8 @@
#! /bin/sh
# Copyright (c) 2005-2008 Wind River Systems, Inc.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-# See the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
cat << EOF
AC_PREREQ(2.57)
diff --git a/scripts/git b/scripts/git
new file mode 100755
index 0000000000..689adbf9dd
--- /dev/null
+++ b/scripts/git
@@ -0,0 +1,30 @@
+#!/usr/bin/env python3
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+# Wrapper around 'git' that doesn't think we are root
+
+import os
+import shutil
+import sys
+
+os.environ['PSEUDO_UNLOAD'] = '1'
+
+# calculate path to the real 'git'
+path = os.environ['PATH']
+# we need to remove our path but also any other copy of this script which
+# may be present, e.g. in the eSDK.
+replacements = [os.path.dirname(sys.argv[0])]
+for p in path.split(":"):
+ if p.endswith("/scripts"):
+ replacements.append(p)
+for r in replacements:
+ path = path.replace(r, '/ignoreme')
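+# e.g. (illustrative) PATH="/work/poky/scripts:/usr/bin" with this wrapper in
+# /work/poky/scripts becomes "/ignoreme:/usr/bin", so shutil.which() below
+# resolves the real /usr/bin/git rather than this script.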
+real_git = shutil.which('git', path=path)
+
+if len(sys.argv) == 1:
+ os.execl(real_git, 'git')
+
+os.execv(real_git, sys.argv)
diff --git a/scripts/install-buildtools b/scripts/install-buildtools
new file mode 100755
index 0000000000..2218f3ffac
--- /dev/null
+++ b/scripts/install-buildtools
@@ -0,0 +1,357 @@
+#!/usr/bin/env python3
+
+# Buildtools and buildtools extended installer helper script
+#
+# Copyright (C) 2017-2020 Intel Corporation
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# NOTE: --with-extended-buildtools is on by default
+#
+# Example usage (extended buildtools from milestone):
+# (1) using --url and --filename
+# $ install-buildtools \
+# --url http://downloads.yoctoproject.org/releases/yocto/milestones/yocto-3.1_M3/buildtools \
+# --filename x86_64-buildtools-extended-nativesdk-standalone-3.0+snapshot-20200315.sh
+# (2) using --base-url, --release, --installer-version and --build-date
+# $ install-buildtools \
+# --base-url http://downloads.yoctoproject.org/releases/yocto \
+# --release yocto-3.1_M3 \
+#      --installer-version 3.0+snapshot \
+#      --build-date 20200315
+#
+# Example usage (standard buildtools from release):
+# (3) using --url and --filename
+# $ install-buildtools --without-extended-buildtools \
+# --url http://downloads.yoctoproject.org/releases/yocto/yocto-3.0.2/buildtools \
+# --filename x86_64-buildtools-nativesdk-standalone-3.0.2.sh
+# (4) using --base-url, --release and --installer-version
+# $ install-buildtools --without-extended-buildtools \
+# --base-url http://downloads.yoctoproject.org/releases/yocto \
+# --release yocto-3.0.2 \
+# --installer-version 3.0.2
+#
+
+import argparse
+import logging
+import os
+import platform
+import re
+import shutil
+import shlex
+import stat
+import subprocess
+import sys
+import tempfile
+from urllib.parse import quote
+
+scripts_path = os.path.dirname(os.path.realpath(__file__))
+lib_path = scripts_path + '/lib'
+sys.path = sys.path + [lib_path]
+import scriptutils
+import scriptpath
+
+
+PROGNAME = 'install-buildtools'
+logger = scriptutils.logger_create(PROGNAME, stream=sys.stdout)
+
+DEFAULT_INSTALL_DIR = os.path.join(os.path.split(scripts_path)[0],'buildtools')
+DEFAULT_BASE_URL = 'http://downloads.yoctoproject.org/releases/yocto'
+DEFAULT_RELEASE = 'yocto-4.1'
+DEFAULT_INSTALLER_VERSION = '4.1'
+DEFAULT_BUILDDATE = '202110XX'
+
+# Python version sanity check
+if not (sys.version_info.major == 3 and sys.version_info.minor >= 4):
+ logger.error("This script requires Python 3.4 or greater")
+ logger.error("You have Python %s.%s" %
+ (sys.version_info.major, sys.version_info.minor))
+ sys.exit(1)
+
+# The following three functions are copied directly from
+# bitbake/lib/bb/utils.py, in order to allow this script
+# to run on versions of python earlier than what bitbake
+# supports (e.g. less than Python 3.5 for YP 3.1 release)
+
+def _hasher(method, filename):
+ import mmap
+
+ with open(filename, "rb") as f:
+ try:
+ with mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) as mm:
+ for chunk in iter(lambda: mm.read(8192), b''):
+ method.update(chunk)
+ except ValueError:
+ # You can't mmap() an empty file so silence this exception
+ pass
+ return method.hexdigest()
+
+
+def md5_file(filename):
+ """
+ Return the hex string representation of the MD5 checksum of filename.
+ """
+ import hashlib
+ return _hasher(hashlib.md5(), filename)
+
+def sha256_file(filename):
+ """
+ Return the hex string representation of the 256-bit SHA checksum of
+ filename.
+ """
+ import hashlib
+ return _hasher(hashlib.sha256(), filename)
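+
+# Illustrative use: sha256_file(tmpbuildtools) returns the hex digest string
+# compared below against the value parsed from the fetched .sha256sum file.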
+
+
+def main():
+ global DEFAULT_INSTALL_DIR
+ global DEFAULT_BASE_URL
+ global DEFAULT_RELEASE
+ global DEFAULT_INSTALLER_VERSION
+ global DEFAULT_BUILDDATE
+ filename = ""
+ release = ""
+ buildtools_url = ""
+ install_dir = ""
+ arch = platform.machine()
+
+ parser = argparse.ArgumentParser(
+ description="Buildtools installation helper",
+ add_help=False)
+ parser.add_argument('-u', '--url',
+ help='URL from where to fetch buildtools SDK installer, not '
+ 'including filename (optional)\n'
+ 'Requires --filename.',
+ action='store')
+ parser.add_argument('-f', '--filename',
+ help='filename for the buildtools SDK installer to be installed '
+ '(optional)\nRequires --url',
+ action='store')
+ parser.add_argument('-d', '--directory',
+ default=DEFAULT_INSTALL_DIR,
+ help='directory where buildtools SDK will be installed (optional)',
+ action='store')
+ parser.add_argument('-r', '--release',
+ default=DEFAULT_RELEASE,
+ help='Yocto Project release string for SDK which will be '
+ 'installed (optional)',
+ action='store')
+ parser.add_argument('-V', '--installer-version',
+ default=DEFAULT_INSTALLER_VERSION,
+ help='version string for the SDK to be installed (optional)',
+ action='store')
+ parser.add_argument('-b', '--base-url',
+ default=DEFAULT_BASE_URL,
+ help='base URL from which to fetch SDK (optional)', action='store')
+ parser.add_argument('-t', '--build-date',
+ default=DEFAULT_BUILDDATE,
+ help='Build date of pre-release SDK (optional)', action='store')
+ group = parser.add_mutually_exclusive_group()
+ group.add_argument('--with-extended-buildtools', action='store_true',
+ dest='with_extended_buildtools',
+ default=True,
+ help='enable extended buildtools tarball (on by default)')
+ group.add_argument('--without-extended-buildtools', action='store_false',
+ dest='with_extended_buildtools',
+ help='disable extended buildtools (traditional buildtools tarball)')
+ group.add_argument('--make-only', action='store_true',
+ help='only install make tarball')
+ group = parser.add_mutually_exclusive_group()
+ group.add_argument('-c', '--check', help='enable checksum validation',
+ default=True, action='store_true')
+ group.add_argument('-n', '--no-check', help='disable checksum validation',
+ dest="check", action='store_false')
+ parser.add_argument('-D', '--debug', help='enable debug output',
+ action='store_true')
+ parser.add_argument('-q', '--quiet', help='print only errors',
+ action='store_true')
+
+ parser.add_argument('-h', '--help', action='help',
+ default=argparse.SUPPRESS,
+ help='show this help message and exit')
+
+ args = parser.parse_args()
+
+ if args.make_only:
+ args.with_extended_buildtools = False
+
+ if args.debug:
+ logger.setLevel(logging.DEBUG)
+ elif args.quiet:
+ logger.setLevel(logging.ERROR)
+
+ if args.url and args.filename:
+ logger.debug("--url and --filename detected. Ignoring --base-url "
+ "--release --installer-version arguments.")
+ filename = args.filename
+ buildtools_url = "%s/%s" % (args.url, filename)
+ else:
+ if args.base_url:
+ base_url = args.base_url
+ else:
+ base_url = DEFAULT_BASE_URL
+ if args.release:
+ # check if this is a pre-release "milestone" SDK
+ m = re.search(r"^(?P<distro>[a-zA-Z\-]+)(?P<version>[0-9.]+)(?P<milestone>_M[1-9])$",
+ args.release)
+ logger.debug("milestone regex: %s" % m)
+ if m and m.group('milestone'):
+ logger.debug("release[distro]: %s" % m.group('distro'))
+ logger.debug("release[version]: %s" % m.group('version'))
+ logger.debug("release[milestone]: %s" % m.group('milestone'))
+ if not args.build_date:
+ logger.error("Milestone installers require --build-date")
+ else:
+ if args.make_only:
+ filename = "%s-buildtools-make-nativesdk-standalone-%s-%s.sh" % (
+ arch, args.installer_version, args.build_date)
+ elif args.with_extended_buildtools:
+ filename = "%s-buildtools-extended-nativesdk-standalone-%s-%s.sh" % (
+ arch, args.installer_version, args.build_date)
+ else:
+ filename = "%s-buildtools-nativesdk-standalone-%s-%s.sh" % (
+ arch, args.installer_version, args.build_date)
+ safe_filename = quote(filename)
+ buildtools_url = "%s/milestones/%s/buildtools/%s" % (base_url, args.release, safe_filename)
+ # regular release SDK
+ else:
+ if args.make_only:
+ filename = "%s-buildtools-make-nativesdk-standalone-%s.sh" % (arch, args.installer_version)
+            elif args.with_extended_buildtools:
+ filename = "%s-buildtools-extended-nativesdk-standalone-%s.sh" % (arch, args.installer_version)
+ else:
+ filename = "%s-buildtools-nativesdk-standalone-%s.sh" % (arch, args.installer_version)
+ safe_filename = quote(filename)
+ buildtools_url = "%s/%s/buildtools/%s" % (base_url, args.release, safe_filename)
+
+ tmpsdk_dir = tempfile.mkdtemp()
+ try:
+ # Fetch installer
+ logger.info("Fetching buildtools installer")
+ tmpbuildtools = os.path.join(tmpsdk_dir, filename)
+ ret = subprocess.call("wget -q -O %s %s" %
+ (tmpbuildtools, buildtools_url), shell=True)
+ if ret != 0:
+ logger.error("Could not download file from %s" % buildtools_url)
+ return ret
+
+ # Verify checksum
+ if args.check:
+ logger.info("Fetching buildtools installer checksum")
+ checksum_type = ""
+ for checksum_type in ["md5sum", "sha256sum"]:
+ check_url = "{}.{}".format(buildtools_url, checksum_type)
+ checksum_filename = "{}.{}".format(filename, checksum_type)
+ tmpbuildtools_checksum = os.path.join(tmpsdk_dir, checksum_filename)
+ ret = subprocess.call("wget -q -O %s %s" %
+ (tmpbuildtools_checksum, check_url), shell=True)
+ if ret == 0:
+ break
+ else:
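+                    # for/else: reached only if the loop never hit `break`,
+                    # i.e. no checksum file could be downloaded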
+ if ret != 0:
+ logger.error("Could not download file from %s" % check_url)
+ return ret
+ regex = re.compile(r"^(?P<checksum>[0-9a-f]+)\s+(?P<path>.*/)?(?P<filename>.*)$")
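+            # matches a checksum line such as (illustrative):
+            #   "d41d8cd98f00b204e9800998ecf8427e  ./x86_64-buildtools-nativesdk-standalone-3.0.2.sh"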
+ with open(tmpbuildtools_checksum, 'rb') as f:
+ original = f.read()
+ m = re.search(regex, original.decode("utf-8"))
+ logger.debug("checksum regex match: %s" % m)
+ logger.debug("checksum: %s" % m.group('checksum'))
+ logger.debug("path: %s" % m.group('path'))
+ logger.debug("filename: %s" % m.group('filename'))
+ if filename != m.group('filename'):
+ logger.error("Filename does not match name in checksum")
+ return 1
+ checksum = m.group('checksum')
+ if checksum_type == "md5sum":
+ checksum_value = md5_file(tmpbuildtools)
+ else:
+ checksum_value = sha256_file(tmpbuildtools)
+ if checksum == checksum_value:
+ logger.info("Checksum success")
+ else:
+ logger.error("Checksum %s expected. Actual checksum is %s." %
+ (checksum, checksum_value))
+ return 1
+
+ # Make installer executable
+ logger.info("Making installer executable")
+ st = os.stat(tmpbuildtools)
+ os.chmod(tmpbuildtools, st.st_mode | stat.S_IEXEC)
+ logger.debug(os.stat(tmpbuildtools))
+ if args.directory:
+ install_dir = args.directory
+ ret = subprocess.call("%s -d %s -y" %
+ (tmpbuildtools, install_dir), shell=True)
+ else:
+ install_dir = "/opt/poky/%s" % args.installer_version
+ ret = subprocess.call("%s -y" % tmpbuildtools, shell=True)
+ if ret != 0:
+ logger.error("Could not run buildtools installer")
+ return ret
+
+ # Setup the environment
+ logger.info("Setting up the environment")
+ regex = re.compile(r'^(?P<export>export )?(?P<env_var>[A-Z_]+)=(?P<env_val>.+)$')
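+        # matches assignments such as (illustrative path):
+        #   export PATH=/opt/buildtools/sysroots/x86_64-pokysdk-linux/usr/bin:$PATH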
+ with open("%s/environment-setup-%s-pokysdk-linux" %
+ (install_dir, arch), 'rb') as f:
+ for line in f:
+ match = regex.search(line.decode('utf-8'))
+ logger.debug("export regex: %s" % match)
+ if match:
+ env_var = match.group('env_var')
+ logger.debug("env_var: %s" % env_var)
+ env_val = match.group('env_val')
+ logger.debug("env_val: %s" % env_val)
+ os.environ[env_var] = env_val
+
+ # Test installation
+ logger.info("Testing installation")
+ tool = ""
+ m = re.search("extended", tmpbuildtools)
+ logger.debug("extended regex: %s" % m)
+ if args.with_extended_buildtools and not m:
+ logger.info("Ignoring --with-extended-buildtools as filename "
+ "does not contain 'extended'")
+ if args.make_only:
+ tool = 'make'
+ elif args.with_extended_buildtools and m:
+ tool = 'gcc'
+ else:
+ tool = 'tar'
+ logger.debug("install_dir: %s" % install_dir)
+ cmd = shlex.split("/usr/bin/which %s" % tool)
+ logger.debug("cmd: %s" % cmd)
+ logger.debug("tool: %s" % tool)
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
+ output, errors = proc.communicate()
+ logger.debug("proc.args: %s" % proc.args)
+ logger.debug("proc.communicate(): output %s" % output)
+ logger.debug("proc.communicate(): errors %s" % errors)
+ which_tool = output.decode('utf-8')
+ logger.debug("which %s: %s" % (tool, which_tool))
+ ret = proc.returncode
+ if not which_tool.startswith(install_dir):
+ logger.error("Something went wrong: %s not found in %s" %
+ (tool, install_dir))
+ if ret != 0:
+ logger.error("Something went wrong: installation failed")
+ else:
+ logger.info("Installation successful. Remember to source the "
+ "environment setup script now and in any new session.")
+ return ret
+
+ finally:
+ # cleanup tmp directory
+ shutil.rmtree(tmpsdk_dir)
+
+
+if __name__ == '__main__':
+ try:
+ ret = main()
+ except Exception:
+ ret = 1
+ import traceback
+
+ traceback.print_exc()
+ sys.exit(ret)
diff --git a/scripts/lib/argparse_oe.py b/scripts/lib/argparse_oe.py
index 9bdfc1ceca..176b732bbc 100644
--- a/scripts/lib/argparse_oe.py
+++ b/scripts/lib/argparse_oe.py
@@ -1,3 +1,9 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
import sys
import argparse
from collections import defaultdict, OrderedDict
diff --git a/scripts/lib/build_perf/__init__.py b/scripts/lib/build_perf/__init__.py
index 1f8b729078..dcbb78042d 100644
--- a/scripts/lib/build_perf/__init__.py
+++ b/scripts/lib/build_perf/__init__.py
@@ -1,14 +1,7 @@
#
# Copyright (c) 2017, Intel Corporation.
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
+# SPDX-License-Identifier: GPL-2.0-only
#
"""Build performance test library functions"""
diff --git a/scripts/lib/build_perf/html.py b/scripts/lib/build_perf/html.py
index 578bb162ee..d1273c9c50 100644
--- a/scripts/lib/build_perf/html.py
+++ b/scripts/lib/build_perf/html.py
@@ -1,14 +1,7 @@
#
# Copyright (c) 2017, Intel Corporation.
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
+# SPDX-License-Identifier: GPL-2.0-only
#
"""Helper module for HTML reporting"""
from jinja2 import Environment, PackageLoader
diff --git a/scripts/lib/build_perf/report.py b/scripts/lib/build_perf/report.py
index d99a36797f..ab77424cc7 100644
--- a/scripts/lib/build_perf/report.py
+++ b/scripts/lib/build_perf/report.py
@@ -1,17 +1,11 @@
#
# Copyright (c) 2017, Intel Corporation.
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
+# SPDX-License-Identifier: GPL-2.0-only
#
"""Handling of build perf test reports"""
-from collections import OrderedDict, Mapping, namedtuple
+from collections import OrderedDict, namedtuple
+from collections.abc import Mapping
from datetime import datetime, timezone
from numbers import Number
from statistics import mean, stdev, variance
diff --git a/scripts/lib/buildstats.py b/scripts/lib/buildstats.py
index d9aadf3cb8..6db60d5bcf 100644
--- a/scripts/lib/buildstats.py
+++ b/scripts/lib/buildstats.py
@@ -1,21 +1,14 @@
#
# Copyright (c) 2017, Intel Corporation.
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
+# SPDX-License-Identifier: GPL-2.0-only
#
"""Functionality for analyzing buildstats"""
import json
import logging
import os
import re
-from collections import namedtuple,OrderedDict
+from collections import namedtuple
from statistics import mean
@@ -86,8 +79,8 @@ class BSTask(dict):
return self['rusage']['ru_oublock']
@classmethod
- def from_file(cls, buildstat_file):
- """Read buildstat text file"""
+ def from_file(cls, buildstat_file, fallback_end=0):
+ """Read buildstat text file. fallback_end is an optional end time for tasks that are not recorded as finishing."""
bs_task = cls()
log.debug("Reading task buildstats from %s", buildstat_file)
end_time = None
@@ -115,7 +108,10 @@ class BSTask(dict):
bs_task[ru_type][ru_key] = val
elif key == 'Status':
bs_task['status'] = val
- if end_time is not None and start_time is not None:
+ # If the task didn't finish, fill in the fallback end time if specified
+ if start_time and not end_time and fallback_end:
+ end_time = fallback_end
+ if start_time and end_time:
bs_task['elapsed_time'] = end_time - start_time
else:
raise BSError("{} looks like a invalid buildstats file".format(buildstat_file))
@@ -233,25 +229,44 @@ class BuildStats(dict):
epoch = match.group('epoch')
return name, epoch, version, revision
+ @staticmethod
+ def parse_top_build_stats(path):
+ """
+ Parse the top-level build_stats file for build-wide start and duration.
+ """
+ start = elapsed = 0
+ with open(path) as fobj:
+ for line in fobj.readlines():
+ key, val = line.split(':', 1)
+ val = val.strip()
+ if key == 'Build Started':
+ start = float(val)
+ elif key == "Elapsed time":
+ elapsed = float(val.split()[0])
+ return start, elapsed
+
@classmethod
def from_dir(cls, path):
"""Load buildstats from a buildstats directory"""
- if not os.path.isfile(os.path.join(path, 'build_stats')):
+ top_stats = os.path.join(path, 'build_stats')
+ if not os.path.isfile(top_stats):
raise BSError("{} does not look like a buildstats directory".format(path))
log.debug("Reading buildstats directory %s", path)
-
buildstats = cls()
+ build_started, build_elapsed = buildstats.parse_top_build_stats(top_stats)
+ build_end = build_started + build_elapsed
+
subdirs = os.listdir(path)
for dirname in subdirs:
recipe_dir = os.path.join(path, dirname)
- if not os.path.isdir(recipe_dir):
+ if dirname == "reduced_proc_pressure" or not os.path.isdir(recipe_dir):
continue
name, epoch, version, revision = cls.split_nevr(dirname)
bsrecipe = BSRecipe(name, epoch, version, revision)
for task in os.listdir(recipe_dir):
bsrecipe.tasks[task] = BSTask.from_file(
- os.path.join(recipe_dir, task))
+ os.path.join(recipe_dir, task), build_end)
if name in buildstats:
raise BSError("Cannot handle multiple versions of the same "
"package ({})".format(name))
@@ -263,18 +278,22 @@ class BuildStats(dict):
"""Aggregate other buildstats into this"""
if set(self.keys()) != set(buildstats.keys()):
raise ValueError("Refusing to aggregate buildstats, set of "
- "recipes is different")
+ "recipes is different: %s" % (set(self.keys()) ^ set(buildstats.keys())))
for pkg, data in buildstats.items():
self[pkg].aggregate(data)
-def diff_buildstats(bs1, bs2, stat_attr, min_val=None, min_absdiff=None):
+def diff_buildstats(bs1, bs2, stat_attr, min_val=None, min_absdiff=None, only_tasks=[]):
"""Compare the tasks of two buildstats"""
tasks_diff = []
pkgs = set(bs1.keys()).union(set(bs2.keys()))
for pkg in pkgs:
tasks1 = bs1[pkg].tasks if pkg in bs1 else {}
tasks2 = bs2[pkg].tasks if pkg in bs2 else {}
+ if only_tasks:
+ tasks1 = {k: v for k, v in tasks1.items() if k in only_tasks}
+ tasks2 = {k: v for k, v in tasks2.items() if k in only_tasks}
+
if not tasks1:
pkg_op = '+'
elif not tasks2:
diff --git a/scripts/lib/checklayer/__init__.py b/scripts/lib/checklayer/__init__.py
index 2618416fab..62ecdfe390 100644
--- a/scripts/lib/checklayer/__init__.py
+++ b/scripts/lib/checklayer/__init__.py
@@ -1,7 +1,9 @@
# Yocto Project layer check tool
#
# Copyright (C) 2017 Intel Corporation
-# Released under the MIT license (see COPYING.MIT)
+#
+# SPDX-License-Identifier: MIT
+#
import os
import re
@@ -14,6 +16,7 @@ class LayerType(Enum):
BSP = 0
DISTRO = 1
SOFTWARE = 2
+ CORE = 3
ERROR_NO_LAYER_CONF = 98
ERROR_BSP_DISTRO = 99
@@ -41,7 +44,7 @@ def _get_layer_collections(layer_path, lconf=None, data=None):
ldata.setVar('LAYERDIR', layer_path)
try:
- ldata = bb.parse.handle(lconf, ldata, include=True)
+ ldata = bb.parse.handle(lconf, ldata, include=True, baseconfig=True)
except:
raise RuntimeError("Parsing of layer.conf from layer: %s failed" % layer_path)
ldata.expandVarref('LAYERDIR')
@@ -57,9 +60,14 @@ def _get_layer_collections(layer_path, lconf=None, data=None):
pattern = ldata.getVar('BBFILE_PATTERN_%s' % name)
depends = ldata.getVar('LAYERDEPENDS_%s' % name)
compat = ldata.getVar('LAYERSERIES_COMPAT_%s' % name)
+ try:
+ depDict = bb.utils.explode_dep_versions2(depends or "")
+ except bb.utils.VersionStringException as vse:
+ bb.fatal('Error parsing LAYERDEPENDS_%s: %s' % (name, str(vse)))
+
collections[name]['priority'] = priority
collections[name]['pattern'] = pattern
- collections[name]['depends'] = depends
+ collections[name]['depends'] = ' '.join(depDict.keys())
collections[name]['compat'] = compat
return collections
@@ -99,7 +107,13 @@ def _detect_layer(layer_path):
if distros:
is_distro = True
- if is_bsp and is_distro:
+ layer['collections'] = _get_layer_collections(layer['path'])
+
+ if layer_name == "meta" and "core" in layer['collections']:
+ layer['type'] = LayerType.CORE
+ layer['conf']['machines'] = machines
+ layer['conf']['distros'] = distros
+ elif is_bsp and is_distro:
layer['type'] = LayerType.ERROR_BSP_DISTRO
elif is_bsp:
layer['type'] = LayerType.BSP
@@ -110,8 +124,6 @@ def _detect_layer(layer_path):
else:
layer['type'] = LayerType.SOFTWARE
- layer['collections'] = _get_layer_collections(layer['path'])
-
return layer
def detect_layers(layer_directories, no_auto):
@@ -139,14 +151,38 @@ def detect_layers(layer_directories, no_auto):
return layers
-def _find_layer_depends(depend, layers):
+def _find_layer(depend, layers):
for layer in layers:
+ if 'collections' not in layer:
+ continue
+
for collection in layer['collections']:
if depend == collection:
return layer
return None
-def add_layer_dependencies(bblayersconf, layer, layers, logger):
+def sanity_check_layers(layers, logger):
+ """
+ Check that we didn't find duplicate collection names, as the layer that will
+ be used is non-deterministic. The precise check is duplicate collections
+ with different patterns, as the same pattern being repeated won't cause
+ problems.
+ """
+ import collections
+
+ passed = True
+ seen = collections.defaultdict(set)
+ for layer in layers:
+ for name, data in layer.get("collections", {}).items():
+ seen[name].add(data["pattern"])
+
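+    # e.g. (illustrative) collection "foo" declared by two layers with
+    # patterns "^/a/meta-foo/" and "^/b/meta-foo/" fails the check below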
+ for name, patterns in seen.items():
+ if len(patterns) > 1:
+ passed = False
+ logger.error("Collection %s found multiple times: %s" % (name, ", ".join(patterns)))
+ return passed
+
+def get_layer_dependencies(layer, layers, logger):
def recurse_dependencies(depends, layer, layers, logger, ret = []):
logger.debug('Processing dependencies %s for layer %s.' % \
(depends, layer['name']))
@@ -156,7 +192,7 @@ def add_layer_dependencies(bblayersconf, layer, layers, logger):
if depend == 'core':
continue
- layer_depend = _find_layer_depends(depend, layers)
+ layer_depend = _find_layer(depend, layers)
if not layer_depend:
            logger.error('Layer %s depends on %s which cannot be found.' % \
(layer['name'], depend))
@@ -167,6 +203,10 @@ def add_layer_dependencies(bblayersconf, layer, layers, logger):
# multiple errors at once
if ret is not None and layer_depend not in ret:
ret.append(layer_depend)
+ else:
+ # we might have processed this dependency already, in which case
+ # we should not do it again (avoid recursive loop)
+ continue
# Recursively process...
if 'collections' not in layer_depend:
@@ -189,48 +229,65 @@ def add_layer_dependencies(bblayersconf, layer, layers, logger):
layer_depends = recurse_dependencies(depends, layer, layers, logger, layer_depends)
# Note: [] (empty) is allowed, None is not!
+ return layer_depends
+
+def add_layer_dependencies(bblayersconf, layer, layers, logger):
+
+ layer_depends = get_layer_dependencies(layer, layers, logger)
if layer_depends is None:
return False
else:
- # Don't add a layer that is already present.
- added = set()
- output = check_command('Getting existing layers failed.', 'bitbake-layers show-layers').decode('utf-8')
- for layer, path, pri in re.findall(r'^(\S+) +([^\n]*?) +(\d+)$', output, re.MULTILINE):
- added.add(path)
-
- for layer_depend in layer_depends:
- name = layer_depend['name']
- path = layer_depend['path']
+ add_layers(bblayersconf, layer_depends, logger)
+
+ return True
+
+def add_layers(bblayersconf, layers, logger):
+ # Don't add a layer that is already present.
+ added = set()
+ output = check_command('Getting existing layers failed.', 'bitbake-layers show-layers').decode('utf-8')
+ for layer, path, pri in re.findall(r'^(\S+) +([^\n]*?) +(\d+)$', output, re.MULTILINE):
+ added.add(path)
+
+ with open(bblayersconf, 'a+') as f:
+ for layer in layers:
+ logger.info('Adding layer %s' % layer['name'])
+ name = layer['name']
+ path = layer['path']
if path in added:
- continue
+ logger.info('%s is already in %s' % (name, bblayersconf))
else:
added.add(path)
- logger.info('Adding layer dependency %s' % name)
- with open(bblayersconf, 'a+') as f:
f.write("\nBBLAYERS += \"%s\"\n" % path)
return True
-def add_layer(bblayersconf, layer, layers, logger):
- logger.info('Adding layer %s' % layer['name'])
- with open(bblayersconf, 'a+') as f:
- f.write("\nBBLAYERS += \"%s\"\n" % layer['path'])
+def check_bblayers(bblayersconf, layer_path, logger):
+ '''
+    Return True if layer_path is found in BBLAYERS.
+ '''
+ import bb.parse
+ import bb.data
- return True
+ ldata = bb.parse.handle(bblayersconf, bb.data.init(), include=True)
+ for bblayer in (ldata.getVar('BBLAYERS') or '').split():
+ if os.path.normpath(bblayer) == os.path.normpath(layer_path):
+ return True
+
+ return False
-def check_command(error_msg, cmd):
+def check_command(error_msg, cmd, cwd=None):
'''
Run a command under a shell, capture stdout and stderr in a single stream,
throw an error when command returns non-zero exit code. Returns the output.
'''
- p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=cwd)
output, _ = p.communicate()
if p.returncode:
msg = "%s\nCommand: %s\nOutput:\n%s" % (error_msg, cmd, output.decode('utf-8'))
raise RuntimeError(msg)
return output
-def get_signatures(builddir, failsafe=False, machine=None):
+def get_signatures(builddir, failsafe=False, machine=None, extravars=None):
import re
    # some recipes need to be excluded, like meta-world-pkgdata
@@ -241,19 +298,22 @@ def get_signatures(builddir, failsafe=False, machine=None):
sigs = {}
tune2tasks = {}
- cmd = ''
+ cmd = 'BB_ENV_PASSTHROUGH_ADDITIONS="$BB_ENV_PASSTHROUGH_ADDITIONS BB_SIGNATURE_HANDLER" BB_SIGNATURE_HANDLER="OEBasicHash" '
+ if extravars:
+ cmd += extravars
+ cmd += ' '
if machine:
cmd += 'MACHINE=%s ' % machine
cmd += 'bitbake '
if failsafe:
cmd += '-k '
- cmd += '-S none world'
+ cmd += '-S lockedsigs world'
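+    # resulting command looks like (illustrative MACHINE value):
+    #   BB_ENV_PASSTHROUGH_ADDITIONS="$BB_ENV_PASSTHROUGH_ADDITIONS BB_SIGNATURE_HANDLER" \
+    #     BB_SIGNATURE_HANDLER="OEBasicHash" MACHINE=qemux86-64 bitbake -k -S lockedsigs world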
sigs_file = os.path.join(builddir, 'locked-sigs.inc')
if os.path.exists(sigs_file):
os.unlink(sigs_file)
try:
check_command('Generating signatures failed. This might be due to some parse error and/or general layer incompatibilities.',
- cmd)
+ cmd, builddir)
except RuntimeError as ex:
if failsafe and os.path.exists(sigs_file):
# Ignore the error here. Most likely some recipes active
@@ -264,8 +324,8 @@ def get_signatures(builddir, failsafe=False, machine=None):
else:
raise
- sig_regex = re.compile("^(?P<task>.*:.*):(?P<hash>.*) .$")
- tune_regex = re.compile("(^|\s)SIGGEN_LOCKEDSIGS_t-(?P<tune>\S*)\s*=\s*")
+ sig_regex = re.compile(r"^(?P<task>.*:.*):(?P<hash>.*) .$")
+ tune_regex = re.compile(r"(^|\s)SIGGEN_LOCKEDSIGS_t-(?P<tune>\S*)\s*=\s*")
current_tune = None
with open(sigs_file, 'r') as f:
for line in f.readlines():
diff --git a/scripts/lib/checklayer/case.py b/scripts/lib/checklayer/case.py
index 9dd00412e5..fa9dee384e 100644
--- a/scripts/lib/checklayer/case.py
+++ b/scripts/lib/checklayer/case.py
@@ -1,5 +1,7 @@
# Copyright (C) 2017 Intel Corporation
-# Released under the MIT license (see COPYING.MIT)
+#
+# SPDX-License-Identifier: MIT
+#
from oeqa.core.case import OETestCase
diff --git a/scripts/lib/checklayer/cases/bsp.py b/scripts/lib/checklayer/cases/bsp.py
index b6b611be73..b76163fb56 100644
--- a/scripts/lib/checklayer/cases/bsp.py
+++ b/scripts/lib/checklayer/cases/bsp.py
@@ -1,5 +1,7 @@
# Copyright (C) 2017 Intel Corporation
-# Released under the MIT license (see COPYING.MIT)
+#
+# SPDX-License-Identifier: MIT
+#
import unittest
@@ -9,7 +11,7 @@ from checklayer.case import OECheckLayerTestCase
class BSPCheckLayer(OECheckLayerTestCase):
@classmethod
def setUpClass(self):
- if self.tc.layer['type'] != LayerType.BSP:
+ if self.tc.layer['type'] not in (LayerType.BSP, LayerType.CORE):
raise unittest.SkipTest("BSPCheckLayer: Layer %s isn't BSP one." %\
self.tc.layer['name'])
@@ -151,7 +153,7 @@ class BSPCheckLayer(OECheckLayerTestCase):
                # do_build can be ignored: it is known to have
# different signatures in some cases, for example in
# the allarch ca-certificates due to RDEPENDS=openssl.
- # That particular dependency is whitelisted via
+ # That particular dependency is marked via
# SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS, but still shows up
# in the sstate signature hash because filtering it
# out would be hard and running do_build multiple
diff --git a/scripts/lib/checklayer/cases/common.py b/scripts/lib/checklayer/cases/common.py
index 1bef61b048..97b16f78c8 100644
--- a/scripts/lib/checklayer/cases/common.py
+++ b/scripts/lib/checklayer/cases/common.py
@@ -1,18 +1,24 @@
# Copyright (C) 2017 Intel Corporation
-# Released under the MIT license (see COPYING.MIT)
+#
+# SPDX-License-Identifier: MIT
+#
import glob
import os
import unittest
+import re
from checklayer import get_signatures, LayerType, check_command, get_depgraph, compare_signatures
from checklayer.case import OECheckLayerTestCase
class CommonCheckLayer(OECheckLayerTestCase):
def test_readme(self):
+ if self.tc.layer['type'] == LayerType.CORE:
+ raise unittest.SkipTest("Core layer's README is top level")
+
# The top-level README file may have a suffix (like README.rst or README.txt).
- readme_files = glob.glob(os.path.join(self.tc.layer['path'], 'README*'))
+ readme_files = glob.glob(os.path.join(self.tc.layer['path'], '[Rr][Ee][Aa][Dd][Mm][Ee]*'))
self.assertTrue(len(readme_files) > 0,
- msg="Layer doesn't contains README file.")
+ msg="Layer doesn't contain a README file.")
# There might be more than one file matching the file pattern above
# (for example, README.rst and README-COPYING.rst). The one with the shortest
@@ -24,6 +30,16 @@ class CommonCheckLayer(OECheckLayerTestCase):
self.assertTrue(data,
msg="Layer contains a README file but it is empty.")
+ # If a layer's README references another README, then the checks below are not valid
+ if re.search('README', data, re.IGNORECASE):
+ return
+
+ self.assertIn('maintainer', data.lower())
+ self.assertIn('patch', data.lower())
+ # Check that there is an email address in the README
+ email_regex = re.compile(r"[^@]+@[^@]+")
+ self.assertTrue(email_regex.match(data))
+
def test_parse(self):
check_command('Layer %s failed to parse.' % self.tc.layer['name'],
'bitbake -p')
@@ -41,6 +57,36 @@ class CommonCheckLayer(OECheckLayerTestCase):
'''
get_signatures(self.td['builddir'], failsafe=False)
+ def test_world_inherit_class(self):
+ '''
+        This also does "bitbake -S lockedsigs world" along with inheriting the "yocto-check-layer"
+ class, which can do additional per-recipe test cases.
+ '''
+ msg = []
+ try:
+ get_signatures(self.td['builddir'], failsafe=False, machine=None, extravars='BB_ENV_PASSTHROUGH_ADDITIONS="$BB_ENV_PASSTHROUGH_ADDITIONS INHERIT" INHERIT="yocto-check-layer"')
+ except RuntimeError as ex:
+ msg.append(str(ex))
+ if msg:
+            msg.insert(0, 'Layer %s failed additional checks from yocto-check-layer.bbclass\nSee the log below for specific recipe parsing errors:\n' % \
+ self.tc.layer['name'])
+ self.fail('\n'.join(msg))
+
+ @unittest.expectedFailure
+ def test_patches_upstream_status(self):
+ import sys
+ sys.path.append(os.path.join(sys.path[0], '../../../../meta/lib/'))
+ import oe.qa
+ patches = []
+ for dirpath, dirs, files in os.walk(self.tc.layer['path']):
+ for filename in files:
+ if filename.endswith(".patch"):
+ ppath = os.path.join(dirpath, filename)
+ if oe.qa.check_upstream_status(ppath):
+ patches.append(ppath)
+        self.assertEqual(len(patches), 0, \
+            msg="Found the following patches with malformed or missing Upstream-Status:\n%s" % '\n'.join([str(patch) for patch in patches]))
+
def test_signatures(self):
if self.tc.layer['type'] == LayerType.SOFTWARE and \
not self.tc.test_software_layer_signatures:
diff --git a/scripts/lib/checklayer/cases/distro.py b/scripts/lib/checklayer/cases/distro.py
index df1b3035eb..a35332451c 100644
--- a/scripts/lib/checklayer/cases/distro.py
+++ b/scripts/lib/checklayer/cases/distro.py
@@ -1,5 +1,7 @@
# Copyright (C) 2017 Intel Corporation
-# Released under the MIT license (see COPYING.MIT)
+#
+# SPDX-License-Identifier: MIT
+#
import unittest
@@ -9,7 +11,7 @@ from checklayer.case import OECheckLayerTestCase
class DistroCheckLayer(OECheckLayerTestCase):
@classmethod
def setUpClass(self):
- if self.tc.layer['type'] != LayerType.DISTRO:
+ if self.tc.layer['type'] not in (LayerType.DISTRO, LayerType.CORE):
raise unittest.SkipTest("DistroCheckLayer: Layer %s isn't Distro one." %\
self.tc.layer['name'])
diff --git a/scripts/lib/checklayer/context.py b/scripts/lib/checklayer/context.py
index 1bec2c4103..4de8f668fd 100644
--- a/scripts/lib/checklayer/context.py
+++ b/scripts/lib/checklayer/context.py
@@ -1,5 +1,7 @@
# Copyright (C) 2017 Intel Corporation
-# Released under the MIT license (see COPYING.MIT)
+#
+# SPDX-License-Identifier: MIT
+#
import os
import sys
diff --git a/scripts/lib/devtool/__init__.py b/scripts/lib/devtool/__init__.py
index 89f098a912..6133c1c5b4 100644
--- a/scripts/lib/devtool/__init__.py
+++ b/scripts/lib/devtool/__init__.py
@@ -4,18 +4,8 @@
#
# Copyright (C) 2014 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Devtool plugins module"""
import os
@@ -88,12 +78,15 @@ def exec_fakeroot(d, cmd, **kwargs):
"""Run a command under fakeroot (pseudo, in fact) so that it picks up the appropriate file permissions"""
# Grab the command and check it actually exists
fakerootcmd = d.getVar('FAKEROOTCMD')
+ fakerootenv = d.getVar('FAKEROOTENV')
+    return exec_fakeroot_no_d(fakerootcmd, fakerootenv, cmd, **kwargs)
+
+def exec_fakeroot_no_d(fakerootcmd, fakerootenv, cmd, **kwargs):
if not os.path.exists(fakerootcmd):
        logger.error('pseudo executable %s could not be found - have you run a build yet? pseudo-native should install this and if you have run any build then that should have been built' % fakerootcmd)
return 2
# Set up the appropriate environment
newenv = dict(os.environ)
- fakerootenv = d.getVar('FAKEROOTENV')
for varvalue in fakerootenv.split():
if '=' in varvalue:
splitval = varvalue.split('=', 1)
@@ -205,7 +198,8 @@ def setup_git_repo(repodir, version, devbranch, basetag='devtool-base', d=None):
import oe.patch
if not os.path.exists(os.path.join(repodir, '.git')):
bb.process.run('git init', cwd=repodir)
- bb.process.run('git add .', cwd=repodir)
+ bb.process.run('git config --local gc.autodetach 0', cwd=repodir)
+ bb.process.run('git add -f -A .', cwd=repodir)
commit_cmd = ['git']
oe.patch.GitApplyTree.gitCommandUserOptions(commit_cmd, d=d)
commit_cmd += ['commit', '-q']
@@ -221,8 +215,13 @@ def setup_git_repo(repodir, version, devbranch, basetag='devtool-base', d=None):
bb.process.run(commit_cmd, cwd=repodir)
# Ensure singletask.lock (as used by externalsrc.bbclass) is ignored by git
+ gitinfodir = os.path.join(repodir, '.git', 'info')
+ try:
+ os.mkdir(gitinfodir)
+ except FileExistsError:
+ pass
excludes = []
- excludefile = os.path.join(repodir, '.git', 'info', 'exclude')
+ excludefile = os.path.join(gitinfodir, 'exclude')
try:
with open(excludefile, 'r') as f:
excludes = f.readlines()
@@ -237,6 +236,28 @@ def setup_git_repo(repodir, version, devbranch, basetag='devtool-base', d=None):
bb.process.run('git checkout -b %s' % devbranch, cwd=repodir)
bb.process.run('git tag -f %s' % basetag, cwd=repodir)
+ # if recipe unpacks another git repo inside S, we need to declare it as a regular git submodule now,
+ # so we will be able to tag branches on it and extract patches when doing finish/update on the recipe
+ stdout, _ = bb.process.run("git status --porcelain", cwd=repodir)
+ found = False
+ for line in stdout.splitlines():
+ if line.endswith("/"):
+ new_dir = line.split()[1]
+ for root, dirs, files in os.walk(os.path.join(repodir, new_dir)):
+ if ".git" in dirs + files:
+ (stdout, _) = bb.process.run('git remote', cwd=root)
+ remote = stdout.splitlines()[0]
+ (stdout, _) = bb.process.run('git remote get-url %s' % remote, cwd=root)
+ remote_url = stdout.splitlines()[0]
+ logger.error(os.path.relpath(os.path.join(root, ".."), root))
+ bb.process.run('git submodule add %s %s' % (remote_url, os.path.relpath(root, os.path.join(root, ".."))), cwd=os.path.join(root, ".."))
+ found = True
+ if found:
+ oe.patch.GitApplyTree.commitIgnored("Add additional submodule from SRC_URI", dir=os.path.join(root, ".."), d=d)
+ found = False
+ if os.path.exists(os.path.join(repodir, '.gitmodules')):
+ bb.process.run('git submodule foreach --recursive "git tag -f %s"' % basetag, cwd=repodir)
+
def recipe_to_append(recipefile, config, wildcard=False):
"""
Convert a recipe file to a bbappend file path within the workspace.
diff --git a/scripts/lib/devtool/build.py b/scripts/lib/devtool/build.py
index 252379e9b2..935ffab46c 100644
--- a/scripts/lib/devtool/build.py
+++ b/scripts/lib/devtool/build.py
@@ -2,18 +2,8 @@
#
# Copyright (C) 2014-2015 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Devtool build plugin"""
import os
@@ -21,7 +11,8 @@ import bb
import logging
import argparse
import tempfile
-from devtool import exec_build_env_command, check_workspace_recipe, DevtoolError
+from devtool import exec_build_env_command, setup_tinfoil, check_workspace_recipe, DevtoolError
+from devtool import parse_recipe
logger = logging.getLogger('devtool')
@@ -53,8 +44,22 @@ def _get_build_tasks(config):
def build(args, config, basepath, workspace):
"""Entry point for the devtool 'build' subcommand"""
workspacepn = check_workspace_recipe(workspace, args.recipename, bbclassextend=True)
+ tinfoil = setup_tinfoil(config_only=False, basepath=basepath)
+ try:
+ rd = parse_recipe(config, tinfoil, args.recipename, appends=True, filter_workspace=False)
+ if not rd:
+ return 1
+ deploytask = 'do_deploy' in rd.getVar('__BBTASKS')
+ finally:
+ tinfoil.shutdown()
- build_tasks = _get_build_tasks(config)
+ if args.clean:
+ # use clean instead of cleansstate to avoid messing things up in eSDK
+ build_tasks = ['do_clean']
+ else:
+ build_tasks = _get_build_tasks(config)
+ if deploytask:
+ build_tasks.append('do_deploy')
bbappend = workspace[workspacepn]['bbappend']
if args.disable_parallel_make:
@@ -83,4 +88,5 @@ def register_commands(subparsers, context):
group='working', order=50)
parser_build.add_argument('recipename', help='Recipe to build')
parser_build.add_argument('-s', '--disable-parallel-make', action="store_true", help='Disable make parallelism')
+ parser_build.add_argument('-c', '--clean', action='store_true', help='clean up recipe building results')
parser_build.set_defaults(func=build)
diff --git a/scripts/lib/devtool/build_image.py b/scripts/lib/devtool/build_image.py
index e5810389be..980f90ddd6 100644
--- a/scripts/lib/devtool/build_image.py
+++ b/scripts/lib/devtool/build_image.py
@@ -2,18 +2,8 @@
#
# Copyright (C) 2015 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Devtool plugin containing the build-image subcommand."""
@@ -123,7 +113,7 @@ def build_image_task(config, basepath, workspace, image, add_packages=None, task
with open(appendfile, 'w') as afile:
if packages:
# include packages from workspace recipes into the image
- afile.write('IMAGE_INSTALL_append = " %s"\n' % ' '.join(packages))
+ afile.write('IMAGE_INSTALL:append = " %s"\n' % ' '.join(packages))
if not task:
logger.info('Building image %s with the following '
'additional packages: %s', image, ' '.join(packages))
diff --git a/scripts/lib/devtool/build_sdk.py b/scripts/lib/devtool/build_sdk.py
index b89d65b0cb..1cd4831d2b 100644
--- a/scripts/lib/devtool/build_sdk.py
+++ b/scripts/lib/devtool/build_sdk.py
@@ -2,18 +2,8 @@
#
# Copyright (C) 2015-2016 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import os
import subprocess
@@ -23,7 +13,7 @@ import shutil
import errno
import sys
import tempfile
-from devtool import exec_build_env_command, setup_tinfoil, parse_recipe, DevtoolError
+from devtool import DevtoolError
from devtool import build_image
logger = logging.getLogger('devtool')
diff --git a/scripts/lib/devtool/deploy.py b/scripts/lib/devtool/deploy.py
index 52e261d560..b5ca8f2c2f 100644
--- a/scripts/lib/devtool/deploy.py
+++ b/scripts/lib/devtool/deploy.py
@@ -2,18 +2,8 @@
#
# Copyright (C) 2014-2016 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Devtool plugin containing the deploy subcommands"""
import logging
@@ -26,7 +16,7 @@ import bb.utils
import argparse_oe
import oe.types
-from devtool import exec_fakeroot, setup_tinfoil, check_workspace_recipe, DevtoolError
+from devtool import exec_fakeroot_no_d, setup_tinfoil, check_workspace_recipe, DevtoolError
logger = logging.getLogger('devtool')
@@ -143,16 +133,38 @@ def _prepare_remote_script(deploy, verbose=False, dryrun=False, undeployall=Fals
return '\n'.join(lines)
-
-
def deploy(args, config, basepath, workspace):
"""Entry point for the devtool 'deploy' subcommand"""
- import math
- import oe.recipeutils
- import oe.package
+ import oe.utils
check_workspace_recipe(workspace, args.recipename, checksrc=False)
+ tinfoil = setup_tinfoil(basepath=basepath)
+ try:
+ try:
+ rd = tinfoil.parse_recipe(args.recipename)
+ except Exception as e:
+ raise DevtoolError('Exception parsing recipe %s: %s' %
+ (args.recipename, e))
+
+ srcdir = rd.getVar('D')
+ workdir = rd.getVar('WORKDIR')
+ path = rd.getVar('PATH')
+ strip_cmd = rd.getVar('STRIP')
+ libdir = rd.getVar('libdir')
+ base_libdir = rd.getVar('base_libdir')
+ max_process = oe.utils.get_bb_number_threads(rd)
+ fakerootcmd = rd.getVar('FAKEROOTCMD')
+ fakerootenv = rd.getVar('FAKEROOTENV')
+ finally:
+ tinfoil.shutdown()
+
+ return deploy_no_d(srcdir, workdir, path, strip_cmd, libdir, base_libdir, max_process, fakerootcmd, fakerootenv, args)
+
+def deploy_no_d(srcdir, workdir, path, strip_cmd, libdir, base_libdir, max_process, fakerootcmd, fakerootenv, args):
+ import math
+ import oe.package
+
try:
host, destdir = args.target.split(':')
except ValueError:
@@ -162,104 +174,108 @@ def deploy(args, config, basepath, workspace):
if not destdir.endswith('/'):
destdir += '/'
- tinfoil = setup_tinfoil(basepath=basepath)
- try:
- try:
- rd = tinfoil.parse_recipe(args.recipename)
- except Exception as e:
- raise DevtoolError('Exception parsing recipe %s: %s' %
- (args.recipename, e))
- recipe_outdir = rd.getVar('D')
- if not os.path.exists(recipe_outdir) or not os.listdir(recipe_outdir):
- raise DevtoolError('No files to deploy - have you built the %s '
- 'recipe? If so, the install step has not installed '
- 'any files.' % args.recipename)
-
- if args.strip and not args.dry_run:
- # Fakeroot copy to new destination
- srcdir = recipe_outdir
- recipe_outdir = os.path.join(rd.getVar('WORKDIR'), 'deploy-target-stripped')
- if os.path.isdir(recipe_outdir):
- bb.utils.remove(recipe_outdir, True)
- exec_fakeroot(rd, "cp -af %s %s" % (os.path.join(srcdir, '.'), recipe_outdir), shell=True)
- os.environ['PATH'] = ':'.join([os.environ['PATH'], rd.getVar('PATH') or ''])
- oe.package.strip_execs(args.recipename, recipe_outdir, rd.getVar('STRIP'), rd.getVar('libdir'),
- rd.getVar('base_libdir'))
-
- filelist = []
- ftotalsize = 0
- for root, _, files in os.walk(recipe_outdir):
- for fn in files:
- # Get the size in kiB (since we'll be comparing it to the output of du -k)
- # MUST use lstat() here not stat() or getfilesize() since we don't want to
- # dereference symlinks
- fsize = int(math.ceil(float(os.lstat(os.path.join(root, fn)).st_size)/1024))
- ftotalsize += fsize
- # The path as it would appear on the target
- fpath = os.path.join(destdir, os.path.relpath(root, recipe_outdir), fn)
- filelist.append((fpath, fsize))
-
- if args.dry_run:
- print('Files to be deployed for %s on target %s:' % (args.recipename, args.target))
- for item, _ in filelist:
- print(' %s' % item)
- return 0
-
- extraoptions = ''
- if args.no_host_check:
- extraoptions += '-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'
- if not args.show_status:
- extraoptions += ' -q'
-
- scp_port = ''
- ssh_port = ''
- if args.port:
- scp_port = "-P %s" % args.port
- ssh_port = "-p %s" % args.port
-
- # In order to delete previously deployed files and have the manifest file on
- # the target, we write out a shell script and then copy it to the target
- # so we can then run it (piping tar output to it).
- # (We cannot use scp here, because it doesn't preserve symlinks.)
- tmpdir = tempfile.mkdtemp(prefix='devtool')
- try:
- tmpscript = '/tmp/devtool_deploy.sh'
- tmpfilelist = os.path.join(os.path.dirname(tmpscript), 'devtool_deploy.list')
- shellscript = _prepare_remote_script(deploy=True,
- verbose=args.show_status,
- nopreserve=args.no_preserve,
- nocheckspace=args.no_check_space)
- # Write out the script to a file
- with open(os.path.join(tmpdir, os.path.basename(tmpscript)), 'w') as f:
- f.write(shellscript)
- # Write out the file list
- with open(os.path.join(tmpdir, os.path.basename(tmpfilelist)), 'w') as f:
- f.write('%d\n' % ftotalsize)
- for fpath, fsize in filelist:
- f.write('%s %d\n' % (fpath, fsize))
- # Copy them to the target
- ret = subprocess.call("scp %s %s %s/* %s:%s" % (scp_port, extraoptions, tmpdir, args.target, os.path.dirname(tmpscript)), shell=True)
- if ret != 0:
- raise DevtoolError('Failed to copy script to %s - rerun with -s to '
- 'get a complete error message' % args.target)
- finally:
- shutil.rmtree(tmpdir)
+ recipe_outdir = srcdir
+ if not os.path.exists(recipe_outdir) or not os.listdir(recipe_outdir):
+ raise DevtoolError('No files to deploy - have you built the %s '
+ 'recipe? If so, the install step has not installed '
+ 'any files.' % args.recipename)
+
+ if args.strip and not args.dry_run:
+ # Fakeroot copy to new destination
+ srcdir = recipe_outdir
+ recipe_outdir = os.path.join(workdir, 'devtool-deploy-target-stripped')
+ if os.path.isdir(recipe_outdir):
+ exec_fakeroot_no_d(fakerootcmd, fakerootenv, "rm -rf %s" % recipe_outdir, shell=True)
+ exec_fakeroot_no_d(fakerootcmd, fakerootenv, "cp -af %s %s" % (os.path.join(srcdir, '.'), recipe_outdir), shell=True)
+ os.environ['PATH'] = ':'.join([os.environ['PATH'], path or ''])
+ oe.package.strip_execs(args.recipename, recipe_outdir, strip_cmd, libdir, base_libdir, max_process)
+
+ filelist = []
+    inodes = set()
+ ftotalsize = 0
+ for root, _, files in os.walk(recipe_outdir):
+ for fn in files:
+            # MUST use lstat() here, not stat() or getfilesize(), since we
+            # don't want to dereference symlinks
+            fstat = os.lstat(os.path.join(root, fn))
+            # Get the size in kiB (since we'll be comparing it to the output
+            # of du -k); count hard-linked files only once, as du does
+            if fstat.st_ino in inodes:
+                fsize = 0
+            else:
+                fsize = int(math.ceil(float(fstat.st_size)/1024))
+            inodes.add(fstat.st_ino)
+ ftotalsize += fsize
+ # The path as it would appear on the target
+ fpath = os.path.join(destdir, os.path.relpath(root, recipe_outdir), fn)
+ filelist.append((fpath, fsize))
+
+ if args.dry_run:
+ print('Files to be deployed for %s on target %s:' % (args.recipename, args.target))
+ for item, _ in filelist:
+ print(' %s' % item)
+ return 0
- # Now run the script
- ret = exec_fakeroot(rd, 'tar cf - . | ssh %s %s %s \'sh %s %s %s %s\'' % (ssh_port, extraoptions, args.target, tmpscript, args.recipename, destdir, tmpfilelist), cwd=recipe_outdir, shell=True)
- if ret != 0:
- raise DevtoolError('Deploy failed - rerun with -s to get a complete '
- 'error message')
+ extraoptions = ''
+ if args.no_host_check:
+ extraoptions += '-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'
+ if not args.show_status:
+ extraoptions += ' -q'
+
+ scp_sshexec = ''
+ ssh_sshexec = 'ssh'
+ if args.ssh_exec:
+ scp_sshexec = "-S %s" % args.ssh_exec
+ ssh_sshexec = args.ssh_exec
+ scp_port = ''
+ ssh_port = ''
+ if args.port:
+ scp_port = "-P %s" % args.port
+ ssh_port = "-p %s" % args.port
- logger.info('Successfully deployed %s' % recipe_outdir)
+ if args.key:
+ extraoptions += ' -i %s' % args.key
- files_list = []
- for root, _, files in os.walk(recipe_outdir):
- for filename in files:
- filename = os.path.relpath(os.path.join(root, filename), recipe_outdir)
- files_list.append(os.path.join(destdir, filename))
+ # In order to delete previously deployed files and have the manifest file on
+ # the target, we write out a shell script and then copy it to the target
+ # so we can then run it (piping tar output to it).
+ # (We cannot use scp here, because it doesn't preserve symlinks.)
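+    # The full remote invocation below boils down to roughly (illustrative
+    # shape only; the real options depend on the command line):
+    #   tar cf - . | ssh <target> 'sh /tmp/devtool_deploy.sh <recipe> <destdir> <list>'
+    # with tar running under fakeroot so file ownership survives the transfer.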
+ tmpdir = tempfile.mkdtemp(prefix='devtool')
+ try:
+ tmpscript = '/tmp/devtool_deploy.sh'
+ tmpfilelist = os.path.join(os.path.dirname(tmpscript), 'devtool_deploy.list')
+ shellscript = _prepare_remote_script(deploy=True,
+ verbose=args.show_status,
+ nopreserve=args.no_preserve,
+ nocheckspace=args.no_check_space)
+ # Write out the script to a file
+ with open(os.path.join(tmpdir, os.path.basename(tmpscript)), 'w') as f:
+ f.write(shellscript)
+ # Write out the file list
+ with open(os.path.join(tmpdir, os.path.basename(tmpfilelist)), 'w') as f:
+ f.write('%d\n' % ftotalsize)
+ for fpath, fsize in filelist:
+ f.write('%s %d\n' % (fpath, fsize))
+ # Copy them to the target
+ ret = subprocess.call("scp %s %s %s %s/* %s:%s" % (scp_sshexec, scp_port, extraoptions, tmpdir, args.target, os.path.dirname(tmpscript)), shell=True)
+ if ret != 0:
+ raise DevtoolError('Failed to copy script to %s - rerun with -s to '
+ 'get a complete error message' % args.target)
finally:
- tinfoil.shutdown()
+ shutil.rmtree(tmpdir)
+
+ # Now run the script
+ ret = exec_fakeroot_no_d(fakerootcmd, fakerootenv, 'tar cf - . | %s %s %s %s \'sh %s %s %s %s\'' % (ssh_sshexec, ssh_port, extraoptions, args.target, tmpscript, args.recipename, destdir, tmpfilelist), cwd=recipe_outdir, shell=True)
+ if ret != 0:
+ raise DevtoolError('Deploy failed - rerun with -s to get a complete '
+ 'error message')
+
+ logger.info('Successfully deployed %s' % recipe_outdir)
+
+ files_list = []
+ for root, _, files in os.walk(recipe_outdir):
+ for filename in files:
+ filename = os.path.relpath(os.path.join(root, filename), recipe_outdir)
+ files_list.append(os.path.join(destdir, filename))
return 0
@@ -276,6 +292,11 @@ def undeploy(args, config, basepath, workspace):
if not args.show_status:
extraoptions += ' -q'
+ scp_sshexec = ''
+ ssh_sshexec = 'ssh'
+ if args.ssh_exec:
+ scp_sshexec = "-S %s" % args.ssh_exec
+ ssh_sshexec = args.ssh_exec
scp_port = ''
ssh_port = ''
if args.port:
@@ -292,7 +313,7 @@ def undeploy(args, config, basepath, workspace):
with open(os.path.join(tmpdir, os.path.basename(tmpscript)), 'w') as f:
f.write(shellscript)
# Copy it to the target
- ret = subprocess.call("scp %s %s %s/* %s:%s" % (scp_port, extraoptions, tmpdir, args.target, os.path.dirname(tmpscript)), shell=True)
+ ret = subprocess.call("scp %s %s %s %s/* %s:%s" % (scp_sshexec, scp_port, extraoptions, tmpdir, args.target, os.path.dirname(tmpscript)), shell=True)
if ret != 0:
raise DevtoolError('Failed to copy script to %s - rerun with -s to '
'get a complete error message' % args.target)
@@ -300,7 +321,7 @@ def undeploy(args, config, basepath, workspace):
shutil.rmtree(tmpdir)
# Now run the script
- ret = subprocess.call('ssh %s %s %s \'sh %s %s\'' % (ssh_port, extraoptions, args.target, tmpscript, args.recipename), shell=True)
+ ret = subprocess.call('%s %s %s %s \'sh %s %s\'' % (ssh_sshexec, ssh_port, extraoptions, args.target, tmpscript, args.recipename), shell=True)
if ret != 0:
raise DevtoolError('Undeploy failed - rerun with -s to get a complete '
'error message')
@@ -324,7 +345,10 @@ def register_commands(subparsers, context):
parser_deploy.add_argument('-n', '--dry-run', help='List files to be deployed only', action='store_true')
parser_deploy.add_argument('-p', '--no-preserve', help='Do not preserve existing files', action='store_true')
parser_deploy.add_argument('--no-check-space', help='Do not check for available space before deploying', action='store_true')
+ parser_deploy.add_argument('-e', '--ssh-exec', help='Executable to use in place of ssh')
parser_deploy.add_argument('-P', '--port', help='Specify port to use for connection to the target')
+ parser_deploy.add_argument('-I', '--key',
+ help='Specify ssh private key for connection to the target')
strip_opts = parser_deploy.add_mutually_exclusive_group(required=False)
strip_opts.add_argument('-S', '--strip',
@@ -346,5 +370,9 @@ def register_commands(subparsers, context):
parser_undeploy.add_argument('-s', '--show-status', help='Show progress/status output', action='store_true')
parser_undeploy.add_argument('-a', '--all', help='Undeploy all recipes deployed on the target', action='store_true')
parser_undeploy.add_argument('-n', '--dry-run', help='List files to be undeployed only', action='store_true')
+ parser_undeploy.add_argument('-e', '--ssh-exec', help='Executable to use in place of ssh')
parser_undeploy.add_argument('-P', '--port', help='Specify port to use for connection to the target')
+ parser_undeploy.add_argument('-I', '--key',
+ help='Specify ssh private key for connection to the target')
+
parser_undeploy.set_defaults(func=undeploy)
diff --git a/scripts/lib/devtool/export.py b/scripts/lib/devtool/export.py
index 13ee258e7a..01174edae5 100644
--- a/scripts/lib/devtool/export.py
+++ b/scripts/lib/devtool/export.py
@@ -2,18 +2,8 @@
#
# Copyright (C) 2014-2017 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Devtool export plugin"""
import os
@@ -84,7 +74,7 @@ def export(args, config, basepath, workspace):
# if all workspace is excluded, quit
if not len(set(workspace.keys()).difference(set(args.exclude))):
- logger.warn('All recipes in workspace excluded, nothing to export')
+ logger.warning('All recipes in workspace excluded, nothing to export')
return 0
exported = []
diff --git a/scripts/lib/devtool/ide_plugins/__init__.py b/scripts/lib/devtool/ide_plugins/__init__.py
new file mode 100644
index 0000000000..19c2f61c5f
--- /dev/null
+++ b/scripts/lib/devtool/ide_plugins/__init__.py
@@ -0,0 +1,282 @@
+#
+# Copyright (C) 2023-2024 Siemens AG
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+"""Devtool ide-sdk IDE plugin interface definition and helper functions"""
+
+import errno
+import json
+import logging
+import os
+import stat
+from enum import Enum, auto
+from devtool import DevtoolError
+from bb.utils import mkdirhier
+
+logger = logging.getLogger('devtool')
+
+
+class BuildTool(Enum):
+ UNDEFINED = auto()
+ CMAKE = auto()
+ MESON = auto()
+
+ @property
+ def is_c_ccp(self):
+ if self is BuildTool.CMAKE:
+ return True
+ if self is BuildTool.MESON:
+ return True
+ return False
+
+
+class GdbCrossConfig:
+ """Base class defining the GDB configuration generator interface
+
+ Generate a GDB configuration for a binary on the target device.
+    Only one instance per binary is allowed. This allows assigning unique port
+    numbers to all gdbserver instances.
+ """
+ _gdbserver_port_next = 1234
+ _binaries = []
+
+ def __init__(self, image_recipe, modified_recipe, binary, gdbserver_multi=True):
+ self.image_recipe = image_recipe
+ self.modified_recipe = modified_recipe
+ self.gdb_cross = modified_recipe.gdb_cross
+ self.binary = binary
+ if binary in GdbCrossConfig._binaries:
+ raise DevtoolError(
+ "gdbserver config for binary %s is already generated" % binary)
+ GdbCrossConfig._binaries.append(binary)
+ self.script_dir = modified_recipe.ide_sdk_scripts_dir
+ self.gdbinit_dir = os.path.join(self.script_dir, 'gdbinit')
+ self.gdbserver_multi = gdbserver_multi
+ self.binary_pretty = self.binary.replace(os.sep, '-').lstrip('-')
+ self.gdbserver_port = GdbCrossConfig._gdbserver_port_next
+ GdbCrossConfig._gdbserver_port_next += 1
+ self.id_pretty = "%d_%s" % (self.gdbserver_port, self.binary_pretty)
+ # gdbserver start script
+ gdbserver_script_file = 'gdbserver_' + self.id_pretty
+ if self.gdbserver_multi:
+ gdbserver_script_file += "_m"
+ self.gdbserver_script = os.path.join(
+ self.script_dir, gdbserver_script_file)
+ # gdbinit file
+ self.gdbinit = os.path.join(
+ self.gdbinit_dir, 'gdbinit_' + self.id_pretty)
+ # gdb start script
+ self.gdb_script = os.path.join(
+ self.script_dir, 'gdb_' + self.id_pretty)
+
+ def _gen_gdbserver_start_script(self):
+ """Generate a shell command starting the gdbserver on the remote device via ssh
+
+ GDB supports two modes:
+ multi: gdbserver remains running over several debug sessions
+ once: gdbserver terminates after the debugged process terminates
+ """
+ cmd_lines = ['#!/bin/sh']
+ if self.gdbserver_multi:
+ temp_dir = "TEMP_DIR=/tmp/gdbserver_%s; " % self.id_pretty
+ gdbserver_cmd_start = temp_dir
+ gdbserver_cmd_start += "test -f \\$TEMP_DIR/pid && exit 0; "
+ gdbserver_cmd_start += "mkdir -p \\$TEMP_DIR; "
+ gdbserver_cmd_start += "%s --multi :%s > \\$TEMP_DIR/log 2>&1 & " % (
+ self.gdb_cross.gdbserver_path, self.gdbserver_port)
+ gdbserver_cmd_start += "echo \\$! > \\$TEMP_DIR/pid;"
+
+ gdbserver_cmd_stop = temp_dir
+ gdbserver_cmd_stop += "test -f \\$TEMP_DIR/pid && kill \\$(cat \\$TEMP_DIR/pid); "
+ gdbserver_cmd_stop += "rm -rf \\$TEMP_DIR; "
+
+ gdbserver_cmd_l = []
+ gdbserver_cmd_l.append('if [ "$1" = "stop" ]; then')
+ gdbserver_cmd_l.append(' shift')
+ gdbserver_cmd_l.append(" %s %s %s %s 'sh -c \"%s\"'" % (
+ self.gdb_cross.target_device.ssh_sshexec, self.gdb_cross.target_device.ssh_port, self.gdb_cross.target_device.extraoptions, self.gdb_cross.target_device.target, gdbserver_cmd_stop))
+ gdbserver_cmd_l.append('else')
+ gdbserver_cmd_l.append(" %s %s %s %s 'sh -c \"%s\"'" % (
+ self.gdb_cross.target_device.ssh_sshexec, self.gdb_cross.target_device.ssh_port, self.gdb_cross.target_device.extraoptions, self.gdb_cross.target_device.target, gdbserver_cmd_start))
+ gdbserver_cmd_l.append('fi')
+ gdbserver_cmd = os.linesep.join(gdbserver_cmd_l)
+ else:
+ gdbserver_cmd_start = "%s --once :%s %s" % (
+ self.gdb_cross.gdbserver_path, self.gdbserver_port, self.binary)
+ gdbserver_cmd = "%s %s %s %s 'sh -c \"%s\"'" % (
+ self.gdb_cross.target_device.ssh_sshexec, self.gdb_cross.target_device.ssh_port, self.gdb_cross.target_device.extraoptions, self.gdb_cross.target_device.target, gdbserver_cmd_start)
+ cmd_lines.append(gdbserver_cmd)
+ GdbCrossConfig.write_file(self.gdbserver_script, cmd_lines, True)
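+        # In multi mode the generated script roughly expands to (hypothetical
+        # target and port):
+        #   ssh -p 2222 root@192.168.7.2 'sh -c "... gdbserver --multi :1234 ..."'
+        # and calling the script with the "stop" argument kills that gdbserver.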
+
+ def _gen_gdbinit_config(self):
+ """Generate a gdbinit file for this binary and the corresponding gdbserver configuration"""
+ gdbinit_lines = ['# This file is generated by devtool ide-sdk']
+ if self.gdbserver_multi:
+ target_help = '# gdbserver --multi :%d' % self.gdbserver_port
+ remote_cmd = 'target extended-remote'
+ else:
+ target_help = '# gdbserver :%d %s' % (
+ self.gdbserver_port, self.binary)
+ remote_cmd = 'target remote'
+ gdbinit_lines.append('# On the remote target:')
+ gdbinit_lines.append(target_help)
+ gdbinit_lines.append('# On the build machine:')
+ gdbinit_lines.append('# cd ' + self.modified_recipe.real_srctree)
+ gdbinit_lines.append(
+ '# ' + self.gdb_cross.gdb + ' -ix ' + self.gdbinit)
+
+ gdbinit_lines.append('set sysroot ' + self.modified_recipe.d)
+ gdbinit_lines.append('set substitute-path "/usr/include" "' +
+ os.path.join(self.modified_recipe.recipe_sysroot, 'usr', 'include') + '"')
+ # Disable debuginfod for now, the IDE configuration uses rootfs-dbg from the image workdir.
+ gdbinit_lines.append('set debuginfod enabled off')
+ if self.image_recipe.rootfs_dbg:
+ gdbinit_lines.append(
+ 'set solib-search-path "' + self.modified_recipe.solib_search_path_str(self.image_recipe) + '"')
+ # First: Search for sources of this recipe in the workspace folder
+ if self.modified_recipe.pn in self.modified_recipe.target_dbgsrc_dir:
+ gdbinit_lines.append('set substitute-path "%s" "%s"' %
+ (self.modified_recipe.target_dbgsrc_dir, self.modified_recipe.real_srctree))
+ else:
+ logger.error(
+ "TARGET_DBGSRC_DIR must contain the recipe name PN.")
+ # Second: Search for sources of other recipes in the rootfs-dbg
+ if self.modified_recipe.target_dbgsrc_dir.startswith("/usr/src/debug"):
+ gdbinit_lines.append('set substitute-path "/usr/src/debug" "%s"' % os.path.join(
+ self.image_recipe.rootfs_dbg, "usr", "src", "debug"))
+ else:
+ logger.error(
+ "TARGET_DBGSRC_DIR must start with /usr/src/debug.")
+ else:
+ logger.warning(
+ "Cannot setup debug symbols configuration for GDB. IMAGE_GEN_DEBUGFS is not enabled.")
+ gdbinit_lines.append(
+ '%s %s:%d' % (remote_cmd, self.gdb_cross.host, self.gdbserver_port))
+ gdbinit_lines.append('set remote exec-file ' + self.binary)
+ gdbinit_lines.append(
+ 'run ' + os.path.join(self.modified_recipe.d, self.binary))
+
+ GdbCrossConfig.write_file(self.gdbinit, gdbinit_lines)
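+        # The resulting gdbinit ends with lines like (host and port are
+        # examples): "target extended-remote 192.168.7.2:1234" followed by
+        # "set remote exec-file <binary>".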
+
+ def _gen_gdb_start_script(self):
+ """Generate a script starting GDB with the corresponding gdbinit configuration."""
+ cmd_lines = ['#!/bin/sh']
+ cmd_lines.append('cd ' + self.modified_recipe.real_srctree)
+ cmd_lines.append(self.gdb_cross.gdb + ' -ix ' +
+ self.gdbinit + ' "$@"')
+ GdbCrossConfig.write_file(self.gdb_script, cmd_lines, True)
+
+ def initialize(self):
+ self._gen_gdbserver_start_script()
+ self._gen_gdbinit_config()
+ self._gen_gdb_start_script()
+
+ @staticmethod
+ def write_file(script_file, cmd_lines, executable=False):
+ script_dir = os.path.dirname(script_file)
+ mkdirhier(script_dir)
+ with open(script_file, 'w') as script_f:
+ script_f.write(os.linesep.join(cmd_lines))
+ script_f.write(os.linesep)
+ if executable:
+ st = os.stat(script_file)
+ os.chmod(script_file, st.st_mode | stat.S_IEXEC)
+ logger.info("Created: %s" % script_file)
+
+
+class IdeBase:
+ """Base class defining the interface for IDE plugins"""
+
+ def __init__(self):
+ self.ide_name = 'undefined'
+ self.gdb_cross_configs = []
+
+ @classmethod
+ def ide_plugin_priority(cls):
+ """Used to find the default ide handler if --ide is not passed"""
+ return 10
+
+ def setup_shared_sysroots(self, shared_env):
+ logger.warn("Shared sysroot mode is not supported for IDE %s" %
+ self.ide_name)
+
+ def setup_modified_recipe(self, args, image_recipe, modified_recipe):
+ logger.warn("Modified recipe mode is not supported for IDE %s" %
+ self.ide_name)
+
+ def initialize_gdb_cross_configs(self, image_recipe, modified_recipe, gdb_cross_config_class=GdbCrossConfig):
+ binaries = modified_recipe.find_installed_binaries()
+ for binary in binaries:
+ gdb_cross_config = gdb_cross_config_class(
+ image_recipe, modified_recipe, binary)
+ gdb_cross_config.initialize()
+ self.gdb_cross_configs.append(gdb_cross_config)
+
+ @staticmethod
+    def gen_oe_scripts_sym_link(modified_recipe):
+ # create a sym-link from sources to the scripts directory
+ if os.path.isdir(modified_recipe.ide_sdk_scripts_dir):
+ IdeBase.symlink_force(modified_recipe.ide_sdk_scripts_dir,
+ os.path.join(modified_recipe.real_srctree, 'oe-scripts'))
+
+ @staticmethod
+ def update_json_file(json_dir, json_file, update_dict):
+ """Update a json file
+
+        The top-level keys of the existing json file are updated with the
+        entries from update_dict (plain dict.update semantics).
+ """
+ json_path = os.path.join(json_dir, json_file)
+ logger.info("Updating IDE config file: %s (%s)" %
+ (json_file, json_path))
+ if not os.path.exists(json_dir):
+ os.makedirs(json_dir)
+ try:
+ with open(json_path) as f:
+ orig_dict = json.load(f)
+ except json.decoder.JSONDecodeError:
+ logger.info(
+ "Decoding %s failed. Probably because of comments in the json file" % json_path)
+ orig_dict = {}
+ except FileNotFoundError:
+ orig_dict = {}
+ orig_dict.update(update_dict)
+ with open(json_path, 'w') as f:
+ json.dump(orig_dict, f, indent=4)
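+        # e.g. IdeBase.update_json_file('.vscode', 'settings.json',
+        #                               {'cmake.sourceDirectory': '/src'})
+        # merges the given top-level keys into any existing settings.json.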
+
+ @staticmethod
+ def symlink_force(tgt, dst):
+ try:
+ os.symlink(tgt, dst)
+ except OSError as err:
+ if err.errno == errno.EEXIST:
+ if os.readlink(dst) != tgt:
+ os.remove(dst)
+ os.symlink(tgt, dst)
+ else:
+ raise err
+
+
+def get_devtool_deploy_opts(args):
+ """Filter args for devtool deploy-target args"""
+ if not args.target:
+ return None
+ devtool_deploy_opts = [args.target]
+ if args.no_host_check:
+ devtool_deploy_opts += ["-c"]
+ if args.show_status:
+ devtool_deploy_opts += ["-s"]
+ if args.no_preserve:
+ devtool_deploy_opts += ["-p"]
+ if args.no_check_space:
+ devtool_deploy_opts += ["--no-check-space"]
+ if args.ssh_exec:
+ devtool_deploy_opts += ["-e", args.ssh.exec]
+ if args.port:
+ devtool_deploy_opts += ["-P", args.port]
+ if args.key:
+ devtool_deploy_opts += ["-I", args.key]
+ if args.strip is False:
+ devtool_deploy_opts += ["--no-strip"]
+ return devtool_deploy_opts
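+    # Example (hypothetical command line): "devtool ide-sdk ... root@192.168.7.2
+    # -c -P 2222" yields ['root@192.168.7.2', '-c', '-P', '2222'].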
diff --git a/scripts/lib/devtool/ide_plugins/ide_code.py b/scripts/lib/devtool/ide_plugins/ide_code.py
new file mode 100644
index 0000000000..a62b93224e
--- /dev/null
+++ b/scripts/lib/devtool/ide_plugins/ide_code.py
@@ -0,0 +1,463 @@
+#
+# Copyright (C) 2023-2024 Siemens AG
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+"""Devtool ide-sdk IDE plugin for VSCode and VSCodium"""
+
+import json
+import logging
+import os
+import shutil
+from devtool.ide_plugins import BuildTool, IdeBase, GdbCrossConfig, get_devtool_deploy_opts
+
+logger = logging.getLogger('devtool')
+
+
+class GdbCrossConfigVSCode(GdbCrossConfig):
+ def __init__(self, image_recipe, modified_recipe, binary):
+ super().__init__(image_recipe, modified_recipe, binary, False)
+
+ def initialize(self):
+ self._gen_gdbserver_start_script()
+
+
+class IdeVSCode(IdeBase):
+ """Manage IDE configurations for VSCode
+
+ Modified recipe mode:
+ - cmake: use the cmake-preset generated by devtool ide-sdk
+ - meson: meson is called via a wrapper script generated by devtool ide-sdk
+
+ Shared sysroot mode:
+ In shared sysroot mode, the cross tool-chain is exported to the user's global configuration.
+ A workspace cannot be created because there is no recipe that defines how a workspace could
+ be set up.
+ - cmake: adds a cmake-kit to .local/share/CMakeTools/cmake-tools-kits.json
+ The cmake-kit uses the environment script and the tool-chain file
+ generated by meta-ide-support.
+ - meson: Meson needs manual workspace configuration.
+ """
+
+ @classmethod
+ def ide_plugin_priority(cls):
+ """If --ide is not passed this is the default plugin"""
+ if shutil.which('code'):
+ return 100
+ return 0
+
+ def setup_shared_sysroots(self, shared_env):
+ """Expose the toolchain of the shared sysroots SDK"""
+ datadir = shared_env.ide_support.datadir
+ deploy_dir_image = shared_env.ide_support.deploy_dir_image
+ real_multimach_target_sys = shared_env.ide_support.real_multimach_target_sys
+ standalone_sysroot_native = shared_env.build_sysroots.standalone_sysroot_native
+ vscode_ws_path = os.path.join(
+ os.environ['HOME'], '.local', 'share', 'CMakeTools')
+ cmake_kits_path = os.path.join(vscode_ws_path, 'cmake-tools-kits.json')
+ oecmake_generator = "Ninja"
+ env_script = os.path.join(
+ deploy_dir_image, 'environment-setup-' + real_multimach_target_sys)
+
+ if not os.path.isdir(vscode_ws_path):
+ os.makedirs(vscode_ws_path)
+ cmake_kits_old = []
+ if os.path.exists(cmake_kits_path):
+ with open(cmake_kits_path, 'r', encoding='utf-8') as cmake_kits_file:
+ cmake_kits_old = json.load(cmake_kits_file)
+ cmake_kits = cmake_kits_old.copy()
+
+ cmake_kit_new = {
+ "name": "OE " + real_multimach_target_sys,
+ "environmentSetupScript": env_script,
+ "toolchainFile": standalone_sysroot_native + datadir + "/cmake/OEToolchainConfig.cmake",
+ "preferredGenerator": {
+ "name": oecmake_generator
+ }
+ }
+
+ def merge_kit(cmake_kits, cmake_kit_new):
+ i = 0
+ while i < len(cmake_kits):
+ if 'environmentSetupScript' in cmake_kits[i] and \
+ cmake_kits[i]['environmentSetupScript'] == cmake_kit_new['environmentSetupScript']:
+ cmake_kits[i] = cmake_kit_new
+ return
+ i += 1
+ cmake_kits.append(cmake_kit_new)
+ merge_kit(cmake_kits, cmake_kit_new)
+
+ if cmake_kits != cmake_kits_old:
+ logger.info("Updating: %s" % cmake_kits_path)
+ with open(cmake_kits_path, 'w', encoding='utf-8') as cmake_kits_file:
+ json.dump(cmake_kits, cmake_kits_file, indent=4)
+ else:
+ logger.info("Already up to date: %s" % cmake_kits_path)
+
+ cmake_native = os.path.join(
+ shared_env.build_sysroots.standalone_sysroot_native, 'usr', 'bin', 'cmake')
+ if os.path.isfile(cmake_native):
+            logger.info('cmake-kits use the cmake found on the PATH by default. To use the cmake provided by this SDK instead, please add the following line to the ".vscode/settings.json" file: "cmake.cmakePath": "%s"' % cmake_native)
+ else:
+ logger.error("Cannot find cmake native at: %s" % cmake_native)
+
+ def dot_code_dir(self, modified_recipe):
+ return os.path.join(modified_recipe.srctree, '.vscode')
+
+ def __vscode_settings_meson(self, settings_dict, modified_recipe):
+ if modified_recipe.build_tool is not BuildTool.MESON:
+ return
+ settings_dict["mesonbuild.mesonPath"] = modified_recipe.meson_wrapper
+
+ confopts = modified_recipe.mesonopts.split()
+ confopts += modified_recipe.meson_cross_file.split()
+ confopts += modified_recipe.extra_oemeson.split()
+ settings_dict["mesonbuild.configureOptions"] = confopts
+ settings_dict["mesonbuild.buildFolder"] = modified_recipe.b
+
+ def __vscode_settings_cmake(self, settings_dict, modified_recipe):
+ """Add cmake specific settings to settings.json.
+
+ Note: most settings are passed to the cmake preset.
+ """
+ if modified_recipe.build_tool is not BuildTool.CMAKE:
+ return
+ settings_dict["cmake.configureOnOpen"] = True
+ settings_dict["cmake.sourceDirectory"] = modified_recipe.real_srctree
+
+ def vscode_settings(self, modified_recipe, image_recipe):
+ files_excludes = {
+ "**/.git/**": True,
+ "**/oe-logs/**": True,
+ "**/oe-workdir/**": True,
+ "**/source-date-epoch/**": True
+ }
+ python_exclude = [
+ "**/.git/**",
+ "**/oe-logs/**",
+ "**/oe-workdir/**",
+ "**/source-date-epoch/**"
+ ]
+ files_readonly = {
+ modified_recipe.recipe_sysroot + '/**': True,
+ modified_recipe.recipe_sysroot_native + '/**': True,
+ }
+ if image_recipe.rootfs_dbg is not None:
+ files_readonly[image_recipe.rootfs_dbg + '/**'] = True
+ settings_dict = {
+ "files.watcherExclude": files_excludes,
+ "files.exclude": files_excludes,
+ "files.readonlyInclude": files_readonly,
+ "python.analysis.exclude": python_exclude
+ }
+ self.__vscode_settings_cmake(settings_dict, modified_recipe)
+ self.__vscode_settings_meson(settings_dict, modified_recipe)
+
+ settings_file = 'settings.json'
+ IdeBase.update_json_file(
+ self.dot_code_dir(modified_recipe), settings_file, settings_dict)
+
+ def __vscode_extensions_cmake(self, modified_recipe, recommendations):
+ if modified_recipe.build_tool is not BuildTool.CMAKE:
+ return
+ recommendations += [
+ "twxs.cmake",
+ "ms-vscode.cmake-tools",
+ "ms-vscode.cpptools",
+ "ms-vscode.cpptools-extension-pack",
+ "ms-vscode.cpptools-themes"
+ ]
+
+ def __vscode_extensions_meson(self, modified_recipe, recommendations):
+ if modified_recipe.build_tool is not BuildTool.MESON:
+ return
+ recommendations += [
+ 'mesonbuild.mesonbuild',
+ "ms-vscode.cpptools",
+ "ms-vscode.cpptools-extension-pack",
+ "ms-vscode.cpptools-themes"
+ ]
+
+ def vscode_extensions(self, modified_recipe):
+ recommendations = []
+ self.__vscode_extensions_cmake(modified_recipe, recommendations)
+ self.__vscode_extensions_meson(modified_recipe, recommendations)
+ extensions_file = 'extensions.json'
+ IdeBase.update_json_file(
+ self.dot_code_dir(modified_recipe), extensions_file, {"recommendations": recommendations})
+
+ def vscode_c_cpp_properties(self, modified_recipe):
+ properties_dict = {
+ "name": modified_recipe.recipe_id_pretty,
+ }
+ if modified_recipe.build_tool is BuildTool.CMAKE:
+ properties_dict["configurationProvider"] = "ms-vscode.cmake-tools"
+ elif modified_recipe.build_tool is BuildTool.MESON:
+ properties_dict["configurationProvider"] = "mesonbuild.mesonbuild"
+ properties_dict["compilerPath"] = os.path.join(modified_recipe.staging_bindir_toolchain, modified_recipe.cxx.split()[0])
+ else: # no C/C++ build
+ return
+
+ properties_dicts = {
+ "configurations": [
+ properties_dict
+ ],
+ "version": 4
+ }
+ prop_file = 'c_cpp_properties.json'
+ IdeBase.update_json_file(
+ self.dot_code_dir(modified_recipe), prop_file, properties_dicts)
+
+ def vscode_launch_bin_dbg(self, gdb_cross_config):
+ modified_recipe = gdb_cross_config.modified_recipe
+
+ launch_config = {
+ "name": gdb_cross_config.id_pretty,
+ "type": "cppdbg",
+ "request": "launch",
+ "program": os.path.join(modified_recipe.d, gdb_cross_config.binary.lstrip('/')),
+ "stopAtEntry": True,
+ "cwd": "${workspaceFolder}",
+ "environment": [],
+ "externalConsole": False,
+ "MIMode": "gdb",
+ "preLaunchTask": gdb_cross_config.id_pretty,
+ "miDebuggerPath": modified_recipe.gdb_cross.gdb,
+ "miDebuggerServerAddress": "%s:%d" % (modified_recipe.gdb_cross.host, gdb_cross_config.gdbserver_port)
+ }
+
+ # Search for header files in recipe-sysroot.
+ src_file_map = {
+ "/usr/include": os.path.join(modified_recipe.recipe_sysroot, "usr", "include")
+ }
+        # First of all, search for unstripped binaries in the image folder.
+        # These binaries are copied (and optionally stripped) by deploy-target.
+ setup_commands = [
+ {
+ "description": "sysroot",
+ "text": "set sysroot " + modified_recipe.d
+ }
+ ]
+
+ if gdb_cross_config.image_recipe.rootfs_dbg:
+ launch_config['additionalSOLibSearchPath'] = modified_recipe.solib_search_path_str(
+ gdb_cross_config.image_recipe)
+ # First: Search for sources of this recipe in the workspace folder
+ if modified_recipe.pn in modified_recipe.target_dbgsrc_dir:
+ src_file_map[modified_recipe.target_dbgsrc_dir] = "${workspaceFolder}"
+ else:
+ logger.error(
+ "TARGET_DBGSRC_DIR must contain the recipe name PN.")
+ # Second: Search for sources of other recipes in the rootfs-dbg
+ if modified_recipe.target_dbgsrc_dir.startswith("/usr/src/debug"):
+ src_file_map["/usr/src/debug"] = os.path.join(
+ gdb_cross_config.image_recipe.rootfs_dbg, "usr", "src", "debug")
+ else:
+ logger.error(
+ "TARGET_DBGSRC_DIR must start with /usr/src/debug.")
+ else:
+ logger.warning(
+ "Cannot setup debug symbols configuration for GDB. IMAGE_GEN_DEBUGFS is not enabled.")
+
+ launch_config['sourceFileMap'] = src_file_map
+ launch_config['setupCommands'] = setup_commands
+ return launch_config
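+        # The returned dict becomes one "configurations" entry of launch.json,
+        # e.g. named "1234_usr-bin-my-app" for the hypothetical binary
+        # /usr/bin/my-app listening on gdbserver port 1234.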
+
+ def vscode_launch(self, modified_recipe):
+ """GDB Launch configuration for binaries (elf files)"""
+
+ configurations = []
+ for gdb_cross_config in self.gdb_cross_configs:
+ if gdb_cross_config.modified_recipe is modified_recipe:
+ configurations.append(self.vscode_launch_bin_dbg(gdb_cross_config))
+ launch_dict = {
+ "version": "0.2.0",
+ "configurations": configurations
+ }
+ launch_file = 'launch.json'
+ IdeBase.update_json_file(
+ self.dot_code_dir(modified_recipe), launch_file, launch_dict)
+
+ def vscode_tasks_cpp(self, args, modified_recipe):
+ run_install_deploy = modified_recipe.gen_install_deploy_script(args)
+ install_task_name = "install && deploy-target %s" % modified_recipe.recipe_id_pretty
+ tasks_dict = {
+ "version": "2.0.0",
+ "tasks": [
+ {
+ "label": install_task_name,
+ "type": "shell",
+ "command": run_install_deploy,
+ "problemMatcher": []
+ }
+ ]
+ }
+ for gdb_cross_config in self.gdb_cross_configs:
+ if gdb_cross_config.modified_recipe is not modified_recipe:
+ continue
+ tasks_dict['tasks'].append(
+ {
+ "label": gdb_cross_config.id_pretty,
+ "type": "shell",
+ "isBackground": True,
+ "dependsOn": [
+ install_task_name
+ ],
+ "command": gdb_cross_config.gdbserver_script,
+ "problemMatcher": [
+ {
+ "pattern": [
+ {
+ "regexp": ".",
+ "file": 1,
+ "location": 2,
+ "message": 3
+ }
+ ],
+ "background": {
+ "activeOnStart": True,
+ "beginsPattern": ".",
+ "endsPattern": ".",
+ }
+ }
+ ]
+ })
+ tasks_file = 'tasks.json'
+ IdeBase.update_json_file(
+ self.dot_code_dir(modified_recipe), tasks_file, tasks_dict)
+
+ def vscode_tasks_fallback(self, args, modified_recipe):
+ oe_init_dir = modified_recipe.oe_init_dir
+ oe_init = ". %s %s > /dev/null && " % (modified_recipe.oe_init_build_env, modified_recipe.topdir)
+ dt_build = "devtool build "
+ dt_build_label = dt_build + modified_recipe.recipe_id_pretty
+ dt_build_cmd = dt_build + modified_recipe.bpn
+ clean_opt = " --clean"
+ dt_build_clean_label = dt_build + modified_recipe.recipe_id_pretty + clean_opt
+ dt_build_clean_cmd = dt_build + modified_recipe.bpn + clean_opt
+ dt_deploy = "devtool deploy-target "
+ dt_deploy_label = dt_deploy + modified_recipe.recipe_id_pretty
+ dt_deploy_cmd = dt_deploy + modified_recipe.bpn
+ dt_build_deploy_label = "devtool build & deploy-target %s" % modified_recipe.recipe_id_pretty
+ deploy_opts = ' '.join(get_devtool_deploy_opts(args))
+ tasks_dict = {
+ "version": "2.0.0",
+ "tasks": [
+ {
+ "label": dt_build_label,
+ "type": "shell",
+ "command": "bash",
+ "linux": {
+ "options": {
+ "cwd": oe_init_dir
+ }
+ },
+ "args": [
+ "--login",
+ "-c",
+ "%s%s" % (oe_init, dt_build_cmd)
+ ],
+ "problemMatcher": []
+ },
+ {
+ "label": dt_deploy_label,
+ "type": "shell",
+ "command": "bash",
+ "linux": {
+ "options": {
+ "cwd": oe_init_dir
+ }
+ },
+ "args": [
+ "--login",
+ "-c",
+ "%s%s %s" % (
+ oe_init, dt_deploy_cmd, deploy_opts)
+ ],
+ "problemMatcher": []
+ },
+ {
+ "label": dt_build_deploy_label,
+ "dependsOrder": "sequence",
+ "dependsOn": [
+ dt_build_label,
+ dt_deploy_label
+ ],
+ "problemMatcher": [],
+ "group": {
+ "kind": "build",
+ "isDefault": True
+ }
+ },
+ {
+ "label": dt_build_clean_label,
+ "type": "shell",
+ "command": "bash",
+ "linux": {
+ "options": {
+ "cwd": oe_init_dir
+ }
+ },
+ "args": [
+ "--login",
+ "-c",
+ "%s%s" % (oe_init, dt_build_clean_cmd)
+ ],
+ "problemMatcher": []
+ }
+ ]
+ }
+ if modified_recipe.gdb_cross:
+ for gdb_cross_config in self.gdb_cross_configs:
+ if gdb_cross_config.modified_recipe is not modified_recipe:
+ continue
+ tasks_dict['tasks'].append(
+ {
+ "label": gdb_cross_config.id_pretty,
+ "type": "shell",
+ "isBackground": True,
+ "dependsOn": [
+ dt_build_deploy_label
+ ],
+ "command": gdb_cross_config.gdbserver_script,
+ "problemMatcher": [
+ {
+ "pattern": [
+ {
+ "regexp": ".",
+ "file": 1,
+ "location": 2,
+ "message": 3
+ }
+ ],
+ "background": {
+ "activeOnStart": True,
+ "beginsPattern": ".",
+ "endsPattern": ".",
+ }
+ }
+ ]
+ })
+ tasks_file = 'tasks.json'
+ IdeBase.update_json_file(
+ self.dot_code_dir(modified_recipe), tasks_file, tasks_dict)
+
+ def vscode_tasks(self, args, modified_recipe):
+ if modified_recipe.build_tool.is_c_ccp:
+ self.vscode_tasks_cpp(args, modified_recipe)
+ else:
+ self.vscode_tasks_fallback(args, modified_recipe)
+
+ def setup_modified_recipe(self, args, image_recipe, modified_recipe):
+ self.vscode_settings(modified_recipe, image_recipe)
+ self.vscode_extensions(modified_recipe)
+ self.vscode_c_cpp_properties(modified_recipe)
+ if args.target:
+ self.initialize_gdb_cross_configs(
+ image_recipe, modified_recipe, gdb_cross_config_class=GdbCrossConfigVSCode)
+ self.vscode_launch(modified_recipe)
+ self.vscode_tasks(args, modified_recipe)
+
+
+def register_ide_plugin(ide_plugins):
+ ide_plugins['code'] = IdeVSCode
diff --git a/scripts/lib/devtool/ide_plugins/ide_none.py b/scripts/lib/devtool/ide_plugins/ide_none.py
new file mode 100644
index 0000000000..f106c5a026
--- /dev/null
+++ b/scripts/lib/devtool/ide_plugins/ide_none.py
@@ -0,0 +1,53 @@
+#
+# Copyright (C) 2023-2024 Siemens AG
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+"""Devtool ide-sdk generic IDE plugin"""
+
+import os
+import logging
+from devtool.ide_plugins import IdeBase, GdbCrossConfig
+
+logger = logging.getLogger('devtool')
+
+
+class IdeNone(IdeBase):
+ """Generate some generic helpers for other IDEs
+
+ Modified recipe mode:
+ Generate some helper scripts for remote debugging with GDB
+
+ Shared sysroot mode:
+ A wrapper for bitbake meta-ide-support and bitbake build-sysroots
+ """
+
+ def __init__(self):
+ super().__init__()
+
+ def setup_shared_sysroots(self, shared_env):
+ real_multimach_target_sys = shared_env.ide_support.real_multimach_target_sys
+ deploy_dir_image = shared_env.ide_support.deploy_dir_image
+ env_script = os.path.join(
+ deploy_dir_image, 'environment-setup-' + real_multimach_target_sys)
+ logger.info(
+ "To use this SDK please source this: %s" % env_script)
+
+ def setup_modified_recipe(self, args, image_recipe, modified_recipe):
+ """generate some helper scripts and config files
+
+ - Execute the do_install task
+ - Execute devtool deploy-target
+ - Generate a gdbinit file per executable
+ - Generate the oe-scripts sym-link
+ """
+ script_path = modified_recipe.gen_install_deploy_script(args)
+ logger.info("Created: %s" % script_path)
+
+ self.initialize_gdb_cross_configs(image_recipe, modified_recipe)
+
+        IdeBase.gen_oe_scripts_sym_link(modified_recipe)
+
+
+def register_ide_plugin(ide_plugins):
+ ide_plugins['none'] = IdeNone
diff --git a/scripts/lib/devtool/ide_sdk.py b/scripts/lib/devtool/ide_sdk.py
new file mode 100755
index 0000000000..7807b322b3
--- /dev/null
+++ b/scripts/lib/devtool/ide_sdk.py
@@ -0,0 +1,1070 @@
+# Development tool - ide-sdk command plugin
+#
+# Copyright (C) 2023-2024 Siemens AG
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+"""Devtool ide-sdk plugin"""
+
+import json
+import logging
+import os
+import re
+import shutil
+import stat
+import subprocess
+import sys
+from argparse import RawTextHelpFormatter
+from enum import Enum
+
+import scriptutils
+import bb
+from devtool import exec_build_env_command, setup_tinfoil, check_workspace_recipe, DevtoolError, parse_recipe
+from devtool.standard import get_real_srctree
+from devtool.ide_plugins import BuildTool
+
+
+logger = logging.getLogger('devtool')
+
+# dict of classes derived from IdeBase
+ide_plugins = {}
+
+
+class DevtoolIdeMode(Enum):
+ """Different modes are supported by the ide-sdk plugin.
+
+ The enum might be extended by more advanced modes in the future. Some ideas:
+ - auto: modified if all recipes are modified, shared if none of the recipes is modified.
+ - mixed: modified mode for modified recipes, shared mode for all other recipes.
+ """
+
+ modified = 'modified'
+ shared = 'shared'
+
+
+class TargetDevice:
+ """SSH remote login parameters"""
+
+ def __init__(self, args):
+ self.extraoptions = ''
+ if args.no_host_check:
+ self.extraoptions += '-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'
+ self.ssh_sshexec = 'ssh'
+ if args.ssh_exec:
+ self.ssh_sshexec = args.ssh_exec
+ self.ssh_port = ''
+ if args.port:
+ self.ssh_port = "-p %s" % args.port
+ if args.key:
+ self.extraoptions += ' -i %s' % args.key
+
+ self.target = args.target
+ target_sp = args.target.split('@')
+ if len(target_sp) == 1:
+ self.login = ""
+ self.host = target_sp[0]
+ elif len(target_sp) == 2:
+ self.login = target_sp[0]
+ self.host = target_sp[1]
+ else:
+ logger.error("Invalid target argument: %s" % args.target)
+
+
+class RecipeNative:
+ """Base class for calling bitbake to provide a -native recipe"""
+
+ def __init__(self, name, target_arch=None):
+ self.name = name
+ self.target_arch = target_arch
+ self.bootstrap_tasks = [self.name + ':do_addto_recipe_sysroot']
+ self.staging_bindir_native = None
+ self.target_sys = None
+ self.__native_bin = None
+
+ def _initialize(self, config, workspace, tinfoil):
+ """Get the parsed recipe"""
+ recipe_d = parse_recipe(
+ config, tinfoil, self.name, appends=True, filter_workspace=False)
+ if not recipe_d:
+ raise DevtoolError("Parsing %s recipe failed" % self.name)
+ self.staging_bindir_native = os.path.realpath(
+ recipe_d.getVar('STAGING_BINDIR_NATIVE'))
+ self.target_sys = recipe_d.getVar('TARGET_SYS')
+ return recipe_d
+
+ def initialize(self, config, workspace, tinfoil):
+ """Basic initialization that can be overridden by a derived class"""
+ self._initialize(config, workspace, tinfoil)
+
+ @property
+ def native_bin(self):
+ if not self.__native_bin:
+ raise DevtoolError("native binary name is not defined.")
+ return self.__native_bin
+
+
+class RecipeGdbCross(RecipeNative):
+ """Handle handle gdb-cross on the host and the gdbserver on the target device"""
+
+ def __init__(self, args, target_arch, target_device):
+ super().__init__('gdb-cross-' + target_arch, target_arch)
+ self.target_device = target_device
+ self.gdb = None
+ self.gdbserver_port_next = int(args.gdbserver_port_start)
+ self.config_db = {}
+
+ def __find_gdbserver(self, config, tinfoil):
+ """Absolute path of the gdbserver"""
+ recipe_d_gdb = parse_recipe(
+ config, tinfoil, 'gdb', appends=True, filter_workspace=False)
+ if not recipe_d_gdb:
+ raise DevtoolError("Parsing gdb recipe failed")
+ return os.path.join(recipe_d_gdb.getVar('bindir'), 'gdbserver')
+
+ def initialize(self, config, workspace, tinfoil):
+ super()._initialize(config, workspace, tinfoil)
+ gdb_bin = self.target_sys + '-gdb'
+ gdb_path = os.path.join(
+ self.staging_bindir_native, self.target_sys, gdb_bin)
+ self.gdb = gdb_path
+ self.gdbserver_path = self.__find_gdbserver(config, tinfoil)
+
+ @property
+ def host(self):
+ return self.target_device.host
+
+
+class RecipeImage:
+ """Handle some image recipe related properties
+
+ Most workflows require firmware that runs on the target device.
+ This firmware must be consistent with the setup of the host system.
+ In particular, the debug symbols must be compatible. For this, the
+ rootfs must be created as part of the SDK.
+ """
+
+ def __init__(self, name):
+ self.combine_dbg_image = False
+ self.gdbserver_missing = False
+ self.name = name
+ self.rootfs = None
+ self.__rootfs_dbg = None
+ self.bootstrap_tasks = [self.name + ':do_build']
+
+ def initialize(self, config, tinfoil):
+ image_d = parse_recipe(
+ config, tinfoil, self.name, appends=True, filter_workspace=False)
+ if not image_d:
+ raise DevtoolError(
+ "Parsing image recipe %s failed" % self.name)
+
+ self.combine_dbg_image = bb.data.inherits_class(
+ 'image-combined-dbg', image_d)
+
+ workdir = image_d.getVar('WORKDIR')
+ self.rootfs = os.path.join(workdir, 'rootfs')
+ if image_d.getVar('IMAGE_GEN_DEBUGFS') == "1":
+ self.__rootfs_dbg = os.path.join(workdir, 'rootfs-dbg')
+
+ self.gdbserver_missing = 'gdbserver' not in image_d.getVar(
+ 'IMAGE_INSTALL')
+
+ @property
+ def debug_support(self):
+ return bool(self.rootfs_dbg)
+
+ @property
+ def rootfs_dbg(self):
+ if self.__rootfs_dbg and os.path.isdir(self.__rootfs_dbg):
+ return self.__rootfs_dbg
+ return None
+
+
+class RecipeMetaIdeSupport:
+ """For the shared sysroots mode meta-ide-support is needed
+
+ For use cases where just a cross tool-chain is required but
+ no recipe is used, devtool ide-sdk abstracts calling bitbake meta-ide-support
+ and bitbake build-sysroots. This also allows to expose the cross-toolchains
+ to IDEs. For example VSCode support different tool-chains with e.g. cmake-kits.
+ """
+
+ def __init__(self):
+ self.bootstrap_tasks = ['meta-ide-support:do_build']
+ self.topdir = None
+ self.datadir = None
+ self.deploy_dir_image = None
+ self.build_sys = None
+ # From toolchain-scripts
+ self.real_multimach_target_sys = None
+
+ def initialize(self, config, tinfoil):
+ meta_ide_support_d = parse_recipe(
+ config, tinfoil, 'meta-ide-support', appends=True, filter_workspace=False)
+ if not meta_ide_support_d:
+ raise DevtoolError("Parsing meta-ide-support recipe failed")
+
+ self.topdir = meta_ide_support_d.getVar('TOPDIR')
+ self.datadir = meta_ide_support_d.getVar('datadir')
+ self.deploy_dir_image = meta_ide_support_d.getVar(
+ 'DEPLOY_DIR_IMAGE')
+ self.build_sys = meta_ide_support_d.getVar('BUILD_SYS')
+ self.real_multimach_target_sys = meta_ide_support_d.getVar(
+ 'REAL_MULTIMACH_TARGET_SYS')
+
+
+class RecipeBuildSysroots:
+ """For the shared sysroots mode build-sysroots is needed"""
+
+ def __init__(self):
+ self.standalone_sysroot = None
+ self.standalone_sysroot_native = None
+ self.bootstrap_tasks = [
+ 'build-sysroots:do_build_target_sysroot',
+ 'build-sysroots:do_build_native_sysroot'
+ ]
+
+ def initialize(self, config, tinfoil):
+ build_sysroots_d = parse_recipe(
+ config, tinfoil, 'build-sysroots', appends=True, filter_workspace=False)
+ if not build_sysroots_d:
+ raise DevtoolError("Parsing build-sysroots recipe failed")
+ self.standalone_sysroot = build_sysroots_d.getVar(
+ 'STANDALONE_SYSROOT')
+ self.standalone_sysroot_native = build_sysroots_d.getVar(
+ 'STANDALONE_SYSROOT_NATIVE')
+
+
+class SharedSysrootsEnv:
+ """Handle the shared sysroots based workflow
+
+ Support the workflow with just a tool-chain without a recipe.
+ It's basically like:
+ bitbake some-dependencies
+ bitbake meta-ide-support
+ bitbake build-sysroots
+ Use the environment-* file found in the deploy folder
+ """
+
+ def __init__(self):
+ self.ide_support = None
+ self.build_sysroots = None
+
+ def initialize(self, ide_support, build_sysroots):
+ self.ide_support = ide_support
+ self.build_sysroots = build_sysroots
+
+ def setup_ide(self, ide):
+        ide.setup_shared_sysroots(self)
+
+
+class RecipeNotModified:
+ """Handling of recipes added to the Direct DSK shared sysroots."""
+
+ def __init__(self, name):
+ self.name = name
+ self.bootstrap_tasks = [name + ':do_populate_sysroot']
+
+
+class RecipeModified:
+ """Handling of recipes in the workspace created by devtool modify"""
+ OE_INIT_BUILD_ENV = 'oe-init-build-env'
+
+ VALID_BASH_ENV_NAME_CHARS = re.compile(r"^[a-zA-Z0-9_]*$")
+
+ def __init__(self, name):
+ self.name = name
+ self.bootstrap_tasks = [name + ':do_install']
+ self.gdb_cross = None
+ # workspace
+ self.real_srctree = None
+ self.srctree = None
+ self.ide_sdk_dir = None
+ self.ide_sdk_scripts_dir = None
+ self.bbappend = None
+ # recipe variables from d.getVar
+ self.b = None
+ self.base_libdir = None
+ self.bblayers = None
+ self.bpn = None
+ self.d = None
+ self.fakerootcmd = None
+ self.fakerootenv = None
+ self.libdir = None
+ self.max_process = None
+ self.package_arch = None
+ self.package_debug_split_style = None
+ self.path = None
+ self.pn = None
+ self.recipe_sysroot = None
+ self.recipe_sysroot_native = None
+ self.staging_incdir = None
+ self.strip_cmd = None
+ self.target_arch = None
+ self.target_dbgsrc_dir = None
+ self.topdir = None
+ self.workdir = None
+ self.recipe_id = None
+ # replicate bitbake build environment
+ self.exported_vars = None
+ self.cmd_compile = None
+ self.__oe_init_dir = None
+ # main build tool used by this recipe
+ self.build_tool = BuildTool.UNDEFINED
+ # build_tool = cmake
+ self.oecmake_generator = None
+ self.cmake_cache_vars = None
+ # build_tool = meson
+ self.meson_buildtype = None
+ self.meson_wrapper = None
+ self.mesonopts = None
+ self.extra_oemeson = None
+ self.meson_cross_file = None
+
+ def initialize(self, config, workspace, tinfoil):
+ recipe_d = parse_recipe(
+ config, tinfoil, self.name, appends=True, filter_workspace=False)
+ if not recipe_d:
+ raise DevtoolError("Parsing %s recipe failed" % self.name)
+
+ # Verify this recipe is built as externalsrc setup by devtool modify
+ workspacepn = check_workspace_recipe(
+ workspace, self.name, bbclassextend=True)
+ self.srctree = workspace[workspacepn]['srctree']
+ # Need to grab this here in case the source is within a subdirectory
+ self.real_srctree = get_real_srctree(
+ self.srctree, recipe_d.getVar('S'), recipe_d.getVar('WORKDIR'))
+ self.bbappend = workspace[workspacepn]['bbappend']
+
+ self.ide_sdk_dir = os.path.join(
+ config.workspace_path, 'ide-sdk', self.name)
+ if os.path.exists(self.ide_sdk_dir):
+ shutil.rmtree(self.ide_sdk_dir)
+ self.ide_sdk_scripts_dir = os.path.join(self.ide_sdk_dir, 'scripts')
+
+ self.b = recipe_d.getVar('B')
+ self.base_libdir = recipe_d.getVar('base_libdir')
+ self.bblayers = recipe_d.getVar('BBLAYERS').split()
+ self.bpn = recipe_d.getVar('BPN')
+ self.cxx = recipe_d.getVar('CXX')
+ self.d = recipe_d.getVar('D')
+ self.fakerootcmd = recipe_d.getVar('FAKEROOTCMD')
+ self.fakerootenv = recipe_d.getVar('FAKEROOTENV')
+ self.libdir = recipe_d.getVar('libdir')
+ self.max_process = int(recipe_d.getVar(
+ "BB_NUMBER_THREADS") or os.cpu_count() or 1)
+ self.package_arch = recipe_d.getVar('PACKAGE_ARCH')
+ self.package_debug_split_style = recipe_d.getVar(
+ 'PACKAGE_DEBUG_SPLIT_STYLE')
+ self.path = recipe_d.getVar('PATH')
+ self.pn = recipe_d.getVar('PN')
+ self.recipe_sysroot = os.path.realpath(
+ recipe_d.getVar('RECIPE_SYSROOT'))
+ self.recipe_sysroot_native = os.path.realpath(
+ recipe_d.getVar('RECIPE_SYSROOT_NATIVE'))
+ self.staging_bindir_toolchain = os.path.realpath(
+ recipe_d.getVar('STAGING_BINDIR_TOOLCHAIN'))
+ self.staging_incdir = os.path.realpath(
+ recipe_d.getVar('STAGING_INCDIR'))
+ self.strip_cmd = recipe_d.getVar('STRIP')
+ self.target_arch = recipe_d.getVar('TARGET_ARCH')
+ self.target_dbgsrc_dir = recipe_d.getVar('TARGET_DBGSRC_DIR')
+ self.topdir = recipe_d.getVar('TOPDIR')
+ self.workdir = os.path.realpath(recipe_d.getVar('WORKDIR'))
+
+ self.__init_exported_variables(recipe_d)
+
+ if bb.data.inherits_class('cmake', recipe_d):
+ self.oecmake_generator = recipe_d.getVar('OECMAKE_GENERATOR')
+ self.__init_cmake_preset_cache(recipe_d)
+ self.build_tool = BuildTool.CMAKE
+ elif bb.data.inherits_class('meson', recipe_d):
+ self.meson_buildtype = recipe_d.getVar('MESON_BUILDTYPE')
+ self.mesonopts = recipe_d.getVar('MESONOPTS')
+ self.extra_oemeson = recipe_d.getVar('EXTRA_OEMESON')
+ self.meson_cross_file = recipe_d.getVar('MESON_CROSS_FILE')
+ self.build_tool = BuildTool.MESON
+
+ # Recipe ID is the identifier for IDE config sections
+ self.recipe_id = self.bpn + "-" + self.package_arch
+ self.recipe_id_pretty = self.bpn + ": " + self.package_arch
+
+ def append_to_bbappend(self, append_text):
+ with open(self.bbappend, 'a') as bbap:
+ bbap.write(append_text)
+
+ def remove_from_bbappend(self, append_text):
+ with open(self.bbappend, 'r') as bbap:
+ text = bbap.read()
+ new_text = text.replace(append_text, '')
+ with open(self.bbappend, 'w') as bbap:
+ bbap.write(new_text)
+
+ @staticmethod
+ def is_valid_shell_variable(var):
+ """Skip strange shell variables like systemd
+
+ prevent from strange bugs because of strange variables which
+ are not used in this context but break various tools.
+ """
+ if RecipeModified.VALID_BASH_ENV_NAME_CHARS.match(var):
+ bb.debug(1, "ignoring variable: %s" % var)
+ return True
+ return False
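+        # e.g. 'CFLAGS' or 'LD_LIBRARY_PATH' pass this check, while names such
+        # as 'BASH_FUNC_foo%%' or 'foo-bar' are skipped.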
+
+ def debug_build_config(self, args):
+ """Explicitely set for example CMAKE_BUILD_TYPE to Debug if not defined otherwise"""
+ if self.build_tool is BuildTool.CMAKE:
+ append_text = os.linesep + \
+ 'OECMAKE_ARGS:append = " -DCMAKE_BUILD_TYPE:STRING=Debug"' + os.linesep
+ if args.debug_build_config and not 'CMAKE_BUILD_TYPE' in self.cmake_cache_vars:
+ self.cmake_cache_vars['CMAKE_BUILD_TYPE'] = {
+ "type": "STRING",
+ "value": "Debug",
+ }
+ self.append_to_bbappend(append_text)
+ elif 'CMAKE_BUILD_TYPE' in self.cmake_cache_vars:
+ del self.cmake_cache_vars['CMAKE_BUILD_TYPE']
+ self.remove_from_bbappend(append_text)
+ elif self.build_tool is BuildTool.MESON:
+ append_text = os.linesep + 'MESON_BUILDTYPE = "debug"' + os.linesep
+ if args.debug_build_config and self.meson_buildtype != "debug":
+                self.mesonopts = self.mesonopts.replace(
+                    '--buildtype ' + self.meson_buildtype, '--buildtype debug')
+ self.append_to_bbappend(append_text)
+ elif self.meson_buildtype == "debug":
+                self.mesonopts = self.mesonopts.replace(
+                    '--buildtype debug', '--buildtype plain')
+ self.remove_from_bbappend(append_text)
+ elif args.debug_build_config:
+            logger.warning(
+                "--debug-build-config is not implemented for this build tool yet.")
+
+ def solib_search_path(self, image):
+ """Search for debug symbols in the rootfs and rootfs-dbg
+
+ The debug symbols of shared libraries which are provided by other packages
+ are grabbed from the -dbg packages in the rootfs-dbg.
+
+        But most cross debugging tools like gdb, perf, and systemtap need to
+        find the executable/library first and, via its debuglink note, locate
+        the corresponding symbols file. Therefore the library paths from the
+        rootfs are added as well.
+
+ Note: For the devtool modified recipe compiled from the IDE, the debug
+ symbols are taken from the unstripped binaries in the image folder.
+ Also, devtool deploy-target takes the files from the image folder.
+        Debug symbols in the image folder refer to the corresponding source files
+ with absolute paths of the build machine. Debug symbols found in the
+ rootfs-dbg are relocated and contain paths which refer to the source files
+ installed on the target device e.g. /usr/src/...
+ """
+ base_libdir = self.base_libdir.lstrip('/')
+ libdir = self.libdir.lstrip('/')
+ so_paths = [
+ # debug symbols for package_debug_split_style: debug-with-srcpkg or .debug
+ os.path.join(image.rootfs_dbg, base_libdir, ".debug"),
+ os.path.join(image.rootfs_dbg, libdir, ".debug"),
+ # debug symbols for package_debug_split_style: debug-file-directory
+ os.path.join(image.rootfs_dbg, "usr", "lib", "debug"),
+
+ # The binaries are required as well, the debug packages are not enough
+ # With image-combined-dbg.bbclass the binaries are copied into rootfs-dbg
+ os.path.join(image.rootfs_dbg, base_libdir),
+ os.path.join(image.rootfs_dbg, libdir),
+ # Without image-combined-dbg.bbclass the binaries are only in rootfs.
+ # Note: Stepping into source files located in rootfs-dbg does not
+ # work without image-combined-dbg.bbclass yet.
+ os.path.join(image.rootfs, base_libdir),
+ os.path.join(image.rootfs, libdir)
+ ]
+ return so_paths
+
+ def solib_search_path_str(self, image):
+ """Return a : separated list of paths usable by GDB's set solib-search-path"""
+ return ':'.join(self.solib_search_path(image))
+
+ def __init_exported_variables(self, d):
+ """Find all variables with export flag set.
+
+        This allows generating IDE configurations which compile with the same
+ environment as bitbake does. That's at least a reasonable default behavior.
+ """
+ exported_vars = {}
+
+ vars = (key for key in d.keys() if not key.startswith(
+ "__") and not d.getVarFlag(key, "func", False))
+ for var in vars:
+ func = d.getVarFlag(var, "func", False)
+ if d.getVarFlag(var, 'python', False) and func:
+ continue
+ export = d.getVarFlag(var, "export", False)
+ unexport = d.getVarFlag(var, "unexport", False)
+ if not export and not unexport and not func:
+ continue
+ if unexport:
+ continue
+
+ val = d.getVar(var)
+ if val is None:
+ continue
+ if set(var) & set("-.{}+"):
+                logger.warning(
+                    "Found invalid character in variable name %s", str(var))
+ continue
+ varExpanded = d.expand(var)
+ val = str(val)
+
+ if not RecipeModified.is_valid_shell_variable(varExpanded):
+ continue
+
+ if func:
+ code_line = "line: {0}, file: {1}\n".format(
+ d.getVarFlag(var, "lineno", False),
+ d.getVarFlag(var, "filename", False))
+ val = val.rstrip('\n')
+ logger.warn("Warning: exported shell function %s() is not exported (%s)" %
+ (varExpanded, code_line))
+ continue
+
+ if export:
+ exported_vars[varExpanded] = val.strip()
+ continue
+
+ self.exported_vars = exported_vars
+
+ def __init_cmake_preset_cache(self, d):
+ """Get the arguments passed to cmake
+
+        Replicate the cmake configure arguments with all details so that the
+        build folder can be shared between bitbake and the SDK.
+ """
+ site_file = os.path.join(self.workdir, 'site-file.cmake')
+ if os.path.exists(site_file):
+ print("Warning: site-file.cmake is not supported")
+
+ cache_vars = {}
+ oecmake_args = d.getVar('OECMAKE_ARGS').split()
+ extra_oecmake = d.getVar('EXTRA_OECMAKE').split()
+ for param in oecmake_args + extra_oecmake:
+ d_pref = "-D"
+ if param.startswith(d_pref):
+ param = param[len(d_pref):]
+ else:
+ print("Error: expected a -D")
+ param_s = param.split('=', 1)
+ param_nt = param_s[0].split(':', 1)
+
+ def handle_undefined_variable(var):
+ if var.startswith('${') and var.endswith('}'):
+ return ''
+ else:
+ return var
+ # Example: FOO=ON
+ if len(param_nt) == 1:
+ cache_vars[param_s[0]] = handle_undefined_variable(param_s[1])
+ # Example: FOO:PATH=/tmp
+ elif len(param_nt) == 2:
+ cache_vars[param_nt[0]] = {
+ "type": param_nt[1],
+ "value": handle_undefined_variable(param_s[1]),
+ }
+ else:
+ print("Error: cannot parse %s" % param)
+ self.cmake_cache_vars = cache_vars
+
+ def cmake_preset(self):
+ """Create a preset for cmake that mimics how bitbake calls cmake"""
+ toolchain_file = os.path.join(self.workdir, 'toolchain.cmake')
+ cmake_executable = os.path.join(
+ self.recipe_sysroot_native, 'usr', 'bin', 'cmake')
+ self.cmd_compile = cmake_executable + " --build --preset " + self.recipe_id
+
+ preset_dict_configure = {
+ "name": self.recipe_id,
+ "displayName": self.recipe_id_pretty,
+ "description": "Bitbake build environment for the recipe %s compiled for %s" % (self.bpn, self.package_arch),
+ "binaryDir": self.b,
+ "generator": self.oecmake_generator,
+ "toolchainFile": toolchain_file,
+ "cacheVariables": self.cmake_cache_vars,
+ "environment": self.exported_vars,
+ "cmakeExecutable": cmake_executable
+ }
+
+ preset_dict_build = {
+ "name": self.recipe_id,
+ "displayName": self.recipe_id_pretty,
+ "description": "Bitbake build environment for the recipe %s compiled for %s" % (self.bpn, self.package_arch),
+ "configurePreset": self.recipe_id,
+ "inheritConfigureEnvironment": True
+ }
+
+ preset_dict_test = {
+ "name": self.recipe_id,
+ "displayName": self.recipe_id_pretty,
+ "description": "Bitbake build environment for the recipe %s compiled for %s" % (self.bpn, self.package_arch),
+ "configurePreset": self.recipe_id,
+ "inheritConfigureEnvironment": True
+ }
+
+ preset_dict = {
+ "version": 3, # cmake 3.21, backward compatible with kirkstone
+ "configurePresets": [preset_dict_configure],
+ "buildPresets": [preset_dict_build],
+ "testPresets": [preset_dict_test]
+ }
+
+ # Finally write the json file
+ json_file = 'CMakeUserPresets.json'
+ json_path = os.path.join(self.real_srctree, json_file)
+ logger.info("Updating CMake preset: %s (%s)" % (json_file, json_path))
+ if not os.path.exists(self.real_srctree):
+ os.makedirs(self.real_srctree)
+ try:
+ with open(json_path) as f:
+ orig_dict = json.load(f)
+ except json.decoder.JSONDecodeError:
+ logger.info(
+ "Decoding %s failed. Probably because of comments in the json file" % json_path)
+ orig_dict = {}
+ except FileNotFoundError:
+ orig_dict = {}
+
+ # Add or update the presets for the recipe and keep other presets
+ for k, v in preset_dict.items():
+ if isinstance(v, list):
+ update_preset = v[0]
+ preset_added = False
+ if k in orig_dict:
+ for index, orig_preset in enumerate(orig_dict[k]):
+ if 'name' in orig_preset:
+ if orig_preset['name'] == update_preset['name']:
+ logger.debug("Updating preset: %s" %
+ orig_preset['name'])
+ orig_dict[k][index] = update_preset
+ preset_added = True
+ break
+ else:
+ logger.debug("keeping preset: %s" %
+ orig_preset['name'])
+ else:
+ logger.warn("preset without a name found")
+ if not preset_added:
+ if k not in orig_dict:
+ orig_dict[k] = []
+ orig_dict[k].append(update_preset)
+ logger.debug("Added preset: %s" %
+ update_preset['name'])
+ else:
+ orig_dict[k] = v
+
+ with open(json_path, 'w') as f:
+ json.dump(orig_dict, f, indent=4)
+
+ def gen_meson_wrapper(self):
+ """Generate a wrapper script to call meson with the cross environment"""
+ bb.utils.mkdirhier(self.ide_sdk_scripts_dir)
+ meson_wrapper = os.path.join(self.ide_sdk_scripts_dir, 'meson')
+ meson_real = os.path.join(
+ self.recipe_sysroot_native, 'usr', 'bin', 'meson.real')
+ with open(meson_wrapper, 'w') as mwrap:
+ mwrap.write("#!/bin/sh" + os.linesep)
+ for var, val in self.exported_vars.items():
+ mwrap.write('export %s="%s"' % (var, val) + os.linesep)
+ mwrap.write("unset CC CXX CPP LD AR NM STRIP" + os.linesep)
+ private_temp = os.path.join(self.b, "meson-private", "tmp")
+ mwrap.write('mkdir -p "%s"' % private_temp + os.linesep)
+ mwrap.write('export TMPDIR="%s"' % private_temp + os.linesep)
+ mwrap.write('exec "%s" "$@"' % meson_real + os.linesep)
+ st = os.stat(meson_wrapper)
+ os.chmod(meson_wrapper, st.st_mode | stat.S_IEXEC)
+ self.meson_wrapper = meson_wrapper
+ self.cmd_compile = meson_wrapper + " compile -C " + self.b
+
+ def which(self, executable):
+ bin_path = shutil.which(executable, path=self.path)
+ if not bin_path:
+ raise DevtoolError(
+ 'Cannot find %s. Probably the recipe %s is not built yet.' % (executable, self.bpn))
+ return bin_path
+
+ @staticmethod
+ def is_elf_file(file_path):
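+ # ELF files start with the 4-byte magic \x7fELF; reading only this
+ # prefix is enough to classify the file cheaply.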
+ with open(file_path, "rb") as f:
+ data = f.read(4)
+ if data == b'\x7fELF':
+ return True
+ return False
+
+ def find_installed_binaries(self):
+ """find all executable elf files in the image directory"""
+ binaries = []
+ d_len = len(self.d)
+ re_so = re.compile(r'.*\.so[.0-9]*$')
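+ # Shared libraries (*.so, *.so.1, ...) are filtered out so that only
+ # executable ELF binaries remain.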
+ for root, _, files in os.walk(self.d, followlinks=False):
+ for file in files:
+ abs_name = os.path.join(root, file)
+ if os.path.islink(abs_name):
+ continue
+ if re_so.match(file):
+ continue
+ if os.access(abs_name, os.X_OK) and RecipeModified.is_elf_file(abs_name):
+ binaries.append(abs_name[d_len:])
+ return sorted(binaries)
+
+ def gen_delete_package_dirs(self):
+ """delete folders of package tasks
+
+ This is a workaround for an issue with recipes having their sources
+ downloaded as file://
+ This otherwise breaks pseudo, for example:
+ path mismatch [3 links]: ino 79147802 db
+ .../build/tmp/.../cmake-example/1.0/package/usr/src/debug/
+ cmake-example/1.0-r0/oe-local-files/cpp-example-lib.cpp
+ .../build/workspace/sources/cmake-example/oe-local-files/cpp-example-lib.cpp
+ Since the files are outdated anyway, let's delete them (also from pseudo's db) to work around this issue.
+ """
+ cmd_lines = ['#!/bin/sh']
+
+ # Set up the appropriate environment
+ newenv = dict(os.environ)
+ for varvalue in self.fakerootenv.split():
+ if '=' in varvalue:
+ splitval = varvalue.split('=', 1)
+ newenv[splitval[0]] = splitval[1]
+
+ # Replicate the environment variables from bitbake
+ for var, val in newenv.items():
+ if not RecipeModified.is_valid_shell_variable(var):
+ continue
+ cmd_lines.append('%s="%s"' % (var, val))
+ cmd_lines.append('export %s' % var)
+
+ # Delete the folders
+ pkg_dirs = ' '.join([os.path.join(self.workdir, d) for d in [
+ "package", "packages-split", "pkgdata", "sstate-install-package", "debugsources.list", "*.spec"]])
+ cmd = "%s rm -rf %s" % (self.fakerootcmd, pkg_dirs)
+ cmd_lines.append('%s || { echo "%s failed"; exit 1; }' % (cmd, cmd))
+
+ return self.write_script(cmd_lines, 'delete_package_dirs')
+
+ def gen_deploy_target_script(self, args):
+ """Generate a script which does what devtool deploy-target does
+
+ This script is much quicker than devtool deploy-target because it does
+ not need to start a bitbake server. All information from tinfoil is
+ hard-coded in the generated script.
+ """
+ cmd_lines = ['#!%s' % str(sys.executable)]
+ cmd_lines.append('import sys')
+ cmd_lines.append('devtool_sys_path = %s' % str(sys.path))
+ cmd_lines.append('devtool_sys_path.reverse()')
+ cmd_lines.append('for p in devtool_sys_path:')
+ cmd_lines.append(' if p not in sys.path:')
+ cmd_lines.append(' sys.path.insert(0, p)')
+ cmd_lines.append('from devtool.deploy import deploy_no_d')
+ args_filter = ['debug', 'dry_run', 'key', 'no_check_space', 'no_host_check',
+ 'no_preserve', 'port', 'show_status', 'ssh_exec', 'strip', 'target']
+ filtered_args_dict = {key: value for key, value in vars(
+ args).items() if key in args_filter}
+ cmd_lines.append('filtered_args_dict = %s' % str(filtered_args_dict))
+ cmd_lines.append('class Dict2Class(object):')
+ cmd_lines.append(' def __init__(self, my_dict):')
+ cmd_lines.append(' for key in my_dict:')
+ cmd_lines.append(' setattr(self, key, my_dict[key])')
+ cmd_lines.append('filtered_args = Dict2Class(filtered_args_dict)')
+ cmd_lines.append(
+ 'setattr(filtered_args, "recipename", "%s")' % self.bpn)
+ cmd_lines.append('deploy_no_d("%s", "%s", "%s", "%s", "%s", "%s", %d, "%s", "%s", filtered_args)' %
+ (self.d, self.workdir, self.path, self.strip_cmd,
+ self.libdir, self.base_libdir, self.max_process,
+ self.fakerootcmd, self.fakerootenv))
+ return self.write_script(cmd_lines, 'deploy_target')
+
+ def gen_install_deploy_script(self, args):
+ """Generate a script which does install and deploy"""
+ cmd_lines = ['#!/bin/bash']
+
+ cmd_lines.append(self.gen_delete_package_dirs())
+
+ # . oe-init-build-env $BUILDDIR
+ # Note: Sourcing scripts with arguments requires bash
+ cmd_lines.append('cd "%s" || { echo "cd %s failed"; exit 1; }' % (
+ self.oe_init_dir, self.oe_init_dir))
+ cmd_lines.append('. "%s" "%s" || { echo ". %s %s failed"; exit 1; }' % (
+ self.oe_init_build_env, self.topdir, self.oe_init_build_env, self.topdir))
+
+ # bitbake -c install
+ cmd_lines.append(
+ 'bitbake %s -c install --force || { echo "bitbake %s -c install --force failed"; exit 1; }' % (self.bpn, self.bpn))
+
+ # Self contained devtool deploy-target
+ cmd_lines.append(self.gen_deploy_target_script(args))
+
+ return self.write_script(cmd_lines, 'install_and_deploy')
+
+ def write_script(self, cmd_lines, script_name):
+ bb.utils.mkdirhier(self.ide_sdk_scripts_dir)
+ script_name_arch = script_name + '_' + self.recipe_id
+ script_file = os.path.join(self.ide_sdk_scripts_dir, script_name_arch)
+ with open(script_file, 'w') as script_f:
+ script_f.write(os.linesep.join(cmd_lines))
+ st = os.stat(script_file)
+ os.chmod(script_file, st.st_mode | stat.S_IEXEC)
+ return script_file
+
+ @property
+ def oe_init_build_env(self):
+ """Find the oe-init-build-env used for this setup"""
+ oe_init_dir = self.oe_init_dir
+ if oe_init_dir:
+ return os.path.join(oe_init_dir, RecipeModified.OE_INIT_BUILD_ENV)
+ return None
+
+ @property
+ def oe_init_dir(self):
+ """Find the directory where the oe-init-build-env is located
+
+ Assumption: There might be a layer with higher priority than poky
+ which provides the oe-init-build-env script in the layer's toplevel folder.
+ """
+ if not self.__oe_init_dir:
+ for layer in reversed(self.bblayers):
+ result = subprocess.run(
+ ['git', 'rev-parse', '--show-toplevel'], cwd=layer, capture_output=True)
+ if result.returncode == 0:
+ oe_init_dir = result.stdout.decode('utf-8').strip()
+ oe_init_path = os.path.join(
+ oe_init_dir, RecipeModified.OE_INIT_BUILD_ENV)
+ if os.path.exists(oe_init_path):
+ logger.debug("Using %s from: %s" % (
+ RecipeModified.OE_INIT_BUILD_ENV, oe_init_path))
+ self.__oe_init_dir = oe_init_dir
+ break
+ if not self.__oe_init_dir:
+ logger.error("Cannot find the bitbake top level folder")
+ return self.__oe_init_dir
+
+
+def ide_setup(args, config, basepath, workspace):
+ """Generate the IDE configuration for the workspace"""
+
+ # Explicitly passing some special recipes does not make sense
+ for recipe in args.recipenames:
+ if recipe in ['meta-ide-support', 'build-sysroots']:
+ raise DevtoolError("Invalid recipe: %s." % recipe)
+
+ # Collect information about tasks which need to be bitbaked
+ bootstrap_tasks = []
+ bootstrap_tasks_late = []
+ tinfoil = setup_tinfoil(config_only=False, basepath=basepath)
+ try:
+ # define mode depending on recipes which need to be processed
+ recipes_image_names = []
+ recipes_modified_names = []
+ recipes_other_names = []
+ for recipe in args.recipenames:
+ try:
+ check_workspace_recipe(
+ workspace, recipe, bbclassextend=True)
+ recipes_modified_names.append(recipe)
+ except DevtoolError:
+ recipe_d = parse_recipe(
+ config, tinfoil, recipe, appends=True, filter_workspace=False)
+ if not recipe_d:
+ raise DevtoolError("Parsing recipe %s failed" % recipe)
+ if bb.data.inherits_class('image', recipe_d):
+ recipes_image_names.append(recipe)
+ else:
+ recipes_other_names.append(recipe)
+
+ invalid_params = False
+ if args.mode == DevtoolIdeMode.shared:
+ if len(recipes_modified_names):
+ logger.error("In shared sysroots mode modified recipes %s cannot be handled." % str(
+ recipes_modified_names))
+ invalid_params = True
+ if args.mode == DevtoolIdeMode.modified:
+ if len(recipes_other_names):
+ logger.error("Only in shared sysroots mode not modified recipes %s can be handled." % str(
+ recipes_other_names))
+ invalid_params = True
+ if len(recipes_image_names) != 1:
+ logger.error(
+ "One image recipe is required as the rootfs for the remote development.")
+ invalid_params = True
+ for modified_recipe_name in recipes_modified_names:
+ if modified_recipe_name.startswith('nativesdk-') or modified_recipe_name.endswith('-native'):
+ logger.error(
+ "Only cross compiled recipes are support. %s is not cross." % modified_recipe_name)
+ invalid_params = True
+
+ if invalid_params:
+ raise DevtoolError("Invalid parameters are passed.")
+
+ # For the shared sysroots mode, add all dependencies of all the images to the sysroots
+ # For the modified mode provide one rootfs and the corresponding debug symbols via rootfs-dbg
+ recipes_images = []
+ for recipes_image_name in recipes_image_names:
+ logger.info("Using image: %s" % recipes_image_name)
+ recipe_image = RecipeImage(recipes_image_name)
+ recipe_image.initialize(config, tinfoil)
+ bootstrap_tasks += recipe_image.bootstrap_tasks
+ recipes_images.append(recipe_image)
+
+ # Provide a Direct SDK with shared sysroots
+ recipes_not_modified = []
+ if args.mode == DevtoolIdeMode.shared:
+ ide_support = RecipeMetaIdeSupport()
+ ide_support.initialize(config, tinfoil)
+ bootstrap_tasks += ide_support.bootstrap_tasks
+
+ logger.info("Adding %s to the Direct SDK sysroots." %
+ str(recipes_other_names))
+ for recipe_name in recipes_other_names:
+ recipe_not_modified = RecipeNotModified(recipe_name)
+ bootstrap_tasks += recipe_not_modified.bootstrap_tasks
+ recipes_not_modified.append(recipe_not_modified)
+
+ build_sysroots = RecipeBuildSysroots()
+ build_sysroots.initialize(config, tinfoil)
+ bootstrap_tasks_late += build_sysroots.bootstrap_tasks
+ shared_env = SharedSysrootsEnv()
+ shared_env.initialize(ide_support, build_sysroots)
+
+ recipes_modified = []
+ if args.mode == DevtoolIdeMode.modified:
+ logger.info("Setting up workspaces for modified recipe: %s" %
+ str(recipes_modified_names))
+ gdbs_cross = {}
+ for recipe_name in recipes_modified_names:
+ recipe_modified = RecipeModified(recipe_name)
+ recipe_modified.initialize(config, workspace, tinfoil)
+ bootstrap_tasks += recipe_modified.bootstrap_tasks
+ recipes_modified.append(recipe_modified)
+
+ if recipe_modified.target_arch not in gdbs_cross:
+ target_device = TargetDevice(args)
+ gdb_cross = RecipeGdbCross(
+ args, recipe_modified.target_arch, target_device)
+ gdb_cross.initialize(config, workspace, tinfoil)
+ bootstrap_tasks += gdb_cross.bootstrap_tasks
+ gdbs_cross[recipe_modified.target_arch] = gdb_cross
+ recipe_modified.gdb_cross = gdbs_cross[recipe_modified.target_arch]
+
+ finally:
+ tinfoil.shutdown()
+
+ if not args.skip_bitbake:
+ bb_cmd = 'bitbake '
+ if args.bitbake_k:
+ bb_cmd += "-k "
+ bb_cmd_early = bb_cmd + ' '.join(bootstrap_tasks)
+ exec_build_env_command(
+ config.init_path, basepath, bb_cmd_early, watch=True)
+ if bootstrap_tasks_late:
+ bb_cmd_late = bb_cmd + ' '.join(bootstrap_tasks_late)
+ exec_build_env_command(
+ config.init_path, basepath, bb_cmd_late, watch=True)
+
+ for recipe_image in recipes_images:
+ if recipe_image.gdbserver_missing:
+ logger.warning(
+ "gdbserver not installed in image %s. Remote debugging will not be available" % recipe_image)
+
+ if recipe_image.combine_dbg_image is False:
+ logger.warning(
+ 'IMAGE_CLASSES += "image-combined-dbg" is missing for image %s. Remote debugging will not find debug symbols from rootfs-dbg.' % recipe_image)
+
+ # Instantiate the active IDE plugin
+ ide = ide_plugins[args.ide]()
+ if args.mode == DevtoolIdeMode.shared:
+ ide.setup_shared_sysroots(shared_env)
+ elif args.mode == DevtoolIdeMode.modified:
+ for recipe_modified in recipes_modified:
+ if recipe_modified.build_tool is BuildTool.CMAKE:
+ recipe_modified.cmake_preset()
+ if recipe_modified.build_tool is BuildTool.MESON:
+ recipe_modified.gen_meson_wrapper()
+ ide.setup_modified_recipe(
+ args, recipe_image, recipe_modified)
+ else:
+ raise DevtoolError("Must not end up here.")
+
+
+def register_commands(subparsers, context):
+ """Register devtool subcommands from this plugin"""
+
+ global ide_plugins
+
+ # Search for IDE plugins in all sub-folders named ide_plugins where devtool searches for plugins.
+ pluginpaths = [os.path.join(path, 'ide_plugins')
+ for path in context.pluginpaths]
+ ide_plugin_modules = []
+ for pluginpath in pluginpaths:
+ scriptutils.load_plugins(logger, ide_plugin_modules, pluginpath)
+
+ for ide_plugin_module in ide_plugin_modules:
+ if hasattr(ide_plugin_module, 'register_ide_plugin'):
+ ide_plugin_module.register_ide_plugin(ide_plugins)
+ # Sort plugins according to their priority. The first entry is the default IDE plugin.
+ ide_plugins = dict(sorted(ide_plugins.items(),
+ key=lambda p: p[1].ide_plugin_priority(), reverse=True))
+
+ parser_ide_sdk = subparsers.add_parser('ide-sdk', group='working', order=50, formatter_class=RawTextHelpFormatter,
+ help='Setup the SDK and configure the IDE')
+ parser_ide_sdk.add_argument(
+ 'recipenames', nargs='+', help='Generate an IDE configuration suitable to work on the given recipes.\n'
+ 'Depending on the --mode parameter different types of SDKs and IDE configurations are generated.')
+ parser_ide_sdk.add_argument(
+ '-m', '--mode', type=DevtoolIdeMode, default=DevtoolIdeMode.modified,
+ help='Different SDK types are supported:\n'
+ '- "' + DevtoolIdeMode.modified.name + '" (default):\n'
+ ' devtool modify creates a workspace to work on the source code of a recipe.\n'
+ ' devtool ide-sdk builds the SDK and generates the IDE configuration(s) in the workspace directory(ies)\n'
+ ' Usage example:\n'
+ ' devtool modify cmake-example\n'
+ ' devtool ide-sdk cmake-example core-image-minimal\n'
+ ' Start the IDE in the workspace folder\n'
+ ' At least one devtool modified recipe plus one image recipe are required:\n'
+ ' The image recipe is used to generate the target image and the remote debug configuration.\n'
+ '- "' + DevtoolIdeMode.shared.name + '":\n'
+ ' Usage example:\n'
+ ' devtool ide-sdk -m ' + DevtoolIdeMode.shared.name + ' recipe(s)\n'
+ ' This command generates a cross-toolchain as well as the corresponding shared sysroot directories.\n'
+ ' To use this toolchain the environment-* file found in the deploy..image folder needs to be sourced into a shell.\n'
+ ' In case of VSCode and cmake the toolchain is also exposed as a cmake-kit')
+ default_ide = list(ide_plugins.keys())[0]
+ parser_ide_sdk.add_argument(
+ '-i', '--ide', choices=ide_plugins.keys(), default=default_ide,
+ help='Setup the configuration for this IDE (default: %s)' % default_ide)
+ parser_ide_sdk.add_argument(
+ '-t', '--target', default='root@192.168.7.2',
+ help='Live target machine running an ssh server: user@hostname.')
+ parser_ide_sdk.add_argument(
+ '-G', '--gdbserver-port-start', default="1234", help='Port where gdbserver is listening.')
+ parser_ide_sdk.add_argument(
+ '-c', '--no-host-check', help='Disable ssh host key checking', action='store_true')
+ parser_ide_sdk.add_argument(
+ '-e', '--ssh-exec', help='Executable to use in place of ssh')
+ parser_ide_sdk.add_argument(
+ '-P', '--port', help='Specify ssh port to use for connection to the target')
+ parser_ide_sdk.add_argument(
+ '-I', '--key', help='Specify ssh private key for connection to the target')
+ parser_ide_sdk.add_argument(
+ '--skip-bitbake', help='Generate IDE configuration but skip calling bitbake to update the SDK.', action='store_true')
+ parser_ide_sdk.add_argument(
+ '-k', '--bitbake-k', help='Pass -k parameter to bitbake', action='store_true')
+ parser_ide_sdk.add_argument(
+ '--no-strip', help='Do not strip executables prior to deploy', dest='strip', action='store_false')
+ parser_ide_sdk.add_argument(
+ '-n', '--dry-run', help='List files to be deployed only', action='store_true')
+ parser_ide_sdk.add_argument(
+ '-s', '--show-status', help='Show progress/status output', action='store_true')
+ parser_ide_sdk.add_argument(
+ '-p', '--no-preserve', help='Do not preserve existing files', action='store_true')
+ parser_ide_sdk.add_argument(
+ '--no-check-space', help='Do not check for available space before deploying', action='store_true')
+ parser_ide_sdk.add_argument(
+ '--debug-build-config', help='Use debug build flags, for example set CMAKE_BUILD_TYPE=Debug', action='store_true')
+ parser_ide_sdk.set_defaults(func=ide_setup)
diff --git a/scripts/lib/devtool/import.py b/scripts/lib/devtool/import.py
index c13a180d14..6829851669 100644
--- a/scripts/lib/devtool/import.py
+++ b/scripts/lib/devtool/import.py
@@ -2,18 +2,8 @@
#
# Copyright (C) 2014-2017 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Devtool import plugin"""
import os
@@ -81,7 +71,7 @@ def devimport(args, config, basepath, workspace):
break
else:
non_importables.append(fn)
- logger.warn('No recipe to append %s.bbapppend, skipping' % fn)
+ logger.warning('No recipe to append %s.bbappend, skipping' % fn)
# Extract
imported = []
@@ -104,9 +94,9 @@ def devimport(args, config, basepath, workspace):
try:
tar.extract(member, path=config.workspace_path)
except PermissionError as pe:
- logger.warn(pe)
+ logger.warning(pe)
else:
- logger.warn('File already present. Use --overwrite/-o to overwrite it: %s' % member.name)
+ logger.warning('File already present. Use --overwrite/-o to overwrite it: %s' % member.name)
continue
else:
tar.extract(member, path=config.workspace_path)
@@ -129,7 +119,7 @@ def devimport(args, config, basepath, workspace):
if imported:
logger.info('Imported recipes into workspace %s: %s' % (config.workspace_path, ', '.join(imported)))
else:
- logger.warn('No recipes imported into the workspace')
+ logger.warning('No recipes imported into the workspace')
return 0
diff --git a/scripts/lib/devtool/menuconfig.py b/scripts/lib/devtool/menuconfig.py
new file mode 100644
index 0000000000..18daef30c3
--- /dev/null
+++ b/scripts/lib/devtool/menuconfig.py
@@ -0,0 +1,81 @@
+# OpenEmbedded Development tool - menuconfig command plugin
+#
+# Copyright (C) 2018 Xilinx
+# Written by: Chandana Kalluri <ckalluri@xilinx.com>
+#
+# SPDX-License-Identifier: MIT
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+"""Devtool menuconfig plugin"""
+
+import os
+import bb
+import logging
+import argparse
+import re
+import glob
+from devtool import setup_tinfoil, parse_recipe, DevtoolError, standard, exec_build_env_command
+from devtool import check_workspace_recipe
+logger = logging.getLogger('devtool')
+
+def menuconfig(args, config, basepath, workspace):
+ """Entry point for the devtool 'menuconfig' subcommand"""
+
+ rd = ""
+ kconfigpath = ""
+ pn_src = ""
+ localfilesdir = ""
+ workspace_dir = ""
+ tinfoil = setup_tinfoil(basepath=basepath)
+ try:
+ rd = parse_recipe(config, tinfoil, args.component, appends=True, filter_workspace=False)
+ if not rd:
+ return 1
+
+ check_workspace_recipe(workspace, args.component)
+ pn = rd.getVar('PN')
+
+ if not rd.getVarFlag('do_menuconfig','task'):
+ raise DevtoolError("This recipe does not support menuconfig option")
+
+ workspace_dir = os.path.join(config.workspace_path,'sources')
+ kconfigpath = rd.getVar('B')
+ pn_src = os.path.join(workspace_dir,pn)
+
+ # Check whether the oe-local-files directory exists
+ localfilesdir = os.path.join(pn_src,'oe-local-files')
+ if not os.path.exists(localfilesdir):
+ bb.utils.mkdirhier(localfilesdir)
+ # Add gitignore to ensure source tree is clean
+ gitignorefile = os.path.join(localfilesdir,'.gitignore')
+ with open(gitignorefile, 'w') as f:
+ f.write('# Ignore local files, by default. Remove this file if you want to commit the directory to Git\n')
+ f.write('*\n')
+
+ finally:
+ tinfoil.shutdown()
+
+ logger.info('Launching menuconfig')
+ exec_build_env_command(config.init_path, basepath, 'bitbake -c menuconfig %s' % pn, watch=True)
+ fragment = os.path.join(localfilesdir, 'devtool-fragment.cfg')
+ standard._create_kconfig_diff(pn_src, rd, fragment)
+
+ return 0
+
+def register_commands(subparsers, context):
+ """register devtool subcommands from this plugin"""
+ parser_menuconfig = subparsers.add_parser('menuconfig',help='Alter build-time configuration for a recipe', description='Launches the make menuconfig command (for recipes where do_menuconfig is available), allowing users to make changes to the build-time configuration. Creates a config fragment corresponding to changes made.', group='advanced')
+ parser_menuconfig.add_argument('component', help='component to alter the config of')
+ parser_menuconfig.set_defaults(func=menuconfig,fixed_setup=context.fixed_setup)
diff --git a/scripts/lib/devtool/package.py b/scripts/lib/devtool/package.py
index af9e8f15f5..c2367342c3 100644
--- a/scripts/lib/devtool/package.py
+++ b/scripts/lib/devtool/package.py
@@ -2,18 +2,8 @@
#
# Copyright (C) 2014-2015 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Devtool plugin containing the package subcommands"""
import os
diff --git a/scripts/lib/devtool/runqemu.py b/scripts/lib/devtool/runqemu.py
index e26cf28c2f..ead978aabc 100644
--- a/scripts/lib/devtool/runqemu.py
+++ b/scripts/lib/devtool/runqemu.py
@@ -2,18 +2,8 @@
#
# Copyright (C) 2015 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Devtool runqemu plugin"""
diff --git a/scripts/lib/devtool/sdk.py b/scripts/lib/devtool/sdk.py
index 4616753797..9aefd7e354 100644
--- a/scripts/lib/devtool/sdk.py
+++ b/scripts/lib/devtool/sdk.py
@@ -2,18 +2,8 @@
#
# Copyright (C) 2015-2016 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import os
import subprocess
@@ -217,7 +207,7 @@ def sdk_update(args, config, basepath, workspace):
if not sstate_mirrors:
with open(os.path.join(conf_dir, 'site.conf'), 'a') as f:
f.write('SCONF_VERSION = "%s"\n' % site_conf_version)
- f.write('SSTATE_MIRRORS_append = " file://.* %s/sstate-cache/PATH \\n "\n' % updateserver)
+ f.write('SSTATE_MIRRORS:append = " file://.* %s/sstate-cache/PATH"\n' % updateserver)
finally:
shutil.rmtree(tmpsdk_dir)
@@ -310,7 +300,8 @@ def sdk_install(args, config, basepath, workspace):
return 2
try:
- exec_build_env_command(config.init_path, basepath, 'bitbake build-sysroots', watch=True)
+ exec_build_env_command(config.init_path, basepath, 'bitbake build-sysroots -c build_native_sysroot', watch=True)
+ exec_build_env_command(config.init_path, basepath, 'bitbake build-sysroots -c build_target_sysroot', watch=True)
except bb.process.ExecutionError as e:
raise DevtoolError('Failed to bitbake build-sysroots:\n%s' % (str(e)))
diff --git a/scripts/lib/devtool/search.py b/scripts/lib/devtool/search.py
index b4f209b7e3..70b81cac5e 100644
--- a/scripts/lib/devtool/search.py
+++ b/scripts/lib/devtool/search.py
@@ -2,18 +2,8 @@
#
# Copyright (C) 2015 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Devtool search plugin"""
@@ -72,10 +62,11 @@ def search(args, config, basepath, workspace):
with open(os.path.join(pkgdata_dir, 'runtime', pkg), 'r') as f:
for line in f:
if ': ' in line:
- splitline = line.split(':', 1)
+ splitline = line.split(': ', 1)
key = splitline[0]
value = splitline[1].strip()
- if key in ['PKG_%s' % pkg, 'DESCRIPTION', 'FILES_INFO'] or key.startswith('FILERPROVIDES_'):
+ key = key.replace(":" + pkg, "")
+ if key in ['PKG', 'DESCRIPTION', 'FILES_INFO', 'FILERPROVIDES']:
if keyword_rc.search(value):
match = True
break
diff --git a/scripts/lib/devtool/standard.py b/scripts/lib/devtool/standard.py
index a1e8e1d322..bd009f44b1 100644
--- a/scripts/lib/devtool/standard.py
+++ b/scripts/lib/devtool/standard.py
@@ -2,18 +2,8 @@
#
# Copyright (C) 2014-2017 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Devtool standard plugins"""
import os
@@ -66,7 +56,7 @@ def add(args, config, basepath, workspace):
args.srctree = args.recipename
args.recipename = None
elif os.path.isdir(args.recipename):
- logger.warn('Ambiguous argument "%s" - assuming you mean it to be the recipe name' % args.recipename)
+ logger.warning('Ambiguous argument "%s" - assuming you mean it to be the recipe name' % args.recipename)
if not args.fetchuri:
if args.srcrev:
@@ -82,7 +72,7 @@ def add(args, config, basepath, workspace):
if args.fetchuri:
raise DevtoolError('URI specified as positional argument as well as -f/--fetch')
else:
- logger.warn('-f/--fetch option is deprecated - you can now simply specify the URL to fetch as a positional argument instead')
+ logger.warning('-f/--fetch option is deprecated - you can now simply specify the URL to fetch as a positional argument instead')
args.fetchuri = args.fetch
if args.recipename:
@@ -155,8 +145,10 @@ def add(args, config, basepath, workspace):
extracmdopts += ' --src-subdir "%s"' % args.src_subdir
if args.autorev:
extracmdopts += ' -a'
- if args.fetch_dev:
- extracmdopts += ' --fetch-dev'
+ if args.npm_dev:
+ extracmdopts += ' --npm-dev'
+ if args.no_pypi:
+ extracmdopts += ' --no-pypi'
if args.mirrors:
extracmdopts += ' --mirrors'
if args.srcrev:
@@ -217,7 +209,7 @@ def add(args, config, basepath, workspace):
raise DevtoolError('Command \'%s\' did not create any recipe file:\n%s' % (e.command, e.stdout))
attic_recipe = os.path.join(config.workspace_path, 'attic', recipename, os.path.basename(recipefile))
if os.path.exists(attic_recipe):
- logger.warn('A modified recipe from a previous invocation exists in %s - you may wish to move this over the top of the new recipe if you had changes in it that you want to continue with' % attic_recipe)
+ logger.warning('A modified recipe from a previous invocation exists in %s - you may wish to move this over the top of the new recipe if you had changes in it that you want to continue with' % attic_recipe)
finally:
if tmpsrcdir and os.path.exists(tmpsrcdir):
shutil.rmtree(tmpsrcdir)
@@ -244,10 +236,14 @@ def add(args, config, basepath, workspace):
if args.fetchuri and not args.no_git:
setup_git_repo(srctree, args.version, 'devtool', d=tinfoil.config_data)
- initial_rev = None
+ initial_rev = {}
if os.path.exists(os.path.join(srctree, '.git')):
(stdout, _) = bb.process.run('git rev-parse HEAD', cwd=srctree)
- initial_rev = stdout.rstrip()
+ initial_rev["."] = stdout.rstrip()
+ (stdout, _) = bb.process.run('git submodule --quiet foreach --recursive \'echo `git rev-parse HEAD` $PWD\'', cwd=srctree)
+ for line in stdout.splitlines():
+ (rev, submodule) = line.split()
+ initial_rev[os.path.relpath(submodule, srctree)] = rev
if args.src_subdir:
srctree = os.path.join(srctree, args.src_subdir)
@@ -261,23 +257,20 @@ def add(args, config, basepath, workspace):
if b_is_s:
f.write('EXTERNALSRC_BUILD = "%s"\n' % srctree)
if initial_rev:
- f.write('\n# initial_rev: %s\n' % initial_rev)
+ for key, value in initial_rev.items():
+ f.write('\n# initial_rev %s: %s\n' % (key, value))
if args.binary:
- f.write('do_install_append() {\n')
+ f.write('do_install:append() {\n')
f.write(' rm -rf ${D}/.git\n')
f.write(' rm -f ${D}/singletask.lock\n')
f.write('}\n')
if bb.data.inherits_class('npm', rd):
- f.write('do_install_append() {\n')
- f.write(' # Remove files added to source dir by devtool/externalsrc\n')
- f.write(' rm -f ${NPM_INSTALLDIR}/singletask.lock\n')
- f.write(' rm -rf ${NPM_INSTALLDIR}/.git\n')
- f.write(' rm -rf ${NPM_INSTALLDIR}/oe-local-files\n')
- f.write(' for symlink in ${EXTERNALSRC_SYMLINKS} ; do\n')
- f.write(' rm -f ${NPM_INSTALLDIR}/${symlink%%:*}\n')
- f.write(' done\n')
+ f.write('python do_configure:append() {\n')
+ f.write(' pkgdir = d.getVar("NPM_PACKAGE")\n')
+ f.write(' lockfile = os.path.join(pkgdir, "singletask.lock")\n')
+ f.write(' bb.utils.remove(lockfile)\n')
f.write('}\n')
# Check if the new layer provides recipes whose priorities have been
@@ -295,7 +288,7 @@ def add(args, config, basepath, workspace):
with open(layerconf_file, 'a') as f:
f.write('%s = "%s"\n' % (preferred_provider, recipe_name))
else:
- logger.warn('Set \'%s\' in order to use the recipe' % preferred_provider)
+ logger.warning('Set \'%s\' in order to use the recipe' % preferred_provider)
break
_add_md5(config, recipename, appendfile)
@@ -332,10 +325,6 @@ def _check_compatible_recipe(pn, d):
raise DevtoolError("The %s recipe is a packagegroup, and therefore is "
"not supported by this tool" % pn, 4)
- if bb.data.inherits_class('meta', d):
- raise DevtoolError("The %s recipe is a meta-recipe, and therefore is "
- "not supported by this tool" % pn, 4)
-
if bb.data.inherits_class('externalsrc', d) and d.getVar('EXTERNALSRC'):
# Not an incompatibility error per se, so we don't pass the error code
raise DevtoolError("externalsrc is currently enabled for the %s "
@@ -371,7 +360,7 @@ def _move_file(src, dst, dry_run_outdir=None, base_outdir=None):
bb.utils.mkdirhier(dst_d)
shutil.move(src, dst)
-def _copy_file(src, dst, dry_run_outdir=None):
+def _copy_file(src, dst, dry_run_outdir=None, base_outdir=None):
"""Copy a file. Creates all the directory components of destination path."""
dry_run_suffix = ' (dry-run)' if dry_run_outdir else ''
logger.debug('Copying %s to %s%s' % (src, dst, dry_run_suffix))
@@ -471,11 +460,37 @@ def sync(args, config, basepath, workspace):
finally:
tinfoil.shutdown()
+def symlink_oelocal_files_srctree(rd, srctree):
+ import oe.patch
+ if os.path.abspath(rd.getVar('S')) == os.path.abspath(rd.getVar('WORKDIR')):
+ # If recipe extracts to ${WORKDIR}, symlink the files into the srctree
+ # (otherwise the recipe won't build as expected)
+ local_files_dir = os.path.join(srctree, 'oe-local-files')
+ addfiles = []
+ for root, _, files in os.walk(local_files_dir):
+ relpth = os.path.relpath(root, local_files_dir)
+ if relpth != '.':
+ bb.utils.mkdirhier(os.path.join(srctree, relpth))
+ for fn in files:
+ if fn == '.gitignore':
+ continue
+ destpth = os.path.join(srctree, relpth, fn)
+ if os.path.exists(destpth):
+ os.unlink(destpth)
+ if relpth != '.':
+ back_relpth = os.path.relpath(local_files_dir, root)
+ os.symlink('%s/oe-local-files/%s/%s' % (back_relpth, relpth, fn), destpth)
+ else:
+ os.symlink('oe-local-files/%s' % fn, destpth)
+ addfiles.append(os.path.join(relpth, fn))
+ if addfiles:
+ oe.patch.GitApplyTree.commitIgnored("Add local file symlinks", dir=srctree, files=addfiles, d=rd)
def _extract_source(srctree, keep_temp, devbranch, sync, config, basepath, workspace, fixed_setup, d, tinfoil, no_overrides=False):
"""Extract sources of a recipe"""
import oe.recipeutils
import oe.patch
+ import oe.path
pn = d.getVar('PN')
@@ -507,14 +522,20 @@ def _extract_source(srctree, keep_temp, devbranch, sync, config, basepath, works
history = d.varhistory.variable('SRC_URI')
for event in history:
if not 'flag' in event:
- if event['op'].startswith(('_append[', '_prepend[')):
- extra_overrides.append(event['op'].split('[')[1].split(']')[0])
+ if event['op'].startswith((':append[', ':prepend[')):
+ override = event['op'].split('[')[1].split(']')[0]
+ if not override.startswith('pn-'):
+ extra_overrides.append(override)
+ # We want to remove duplicate overrides. If a recipe had multiple
+ # SRC_URI_override += values it would cause multiple instances of
+ # overrides. This doesn't play nicely with things like creating a
+ # branch for every instance of DEVTOOL_EXTRA_OVERRIDES.
+ extra_overrides = list(set(extra_overrides))
if extra_overrides:
logger.info('SRC_URI contains some conditional appends/prepends - will create branches to represent these')
initial_rev = None
- appendexisted = False
recipefile = d.getVar('FILE')
appendfile = recipe_to_append(recipefile, config)
is_kernel_yocto = bb.data.inherits_class('kernel-yocto', d)
@@ -549,6 +570,9 @@ def _extract_source(srctree, keep_temp, devbranch, sync, config, basepath, works
logger.debug('writing append file %s' % appendfile)
with open(appendfile, 'a') as f:
f.write('###--- _extract_source\n')
+ f.write('deltask do_recipe_qa\n')
+ f.write('deltask do_recipe_qa_setscene\n')
+ f.write('ERROR_QA:remove = "patch-fuzz"\n')
f.write('DEVTOOL_TEMPDIR = "%s"\n' % tempdir)
f.write('DEVTOOL_DEVBRANCH = "%s"\n' % devbranch)
if not is_kernel_yocto:
@@ -566,13 +590,24 @@ def _extract_source(srctree, keep_temp, devbranch, sync, config, basepath, works
preservestampfile = os.path.join(sstate_manifests, 'preserve-stamps')
with open(preservestampfile, 'w') as f:
f.write(d.getVar('STAMP'))
+ tinfoil.modified_files()
try:
- if bb.data.inherits_class('kernel-yocto', d):
+ if is_kernel_yocto:
# We need to generate the kernel config
task = 'do_configure'
else:
task = 'do_patch'
+ if 'noexec' in (d.getVarFlags(task, False) or []) or 'task' not in (d.getVarFlags(task, False) or []):
+ logger.info('The %s recipe has %s disabled. Running only '
+ 'do_configure task dependencies' % (pn, task))
+
+ if 'depends' in d.getVarFlags('do_configure', False):
+ pn = d.getVarFlags('do_configure', False)['depends']
+ pn = pn.replace('${PV}', d.getVar('PV'))
+ pn = pn.replace('${COMPILERDEP}', d.getVar('COMPILERDEP'))
+ task = None
+
# Run the fetch + unpack tasks
res = tinfoil.build_targets(pn,
task,
@@ -584,6 +619,17 @@ def _extract_source(srctree, keep_temp, devbranch, sync, config, basepath, works
if not res:
raise DevtoolError('Extracting source for %s failed' % pn)
+ if not is_kernel_yocto and ('noexec' in (d.getVarFlags('do_patch', False) or []) or 'task' not in (d.getVarFlags('do_patch', False) or [])):
+ workshareddir = d.getVar('S')
+ if os.path.islink(srctree):
+ os.unlink(srctree)
+
+ os.symlink(workshareddir, srctree)
+
+ # The initial_rev file is created by the devtool_post_unpack function, which will not be executed
+ # if the do_unpack/do_patch tasks are disabled, so we have to directly indicate that source extraction was successful
+ return True, True
+
try:
with open(os.path.join(tempdir, 'initial_rev'), 'r') as f:
initial_rev = f.read()
@@ -594,19 +640,47 @@ def _extract_source(srctree, keep_temp, devbranch, sync, config, basepath, works
raise DevtoolError('Something went wrong with source extraction - the devtool-source class was not active or did not function correctly:\n%s' % str(e))
srcsubdir_rel = os.path.relpath(srcsubdir, os.path.join(tempdir, 'workdir'))
+ # Check if work-shared is empty; if so,
+ # find the source and copy it to work-shared
+ if is_kernel_yocto:
+ workshareddir = d.getVar('STAGING_KERNEL_DIR')
+ staging_kerVer = get_staging_kver(workshareddir)
+ kernelVersion = d.getVar('LINUX_VERSION')
+
+ # handle dangling symbolic link in work-shared:
+ if os.path.islink(workshareddir):
+ os.unlink(workshareddir)
+
+ if os.path.exists(workshareddir) and (not os.listdir(workshareddir) or kernelVersion != staging_kerVer):
+ shutil.rmtree(workshareddir)
+ oe.path.copyhardlinktree(srcsubdir, workshareddir)
+ elif not os.path.exists(workshareddir):
+ oe.path.copyhardlinktree(srcsubdir, workshareddir)
+
tempdir_localdir = os.path.join(tempdir, 'oe-local-files')
srctree_localdir = os.path.join(srctree, 'oe-local-files')
if sync:
- bb.process.run('git fetch file://' + srcsubdir + ' ' + devbranch + ':' + devbranch, cwd=srctree)
-
- # Move oe-local-files directory to srctree
- # As the oe-local-files is not part of the constructed git tree,
- # remove them directly during the synchrounizating might surprise
- # the users. Instead, we move it to oe-local-files.bak and remind
- # user in the log message.
+ try:
+ logger.info('Backing up current %s branch as branch: %s.bak' % (devbranch, devbranch))
+ bb.process.run('git branch -f ' + devbranch + '.bak', cwd=srctree)
+
+ # Use git fetch to update the source with the current recipe
+ # To be able to update the currently checked out branch with
+ # possibly new history (no fast-forward) git needs to be told
+ # that's ok
+ logger.info('Syncing source files including patches to git branch: %s' % devbranch)
+ bb.process.run('git fetch --update-head-ok --force file://' + srcsubdir + ' ' + devbranch + ':' + devbranch, cwd=srctree)
+ except bb.process.ExecutionError as e:
+ raise DevtoolError("Error when syncing source files to local checkout: %s" % str(e))
+
+ # Move the oe-local-files directory to srctree.
+ # As oe-local-files is not part of the constructed git tree,
+ # removing it directly during the synchronization might surprise
+ # the user. Instead, we move it to oe-local-files.bak and remind
+ # the user in the log message.
if os.path.exists(srctree_localdir + '.bak'):
- shutil.rmtree(srctree_localdir, srctree_localdir + '.bak')
+ shutil.rmtree(srctree_localdir + '.bak')
if os.path.exists(srctree_localdir):
logger.info('Backing up current local file directory %s' % srctree_localdir)
@@ -622,29 +696,7 @@ def _extract_source(srctree, keep_temp, devbranch, sync, config, basepath, works
shutil.move(tempdir_localdir, srcsubdir)
shutil.move(srcsubdir, srctree)
-
- if os.path.abspath(d.getVar('S')) == os.path.abspath(d.getVar('WORKDIR')):
- # If recipe extracts to ${WORKDIR}, symlink the files into the srctree
- # (otherwise the recipe won't build as expected)
- local_files_dir = os.path.join(srctree, 'oe-local-files')
- addfiles = []
- for root, _, files in os.walk(local_files_dir):
- relpth = os.path.relpath(root, local_files_dir)
- if relpth != '.':
- bb.utils.mkdirhier(os.path.join(srctree, relpth))
- for fn in files:
- if fn == '.gitignore':
- continue
- destpth = os.path.join(srctree, relpth, fn)
- if os.path.exists(destpth):
- os.unlink(destpth)
- os.symlink('oe-local-files/%s' % fn, destpth)
- addfiles.append(os.path.join(relpth, fn))
- if addfiles:
- bb.process.run('git add %s' % ' '.join(addfiles), cwd=srctree)
- useroptions = []
- oe.patch.GitApplyTree.gitCommandUserOptions(useroptions, d=d)
- bb.process.run('git %s commit -a -m "Committing local file symlinks\n\n%s"' % (' '.join(useroptions), oe.patch.GitApplyTree.ignore_commit_prefix), cwd=srctree)
+ symlink_oelocal_files_srctree(d, srctree)
if is_kernel_yocto:
logger.info('Copying kernel config to srctree')
@@ -704,19 +756,49 @@ def _check_preserve(config, recipename):
if splitline[2] != md5:
bb.utils.mkdirhier(preservepath)
preservefile = os.path.basename(removefile)
- logger.warn('File %s modified since it was written, preserving in %s' % (preservefile, preservepath))
+ logger.warning('File %s modified since it was written, preserving in %s' % (preservefile, preservepath))
shutil.move(removefile, os.path.join(preservepath, preservefile))
else:
os.remove(removefile)
else:
tf.write(line)
- os.rename(newfile, origfile)
+ bb.utils.rename(newfile, origfile)
+
+def get_staging_kver(srcdir):
+ # Kernel version from work-shared
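+ # Lines 2-4 of the kernel's top-level Makefile carry VERSION, PATCHLEVEL
+ # and SUBLEVEL (e.g. "VERSION = 6"); they are joined to e.g. "6.1.20"
+ # (illustrative values).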
+ kerver = []
+ staging_kerVer=""
+ if os.path.exists(srcdir) and os.listdir(srcdir):
+ with open(os.path.join(srcdir, "Makefile")) as f:
+ version = [next(f) for x in range(5)][1:4]
+ for word in version:
+ kerver.append(word.split('= ')[1].split('\n')[0])
+ staging_kerVer = ".".join(kerver)
+ return staging_kerVer
+
+def get_staging_kbranch(srcdir):
+ staging_kbranch = ""
+ if os.path.exists(srcdir) and os.listdir(srcdir):
+ (branch, _) = bb.process.run('git branch | grep \\* | cut -d \' \' -f2', cwd=srcdir)
+ staging_kbranch = "".join(branch.split('\n')[0])
+ return staging_kbranch
+
+def get_real_srctree(srctree, s, workdir):
+ # Check that recipe isn't using a shared workdir
+ s = os.path.abspath(s)
+ workdir = os.path.abspath(workdir)
+ if s.startswith(workdir) and s != workdir and os.path.dirname(s) != workdir:
+ # Handle if S is set to a subdirectory of the source
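+ # e.g. (illustrative): S = "${WORKDIR}/git/src" makes this return
+ # <srctree>/src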
+ srcsubdir = os.path.relpath(s, workdir).split(os.sep, 1)[1]
+ srctree = os.path.join(srctree, srcsubdir)
+ return srctree
def modify(args, config, basepath, workspace):
"""Entry point for the devtool 'modify' subcommand"""
import bb
import oe.recipeutils
import oe.patch
+ import oe.path
if args.recipename in workspace:
raise DevtoolError("recipe %s is already in your workspace" %
@@ -755,23 +837,89 @@ def modify(args, config, basepath, workspace):
_check_compatible_recipe(pn, rd)
- initial_rev = None
- commits = []
+ initial_revs = {}
+ commits = {}
check_commits = False
+
+ if bb.data.inherits_class('kernel-yocto', rd):
+ # Current set kernel version
+ kernelVersion = rd.getVar('LINUX_VERSION')
+ srcdir = rd.getVar('STAGING_KERNEL_DIR')
+ kbranch = rd.getVar('KBRANCH')
+
+ staging_kerVer = get_staging_kver(srcdir)
+ staging_kbranch = get_staging_kbranch(srcdir)
+ if (os.path.exists(srcdir) and os.listdir(srcdir)) and (kernelVersion in staging_kerVer and staging_kbranch == kbranch):
+ oe.path.copyhardlinktree(srcdir, srctree)
+ workdir = rd.getVar('WORKDIR')
+ srcsubdir = rd.getVar('S')
+ localfilesdir = os.path.join(srctree, 'oe-local-files')
+ # Move local source files into separate subdir
+ recipe_patches = [os.path.basename(patch) for patch in oe.recipeutils.get_recipe_patches(rd)]
+ local_files = oe.recipeutils.get_recipe_local_files(rd)
+
+ for key in local_files.copy():
+ if key.endswith('scc'):
+ sccfile = open(local_files[key], 'r')
+ for l in sccfile:
+ line = l.split()
+ if line and line[0] in ('kconf', 'patch'):
+ cfg = os.path.join(os.path.dirname(local_files[key]), line[-1])
+ if not cfg in local_files.values():
+ local_files[line[-1]] = cfg
+ shutil.copy2(cfg, workdir)
+ sccfile.close()
+
+ # Ignore local files with subdir={BP}
+ srcabspath = os.path.abspath(srcsubdir)
+ local_files = [fname for fname in local_files if os.path.exists(os.path.join(workdir, fname)) and (srcabspath == workdir or not os.path.join(workdir, fname).startswith(srcabspath + os.sep))]
+ if local_files:
+ for fname in local_files:
+ _move_file(os.path.join(workdir, fname), os.path.join(srctree, 'oe-local-files', fname))
+ with open(os.path.join(srctree, 'oe-local-files', '.gitignore'), 'w') as f:
+ f.write('# Ignore local files, by default. Remove this file if you want to commit the directory to Git\n*\n')
+
+ symlink_oelocal_files_srctree(rd, srctree)
+
+ task = 'do_configure'
+ res = tinfoil.build_targets(pn, task, handle_events=True)
+
+ # Copy .config to workspace
+ kconfpath = rd.getVar('B')
+ logger.info('Copying kernel config to workspace')
+ shutil.copy2(os.path.join(kconfpath, '.config'), srctree)
+
+ # Set this to true, we still need to get initial_rev
+ # by parsing the git repo
+ args.no_extract = True
+
if not args.no_extract:
- initial_rev, _ = _extract_source(srctree, args.keep_temp, args.branch, False, config, basepath, workspace, args.fixed_setup, rd, tinfoil, no_overrides=args.no_overrides)
- if not initial_rev:
+ initial_revs["."], _ = _extract_source(srctree, args.keep_temp, args.branch, False, config, basepath, workspace, args.fixed_setup, rd, tinfoil, no_overrides=args.no_overrides)
+ if not initial_revs["."]:
return 1
logger.info('Source tree extracted to %s' % srctree)
- # Get list of commits since this revision
- (stdout, _) = bb.process.run('git rev-list --reverse %s..HEAD' % initial_rev, cwd=srctree)
- commits = stdout.split()
- check_commits = True
+
+ if os.path.exists(os.path.join(srctree, '.git')):
+ # Get list of commits since this revision
+ (stdout, _) = bb.process.run('git rev-list --reverse %s..HEAD' % initial_revs["."], cwd=srctree)
+ commits["."] = stdout.split()
+ check_commits = True
+ (stdout, _) = bb.process.run('git submodule --quiet foreach --recursive \'echo `git rev-parse devtool-base` $PWD\'', cwd=srctree)
+ for line in stdout.splitlines():
+ (rev, submodule_path) = line.split()
+ submodule = os.path.relpath(submodule_path, srctree)
+ initial_revs[submodule] = rev
+ (stdout, _) = bb.process.run('git rev-list --reverse devtool-base..HEAD', cwd=submodule_path)
+ commits[submodule] = stdout.split()
else:
if os.path.exists(os.path.join(srctree, '.git')):
- # Check if it's a tree previously extracted by us
+ # Check if it's a tree previously extracted by us. This is done
+ # by ensuring that devtool-base and args.branch (devtool) exist.
+ # The check_commits logic will cause an exception if either one
+ # of these doesn't exist
try:
(stdout, _) = bb.process.run('git branch --contains devtool-base', cwd=srctree)
+ bb.process.run('git rev-parse %s' % args.branch, cwd=srctree)
except bb.process.ExecutionError:
stdout = ''
if stdout:
@@ -779,11 +927,11 @@ def modify(args, config, basepath, workspace):
for line in stdout.splitlines():
if line.startswith('*'):
(stdout, _) = bb.process.run('git rev-parse devtool-base', cwd=srctree)
- initial_rev = stdout.rstrip()
- if not initial_rev:
+ initial_revs["."] = stdout.rstrip()
+ if "." not in initial_revs:
# Otherwise, just grab the head revision
(stdout, _) = bb.process.run('git rev-parse HEAD', cwd=srctree)
- initial_rev = stdout.rstrip()
+ initial_revs["."] = stdout.rstrip()
branch_patches = {}
if check_commits:
@@ -795,62 +943,86 @@ def modify(args, config, basepath, workspace):
if branchname.startswith(override_branch_prefix):
branches.append(branchname)
if branches:
- logger.warn('SRC_URI is conditionally overridden in this recipe, thus several %s* branches have been created, one for each override that makes changes to SRC_URI. It is recommended that you make changes to the %s branch first, then checkout and rebase each %s* branch and update any unique patches there (duplicates on those branches will be ignored by devtool finish/update-recipe)' % (override_branch_prefix, args.branch, override_branch_prefix))
+ logger.warning('SRC_URI is conditionally overridden in this recipe, thus several %s* branches have been created, one for each override that makes changes to SRC_URI. It is recommended that you make changes to the %s branch first, then checkout and rebase each %s* branch and update any unique patches there (duplicates on those branches will be ignored by devtool finish/update-recipe)' % (override_branch_prefix, args.branch, override_branch_prefix))
branches.insert(0, args.branch)
seen_patches = []
for branch in branches:
branch_patches[branch] = []
- (stdout, _) = bb.process.run('git log devtool-base..%s' % branch, cwd=srctree)
- for line in stdout.splitlines():
- line = line.strip()
- if line.startswith(oe.patch.GitApplyTree.patch_line_prefix):
- origpatch = line[len(oe.patch.GitApplyTree.patch_line_prefix):].split(':', 1)[-1].strip()
- if not origpatch in seen_patches:
- seen_patches.append(origpatch)
- branch_patches[branch].append(origpatch)
+ (stdout, _) = bb.process.run('git rev-list devtool-base..%s' % branch, cwd=srctree)
+ for sha1 in stdout.splitlines():
+ notes = oe.patch.GitApplyTree.getNotes(srctree, sha1.strip())
+ origpatch = notes.get(oe.patch.GitApplyTree.original_patch)
+ if origpatch and origpatch not in seen_patches:
+ seen_patches.append(origpatch)
+ branch_patches[branch].append(origpatch)
# Need to grab this here in case the source is within a subdirectory
srctreebase = srctree
-
- # Check that recipe isn't using a shared workdir
- s = os.path.abspath(rd.getVar('S'))
- workdir = os.path.abspath(rd.getVar('WORKDIR'))
- if s.startswith(workdir) and s != workdir and os.path.dirname(s) != workdir:
- # Handle if S is set to a subdirectory of the source
- srcsubdir = os.path.relpath(s, workdir).split(os.sep, 1)[1]
- srctree = os.path.join(srctree, srcsubdir)
+ srctree = get_real_srctree(srctree, rd.getVar('S'), rd.getVar('WORKDIR'))
bb.utils.mkdirhier(os.path.dirname(appendfile))
with open(appendfile, 'w') as f:
- f.write('FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n')
+ # if not present, add type=git-dependency to the secondary sources
+ # (non local files) so they can be extracted correctly when building a recipe after
+ # doing a devtool modify on it
+ src_uri = rd.getVar('SRC_URI').split()
+ src_uri_append = []
+ src_uri_remove = []
+
+ # Assume first entry is main source extracted in ${S} so skip it
+ src_uri = src_uri[1::]
+
+ # Add "type=git-dependency" to all non local sources
+ for url in src_uri:
+ if not url.startswith('file://') and not 'type=' in url:
+ src_uri_remove.append(url)
+ src_uri_append.append('%s;type=git-dependency' % url)
+
+ if src_uri_remove:
+ f.write('SRC_URI:remove = "%s"\n' % ' '.join(src_uri_remove))
+ f.write('SRC_URI:append = " %s"\n\n' % ' '.join(src_uri_append))
+
+ f.write('FILESEXTRAPATHS:prepend := "${THISDIR}/${PN}:"\n')
# Local files can be modified/tracked in separate subdir under srctree
# Mostly useful for packages with S != WORKDIR
- f.write('FILESPATH_prepend := "%s:"\n' %
+ f.write('FILESPATH:prepend := "%s:"\n' %
os.path.join(srctreebase, 'oe-local-files'))
f.write('# srctreebase: %s\n' % srctreebase)
f.write('\ninherit externalsrc\n')
f.write('# NOTE: We use pn- overrides here to avoid affecting multiple variants in the case where the recipe uses BBCLASSEXTEND\n')
- f.write('EXTERNALSRC_pn-%s = "%s"\n' % (pn, srctree))
+ f.write('EXTERNALSRC:pn-%s = "%s"\n' % (pn, srctree))
b_is_s = use_external_build(args.same_dir, args.no_same_dir, rd)
if b_is_s:
- f.write('EXTERNALSRC_BUILD_pn-%s = "%s"\n' % (pn, srctree))
+ f.write('EXTERNALSRC_BUILD:pn-%s = "%s"\n' % (pn, srctree))
if bb.data.inherits_class('kernel', rd):
f.write('SRCTREECOVEREDTASKS = "do_validate_branches do_kernel_checkout '
- 'do_fetch do_unpack do_kernel_configme do_kernel_configcheck"\n')
- f.write('\ndo_patch() {\n'
- ' :\n'
- '}\n')
- f.write('\ndo_configure_append() {\n'
+ 'do_fetch do_unpack do_kernel_configcheck"\n')
+ f.write('\ndo_patch[noexec] = "1"\n')
+ f.write('\ndo_configure:append() {\n'
' cp ${B}/.config ${S}/.config.baseline\n'
' ln -sfT ${B}/.config ${S}/.config.new\n'
'}\n')
- if initial_rev:
- f.write('\n# initial_rev: %s\n' % initial_rev)
- for commit in commits:
- f.write('# commit: %s\n' % commit)
+ f.write('\ndo_kernel_configme:prepend() {\n'
+ ' if [ -e ${S}/.config ]; then\n'
+ ' mv ${S}/.config ${S}/.config.old\n'
+ ' fi\n'
+ '}\n')
+ if rd.getVarFlag('do_menuconfig', 'task'):
+ f.write('\ndo_configure:append() {\n'
+ ' if [ ${@oe.types.boolean(d.getVar("KCONFIG_CONFIG_ENABLE_MENUCONFIG"))} = True ]; then\n'
+ ' cp ${KCONFIG_CONFIG_ROOTDIR}/.config ${S}/.config.baseline\n'
+ ' ln -sfT ${KCONFIG_CONFIG_ROOTDIR}/.config ${S}/.config.new\n'
+ ' fi\n'
+ '}\n')
+ if initial_revs:
+ for name, rev in initial_revs.items():
+ f.write('\n# initial_rev %s: %s\n' % (name, rev))
+ if name in commits:
+ for commit in commits[name]:
+ f.write('# commit %s: %s\n' % (name, commit))
if branch_patches:
for branch in branch_patches:
if branch == args.branch:
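A minimal standalone sketch of the notes-based patch tracking introduced above, using the plain git CLI in place of oe.patch.GitApplyTree.getNotes(); the notes ref name and the note's key format are assumptions for illustration only:

import subprocess

NOTES_REF = "refs/notes/devtool"  # hypothetical ref name

def list_original_patches(srctree, branch):
    # Walk devtool-base..branch and collect the original patch name
    # recorded in each commit's note, skipping commits without notes
    revs = subprocess.check_output(
        ["git", "rev-list", "devtool-base..%s" % branch],
        cwd=srctree, text=True).split()
    patches = []
    for rev in revs:
        try:
            note = subprocess.check_output(
                ["git", "notes", "--ref", NOTES_REF, "show", rev],
                cwd=srctree, text=True, stderr=subprocess.DEVNULL)
        except subprocess.CalledProcessError:
            continue  # no note attached to this commit
        for line in note.splitlines():
            if line.startswith("original patch:"):  # assumed key format
                name = line.split(":", 1)[1].strip()
                if name not in patches:
                    patches.append(name)
    return patches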
@@ -970,10 +1142,10 @@ def rename(args, config, basepath, workspace):
# Rename bbappend
logger.info('Renaming %s to %s' % (append, newappend))
- os.rename(append, newappend)
+ bb.utils.rename(append, newappend)
# Rename recipe file
logger.info('Renaming %s to %s' % (recipefile, newfile))
- os.rename(recipefile, newfile)
+ bb.utils.rename(recipefile, newfile)
# Rename source tree if it's the default path
appendmd5 = None
@@ -1073,44 +1245,56 @@ def _get_patchset_revs(srctree, recipe_path, initial_rev=None, force_patch_refre
branchname = stdout.rstrip()
# Parse initial rev from recipe if not specified
- commits = []
+ commits = {}
patches = []
+ initial_revs = {}
with open(recipe_path, 'r') as f:
for line in f:
- if line.startswith('# initial_rev:'):
- if not initial_rev:
- initial_rev = line.split(':')[-1].strip()
- elif line.startswith('# commit:') and not force_patch_refresh:
- commits.append(line.split(':')[-1].strip())
- elif line.startswith('# patches_%s:' % branchname):
- patches = line.split(':')[-1].strip().split(',')
-
- update_rev = initial_rev
- changed_revs = None
- if initial_rev:
+ pattern = r'^#\s.*\s(.*):\s([0-9a-fA-F]+)$'
+ match = re.search(pattern, line)
+ if match:
+ name = match.group(1)
+ rev = match.group(2)
+ if line.startswith('# initial_rev'):
+ if not (name == "." and initial_rev):
+ initial_revs[name] = rev
+ elif line.startswith('# commit') and not force_patch_refresh:
+ if name not in commits:
+ commits[name] = [rev]
+ else:
+ commits[name].append(rev)
+ elif line.startswith('# patches_%s:' % branchname):
+ patches = line.split(':')[-1].strip().split(',')
+
+ update_revs = dict(initial_revs)
+ changed_revs = {}
+ for name, rev in initial_revs.items():
# Find first actually changed revision
stdout, _ = bb.process.run('git rev-list --reverse %s..HEAD' %
- initial_rev, cwd=srctree)
+ rev, cwd=os.path.join(srctree, name))
newcommits = stdout.split()
- for i in range(min(len(commits), len(newcommits))):
- if newcommits[i] == commits[i]:
- update_rev = commits[i]
+ if name in commits:
+ for i in range(min(len(commits[name]), len(newcommits))):
+ if newcommits[i] == commits[name][i]:
+ update_revs[name] = commits[name][i]
try:
stdout, _ = bb.process.run('git cherry devtool-patched',
- cwd=srctree)
+ cwd=os.path.join(srctree, name))
except bb.process.ExecutionError as err:
stdout = None
if stdout is not None and not force_patch_refresh:
- changed_revs = []
for line in stdout.splitlines():
if line.startswith('+ '):
rev = line.split()[1]
if rev in newcommits:
- changed_revs.append(rev)
+ if name not in changed_revs:
+ changed_revs[name] = [rev]
+ else:
+ changed_revs[name].append(rev)
- return initial_rev, update_rev, changed_revs, patches
+ return initial_revs, update_revs, changed_revs, patches
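The `# initial_rev <name>: <rev>` and `# commit <name>: <rev>` markers written into the bbappend earlier are recovered here with a single regex; a small self-contained sketch of that parsing, using made-up revisions:

import re

pattern = r'^#\s.*\s(.*):\s([0-9a-fA-F]+)$'
sample = [
    '# initial_rev .: 0123456789abcdef0123456789abcdef01234567',
    '# commit .: 89abcdef0123456789abcdef0123456789abcdef',
]

initial_revs = {}
commits = {}
for line in sample:
    match = re.search(pattern, line)
    if not match:
        continue
    name, rev = match.group(1), match.group(2)
    if line.startswith('# initial_rev'):
        initial_revs[name] = rev
    elif line.startswith('# commit'):
        commits.setdefault(name, []).append(rev)

print(initial_revs)  # {'.': '0123456789abcdef0123456789abcdef01234567'}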
def _remove_file_entries(srcuri, filelist):
"""Remove file:// entries from SRC_URI"""
@@ -1165,14 +1349,17 @@ def _remove_source_files(append, files, destpath, no_report_remove=False, dry_ru
raise
-def _export_patches(srctree, rd, start_rev, destdir, changed_revs=None):
+def _export_patches(srctree, rd, start_revs, destdir, changed_revs=None):
"""Export patches from srctree to given location.
Returns three-tuple of dicts:
1. updated - patches that already exist in SRCURI
2. added - new patches that don't exist in SRCURI
3. removed - patches that exist in SRCURI but not in exported patches
- In each dict the key is the 'basepath' of the URI and value is the
- absolute path to the existing file in recipe space (if any).
+ In each dict the key is the 'basepath' of the URI and value is:
+ - for updated and added dicts, a dict with 2 optional keys:
+ - 'path': the absolute path to the existing file in recipe space (if any)
+ - 'patchdir': the directory in which the patch should be applied (if any)
+ - for removed dict, the absolute path to the existing file in recipe space
"""
import oe.recipeutils
from oe.patch import GitApplyTree
@@ -1186,54 +1373,60 @@ def _export_patches(srctree, rd, start_rev, destdir, changed_revs=None):
# Generate patches from Git, exclude local files directory
patch_pathspec = _git_exclude_path(srctree, 'oe-local-files')
- GitApplyTree.extractPatches(srctree, start_rev, destdir, patch_pathspec)
-
- new_patches = sorted(os.listdir(destdir))
- for new_patch in new_patches:
- # Strip numbering from patch names. If it's a git sequence named patch,
- # the numbers might not match up since we are starting from a different
- # revision This does assume that people are using unique shortlog
- # values, but they ought to be anyway...
- new_basename = seqpatch_re.match(new_patch).group(2)
- match_name = None
- for old_patch in existing_patches:
- old_basename = seqpatch_re.match(old_patch).group(2)
- old_basename_splitext = os.path.splitext(old_basename)
- if old_basename.endswith(('.gz', '.bz2', '.Z')) and old_basename_splitext[0] == new_basename:
- old_patch_noext = os.path.splitext(old_patch)[0]
- match_name = old_patch_noext
- break
- elif new_basename == old_basename:
- match_name = old_patch
- break
- if match_name:
- # Rename patch files
- if new_patch != match_name:
- os.rename(os.path.join(destdir, new_patch),
- os.path.join(destdir, match_name))
- # Need to pop it off the list now before checking changed_revs
- oldpath = existing_patches.pop(old_patch)
- if changed_revs is not None:
- # Avoid updating patches that have not actually changed
- with open(os.path.join(destdir, match_name), 'r') as f:
- firstlineitems = f.readline().split()
- # Looking for "From <hash>" line
- if len(firstlineitems) > 1 and len(firstlineitems[1]) == 40:
- if not firstlineitems[1] in changed_revs:
- continue
- # Recompress if necessary
- if oldpath.endswith(('.gz', '.Z')):
- bb.process.run(['gzip', match_name], cwd=destdir)
- if oldpath.endswith('.gz'):
- match_name += '.gz'
- else:
- match_name += '.Z'
- elif oldpath.endswith('.bz2'):
- bb.process.run(['bzip2', match_name], cwd=destdir)
- match_name += '.bz2'
- updated[match_name] = oldpath
- else:
- added[new_patch] = None
+ GitApplyTree.extractPatches(srctree, start_revs, destdir, patch_pathspec)
+ for dirpath, dirnames, filenames in os.walk(destdir):
+ new_patches = filenames
+ reldirpath = os.path.relpath(dirpath, destdir)
+ for new_patch in new_patches:
+ # Strip numbering from patch names. If it's a git sequence named patch,
+ # the numbers might not match up since we are starting from a different
+ # revision. This does assume that people are using unique shortlog
+ # values, but they ought to be anyway...
+ new_basename = seqpatch_re.match(new_patch).group(2)
+ match_name = None
+ for old_patch in existing_patches:
+ old_basename = seqpatch_re.match(old_patch).group(2)
+ old_basename_splitext = os.path.splitext(old_basename)
+ if old_basename.endswith(('.gz', '.bz2', '.Z')) and old_basename_splitext[0] == new_basename:
+ old_patch_noext = os.path.splitext(old_patch)[0]
+ match_name = old_patch_noext
+ break
+ elif new_basename == old_basename:
+ match_name = old_patch
+ break
+ if match_name:
+ # Rename patch files
+ if new_patch != match_name:
+ bb.utils.rename(os.path.join(destdir, new_patch),
+ os.path.join(destdir, match_name))
+ # Need to pop it off the list now before checking changed_revs
+ oldpath = existing_patches.pop(old_patch)
+ if changed_revs is not None and dirpath in changed_revs:
+ # Avoid updating patches that have not actually changed
+ with open(os.path.join(dirpath, match_name), 'r') as f:
+ firstlineitems = f.readline().split()
+ # Looking for "From <hash>" line
+ if len(firstlineitems) > 1 and len(firstlineitems[1]) == 40:
+ if not firstlineitems[1] in changed_revs[dirpath]:
+ continue
+ # Recompress if necessary
+ if oldpath.endswith(('.gz', '.Z')):
+ bb.process.run(['gzip', match_name], cwd=destdir)
+ if oldpath.endswith('.gz'):
+ match_name += '.gz'
+ else:
+ match_name += '.Z'
+ elif oldpath.endswith('.bz2'):
+ bb.process.run(['bzip2', match_name], cwd=destdir)
+ match_name += '.bz2'
+ updated[match_name] = {'path' : oldpath}
+ if reldirpath != ".":
+ updated[match_name]['patchdir'] = reldirpath
+ else:
+ added[new_patch] = {}
+ if reldirpath != ".":
+ added[new_patch]['patchdir'] = reldirpath
+
return (updated, added, existing_patches)
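Since patches can now be exported into per-subtree directories, the walk above records a relative 'patchdir' for anything below the top level of destdir. A condensed sketch of the resulting value shape, with renaming and recompression left out:

import os

def classify_patches(destdir, existing_patches):
    # existing_patches maps patch basename -> absolute path in recipe space
    updated, added = {}, {}
    for dirpath, _, filenames in os.walk(destdir):
        reldirpath = os.path.relpath(dirpath, destdir)
        for patch in filenames:
            entry = {}
            if reldirpath != '.':
                entry['patchdir'] = reldirpath
            if patch in existing_patches:
                entry['path'] = existing_patches[patch]
                updated[patch] = entry
            else:
                added[patch] = entry
    return updated, added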
@@ -1270,8 +1463,10 @@ def _export_local_files(srctree, rd, destdir, srctreebase):
1. updated - files that already exist in SRCURI
2. added - new files that don't exist in SRCURI
3. removed - files that exist in SRCURI but not in exported files
- In each dict the key is the 'basepath' of the URI and value is the
- absolute path to the existing file in recipe space (if any).
+ In each dict the key is the 'basepath' of the URI and value is:
+ - for updated and added dicts, a dict with 1 optional key:
+ - 'path': the absolute path to the existing file in recipe space (if any)
+ - for removed dict, the absolute path to the existing file in recipe space
"""
import oe.recipeutils
@@ -1284,6 +1479,18 @@ def _export_local_files(srctree, rd, destdir, srctreebase):
updated = OrderedDict()
added = OrderedDict()
removed = OrderedDict()
+
+ # Get current branch and return early with empty lists
+ # if on one of the override branches
+ # (local files are provided only for the main branch and processing
+ # them against lists from recipe overrides will result in mismatches
+ # and broken modifications to recipes).
+ stdout, _ = bb.process.run('git rev-parse --abbrev-ref HEAD',
+ cwd=srctree)
+ branchname = stdout.rstrip()
+ if branchname.startswith(override_branch_prefix):
+ return (updated, added, removed)
+
local_files_dir = os.path.join(srctreebase, 'oe-local-files')
git_files = _git_ls_tree(srctree)
if 'oe-local-files' in git_files:
@@ -1321,15 +1528,29 @@ def _export_local_files(srctree, rd, destdir, srctreebase):
if os.path.exists(os.path.join(local_files_dir, fragment_fn)):
os.unlink(os.path.join(local_files_dir, fragment_fn))
+ # Special handling for cml1, ccmake, etc. bbclasses that generate
+ # configuration fragment files that are consumed as source files
+ for frag_class, frag_name in [("cml1", "fragment.cfg"), ("ccmake", "site-file.cmake")]:
+ if bb.data.inherits_class(frag_class, rd):
+ srcpath = os.path.join(rd.getVar('WORKDIR'), frag_name)
+ if os.path.exists(srcpath):
+ if frag_name not in new_set:
+ new_set.append(frag_name)
+ # copy fragment into destdir
+ shutil.copy2(srcpath, destdir)
+ # copy fragment into local files dir if it exists
+ if os.path.isdir(local_files_dir):
+ shutil.copy2(srcpath, local_files_dir)
+
if new_set is not None:
for fname in new_set:
if fname in existing_files:
origpath = existing_files.pop(fname)
workpath = os.path.join(local_files_dir, fname)
if not filecmp.cmp(origpath, workpath):
- updated[fname] = origpath
+ updated[fname] = {'path' : origpath}
elif fname != '.gitignore':
- added[fname] = None
+ added[fname] = {}
workdir = rd.getVar('WORKDIR')
s = rd.getVar('S')
@@ -1346,7 +1567,7 @@ def _export_local_files(srctree, rd, destdir, srctreebase):
if os.path.exists(fpath):
origpath = existing_files.pop(fname)
if not filecmp.cmp(origpath, fpath):
- updated[fpath] = origpath
+ updated[fpath] = {'path' : origpath}
removed = existing_files
return (updated, added, removed)
@@ -1375,6 +1596,12 @@ def _update_recipe_srcrev(recipename, workspace, srctree, rd, appendlayerdir, wi
recipedir = os.path.basename(recipefile)
logger.info('Updating SRCREV in recipe %s%s' % (recipedir, dry_run_suffix))
+ # Get original SRCREV
+ old_srcrev = rd.getVar('SRCREV') or ''
+ if old_srcrev == "INVALID":
+ raise DevtoolError('Update mode srcrev is only valid for recipes fetched from an SCM repository')
+ old_srcrev = {'.': old_srcrev}
+
# Get HEAD revision
try:
stdout, _ = bb.process.run('git rev-parse HEAD', cwd=srctree)
@@ -1401,13 +1628,12 @@ def _update_recipe_srcrev(recipename, workspace, srctree, rd, appendlayerdir, wi
if not no_remove:
# Find list of existing patches in recipe file
patches_dir = tempfile.mkdtemp(dir=tempdir)
- old_srcrev = rd.getVar('SRCREV') or ''
upd_p, new_p, del_p = _export_patches(srctree, rd, old_srcrev,
patches_dir)
logger.debug('Patches: update %s, new %s, delete %s' % (dict(upd_p), dict(new_p), dict(del_p)))
# Remove deleted local files and "overlapping" patches
- remove_files = list(del_f.values()) + list(upd_p.values()) + list(del_p.values())
+ remove_files = list(del_f.values()) + [value["path"] for value in upd_p.values() if "path" in value] + [value["path"] for value in del_p.values() if "path" in value]
if remove_files:
removedentries = _remove_file_entries(srcuri, remove_files)[0]
update_srcuri = True
@@ -1421,14 +1647,14 @@ def _update_recipe_srcrev(recipename, workspace, srctree, rd, appendlayerdir, wi
patchfields['SRC_URI'] = '\\\n '.join(srcuri)
if dry_run_outdir:
logger.info('Creating bbappend (dry-run)')
- else:
- appendfile, destpath = oe.recipeutils.bbappend_recipe(
- rd, appendlayerdir, files, wildcardver=wildcard_version,
- extralines=patchfields, removevalues=removevalues,
- redirect_output=dry_run_outdir)
+ appendfile, destpath = oe.recipeutils.bbappend_recipe(
+ rd, appendlayerdir, files, wildcardver=wildcard_version,
+ extralines=patchfields, removevalues=removevalues,
+ redirect_output=dry_run_outdir)
else:
files_dir = _determine_files_dir(rd)
- for basepath, path in upd_f.items():
+ for basepath, param in upd_f.items():
+ path = param['path']
logger.info('Updating file %s%s' % (basepath, dry_run_suffix))
if os.path.isabs(basepath):
# Original file (probably with subdir pointing inside source tree)
@@ -1438,7 +1664,8 @@ def _update_recipe_srcrev(recipename, workspace, srctree, rd, appendlayerdir, wi
_move_file(os.path.join(local_files_dir, basepath), path,
dry_run_outdir=dry_run_outdir, base_outdir=recipedir)
update_srcuri= True
- for basepath, path in new_f.items():
+ for basepath, param in new_f.items():
+ path = param['path']
logger.info('Adding new file %s%s' % (basepath, dry_run_suffix))
_move_file(os.path.join(local_files_dir, basepath),
os.path.join(files_dir, basepath),
@@ -1470,9 +1697,22 @@ def _update_recipe_patch(recipename, workspace, srctree, rd, appendlayerdir, wil
if not os.path.exists(append):
raise DevtoolError('unable to find workspace bbappend for recipe %s' %
recipename)
+ srctreebase = workspace[recipename]['srctreebase']
+ relpatchdir = os.path.relpath(srctreebase, srctree)
+ if relpatchdir == '.':
+ patchdir_params = {}
+ else:
+ patchdir_params = {'patchdir': relpatchdir}
- initial_rev, update_rev, changed_revs, filter_patches = _get_patchset_revs(srctree, append, initial_rev, force_patch_refresh)
- if not initial_rev:
+ def srcuri_entry(basepath, patchdir_params):
+ if patchdir_params:
+ paramstr = ';' + ';'.join('%s=%s' % (k,v) for k,v in patchdir_params.items())
+ else:
+ paramstr = ''
+ return 'file://%s%s' % (basepath, paramstr)
+
+ initial_revs, update_revs, changed_revs, filter_patches = _get_patchset_revs(srctree, append, initial_rev, force_patch_refresh)
+ if not initial_revs:
raise DevtoolError('Unable to find initial revision - please specify '
'it with --initial-rev')
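The srcuri_entry() helper above simply appends fetcher parameters to a file:// URI; shown standalone with a hypothetical patch name:

def srcuri_entry(basepath, patchdir_params):
    if patchdir_params:
        paramstr = ';' + ';'.join('%s=%s' % (k, v)
                                  for k, v in patchdir_params.items())
    else:
        paramstr = ''
    return 'file://%s%s' % (basepath, paramstr)

print(srcuri_entry('fix-build.patch', {}))
# -> file://fix-build.patch
print(srcuri_entry('fix-build.patch', {'patchdir': 'libs/foo'}))
# -> file://fix-build.patch;patchdir=libs/foo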
@@ -1486,61 +1726,69 @@ def _update_recipe_patch(recipename, workspace, srctree, rd, appendlayerdir, wil
tempdir = tempfile.mkdtemp(prefix='devtool')
try:
local_files_dir = tempfile.mkdtemp(dir=tempdir)
- if filter_patches:
- upd_f = {}
- new_f = {}
- del_f = {}
- else:
- srctreebase = workspace[recipename]['srctreebase']
- upd_f, new_f, del_f = _export_local_files(srctree, rd, local_files_dir, srctreebase)
-
- remove_files = []
- if not no_remove:
- # Get all patches from source tree and check if any should be removed
- all_patches_dir = tempfile.mkdtemp(dir=tempdir)
- _, _, del_p = _export_patches(srctree, rd, initial_rev,
- all_patches_dir)
- # Remove deleted local files and patches
- remove_files = list(del_f.values()) + list(del_p.values())
+ upd_f, new_f, del_f = _export_local_files(srctree, rd, local_files_dir, srctreebase)
# Get updated patches from source tree
patches_dir = tempfile.mkdtemp(dir=tempdir)
- upd_p, new_p, _ = _export_patches(srctree, rd, update_rev,
+ upd_p, new_p, _ = _export_patches(srctree, rd, update_revs,
patches_dir, changed_revs)
+ # Get all patches from source tree and check if any should be removed
+ all_patches_dir = tempfile.mkdtemp(dir=tempdir)
+ _, _, del_p = _export_patches(srctree, rd, initial_revs,
+ all_patches_dir)
logger.debug('Pre-filtering: update: %s, new: %s' % (dict(upd_p), dict(new_p)))
if filter_patches:
- new_p = {}
- upd_p = {k:v for k,v in upd_p.items() if k in filter_patches}
- remove_files = [f for f in remove_files if f in filter_patches]
+ new_p = OrderedDict()
+ upd_p = OrderedDict((k,v) for k,v in upd_p.items() if k in filter_patches)
+ del_p = OrderedDict((k,v) for k,v in del_p.items() if k in filter_patches)
+ remove_files = []
+ if not no_remove:
+ # Remove deleted local files and patches
+ remove_files = list(del_f.values()) + list(del_p.values())
updatefiles = False
updaterecipe = False
destpath = None
srcuri = (rd.getVar('SRC_URI', False) or '').split()
+
if appendlayerdir:
- files = dict((os.path.join(local_files_dir, key), val) for
+ files = OrderedDict((os.path.join(local_files_dir, key), val) for
key, val in list(upd_f.items()) + list(new_f.items()))
- files.update(dict((os.path.join(patches_dir, key), val) for
+ files.update(OrderedDict((os.path.join(patches_dir, key), val) for
key, val in list(upd_p.items()) + list(new_p.items())))
+
+ params = []
+ for file, param in files.items():
+ patchdir_param = dict(patchdir_params)
+ patchdir = param.get('patchdir', ".")
+ if patchdir != "." :
+ if patchdir_param:
+ patchdir_param['patchdir'] += patchdir
+ else:
+ patchdir_param['patchdir'] = patchdir
+ params.append(patchdir_param)
+
if files or remove_files:
removevalues = None
if remove_files:
removedentries, remaining = _remove_file_entries(
srcuri, remove_files)
if removedentries or remaining:
- remaining = ['file://' + os.path.basename(item) for
+ remaining = [srcuri_entry(os.path.basename(item), patchdir_params) for
item in remaining]
removevalues = {'SRC_URI': removedentries + remaining}
appendfile, destpath = oe.recipeutils.bbappend_recipe(
rd, appendlayerdir, files,
wildcardver=wildcard_version,
removevalues=removevalues,
- redirect_output=dry_run_outdir)
+ redirect_output=dry_run_outdir,
+ params=params)
else:
logger.info('No patches or local source files needed updating')
else:
# Update existing files
files_dir = _determine_files_dir(rd)
- for basepath, path in upd_f.items():
+ for basepath, param in upd_f.items():
+ path = param['path']
logger.info('Updating file %s' % basepath)
if os.path.isabs(basepath):
# Original file (probably with subdir pointing inside source tree)
@@ -1551,14 +1799,22 @@ def _update_recipe_patch(recipename, workspace, srctree, rd, appendlayerdir, wil
_move_file(os.path.join(local_files_dir, basepath), path,
dry_run_outdir=dry_run_outdir, base_outdir=recipedir)
updatefiles = True
- for basepath, path in upd_p.items():
- patchfn = os.path.join(patches_dir, basepath)
+ for basepath, param in upd_p.items():
+ path = param['path']
+ patchdir = param.get('patchdir', ".")
+ if patchdir != "." :
+ patchdir_param = dict(patchdir_params)
+ if patchdir_param:
+ patchdir_param['patchdir'] += patchdir
+ else:
+ patchdir_param['patchdir'] = patchdir
+ patchfn = os.path.join(patches_dir, patchdir, basepath)
if os.path.dirname(path) + '/' == dl_dir:
# This is a downloaded patch file - we now need to
# replace the entry in SRC_URI with our local version
logger.info('Replacing remote patch %s with updated local version' % basepath)
path = os.path.join(files_dir, basepath)
- _replace_srcuri_entry(srcuri, basepath, 'file://%s' % basepath)
+ _replace_srcuri_entry(srcuri, basepath, srcuri_entry(basepath, patchdir_param))
updaterecipe = True
else:
logger.info('Updating patch %s%s' % (basepath, dry_run_suffix))
@@ -1566,21 +1822,29 @@ def _update_recipe_patch(recipename, workspace, srctree, rd, appendlayerdir, wil
dry_run_outdir=dry_run_outdir, base_outdir=recipedir)
updatefiles = True
# Add any new files
- for basepath, path in new_f.items():
+ for basepath, param in new_f.items():
logger.info('Adding new file %s%s' % (basepath, dry_run_suffix))
_move_file(os.path.join(local_files_dir, basepath),
os.path.join(files_dir, basepath),
dry_run_outdir=dry_run_outdir,
base_outdir=recipedir)
- srcuri.append('file://%s' % basepath)
+ srcuri.append(srcuri_entry(basepath, patchdir_params))
updaterecipe = True
- for basepath, path in new_p.items():
+ for basepath, param in new_p.items():
+ patchdir = param.get('patchdir', ".")
logger.info('Adding new patch %s%s' % (basepath, dry_run_suffix))
- _move_file(os.path.join(patches_dir, basepath),
+ _move_file(os.path.join(patches_dir, patchdir, basepath),
os.path.join(files_dir, basepath),
dry_run_outdir=dry_run_outdir,
base_outdir=recipedir)
- srcuri.append('file://%s' % basepath)
+ params = dict(patchdir_params)
+ if patchdir != "." :
+ if params:
+ params['patchdir'] += patchdir
+ else:
+ params['patchdir'] = patchdir
+
+ srcuri.append(srcuri_entry(basepath, params))
updaterecipe = True
# Update recipe, if needed
if _remove_file_entries(srcuri, remove_files)[0]:
@@ -1603,7 +1867,7 @@ def _update_recipe_patch(recipename, workspace, srctree, rd, appendlayerdir, wil
def _guess_recipe_update_mode(srctree, rdata):
"""Guess the recipe update mode to use"""
- src_uri = (rdata.getVar('SRC_URI', False) or '').split()
+ src_uri = (rdata.getVar('SRC_URI') or '').split()
git_uris = [uri for uri in src_uri if uri.startswith('git://')]
if not git_uris:
return 'patch'
@@ -1637,6 +1901,8 @@ def _update_recipe(recipename, workspace, rd, mode, appendlayerdir, wildcard_ver
for line in stdout.splitlines():
branchname = line[2:]
if line.startswith('* '):
+ if 'HEAD' in line:
+ raise DevtoolError('Detached HEAD - please check out a branch, e.g., "devtool"')
startbranch = branchname
if branchname.startswith(override_branch_prefix):
override_branches.append(branchname)
@@ -1720,7 +1986,7 @@ def update_recipe(args, config, basepath, workspace):
if updated:
rf = rd.getVar('FILE')
if rf.startswith(config.workspace_path):
- logger.warn('Recipe file %s has been updated but is inside the workspace - you will need to move it (and any associated files next to it) out to the desired layer before using "devtool reset" in order to keep any changes' % rf)
+ logger.warning('Recipe file %s has been updated but is inside the workspace - you will need to move it (and any associated files next to it) out to the desired layer before using "devtool reset" in order to keep any changes' % rf)
finally:
tinfoil.shutdown()
@@ -1742,7 +2008,7 @@ def status(args, config, basepath, workspace):
return 0
-def _reset(recipes, no_clean, config, basepath, workspace):
+def _reset(recipes, no_clean, remove_work, config, basepath, workspace):
"""Reset one or more recipes"""
import oe.path
@@ -1803,7 +2069,7 @@ def _reset(recipes, no_clean, config, basepath, workspace):
if os.path.exists(origdir):
for root, dirs, files in os.walk(origdir):
for fn in files:
- logger.warn('Preserving %s in %s' % (fn, preservepath))
+ logger.warning('Preserving %s in %s' % (fn, preservepath))
_move_file(os.path.join(origdir, fn),
os.path.join(preservepath, fn))
for dn in dirs:
@@ -1820,10 +2086,25 @@ def _reset(recipes, no_clean, config, basepath, workspace):
srctreebase = workspace[pn]['srctreebase']
if os.path.isdir(srctreebase):
if os.listdir(srctreebase):
- # We don't want to risk wiping out any work in progress
- logger.info('Leaving source tree %s as-is; if you no '
- 'longer need it then please delete it manually'
- % srctreebase)
+ if remove_work:
+ logger.info('-r argument used on %s, removing source tree.'
+ ' You will lose any unsaved work' % pn)
+ shutil.rmtree(srctreebase)
+ else:
+ # We don't want to risk wiping out any work in progress
+ if srctreebase.startswith(os.path.join(config.workspace_path, 'sources')):
+ from datetime import datetime
+ preservesrc = os.path.join(config.workspace_path, 'attic', 'sources', "{}.{}".format(pn, datetime.now().strftime("%Y%m%d%H%M%S")))
+ logger.info('Preserving source tree in %s\nIf you no '
+ 'longer need it then please delete it manually.\n'
+ 'It is also possible to reuse it via the devtool source tree argument.'
+ % preservesrc)
+ bb.utils.mkdirhier(os.path.dirname(preservesrc))
+ shutil.move(srctreebase, preservesrc)
+ else:
+ logger.info('Leaving source tree %s as-is; if you no '
+ 'longer need it then please delete it manually'
+ % srctreebase)
else:
# This is unlikely, but if it's empty we can just remove it
os.rmdir(srctreebase)
@@ -1833,6 +2114,10 @@ def _reset(recipes, no_clean, config, basepath, workspace):
def reset(args, config, basepath, workspace):
"""Entry point for the devtool 'reset' subcommand"""
import bb
+ import shutil
+
+ recipes = ""
+
if args.recipename:
if args.all:
raise DevtoolError("Recipe cannot be specified if -a/--all is used")
@@ -1847,7 +2132,7 @@ def reset(args, config, basepath, workspace):
else:
recipes = args.recipename
- _reset(recipes, args.no_clean, config, basepath, workspace)
+ _reset(recipes, args.no_clean, args.remove_work, config, basepath, workspace)
return 0
@@ -1855,13 +2140,27 @@ def reset(args, config, basepath, workspace):
def _get_layer(layername, d):
"""Determine the base layer path for the specified layer name/path"""
layerdirs = d.getVar('BBLAYERS').split()
- layers = {os.path.basename(p): p for p in layerdirs}
+ layers = {} # {basename: layer_paths}
+ for p in layerdirs:
+ bn = os.path.basename(p)
+ if bn not in layers:
+ layers[bn] = [p]
+ else:
+ layers[bn].append(p)
# Provide some shortcuts
if layername.lower() in ['oe-core', 'openembedded-core']:
- layerdir = layers.get('meta', None)
+ layername = 'meta'
+ layer_paths = layers.get(layername, None)
+ if not layer_paths:
+ return os.path.abspath(layername)
+ elif len(layer_paths) == 1:
+ return os.path.abspath(layer_paths[0])
else:
- layerdir = layers.get(layername, None)
- return os.path.abspath(layerdir or layername)
+ # multiple layers having the same base name
+ logger.warning("Multiple layers have the same base name '%s', use the first one '%s'." % (layername, layer_paths[0]))
+ logger.warning("Consider using path instead of base name to specify layer:\n\t\t%s" % '\n\t\t'.join(layer_paths))
+ return os.path.abspath(layer_paths[0])
+
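A standalone sketch of the duplicate-basename resolution above, with layerdirs standing in for d.getVar('BBLAYERS').split() and illustrative paths:

import os

def pick_layer(layername, layerdirs):
    layers = {}  # {basename: [layer paths]}
    for p in layerdirs:
        layers.setdefault(os.path.basename(p), []).append(p)
    paths = layers.get(layername)
    if not paths:
        return os.path.abspath(layername)
    if len(paths) > 1:
        print("warning: multiple layers named %r, using %s" % (layername, paths[0]))
    return os.path.abspath(paths[0])

print(pick_layer('meta-custom', ['/srv/a/meta-custom', '/srv/b/meta-custom']))
# -> /srv/a/meta-custom (with a warning)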
def finish(args, config, basepath, workspace):
"""Entry point for the devtool 'finish' subcommand"""
@@ -1884,7 +2183,8 @@ def finish(args, config, basepath, workspace):
else:
raise DevtoolError('Source tree is not clean:\n\n%s\nEnsure you have committed your changes or use -f/--force if you are sure there\'s nothing that needs to be committed' % dirty)
- no_clean = False
+ no_clean = args.no_clean
+ remove_work = args.remove_work
tinfoil = setup_tinfoil(basepath=basepath, tracking=True)
try:
rd = parse_recipe(config, tinfoil, args.recipename, True)
@@ -2036,7 +2336,7 @@ def finish(args, config, basepath, workspace):
if args.dry_run:
logger.info('Resetting recipe (dry-run)')
else:
- _reset([args.recipename], no_clean=no_clean, config=config, basepath=basepath, workspace=workspace)
+ _reset([args.recipename], no_clean=no_clean, remove_work=remove_work, config=config, basepath=basepath, workspace=workspace)
return 0
@@ -2063,7 +2363,8 @@ def register_commands(subparsers, context):
group.add_argument('--same-dir', '-s', help='Build in same directory as source', action="store_true")
group.add_argument('--no-same-dir', help='Force build in a separate build directory', action="store_true")
parser_add.add_argument('--fetch', '-f', help='Fetch the specified URI and extract it to create the source tree (deprecated - pass as positional argument instead)', metavar='URI')
- parser_add.add_argument('--fetch-dev', help='For npm, also fetch devDependencies', action="store_true")
+ parser_add.add_argument('--npm-dev', help='For npm, also fetch devDependencies', action="store_true")
+ parser_add.add_argument('--no-pypi', help='Do not inherit pypi class', action="store_true")
parser_add.add_argument('--version', '-V', help='Version to use within recipe (PV)')
parser_add.add_argument('--no-git', '-g', help='If fetching source, do not set up source tree as a git repository', action="store_true")
group = parser_add.add_mutually_exclusive_group()
@@ -2148,6 +2449,7 @@ def register_commands(subparsers, context):
parser_reset.add_argument('recipename', nargs='*', help='Recipe to reset')
parser_reset.add_argument('--all', '-a', action="store_true", help='Reset all recipes (clear workspace)')
parser_reset.add_argument('--no-clean', '-n', action="store_true", help='Don\'t clean the sysroot to remove recipe output')
+ parser_reset.add_argument('--remove-work', '-r', action="store_true", help='Clean the sources directory along with append')
parser_reset.set_defaults(func=reset)
parser_finish = subparsers.add_parser('finish', help='Finish working on a recipe in your workspace',
@@ -2158,6 +2460,8 @@ def register_commands(subparsers, context):
parser_finish.add_argument('--mode', '-m', choices=['patch', 'srcrev', 'auto'], default='auto', help='Update mode (where %(metavar)s is %(choices)s; default is %(default)s)', metavar='MODE')
parser_finish.add_argument('--initial-rev', help='Override starting revision for patches')
parser_finish.add_argument('--force', '-f', action="store_true", help='Force continuing even if there are uncommitted changes in the source tree repository')
+ parser_finish.add_argument('--remove-work', '-r', action="store_true", help='Clean the sources directory under workspace')
+ parser_finish.add_argument('--no-clean', '-n', action="store_true", help='Don\'t clean the sysroot to remove recipe output')
parser_finish.add_argument('--no-overrides', '-O', action="store_true", help='Do not handle other override branches (if they exist)')
parser_finish.add_argument('--dry-run', '-N', action="store_true", help='Dry-run (just report changes instead of writing them)')
parser_finish.add_argument('--force-patch-refresh', action="store_true", help='Update patches in the layer even if they have not been modified (useful for refreshing patch context)')
diff --git a/scripts/lib/devtool/upgrade.py b/scripts/lib/devtool/upgrade.py
index 1dde16641b..fa5b8ef3c7 100644
--- a/scripts/lib/devtool/upgrade.py
+++ b/scripts/lib/devtool/upgrade.py
@@ -2,18 +2,7 @@
#
# Copyright (C) 2014-2017 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# SPDX-License-Identifier: GPL-2.0-only
#
"""Devtool upgrade plugin"""
@@ -43,9 +32,11 @@ def _run(cmd, cwd=''):
def _get_srctree(tmpdir):
srctree = tmpdir
- dirs = os.listdir(tmpdir)
+ dirs = scriptutils.filter_src_subdirs(tmpdir)
if len(dirs) == 1:
srctree = os.path.join(tmpdir, dirs[0])
+ else:
+ raise DevtoolError("Cannot determine where the source tree is after unpacking in {}: {}".format(tmpdir,dirs))
return srctree
def _copy_source_code(orig, dest):
@@ -82,7 +73,8 @@ def _rename_recipe_dirs(oldpv, newpv, path):
if oldfile.find(oldpv) != -1:
newfile = oldfile.replace(oldpv, newpv)
if oldfile != newfile:
- os.rename(os.path.join(path, oldfile), os.path.join(path, newfile))
+ bb.utils.rename(os.path.join(path, oldfile),
+ os.path.join(path, newfile))
def _rename_recipe_file(oldrecipe, bpn, oldpv, newpv, path):
oldrecipe = os.path.basename(oldrecipe)
@@ -98,7 +90,7 @@ def _rename_recipe_files(oldrecipe, bpn, oldpv, newpv, path):
_rename_recipe_dirs(oldpv, newpv, path)
return _rename_recipe_file(oldrecipe, bpn, oldpv, newpv, path)
-def _write_append(rc, srctree, same_dir, no_same_dir, rev, copied, workspace, d):
+def _write_append(rc, srctreebase, srctree, same_dir, no_same_dir, revs, copied, workspace, d):
"""Writes an append file"""
if not os.path.exists(rc):
raise DevtoolError("bbappend not created because %s does not exist" % rc)
@@ -113,38 +105,44 @@ def _write_append(rc, srctree, same_dir, no_same_dir, rev, copied, workspace, d)
pn = d.getVar('PN')
af = os.path.join(appendpath, '%s.bbappend' % brf)
with open(af, 'w') as f:
- f.write('FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n\n')
+ f.write('FILESEXTRAPATHS:prepend := "${THISDIR}/${PN}:"\n\n')
+ # Local files can be modified/tracked in separate subdir under srctree
+ # Mostly useful for packages with S != WORKDIR
+ f.write('FILESPATH:prepend := "%s:"\n' %
+ os.path.join(srctreebase, 'oe-local-files'))
+ f.write('# srctreebase: %s\n' % srctreebase)
f.write('inherit externalsrc\n')
f.write(('# NOTE: We use pn- overrides here to avoid affecting '
'multiple variants in the case where the recipe uses BBCLASSEXTEND\n'))
- f.write('EXTERNALSRC_pn-%s = "%s"\n' % (pn, srctree))
+ f.write('EXTERNALSRC:pn-%s = "%s"\n' % (pn, srctree))
b_is_s = use_external_build(same_dir, no_same_dir, d)
if b_is_s:
- f.write('EXTERNALSRC_BUILD_pn-%s = "%s"\n' % (pn, srctree))
+ f.write('EXTERNALSRC_BUILD:pn-%s = "%s"\n' % (pn, srctree))
f.write('\n')
- if rev:
- f.write('# initial_rev: %s\n' % rev)
+ if revs:
+ for name, rev in revs.items():
+ f.write('# initial_rev %s: %s\n' % (name, rev))
if copied:
f.write('# original_path: %s\n' % os.path.dirname(d.getVar('FILE')))
f.write('# original_files: %s\n' % ' '.join(copied))
return af
-def _cleanup_on_error(rf, srctree):
- rfp = os.path.split(rf)[0] # recipe folder
- rfpp = os.path.split(rfp)[0] # recipes folder
- if os.path.exists(rfp):
- shutil.rmtree(b)
- if not len(os.listdir(rfpp)):
- os.rmdir(rfpp)
+def _cleanup_on_error(rd, srctree):
+ if os.path.exists(rd):
+ shutil.rmtree(rd)
srctree = os.path.abspath(srctree)
if os.path.exists(srctree):
shutil.rmtree(srctree)
-def _upgrade_error(e, rf, srctree):
- if rf:
- cleanup_on_error(rf, srctree)
+def _upgrade_error(e, rd, srctree, keep_failure=False, extramsg=None):
+ if not keep_failure:
+ _cleanup_on_error(rd, srctree)
logger.error(e)
- raise DevtoolError(e)
+ if extramsg:
+ logger.error(extramsg)
+ if keep_failure:
+ logger.info('Preserving failed upgrade files (--keep-failure)')
+ sys.exit(1)
def _get_uri(rd):
srcuris = rd.getVar('SRC_URI').split()
@@ -185,12 +183,16 @@ def _extract_new_source(newpv, srctree, no_patch, srcrev, srcbranch, branch, kee
uri, rev = _get_uri(crd)
if srcrev:
rev = srcrev
- if uri.startswith('git://'):
+ paths = [srctree]
+ if uri.startswith('git://') or uri.startswith('gitsm://'):
__run('git fetch')
__run('git checkout %s' % rev)
__run('git tag -f devtool-base-new')
- md5 = None
- sha256 = None
+ __run('git submodule update --recursive')
+ __run('git submodule foreach \'git tag -f devtool-base-new\'')
+ (stdout, _) = __run('git submodule --quiet foreach \'echo $sm_path\'')
+ paths += [os.path.join(srctree, p) for p in stdout.splitlines()]
+ checksums = {}
_, _, _, _, _, params = bb.fetch2.decodeurl(uri)
srcsubdir_rel = params.get('destsuffix', 'git')
if not srcbranch:
@@ -198,14 +200,15 @@ def _extract_new_source(newpv, srctree, no_patch, srcrev, srcbranch, branch, kee
get_branch = [x.strip() for x in check_branch.splitlines()]
# Remove HEAD reference point and drop remote prefix
get_branch = [x.split('/', 1)[1] for x in get_branch if not x.startswith('origin/HEAD')]
- if 'master' in get_branch:
- # If it is master, we do not need to append 'branch=master' as this is default.
- # Even with the case where get_branch has multiple objects, if 'master' is one
- # of them, we should default take from 'master'
- srcbranch = ''
- elif len(get_branch) == 1:
- # If 'master' isn't in get_branch and get_branch contains only ONE object, then store result into 'srcbranch'
+ if len(get_branch) == 1:
+ # If srcrev is on only ONE branch, then use that branch
srcbranch = get_branch[0]
+ elif 'main' in get_branch:
+ # If srcrev is on multiple branches, then choose 'main' if it is one of them
+ srcbranch = 'main'
+ elif 'master' in get_branch:
+ # Otherwise choose 'master' if it is one of the branches
+ srcbranch = 'master'
else:
# If get_branch contains more than one object, then display an error and exit.
mbrch = '\n ' + '\n '.join(get_branch)
@@ -222,9 +225,6 @@ def _extract_new_source(newpv, srctree, no_patch, srcrev, srcbranch, branch, kee
if ftmpdir and keep_temp:
logger.info('Fetch temp directory is %s' % ftmpdir)
- md5 = checksums['md5sum']
- sha256 = checksums['sha256sum']
-
tmpsrctree = _get_srctree(tmpdir)
srctree = os.path.abspath(srctree)
srcsubdir_rel = os.path.relpath(tmpsrctree, tmpdir)
@@ -242,14 +242,14 @@ def _extract_new_source(newpv, srctree, no_patch, srcrev, srcbranch, branch, kee
# Copy in new ones
_copy_source_code(tmpsrctree, srctree)
- (stdout,_) = __run('git ls-files --modified --others --exclude-standard')
+ (stdout,_) = __run('git ls-files --modified --others')
filelist = stdout.splitlines()
pbar = bb.ui.knotty.BBProgress('Adding changed files', len(filelist))
pbar.start()
batchsize = 100
for i in range(0, len(filelist), batchsize):
batch = filelist[i:i+batchsize]
- __run('git add -A %s' % ' '.join(['"%s"' % item for item in batch]))
+ __run('git add -f -A %s' % ' '.join(['"%s"' % item for item in batch]))
pbar.update(i)
pbar.finish()
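The loop above stages files in batches of 100 to keep each git command line short; a minimal standalone equivalent using subprocess in place of the __run helper:

import subprocess

def git_add_batched(repo, filelist, batchsize=100):
    # Stage files in fixed-size batches so very large file lists do not
    # overflow the argument-length limit of a single git invocation
    for i in range(0, len(filelist), batchsize):
        batch = filelist[i:i + batchsize]
        subprocess.check_call(['git', 'add', '-f', '-A', '--'] + batch, cwd=repo)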
@@ -258,38 +258,60 @@ def _extract_new_source(newpv, srctree, no_patch, srcrev, srcbranch, branch, kee
__run('git %s commit -q -m "Commit of upstream changes at version %s" --allow-empty' % (' '.join(useroptions), newpv))
__run('git tag -f devtool-base-%s' % newpv)
- (stdout, _) = __run('git rev-parse HEAD')
- rev = stdout.rstrip()
+ revs = {}
+ for path in paths:
+ (stdout, _) = _run('git rev-parse HEAD', cwd=path)
+ revs[os.path.relpath(path, srctree)] = stdout.rstrip()
if no_patch:
patches = oe.recipeutils.get_recipe_patches(crd)
if patches:
- logger.warn('By user choice, the following patches will NOT be applied to the new source tree:\n %s' % '\n '.join([os.path.basename(patch) for patch in patches]))
+ logger.warning('By user choice, the following patches will NOT be applied to the new source tree:\n %s' % '\n '.join([os.path.basename(patch) for patch in patches]))
else:
- __run('git checkout devtool-patched -b %s' % branch)
- skiptag = False
- try:
- __run('git rebase %s' % rev)
- except bb.process.ExecutionError as e:
- skiptag = True
- if 'conflict' in e.stdout:
- logger.warn('Command \'%s\' failed:\n%s\n\nYou will need to resolve conflicts in order to complete the upgrade.' % (e.command, e.stdout.rstrip()))
- else:
- logger.warn('Command \'%s\' failed:\n%s' % (e.command, e.stdout))
- if not skiptag:
- if uri.startswith('git://'):
- suffix = 'new'
- else:
- suffix = newpv
- __run('git tag -f devtool-patched-%s' % suffix)
+ for path in paths:
+ _run('git checkout devtool-patched -b %s' % branch, cwd=path)
+ (stdout, _) = _run('git branch --list devtool-override-*', cwd=path)
+ branches_to_rebase = [branch] + stdout.split()
+ target_branch = revs[os.path.relpath(path, srctree)]
+
+ # There is a bug (or feature?) in git rebase where if a commit with
+ # a note is fully rebased away by being part of an old commit, the
+ # note is still attached to the old commit. Avoid this by making
+ # sure all old devtool related commits have a note attached to them
+ # (this assumes git config notes.rewriteMode is set to ignore).
+ (stdout, _) = __run('git rev-list devtool-base..%s' % target_branch)
+ for rev in stdout.splitlines():
+ if not oe.patch.GitApplyTree.getNotes(path, rev):
+ oe.patch.GitApplyTree.addNote(path, rev, "dummy")
+
+ for b in branches_to_rebase:
+ logger.info("Rebasing {} onto {}".format(b, target_branch))
+ _run('git checkout %s' % b, cwd=path)
+ try:
+ _run('git rebase %s' % target_branch, cwd=path)
+ except bb.process.ExecutionError as e:
+ if 'conflict' in e.stdout:
+ logger.warning('Command \'%s\' failed:\n%s\n\nYou will need to resolve conflicts in order to complete the upgrade.' % (e.command, e.stdout.rstrip()))
+ _run('git rebase --abort', cwd=path)
+ else:
+ logger.warning('Command \'%s\' failed:\n%s' % (e.command, e.stdout))
+
+ # Remove any dummy notes added above.
+ (stdout, _) = __run('git rev-list devtool-base..%s' % target_branch)
+ for rev in stdout.splitlines():
+ oe.patch.GitApplyTree.removeNote(path, rev, "dummy")
+
+ _run('git checkout %s' % branch, cwd=path)
if tmpsrctree:
if keep_temp:
logger.info('Preserving temporary directory %s' % tmpsrctree)
else:
shutil.rmtree(tmpsrctree)
+ if tmpdir != tmpsrctree:
+ shutil.rmtree(tmpdir)
- return (rev, md5, sha256, srcbranch, srcsubdir_rel)
+ return (revs, checksums, srcbranch, srcsubdir_rel)
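The dummy-note workaround above can be sketched with the plain git CLI (the real code goes through oe.patch.GitApplyTree.addNote/removeNote); the notes ref name is an assumption for illustration, and git config notes.rewriteMode is assumed to be set to ignore:

import subprocess

NOTES_REF = 'refs/notes/devtool'  # hypothetical ref name

def _git(args, cwd):
    return subprocess.check_output(['git'] + args, cwd=cwd, text=True,
                                   stderr=subprocess.DEVNULL)

def rebase_keeping_notes(path, branch, target):
    # Attach a throwaway note to every old commit lacking one, so a rebase
    # that squashes a noted commit away cannot re-home its note onto an old
    # commit; then rebase and drop the throwaway notes again
    revs = _git(['rev-list', 'devtool-base..%s' % target], cwd=path).split()
    dummied = []
    for rev in revs:
        try:
            _git(['notes', '--ref', NOTES_REF, 'show', rev], cwd=path)
        except subprocess.CalledProcessError:
            _git(['notes', '--ref', NOTES_REF, 'add', '-m', 'dummy', rev], cwd=path)
            dummied.append(rev)
    _git(['checkout', branch], cwd=path)
    _git(['rebase', target], cwd=path)
    for rev in dummied:
        _git(['notes', '--ref', NOTES_REF, 'remove', rev], cwd=path)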
def _add_license_diff_to_recipe(path, diff):
notice_text = """# FIXME: the LIC_FILES_CHKSUM values have been updated by 'devtool upgrade'.
@@ -310,7 +332,7 @@ def _add_license_diff_to_recipe(path, diff):
f.write("\n#\n\n".encode())
f.write(orig_content)
-def _create_new_recipe(newpv, md5, sha256, srcrev, srcbranch, srcsubdir_old, srcsubdir_new, workspace, tinfoil, rd, license_diff, new_licenses):
+def _create_new_recipe(newpv, checksums, srcrev, srcbranch, srcsubdir_old, srcsubdir_new, workspace, tinfoil, rd, license_diff, new_licenses, srctree, keep_failure):
"""Creates the new recipe under workspace"""
bpn = rd.getVar('BPN')
@@ -341,7 +363,10 @@ def _create_new_recipe(newpv, md5, sha256, srcrev, srcbranch, srcsubdir_old, src
replacing = True
new_src_uri = []
for entry in src_uri:
- scheme, network, path, user, passwd, params = bb.fetch2.decodeurl(entry)
+ try:
+ scheme, network, path, user, passwd, params = bb.fetch2.decodeurl(entry)
+ except bb.fetch2.MalformedUrl as e:
+ raise DevtoolError("Could not decode SRC_URI: {}".format(e))
if replacing and scheme in ['git', 'gitsm']:
branch = params.get('branch', 'master')
if rd.expand(branch) != srcbranch:
@@ -379,30 +404,39 @@ def _create_new_recipe(newpv, md5, sha256, srcrev, srcbranch, srcsubdir_old, src
addnames.append(params['name'])
# Find what's been set in the original recipe
oldnames = []
+ oldsums = []
noname = False
for varflag in rd.getVarFlags('SRC_URI'):
- if varflag.endswith(('.md5sum', '.sha256sum')):
- name = varflag.rsplit('.', 1)[0]
- if name not in oldnames:
- oldnames.append(name)
- elif varflag in ['md5sum', 'sha256sum']:
- noname = True
+ for checksum in checksums:
+ if varflag.endswith('.' + checksum):
+ name = varflag.rsplit('.', 1)[0]
+ if name not in oldnames:
+ oldnames.append(name)
+ oldsums.append(checksum)
+ elif varflag == checksum:
+ noname = True
+ oldsums.append(checksum)
# Even if SRC_URI has named entries it doesn't have to actually use the name
if noname and addnames and addnames[0] not in oldnames:
addnames = []
# Drop any old names (the name actually might include ${PV})
for name in oldnames:
if name not in newnames:
- newvalues['SRC_URI[%s.md5sum]' % name] = None
- newvalues['SRC_URI[%s.sha256sum]' % name] = None
+ for checksum in oldsums:
+ newvalues['SRC_URI[%s.%s]' % (name, checksum)] = None
- if md5 and sha256:
- if addnames:
- nameprefix = '%s.' % addnames[0]
- else:
- nameprefix = ''
- newvalues['SRC_URI[%smd5sum]' % nameprefix] = md5
- newvalues['SRC_URI[%ssha256sum]' % nameprefix] = sha256
+ nameprefix = '%s.' % addnames[0] if addnames else ''
+
+ # md5sum is deprecated, remove any traces of it. If it was the only old
+ # checksum, then replace it with the default checksums.
+ if 'md5sum' in oldsums:
+ newvalues['SRC_URI[%smd5sum]' % nameprefix] = None
+ oldsums.remove('md5sum')
+ if not oldsums:
+ oldsums = ["%ssum" % s for s in bb.fetch2.SHOWN_CHECKSUM_LIST]
+
+ for checksum in oldsums:
+ newvalues['SRC_URI[%s%s]' % (nameprefix, checksum)] = checksums[checksum]
if srcsubdir_new != srcsubdir_old:
s_subdir_old = os.path.relpath(os.path.abspath(rd.getVar('S')), rd.getVar('WORKDIR'))
@@ -427,7 +461,11 @@ def _create_new_recipe(newpv, md5, sha256, srcrev, srcbranch, srcsubdir_old, src
newvalues["LIC_FILES_CHKSUM"] = newlicchksum
_add_license_diff_to_recipe(fullpath, license_diff)
- rd = tinfoil.parse_recipe_file(fullpath, False)
+ tinfoil.modified_files()
+ try:
+ rd = tinfoil.parse_recipe_file(fullpath, False)
+ except bb.tinfoil.TinfoilCommandFailed as e:
+ _upgrade_error(e, os.path.dirname(fullpath), srctree, keep_failure, 'Parsing of upgraded recipe failed')
oe.recipeutils.patch_recipe(rd, fullpath, newvalues)
return fullpath, copied
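A condensed sketch of the checksum rewrite above: deprecated md5sum flags are deleted, and the remaining checksums are written back as SRC_URI varflags (the checksum names mirror what bb.fetch2.SHOWN_CHECKSUM_LIST provides; the value below is illustrative):

checksums = {'sha256sum': 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'}
oldsums = ['md5sum', 'sha256sum']
nameprefix = ''  # would be '<name>.' for named SRC_URI entries
newvalues = {}

# md5sum is deprecated: setting the flag to None removes it from the recipe
if 'md5sum' in oldsums:
    newvalues['SRC_URI[%smd5sum]' % nameprefix] = None
    oldsums.remove('md5sum')

for checksum in oldsums:
    newvalues['SRC_URI[%s%s]' % (nameprefix, checksum)] = checksums[checksum]

print(newvalues)
# {'SRC_URI[md5sum]': None, 'SRC_URI[sha256sum]': 'e3b0c442...'}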
@@ -436,7 +474,7 @@ def _create_new_recipe(newpv, md5, sha256, srcrev, srcbranch, srcsubdir_old, src
def _check_git_config():
def getconfig(name):
try:
- value = bb.process.run('git config --global %s' % name)[0].strip()
+ value = bb.process.run('git config %s' % name)[0].strip()
except bb.process.ExecutionError as e:
if e.exitcode == 1:
value = None
@@ -523,6 +561,8 @@ def upgrade(args, config, basepath, workspace):
else:
srctree = standard.get_default_srctree(config, pn)
+ srctree_s = standard.get_real_srctree(srctree, rd.getVar('S'), rd.getVar('WORKDIR'))
+
# try to automatically discover latest version and revision if not provided on command line
if not args.version and not args.srcrev:
version_info = oe.recipeutils.get_recipe_upstream_version(rd)
@@ -552,21 +592,20 @@ def upgrade(args, config, basepath, workspace):
try:
logger.info('Extracting current version source...')
rev1, srcsubdir1 = standard._extract_source(srctree, False, 'devtool-orig', False, config, basepath, workspace, args.fixed_setup, rd, tinfoil, no_overrides=args.no_overrides)
- old_licenses = _extract_licenses(srctree, rd.getVar('LIC_FILES_CHKSUM'))
+ old_licenses = _extract_licenses(srctree_s, (rd.getVar('LIC_FILES_CHKSUM') or ""))
logger.info('Extracting upgraded version source...')
- rev2, md5, sha256, srcbranch, srcsubdir2 = _extract_new_source(args.version, srctree, args.no_patch,
+ rev2, checksums, srcbranch, srcsubdir2 = _extract_new_source(args.version, srctree, args.no_patch,
args.srcrev, args.srcbranch, args.branch, args.keep_temp,
tinfoil, rd)
- new_licenses = _extract_licenses(srctree, rd.getVar('LIC_FILES_CHKSUM'))
+ new_licenses = _extract_licenses(srctree_s, (rd.getVar('LIC_FILES_CHKSUM') or ""))
license_diff = _generate_license_diff(old_licenses, new_licenses)
- rf, copied = _create_new_recipe(args.version, md5, sha256, args.srcrev, srcbranch, srcsubdir1, srcsubdir2, config.workspace_path, tinfoil, rd, license_diff, new_licenses)
- except bb.process.CmdError as e:
- _upgrade_error(e, rf, srctree)
- except DevtoolError as e:
- _upgrade_error(e, rf, srctree)
+ rf, copied = _create_new_recipe(args.version, checksums, args.srcrev, srcbranch, srcsubdir1, srcsubdir2, config.workspace_path, tinfoil, rd, license_diff, new_licenses, srctree, args.keep_failure)
+ except (bb.process.CmdError, DevtoolError) as e:
+ recipedir = os.path.join(config.workspace_path, 'recipes', rd.getVar('BPN'))
+ _upgrade_error(e, recipedir, srctree, args.keep_failure)
standard._add_md5(config, pn, os.path.dirname(rf))
- af = _write_append(rf, srctree, args.same_dir, args.no_same_dir, rev2,
+ af = _write_append(rf, srctree, srctree_s, args.same_dir, args.no_same_dir, rev2,
copied, config.workspace_path, rd)
standard._add_md5(config, pn, af)
@@ -576,6 +615,9 @@ def upgrade(args, config, basepath, workspace):
logger.info('New recipe is %s' % rf)
if license_diff:
logger.info('License checksums have been updated in the new recipe; please refer to it for the difference between the old and the new license texts.')
+ preferred_version = rd.getVar('PREFERRED_VERSION_%s' % rd.getVar('PN'))
+ if preferred_version:
+ logger.warning('Version is pinned to %s via PREFERRED_VERSION; it may need adjustment to match the new version before any further steps are taken' % preferred_version)
finally:
tinfoil.shutdown()
return 0
@@ -600,6 +642,20 @@ def latest_version(args, config, basepath, workspace):
tinfoil.shutdown()
return 0
+def check_upgrade_status(args, config, basepath, workspace):
+ if not args.recipe:
+ logger.info("Checking the upstream status for all recipes may take a few minutes")
+ results = oe.recipeutils.get_recipe_upgrade_status(args.recipe)
+ for result in results:
+ # pn, update_status, current, latest, maintainer, latest_commit, no_update_reason
+ if args.all or result[1] != 'MATCH':
+ print("{:25} {:15} {:15} {} {} {}".format( result[0],
+ result[2],
+ result[1] if result[1] != 'UPDATE' else (result[3] if not result[3].endswith("new-commits-available") else "new commits"),
+ result[4],
+ result[5] if result[5] != 'N/A' else "",
+ "cannot be updated due to: %s" %(result[6]) if result[6] else ""))
+
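The column layout used above, exercised with a fabricated result tuple in the (pn, status, current, latest, maintainer, commit, reason) order noted in the comment; all values are illustrative:

result = ('zlib', 'UPDATE', '1.2.13', '1.3', 'nobody@example.com', 'abc1234', '')
print("{:25} {:15} {:15} {} {} {}".format(
    result[0], result[2],
    result[1] if result[1] != 'UPDATE' else result[3],
    result[4],
    result[5] if result[5] != 'N/A' else '',
    "cannot be updated due to: %s" % result[6] if result[6] else ''))
# zlib                      1.2.13          1.3             nobody@example.com abc1234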
def register_commands(subparsers, context):
"""Register devtool subcommands from this plugin"""
@@ -620,6 +676,7 @@ def register_commands(subparsers, context):
group.add_argument('--same-dir', '-s', help='Build in same directory as source', action="store_true")
group.add_argument('--no-same-dir', help='Force build in a separate build directory', action="store_true")
parser_upgrade.add_argument('--keep-temp', action="store_true", help='Keep temporary directory (for debugging)')
+ parser_upgrade.add_argument('--keep-failure', action="store_true", help='Keep failed upgrade recipe and associated files (for debugging)')
parser_upgrade.set_defaults(func=upgrade, fixed_setup=context.fixed_setup)
parser_latest_version = subparsers.add_parser('latest-version', help='Report the latest version of an existing recipe',
@@ -627,3 +684,10 @@ def register_commands(subparsers, context):
group='info')
parser_latest_version.add_argument('recipename', help='Name of recipe to query (just name - no version, path or extension)')
parser_latest_version.set_defaults(func=latest_version)
+
+ parser_check_upgrade_status = subparsers.add_parser('check-upgrade-status', help="Report upgradability for multiple (or all) recipes",
+ description="Prints a table of recipes together with versions currently provided by recipes, and latest upstream versions, when there is a later version available",
+ group='info')
+ parser_check_upgrade_status.add_argument('recipe', help='Name of the recipe to report (omit to report upgrade info for all recipes)', nargs='*')
+ parser_check_upgrade_status.add_argument('--all', '-a', help='Show all recipes, not just recipes needing upgrade', action="store_true")
+ parser_check_upgrade_status.set_defaults(func=check_upgrade_status)
diff --git a/scripts/lib/devtool/utilcmds.py b/scripts/lib/devtool/utilcmds.py
index 7cd139fb8b..964817766b 100644
--- a/scripts/lib/devtool/utilcmds.py
+++ b/scripts/lib/devtool/utilcmds.py
@@ -2,18 +2,8 @@
#
# Copyright (C) 2015-2016 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Devtool utility plugins"""
diff --git a/scripts/lib/recipetool/append.py b/scripts/lib/recipetool/append.py
index 69c8bb77a0..341e893305 100644
--- a/scripts/lib/recipetool/append.py
+++ b/scripts/lib/recipetool/append.py
@@ -2,18 +2,8 @@
#
# Copyright (C) 2015 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import sys
import os
@@ -28,6 +18,7 @@ import shutil
import scriptutils
import errno
from collections import defaultdict
+import difflib
logger = logging.getLogger('recipetool')
@@ -59,7 +50,7 @@ def find_target_file(targetpath, d, pkglist=None):
'/etc/group': '/etc/group should be managed through the useradd and extrausers classes',
'/etc/shadow': '/etc/shadow should be managed through the useradd and extrausers classes',
'/etc/gshadow': '/etc/gshadow should be managed through the useradd and extrausers classes',
- '${sysconfdir}/hostname': '${sysconfdir}/hostname contents should be set by setting hostname_pn-base-files = "value" in configuration',}
+ '${sysconfdir}/hostname': '${sysconfdir}/hostname contents should be set by setting hostname:pn-base-files = "value" in configuration',}
for pthspec, message in invalidtargets.items():
if fnmatch.fnmatchcase(targetpath, d.expand(pthspec)):
@@ -82,15 +73,15 @@ def find_target_file(targetpath, d, pkglist=None):
# This does assume that PN comes before other values, but that's a fairly safe assumption
for line in f:
if line.startswith('PN:'):
- pn = line.split(':', 1)[1].strip()
- elif line.startswith('FILES_INFO:'):
- val = line.split(':', 1)[1].strip()
+ pn = line.split(': ', 1)[1].strip()
+ elif line.startswith('FILES_INFO'):
+ val = line.split(': ', 1)[1].strip()
dictval = json.loads(val)
for fullpth in dictval.keys():
if fnmatch.fnmatchcase(fullpth, targetpath):
recipes[targetpath].append(pn)
- elif line.startswith('pkg_preinst_') or line.startswith('pkg_postinst_'):
- scriptval = line.split(':', 1)[1].strip().encode('utf-8').decode('unicode_escape')
+ elif line.startswith('pkg_preinst:') or line.startswith('pkg_postinst:'):
+ scriptval = line.split(': ', 1)[1].strip().encode('utf-8').decode('unicode_escape')
if 'update-alternatives --install %s ' % targetpath in scriptval:
recipes[targetpath].append('?%s' % pn)
elif targetpath_re.search(scriptval):
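A self-contained sketch of the pkgdata parsing above, run against an illustrative runtime pkgdata fragment (real files live under the pkgdata directory, one per package); note the ': '-based split, which tolerates the new ':'-separated override syntax in keys:

import json

sample = [
    'PN: base-files',
    'FILES_INFO:base-files: {"/etc/hostname": 10, "/etc/profile": 20}',
]

pn = None
matched = {}
for line in sample:
    if line.startswith('PN:'):
        pn = line.split(': ', 1)[1].strip()
    elif line.startswith('FILES_INFO'):
        files_info = json.loads(line.split(': ', 1)[1].strip())
        matched = {pth: pn for pth in files_info}

print(matched)  # {'/etc/hostname': 'base-files', '/etc/profile': 'base-files'}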
@@ -238,7 +229,7 @@ def appendfile(args):
if stdout:
logger.debug('file command output: %s' % stdout.rstrip())
if ('executable' in stdout and not 'shell script' in stdout) or 'shared object' in stdout:
- logger.warn('This file looks like it is a binary or otherwise the output of compilation. If it is, you should consider building it properly instead of substituting a binary file directly.')
+ logger.warning('This file looks like it is a binary or otherwise the output of compilation. If it is, you should consider building it properly instead of substituting a binary file directly.')
if args.recipe:
recipes = {args.targetpath: [args.recipe],}
@@ -275,7 +266,7 @@ def appendfile(args):
if selectpn:
logger.debug('Selecting recipe %s for file %s' % (selectpn, args.targetpath))
if postinst_pns:
- logger.warn('%s be modified by postinstall scripts for the following recipes:\n %s\nThis may or may not be an issue depending on what modifications these postinstall scripts make.' % (args.targetpath, '\n '.join(postinst_pns)))
+ logger.warning('%s be modified by postinstall scripts for the following recipes:\n %s\nThis may or may not be an issue depending on what modifications these postinstall scripts make.' % (args.targetpath, '\n '.join(postinst_pns)))
rd = _parse_recipe(selectpn, tinfoil)
if not rd:
# Error message already shown
@@ -286,12 +277,12 @@ def appendfile(args):
sourcetype, sourcepath = sourcefile.split('://', 1)
logger.debug('Original source file is %s (%s)' % (sourcepath, sourcetype))
if sourcetype == 'patch':
- logger.warn('File %s is added by the patch %s - you may need to remove or replace this patch in order to replace the file.' % (args.targetpath, sourcepath))
+ logger.warning('File %s is added by the patch %s - you may need to remove or replace this patch in order to replace the file.' % (args.targetpath, sourcepath))
sourcepath = None
else:
logger.debug('Unable to determine source file, proceeding anyway')
if modpatches:
- logger.warn('File %s is modified by the following patches:\n %s' % (args.targetpath, '\n '.join(modpatches)))
+ logger.warning('File %s is modified by the following patches:\n %s' % (args.targetpath, '\n '.join(modpatches)))
if instelements and sourcepath:
install = None
@@ -309,7 +300,10 @@ def appendfile(args):
if st.st_mode & stat.S_IXUSR:
perms = '0755'
install = {args.newfile: (args.targetpath, perms)}
- oe.recipeutils.bbappend_recipe(rd, args.destlayer, {args.newfile: sourcepath}, install, wildcardver=args.wildcard_version, machine=args.machine)
+ if sourcepath:
+ sourcepath = os.path.basename(sourcepath)
+ oe.recipeutils.bbappend_recipe(rd, args.destlayer, {args.newfile: {'newname' : sourcepath}}, install, wildcardver=args.wildcard_version, machine=args.machine)
+ tinfoil.modified_files()
return 0
else:
if alternative_pns:
@@ -337,35 +331,57 @@ def appendsrc(args, files, rd, extralines=None):
copyfiles = {}
extralines = extralines or []
+ params = []
for newfile, srcfile in files.items():
src_destdir = os.path.dirname(srcfile)
if not args.use_workdir:
if rd.getVar('S') == rd.getVar('STAGING_KERNEL_DIR'):
srcdir = os.path.join(workdir, 'git')
if not bb.data.inherits_class('kernel-yocto', rd):
- logger.warn('S == STAGING_KERNEL_DIR and non-kernel-yocto, unable to determine path to srcdir, defaulting to ${WORKDIR}/git')
+ logger.warning('S == STAGING_KERNEL_DIR and non-kernel-yocto, unable to determine path to srcdir, defaulting to ${WORKDIR}/git')
src_destdir = os.path.join(os.path.relpath(srcdir, workdir), src_destdir)
src_destdir = os.path.normpath(src_destdir)
- source_uri = 'file://{0}'.format(os.path.basename(srcfile))
if src_destdir and src_destdir != '.':
- source_uri += ';subdir={0}'.format(src_destdir)
-
- simple = bb.fetch.URI(source_uri)
- simple.params = {}
- simple_str = str(simple)
- if simple_str in simplified:
- existing = simplified[simple_str]
- if source_uri != existing:
- logger.warn('{0!r} is already in SRC_URI, with different parameters: {1!r}, not adding'.format(source_uri, existing))
- else:
- logger.warn('{0!r} is already in SRC_URI, not adding'.format(source_uri))
+ params.append({'subdir': src_destdir})
else:
- extralines.append('SRC_URI += {0}'.format(source_uri))
- copyfiles[newfile] = srcfile
-
- oe.recipeutils.bbappend_recipe(rd, args.destlayer, copyfiles, None, wildcardver=args.wildcard_version, machine=args.machine, extralines=extralines)
-
+ params.append({})
+
+ copyfiles[newfile] = {'newname' : os.path.basename(srcfile)}
+
+ dry_run_output = None
+ dry_run_outdir = None
+ if args.dry_run:
+ import tempfile
+ dry_run_output = tempfile.TemporaryDirectory(prefix='devtool')
+ dry_run_outdir = dry_run_output.name
+
+ appendfile, _ = oe.recipeutils.bbappend_recipe(rd, args.destlayer, copyfiles, None, wildcardver=args.wildcard_version, machine=args.machine, extralines=extralines, params=params,
+ redirect_output=dry_run_outdir, update_original_recipe=args.update_recipe)
+ if not appendfile:
+ return
+ if args.dry_run:
+ output = ''
+ appendfilename = os.path.basename(appendfile)
+ newappendfile = appendfile
+ if appendfile and os.path.exists(appendfile):
+ with open(appendfile, 'r') as f:
+ oldlines = f.readlines()
+ else:
+ appendfile = '/dev/null'
+ oldlines = []
+
+ with open(os.path.join(dry_run_outdir, appendfilename), 'r') as f:
+ newlines = f.readlines()
+ diff = difflib.unified_diff(oldlines, newlines, appendfile, newappendfile)
+ difflines = list(diff)
+ if difflines:
+ output += ''.join(difflines)
+ if output:
+ logger.info('Diff of changed files:\n%s' % output)
+ else:
+ logger.info('No changed files')
+ tinfoil.modified_files()
def appendsrcfiles(parser, args):
recipedata = _parse_recipe(args.recipe, tinfoil)
@@ -445,6 +461,8 @@ def register_commands(subparsers):
help='Create/update a bbappend to add or replace source files',
description='Creates a bbappend (or updates an existing one) to add or replace the specified file in the recipe sources, either those in WORKDIR or those in the source tree. This command lets you specify multiple files with a destination directory, so you cannot specify the destination filename. See the `appendsrcfile` command for the other behavior.')
parser.add_argument('-D', '--destdir', help='Destination directory (relative to S or WORKDIR, defaults to ".")', default='', type=destination_path)
+ parser.add_argument('-u', '--update-recipe', help='Update recipe instead of creating (or updating) a bbappend file. DESTLAYER must contain the recipe to update', action='store_true')
+ parser.add_argument('-n', '--dry-run', help='Dry run mode', action='store_true')
parser.add_argument('files', nargs='+', metavar='FILE', help='File(s) to be added to the recipe sources (WORKDIR or S)', type=existing_path)
parser.set_defaults(func=lambda a: appendsrcfiles(parser, a), parserecipes=True)
@@ -452,6 +470,8 @@ def register_commands(subparsers):
parents=[common_src],
help='Create/update a bbappend to add or replace a source file',
description='Creates a bbappend (or updates an existing one) to add or replace the specified files in the recipe sources, either those in WORKDIR or those in the source tree. This command lets you specify the destination filename, not just destination directory, but only works for one file. See the `appendsrcfiles` command for the other behavior.')
+ parser.add_argument('-u', '--update-recipe', help='Update recipe instead of creating (or updating) a bbappend file. DESTLAYER must contain the recipe to update', action='store_true')
+ parser.add_argument('-n', '--dry-run', help='Dry run mode', action='store_true')
parser.add_argument('file', metavar='FILE', help='File to be added to the recipe sources (WORKDIR or S)', type=existing_path)
parser.add_argument('destfile', metavar='DESTFILE', nargs='?', help='Destination path (relative to S or WORKDIR, optional)', type=destination_path)
parser.set_defaults(func=lambda a: appendsrcfile(parser, a), parserecipes=True)
diff --git a/scripts/lib/recipetool/create.py b/scripts/lib/recipetool/create.py
index a3710285bb..8e9ff38db6 100644
--- a/scripts/lib/recipetool/create.py
+++ b/scripts/lib/recipetool/create.py
@@ -2,18 +2,8 @@
#
# Copyright (C) 2014-2017 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import sys
import os
@@ -70,11 +60,13 @@ class RecipeHandler(object):
if RecipeHandler.recipelibmap:
return
# First build up library->package mapping
- shlib_providers = oe.package.read_shlib_providers(d)
+ d2 = bb.data.createCopy(d)
+ d2.setVar("WORKDIR_PKGDATA", "${PKGDATA_DIR}")
+ shlib_providers = oe.package.read_shlib_providers(d2)
libdir = d.getVar('libdir')
base_libdir = d.getVar('base_libdir')
libpaths = list(set([base_libdir, libdir]))
- libname_re = re.compile('^lib(.+)\.so.*$')
+ libname_re = re.compile(r'^lib(.+)\.so.*$')
pkglibmap = {}
for lib, item in shlib_providers.items():
for path, pkg in item.items():
@@ -98,7 +90,7 @@ class RecipeHandler(object):
break
except IOError as ioe:
if ioe.errno == 2:
- logger.warn('unable to find a pkgdata file for package %s' % pkg)
+ logger.warning('unable to find a pkgdata file for package %s' % pkg)
else:
raise
@@ -123,8 +115,8 @@ class RecipeHandler(object):
for line in f:
if line.startswith('PN:'):
pn = line.split(':', 1)[-1].strip()
- elif line.startswith('FILES_INFO:'):
- val = line.split(':', 1)[1].strip()
+ elif line.startswith('FILES_INFO:%s:' % pkg):
+ val = line.split(': ', 1)[1].strip()
dictval = json.loads(val)
for fullpth in sorted(dictval):
if fullpth.startswith(includedir) and fullpth.endswith('.h'):
@@ -374,7 +366,7 @@ def supports_srcrev(uri):
def reformat_git_uri(uri):
'''Convert any http[s]://....git URI into git://...;protocol=http[s]'''
checkuri = uri.split(';', 1)[0]
- if checkuri.endswith('.git') or '/git/' in checkuri or re.match('https?://github.com/[^/]+/[^/]+/?$', checkuri):
+ if checkuri.endswith('.git') or '/git/' in checkuri or re.match(r'https?://git(hub|lab)\.com/[^/]+/[^/]+/?$', checkuri):
# Appends scheme if the scheme is missing
if not '://' in uri:
uri = 'git://' + uri
@@ -431,19 +423,50 @@ def create_recipe(args):
storeTagName = ''
pv_srcpv = False
+ handled = []
+ classes = []
+
+ # Find all plugins that want to register handlers
+ logger.debug('Loading recipe handlers')
+ raw_handlers = []
+ for plugin in plugins:
+ if hasattr(plugin, 'register_recipe_handlers'):
+ plugin.register_recipe_handlers(raw_handlers)
+ # Sort handlers by priority
+ handlers = []
+ for i, handler in enumerate(raw_handlers):
+ if isinstance(handler, tuple):
+ handlers.append((handler[0], handler[1], i))
+ else:
+ handlers.append((handler, 0, i))
+ handlers.sort(key=lambda item: (item[1], -item[2]), reverse=True)
+ for handler, priority, _ in handlers:
+ logger.debug('Handler: %s (priority %d)' % (handler.__class__.__name__, priority))
+ setattr(handler, '_devtool', args.devtool)
+ handlers = [item[0] for item in handlers]
+
+ fetchuri = None
+ for handler in handlers:
+ if hasattr(handler, 'process_url'):
+ ret = handler.process_url(args, classes, handled, extravalues)
+ if 'url' in handled and ret:
+ fetchuri = ret
+ break
+
if os.path.isfile(source):
source = 'file://%s' % os.path.abspath(source)
if scriptutils.is_src_url(source):
# Warn about github archive URLs
- if re.match('https?://github.com/[^/]+/[^/]+/archive/.+(\.tar\..*|\.zip)$', source):
- logger.warn('github archive files are not guaranteed to be stable and may be re-generated over time. If the latter occurs, the checksums will likely change and the recipe will fail at do_fetch. It is recommended that you point to an actual commit or tag in the repository instead (using the repository URL in conjunction with the -S/--srcrev option).')
+ if re.match(r'https?://github.com/[^/]+/[^/]+/archive/.+(\.tar\..*|\.zip)$', source):
+ logger.warning('github archive files are not guaranteed to be stable and may be re-generated over time. If the latter occurs, the checksums will likely change and the recipe will fail at do_fetch. It is recommended that you point to an actual commit or tag in the repository instead (using the repository URL in conjunction with the -S/--srcrev option).')
# Fetch a URL
- fetchuri = reformat_git_uri(urldefrag(source)[0])
+ if not fetchuri:
+ fetchuri = reformat_git_uri(urldefrag(source)[0])
if args.binary:
# Assume the archive contains the directory structure verbatim
# so we need to extract to a subdirectory
- fetchuri += ';subdir=${BP}'
+ fetchuri += ';subdir=${BPN}'
srcuri = fetchuri
rev_re = re.compile(';rev=([^;]+)')
res = rev_re.search(srcuri)
@@ -468,6 +491,7 @@ def create_recipe(args):
logger.error('branch= parameter and -B/--srcbranch option cannot both be specified - use one or the other')
sys.exit(1)
srcbranch = args.srcbranch
+ params['branch'] = srcbranch
nobranch = params.get('nobranch')
if nobranch and srcbranch:
logger.error('nobranch= cannot be used if you specify a branch')
@@ -485,8 +509,9 @@ def create_recipe(args):
storeTagName = params['tag']
params['nobranch'] = '1'
del params['tag']
- if scheme == 'npm':
- params['noverify'] = '1'
+ # Assume 'master' branch if not set
+ if scheme in ['git', 'gitsm'] and 'branch' not in params and 'nobranch' not in params:
+ params['branch'] = 'master'
fetchuri = bb.fetch2.encodeurl((scheme, network, path, user, passwd, params))
tmpparent = tinfoil.config_data.getVar('BASE_WORKDIR')
@@ -503,9 +528,7 @@ def create_recipe(args):
if ftmpdir and args.keep_temp:
logger.info('Fetch temp directory is %s' % ftmpdir)
- dirlist = os.listdir(srctree)
- filterout = ['git.indirectionsymlink']
- dirlist = [x for x in dirlist if x not in filterout]
+ dirlist = scriptutils.filter_src_subdirs(srctree)
logger.debug('Directory listing (excluding filtered out):\n %s' % '\n '.join(dirlist))
if len(dirlist) == 1:
singleitem = os.path.join(srctree, dirlist[0])
@@ -538,10 +561,9 @@ def create_recipe(args):
# Remove HEAD reference point and drop remote prefix
get_branch = [x.split('/', 1)[1] for x in get_branch if not x.startswith('origin/HEAD')]
if 'master' in get_branch:
- # If it is master, we do not need to append 'branch=master' as this is default.
# Even in the case where get_branch has multiple objects, if 'master' is one
# of them, we should take from 'master' by default
- srcbranch = ''
+ srcbranch = 'master'
elif len(get_branch) == 1:
# If 'master' isn't in get_branch and get_branch contains only ONE object, then store result into 'srcbranch'
srcbranch = get_branch[0]
@@ -554,8 +576,8 @@ def create_recipe(args):
# Since we might have a value in srcbranch, we need to
# reconstruct the srcuri to include 'branch' in params.
scheme, network, path, user, passwd, params = bb.fetch2.decodeurl(srcuri)
- if srcbranch:
- params['branch'] = srcbranch
+ if scheme in ['git', 'gitsm']:
+ params['branch'] = srcbranch or 'master'
if storeTagName and scheme in ['git', 'gitsm']:
# Check srcrev using tag and check validity of the tag
@@ -614,7 +636,7 @@ def create_recipe(args):
splitline = line.split()
if len(splitline) > 1:
if splitline[0] == 'origin' and scriptutils.is_src_url(splitline[1]):
- srcuri = reformat_git_uri(splitline[1])
+ srcuri = reformat_git_uri(splitline[1]) + ';branch=master'
srcsubdir = 'git'
break
@@ -647,8 +669,6 @@ def create_recipe(args):
# We'll come back and replace this later in handle_license_vars()
lines_before.append('##LICENSE_PLACEHOLDER##')
- handled = []
- classes = []
# FIXME This is kind of a hack, we probably ought to be using bitbake to do this
pn = None
@@ -686,8 +706,10 @@ def create_recipe(args):
if not srcuri:
lines_before.append('# No information for SRC_URI yet (only an external source tree was specified)')
lines_before.append('SRC_URI = "%s"' % srcuri)
+ shown_checksums = ["%ssum" % s for s in bb.fetch2.SHOWN_CHECKSUM_LIST]
for key, value in sorted(checksums.items()):
- lines_before.append('SRC_URI[%s] = "%s"' % (key, value))
+ if key in shown_checksums:
+ lines_before.append('SRC_URI[%s] = "%s"' % (key, value))
if srcuri and supports_srcrev(srcuri):
lines_before.append('')
lines_before.append('# Modify these as desired')
@@ -699,12 +721,12 @@ def create_recipe(args):
srcpvprefix = 'svnr'
else:
srcpvprefix = scheme
- lines_before.append('PV = "%s+%s${SRCPV}"' % (realpv or '1.0', srcpvprefix))
+ lines_before.append('PV = "%s+%s"' % (realpv or '1.0', srcpvprefix))
pv_srcpv = True
if not args.autorev and srcrev == '${AUTOREV}':
if os.path.exists(os.path.join(srctree, '.git')):
(stdout, _) = bb.process.run('git rev-parse HEAD', cwd=srctree)
- srcrev = stdout.rstrip()
+ srcrev = stdout.rstrip()
lines_before.append('SRCREV = "%s"' % srcrev)
if args.provides:
lines_before.append('PROVIDES = "%s"' % args.provides)
@@ -721,32 +743,11 @@ def create_recipe(args):
lines_after.append('')
if args.binary:
- lines_after.append('INSANE_SKIP_${PN} += "already-stripped"')
+ lines_after.append('INSANE_SKIP:${PN} += "already-stripped"')
lines_after.append('')
- if args.fetch_dev:
- extravalues['fetchdev'] = True
- else:
- extravalues['fetchdev'] = None
-
- # Find all plugins that want to register handlers
- logger.debug('Loading recipe handlers')
- raw_handlers = []
- for plugin in plugins:
- if hasattr(plugin, 'register_recipe_handlers'):
- plugin.register_recipe_handlers(raw_handlers)
- # Sort handlers by priority
- handlers = []
- for i, handler in enumerate(raw_handlers):
- if isinstance(handler, tuple):
- handlers.append((handler[0], handler[1], i))
- else:
- handlers.append((handler, 0, i))
- handlers.sort(key=lambda item: (item[1], -item[2]), reverse=True)
- for handler, priority, _ in handlers:
- logger.debug('Handler: %s (priority %d)' % (handler.__class__.__name__, priority))
- setattr(handler, '_devtool', args.devtool)
- handlers = [item[0] for item in handlers]
+ if args.npm_dev:
+ extravalues['NPM_INSTALL_DEV'] = 1
# Apply the handlers
if args.binary:
@@ -756,6 +757,10 @@ def create_recipe(args):
for handler in handlers:
handler.process(srctree_use, classes, lines_before, lines_after, handled, extravalues)
+ # native and nativesdk classes are special and must be inherited last
+ # If present, put them at the end of the classes list
+ classes.sort(key=lambda c: c in ("native", "nativesdk"))
+
extrafiles = extravalues.pop('extrafiles', {})
extra_pn = extravalues.pop('PN', None)
extra_pv = extravalues.pop('PV', None)
@@ -843,7 +848,7 @@ def create_recipe(args):
elif line.startswith('PV = '):
if realpv:
# Replace the first part of the PV value
- line = re.sub('"[^+]*\+', '"%s+' % realpv, line)
+ line = re.sub(r'"[^+]*\+', '"%s+' % realpv, line)
lines_before.append(line)
if args.also_native:
@@ -880,8 +885,10 @@ def create_recipe(args):
outlines.append('')
outlines.extend(lines_after)
+ outlines = [line.rstrip('\n') + "\n" for line in outlines]
+
if extravalues:
- _, outlines = oe.recipeutils.patch_recipe_lines(outlines, extravalues, trailing_newline=False)
+ _, outlines = oe.recipeutils.patch_recipe_lines(outlines, extravalues, trailing_newline=True)
if args.extract_to:
scriptutils.git_convert_standalone_clone(srctree)
@@ -897,7 +904,7 @@ def create_recipe(args):
log_info_cond('Source extracted to %s' % args.extract_to, args.devtool)
if outfile == '-':
- sys.stdout.write('\n'.join(outlines) + '\n')
+ sys.stdout.write(''.join(outlines) + '\n')
else:
with open(outfile, 'w') as f:
lastline = None
@@ -905,9 +912,10 @@ def create_recipe(args):
if not lastline and not line:
# Skip extra blank lines
continue
- f.write('%s\n' % line)
+ f.write('%s' % line)
lastline = line
log_info_cond('Recipe %s has been created; further editing may be required to make it fully functional' % outfile, args.devtool)
+ tinfoil.modified_files()
if tempsrc:
if args.keep_temp:
@@ -930,6 +938,22 @@ def split_value(value):
else:
return value
+def fixup_license(value):
+ # Ensure licenses containing an OR start and end with brackets
+ if '|' in value:
+ return '(' + value + ')'
+ return value
+
+def tidy_licenses(value):
+ """Flatten, split and sort licenses"""
+ from oe.license import flattened_licenses
+ def _choose(a, b):
+ str_a, str_b = sorted((" & ".join(a), " & ".join(b)), key=str.casefold)
+ return ["(%s | %s)" % (str_a, str_b)]
+ if not isinstance(value, str):
+ value = " & ".join(value)
+ return sorted(list(set(flattened_licenses(value, _choose))), key=str.casefold)
+
def handle_license_vars(srctree, lines_before, handled, extravalues, d):
lichandled = [x for x in handled if x[0] == 'license']
if lichandled:
@@ -943,10 +967,13 @@ def handle_license_vars(srctree, lines_before, handled, extravalues, d):
lines = []
if licvalues:
for licvalue in licvalues:
- if not licvalue[0] in licenses:
- licenses.append(licvalue[0])
+ license = licvalue[0]
+ lics = tidy_licenses(fixup_license(license))
+ lics = [lic for lic in lics if lic not in licenses]
+ if len(lics):
+ licenses.extend(lics)
lic_files_chksum.append('file://%s;md5=%s' % (licvalue[1], licvalue[2]))
- if licvalue[0] == 'Unknown':
+ if license == 'Unknown':
lic_unknown.append(licvalue[1])
if lic_unknown:
lines.append('#')
@@ -955,9 +982,7 @@ def handle_license_vars(srctree, lines_before, handled, extravalues, d):
for licfile in lic_unknown:
lines.append('# %s' % licfile)
- extra_license = split_value(extravalues.pop('LICENSE', []))
- if '&' in extra_license:
- extra_license.remove('&')
+ extra_license = tidy_licenses(extravalues.pop('LICENSE', ''))
if extra_license:
if licenses == ['Unknown']:
licenses = extra_license
@@ -998,7 +1023,7 @@ def handle_license_vars(srctree, lines_before, handled, extravalues, d):
lines.append('# instead of &. If there is any doubt, check the accompanying documentation')
lines.append('# to determine which situation is applicable.')
- lines.append('LICENSE = "%s"' % ' & '.join(licenses))
+ lines.append('LICENSE = "%s"' % ' & '.join(sorted(licenses, key=str.casefold)))
lines.append('LIC_FILES_CHKSUM = "%s"' % ' \\\n '.join(lic_files_chksum))
lines.append('')
@@ -1015,117 +1040,170 @@ def handle_license_vars(srctree, lines_before, handled, extravalues, d):
handled.append(('license', licvalues))
return licvalues
-def get_license_md5sums(d, static_only=False):
+def get_license_md5sums(d, static_only=False, linenumbers=False):
import bb.utils
+ import csv
md5sums = {}
- if not static_only:
+ if not static_only and not linenumbers:
# Gather md5sums of license files in common license dir
commonlicdir = d.getVar('COMMON_LICENSE_DIR')
for fn in os.listdir(commonlicdir):
md5value = bb.utils.md5_file(os.path.join(commonlicdir, fn))
md5sums[md5value] = fn
+
# The following were extracted from common values in various recipes
# (double checking the license against the license file itself, not just
# the LICENSE value in the recipe)
- md5sums['94d55d512a9ba36caa9b7df079bae19f'] = 'GPLv2'
- md5sums['b234ee4d69f5fce4486a80fdaf4a4263'] = 'GPLv2'
- md5sums['59530bdf33659b29e73d4adb9f9f6552'] = 'GPLv2'
- md5sums['0636e73ff0215e8d672dc4c32c317bb3'] = 'GPLv2'
- md5sums['eb723b61539feef013de476e68b5c50a'] = 'GPLv2'
- md5sums['751419260aa954499f7abaabaa882bbe'] = 'GPLv2'
- md5sums['393a5ca445f6965873eca0259a17f833'] = 'GPLv2'
- md5sums['12f884d2ae1ff87c09e5b7ccc2c4ca7e'] = 'GPLv2'
- md5sums['8ca43cbc842c2336e835926c2166c28b'] = 'GPLv2'
- md5sums['ebb5c50ab7cab4baeffba14977030c07'] = 'GPLv2'
- md5sums['c93c0550bd3173f4504b2cbd8991e50b'] = 'GPLv2'
- md5sums['9ac2e7cff1ddaf48b6eab6028f23ef88'] = 'GPLv2'
- md5sums['4325afd396febcb659c36b49533135d4'] = 'GPLv2'
- md5sums['18810669f13b87348459e611d31ab760'] = 'GPLv2'
- md5sums['d7810fab7487fb0aad327b76f1be7cd7'] = 'GPLv2' # the Linux kernel's COPYING file
- md5sums['bbb461211a33b134d42ed5ee802b37ff'] = 'LGPLv2.1'
- md5sums['7fbc338309ac38fefcd64b04bb903e34'] = 'LGPLv2.1'
- md5sums['4fbd65380cdd255951079008b364516c'] = 'LGPLv2.1'
- md5sums['2d5025d4aa3495befef8f17206a5b0a1'] = 'LGPLv2.1'
- md5sums['fbc093901857fcd118f065f900982c24'] = 'LGPLv2.1'
- md5sums['a6f89e2100d9b6cdffcea4f398e37343'] = 'LGPLv2.1'
- md5sums['d8045f3b8f929c1cb29a1e3fd737b499'] = 'LGPLv2.1'
- md5sums['fad9b3332be894bab9bc501572864b29'] = 'LGPLv2.1'
- md5sums['3bf50002aefd002f49e7bb854063f7e7'] = 'LGPLv2'
- md5sums['9f604d8a4f8e74f4f5140845a21b6674'] = 'LGPLv2'
- md5sums['5f30f0716dfdd0d91eb439ebec522ec2'] = 'LGPLv2'
- md5sums['55ca817ccb7d5b5b66355690e9abc605'] = 'LGPLv2'
- md5sums['252890d9eee26aab7b432e8b8a616475'] = 'LGPLv2'
- md5sums['3214f080875748938ba060314b4f727d'] = 'LGPLv2'
- md5sums['db979804f025cf55aabec7129cb671ed'] = 'LGPLv2'
- md5sums['d32239bcb673463ab874e80d47fae504'] = 'GPLv3'
- md5sums['f27defe1e96c2e1ecd4e0c9be8967949'] = 'GPLv3'
- md5sums['6a6a8e020838b23406c81b19c1d46df6'] = 'LGPLv3'
- md5sums['3b83ef96387f14655fc854ddc3c6bd57'] = 'Apache-2.0'
- md5sums['385c55653886acac3821999a3ccd17b3'] = 'Artistic-1.0 | GPL-2.0' # some perl modules
- md5sums['54c7042be62e169199200bc6477f04d1'] = 'BSD-3-Clause'
+
+ # Read license md5sums from csv file
+ scripts_path = os.path.dirname(os.path.realpath(__file__))
+ for path in (d.getVar('BBPATH').split(':')
+ + [os.path.join(scripts_path, '..', '..')]):
+ csv_path = os.path.join(path, 'lib', 'recipetool', 'licenses.csv')
+ if os.path.isfile(csv_path):
+ with open(csv_path, newline='') as csv_file:
+ fieldnames = ['md5sum', 'license', 'beginline', 'endline', 'md5']
+ reader = csv.DictReader(csv_file, delimiter=',', fieldnames=fieldnames)
+ for row in reader:
+ if linenumbers:
+ md5sums[row['md5sum']] = (
+ row['license'], row['beginline'], row['endline'], row['md5'])
+ else:
+ md5sums[row['md5sum']] = row['license']
+
return md5sums
-def crunch_license(licfile):
+def crunch_known_licenses(d):
'''
- Remove non-material text from a license file and then check
- its md5sum against a known list. This works well for licenses
- which contain a copyright statement, but is also a useful way
- to handle people's insistence upon reformatting the license text
- slightly (with no material difference to the text of the
- license).
+ Calculate the MD5 checksums for the crunched versions of all common
+ licenses. Also add additional known checksums.
'''
+
+ crunched_md5sums = {}
- import oe.utils
-
- # Note: these are carefully constructed!
- license_title_re = re.compile('^\(?(#+ *)?(The )?.{1,10} [Ll]icen[sc]e( \(.{1,10}\))?\)?:?$')
- license_statement_re = re.compile('^(This (project|software) is( free software)? (released|licen[sc]ed)|(Released|Licen[cs]ed)) under the .{1,10} [Ll]icen[sc]e:?$')
- copyright_re = re.compile('^(#+)? *Copyright .*$')
+ # common licenses
+ crunched_md5sums['ad4e9d34a2e966dfe9837f18de03266d'] = 'GFDL-1.1-only'
+ crunched_md5sums['d014fb11a34eb67dc717fdcfc97e60ed'] = 'GFDL-1.2-only'
+ crunched_md5sums['e020ca655b06c112def28e597ab844f1'] = 'GFDL-1.3-only'
- crunched_md5sums = {}
# The following two were gleaned from the "forever" npm package
crunched_md5sums['0a97f8e4cbaf889d6fa51f84b89a79f6'] = 'ISC'
- crunched_md5sums['eecf6429523cbc9693547cf2db790b5c'] = 'MIT'
- # https://github.com/vasi/pixz/blob/master/LICENSE
- crunched_md5sums['2f03392b40bbe663597b5bd3cc5ebdb9'] = 'BSD-2-Clause'
# https://github.com/waffle-gl/waffle/blob/master/LICENSE.txt
- crunched_md5sums['e72e5dfef0b1a4ca8a3d26a60587db66'] = 'BSD-2-Clause'
+ crunched_md5sums['50fab24ce589d69af8964fdbfe414c60'] = 'BSD-2-Clause'
# https://github.com/spigwitmer/fakeds1963s/blob/master/LICENSE
- crunched_md5sums['8be76ac6d191671f347ee4916baa637e'] = 'GPLv2'
- # https://github.com/datto/dattobd/blob/master/COPYING
- # http://git.savannah.gnu.org/cgit/freetype/freetype2.git/tree/docs/GPLv2.TXT
- crunched_md5sums['1d65c5ad4bf6489f85f4812bf08ae73d'] = 'GPLv2'
+ crunched_md5sums['88a4355858a1433fea99fae34a44da88'] = 'GPL-2.0-only'
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
- # http://git.neil.brown.name/?p=mdadm.git;a=blob;f=COPYING;h=d159169d1050894d3ea3b98e1c965c4058208fe1;hb=HEAD
- crunched_md5sums['fb530f66a7a89ce920f0e912b5b66d4b'] = 'GPLv2'
- # https://github.com/gkos/nrf24/blob/master/COPYING
- crunched_md5sums['7b6aaa4daeafdfa6ed5443fd2684581b'] = 'GPLv2'
- # https://github.com/josch09/resetusb/blob/master/COPYING
- crunched_md5sums['8b8ac1d631a4d220342e83bcf1a1fbc3'] = 'GPLv3'
+ crunched_md5sums['063b5c3ebb5f3aa4c85a2ed18a31fbe7'] = 'GPL-2.0-only'
# https://github.com/FFmpeg/FFmpeg/blob/master/COPYING.LGPLv2.1
- crunched_md5sums['2ea316ed973ae176e502e2297b574bb3'] = 'LGPLv2.1'
+ crunched_md5sums['7f5202f4d44ed15dcd4915f5210417d8'] = 'LGPL-2.1-only'
# unixODBC-2.3.4 COPYING
- crunched_md5sums['1daebd9491d1e8426900b4fa5a422814'] = 'LGPLv2.1'
+ crunched_md5sums['3debde09238a8c8e1f6a847e1ec9055b'] = 'LGPL-2.1-only'
# https://github.com/FFmpeg/FFmpeg/blob/master/COPYING.LGPLv3
- crunched_md5sums['2ebfb3bb49b9a48a075cc1425e7f4129'] = 'LGPLv3'
+ crunched_md5sums['f90c613c51aa35da4d79dd55fc724ceb'] = 'LGPL-3.0-only'
# https://raw.githubusercontent.com/eclipse/mosquitto/v1.4.14/epl-v10
crunched_md5sums['efe2cb9a35826992b9df68224e3c2628'] = 'EPL-1.0'
- # https://raw.githubusercontent.com/eclipse/mosquitto/v1.4.14/edl-v10
- crunched_md5sums['0a9c78c0a398d1bbce4a166757d60387'] = 'EDL-1.0'
+
+ # https://raw.githubusercontent.com/jquery/esprima/3.1.3/LICENSE.BSD
+ crunched_md5sums['80fa7b56a28e8c902e6af194003220a5'] = 'BSD-2-Clause'
+ # https://raw.githubusercontent.com/npm/npm-install-checks/master/LICENSE
+ crunched_md5sums['e659f77bfd9002659e112d0d3d59b2c1'] = 'BSD-2-Clause'
+ # https://raw.githubusercontent.com/silverwind/default-gateway/4.2.0/LICENSE
+ crunched_md5sums['4c641f2d995c47f5cb08bdb4b5b6ea05'] = 'BSD-2-Clause'
+ # https://raw.githubusercontent.com/tad-lispy/node-damerau-levenshtein/v1.0.5/LICENSE
+ crunched_md5sums['2b8c039b2b9a25f0feb4410c4542d346'] = 'BSD-2-Clause'
+ # https://raw.githubusercontent.com/terser/terser/v3.17.0/LICENSE
+ crunched_md5sums['8bd23871802951c9ad63855151204c2c'] = 'BSD-2-Clause'
+ # https://raw.githubusercontent.com/alexei/sprintf.js/1.0.3/LICENSE
+ crunched_md5sums['008c22318c8ea65928bf730ddd0273e3'] = 'BSD-3-Clause'
+ # https://raw.githubusercontent.com/Caligatio/jsSHA/v3.2.0/LICENSE
+ crunched_md5sums['0e46634a01bfef056892949acaea85b1'] = 'BSD-3-Clause'
+ # https://raw.githubusercontent.com/d3/d3-path/v1.0.9/LICENSE
+ crunched_md5sums['b5f72aef53d3b2b432702c30b0215666'] = 'BSD-3-Clause'
+ # https://raw.githubusercontent.com/feross/ieee754/v1.1.13/LICENSE
+ crunched_md5sums['a39327c997c20da0937955192d86232d'] = 'BSD-3-Clause'
+ # https://raw.githubusercontent.com/joyent/node-extsprintf/v1.3.0/LICENSE
+ crunched_md5sums['721f23a96ff4161ca3a5f071bbe18108'] = 'MIT'
+ # https://raw.githubusercontent.com/pvorb/clone/v0.2.0/LICENSE
+ crunched_md5sums['b376d29a53c9573006b9970709231431'] = 'MIT'
+ # https://raw.githubusercontent.com/andris9/encoding/v0.1.12/LICENSE
+ crunched_md5sums['85d8a977ee9d7c5ab4ac03c9b95431c4'] = 'MIT-0'
+ # https://raw.githubusercontent.com/faye/websocket-driver-node/0.7.3/LICENSE.md
+ crunched_md5sums['b66384e7137e41a9b1904ef4d39703b6'] = 'Apache-2.0'
+ # https://raw.githubusercontent.com/less/less.js/v4.1.1/LICENSE
+ crunched_md5sums['b27575459e02221ccef97ec0bfd457ae'] = 'Apache-2.0'
+ # https://raw.githubusercontent.com/microsoft/TypeScript/v3.5.3/LICENSE.txt
+ crunched_md5sums['a54a1a6a39e7f9dbb4a23a42f5c7fd1c'] = 'Apache-2.0'
+ # https://raw.githubusercontent.com/request/request/v2.87.0/LICENSE
+ crunched_md5sums['1034431802e57486b393d00c5d262b8a'] = 'Apache-2.0'
+ # https://raw.githubusercontent.com/dchest/tweetnacl-js/v0.14.5/LICENSE
+ crunched_md5sums['75605e6bdd564791ab698fca65c94a4f'] = 'Unlicense'
+ # https://raw.githubusercontent.com/stackgl/gl-mat3/v2.0.0/LICENSE.md
+ crunched_md5sums['75512892d6f59dddb6d1c7e191957e9c'] = 'Zlib'
+
+ commonlicdir = d.getVar('COMMON_LICENSE_DIR')
+ for fn in sorted(os.listdir(commonlicdir)):
+ md5value, lictext = crunch_license(os.path.join(commonlicdir, fn))
+ if md5value not in crunched_md5sums:
+ crunched_md5sums[md5value] = fn
+ elif fn != crunched_md5sums[md5value]:
+ bb.debug(2, "crunched_md5sums['%s'] is already set to '%s' rather than '%s'" % (md5value, crunched_md5sums[md5value], fn))
+ else:
+ bb.debug(2, "crunched_md5sums['%s'] is already set to '%s'" % (md5value, crunched_md5sums[md5value]))
+
+ return crunched_md5sums
+
+def crunch_license(licfile):
+ '''
+ Remove non-material text from a license file and then calculate its
+ md5sum. This works well for licenses that contain a copyright statement,
+ but is also a useful way to handle people's insistence upon reformatting
+ the license text slightly (with no material difference to the text of the
+ license).
+ '''
+
+ import oe.utils
+
+ # Note: these are carefully constructed!
+ license_title_re = re.compile(r'^#*\(? *(This is )?([Tt]he )?.{0,15} ?[Ll]icen[sc]e( \(.{1,10}\))?\)?[:\.]? ?#*$')
+ license_statement_re = re.compile(r'^((This (project|software)|.{1,10}) is( free software)? (released|licen[sc]ed)|(Released|Licen[cs]ed)) under the .{1,10} [Ll]icen[sc]e:?$')
+ copyright_re = re.compile(r'^ *[#\*]* *(Modified work |MIT LICENSED )?Copyright ?(\([cC]\))? .*$')
+ disclaimer_re = re.compile(r'^ *\*? ?All [Rr]ights [Rr]eserved\.$')
+ email_re = re.compile(r'^.*<[\w\.-]*@[\w\.\-]*>$')
+ header_re = re.compile(r'^(\/\**!?)? ?[\-=\*]* ?(\*\/)?$')
+ tag_re = re.compile(r'^ *@?\(?([Ll]icense|MIT)\)?$')
+ url_re = re.compile(r'^ *[#\*]* *https?:\/\/[\w\.\/\-]+$')
+
lictext = []
with open(licfile, 'r', errors='surrogateescape') as f:
for line in f:
# Drop opening statements
if copyright_re.match(line):
continue
+ elif disclaimer_re.match(line):
+ continue
+ elif email_re.match(line):
+ continue
+ elif header_re.match(line):
+ continue
+ elif tag_re.match(line):
+ continue
+ elif url_re.match(line):
+ continue
elif license_title_re.match(line):
continue
elif license_statement_re.match(line):
continue
- # Squash spaces, and replace smart quotes, double quotes
- # and backticks with single quotes
+ # Strip comment symbols
+ line = line.replace('*', '') \
+ .replace('#', '')
+ # Unify spelling
+ line = line.replace('sub-license', 'sublicense')
+ # Squash spaces
line = oe.utils.squashspaces(line.strip())
+ # Replace smart quotes, double quotes and backticks with single quotes
line = line.replace(u"\u2018", "'").replace(u"\u2019", "'").replace(u"\u201c","'").replace(u"\u201d", "'").replace('"', '\'').replace('`', '\'')
+ # Unify brackets
+ line = line.replace("{", "[").replace("}", "]")
if line:
lictext.append(line)
@@ -1136,31 +1214,40 @@ def crunch_license(licfile):
except UnicodeEncodeError:
md5val = None
lictext = ''
- license = crunched_md5sums.get(md5val, None)
- return license, md5val, lictext
+ return md5val, lictext
def guess_license(srctree, d):
import bb
md5sums = get_license_md5sums(d)
+ crunched_md5sums = crunch_known_licenses(d)
+
licenses = []
licspecs = ['*LICEN[CS]E*', 'COPYING*', '*[Ll]icense*', 'LEGAL*', '[Ll]egal*', '*GPL*', 'README.lic*', 'COPYRIGHT*', '[Cc]opyright*', 'e[dp]l-v10']
+ skip_extensions = (".html", ".js", ".json", ".svg", ".ts", ".go")
licfiles = []
for root, dirs, files in os.walk(srctree):
for fn in files:
+ if fn.endswith(skip_extensions):
+ continue
for spec in licspecs:
if fnmatch.fnmatch(fn, spec):
fullpath = os.path.join(root, fn)
if not fullpath in licfiles:
licfiles.append(fullpath)
- for licfile in licfiles:
+ for licfile in sorted(licfiles):
md5value = bb.utils.md5_file(licfile)
license = md5sums.get(md5value, None)
if not license:
- license, crunched_md5, lictext = crunch_license(licfile)
- if not license:
+ crunched_md5, lictext = crunch_license(licfile)
+ license = crunched_md5sums.get(crunched_md5, None)
+ if lictext and not license:
license = 'Unknown'
- licenses.append((license, os.path.relpath(licfile, srctree), md5value))
+ logger.info("Please add the following line for '%s' to 'lib/recipetool/licenses.csv' " \
+ "and replace `Unknown` with the license:\n" \
+ "%s,Unknown" % (os.path.relpath(licfile, srctree), md5value))
+ if license:
+ licenses.append((license, os.path.relpath(licfile, srctree), md5value))
# FIXME should we grab at least one source file with a license header and add that too?
@@ -1174,6 +1261,7 @@ def split_pkg_licenses(licvalues, packages, outlines, fallback_licenses=None, pn
"""
pkglicenses = {pn: []}
for license, licpath, _ in licvalues:
+ license = fixup_license(license)
for pkgname, pkgpath in packages.items():
if licpath.startswith(pkgpath + '/'):
if pkgname in pkglicenses:
@@ -1186,11 +1274,14 @@ def split_pkg_licenses(licvalues, packages, outlines, fallback_licenses=None, pn
pkglicenses[pn].append(license)
outlicenses = {}
for pkgname in packages:
- license = ' '.join(list(set(pkglicenses.get(pkgname, ['Unknown'])))) or 'Unknown'
- if license == 'Unknown' and pkgname in fallback_licenses:
+ # Assume AND operator between license files
+ license = ' & '.join(list(set(pkglicenses.get(pkgname, ['Unknown'])))) or 'Unknown'
+ if license == 'Unknown' and fallback_licenses and pkgname in fallback_licenses:
license = fallback_licenses[pkgname]
- outlines.append('LICENSE_%s = "%s"' % (pkgname, license))
- outlicenses[pkgname] = license.split()
+ licenses = tidy_licenses(license)
+ license = ' & '.join(licenses)
+ outlines.append('LICENSE:%s = "%s"' % (pkgname, license))
+ outlicenses[pkgname] = licenses
return outlicenses
def read_pkgconfig_provides(d):
@@ -1322,7 +1413,8 @@ def register_commands(subparsers):
group.add_argument('-S', '--srcrev', help='Source revision to fetch if fetching from an SCM such as git (default latest)')
parser_create.add_argument('-B', '--srcbranch', help='Branch in source repository if fetching from an SCM such as git (default master)')
parser_create.add_argument('--keep-temp', action="store_true", help='Keep temporary directory (for debugging)')
- parser_create.add_argument('--fetch-dev', action="store_true", help='For npm, also fetch devDependencies')
+ parser_create.add_argument('--npm-dev', action="store_true", help='For npm, also fetch devDependencies')
+ parser_create.add_argument('--no-pypi', action="store_true", help='Do not inherit pypi class')
parser_create.add_argument('--devtool', action="store_true", help=argparse.SUPPRESS)
parser_create.add_argument('--mirrors', action="store_true", help='Enable PREMIRRORS and MIRRORS for source tree fetching (disabled by default).')
parser_create.set_defaults(func=create_recipe)
diff --git a/scripts/lib/recipetool/create_buildsys.py b/scripts/lib/recipetool/create_buildsys.py
index 4743c740cf..ec9d510e23 100644
--- a/scripts/lib/recipetool/create_buildsys.py
+++ b/scripts/lib/recipetool/create_buildsys.py
@@ -2,22 +2,12 @@
#
# Copyright (C) 2014-2016 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+import os
import re
import logging
-import glob
from recipetool.create import RecipeHandler, validate_pv
logger = logging.getLogger('recipetool')
@@ -147,15 +137,15 @@ class CmakeRecipeHandler(RecipeHandler):
deps = []
unmappedpkgs = []
- proj_re = re.compile('project\s*\(([^)]*)\)', re.IGNORECASE)
- pkgcm_re = re.compile('pkg_check_modules\s*\(\s*[a-zA-Z0-9-_]+\s*(REQUIRED)?\s+([^)\s]+)\s*\)', re.IGNORECASE)
- pkgsm_re = re.compile('pkg_search_module\s*\(\s*[a-zA-Z0-9-_]+\s*(REQUIRED)?((\s+[^)\s]+)+)\s*\)', re.IGNORECASE)
- findpackage_re = re.compile('find_package\s*\(\s*([a-zA-Z0-9-_]+)\s*.*', re.IGNORECASE)
- findlibrary_re = re.compile('find_library\s*\(\s*[a-zA-Z0-9-_]+\s*(NAMES\s+)?([a-zA-Z0-9-_ ]+)\s*.*')
- checklib_re = re.compile('check_library_exists\s*\(\s*([^\s)]+)\s*.*', re.IGNORECASE)
- include_re = re.compile('include\s*\(\s*([^)\s]*)\s*\)', re.IGNORECASE)
- subdir_re = re.compile('add_subdirectory\s*\(\s*([^)\s]*)\s*([^)\s]*)\s*\)', re.IGNORECASE)
- dep_re = re.compile('([^ ><=]+)( *[<>=]+ *[^ ><=]+)?')
+ proj_re = re.compile(r'project\s*\(([^)]*)\)', re.IGNORECASE)
+ pkgcm_re = re.compile(r'pkg_check_modules\s*\(\s*[a-zA-Z0-9-_]+\s*(REQUIRED)?\s+([^)\s]+)\s*\)', re.IGNORECASE)
+ pkgsm_re = re.compile(r'pkg_search_module\s*\(\s*[a-zA-Z0-9-_]+\s*(REQUIRED)?((\s+[^)\s]+)+)\s*\)', re.IGNORECASE)
+ findpackage_re = re.compile(r'find_package\s*\(\s*([a-zA-Z0-9-_]+)\s*.*', re.IGNORECASE)
+ findlibrary_re = re.compile(r'find_library\s*\(\s*[a-zA-Z0-9-_]+\s*(NAMES\s+)?([a-zA-Z0-9-_ ]+)\s*.*')
+ checklib_re = re.compile(r'check_library_exists\s*\(\s*([^\s)]+)\s*.*', re.IGNORECASE)
+ include_re = re.compile(r'include\s*\(\s*([^)\s]*)\s*\)', re.IGNORECASE)
+ subdir_re = re.compile(r'add_subdirectory\s*\(\s*([^)\s]*)\s*([^)\s]*)\s*\)', re.IGNORECASE)
+ dep_re = re.compile(r'([^ ><=]+)( *[<>=]+ *[^ ><=]+)?')
def find_cmake_package(pkg):
RecipeHandler.load_devel_filemap(tinfoil.config_data)
@@ -236,9 +226,9 @@ class CmakeRecipeHandler(RecipeHandler):
elif pkg == 'PkgConfig':
inherits.append('pkgconfig')
elif pkg == 'PythonInterp':
- inherits.append('pythonnative')
+ inherits.append('python3native')
elif pkg == 'PythonLibs':
- inherits.append('python-dir')
+ inherits.append('python3-dir')
else:
# Try to map via looking at installed CMake packages in pkgdata
dep = find_cmake_package(pkg)
@@ -427,22 +417,22 @@ class AutotoolsRecipeHandler(RecipeHandler):
}
progclassmap = {'gconftool-2': 'gconf',
'pkg-config': 'pkgconfig',
- 'python': 'pythonnative',
+ 'python': 'python3native',
'python3': 'python3native',
'perl': 'perlnative',
'makeinfo': 'texinfo',
}
- pkg_re = re.compile('PKG_CHECK_MODULES\(\s*\[?[a-zA-Z0-9_]*\]?,\s*\[?([^,\]]*)\]?[),].*')
- pkgce_re = re.compile('PKG_CHECK_EXISTS\(\s*\[?([^,\]]*)\]?[),].*')
- lib_re = re.compile('AC_CHECK_LIB\(\s*\[?([^,\]]*)\]?,.*')
- libx_re = re.compile('AX_CHECK_LIBRARY\(\s*\[?[^,\]]*\]?,\s*\[?([^,\]]*)\]?,\s*\[?([a-zA-Z0-9-]*)\]?,.*')
- progs_re = re.compile('_PROGS?\(\s*\[?[a-zA-Z0-9_]*\]?,\s*\[?([^,\]]*)\]?[),].*')
- dep_re = re.compile('([^ ><=]+)( [<>=]+ [^ ><=]+)?')
- ac_init_re = re.compile('AC_INIT\(\s*([^,]+),\s*([^,]+)[,)].*')
- am_init_re = re.compile('AM_INIT_AUTOMAKE\(\s*([^,]+),\s*([^,]+)[,)].*')
- define_re = re.compile('\s*(m4_)?define\(\s*([^,]+),\s*([^,]+)\)')
- version_re = re.compile('([0-9.]+)')
+ pkg_re = re.compile(r'PKG_CHECK_MODULES\(\s*\[?[a-zA-Z0-9_]*\]?,\s*\[?([^,\]]*)\]?[),].*')
+ pkgce_re = re.compile(r'PKG_CHECK_EXISTS\(\s*\[?([^,\]]*)\]?[),].*')
+ lib_re = re.compile(r'AC_CHECK_LIB\(\s*\[?([^,\]]*)\]?,.*')
+ libx_re = re.compile(r'AX_CHECK_LIBRARY\(\s*\[?[^,\]]*\]?,\s*\[?([^,\]]*)\]?,\s*\[?([a-zA-Z0-9-]*)\]?,.*')
+ progs_re = re.compile(r'_PROGS?\(\s*\[?[a-zA-Z0-9_]*\]?,\s*\[?([^,\]]*)\]?[),].*')
+ dep_re = re.compile(r'([^ ><=]+)( [<>=]+ [^ ><=]+)?')
+ ac_init_re = re.compile(r'AC_INIT\(\s*([^,]+),\s*([^,]+)[,)].*')
+ am_init_re = re.compile(r'AM_INIT_AUTOMAKE\(\s*([^,]+),\s*([^,]+)[,)].*')
+ define_re = re.compile(r'\s*(m4_)?define\(\s*([^,]+),\s*([^,]+)\)')
+ version_re = re.compile(r'([0-9.]+)')
defines = {}
def subst_defines(value):
@@ -555,7 +545,7 @@ class AutotoolsRecipeHandler(RecipeHandler):
deps.append('zlib')
elif keyword in ('AX_CHECK_OPENSSL', 'AX_LIB_CRYPTO'):
deps.append('openssl')
- elif keyword == 'AX_LIB_CURL':
+ elif keyword in ('AX_LIB_CURL', 'LIBCURL_CHECK_CONFIG'):
deps.append('curl')
elif keyword == 'AX_LIB_BEECRYPT':
deps.append('beecrypt')
@@ -576,16 +566,7 @@ class AutotoolsRecipeHandler(RecipeHandler):
elif keyword == 'AX_PROG_XSLTPROC':
deps.append('libxslt-native')
elif keyword in ['AC_PYTHON_DEVEL', 'AX_PYTHON_DEVEL', 'AM_PATH_PYTHON']:
- pythonclass = 'pythonnative'
- res = version_re.search(value)
- if res:
- if res.group(1).startswith('3'):
- pythonclass = 'python3native'
- # Avoid replacing python3native with pythonnative
- if not pythonclass in inherits and not 'python3native' in inherits:
- if 'pythonnative' in inherits:
- inherits.remove('pythonnative')
- inherits.append(pythonclass)
+ pythonclass = 'python3native'
elif keyword == 'AX_WITH_CURSES':
deps.append('ncurses')
elif keyword == 'AX_PATH_BDB':
@@ -643,6 +624,7 @@ class AutotoolsRecipeHandler(RecipeHandler):
'AX_CHECK_OPENSSL',
'AX_LIB_CRYPTO',
'AX_LIB_CURL',
+ 'LIBCURL_CHECK_CONFIG',
'AX_LIB_BEECRYPT',
'AX_LIB_EXPAT',
'AX_LIB_GCRYPT',
diff --git a/scripts/lib/recipetool/create_buildsys_python.py b/scripts/lib/recipetool/create_buildsys_python.py
index 5bd2aa337c..a807dafae5 100644
--- a/scripts/lib/recipetool/create_buildsys_python.py
+++ b/scripts/lib/recipetool/create_buildsys_python.py
@@ -2,25 +2,15 @@
#
# Copyright (C) 2015 Mentor Graphics Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import ast
import codecs
import collections
-import distutils.command.build_py
+import setuptools.command.build_py
import email
-import imp
+import importlib
import glob
import itertools
import logging
@@ -28,7 +18,11 @@ import os
import re
import sys
import subprocess
+import json
+import urllib.request
from recipetool.create import RecipeHandler
+from urllib.parse import urldefrag
+from recipetool.create import determine_from_url
logger = logging.getLogger('recipetool')
@@ -41,69 +35,14 @@ def tinfoil_init(instance):
class PythonRecipeHandler(RecipeHandler):
- base_pkgdeps = ['python-core']
- excluded_pkgdeps = ['python-dbg']
- # os.path is provided by python-core
+ base_pkgdeps = ['python3-core']
+ excluded_pkgdeps = ['python3-dbg']
+ # os.path is provided by python3-core
assume_provided = ['builtins', 'os.path']
- # Assumes that the host python builtin_module_names is sane for target too
+ # Assumes that the host python3 builtin_module_names is sane for target too
assume_provided = assume_provided + list(sys.builtin_module_names)
+ excluded_fields = []
- bbvar_map = {
- 'Name': 'PN',
- 'Version': 'PV',
- 'Home-page': 'HOMEPAGE',
- 'Summary': 'SUMMARY',
- 'Description': 'DESCRIPTION',
- 'License': 'LICENSE',
- 'Requires': 'RDEPENDS_${PN}',
- 'Provides': 'RPROVIDES_${PN}',
- 'Obsoletes': 'RREPLACES_${PN}',
- }
- # PN/PV are already set by recipetool core & desc can be extremely long
- excluded_fields = [
- 'Description',
- ]
- setup_parse_map = {
- 'Url': 'Home-page',
- 'Classifiers': 'Classifier',
- 'Description': 'Summary',
- }
- setuparg_map = {
- 'Home-page': 'url',
- 'Classifier': 'classifiers',
- 'Summary': 'description',
- 'Description': 'long-description',
- }
- # Values which are lists, used by the setup.py argument based metadata
- # extraction method, to determine how to process the setup.py output.
- setuparg_list_fields = [
- 'Classifier',
- 'Requires',
- 'Provides',
- 'Obsoletes',
- 'Platform',
- 'Supported-Platform',
- ]
- setuparg_multi_line_values = ['Description']
- replacements = [
- ('License', r' +$', ''),
- ('License', r'^ +', ''),
- ('License', r' ', '-'),
- ('License', r'^GNU-', ''),
- ('License', r'-[Ll]icen[cs]e(,?-[Vv]ersion)?', ''),
- ('License', r'^UNKNOWN$', ''),
-
- # Remove currently unhandled version numbers from these variables
- ('Requires', r' *\([^)]*\)', ''),
- ('Provides', r' *\([^)]*\)', ''),
- ('Obsoletes', r' *\([^)]*\)', ''),
- ('Install-requires', r'^([^><= ]+).*', r'\1'),
- ('Extras-require', r'^([^><= ]+).*', r'\1'),
- ('Tests-require', r'^([^><= ]+).*', r'\1'),
-
- # Remove unhandled dependency on particular features (e.g. foo[PDF])
- ('Install-requires', r'\[[^\]]+\]$', ''),
- ]
classifier_license_map = {
'License :: OSI Approved :: Academic Free License (AFL)': 'AFL',
@@ -111,30 +50,39 @@ class PythonRecipeHandler(RecipeHandler):
'License :: OSI Approved :: Apple Public Source License': 'APSL',
'License :: OSI Approved :: Artistic License': 'Artistic',
'License :: OSI Approved :: Attribution Assurance License': 'AAL',
- 'License :: OSI Approved :: BSD License': 'BSD',
+ 'License :: OSI Approved :: BSD License': 'BSD-3-Clause',
+ 'License :: OSI Approved :: Boost Software License 1.0 (BSL-1.0)': 'BSL-1.0',
+ 'License :: OSI Approved :: CEA CNRS Inria Logiciel Libre License, version 2.1 (CeCILL-2.1)': 'CECILL-2.1',
+ 'License :: OSI Approved :: Common Development and Distribution License 1.0 (CDDL-1.0)': 'CDDL-1.0',
'License :: OSI Approved :: Common Public License': 'CPL',
+ 'License :: OSI Approved :: Eclipse Public License 1.0 (EPL-1.0)': 'EPL-1.0',
+ 'License :: OSI Approved :: Eclipse Public License 2.0 (EPL-2.0)': 'EPL-2.0',
'License :: OSI Approved :: Eiffel Forum License': 'EFL',
'License :: OSI Approved :: European Union Public Licence 1.0 (EUPL 1.0)': 'EUPL-1.0',
'License :: OSI Approved :: European Union Public Licence 1.1 (EUPL 1.1)': 'EUPL-1.1',
- 'License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)': 'AGPL-3.0+',
- 'License :: OSI Approved :: GNU Affero General Public License v3': 'AGPL-3.0',
+ 'License :: OSI Approved :: European Union Public Licence 1.2 (EUPL 1.2)': 'EUPL-1.2',
+ 'License :: OSI Approved :: GNU Affero General Public License v3': 'AGPL-3.0-only',
+ 'License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)': 'AGPL-3.0-or-later',
'License :: OSI Approved :: GNU Free Documentation License (FDL)': 'GFDL',
'License :: OSI Approved :: GNU General Public License (GPL)': 'GPL',
- 'License :: OSI Approved :: GNU General Public License v2 (GPLv2)': 'GPL-2.0',
- 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)': 'GPL-2.0+',
- 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)': 'GPL-3.0',
- 'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)': 'GPL-3.0+',
- 'License :: OSI Approved :: GNU Lesser General Public License v2 (LGPLv2)': 'LGPL-2.0',
- 'License :: OSI Approved :: GNU Lesser General Public License v2 or later (LGPLv2+)': 'LGPL-2.0+',
- 'License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)': 'LGPL-3.0',
- 'License :: OSI Approved :: GNU Lesser General Public License v3 or later (LGPLv3+)': 'LGPL-3.0+',
+ 'License :: OSI Approved :: GNU General Public License v2 (GPLv2)': 'GPL-2.0-only',
+ 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)': 'GPL-2.0-or-later',
+ 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)': 'GPL-3.0-only',
+ 'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)': 'GPL-3.0-or-later',
+ 'License :: OSI Approved :: GNU Lesser General Public License v2 (LGPLv2)': 'LGPL-2.0-only',
+ 'License :: OSI Approved :: GNU Lesser General Public License v2 or later (LGPLv2+)': 'LGPL-2.0-or-later',
+ 'License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)': 'LGPL-3.0-only',
+ 'License :: OSI Approved :: GNU Lesser General Public License v3 or later (LGPLv3+)': 'LGPL-3.0-or-later',
'License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)': 'LGPL',
+ 'License :: OSI Approved :: Historical Permission Notice and Disclaimer (HPND)': 'HPND',
'License :: OSI Approved :: IBM Public License': 'IPL',
'License :: OSI Approved :: ISC License (ISCL)': 'ISC',
'License :: OSI Approved :: Intel Open Source License': 'Intel',
'License :: OSI Approved :: Jabber Open Source License': 'Jabber',
'License :: OSI Approved :: MIT License': 'MIT',
+ 'License :: OSI Approved :: MIT No Attribution License (MIT-0)': 'MIT-0',
'License :: OSI Approved :: MITRE Collaborative Virtual Workspace License (CVW)': 'CVWL',
+ 'License :: OSI Approved :: MirOS License (MirOS)': 'MirOS',
'License :: OSI Approved :: Motosoto License': 'Motosoto',
'License :: OSI Approved :: Mozilla Public License 1.0 (MPL)': 'MPL-1.0',
'License :: OSI Approved :: Mozilla Public License 1.1 (MPL 1.1)': 'MPL-1.1',
@@ -142,123 +90,122 @@ class PythonRecipeHandler(RecipeHandler):
'License :: OSI Approved :: Nethack General Public License': 'NGPL',
'License :: OSI Approved :: Nokia Open Source License': 'Nokia',
'License :: OSI Approved :: Open Group Test Suite License': 'OGTSL',
+ 'License :: OSI Approved :: Open Software License 3.0 (OSL-3.0)': 'OSL-3.0',
+ 'License :: OSI Approved :: PostgreSQL License': 'PostgreSQL',
'License :: OSI Approved :: Python License (CNRI Python License)': 'CNRI-Python',
- 'License :: OSI Approved :: Python Software Foundation License': 'PSF',
+ 'License :: OSI Approved :: Python Software Foundation License': 'PSF-2.0',
'License :: OSI Approved :: Qt Public License (QPL)': 'QPL',
'License :: OSI Approved :: Ricoh Source Code Public License': 'RSCPL',
+ 'License :: OSI Approved :: SIL Open Font License 1.1 (OFL-1.1)': 'OFL-1.1',
'License :: OSI Approved :: Sleepycat License': 'Sleepycat',
- 'License :: OSI Approved :: Sun Industry Standards Source License (SISSL)': '-- Sun Industry Standards Source License (SISSL)',
+ 'License :: OSI Approved :: Sun Industry Standards Source License (SISSL)': 'SISSL',
'License :: OSI Approved :: Sun Public License': 'SPL',
+ 'License :: OSI Approved :: The Unlicense (Unlicense)': 'Unlicense',
+ 'License :: OSI Approved :: Universal Permissive License (UPL)': 'UPL-1.0',
'License :: OSI Approved :: University of Illinois/NCSA Open Source License': 'NCSA',
'License :: OSI Approved :: Vovida Software License 1.0': 'VSL-1.0',
'License :: OSI Approved :: W3C License': 'W3C',
'License :: OSI Approved :: X.Net License': 'Xnet',
'License :: OSI Approved :: Zope Public License': 'ZPL',
'License :: OSI Approved :: zlib/libpng License': 'Zlib',
+ 'License :: Other/Proprietary License': 'Proprietary',
+ 'License :: Public Domain': 'PD',
}
def __init__(self):
pass
- def process(self, srctree, classes, lines_before, lines_after, handled, extravalues):
- if 'buildsystem' in handled:
- return False
-
- if not RecipeHandler.checkfiles(srctree, ['setup.py']):
- return
-
- # setup.py is always parsed to get at certain required information, such as
- # distutils vs setuptools
- #
- # If egg info is available, we use it for both its PKG-INFO metadata
- # and for its requires.txt for install_requires.
- # If PKG-INFO is available but no egg info is, we use that for metadata in preference to
- # the parsed setup.py, but use the install_requires info from the
- # parsed setup.py.
-
- setupscript = os.path.join(srctree, 'setup.py')
- try:
- setup_info, uses_setuptools, setup_non_literals, extensions = self.parse_setup_py(setupscript)
- except Exception:
- logger.exception("Failed to parse setup.py")
- setup_info, uses_setuptools, setup_non_literals, extensions = {}, True, [], []
-
- egginfo = glob.glob(os.path.join(srctree, '*.egg-info'))
- if egginfo:
- info = self.get_pkginfo(os.path.join(egginfo[0], 'PKG-INFO'))
- requires_txt = os.path.join(egginfo[0], 'requires.txt')
- if os.path.exists(requires_txt):
- with codecs.open(requires_txt) as f:
- inst_req = []
- extras_req = collections.defaultdict(list)
- current_feature = None
- for line in f.readlines():
- line = line.rstrip()
- if not line:
- continue
-
- if line.startswith('['):
- current_feature = line[1:-1]
- elif current_feature:
- extras_req[current_feature].append(line)
- else:
- inst_req.append(line)
- info['Install-requires'] = inst_req
- info['Extras-require'] = extras_req
- elif RecipeHandler.checkfiles(srctree, ['PKG-INFO']):
- info = self.get_pkginfo(os.path.join(srctree, 'PKG-INFO'))
-
- if setup_info:
- if 'Install-requires' in setup_info:
- info['Install-requires'] = setup_info['Install-requires']
- if 'Extras-require' in setup_info:
- info['Extras-require'] = setup_info['Extras-require']
- else:
- if setup_info:
- info = setup_info
+ def process_url(self, args, classes, handled, extravalues):
+ """
+ Convert any pypi url https://pypi.org/project/<package>/<version> into https://files.pythonhosted.org/packages/source/...
+ which corresponds to the archive location, and add pypi class
+ """
+
+ if 'url' in handled:
+ return None
+
+ fetch_uri = None
+ source = args.source
+ required_version = args.version if args.version else None
+ match = re.match(r'https?://pypi.org/project/([^/]+)(?:/([^/]+))?/?$', urldefrag(source)[0])
+ if match:
+ package = match.group(1)
+ version = match.group(2) if match.group(2) else required_version
+
+ json_url = "https://pypi.org/pypi/%s/json" % package
+ response = urllib.request.urlopen(json_url)
+ if response.status == 200:
+ data = json.loads(response.read())
+ if not version:
+ # grab latest version
+ version = data["info"]["version"]
+ pypi_package = data["info"]["name"]
+ for release in reversed(data["releases"][version]):
+ if release["packagetype"] == "sdist":
+ fetch_uri = release["url"]
+ break
else:
- info = self.get_setup_args_info(setupscript)
-
- # Grab the license value before applying replacements
- license_str = info.get('License', '').strip()
-
- self.apply_info_replacements(info)
-
- if uses_setuptools:
- classes.append('setuptools')
+ logger.warning("Cannot handle pypi url %s: cannot fetch package information using %s", source, json_url)
+ return None
else:
- classes.append('distutils')
-
- if license_str:
- for i, line in enumerate(lines_before):
- if line.startswith('LICENSE = '):
- lines_before.insert(i, '# NOTE: License in setup.py/PKGINFO is: %s' % license_str)
- break
-
- if 'Classifier' in info:
- existing_licenses = info.get('License', '')
- licenses = []
- for classifier in info['Classifier']:
- if classifier in self.classifier_license_map:
- license = self.classifier_license_map[classifier]
- if license == 'Apache' and 'Apache-2.0' in existing_licenses:
- license = 'Apache-2.0'
- elif license == 'GPL':
- if 'GPL-2.0' in existing_licenses or 'GPLv2' in existing_licenses:
- license = 'GPL-2.0'
- elif 'GPL-3.0' in existing_licenses or 'GPLv3' in existing_licenses:
- license = 'GPL-3.0'
- elif license == 'LGPL':
- if 'LGPL-2.1' in existing_licenses or 'LGPLv2.1' in existing_licenses:
- license = 'LGPL-2.1'
- elif 'LGPL-2.0' in existing_licenses or 'LGPLv2' in existing_licenses:
- license = 'LGPL-2.0'
- elif 'LGPL-3.0' in existing_licenses or 'LGPLv3' in existing_licenses:
- license = 'LGPL-3.0'
- licenses.append(license)
-
- if licenses:
- info['License'] = ' & '.join(licenses)
+ match = re.match(r'^https?://files\.pythonhosted\.org/packages.*/(.*)-.*$', source)
+ if match:
+ fetch_uri = source
+ pypi_package = match.group(1)
+ _, version = determine_from_url(fetch_uri)
+
+ if match and not args.no_pypi:
+ if required_version and version != required_version:
+ raise Exception("Version specified using --version/-V (%s) and version specified in the url (%s) do not match" % (required_version, version))
+ # This is optional if BPN looks like "python-<pypi_package>" or "python3-<pypi_package>" (see pypi.bbclass)
+ # but at this point we cannot know, because the user can specify the output name of the recipe on the command line
+ extravalues["PYPI_PACKAGE"] = pypi_package
+ # If the tarball extension is not 'tar.gz' (the default value in pypi.bbclass) we should set PYPI_PACKAGE_EXT in the recipe
+ pypi_package_ext = re.match(r'.*%s-%s\.(.*)$' % (pypi_package, version), fetch_uri)
+ if pypi_package_ext:
+ pypi_package_ext = pypi_package_ext.group(1)
+ if pypi_package_ext != "tar.gz":
+ extravalues["PYPI_PACKAGE_EXT"] = pypi_package_ext
+
+ # Pypi class will handle S and SRC_URI variables, so remove them
+ # TODO: allow oe.recipeutils.patch_recipe_lines() to accept regexp so we can simplify the following to:
+ # extravalues['SRC_URI(?:\[.*?\])?'] = None
+ extravalues['S'] = None
+ extravalues['SRC_URI'] = None
+
+ classes.append('pypi')
+
+ handled.append('url')
+ return fetch_uri
+
+ def handle_classifier_license(self, classifiers, existing_licenses=""):
+
+ licenses = []
+ for classifier in classifiers:
+ if classifier in self.classifier_license_map:
+ license = self.classifier_license_map[classifier]
+ if license == 'Apache' and 'Apache-2.0' in existing_licenses:
+ license = 'Apache-2.0'
+ elif license == 'GPL':
+ if 'GPL-2.0' in existing_licenses or 'GPLv2' in existing_licenses:
+ license = 'GPL-2.0'
+ elif 'GPL-3.0' in existing_licenses or 'GPLv3' in existing_licenses:
+ license = 'GPL-3.0'
+ elif license == 'LGPL':
+ if 'LGPL-2.1' in existing_licenses or 'LGPLv2.1' in existing_licenses:
+ license = 'LGPL-2.1'
+ elif 'LGPL-2.0' in existing_licenses or 'LGPLv2' in existing_licenses:
+ license = 'LGPL-2.0'
+ elif 'LGPL-3.0' in existing_licenses or 'LGPLv3' in existing_licenses:
+ license = 'LGPL-3.0'
+ licenses.append(license)
+
+ if licenses:
+ return ' & '.join(licenses)
+
+ return None
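+ # For illustration: a classifier of 'License :: Public Domain' maps to 'PD'
+ # via classifier_license_map, and a bare 'GPL' result is narrowed to
+ # 'GPL-2.0' when existing_licenses already mentions GPLv2.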
+
+ def map_info_to_bbvar(self, info, extravalues):
# Map PKG-INFO & setup.py fields to bitbake variables
for field, values in info.items():
@@ -274,73 +221,214 @@ class PythonRecipeHandler(RecipeHandler):
value = ' '.join(str(v) for v in values if v)
bbvar = self.bbvar_map[field]
+ if bbvar == "PN":
+ # by convention python recipes start with "python3-"
+ if not value.startswith('python'):
+ value = 'python3-' + value
+
if bbvar not in extravalues and value:
extravalues[bbvar] = value
- mapped_deps, unmapped_deps = self.scan_setup_python_deps(srctree, setup_info, setup_non_literals)
+ def apply_info_replacements(self, info):
+ if not self.replacements:
+ return
- extras_req = set()
- if 'Extras-require' in info:
- extras_req = info['Extras-require']
- if extras_req:
- lines_after.append('# The following configs & dependencies are from setuptools extras_require.')
- lines_after.append('# These dependencies are optional, hence can be controlled via PACKAGECONFIG.')
- lines_after.append('# The upstream names may not correspond exactly to bitbake package names.')
- lines_after.append('#')
- lines_after.append('# Uncomment this line to enable all the optional features.')
- lines_after.append('#PACKAGECONFIG ?= "{}"'.format(' '.join(k.lower() for k in extras_req)))
- for feature, feature_reqs in extras_req.items():
- unmapped_deps.difference_update(feature_reqs)
+ for variable, search, replace in self.replacements:
+ if variable not in info:
+ continue
- feature_req_deps = ('python-' + r.replace('.', '-').lower() for r in sorted(feature_reqs))
- lines_after.append('PACKAGECONFIG[{}] = ",,,{}"'.format(feature.lower(), ' '.join(feature_req_deps)))
+ def replace_value(search, replace, value):
+ if replace is None:
+ if re.search(search, value):
+ return None
+ else:
+ new_value = re.sub(search, replace, value)
+ if value != new_value:
+ return new_value
+ return value
- inst_reqs = set()
- if 'Install-requires' in info:
- if extras_req:
- lines_after.append('')
- inst_reqs = info['Install-requires']
- if inst_reqs:
- unmapped_deps.difference_update(inst_reqs)
+ value = info[variable]
+ if isinstance(value, str):
+ new_value = replace_value(search, replace, value)
+ if new_value is None:
+ del info[variable]
+ elif new_value != value:
+ info[variable] = new_value
+ elif hasattr(value, 'items'):
+ for dkey, dvalue in list(value.items()):
+ new_list = []
+ for pos, a_value in enumerate(dvalue):
+ new_value = replace_value(search, replace, a_value)
+ if new_value is not None and new_value != value:
+ new_list.append(new_value)
- inst_req_deps = ('python-' + r.replace('.', '-').lower() for r in sorted(inst_reqs))
- lines_after.append('# WARNING: the following rdepends are from setuptools install_requires. These')
- lines_after.append('# upstream names may not correspond exactly to bitbake package names.')
- lines_after.append('RDEPENDS_${{PN}} += "{}"'.format(' '.join(inst_req_deps)))
+ if value != new_list:
+ value[dkey] = new_list
+ else:
+ new_list = []
+ for pos, a_value in enumerate(value):
+ new_value = replace_value(search, replace, a_value)
+ if new_value is not None and new_value != value:
+ new_list.append(new_value)
- if mapped_deps:
- name = info.get('Name')
- if name and name[0] in mapped_deps:
- # Attempt to avoid self-reference
- mapped_deps.remove(name[0])
- mapped_deps -= set(self.excluded_pkgdeps)
- if inst_reqs or extras_req:
- lines_after.append('')
- lines_after.append('# WARNING: the following rdepends are determined through basic analysis of the')
- lines_after.append('# python sources, and might not be 100% accurate.')
- lines_after.append('RDEPENDS_${{PN}} += "{}"'.format(' '.join(sorted(mapped_deps))))
+ if value != new_list:
+ info[variable] = new_list
- unmapped_deps -= set(extensions)
- unmapped_deps -= set(self.assume_provided)
- if unmapped_deps:
- if mapped_deps:
- lines_after.append('')
- lines_after.append('# WARNING: We were unable to map the following python package/module')
- lines_after.append('# dependencies to the bitbake packages which include them:')
- lines_after.extend('# {}'.format(d) for d in sorted(unmapped_deps))
- handled.append('buildsystem')
+ def scan_python_dependencies(self, paths):
+ deps = set()
+ try:
+ dep_output = self.run_command(['pythondeps', '-d'] + paths)
+ except (OSError, subprocess.CalledProcessError):
+ pass
+ else:
+ for line in dep_output.splitlines():
+ line = line.rstrip()
+ dep, filename = line.split('\t', 1)
+ if filename.endswith('/setup.py'):
+ continue
+ deps.add(dep)
- def get_pkginfo(self, pkginfo_fn):
- msg = email.message_from_file(open(pkginfo_fn, 'r'))
- msginfo = {}
- for field in msg.keys():
- values = msg.get_all(field)
- if len(values) == 1:
- msginfo[field] = values[0]
- else:
- msginfo[field] = values
- return msginfo
+ try:
+ provides_output = self.run_command(['pythondeps', '-p'] + paths)
+ except (OSError, subprocess.CalledProcessError):
+ pass
+ else:
+ provides_lines = (l.rstrip() for l in provides_output.splitlines())
+ provides = set(l for l in provides_lines if l and l != 'setup')
+ deps -= provides
+
+ return deps
+
+ def parse_pkgdata_for_python_packages(self):
+ pkgdata_dir = tinfoil.config_data.getVar('PKGDATA_DIR')
+
+ ldata = tinfoil.config_data.createCopy()
+ bb.parse.handle('classes-recipe/python3-dir.bbclass', ldata, True)
+ python_sitedir = ldata.getVar('PYTHON_SITEPACKAGES_DIR')
+
+ dynload_dir = os.path.join(os.path.dirname(python_sitedir), 'lib-dynload')
+ python_dirs = [python_sitedir + os.sep,
+ os.path.join(os.path.dirname(python_sitedir), 'dist-packages') + os.sep,
+ os.path.dirname(python_sitedir) + os.sep]
+ packages = {}
+ for pkgdatafile in glob.glob('{}/runtime/*'.format(pkgdata_dir)):
+ files_info = None
+ with open(pkgdatafile, 'r') as f:
+ for line in f.readlines():
+ field, value = line.split(': ', 1)
+ if field.startswith('FILES_INFO'):
+ files_info = ast.literal_eval(value)
+ break
+ else:
+ continue
+
+ for fn in files_info:
+ for suffix in importlib.machinery.all_suffixes():
+ if fn.endswith(suffix):
+ break
+ else:
+ continue
+
+ if fn.startswith(dynload_dir + os.sep):
+ if '/.debug/' in fn:
+ continue
+ base = os.path.basename(fn)
+ provided = base.split('.', 1)[0]
+ packages[provided] = os.path.basename(pkgdatafile)
+ continue
+
+ for python_dir in python_dirs:
+ if fn.startswith(python_dir):
+ relpath = fn[len(python_dir):]
+ relstart, _, relremaining = relpath.partition(os.sep)
+ if relstart.endswith('.egg'):
+ relpath = relremaining
+ base, _ = os.path.splitext(relpath)
+
+ if '/.debug/' in base:
+ continue
+ if os.path.basename(base) == '__init__':
+ base = os.path.dirname(base)
+ base = base.replace(os.sep + os.sep, os.sep)
+ provided = base.replace(os.sep, '.')
+ packages[provided] = os.path.basename(pkgdatafile)
+ return packages
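+ # For illustration: a FILES_INFO path like
+ # <python_sitedir>/foo/__init__.py is recorded as provided module 'foo',
+ # keyed to the pkgdata file (i.e. the package) that ships it.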
+
+ @classmethod
+ def run_command(cls, cmd, **popenargs):
+ if 'stderr' not in popenargs:
+ popenargs['stderr'] = subprocess.STDOUT
+ try:
+ return subprocess.check_output(cmd, **popenargs).decode('utf-8')
+ except OSError as exc:
+ logger.error('Unable to run `{}`: {}', ' '.join(cmd), exc)
+ raise
+ except subprocess.CalledProcessError as exc:
+ logger.error('Unable to run `{}`: {}', ' '.join(cmd), exc.output)
+ raise
+
+class PythonSetupPyRecipeHandler(PythonRecipeHandler):
+ bbvar_map = {
+ 'Name': 'PN',
+ 'Version': 'PV',
+ 'Home-page': 'HOMEPAGE',
+ 'Summary': 'SUMMARY',
+ 'Description': 'DESCRIPTION',
+ 'License': 'LICENSE',
+ 'Requires': 'RDEPENDS:${PN}',
+ 'Provides': 'RPROVIDES:${PN}',
+ 'Obsoletes': 'RREPLACES:${PN}',
+ }
+ # PN/PV are already set by recipetool core & desc can be extremely long
+ excluded_fields = [
+ 'Description',
+ ]
+ setup_parse_map = {
+ 'Url': 'Home-page',
+ 'Classifiers': 'Classifier',
+ 'Description': 'Summary',
+ }
+ setuparg_map = {
+ 'Home-page': 'url',
+ 'Classifier': 'classifiers',
+ 'Summary': 'description',
+ 'Description': 'long-description',
+ }
+ # Values which are lists, used by the setup.py argument based metadata
+ # extraction method, to determine how to process the setup.py output.
+ setuparg_list_fields = [
+ 'Classifier',
+ 'Requires',
+ 'Provides',
+ 'Obsoletes',
+ 'Platform',
+ 'Supported-Platform',
+ ]
+ setuparg_multi_line_values = ['Description']
+
+ replacements = [
+ ('License', r' +$', ''),
+ ('License', r'^ +', ''),
+ ('License', r' ', '-'),
+ ('License', r'^GNU-', ''),
+ ('License', r'-[Ll]icen[cs]e(,?-[Vv]ersion)?', ''),
+ ('License', r'^UNKNOWN$', ''),
+
+ # Remove currently unhandled version numbers from these variables
+ ('Requires', r' *\([^)]*\)', ''),
+ ('Provides', r' *\([^)]*\)', ''),
+ ('Obsoletes', r' *\([^)]*\)', ''),
+ ('Install-requires', r'^([^><= ]+).*', r'\1'),
+ ('Extras-require', r'^([^><= ]+).*', r'\1'),
+ ('Tests-require', r'^([^><= ]+).*', r'\1'),
+
+ # Remove unhandled dependency on particular features (e.g. foo[PDF])
+ ('Install-requires', r'\[[^\]]+\]$', ''),
+ ]
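+ # For illustration: an 'Install-requires' entry such as
+ # "requests>=2.0[security]" (hypothetical) is reduced to "requests" by the
+ # version and extras replacements above.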
+
+ def __init__(self):
+ pass
def parse_setup_py(self, setupscript='./setup.py'):
with codecs.open(setupscript) as f:
@@ -366,7 +454,7 @@ class PythonRecipeHandler(RecipeHandler):
return info, 'setuptools' in imported_modules, non_literals, extensions
def get_setup_args_info(self, setupscript='./setup.py'):
- cmd = ['python', setupscript]
+ cmd = ['python3', setupscript]
info = {}
keys = set(self.bbvar_map.keys())
keys |= set(self.setuparg_list_fields)
@@ -400,7 +488,7 @@ class PythonRecipeHandler(RecipeHandler):
def get_setup_byline(self, fields, setupscript='./setup.py'):
info = {}
- cmd = ['python', setupscript]
+ cmd = ['python3', setupscript]
cmd.extend('--' + self.setuparg_map.get(f, f.lower()) for f in fields)
try:
info_lines = self.run_command(cmd, cwd=os.path.dirname(setupscript)).splitlines()
@@ -416,47 +504,16 @@ class PythonRecipeHandler(RecipeHandler):
info[fields[lineno]] = line
return info
- def apply_info_replacements(self, info):
- for variable, search, replace in self.replacements:
- if variable not in info:
- continue
-
- def replace_value(search, replace, value):
- if replace is None:
- if re.search(search, value):
- return None
- else:
- new_value = re.sub(search, replace, value)
- if value != new_value:
- return new_value
- return value
-
- value = info[variable]
- if isinstance(value, str):
- new_value = replace_value(search, replace, value)
- if new_value is None:
- del info[variable]
- elif new_value != value:
- info[variable] = new_value
- elif hasattr(value, 'items'):
- for dkey, dvalue in list(value.items()):
- new_list = []
- for pos, a_value in enumerate(dvalue):
- new_value = replace_value(search, replace, a_value)
- if new_value is not None and new_value != value:
- new_list.append(new_value)
-
- if value != new_list:
- value[dkey] = new_list
+ def get_pkginfo(self, pkginfo_fn):
+ msg = email.message_from_file(open(pkginfo_fn, 'r'))
+ msginfo = {}
+ for field in msg.keys():
+ values = msg.get_all(field)
+ if len(values) == 1:
+ msginfo[field] = values[0]
else:
- new_list = []
- for pos, a_value in enumerate(value):
- new_value = replace_value(search, replace, a_value)
- if new_value is not None and new_value != value:
- new_list.append(new_value)
-
- if value != new_list:
- info[variable] = new_list
+ msginfo[field] = values
+ return msginfo
def scan_setup_python_deps(self, srctree, setup_info, setup_non_literals):
if 'Package-dir' in setup_info:
@@ -464,9 +521,13 @@ class PythonRecipeHandler(RecipeHandler):
else:
package_dir = {}
- class PackageDir(distutils.command.build_py.build_py):
+ dist = setuptools.Distribution()
+
+ class PackageDir(setuptools.command.build_py.build_py):
def __init__(self, package_dir):
self.package_dir = package_dir
+ self.dist = dist
+ super().__init__(self.dist)
pd = PackageDir(package_dir)
to_scan = []
@@ -507,99 +568,441 @@ class PythonRecipeHandler(RecipeHandler):
unmapped_deps.add(dep)
return mapped_deps, unmapped_deps
- def scan_python_dependencies(self, paths):
- deps = set()
- try:
- dep_output = self.run_command(['pythondeps', '-d'] + paths)
- except (OSError, subprocess.CalledProcessError):
- pass
+ def process(self, srctree, classes, lines_before, lines_after, handled, extravalues):
+
+ if 'buildsystem' in handled:
+ return False
+
+ logger.debug("Trying setup.py parser")
+
+ # Check for non-zero size setup.py files
+ setupfiles = RecipeHandler.checkfiles(srctree, ['setup.py'])
+ for fn in setupfiles:
+ if os.path.getsize(fn):
+ break
else:
- for line in dep_output.splitlines():
- line = line.rstrip()
- dep, filename = line.split('\t', 1)
- if filename.endswith('/setup.py'):
- continue
- deps.add(dep)
+ logger.debug("No setup.py found")
+ return False
+
+ # setup.py is always parsed to get at certain required information, such as
+ # distutils vs setuptools
+ #
+ # If egg info is available, we use it for both its PKG-INFO metadata
+ # and for its requires.txt for install_requires.
+ # If PKG-INFO is available but no egg info is, we use that for metadata in preference to
+ # the parsed setup.py, but use the install_requires info from the
+ # parsed setup.py.
+ setupscript = os.path.join(srctree, 'setup.py')
try:
- provides_output = self.run_command(['pythondeps', '-p'] + paths)
- except (OSError, subprocess.CalledProcessError):
- pass
+ setup_info, uses_setuptools, setup_non_literals, extensions = self.parse_setup_py(setupscript)
+ except Exception:
+ logger.exception("Failed to parse setup.py")
+ setup_info, uses_setuptools, setup_non_literals, extensions = {}, True, [], []
+
+ egginfo = glob.glob(os.path.join(srctree, '*.egg-info'))
+ if egginfo:
+ info = self.get_pkginfo(os.path.join(egginfo[0], 'PKG-INFO'))
+ requires_txt = os.path.join(egginfo[0], 'requires.txt')
+ if os.path.exists(requires_txt):
+ with codecs.open(requires_txt) as f:
+ inst_req = []
+ extras_req = collections.defaultdict(list)
+ current_feature = None
+ for line in f.readlines():
+ line = line.rstrip()
+ if not line:
+ continue
+
+ if line.startswith('['):
+ # PACKAGECONFIG must not contain expressions or whitespace
+ line = line.replace(" ", "")
+ line = line.replace(':', "")
+ line = line.replace('.', "-dot-")
+ line = line.replace('"', "")
+ line = line.replace('<', "-smaller-")
+ line = line.replace('>', "-bigger-")
+ line = line.replace('_', "-")
+ line = line.replace('(', "")
+ line = line.replace(')', "")
+ line = line.replace('!', "-not-")
+ line = line.replace('=', "-equals-")
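+ # For illustration, a hypothetical feature line such as
+ # [test:python_version<"3.8"] is sanitized by the chain above into the
+ # feature name testpython-version-smaller-3-dot-8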
+ current_feature = line[1:-1]
+ elif current_feature:
+ extras_req[current_feature].append(line)
+ else:
+ inst_req.append(line)
+ info['Install-requires'] = inst_req
+ info['Extras-require'] = extras_req
+ elif RecipeHandler.checkfiles(srctree, ['PKG-INFO']):
+ info = self.get_pkginfo(os.path.join(srctree, 'PKG-INFO'))
+
+ if setup_info:
+ if 'Install-requires' in setup_info:
+ info['Install-requires'] = setup_info['Install-requires']
+ if 'Extras-require' in setup_info:
+ info['Extras-require'] = setup_info['Extras-require']
else:
- provides_lines = (l.rstrip() for l in provides_output.splitlines())
- provides = set(l for l in provides_lines if l and l != 'setup')
- deps -= provides
+ if setup_info:
+ info = setup_info
+ else:
+ info = self.get_setup_args_info(setupscript)
- return deps
+ # Grab the license value before applying replacements
+ license_str = info.get('License', '').strip()
- def parse_pkgdata_for_python_packages(self):
- suffixes = [t[0] for t in imp.get_suffixes()]
- pkgdata_dir = tinfoil.config_data.getVar('PKGDATA_DIR')
+ self.apply_info_replacements(info)
- ldata = tinfoil.config_data.createCopy()
- bb.parse.handle('classes/python-dir.bbclass', ldata, True)
- python_sitedir = ldata.getVar('PYTHON_SITEPACKAGES_DIR')
+ if uses_setuptools:
+ classes.append('setuptools3')
+ else:
+ classes.append('distutils3')
- dynload_dir = os.path.join(os.path.dirname(python_sitedir), 'lib-dynload')
- python_dirs = [python_sitedir + os.sep,
- os.path.join(os.path.dirname(python_sitedir), 'dist-packages') + os.sep,
- os.path.dirname(python_sitedir) + os.sep]
- packages = {}
- for pkgdatafile in glob.glob('{}/runtime/*'.format(pkgdata_dir)):
- files_info = None
- with open(pkgdatafile, 'r') as f:
- for line in f.readlines():
- field, value = line.split(': ', 1)
- if field == 'FILES_INFO':
- files_info = ast.literal_eval(value)
- break
- else:
- continue
+ if license_str:
+ for i, line in enumerate(lines_before):
+ if line.startswith('##LICENSE_PLACEHOLDER##'):
+ lines_before.insert(i, '# NOTE: License in setup.py/PKGINFO is: %s' % license_str)
+ break
- for fn in files_info:
- for suffix in suffixes:
- if fn.endswith(suffix):
- break
- else:
- continue
+ if 'Classifier' in info:
+ license = self.handle_classifier_license(info['Classifier'], info.get('License', ''))
+ if license:
+ info['License'] = license
- if fn.startswith(dynload_dir + os.sep):
- if '/.debug/' in fn:
- continue
- base = os.path.basename(fn)
- provided = base.split('.', 1)[0]
- packages[provided] = os.path.basename(pkgdatafile)
- continue
+ self.map_info_to_bbvar(info, extravalues)
- for python_dir in python_dirs:
- if fn.startswith(python_dir):
- relpath = fn[len(python_dir):]
- relstart, _, relremaining = relpath.partition(os.sep)
- if relstart.endswith('.egg'):
- relpath = relremaining
- base, _ = os.path.splitext(relpath)
+ mapped_deps, unmapped_deps = self.scan_setup_python_deps(srctree, setup_info, setup_non_literals)
- if '/.debug/' in base:
- continue
- if os.path.basename(base) == '__init__':
- base = os.path.dirname(base)
- base = base.replace(os.sep + os.sep, os.sep)
- provided = base.replace(os.sep, '.')
- packages[provided] = os.path.basename(pkgdatafile)
- return packages
+ extras_req = set()
+ if 'Extras-require' in info:
+ extras_req = info['Extras-require']
+ if extras_req:
+ lines_after.append('# The following configs & dependencies are from setuptools extras_require.')
+ lines_after.append('# These dependencies are optional, hence can be controlled via PACKAGECONFIG.')
+ lines_after.append('# The upstream names may not correspond exactly to bitbake package names.')
+ lines_after.append('# The configs might not be correct, since PACKAGECONFIG does not support expressions as used in requires.txt - they are just replaced by text.')
+ lines_after.append('#')
+ lines_after.append('# Uncomment this line to enable all the optional features.')
+ lines_after.append('#PACKAGECONFIG ?= "{}"'.format(' '.join(k.lower() for k in extras_req)))
+ for feature, feature_reqs in extras_req.items():
+ unmapped_deps.difference_update(feature_reqs)
+
+ feature_req_deps = ('python3-' + r.replace('.', '-').lower() for r in sorted(feature_reqs))
+ lines_after.append('PACKAGECONFIG[{}] = ",,,{}"'.format(feature.lower(), ' '.join(feature_req_deps)))
+
+ inst_reqs = set()
+ if 'Install-requires' in info:
+ if extras_req:
+ lines_after.append('')
+ inst_reqs = info['Install-requires']
+ if inst_reqs:
+ unmapped_deps.difference_update(inst_reqs)
+
+ inst_req_deps = ('python3-' + r.replace('.', '-').lower() for r in sorted(inst_reqs))
+ lines_after.append('# WARNING: the following rdepends are from setuptools install_requires. These')
+ lines_after.append('# upstream names may not correspond exactly to bitbake package names.')
+ lines_after.append('RDEPENDS:${{PN}} += "{}"'.format(' '.join(inst_req_deps)))
+
+ if mapped_deps:
+ name = info.get('Name')
+ if name and name[0] in mapped_deps:
+ # Attempt to avoid self-reference
+ mapped_deps.remove(name[0])
+ mapped_deps -= set(self.excluded_pkgdeps)
+ if inst_reqs or extras_req:
+ lines_after.append('')
+ lines_after.append('# WARNING: the following rdepends are determined through basic analysis of the')
+ lines_after.append('# python sources, and might not be 100% accurate.')
+ lines_after.append('RDEPENDS:${{PN}} += "{}"'.format(' '.join(sorted(mapped_deps))))
+
+ unmapped_deps -= set(extensions)
+ unmapped_deps -= set(self.assume_provided)
+ if unmapped_deps:
+ if mapped_deps:
+ lines_after.append('')
+ lines_after.append('# WARNING: We were unable to map the following python package/module')
+ lines_after.append('# dependencies to the bitbake packages which include them:')
+ lines_after.extend('# {}'.format(d) for d in sorted(unmapped_deps))
+
+ handled.append('buildsystem')
+
+class PythonPyprojectTomlRecipeHandler(PythonRecipeHandler):
+ """Base class to support PEP517 and PEP518
+
+ PEP517 https://peps.python.org/pep-0517/#source-trees
+ PEP518 https://peps.python.org/pep-0518/#build-system-table
+ """
+ # bitbake currently supports the following backends
+ build_backend_map = {
+ "setuptools.build_meta": "python_setuptools_build_meta",
+ "poetry.core.masonry.api": "python_poetry_core",
+ "flit_core.buildapi": "python_flit_core",
+ "hatchling.build": "python_hatchling",
+ "maturin": "python_maturin",
+ "mesonpy": "python_mesonpy",
+ }
+
+ # setuptools.build_meta and flit declare project metadata in the "project" section of pyproject.toml
+ # according to PEP-621: https://packaging.python.org/en/latest/specifications/declaring-project-metadata/#declaring-project-metadata
+ # while poetry uses the "tool.poetry" section according to its official documentation: https://python-poetry.org/docs/pyproject/
+ # keys from the "project" and "tool.poetry" sections are almost the same, except for the homepage key,
+ # which is "homepage" for tool.poetry and "Homepage" for the "project" section. So keep both
+ bbvar_map = {
+ "name": "PN",
+ "version": "PV",
+ "Homepage": "HOMEPAGE",
+ "homepage": "HOMEPAGE",
+ "description": "SUMMARY",
+ "license": "LICENSE",
+ "dependencies": "RDEPENDS:${PN}",
+ "requires": "DEPENDS",
+ }
+
+ replacements = [
+ ("license", r" +$", ""),
+ ("license", r"^ +", ""),
+ ("license", r" ", "-"),
+ ("license", r"^GNU-", ""),
+ ("license", r"-[Ll]icen[cs]e(,?-[Vv]ersion)?", ""),
+ ("license", r"^UNKNOWN$", ""),
+ # Remove currently unhandled version numbers from these variables
+ ("requires", r"\[[^\]]+\]$", ""),
+ ("requires", r"^([^><= ]+).*", r"\1"),
+ ("dependencies", r"\[[^\]]+\]$", ""),
+ ("dependencies", r"^([^><= ]+).*", r"\1"),
+ ]
+
+ excluded_native_pkgdeps = [
+ # already provided by python_setuptools_build_meta.bbclass
+ "python3-setuptools-native",
+ "python3-wheel-native",
+ # already provided by python_poetry_core.bbclass
+ "python3-poetry-core-native",
+ # already provided by python_flit_core.bbclass
+ "python3-flit-core-native",
+ # already provided by python_mesonpy.bbclass
+ "python3-meson-python-native",
+ ]
+
+ # add here a list of known and often used packages and the corresponding bitbake package
+ known_deps_map = {
+ "setuptools": "python3-setuptools",
+ "wheel": "python3-wheel",
+ "poetry-core": "python3-poetry-core",
+ "flit_core": "python3-flit-core",
+ "setuptools-scm": "python3-setuptools-scm",
+ "hatchling": "python3-hatchling",
+ "hatch-vcs": "python3-hatch-vcs",
+ "meson-python" : "python3-meson-python",
+ }
+
+ def __init__(self):
+ pass
+
+ def process(self, srctree, classes, lines_before, lines_after, handled, extravalues):
+ info = {}
+ metadata = {}
+
+ if 'buildsystem' in handled:
+ return False
+
+ logger.debug("Trying pyproject.toml parser")
+
+ # Check for non-zero size setup.py files
+ setupfiles = RecipeHandler.checkfiles(srctree, ["pyproject.toml"])
+ for fn in setupfiles:
+ if os.path.getsize(fn):
+ break
+ else:
+ logger.debug("No pyproject.toml found")
+ return False
+
+ setupscript = os.path.join(srctree, "pyproject.toml")
- @classmethod
- def run_command(cls, cmd, **popenargs):
- if 'stderr' not in popenargs:
- popenargs['stderr'] = subprocess.STDOUT
try:
- return subprocess.check_output(cmd, **popenargs).decode('utf-8')
- except OSError as exc:
- logger.error('Unable to run `{}`: {}', ' '.join(cmd), exc)
- raise
- except subprocess.CalledProcessError as exc:
- logger.error('Unable to run `{}`: {}', ' '.join(cmd), exc.output)
- raise
+ try:
+ import tomllib
+ except ImportError:
+ try:
+ import tomli as tomllib
+ except ImportError:
+ logger.error("Neither 'tomllib' nor 'tomli' could be imported, cannot scan pyproject.toml.")
+ return False
+
+ try:
+ with open(setupscript, "rb") as f:
+ config = tomllib.load(f)
+ except Exception:
+ logger.exception("Failed to parse pyproject.toml")
+ return False
+
+ build_backend = config["build-system"]["build-backend"]
+ if build_backend in self.build_backend_map:
+ classes.append(self.build_backend_map[build_backend])
+ else:
+ logger.error(
+ "Unsupported build-backend: %s, cannot use pyproject.toml. Will try to use legacy setup.py"
+ % build_backend
+ )
+ return False
+
+ licfile = ""
+
+ if build_backend == "poetry.core.masonry.api":
+ if "tool" in config and "poetry" in config["tool"]:
+ metadata = config["tool"]["poetry"]
+ else:
+ if "project" in config:
+ metadata = config["project"]
+
+ if metadata:
+ for field, values in metadata.items():
+ if field == "license":
+ # For setuptools.build_meta and flit, license is a table,
+ # but for poetry license is a string;
+ # for hatchling, both a table (jsonschema) and a string (iniconfig) have been used
+ if build_backend == "poetry.core.masonry.api":
+ value = values
+ else:
+ value = values.get("text", "")
+ if not value:
+ licfile = values.get("file", "")
+ continue
+ elif field == "dependencies" and build_backend == "poetry.core.masonry.api":
+ # For poetry backend, "dependencies" section looks like:
+ # [tool.poetry.dependencies]
+ # requests = "^2.13.0"
+ # requests = { version = "^2.13.0", source = "private" }
+ # See https://python-poetry.org/docs/master/pyproject/#dependencies-and-dependency-groups for more details
+ # This class doesn't handle versions anyway, so we just take the dependency names here and construct a list
+ value = []
+ for k in values.keys():
+ value.append(k)
+ elif isinstance(values, dict):
+ for k, v in values.items():
+ info[k] = v
+ continue
+ else:
+ value = values
+
+ info[field] = value
+
+ # Grab the license value before applying replacements
+ license_str = info.get("license", "").strip()
+
+ if license_str:
+ for i, line in enumerate(lines_before):
+ if line.startswith("##LICENSE_PLACEHOLDER##"):
+ lines_before.insert(
+ i, "# NOTE: License in pyproject.toml is: %s" % license_str
+ )
+ break
+
+ info["requires"] = config["build-system"]["requires"]
+
+ self.apply_info_replacements(info)
+
+ if "classifiers" in info:
+ license = self.handle_classifier_license(
+ info["classifiers"], info.get("license", "")
+ )
+ if license:
+ if licfile:
+ lines = []
+ md5value = bb.utils.md5_file(os.path.join(srctree, licfile))
+ lines.append('LICENSE = "%s"' % license)
+ lines.append(
+ 'LIC_FILES_CHKSUM = "file://%s;md5=%s"'
+ % (licfile, md5value)
+ )
+ lines.append("")
+
+ # Replace the placeholder so we get the values in the right place in the recipe file
+ try:
+ pos = lines_before.index("##LICENSE_PLACEHOLDER##")
+ except ValueError:
+ pos = -1
+ if pos == -1:
+ lines_before.extend(lines)
+ else:
+ lines_before[pos : pos + 1] = lines
+
+ handled.append(("license", [license, licfile, md5value]))
+ else:
+ info["license"] = license
+
+ provided_packages = self.parse_pkgdata_for_python_packages()
+ provided_packages.update(self.known_deps_map)
+ native_mapped_deps, native_unmapped_deps = set(), set()
+ mapped_deps, unmapped_deps = set(), set()
+
+ if "requires" in info:
+ for require in info["requires"]:
+ mapped = provided_packages.get(require)
+
+ if mapped:
+ logger.debug("Mapped %s to %s" % (require, mapped))
+ native_mapped_deps.add(mapped)
+ else:
+ logger.debug("Could not map %s" % require)
+ native_unmapped_deps.add(require)
+
+ info.pop("requires")
+
+ if native_mapped_deps != set():
+ native_mapped_deps = {
+ item + "-native" for item in native_mapped_deps
+ }
+ native_mapped_deps -= set(self.excluded_native_pkgdeps)
+ if native_mapped_deps != set():
+ info["requires"] = " ".join(sorted(native_mapped_deps))
+
+ if native_unmapped_deps:
+ lines_after.append("")
+ lines_after.append(
+ "# WARNING: We were unable to map the following python package/module"
+ )
+ lines_after.append(
+ "# dependencies to the bitbake packages which include them:"
+ )
+ lines_after.extend(
+ "# {}".format(d) for d in sorted(native_unmapped_deps)
+ )
+
+ if "dependencies" in info:
+ for dependency in info["dependencies"]:
+ mapped = provided_packages.get(dependency)
+ if mapped:
+ logger.debug("Mapped %s to %s" % (dependency, mapped))
+ mapped_deps.add(mapped)
+ else:
+ logger.debug("Could not map %s" % dependency)
+ unmapped_deps.add(dependency)
+
+ info.pop("dependencies")
+
+ if mapped_deps != set():
+ info["dependencies"] = " ".join(sorted(mapped_deps))
+
+ if unmapped_deps:
+ lines_after.append("")
+ lines_after.append(
+ "# WARNING: We were unable to map the following python package/module"
+ )
+ lines_after.append(
+ "# runtime dependencies to the bitbake packages which include them:"
+ )
+ lines_after.extend(
+ "# {}".format(d) for d in sorted(unmapped_deps)
+ )
+
+ self.map_info_to_bbvar(info, extravalues)
+
+ handled.append("buildsystem")
+ except Exception:
+ logger.exception("Failed to correctly handle pyproject.toml, falling back to another method")
+ return False
def gather_setup_info(fileobj):
@@ -715,5 +1118,7 @@ def has_non_literals(value):
def register_recipe_handlers(handlers):
- # We need to make sure this is ahead of the makefile fallback handler
- handlers.append((PythonRecipeHandler(), 70))
+ # We need to make sure these are ahead of the makefile fallback handler,
+ # and that the pyproject.toml handler runs ahead of the setup.py handler
+ handlers.append((PythonPyprojectTomlRecipeHandler(), 75))
+ handlers.append((PythonSetupPyRecipeHandler(), 70))
diff --git a/scripts/lib/recipetool/create_go.py b/scripts/lib/recipetool/create_go.py
new file mode 100644
index 0000000000..a85a2f2786
--- /dev/null
+++ b/scripts/lib/recipetool/create_go.py
@@ -0,0 +1,777 @@
+# Recipe creation tool - go support plugin
+#
+# The code is based on golang internals. See the affected
+# methods for further reference and information.
+#
+# Copyright (C) 2023 Weidmueller GmbH & Co KG
+# Author: Lukas Funke <lukas.funke@weidmueller.com>
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+
+from collections import namedtuple
+from enum import Enum
+from html.parser import HTMLParser
+from recipetool.create import RecipeHandler, handle_license_vars
+from recipetool.create import guess_license, tidy_licenses, fixup_license
+from recipetool.create import determine_from_url
+from urllib.error import URLError, HTTPError
+
+import bb.utils
+import json
+import logging
+import os
+import re
+import subprocess
+import sys
+import shutil
+import tempfile
+import urllib.parse
+import urllib.request
+
+
+GoImport = namedtuple('GoImport', 'root vcs url suffix')
+logger = logging.getLogger('recipetool')
+CodeRepo = namedtuple(
+ 'CodeRepo', 'path codeRoot codeDir pathMajor pathPrefix pseudoMajor')
+
+tinfoil = None
+
+# Regular expression to parse pseudo semantic version
+# see https://go.dev/ref/mod#pseudo-versions
+re_pseudo_semver = re.compile(
+ r"^v[0-9]+\.(0\.0-|\d+\.\d+-([^+]*\.)?0\.)(?P<utc>\d{14})-(?P<commithash>[A-Za-z0-9]+)(\+[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*)?$")
+# Regular expression to parse semantic version
+re_semver = re.compile(
+ r"^v(?P<major>0|[1-9]\d*)\.(?P<minor>0|[1-9]\d*)\.(?P<patch>0|[1-9]\d*)(?:-(?P<prerelease>(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+(?P<buildmetadata>[0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$")
+
+
+def tinfoil_init(instance):
+ global tinfoil
+ tinfoil = instance
+
+
+class GoRecipeHandler(RecipeHandler):
+ """Class to handle the go recipe creation"""
+
+ @staticmethod
+ def __ensure_go():
+ """Check if the 'go' command is available in the recipes"""
+ recipe = "go-native"
+ if not tinfoil.recipes_parsed:
+ tinfoil.parse_recipes()
+ try:
+ rd = tinfoil.parse_recipe(recipe)
+ except bb.providers.NoProvider:
+ bb.error(
+ "Nothing provides '%s' which is required for the build" % (recipe))
+ bb.note(
+ "You will likely need to add a layer that provides '%s'" % (recipe))
+ return None
+
+ bindir = rd.getVar('STAGING_BINDIR_NATIVE')
+ gopath = os.path.join(bindir, 'go')
+
+ if not os.path.exists(gopath):
+ tinfoil.build_targets(recipe, 'addto_recipe_sysroot')
+
+ if not os.path.exists(gopath):
+ logger.error(
+ '%s required to process specified source, but %s did not seem to populate it' % ('go', recipe))
+ return None
+
+ return bindir
+
+ def __resolve_repository_static(self, modulepath):
+ """Resolve the repository in a static manner
+
+ The method is based on the go implementation of
+ `repoRootFromVCSPaths` in
+ https://github.com/golang/go/blob/master/src/cmd/go/internal/vcs/vcs.go
+ """
+
+ url = urllib.parse.urlparse("https://" + modulepath)
+ req = urllib.request.Request(url.geturl())
+
+ try:
+ resp = urllib.request.urlopen(req)
+ # Some module paths are just redirects to github (or some other vcs
+ # host). Therefore, we check if this modulepath redirects to
+ # somewhere else
+ if resp.geturl() != url.geturl():
+ bb.debug(1, "%s is redirectred to %s" %
+ (url.geturl(), resp.geturl()))
+ url = urllib.parse.urlparse(resp.geturl())
+ modulepath = url.netloc + url.path
+
+ except URLError as url_err:
+ # This is probably because the module path
+ # contains the subdir and major path. Thus,
+ # we ignore this error for now
+ logger.debug(
+ "Failed to fetch page from [%s]: %s" % (url, str(url_err)))
+
+ host, _, _ = modulepath.partition('/')
+
+ class vcs(Enum):
+ pathprefix = "pathprefix"
+ regexp = "regexp"
+ type = "type"
+ repo = "repo"
+ check = "check"
+ schemelessRepo = "schemelessRepo"
+
+ # GitHub
+ vcsGitHub = {}
+ vcsGitHub[vcs.pathprefix] = "github.com"
+ vcsGitHub[vcs.regexp] = re.compile(
+ r'^(?P<root>github\.com/[A-Za-z0-9_.\-]+/[A-Za-z0-9_.\-]+)(/(?P<suffix>[A-Za-z0-9_.\-]+))*$')
+ vcsGitHub[vcs.type] = "git"
+ vcsGitHub[vcs.repo] = "https://\\g<root>"
+
+ # Bitbucket
+ vcsBitbucket = {}
+ vcsBitbucket[vcs.pathprefix] = "bitbucket.org"
+ vcsBitbucket[vcs.regexp] = re.compile(
+ r'^(?P<root>bitbucket\.org/(?P<bitname>[A-Za-z0-9_.\-]+/[A-Za-z0-9_.\-]+))(/(?P<suffix>[A-Za-z0-9_.\-]+))*$')
+ vcsBitbucket[vcs.type] = "git"
+ vcsBitbucket[vcs.repo] = "https://\\g<root>"
+
+ # IBM DevOps Services (JazzHub)
+ vcsIBMDevOps = {}
+ vcsIBMDevOps[vcs.pathprefix] = "hub.jazz.net/git"
+ vcsIBMDevOps[vcs.regexp] = re.compile(
+ r'^(?P<root>hub\.jazz\.net/git/[a-z0-9]+/[A-Za-z0-9_.\-]+)(/(?P<suffix>[A-Za-z0-9_.\-]+))*$')
+ vcsIBMDevOps[vcs.type] = "git"
+ vcsIBMDevOps[vcs.repo] = "https://\\g<root>"
+
+ # Git at Apache
+ vcsApacheGit = {}
+ vcsApacheGit[vcs.pathprefix] = "git.apache.org"
+ vcsApacheGit[vcs.regexp] = re.compile(
+ r'^(?P<root>git\.apache\.org/[a-z0-9_.\-]+\.git)(/(?P<suffix>[A-Za-z0-9_.\-]+))*$')
+ vcsApacheGit[vcs.type] = "git"
+ vcsApacheGit[vcs.repo] = "https://\\g<root>"
+
+ # Git at OpenStack
+ vcsOpenStackGit = {}
+ vcsOpenStackGit[vcs.pathprefix] = "git.openstack.org"
+ vcsOpenStackGit[vcs.regexp] = re.compile(
+ r'^(?P<root>git\.openstack\.org/[A-Za-z0-9_.\-]+/[A-Za-z0-9_.\-]+)(\.git)?(/(?P<suffix>[A-Za-z0-9_.\-]+))*$')
+ vcsOpenStackGit[vcs.type] = "git"
+ vcsOpenStackGit[vcs.repo] = "https://\\g<root>"
+
+ # chiselapp.com for fossil
+ vcsChiselapp = {}
+ vcsChiselapp[vcs.pathprefix] = "chiselapp.com"
+ vcsChiselapp[vcs.regexp] = re.compile(
+ r'^(?P<root>chiselapp\.com/user/[A-Za-z0-9]+/repository/[A-Za-z0-9_.\-]+)$')
+ vcsChiselapp[vcs.type] = "fossil"
+ vcsChiselapp[vcs.repo] = "https://\\g<root>"
+
+ # General syntax for any server.
+ # Must be last.
+ vcsGeneralServer = {}
+ vcsGeneralServer[vcs.regexp] = re.compile(
+ "(?P<root>(?P<repo>([a-z0-9.\\-]+\\.)+[a-z0-9.\\-]+(:[0-9]+)?(/~?[A-Za-z0-9_.\\-]+)+?)\\.(?P<vcs>bzr|fossil|git|hg|svn))(/~?(?P<suffix>[A-Za-z0-9_.\\-]+))*$")
+ vcsGeneralServer[vcs.schemelessRepo] = True
+
+ vcsPaths = [vcsGitHub, vcsBitbucket, vcsIBMDevOps,
+ vcsApacheGit, vcsOpenStackGit, vcsChiselapp,
+ vcsGeneralServer]
+
+ if modulepath.startswith("example.net") or modulepath == "rsc.io":
+ logger.warning("Suspicious module path %s" % modulepath)
+ return None
+ if modulepath.startswith("http:") or modulepath.startswith("https:"):
+ logger.warning("Import path should not start with %s %s" %
+ ("http", "https"))
+ return None
+
+ rootpath = None
+ vcstype = None
+ repourl = None
+ suffix = None
+
+ for srv in vcsPaths:
+ m = srv[vcs.regexp].match(modulepath)
+ if vcs.pathprefix in srv:
+ if host == srv[vcs.pathprefix]:
+ rootpath = m.group('root')
+ vcstype = srv[vcs.type]
+ repourl = m.expand(srv[vcs.repo])
+ suffix = m.group('suffix')
+ break
+ elif m and srv[vcs.schemelessRepo]:
+ rootpath = m.group('root')
+ vcstype = m.group('vcs')
+ repourl = m.group('repo')
+ suffix = m.group('suffix')
+ break
+
+ return GoImport(rootpath, vcstype, repourl, suffix)
+
+ def __resolve_repository_dynamic(self, modulepath):
+ """Resolve the repository root in a dynamic manner.
+
+ The method is based on the go implementation of
+ `repoRootForImportDynamic` in
+ https://github.com/golang/go/blob/master/src/cmd/go/internal/vcs/vcs.go
+ """
+ url = urllib.parse.urlparse("https://" + modulepath)
+
+ class GoImportHTMLParser(HTMLParser):
+
+ def __init__(self):
+ super().__init__()
+ self.__srv = {}
+
+ def handle_starttag(self, tag, attrs):
+ if tag == 'meta' and list(
+ filter(lambda a: (a[0] == 'name' and a[1] == 'go-import'), attrs)):
+ content = list(
+ filter(lambda a: (a[0] == 'content'), attrs))
+ if content:
+ srv = content[0][1].split()
+ self.__srv[srv[0]] = srv
+
+ def go_import(self, modulepath):
+ if modulepath in self.__srv:
+ srv = self.__srv[modulepath]
+ return GoImport(srv[0], srv[1], srv[2], None)
+ return None
+
+ url = url.geturl() + "?go-get=1"
+ req = urllib.request.Request(url)
+
+ try:
+ body = urllib.request.urlopen(req).read()
+ except HTTPError as http_err:
+ logger.warning(
+ "Unclean status when fetching page from [%s]: %s", url, str(http_err))
+ body = http_err.fp.read()
+ except URLError as url_err:
+ logger.warning(
+ "Failed to fetch page from [%s]: %s", url, str(url_err))
+ return None
+
+ parser = GoImportHTMLParser()
+ parser.feed(body.decode('utf-8'))
+ parser.close()
+
+ return parser.go_import(modulepath)
+
+ def __resolve_from_golang_proxy(self, modulepath, version):
+ """
+ Resolves repository data from golang proxy
+ """
+ url = urllib.parse.urlparse("https://proxy.golang.org/"
+ + modulepath
+ + "/@v/"
+ + version
+ + ".info")
+
+ # Transform url to lower case, golang proxy doesn't like mixed case
+ req = urllib.request.Request(url.geturl().lower())
+
+ try:
+ resp = urllib.request.urlopen(req)
+ except URLError as url_err:
+ logger.warning(
+ "Failed to fetch page from [%s]: %s", url, str(url_err))
+ return None
+
+ golang_proxy_res = resp.read().decode('utf-8')
+ modinfo = json.loads(golang_proxy_res)
+
+ if modinfo and 'Origin' in modinfo:
+ origin = modinfo['Origin']
+ _root_url = urllib.parse.urlparse(origin['URL'])
+
+ # We normalize the repo URL since we don't want the scheme in it
+ _subdir = origin['Subdir'] if 'Subdir' in origin else None
+ _root, _, _ = self.__split_path_version(modulepath)
+ if _subdir:
+ _root = _root[:-len(_subdir)].strip('/')
+
+ _commit = origin['Hash']
+ _vcs = origin['VCS']
+ return (GoImport(_root, _vcs, _root_url.geturl(), None), _commit)
+
+ return None
+
+ def __resolve_repository(self, modulepath):
+ """
+ Resolves src uri from go module-path
+ """
+ repodata = self.__resolve_repository_static(modulepath)
+ if not repodata or not repodata.url:
+ repodata = self.__resolve_repository_dynamic(modulepath)
+ if not repodata or not repodata.url:
+ logger.error(
+ "Could not resolve repository for module path '%s'" % modulepath)
+ # There is no way to recover from this
+ sys.exit(14)
+ if repodata:
+ logger.debug(1, "Resolved download path for import '%s' => %s" % (
+ modulepath, repodata.url))
+ return repodata
+
+ def __split_path_version(self, path):
+ i = len(path)
+ dot = False
+ for j in range(i, 0, -1):
+ if path[j - 1] < '0' or path[j - 1] > '9':
+ break
+ if path[j - 1] == '.':
+ dot = True
+ break
+ i = j - 1
+
+ if i <= 1 or i == len(
+ path) or path[i - 1] != 'v' or path[i - 2] != '/':
+ return path, "", True
+
+ prefix, pathMajor = path[:i - 2], path[i - 2:]
+ if dot or len(
+ pathMajor) <= 2 or pathMajor[2] == '0' or pathMajor == "/v1":
+ return path, "", False
+
+ return prefix, pathMajor, True
+
+ def __get_path_major(self, pathMajor):
+ if not pathMajor:
+ return ""
+
+ if pathMajor[0] != '/' and pathMajor[0] != '.':
+ logger.error(
+ "pathMajor suffix %s passed to PathMajorPrefix lacks separator", pathMajor)
+
+ if pathMajor.startswith(".v") and pathMajor.endswith("-unstable"):
+ pathMajor = pathMajor[:-len("-unstable")]
+
+ return pathMajor[1:]
+
+ def __build_coderepo(self, repo, path):
+ codedir = ""
+ pathprefix, pathMajor, _ = self.__split_path_version(path)
+ if repo.root == path:
+ pathprefix = path
+ elif path.startswith(repo.root):
+ codedir = pathprefix[len(repo.root):].strip('/')
+
+ pseudoMajor = self.__get_path_major(pathMajor)
+
+ logger.debug("root='%s', codedir='%s', prefix='%s', pathMajor='%s', pseudoMajor='%s'",
+ repo.root, codedir, pathprefix, pathMajor, pseudoMajor)
+
+ return CodeRepo(path, repo.root, codedir,
+ pathMajor, pathprefix, pseudoMajor)
+
+ def __resolve_version(self, repo, path, version):
+ hash = None
+ coderoot = self.__build_coderepo(repo, path)
+
+ def vcs_fetch_all():
+ tmpdir = tempfile.mkdtemp()
+ clone_cmd = "%s clone --bare %s %s" % ('git', repo.url, tmpdir)
+ bb.process.run(clone_cmd)
+ log_cmd = "git log --all --pretty='%H %d' --decorate=short"
+ output, _ = bb.process.run(
+ log_cmd, shell=True, stderr=subprocess.PIPE, cwd=tmpdir)
+ bb.utils.prunedir(tmpdir)
+ return output.strip().split('\n')
+
+ def vcs_fetch_remote(tag):
+ # add * to grab ^{}
+ refs = {}
+ ls_remote_cmd = "git ls-remote -q --tags {} {}*".format(
+ repo.url, tag)
+ output, _ = bb.process.run(ls_remote_cmd)
+ output = output.strip().split('\n')
+ for line in output:
+ f = line.split(maxsplit=1)
+ if len(f) != 2:
+ continue
+
+ for prefix in ["HEAD", "refs/heads/", "refs/tags/"]:
+ if f[1].startswith(prefix):
+ refs[f[1][len(prefix):]] = f[0]
+
+ for key, hash in refs.items():
+ if key.endswith(r"^{}"):
+ refs[key.strip(r"^{}")] = hash
+
+ return refs[tag]
+
+ m_pseudo_semver = re_pseudo_semver.match(version)
+
+ if m_pseudo_semver:
+ remote_refs = vcs_fetch_all()
+ short_commit = m_pseudo_semver.group('commithash')
+ for l in remote_refs:
+ r = l.split(maxsplit=1)
+ sha1 = r[0] if len(r) else None
+ if not sha1:
+ logger.error(
+ "Ups: could not resolve abbref commit for %s" % short_commit)
+
+ elif sha1.startswith(short_commit):
+ hash = sha1
+ break
+ else:
+ m_semver = re_semver.match(version)
+ if m_semver:
+
+ def get_sha1_remote(re):
+ rsha1 = None
+ for line in remote_refs:
+ # Split lines of the following format:
+ # 22e90d9b964610628c10f673ca5f85b8c2a2ca9a (tag: sometag)
+ lineparts = line.split(maxsplit=1)
+ sha1 = lineparts[0] if len(lineparts) else None
+ refstring = lineparts[1] if len(
+ lineparts) == 2 else None
+ if refstring:
+ # Normalize tag string and split in case of multiple
+ # regs e.g. (tag: speech/v1.10.0, tag: orchestration/v1.5.0 ...)
+ refs = refstring.strip('(), ').split(',')
+ for ref in refs:
+ if re.match(ref.strip()):
+ rsha1 = sha1
+ return rsha1
+
+ semver = "v" + m_semver.group('major') + "."\
+ + m_semver.group('minor') + "."\
+ + m_semver.group('patch') \
+ + (("-" + m_semver.group('prerelease'))
+ if m_semver.group('prerelease') else "")
+
+ tag = os.path.join(
+ coderoot.codeDir, semver) if coderoot.codeDir else semver
+
+ # probe tag using 'ls-remote', which is faster than fetching
+ # complete history
+ hash = vcs_fetch_remote(tag)
+ if not hash:
+ # backup: fetch complete history
+ remote_refs = vcs_fetch_all()
+ hash = get_sha1_remote(
+ re.compile(fr"(tag:|HEAD ->) ({tag})"))
+
+ logger.debug(
+ "Resolving commit for tag '%s' -> '%s'", tag, hash)
+ return hash
+
+ def __generate_srcuri_inline_fcn(self, path, version, replaces=None):
+ """Generate SRC_URI functions for go imports"""
+
+ logger.info("Resolving repository for module %s", path)
+ # First try to resolve repo and commit from golang proxy
+ # Most info is already there and we don't have to go through the
+ # repository or even perform the version resolve magic
+ golang_proxy_info = self.__resolve_from_golang_proxy(path, version)
+ if golang_proxy_info:
+ repo = golang_proxy_info[0]
+ commit = golang_proxy_info[1]
+ else:
+ # Fallback
+ # Resolve repository by 'hand'
+ repo = self.__resolve_repository(path)
+ commit = self.__resolve_version(repo, path, version)
+
+ url = urllib.parse.urlparse(repo.url)
+ repo_url = url.netloc + url.path
+
+ coderoot = self.__build_coderepo(repo, path)
+
+ inline_fcn = "${@go_src_uri("
+ inline_fcn += f"'{repo_url}','{version}'"
+ if repo_url != path:
+ inline_fcn += f",path='{path}'"
+ if coderoot.codeDir:
+ inline_fcn += f",subdir='{coderoot.codeDir}'"
+ if repo.vcs != 'git':
+ inline_fcn += f",vcs='{repo.vcs}'"
+ if replaces:
+ inline_fcn += f",replaces='{replaces}'"
+ if coderoot.pathMajor:
+ inline_fcn += f",pathmajor='{coderoot.pathMajor}'"
+ inline_fcn += ")}"
+
+ return inline_fcn, commit
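+ # For illustration (hypothetical module): a dependency on
+ # github.com/foo/bar at v1.2.3 yields an inline function such as
+ # ${@go_src_uri('github.com/foo/bar','v1.2.3')}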
+
+ def __go_handle_dependencies(self, go_mod, srctree, localfilesdir, extravalues, d):
+
+ import re
+ src_uris = []
+ src_revs = []
+
+ def generate_src_rev(path, version, commithash):
+ src_rev = f"# {path}@{version} => {commithash}\n"
+ # Oops... maybe someone manipulated the source repository and the
+ # version or commit could not be resolved. This is a sign that
+ # a) the supply chain was manipulated (bad)
+ # b) the implementation of the version resolving no longer works
+ # (less bad)
+ if not commithash:
+ src_rev += f"#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
+ src_rev += f"#!!! Could not resolve version !!!\n"
+ src_rev += f"#!!! Possible supply chain attack !!!\n"
+ src_rev += f"#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
+ src_rev += f"SRCREV_{path.replace('/', '.')} = \"{commithash}\""
+
+ return src_rev
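+ # For illustration (hypothetical module): github.com/foo/bar@v1.2.3
+ # resolved to <sha1> produces:
+ # # github.com/foo/bar@v1.2.3 => <sha1>
+ # SRCREV_github.com.foo.bar = "<sha1>"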
+
+ # we first go over the replacement list, because we are essentially
+ # interested only in the replaced path
+ if go_mod['Replace']:
+ for replacement in go_mod['Replace']:
+ oldpath = replacement['Old']['Path']
+ path = replacement['New']['Path']
+ version = ''
+ if 'Version' in replacement['New']:
+ version = replacement['New']['Version']
+
+ if os.path.exists(os.path.join(srctree, path)):
+ # the module refers to the local path, remove it from the requirement list
+ # because it's a local module
+ go_mod['Require'][:] = [v for v in go_mod['Require'] if v.get('Path') != oldpath]
+ else:
+ # Replace the path and the version, so we don't iterate the replacement list anymore
+ for require in go_mod['Require']:
+ if require['Path'] == oldpath:
+ require.update({'Path': path, 'Version': version})
+ break
+
+ for require in go_mod['Require']:
+ path = require['Path']
+ version = require['Version']
+
+ inline_fcn, commithash = self.__generate_srcuri_inline_fcn(
+ path, version)
+ src_uris.append(inline_fcn)
+ src_revs.append(generate_src_rev(path, version, commithash))
+
+ # strip version part from module URL /vXX
+ baseurl = re.sub(r'/v(\d+)$', '', go_mod['Module']['Path'])
+ pn, _ = determine_from_url(baseurl)
+ go_mods_basename = "%s-modules.inc" % pn
+
+ go_mods_filename = os.path.join(localfilesdir, go_mods_basename)
+ with open(go_mods_filename, "w") as f:
+ # We introduce this indirection to make the tests a little easier
+ f.write("SRC_URI += \"${GO_DEPENDENCIES_SRC_URI}\"\n")
+ f.write("GO_DEPENDENCIES_SRC_URI = \"\\\n")
+ for uri in src_uris:
+ f.write(" " + uri + " \\\n")
+ f.write("\"\n\n")
+ for rev in src_revs:
+ f.write(rev + "\n")
+
+ extravalues['extrafiles'][go_mods_basename] = go_mods_filename
+
+ def __go_run_cmd(self, cmd, cwd, d):
+ return bb.process.run(cmd, env=dict(os.environ, PATH=d.getVar('PATH')),
+ shell=True, cwd=cwd)
+
+ def __go_native_version(self, d):
+ stdout, _ = self.__go_run_cmd("go version", None, d)
+ m = re.match(r".*\sgo((\d+).(\d+).(\d+))\s([\w\/]*)", stdout)
+ major = int(m.group(2))
+ minor = int(m.group(3))
+ patch = int(m.group(4))
+
+ return major, minor, patch
+
+ def __go_mod_patch(self, srctree, localfilesdir, extravalues, d):
+
+ patchfilename = "go.mod.patch"
+ go_native_version_major, go_native_version_minor, _ = self.__go_native_version(
+ d)
+ self.__go_run_cmd("go mod tidy -go=%d.%d" %
+ (go_native_version_major, go_native_version_minor), srctree, d)
+ stdout, _ = self.__go_run_cmd("go mod edit -json", srctree, d)
+
+ # Create patch in order to upgrade go version
+ self.__go_run_cmd("git diff go.mod > %s" % (patchfilename), srctree, d)
+ # Restore original state
+ self.__go_run_cmd("git checkout HEAD go.mod go.sum", srctree, d)
+
+ go_mod = json.loads(stdout)
+ tmpfile = os.path.join(localfilesdir, patchfilename)
+ shutil.move(os.path.join(srctree, patchfilename), tmpfile)
+
+ extravalues['extrafiles'][patchfilename] = tmpfile
+
+ return go_mod, patchfilename
+
+ def __go_mod_vendor(self, go_mod, srctree, localfilesdir, extravalues, d):
+ # Perform vendoring to retrieve the correct modules.txt
+ tmp_vendor_dir = tempfile.mkdtemp()
+
+ # -v causes go to print modules.txt to stderr
+ _, stderr = self.__go_run_cmd(
+ "go mod vendor -v -o %s" % (tmp_vendor_dir), srctree, d)
+
+ modules_txt_basename = "modules.txt"
+ modules_txt_filename = os.path.join(localfilesdir, modules_txt_basename)
+ with open(modules_txt_filename, "w") as f:
+ f.write(stderr)
+
+ extravalues['extrafiles'][modules_txt_basename] = modules_txt_filename
+
+ licenses = []
+ lic_files_chksum = []
+ licvalues = guess_license(tmp_vendor_dir, d)
+ shutil.rmtree(tmp_vendor_dir)
+
+ if licvalues:
+ for licvalue in licvalues:
+ license = licvalue[0]
+ lics = tidy_licenses(fixup_license(license))
+ lics = [lic for lic in lics if lic not in licenses]
+ if len(lics):
+ licenses.extend(lics)
+ lic_files_chksum.append(
+ 'file://src/${GO_IMPORT}/vendor/%s;md5=%s' % (licvalue[1], licvalue[2]))
+
+ # strip version part from module URL /vXX
+ baseurl = re.sub(r'/v(\d+)$', '', go_mod['Module']['Path'])
+ pn, _ = determine_from_url(baseurl)
+ licenses_basename = "%s-licenses.inc" % pn
+
+ licenses_filename = os.path.join(localfilesdir, licenses_basename)
+ with open(licenses_filename, "w") as f:
+ f.write("GO_MOD_LICENSES = \"%s\"\n\n" %
+ ' & '.join(sorted(licenses, key=str.casefold)))
+ # We introduce this indirection to make the tests a little easier
+ f.write("LIC_FILES_CHKSUM += \"${VENDORED_LIC_FILES_CHKSUM}\"\n")
+ f.write("VENDORED_LIC_FILES_CHKSUM = \"\\\n")
+ for lic in lic_files_chksum:
+ f.write(" " + lic + " \\\n")
+ f.write("\"\n")
+
+ extravalues['extrafiles'][licenses_basename] = licenses_filename
+
+ def process(self, srctree, classes, lines_before,
+ lines_after, handled, extravalues):
+
+ if 'buildsystem' in handled:
+ return False
+
+ files = RecipeHandler.checkfiles(srctree, ['go.mod'])
+ if not files:
+ return False
+
+ d = bb.data.createCopy(tinfoil.config_data)
+ go_bindir = self.__ensure_go()
+ if not go_bindir:
+ sys.exit(14)
+
+ d.prependVar('PATH', '%s:' % go_bindir)
+ handled.append('buildsystem')
+ classes.append("go-vendor")
+
+ stdout, _ = self.__go_run_cmd("go mod edit -json", srctree, d)
+
+ go_mod = json.loads(stdout)
+ go_import = go_mod['Module']['Path']
+ go_version_match = re.match(r"([0-9]+)\.([0-9]+)", go_mod['Go'])
+ go_version_major = int(go_version_match.group(1))
+ go_version_minor = int(go_version_match.group(2))
+ src_uris = []
+
+ localfilesdir = tempfile.mkdtemp(prefix='recipetool-go-')
+ extravalues.setdefault('extrafiles', {})
+
+ # Use an explicit name determined from the module name because it
+ # might differ from the actual URL for replaced modules
+ # strip version part from module URL /vXX
+ baseurl = re.sub(r'/v(\d+)$', '', go_mod['Module']['Path'])
+ pn, _ = determine_from_url(baseurl)
+
+ # go.mod files with version < 1.17 may not include all indirect
+ # dependencies. Thus, we have to upgrade the go version.
+ if go_version_major == 1 and go_version_minor < 17:
+ logger.warning(
+ "go.mod files generated by Go < 1.17 might have incomplete indirect dependencies.")
+ go_mod, patchfilename = self.__go_mod_patch(srctree, localfilesdir,
+ extravalues, d)
+ src_uris.append(
+ "file://%s;patchdir=src/${GO_IMPORT}" % (patchfilename))
+
+ # Check whether the module is vendored. If so, we have nothing to do.
+ # Otherwise we gather all dependencies and add them to the recipe
+ if not os.path.exists(os.path.join(srctree, "vendor")):
+
+ # Write additional $BPN-modules.inc file
+ self.__go_mod_vendor(go_mod, srctree, localfilesdir, extravalues, d)
+ lines_before.append("LICENSE += \" & ${GO_MOD_LICENSES}\"")
+ lines_before.append("require %s-licenses.inc" % (pn))
+
+ self.__rewrite_src_uri(lines_before, ["file://modules.txt"])
+
+ self.__go_handle_dependencies(go_mod, srctree, localfilesdir, extravalues, d)
+ lines_before.append("require %s-modules.inc" % (pn))
+
+ # Do generic license handling
+ handle_license_vars(srctree, lines_before, handled, extravalues, d)
+ self.__rewrite_lic_uri(lines_before)
+
+ lines_before.append("GO_IMPORT = \"{}\"".format(baseurl))
+ lines_before.append("SRCREV_FORMAT = \"${BPN}\"")
+
+ def __update_lines_before(self, updated, newlines, lines_before):
+ if updated:
+ del lines_before[:]
+ for line in newlines:
+ # Hack to avoid newlines that edit_metadata inserts
+ if line.endswith('\n'):
+ line = line[:-1]
+ lines_before.append(line)
+ return updated
+
+ def __rewrite_lic_uri(self, lines_before):
+
+ def varfunc(varname, origvalue, op, newlines):
+ if varname == 'LIC_FILES_CHKSUM':
+ new_licenses = []
+ licenses = origvalue.split('\\')
+ for license in licenses:
+ if not license:
+ logger.warning("No license file was detected for the main module!")
+ # The license list of the main recipe can be empty; this can
+ # happen, for example, in the case of a CLOSED license.
+ # Fall through to complete recipe generation.
+ continue
+ license = license.strip()
+ uri, chksum = license.split(';', 1)
+ url = urllib.parse.urlparse(uri)
+ new_uri = os.path.join(
+ url.scheme + "://", "src", "${GO_IMPORT}", url.netloc + url.path) + ";" + chksum
+ new_licenses.append(new_uri)
+
+ return new_licenses, None, -1, True
+ return origvalue, None, 0, True
+
+ updated, newlines = bb.utils.edit_metadata(
+ lines_before, ['LIC_FILES_CHKSUM'], varfunc)
+ return self.__update_lines_before(updated, newlines, lines_before)
+
+ def __rewrite_src_uri(self, lines_before, additional_uris=[]):
+
+ def varfunc(varname, origvalue, op, newlines):
+ if varname == 'SRC_URI':
+ src_uri = ["git://${GO_IMPORT};destsuffix=git/src/${GO_IMPORT};nobranch=1;name=${BPN};protocol=https"]
+ src_uri.extend(additional_uris)
+ return src_uri, None, -1, True
+ return origvalue, None, 0, True
+
+ updated, newlines = bb.utils.edit_metadata(lines_before, ['SRC_URI'], varfunc)
+ return self.__update_lines_before(updated, newlines, lines_before)
+
+
+def register_recipe_handlers(handlers):
+ handlers.append((GoRecipeHandler(), 60))
diff --git a/scripts/lib/recipetool/create_kernel.py b/scripts/lib/recipetool/create_kernel.py
index ca4996c7ac..5740589a68 100644
--- a/scripts/lib/recipetool/create_kernel.py
+++ b/scripts/lib/recipetool/create_kernel.py
@@ -2,18 +2,8 @@
#
# Copyright (C) 2016 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import re
import logging
diff --git a/scripts/lib/recipetool/create_kmod.py b/scripts/lib/recipetool/create_kmod.py
index 4569b53c80..cc00106961 100644
--- a/scripts/lib/recipetool/create_kmod.py
+++ b/scripts/lib/recipetool/create_kmod.py
@@ -2,18 +2,8 @@
#
# Copyright (C) 2016 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import re
import logging
@@ -123,7 +113,7 @@ class KernelModuleRecipeHandler(RecipeHandler):
kdirpath, _ = check_target(compile_lines, install=False)
if manual_install or not install_lines:
- lines_after.append('EXTRA_OEMAKE_append_task-install = " -C ${STAGING_KERNEL_DIR} M=${S}"')
+ lines_after.append('EXTRA_OEMAKE:append:task-install = " -C ${STAGING_KERNEL_DIR} M=${S}"')
elif install_target and install_target != 'modules_install':
lines_after.append('MODULES_INSTALL_TARGET = "install"')
@@ -141,7 +131,7 @@ class KernelModuleRecipeHandler(RecipeHandler):
warnmsg = 'Unable to find means of passing kernel path into install makefile - if kernel path is hardcoded you will need to patch the makefile'
if warnmsg:
warnmsg += '. Note that the variable KERNEL_SRC will be passed in as the kernel source path.'
- logger.warn(warnmsg)
+ logger.warning(warnmsg)
lines_after.append('# %s' % warnmsg)
return True
diff --git a/scripts/lib/recipetool/create_npm.py b/scripts/lib/recipetool/create_npm.py
index bb42a5ca5c..113a89f6a6 100644
--- a/scripts/lib/recipetool/create_npm.py
+++ b/scripts/lib/recipetool/create_npm.py
@@ -1,330 +1,310 @@
-# Recipe creation tool - node.js NPM module support plugin
-#
# Copyright (C) 2016 Intel Corporation
+# Copyright (C) 2020 Savoir-Faire Linux
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+"""Recipe creation tool - npm module support plugin"""
-import os
+import json
import logging
-import subprocess
+import os
+import re
+import sys
import tempfile
-import shutil
-import json
-from recipetool.create import RecipeHandler, split_pkg_licenses, handle_license_vars
-
+import bb
+from bb.fetch2.npm import NpmEnvironment
+from bb.fetch2.npm import npm_package
+from bb.fetch2.npmsw import foreach_dependencies
+from recipetool.create import RecipeHandler
+from recipetool.create import get_license_md5sums
+from recipetool.create import guess_license
+from recipetool.create import split_pkg_licenses
logger = logging.getLogger('recipetool')
-
-tinfoil = None
+TINFOIL = None
def tinfoil_init(instance):
- global tinfoil
- tinfoil = instance
-
+ """Initialize tinfoil"""
+ global TINFOIL
+ TINFOIL = instance
class NpmRecipeHandler(RecipeHandler):
- lockdownpath = None
+ """Class to handle the npm recipe creation"""
+
+ @staticmethod
+ def _get_registry(lines):
+ """Get the registry value from the 'npm://registry' url"""
+ registry = None
+
+ def _handle_registry(varname, origvalue, op, newlines):
+ nonlocal registry
+ if origvalue.startswith("npm://"):
+ registry = re.sub(r"^npm://", "http://", origvalue.split(";")[0])
+ return origvalue, None, 0, True
+
+ bb.utils.edit_metadata(lines, ["SRC_URI"], _handle_registry)
+
+ return registry
+
+ @staticmethod
+ def _ensure_npm():
+ """Check if the 'npm' command is available in the recipes"""
+ if not TINFOIL.recipes_parsed:
+ TINFOIL.parse_recipes()
- def _ensure_npm(self, fixed_setup=False):
- if not tinfoil.recipes_parsed:
- tinfoil.parse_recipes()
try:
- rd = tinfoil.parse_recipe('nodejs-native')
+ d = TINFOIL.parse_recipe("nodejs-native")
except bb.providers.NoProvider:
- if fixed_setup:
- msg = 'nodejs-native is required for npm but is not available within this SDK'
- else:
- msg = 'nodejs-native is required for npm but is not available - you will likely need to add a layer that provides nodejs'
- logger.error(msg)
- return None
- bindir = rd.getVar('STAGING_BINDIR_NATIVE')
- npmpath = os.path.join(bindir, 'npm')
+ bb.error("Nothing provides 'nodejs-native' which is required for the build")
+ bb.note("You will likely need to add a layer that provides nodejs")
+ sys.exit(14)
+
+ bindir = d.getVar("STAGING_BINDIR_NATIVE")
+ npmpath = os.path.join(bindir, "npm")
+
if not os.path.exists(npmpath):
- tinfoil.build_targets('nodejs-native', 'addto_recipe_sysroot')
+ TINFOIL.build_targets("nodejs-native", "addto_recipe_sysroot")
+
if not os.path.exists(npmpath):
- logger.error('npm required to process specified source, but nodejs-native did not seem to populate it')
- return None
+ bb.error("Failed to add 'npm' to sysroot")
+ sys.exit(14)
+
return bindir
- def _handle_license(self, data):
- '''
- Handle the license value from an npm package.json file
- '''
- license = None
- if 'license' in data:
- license = data['license']
- if isinstance(license, dict):
- license = license.get('type', None)
- if license:
- if 'OR' in license:
- license = license.replace('OR', '|')
- license = license.replace('AND', '&')
- license = license.replace(' ', '_')
- if not license[0] == '(':
- license = '(' + license + ')'
- else:
- license = license.replace('AND', '&')
- if license[0] == '(':
- license = license[1:]
- if license[-1] == ')':
- license = license[:-1]
- license = license.replace('MIT/X11', 'MIT')
- license = license.replace('Public Domain', 'PD')
- license = license.replace('SEE LICENSE IN EULA',
- 'SEE-LICENSE-IN-EULA')
- return license
-
- def _shrinkwrap(self, srctree, localfilesdir, extravalues, lines_before, d):
- try:
- runenv = dict(os.environ, PATH=d.getVar('PATH'))
- bb.process.run('npm shrinkwrap', cwd=srctree, stderr=subprocess.STDOUT, env=runenv, shell=True)
- except bb.process.ExecutionError as e:
- logger.warn('npm shrinkwrap failed:\n%s' % e.stdout)
- return
-
- tmpfile = os.path.join(localfilesdir, 'npm-shrinkwrap.json')
- shutil.move(os.path.join(srctree, 'npm-shrinkwrap.json'), tmpfile)
- extravalues.setdefault('extrafiles', {})
- extravalues['extrafiles']['npm-shrinkwrap.json'] = tmpfile
- lines_before.append('NPM_SHRINKWRAP := "${THISDIR}/${PN}/npm-shrinkwrap.json"')
-
- def _lockdown(self, srctree, localfilesdir, extravalues, lines_before, d):
- runenv = dict(os.environ, PATH=d.getVar('PATH'))
- if not NpmRecipeHandler.lockdownpath:
- NpmRecipeHandler.lockdownpath = tempfile.mkdtemp('recipetool-npm-lockdown')
- bb.process.run('npm install lockdown --prefix %s' % NpmRecipeHandler.lockdownpath,
- cwd=srctree, stderr=subprocess.STDOUT, env=runenv, shell=True)
- relockbin = os.path.join(NpmRecipeHandler.lockdownpath, 'node_modules', 'lockdown', 'relock.js')
- if not os.path.exists(relockbin):
- logger.warn('Could not find relock.js within lockdown directory; skipping lockdown')
- return
- try:
- bb.process.run('node %s' % relockbin, cwd=srctree, stderr=subprocess.STDOUT, env=runenv, shell=True)
- except bb.process.ExecutionError as e:
- logger.warn('lockdown-relock failed:\n%s' % e.stdout)
- return
-
- tmpfile = os.path.join(localfilesdir, 'lockdown.json')
- shutil.move(os.path.join(srctree, 'lockdown.json'), tmpfile)
- extravalues.setdefault('extrafiles', {})
- extravalues['extrafiles']['lockdown.json'] = tmpfile
- lines_before.append('NPM_LOCKDOWN := "${THISDIR}/${PN}/lockdown.json"')
-
- def _handle_dependencies(self, d, deps, optdeps, devdeps, lines_before, srctree):
- import scriptutils
- # If this isn't a single module we need to get the dependencies
- # and add them to SRC_URI
- def varfunc(varname, origvalue, op, newlines):
- if varname == 'SRC_URI':
- if not origvalue.startswith('npm://'):
- src_uri = origvalue.split()
- deplist = {}
- for dep, depver in optdeps.items():
- depdata = self.get_npm_data(dep, depver, d)
- if self.check_npm_optional_dependency(depdata):
- deplist[dep] = depdata
- for dep, depver in devdeps.items():
- depdata = self.get_npm_data(dep, depver, d)
- if self.check_npm_optional_dependency(depdata):
- deplist[dep] = depdata
- for dep, depver in deps.items():
- depdata = self.get_npm_data(dep, depver, d)
- deplist[dep] = depdata
-
- extra_urls = []
- for dep, depdata in deplist.items():
- version = depdata.get('version', None)
- if version:
- url = 'npm://registry.npmjs.org;name=%s;version=%s;subdir=node_modules/%s' % (dep, version, dep)
- extra_urls.append(url)
- if extra_urls:
- scriptutils.fetch_url(tinfoil, ' '.join(extra_urls), None, srctree, logger)
- src_uri.extend(extra_urls)
- return src_uri, None, -1, True
- return origvalue, None, 0, True
- updated, newlines = bb.utils.edit_metadata(lines_before, ['SRC_URI'], varfunc)
- if updated:
- del lines_before[:]
- for line in newlines:
- # Hack to avoid newlines that edit_metadata inserts
- if line.endswith('\n'):
- line = line[:-1]
- lines_before.append(line)
- return updated
+ @staticmethod
+ def _npm_global_configs(dev):
+ """Get the npm global configuration"""
+ configs = []
+
+ if dev:
+ configs.append(("also", "development"))
+ else:
+ configs.append(("only", "production"))
+
+ configs.append(("save", "false"))
+ configs.append(("package-lock", "false"))
+ configs.append(("shrinkwrap", "false"))
+ return configs
+
+ def _run_npm_install(self, d, srctree, registry, dev):
+ """Run the 'npm install' command without building the addons"""
+ configs = self._npm_global_configs(dev)
+ configs.append(("ignore-scripts", "true"))
+
+ if registry:
+ configs.append(("registry", registry))
+
+ bb.utils.remove(os.path.join(srctree, "node_modules"), recurse=True)
+
+ env = NpmEnvironment(d, configs=configs)
+ env.run("npm install", workdir=srctree)
+
+ def _generate_shrinkwrap(self, d, srctree, dev):
+ """Check and generate the 'npm-shrinkwrap.json' file if needed"""
+ configs = self._npm_global_configs(dev)
+
+ env = NpmEnvironment(d, configs=configs)
+ env.run("npm shrinkwrap", workdir=srctree)
+
+ return os.path.join(srctree, "npm-shrinkwrap.json")
+
+ def _handle_licenses(self, srctree, shrinkwrap_file, dev):
+ """Return the extra license files and the list of packages"""
+ licfiles = []
+ packages = {}
+
+ # Handle the parent package
+ packages["${PN}"] = ""
+
+ def _licfiles_append_fallback_readme_files(destdir):
+ """Append README files as fallback to license files if a license files is missing"""
+
+ fallback = True
+ readmes = []
+ basedir = os.path.join(srctree, destdir)
+ for fn in os.listdir(basedir):
+ upper = fn.upper()
+ if upper.startswith("README"):
+ fullpath = os.path.join(basedir, fn)
+ readmes.append(fullpath)
+ if upper.startswith("COPYING") or "LICENCE" in upper or "LICENSE" in upper:
+ fallback = False
+ if fallback:
+ for readme in readmes:
+ licfiles.append(os.path.relpath(readme, srctree))
+
+ # Handle the dependencies
+ def _handle_dependency(name, params, destdir):
+ deptree = destdir.split('node_modules/')
+ suffix = "-".join([npm_package(dep) for dep in deptree])
+ packages["${PN}" + suffix] = destdir
+ _licfiles_append_fallback_readme_files(destdir)
+
+ with open(shrinkwrap_file, "r") as f:
+ shrinkwrap = json.load(f)
+
+ foreach_dependencies(shrinkwrap, _handle_dependency, dev)
+
+ return licfiles, packages
+
+ # Handle the peer dependencies
+ def _handle_peer_dependency(self, shrinkwrap_file):
+ """Check if package has peer dependencies and show warning if it is the case"""
+ with open(shrinkwrap_file, "r") as f:
+ shrinkwrap = json.load(f)
+
+ packages = shrinkwrap.get("packages", {})
+ peer_deps = packages.get("", {}).get("peerDependencies", {})
+
+ for peer_dep in peer_deps:
+ peer_dep_yocto_name = npm_package(peer_dep)
+ bb.warn(peer_dep + " is a peer dependencie of the actual package. " +
+ "Please add this peer dependencie to the RDEPENDS variable as %s and generate its recipe with devtool"
+ % peer_dep_yocto_name)
+
+
def process(self, srctree, classes, lines_before, lines_after, handled, extravalues):
- import bb.utils
- import oe.package
- from collections import OrderedDict
+ """Handle the npm recipe creation"""
- if 'buildsystem' in handled:
+ if "buildsystem" in handled:
return False
- def read_package_json(fn):
- with open(fn, 'r', errors='surrogateescape') as f:
- return json.loads(f.read())
+ files = RecipeHandler.checkfiles(srctree, ["package.json"])
- files = RecipeHandler.checkfiles(srctree, ['package.json'])
- if files:
- d = bb.data.createCopy(tinfoil.config_data)
- npm_bindir = self._ensure_npm()
- if not npm_bindir:
- sys.exit(14)
- d.prependVar('PATH', '%s:' % npm_bindir)
-
- data = read_package_json(files[0])
- if 'name' in data and 'version' in data:
- extravalues['PN'] = data['name']
- extravalues['PV'] = data['version']
- classes.append('npm')
- handled.append('buildsystem')
- if 'description' in data:
- extravalues['SUMMARY'] = data['description']
- if 'homepage' in data:
- extravalues['HOMEPAGE'] = data['homepage']
-
- fetchdev = extravalues['fetchdev'] or None
- deps, optdeps, devdeps = self.get_npm_package_dependencies(data, fetchdev)
- self._handle_dependencies(d, deps, optdeps, devdeps, lines_before, srctree)
-
- # Shrinkwrap
- localfilesdir = tempfile.mkdtemp(prefix='recipetool-npm')
- self._shrinkwrap(srctree, localfilesdir, extravalues, lines_before, d)
-
- # Lockdown
- self._lockdown(srctree, localfilesdir, extravalues, lines_before, d)
-
- # Split each npm module out to is own package
- npmpackages = oe.package.npm_split_package_dirs(srctree)
- licvalues = None
- for item in handled:
- if isinstance(item, tuple):
- if item[0] == 'license':
- licvalues = item[1]
- break
- if not licvalues:
- licvalues = handle_license_vars(srctree, lines_before, handled, extravalues, d)
- if licvalues:
- # Augment the license list with information we have in the packages
- licenses = {}
- license = self._handle_license(data)
- if license:
- licenses['${PN}'] = license
- for pkgname, pkgitem in npmpackages.items():
- _, pdata = pkgitem
- license = self._handle_license(pdata)
- if license:
- licenses[pkgname] = license
- # Now write out the package-specific license values
- # We need to strip out the json data dicts for this since split_pkg_licenses
- # isn't expecting it
- packages = OrderedDict((x,y[0]) for x,y in npmpackages.items())
- packages['${PN}'] = ''
- pkglicenses = split_pkg_licenses(licvalues, packages, lines_after, licenses)
- all_licenses = list(set([item.replace('_', ' ') for pkglicense in pkglicenses.values() for item in pkglicense]))
- if '&' in all_licenses:
- all_licenses.remove('&')
- extravalues['LICENSE'] = ' & '.join(all_licenses)
-
- # Need to move S setting after inherit npm
- for i, line in enumerate(lines_before):
- if line.startswith('S ='):
- lines_before.pop(i)
- lines_after.insert(0, '# Must be set after inherit npm since that itself sets S')
- lines_after.insert(1, line)
- break
-
- return True
-
- return False
-
- # FIXME this is duplicated from lib/bb/fetch2/npm.py
- def _parse_view(self, output):
- '''
- Parse the output of npm view --json; the last JSON result
- is assumed to be the one that we're interested in.
- '''
- pdata = None
- outdeps = {}
- datalines = []
- bracelevel = 0
- for line in output.splitlines():
- if bracelevel:
- datalines.append(line)
- elif '{' in line:
- datalines = []
- datalines.append(line)
- bracelevel = bracelevel + line.count('{') - line.count('}')
- if datalines:
- pdata = json.loads('\n'.join(datalines))
- return pdata
-
- # FIXME this is effectively duplicated from lib/bb/fetch2/npm.py
- # (split out from _getdependencies())
- def get_npm_data(self, pkg, version, d):
- import bb.fetch2
- pkgfullname = pkg
- if version != '*' and not '/' in version:
- pkgfullname += "@'%s'" % version
- logger.debug(2, "Calling getdeps on %s" % pkg)
- runenv = dict(os.environ, PATH=d.getVar('PATH'))
- fetchcmd = "npm view %s --json" % pkgfullname
- output, _ = bb.process.run(fetchcmd, stderr=subprocess.STDOUT, env=runenv, shell=True)
- data = self._parse_view(output)
- return data
-
- # FIXME this is effectively duplicated from lib/bb/fetch2/npm.py
- # (split out from _getdependencies())
- def get_npm_package_dependencies(self, pdata, fetchdev):
- dependencies = pdata.get('dependencies', {})
- optionalDependencies = pdata.get('optionalDependencies', {})
- dependencies.update(optionalDependencies)
- if fetchdev:
- devDependencies = pdata.get('devDependencies', {})
- dependencies.update(devDependencies)
- else:
- devDependencies = {}
- depsfound = {}
- optdepsfound = {}
- devdepsfound = {}
- for dep in dependencies:
- if dep in optionalDependencies:
- optdepsfound[dep] = dependencies[dep]
- elif dep in devDependencies:
- devdepsfound[dep] = dependencies[dep]
- else:
- depsfound[dep] = dependencies[dep]
- return depsfound, optdepsfound, devdepsfound
-
- # FIXME this is effectively duplicated from lib/bb/fetch2/npm.py
- # (split out from _getdependencies())
- def check_npm_optional_dependency(self, pdata):
- pkg_os = pdata.get('os', None)
- if pkg_os:
- if not isinstance(pkg_os, list):
- pkg_os = [pkg_os]
- blacklist = False
- for item in pkg_os:
- if item.startswith('!'):
- blacklist = True
- break
- if (not blacklist and 'linux' not in pkg_os) or '!linux' in pkg_os:
- pkg = pdata.get('name', 'Unnamed package')
- logger.debug(2, "Skipping %s since it's incompatible with Linux" % pkg)
- return False
- return True
+ if not files:
+ return False
+
+ with open(files[0], "r") as f:
+ data = json.load(f)
+ if "name" not in data or "version" not in data:
+ return False
+
+ extravalues["PN"] = npm_package(data["name"])
+ extravalues["PV"] = data["version"]
+
+ if "description" in data:
+ extravalues["SUMMARY"] = data["description"]
+
+ if "homepage" in data:
+ extravalues["HOMEPAGE"] = data["homepage"]
+
+ dev = bb.utils.to_boolean(str(extravalues.get("NPM_INSTALL_DEV", "0")), False)
+ registry = self._get_registry(lines_before)
+
+ bb.note("Checking if npm is available ...")
+ # The native npm is used here (and not the host one) to ensure that the
+ # npm version is high enough for an efficient dependency tree
+ # resolution and to avoid issues with the shrinkwrap file format.
+ # Moreover, the native npm is mandatory for the build.
+ bindir = self._ensure_npm()
+
+ d = bb.data.createCopy(TINFOIL.config_data)
+ d.prependVar("PATH", bindir + ":")
+ d.setVar("S", srctree)
+
+ bb.note("Generating shrinkwrap file ...")
+ # To generate the shrinkwrap file the dependencies have to be installed
+ # first. During the generation process some files may be updated /
+ # deleted. By default devtool tracks the diffs in the srctree and raises
+ # errors when finishing the recipe if some diffs are found.
+ git_exclude_file = os.path.join(srctree, ".git", "info", "exclude")
+ if os.path.exists(git_exclude_file):
+ with open(git_exclude_file, "r+") as f:
+ lines = f.readlines()
+ for line in ["/node_modules/", "/npm-shrinkwrap.json"]:
+ if line + "\n" not in lines:  # readlines() keeps the trailing newline
+ f.write(line + "\n")
+
+ lock_file = os.path.join(srctree, "package-lock.json")
+ lock_copy = lock_file + ".copy"
+ if os.path.exists(lock_file):
+ bb.utils.copyfile(lock_file, lock_copy)
+
+ self._run_npm_install(d, srctree, registry, dev)
+ shrinkwrap_file = self._generate_shrinkwrap(d, srctree, dev)
+
+ with open(shrinkwrap_file, "r") as f:
+ shrinkwrap = json.load(f)
+
+ if os.path.exists(lock_copy):
+ bb.utils.movefile(lock_copy, lock_file)
+
+ # Add the shrinkwrap file as 'extrafiles'
+ shrinkwrap_copy = shrinkwrap_file + ".copy"
+ bb.utils.copyfile(shrinkwrap_file, shrinkwrap_copy)
+ extravalues.setdefault("extrafiles", {})
+ extravalues["extrafiles"]["npm-shrinkwrap.json"] = shrinkwrap_copy
+
+ url_local = "npmsw://%s" % shrinkwrap_file
+ url_recipe = "npmsw://${THISDIR}/${BPN}/npm-shrinkwrap.json"
+
+ if dev:
+ url_local += ";dev=1"
+ url_recipe += ";dev=1"
+
+ # Add the npmsw url in the SRC_URI of the generated recipe
+ def _handle_srcuri(varname, origvalue, op, newlines):
+ """Update the version value and add the 'npmsw://' url"""
+ value = origvalue.replace("version=" + data["version"], "version=${PV}")
+ value = value.replace("version=latest", "version=${PV}")
+ values = [line.strip() for line in value.strip('\n').splitlines()]
+ if "dependencies" in shrinkwrap.get("packages", {}).get("", {}):
+ values.append(url_recipe)
+ return values, None, 4, False
+
+ (_, newlines) = bb.utils.edit_metadata(lines_before, ["SRC_URI"], _handle_srcuri)
+ lines_before[:] = [line.rstrip('\n') for line in newlines]
+
+ # In order to generate correct license checksums in the recipe, the
+ # dependencies have to be fetched again using the npmsw url
+ bb.note("Fetching npm dependencies ...")
+ bb.utils.remove(os.path.join(srctree, "node_modules"), recurse=True)
+ fetcher = bb.fetch2.Fetch([url_local], d)
+ fetcher.download()
+ fetcher.unpack(srctree)
+
+ bb.note("Handling licences ...")
+ (licfiles, packages) = self._handle_licenses(srctree, shrinkwrap_file, dev)
+
+ def _guess_odd_license(licfiles):
+ md5sums = get_license_md5sums(d, linenumbers=True)
+
+ chksums = []
+ licenses = []
+ for licfile in licfiles:
+ f = os.path.join(srctree, licfile)
+ md5value = bb.utils.md5_file(f)
+ (license, beginline, endline, md5) = md5sums.get(md5value,
+ (None, "", "", ""))
+ if not license:
+ license = "Unknown"
+ logger.info("Please add the following line for '%s' to a "
+ "'lib/recipetool/licenses.csv' and replace `Unknown`, "
+ "`X`, `Y` and `MD5` with the license, begin line, "
+ "end line and partial MD5 checksum:\n" \
+ "%s,Unknown,X,Y,MD5" % (licfile, md5value))
+ chksums.append("file://%s%s%s;md5=%s" % (licfile,
+ ";beginline=%s" % (beginline) if beginline else "",
+ ";endline=%s" % (endline) if endline else "",
+ md5 if md5 else md5value))
+ licenses.append((license, licfile, md5value))
+ return (licenses, chksums)
+
+ (licenses, extravalues["LIC_FILES_CHKSUM"]) = _guess_odd_license(licfiles)
+ split_pkg_licenses([*licenses, *guess_license(srctree, d)], packages, lines_after)
+
+ classes.append("npm")
+ handled.append("buildsystem")
+
+ # Check if package has peer dependencies and inform the user
+ self._handle_peer_dependency(shrinkwrap_file)
+
+ return True
def register_recipe_handlers(handlers):
+ """Register the npm handler"""
handlers.append((NpmRecipeHandler(), 60))
diff --git a/scripts/lib/recipetool/edit.py b/scripts/lib/recipetool/edit.py
index c4789a9994..d5b980a1c0 100644
--- a/scripts/lib/recipetool/edit.py
+++ b/scripts/lib/recipetool/edit.py
@@ -6,18 +6,8 @@
#
# Copyright (C) 2018 Mentor Graphics Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import argparse
import errno
@@ -44,7 +34,7 @@ def edit(args):
recipe_path = tinfoil.get_recipe_file(args.target)
appends = tinfoil.get_file_appends(recipe_path)
- return scriptutils.run_editor([recipe_path] + appends, logger)
+ return scriptutils.run_editor([recipe_path] + list(appends), logger)
def register_commands(subparsers):
diff --git a/scripts/lib/recipetool/licenses.csv b/scripts/lib/recipetool/licenses.csv
new file mode 100644
index 0000000000..80851111b3
--- /dev/null
+++ b/scripts/lib/recipetool/licenses.csv
@@ -0,0 +1,37 @@
+0636e73ff0215e8d672dc4c32c317bb3,GPL-2.0-only
+12f884d2ae1ff87c09e5b7ccc2c4ca7e,GPL-2.0-only
+18810669f13b87348459e611d31ab760,GPL-2.0-only
+252890d9eee26aab7b432e8b8a616475,LGPL-2.0-only
+2d5025d4aa3495befef8f17206a5b0a1,LGPL-2.1-only
+3214f080875748938ba060314b4f727d,LGPL-2.0-only
+385c55653886acac3821999a3ccd17b3,Artistic-1.0 | GPL-2.0-only
+393a5ca445f6965873eca0259a17f833,GPL-2.0-only
+3b83ef96387f14655fc854ddc3c6bd57,Apache-2.0
+3bf50002aefd002f49e7bb854063f7e7,LGPL-2.0-only
+4325afd396febcb659c36b49533135d4,GPL-2.0-only
+4fbd65380cdd255951079008b364516c,LGPL-2.1-only
+54c7042be62e169199200bc6477f04d1,BSD-3-Clause
+55ca817ccb7d5b5b66355690e9abc605,LGPL-2.0-only
+59530bdf33659b29e73d4adb9f9f6552,GPL-2.0-only
+5f30f0716dfdd0d91eb439ebec522ec2,LGPL-2.0-only
+6a6a8e020838b23406c81b19c1d46df6,LGPL-3.0-only
+751419260aa954499f7abaabaa882bbe,GPL-2.0-only
+7fbc338309ac38fefcd64b04bb903e34,LGPL-2.1-only
+8ca43cbc842c2336e835926c2166c28b,GPL-2.0-only
+94d55d512a9ba36caa9b7df079bae19f,GPL-2.0-only
+9ac2e7cff1ddaf48b6eab6028f23ef88,GPL-2.0-only
+9f604d8a4f8e74f4f5140845a21b6674,LGPL-2.0-only
+a6f89e2100d9b6cdffcea4f398e37343,LGPL-2.1-only
+b234ee4d69f5fce4486a80fdaf4a4263,GPL-2.0-only
+bbb461211a33b134d42ed5ee802b37ff,LGPL-2.1-only
+bfe1f75d606912a4111c90743d6c7325,MPL-1.1-only
+c93c0550bd3173f4504b2cbd8991e50b,GPL-2.0-only
+d32239bcb673463ab874e80d47fae504,GPL-3.0-only
+d7810fab7487fb0aad327b76f1be7cd7,GPL-2.0-only
+d8045f3b8f929c1cb29a1e3fd737b499,LGPL-2.1-only
+db979804f025cf55aabec7129cb671ed,LGPL-2.0-only
+eb723b61539feef013de476e68b5c50a,GPL-2.0-only
+ebb5c50ab7cab4baeffba14977030c07,GPL-2.0-only
+f27defe1e96c2e1ecd4e0c9be8967949,GPL-3.0-only
+fad9b3332be894bab9bc501572864b29,LGPL-2.1-only
+fbc093901857fcd118f065f900982c24,LGPL-2.1-only
diff --git a/scripts/lib/recipetool/newappend.py b/scripts/lib/recipetool/newappend.py
index decce83fac..08e2474dc4 100644
--- a/scripts/lib/recipetool/newappend.py
+++ b/scripts/lib/recipetool/newappend.py
@@ -7,18 +7,8 @@
#
# Copyright (C) 2015 Christopher Larson <kergoth@gmail.com>
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import argparse
import errno
@@ -58,11 +48,11 @@ def newappend(args):
return 1
if not path_ok:
- logger.warn('Unable to determine correct subdirectory path for bbappend file - check that what %s adds to BBFILES also matches .bbappend files. Using %s for now, but until you fix this the bbappend will not be applied.', os.path.join(args.destlayer, 'conf', 'layer.conf'), os.path.dirname(append_path))
+ logger.warning('Unable to determine correct subdirectory path for bbappend file - check that what %s adds to BBFILES also matches .bbappend files. Using %s for now, but until you fix this the bbappend will not be applied.', os.path.join(args.destlayer, 'conf', 'layer.conf'), os.path.dirname(append_path))
layerdirs = [os.path.abspath(layerdir) for layerdir in rd.getVar('BBLAYERS').split()]
if not os.path.abspath(args.destlayer) in layerdirs:
- logger.warn('Specified layer is not currently enabled in bblayers.conf, you will need to add it before this bbappend will be active')
+ logger.warning('Specified layer is not currently enabled in bblayers.conf, you will need to add it before this bbappend will be active')
if not os.path.exists(append_path):
bb.utils.mkdirhier(os.path.dirname(append_path))
diff --git a/scripts/lib/recipetool/setvar.py b/scripts/lib/recipetool/setvar.py
index 9de315a0ef..b5ad335cae 100644
--- a/scripts/lib/recipetool/setvar.py
+++ b/scripts/lib/recipetool/setvar.py
@@ -2,18 +2,8 @@
#
# Copyright (C) 2015 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import sys
import os
@@ -59,6 +49,7 @@ def setvar(args):
for patch in patches:
for line in patch:
sys.stdout.write(line)
+ tinfoil.modified_files()
return 0
diff --git a/scripts/lib/resulttool/__init__.py b/scripts/lib/resulttool/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/scripts/lib/resulttool/__init__.py
diff --git a/scripts/lib/resulttool/log.py b/scripts/lib/resulttool/log.py
new file mode 100644
index 0000000000..15148ca288
--- /dev/null
+++ b/scripts/lib/resulttool/log.py
@@ -0,0 +1,107 @@
+# resulttool - Show logs
+#
+# Copyright (c) 2019 Garmin International
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+import os
+import resulttool.resultutils as resultutils
+
+def show_ptest(result, ptest, logger):
+ logdata = resultutils.ptestresult_get_log(result, ptest)
+ if logdata is not None:
+ print(logdata)
+ return 0
+
+ print("ptest '%s' log not found" % ptest)
+ return 1
+
+def show_reproducible(result, reproducible, logger):
+ try:
+ print(result['reproducible'][reproducible]['diffoscope.text'])
+ return 0
+
+ except KeyError:
+ print("reproducible '%s' not found" % reproducible)
+ return 1
+
+def log(args, logger):
+ results = resultutils.load_resultsdata(args.source)
+
+ for _, run_name, _, r in resultutils.test_run_results(results):
+ if args.list_ptest:
+ print('\n'.join(sorted(r['ptestresult.sections'].keys())))
+
+ if args.dump_ptest:
+ for sectname in ['ptestresult.sections', 'ltpposixresult.sections', 'ltpresult.sections']:
+ if sectname in r:
+ for name, ptest in r[sectname].items():
+ logdata = resultutils.generic_get_log(sectname, r, name)
+ if logdata is not None:
+ dest_dir = args.dump_ptest
+ if args.prepend_run:
+ dest_dir = os.path.join(dest_dir, run_name)
+ if not sectname.startswith("ptest"):
+ dest_dir = os.path.join(dest_dir, sectname.split(".")[0])
+
+ os.makedirs(dest_dir, exist_ok=True)
+ dest = os.path.join(dest_dir, '%s.log' % name)
+ if os.path.exists(dest):
+ print("Overlapping ptest logs found, skipping %s. The '--prepend-run' option would avoid this" % name)
+ continue
+ print(dest)
+ with open(dest, 'w') as f:
+ f.write(logdata)
+
+ if args.raw_ptest:
+ found = False
+ for sectname in ['ptestresult.rawlogs', 'ltpposixresult.rawlogs', 'ltpresult.rawlogs']:
+ rawlog = resultutils.generic_get_rawlogs(sectname, r)
+ if rawlog is not None:
+ print(rawlog)
+ found = True
+ if not found:
+ print('Raw ptest logs not found')
+ return 1
+
+ if args.raw_reproducible:
+ if 'reproducible.rawlogs' in r:
+ print(r['reproducible.rawlogs']['log'])
+ else:
+ print('Raw reproducible logs not found')
+ return 1
+
+ for ptest in args.ptest:
+ if not show_ptest(r, ptest, logger):
+ return 1
+
+ for reproducible in args.reproducible:
+ if not show_reproducible(r, reproducible, logger):
+ return 1
+
+def register_commands(subparsers):
+ """Register subcommands from this plugin"""
+ parser = subparsers.add_parser('log', help='show logs',
+ description='show the logs from test results',
+ group='analysis')
+ parser.set_defaults(func=log)
+ parser.add_argument('source',
+ help='the results file/directory/URL to import')
+ parser.add_argument('--list-ptest', action='store_true',
+ help='list the ptest test names')
+ parser.add_argument('--ptest', action='append', default=[],
+ help='show logs for a ptest')
+ parser.add_argument('--dump-ptest', metavar='DIR',
+ help='Dump all ptest log files to the specified directory.')
+ parser.add_argument('--reproducible', action='append', default=[],
+ help='show logs for a reproducible test')
+ parser.add_argument('--prepend-run', action='store_true',
+ help='''Dump ptest results to a subdirectory named after the test run when using --dump-ptest.
+ Required if more than one test run is present in the result file''')
+ parser.add_argument('--raw', action='store_true',
+ help='show raw (ptest) logs. Deprecated. Alias for "--raw-ptest"', dest='raw_ptest')
+ parser.add_argument('--raw-ptest', action='store_true',
+ help='show raw ptest log')
+ parser.add_argument('--raw-reproducible', action='store_true',
+ help='show raw reproducible build logs')
+
diff --git a/scripts/lib/resulttool/manualexecution.py b/scripts/lib/resulttool/manualexecution.py
new file mode 100755
index 0000000000..ecb27c5933
--- /dev/null
+++ b/scripts/lib/resulttool/manualexecution.py
@@ -0,0 +1,235 @@
+# test case management tool - manual execution from testopia test cases
+#
+# Copyright (c) 2018, Intel Corporation.
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import argparse
+import json
+import os
+import sys
+import datetime
+import re
+import copy
+from oeqa.core.runner import OETestResultJSONHelper
+
+
+def load_json_file(f):
+ with open(f, "r") as filedata:
+ return json.load(filedata)
+
+def write_json_file(f, json_data):
+ os.makedirs(os.path.dirname(f), exist_ok=True)
+ with open(f, 'w') as filedata:
+ filedata.write(json.dumps(json_data, sort_keys=True, indent=4))
+
+class ManualTestRunner(object):
+
+ def _get_test_module(self, case_file):
+ return os.path.basename(case_file).split('.')[0]
+
+ def _get_input(self, config):
+ while True:
+ output = input('{} = '.format(config))
+ if re.match('^[a-z0-9-.]+$', output):
+ break
+ print('Only lowercase alphanumeric characters, hyphens and dots are allowed. Please try again.')
+ return output
+
+ def _get_available_config_options(self, config_options, test_module, target_config):
+ avail_config_options = None
+ if test_module in config_options:
+ avail_config_options = config_options[test_module].get(target_config)
+ return avail_config_options
+
+ def _choose_config_option(self, options):
+ while True:
+ output = input('{} = '.format('Option index number'))
+ if output in options:
+ break
+ print('Only integer index inputs from above available configuration options are allowed. Please try again.')
+ return options[output]
+
+ def _get_config(self, config_options, test_module):
+ from oeqa.utils.metadata import get_layers
+ from oeqa.utils.commands import get_bb_var
+ from resulttool.resultutils import store_map
+
+ layers = get_layers(get_bb_var('BBLAYERS'))
+ configurations = {}
+ configurations['LAYERS'] = layers
+ configurations['STARTTIME'] = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
+ configurations['TEST_TYPE'] = 'manual'
+ configurations['TEST_MODULE'] = test_module
+
+ extra_config = set(store_map['manual']) - set(configurations)
+ for config in sorted(extra_config):
+ avail_config_options = self._get_available_config_options(config_options, test_module, config)
+ if avail_config_options:
+ print('---------------------------------------------')
+ print('These are the available options for configuration #%s:' % config)
+ print('---------------------------------------------')
+ for option, _ in sorted(avail_config_options.items(), key=lambda x: int(x[0])):
+ print('%s: %s' % (option, avail_config_options[option]))
+ print('Please select a configuration option by entering its integer index number.')
+ value_conf = self._choose_config_option(avail_config_options)
+ print('---------------------------------------------\n')
+ else:
+ print('---------------------------------------------')
+ print('This is configuration #%s. Please provide a configuration value (use "None" if not applicable).' % config)
+ print('---------------------------------------------')
+ value_conf = self._get_input('Configuration Value')
+ print('---------------------------------------------\n')
+ configurations[config] = value_conf
+ return configurations
+
+ def _execute_test_steps(self, case):
+ test_result = {}
+ print('------------------------------------------------------------------------')
+ print('Executing test case: %s' % case['test']['@alias'])
+ print('------------------------------------------------------------------------')
+ print('You have a total of %s test steps to be executed.' % len(case['test']['execution']))
+ print('------------------------------------------------------------------------\n')
+ for step, _ in sorted(case['test']['execution'].items(), key=lambda x: int(x[0])):
+ print('Step %s: %s' % (step, case['test']['execution'][step]['action']))
+ expected_output = case['test']['execution'][step]['expected_results']
+ if expected_output:
+ print('Expected output: %s' % expected_output)
+ while True:
+ done = input('\nPlease provide test results: (P)assed/(F)ailed/(B)locked/(S)kipped? \n').lower()
+ result_types = {'p':'PASSED',
+ 'f':'FAILED',
+ 'b':'BLOCKED',
+ 's':'SKIPPED'}
+ if done in result_types:
+ res = result_types[done]
+ if res == 'FAILED':
+ log_input = input('\nPlease enter the error and the description of the log: (Ex:log:211 Error Bitbake)\n')
+ test_result.update({case['test']['@alias']: {'status': '%s' % res, 'log': '%s' % log_input}})
+ else:
+ test_result.update({case['test']['@alias']: {'status': '%s' % res}})
+ break
+ print('Invalid input!')
+ return test_result
+
+ def _get_write_dir(self):
+ return os.environ['BUILDDIR'] + '/tmp/log/manual/'
+
+ def run_test(self, case_file, config_options_file, testcase_config_file):
+ test_module = self._get_test_module(case_file)
+ cases = load_json_file(case_file)
+ config_options = {}
+ if config_options_file:
+ config_options = load_json_file(config_options_file)
+ configurations = self._get_config(config_options, test_module)
+ result_id = 'manual_%s_%s' % (test_module, configurations['STARTTIME'])
+ test_results = {}
+ if testcase_config_file:
+ test_case_config = load_json_file(testcase_config_file)
+ test_case_to_execute = test_case_config['testcases']
+ for case in copy.deepcopy(cases):
+ if case['test']['@alias'] not in test_case_to_execute:
+ cases.remove(case)
+
+ print('\nTotal number of test cases in this test suite: %s\n' % len(cases))
+ for c in cases:
+ test_result = self._execute_test_steps(c)
+ test_results.update(test_result)
+ return configurations, result_id, self._get_write_dir(), test_results
+
+ def _get_true_false_input(self, input_message):
+ yes_list = ['Y', 'YES']
+ no_list = ['N', 'NO']
+ while True:
+ more_config_option = input(input_message).upper()
+ if more_config_option in yes_list or more_config_option in no_list:
+ break
+ print('Invalid input!')
+ if more_config_option in no_list:
+ return False
+ return True
+
+ def make_config_option_file(self, logger, case_file, config_options_file):
+ config_options = {}
+ if config_options_file:
+ config_options = load_json_file(config_options_file)
+ new_test_module = self._get_test_module(case_file)
+ print('Creating configuration options file for test module: %s' % new_test_module)
+ new_config_options = {}
+
+ while True:
+ config_name = input('\nPlease provide test configuration to create:\n').upper()
+ new_config_options[config_name] = {}
+ while True:
+ config_value = self._get_input('Configuration possible option value')
+ config_option_index = len(new_config_options[config_name]) + 1
+ new_config_options[config_name][config_option_index] = config_value
+ more_config_option = self._get_true_false_input('\nIs there another configuration option value to input: (Y)es/(N)o\n')
+ if not more_config_option:
+ break
+ more_config = self._get_true_false_input('\nIs there another configuration to create: (Y)es/(N)o\n')
+ if not more_config:
+ break
+
+ if new_config_options:
+ config_options[new_test_module] = new_config_options
+ if not config_options_file:
+ config_options_file = os.path.join(self._get_write_dir(), 'manual_config_options.json')
+ write_json_file(config_options_file, config_options)
+ logger.info('Configuration option file created at %s' % config_options_file)
+
+ def make_testcase_config_file(self, logger, case_file, testcase_config_file):
+ if testcase_config_file:
+ if os.path.exists(testcase_config_file):
+ print('\nTest configuration file with name %s already exists. Please provide a unique file name' % (testcase_config_file))
+ return 0
+
+ if not testcase_config_file:
+ testcase_config_file = os.path.join(self._get_write_dir(), "testconfig_new.json")
+
+ testcase_config = {}
+ cases = load_json_file(case_file)
+ new_test_module = self._get_test_module(case_file)
+ new_testcase_config = {}
+ new_testcase_config['testcases'] = []
+
+ print('\nAdd testcases for this configuration file:')
+ for case in cases:
+ print('\n' + case['test']['@alias'])
+ add_tc_config = self._get_true_false_input('\nDo you want to add this test case to the test configuration: (Y)es/(N)o\n')
+ if add_tc_config:
+ new_testcase_config['testcases'].append(case['test']['@alias'])
+ write_json_file(testcase_config_file, new_testcase_config)
+ logger.info('Testcase Configuration file created at %s' % testcase_config_file)
+
+def manualexecution(args, logger):
+ testrunner = ManualTestRunner()
+ if args.make_config_options_file:
+ testrunner.make_config_option_file(logger, args.file, args.config_options_file)
+ return 0
+ if args.make_testcase_config_file:
+ testrunner.make_testcase_config_file(logger, args.file, args.testcase_config_file)
+ return 0
+ configurations, result_id, write_dir, test_results = testrunner.run_test(args.file, args.config_options_file, args.testcase_config_file)
+ resultjsonhelper = OETestResultJSONHelper()
+ resultjsonhelper.dump_testresult_file(write_dir, configurations, result_id, test_results)
+ return 0
+
+def register_commands(subparsers):
+ """Register subcommands from this plugin"""
+ parser_build = subparsers.add_parser('manualexecution', help='helper for populating results during manual test execution.',
+ description='helper for populating results during manual test execution. You can find manual test case JSON files in meta/lib/oeqa/manual/',
+ group='manualexecution')
+ parser_build.set_defaults(func=manualexecution)
+ parser_build.add_argument('file', help='specify path to manual test case JSON file. Note: please use \"\" to quote the file path.')
+ parser_build.add_argument('-c', '--config-options-file', default='',
+ help='the config options file to import and used as available configuration option selection or make config option file')
+ parser_build.add_argument('-m', '--make-config-options-file', action='store_true',
+ help='make the configuration options file based on provided inputs')
+ parser_build.add_argument('-t', '--testcase-config-file', default='',
+ help='the testcase configuration file to enable user to run a selected set of test case or make a testcase configuration file')
+ parser_build.add_argument('-d', '--make-testcase-config-file', action='store_true',
+ help='make the testcase configuration file to run a set of test cases based on user selection') \ No newline at end of file
diff --git a/scripts/lib/resulttool/merge.py b/scripts/lib/resulttool/merge.py
new file mode 100644
index 0000000000..18b4825a18
--- /dev/null
+++ b/scripts/lib/resulttool/merge.py
@@ -0,0 +1,46 @@
+# resulttool - merge multiple testresults.json files into a file or directory
+#
+# Copyright (c) 2019, Intel Corporation.
+# Copyright (c) 2019, Linux Foundation
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import os
+import json
+import resulttool.resultutils as resultutils
+
+def merge(args, logger):
+ configvars = {}
+ if not args.not_add_testseries:
+ configvars = resultutils.extra_configvars.copy()
+ if args.executed_by:
+ configvars['EXECUTED_BY'] = args.executed_by
+ if resultutils.is_url(args.target_results) or os.path.isdir(args.target_results):
+ results = resultutils.load_resultsdata(args.target_results, configmap=resultutils.store_map, configvars=configvars)
+ resultutils.append_resultsdata(results, args.base_results, configmap=resultutils.store_map, configvars=configvars)
+ resultutils.save_resultsdata(results, args.target_results)
+ else:
+ results = resultutils.load_resultsdata(args.base_results, configmap=resultutils.flatten_map, configvars=configvars)
+ if os.path.exists(args.target_results):
+ resultutils.append_resultsdata(results, args.target_results, configmap=resultutils.flatten_map, configvars=configvars)
+ resultutils.save_resultsdata(results, os.path.dirname(args.target_results), fn=os.path.basename(args.target_results))
+
+ logger.info('Merged results to %s' % os.path.dirname(args.target_results))
+
+ return 0
+
+def register_commands(subparsers):
+ """Register subcommands from this plugin"""
+ parser_build = subparsers.add_parser('merge', help='merge test result files/directories/URLs',
+ description='merge the results from multiple files/directories/URLs into the target file or directory',
+ group='setup')
+ parser_build.set_defaults(func=merge)
+ parser_build.add_argument('base_results',
+ help='the results file/directory/URL to import')
+ parser_build.add_argument('target_results',
+ help='the target file or directory to merge the base_results with')
+ parser_build.add_argument('-t', '--not-add-testseries', action='store_true',
+ help='do not add testseries configuration to results')
+ parser_build.add_argument('-x', '--executed-by', default='',
+ help='add executed-by configuration to each result file')
diff --git a/scripts/lib/resulttool/regression.py b/scripts/lib/resulttool/regression.py
new file mode 100644
index 0000000000..10e7d13841
--- /dev/null
+++ b/scripts/lib/resulttool/regression.py
@@ -0,0 +1,447 @@
+# resulttool - regression analysis
+#
+# Copyright (c) 2019, Intel Corporation.
+# Copyright (c) 2019, Linux Foundation
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import resulttool.resultutils as resultutils
+
+from oeqa.utils.git import GitRepo
+import oeqa.utils.gitarchive as gitarchive
+
+METADATA_MATCH_TABLE = {
+ "oeselftest": "OESELFTEST_METADATA"
+}
+
+OESELFTEST_METADATA_GUESS_TABLE = {
+ "trigger-build-posttrigger": {
+ "run_all_tests": False,
+ "run_tests":["buildoptions.SourceMirroring.test_yocto_source_mirror"],
+ "skips": None,
+ "machine": None,
+ "select_tags":None,
+ "exclude_tags": None
+ },
+ "reproducible": {
+ "run_all_tests": False,
+ "run_tests":["reproducible"],
+ "skips": None,
+ "machine": None,
+ "select_tags":None,
+ "exclude_tags": None
+ },
+ "arch-qemu-quick": {
+ "run_all_tests": True,
+ "run_tests":None,
+ "skips": None,
+ "machine": None,
+ "select_tags":["machine"],
+ "exclude_tags": None
+ },
+ "arch-qemu-full-x86-or-x86_64": {
+ "run_all_tests": True,
+ "run_tests":None,
+ "skips": None,
+ "machine": None,
+ "select_tags":["machine", "toolchain-system"],
+ "exclude_tags": None
+ },
+ "arch-qemu-full-others": {
+ "run_all_tests": True,
+ "run_tests":None,
+ "skips": None,
+ "machine": None,
+ "select_tags":["machine", "toolchain-user"],
+ "exclude_tags": None
+ },
+ "selftest": {
+ "run_all_tests": True,
+ "run_tests":None,
+ "skips": ["distrodata.Distrodata.test_checkpkg", "buildoptions.SourceMirroring.test_yocto_source_mirror", "reproducible"],
+ "machine": None,
+ "select_tags":None,
+ "exclude_tags": ["machine", "toolchain-system", "toolchain-user"]
+ },
+ "bringup": {
+ "run_all_tests": True,
+ "run_tests":None,
+ "skips": ["distrodata.Distrodata.test_checkpkg", "buildoptions.SourceMirroring.test_yocto_source_mirror"],
+ "machine": None,
+ "select_tags":None,
+ "exclude_tags": ["machine", "toolchain-system", "toolchain-user"]
+ }
+}
+
+STATUS_STRINGS = {
+ "None": "No matching test result"
+}
+
+REGRESSIONS_DISPLAY_LIMIT = 50
+
+MISSING_TESTS_BANNER = "-------------------------- Missing tests --------------------------"
+ADDITIONAL_DATA_BANNER = "--------------------- Matches and improvements --------------------"
+
+def test_has_at_least_one_matching_tag(test, tag_list):
+ return "oetags" in test and any(oetag in tag_list for oetag in test["oetags"])
+
+def all_tests_have_at_least_one_matching_tag(results, tag_list):
+ return all(test_has_at_least_one_matching_tag(test_result, tag_list) or test_name.startswith("ptestresult") for (test_name, test_result) in results.items())
+
+def any_test_have_any_matching_tag(results, tag_list):
+ return any(test_has_at_least_one_matching_tag(test, tag_list) for test in results.values())
+
+def have_skipped_test(result, test_prefix):
+ return all(result[test]['status'] == "SKIPPED" for test in result if test.startswith(test_prefix))
+
+def have_all_tests_skipped(result, test_prefixes_list):
+ return all(have_skipped_test(result, test_prefix) for test_prefix in test_prefixes_list)
+
+def guess_oeselftest_metadata(results):
+ """
+ When an oeselftest test result lacks OESELFTEST_METADATA, we can try to guess it from the results content:
+ check the results for specific values (absence/presence of oetags, number and name of executed tests...),
+ and if they match one of the known configurations from the autobuilder, apply the guessed OESELFTEST_METADATA
+ to allow proper test filtering.
+ This guessing process is tightly coupled to config.json in the autobuilder. It should trigger less and less,
+ as new tests will have OESELFTEST_METADATA properly appended at test reporting time.
+ """
+
+ if len(results) == 1 and "buildoptions.SourceMirroring.test_yocto_source_mirror" in results:
+ return OESELFTEST_METADATA_GUESS_TABLE['trigger-build-posttrigger']
+ elif all(result.startswith("reproducible") for result in results):
+ return OESELFTEST_METADATA_GUESS_TABLE['reproducible']
+ elif all_tests_have_at_least_one_matching_tag(results, ["machine"]):
+ return OESELFTEST_METADATA_GUESS_TABLE['arch-qemu-quick']
+ elif all_tests_have_at_least_one_matching_tag(results, ["machine", "toolchain-system"]):
+ return OESELFTEST_METADATA_GUESS_TABLE['arch-qemu-full-x86-or-x86_64']
+ elif all_tests_have_at_least_one_matching_tag(results, ["machine", "toolchain-user"]):
+ return OESELFTEST_METADATA_GUESS_TABLE['arch-qemu-full-others']
+ elif not any_test_have_any_matching_tag(results, ["machine", "toolchain-user", "toolchain-system"]):
+ if have_all_tests_skipped(results, ["distrodata.Distrodata.test_checkpkg", "buildoptions.SourceMirroring.test_yocto_source_mirror", "reproducible"]):
+ return OESELFTEST_METADATA_GUESS_TABLE['selftest']
+ elif have_all_tests_skipped(results, ["distrodata.Distrodata.test_checkpkg", "buildoptions.SourceMirroring.test_yocto_source_mirror"]):
+ return OESELFTEST_METADATA_GUESS_TABLE['bringup']
+
+ return None
+
+
+def metadata_matches(base_configuration, target_configuration):
+ """
+ For passed base and target, check test type. If test type matches one of
+ properties described in METADATA_MATCH_TABLE, compare metadata if it is
+ present in base. Return true if metadata matches, or if base lacks some
+ data (either TEST_TYPE or the corresponding metadata)
+ """
+ test_type = base_configuration.get('TEST_TYPE')
+ if test_type not in METADATA_MATCH_TABLE:
+ return True
+
+ metadata_key = METADATA_MATCH_TABLE.get(test_type)
+ if target_configuration.get(metadata_key) != base_configuration.get(metadata_key):
+ return False
+
+ return True
+
+
+def machine_matches(base_configuration, target_configuration):
+ return base_configuration.get('MACHINE') == target_configuration.get('MACHINE')
+
+
+def can_be_compared(logger, base, target):
+ """
+ Some tests are not relevant for comparison, for example oeselftest
+ runs with different test sets or parameters. Return True if the tests
+ can be compared.
+ """
+ ret = True
+ base_configuration = base['configuration']
+ target_configuration = target['configuration']
+
+ # Older test results lack proper OESELFTEST_METADATA: if not present, try to guess it based on tests results.
+ if base_configuration.get('TEST_TYPE') == 'oeselftest' and 'OESELFTEST_METADATA' not in base_configuration:
+ guess = guess_oeselftest_metadata(base['result'])
+ if guess is None:
+ logger.error(f"ERROR: did not manage to guess oeselftest metadata for {base_configuration['STARTTIME']}")
+ else:
+ logger.debug(f"Enriching {base_configuration['STARTTIME']} with {guess}")
+ base_configuration['OESELFTEST_METADATA'] = guess
+ if target_configuration.get('TEST_TYPE') == 'oeselftest' and 'OESELFTEST_METADATA' not in target_configuration:
+ guess = guess_oeselftest_metadata(target['result'])
+ if guess is None:
+ logger.error(f"ERROR: did not manage to guess oeselftest metadata for {target_configuration['STARTTIME']}")
+ else:
+ logger.debug(f"Enriching {target_configuration['STARTTIME']} with {guess}")
+ target_configuration['OESELFTEST_METADATA'] = guess
+
+ # Test runs with LTP results in them should only be compared with other runs that have LTP tests in them
+ if base_configuration.get('TEST_TYPE') == 'runtime' and any(result.startswith("ltpresult") for result in base['result']):
+ ret = target_configuration.get('TEST_TYPE') == 'runtime' and any(result.startswith("ltpresult") for result in target['result'])
+
+ return ret and metadata_matches(base_configuration, target_configuration) \
+ and machine_matches(base_configuration, target_configuration)
+
+def get_status_str(raw_status):
+ raw_status_lower = raw_status.lower() if raw_status else "None"
+ return STATUS_STRINGS.get(raw_status_lower, raw_status)
+
+def get_additional_info_line(new_pass_count, new_tests):
+ result=[]
+ if new_tests:
+ result.append(f'+{new_tests} test(s) present')
+ if new_pass_count:
+ result.append(f'+{new_pass_count} test(s) now passing')
+
+ if not result:
+ return ""
+
+ return ' -> ' + ', '.join(result) + '\n'
+
+def compare_result(logger, base_name, target_name, base_result, target_result, display_limit=None):
+ base_result = base_result.get('result')
+ target_result = target_result.get('result')
+ result = {}
+ new_tests = 0
+ regressions = {}
+ resultstring = ""
+ new_pass_count = 0
+
+ display_limit = int(display_limit) if display_limit else REGRESSIONS_DISPLAY_LIMIT
+
+ if base_result and target_result:
+ for k in base_result:
+ base_testcase = base_result[k]
+ base_status = base_testcase.get('status')
+ if base_status:
+ target_testcase = target_result.get(k, {})
+ target_status = target_testcase.get('status')
+ if base_status != target_status:
+ result[k] = {'base': base_status, 'target': target_status}
+ else:
+                logger.error('Failed to retrieve base test case status: %s' % k)
+
+        # Also count new tests that were not present in the base results: these
+        # may be newly added tests, but they can also highlight test renames or
+        # fixed faulty ptests
+ for k in target_result:
+ if k not in base_result:
+ new_tests += 1
+ if result:
+ new_pass_count = sum(test['target'] is not None and test['target'].startswith("PASS") for test in result.values())
+ # Print a regression report only if at least one test has a regression status (FAIL, SKIPPED, absent...)
+ if new_pass_count < len(result):
+ resultstring = "Regression: %s\n %s\n" % (base_name, target_name)
+ for k in sorted(result):
+ if not result[k]['target'] or not result[k]['target'].startswith("PASS"):
+ # Differentiate each ptest kind when listing regressions
+ key_parts = k.split('.')
+ key = '.'.join(key_parts[:2]) if k.startswith('ptest') else key_parts[0]
+ # Append new regression to corresponding test family
+ regressions[key] = regressions.setdefault(key, []) + [' %s: %s -> %s\n' % (k, get_status_str(result[k]['base']), get_status_str(result[k]['target']))]
+            resultstring += f"    Total: {sum(len(regressions[r]) for r in regressions)} new regression(s):\n"
+ for k in regressions:
+ resultstring += f" {len(regressions[k])} regression(s) for {k}\n"
+                count_to_print = min(display_limit, len(regressions[k])) if display_limit > 0 else len(regressions[k])
+ resultstring += ''.join(regressions[k][:count_to_print])
+ if count_to_print < len(regressions[k]):
+                    resultstring += '    [...]\n'
+ if new_pass_count > 0:
+ resultstring += f' Additionally, {new_pass_count} previously failing test(s) is/are now passing\n'
+ if new_tests > 0:
+ resultstring += f' Additionally, {new_tests} new test(s) is/are present\n'
+ else:
+ resultstring = "%s\n%s\n" % (base_name, target_name)
+ result = None
+ else:
+ resultstring = "%s\n%s\n" % (base_name, target_name)
+
+ if not result:
+ additional_info = get_additional_info_line(new_pass_count, new_tests)
+ if additional_info:
+ resultstring += additional_info
+
+ return result, resultstring
+
+def get_results(logger, source):
+ return resultutils.load_resultsdata(source, configmap=resultutils.regression_map)
+
+def regression(args, logger):
+ base_results = get_results(logger, args.base_result)
+ target_results = get_results(logger, args.target_result)
+
+ regression_common(args, logger, base_results, target_results)
+
+# Some test case naming is poor and contains random strings, particularly lttng/babeltrace.
+# Truncating the test names works since they contain file and line number identifiers
+# which allows us to match them without the random components.
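+# For example (illustrative name), "ptestresult.lttng-tools.foo_-_random-id"
+# is truncated to "ptestresult.lttng-tools.foo".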
+def fixup_ptest_names(results, logger):
+ for r in results:
+ for i in results[r]:
+ tests = list(results[r][i]['result'].keys())
+ for test in tests:
+ new = None
+ if test.startswith(("ptestresult.lttng-tools.", "ptestresult.babeltrace.", "ptestresult.babeltrace2")) and "_-_" in test:
+ new = test.split("_-_")[0]
+ elif test.startswith(("ptestresult.curl.")) and "__" in test:
+ new = test.split("__")[0]
+ elif test.startswith(("ptestresult.dbus.")) and "__" in test:
+ new = test.split("__")[0]
+ elif test.startswith("ptestresult.binutils") and "build-st-" in test:
+ new = test.split(" ")[0]
+ elif test.startswith("ptestresult.gcc") and "/tmp/runtest." in test:
+ new = ".".join(test.split(".")[:2])
+ if new:
+ results[r][i]['result'][new] = results[r][i]['result'][test]
+ del results[r][i]['result'][test]
+
+def regression_common(args, logger, base_results, target_results):
+ if args.base_result_id:
+ base_results = resultutils.filter_resultsdata(base_results, args.base_result_id)
+ if args.target_result_id:
+ target_results = resultutils.filter_resultsdata(target_results, args.target_result_id)
+
+ fixup_ptest_names(base_results, logger)
+ fixup_ptest_names(target_results, logger)
+
+ matches = []
+ regressions = []
+ notfound = []
+
+ for a in base_results:
+ if a in target_results:
+ base = list(base_results[a].keys())
+ target = list(target_results[a].keys())
+ # We may have multiple base/targets which are for different configurations. Start by
+ # removing any pairs which match
+ for c in base.copy():
+ for b in target.copy():
+ if not can_be_compared(logger, base_results[a][c], target_results[a][b]):
+ continue
+ res, resstr = compare_result(logger, c, b, base_results[a][c], target_results[a][b], args.limit)
+ if not res:
+ matches.append(resstr)
+ base.remove(c)
+ target.remove(b)
+ break
+            # Only regressions should remain now; we may not be able to match multiple pairs directly
+ for c in base:
+ for b in target:
+ if not can_be_compared(logger, base_results[a][c], target_results[a][b]):
+ continue
+ res, resstr = compare_result(logger, c, b, base_results[a][c], target_results[a][b], args.limit)
+ if res:
+ regressions.append(resstr)
+ else:
+ notfound.append("%s not found in target" % a)
+ print("\n".join(sorted(regressions)))
+ print("\n" + MISSING_TESTS_BANNER + "\n")
+ print("\n".join(sorted(notfound)))
+ print("\n" + ADDITIONAL_DATA_BANNER + "\n")
+ print("\n".join(sorted(matches)))
+ return 0
+
+def regression_git(args, logger):
+ base_results = {}
+ target_results = {}
+
+ tag_name = "{branch}/{commit_number}-g{commit}/{tag_number}"
+ repo = GitRepo(args.repo)
+
+ revs = gitarchive.get_test_revs(logger, repo, tag_name, branch=args.branch)
+
+ if args.branch2:
+ revs2 = gitarchive.get_test_revs(logger, repo, tag_name, branch=args.branch2)
+ if not len(revs2):
+ logger.error("No revisions found to compare against")
+ return 1
+ if not len(revs):
+ logger.error("No revision to report on found")
+ return 1
+ else:
+ if len(revs) < 2:
+ logger.error("Only %d tester revisions found, unable to generate report" % len(revs))
+ return 1
+
+ # Pick revisions
+ if args.commit:
+ if args.commit_number:
+ logger.warning("Ignoring --commit-number as --commit was specified")
+ index1 = gitarchive.rev_find(revs, 'commit', args.commit)
+ elif args.commit_number:
+ index1 = gitarchive.rev_find(revs, 'commit_number', args.commit_number)
+ else:
+ index1 = len(revs) - 1
+
+ if args.branch2:
+ revs2.append(revs[index1])
+ index1 = len(revs2) - 1
+ revs = revs2
+
+ if args.commit2:
+ if args.commit_number2:
+ logger.warning("Ignoring --commit-number2 as --commit2 was specified")
+ index2 = gitarchive.rev_find(revs, 'commit', args.commit2)
+ elif args.commit_number2:
+ index2 = gitarchive.rev_find(revs, 'commit_number', args.commit_number2)
+ else:
+ if index1 > 0:
+ index2 = index1 - 1
+            # Find the closest matching commit number for comparison.
+            # In future we could check that the commit is a common ancestor and
+            # continue back if not, but this is good enough for now
+ while index2 > 0 and revs[index2].commit_number > revs[index1].commit_number:
+ index2 = index2 - 1
+ else:
+ logger.error("Unable to determine the other commit, use "
+ "--commit2 or --commit-number2 to specify it")
+ return 1
+
+ logger.info("Comparing:\n%s\nto\n%s\n" % (revs[index1], revs[index2]))
+
+ base_results = resultutils.git_get_result(repo, revs[index1][2])
+ target_results = resultutils.git_get_result(repo, revs[index2][2])
+
+ regression_common(args, logger, base_results, target_results)
+
+ return 0
+
+def register_commands(subparsers):
+ """Register subcommands from this plugin"""
+
+ parser_build = subparsers.add_parser('regression', help='regression file/directory analysis',
+ description='regression analysis comparing the base set of results to the target results',
+ group='analysis')
+ parser_build.set_defaults(func=regression)
+ parser_build.add_argument('base_result',
+ help='base result file/directory/URL for the comparison')
+ parser_build.add_argument('target_result',
+ help='target result file/directory/URL to compare with')
+ parser_build.add_argument('-b', '--base-result-id', default='',
+ help='(optional) filter the base results to this result ID')
+    parser_build.add_argument('-t', '--target-result-id', default='',
+                              help='(optional) filter the target results to this result ID')
+    parser_build.add_argument('-l', '--limit', default=REGRESSIONS_DISPLAY_LIMIT,
+                              help="Maximum number of changes to display per test. Can be set to 0 to print all changes")
+
+ parser_build = subparsers.add_parser('regression-git', help='regression git analysis',
+ description='regression analysis comparing base result set to target '
+ 'result set',
+ group='analysis')
+ parser_build.set_defaults(func=regression_git)
+ parser_build.add_argument('repo',
+ help='the git repository containing the data')
+    parser_build.add_argument('-b', '--base-result-id', default='',
+                              help='(optional) filter the base results to this result ID; by default regressions '
+                                   'are selected based on configurations')
+    parser_build.add_argument('-t', '--target-result-id', default='',
+                              help='(optional) filter the target results to this result ID; by default regressions '
+                                   'are selected based on configurations')
+
+ parser_build.add_argument('--branch', '-B', default='master', help="Branch to find commit in")
+    parser_build.add_argument('--branch2', help="Branch to find comparison revisions in")
+ parser_build.add_argument('--commit', help="Revision to search for")
+ parser_build.add_argument('--commit-number', help="Revision number to search for, redundant if --commit is specified")
+ parser_build.add_argument('--commit2', help="Revision to compare with")
+ parser_build.add_argument('--commit-number2', help="Revision number to compare with, redundant if --commit2 is specified")
+ parser_build.add_argument('-l', '--limit', default=REGRESSIONS_DISPLAY_LIMIT, help="Maximum number of changes to display per test. Can be set to 0 to print all changes")
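+
+# Example invocations (illustrative paths):
+#   resulttool regression base-results/ target-results/ --limit 10
+#   resulttool regression-git /path/to/results-repo --branch master --branch2 my-branch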
+
diff --git a/scripts/lib/resulttool/report.py b/scripts/lib/resulttool/report.py
new file mode 100644
index 0000000000..a349510ab8
--- /dev/null
+++ b/scripts/lib/resulttool/report.py
@@ -0,0 +1,315 @@
+# test result tool - report text-based test results
+#
+# Copyright (c) 2019, Intel Corporation.
+# Copyright (c) 2019, Linux Foundation
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import os
+import glob
+import json
+import resulttool.resultutils as resultutils
+from oeqa.utils.git import GitRepo
+import oeqa.utils.gitarchive as gitarchive
+
+
+class ResultsTextReport(object):
+ def __init__(self):
+ self.ptests = {}
+ self.ltptests = {}
+ self.ltpposixtests = {}
+ self.result_types = {'passed': ['PASSED', 'passed', 'PASS', 'XFAIL'],
+ 'failed': ['FAILED', 'failed', 'FAIL', 'ERROR', 'error', 'UNKNOWN', 'XPASS'],
+ 'skipped': ['SKIPPED', 'skipped', 'UNSUPPORTED', 'UNTESTED', 'UNRESOLVED']}
+
+
+ def handle_ptest_result(self, k, status, result, machine):
+ if machine not in self.ptests:
+ self.ptests[machine] = {}
+
+ if k == 'ptestresult.sections':
+ # Ensure tests without any test results still show up on the report
+ for suite in result['ptestresult.sections']:
+ if suite not in self.ptests[machine]:
+ self.ptests[machine][suite] = {
+ 'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-',
+ 'failed_testcases': [], "testcases": set(),
+ }
+ if 'duration' in result['ptestresult.sections'][suite]:
+ self.ptests[machine][suite]['duration'] = result['ptestresult.sections'][suite]['duration']
+ if 'timeout' in result['ptestresult.sections'][suite]:
+ self.ptests[machine][suite]['duration'] += " T"
+ return True
+
+ # process test result
+ try:
+ _, suite, test = k.split(".", 2)
+ except ValueError:
+ return True
+
+        # Handle suite names containing a dot, e.g. 'glib-2.0'
+ if 'ptestresult.sections' in result and suite not in result['ptestresult.sections']:
+ try:
+ _, suite, suite1, test = k.split(".", 3)
+ if suite + "." + suite1 in result['ptestresult.sections']:
+ suite = suite + "." + suite1
+ except ValueError:
+ pass
+
+ if suite not in self.ptests[machine]:
+ self.ptests[machine][suite] = {
+ 'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-',
+ 'failed_testcases': [], "testcases": set(),
+ }
+
+ # do not process duplicate results
+ if test in self.ptests[machine][suite]["testcases"]:
+            print("Warning: duplicate ptest result '{}.{}' for {}".format(suite, test, machine))
+ return False
+
+ for tk in self.result_types:
+ if status in self.result_types[tk]:
+ self.ptests[machine][suite][tk] += 1
+ self.ptests[machine][suite]["testcases"].add(test)
+ return True
+
+ def handle_ltptest_result(self, k, status, result, machine):
+ if machine not in self.ltptests:
+ self.ltptests[machine] = {}
+
+ if k == 'ltpresult.sections':
+ # Ensure tests without any test results still show up on the report
+ for suite in result['ltpresult.sections']:
+ if suite not in self.ltptests[machine]:
+ self.ltptests[machine][suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
+ if 'duration' in result['ltpresult.sections'][suite]:
+ self.ltptests[machine][suite]['duration'] = result['ltpresult.sections'][suite]['duration']
+ if 'timeout' in result['ltpresult.sections'][suite]:
+ self.ltptests[machine][suite]['duration'] += " T"
+ return
+ try:
+ _, suite, test = k.split(".", 2)
+ except ValueError:
+ return
+        # Handle suite names containing a dot, e.g. 'glib-2.0'
+ if 'ltpresult.sections' in result and suite not in result['ltpresult.sections']:
+ try:
+ _, suite, suite1, test = k.split(".", 3)
+ if suite + "." + suite1 in result['ltpresult.sections']:
+ suite = suite + "." + suite1
+ except ValueError:
+ pass
+ if suite not in self.ltptests[machine]:
+ self.ltptests[machine][suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
+ for tk in self.result_types:
+ if status in self.result_types[tk]:
+ self.ltptests[machine][suite][tk] += 1
+
+ def handle_ltpposixtest_result(self, k, status, result, machine):
+ if machine not in self.ltpposixtests:
+ self.ltpposixtests[machine] = {}
+
+ if k == 'ltpposixresult.sections':
+ # Ensure tests without any test results still show up on the report
+ for suite in result['ltpposixresult.sections']:
+ if suite not in self.ltpposixtests[machine]:
+ self.ltpposixtests[machine][suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
+ if 'duration' in result['ltpposixresult.sections'][suite]:
+ self.ltpposixtests[machine][suite]['duration'] = result['ltpposixresult.sections'][suite]['duration']
+ return
+ try:
+ _, suite, test = k.split(".", 2)
+ except ValueError:
+ return
+        # Handle suite names containing a dot, e.g. 'glib-2.0'
+ if 'ltpposixresult.sections' in result and suite not in result['ltpposixresult.sections']:
+ try:
+ _, suite, suite1, test = k.split(".", 3)
+ if suite + "." + suite1 in result['ltpposixresult.sections']:
+ suite = suite + "." + suite1
+ except ValueError:
+ pass
+ if suite not in self.ltpposixtests[machine]:
+ self.ltpposixtests[machine][suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
+ for tk in self.result_types:
+ if status in self.result_types[tk]:
+ self.ltpposixtests[machine][suite][tk] += 1
+
+ def get_aggregated_test_result(self, logger, testresult, machine):
+ test_count_report = {'passed': 0, 'failed': 0, 'skipped': 0, 'failed_testcases': []}
+ result = testresult.get('result', [])
+ for k in result:
+ test_status = result[k].get('status', [])
+ if k.startswith("ptestresult."):
+ if not self.handle_ptest_result(k, test_status, result, machine):
+ continue
+ elif k.startswith("ltpresult."):
+ self.handle_ltptest_result(k, test_status, result, machine)
+ elif k.startswith("ltpposixresult."):
+ self.handle_ltpposixtest_result(k, test_status, result, machine)
+
+ # process result if it was not skipped by a handler
+ for tk in self.result_types:
+ if test_status in self.result_types[tk]:
+ test_count_report[tk] += 1
+ if test_status in self.result_types['failed']:
+ test_count_report['failed_testcases'].append(k)
+ return test_count_report
+
+ def print_test_report(self, template_file_name, test_count_reports):
+ from jinja2 import Environment, FileSystemLoader
+ script_path = os.path.dirname(os.path.realpath(__file__))
+ file_loader = FileSystemLoader(script_path + '/template')
+ env = Environment(loader=file_loader, trim_blocks=True)
+ template = env.get_template(template_file_name)
+ havefailed = False
+ reportvalues = []
+ machines = []
+ cols = ['passed', 'failed', 'skipped']
+ maxlen = {'passed' : 0, 'failed' : 0, 'skipped' : 0, 'result_id': 0, 'testseries' : 0, 'ptest' : 0 ,'ltptest': 0, 'ltpposixtest': 0}
+ for line in test_count_reports:
+ total_tested = line['passed'] + line['failed'] + line['skipped']
+ vals = {}
+ vals['result_id'] = line['result_id']
+ vals['testseries'] = line['testseries']
+ vals['sort'] = line['testseries'] + "_" + line['result_id']
+ vals['failed_testcases'] = line['failed_testcases']
+ for k in cols:
+ if total_tested:
+ vals[k] = "%d (%s%%)" % (line[k], format(line[k] / total_tested * 100, '.0f'))
+ else:
+ vals[k] = "0 (0%)"
+ for k in maxlen:
+ if k in vals and len(vals[k]) > maxlen[k]:
+ maxlen[k] = len(vals[k])
+ reportvalues.append(vals)
+ if line['failed_testcases']:
+ havefailed = True
+ if line['machine'] not in machines:
+ machines.append(line['machine'])
+ reporttotalvalues = {}
+ for k in cols:
+ reporttotalvalues[k] = '%s' % sum([line[k] for line in test_count_reports])
+ reporttotalvalues['count'] = '%s' % len(test_count_reports)
+ for (machine, report) in self.ptests.items():
+ for ptest in self.ptests[machine]:
+ if len(ptest) > maxlen['ptest']:
+ maxlen['ptest'] = len(ptest)
+ for (machine, report) in self.ltptests.items():
+ for ltptest in self.ltptests[machine]:
+ if len(ltptest) > maxlen['ltptest']:
+ maxlen['ltptest'] = len(ltptest)
+ for (machine, report) in self.ltpposixtests.items():
+ for ltpposixtest in self.ltpposixtests[machine]:
+ if len(ltpposixtest) > maxlen['ltpposixtest']:
+ maxlen['ltpposixtest'] = len(ltpposixtest)
+ output = template.render(reportvalues=reportvalues,
+ reporttotalvalues=reporttotalvalues,
+ havefailed=havefailed,
+ machines=machines,
+ ptests=self.ptests,
+ ltptests=self.ltptests,
+ ltpposixtests=self.ltpposixtests,
+ maxlen=maxlen)
+ print(output)
+
+ def view_test_report(self, logger, source_dir, branch, commit, tag, use_regression_map, raw_test, selected_test_case_only):
+ def print_selected_testcase_result(testresults, selected_test_case_only):
+ for testsuite in testresults:
+ for resultid in testresults[testsuite]:
+ result = testresults[testsuite][resultid]['result']
+ test_case_result = result.get(selected_test_case_only, {})
+ if test_case_result.get('status'):
+ print('Found selected test case result for %s from %s' % (selected_test_case_only,
+ resultid))
+ print(test_case_result['status'])
+ else:
+ print('Could not find selected test case result for %s from %s' % (selected_test_case_only,
+ resultid))
+ if test_case_result.get('log'):
+ print(test_case_result['log'])
+ test_count_reports = []
+ configmap = resultutils.store_map
+ if use_regression_map:
+ configmap = resultutils.regression_map
+ if commit:
+ if tag:
+ logger.warning("Ignoring --tag as --commit was specified")
+ tag_name = "{branch}/{commit_number}-g{commit}/{tag_number}"
+ repo = GitRepo(source_dir)
+ revs = gitarchive.get_test_revs(logger, repo, tag_name, branch=branch)
+ rev_index = gitarchive.rev_find(revs, 'commit', commit)
+ testresults = resultutils.git_get_result(repo, revs[rev_index][2], configmap=configmap)
+ elif tag:
+ repo = GitRepo(source_dir)
+ testresults = resultutils.git_get_result(repo, [tag], configmap=configmap)
+ else:
+ testresults = resultutils.load_resultsdata(source_dir, configmap=configmap)
+ if raw_test:
+ raw_results = {}
+ for testsuite in testresults:
+ result = testresults[testsuite].get(raw_test, {})
+ if result:
+ raw_results[testsuite] = {raw_test: result}
+ if raw_results:
+ if selected_test_case_only:
+ print_selected_testcase_result(raw_results, selected_test_case_only)
+ else:
+ print(json.dumps(raw_results, sort_keys=True, indent=4))
+ else:
+ print('Could not find raw test result for %s' % raw_test)
+ return 0
+ if selected_test_case_only:
+ print_selected_testcase_result(testresults, selected_test_case_only)
+ return 0
+ for testsuite in testresults:
+ for resultid in testresults[testsuite]:
+ skip = False
+ result = testresults[testsuite][resultid]
+ machine = result['configuration']['MACHINE']
+
+                # Check to see if there are already results for these kinds of tests for the machine
+ for key in result['result'].keys():
+ testtype = str(key).split('.')[0]
+                    if ((machine in self.ltptests and testtype == "ltpresult" and self.ltptests[machine]) or
+                        (machine in self.ltpposixtests and testtype == "ltpposixresult" and self.ltpposixtests[machine])):
+                        print("Already have test results for %s on %s, skipping %s" % (str(key).split('.')[0], machine, resultid))
+ skip = True
+ break
+ if skip:
+ break
+
+ test_count_report = self.get_aggregated_test_result(logger, result, machine)
+ test_count_report['machine'] = machine
+ test_count_report['testseries'] = result['configuration']['TESTSERIES']
+ test_count_report['result_id'] = resultid
+ test_count_reports.append(test_count_report)
+ self.print_test_report('test_report_full_text.txt', test_count_reports)
+
+def report(args, logger):
+ report = ResultsTextReport()
+ report.view_test_report(logger, args.source_dir, args.branch, args.commit, args.tag, args.use_regression_map,
+ args.raw_test_only, args.selected_test_case_only)
+ return 0
+
+def register_commands(subparsers):
+ """Register subcommands from this plugin"""
+ parser_build = subparsers.add_parser('report', help='summarise test results',
+ description='print a text-based summary of the test results',
+ group='analysis')
+ parser_build.set_defaults(func=report)
+ parser_build.add_argument('source_dir',
+ help='source file/directory/URL that contain the test result files to summarise')
+ parser_build.add_argument('--branch', '-B', default='master', help="Branch to find commit in")
+ parser_build.add_argument('--commit', help="Revision to report")
+ parser_build.add_argument('-t', '--tag', default='',
+ help='source_dir is a git repository, report on the tag specified from that repository')
+ parser_build.add_argument('-m', '--use_regression_map', action='store_true',
+ help='instead of the default "store_map", use the "regression_map" for report')
+ parser_build.add_argument('-r', '--raw_test_only', default='',
+ help='output raw test result only for the user provided test result id')
+ parser_build.add_argument('-s', '--selected_test_case_only', default='',
+                              help='output the selected test case result for the user-provided test case id; '
+                                   'if both a test result id and a test case id are provided then output the '
+                                   'selected test case result from the provided test result id')
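+
+# Example invocations (illustrative paths):
+#   resulttool report /path/to/results-dir
+#   resulttool report /path/to/results-repo --branch master --commit <commit-sha>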
diff --git a/scripts/lib/resulttool/resultutils.py b/scripts/lib/resulttool/resultutils.py
new file mode 100644
index 0000000000..c5521d81bd
--- /dev/null
+++ b/scripts/lib/resulttool/resultutils.py
@@ -0,0 +1,228 @@
+# resulttool - common library/utility functions
+#
+# Copyright (c) 2019, Intel Corporation.
+# Copyright (c) 2019, Linux Foundation
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import os
+import base64
+import zlib
+import json
+import scriptpath
+import copy
+import urllib.request
+import posixpath
+scriptpath.add_oe_lib_path()
+
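+# Each map below lists the configuration keys whose values are joined with '/'
+# to form the key under which a result set is grouped (see append_resultsdata)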
+flatten_map = {
+ "oeselftest": [],
+ "runtime": [],
+ "sdk": [],
+ "sdkext": [],
+ "manual": []
+}
+regression_map = {
+ "oeselftest": ['TEST_TYPE', 'MACHINE'],
+ "runtime": ['TESTSERIES', 'TEST_TYPE', 'IMAGE_BASENAME', 'MACHINE', 'IMAGE_PKGTYPE', 'DISTRO'],
+ "sdk": ['TESTSERIES', 'TEST_TYPE', 'IMAGE_BASENAME', 'MACHINE', 'SDKMACHINE'],
+ "sdkext": ['TESTSERIES', 'TEST_TYPE', 'IMAGE_BASENAME', 'MACHINE', 'SDKMACHINE'],
+ "manual": ['TEST_TYPE', 'TEST_MODULE', 'IMAGE_BASENAME', 'MACHINE']
+}
+store_map = {
+ "oeselftest": ['TEST_TYPE'],
+ "runtime": ['TEST_TYPE', 'DISTRO', 'MACHINE', 'IMAGE_BASENAME'],
+ "sdk": ['TEST_TYPE', 'MACHINE', 'SDKMACHINE', 'IMAGE_BASENAME'],
+ "sdkext": ['TEST_TYPE', 'MACHINE', 'SDKMACHINE', 'IMAGE_BASENAME'],
+ "manual": ['TEST_TYPE', 'TEST_MODULE', 'MACHINE', 'IMAGE_BASENAME']
+}
+
+def is_url(p):
+ """
+ Helper for determining if the given path is a URL
+ """
+ return p.startswith('http://') or p.startswith('https://')
+
+extra_configvars = {'TESTSERIES': ''}
+
+#
+# Load the json file and append the results data into the provided results dict
+#
+def append_resultsdata(results, f, configmap=store_map, configvars=extra_configvars):
+ if type(f) is str:
+ if is_url(f):
+ with urllib.request.urlopen(f) as response:
+ data = json.loads(response.read().decode('utf-8'))
+ url = urllib.parse.urlparse(f)
+ testseries = posixpath.basename(posixpath.dirname(url.path))
+ else:
+ with open(f, "r") as filedata:
+ try:
+ data = json.load(filedata)
+ except json.decoder.JSONDecodeError:
+ print("Cannot decode {}. Possible corruption. Skipping.".format(f))
+ data = ""
+ testseries = os.path.basename(os.path.dirname(f))
+ else:
+ data = f
+ for res in data:
+ if "configuration" not in data[res] or "result" not in data[res]:
+ raise ValueError("Test results data without configuration or result section?")
+ for config in configvars:
+ if config == "TESTSERIES" and "TESTSERIES" not in data[res]["configuration"]:
+ data[res]["configuration"]["TESTSERIES"] = testseries
+ continue
+ if config not in data[res]["configuration"]:
+ data[res]["configuration"][config] = configvars[config]
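+        # Group this result under a key built from the configmap entries for its
+        # test type, e.g. (illustrative, using store_map) "runtime/poky/qemux86-64/core-image-minimal"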
+ testtype = data[res]["configuration"].get("TEST_TYPE")
+ if testtype not in configmap:
+ raise ValueError("Unknown test type %s" % testtype)
+ testpath = "/".join(data[res]["configuration"].get(i) for i in configmap[testtype])
+ if testpath not in results:
+ results[testpath] = {}
+ results[testpath][res] = data[res]
+
+#
+# Walk a directory and find/load results data
+# or load directly from a file
+#
+def load_resultsdata(source, configmap=store_map, configvars=extra_configvars):
+ results = {}
+ if is_url(source) or os.path.isfile(source):
+ append_resultsdata(results, source, configmap, configvars)
+ return results
+ for root, dirs, files in os.walk(source):
+ for name in files:
+ f = os.path.join(root, name)
+ if name == "testresults.json":
+ append_resultsdata(results, f, configmap, configvars)
+ return results
+
+def filter_resultsdata(results, resultid):
+ newresults = {}
+ for r in results:
+ for i in results[r]:
+            if i == resultid:
+ newresults[r] = {}
+ newresults[r][i] = results[r][i]
+ return newresults
+
+def strip_ptestresults(results):
+ newresults = copy.deepcopy(results)
+ for res in newresults:
+ if 'result' not in newresults[res]:
+ continue
+ if 'ptestresult.rawlogs' in newresults[res]['result']:
+ del newresults[res]['result']['ptestresult.rawlogs']
+ if 'ptestresult.sections' in newresults[res]['result']:
+ for i in newresults[res]['result']['ptestresult.sections']:
+ if 'log' in newresults[res]['result']['ptestresult.sections'][i]:
+ del newresults[res]['result']['ptestresult.sections'][i]['log']
+ return newresults
+
+def decode_log(logdata):
+ if isinstance(logdata, str):
+ return logdata
+ elif isinstance(logdata, dict):
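+        # logs may be stored zlib-compressed and base64-encoded under the
+        # "compressed" key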
+ if "compressed" in logdata:
+ data = logdata.get("compressed")
+ data = base64.b64decode(data.encode("utf-8"))
+ data = zlib.decompress(data)
+ return data.decode("utf-8", errors='ignore')
+ return None
+
+def generic_get_log(sectionname, results, section):
+ if sectionname not in results:
+ return None
+ if section not in results[sectionname]:
+ return None
+
+ ptest = results[sectionname][section]
+ if 'log' not in ptest:
+ return None
+ return decode_log(ptest['log'])
+
+def ptestresult_get_log(results, section):
+ return generic_get_log('ptestresult.sections', results, section)
+
+def generic_get_rawlogs(sectname, results):
+ if sectname not in results:
+ return None
+ if 'log' not in results[sectname]:
+ return None
+ return decode_log(results[sectname]['log'])
+
+def ptestresult_get_rawlogs(results):
+ return generic_get_rawlogs('ptestresult.rawlogs', results)
+
+def save_resultsdata(results, destdir, fn="testresults.json", ptestjson=False, ptestlogs=False):
+ for res in results:
+ if res:
+ dst = destdir + "/" + res + "/" + fn
+ else:
+ dst = destdir + "/" + fn
+ os.makedirs(os.path.dirname(dst), exist_ok=True)
+ resultsout = results[res]
+ if not ptestjson:
+ resultsout = strip_ptestresults(results[res])
+ with open(dst, 'w') as f:
+ f.write(json.dumps(resultsout, sort_keys=True, indent=4))
+ for res2 in results[res]:
+ if ptestlogs and 'result' in results[res][res2]:
+ seriesresults = results[res][res2]['result']
+ rawlogs = ptestresult_get_rawlogs(seriesresults)
+ if rawlogs is not None:
+ with open(dst.replace(fn, "ptest-raw.log"), "w+") as f:
+ f.write(rawlogs)
+ if 'ptestresult.sections' in seriesresults:
+ for i in seriesresults['ptestresult.sections']:
+ sectionlog = ptestresult_get_log(seriesresults, i)
+ if sectionlog is not None:
+ with open(dst.replace(fn, "ptest-%s.log" % i), "w+") as f:
+ f.write(sectionlog)
+
+def git_get_result(repo, tags, configmap=store_map):
+ git_objs = []
+ for tag in tags:
+ files = repo.run_cmd(['ls-tree', "--name-only", "-r", tag]).splitlines()
+ git_objs.extend([tag + ':' + f for f in files if f.endswith("testresults.json")])
+
+ def parse_json_stream(data):
+ """Parse multiple concatenated JSON objects"""
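+        # "git show" over several blobs concatenates their contents; a line
+        # containing exactly '}{' marks the boundary between two JSON objects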
+ objs = []
+ json_d = ""
+ for line in data.splitlines():
+ if line == '}{':
+ json_d += '}'
+ objs.append(json.loads(json_d))
+ json_d = '{'
+ else:
+ json_d += line
+ objs.append(json.loads(json_d))
+ return objs
+
+ # Optimize by reading all data with one git command
+ results = {}
+ for obj in parse_json_stream(repo.run_cmd(['show'] + git_objs + ['--'])):
+ append_resultsdata(results, obj, configmap=configmap)
+
+ return results
+
+def test_run_results(results):
+ """
+    Convenience generator function that iterates over all test runs that have a
+ result section.
+
+ Generates a tuple of:
+ (result json file path, test run name, test run (dict), test run "results" (dict))
+ for each test run that has a "result" section
+ """
+ for path in results:
+ for run_name, test_run in results[path].items():
+            if 'result' not in test_run:
+ continue
+ yield path, run_name, test_run, test_run['result']
+
diff --git a/scripts/lib/resulttool/store.py b/scripts/lib/resulttool/store.py
new file mode 100644
index 0000000000..e0951f0a8f
--- /dev/null
+++ b/scripts/lib/resulttool/store.py
@@ -0,0 +1,104 @@
+# resulttool - store test results
+#
+# Copyright (c) 2019, Intel Corporation.
+# Copyright (c) 2019, Linux Foundation
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import tempfile
+import os
+import subprocess
+import json
+import shutil
+import scriptpath
+scriptpath.add_bitbake_lib_path()
+scriptpath.add_oe_lib_path()
+import resulttool.resultutils as resultutils
+import oeqa.utils.gitarchive as gitarchive
+
+
+def store(args, logger):
+ tempdir = tempfile.mkdtemp(prefix='testresults.')
+ try:
+ configvars = resultutils.extra_configvars.copy()
+ if args.executed_by:
+ configvars['EXECUTED_BY'] = args.executed_by
+ if args.extra_test_env:
+ configvars['EXTRA_TEST_ENV'] = args.extra_test_env
+ results = {}
+ logger.info('Reading files from %s' % args.source)
+ if resultutils.is_url(args.source) or os.path.isfile(args.source):
+ resultutils.append_resultsdata(results, args.source, configvars=configvars)
+ else:
+ for root, dirs, files in os.walk(args.source):
+ for name in files:
+ f = os.path.join(root, name)
+ if name == "testresults.json":
+ resultutils.append_resultsdata(results, f, configvars=configvars)
+ elif args.all:
+ dst = f.replace(args.source, tempdir + "/")
+ os.makedirs(os.path.dirname(dst), exist_ok=True)
+ shutil.copyfile(f, dst)
+
+ revisions = {}
+
+ if not results and not args.all:
+ if args.allow_empty:
+ logger.info("No results found to store")
+ return 0
+ logger.error("No results found to store")
+ return 1
+
+ # Find the branch/commit/commit_count and ensure they all match
+ for suite in results:
+ for result in results[suite]:
+ config = results[suite][result]['configuration']['LAYERS']['meta']
+ revision = (config['commit'], config['branch'], str(config['commit_count']))
+ if revision not in revisions:
+ revisions[revision] = {}
+ if suite not in revisions[revision]:
+ revisions[revision][suite] = {}
+ revisions[revision][suite][result] = results[suite][result]
+
+ logger.info("Found %d revisions to store" % len(revisions))
+
+ for r in revisions:
+ results = revisions[r]
+ keywords = {'commit': r[0], 'branch': r[1], "commit_count": r[2]}
+ subprocess.check_call(["find", tempdir, "!", "-path", "./.git/*", "-delete"])
+ resultutils.save_resultsdata(results, tempdir, ptestlogs=True)
+
+ logger.info('Storing test result into git repository %s' % args.git_dir)
+
+ gitarchive.gitarchive(tempdir, args.git_dir, False, False,
+ "Results of {branch}:{commit}", "branch: {branch}\ncommit: {commit}", "{branch}",
+ False, "{branch}/{commit_count}-g{commit}/{tag_number}",
+ 'Test run #{tag_number} of {branch}:{commit}', '',
+ [], [], False, keywords, logger)
+
+ finally:
+ subprocess.check_call(["rm", "-rf", tempdir])
+
+ return 0
+
+def register_commands(subparsers):
+ """Register subcommands from this plugin"""
+ parser_build = subparsers.add_parser('store', help='store test results into a git repository',
+ description='takes a results file or directory of results files and stores '
+ 'them into the destination git repository, splitting out the results '
+ 'files as configured',
+ group='setup')
+ parser_build.set_defaults(func=store)
+ parser_build.add_argument('source',
+ help='source file/directory/URL that contain the test result files to be stored')
+ parser_build.add_argument('git_dir',
+ help='the location of the git repository to store the results in')
+ parser_build.add_argument('-a', '--all', action='store_true',
+ help='include all files, not just testresults.json files')
+ parser_build.add_argument('-e', '--allow-empty', action='store_true',
+ help='don\'t error if no results to store are found')
+ parser_build.add_argument('-x', '--executed-by', default='',
+ help='add executed-by configuration to each result file')
+ parser_build.add_argument('-t', '--extra-test-env', default='',
+ help='add extra test environment data to each result file configuration')
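+
+# Example invocation (illustrative paths):
+#   resulttool store build/tmp/log/oeqa /path/to/results-repo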
diff --git a/scripts/lib/resulttool/template/test_report_full_text.txt b/scripts/lib/resulttool/template/test_report_full_text.txt
new file mode 100644
index 0000000000..2efba2ef6f
--- /dev/null
+++ b/scripts/lib/resulttool/template/test_report_full_text.txt
@@ -0,0 +1,79 @@
+==============================================================================================================
+Test Result Status Summary (Counts/Percentages sorted by testseries, ID)
+==============================================================================================================
+--------------------------------------------------------------------------------------------------------------
+{{ 'Test Series'.ljust(maxlen['testseries']) }} | {{ 'ID'.ljust(maxlen['result_id']) }} | {{ 'Passed'.ljust(maxlen['passed']) }} | {{ 'Failed'.ljust(maxlen['failed']) }} | {{ 'Skipped'.ljust(maxlen['skipped']) }}
+--------------------------------------------------------------------------------------------------------------
+{% for report in reportvalues |sort(attribute='sort') %}
+{{ report.testseries.ljust(maxlen['testseries']) }} | {{ report.result_id.ljust(maxlen['result_id']) }} | {{ (report.passed|string).ljust(maxlen['passed']) }} | {{ (report.failed|string).ljust(maxlen['failed']) }} | {{ (report.skipped|string).ljust(maxlen['skipped']) }}
+{% endfor %}
+--------------------------------------------------------------------------------------------------------------
+{{ 'Total'.ljust(maxlen['testseries']) }} | {{ reporttotalvalues['count'].ljust(maxlen['result_id']) }} | {{ reporttotalvalues['passed'].ljust(maxlen['passed']) }} | {{ reporttotalvalues['failed'].ljust(maxlen['failed']) }} | {{ reporttotalvalues['skipped'].ljust(maxlen['skipped']) }}
+--------------------------------------------------------------------------------------------------------------
+
+{% for machine in machines %}
+{% if ptests[machine] %}
+==============================================================================================================
+{{ machine }} PTest Result Summary
+==============================================================================================================
+--------------------------------------------------------------------------------------------------------------
+{{ 'Recipe'.ljust(maxlen['ptest']) }} | {{ 'Passed'.ljust(maxlen['passed']) }} | {{ 'Failed'.ljust(maxlen['failed']) }} | {{ 'Skipped'.ljust(maxlen['skipped']) }} | {{ 'Time(s)'.ljust(10) }}
+--------------------------------------------------------------------------------------------------------------
+{% for ptest in ptests[machine] |sort %}
+{{ ptest.ljust(maxlen['ptest']) }} | {{ (ptests[machine][ptest]['passed']|string).ljust(maxlen['passed']) }} | {{ (ptests[machine][ptest]['failed']|string).ljust(maxlen['failed']) }} | {{ (ptests[machine][ptest]['skipped']|string).ljust(maxlen['skipped']) }} | {{ (ptests[machine][ptest]['duration']|string) }}
+{% endfor %}
+--------------------------------------------------------------------------------------------------------------
+
+{% endif %}
+{% endfor %}
+
+{% for machine in machines %}
+{% if ltptests[machine] %}
+==============================================================================================================
+{{ machine }} LTP Test Result Summary
+==============================================================================================================
+--------------------------------------------------------------------------------------------------------------
+{{ 'Recipe'.ljust(maxlen['ltptest']) }} | {{ 'Passed'.ljust(maxlen['passed']) }} | {{ 'Failed'.ljust(maxlen['failed']) }} | {{ 'Skipped'.ljust(maxlen['skipped']) }} | {{ 'Time(s)'.ljust(10) }}
+--------------------------------------------------------------------------------------------------------------
+{% for ltptest in ltptests[machine] |sort %}
+{{ ltptest.ljust(maxlen['ltptest']) }} | {{ (ltptests[machine][ltptest]['passed']|string).ljust(maxlen['passed']) }} | {{ (ltptests[machine][ltptest]['failed']|string).ljust(maxlen['failed']) }} | {{ (ltptests[machine][ltptest]['skipped']|string).ljust(maxlen['skipped']) }} | {{ (ltptests[machine][ltptest]['duration']|string) }}
+{% endfor %}
+--------------------------------------------------------------------------------------------------------------
+
+{% endif %}
+{% endfor %}
+
+{% for machine in machines %}
+{% if ltpposixtests[machine] %}
+==============================================================================================================
+{{ machine }} LTP POSIX Result Summary
+==============================================================================================================
+--------------------------------------------------------------------------------------------------------------
+{{ 'Recipe'.ljust(maxlen['ltpposixtest']) }} | {{ 'Passed'.ljust(maxlen['passed']) }} | {{ 'Failed'.ljust(maxlen['failed']) }} | {{ 'Skipped'.ljust(maxlen['skipped']) }} | {{ 'Time(s)'.ljust(10) }}
+--------------------------------------------------------------------------------------------------------------
+{% for ltpposixtest in ltpposixtests[machine] |sort %}
+{{ ltpposixtest.ljust(maxlen['ltpposixtest']) }} | {{ (ltpposixtests[machine][ltpposixtest]['passed']|string).ljust(maxlen['passed']) }} | {{ (ltpposixtests[machine][ltpposixtest]['failed']|string).ljust(maxlen['failed']) }} | {{ (ltpposixtests[machine][ltpposixtest]['skipped']|string).ljust(maxlen['skipped']) }} | {{ (ltpposixtests[machine][ltpposixtest]['duration']|string) }}
+{% endfor %}
+--------------------------------------------------------------------------------------------------------------
+
+{% endif %}
+{% endfor %}
+
+
+==============================================================================================================
+Failed test cases (sorted by testseries, ID)
+==============================================================================================================
+{% if havefailed %}
+--------------------------------------------------------------------------------------------------------------
+{% for report in reportvalues |sort(attribute='sort') %}
+{% if report.failed_testcases %}
+testseries | result_id : {{ report.testseries }} | {{ report.result_id }}
+{% for testcase in report.failed_testcases %}
+ {{ testcase }}
+{% endfor %}
+{% endif %}
+{% endfor %}
+--------------------------------------------------------------------------------------------------------------
+{% else %}
+There were no test failures
+{% endif %}
diff --git a/scripts/lib/scriptpath.py b/scripts/lib/scriptpath.py
index d00317e18d..f32326db3a 100644
--- a/scripts/lib/scriptpath.py
+++ b/scripts/lib/scriptpath.py
@@ -3,18 +3,8 @@
# Copyright (C) 2012-2014 Intel Corporation
# Copyright (C) 2011 Mentor Graphics Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import sys
import os
diff --git a/scripts/lib/scriptutils.py b/scripts/lib/scriptutils.py
index 31e48ea4dc..f23e53cba9 100644
--- a/scripts/lib/scriptutils.py
+++ b/scripts/lib/scriptutils.py
@@ -2,20 +2,9 @@
#
# Copyright (C) 2014 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-import argparse
import glob
import logging
import os
@@ -26,10 +15,52 @@ import string
import subprocess
import sys
import tempfile
+import threading
+import importlib
+import importlib.machinery
+import importlib.util
+
+class KeepAliveStreamHandler(logging.StreamHandler):
+ def __init__(self, keepalive=True, **kwargs):
+ super().__init__(**kwargs)
+ if keepalive is True:
+ keepalive = 5000 # default timeout
+ self._timeout = threading.Condition()
+ self._stop = False
+
+        # The background thread waits on the condition; if the wait times out
+        # without being notified, emit a keepalive message
+ def thread():
+ while not self._stop:
+ with self._timeout:
+ if not self._timeout.wait(keepalive):
+ self.emit(logging.LogRecord("keepalive", logging.INFO,
+ None, None, "Keepalive message", None, None))
+
+ self._thread = threading.Thread(target=thread, daemon=True)
+ self._thread.start()
-def logger_create(name, stream=None):
+ def close(self):
+ # mark the thread to stop and notify it
+ self._stop = True
+ with self._timeout:
+ self._timeout.notify()
+ # wait for it to join
+ self._thread.join()
+ super().close()
+
+ def emit(self, record):
+ super().emit(record)
+ # trigger timer reset
+ with self._timeout:
+ self._timeout.notify()
+
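+# A minimal usage sketch (hypothetical caller): long-running scripts can pass
+# keepalive so CI systems that kill silent jobs keep seeing periodic output,
+# e.g. logger = logger_create('my-script', keepalive=5000)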
+def logger_create(name, stream=None, keepalive=None):
logger = logging.getLogger(name)
- loggerhandler = logging.StreamHandler(stream=stream)
+ if keepalive is not None:
+ loggerhandler = KeepAliveStreamHandler(stream=stream, keepalive=keepalive)
+ else:
+ loggerhandler = logging.StreamHandler(stream=stream)
loggerhandler.setFormatter(logging.Formatter("%(levelname)s: %(message)s"))
logger.addHandler(loggerhandler)
logger.setLevel(logging.INFO)
@@ -37,25 +68,22 @@ def logger_create(name, stream=None):
def logger_setup_color(logger, color='auto'):
from bb.msg import BBLogFormatter
- console = logging.StreamHandler(sys.stdout)
- formatter = BBLogFormatter("%(levelname)s: %(message)s")
- console.setFormatter(formatter)
- logger.handlers = [console]
- if color == 'always' or (color=='auto' and console.stream.isatty()):
- formatter.enable_color()
+ for handler in logger.handlers:
+ if (isinstance(handler, logging.StreamHandler) and
+ isinstance(handler.formatter, BBLogFormatter)):
+ if color == 'always' or (color == 'auto' and handler.stream.isatty()):
+ handler.formatter.enable_color()
-def load_plugins(logger, plugins, pluginpath):
- import imp
+def load_plugins(logger, plugins, pluginpath):
def load_plugin(name):
logger.debug('Loading plugin %s' % name)
- fp, pathname, description = imp.find_module(name, [pluginpath])
- try:
- return imp.load_module(name, fp, pathname, description)
- finally:
- if fp:
- fp.close()
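+        # locate the module in pluginpath and execute it; importlib replaces
+        # the removed, deprecated imp-based loading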
+ spec = importlib.machinery.PathFinder.find_spec(name, path=[pluginpath])
+ if spec:
+ mod = importlib.util.module_from_spec(spec)
+ spec.loader.exec_module(mod)
+ return mod
def plugin_name(filename):
return os.path.splitext(os.path.basename(filename))[0]
@@ -70,6 +98,7 @@ def load_plugins(logger, plugins, pluginpath):
plugin.plugin_init(plugins)
plugins.append(plugin)
+
def git_convert_standalone_clone(repodir):
"""If specified directory is a git repository, ensure it's a standalone clone"""
import bb.process
@@ -148,6 +177,7 @@ def fetch_url(tinfoil, srcuri, srcrev, destdir, logger, preserve_tmp=False, mirr
f.write('BB_STRICT_CHECKSUM = "ignore"\n')
f.write('SRC_URI = "%s"\n' % srcuri)
f.write('SRCREV = "%s"\n' % srcrev)
+ f.write('PV = "0.0+"\n')
f.write('WORKDIR = "%s"\n' % tmpworkdir)
# Set S out of the way so it doesn't get created under the workdir
f.write('S = "%s"\n' % os.path.join(tmpdir, 'emptysrc'))
@@ -187,7 +217,8 @@ def fetch_url(tinfoil, srcuri, srcrev, destdir, logger, preserve_tmp=False, mirr
pathvars = ['T', 'RECIPE_SYSROOT', 'RECIPE_SYSROOT_NATIVE']
for pathvar in pathvars:
path = rd.getVar(pathvar)
- shutil.rmtree(path)
+ if os.path.exists(path):
+ shutil.rmtree(path)
finally:
if fetchrecipe:
try:
@@ -239,3 +270,13 @@ def is_src_url(param):
elif param.startswith('git@') or ('@' in param and param.endswith('.git')):
return True
return False
+
+def filter_src_subdirs(pth):
+ """
+ Filter out subdirectories of initial unpacked source trees that we do not care about.
+ Used by devtool and recipetool.
+ """
+ dirlist = os.listdir(pth)
+ filterout = ['git.indirectionsymlink', 'source-date-epoch', 'sstate-install-recipe_qa']
+ dirlist = [x for x in dirlist if x not in filterout]
+ return dirlist
diff --git a/scripts/lib/wic/__init__.py b/scripts/lib/wic/__init__.py
index 85876b138c..85567934ae 100644
--- a/scripts/lib/wic/__init__.py
+++ b/scripts/lib/wic/__init__.py
@@ -1,20 +1,10 @@
-#!/usr/bin/env python -tt
+#!/usr/bin/env python3
#
# Copyright (c) 2007 Red Hat, Inc.
# Copyright (c) 2011 Intel, Inc.
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License as published by the Free
-# Software Foundation; version 2 of the License
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
-# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-# for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc., 59
-# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
class WicError(Exception):
pass
diff --git a/scripts/lib/wic/canned-wks/efi-bootdisk.wks.in b/scripts/lib/wic/canned-wks/efi-bootdisk.wks.in
index 7300e65e32..2fd286ff98 100644
--- a/scripts/lib/wic/canned-wks/efi-bootdisk.wks.in
+++ b/scripts/lib/wic/canned-wks/efi-bootdisk.wks.in
@@ -1,3 +1,3 @@
bootloader --ptable gpt
-part /boot --source rootfs --rootfs-dir=${IMAGE_ROOTFS}/boot --fstype=vfat --label boot --active --align 1024 --use-uuid --overhead-factor 1.0
+part /boot --source rootfs --rootfs-dir=${IMAGE_ROOTFS}/boot --fstype=vfat --label boot --active --align 1024 --use-uuid --overhead-factor 1.1
part / --source rootfs --fstype=ext4 --label root --align 1024 --exclude-path boot/
diff --git a/scripts/lib/wic/canned-wks/qemuloongarch.wks b/scripts/lib/wic/canned-wks/qemuloongarch.wks
new file mode 100644
index 0000000000..8465c7a8c0
--- /dev/null
+++ b/scripts/lib/wic/canned-wks/qemuloongarch.wks
@@ -0,0 +1,3 @@
+# short-description: Create qcow2 image for LoongArch QEMU machines
+
+part / --source rootfs --fstype=ext4 --label root --align 4096 --size 5G
diff --git a/scripts/lib/wic/canned-wks/qemuriscv.wks b/scripts/lib/wic/canned-wks/qemuriscv.wks
new file mode 100644
index 0000000000..12c68b7069
--- /dev/null
+++ b/scripts/lib/wic/canned-wks/qemuriscv.wks
@@ -0,0 +1,3 @@
+# short-description: Create qcow2 image for RISC-V QEMU machines
+
+part / --source rootfs --fstype=ext4 --label root --align 4096 --size 5G
diff --git a/scripts/lib/wic/canned-wks/qemux86-directdisk.wks b/scripts/lib/wic/canned-wks/qemux86-directdisk.wks
index cfa32bd38a..808997611a 100644
--- a/scripts/lib/wic/canned-wks/qemux86-directdisk.wks
+++ b/scripts/lib/wic/canned-wks/qemux86-directdisk.wks
@@ -4,5 +4,5 @@
include common.wks.inc
-bootloader --timeout=0 --append="vga=0 rw mem=256M ip=192.168.7.2::192.168.7.1:255.255.255.0 oprofile.timer=1 rootfstype=ext4 "
+bootloader --timeout=0 --append="rw oprofile.timer=1 rootfstype=ext4 console=tty console=ttyS0 "
diff --git a/scripts/lib/wic/canned-wks/sdimage-bootpart.wks b/scripts/lib/wic/canned-wks/sdimage-bootpart.wks
index 7ffd632f4a..63bc4dab6a 100644
--- a/scripts/lib/wic/canned-wks/sdimage-bootpart.wks
+++ b/scripts/lib/wic/canned-wks/sdimage-bootpart.wks
@@ -2,5 +2,5 @@
# long-description: Creates a partitioned SD card image. Boot files
# are located in the first vfat partition.
-part /boot --source bootimg-partition --ondisk mmcblk --fstype=vfat --label boot --active --align 4 --size 16
-part / --source rootfs --ondisk mmcblk --fstype=ext4 --label root --align 4
+part /boot --source bootimg-partition --ondisk mmcblk0 --fstype=vfat --label boot --active --align 4 --size 16
+part / --source rootfs --ondisk mmcblk0 --fstype=ext4 --label root --align 4
diff --git a/scripts/lib/wic/engine.py b/scripts/lib/wic/engine.py
index f0c5ff0aaf..674ccfc244 100644
--- a/scripts/lib/wic/engine.py
+++ b/scripts/lib/wic/engine.py
@@ -1,21 +1,7 @@
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# Copyright (c) 2013, Intel Corporation.
-# All rights reserved.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# SPDX-License-Identifier: GPL-2.0-only
#
# DESCRIPTION
@@ -33,9 +19,10 @@ import os
import tempfile
import json
import subprocess
+import shutil
+import re
from collections import namedtuple, OrderedDict
-from distutils.spawn import find_executable
from wic import WicError
from wic.filemap import sparse_copy
@@ -89,7 +76,8 @@ def find_canned_image(scripts_path, wks_file):
for fname in files:
if fname.endswith("~") or fname.endswith("#"):
continue
- if fname.endswith(".wks") and wks_file + ".wks" == fname:
+ if ((fname.endswith(".wks") and wks_file + ".wks" == fname) or \
+ (fname.endswith(".wks.in") and wks_file + ".wks.in" == fname)):
fullpath = os.path.join(canned_wks_dir, fname)
return fullpath
return None
@@ -106,7 +94,7 @@ def list_canned_images(scripts_path):
for fname in files:
if fname.endswith("~") or fname.endswith("#"):
continue
- if fname.endswith(".wks"):
+ if fname.endswith(".wks") or fname.endswith(".wks.in"):
fullpath = os.path.join(canned_wks_dir, fname)
with open(fullpath) as wks:
for line in wks:
@@ -115,7 +103,7 @@ def list_canned_images(scripts_path):
if idx != -1:
desc = line[idx + len("short-description:"):].strip()
break
- basename = os.path.splitext(fname)[0]
+ basename = fname.split('.')[0]
print(" %s\t\t%s" % (basename.ljust(30), desc))
@@ -191,7 +179,7 @@ def wic_create(wks_file, rootfs_dir, bootimg_dir, kernel_dir,
if not os.path.exists(options.outdir):
os.makedirs(options.outdir)
- pname = 'direct'
+ pname = options.imager
plugin_class = PluginMgr.get_plugins('imager').get(pname)
if not plugin_class:
raise WicError('Unknown plugin: %s' % pname)
@@ -245,12 +233,19 @@ class Disk:
self._ptable_format = None
# find parted
- self.paths = "/bin:/usr/bin:/usr/sbin:/sbin/"
+        # read paths from the $PATH environment variable; if it is unset,
+        # fall back to hardcoded paths
+ pathlist = "/bin:/usr/bin:/usr/sbin:/sbin/"
+ try:
+ self.paths = os.environ['PATH'] + ":" + pathlist
+ except KeyError:
+ self.paths = pathlist
+
if native_sysroot:
- for path in self.paths.split(':'):
+ for path in pathlist.split(':'):
self.paths = "%s%s:%s" % (native_sysroot, path, self.paths)
- self.parted = find_executable("parted", self.paths)
+ self.parted = shutil.which("parted", path=self.paths)
if not self.parted:
raise WicError("Can't find executable parted")
@@ -266,10 +261,15 @@ class Disk:
out = exec_cmd("%s -sm %s unit B print" % (self.parted, self.imagepath))
parttype = namedtuple("Part", "pnum start end size fstype")
splitted = out.splitlines()
- lsector_size, psector_size, self._ptable_format = splitted[1].split(":")[3:6]
+ # skip over possible errors in exec_cmd output
+ try:
+            idx = splitted.index("BYT;")
+ except ValueError:
+ raise WicError("Error getting partition information from %s" % (self.parted))
+ lsector_size, psector_size, self._ptable_format = splitted[idx + 1].split(":")[3:6]
self._lsector_size = int(lsector_size)
self._psector_size = int(psector_size)
- for line in splitted[2:]:
+ for line in splitted[idx + 2:]:
pnum, start, end, size, fstype = line.split(':')[:5]
partition = parttype(int(pnum), int(start[:-1]), int(end[:-1]),
int(size[:-1]), fstype)
@@ -280,10 +280,10 @@ class Disk:
def __getattr__(self, name):
"""Get path to the executable in a lazy way."""
if name in ("mdir", "mcopy", "mdel", "mdeltree", "sfdisk", "e2fsck",
- "resize2fs", "mkswap", "mkdosfs", "debugfs"):
+                    "resize2fs", "mkswap", "mkdosfs", "debugfs", "blkid"):
aname = "_%s" % name
if aname not in self.__dict__:
- setattr(self, aname, find_executable(name, self.paths))
+ setattr(self, aname, shutil.which(name, path=self.paths))
if aname not in self.__dict__ or self.__dict__[aname] is None:
raise WicError("Can't find executable '{}'".format(name))
return self.__dict__[aname]
@@ -291,7 +291,7 @@ class Disk:
def _get_part_image(self, pnum):
if pnum not in self.partitions:
- raise WicError("Partition %s is not in the image")
+ raise WicError("Partition %s is not in the image" % pnum)
part = self.partitions[pnum]
# check if fstype is supported
for fstype in self.fstypes:
@@ -314,6 +314,9 @@ class Disk:
seek=self.partitions[pnum].start)
def dir(self, pnum, path):
+ if pnum not in self.partitions:
+ raise WicError("Partition %s is not in the image" % pnum)
+
if self.partitions[pnum].fstype.startswith('ext'):
return exec_cmd("{} {} -R 'ls -l {}'".format(self.debugfs,
self._get_part_image(pnum),
@@ -323,26 +326,80 @@ class Disk:
self._get_part_image(pnum),
path))
- def copy(self, src, pnum, path):
+ def copy(self, src, dest):
"""Copy partition image into wic image."""
+ pnum = dest.part if isinstance(src, str) else src.part
+
if self.partitions[pnum].fstype.startswith('ext'):
- cmd = "echo -e 'cd {}\nwrite {} {}' | {} -w {}".\
- format(path, src, os.path.basename(src),
+ if isinstance(src, str):
+ cmd = "printf 'cd {}\nwrite {} {}\n' | {} -w {}".\
+ format(os.path.dirname(dest.path), src, os.path.basename(src),
self.debugfs, self._get_part_image(pnum))
+ else: # copy from wic
+ # run both dump and rdump to support both files and directories
+ cmd = "printf 'cd {}\ndump /{} {}\nrdump /{} {}\n' | {} {}".\
+ format(os.path.dirname(src.path), src.path,
+ dest, src.path, dest, self.debugfs,
+ self._get_part_image(pnum))
else: # fat
- cmd = "{} -i {} -snop {} ::{}".format(self.mcopy,
+ if isinstance(src, str):
+ cmd = "{} -i {} -snop {} ::{}".format(self.mcopy,
+ self._get_part_image(pnum),
+ src, dest.path)
+ else:
+ cmd = "{} -i {} -snop ::{} {}".format(self.mcopy,
self._get_part_image(pnum),
- src, path)
+ src.path, dest)
+
exec_cmd(cmd, as_shell=True)
self._put_part_image(pnum)
- def remove(self, pnum, path):
+ def remove_ext(self, pnum, path, recursive):
+ """
+ Remove files/dirs and their contents from the partition.
+ This only applies to ext* partition.
+ """
+ abs_path = re.sub(r'\/\/+', '/', path)
+ cmd = "{} {} -wR 'rm \"{}\"'".format(self.debugfs,
+ self._get_part_image(pnum),
+ abs_path)
+ out = exec_cmd(cmd, as_shell=True)
+ for line in out.splitlines():
+ if line.startswith("rm:"):
+ if "file is a directory" in line:
+ if recursive:
+ # loop through the contents and delete them one by one if
+ # flagged with -r
+ subdirs = iter(self.dir(pnum, abs_path).splitlines())
+ next(subdirs)
+ for subdir in subdirs:
+ dir = subdir.split(':')[1].split(" ", 1)[1]
+ if not dir == "." and not dir == "..":
+ self.remove_ext(pnum, "%s/%s" % (abs_path, dir), recursive)
+
+ rmdir_out = exec_cmd("{} {} -wR 'rmdir \"{}\"'".format(self.debugfs,
+ self._get_part_image(pnum),
+ abs_path.rstrip('/')),
+ as_shell=True)
+
+ for rmdir_line in rmdir_out.splitlines():
+ if "directory not empty" in rmdir_line:
+ raise WicError("Could not complete operation: \n%s \n"
+ "use -r to remove non-empty directory" % rmdir_line)
+ if rmdir_line.startswith("rmdir:"):
+ raise WicError("Could not complete operation: \n%s "
+ "\n%s" % (str(line), rmdir_line))
+
+ else:
+ raise WicError("Could not complete operation: \n%s "
+ "\nUnable to remove %s" % (str(line), abs_path))
+
+ def remove(self, pnum, path, recursive):
"""Remove files/dirs from the partition."""
partimg = self._get_part_image(pnum)
if self.partitions[pnum].fstype.startswith('ext'):
- exec_cmd("{} {} -wR 'rm {}'".format(self.debugfs,
- self._get_part_image(pnum),
- path), as_shell=True)
+ self.remove_ext(pnum, path, recursive)
+
else: # fat
cmd = "{} -i {} ::{}".format(self.mdel, partimg, path)
try:
@@ -385,7 +442,7 @@ class Disk:
outf.flush()
def read_ptable(path):
- out = exec_cmd("{} -dJ {}".format(self.sfdisk, path))
+ out = exec_cmd("{} -J {}".format(self.sfdisk, path))
return json.loads(out)
def write_ptable(parts, target):
@@ -486,7 +543,8 @@ class Disk:
logger.info("creating swap partition {}".format(pnum))
label = part.get("name")
label_str = "-L {}".format(label) if label else ''
- uuid = part.get("uuid")
+ out = exec_cmd("{} --probe {}".format(self.blkid, self._get_part_image(pnum)))
+ uuid = out[out.index("UUID=\"")+6:out.index("UUID=\"")+42]
uuid_str = "-U {}".format(uuid) if uuid else ''
with open(partfname, 'w') as sparse:
os.ftruncate(sparse.fileno(), part['size'] * self._lsector_size)
@@ -494,7 +552,7 @@ class Disk:
sparse_copy(partfname, target, seek=part['start'] * self._lsector_size)
os.unlink(partfname)
elif part['type'] != 'f':
- logger.warn("skipping partition {}: unsupported fstype {}".format(pnum, fstype))
+ logger.warning("skipping partition {}: unsupported fstype {}".format(pnum, fstype))
def wic_ls(args, native_sysroot):
"""List contents of partitioned image or vfat partition."""
@@ -512,11 +570,15 @@ def wic_ls(args, native_sysroot):
def wic_cp(args, native_sysroot):
"""
- Copy local file or directory to the vfat partition of
+ Copy file or directory to/from the vfat/ext partition of
partitioned image.
"""
- disk = Disk(args.dest.image, native_sysroot)
- disk.copy(args.src, args.dest.part, args.dest.path)
+ if isinstance(args.dest, str):
+ disk = Disk(args.src.image, native_sysroot)
+ else:
+ disk = Disk(args.dest.image, native_sysroot)
+ disk.copy(args.src, args.dest)
+
def wic_rm(args, native_sysroot):
"""
@@ -524,13 +586,13 @@ def wic_rm(args, native_sysroot):
partitioned image.
"""
disk = Disk(args.path.image, native_sysroot)
- disk.remove(args.path.part, args.path.path)
+ disk.remove(args.path.part, args.path.path, args.recursive_delete)
def wic_write(args, native_sysroot):
"""
Write image to a target device.
"""
- disk = Disk(args.image, native_sysroot, ('fat', 'ext', 'swap'))
+ disk = Disk(args.image, native_sysroot, ('fat', 'ext', 'linux-swap'))
disk.write(args.target, args.expand)
def find_canned(scripts_path, file_name):
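
The reworked copy()/wic_cp() above pick the transfer direction by argument type: a plain string is the local side, while the parsed argument carries .image/.part/.path. A minimal sketch (not part of the patch; ImageSpec is a hypothetical stand-in for wic's parsed <image>:<partition><path> argument):

    from collections import namedtuple

    ImageSpec = namedtuple("ImageSpec", "image part path")  # hypothetical stand-in

    for src, dest in [("local/fstab", ImageSpec("core.wic", 1, "/etc/fstab")),
                      (ImageSpec("core.wic", 1, "/vmlinuz"), "out/")]:
        pnum = dest.part if isinstance(src, str) else src.part
        direction = "host -> image" if isinstance(src, str) else "image -> host"
        print("partition %d: %s" % (pnum, direction))
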
diff --git a/scripts/lib/wic/filemap.py b/scripts/lib/wic/filemap.py
index a72fa09ef5..85b39d5d74 100644
--- a/scripts/lib/wic/filemap.py
+++ b/scripts/lib/wic/filemap.py
@@ -1,13 +1,8 @@
+#
# Copyright (c) 2012 Intel, Inc.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License, version 2,
-# as published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-# General Public License for more details.
"""
This module implements a way to get file block mapping. Two methods
@@ -22,6 +17,7 @@ and returns an instance of the class.
# * Too many instance attributes (R0902)
# pylint: disable=R0902
+import errno
import os
import struct
import array
@@ -36,8 +32,13 @@ def get_block_size(file_obj):
"""
# Get the block size of the host file-system for the image file by calling
# the FIGETBSZ ioctl (number 2).
- binary_data = fcntl.ioctl(file_obj, 2, struct.pack('I', 0))
- bsize = struct.unpack('I', binary_data)[0]
+ try:
+ binary_data = fcntl.ioctl(file_obj, 2, struct.pack('I', 0))
+ bsize = struct.unpack('I', binary_data)[0]
+ except OSError:
+ bsize = None
+
+ # If the ioctl raises OSError or reports a zero block size, fall back to os.fstat
if not bsize:
import os
stat = os.fstat(file_obj.fileno())
@@ -45,6 +46,13 @@ def get_block_size(file_obj):
bsize = stat.st_blksize
else:
raise IOError("Unable to determine block size")
+
+ # The logic in this script only supports a maximum of a 4KB
+ # block size
+ max_block_size = 4 * 1024
+ if bsize > max_block_size:
+ bsize = max_block_size
+
return bsize
class ErrorNotSupp(Exception):
@@ -141,15 +149,6 @@ class _FilemapBase(object):
raise Error("the method is not implemented")
- def block_is_unmapped(self, block): # pylint: disable=W0613,R0201
- """
- This method has has to be implemented by child classes. It returns
- 'True' if block number 'block' of the image file is not mapped (hole)
- and 'False' otherwise.
- """
-
- raise Error("the method is not implemented")
-
def get_mapped_ranges(self, start, count): # pylint: disable=W0613,R0201
"""
This method has to be implemented by child classes. This is a
@@ -163,15 +162,6 @@ class _FilemapBase(object):
raise Error("the method is not implemented")
- def get_unmapped_ranges(self, start, count): # pylint: disable=W0613,R0201
- """
- This method has has to be implemented by child classes. Just like
- 'get_mapped_ranges()', but yields unmapped block ranges instead
- (holes).
- """
-
- raise Error("the method is not implemented")
-
# The 'SEEK_HOLE' and 'SEEK_DATA' options of the file seek system call
_SEEK_DATA = 3
@@ -189,9 +179,9 @@ def _lseek(file_obj, offset, whence):
except OSError as err:
# The 'lseek' system call returns the ENXIO if there is no data or
# hole starting from the specified offset.
- if err.errno == os.errno.ENXIO:
+ if err.errno == errno.ENXIO:
return -1
- elif err.errno == os.errno.EINVAL:
+ elif err.errno == errno.EINVAL:
raise ErrorNotSupp("the kernel or file-system does not support "
"\"SEEK_HOLE\" and \"SEEK_DATA\"")
else:
@@ -264,15 +254,10 @@ class FilemapSeek(_FilemapBase):
% (block, result))
return result
- def block_is_unmapped(self, block):
- """Refer the '_FilemapBase' class for the documentation."""
- return not self.block_is_mapped(block)
-
def _get_ranges(self, start, count, whence1, whence2):
"""
- This function implements 'get_mapped_ranges()' and
- 'get_unmapped_ranges()' depending on what is passed in the 'whence1'
- and 'whence2' arguments.
+ This function implements 'get_mapped_ranges()' depending
+ on what is passed in the 'whence1' and 'whence2' arguments.
"""
assert whence1 != whence2
@@ -302,12 +287,6 @@ class FilemapSeek(_FilemapBase):
% (start, count, start + count - 1))
return self._get_ranges(start, count, _SEEK_DATA, _SEEK_HOLE)
- def get_unmapped_ranges(self, start, count):
- """Refer the '_FilemapBase' class for the documentation."""
- self._log.debug("FilemapSeek: get_unmapped_ranges(%d, %d(%d))"
- % (start, count, start + count - 1))
- return self._get_ranges(start, count, _SEEK_HOLE, _SEEK_DATA)
-
# Below goes the FIEMAP ioctl implementation, which is not very readable
# because it deals with the rather complex FIEMAP ioctl. To understand the
@@ -394,12 +373,12 @@ class FilemapFiemap(_FilemapBase):
except IOError as err:
# Note, the FIEMAP ioctl is supported by the Linux kernel starting
# from version 2.6.28 (year 2008).
- if err.errno == os.errno.EOPNOTSUPP:
+ if err.errno == errno.EOPNOTSUPP:
errstr = "FilemapFiemap: the FIEMAP ioctl is not supported " \
"by the file-system"
self._log.debug(errstr)
raise ErrorNotSupp(errstr)
- if err.errno == os.errno.ENOTTY:
+ if err.errno == errno.ENOTTY:
errstr = "FilemapFiemap: the FIEMAP ioctl is not supported " \
"by the kernel"
self._log.debug(errstr)
@@ -421,10 +400,6 @@ class FilemapFiemap(_FilemapBase):
% (block, result))
return result
- def block_is_unmapped(self, block):
- """Refer the '_FilemapBase' class for the documentation."""
- return not self.block_is_mapped(block)
-
def _unpack_fiemap_extent(self, index):
"""
Unpack a 'struct fiemap_extent' structure object number 'index' from
@@ -501,23 +476,28 @@ class FilemapFiemap(_FilemapBase):
% (first_prev, last_prev))
yield (first_prev, last_prev)
- def get_unmapped_ranges(self, start, count):
+class FilemapNobmap(_FilemapBase):
+ """
+ This class is used when both the 'SEEK_DATA/HOLE' and FIEMAP are not
+ supported by the filesystem or kernel.
+ """
+
+ def __init__(self, image, log=None):
"""Refer the '_FilemapBase' class for the documentation."""
- self._log.debug("FilemapFiemap: get_unmapped_ranges(%d, %d(%d))"
- % (start, count, start + count - 1))
- hole_first = start
- for first, last in self._do_get_mapped_ranges(start, count):
- if first > hole_first:
- self._log.debug("FilemapFiemap: yielding range (%d, %d)"
- % (hole_first, first - 1))
- yield (hole_first, first - 1)
- hole_first = last + 1
+ # Call the base class constructor first
+ _FilemapBase.__init__(self, image, log)
+ self._log.debug("FilemapNobmap: initializing")
- if hole_first < start + count:
- self._log.debug("FilemapFiemap: yielding range (%d, %d)"
- % (hole_first, start + count - 1))
- yield (hole_first, start + count - 1)
+ def block_is_mapped(self, block):
+ """Refer the '_FilemapBase' class for the documentation."""
+ return True
+
+ def get_mapped_ranges(self, start, count):
+ """Refer the '_FilemapBase' class for the documentation."""
+ self._log.debug("FilemapNobmap: get_mapped_ranges(%d, %d(%d))"
+ % (start, count, start + count - 1))
+ yield (start, start + count - 1)
def filemap(image, log=None):
"""
@@ -532,7 +512,10 @@ def filemap(image, log=None):
try:
return FilemapFiemap(image, log)
except ErrorNotSupp:
- return FilemapSeek(image, log)
+ try:
+ return FilemapSeek(image, log)
+ except ErrorNotSupp:
+ return FilemapNobmap(image, log)
def sparse_copy(src_fname, dst_fname, skip=0, seek=0,
length=0, api=None):
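
With the FilemapNobmap fallback added above, filemap() always returns a usable object: FIEMAP is tried first, then SEEK_HOLE/SEEK_DATA, then the treat-everything-as-mapped fallback. A usage sketch (not part of the patch; the image path is hypothetical and scripts/lib must be importable):

    from wic.filemap import filemap

    fmap = filemap("image.wic")  # hypothetical sparse image file
    for first, last in fmap.get_mapped_ranges(0, fmap.blocks_cnt):
        print("mapped blocks %d..%d" % (first, last))
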
diff --git a/scripts/lib/wic/help.py b/scripts/lib/wic/help.py
index 842b868a57..163535e431 100644
--- a/scripts/lib/wic/help.py
+++ b/scripts/lib/wic/help.py
@@ -1,21 +1,6 @@
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
-#
# Copyright (c) 2013, Intel Corporation.
-# All rights reserved.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# SPDX-License-Identifier: GPL-2.0-only
#
# DESCRIPTION
# This module implements some basic help invocation functions along
@@ -356,12 +341,15 @@ DESCRIPTION
wic_cp_usage = """
- Copy files and directories to the vfat or ext* partition
+ Copy files and directories to/from the vfat or ext* partition
- usage: wic cp <src> <image>:<partition>[<path>] [--native-sysroot <path>]
+ usage: wic cp <src> <dest> [--native-sysroot <path>]
- This command copies local files or directories to the vfat or ext* partitions
-of partitioned image.
+ source/destination image in the format <image>:<partition>[<path>]
+
+ This command copies files or directories either
+ - from the local filesystem to the vfat or ext* partitions of a partitioned image
+ - from the vfat or ext* partitions of a partitioned image to the local filesystem
See 'wic help cp' for more detailed instructions.
@@ -370,16 +358,18 @@ of partitioned image.
wic_cp_help = """
NAME
- wic cp - copy files and directories to the vfat or ext* partitions
+ wic cp - copy files and directories to/from the vfat or ext* partitions
SYNOPSIS
- wic cp <src> <image>:<partition>
- wic cp <src> <image>:<partition><path>
- wic cp <src> <image>:<partition><path> --native-sysroot <path>
+ wic cp <src> <dest-image>:<partition>
+ wic cp <src-image>:<partition> <dest>
+ wic cp <src> <dest-image>:<partition><path>
+ wic cp <src> <dest-image>:<partition><path> --native-sysroot <path>
DESCRIPTION
- This command copies files and directories to the vfat or ext* partition of
- the partitioned image.
+ This command copies files or directories either
+ - from the local filesystem to the vfat or ext* partitions of a partitioned image
+ - from the vfat or ext* partitions of a partitioned image to the local filesystem
The first form of it copies file or directory to the root directory of
the partition:
@@ -412,6 +402,10 @@ DESCRIPTION
4 files 0 bytes
15 675 392 bytes free
+ The third form of the command copies a file or directory from the specified
+ directory on the partition to the local filesystem:
+ $ wic cp tmp/deploy/images/qemux86-64/core-image-minimal-qemux86-64.wic:1/vmlinuz test
+
The -n option is used to specify the path to the native sysroot
containing the tools (parted and mtools) to use.
"""
@@ -437,6 +431,7 @@ NAME
SYNOPSIS
wic rm <src> <image>:<partition><path>
wic rm <src> <image>:<partition><path> --native-sysroot <path>
+ wic rm -r <image>:<partition><path>
DESCRIPTION
This command removes files or directories from the vfat or ext* partition of the
@@ -471,6 +466,9 @@ DESCRIPTION
The -n option is used to specify the path to the native sysroot
containing the tools (parted and mtools) to use.
+
+ The -r option is used to remove directories and their contents
+ recursively. This only applies to ext* partitions.
"""
wic_write_usage = """
@@ -493,7 +491,7 @@ NAME
SYNOPSIS
wic write <image> <target>
wic write <image> <target> --expand auto
- wic write <image> <target> --expand 1:100M-2:300M
+ wic write <image> <target> --expand 1:100M,2:300M
wic write <image> <target> --native-sysroot <path>
DESCRIPTION
@@ -504,7 +502,7 @@ DESCRIPTION
The --expand option is used to resize image partitions.
--expand auto expands partitions to occupy all free space available on the target device.
It's also possible to specify expansion rules in a format
- <partition>:<size>[-<partition>:<size>...] for one or more partitions.
+ <partition>:<size>[,<partition>:<size>...] for one or more partitions.
Specifying size 0 will keep partition unmodified.
Note: Resizing boot partition can result in non-bootable image for non-EFI images. It is
recommended to use size 0 for boot partition to keep image bootable.
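
   An illustrative invocation (not from the original help text) that keeps
   partition 1 untouched and grows partition 2 to 600M:

     $ wic write ./core-image-minimal-qemux86-64.wic /dev/sdb --expand 1:0,2:600M
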
@@ -538,7 +536,8 @@ DESCRIPTION
Source plugins can also be implemented and added by external
layers - any plugins found in a scripts/lib/wic/plugins/source/
- directory in an external layer will also be made available.
+ or lib/wic/plugins/source/ directory in an external layer will
+ also be made available.
When the wic implementation needs to invoke a partition-specific
implementation, it looks for the plugin that has the same name as
@@ -638,7 +637,7 @@ DESCRIPTION
oe-core: directdisk.bbclass and mkefidisk.sh. The difference
between wic and those examples is that with wic the functionality
of those scripts is implemented by a general-purpose partitioning
- 'language' based on Redhat kickstart syntax).
+ 'language' based on Red Hat kickstart syntax).
The initial motivation and design considerations that lead to the
current tool are described exhaustively in Yocto Bug #3847
@@ -841,8 +840,8 @@ DESCRIPTION
meanings. The commands are based on the Fedora kickstart
documentation but with modifications to reflect wic capabilities.
- http://fedoraproject.org/wiki/Anaconda/Kickstart#part_or_partition
- http://fedoraproject.org/wiki/Anaconda/Kickstart#bootloader
+ https://pykickstart.readthedocs.io/en/latest/kickstart-docs.html#part-or-partition
+ https://pykickstart.readthedocs.io/en/latest/kickstart-docs.html#bootloader
Commands
@@ -866,11 +865,11 @@ DESCRIPTION
Partitions with a <mountpoint> specified will be automatically mounted.
This is achieved by wic adding entries to the fstab during image
generation. In order for a valid fstab to be generated one of the
- --ondrive, --ondisk or --use-uuid partition options must be used for
- each partition that specifies a mountpoint. Note that with --use-uuid
- and non-root <mountpoint>, including swap, the mount program must
- understand the PARTUUID syntax. This currently excludes the busybox
- versions of these applications.
+ --ondrive, --ondisk, --use-uuid or --use-label partition options must
+ be used for each partition that specifies a mountpoint. Note that with
+ --use-{uuid,label} and non-root <mountpoint>, including swap, the mount
+ program must understand the PARTUUID or LABEL syntax. This currently
+ excludes the busybox versions of these applications.
The following are supported 'part' options:
@@ -931,6 +930,7 @@ DESCRIPTION
ext4
btrfs
squashfs
+ erofs
swap
--fsoptions: Specifies a free-form string of options to be
@@ -940,11 +940,25 @@ DESCRIPTION
quotes. If not specified, the default string is
"defaults".
+ --fspassno: Specifies the order in which filesystem checks are done
+ at boot time by fsck. See fs_passno parameter of
+ fstab(5). This parameter will be copied into the
+ /etc/fstab file of the installed system. If not
+ specified the default value of "0" will be used.
+
--label label: Specifies the label to give to the filesystem
to be made on the partition. If the given
label is already in use by another filesystem,
a new label is created for the partition.
+ --use-label: This option is specific to wic. It makes wic use the
+ label in /etc/fstab to specify a partition. If both
+ --use-label and --use-uuid are given, the uuid is
+ preferred because it is less likely to cause name
+ conflicts. This option is not supported on the root
+ partition, since that requires an initramfs to parse
+ the value, which wic does not currently support.
+
--active: Marks the partition as active.
--align (in KBytes): This option is specific to wic and says
@@ -963,6 +977,29 @@ DESCRIPTION
is omitted, not the directory itself. This option only
has an effect with the rootfs source plugin.
+ --include-path: This option is specific to wic. It adds the contents
+ of the given path or a rootfs to the resulting image.
+ The option contains two fields, the origin and the
+ destination. When the origin is a rootfs, it follows
+ the same logic as the rootfs-dir argument and the
+ permissions and owners are kept. When the origin is a
+ path, it is relative to the directory in which wic is
+ running, not the rootfs itself, so use of an absolute
+ path is recommended, and the owner and group are set to
+ root:root. If no destination is given, it is
+ automatically set to the root of the rootfs. This
+ option only has an effect with the rootfs source
+ plugin.
+
+ --change-directory: This option is specific to wic. It changes to the
+ given directory before copying the files. This
+ option is useful when we want to split a rootfs in
+ multiple partitions and we want to keep the right
+ permissions and usernames in all the partitions.
+
+ --no-fstab-update: This option is specific to wic. It leaves the stock
+ '/etc/fstab' file untouched for the given partition.
+
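+
+ For example (illustrative, not from the original help text), the
+ following part line copies a local directory into /opt of the
+ partition built from the rootfs:
+
+ part / --source rootfs --include-path extra-files/ /opt --fstype=ext4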
--extra-space: This option is specific to wic. It adds extra
space after the space filled by the content
of the partition. The final size can go
@@ -1053,3 +1090,59 @@ NAME
DESCRIPTION
Specify a help topic to display it. Topics are shown above.
"""
+
+
+wic_help = """
+Creates a customized OpenEmbedded image.
+
+Usage: wic [--version]
+ wic help [COMMAND or TOPIC]
+ wic COMMAND [ARGS]
+
+ usage 1: Returns the current version of Wic
+ usage 2: Returns detailed help for a COMMAND or TOPIC
+ usage 3: Executes COMMAND
+
+
+COMMAND:
+
+ list - List available canned images and source plugins
+ ls - List contents of partitioned image or partition
+ rm - Remove files or directories from the vfat or ext* partitions
+ help - Show help for a wic COMMAND or TOPIC
+ write - Write an image to a device
+ cp - Copy files and directories to the vfat or ext* partitions
+ create - Create a new OpenEmbedded image
+
+
+TOPIC:
+ overview - Presents an overall overview of Wic
+ plugins - Presents an overview and API for Wic plugins
+ kickstart - Presents a Wic kickstart file reference
+
+
+Examples:
+
+ $ wic --version
+
+ Returns the current version of Wic
+
+
+ $ wic help cp
+
+ Returns the SYNOPSIS and DESCRIPTION for the Wic "cp" command.
+
+
+ $ wic list images
+
+ Returns the list of canned images (i.e. *.wks files located in
+ the /scripts/lib/wic/canned-wks directory).
+
+
+ $ wic create mkefidisk -e core-image-minimal
+
+ Creates an EFI disk image from artifacts used in a previous
+ core-image-minimal build in standard BitBake locations
+ (e.g. Cooked Mode).
+
+"""
diff --git a/scripts/lib/wic/ksparser.py b/scripts/lib/wic/ksparser.py
index e590b2fe3c..7ef3dc83dd 100644
--- a/scripts/lib/wic/ksparser.py
+++ b/scripts/lib/wic/ksparser.py
@@ -1,21 +1,8 @@
-#!/usr/bin/env python -tt
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#!/usr/bin/env python3
#
# Copyright (c) 2016 Intel, Inc.
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License as published by the Free
-# Software Foundation; version 2 of the License
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
-# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-# for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc., 59
-# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+# SPDX-License-Identifier: GPL-2.0-only
#
# DESCRIPTION
# This module provides a parser for the kickstart format
@@ -28,14 +15,30 @@
import os
import shlex
import logging
+import re
from argparse import ArgumentParser, ArgumentError, ArgumentTypeError
from wic.engine import find_canned
from wic.partition import Partition
+from wic.misc import get_bitbake_var
logger = logging.getLogger('wic')
+__expand_var_regexp__ = re.compile(r"\${[^{}@\n\t :]+}")
+
+def expand_line(line):
+ while True:
+ m = __expand_var_regexp__.search(line)
+ if not m:
+ return line
+ key = m.group()[2:-1]
+ val = get_bitbake_var(key)
+ if val is None:
+ logger.warning("cannot expand variable %s" % key)
+ return line
+ line = line[:m.start()] + val + line[m.end():]
+
class KickStartError(Exception):
"""Custom exception."""
pass
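
An illustrative sketch (not part of the patch) of the expand_line() substitution above, with a plain dict standing in for get_bitbake_var():

    import re

    _var = re.compile(r"\${[^{}@\n\t :]+}")  # same pattern as above

    def expand(line, env):
        while True:
            m = _var.search(line)
            if not m:
                return line
            val = env.get(m.group()[2:-1])
            if val is None:
                return line  # unknown variable: the real code warns and stops
            line = line[:m.start()] + val + line[m.end():]

    print(expand("part / --source rootfs --size ${ROOTFS_SIZE}",
                 {"ROOTFS_SIZE": "204800"}))
    # part / --source rootfs --size 204800
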
@@ -48,26 +51,39 @@ class KickStartParser(ArgumentParser):
def error(self, message):
raise ArgumentError(None, message)
-def sizetype(arg):
- """
- Custom type for ArgumentParser
- Converts size string in <num>[K|k|M|G] format into the integer value
- """
- if arg.isdigit():
- return int(arg) * 1024
+def sizetype(default, size_in_bytes=False):
+ def f(arg):
+ """
+ Custom type for ArgumentParser
+ Converts size string in <num>[S|s|K|k|M|G] format into the integer value
+ """
+ try:
+ suffix = default
+ size = int(arg)
+ except ValueError:
+ try:
+ suffix = arg[-1:]
+ size = int(arg[:-1])
+ except ValueError:
+ raise ArgumentTypeError("Invalid size: %r" % arg)
- if not arg[:-1].isdigit():
- raise ArgumentTypeError("Invalid size: %r" % arg)
- size = int(arg[:-1])
- if arg.endswith("k") or arg.endswith("K"):
- return size
- if arg.endswith("M"):
- return size * 1024
- if arg.endswith("G"):
- return size * 1024 * 1024
+ if size_in_bytes:
+ if suffix == 's' or suffix == 'S':
+ return size * 512
+ mult = 1024
+ else:
+ mult = 1
- raise ArgumentTypeError("Invalid size: %r" % arg)
+ if suffix == "k" or suffix == "K":
+ return size * mult
+ if suffix == "M":
+ return size * mult * 1024
+ if suffix == "G":
+ return size * mult * 1024 * 1024
+
+ raise ArgumentTypeError("Invalid size: %r" % arg)
+ return f
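
A usage sketch (not part of the patch) for the sizetype() factory above, assuming scripts/lib is on sys.path: without size_in_bytes the result is in KB, with it the result is in bytes, and the 's'/'S' sector suffix is only honored in the byte variant (as used by --offset):

    from wic.ksparser import sizetype

    parse_ks = sizetype("M")                    # bare numbers default to MB
    assert parse_ks("10") == 10 * 1024          # 10M, expressed in KB
    assert parse_ks("10k") == 10                # explicit KB suffix

    parse_off = sizetype("K", size_in_bytes=True)
    assert parse_off("8s") == 8 * 512           # 512-byte sectors
    assert parse_off("4K") == 4 * 1024          # bytes rather than KB
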
def overheadtype(arg):
"""
@@ -133,28 +149,37 @@ class KickStart():
part.add_argument('mountpoint', nargs='?')
part.add_argument('--active', action='store_true')
part.add_argument('--align', type=int)
+ part.add_argument('--offset', type=sizetype("K", True))
part.add_argument('--exclude-path', nargs='+')
- part.add_argument("--extra-space", type=sizetype)
+ part.add_argument('--include-path', nargs='+', action='append')
+ part.add_argument('--change-directory')
+ part.add_argument("--extra-space", type=sizetype("M"))
part.add_argument('--fsoptions', dest='fsopts')
+ part.add_argument('--fspassno', dest='fspassno')
part.add_argument('--fstype', default='vfat',
choices=('ext2', 'ext3', 'ext4', 'btrfs',
- 'squashfs', 'vfat', 'msdos', 'swap'))
+ 'squashfs', 'vfat', 'msdos', 'erofs',
+ 'swap', 'none'))
part.add_argument('--mkfs-extraopts', default='')
part.add_argument('--label')
+ part.add_argument('--use-label', action='store_true')
part.add_argument('--no-table', action='store_true')
part.add_argument('--ondisk', '--ondrive', dest='disk', default='sda')
part.add_argument("--overhead-factor", type=overheadtype)
part.add_argument('--part-name')
part.add_argument('--part-type')
part.add_argument('--rootfs-dir')
+ part.add_argument('--type', default='primary',
+ choices = ('primary', 'logical'))
+ part.add_argument('--hidden', action='store_true')
# --size and --fixed-size cannot be specified together; options
# --extra-space and --overhead-factor should also raise a parser
# error, but since nesting mutually exclusive groups does not work,
# --extra-space/--overhead-factor are handled later
sizeexcl = part.add_mutually_exclusive_group()
- sizeexcl.add_argument('--size', type=sizetype, default=0)
- sizeexcl.add_argument('--fixed-size', type=sizetype, default=0)
+ sizeexcl.add_argument('--size', type=sizetype("M"), default=0)
+ sizeexcl.add_argument('--fixed-size', type=sizetype("M"), default=0)
part.add_argument('--source')
part.add_argument('--sourceparams')
@@ -162,11 +187,13 @@ class KickStart():
part.add_argument('--use-uuid', action='store_true')
part.add_argument('--uuid')
part.add_argument('--fsuuid')
+ part.add_argument('--no-fstab-update', action='store_true')
+ part.add_argument('--mbr', action='store_true')
bootloader = subparsers.add_parser('bootloader')
bootloader.add_argument('--append')
bootloader.add_argument('--configfile')
- bootloader.add_argument('--ptable', choices=('msdos', 'gpt'),
+ bootloader.add_argument('--ptable', choices=('msdos', 'gpt', 'gpt-hybrid'),
default='msdos')
bootloader.add_argument('--timeout', type=int)
bootloader.add_argument('--source')
@@ -189,6 +216,7 @@ class KickStart():
line = line.strip()
lineno += 1
if line and line[0] != '#':
+ line = expand_line(line)
try:
line_args = shlex.split(line)
parsed = parser.parse_args(line_args)
@@ -196,9 +224,39 @@ class KickStart():
raise KickStartError('%s:%d: %s' % \
(confpath, lineno, err))
if line.startswith('part'):
- # SquashFS does not support UUID
- if parsed.fstype == 'squashfs' and parsed.use_uuid:
- err = "%s:%d: SquashFS does not support UUID" \
+ # SquashFS does not support filesystem UUID
+ if parsed.fstype == 'squashfs':
+ if parsed.fsuuid:
+ err = "%s:%d: SquashFS does not support UUID" \
+ % (confpath, lineno)
+ raise KickStartError(err)
+ if parsed.label:
+ err = "%s:%d: SquashFS does not support LABEL" \
+ % (confpath, lineno)
+ raise KickStartError(err)
+ # erofs does not support filesystem labels
+ if parsed.fstype == 'erofs' and parsed.label:
+ err = "%s:%d: erofs does not support LABEL" % (confpath, lineno)
+ raise KickStartError(err)
+ if parsed.fstype == 'msdos' or parsed.fstype == 'vfat':
+ if parsed.fsuuid:
+ if parsed.fsuuid.upper().startswith('0X'):
+ if len(parsed.fsuuid) > 10:
+ err = "%s:%d: fsuuid %s given in wks kickstart file " \
+ "exceeds the length limit for %s filesystem. " \
+ "It should be in the form of a 32 bit hexadecimal" \
+ "number (for example, 0xABCD1234)." \
+ % (confpath, lineno, parsed.fsuuid, parsed.fstype)
+ raise KickStartError(err)
+ elif len(parsed.fsuuid) > 8:
+ err = "%s:%d: fsuuid %s given in wks kickstart file " \
+ "exceeds the length limit for %s filesystem. " \
+ "It should be in the form of a 32 bit hexadecimal" \
+ "number (for example, 0xABCD1234)." \
+ % (confpath, lineno, parsed.fsuuid, parsed.fstype)
+ raise KickStartError(err)
+ if parsed.use_label and not parsed.label:
+ err = "%s:%d: Must set the label with --label" \
% (confpath, lineno)
raise KickStartError(err)
# using ArgumentParser one cannot easily tell if option
@@ -229,6 +287,11 @@ class KickStart():
elif line.startswith('bootloader'):
if not self.bootloader:
self.bootloader = parsed
+ # Concatenate the strings set in APPEND
+ append_var = get_bitbake_var("APPEND")
+ if append_var:
+ self.bootloader.append = ' '.join(filter(None, \
+ (self.bootloader.append, append_var)))
else:
err = "%s:%d: more than one bootloader specified" \
% (confpath, lineno)
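
An illustrative .wks fragment (not part of the patch) that passes the new checks above: the vfat fsuuid fits in 32 bits and --use-label is paired with --label:

    part /boot --source bootimg-pcbios --fstype=vfat --fsuuid 0xABCD1234 --size 16M
    part /data --fstype=ext4 --label data --use-label --size 256M
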
diff --git a/scripts/lib/wic/misc.py b/scripts/lib/wic/misc.py
index ee888b478c..1a7c140fa6 100644
--- a/scripts/lib/wic/misc.py
+++ b/scripts/lib/wic/misc.py
@@ -1,21 +1,7 @@
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# Copyright (c) 2013, Intel Corporation.
-# All rights reserved.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# SPDX-License-Identifier: GPL-2.0-only
#
# DESCRIPTION
# This module provides a place to collect various wic-related utils
@@ -30,16 +16,17 @@ import logging
import os
import re
import subprocess
+import shutil
from collections import defaultdict
-from distutils import spawn
from wic import WicError
logger = logging.getLogger('wic')
# executable -> recipe pairs for exec_native_cmd
-NATIVE_RECIPES = {"bmaptool": "bmap-tools",
+NATIVE_RECIPES = {"bmaptool": "bmaptool",
+ "dumpe2fs": "e2fsprogs",
"grub-mkimage": "grub-efi",
"isohybrid": "syslinux",
"mcopy": "mtools",
@@ -49,6 +36,7 @@ NATIVE_RECIPES = {"bmaptool": "bmap-tools",
"mkdosfs": "dosfstools",
"mkisofs": "cdrtools",
"mkfs.btrfs": "btrfs-tools",
+ "mkfs.erofs": "erofs-utils",
"mkfs.ext2": "e2fsprogs",
"mkfs.ext3": "e2fsprogs",
"mkfs.ext4": "e2fsprogs",
@@ -59,7 +47,8 @@ NATIVE_RECIPES = {"bmaptool": "bmap-tools",
"parted": "parted",
"sfdisk": "util-linux",
"sgdisk": "gptfdisk",
- "syslinux": "syslinux"
+ "syslinux": "syslinux",
+ "tar": "tar"
}
def runtool(cmdln_or_args):
@@ -126,6 +115,15 @@ def exec_cmd(cmd_and_args, as_shell=False):
"""
return _exec_cmd(cmd_and_args, as_shell)[1]
+def find_executable(cmd, paths):
+ recipe = cmd
+ if recipe in NATIVE_RECIPES:
+ recipe = NATIVE_RECIPES[recipe]
+ provided = get_bitbake_var("ASSUME_PROVIDED")
+ if provided and "%s-native" % recipe in provided:
+ return True
+
+ return shutil.which(cmd, path=paths)
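
A behavior sketch (not part of the patch): find_executable() above returns True rather than a path when the tool's recipe is listed in ASSUME_PROVIDED, so exec_native_cmd() only requires a real sysroot or host binary otherwise. Here a plain tuple stubs the get_bitbake_var() lookup:

    import shutil

    NATIVE_RECIPES = {"parted": "parted"}  # excerpt of the real table

    def find_executable(cmd, paths, assume_provided=("parted-native",)):
        recipe = NATIVE_RECIPES.get(cmd, cmd)
        if "%s-native" % recipe in assume_provided:
            return True  # treated as "available" by the caller
        return shutil.which(cmd, path=paths)

    print(find_executable("parted", "/usr/sbin"))  # True: assumed provided
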
def exec_native_cmd(cmd_and_args, native_sysroot, pseudo=""):
"""
@@ -142,15 +140,20 @@ def exec_native_cmd(cmd_and_args, native_sysroot, pseudo=""):
if pseudo:
cmd_and_args = pseudo + cmd_and_args
- native_paths = "%s/sbin:%s/usr/sbin:%s/usr/bin" % \
- (native_sysroot, native_sysroot, native_sysroot)
+ hosttools_dir = get_bitbake_var("HOSTTOOLS_DIR")
+ target_sys = get_bitbake_var("TARGET_SYS")
+
+ native_paths = "%s/sbin:%s/usr/sbin:%s/usr/bin:%s/usr/bin/%s:%s/bin:%s" % \
+ (native_sysroot, native_sysroot,
+ native_sysroot, native_sysroot, target_sys,
+ native_sysroot, hosttools_dir)
native_cmd_and_args = "export PATH=%s:$PATH;%s" % \
(native_paths, cmd_and_args)
logger.debug("exec_native_cmd: %s", native_cmd_and_args)
# If the command isn't in the native sysroot say we failed.
- if spawn.find_executable(args[0], native_paths):
+ if find_executable(args[0], native_paths):
ret, out = _exec_cmd(native_cmd_and_args, True)
else:
ret = 127
diff --git a/scripts/lib/wic/partition.py b/scripts/lib/wic/partition.py
index c73af9d427..795707ec5d 100644
--- a/scripts/lib/wic/partition.py
+++ b/scripts/lib/wic/partition.py
@@ -1,21 +1,7 @@
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# Copyright (c) 2013-2016 Intel Corporation.
-# All rights reserved.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# SPDX-License-Identifier: GPL-2.0-only
#
# DESCRIPTION
# This module provides the OpenEmbedded partition object definitions.
@@ -44,13 +30,18 @@ class Partition():
self.device = None
self.extra_space = args.extra_space
self.exclude_path = args.exclude_path
+ self.include_path = args.include_path
+ self.change_directory = args.change_directory
self.fsopts = args.fsopts
+ self.fspassno = args.fspassno
self.fstype = args.fstype
self.label = args.label
+ self.use_label = args.use_label
self.mkfs_extraopts = args.mkfs_extraopts
self.mountpoint = args.mountpoint
self.no_table = args.no_table
self.num = None
+ self.offset = args.offset
self.overhead_factor = args.overhead_factor
self.part_name = args.part_name
self.part_type = args.part_type
@@ -63,10 +54,16 @@ class Partition():
self.use_uuid = args.use_uuid
self.uuid = args.uuid
self.fsuuid = args.fsuuid
+ self.type = args.type
+ self.no_fstab_update = args.no_fstab_update
+ self.updated_fstab_path = None
+ self.has_fstab = False
+ self.update_fstab_in_rootfs = False
+ self.hidden = args.hidden
+ self.mbr = args.mbr
self.lineno = lineno
self.source_file = ""
- self.sourceparams_dict = {}
def get_extra_block_count(self, current_blocks):
"""
@@ -111,7 +108,7 @@ class Partition():
extra_blocks = self.extra_space
rootfs_size = actual_rootfs_size + extra_blocks
- rootfs_size *= self.overhead_factor
+ rootfs_size = int(rootfs_size * self.overhead_factor)
logger.debug("Added %d extra blocks to %s to get to %d total blocks",
extra_blocks, self.mountpoint, rootfs_size)
@@ -128,12 +125,18 @@ class Partition():
return self.fixed_size if self.fixed_size else self.size
def prepare(self, creator, cr_workdir, oe_builddir, rootfs_dir,
- bootimg_dir, kernel_dir, native_sysroot):
+ bootimg_dir, kernel_dir, native_sysroot, updated_fstab_path):
"""
Prepare content for individual partitions, depending on
partition command parameters.
"""
+ self.updated_fstab_path = updated_fstab_path
+ if self.updated_fstab_path and not (self.fstype.startswith("ext") or self.fstype == "msdos"):
+ self.update_fstab_in_rootfs = True
+
if not self.source:
+ if self.fstype == "none" or self.no_table:
+ return
if not self.size and not self.fixed_size:
raise WicError("The %s partition has a size of zero. Please "
"specify a non-zero --size/--fixed-size for that "
@@ -144,9 +147,9 @@ class Partition():
native_sysroot)
self.source_file = "%s/fs.%s" % (cr_workdir, self.fstype)
else:
- if self.fstype == 'squashfs':
- raise WicError("It's not possible to create empty squashfs "
- "partition '%s'" % (self.mountpoint))
+ if self.fstype in ('squashfs', 'erofs'):
+ raise WicError("It's not possible to create empty %s "
+ "partition '%s'" % (self.fstype, self.mountpoint))
rootfs = "%s/fs_%s.%s.%s" % (cr_workdir, self.label,
self.lineno, self.fstype)
@@ -173,7 +176,7 @@ class Partition():
# Split sourceparams string of the form key1=val1[,key2=val2,...]
# into a dict. Also accepts valueless keys i.e. without =
splitted = self.sourceparams.split(',')
- srcparams_dict = dict(par.split('=') for par in splitted if par)
+ srcparams_dict = dict((par.split('=', 1) + [None])[:2] for par in splitted if par)
plugin = PluginMgr.get_plugins('source')[self.source]
plugin.do_configure_partition(self, srcparams_dict, creator,
@@ -202,46 +205,62 @@ class Partition():
(self.mountpoint, self.size, self.fixed_size))
def prepare_rootfs(self, cr_workdir, oe_builddir, rootfs_dir,
- native_sysroot, real_rootfs = True):
+ native_sysroot, real_rootfs = True, pseudo_dir = None):
"""
Prepare content for a rootfs partition i.e. create a partition
and fill it from a /rootfs dir.
Currently handles ext2/3/4, btrfs, vfat and squashfs.
"""
- p_prefix = os.environ.get("PSEUDO_PREFIX", "%s/usr" % native_sysroot)
- p_localstatedir = os.environ.get("PSEUDO_LOCALSTATEDIR",
- "%s/../pseudo" % rootfs_dir)
- p_passwd = os.environ.get("PSEUDO_PASSWD", rootfs_dir)
- p_nosymlinkexp = os.environ.get("PSEUDO_NOSYMLINKEXP", "1")
- pseudo = "export PSEUDO_PREFIX=%s;" % p_prefix
- pseudo += "export PSEUDO_LOCALSTATEDIR=%s;" % p_localstatedir
- pseudo += "export PSEUDO_PASSWD=%s;" % p_passwd
- pseudo += "export PSEUDO_NOSYMLINKEXP=%s;" % p_nosymlinkexp
- pseudo += "%s " % get_bitbake_var("FAKEROOTCMD")
rootfs = "%s/rootfs_%s.%s.%s" % (cr_workdir, self.label,
self.lineno, self.fstype)
if os.path.isfile(rootfs):
os.remove(rootfs)
- # Get rootfs size from bitbake variable if it's not set in .ks file
+ p_prefix = os.environ.get("PSEUDO_PREFIX", "%s/usr" % native_sysroot)
+ if (pseudo_dir):
+ # Canonicalize the ignore paths. This corresponds to
+ # calling oe.path.canonicalize(), which is used in bitbake.conf.
+ ignore_paths = [rootfs] + (get_bitbake_var("PSEUDO_IGNORE_PATHS") or "").split(",")
+ canonical_paths = []
+ for path in ignore_paths:
+ if "$" not in path:
+ trailing_slash = path.endswith("/") and "/" or ""
+ canonical_paths.append(os.path.realpath(path) + trailing_slash)
+ ignore_paths = ",".join(canonical_paths)
+
+ pseudo = "export PSEUDO_PREFIX=%s;" % p_prefix
+ pseudo += "export PSEUDO_LOCALSTATEDIR=%s;" % pseudo_dir
+ pseudo += "export PSEUDO_PASSWD=%s;" % rootfs_dir
+ pseudo += "export PSEUDO_NOSYMLINKEXP=1;"
+ pseudo += "export PSEUDO_IGNORE_PATHS=%s;" % ignore_paths
+ pseudo += "%s " % get_bitbake_var("FAKEROOTCMD")
+ else:
+ pseudo = None
+
if not self.size and real_rootfs:
- # Bitbake variable ROOTFS_SIZE is calculated in
- # Image._get_rootfs_size method from meta/lib/oe/image.py
- # using IMAGE_ROOTFS_SIZE, IMAGE_ROOTFS_ALIGNMENT,
- # IMAGE_OVERHEAD_FACTOR and IMAGE_ROOTFS_EXTRA_SPACE
+ # The rootfs size is not set in .ks file so try to get it
+ # from bitbake variable
rsize_bb = get_bitbake_var('ROOTFS_SIZE')
- if rsize_bb:
- logger.warning('overhead-factor was specified, but size was not,'
- ' so bitbake variables will be used for the size.'
- ' In this case both IMAGE_OVERHEAD_FACTOR and '
- '--overhead-factor will be applied')
+ rdir = get_bitbake_var('IMAGE_ROOTFS')
+ if rsize_bb and rdir == rootfs_dir:
+ # Bitbake variable ROOTFS_SIZE is calculated in
+ # Image._get_rootfs_size method from meta/lib/oe/image.py
+ # using IMAGE_ROOTFS_SIZE, IMAGE_ROOTFS_ALIGNMENT,
+ # IMAGE_OVERHEAD_FACTOR and IMAGE_ROOTFS_EXTRA_SPACE
self.size = int(round(float(rsize_bb)))
+ else:
+ # Bitbake variable ROOTFS_SIZE is not defined so compute it
+ # from the rootfs_dir size using the same logic found in
+ # get_rootfs_size() from meta/classes/image.bbclass
+ du_cmd = "du -ks %s" % rootfs_dir
+ out = exec_cmd(du_cmd)
+ self.size = int(out.split()[0])
prefix = "ext" if self.fstype.startswith("ext") else self.fstype
method = getattr(self, "prepare_rootfs_" + prefix)
- method(rootfs, oe_builddir, rootfs_dir, native_sysroot, pseudo)
+ method(rootfs, cr_workdir, oe_builddir, rootfs_dir, native_sysroot, pseudo)
self.source_file = rootfs
# get the rootfs size in the right units for kickstart (kB)
@@ -249,7 +268,7 @@ class Partition():
out = exec_cmd(du_cmd)
self.size = int(out.split()[0])
- def prepare_rootfs_ext(self, rootfs, oe_builddir, rootfs_dir,
+ def prepare_rootfs_ext(self, rootfs, cr_workdir, oe_builddir, rootfs_dir,
native_sysroot, pseudo):
"""
Prepare content for an ext2/3/4 rootfs partition.
@@ -265,6 +284,20 @@ class Partition():
extraopts = self.mkfs_extraopts or "-F -i 8192"
+ if os.getenv('SOURCE_DATE_EPOCH'):
+ sde_time = int(os.getenv('SOURCE_DATE_EPOCH'))
+ if pseudo:
+ pseudo = "export E2FSPROGS_FAKE_TIME=%s;%s " % (sde_time, pseudo)
+ else:
+ pseudo = "export E2FSPROGS_FAKE_TIME=%s; " % sde_time
+
+ # Set hash_seed to generate deterministic directory indexes
+ namespace = uuid.UUID("e7429877-e7b3-4a68-a5c9-2f2fdf33d460")
+ if self.fsuuid:
+ namespace = uuid.UUID(self.fsuuid)
+ hash_seed = str(uuid.uuid5(namespace, str(sde_time)))
+ extraopts += " -E hash_seed=%s" % hash_seed
+
label_str = ""
if self.label:
label_str = "-L %s" % self.label
@@ -273,10 +306,45 @@ class Partition():
(self.fstype, extraopts, rootfs, label_str, self.fsuuid, rootfs_dir)
exec_native_cmd(mkfs_cmd, native_sysroot, pseudo=pseudo)
+ if self.updated_fstab_path and self.has_fstab and not self.no_fstab_update:
+ debugfs_script_path = os.path.join(cr_workdir, "debugfs_script")
+ with open(debugfs_script_path, "w") as f:
+ f.write("cd etc\n")
+ f.write("rm fstab\n")
+ f.write("write %s fstab\n" % (self.updated_fstab_path))
+ debugfs_cmd = "debugfs -w -f %s %s" % (debugfs_script_path, rootfs)
+ exec_native_cmd(debugfs_cmd, native_sysroot)
+
mkfs_cmd = "fsck.%s -pvfD %s" % (self.fstype, rootfs)
exec_native_cmd(mkfs_cmd, native_sysroot, pseudo=pseudo)
- def prepare_rootfs_btrfs(self, rootfs, oe_builddir, rootfs_dir,
+ if os.getenv('SOURCE_DATE_EPOCH'):
+ sde_time = hex(int(os.getenv('SOURCE_DATE_EPOCH')))
+ debugfs_script_path = os.path.join(cr_workdir, "debugfs_script")
+ files = []
+ for root, dirs, others in os.walk(rootfs_dir):
+ base = root.replace(rootfs_dir, "").rstrip(os.sep)
+ files += [ "/" if base == "" else base ]
+ files += [ base + "/" + n for n in dirs + others ]
+ with open(debugfs_script_path, "w") as f:
+ f.write("set_current_time %s\n" % (sde_time))
+ if self.updated_fstab_path and self.has_fstab and not self.no_fstab_update:
+ f.write("set_inode_field /etc/fstab mtime %s\n" % (sde_time))
+ f.write("set_inode_field /etc/fstab mtime_extra 0\n")
+ for file in set(files):
+ for time in ["atime", "ctime", "crtime"]:
+ f.write("set_inode_field \"%s\" %s %s\n" % (file, time, sde_time))
+ f.write("set_inode_field \"%s\" %s_extra 0\n" % (file, time))
+ for time in ["wtime", "mkfs_time", "lastcheck"]:
+ f.write("set_super_value %s %s\n" % (time, sde_time))
+ for time in ["mtime", "first_error_time", "last_error_time"]:
+ f.write("set_super_value %s 0\n" % (time))
+ debugfs_cmd = "debugfs -w -f %s %s" % (debugfs_script_path, rootfs)
+ exec_native_cmd(debugfs_cmd, native_sysroot)
+
+ self.check_for_Y2038_problem(rootfs, native_sysroot)
+
+ def prepare_rootfs_btrfs(self, rootfs, cr_workdir, oe_builddir, rootfs_dir,
native_sysroot, pseudo):
"""
Prepare content for a btrfs rootfs partition.
@@ -299,7 +367,7 @@ class Partition():
self.mkfs_extraopts, self.fsuuid, rootfs)
exec_native_cmd(mkfs_cmd, native_sysroot, pseudo=pseudo)
- def prepare_rootfs_msdos(self, rootfs, oe_builddir, rootfs_dir,
+ def prepare_rootfs_msdos(self, rootfs, cr_workdir, oe_builddir, rootfs_dir,
native_sysroot, pseudo):
"""
Prepare content for a msdos/vfat rootfs partition.
@@ -315,25 +383,27 @@ class Partition():
label_str = "-n %s" % self.label
size_str = ""
- if self.fstype == 'msdos':
- size_str = "-F 16" # FAT 16
extraopts = self.mkfs_extraopts or '-S 512'
dosfs_cmd = "mkdosfs %s -i %s %s %s -C %s %d" % \
(label_str, self.fsuuid, size_str, extraopts, rootfs,
- max(8250, rootfs_size))
+ rootfs_size)
exec_native_cmd(dosfs_cmd, native_sysroot)
mcopy_cmd = "mcopy -i %s -s %s/* ::/" % (rootfs, rootfs_dir)
exec_native_cmd(mcopy_cmd, native_sysroot)
+ if self.updated_fstab_path and self.has_fstab and not self.no_fstab_update:
+ mcopy_cmd = "mcopy -m -i %s %s ::/etc/fstab" % (rootfs, self.updated_fstab_path)
+ exec_native_cmd(mcopy_cmd, native_sysroot)
+
chmod_cmd = "chmod 644 %s" % rootfs
exec_cmd(chmod_cmd)
prepare_rootfs_vfat = prepare_rootfs_msdos
- def prepare_rootfs_squashfs(self, rootfs, oe_builddir, rootfs_dir,
+ def prepare_rootfs_squashfs(self, rootfs, cr_workdir, oe_builddir, rootfs_dir,
native_sysroot, pseudo):
"""
Prepare content for a squashfs rootfs partition.
@@ -343,6 +413,19 @@ class Partition():
(rootfs_dir, rootfs, extraopts)
exec_native_cmd(squashfs_cmd, native_sysroot, pseudo=pseudo)
+ def prepare_rootfs_erofs(self, rootfs, cr_workdir, oe_builddir, rootfs_dir,
+ native_sysroot, pseudo):
+ """
+ Prepare content for an erofs rootfs partition.
+ """
+ extraopts = self.mkfs_extraopts or ''
+ erofs_cmd = "mkfs.erofs %s -U %s %s %s" % \
+ (extraopts, self.fsuuid, rootfs, rootfs_dir)
+ exec_native_cmd(erofs_cmd, native_sysroot, pseudo=pseudo)
+
+ def prepare_empty_partition_none(self, rootfs, oe_builddir, native_sysroot):
+ pass
+
def prepare_empty_partition_ext(self, rootfs, oe_builddir,
native_sysroot):
"""
@@ -362,6 +445,8 @@ class Partition():
(self.fstype, extraopts, label_str, self.fsuuid, rootfs)
exec_native_cmd(mkfs_cmd, native_sysroot)
+ self.check_for_Y2038_problem(rootfs, native_sysroot)
+
def prepare_empty_partition_btrfs(self, rootfs, oe_builddir,
native_sysroot):
"""
@@ -392,8 +477,6 @@ class Partition():
label_str = "-n %s" % self.label
size_str = ""
- if self.fstype == 'msdos':
- size_str = "-F 16" # FAT 16
extraopts = self.mkfs_extraopts or '-S 512'
@@ -423,3 +506,37 @@ class Partition():
mkswap_cmd = "mkswap %s -U %s %s" % (label_str, self.fsuuid, path)
exec_native_cmd(mkswap_cmd, native_sysroot)
+
+ def check_for_Y2038_problem(self, rootfs, native_sysroot):
+ """
+ Check if the filesystem is affected by the Y2038 problem
+ (Y2038 problem = 32 bit time_t overflow in January 2038)
+ """
+ def get_err_str(part):
+ err = "The {} filesystem {} has no Y2038 support."
+ if part.mountpoint:
+ args = [part.fstype, "mounted at %s" % part.mountpoint]
+ elif part.label:
+ args = [part.fstype, "labeled '%s'" % part.label]
+ elif part.part_name:
+ args = [part.fstype, "in partition '%s'" % part.part_name]
+ else:
+ args = [part.fstype, "in partition %s" % part.num]
+ return err.format(*args)
+
+ # ext2 and ext3 are always affected by the Y2038 problem
+ if self.fstype in ["ext2", "ext3"]:
+ logger.warning(get_err_str(self))
+ return
+
+ ret, out = exec_native_cmd("dumpe2fs %s" % rootfs, native_sysroot)
+
+ # whether ext4 is affected by the Y2038 problem depends on the inode size
+ for line in out.splitlines():
+ if line.startswith("Inode size:"):
+ size = int(line.split(":")[1].strip())
+ if size < 256:
+ logger.warn("%s Inodes (of size %d) are too small." %
+ (get_err_str(self), size))
+ break
+
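
Background sketch (not part of the patch): the Y2038 check above exists because ext2/ext3, and ext4 with inodes smaller than 256 bytes, store timestamps in a signed 32-bit time_t, which wraps early in 2038:

    import datetime

    overflow = datetime.datetime.fromtimestamp(2**31 - 1, datetime.timezone.utc)
    print(overflow)  # 2038-01-19 03:14:07+00:00, where a 32-bit time_t wraps
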
diff --git a/scripts/lib/wic/pluginbase.py b/scripts/lib/wic/pluginbase.py
index 686d2fee3b..b64568339b 100644
--- a/scripts/lib/wic/pluginbase.py
+++ b/scripts/lib/wic/pluginbase.py
@@ -1,34 +1,26 @@
-#!/usr/bin/env python -tt
+#!/usr/bin/env python3
#
# Copyright (c) 2011 Intel, Inc.
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License as published by the Free
-# Software Foundation; version 2 of the License
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
-# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-# for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc., 59
-# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
__all__ = ['ImagerPlugin', 'SourcePlugin']
import os
import logging
+import types
from collections import defaultdict
-from importlib.machinery import SourceFileLoader
+import importlib
+import importlib.util
from wic import WicError
from wic.misc import get_bitbake_var
PLUGIN_TYPES = ["imager", "source"]
-SCRIPTS_PLUGIN_DIR = "scripts/lib/wic/plugins"
+SCRIPTS_PLUGIN_DIR = ["scripts/lib/wic/plugins", "lib/wic/plugins"]
logger = logging.getLogger('wic')
@@ -48,10 +40,11 @@ class PluginMgr:
cls._plugin_dirs = [os.path.join(os.path.dirname(__file__), 'plugins')]
layers = get_bitbake_var("BBLAYERS") or ''
for layer_path in layers.split():
- path = os.path.join(layer_path, SCRIPTS_PLUGIN_DIR)
- path = os.path.abspath(os.path.expanduser(path))
- if path not in cls._plugin_dirs and os.path.isdir(path):
- cls._plugin_dirs.insert(0, path)
+ for script_plugin_dir in SCRIPTS_PLUGIN_DIR:
+ path = os.path.join(layer_path, script_plugin_dir)
+ path = os.path.abspath(os.path.expanduser(path))
+ if path not in cls._plugin_dirs and os.path.isdir(path):
+ cls._plugin_dirs.insert(0, path)
if ptype not in PLUGINS:
# load all ptype plugins
@@ -63,7 +56,9 @@ class PluginMgr:
mname = fname[:-3]
mpath = os.path.join(ppath, fname)
logger.debug("loading plugin module %s", mpath)
- SourceFileLoader(mname, mpath).load_module()
+ spec = importlib.util.spec_from_file_location(mname, mpath)
+ module = importlib.util.module_from_spec(spec)
+ spec.loader.exec_module(module)
return PLUGINS.get(ptype)
diff --git a/scripts/lib/wic/plugins/imager/direct.py b/scripts/lib/wic/plugins/imager/direct.py
index 81583e97b9..a1d152659b 100644
--- a/scripts/lib/wic/plugins/imager/direct.py
+++ b/scripts/lib/wic/plugins/imager/direct.py
@@ -1,21 +1,7 @@
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# Copyright (c) 2013, Intel Corporation.
-# All rights reserved.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# SPDX-License-Identifier: GPL-2.0-only
#
# DESCRIPTION
# This implements the 'direct' imager plugin class for 'wic'
@@ -63,20 +49,21 @@ class DirectPlugin(ImagerPlugin):
# parse possible 'rootfs=name' items
self.rootfs_dir = dict(rdir.split('=') for rdir in rootfs_dir.split(' '))
- self.replaced_rootfs_paths = {}
self.bootimg_dir = bootimg_dir
self.kernel_dir = kernel_dir
self.native_sysroot = native_sysroot
self.oe_builddir = oe_builddir
+ self.debug = options.debug
self.outdir = options.outdir
self.compressor = options.compressor
self.bmap = options.bmap
self.no_fstab_update = options.no_fstab_update
+ self.updated_fstab_path = None
self.name = "%s-%s" % (os.path.splitext(os.path.basename(wks_file))[0],
strftime("%Y%m%d%H%M"))
- self.workdir = tempfile.mkdtemp(dir=self.outdir, prefix='tmp.wic.')
+ self.workdir = self.setup_workdir(options.workdir)
self._image = None
self.ptable_format = self.ks.bootloader.ptable
self.parts = self.ks.partitions
@@ -90,7 +77,18 @@ class DirectPlugin(ImagerPlugin):
image_path = self._full_path(self.workdir, self.parts[0].disk, "direct")
self._image = PartitionedImage(image_path, self.ptable_format,
- self.parts, self.native_sysroot)
+ self.parts, self.native_sysroot,
+ options.extra_space)
+
+ def setup_workdir(self, workdir):
+ if workdir:
+ if os.path.exists(workdir):
+ raise WicError("Internal workdir '%s' specified in wic arguments already exists!" % (workdir))
+
+ os.makedirs(workdir)
+ return workdir
+ else:
+ return tempfile.mkdtemp(dir=self.outdir, prefix='tmp.wic.')
def do_create(self):
"""
@@ -104,11 +102,8 @@ class DirectPlugin(ImagerPlugin):
finally:
self.cleanup()
- def _write_fstab(self, image_rootfs):
- """overriden to generate fstab (temporarily) in rootfs. This is called
- from _create, make sure it doesn't get called from
- BaseImage.create()
- """
+ def update_fstab(self, image_rootfs):
+ """Assume partition order same as in wks"""
if not image_rootfs:
return
@@ -119,30 +114,10 @@ class DirectPlugin(ImagerPlugin):
with open(fstab_path) as fstab:
fstab_lines = fstab.readlines()
- if self._update_fstab(fstab_lines, self.parts):
- # copy rootfs dir to workdir to update fstab
- # as rootfs can be used by other tasks and can't be modified
- new_pseudo = os.path.realpath(os.path.join(self.workdir, "pseudo"))
- from_dir = os.path.join(os.path.join(image_rootfs, ".."), "pseudo")
- from_dir = os.path.realpath(from_dir)
- copyhardlinktree(from_dir, new_pseudo)
- new_rootfs = os.path.realpath(os.path.join(self.workdir, "rootfs_copy"))
- copyhardlinktree(image_rootfs, new_rootfs)
- fstab_path = os.path.join(new_rootfs, 'etc/fstab')
-
- os.unlink(fstab_path)
-
- with open(fstab_path, "w") as fstab:
- fstab.writelines(fstab_lines)
-
- return new_rootfs
-
- def _update_fstab(self, fstab_lines, parts):
- """Assume partition order same as in wks"""
updated = False
- for part in parts:
+ for part in self.parts:
if not part.realnum or not part.mountpoint \
- or part.mountpoint == "/":
+ or part.mountpoint == "/" or not (part.mountpoint.startswith('/') or part.mountpoint == "swap"):
continue
if part.use_uuid:
@@ -155,19 +130,28 @@ class DirectPlugin(ImagerPlugin):
device_name = "UUID=%s" % part.fsuuid
else:
device_name = "PARTUUID=%s" % part.uuid
+ elif part.use_label:
+ device_name = "LABEL=%s" % part.label
else:
# mmc device partitions are named mmcblk0p1, mmcblk0p2..
prefix = 'p' if part.disk.startswith('mmcblk') else ''
device_name = "/dev/%s%s%d" % (part.disk, prefix, part.realnum)
opts = part.fsopts if part.fsopts else "defaults"
+ passno = part.fspassno if part.fspassno else "0"
line = "\t".join([device_name, part.mountpoint, part.fstype,
- opts, "0", "0"]) + "\n"
+ opts, "0", passno]) + "\n"
fstab_lines.append(line)
updated = True
- return updated
+ if updated:
+ self.updated_fstab_path = os.path.join(self.workdir, "fstab")
+ with open(self.updated_fstab_path, "w") as f:
+ f.writelines(fstab_lines)
+ if os.getenv('SOURCE_DATE_EPOCH'):
+ fstab_time = int(os.getenv('SOURCE_DATE_EPOCH'))
+ os.utime(self.updated_fstab_path, (fstab_time, fstab_time))
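
A sketch (not part of the patch) of the fstab line assembled above, for a hypothetical vfat /boot partition with --use-uuid, a made-up PARTUUID, and the default passno of "0":

    device_name = "PARTUUID=8e079bd8-6d7a-47c5-a1f9-d6aa30ea4e94"  # made-up
    line = "\t".join([device_name, "/boot", "vfat", "defaults", "0", "0"]) + "\n"
    print(line, end="")
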
def _full_path(self, path, name, extention):
""" Construct full file path to a file we generate. """
@@ -182,14 +166,8 @@ class DirectPlugin(ImagerPlugin):
filesystems from the artifacts directly and combine them into
a partitioned image.
"""
- if self.no_fstab_update:
- new_rootfs = None
- else:
- new_rootfs = self._write_fstab(self.rootfs_dir.get("ROOTFS_DIR"))
- if new_rootfs:
- # rootfs was copied to update fstab
- self.replaced_rootfs_paths[new_rootfs] = self.rootfs_dir['ROOTFS_DIR']
- self.rootfs_dir['ROOTFS_DIR'] = new_rootfs
+ if not self.no_fstab_update:
+ self.update_fstab(self.rootfs_dir.get("ROOTFS_DIR"))
for part in self.parts:
# get rootfs size from bitbake variable if it's not set in .ks file
@@ -265,8 +243,6 @@ class DirectPlugin(ImagerPlugin):
else:
suffix = '["%s"]:' % (part.mountpoint or part.label)
rootdir = part.rootfs_dir
- if rootdir in self.replaced_rootfs_paths:
- rootdir = self.replaced_rootfs_paths[rootdir]
msg += ' ROOTFS_DIR%s%s\n' % (suffix.ljust(20), rootdir)
msg += ' BOOTIMG_DIR: %s\n' % self.bootimg_dir
@@ -287,6 +263,8 @@ class DirectPlugin(ImagerPlugin):
if part.mountpoint == "/":
if part.uuid:
return "PARTUUID=%s" % part.uuid
+ elif part.label and self.ptable_format != 'msdos':
+ return "PARTLABEL=%s" % part.label
else:
suffix = 'p' if part.disk.startswith('mmcblk') else ''
return "/dev/%s%s%-d" % (part.disk, suffix, part.realnum)
@@ -304,8 +282,9 @@ class DirectPlugin(ImagerPlugin):
if os.path.isfile(path):
shutil.move(path, os.path.join(self.outdir, fname))
- # remove work directory
- shutil.rmtree(self.workdir, ignore_errors=True)
+ # remove the work directory unless we are in debugging mode
+ if not self.debug:
+ shutil.rmtree(self.workdir, ignore_errors=True)
# Overhead of the MBR partitioning scheme (just one sector)
MBR_OVERHEAD = 1
@@ -321,22 +300,31 @@ class PartitionedImage():
Partitioned image in a file.
"""
- def __init__(self, path, ptable_format, partitions, native_sysroot=None):
+ def __init__(self, path, ptable_format, partitions, native_sysroot=None, extra_space=0):
self.path = path # Path to the image file
self.numpart = 0 # Number of allocated partitions
self.realpart = 0 # Number of partitions in the partition table
+ self.primary_part_num = 0 # Number of primary partitions (msdos)
+ self.extendedpart = 0 # Create extended partition before this logical partition (msdos)
+ self.extended_size_sec = 0 # Size of extended partition (msdos)
+ self.logical_part_cnt = 0 # Number of total logical partitions (msdos)
self.offset = 0 # Offset of next partition (in sectors)
self.min_size = 0 # Minimum required disk size to fit
# all partitions (in bytes)
self.ptable_format = ptable_format # Partition table format
# Disk system identifier
- self.identifier = random.SystemRandom().randint(1, 0xffffffff)
+ if os.getenv('SOURCE_DATE_EPOCH'):
+ self.identifier = random.Random(int(os.getenv('SOURCE_DATE_EPOCH'))).randint(1, 0xffffffff)
+ else:
+ self.identifier = random.SystemRandom().randint(1, 0xffffffff)
self.partitions = partitions
self.partimages = []
# Size of a sector used in calculations
self.sector_size = SECTOR_SIZE
self.native_sysroot = native_sysroot
+ num_real_partitions = len([p for p in self.partitions if not p.no_table])
+ self.extra_space = extra_space
# calculate the real partition number, accounting for partitions not
# in the partition table and logical partitions
@@ -346,7 +334,7 @@ class PartitionedImage():
part.realnum = 0
else:
realnum += 1
- if self.ptable_format == 'msdos' and realnum > 3 and len(partitions) > 4:
+ if self.ptable_format == 'msdos' and realnum > 3 and num_real_partitions > 4:
part.realnum = realnum + 1
continue
part.realnum = realnum
@@ -354,7 +342,7 @@ class PartitionedImage():
# generate partition and filesystem UUIDs
for part in self.partitions:
if not part.uuid and part.use_uuid:
- if self.ptable_format == 'gpt':
+ if self.ptable_format in ('gpt', 'gpt-hybrid'):
part.uuid = str(uuid.uuid4())
else: # msdos partition table
part.uuid = '%08x-%02d' % (self.identifier, part.realnum)
@@ -363,6 +351,13 @@ class PartitionedImage():
part.fsuuid = '0x' + str(uuid.uuid4())[:8].upper()
else:
part.fsuuid = str(uuid.uuid4())
+ else:
+ # make sure the fsuuid for vfat/msdos aligns with the 0xYYYYYYYY format, e.g. "abc" -> "0x00000ABC"
+ if part.fstype == 'vfat' or part.fstype == 'msdos':
+ if part.fsuuid.upper().startswith("0X"):
+ part.fsuuid = '0x' + part.fsuuid.upper()[2:].rjust(8,"0")
+ else:
+ part.fsuuid = '0x' + part.fsuuid.upper().rjust(8,"0")
def prepare(self, imager):
"""Prepare an image. Call prepare method of all image partitions."""
@@ -371,7 +366,8 @@ class PartitionedImage():
# sizes before we can add them and do the layout.
part.prepare(imager, imager.workdir, imager.oe_builddir,
imager.rootfs_dir, imager.bootimg_dir,
- imager.kernel_dir, imager.native_sysroot)
+ imager.kernel_dir, imager.native_sysroot,
+ imager.updated_fstab_path)
# Converting kB to sectors for parted
part.size_sec = part.disk_size * 1024 // self.sector_size
@@ -402,6 +398,10 @@ class PartitionedImage():
raise WicError("setting custom partition type is not " \
"implemented for msdos partitions")
+ if part.mbr and self.ptable_format != 'gpt-hybrid':
+ raise WicError("Partition may only be included in MBR with " \
+ "a gpt-hybrid partition table")
+
# Get the disk where the partition is located
self.numpart += 1
if not part.no_table:
@@ -410,18 +410,22 @@ class PartitionedImage():
if self.numpart == 1:
if self.ptable_format == "msdos":
overhead = MBR_OVERHEAD
- elif self.ptable_format == "gpt":
+ elif self.ptable_format in ("gpt", "gpt-hybrid"):
overhead = GPT_OVERHEAD
# Skip one sector required for the partitioning scheme overhead
self.offset += overhead
- if self.realpart > 3 and num_real_partitions > 4:
+ if self.ptable_format == "msdos":
+ if self.primary_part_num > 3 or \
+ (self.extendedpart == 0 and self.primary_part_num >= 3 and num_real_partitions > 4):
+ part.type = 'logical'
# Reserve a sector for EBR for every logical partition
# before alignment is performed.
- if self.ptable_format == "msdos":
- self.offset += 1
+ if part.type == 'logical':
+ self.offset += 2
+ align_sectors = 0
if part.align:
# If not first partition and we do have alignment set we need
# to align the partition.
@@ -444,21 +448,43 @@ class PartitionedImage():
# increase the offset so we actually start the partition on right alignment
self.offset += align_sectors
+ if part.offset is not None:
+ offset = part.offset // self.sector_size
+
+ if offset * self.sector_size != part.offset:
+ raise WicError("Could not place %s%s at offset %d with sector size %d" % (part.disk, self.numpart, part.offset, self.sector_size))
+
+ delta = offset - self.offset
+ if delta < 0:
+ raise WicError("Could not place %s%s at offset %d: next free sector is %d (delta: %d)" % (part.disk, self.numpart, part.offset, self.offset, delta))
+
+ logger.debug("Skipping %d sectors to place %s%s at offset %dK",
+ delta, part.disk, self.numpart, part.offset)
+
+ self.offset = offset
+
part.start = self.offset
self.offset += part.size_sec
- part.type = 'primary'
if not part.no_table:
part.num = self.realpart
else:
part.num = 0
- if self.ptable_format == "msdos":
- # only count the partitions that are in partition table
- if num_real_partitions > 4:
- if self.realpart > 3:
- part.type = 'logical'
- part.num = self.realpart + 1
+ if self.ptable_format == "msdos" and not part.no_table:
+ if part.type == 'logical':
+ self.logical_part_cnt += 1
+ part.num = self.logical_part_cnt + 4
+ if self.extendedpart == 0:
+ # Create extended partition as a primary partition
+ self.primary_part_num += 1
+ self.extendedpart = part.num
+ else:
+ self.extended_size_sec += align_sectors
+ self.extended_size_sec += part.size_sec + 2
+ else:
+ self.primary_part_num += 1
+ part.num = self.primary_part_num
logger.debug("Assigned %s to %s%d, sectors range %d-%d size %d "
"sectors (%d bytes).", part.mountpoint, part.disk,
@@ -468,10 +494,11 @@ class PartitionedImage():
# Once all the partitions have been laid out, we can calculate the
# minimum disk size
self.min_size = self.offset
- if self.ptable_format == "gpt":
+ if self.ptable_format in ("gpt", "gpt-hybrid"):
self.min_size += GPT_OVERHEAD
self.min_size *= self.sector_size
+ self.min_size += self.extra_space
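Worked example of the msdos bookkeeping above (illustration only): a wks with five partitions in the table. The first three are laid out as primaries 1-3; when the fourth is reached, primary_part_num is 3 and more than four real partitions exist, so it and every later partition become logical. The logicals are numbered 5 and 6 (slot 4 is consumed by the extended partition, which is counted through primary_part_num), each logical reserves 2 sectors for its EBR before alignment, and extended_size_sec accumulates their sizes plus EBR and alignment padding. min_size is then the final offset times the sector size, plus extra_space (gpt and gpt-hybrid images additionally reserve GPT_OVERHEAD for the backup table).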
def _create_partition(self, device, parttype, fstype, start, size):
""" Create a partition on an image described by the 'device' object. """
@@ -488,27 +515,54 @@ class PartitionedImage():
return exec_native_cmd(cmd, self.native_sysroot)
+ def _write_identifier(self, device, identifier):
+ logger.debug("Set disk identifier %x", identifier)
+ with open(device, 'r+b') as img:
+ img.seek(0x1B8)
+ img.write(identifier.to_bytes(4, 'little'))
+
+ def _make_disk(self, device, ptable_format, min_size):
+ logger.debug("Creating sparse file %s", device)
+ with open(device, 'w') as sparse:
+ os.ftruncate(sparse.fileno(), min_size)
+
+ logger.debug("Initializing partition table for %s", device)
+ exec_native_cmd("parted -s %s mklabel %s" % (device, ptable_format),
+ self.native_sysroot)
+
+ def _write_disk_guid(self):
+ if self.ptable_format in ('gpt', 'gpt-hybrid'):
+ if os.getenv('SOURCE_DATE_EPOCH'):
+ self.disk_guid = uuid.UUID(int=int(os.getenv('SOURCE_DATE_EPOCH')))
+ else:
+ self.disk_guid = uuid.uuid4()
+
+ logger.debug("Set disk guid %s", self.disk_guid)
+ sfdisk_cmd = "sfdisk --disk-id %s %s" % (self.path, self.disk_guid)
+ exec_native_cmd(sfdisk_cmd, self.native_sysroot)
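A condensed sketch of the reproducible-identifier behaviour added above (illustration only; the helper name is ours, not part of the patch):

    import os, random, uuid

    def reproducible_disk_ids():
        # Same SOURCE_DATE_EPOCH in => same MBR identifier and GPT disk GUID out.
        sde = os.getenv('SOURCE_DATE_EPOCH')
        if sde:
            return (random.Random(int(sde)).randint(1, 0xffffffff),
                    uuid.UUID(int=int(sde)))
        # No reproducible epoch requested: fall back to fresh randomness.
        return (random.SystemRandom().randint(1, 0xffffffff), uuid.uuid4())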
+
def create(self):
- logger.debug("Creating sparse file %s", self.path)
- with open(self.path, 'w') as sparse:
- os.ftruncate(sparse.fileno(), self.min_size)
+ self._make_disk(self.path,
+ "gpt" if self.ptable_format == "gpt-hybrid" else self.ptable_format,
+ self.min_size)
- logger.debug("Initializing partition table for %s", self.path)
- exec_native_cmd("parted -s %s mklabel %s" %
- (self.path, self.ptable_format), self.native_sysroot)
+ self._write_identifier(self.path, self.identifier)
+ self._write_disk_guid()
- logger.debug("Set disk identifier %x", self.identifier)
- with open(self.path, 'r+b') as img:
- img.seek(0x1B8)
- img.write(self.identifier.to_bytes(4, 'little'))
+ if self.ptable_format == "gpt-hybrid":
+ mbr_path = self.path + ".mbr"
+ self._make_disk(mbr_path, "msdos", self.min_size)
+ self._write_identifier(mbr_path, self.identifier)
logger.debug("Creating partitions")
+ hybrid_mbr_part_num = 0
+
for part in self.partitions:
if part.num == 0:
continue
- if self.ptable_format == "msdos" and part.num == 5:
+ if self.ptable_format == "msdos" and part.num == self.extendedpart:
# Create an extended partition (note: extended
# partition is described in MBR and contains all
# logical partitions). The logical partitions save a
@@ -521,8 +575,8 @@ class PartitionedImage():
# add a sector at the back, so that there is enough
# room for all logical partitions.
self._create_partition(self.path, "extended",
- None, part.start - 1,
- self.offset - part.start + 1)
+ None, part.start - 2,
+ self.extended_size_sec)
if part.fstype == "swap":
parted_fs_type = "linux-swap"
@@ -548,11 +602,19 @@ class PartitionedImage():
self._create_partition(self.path, part.type,
parted_fs_type, part.start, part.size_sec)
- if part.part_name:
+ if self.ptable_format == "gpt-hybrid" and part.mbr:
+ hybrid_mbr_part_num += 1
+ if hybrid_mbr_part_num > 4:
+ raise WicError("Extended MBR partitions are not supported in hybrid MBR")
+ self._create_partition(mbr_path, "primary",
+ parted_fs_type, part.start, part.size_sec)
+
+ if self.ptable_format in ("gpt", "gpt-hybrid") and (part.part_name or part.label):
+ partition_label = part.part_name if part.part_name else part.label
logger.debug("partition %d: set name to %s",
- part.num, part.part_name)
+ part.num, partition_label)
exec_native_cmd("sgdisk --change-name=%d:%s %s" % \
- (part.num, part.part_name,
+ (part.num, partition_label,
self.path), self.native_sysroot)
if part.part_type:
@@ -562,36 +624,57 @@ class PartitionedImage():
(part.num, part.part_type,
self.path), self.native_sysroot)
- if part.uuid and self.ptable_format == "gpt":
+ if part.uuid and self.ptable_format in ("gpt", "gpt-hybrid"):
logger.debug("partition %d: set UUID to %s",
part.num, part.uuid)
exec_native_cmd("sgdisk --partition-guid=%d:%s %s" % \
(part.num, part.uuid, self.path),
self.native_sysroot)
- if part.label and self.ptable_format == "gpt":
- logger.debug("partition %d: set name to %s",
- part.num, part.label)
- exec_native_cmd("parted -s %s name %d %s" % \
- (self.path, part.num, part.label),
- self.native_sysroot)
-
if part.active:
- flag_name = "legacy_boot" if self.ptable_format == 'gpt' else "boot"
+ flag_name = "legacy_boot" if self.ptable_format in ('gpt', 'gpt-hybrid') else "boot"
logger.debug("Set '%s' flag for partition '%s' on disk '%s'",
flag_name, part.num, self.path)
exec_native_cmd("parted -s %s set %d %s on" % \
(self.path, part.num, flag_name),
self.native_sysroot)
+ if self.ptable_format == 'gpt-hybrid' and part.mbr:
+ exec_native_cmd("parted -s %s set %d %s on" % \
+ (mbr_path, hybrid_mbr_part_num, "boot"),
+ self.native_sysroot)
if part.system_id:
exec_native_cmd("sfdisk --part-type %s %s %s" % \
(self.path, part.num, part.system_id),
self.native_sysroot)
+ if part.hidden and self.ptable_format == "gpt":
+ logger.debug("Set hidden attribute for partition '%s' on disk '%s'",
+ part.num, self.path)
+ exec_native_cmd("sfdisk --part-attrs %s %s RequiredPartition" % \
+ (self.path, part.num),
+ self.native_sysroot)
+
+ if self.ptable_format == "gpt-hybrid":
+ # Write a protective GPT partition
+ hybrid_mbr_part_num += 1
+ if hybrid_mbr_part_num > 4:
+ raise WicError("Extended MBR partitions are not supported in hybrid MBR")
+
+ # parted cannot directly create a protective GPT partition, so
+ # create with an arbitrary type, then change it to the correct type
+ # with sfdisk
+ self._create_partition(mbr_path, "primary", "fat32", 1, GPT_OVERHEAD)
+ exec_native_cmd("sfdisk --part-type %s %d 0xee" % (mbr_path, hybrid_mbr_part_num),
+ self.native_sysroot)
+
+ # Copy hybrid MBR
+ with open(mbr_path, "rb") as mbr_file:
+ with open(self.path, "r+b") as image_file:
+ mbr = mbr_file.read(512)
+ image_file.write(mbr)
+
def cleanup(self):
- # remove partition images
- for image in set(self.partimages):
- os.remove(image)
+ pass
def assemble(self):
logger.debug("Installing partitions")
diff --git a/scripts/lib/wic/plugins/source/bootimg-biosplusefi.py b/scripts/lib/wic/plugins/source/bootimg-biosplusefi.py
new file mode 100644
index 0000000000..5bd7390680
--- /dev/null
+++ b/scripts/lib/wic/plugins/source/bootimg-biosplusefi.py
@@ -0,0 +1,213 @@
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# DESCRIPTION
+# This implements the 'bootimg-biosplusefi' source plugin class for 'wic'
+#
+# AUTHORS
+# William Bourque <wbourque [at) gmail.com>
+
+import os
+import types
+
+from wic.pluginbase import SourcePlugin
+from importlib.machinery import SourceFileLoader
+
+class BootimgBiosPlusEFIPlugin(SourcePlugin):
+ """
+ Create MBR + EFI boot partition
+
+ This plugin creates a boot partition that contains both
+ legacy BIOS and EFI content. It will be able to boot from both.
+ This is useful when managing a PC fleet with some older machines
+ without EFI support.
+
+ Note it is possible to create an image that can boot from both
+ legacy BIOS and EFI by defining two partitions: one with arg
+ --source bootimg-efi and another one with --source bootimg-pcbios.
+ However, this method has the obvious downside that it requires TWO
+ partitions to be created on the storage device.
+ Both partitions will also be marked as "bootable", which does not work on
+ most BIOSes, as the BIOS often uses the "bootable" flag to determine
+ what to boot. If you have such a BIOS, you need to manually remove the
+ "bootable" flag from the EFI partition for the drive to be bootable.
+ Having two partitions also seems to confuse wic: the content of
+ the first partition will be duplicated into the second, even though it
+ will not be used at all.
+
+ Also, unlike "isoimage-isohybrid", which also does BIOS and EFI, this plugin
+ allows you to have more than a single rootfs partition and does
+ not turn the rootfs into an initramfs RAM image.
+
+ This plugin is made to put everything into a single /boot partition so it
+ does not have the limitations listed above.
+
+ The plugin tries not to reimplement what's already
+ been done in other plugins; as such it imports "bootimg-pcbios"
+ and "bootimg-efi".
+ Plugin "bootimg-pcbios" is used to generate the legacy BIOS boot.
+ Plugin "bootimg-efi" is used to generate the UEFI boot. Note that it
+ requires a --sourceparams argument to know which loader to use; refer
+ to the "bootimg-efi" code/documentation for the list of loaders.
+
+ Imports are handled with "SourceFileLoader" from importlib, as it is
+ otherwise very difficult to import a module that has a hyphen "-" in its
+ filename.
+ The SourcePlugin() methods used in the plugins (do_install_disk,
+ do_configure_partition, do_prepare_partition) are then called on both,
+ starting with "bootimg-efi".
+
+ Plugin options such as "--sourceparams" can still be passed to a
+ plugin, as long as they do not cause issues in the other plugin.
+
+ Example wic configuration:
+ part /boot --source bootimg-biosplusefi --sourceparams="loader=grub-efi"\\
+ --ondisk sda --label os_boot --active --align 1024 --use-uuid
+ """
+
+ name = 'bootimg-biosplusefi'
+
+ __PCBIOS_MODULE_NAME = "bootimg-pcbios"
+ __EFI_MODULE_NAME = "bootimg-efi"
+
+ __imgEFIObj = None
+ __imgBiosObj = None
+
+ @classmethod
+ def __init__(cls):
+ """
+ Constructor (init)
+ """
+
+ # XXX
+ # For some reason, the __init__ constructor is never called.
+ # Something to do with how pluginbase works?
+ cls.__instanciateSubClasses()
+
+ @classmethod
+ def __instanciateSubClasses(cls):
+ """
+
+ """
+
+ # Import bootimg-pcbios (class name "BootimgPcbiosPlugin")
+ modulePath = os.path.join(os.path.dirname(os.path.realpath(__file__)),
+ cls.__PCBIOS_MODULE_NAME + ".py")
+ loader = SourceFileLoader(cls.__PCBIOS_MODULE_NAME, modulePath)
+ mod = types.ModuleType(loader.name)
+ loader.exec_module(mod)
+ cls.__imgBiosObj = mod.BootimgPcbiosPlugin()
+
+ # Import bootimg-efi (class name "BootimgEFIPlugin")
+ modulePath = os.path.join(os.path.dirname(os.path.realpath(__file__)),
+ cls.__EFI_MODULE_NAME + ".py")
+ loader = SourceFileLoader(cls.__EFI_MODULE_NAME, modulePath)
+ mod = types.ModuleType(loader.name)
+ loader.exec_module(mod)
+ cls.__imgEFIObj = mod.BootimgEFIPlugin()
+
+ @classmethod
+ def do_install_disk(cls, disk, disk_name, creator, workdir, oe_builddir,
+ bootimg_dir, kernel_dir, native_sysroot):
+ """
+ Called after all partitions have been prepared and assembled into a
+ disk image.
+ """
+
+ if ( (not cls.__imgEFIObj) or (not cls.__imgBiosObj) ):
+ cls.__instanciateSubClasses()
+
+ cls.__imgEFIObj.do_install_disk(
+ disk,
+ disk_name,
+ creator,
+ workdir,
+ oe_builddir,
+ bootimg_dir,
+ kernel_dir,
+ native_sysroot)
+
+ cls.__imgBiosObj.do_install_disk(
+ disk,
+ disk_name,
+ creator,
+ workdir,
+ oe_builddir,
+ bootimg_dir,
+ kernel_dir,
+ native_sysroot)
+
+ @classmethod
+ def do_configure_partition(cls, part, source_params, creator, cr_workdir,
+ oe_builddir, bootimg_dir, kernel_dir,
+ native_sysroot):
+ """
+ Called before do_prepare_partition()
+ """
+
+ if ( (not cls.__imgEFIObj) or (not cls.__imgBiosObj) ):
+ cls.__instanciateSubClasses()
+
+ cls.__imgEFIObj.do_configure_partition(
+ part,
+ source_params,
+ creator,
+ cr_workdir,
+ oe_builddir,
+ bootimg_dir,
+ kernel_dir,
+ native_sysroot)
+
+ cls.__imgBiosObj.do_configure_partition(
+ part,
+ source_params,
+ creator,
+ cr_workdir,
+ oe_builddir,
+ bootimg_dir,
+ kernel_dir,
+ native_sysroot)
+
+ @classmethod
+ def do_prepare_partition(cls, part, source_params, creator, cr_workdir,
+ oe_builddir, bootimg_dir, kernel_dir,
+ rootfs_dir, native_sysroot):
+ """
+ Called to do the actual content population for a partition i.e. it
+ 'prepares' the partition to be incorporated into the image.
+ """
+
+ if ( (not cls.__imgEFIObj) or (not cls.__imgBiosObj) ):
+ cls.__instanciateSubClasses()
+
+ cls.__imgEFIObj.do_prepare_partition(
+ part,
+ source_params,
+ creator,
+ cr_workdir,
+ oe_builddir,
+ bootimg_dir,
+ kernel_dir,
+ rootfs_dir,
+ native_sysroot)
+
+ cls.__imgBiosObj.do_prepare_partition(
+ part,
+ source_params,
+ creator,
+ cr_workdir,
+ oe_builddir,
+ bootimg_dir,
+ kernel_dir,
+ rootfs_dir,
+ native_sysroot)
diff --git a/scripts/lib/wic/plugins/source/bootimg-efi.py b/scripts/lib/wic/plugins/source/bootimg-efi.py
index 0eb86a079f..13a9cddf4e 100644
--- a/scripts/lib/wic/plugins/source/bootimg-efi.py
+++ b/scripts/lib/wic/plugins/source/bootimg-efi.py
@@ -1,21 +1,7 @@
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# Copyright (c) 2014, Intel Corporation.
-# All rights reserved.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# SPDX-License-Identifier: GPL-2.0-only
#
# DESCRIPTION
# This implements the 'bootimg-efi' source plugin class for 'wic'
@@ -26,7 +12,11 @@
import logging
import os
+import tempfile
import shutil
+import re
+
+from glob import glob
from wic import WicError
from wic.engine import get_custom_config
@@ -45,6 +35,26 @@ class BootimgEFIPlugin(SourcePlugin):
name = 'bootimg-efi'
@classmethod
+ def _copy_additional_files(cls, hdddir, initrd, dtb):
+ bootimg_dir = get_bitbake_var("DEPLOY_DIR_IMAGE")
+ if not bootimg_dir:
+ raise WicError("Couldn't find DEPLOY_DIR_IMAGE, exiting")
+
+ if initrd:
+ initrds = initrd.split(';')
+ for rd in initrds:
+ cp_cmd = "cp %s/%s %s" % (bootimg_dir, rd, hdddir)
+ exec_cmd(cp_cmd, True)
+ else:
+ logger.debug("Ignoring missing initrd")
+
+ if dtb:
+ if ';' in dtb:
+ raise WicError("Only one DTB supported, exiting")
+ cp_cmd = "cp %s/%s %s" % (bootimg_dir, dtb, hdddir)
+ exec_cmd(cp_cmd, True)
+
+ @classmethod
def do_configure_grubefi(cls, hdddir, creator, cr_workdir, source_params):
"""
Create loader-specific (grub-efi) config
@@ -63,34 +73,44 @@ class BootimgEFIPlugin(SourcePlugin):
"get it from %s." % configfile)
initrd = source_params.get('initrd')
+ dtb = source_params.get('dtb')
- if initrd:
- bootimg_dir = get_bitbake_var("DEPLOY_DIR_IMAGE")
- if not bootimg_dir:
- raise WicError("Couldn't find DEPLOY_DIR_IMAGE, exiting")
-
- cp_cmd = "cp %s/%s %s" % (bootimg_dir, initrd, hdddir)
- exec_cmd(cp_cmd, True)
- else:
- logger.debug("Ignoring missing initrd")
+ cls._copy_additional_files(hdddir, initrd, dtb)
if not custom_cfg:
# Create grub configuration using parameters from wks file
bootloader = creator.ks.bootloader
+ title = source_params.get('title')
grubefi_conf = ""
grubefi_conf += "serial --unit=0 --speed=115200 --word=8 --parity=no --stop=1\n"
grubefi_conf += "default=boot\n"
grubefi_conf += "timeout=%s\n" % bootloader.timeout
- grubefi_conf += "menuentry 'boot'{\n"
+ grubefi_conf += "menuentry '%s'{\n" % (title if title else "boot")
+
+ kernel = get_bitbake_var("KERNEL_IMAGETYPE")
+ if get_bitbake_var("INITRAMFS_IMAGE_BUNDLE") == "1":
+ if get_bitbake_var("INITRAMFS_IMAGE"):
+ kernel = "%s-%s.bin" % \
+ (get_bitbake_var("KERNEL_IMAGETYPE"), get_bitbake_var("INITRAMFS_LINK_NAME"))
- kernel = "/bzImage"
+ label = source_params.get('label')
+ label_conf = "root=%s" % creator.rootdev
+ if label:
+ label_conf = "LABEL=%s" % label
- grubefi_conf += "linux %s root=%s rootwait %s\n" \
- % (kernel, creator.rootdev, bootloader.append)
+ grubefi_conf += "linux /%s %s rootwait %s\n" \
+ % (kernel, label_conf, bootloader.append)
if initrd:
- grubefi_conf += "initrd /%s\n" % initrd
+ initrds = initrd.split(';')
+ grubefi_conf += "initrd"
+ for rd in initrds:
+ grubefi_conf += " /%s" % rd
+ grubefi_conf += "\n"
+
+ if dtb:
+ grubefi_conf += "devicetree /%s\n" % dtb
grubefi_conf += "}\n"
@@ -113,22 +133,18 @@ class BootimgEFIPlugin(SourcePlugin):
bootloader = creator.ks.bootloader
+ unified_image = source_params.get('create-unified-kernel-image') == "true"
+
loader_conf = ""
- loader_conf += "default boot\n"
+ if not unified_image:
+ loader_conf += "default boot\n"
loader_conf += "timeout %d\n" % bootloader.timeout
initrd = source_params.get('initrd')
+ dtb = source_params.get('dtb')
- if initrd:
- # obviously we need to have a common common deploy var
- bootimg_dir = get_bitbake_var("DEPLOY_DIR_IMAGE")
- if not bootimg_dir:
- raise WicError("Couldn't find DEPLOY_DIR_IMAGE, exiting")
-
- cp_cmd = "cp %s/%s %s" % (bootimg_dir, initrd, hdddir)
- exec_cmd(cp_cmd, True)
- else:
- logger.debug("Ignoring missing initrd")
+ if not unified_image:
+ cls._copy_additional_files(hdddir, initrd, dtb)
logger.debug("Writing systemd-boot config "
"%s/hdd/boot/loader/loader.conf", cr_workdir)
@@ -151,22 +167,40 @@ class BootimgEFIPlugin(SourcePlugin):
if not custom_cfg:
# Create systemd-boot configuration using parameters from wks file
- kernel = "/bzImage"
+ kernel = get_bitbake_var("KERNEL_IMAGETYPE")
+ if get_bitbake_var("INITRAMFS_IMAGE_BUNDLE") == "1":
+ if get_bitbake_var("INITRAMFS_IMAGE"):
+ kernel = "%s-%s.bin" % \
+ (get_bitbake_var("KERNEL_IMAGETYPE"), get_bitbake_var("INITRAMFS_LINK_NAME"))
+
+ title = source_params.get('title')
boot_conf = ""
- boot_conf += "title boot\n"
- boot_conf += "linux %s\n" % kernel
- boot_conf += "options LABEL=Boot root=%s %s\n" % \
- (creator.rootdev, bootloader.append)
+ boot_conf += "title %s\n" % (title if title else "boot")
+ boot_conf += "linux /%s\n" % kernel
+
+ label = source_params.get('label')
+ label_conf = "LABEL=Boot root=%s" % creator.rootdev
+ if label:
+ label_conf = "LABEL=%s" % label
+
+ boot_conf += "options %s %s\n" % \
+ (label_conf, bootloader.append)
if initrd:
- boot_conf += "initrd /%s\n" % initrd
+ initrds = initrd.split(';')
+ for rd in initrds:
+ boot_conf += "initrd /%s\n" % rd
- logger.debug("Writing systemd-boot config "
- "%s/hdd/boot/loader/entries/boot.conf", cr_workdir)
- cfg = open("%s/hdd/boot/loader/entries/boot.conf" % cr_workdir, "w")
- cfg.write(boot_conf)
- cfg.close()
+ if dtb:
+ boot_conf += "devicetree /%s\n" % dtb
+
+ if not unified_image:
+ logger.debug("Writing systemd-boot config "
+ "%s/hdd/boot/loader/entries/boot.conf", cr_workdir)
+ cfg = open("%s/hdd/boot/loader/entries/boot.conf" % cr_workdir, "w")
+ cfg.write(boot_conf)
+ cfg.close()
@classmethod
@@ -186,11 +220,64 @@ class BootimgEFIPlugin(SourcePlugin):
cls.do_configure_grubefi(hdddir, creator, cr_workdir, source_params)
elif source_params['loader'] == 'systemd-boot':
cls.do_configure_systemdboot(hdddir, creator, cr_workdir, source_params)
+ elif source_params['loader'] == 'uefi-kernel':
+ pass
else:
raise WicError("unrecognized bootimg-efi loader: %s" % source_params['loader'])
except KeyError:
raise WicError("bootimg-efi requires a loader, none specified")
+ if get_bitbake_var("IMAGE_EFI_BOOT_FILES") is None:
+ logger.debug('No boot files defined in IMAGE_EFI_BOOT_FILES')
+ else:
+ boot_files = None
+ for (fmt, id) in (("_uuid-%s", part.uuid), ("_label-%s", part.label), (None, None)):
+ if fmt:
+ var = fmt % id
+ else:
+ var = ""
+
+ boot_files = get_bitbake_var("IMAGE_EFI_BOOT_FILES" + var)
+ if boot_files:
+ break
+
+ logger.debug('Boot files: %s', boot_files)
+
+ # list of tuples (src_name, dst_name)
+ deploy_files = []
+ for src_entry in re.findall(r'[\w;\-\./\*]+', boot_files):
+ if ';' in src_entry:
+ dst_entry = tuple(src_entry.split(';'))
+ if not dst_entry[0] or not dst_entry[1]:
+ raise WicError('Malformed boot file entry: %s' % src_entry)
+ else:
+ dst_entry = (src_entry, src_entry)
+
+ logger.debug('Destination entry: %r', dst_entry)
+ deploy_files.append(dst_entry)
+
+ cls.install_task = []
+ for deploy_entry in deploy_files:
+ src, dst = deploy_entry
+ if '*' in src:
+ # by default install files under their basename
+ entry_name_fn = os.path.basename
+ if dst != src:
+ # unless a target name was given, then treat name
+ # as a directory and append a basename
+ entry_name_fn = lambda name: \
+ os.path.join(dst,
+ os.path.basename(name))
+
+ srcs = glob(os.path.join(kernel_dir, src))
+
+ logger.debug('Globbed sources: %s', ', '.join(srcs))
+ for entry in srcs:
+ src = os.path.relpath(entry, kernel_dir)
+ entry_dst_name = entry_name_fn(entry)
+ cls.install_task.append((src, entry_dst_name))
+ else:
+ cls.install_task.append((src, dst))
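A condensed sketch of the entry splitting used above (illustration only, slightly simplified from the code):

    # "grub.cfg;EFI/BOOT/grub.cfg" installs grub.cfg under a new path,
    # "bzImage" installs under its own name, and glob sources such as
    # "*.dtb;dtb/" are expanded later against kernel_dir, with dst
    # treated as a directory.
    def parse_boot_file_entry(src_entry):
        if ';' in src_entry:
            src, dst = src_entry.split(';', 1)
            if not src or not dst:
                raise ValueError('Malformed boot file entry: %s' % src_entry)
            return (src, dst)
        return (src_entry, src_entry)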
@classmethod
def do_prepare_partition(cls, part, source_params, creator, cr_workdir,
@@ -210,10 +297,120 @@ class BootimgEFIPlugin(SourcePlugin):
hdddir = "%s/hdd/boot" % cr_workdir
- install_cmd = "install -m 0644 %s/bzImage %s/bzImage" % \
- (staging_kernel_dir, hdddir)
- exec_cmd(install_cmd)
-
+ kernel = get_bitbake_var("KERNEL_IMAGETYPE")
+ if get_bitbake_var("INITRAMFS_IMAGE_BUNDLE") == "1":
+ if get_bitbake_var("INITRAMFS_IMAGE"):
+ kernel = "%s-%s.bin" % \
+ (get_bitbake_var("KERNEL_IMAGETYPE"), get_bitbake_var("INITRAMFS_LINK_NAME"))
+
+ if source_params.get('create-unified-kernel-image') == "true":
+ initrd = source_params.get('initrd')
+ if not initrd:
+ raise WicError("initrd= must be specified when create-unified-kernel-image=true, exiting")
+
+ deploy_dir = get_bitbake_var("DEPLOY_DIR_IMAGE")
+ efi_stub = glob("%s/%s" % (deploy_dir, "linux*.efi.stub"))
+ if len(efi_stub) == 0:
+ raise WicError("Unified Kernel Image EFI stub not found, exiting")
+ efi_stub = efi_stub[0]
+
+ with tempfile.TemporaryDirectory() as tmp_dir:
+ label = source_params.get('label')
+ label_conf = "root=%s" % creator.rootdev
+ if label:
+ label_conf = "LABEL=%s" % label
+
+ bootloader = creator.ks.bootloader
+ cmdline = open("%s/cmdline" % tmp_dir, "w")
+ cmdline.write("%s %s" % (label_conf, bootloader.append))
+ cmdline.close()
+
+ initrds = initrd.split(';')
+ initrd = open("%s/initrd" % tmp_dir, "wb")
+ for f in initrds:
+ with open("%s/%s" % (deploy_dir, f), 'rb') as in_file:
+ shutil.copyfileobj(in_file, initrd)
+ initrd.close()
+
+ # Searched by systemd-boot:
+ # https://systemd.io/BOOT_LOADER_SPECIFICATION/#type-2-efi-unified-kernel-images
+ install_cmd = "install -d %s/EFI/Linux" % hdddir
+ exec_cmd(install_cmd)
+
+ staging_dir_host = get_bitbake_var("STAGING_DIR_HOST")
+ target_sys = get_bitbake_var("TARGET_SYS")
+
+ objdump_cmd = "%s-objdump" % target_sys
+ objdump_cmd += " -p %s" % efi_stub
+ objdump_cmd += " | awk '{ if ($1 == \"SectionAlignment\"){print $2} }'"
+
+ ret, align_str = exec_native_cmd(objdump_cmd, native_sysroot)
+ align = int(align_str, 16)
+
+ objdump_cmd = "%s-objdump" % target_sys
+ objdump_cmd += " -h %s | tail -2" % efi_stub
+ ret, output = exec_native_cmd(objdump_cmd, native_sysroot)
+
+ offset = int(output.split()[2], 16) + int(output.split()[3], 16)
+
+ osrel_off = offset + align - offset % align
+ osrel_path = "%s/usr/lib/os-release" % staging_dir_host
+ osrel_sz = os.stat(osrel_path).st_size
+
+ cmdline_off = osrel_off + osrel_sz
+ cmdline_off = cmdline_off + align - cmdline_off % align
+ cmdline_sz = os.stat(cmdline.name).st_size
+
+ dtb_off = cmdline_off + cmdline_sz
+ dtb_off = dtb_off + align - dtb_off % align
+
+ dtb = source_params.get('dtb')
+ if dtb:
+ if ';' in dtb:
+ raise WicError("Only one DTB supported, exiting")
+ dtb_path = "%s/%s" % (deploy_dir, dtb)
+ dtb_params = '--add-section .dtb=%s --change-section-vma .dtb=0x%x' % \
+ (dtb_path, dtb_off)
+ linux_off = dtb_off + os.stat(dtb_path).st_size
+ linux_off = linux_off + align - linux_off % align
+ else:
+ dtb_params = ''
+ linux_off = dtb_off
+
+ linux_path = "%s/%s" % (staging_kernel_dir, kernel)
+ linux_sz = os.stat(linux_path).st_size
+
+ initrd_off = linux_off + linux_sz
+ initrd_off = initrd_off + align - initrd_off % align
+
+ # https://www.freedesktop.org/software/systemd/man/systemd-stub.html
+ objcopy_cmd = "%s-objcopy" % target_sys
+ objcopy_cmd += " --enable-deterministic-archives"
+ objcopy_cmd += " --preserve-dates"
+ objcopy_cmd += " --add-section .osrel=%s" % osrel_path
+ objcopy_cmd += " --change-section-vma .osrel=0x%x" % osrel_off
+ objcopy_cmd += " --add-section .cmdline=%s" % cmdline.name
+ objcopy_cmd += " --change-section-vma .cmdline=0x%x" % cmdline_off
+ objcopy_cmd += dtb_params
+ objcopy_cmd += " --add-section .linux=%s" % linux_path
+ objcopy_cmd += " --change-section-vma .linux=0x%x" % linux_off
+ objcopy_cmd += " --add-section .initrd=%s" % initrd.name
+ objcopy_cmd += " --change-section-vma .initrd=0x%x" % initrd_off
+ objcopy_cmd += " %s %s/EFI/Linux/linux.efi" % (efi_stub, hdddir)
+
+ exec_native_cmd(objcopy_cmd, native_sysroot)
+ else:
+ if source_params.get('install-kernel-into-boot-dir') != 'false':
+ install_cmd = "install -m 0644 %s/%s %s/%s" % \
+ (staging_kernel_dir, kernel, hdddir, kernel)
+ exec_cmd(install_cmd)
+
+ if get_bitbake_var("IMAGE_EFI_BOOT_FILES"):
+ for src_path, dst_path in cls.install_task:
+ install_cmd = "install -m 0644 -D %s %s" \
+ % (os.path.join(kernel_dir, src_path),
+ os.path.join(hdddir, dst_path))
+ exec_cmd(install_cmd)
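One note on the section layout arithmetic in the unified-kernel branch above: every offset is advanced with off + align - off % align, which always moves to the next SectionAlignment boundary, even when off is already aligned. A minimal restatement (illustration only):

    def next_section_offset(off, align):
        # As written above: always advances past off, even if already aligned.
        return off + align - off % align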
try:
if source_params['loader'] == 'grub-efi':
@@ -228,6 +425,28 @@ class BootimgEFIPlugin(SourcePlugin):
for mod in [x for x in os.listdir(kernel_dir) if x.startswith("systemd-")]:
cp_cmd = "cp %s/%s %s/EFI/BOOT/%s" % (kernel_dir, mod, hdddir, mod[8:])
exec_cmd(cp_cmd, True)
+ elif source_params['loader'] == 'uefi-kernel':
+ kernel = get_bitbake_var("KERNEL_IMAGETYPE")
+ if not kernel:
+ raise WicError("Empty KERNEL_IMAGETYPE %s\n" % target)
+ target = get_bitbake_var("TARGET_SYS")
+ if not target:
+ raise WicError("Unknown arch (TARGET_SYS) %s\n" % target)
+
+ if re.match("x86_64", target):
+ kernel_efi_image = "bootx64.efi"
+ elif re.match('i.86', target):
+ kernel_efi_image = "bootia32.efi"
+ elif re.match('aarch64', target):
+ kernel_efi_image = "bootaa64.efi"
+ elif re.match('arm', target):
+ kernel_efi_image = "bootarm.efi"
+ else:
+ raise WicError("UEFI stub kernel is incompatible with target %s" % target)
+
+ for mod in [x for x in os.listdir(kernel_dir) if x.startswith(kernel)]:
+ cp_cmd = "cp %s/%s %s/EFI/BOOT/%s" % (kernel_dir, mod, hdddir, kernel_efi_image)
+ exec_cmd(cp_cmd, True)
else:
raise WicError("unrecognized bootimg-efi loader: %s" %
source_params['loader'])
@@ -239,6 +458,11 @@ class BootimgEFIPlugin(SourcePlugin):
cp_cmd = "cp %s %s/" % (startup, hdddir)
exec_cmd(cp_cmd, True)
+ for paths in part.include_path or []:
+ for path in paths:
+ cp_cmd = "cp -r %s %s/" % (path, hdddir)
+ exec_cmd(cp_cmd, True)
+
du_cmd = "du -bks %s" % hdddir
out = exec_cmd(du_cmd)
blocks = int(out.split()[0])
@@ -253,11 +477,20 @@ class BootimgEFIPlugin(SourcePlugin):
logger.debug("Added %d extra blocks to %s to get to %d total blocks",
extra_blocks, part.mountpoint, blocks)
+ # required for compatibility with certain devices expecting file system
+ # block count to be equal to partition block count
+ if blocks < part.fixed_size:
+ blocks = part.fixed_size
+ logger.debug("Overriding %s to %d total blocks for compatibility",
+ part.mountpoint, blocks)
+
# dosfs image, created by mkdosfs
bootimg = "%s/boot.img" % cr_workdir
- dosfs_cmd = "mkdosfs -n efi -i %s -C %s %d" % \
- (part.fsuuid, bootimg, blocks)
+ label = part.label if part.label else "ESP"
+
+ dosfs_cmd = "mkdosfs -n %s -i %s -C %s %d" % \
+ (label, part.fsuuid, bootimg, blocks)
exec_native_cmd(dosfs_cmd, native_sysroot)
mcopy_cmd = "mcopy -i %s -s %s/* ::/" % (bootimg, hdddir)
diff --git a/scripts/lib/wic/plugins/source/bootimg-partition.py b/scripts/lib/wic/plugins/source/bootimg-partition.py
index b239fc0b4c..1071d1af3f 100644
--- a/scripts/lib/wic/plugins/source/bootimg-partition.py
+++ b/scripts/lib/wic/plugins/source/bootimg-partition.py
@@ -1,18 +1,7 @@
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# Copyright OpenEmbedded Contributors
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# SPDX-License-Identifier: GPL-2.0-only
#
# DESCRIPTION
# This implements the 'bootimg-partition' source plugin class for
@@ -30,6 +19,7 @@ import re
from glob import glob
from wic import WicError
+from wic.engine import get_custom_config
from wic.pluginbase import SourcePlugin
from wic.misc import exec_cmd, get_bitbake_var
@@ -42,17 +32,14 @@ class BootimgPartitionPlugin(SourcePlugin):
"""
name = 'bootimg-partition'
+ image_boot_files_var_name = 'IMAGE_BOOT_FILES'
@classmethod
- def do_prepare_partition(cls, part, source_params, cr, cr_workdir,
+ def do_configure_partition(cls, part, source_params, cr, cr_workdir,
oe_builddir, bootimg_dir, kernel_dir,
- rootfs_dir, native_sysroot):
+ native_sysroot):
"""
- Called to do the actual content population for a partition i.e. it
- 'prepares' the partition to be incorporated into the image.
- In this case, does the following:
- - sets up a vfat partition
- - copies all files listed in IMAGE_BOOT_FILES variable
+ Called before do_prepare_partition(); creates the u-boot specific boot config
"""
hdddir = "%s/boot.%d" % (cr_workdir, part.lineno)
install_cmd = "install -d %s" % hdddir
@@ -63,8 +50,6 @@ class BootimgPartitionPlugin(SourcePlugin):
if not kernel_dir:
raise WicError("Couldn't find DEPLOY_DIR_IMAGE, exiting")
- logger.debug('Kernel dir: %s', bootimg_dir)
-
boot_files = None
for (fmt, id) in (("_uuid-%s", part.uuid), ("_label-%s", part.label), (None, None)):
if fmt:
@@ -72,12 +57,12 @@ class BootimgPartitionPlugin(SourcePlugin):
else:
var = ""
- boot_files = get_bitbake_var("IMAGE_BOOT_FILES" + var)
+ boot_files = get_bitbake_var(cls.image_boot_files_var_name + var)
if boot_files is not None:
break
if boot_files is None:
- raise WicError('No boot files defined, IMAGE_BOOT_FILES unset for entry #%d' % part.lineno)
+ raise WicError('No boot files defined, %s unset for entry #%d' % (cls.image_boot_files_var_name, part.lineno))
logger.debug('Boot files: %s', boot_files)
@@ -94,9 +79,9 @@ class BootimgPartitionPlugin(SourcePlugin):
logger.debug('Destination entry: %r', dst_entry)
deploy_files.append(dst_entry)
+ cls.install_task = []
for deploy_entry in deploy_files:
src, dst = deploy_entry
- install_task = []
if '*' in src:
# by default install files under their basename
entry_name_fn = os.path.basename
@@ -111,21 +96,101 @@ class BootimgPartitionPlugin(SourcePlugin):
logger.debug('Globbed sources: %s', ', '.join(srcs))
for entry in srcs:
+ src = os.path.relpath(entry, kernel_dir)
entry_dst_name = entry_name_fn(entry)
- install_task.append((entry,
- os.path.join(hdddir,
- entry_dst_name)))
+ cls.install_task.append((src, entry_dst_name))
+ else:
+ cls.install_task.append((src, dst))
+
+ if source_params.get('loader') != "u-boot":
+ return
+
+ configfile = cr.ks.bootloader.configfile
+ custom_cfg = None
+ if configfile:
+ custom_cfg = get_custom_config(configfile)
+ if custom_cfg:
+ # Use a custom configuration for extlinux.conf
+ extlinux_conf = custom_cfg
+ logger.debug("Using custom configuration file "
+ "%s for extlinux.conf", configfile)
else:
- install_task = [(os.path.join(kernel_dir, src),
- os.path.join(hdddir, dst))]
-
- for task in install_task:
- src_path, dst_path = task
- logger.debug('Install %s as %s',
- os.path.basename(src_path), dst_path)
- install_cmd = "install -m 0644 -D %s %s" \
- % (src_path, dst_path)
- exec_cmd(install_cmd)
+ raise WicError("configfile is specified but failed to "
+ "get it from %s." % configfile)
+
+ if not custom_cfg:
+ # The kernel types supported by the sysboot of u-boot
+ kernel_types = ["zImage", "Image", "fitImage", "uImage", "vmlinux"]
+ has_dtb = False
+ fdt_dir = '/'
+ kernel_name = None
+
+ # Find the kernel image name, from the highest precedence to lowest
+ for image in kernel_types:
+ for task in cls.install_task:
+ src, dst = task
+ if re.match(image, src):
+ kernel_name = os.path.join('/', dst)
+ break
+ if kernel_name:
+ break
+
+ for task in cls.install_task:
+ src, dst = task
+ # We assume that all the dtbs are in the same directory
+ if re.search(r'\.dtb', src) and fdt_dir == '/':
+ has_dtb = True
+ fdt_dir = os.path.join(fdt_dir, os.path.dirname(dst))
+ break
+
+ if not kernel_name:
+ raise WicError('No kernel file found')
+
+ # Compose the extlinux.conf
+ extlinux_conf = "default Yocto\n"
+ extlinux_conf += "label Yocto\n"
+ extlinux_conf += " kernel %s\n" % kernel_name
+ if has_dtb:
+ extlinux_conf += " fdtdir %s\n" % fdt_dir
+ bootloader = cr.ks.bootloader
+ extlinux_conf += "append root=%s rootwait %s\n" \
+ % (cr.rootdev, bootloader.append if bootloader.append else '')
+
+ install_cmd = "install -d %s/extlinux/" % hdddir
+ exec_cmd(install_cmd)
+ cfg = open("%s/extlinux/extlinux.conf" % hdddir, "w")
+ cfg.write(extlinux_conf)
+ cfg.close()
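For illustration (not part of the patch), a wks installing a fitImage plus device trees under dtb/ would produce an extlinux.conf along these lines (the root device and any extra append arguments come from the wks bootloader line; values here are placeholders):

    default Yocto
    label Yocto
        kernel /fitImage
        fdtdir /dtb
    append root=/dev/mmcblk0p2 rootwait console=ttyS0,115200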
+
+
+ @classmethod
+ def do_prepare_partition(cls, part, source_params, cr, cr_workdir,
+ oe_builddir, bootimg_dir, kernel_dir,
+ rootfs_dir, native_sysroot):
+ """
+ Called to do the actual content population for a partition i.e. it
+ 'prepares' the partition to be incorporated into the image.
+ In this case, does the following:
+ - sets up a vfat partition
+ - copies all files listed in IMAGE_BOOT_FILES variable
+ """
+ hdddir = "%s/boot.%d" % (cr_workdir, part.lineno)
+
+ if not kernel_dir:
+ kernel_dir = get_bitbake_var("DEPLOY_DIR_IMAGE")
+ if not kernel_dir:
+ raise WicError("Couldn't find DEPLOY_DIR_IMAGE, exiting")
+
+ logger.debug('Kernel dir: %s', kernel_dir)
+
+ for task in cls.install_task:
+ src_path, dst_path = task
+ logger.debug('Install %s as %s', src_path, dst_path)
+ install_cmd = "install -m 0644 -D %s %s" \
+ % (os.path.join(kernel_dir, src_path),
+ os.path.join(hdddir, dst_path))
+ exec_cmd(install_cmd)
logger.debug('Prepare boot partition using rootfs in %s', hdddir)
part.prepare_rootfs(cr_workdir, oe_builddir, hdddir,
diff --git a/scripts/lib/wic/plugins/source/bootimg-pcbios.py b/scripts/lib/wic/plugins/source/bootimg-pcbios.py
index d599112dd7..a207a83530 100644
--- a/scripts/lib/wic/plugins/source/bootimg-pcbios.py
+++ b/scripts/lib/wic/plugins/source/bootimg-pcbios.py
@@ -1,21 +1,7 @@
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# Copyright (c) 2014, Intel Corporation.
-# All rights reserved.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# SPDX-License-Identifier: GPL-2.0-only
#
# DESCRIPTION
# This implements the 'bootimg-pcbios' source plugin class for 'wic'
@@ -26,6 +12,7 @@
import logging
import os
+import re
from wic import WicError
from wic.engine import get_custom_config
@@ -47,10 +34,17 @@ class BootimgPcbiosPlugin(SourcePlugin):
"""
Check if dirname exists in default bootimg_dir or in STAGING_DIR.
"""
- for result in (bootimg_dir, get_bitbake_var("STAGING_DATADIR")):
+ staging_datadir = get_bitbake_var("STAGING_DATADIR")
+ for result in (bootimg_dir, staging_datadir):
if os.path.exists("%s/%s" % (result, dirname)):
return result
+ # STAGING_DATADIR is expanded with MLPREFIX if multilib is enabled
+ # but dependency syslinux is still populated to original STAGING_DATADIR
+ nonarch_datadir = re.sub('/[^/]*recipe-sysroot', '/recipe-sysroot', staging_datadir)
+ if os.path.exists(os.path.join(nonarch_datadir, dirname)):
+ return nonarch_datadir
+
raise WicError("Couldn't find correct bootimg_dir, exiting")
@classmethod
@@ -128,7 +122,7 @@ class BootimgPcbiosPlugin(SourcePlugin):
syslinux_conf += "DEFAULT boot\n"
syslinux_conf += "LABEL boot\n"
- kernel = "/vmlinuz"
+ kernel = "/" + get_bitbake_var("KERNEL_IMAGETYPE")
syslinux_conf += "KERNEL " + kernel + "\n"
syslinux_conf += "APPEND label=boot root=%s %s\n" % \
@@ -155,8 +149,14 @@ class BootimgPcbiosPlugin(SourcePlugin):
hdddir = "%s/hdd/boot" % cr_workdir
- cmds = ("install -m 0644 %s/bzImage %s/vmlinuz" %
- (staging_kernel_dir, hdddir),
+ kernel = get_bitbake_var("KERNEL_IMAGETYPE")
+ if get_bitbake_var("INITRAMFS_IMAGE_BUNDLE") == "1":
+ if get_bitbake_var("INITRAMFS_IMAGE"):
+ kernel = "%s-%s.bin" % \
+ (get_bitbake_var("KERNEL_IMAGETYPE"), get_bitbake_var("INITRAMFS_LINK_NAME"))
+
+ cmds = ("install -m 0644 %s/%s %s/%s" %
+ (staging_kernel_dir, kernel, hdddir, get_bitbake_var("KERNEL_IMAGETYPE")),
"install -m 444 %s/syslinux/ldlinux.sys %s/ldlinux.sys" %
(bootimg_dir, hdddir),
"install -m 0644 %s/syslinux/vesamenu.c32 %s/vesamenu.c32" %
@@ -186,8 +186,10 @@ class BootimgPcbiosPlugin(SourcePlugin):
# dosfs image, created by mkdosfs
bootimg = "%s/boot%s.img" % (cr_workdir, part.lineno)
- dosfs_cmd = "mkdosfs -n boot -i %s -S 512 -C %s %d" % \
- (part.fsuuid, bootimg, blocks)
+ label = part.label if part.label else "boot"
+
+ dosfs_cmd = "mkdosfs -n %s -i %s -S 512 -C %s %d" % \
+ (label, part.fsuuid, bootimg, blocks)
exec_native_cmd(dosfs_cmd, native_sysroot)
mcopy_cmd = "mcopy -i %s -s %s/* ::/" % (bootimg, hdddir)
diff --git a/scripts/lib/wic/plugins/source/empty.py b/scripts/lib/wic/plugins/source/empty.py
new file mode 100644
index 0000000000..4178912377
--- /dev/null
+++ b/scripts/lib/wic/plugins/source/empty.py
@@ -0,0 +1,89 @@
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
+# The empty wic plugin is used to create unformatted empty partitions for wic
+# images.
+# To use it you must pass "empty" as argument for the "--source" parameter in
+# the wks file. For example:
+# part foo --source empty --ondisk sda --size="1024" --align 1024
+#
+# The plugin supports writing zeros to the start of the
+# partition. This is useful to overwrite old content like
+# filesystem signatures which may be re-recognized otherwise.
+# This feature can be enabled with
+# '--sourceparams="[fill|size=<N>[S|s|K|k|M|G]][,][bs=<N>[S|s|K|k|M|G]]"'
+# Conflicting or missing options throw errors.
+
+import logging
+import os
+
+from wic import WicError
+from wic.ksparser import sizetype
+from wic.pluginbase import SourcePlugin
+
+logger = logging.getLogger('wic')
+
+class EmptyPartitionPlugin(SourcePlugin):
+ """
+ Populate unformatted empty partition.
+
+ The following sourceparams are supported:
+ - fill
+ Fill the entire partition with zeros. Requires '--fixed-size' option
+ to be set.
+ - size=<N>[S|s|K|k|M|G]
+ Set the first N bytes of the partition to zero. Default unit is 'K'.
+ - bs=<N>[S|s|K|k|M|G]
+ Write at most N bytes at a time during source file creation.
+ Defaults to '1M'. Default unit is 'K'.
+ """
+
+ name = 'empty'
+
+ @classmethod
+ def do_prepare_partition(cls, part, source_params, cr, cr_workdir,
+ oe_builddir, bootimg_dir, kernel_dir,
+ rootfs_dir, native_sysroot):
+ """
+ Called to do the actual content population for a partition i.e. it
+ 'prepares' the partition to be incorporated into the image.
+ """
+ get_byte_count = sizetype('K', True)
+ size = 0
+
+ if 'fill' in source_params and 'size' in source_params:
+ raise WicError("Conflicting source parameters 'fill' and 'size' specified, exiting.")
+
+ # Set the size of the zeros to be written to the partition
+ if 'fill' in source_params:
+ if part.fixed_size == 0:
+ raise WicError("Source parameter 'fill' only works with the '--fixed-size' option, exiting.")
+ size = get_byte_count(part.fixed_size)
+ elif 'size' in source_params:
+ size = get_byte_count(source_params['size'])
+
+ if size == 0:
+ # Nothing to do, create empty partition
+ return
+
+ if 'bs' in source_params:
+ bs = get_byte_count(source_params['bs'])
+ else:
+ bs = get_byte_count('1M')
+
+ # Create a binary file of the requested size filled with zeros
+ source_file = os.path.join(cr_workdir, 'empty-plugin-zeros%s.bin' % part.lineno)
+ if not os.path.exists(os.path.dirname(source_file)):
+ os.makedirs(os.path.dirname(source_file))
+
+ quotient, remainder = divmod(size, bs)
+ with open(source_file, 'wb') as file:
+ for _ in range(quotient):
+ file.write(bytearray(bs))
+ file.write(bytearray(remainder))
+
+ part.size = (size + 1024 - 1) // 1024 # size in KB rounded up
+ part.source_file = source_file
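Two usage sketches for the sourceparams documented above (disk name and sizes are placeholders):

    part /data    --source empty --ondisk sda --fixed-size 2048 --sourceparams="fill"
    part /scratch --source empty --ondisk sda --size 1024 --sourceparams="size=512K,bs=64K"

The first zero-fills the whole fixed-size partition; the second zeroes only the first 512K, written in 64K chunks.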
diff --git a/scripts/lib/wic/plugins/source/isoimage-isohybrid.py b/scripts/lib/wic/plugins/source/isoimage-isohybrid.py
index 0d4f50d1f7..607356ad13 100644
--- a/scripts/lib/wic/plugins/source/isoimage-isohybrid.py
+++ b/scripts/lib/wic/plugins/source/isoimage-isohybrid.py
@@ -1,18 +1,7 @@
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
-
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
+# Copyright OpenEmbedded Contributors
#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# SPDX-License-Identifier: GPL-2.0-only
#
# DESCRIPTION
# This implements the 'isoimage-isohybrid' source plugin class for 'wic'
@@ -83,8 +72,13 @@ class IsoImagePlugin(SourcePlugin):
syslinux_conf += "DEFAULT boot\n"
syslinux_conf += "LABEL boot\n"
- kernel = "/bzImage"
- syslinux_conf += "KERNEL " + kernel + "\n"
+ kernel = get_bitbake_var("KERNEL_IMAGETYPE")
+ if get_bitbake_var("INITRAMFS_IMAGE_BUNDLE") == "1":
+ if get_bitbake_var("INITRAMFS_IMAGE"):
+ kernel = "%s-%s.bin" % \
+ (get_bitbake_var("KERNEL_IMAGETYPE"), get_bitbake_var("INITRAMFS_LINK_NAME"))
+
+ syslinux_conf += "KERNEL /" + kernel + "\n"
syslinux_conf += "APPEND initrd=/initrd LABEL=boot %s\n" \
% bootloader.append
@@ -127,9 +121,13 @@ class IsoImagePlugin(SourcePlugin):
grubefi_conf += "\n"
grubefi_conf += "menuentry 'boot'{\n"
- kernel = "/bzImage"
+ kernel = get_bitbake_var("KERNEL_IMAGETYPE")
+ if get_bitbake_var("INITRAMFS_IMAGE_BUNDLE") == "1":
+ if get_bitbake_var("INITRAMFS_IMAGE"):
+ kernel = "%s-%s.bin" % \
+ (get_bitbake_var("KERNEL_IMAGETYPE"), get_bitbake_var("INITRAMFS_LINK_NAME"))
- grubefi_conf += "linux %s rootwait %s\n" \
+ grubefi_conf += "linux /%s rootwait %s\n" \
% (kernel, bootloader.append)
grubefi_conf += "initrd /initrd \n"
grubefi_conf += "}\n"
@@ -191,10 +189,9 @@ class IsoImagePlugin(SourcePlugin):
else:
raise WicError("Couldn't find or build initrd, exiting.")
- exec_cmd("cd %s && find . | cpio -o -H newc -R +0:+0 >./initrd.cpio " \
- % initrd_dir, as_shell=True)
- exec_cmd("gzip -f -9 -c %s/initrd.cpio > %s" \
- % (cr_workdir, initrd), as_shell=True)
+ exec_cmd("cd %s && find . | cpio -o -H newc -R root:root >%s/initrd.cpio " \
+ % (initrd_dir, cr_workdir), as_shell=True)
+ exec_cmd("gzip -f -9 %s/initrd.cpio" % cr_workdir, as_shell=True)
shutil.rmtree(initrd_dir)
return initrd
@@ -221,6 +218,18 @@ class IsoImagePlugin(SourcePlugin):
creator.name = source_params['image_name'].strip()
logger.debug("The name of the image is: %s", creator.name)
+ @staticmethod
+ def _install_payload(source_params, iso_dir):
+ """
+ Copies contents of payload directory (as specified in 'payload_dir' param) into iso_dir
+ """
+
+ if source_params.get('payload_dir'):
+ payload_dir = source_params['payload_dir']
+
+ logger.debug("Payload directory: %s", payload_dir)
+ shutil.copytree(payload_dir, iso_dir, symlinks=True, dirs_exist_ok=True)
+
@classmethod
def do_prepare_partition(cls, part, source_params, creator, cr_workdir,
oe_builddir, bootimg_dir, kernel_dir,
@@ -233,6 +242,8 @@ class IsoImagePlugin(SourcePlugin):
isodir = "%s/ISO" % cr_workdir
+ cls._install_payload(source_params, isodir)
+
if part.rootfs_dir is None:
if not 'ROOTFS_DIR' in rootfs_dir:
raise WicError("Couldn't find --rootfs-dir, exiting.")
@@ -282,9 +293,14 @@ class IsoImagePlugin(SourcePlugin):
if os.path.isfile("%s/initrd.cpio.gz" % cr_workdir):
os.remove("%s/initrd.cpio.gz" % cr_workdir)
- # Install bzImage
- install_cmd = "install -m 0644 %s/bzImage %s/bzImage" % \
- (kernel_dir, isodir)
+ kernel = get_bitbake_var("KERNEL_IMAGETYPE")
+ if get_bitbake_var("INITRAMFS_IMAGE_BUNDLE") == "1":
+ if get_bitbake_var("INITRAMFS_IMAGE"):
+ kernel = "%s-%s.bin" % \
+ (get_bitbake_var("KERNEL_IMAGETYPE"), get_bitbake_var("INITRAMFS_LINK_NAME"))
+
+ install_cmd = "install -m 0644 %s/%s %s/%s" % \
+ (kernel_dir, kernel, isodir, kernel)
exec_cmd(install_cmd)
#Create bootloader for efi boot
@@ -317,7 +333,7 @@ class IsoImagePlugin(SourcePlugin):
grub_src = os.path.join(deploy_dir, grub_src_image)
if not os.path.exists(grub_src):
raise WicError("Grub loader %s is not found in %s. "
- "Please build grub-efi first" % (grub_image, deploy_dir))
+ "Please build grub-efi first" % (grub_src_image, deploy_dir))
shutil.copy(grub_src, grub_target)
if not os.path.isfile(os.path.join(target_dir, "boot.cfg")):
@@ -336,19 +352,23 @@ class IsoImagePlugin(SourcePlugin):
(img_iso_dir, isodir)
exec_cmd(install_cmd)
else:
+ # Default to 100 blocks of extra space for file system overhead
+ esp_extra_blocks = int(source_params.get('esp_extra_blocks', '100'))
+
du_cmd = "du -bks %s/EFI" % isodir
out = exec_cmd(du_cmd)
blocks = int(out.split()[0])
- # Add some extra space for file system overhead
- blocks += 100
+ blocks += esp_extra_blocks
logger.debug("Added 100 extra blocks to %s to get to %d "
"total blocks", part.mountpoint, blocks)
# dosfs image for EFI boot
bootimg = "%s/efi.img" % isodir
- dosfs_cmd = 'mkfs.vfat -n "EFIimg" -S 512 -C %s %d' \
- % (bootimg, blocks)
+ esp_label = source_params.get('esp_label', 'EFIimg')
+
+ dosfs_cmd = 'mkfs.vfat -n \'%s\' -S 512 -C %s %d' \
+ % (esp_label, bootimg, blocks)
exec_native_cmd(dosfs_cmd, native_sysroot)
mmd_cmd = "mmd -i %s ::/EFI" % bootimg
diff --git a/scripts/lib/wic/plugins/source/rawcopy.py b/scripts/lib/wic/plugins/source/rawcopy.py
index e86398ac8f..21903c2f23 100644
--- a/scripts/lib/wic/plugins/source/rawcopy.py
+++ b/scripts/lib/wic/plugins/source/rawcopy.py
@@ -1,22 +1,13 @@
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# Copyright OpenEmbedded Contributors
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# SPDX-License-Identifier: GPL-2.0-only
#
import logging
import os
+import signal
+import subprocess
from wic import WicError
from wic.pluginbase import SourcePlugin
@@ -34,6 +25,10 @@ class RawCopyPlugin(SourcePlugin):
@staticmethod
def do_image_label(fstype, dst, label):
+ # don't create label when fstype is none
+ if fstype == 'none':
+ return
+
if fstype.startswith('ext'):
cmd = 'tune2fs -L %s %s' % (label, dst)
elif fstype in ('msdos', 'vfat'):
@@ -42,15 +37,35 @@ class RawCopyPlugin(SourcePlugin):
cmd = 'btrfs filesystem label %s %s' % (dst, label)
elif fstype == 'swap':
cmd = 'mkswap -L %s %s' % (label, dst)
- elif fstype == 'squashfs':
- raise WicError("It's not possible to update a squashfs "
- "filesystem label '%s'" % (label))
+ elif fstype in ('squashfs', 'erofs'):
+ raise WicError("It's not possible to update a %s "
+ "filesystem label '%s'" % (fstype, label))
else:
raise WicError("Cannot update filesystem label: "
"Unknown fstype: '%s'" % (fstype))
exec_cmd(cmd)
+ @staticmethod
+ def do_image_uncompression(src, dst, workdir):
+ def subprocess_setup():
+ # Python installs a SIGPIPE handler by default. This is usually not what
+ # non-Python subprocesses expect.
+ # SIGPIPE errors are known issues with gzip/bash
+ signal.signal(signal.SIGPIPE, signal.SIG_DFL)
+
+ extension = os.path.splitext(src)[1]
+ decompressor = {
+ ".bz2": "bzip2",
+ ".gz": "gzip",
+ ".xz": "xz",
+ ".zst": "zstd -f",
+ }.get(extension)
+ if not decompressor:
+ raise WicError("Not supported compressor filename extension: %s" % extension)
+ cmd = "%s -dc %s > %s" % (decompressor, src, dst)
+ subprocess.call(cmd, preexec_fn=subprocess_setup, shell=True, cwd=workdir)
+
@classmethod
def do_prepare_partition(cls, part, source_params, cr, cr_workdir,
oe_builddir, bootimg_dir, kernel_dir,
@@ -69,8 +84,17 @@ class RawCopyPlugin(SourcePlugin):
if 'file' not in source_params:
raise WicError("No file specified")
- src = os.path.join(kernel_dir, source_params['file'])
- dst = os.path.join(cr_workdir, "%s.%s" % (source_params['file'], part.lineno))
+ if 'unpack' in source_params:
+ img = os.path.join(kernel_dir, source_params['file'])
+ src = os.path.join(cr_workdir, os.path.splitext(source_params['file'])[0])
+ RawCopyPlugin.do_image_uncompression(img, src, cr_workdir)
+ else:
+ src = os.path.join(kernel_dir, source_params['file'])
+
+ dst = os.path.join(cr_workdir, "%s.%s" % (os.path.basename(source_params['file']), part.lineno))
+
+ if not os.path.exists(os.path.dirname(dst)):
+ os.makedirs(os.path.dirname(dst))
if 'skip' in source_params:
sparse_copy(src, dst, skip=int(source_params['skip']))
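
With the new 'unpack' parameter, rawcopy first decompresses the image into the
work directory, picking bzip2, gzip, xz or zstd from the file extension, and then
performs the copy from the decompressed file. A hypothetical .wks usage (the file
name is illustrative):

    part / --source rawcopy --sourceparams="file=core-image-minimal.ext4.gz,unpack"
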
diff --git a/scripts/lib/wic/plugins/source/rootfs.py b/scripts/lib/wic/plugins/source/rootfs.py
index aec720fb22..c990143c0d 100644
--- a/scripts/lib/wic/plugins/source/rootfs.py
+++ b/scripts/lib/wic/plugins/source/rootfs.py
@@ -1,21 +1,7 @@
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# Copyright (c) 2014, Intel Corporation.
-# All rights reserved.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# SPDX-License-Identifier: GPL-2.0-only
#
# DESCRIPTION
# This implements the 'rootfs' source plugin class for 'wic'
@@ -31,10 +17,11 @@ import shutil
import sys
from oe.path import copyhardlinktree
+from pathlib import Path
from wic import WicError
from wic.pluginbase import SourcePlugin
-from wic.misc import get_bitbake_var
+from wic.misc import get_bitbake_var, exec_native_cmd
logger = logging.getLogger('wic')
@@ -46,8 +33,24 @@ class RootfsPlugin(SourcePlugin):
name = 'rootfs'
@staticmethod
+ def __validate_path(cmd, rootfs_dir, path):
+ if os.path.isabs(path):
+ logger.error("%s: Must be relative: %s" % (cmd, path))
+ sys.exit(1)
+
+ # Disallow climbing outside of parent directory using '..',
+ # because doing so could be quite disastrous (we will delete the
+ # directory, or modify a directory outside OpenEmbedded).
+ full_path = os.path.realpath(os.path.join(rootfs_dir, path))
+ if not full_path.startswith(os.path.realpath(rootfs_dir)):
+ logger.error("%s: Must point inside the rootfs: %s" % (cmd, path))
+ sys.exit(1)
+
+ return full_path
+
+ @staticmethod
def __get_rootfs_dir(rootfs_dir):
- if os.path.isdir(rootfs_dir):
+ if rootfs_dir and os.path.isdir(rootfs_dir):
return os.path.realpath(rootfs_dir)
image_rootfs_dir = get_bitbake_var("IMAGE_ROOTFS", rootfs_dir)
@@ -58,6 +61,15 @@ class RootfsPlugin(SourcePlugin):
return os.path.realpath(image_rootfs_dir)
+ @staticmethod
+ def __get_pseudo(native_sysroot, rootfs, pseudo_dir):
+ pseudo = "export PSEUDO_PREFIX=%s/usr;" % native_sysroot
+ pseudo += "export PSEUDO_LOCALSTATEDIR=%s;" % pseudo_dir
+ pseudo += "export PSEUDO_PASSWD=%s;" % rootfs
+ pseudo += "export PSEUDO_NOSYMLINKEXP=1;"
+ pseudo += "%s " % get_bitbake_var("FAKEROOTCMD")
+ return pseudo
+
@classmethod
def do_prepare_partition(cls, part, source_params, cr, cr_workdir,
oe_builddir, bootimg_dir, kernel_dir,
@@ -82,45 +94,143 @@ class RootfsPlugin(SourcePlugin):
"it is not a valid path, exiting" % part.rootfs_dir)
part.rootfs_dir = cls.__get_rootfs_dir(rootfs_dir)
+ part.has_fstab = os.path.exists(os.path.join(part.rootfs_dir, "etc/fstab"))
+ pseudo_dir = os.path.join(part.rootfs_dir, "../pseudo")
+ if not os.path.lexists(pseudo_dir):
+ pseudo_dir = os.path.join(cls.__get_rootfs_dir(None), '../pseudo')
+
+ if not os.path.lexists(pseudo_dir):
+ logger.warning("%s folder does not exist. "
+ "Usernames and permissions will be invalid" % pseudo_dir)
+ pseudo_dir = None
new_rootfs = None
+ new_pseudo = None
# Handle excluded paths.
- if part.exclude_path is not None:
- # We need a new rootfs directory we can delete files from. Copy to
- # workdir.
+ if part.exclude_path or part.include_path or part.change_directory or part.update_fstab_in_rootfs:
+ # We need a new rootfs directory we can safely modify without
+ # interfering with other tasks. Copy to workdir.
new_rootfs = os.path.realpath(os.path.join(cr_workdir, "rootfs%d" % part.lineno))
if os.path.lexists(new_rootfs):
shutil.rmtree(os.path.join(new_rootfs))
- copyhardlinktree(part.rootfs_dir, new_rootfs)
+ if part.change_directory:
+ cd = part.change_directory
+ if cd[-1] == '/':
+ cd = cd[:-1]
+ orig_dir = cls.__validate_path("--change-directory", part.rootfs_dir, cd)
+ else:
+ orig_dir = part.rootfs_dir
+ copyhardlinktree(orig_dir, new_rootfs)
+
+ # Convert the pseudo directory to its new location
+ if pseudo_dir:
+ new_pseudo = os.path.realpath(
+ os.path.join(cr_workdir, "pseudo%d" % part.lineno))
+ if os.path.lexists(new_pseudo):
+ shutil.rmtree(new_pseudo)
+ os.mkdir(new_pseudo)
+ shutil.copy(os.path.join(pseudo_dir, "files.db"),
+ os.path.join(new_pseudo, "files.db"))
+
+ pseudo_cmd = "%s -B -m %s -M %s" % (cls.__get_pseudo(native_sysroot,
+ new_rootfs,
+ new_pseudo),
+ orig_dir, new_rootfs)
+ exec_native_cmd(pseudo_cmd, native_sysroot)
+
+ for in_path in part.include_path or []:
+ # Parse arguments
+ include_path = in_path[0]
+ if len(in_path) > 2:
+ logger.error("'Invalid number of arguments for include-path")
+ sys.exit(1)
+ if len(in_path) == 2:
+ path = in_path[1]
+ else:
+ path = None
+
+ # Pack files to be included into a tar file.
+ # We need to create a tar file, because that way we can keep the
+ # permissions from the files even when they belong to different
+ # pseudo environments.
+ # If we simply copy files using copyhardlinktree/copytree... the
+ # copied files will belong to the user running wic.
+ tar_file = os.path.realpath(
+ os.path.join(cr_workdir, "include-path%d.tar" % part.lineno))
+ if os.path.isfile(include_path):
+ parent = os.path.dirname(os.path.realpath(include_path))
+ tar_cmd = "tar c --owner=root --group=root -f %s -C %s %s" % (
+ tar_file, parent, os.path.relpath(include_path, parent))
+ exec_native_cmd(tar_cmd, native_sysroot)
+ else:
+ if include_path in krootfs_dir:
+ include_path = krootfs_dir[include_path]
+ include_path = cls.__get_rootfs_dir(include_path)
+ include_pseudo = os.path.join(include_path, "../pseudo")
+ if os.path.lexists(include_pseudo):
+ pseudo = cls.__get_pseudo(native_sysroot, include_path,
+ include_pseudo)
+ tar_cmd = "tar cf %s -C %s ." % (tar_file, include_path)
+ else:
+ pseudo = None
+ tar_cmd = "tar c --owner=root --group=root -f %s -C %s ." % (
+ tar_file, include_path)
+ exec_native_cmd(tar_cmd, native_sysroot, pseudo)
+
+ # Create destination
+ if path:
+ destination = cls.__validate_path("--include-path", new_rootfs, path)
+ Path(destination).mkdir(parents=True, exist_ok=True)
+ else:
+ destination = new_rootfs
+
+ # Extract into destination
+ untar_cmd = "tar xf %s -C %s" % (tar_file, destination)
+ if new_pseudo:
+ pseudo = cls.__get_pseudo(native_sysroot, new_rootfs, new_pseudo)
+ else:
+ pseudo = None
+ exec_native_cmd(untar_cmd, native_sysroot, pseudo)
+ os.remove(tar_file)
- for orig_path in part.exclude_path:
+ for orig_path in part.exclude_path or []:
path = orig_path
- if os.path.isabs(path):
- logger.error("Must be relative: --exclude-path=%s" % orig_path)
- sys.exit(1)
- full_path = os.path.realpath(os.path.join(new_rootfs, path))
+ full_path = cls.__validate_path("--exclude-path", new_rootfs, path)
- # Disallow climbing outside of parent directory using '..',
- # because doing so could be quite disastrous (we will delete the
- # directory).
- if not full_path.startswith(new_rootfs):
- logger.error("'%s' points to a path outside the rootfs" % orig_path)
- sys.exit(1)
+ if not os.path.lexists(full_path):
+ continue
+ if new_pseudo:
+ pseudo = cls.__get_pseudo(native_sysroot, new_rootfs, new_pseudo)
+ else:
+ pseudo = None
if path.endswith(os.sep):
# Delete content only.
for entry in os.listdir(full_path):
full_entry = os.path.join(full_path, entry)
- if os.path.isdir(full_entry) and not os.path.islink(full_entry):
- shutil.rmtree(full_entry)
- else:
- os.remove(full_entry)
+ rm_cmd = "rm -rf %s" % (full_entry)
+ exec_native_cmd(rm_cmd, native_sysroot, pseudo)
else:
# Delete whole directory.
- shutil.rmtree(full_path)
+ rm_cmd = "rm -rf %s" % (full_path)
+ exec_native_cmd(rm_cmd, native_sysroot, pseudo)
+
+ # Update part.has_fstab here as fstab may have been added or
+ # removed by the above modifications.
+ part.has_fstab = os.path.exists(os.path.join(new_rootfs, "etc/fstab"))
+ if part.update_fstab_in_rootfs and part.has_fstab and not part.no_fstab_update:
+ fstab_path = os.path.join(new_rootfs, "etc/fstab")
+ # Assume that fstab should always be owned by root with fixed permissions
+ install_cmd = "install -m 0644 -p %s %s" % (part.updated_fstab_path, fstab_path)
+ if new_pseudo:
+ pseudo = cls.__get_pseudo(native_sysroot, new_rootfs, new_pseudo)
+ else:
+ pseudo = None
+ exec_native_cmd(install_cmd, native_sysroot, pseudo)
part.prepare_rootfs(cr_workdir, oe_builddir,
- new_rootfs or part.rootfs_dir, native_sysroot)
+ new_rootfs or part.rootfs_dir, native_sysroot,
+ pseudo_dir = new_pseudo or pseudo_dir)
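
The new __validate_path helper rejects absolute paths and paths that climb out of
the rootfs through '..' components or symlinks. A minimal standalone sketch of the
same containment test (the function name and paths are illustrative, not part of
the patch):

    import os.path

    def is_inside(rootfs_dir, path):
        # Resolve '..' and symlinks first, then compare prefixes, mirroring
        # the realpath/startswith check in __validate_path
        full = os.path.realpath(os.path.join(rootfs_dir, path))
        return full.startswith(os.path.realpath(rootfs_dir))

    print(is_inside("/build/rootfs", "etc/fstab"))  # True
    print(is_inside("/build/rootfs", "../other"))   # False: escapes the rootfs
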
diff --git a/scripts/lnr b/scripts/lnr
deleted file mode 100755
index 5fed780eb2..0000000000
--- a/scripts/lnr
+++ /dev/null
@@ -1,21 +0,0 @@
-#! /usr/bin/env python3
-
-# Create a *relative* symlink, just like ln --relative does but without needing
-# coreutils 8.16.
-
-import sys, os
-
-if len(sys.argv) != 3:
- print("$ lnr TARGET LINK_NAME")
- sys.exit(1)
-
-target = sys.argv[1]
-linkname = sys.argv[2]
-
-if os.path.isabs(target):
- if not os.path.isabs(linkname):
- linkname = os.path.abspath(linkname)
- start = os.path.dirname(linkname)
- target = os.path.relpath(target, start)
-
-os.symlink(target, linkname)
diff --git a/scripts/multilib_header_wrapper.h b/scripts/multilib_header_wrapper.h
index f516673b63..88f3193812 100644
--- a/scripts/multilib_header_wrapper.h
+++ b/scripts/multilib_header_wrapper.h
@@ -1,43 +1,13 @@
/*
* Copyright (C) 2005-2011 by Wind River Systems, Inc.
*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
+ * SPDX-License-Identifier: MIT
*
*/
-
-#if defined (__arm__)
-#define __MHWORDSIZE 32
-#elif defined (__aarch64__) && defined ( __LP64__)
-#define __MHWORDSIZE 64
-#elif defined (__aarch64__)
-#define __MHWORDSIZE 32
-#else
#include <bits/wordsize.h>
-#if defined (__WORDSIZE)
-#define __MHWORDSIZE __WORDSIZE
-#else
-#error "__WORDSIZE is not defined"
-#endif
-#endif
-#if __MHWORDSIZE == 32
+#if __WORDSIZE == 32
#ifdef _MIPS_SIM
@@ -53,7 +23,7 @@
#include <ENTER_HEADER_FILENAME_HERE-32.h>
#endif
-#elif __MHWORDSIZE == 64
+#elif __WORDSIZE == 64
#include <ENTER_HEADER_FILENAME_HERE-64.h>
#else
#error "Unknown __WORDSIZE detected"
diff --git a/scripts/native-intercept/ar b/scripts/native-intercept/ar
new file mode 100755
index 0000000000..dcc623e3ed
--- /dev/null
+++ b/scripts/native-intercept/ar
@@ -0,0 +1,32 @@
+#!/usr/bin/env python3
+#
+# Wrapper around 'ar' that defaults to deterministic archives
+
+import os
+import shutil
+import sys
+
+# calculate path to the real 'ar'
+path = os.environ['PATH']
+path = path.replace(os.path.dirname(sys.argv[0]), '')
+real_ar = shutil.which('ar', path=path)
+
+if len(sys.argv) == 1:
+ os.execl(real_ar, 'ar')
+
+# modify args to mimic 'ar' configured with --default-deterministic-archives
+argv = sys.argv
+if argv[1].startswith('--'):
+ # No modifier given, nothing to rewrite
+ pass
+else:
+ # remove the optional '-'
+ if argv[1][0] == '-':
+ argv[1] = argv[1][1:]
+ if 'U' in argv[1]:
+ sys.stderr.write("ar: non-deterministic mode requested\n")
+ else:
+ argv[1] = argv[1].replace('u', '')
+ argv[1] = 'D' + argv[1]
+
+os.execv(real_ar, argv)
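
As a worked illustration (not part of the script): invoked as 'ar rcu libfoo.a
foo.o', the wrapper re-executes 'ar Drc libfoo.a foo.o'. The 'u' (update only
newer) modifier is dropped because it is meaningless once timestamps are zeroed,
and 'D' selects deterministic mode, matching binutils configured with
--default-deterministic-archives.
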
diff --git a/scripts/native-intercept/chgrp b/scripts/native-intercept/chgrp
new file mode 100755
index 0000000000..399c979f9a
--- /dev/null
+++ b/scripts/native-intercept/chgrp
@@ -0,0 +1,5 @@
+#! /bin/sh
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+echo "Intercept $0: $@ -- do nothing"
diff --git a/scripts/native-intercept/chown b/scripts/native-intercept/chown
index 4f43271c2b..399c979f9a 100755
--- a/scripts/native-intercept/chown
+++ b/scripts/native-intercept/chown
@@ -1,2 +1,5 @@
#! /bin/sh
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
echo "Intercept $0: $@ -- do nothing"
diff --git a/scripts/nativesdk-intercept/chgrp b/scripts/nativesdk-intercept/chgrp
new file mode 100755
index 0000000000..f8ae84b8b3
--- /dev/null
+++ b/scripts/nativesdk-intercept/chgrp
@@ -0,0 +1,30 @@
+#!/usr/bin/env python3
+#
+# Wrapper around 'chgrp' that redirects to root in all cases
+
+import os
+import shutil
+import sys
+
+# calculate path to the real 'chgrp'
+path = os.environ['PATH']
+path = path.replace(os.path.dirname(sys.argv[0]), '')
+real_chgrp = shutil.which('chgrp', path=path)
+
+args = list()
+
+found = False
+
+args.append(real_chgrp)
+
+for i in sys.argv[1:]:
+ if i.startswith("-"):
+ args.append(i)
+ continue
+ if not found:
+ args.append("root")
+ found = True
+ else:
+ args.append(i)
+
+os.execv(real_chgrp, args)
diff --git a/scripts/nativesdk-intercept/chown b/scripts/nativesdk-intercept/chown
new file mode 100755
index 0000000000..0805ceb70a
--- /dev/null
+++ b/scripts/nativesdk-intercept/chown
@@ -0,0 +1,30 @@
+#!/usr/bin/env python3
+#
+# Wrapper around 'chown' that redirects to root in all cases
+
+import os
+import shutil
+import sys
+
+# calculate path to the real 'chown'
+path = os.environ['PATH']
+path = path.replace(os.path.dirname(sys.argv[0]), '')
+real_chown = shutil.which('chown', path=path)
+
+args = list()
+
+found = False
+
+args.append(real_chown)
+
+for i in sys.argv[1:]:
+ if i.startswith("-"):
+ args.append(i)
+ continue
+ if not found:
+ args.append("root:root")
+ found = True
+ else:
+ args.append(i)
+
+os.execv(real_chown, args)
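
Both wrappers pass option arguments through untouched and replace only the first
non-option argument, the ownership spec. For instance (paths invented):

    chown -R builder:builder ./sdk-output    ->    chown -R root:root ./sdk-output
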
diff --git a/scripts/oe-build-perf-report b/scripts/oe-build-perf-report
index 0bd05f44ef..7812ea4540 100755
--- a/scripts/oe-build-perf-report
+++ b/scripts/oe-build-perf-report
@@ -1,18 +1,12 @@
-#!/usr/bin/python3
+#!/usr/bin/env python3
#
# Examine build performance test results
#
# Copyright (c) 2017, Intel Corporation.
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
+# SPDX-License-Identifier: GPL-2.0-only
#
+
import argparse
import json
import logging
@@ -37,58 +31,18 @@ from buildstats import BuildStats, diff_buildstats, BSVerDiff
scriptpath.add_oe_lib_path()
from oeqa.utils.git import GitRepo, GitError
+import oeqa.utils.gitarchive as gitarchive
# Setup logging
logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s")
log = logging.getLogger('oe-build-perf-report')
-
-# Container class for tester revisions
-TestedRev = namedtuple('TestedRev', 'commit commit_number tags')
-
-
-def get_test_runs(repo, tag_name, **kwargs):
- """Get a sorted list of test runs, matching given pattern"""
- # First, get field names from the tag name pattern
- field_names = [m.group(1) for m in re.finditer(r'{(\w+)}', tag_name)]
- undef_fields = [f for f in field_names if f not in kwargs.keys()]
-
- # Fields for formatting tag name pattern
- str_fields = dict([(f, '*') for f in field_names])
- str_fields.update(kwargs)
-
- # Get a list of all matching tags
- tag_pattern = tag_name.format(**str_fields)
- tags = repo.run_cmd(['tag', '-l', tag_pattern]).splitlines()
- log.debug("Found %d tags matching pattern '%s'", len(tags), tag_pattern)
-
- # Parse undefined fields from tag names
- str_fields = dict([(f, r'(?P<{}>[\w\-.()]+)'.format(f)) for f in field_names])
- str_fields['branch'] = r'(?P<branch>[\w\-.()/]+)'
- str_fields['commit'] = '(?P<commit>[0-9a-f]{7,40})'
- str_fields['commit_number'] = '(?P<commit_number>[0-9]{1,7})'
- str_fields['tag_number'] = '(?P<tag_number>[0-9]{1,5})'
- # escape parenthesis in fields in order to not messa up the regexp
- fixed_fields = dict([(k, v.replace('(', r'\(').replace(')', r'\)')) for k, v in kwargs.items()])
- str_fields.update(fixed_fields)
- tag_re = re.compile(tag_name.format(**str_fields))
-
- # Parse fields from tags
- revs = []
- for tag in tags:
- m = tag_re.match(tag)
- groups = m.groupdict()
- revs.append([groups[f] for f in undef_fields] + [tag])
-
- # Return field names and a sorted list of revs
- return undef_fields, sorted(revs)
-
def list_test_revs(repo, tag_name, verbosity, **kwargs):
"""Get list of all tested revisions"""
valid_kwargs = dict([(k, v) for k, v in kwargs.items() if v is not None])
- fields, revs = get_test_runs(repo, tag_name, **valid_kwargs)
+ fields, revs = gitarchive.get_test_runs(log, repo, tag_name, **valid_kwargs)
ignore_fields = ['tag_number']
if verbosity < 2:
extra_fields = ['COMMITS', 'TEST RUNS']
@@ -133,36 +87,6 @@ def list_test_revs(repo, tag_name, verbosity, **kwargs):
print_table(rows)
-def get_test_revs(repo, tag_name, **kwargs):
- """Get list of all tested revisions"""
- fields, runs = get_test_runs(repo, tag_name, **kwargs)
-
- revs = {}
- commit_i = fields.index('commit')
- commit_num_i = fields.index('commit_number')
- for run in runs:
- commit = run[commit_i]
- commit_num = run[commit_num_i]
- tag = run[-1]
- if not commit in revs:
- revs[commit] = TestedRev(commit, commit_num, [tag])
- else:
- assert commit_num == revs[commit].commit_number, "Commit numbers do not match"
- revs[commit].tags.append(tag)
-
- # Return in sorted table
- revs = sorted(revs.values(), key=attrgetter('commit_number'))
- log.debug("Found %d tested revisions:\n %s", len(revs),
- "\n ".join(['{} ({})'.format(rev.commit_number, rev.commit) for rev in revs]))
- return revs
-
-def rev_find(revs, attr, val):
- """Search from a list of TestedRev"""
- for i, rev in enumerate(revs):
- if getattr(rev, attr) == val:
- return i
- raise ValueError("Unable to find '{}' value '{}'".format(attr, val))
-
def is_xml_format(repo, commit):
"""Check if the commit contains xml (or json) data"""
if repo.rev_parse(commit + ':results.xml'):
@@ -427,9 +351,9 @@ def print_html_report(data, id_comp, buildstats):
# Compare buildstats
bs_key = test + '.' + meas
- rev = metadata['commit_num']['value']
- comp_rev = metadata['commit_num']['value_old']
- if (rev in buildstats and bs_key in buildstats[rev] and
+ rev = str(metadata['commit_num']['value'])
+ comp_rev = str(metadata['commit_num']['value_old'])
+ if (buildstats and rev in buildstats and bs_key in buildstats[rev] and
comp_rev in buildstats and bs_key in buildstats[comp_rev]):
new_meas['buildstats'] = BSSummary(buildstats[comp_rev][bs_key],
buildstats[rev][bs_key])
@@ -448,7 +372,7 @@ def print_html_report(data, id_comp, buildstats):
chart_opts=chart_opts))
-def get_buildstats(repo, notes_ref, revs, outdir=None):
+def get_buildstats(repo, notes_ref, notes_ref2, revs, outdir=None):
"""Get the buildstats from git notes"""
full_ref = 'refs/notes/' + notes_ref
if not repo.rev_parse(full_ref):
@@ -467,8 +391,13 @@ def get_buildstats(repo, notes_ref, revs, outdir=None):
for tag in rev.tags:
log.debug(' %s', tag)
try:
- bs_all = json.loads(repo.run_cmd(['notes', '--ref', notes_ref,
- 'show', tag + '^0']))
+ try:
+ bs_all = json.loads(repo.run_cmd(['notes', '--ref', notes_ref, 'show', tag + '^0']))
+ except GitError:
+ if notes_ref2:
+ bs_all = json.loads(repo.run_cmd(['notes', '--ref', notes_ref2, 'show', tag + '^0']))
+ else:
+ raise
except GitError:
log.warning("Buildstats not found for %s", tag)
bs_all = {}
@@ -512,10 +441,10 @@ def auto_args(repo, args):
key = split[0]
val = split[1].strip()
- if key == 'hostname':
+ if key == 'hostname' and not args.hostname:
log.debug("Using hostname %s", val)
args.hostname = val
- elif key == 'branch':
+ elif key == 'branch' and not args.branch:
log.debug("Using branch %s", val)
args.branch = val
@@ -541,7 +470,8 @@ Examine build performance test results from a Git repository"""
default='{hostname}/{branch}/{machine}/{commit_number}-g{commit}/{tag_number}',
help="Tag name (pattern) for finding results")
group.add_argument('--hostname', '-H')
- group.add_argument('--branch', '-B', default='master')
+ group.add_argument('--branch', '-B', default='master', help="Branch to find commit in")
+ group.add_argument('--branch2', help="Branch to find comparison revisions in")
group.add_argument('--machine', default='qemux86')
group.add_argument('--history-length', default=25, type=int,
help="Number of tested revisions to plot in html report")
@@ -577,32 +507,51 @@ def main(argv=None):
if not args.hostname:
auto_args(repo, args)
- revs = get_test_revs(repo, args.tag_name, hostname=args.hostname,
- branch=args.branch, machine=args.machine)
- if len(revs) < 2:
- log.error("%d tester revisions found, unable to generate report",
- len(revs))
- return 1
+ revs = gitarchive.get_test_revs(log, repo, args.tag_name, hostname=args.hostname,
+ branch=args.branch, machine=args.machine)
+ if args.branch2 and args.branch2 != args.branch:
+ revs2 = gitarchive.get_test_revs(log, repo, args.tag_name, hostname=args.hostname,
+ branch=args.branch2, machine=args.machine)
+ if not len(revs2):
+ log.error("No revisions found to compare against")
+ return 1
+ if not len(revs):
+ log.error("No revision to report on found")
+ return 1
+ else:
+ if len(revs) < 2:
+ log.error("Only %d tester revisions found, unable to generate report" % len(revs))
+ return 1
# Pick revisions
if args.commit:
if args.commit_number:
log.warning("Ignoring --commit-number as --commit was specified")
- index1 = rev_find(revs, 'commit', args.commit)
+ index1 = gitarchive.rev_find(revs, 'commit', args.commit)
elif args.commit_number:
- index1 = rev_find(revs, 'commit_number', args.commit_number)
+ index1 = gitarchive.rev_find(revs, 'commit_number', args.commit_number)
else:
index1 = len(revs) - 1
+ if args.branch2 and args.branch2 != args.branch:
+ revs2.append(revs[index1])
+ index1 = len(revs2) - 1
+ revs = revs2
+
if args.commit2:
if args.commit_number2:
log.warning("Ignoring --commit-number2 as --commit2 was specified")
- index2 = rev_find(revs, 'commit', args.commit2)
+ index2 = gitarchive.rev_find(revs, 'commit', args.commit2)
elif args.commit_number2:
- index2 = rev_find(revs, 'commit_number', args.commit_number2)
+ index2 = gitarchive.rev_find(revs, 'commit_number', args.commit_number2)
else:
if index1 > 0:
index2 = index1 - 1
+ # Find the closest matching commit number for comparison.
+ # In future we could check that the commit is a common ancestor and
+ # continue back if not, but this is good enough for now
+ while index2 > 0 and revs[index2].commit_number > revs[index1].commit_number:
+ index2 = index2 - 1
else:
log.error("Unable to determine the other commit, use "
"--commit2 or --commit-number2 to specify it")
@@ -645,9 +594,12 @@ def main(argv=None):
buildstats = None
if args.dump_buildstats or args.html:
outdir = 'oe-build-perf-buildstats' if args.dump_buildstats else None
- notes_ref = 'buildstats/{}/{}/{}'.format(args.hostname, args.branch,
- args.machine)
- buildstats = get_buildstats(repo, notes_ref, [rev_l, rev_r], outdir)
+ notes_ref = 'buildstats/{}/{}/{}'.format(args.hostname, args.branch, args.machine)
+ notes_ref2 = None
+ if args.branch2:
+ notes_ref = 'buildstats/{}/{}/{}'.format(args.hostname, args.branch2, args.machine)
+ notes_ref2 = 'buildstats/{}/{}/{}'.format(args.hostname, args.branch, args.machine)
+ buildstats = get_buildstats(repo, notes_ref, notes_ref2, [rev_l, rev_r], outdir)
# Print report
if not args.html:
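
A sketch of the new cross-branch comparison (host and branch names invented):

    oe-build-perf-report -H buildworker1 -B master-next --branch2 master

This reports the newest run on master-next against the revision on master with
the closest matching commit number, as selected by the index2 loop above.
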
diff --git a/scripts/oe-build-perf-test b/scripts/oe-build-perf-test
index 669470fa97..00e00b4ce9 100755
--- a/scripts/oe-build-perf-test
+++ b/scripts/oe-build-perf-test
@@ -1,19 +1,12 @@
-#!/usr/bin/python3
+#!/usr/bin/env python3
#
# Build performance test script
#
# Copyright (c) 2016, Intel Corporation.
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
-#
-"""Build performance test script"""
+
import argparse
import errno
import fcntl
diff --git a/scripts/oe-buildenv-internal b/scripts/oe-buildenv-internal
index 6773872326..2fdb19565a 100755
--- a/scripts/oe-buildenv-internal
+++ b/scripts/oe-buildenv-internal
@@ -4,19 +4,8 @@
#
# Copyright (C) 2006-2011 Linux Foundation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
+# SPDX-License-Identifier: GPL-2.0-or-later
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
if ! $(return >/dev/null 2>&1) ; then
echo 'oe-buildenv-internal: error: this script must be sourced'
@@ -40,31 +29,15 @@ if [ -z "$OE_SKIP_SDK_CHECK" ] && [ -n "$OECORE_SDK_VERSION" ]; then
return 1
fi
-# Make sure we're not using python v3.x as 'python', we don't support it.
-py_v2_check=$(/usr/bin/env python --version 2>&1 | grep "Python 3")
-if [ -n "$py_v2_check" ]; then
- echo >&2 "OpenEmbedded requires 'python' to be python v2 (>= 2.7.3), not python v3."
- echo >&2 "Please set up python v2 as your default 'python' interpreter."
- return 1
-fi
-unset py_v2_check
-
-py_v27_check=$(python -c 'import sys; print sys.version_info >= (2,7,3)')
-if [ "$py_v27_check" != "True" ]; then
- echo >&2 "OpenEmbedded requires 'python' to be python v2 (>= 2.7.3), not python v3."
- echo >&2 "Please upgrade your python v2."
-fi
-unset py_v27_check
-
# We potentially have code that doesn't parse correctly with older versions
# of Python, and rather than fixing that and being eternally vigilant for
# any other new feature use, just check the version here.
-py_v34_check=$(python3 -c 'import sys; print(sys.version_info >= (3,4,0))')
-if [ "$py_v34_check" != "True" ]; then
- echo >&2 "BitBake requires Python 3.4.0 or later as 'python3'"
+py_v38_check=$(python3 -c 'import sys; print(sys.version_info >= (3,8,0))')
+if [ "$py_v38_check" != "True" ]; then
+ echo >&2 "BitBake requires Python 3.8.0 or later as 'python3' (scripts/install-buildtools can be used if needed)"
return 1
fi
-unset py_v34_check
+unset py_v38_check
if [ -z "$BDIR" ]; then
if [ -z "$1" ]; then
@@ -115,26 +88,32 @@ if [ ! -d "$BITBAKEDIR" ]; then
return 1
fi
+# Add BitBake's library to PYTHONPATH
+PYTHONPATH=$BITBAKEDIR/lib:$PYTHONPATH
+export PYTHONPATH
+
+# Remove any paths added by sourcing this script before
+[ -n "$OE_ADDED_PATHS" ] && PATH=$(echo $PATH | sed -e "s#$OE_ADDED_PATHS##") ||
+ PATH=$(echo $PATH | sed -e "s#$OEROOT/scripts:$BITBAKEDIR/bin:##")
+
# Make sure our paths are at the beginning of $PATH
-for newpath in "$BITBAKEDIR/bin" "$OEROOT/scripts"; do
- # Remove any existences of $newpath from $PATH
- PATH=$(echo $PATH | sed -re "s#(^|:)$newpath(:|$)#\2#g;s#^:##")
+OE_ADDED_PATHS="$OEROOT/scripts:$BITBAKEDIR/bin:"
+PATH="$OE_ADDED_PATHS$PATH"
+export OE_ADDED_PATHS
- # Add $newpath to $PATH
- PATH="$newpath:$PATH"
-done
-unset BITBAKEDIR newpath
+# This is not needed anymore
+unset BITBAKEDIR
# Used by the runqemu script
export BUILDDIR
-export PATH
-BB_ENV_EXTRAWHITE_OE="MACHINE DISTRO TCMODE TCLIBC HTTP_PROXY http_proxy \
+BB_ENV_PASSTHROUGH_ADDITIONS_OE="MACHINE DISTRO TCMODE TCLIBC HTTP_PROXY http_proxy \
HTTPS_PROXY https_proxy FTP_PROXY ftp_proxy FTPS_PROXY ftps_proxy ALL_PROXY \
all_proxy NO_PROXY no_proxy SSH_AGENT_PID SSH_AUTH_SOCK BB_SRCREV_POLICY \
SDKMACHINE BB_NUMBER_THREADS BB_NO_NETWORK PARALLEL_MAKE GIT_PROXY_COMMAND \
-SOCKS5_PASSWD SOCKS5_USER SCREENDIR STAMPS_DIR BBPATH_EXTRA BB_SETSCENE_ENFORCE"
+SOCKS5_PASSWD SOCKS5_USER SCREENDIR STAMPS_DIR BBPATH_EXTRA BB_SETSCENE_ENFORCE \
+BB_LOGCONFIG"
-BB_ENV_EXTRAWHITE="$(echo $BB_ENV_EXTRAWHITE $BB_ENV_EXTRAWHITE_OE | tr ' ' '\n' | LC_ALL=C sort --unique | tr '\n' ' ')"
+BB_ENV_PASSTHROUGH_ADDITIONS="$(echo $BB_ENV_PASSTHROUGH_ADDITIONS $BB_ENV_PASSTHROUGH_ADDITIONS_OE | tr ' ' '\n' | LC_ALL=C sort --unique | tr '\n' ' ')"
-export BB_ENV_EXTRAWHITE
+export BB_ENV_PASSTHROUGH_ADDITIONS
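
After sourcing, extra variables can be forwarded to BitBake by extending the
renamed passthrough list, e.g. (the variable name is an example):

    export BB_ENV_PASSTHROUGH_ADDITIONS="$BB_ENV_PASSTHROUGH_ADDITIONS MY_VARIABLE"
    MY_VARIABLE=value bitbake core-image-minimal
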
diff --git a/scripts/oe-check-sstate b/scripts/oe-check-sstate
index d06efe436a..0d171c4463 100755
--- a/scripts/oe-check-sstate
+++ b/scripts/oe-check-sstate
@@ -5,18 +5,8 @@
# Copyright 2016 Intel Corporation
# Authored-by: Paul Eggleton <paul.eggleton@intel.com>
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import sys
import os
@@ -28,7 +18,6 @@ import re
scripts_path = os.path.dirname(os.path.realpath(__file__))
lib_path = scripts_path + '/lib'
sys.path = sys.path + [lib_path]
-import scriptutils
import scriptpath
scriptpath.add_bitbake_lib_path()
import argparse_oe
@@ -57,17 +46,14 @@ def check(args):
try:
env = os.environ.copy()
if not args.same_tmpdir:
- env['BB_ENV_EXTRAWHITE'] = env.get('BB_ENV_EXTRAWHITE', '') + ' TMPDIR_forcevariable'
- env['TMPDIR_forcevariable'] = tmpdir
+ env['BB_ENV_PASSTHROUGH_ADDITIONS'] = env.get('BB_ENV_PASSTHROUGH_ADDITIONS', '') + ' TMPDIR:forcevariable'
+ env['TMPDIR:forcevariable'] = tmpdir
try:
- output = subprocess.check_output(
- 'bitbake -n %s' % ' '.join(args.target),
- stderr=subprocess.STDOUT,
- env=env,
- shell=True)
+ cmd = ['bitbake', '--dry-run', '--runall=build'] + args.target
+ output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, env=env)
- task_re = re.compile('NOTE: Running setscene task [0-9]+ of [0-9]+ \(([^)]+)\)')
+ task_re = re.compile(r'NOTE: Running setscene task [0-9]+ of [0-9]+ \(([^)]+)\)')
tasks = []
for line in output.decode('utf-8').splitlines():
res = task_re.match(line)
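
The environment tweak above tracks BitBake's new override separator: what older
releases spelled TMPDIR_forcevariable is now TMPDIR:forcevariable, i.e. the same
override a local.conf would express as (value illustrative):

    TMPDIR:forcevariable = "/tmp/oe-check-sstate-tmpdir"
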
diff --git a/scripts/oe-debuginfod b/scripts/oe-debuginfod
new file mode 100755
index 0000000000..b525310225
--- /dev/null
+++ b/scripts/oe-debuginfod
@@ -0,0 +1,28 @@
+#!/usr/bin/env python3
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
+import os
+import sys
+scripts_path = os.path.dirname(os.path.realpath(__file__))
+lib_path = scripts_path + "/lib"
+sys.path.insert(0, lib_path)
+import scriptpath
+scriptpath.add_bitbake_lib_path()
+
+import bb.tinfoil
+import subprocess
+
+if __name__ == "__main__":
+ with bb.tinfoil.Tinfoil() as tinfoil:
+ tinfoil.prepare(config_only=True)
+ package_classes_var = "DEPLOY_DIR_" + tinfoil.config_data.getVar("PACKAGE_CLASSES").split()[0].replace("package_", "").upper()
+ feed_dir = tinfoil.config_data.getVar(package_classes_var, expand=True)
+
+ subprocess.call(['bitbake', '-c', 'addto_recipe_sysroot', 'elfutils-native'])
+
+ subprocess.call(['oe-run-native', 'elfutils-native', 'debuginfod', '--verbose', '-R', '-U', feed_dir])
+ print("\nTo use the debuginfod server please ensure that this variable PACKAGECONFIG:pn-elfutils-native = \"debuginfod libdebuginfod\" is set in the local.conf")
diff --git a/scripts/oe-depends-dot b/scripts/oe-depends-dot
index 6c7e9d3383..d02ee455f6 100755
--- a/scripts/oe-depends-dot
+++ b/scripts/oe-depends-dot
@@ -2,18 +2,8 @@
#
# Copyright (C) 2018 Wind River Systems, Inc.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-# See the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import os
import sys
@@ -24,8 +14,8 @@ import re
class Dot(object):
def __init__(self):
parser = argparse.ArgumentParser(
- description="Analyse recipe-depends.dot generated by bitbake -g",
- epilog="Use %(prog)s --help to get help")
+ description="Analyse task-depends.dot generated by bitbake -g",
+ formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("dotfile",
help = "Specify the dotfile", nargs = 1, action='store', default='')
parser.add_argument("-k", "--key",
@@ -42,6 +32,21 @@ class Dot(object):
" For example, A->B, B->C, A->C, then A->C can be removed.",
action="store_true", default=False)
+ parser.epilog = """
+Examples:
+First generate the .dot file:
+ bitbake -g core-image-minimal
+
+To find out why a package is being built:
+ %(prog)s -k <package> -w ./task-depends.dot
+
+To find out what a package depends on:
+ %(prog)s -k <package> -d ./task-depends.dot
+
+Reduce the .dot file to packages only, no tasks:
+ %(prog)s -r ./task-depends.dot
+"""
+
self.args = parser.parse_args()
if len(sys.argv) != 3 and len(sys.argv) < 5:
@@ -109,6 +114,10 @@ class Dot(object):
if key == "meta-world-pkgdata":
continue
dep = m.group(2)
+ key = key.split('.')[0]
+ dep = dep.split('.')[0]
+ if key == dep:
+ continue
if key in depends:
if not key in depends[key]:
depends[key].add(dep)
@@ -150,9 +159,14 @@ class Dot(object):
reverse_deps = []
if self.args.why:
- for k, v in depends.items():
- if self.args.key in v and not k in reverse_deps:
- reverse_deps.append(k)
+ key_list = [self.args.key]
+ current_key = self.args.key
+ while (len(key_list) != 0):
+ current_key = key_list.pop()
+ for k, v in depends.items():
+ if current_key in v and not k in reverse_deps:
+ reverse_deps.append(k)
+ key_list.append(k)
print('Because: %s' % ' '.join(reverse_deps))
Dot.print_dep_chains(self.args.key, reverse_deps, depends)
diff --git a/scripts/oe-find-native-sysroot b/scripts/oe-find-native-sysroot
index 350ea21373..6228efcbee 100755
--- a/scripts/oe-find-native-sysroot
+++ b/scripts/oe-find-native-sysroot
@@ -17,18 +17,8 @@
#
# Copyright (c) 2010 Linux Foundation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
if [ "$1" = '--help' -o "$1" = '-h' -o $# -ne 1 ] ; then
echo 'Usage: oe-find-native-sysroot <recipe> [-h|--help]'
@@ -46,20 +36,9 @@ if [ "$1" = '--help' -o "$1" = '-h' -o $# -ne 1 ] ; then
fi
# Global vars
-BITBAKE_E=""
set_oe_native_sysroot(){
- echo "Running bitbake -e $1"
- BITBAKE_E="`bitbake -e $1`"
- OECORE_NATIVE_SYSROOT=`echo "$BITBAKE_E" | grep ^STAGING_DIR_NATIVE= | cut -d '"' -f2`
-
- if [ "x$OECORE_NATIVE_SYSROOT" = "x" ]; then
- # This indicates that there was an error running bitbake -e that
- # the user needs to be informed of
- echo "There was an error running bitbake to determine STAGING_DIR_NATIVE"
- echo "Here is the output from bitbake -e $1"
- echo $BITBAKE_E
- exit 1
- fi
+ echo "Getting sysroot..."
+ OECORE_NATIVE_SYSROOT=$(bitbake-getvar -r $1 --value STAGING_DIR_NATIVE)
}
if [ "x$OECORE_NATIVE_SYSROOT" = "x" ]; then
@@ -110,6 +89,4 @@ pseudo="$OECORE_NATIVE_SYSROOT/usr/bin/pseudo"
if [ -e "$pseudo" ]; then
echo "PSEUDO=$pseudo"
PSEUDO="$pseudo"
-else
- echo "PSEUDO $pseudo is not found."
fi
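
The simplified lookup delegates to bitbake-getvar, which prints just the expanded
value; run by hand it looks like (recipe name is an example):

    bitbake-getvar -r qemu-helper-native --value STAGING_DIR_NATIVE

That is why the manual 'bitbake -e' scraping and its error-reporting fallback
could be removed.
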
diff --git a/scripts/oe-git-archive b/scripts/oe-git-archive
index ab19cb9aa3..9305ed0b0f 100755
--- a/scripts/oe-git-archive
+++ b/scripts/oe-git-archive
@@ -1,29 +1,17 @@
-#!/usr/bin/python3
+#!/usr/bin/env python3
#
# Helper script for committing data to git and pushing upstream
#
# Copyright (c) 2017, Intel Corporation.
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
+# SPDX-License-Identifier: GPL-2.0-only
#
+
import argparse
-import glob
-import json
import logging
-import math
import os
import re
import sys
-from collections import namedtuple, OrderedDict
-from datetime import datetime, timedelta, tzinfo
-from operator import attrgetter
# Import oe and bitbake libs
scripts_path = os.path.dirname(os.path.realpath(__file__))
@@ -34,128 +22,13 @@ scriptpath.add_oe_lib_path()
from oeqa.utils.git import GitRepo, GitError
from oeqa.utils.metadata import metadata_from_bb
-
+import oeqa.utils.gitarchive as gitarchive
# Setup logging
logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s")
log = logging.getLogger()
-class ArchiveError(Exception):
- """Internal error handling of this script"""
-
-
-def format_str(string, fields):
- """Format string using the given fields (dict)"""
- try:
- return string.format(**fields)
- except KeyError as err:
- raise ArchiveError("Unable to expand string '{}': unknown field {} "
- "(valid fields are: {})".format(
- string, err, ', '.join(sorted(fields.keys()))))
-
-
-def init_git_repo(path, no_create, bare):
- """Initialize local Git repository"""
- path = os.path.abspath(path)
- if os.path.isfile(path):
- raise ArchiveError("Invalid Git repo at {}: path exists but is not a "
- "directory".format(path))
- if not os.path.isdir(path) or not os.listdir(path):
- if no_create:
- raise ArchiveError("No git repo at {}, refusing to create "
- "one".format(path))
- if not os.path.isdir(path):
- try:
- os.mkdir(path)
- except (FileNotFoundError, PermissionError) as err:
- raise ArchiveError("Failed to mkdir {}: {}".format(path, err))
- if not os.listdir(path):
- log.info("Initializing a new Git repo at %s", path)
- repo = GitRepo.init(path, bare)
- try:
- repo = GitRepo(path, is_topdir=True)
- except GitError:
- raise ArchiveError("Non-empty directory that is not a Git repository "
- "at {}\nPlease specify an existing Git repository, "
- "an empty directory or a non-existing directory "
- "path.".format(path))
- return repo
-
-
-def git_commit_data(repo, data_dir, branch, message, exclude, notes):
- """Commit data into a Git repository"""
- log.info("Committing data into to branch %s", branch)
- tmp_index = os.path.join(repo.git_dir, 'index.oe-git-archive')
- try:
- # Create new tree object from the data
- env_update = {'GIT_INDEX_FILE': tmp_index,
- 'GIT_WORK_TREE': os.path.abspath(data_dir)}
- repo.run_cmd('add .', env_update)
-
- # Remove files that are excluded
- if exclude:
- repo.run_cmd(['rm', '--cached'] + [f for f in exclude], env_update)
-
- tree = repo.run_cmd('write-tree', env_update)
-
- # Create new commit object from the tree
- parent = repo.rev_parse(branch)
- git_cmd = ['commit-tree', tree, '-m', message]
- if parent:
- git_cmd += ['-p', parent]
- commit = repo.run_cmd(git_cmd, env_update)
-
- # Create git notes
- for ref, filename in notes:
- ref = ref.format(branch_name=branch)
- repo.run_cmd(['notes', '--ref', ref, 'add',
- '-F', os.path.abspath(filename), commit])
-
- # Update branch head
- git_cmd = ['update-ref', 'refs/heads/' + branch, commit]
- if parent:
- git_cmd.append(parent)
- repo.run_cmd(git_cmd)
-
- # Update current HEAD, if we're on branch 'branch'
- if not repo.bare and repo.get_current_branch() == branch:
- log.info("Updating %s HEAD to latest commit", repo.top_dir)
- repo.run_cmd('reset --hard')
-
- return commit
- finally:
- if os.path.exists(tmp_index):
- os.unlink(tmp_index)
-
-
-def expand_tag_strings(repo, name_pattern, msg_subj_pattern, msg_body_pattern,
- keywords):
- """Generate tag name and message, with support for running id number"""
- keyws = keywords.copy()
- # Tag number is handled specially: if not defined, we autoincrement it
- if 'tag_number' not in keyws:
- # Fill in all other fields than 'tag_number'
- keyws['tag_number'] = '{tag_number}'
- tag_re = format_str(name_pattern, keyws)
- # Replace parentheses for proper regex matching
- tag_re = tag_re.replace('(', '\(').replace(')', '\)') + '$'
- # Inject regex group pattern for 'tag_number'
- tag_re = tag_re.format(tag_number='(?P<tag_number>[0-9]{1,5})')
-
- keyws['tag_number'] = 0
- for existing_tag in repo.run_cmd('tag').splitlines():
- match = re.match(tag_re, existing_tag)
-
- if match and int(match.group('tag_number')) >= keyws['tag_number']:
- keyws['tag_number'] = int(match.group('tag_number')) + 1
-
- tag_name = format_str(name_pattern, keyws)
- msg_subj= format_str(msg_subj_pattern.strip(), keyws)
- msg_body = format_str(msg_body_pattern, keyws)
- return tag_name, msg_subj + '\n\n' + msg_body
-
-
def parse_args(argv):
"""Parse command line arguments"""
parser = argparse.ArgumentParser(
@@ -208,60 +81,34 @@ def parse_args(argv):
help="Data to commit")
return parser.parse_args(argv)
+def get_nested(d, list_of_keys):
+ try:
+ for k in list_of_keys:
+ d = d[k]
+ return d
+ except KeyError:
+ return ""
def main(argv=None):
- """Script entry point"""
args = parse_args(argv)
if args.debug:
log.setLevel(logging.DEBUG)
try:
- if not os.path.isdir(args.data_dir):
- raise ArchiveError("Not a directory: {}".format(args.data_dir))
-
- data_repo = init_git_repo(args.git_dir, args.no_create, args.bare)
-
# Get keywords to be used in tag and branch names and messages
metadata = metadata_from_bb()
- keywords = {'hostname': metadata['hostname'],
- 'branch': metadata['layers']['meta']['branch'],
- 'commit': metadata['layers']['meta']['commit'],
- 'commit_count': metadata['layers']['meta']['commit_count'],
- 'machine': metadata['config']['MACHINE']}
-
- # Expand strings early in order to avoid getting into inconsistent
- # state (e.g. no tag even if data was committed)
- commit_msg = format_str(args.commit_msg_subject.strip(), keywords)
- commit_msg += '\n\n' + format_str(args.commit_msg_body, keywords)
- branch_name = format_str(args.branch_name, keywords)
- tag_name = None
- if not args.no_tag and args.tag_name:
- tag_name, tag_msg = expand_tag_strings(data_repo, args.tag_name,
- args.tag_msg_subject,
- args.tag_msg_body, keywords)
-
- # Commit data
- commit = git_commit_data(data_repo, args.data_dir, branch_name,
- commit_msg, args.exclude, args.notes)
-
- # Create tag
- if tag_name:
- log.info("Creating tag %s", tag_name)
- data_repo.run_cmd(['tag', '-a', '-m', tag_msg, tag_name, commit])
-
- # Push data to remote
- if args.push:
- cmd = ['push', '--tags']
- # If no remote is given we push with the default settings from
- # gitconfig
- if args.push is not True:
- notes_refs = ['refs/notes/' + ref.format(branch_name=branch_name)
- for ref, _ in args.notes]
- cmd.extend([args.push, branch_name] + notes_refs)
- log.info("Pushing data to remote")
- data_repo.run_cmd(cmd)
-
- except ArchiveError as err:
+ keywords = {'hostname': get_nested(metadata, ['hostname']),
+ 'branch': get_nested(metadata, ['layers', 'meta', 'branch']),
+ 'commit': get_nested(metadata, ['layers', 'meta', 'commit']),
+ 'commit_count': get_nested(metadata, ['layers', 'meta', 'commit_count']),
+ 'machine': get_nested(metadata, ['config', 'MACHINE'])}
+
+ gitarchive.gitarchive(args.data_dir, args.git_dir, args.no_create, args.bare,
+ args.commit_msg_subject.strip(), args.commit_msg_body, args.branch_name,
+ args.no_tag, args.tag_name, args.tag_msg_subject, args.tag_msg_body,
+ args.exclude, args.notes, args.push, keywords, log)
+
+ except gitarchive.ArchiveError as err:
log.error(str(err))
return 1
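
The get_nested helper makes the metadata lookups tolerant of missing keys; a
quick illustration (data invented):

    metadata = {'layers': {'meta': {'branch': 'master'}}}
    get_nested(metadata, ['layers', 'meta', 'branch'])  # -> 'master'
    get_nested(metadata, ['layers', 'meta', 'commit'])  # -> '' rather than KeyError
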
diff --git a/scripts/oe-git-proxy b/scripts/oe-git-proxy
index 7a43fe6a6e..aa9b9dc9a9 100755
--- a/scripts/oe-git-proxy
+++ b/scripts/oe-git-proxy
@@ -13,11 +13,15 @@
# ALL_PROXY=https://proxy.example.com:8080
#
# Copyright (c) 2013, Intel Corporation.
-# All rights reserved.
+#
+# SPDX-License-Identifier: GPL-2.0-only
#
# AUTHORS
# Darren Hart <dvhart@linux.intel.com>
+# Disable pathname expansion; NO_PROXY fields could start with "*" or even be just "*"
+set -f
+
if [ $# -lt 2 -o "$1" = '--help' -o "$1" = '-h' ] ; then
echo 'oe-git-proxy: error: the following arguments are required: host port'
echo 'Usage: oe-git-proxy host port'
@@ -40,10 +44,12 @@ if [ $# -lt 2 -o "$1" = '--help' -o "$1" = '-h' ] ; then
fi
# Locate the socat binary
-SOCAT=$(which socat 2>/dev/null)
-if [ $? -ne 0 ]; then
- echo "ERROR: socat binary not in PATH" 1>&2
- exit 1
+if [ -z "$SOCAT" ]; then
+ SOCAT=$(which socat 2>/dev/null)
+ if [ $? -ne 0 ]; then
+ echo "ERROR: socat binary not in PATH" 1>&2
+ exit 1
+ fi
fi
METHOD=""
@@ -58,7 +64,7 @@ ipv4_val() {
IP="$1"
SHIFT=24
VAL=0
- for B in ${IP//./ }; do
+ for B in $( echo "$IP" | tr '.' ' ' ); do
VAL=$(($VAL+$(($B<<$SHIFT))))
SHIFT=$(($SHIFT-8))
done
@@ -101,7 +107,7 @@ match_host() {
HOST=$1
GLOB=$2
- if [ -z "${HOST%%$GLOB}" ]; then
+ if [ -z "${HOST%%*$GLOB}" ]; then
return 0
fi
@@ -131,7 +137,7 @@ if [ -z "$ALL_PROXY" ]; then
fi
# Connect directly to hosts in NO_PROXY
-for H in ${NO_PROXY//,/ }; do
+for H in $( echo "$NO_PROXY" | tr ',' ' ' ); do
if match_host $1 $H; then
exec $SOCAT STDIO $METHOD
fi
diff --git a/scripts/oe-gnome-terminal-phonehome b/scripts/oe-gnome-terminal-phonehome
index e02354883a..1352a9872b 100755
--- a/scripts/oe-gnome-terminal-phonehome
+++ b/scripts/oe-gnome-terminal-phonehome
@@ -1,5 +1,9 @@
#!/bin/sh
#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
# Gnome terminal won't tell us which PID a given command is run as
# or allow a single instance so we can't tell when it completes.
# This allows us to figure out the PID of the target so we can tell
diff --git a/scripts/oe-pkgdata-browser b/scripts/oe-pkgdata-browser
new file mode 100755
index 0000000000..c152c82b25
--- /dev/null
+++ b/scripts/oe-pkgdata-browser
@@ -0,0 +1,260 @@
+#! /usr/bin/env python3
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
+import os, sys, enum, ast
+
+scripts_path = os.path.dirname(os.path.realpath(__file__))
+lib_path = scripts_path + '/lib'
+sys.path = sys.path + [lib_path]
+
+import scriptpath
+bitbakepath = scriptpath.add_bitbake_lib_path()
+if not bitbakepath:
+ print("Unable to find bitbake by searching parent directory of this script or PATH")
+ sys.exit(1)
+import bb
+
+import gi
+gi.require_version('Gtk', '3.0')
+from gi.repository import Gtk, Gdk, GObject
+
+RecipeColumns = enum.IntEnum("RecipeColumns", {"Recipe": 0})
+PackageColumns = enum.IntEnum("PackageColumns", {"Package": 0, "Size": 1})
+FileColumns = enum.IntEnum("FileColumns", {"Filename": 0, "Size": 1})
+
+import time
+def timeit(f):
+ def timed(*args, **kw):
+ ts = time.time()
+ print ("func:%r calling" % f.__name__)
+ result = f(*args, **kw)
+ te = time.time()
+ print ('func:%r args:[%r, %r] took: %2.4f sec' % \
+ (f.__name__, args, kw, te-ts))
+ return result
+ return timed
+
+def human_size(nbytes):
+ import math
+ suffixes = ['B', 'kB', 'MB', 'GB', 'TB', 'PB']
+ human = nbytes
+ rank = 0
+ if nbytes != 0:
+ rank = int((math.log10(nbytes)) / 3)
+ rank = min(rank, len(suffixes) - 1)
+ human = nbytes / (1000.0 ** rank)
+ f = ('%.2f' % human).rstrip('0').rstrip('.')
+ return '%s %s' % (f, suffixes[rank])
+
+def load(filename, suffix=None):
+ from configparser import ConfigParser
+ from itertools import chain
+
+ parser = ConfigParser(delimiters=('='))
+ if suffix:
+ parser.optionxform = lambda option: option.replace(":" + suffix, "")
+ with open(filename) as lines:
+ lines = chain(("[fake]",), (line.replace(": ", " = ", 1) for line in lines))
+ parser.read_file(lines)
+
+ # TODO extract the data and put it into a real dict so we can transform some
+ # values to ints?
+ return parser["fake"]
+
+def find_pkgdata():
+ import subprocess
+ output = subprocess.check_output(("bitbake", "-e"), universal_newlines=True)
+ for line in output.splitlines():
+ if line.startswith("PKGDATA_DIR="):
+ return line.split("=", 1)[1].strip("\'\"")
+ # TODO exception or something
+ return None
+
+def packages_in_recipe(pkgdata, recipe):
+ """
+ Load the recipe pkgdata to determine the list of runtime packages.
+ """
+ data = load(os.path.join(pkgdata, recipe))
+ packages = data["PACKAGES"].split()
+ return packages
+
+def load_runtime_package(pkgdata, package):
+ return load(os.path.join(pkgdata, "runtime", package), suffix=package)
+
+def recipe_from_package(pkgdata, package):
+ data = load(os.path.join(pkgdata, "runtime", package), suffix=package)
+ return data["PN"]
+
+def summary(data):
+ s = ""
+ s += "{0[PKG]} {0[PKGV]}-{0[PKGR]}\n{0[LICENSE]}\n{0[SUMMARY]}\n".format(data)
+
+ return s
+
+
+class PkgUi():
+ def __init__(self, pkgdata):
+ self.pkgdata = pkgdata
+ self.current_recipe = None
+ self.recipe_iters = {}
+ self.package_iters = {}
+
+ builder = Gtk.Builder()
+ builder.add_from_file(os.path.join(os.path.dirname(__file__), "oe-pkgdata-browser.glade"))
+
+ self.window = builder.get_object("window")
+ self.window.connect("delete-event", Gtk.main_quit)
+
+ self.recipe_store = builder.get_object("recipe_store")
+ self.recipe_view = builder.get_object("recipe_view")
+ self.package_store = builder.get_object("package_store")
+ self.package_view = builder.get_object("package_view")
+
+ # Somehow resizable does not get set via builder xml
+ package_name_column = builder.get_object("package_name_column")
+ package_name_column.set_resizable(True)
+ file_name_column = builder.get_object("file_name_column")
+ file_name_column.set_resizable(True)
+
+ self.recipe_view.get_selection().connect("changed", self.on_recipe_changed)
+ self.package_view.get_selection().connect("changed", self.on_package_changed)
+
+ self.package_store.set_sort_column_id(PackageColumns.Package, Gtk.SortType.ASCENDING)
+ builder.get_object("package_size_column").set_cell_data_func(builder.get_object("package_size_cell"), lambda column, cell, model, iter, data: cell.set_property("text", human_size(model[iter][PackageColumns.Size])))
+
+ self.label = builder.get_object("label1")
+ self.depends_label = builder.get_object("depends_label")
+ self.recommends_label = builder.get_object("recommends_label")
+ self.suggests_label = builder.get_object("suggests_label")
+ self.provides_label = builder.get_object("provides_label")
+
+ self.depends_label.connect("activate-link", self.on_link_activate)
+ self.recommends_label.connect("activate-link", self.on_link_activate)
+ self.suggests_label.connect("activate-link", self.on_link_activate)
+
+ self.file_store = builder.get_object("file_store")
+ self.file_store.set_sort_column_id(FileColumns.Filename, Gtk.SortType.ASCENDING)
+ builder.get_object("file_size_column").set_cell_data_func(builder.get_object("file_size_cell"), lambda column, cell, model, iter, data: cell.set_property("text", human_size(model[iter][FileColumns.Size])))
+
+ self.files_view = builder.get_object("files_scrollview")
+ self.files_label = builder.get_object("files_label")
+
+ self.load_recipes()
+
+ self.recipe_view.set_cursor(Gtk.TreePath.new_first())
+
+ self.window.show()
+
+ def on_link_activate(self, label, url_string):
+ from urllib.parse import urlparse
+ url = urlparse(url_string)
+ if url.scheme == "package":
+ package = url.path
+ recipe = recipe_from_package(self.pkgdata, package)
+
+ it = self.recipe_iters[recipe]
+ path = self.recipe_store.get_path(it)
+ self.recipe_view.set_cursor(path)
+ self.recipe_view.scroll_to_cell(path)
+
+ self.on_recipe_changed(self.recipe_view.get_selection())
+
+ it = self.package_iters[package]
+ path = self.package_store.get_path(it)
+ self.package_view.set_cursor(path)
+ self.package_view.scroll_to_cell(path)
+
+ return True
+ else:
+ return False
+
+ def on_recipe_changed(self, selection):
+ self.package_store.clear()
+ self.package_iters = {}
+
+ (model, it) = selection.get_selected()
+ if not it:
+ return
+
+ recipe = model[it][RecipeColumns.Recipe]
+ packages = packages_in_recipe(self.pkgdata, recipe)
+ for package in packages:
+ # TODO also show PKG after debian-renaming?
+ data = load_runtime_package(self.pkgdata, package)
+ # TODO stash data to avoid reading in on_package_changed
+ self.package_iters[package] = self.package_store.append([package, int(data["PKGSIZE"])])
+
+ package = recipe if recipe in packages else sorted(packages)[0]
+ path = self.package_store.get_path(self.package_iters[package])
+ self.package_view.set_cursor(path)
+ self.package_view.scroll_to_cell(path)
+
+ def on_package_changed(self, selection):
+ self.label.set_text("")
+ self.file_store.clear()
+ self.depends_label.hide()
+ self.recommends_label.hide()
+ self.suggests_label.hide()
+ self.provides_label.hide()
+ self.files_view.hide()
+ self.files_label.hide()
+
+ (model, it) = selection.get_selected()
+ if it is None:
+ return
+
+ package = model[it][PackageColumns.Package]
+ data = load_runtime_package(self.pkgdata, package)
+
+ self.label.set_text(summary(data))
+
+ files = ast.literal_eval(data["FILES_INFO"])
+ if files:
+ self.files_label.set_text("{0} files take {1}.".format(len(files), human_size(int(data["PKGSIZE"]))))
+ self.files_view.show()
+ for filename, size in files.items():
+ self.file_store.append([filename, size])
+ else:
+ self.files_view.hide()
+ self.files_label.set_text("This package has no files.")
+ self.files_label.show()
+
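+ # update_deps renders one dependency field (e.g. RDEPENDS) into its label; entries are clickable "package:" links unless clickable=False.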
+ def update_deps(field, prefix, label, clickable=True):
+ if field in data:
+ l = []
+ for name, version in bb.utils.explode_dep_versions2(data[field]).items():
+ if clickable:
+ l.append("<a href='package:{0}'>{0}</a> {1}".format(name, " ".join(version)).strip())
+ else:
+ l.append("{0} {1}".format(name, " ".join(version)).strip())
+ label.set_markup(prefix + ", ".join(l))
+ label.show()
+ else:
+ label.hide()
+ update_deps("RDEPENDS", "Depends: ", self.depends_label)
+ update_deps("RRECOMMENDS", "Recommends: ", self.recommends_label)
+ update_deps("RSUGGESTS", "Suggests: ", self.suggests_label)
+ update_deps("RPROVIDES", "Provides: ", self.provides_label, clickable=False)
+
+ def load_recipes(self):
+ if not os.path.exists(self.pkgdata):
+ sys.exit("Error: Please ensure %s exists by generating packages before using this tool." % self.pkgdata)
+ for recipe in sorted(os.listdir(self.pkgdata)):
+ if os.path.isfile(os.path.join(self.pkgdata, recipe)):
+ self.recipe_iters[recipe] = self.recipe_store.append([recipe])
+
+if __name__ == "__main__":
+ import argparse
+
+ parser = argparse.ArgumentParser(description='pkgdata browser')
+ parser.add_argument('-p', '--pkgdata', help="Optional location of pkgdata")
+
+ args = parser.parse_args()
+ pkgdata = args.pkgdata if args.pkgdata else find_pkgdata()
+ # TODO assert pkgdata is a directory
+ window = PkgUi(pkgdata)
+ Gtk.main()
diff --git a/scripts/oe-pkgdata-browser.glade b/scripts/oe-pkgdata-browser.glade
new file mode 100644
index 0000000000..0d06c825ff
--- /dev/null
+++ b/scripts/oe-pkgdata-browser.glade
@@ -0,0 +1,337 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Generated with glade 3.18.3 -->
+<interface>
+ <requires lib="gtk+" version="3.12"/>
+ <object class="GtkListStore" id="file_store">
+ <columns>
+ <!-- column-name Filename -->
+ <column type="gchararray"/>
+ <!-- column-name Size -->
+ <column type="glong"/>
+ </columns>
+ </object>
+ <object class="GtkListStore" id="package_store">
+ <columns>
+ <!-- column-name Package -->
+ <column type="gchararray"/>
+ <!-- column-name Size -->
+ <column type="glong"/>
+ </columns>
+ </object>
+ <object class="GtkListStore" id="pkgdata_store">
+ <columns>
+ <!-- column-name Name -->
+ <column type="gchararray"/>
+ <!-- column-name Path -->
+ <column type="gchararray"/>
+ </columns>
+ </object>
+ <object class="GtkListStore" id="recipe_store">
+ <columns>
+ <!-- column-name Recipe -->
+ <column type="gchararray"/>
+ </columns>
+ </object>
+ <object class="GtkWindow" id="window">
+ <property name="can_focus">False</property>
+ <property name="title" translatable="yes">Package Data Browser</property>
+ <property name="default_width">1200</property>
+ <property name="default_height">900</property>
+ <property name="icon_name">accessories-dictionary</property>
+ <property name="has_resize_grip">True</property>
+ <child>
+ <object class="GtkBox" id="box1">
+ <property name="visible">True</property>
+ <property name="can_focus">False</property>
+ <property name="margin_left">4</property>
+ <property name="margin_right">4</property>
+ <property name="margin_top">4</property>
+ <property name="margin_bottom">4</property>
+ <property name="orientation">vertical</property>
+ <property name="spacing">4</property>
+ <child>
+ <object class="GtkComboBox" id="pkgdata_combo">
+ <property name="can_focus">False</property>
+ <property name="model">pkgdata_store</property>
+ <property name="id_column">1</property>
+ <child>
+ <object class="GtkCellRendererText" id="cellrenderertext5"/>
+ <attributes>
+ <attribute name="text">0</attribute>
+ </attributes>
+ </child>
+ </object>
+ <packing>
+ <property name="expand">False</property>
+ <property name="fill">True</property>
+ <property name="position">0</property>
+ </packing>
+ </child>
+ <child>
+ <object class="GtkPaned" id="paned1">
+ <property name="visible">True</property>
+ <property name="can_focus">True</property>
+ <property name="position">400</property>
+ <property name="position_set">True</property>
+ <child>
+ <object class="GtkScrolledWindow" id="scrolledwindow1">
+ <property name="visible">True</property>
+ <property name="can_focus">True</property>
+ <property name="shadow_type">in</property>
+ <property name="min_content_width">100</property>
+ <child>
+ <object class="GtkTreeView" id="recipe_view">
+ <property name="visible">True</property>
+ <property name="can_focus">True</property>
+ <property name="model">recipe_store</property>
+ <property name="search_column">0</property>
+ <property name="fixed_height_mode">True</property>
+ <property name="show_expanders">False</property>
+ <child internal-child="selection">
+ <object class="GtkTreeSelection" id="treeview-selection1"/>
+ </child>
+ <child>
+ <object class="GtkTreeViewColumn" id="treeviewcolumn1">
+ <property name="sizing">fixed</property>
+ <property name="title" translatable="yes">Recipe</property>
+ <child>
+ <object class="GtkCellRendererText" id="cellrenderertext1"/>
+ <attributes>
+ <attribute name="text">0</attribute>
+ </attributes>
+ </child>
+ </object>
+ </child>
+ </object>
+ </child>
+ </object>
+ <packing>
+ <property name="resize">False</property>
+ <property name="shrink">True</property>
+ </packing>
+ </child>
+ <child>
+ <object class="GtkPaned" id="paned2">
+ <property name="visible">True</property>
+ <property name="can_focus">True</property>
+ <property name="position">400</property>
+ <property name="position_set">True</property>
+ <child>
+ <object class="GtkScrolledWindow" id="scrolledwindow2">
+ <property name="visible">True</property>
+ <property name="can_focus">True</property>
+ <property name="shadow_type">in</property>
+ <property name="min_content_width">100</property>
+ <child>
+ <object class="GtkTreeView" id="package_view">
+ <property name="visible">True</property>
+ <property name="can_focus">True</property>
+ <property name="model">package_store</property>
+ <property name="search_column">0</property>
+ <property name="show_expanders">False</property>
+ <child internal-child="selection">
+ <object class="GtkTreeSelection" id="treeview-selection2"/>
+ </child>
+ <child>
+ <object class="GtkTreeViewColumn" id="package_name_column">
+ <property name="resizable">True</property>
+ <property name="sizing">autosize</property>
+ <property name="title" translatable="yes">Package</property>
+ <property name="sort_column_id">0</property>
+ <child>
+ <object class="GtkCellRendererText" id="cellrenderertext2"/>
+ <attributes>
+ <attribute name="text">0</attribute>
+ </attributes>
+ </child>
+ </object>
+ </child>
+ <child>
+ <object class="GtkTreeViewColumn" id="package_size_column">
+ <property name="resizable">True</property>
+ <property name="sizing">autosize</property>
+ <property name="title" translatable="yes">Size</property>
+ <property name="sort_column_id">1</property>
+ <child>
+ <object class="GtkCellRendererText" id="package_size_cell"/>
+ </child>
+ </object>
+ </child>
+ </object>
+ </child>
+ </object>
+ <packing>
+ <property name="resize">False</property>
+ <property name="shrink">True</property>
+ </packing>
+ </child>
+ <child>
+ <object class="GtkBox" id="box2">
+ <property name="visible">True</property>
+ <property name="can_focus">False</property>
+ <property name="margin_left">4</property>
+ <property name="orientation">vertical</property>
+ <property name="spacing">4</property>
+ <child>
+ <object class="GtkLabel" id="label1">
+ <property name="visible">True</property>
+ <property name="can_focus">False</property>
+ <property name="xalign">0</property>
+ <property name="label" translatable="yes">label</property>
+ </object>
+ <packing>
+ <property name="expand">False</property>
+ <property name="fill">True</property>
+ <property name="position">0</property>
+ </packing>
+ </child>
+ <child>
+ <object class="GtkLabel" id="depends_label">
+ <property name="visible">True</property>
+ <property name="can_focus">False</property>
+ <property name="xalign">0</property>
+ <property name="label" translatable="yes">depends_label</property>
+ <property name="wrap">True</property>
+ <property name="track_visited_links">False</property>
+ </object>
+ <packing>
+ <property name="expand">False</property>
+ <property name="fill">True</property>
+ <property name="position">1</property>
+ </packing>
+ </child>
+ <child>
+ <object class="GtkLabel" id="recommends_label">
+ <property name="visible">True</property>
+ <property name="can_focus">False</property>
+ <property name="xalign">0</property>
+ <property name="label" translatable="yes">recs_label</property>
+ <property name="wrap">True</property>
+ <property name="track_visited_links">False</property>
+ </object>
+ <packing>
+ <property name="expand">False</property>
+ <property name="fill">True</property>
+ <property name="position">2</property>
+ </packing>
+ </child>
+ <child>
+ <object class="GtkLabel" id="suggests_label">
+ <property name="visible">True</property>
+ <property name="can_focus">False</property>
+ <property name="xalign">0</property>
+ <property name="label" translatable="yes">suggests_label</property>
+ <property name="wrap">True</property>
+ <property name="track_visited_links">False</property>
+ </object>
+ <packing>
+ <property name="expand">False</property>
+ <property name="fill">True</property>
+ <property name="position">3</property>
+ </packing>
+ </child>
+ <child>
+ <object class="GtkLabel" id="provides_label">
+ <property name="visible">True</property>
+ <property name="can_focus">False</property>
+ <property name="xalign">0</property>
+ <property name="label" translatable="yes">provides_label</property>
+ <property name="wrap">True</property>
+ <property name="track_visited_links">False</property>
+ </object>
+ <packing>
+ <property name="expand">False</property>
+ <property name="fill">True</property>
+ <property name="position">4</property>
+ </packing>
+ </child>
+ <child>
+ <object class="GtkLabel" id="files_label">
+ <property name="visible">True</property>
+ <property name="can_focus">False</property>
+ <property name="xalign">0</property>
+ <property name="label" translatable="yes">files_label</property>
+ <property name="ellipsize">end</property>
+ </object>
+ <packing>
+ <property name="expand">False</property>
+ <property name="fill">True</property>
+ <property name="position">5</property>
+ </packing>
+ </child>
+ <child>
+ <object class="GtkScrolledWindow" id="files_scrollview">
+ <property name="visible">True</property>
+ <property name="can_focus">True</property>
+ <property name="shadow_type">in</property>
+ <child>
+ <object class="GtkTreeView" id="files_view">
+ <property name="visible">True</property>
+ <property name="can_focus">True</property>
+ <property name="model">file_store</property>
+ <property name="rules_hint">True</property>
+ <property name="search_column">0</property>
+ <property name="show_expanders">False</property>
+ <child internal-child="selection">
+ <object class="GtkTreeSelection" id="treeview-selection3"/>
+ </child>
+ <child>
+ <object class="GtkTreeViewColumn" id="file_name_column">
+ <property name="title" translatable="yes">Name</property>
+ <property name="sort_indicator">True</property>
+ <property name="sort_column_id">0</property>
+ <child>
+ <object class="GtkCellRendererText" id="cellrenderertext3">
+ <property name="background_rgba">rgba(0,0,0,0)</property>
+ </object>
+ <attributes>
+ <attribute name="text">0</attribute>
+ </attributes>
+ </child>
+ </object>
+ </child>
+ <child>
+ <object class="GtkTreeViewColumn" id="file_size_column">
+ <property name="title" translatable="yes">Size</property>
+ <property name="sort_indicator">True</property>
+ <property name="sort_column_id">1</property>
+ <child>
+ <object class="GtkCellRendererText" id="file_size_cell"/>
+ <attributes>
+ <attribute name="text">1</attribute>
+ </attributes>
+ </child>
+ </object>
+ </child>
+ </object>
+ </child>
+ </object>
+ <packing>
+ <property name="expand">True</property>
+ <property name="fill">True</property>
+ <property name="position">6</property>
+ </packing>
+ </child>
+ </object>
+ <packing>
+ <property name="resize">True</property>
+ <property name="shrink">True</property>
+ </packing>
+ </child>
+ </object>
+ <packing>
+ <property name="resize">True</property>
+ <property name="shrink">True</property>
+ </packing>
+ </child>
+ </object>
+ <packing>
+ <property name="expand">True</property>
+ <property name="fill">True</property>
+ <property name="position">1</property>
+ </packing>
+ </child>
+ </object>
+ </child>
+ </object>
+</interface>
diff --git a/scripts/oe-pkgdata-util b/scripts/oe-pkgdata-util
index e6c9df94e8..44ae40549a 100755
--- a/scripts/oe-pkgdata-util
+++ b/scripts/oe-pkgdata-util
@@ -6,18 +6,7 @@
#
# Copyright 2012-2015 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# SPDX-License-Identifier: GPL-2.0-only
#
import sys
@@ -107,7 +96,7 @@ def glob(args):
pn = os.path.basename(pkgdata_file)
with open(pkgdata_file, 'r') as f:
for line in f:
- if line.startswith("PKG_%s:" % pn):
+ if line.startswith("PKG:%s:" % pn):
renamed = line.split(': ')[1].rstrip()
return renamed
@@ -182,7 +171,7 @@ def read_value(args):
val = line.split(': ', 1)[1].rstrip()
return val
- logger.debug("read-value('%s', '%s' '%s')" % (args.pkgdata_dir, args.valuename, packages))
+ logger.debug("read-value('%s', '%s' '%s')" % (args.pkgdata_dir, args.valuenames, packages))
for package in packages:
pkg_split = package.split('_')
pkg_name = pkg_split[0]
@@ -191,20 +180,29 @@ def read_value(args):
logger.debug(revlink)
if os.path.exists(revlink):
mappedpkg = os.path.basename(os.readlink(revlink))
- qvar = args.valuename
- value = readvar(revlink, qvar, mappedpkg)
- if qvar == "PKGSIZE":
- # PKGSIZE is now in bytes, but we we want it in KB
- pkgsize = (int(value) + 1024 // 2) // 1024
- value = "%d" % pkgsize
- if args.unescape:
- import codecs
- # escape_decode() unescapes backslash encodings in byte streams
- value = codecs.escape_decode(bytes(value, "utf-8"))[0].decode("utf-8")
+ qvars = args.valuenames
+ val_names = qvars.split(',')
+ values = []
+ for qvar in val_names:
+ if qvar == "PACKAGE":
+ value = mappedpkg
+ else:
+ value = readvar(revlink, qvar, mappedpkg)
+ if qvar == "PKGSIZE":
+ # PKGSIZE is now in bytes, but we want it in KB
+ pkgsize = (int(value) + 1024 // 2) // 1024
+ value = "%d" % pkgsize
+ if args.unescape:
+ import codecs
+ # escape_decode() unescapes backslash encodings in byte streams
+ value = codecs.escape_decode(bytes(value, "utf-8"))[0].decode("utf-8")
+ values.append(value)
+
+ values_str = ' '.join(values)
if args.prefix_name:
- print('%s %s' % (pkg_name, value))
+ print('%s %s' % (pkg_name, values_str))
else:
- print(value)
+ print(values_str)
else:
logger.debug("revlink %s does not exist", revlink)
@@ -224,7 +222,7 @@ def lookup_pkglist(pkgs, pkgdata_dir, reverse):
with open(pkgfile, 'r') as f:
for line in f:
fields = line.rstrip().split(': ')
- if fields[0] == 'PKG_%s' % pkg:
+ if fields[0] == 'PKG:%s' % pkg:
mappings[pkg].append(fields[1])
break
return mappings
@@ -279,10 +277,14 @@ def lookup_recipe(args):
parse_pkgdatafile(pkgdatafile)
continue
pkgdatafile = os.path.join(args.pkgdata_dir, 'runtime-reverse', pkg)
- if not os.path.exists(pkgdatafile):
- logger.error("The following packages could not be found: %s" % pkg)
- sys.exit(1)
- parse_pkgdatafile(pkgdatafile)
+ if os.path.exists(pkgdatafile):
+ parse_pkgdatafile(pkgdatafile)
+ else:
+ if args.carryon:
+ print("The following packages could not be found: %s" % pkg)
+ else:
+ logger.error("The following packages could not be found: %s" % pkg)
+ sys.exit(1)
def package_info(args):
def parse_pkgdatafile(pkgdatafile):
@@ -294,7 +296,7 @@ def package_info(args):
extra = ''
for line in f:
for var in vars:
- m = re.match(var + '(?:_\S+)?:\s*(.+?)\s*$', line)
+ m = re.match(var + r'(?::\S+)?:\s*(.+?)\s*$', line)
if m:
vals[var] = m.group(1)
pkg_version = vals['PKGV'] or ''
@@ -396,21 +398,16 @@ def list_pkgs(args):
return False
return True
+ pkglist = []
if args.recipe:
packages = get_recipe_pkgs(args.pkgdata_dir, args.recipe, args.unpackaged)
if args.runtime:
- pkglist = []
runtime_pkgs = lookup_pkglist(packages, args.pkgdata_dir, False)
for rtpkgs in runtime_pkgs.values():
pkglist.extend(rtpkgs)
else:
pkglist = packages
-
- for pkg in pkglist:
- if matchpkg(pkg):
- found = True
- print("%s" % pkg)
else:
if args.runtime:
searchdir = 'runtime-reverse'
@@ -421,9 +418,13 @@ def list_pkgs(args):
for fn in files:
if fn.endswith('.packaged'):
continue
- if matchpkg(fn):
- found = True
- print("%s" % fn)
+ pkglist.append(fn)
+
+ for pkg in sorted(pkglist):
+ if matchpkg(pkg):
+ found = True
+ print("%s" % pkg)
+
if not found:
if args.pkgspec:
logger.error("Unable to find any package matching %s" % args.pkgspec)
@@ -439,7 +440,7 @@ def list_pkg_files(args):
for line in f:
if line.startswith('FILES_INFO:'):
found = True
- val = line.split(':', 1)[1].strip()
+ val = line.split(': ', 1)[1].strip()
dictval = json.loads(val)
if long:
width = max(map(len, dictval), default=0)
@@ -508,7 +509,7 @@ def find_path(args):
with open(os.path.join(root,fn)) as f:
for line in f:
if line.startswith('FILES_INFO:'):
- val = line.split(':', 1)[1].strip()
+ val = line.split(': ', 1)[1].strip()
dictval = json.loads(val)
for fullpth in dictval.keys():
if fnmatch.fnmatchcase(fullpth, args.targetpath):
@@ -558,6 +559,7 @@ def main():
help='Find recipe producing one or more packages',
description='Looks up the specified runtime package(s) to see which recipe they were produced by')
parser_lookup_recipe.add_argument('pkg', nargs='+', help='Runtime package name to look up')
+ parser_lookup_recipe.add_argument('-c', '--continue', dest="carryon", help='Continue looking up recipes even if we can not find one', action='store_true')
parser_lookup_recipe.set_defaults(func=lookup_recipe)
parser_package_info = subparsers.add_parser('package-info',
@@ -577,7 +579,7 @@ def main():
parser_read_value = subparsers.add_parser('read-value',
help='Read any pkgdata value for one or more packages',
description='Reads the named value from the pkgdata files for the specified packages')
- parser_read_value.add_argument('valuename', help='Name of the value to look up')
+ parser_read_value.add_argument('valuenames', help='Name of the value(s) to look up (comma-separated, no spaces)')
parser_read_value.add_argument('pkg', nargs='*', help='Runtime package name to look up')
parser_read_value.add_argument('-f', '--file', help='Read package names from the specified file (one per line, first field only)')
parser_read_value.add_argument('-n', '--prefix-name', help='Prefix output with package name', action='store_true')
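+ # Illustrative example (package name assumed): 'oe-pkgdata-util read-value PKGSIZE,PACKAGE busybox'
+ # prints the requested values space-separated on one line per package.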
@@ -605,6 +607,9 @@ def main():
logger.error("Unable to find bitbake by searching parent directory of this script or PATH")
sys.exit(1)
logger.debug('Found bitbake path: %s' % bitbakepath)
+ if not os.environ.get('BUILDDIR', ''):
+ logger.error("This script can only be run after initialising the build environment (e.g. by using oe-init-build-env)")
+ sys.exit(1)
tinfoil = tinfoil_init()
try:
args.pkgdata_dir = tinfoil.config_data.getVar('PKGDATA_DIR')
diff --git a/scripts/oe-publish-sdk b/scripts/oe-publish-sdk
index ee33acf902..b8a652e47f 100755
--- a/scripts/oe-publish-sdk
+++ b/scripts/oe-publish-sdk
@@ -1,21 +1,11 @@
#!/usr/bin/env python3
-
+#
# OpenEmbedded SDK publishing tool
-
-# Copyright (C) 2015-2016 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# Copyright (C) 2015-2016 Intel Corporation
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import sys
import os
@@ -104,7 +94,10 @@ def publish(args):
logger.error('Failed to unpack %s to %s' % (dest_sdk, destination))
return ret
else:
- cmd = "ssh %s 'sh %s -p -y -d %s && rm -f %s'" % (host, dest_sdk, destdir, dest_sdk)
+ rm_or_not = " && rm -f %s" % dest_sdk
+ if args.keep_orig:
+ rm_or_not = ""
+ cmd = "ssh %s 'sh %s -p -y -d %s%s'" % (host, dest_sdk, destdir, rm_or_not)
ret = subprocess.call(cmd, shell=True)
if ret == 0:
logger.info('Successfully unpacked %s to %s' % (dest_sdk, destdir))
@@ -114,9 +107,9 @@ def publish(args):
# Setting up the git repo
if not is_remote:
- cmd = 'set -e; mkdir -p %s/layers; cd %s/layers; if [ ! -e .git ]; then git init .; cp .git/hooks/post-update.sample .git/hooks/post-commit; echo "*.pyc\n*.pyo\npyshtables.py" > .gitignore; fi; git add -A .; git config user.email "oe@oe.oe" && git config user.name "OE" && git commit -q -m "init repo" || true' % (destination, destination)
+ cmd = 'set -e; mkdir -p %s/layers; cd %s/layers; if [ ! -e .git ]; then git init .; cp .git/hooks/post-update.sample .git/hooks/post-commit; echo "*.pyc\n*.pyo\npyshtables.py" > .gitignore; fi; git config gc.auto 0; git add -A .; git config user.email "oe@oe.oe" && git config user.name "OE" && git commit -q -m "init repo" || true' % (destination, destination)
else:
- cmd = "ssh %s 'set -e; mkdir -p %s/layers; cd %s/layers; if [ ! -e .git ]; then git init .; cp .git/hooks/post-update.sample .git/hooks/post-commit; echo '*.pyc\n*.pyo\npyshtables.py' > .gitignore; fi; git add -A .; git config user.email 'oe@oe.oe' && git config user.name 'OE' && git commit -q -m \"init repo\" || true'" % (host, destdir, destdir)
+ cmd = "ssh %s 'set -e; mkdir -p %s/layers; cd %s/layers; if [ ! -e .git ]; then git init .; cp .git/hooks/post-update.sample .git/hooks/post-commit; echo '*.pyc' > .gitignore; echo '*.pyo' >> .gitignore; echo 'pyshtables.py' >> .gitignore; fi; git config gc.auto 0; git add -A .; git config user.email 'oe@oe.oe' && git config user.name 'OE' && git commit -q -m \"init repo\" || true'" % (host, destdir, destdir)
ret = subprocess.call(cmd, shell=True)
if ret == 0:
logger.info('SDK published successfully')
@@ -129,6 +122,7 @@ def main():
parser = argparse_oe.ArgumentParser(description="OpenEmbedded extensible SDK publishing tool - writes server-side data to support the extensible SDK update process to a specified location")
parser.add_argument('-d', '--debug', help='Enable debug output', action='store_true')
parser.add_argument('-q', '--quiet', help='Print only errors', action='store_true')
+ parser.add_argument('-k', '--keep-orig', help='Keep the eSDK installer on the remote host after publishing (by default it is deleted)', action='store_true')
parser.add_argument('sdk', help='Extensible SDK to publish (path to .sh installer file)')
parser.add_argument('dest', help='Destination to publish SDK to; can be local path or remote in the form of user@host:/path (in the latter case ssh/scp will be used).')
diff --git a/scripts/oe-pylint b/scripts/oe-pylint
new file mode 100755
index 0000000000..5ad72838e9
--- /dev/null
+++ b/scripts/oe-pylint
@@ -0,0 +1,15 @@
+#!/bin/bash
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Run pylint3 against our common Python module spaces and print a report of potential issues
+#
+this_dir=$(dirname $(readlink -f $0))
+ERRORS="-E"
+IGNORELIST="$ERRORS -d logging-too-many-args -d missing-docstring -d line-too-long -d invalid-name"
+PYTHONPATH=$this_dir/../bitbake/lib/ pylint3 $IGNORELIST bb
+PYTHONPATH=$this_dir/../bitbake/lib/:$this_dir/../meta/lib pylint3 $IGNORELIST -d undefined-variable oe
+PYTHONPATH=$this_dir/../bitbake/lib/:$this_dir/../meta/lib pylint3 $IGNORELIST oeqa
+PYTHONPATH=$this_dir/../bitbake/lib/:$this_dir/../meta/lib:$this_dir/lib pylint3 $IGNORELIST -d undefined-variable argparse_oe buildstats devtool recipetool scriptpath testcasemgmt build_perf checklayer resulttool scriptutils wic
\ No newline at end of file
diff --git a/scripts/oe-run-native b/scripts/oe-run-native
index 1131122e68..22958d97e7 100755
--- a/scripts/oe-run-native
+++ b/scripts/oe-run-native
@@ -1,20 +1,8 @@
#!/bin/bash
#
# Copyright (c) 2016, Intel Corporation.
-# All Rights Reserved
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
-# the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, see <http://www.gnu.org/licenses/>
+# SPDX-License-Identifier: GPL-2.0-or-later
#
#
@@ -28,7 +16,7 @@ if [ $# -lt 1 -o "$1" = '--help' -o "$1" = '-h' ] ; then
echo 'OpenEmbedded run-native - runs native tools'
echo ''
echo 'arguments:'
- echo ' native-recipe The recipe which provoides tool'
+ echo ' native-recipe The recipe which provides the tool'
echo ' tool Native tool to run'
echo ''
exit 2
@@ -55,12 +43,12 @@ fi
OLD_PATH=$PATH
# look for a tool only in native sysroot
-PATH=$OECORE_NATIVE_SYSROOT/usr/bin:$OECORE_NATIVE_SYSROOT/bin:$OECORE_NATIVE_SYSROOT/usr/sbin:$OECORE_NATIVE_SYSROOT/sbin
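+# The find below also appends any *-native subdirectories of usr/bin in the
+# native sysroot, where some recipes (such as perl-native) install their tools.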
+PATH=$OECORE_NATIVE_SYSROOT/usr/bin:$OECORE_NATIVE_SYSROOT/bin:$OECORE_NATIVE_SYSROOT/usr/sbin:$OECORE_NATIVE_SYSROOT/sbin$(find $OECORE_NATIVE_SYSROOT/usr/bin -maxdepth 1 -name "*-native" -type d -printf ":%p")
tool_find=`/usr/bin/which $tool 2>/dev/null`
if [ -n "$tool_find" ] ; then
# add old path to allow usage of host tools
- PATH=$PATH:$OLD_PATH $@
+ PATH=$PATH:$OLD_PATH "$@"
else
echo "Error: Unable to find '$tool' in $PATH"
echo "Error: Have you run 'bitbake $native_recipe -caddto_recipe_sysroot'?"
diff --git a/scripts/oe-selftest b/scripts/oe-selftest
index 1bf860a415..18ac0f5869 100755
--- a/scripts/oe-selftest
+++ b/scripts/oe-selftest
@@ -2,18 +2,8 @@
# Copyright (c) 2013-2017 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# DESCRIPTION
# This script runs tests defined in meta/lib/oeqa/selftest/
@@ -43,7 +33,7 @@ scriptpath.add_bitbake_lib_path()
from oeqa.utils import load_test_components
from oeqa.core.exception import OEQAPreRun
-logger = scriptutils.logger_create('oe-selftest', stream=sys.stdout)
+logger = scriptutils.logger_create('oe-selftest', stream=sys.stdout, keepalive=True)
def main():
description = "Script that runs unit tests against bitbake and other Yocto related tools. The goal is to validate tools functionality and metadata integrity. Refer to https://wiki.yoctoproject.org/wiki/Oe-selftest for more information."
diff --git a/scripts/oe-setup-build b/scripts/oe-setup-build
new file mode 100755
index 0000000000..5364f2b481
--- /dev/null
+++ b/scripts/oe-setup-build
@@ -0,0 +1,122 @@
+#!/usr/bin/env python3
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
+import argparse
+import json
+import os
+import subprocess
+
+def defaultlayers():
+ return os.path.abspath(os.path.join(os.path.dirname(__file__), '.oe-layers.json'))
+
+def makebuildpath(topdir, template):
+ return os.path.join(topdir, "build-{}".format(template))
+
+def discover_templates(layers_file):
+ if not os.path.exists(layers_file):
+ print("List of layers {} does not exist; were the layers set up using the setup-layers script?".format(layers_file))
+ return None
+
+ templates = []
+ layers_list = json.load(open(layers_file))["layers"]
+ for layer in layers_list:
+ template_dir = os.path.join(os.path.dirname(layers_file), layer, 'conf','templates')
+ if os.path.exists(template_dir):
+ for d in sorted(os.listdir(template_dir)):
+ templatepath = os.path.join(template_dir,d)
+ if not os.path.isfile(os.path.join(templatepath,'local.conf.sample')):
+ continue
+ layer_base = os.path.basename(layer)
+ templatename = "{}-{}".format(layer_base[5:] if layer_base.startswith("meta-") else layer_base, d)
+ buildpath = makebuildpath(os.getcwd(), templatename)
+ notespath = os.path.join(template_dir, d, 'conf-notes.txt')
+ try: notes = open(notespath).read()
+ except: notes = None
+ try: summary = open(os.path.join(template_dir, d, 'conf-summary.txt')).read()
+ except: summary = None
+ templates.append({"templatename":templatename,"templatepath":templatepath,"buildpath":buildpath,"notespath":notespath,"notes":notes,"summary":summary})
+
+ return templates
+
+def print_templates(templates, verbose):
+ print("Available build configurations:\n")
+
+ for i in range(len(templates)):
+ t = templates[i]
+ print("{}. {}".format(i+1, t["templatename"]))
+ print("{}".format(t["summary"].strip() if t["summary"] else "This configuration does not have a summary."))
+ if verbose:
+ print("Configuration template path:", t["templatepath"])
+ print("Build path:", t["buildpath"])
+ print("Usage notes:", t["notespath"] if t["notes"] else "This configuration does not have usage notes.")
+ print("")
+ if not verbose:
+ print("Re-run with 'list -v' to see additional information.")
+
+def list_templates(args):
+ templates = discover_templates(args.layerlist)
+ if not templates:
+ return
+
+ verbose = args.v
+ print_templates(templates, verbose)
+
+def find_template(template_name, templates):
+ print_templates(templates, False)
+ if not template_name:
+ n_s = input("Please choose a configuration by its number: ")
+ try: return templates[int(n_s) - 1]
+ except:
+ print("Invalid selection, please try again.")
+ return None
+ else:
+ for t in templates:
+ if t["templatename"] == template_name:
+ return t
+ print("Configuration {} is not one of {}, please try again.".format(tempalte_name, [t["templatename"] for t in templates]))
+ return None
+
+def setup_build_env(args):
+ templates = discover_templates(args.layerlist)
+ if not templates:
+ return
+
+ template = find_template(args.c, templates)
+ if not template:
+ return
+ builddir = args.b if args.b else template["buildpath"]
+ no_shell = args.no_shell
+ coredir = os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))
+ cmd = "TEMPLATECONF={} . {} {}".format(template["templatepath"], os.path.join(coredir, 'oe-init-build-env'), builddir)
+ if not no_shell:
+ cmd = cmd + " && {}".format(os.environ['SHELL'])
+ print("Running:", cmd)
+ subprocess.run(cmd, shell=True, executable=os.environ['SHELL'])
+
+parser = argparse.ArgumentParser(description="A script that discovers available build configurations and sets up a build environment based on one of them. Run without arguments to choose one interactively.")
+parser.add_argument("--layerlist", default=defaultlayers(), help='Where to look for available layers (as written out by setup-layers script) (default is {}).'.format(defaultlayers()))
+
+subparsers = parser.add_subparsers()
+parser_list_templates = subparsers.add_parser('list', help='List available configurations')
+parser_list_templates.add_argument('-v', action='store_true',
+ help='Print detailed information and usage notes for each available build configuration.')
+parser_list_templates.set_defaults(func=list_templates)
+
+parser_setup_env = subparsers.add_parser('setup', help='Set up a build environment and open a shell session with it, ready to run builds.')
+parser_setup_env.add_argument('-c', metavar='configuration_name', help="Use a build configuration configuration_name to set up a build environment (run this script with 'list' to see what is available)")
+parser_setup_env.add_argument('-b', metavar='build_path', help="Set up a build directory in build_path (run this script with 'list -v' to see where it would be by default)")
+parser_setup_env.add_argument('--no-shell', action='store_true',
+ help='Create a build directory but do not start a shell session with the build environment from it.')
+parser_setup_env.set_defaults(func=setup_build_env)
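+# Illustrative usage (the template name depends on which layers are present):
+#   oe-setup-build list -v
+#   oe-setup-build setup -c poky-default -b build-poky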
+
+args = parser.parse_args()
+
+if 'func' in args:
+ args.func(args)
+else:
+ from argparse import Namespace
+ setup_build_env(Namespace(layerlist=args.layerlist, c=None, b=None, no_shell=False))
diff --git a/scripts/oe-setup-builddir b/scripts/oe-setup-builddir
index 55d73ca1e4..dcb384c33a 100755
--- a/scripts/oe-setup-builddir
+++ b/scripts/oe-setup-builddir
@@ -4,26 +4,17 @@
#
# Copyright (C) 2006-2011 Linux Foundation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
+# SPDX-License-Identifier: GPL-2.0-or-later
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-if [ -z "$BUILDDIR" ]; then
- echo >&2 "Error: The build directory (BUILDDIR) must be set!"
+die() {
+ echo Error: "$@" >&2
exit 1
-fi
+}
-if [ "$1" = '--help' -o "$1" = '-h' ]; then
+[ -n "$BUILDDIR" ] || die "The build directory (BUILDDIR) must be set!"
+
+if [ "$1" = '--help' ] || [ "$1" = '-h' ]; then
echo 'Usage: oe-setup-builddir'
echo ''
echo "OpenEmbedded setup-builddir - setup build directory $BUILDDIR"
@@ -33,33 +24,22 @@ fi
mkdir -p "$BUILDDIR/conf"
-if [ ! -d "$BUILDDIR" ]; then
- echo >&2 "Error: The builddir ($BUILDDIR) does not exist!"
- exit 1
-fi
-
-if [ ! -w "$BUILDDIR" ]; then
- echo >&2 "Error: Cannot write to $BUILDDIR, perhaps try sourcing with a writable path? i.e. . oe-init-build-env ~/my-build"
- exit 1
-fi
+[ -d "$BUILDDIR" ] || die "The build directory ($BUILDDIR) does not exist!"
+[ -w "$BUILDDIR" ] ||
+ die "Cannot write to $BUILDDIR, perhaps try sourcing with a writable path? i.e. . oe-init-build-env ~/my-build"
# Attempting removal of sticky,setuid bits from BUILDDIR, BUILDDIR/conf
chmod -st "$BUILDDIR" 2>/dev/null || echo "WARNING: unable to chmod $BUILDDIR"
chmod -st "$BUILDDIR/conf" 2>/dev/null || echo "WARNING: unable to chmod $BUILDDIR/conf"
-cd "$BUILDDIR"
+cd "$BUILDDIR" || die "Failed to change directory to $BUILDDIR!"
-if [ -f "$BUILDDIR/conf/templateconf.cfg" ]; then
- TEMPLATECONF=$(cat "$BUILDDIR/conf/templateconf.cfg")
-fi
+. "$OEROOT/.templateconf"
-. $OEROOT/.templateconf
+# Keep the original TEMPLATECONF before possibly prefixing it with $OEROOT below.
+ORG_TEMPLATECONF=$TEMPLATECONF
-if [ ! -f "$BUILDDIR/conf/templateconf.cfg" ]; then
- echo "$TEMPLATECONF" >"$BUILDDIR/conf/templateconf.cfg"
-fi
-
-#
+#
# $TEMPLATECONF can point to a directory for the template local.conf & bblayers.conf
#
if [ -n "$TEMPLATECONF" ]; then
@@ -68,73 +48,94 @@ if [ -n "$TEMPLATECONF" ]; then
if [ -d "$OEROOT/$TEMPLATECONF" ]; then
TEMPLATECONF="$OEROOT/$TEMPLATECONF"
fi
- if [ ! -d "$TEMPLATECONF" ]; then
- echo >&2 "Error: TEMPLATECONF value points to nonexistent directory '$TEMPLATECONF'"
- exit 1
- fi
+ [ -d "$TEMPLATECONF" ] ||
+ die "TEMPLATECONF value points to nonexistent directory '$TEMPLATECONF'"
+ fi
+ templatesdir=$(python3 -c "import sys; print(sys.argv[1].strip('/').split('/')[-2])" "$TEMPLATECONF")
+ if [ "$templatesdir" != templates ] || [ ! -f "$TEMPLATECONF/../../layer.conf" ]; then
+ die "TEMPLATECONF value (which is $TEMPLATECONF) must point to meta-some-layer/conf/templates/template-name"
fi
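+ # e.g. TEMPLATECONF=meta-poky/conf/templates/default (illustrative; any layer's conf/templates/<name> matches this layout)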
OECORELAYERCONF="$TEMPLATECONF/bblayers.conf.sample"
OECORELOCALCONF="$TEMPLATECONF/local.conf.sample"
+ OECORESUMMARYCONF="$TEMPLATECONF/conf-summary.txt"
OECORENOTESCONF="$TEMPLATECONF/conf-notes.txt"
fi
unset SHOWYPDOC
if [ -z "$OECORELOCALCONF" ]; then
- OECORELOCALCONF="$OEROOT/meta/conf/local.conf.sample"
+ OECORELOCALCONF="$OEROOT/meta/conf/templates/default/local.conf.sample"
fi
if [ ! -r "$BUILDDIR/conf/local.conf" ]; then
cat <<EOM
You had no conf/local.conf file. This configuration file has therefore been
-created for you with some default values. You may wish to edit it to, for
-example, select a different MACHINE (target hardware). See conf/local.conf
-for more information as common configuration options are commented.
+created for you from $OECORELOCALCONF
+You may wish to edit it to, for example, select a different MACHINE (target
+hardware).
EOM
- cp -f $OECORELOCALCONF "$BUILDDIR/conf/local.conf"
+ cp -f "$OECORELOCALCONF" "$BUILDDIR/conf/local.conf"
SHOWYPDOC=yes
fi
if [ -z "$OECORELAYERCONF" ]; then
- OECORELAYERCONF="$OEROOT/meta/conf/bblayers.conf.sample"
+ OECORELAYERCONF="$OEROOT/meta/conf/templates/default/bblayers.conf.sample"
fi
if [ ! -r "$BUILDDIR/conf/bblayers.conf" ]; then
cat <<EOM
You had no conf/bblayers.conf file. This configuration file has therefore been
-created for you with some default values. To add additional metadata layers
-into your configuration please add entries to conf/bblayers.conf.
+created for you from $OECORELAYERCONF
+To add additional metadata layers into your configuration please add entries
+to conf/bblayers.conf.
EOM
- # Put the abosolute path to the layers in bblayers.conf so we can run
- # bitbake without the init script after the first run
- # ##COREBASE## is deprecated as it's meaning was inconsistent, but continue
+ # Put the absolute path to the layers in bblayers.conf so we can run
+ # bitbake without the init script after the first run.
+ # ##COREBASE## is deprecated as its meaning was inconsistent, but continue
# to replace it for compatibility.
sed -e "s|##OEROOT##|$OEROOT|g" \
-e "s|##COREBASE##|$OEROOT|g" \
- $OECORELAYERCONF > "$BUILDDIR/conf/bblayers.conf"
+ "$OECORELAYERCONF" > "$BUILDDIR/conf/bblayers.conf"
SHOWYPDOC=yes
fi
+if [ -z "$OECORESUMMARYCONF" ]; then
+ OECORESUMMARYCONF="$OEROOT/meta/conf/templates/default/conf-summary.txt"
+fi
+if [ ! -r "$BUILDDIR/conf/conf-summary.txt" ]; then
+ [ ! -r "$OECORESUMMARYCONF" ] || cp "$OECORESUMMARYCONF" "$BUILDDIR/conf/conf-summary.txt"
+fi
+
+if [ -z "$OECORENOTESCONF" ]; then
+ OECORENOTESCONF="$OEROOT/meta/conf/templates/default/conf-notes.txt"
+fi
+if [ ! -r "$BUILDDIR/conf/conf-notes.txt" ]; then
+ [ ! -r "$OECORENOTESCONF" ] || cp "$OECORENOTESCONF" "$BUILDDIR/conf/conf-notes.txt"
+fi
+
# Prevent disturbing a new GIT clone in same console
unset OECORELOCALCONF
unset OECORELAYERCONF
+unset OECORESUMMARYCONF
+unset OECORENOTESCONF
# Ending the first-time run message. Show the YP Documentation banner.
-if [ ! -z "$SHOWYPDOC" ]; then
+if [ -n "$SHOWYPDOC" ]; then
cat <<EOM
The Yocto Project has extensive documentation about OE including a reference
manual which can be found at:
- http://yoctoproject.org/documentation
+ https://docs.yoctoproject.org
-For more information about OpenEmbedded see their website:
- http://www.openembedded.org/
+For more information about OpenEmbedded see the website:
+ https://www.openembedded.org/
EOM
# unset SHOWYPDOC
fi
-if [ -z "$OECORENOTESCONF" ]; then
- OECORENOTESCONF="$OEROOT/meta/conf/conf-notes.txt"
+[ ! -r "$BUILDDIR/conf/conf-summary.txt" ] || cat "$BUILDDIR/conf/conf-summary.txt"
+[ ! -r "$BUILDDIR/conf/conf-notes.txt" ] || cat "$BUILDDIR/conf/conf-notes.txt"
+
+if [ ! -f "$BUILDDIR/conf/templateconf.cfg" ]; then
+ echo "$ORG_TEMPLATECONF" >"$BUILDDIR/conf/templateconf.cfg"
fi
-[ ! -r "$OECORENOTESCONF" ] || cat $OECORENOTESCONF
-unset OECORENOTESCONF
diff --git a/scripts/oe-setup-layers b/scripts/oe-setup-layers
new file mode 100755
index 0000000000..6fbfefd656
--- /dev/null
+++ b/scripts/oe-setup-layers
@@ -0,0 +1,146 @@
+#!/usr/bin/env python3
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
+# This file was copied from poky (or oe-core)/scripts/oe-setup-layers by running
+#
+# bitbake-layers create-layers-setup destdir
+#
+# It is recommended that you do not modify this file directly, but rather re-run the above command to get the freshest upstream copy.
+#
+# This script is idempotent. Subsequent runs only change what is necessary to
+# ensure your layers match your configuration.
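+#
+# Illustrative invocation (paths assumed; --jsondata defaults to <this script>.json):
+#   ./oe-setup-layers --destdir ~/my-layers --jsondata ./my-layers.json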
+
+import argparse
+import json
+import os
+import subprocess
+
+def _is_repo_git_repo(repodir):
+ try:
+ curr_toplevel = subprocess.check_output("git -C %s rev-parse --show-toplevel" % repodir, shell=True, stderr=subprocess.DEVNULL)
+ if curr_toplevel.strip().decode("utf-8") == repodir:
+ return True
+ except subprocess.CalledProcessError:
+ pass
+ return False
+
+def _is_repo_at_rev(repodir, rev):
+ try:
+ curr_rev = subprocess.check_output("git -C %s rev-parse HEAD" % repodir, shell=True, stderr=subprocess.DEVNULL)
+ if curr_rev.strip().decode("utf-8") == rev:
+ return True
+ except subprocess.CalledProcessError:
+ pass
+ return False
+
+def _is_repo_at_remote_uri(repodir, remote, uri):
+ try:
+ curr_uri = subprocess.check_output("git -C %s remote get-url %s" % (repodir, remote), shell=True, stderr=subprocess.DEVNULL)
+ if curr_uri.strip().decode("utf-8") == uri:
+ return True
+ except subprocess.CalledProcessError:
+ pass
+ return False
+
+def _contains_submodules(repodir):
+ return os.path.exists(os.path.join(repodir,".gitmodules"))
+
+def _write_layer_list(dest, repodirs):
+ layers = []
+ for r in repodirs:
+ for root, dirs, files in os.walk(r):
+ if os.path.basename(root) == 'conf' and 'layer.conf' in files:
+ layers.append(os.path.relpath(os.path.dirname(root), dest))
+ layers_f = os.path.join(dest, ".oe-layers.json")
+ print("Writing list of layers into {}".format(layers_f))
+ with open(layers_f, 'w') as f:
+ json.dump({"version":"1.0","layers":layers}, f, sort_keys=True, indent=4)
+
+def _do_checkout(args, json):
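+ # Clones/updates each source repo listed in the json data at its pinned revision,
+ # then writes .oe-layers.json and symlinks setup-build when oe-setup-build is found.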
+ repos = json['sources']
+ repodirs = []
+ oesetupbuild = None
+ for r_name in repos:
+ r_data = repos[r_name]
+ repodir = os.path.abspath(os.path.join(args['destdir'], r_data['path']))
+ repodirs.append(repodir)
+
+ if 'contains_this_file' in r_data.keys():
+ force_arg = 'force_bootstraplayer_checkout'
+ if not args[force_arg]:
+ print('Note: not checking out source {repo}, use {repoflag} to override.'.format(repo=r_name, repoflag='--force-bootstraplayer-checkout'))
+ continue
+ r_remote = r_data['git-remote']
+ rev = r_remote['rev']
+ desc = r_remote['describe']
+ if not desc:
+ desc = rev[:10]
+ branch = r_remote['branch']
+ remotes = r_remote['remotes']
+
+ print('\nSetting up source {}, revision {}, branch {}'.format(r_name, desc, branch))
+ if not _is_repo_git_repo(repodir):
+ cmd = 'git init -q {}'.format(repodir)
+ print("Running '{}'".format(cmd))
+ subprocess.check_output(cmd, shell=True)
+
+ for remote in remotes:
+ if not _is_repo_at_remote_uri(repodir, remote, remotes[remote]['uri']):
+ cmd = "git remote remove {} > /dev/null 2>&1; git remote add {} {}".format(remote, remote, remotes[remote]['uri'])
+ print("Running '{}' in {}".format(cmd, repodir))
+ subprocess.check_output(cmd, shell=True, cwd=repodir)
+
+ cmd = "git fetch -q {} || true".format(remote)
+ print("Running '{}' in {}".format(cmd, repodir))
+ subprocess.check_output(cmd, shell=True, cwd=repodir)
+
+ if not _is_repo_at_rev(repodir, rev):
+ cmd = "git fetch -q --all || true"
+ print("Running '{}' in {}".format(cmd, repodir))
+ subprocess.check_output(cmd, shell=True, cwd=repodir)
+
+ cmd = 'git checkout -q {}'.format(rev)
+ print("Running '{}' in {}".format(cmd, repodir))
+ subprocess.check_output(cmd, shell=True, cwd=repodir)
+
+ if _contains_submodules(repodir):
+ print("Repo {} contains submodules, use 'git submodule update' to ensure they are up to date".format(repodir))
+ if os.path.exists(os.path.join(repodir, 'scripts/oe-setup-build')):
+ oesetupbuild = os.path.join(repodir, 'scripts/oe-setup-build')
+
+ _write_layer_list(args['destdir'], repodirs)
+
+ if oesetupbuild:
+ oesetupbuild_symlink = os.path.join(args['destdir'], 'setup-build')
+ if os.path.exists(oesetupbuild_symlink):
+ os.remove(oesetupbuild_symlink)
+ os.symlink(os.path.relpath(oesetupbuild,args['destdir']),oesetupbuild_symlink)
+ print("\nRun '{}' to list available build configuration templates and set up a build from one of them.".format(oesetupbuild_symlink))
+
+parser = argparse.ArgumentParser(description="A self contained python script that fetches all the needed layers and sets them to correct revisions using data in a json format from a separate file. The json data can be created from an active build directory with 'bitbake-layers create-layers-setup destdir' and there's a sample file and a schema in meta/files/")
+
+parser.add_argument('--force-bootstraplayer-checkout', action='store_true',
+ help='Force the checkout of the layer containing this file (by default it is presumed that as this script is in it, the layer is already in place).')
+
+try:
+ defaultdest = os.path.dirname(subprocess.check_output('git rev-parse --show-toplevel', universal_newlines=True, shell=True, cwd=os.path.dirname(__file__)))
+except subprocess.CalledProcessError as e:
+ defaultdest = os.path.abspath(".")
+
+parser.add_argument('--destdir', default=defaultdest, help='Where to check out the layers (default is {defaultdest}).'.format(defaultdest=defaultdest))
+parser.add_argument('--jsondata', default=__file__+".json", help='File containing the layer data in json format (default is {defaultjson}).'.format(defaultjson=__file__+".json"))
+
+args = parser.parse_args()
+
+with open(args.jsondata) as f:
+ json_f = json.load(f)
+
+supported_versions = ["1.0"]
+if json_f["version"] not in supported_versions:
+ raise Exception("File {} has version {}, which is not in supported versions: {}".format(args.jsondata, json_f["version"], supported_versions))
+
+_do_checkout(vars(args), json_f)
diff --git a/scripts/oe-setup-vscode b/scripts/oe-setup-vscode
new file mode 100755
index 0000000000..b8642780d5
--- /dev/null
+++ b/scripts/oe-setup-vscode
@@ -0,0 +1,93 @@
+#!/bin/sh
+
+usage() {
+ echo "$0 <OEINIT> <BUILDDIR>"
+ echo " OEINIT: path to directory where the .vscode folder is"
+ echo " BUILDDIR: directory passed to the oe-init-setup-env script"
+}
+
+if [ "$#" -ne 2 ]; then
+ usage
+ exit 1
+fi
+
+OEINIT=$(readlink -f "$1")
+BUILDDIR=$(readlink -f "$2")
+VSCODEDIR=$OEINIT/.vscode
+
+if [ ! -d "$OEINIT" ] || [ ! -d "$BUILDDIR" ]; then
+ echo "$OEINIT and/or $BUILDDIR directories are not present."
+ exit 1
+fi
+
+VSCODE_SETTINGS=$VSCODEDIR/settings.json
+ws_builddir="$(echo "$BUILDDIR" | sed -e "s|$OEINIT|\${workspaceFolder}|g")"
+
+# If BUILDDIR is in scope of VSCode ensure VSCode does not try to index the build folder.
+# This would lead to a busy CPU and finally to an OOM exception.
+mkdir -p "$VSCODEDIR"
+cat <<EOMsettings > "$VSCODE_SETTINGS"
+{
+ "bitbake.pathToBitbakeFolder": "\${workspaceFolder}/bitbake",
+ "bitbake.pathToEnvScript": "\${workspaceFolder}/oe-init-build-env",
+ "bitbake.pathToBuildFolder": "$ws_builddir",
+ "bitbake.commandWrapper": "",
+ "bitbake.workingDirectory": "\${workspaceFolder}",
+ "files.exclude": {
+ "**/.git/**": true,
+ "**/_build/**": true,
+ "**/buildhistory/**": true,
+ "**/cache/**": true,
+ "**/downloads/**": true,
+ "**/node_modules/**": true,
+ "**/oe-logs/**": true,
+ "**/oe-workdir/**": true,
+ "**/sstate-cache/**": true,
+ "**/tmp*/**": true,
+ "**/workspace/attic/**": true,
+ "**/workspace/sources/**": true
+ },
+ "files.watcherExclude": {
+ "**/.git/**": true,
+ "**/_build/**": true,
+ "**/buildhistory/**": true,
+ "**/cache/**": true,
+ "**/downloads/**": true,
+ "**/node_modules/**": true,
+ "**/oe-logs/**": true,
+ "**/oe-workdir/**": true,
+ "**/sstate-cache/**": true,
+ "**/tmp*/**": true,
+ "**/workspace/attic/**": true,
+ "**/workspace/sources/**": true
+ },
+ "python.analysis.exclude": [
+ "**/_build/**",
+ "**/.git/**",
+ "**/buildhistory/**",
+ "**/cache/**",
+ "**/downloads/**",
+ "**/node_modules/**",
+ "**/oe-logs/**",
+ "**/oe-workdir/**",
+ "**/sstate-cache/**",
+ "**/tmp*/**",
+ "**/workspace/attic/**",
+ "**/workspace/sources/**"
+ ]
+}
+EOMsettings
+
+
+# Ask the user if the yocto-bitbake extension should be installed
+VSCODE_EXTENSIONS=$VSCODEDIR/extensions.json
+cat <<EOMextensions > "$VSCODE_EXTENSIONS"
+{
+ "recommendations": [
+ "yocto-project.yocto-bitbake"
+ ]
+}
+EOMextensions
+
+echo "You had no $VSCODEDIR configuration."
+echo "These configuration files have therefore been created for you."
diff --git a/scripts/oe-test b/scripts/oe-test
index 34d9012d14..55985b0b24 100755
--- a/scripts/oe-test
+++ b/scripts/oe-test
@@ -3,7 +3,9 @@
# OpenEmbedded test tool
#
# Copyright (C) 2016 Intel Corporation
-# Released under the MIT license (see COPYING.MIT)
+#
+# SPDX-License-Identifier: MIT
+#
import os
import sys
diff --git a/scripts/oe-time-dd-test.sh b/scripts/oe-time-dd-test.sh
new file mode 100755
index 0000000000..81748b8c9e
--- /dev/null
+++ b/scripts/oe-time-dd-test.sh
@@ -0,0 +1,106 @@
+#!/bin/bash
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+# oe-time-dd-test records how much time it takes to
+# write <count> number of kilobytes to the filesystem.
+# It also records the number of processes that are in
+# running (R), uninterruptible sleep (D) and interruptible
+# sleep (S) state from the output of "top" command.
+# The purpose of this script is to find which part of
+# the build system puts stress on the filesystem I/O and
+# to log all the processes.
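+#
+# Example (illustrative): write 8192 KiB and, if it takes longer than
+# 2 seconds, log the host state:
+#   oe-time-dd-test.sh -c 8192 -t 2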
+usage() {
+ echo "$0 is used to detect i/o latency and runs commands to display host information."
+ echo "The following commands are run in order:"
+ echo "1) top -c -b -n1 -w 512"
+ echo "2) iostat -y -z -x 5 1"
+ echo "3) tail -30 tmp*/log/cooker/*/console-latest.log to gather cooker log."
+ echo " "
+ echo "Options:"
+ echo "-c | --count <amount> dd (transfer) <amount> KiB of data within specified timeout to detect latency."
+ echo " Must enable -t option."
+ echo "-t | --timeout <time> timeout in seconds for the <count> amount of data to be transferred."
+ echo "-l | --log-only run the commands without performing the data transfer."
+ echo "-h | --help show help"
+
+}
+
+run_cmds() {
+ echo "start: top output"
+ top -c -b -n1 -w 512
+ echo "end: top output"
+ echo "start: iostat"
+ iostat -y -z -x 5 1
+ echo "end: iostat"
+ echo "start: cooker log"
+ tail -30 tmp*/log/cooker/*/console-latest.log
+ echo "end: cooker log"
+}
+
+if [ $# -lt 1 ]; then
+ usage
+ exit 1
+fi
+
+re_c='^[0-9]+$'
+#re_t='^[0-9]+([.][0-9]+)?$'
+
+while [[ $# -gt 0 ]]; do
+ key="$1"
+
+ case $key in
+ -c|--count)
+ COUNT=$2
+ shift
+ shift
+ if ! [[ $COUNT =~ $re_c ]] || [[ $COUNT -le 0 ]] ; then
+ usage
+ exit 1
+ fi
+ ;;
+ -t|--timeout)
+ TIMEOUT=$2
+ shift
+ shift
+ if ! [[ $TIMEOUT =~ $re_c ]] || [[ $TIMEOUT -le 0 ]] ; then
+ usage
+ exit 1
+ fi
+ ;;
+ -l|--log-only)
+ LOG_ONLY="true"
+ shift # --log-only takes no value, so shift only once
+ ;;
+ -h|--help)
+ usage
+ exit 0
+ ;;
+ *)
+ usage
+ exit 1
+ ;;
+ esac
+done
+
+
+if [ "$LOG_ONLY" = "true" ] ; then
+ uptime
+ run_cmds
+ exit
+fi
+
+if [ -z ${TIMEOUT+x} ] || [ -z ${COUNT+x} ] ; then
+ usage
+ exit 1
+fi
+
+uptime
+echo "Timeout used: ${TIMEOUT}"
+timeout ${TIMEOUT} dd if=/dev/zero of=oe-time-dd-test.dat bs=1024 count=${COUNT} conv=fsync
+if [ $? -ne 0 ]; then
+ run_cmds
+fi
diff --git a/scripts/oe-trim-schemas b/scripts/oe-trim-schemas
index 7c199ef1df..e3b26e273e 100755
--- a/scripts/oe-trim-schemas
+++ b/scripts/oe-trim-schemas
@@ -1,4 +1,9 @@
#! /usr/bin/env python3
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
import sys
try:
diff --git a/scripts/oepydevshell-internal.py b/scripts/oepydevshell-internal.py
index 04621ae8a1..3bf7df1114 100755
--- a/scripts/oepydevshell-internal.py
+++ b/scripts/oepydevshell-internal.py
@@ -1,4 +1,9 @@
#!/usr/bin/env python3
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
import os
import sys
@@ -40,7 +45,7 @@ nonblockingfd(pty)
nonblockingfd(sys.stdin)
-histfile = os.path.expanduser("~/.oedevpyshell-history")
+histfile = os.path.expanduser("~/.oepydevshell-history")
readline.parse_and_bind("tab: complete")
try:
readline.read_history_file(histfile)
@@ -63,7 +68,9 @@ try:
(ready, _, _) = select.select([pty, sys.stdin], writers , [], 0)
try:
if pty in ready:
- i = i + pty.read().decode('utf-8')
+ readdata = pty.read()
+ if readdata:
+ i = i + readdata.decode('utf-8')
if i:
# Write a page at a time to avoid overflowing output
# d.keys() is a good way to do that
diff --git a/scripts/opkg-query-helper.py b/scripts/opkg-query-helper.py
index ce89491f60..084d9ef684 100755
--- a/scripts/opkg-query-helper.py
+++ b/scripts/opkg-query-helper.py
@@ -6,21 +6,8 @@
#
# Copyright 2012 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-#
-
import sys
import fileinput
@@ -42,7 +29,7 @@ for arg in sys.argv[1:]:
args.append(arg)
# Regex for removing version specs after dependency items
-verregex = re.compile(' \([=<>]* [^ )]*\)')
+verregex = re.compile(r' \([=<>]* [^ )]*\)')
pkg = ""
ver = ""
diff --git a/scripts/patchtest b/scripts/patchtest
new file mode 100755
index 0000000000..0be7062dc2
--- /dev/null
+++ b/scripts/patchtest
@@ -0,0 +1,232 @@
+#!/usr/bin/env python3
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# patchtest: execute all unittest test cases discovered for a single patch
+#
+# Copyright (C) 2016 Intel Corporation
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import sys
+import os
+import unittest
+import logging
+import traceback
+import json
+
+# Include current path so test cases can see it
+sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)))
+
+# Include patchtest library
+sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), '../meta/lib/patchtest'))
+
+from data import PatchTestInput
+from repo import PatchTestRepo
+
+import utils
+logger = utils.logger_create('patchtest')
+info = logger.info
+error = logger.error
+
+
+def getResult(patch, mergepatch, logfile=None):
+
+ class PatchTestResult(unittest.TextTestResult):
+ """ Patchtest TextTestResult """
+ shouldStop = True
+ longMessage = False
+
+ success = 'PASS'
+ fail = 'FAIL'
+ skip = 'SKIP'
+
+ def startTestRun(self):
+ # let's create the repo already, it can be used later on
+ repoargs = {
+ 'repodir': PatchTestInput.repodir,
+ 'commit' : PatchTestInput.basecommit,
+ 'branch' : PatchTestInput.basebranch,
+ 'patch' : patch,
+ }
+
+ self.repo_error = False
+ self.test_error = False
+ self.test_failure = False
+
+ try:
+ self.repo = PatchTestInput.repo = PatchTestRepo(**repoargs)
+ except:
+ logger.error(traceback.format_exc())
+ self.repo_error = True
+ self.stop()
+ return
+
+ if mergepatch:
+ self.repo.merge()
+
+ def addError(self, test, err):
+ self.test_error = True
+ (ty, va, trace) = err
+ logger.error(traceback.format_exc())
+
+ def addFailure(self, test, err):
+ test_description = test.id().split('.')[-1].replace('_', ' ').replace("cve", "CVE").replace("signed off by",
+ "Signed-off-by").replace("upstream status",
+ "Upstream-Status").replace("non auh",
+ "non-AUH").replace("presence format", "presence")
+ self.test_failure = True
+ fail_str = '{}: {}: {} ({})'.format(self.fail,
+ test_description, json.loads(str(err[1]))["issue"],
+ test.id())
+ print(fail_str)
+ if logfile:
+ with open(logfile, "a") as f:
+ f.write(fail_str + "\n")
+
+ def addSuccess(self, test):
+ test_description = test.id().split('.')[-1].replace('_', ' ').replace("cve", "CVE").replace("signed off by",
+ "Signed-off-by").replace("upstream status",
+ "Upstream-Status").replace("non auh",
+ "non-AUH").replace("presence format", "presence")
+ success_str = '{}: {} ({})'.format(self.success,
+ test_description, test.id())
+ print(success_str)
+ if logfile:
+ with open(logfile, "a") as f:
+ f.write(success_str + "\n")
+
+ def addSkip(self, test, reason):
+ test_description = test.id().split('.')[-1].replace('_', ' ').replace("cve", "CVE").replace("signed off by",
+ "Signed-off-by").replace("upstream status",
+ "Upstream-Status").replace("non auh",
+ "non-AUH").replace("presence format", "presence")
+ skip_str = '{}: {}: {} ({})'.format(self.skip,
+ test_description, json.loads(str(reason))["issue"],
+ test.id())
+ print(skip_str)
+ if logfile:
+ with open(logfile, "a") as f:
+ f.write(skip_str + "\n")
+
+ def stopTestRun(self):
+
+ # in case there was an error on repo object creation, just return
+ if self.repo_error:
+ return
+
+ self.repo.clean()
+
+ return PatchTestResult
+
+def _runner(resultklass, prefix=None):
+ # load test with the corresponding prefix
+ loader = unittest.TestLoader()
+ if prefix:
+ loader.testMethodPrefix = prefix
+
+ # create the suite with discovered tests and the corresponding runner
+ suite = loader.discover(start_dir=PatchTestInput.testdir, pattern=PatchTestInput.pattern, top_level_dir=PatchTestInput.topdir)
+ ntc = suite.countTestCases()
+
+ # if there are no test cases, just quit
+ if not ntc:
+ return 2
+ runner = unittest.TextTestRunner(resultclass=resultklass, verbosity=0)
+
+ try:
+ result = runner.run(suite)
+ except:
+ logger.error(traceback.format_exc())
+ logger.error('patchtest: something went wrong')
+ return 1
+ if result.test_failure or result.test_error:
+ return 1
+
+ return 0
+
+def run(patch, logfile=None):
+ """ Load, setup and run pre and post-merge tests """
+ # Get the result class and install the control-c handler
+ unittest.installHandler()
+
+ # run pre-merge tests, meaning those methods with 'pretest' as prefix
+ premerge_resultklass = getResult(patch, False, logfile)
+ premerge_result = _runner(premerge_resultklass, 'pretest')
+
+ # run post-merge tests, meaning those methods with 'test' as prefix
+ postmerge_resultklass = getResult(patch, True, logfile)
+ postmerge_result = _runner(postmerge_resultklass, 'test')
+
+ print('----------------------------------------------------------------------\n')
+ if premerge_result == 2 and postmerge_result == 2:
+ logger.error('patchtest: No test cases found - did you specify the correct suite directory?')
+ elif premerge_result == 1 or postmerge_result == 1:
+ logger.error('WARNING: patchtest: At least one patchtest caused a failure or an error - please check https://wiki.yoctoproject.org/wiki/Patchtest for further guidance')
+ else:
+ logger.info('OK: patchtest: All patchtests passed')
+ print('----------------------------------------------------------------------\n')
+ return premerge_result or postmerge_result
+
+def main():
+ tmp_patch = False
+ patch_path = PatchTestInput.patch_path
+ log_results = PatchTestInput.log_results
+ log_path = None
+ patch_list = None
+
+ git_status = os.popen("(cd %s && git status)" % PatchTestInput.repodir).read()
+ status_matches = ["Changes not staged for commit", "Changes to be committed"]
+ if any([match in git_status for match in status_matches]):
+ logger.error("patchtest: there are uncommitted changes in the target repo that would be overwritten. Please commit or restore them before running patchtest")
+ return 1
+
+ if os.path.isdir(patch_path):
+ patch_list = [os.path.join(patch_path, filename) for filename in sorted(os.listdir(patch_path))]
+ else:
+ patch_list = [patch_path]
+
+ for patch in patch_list:
+ if os.path.getsize(patch) == 0:
+ logger.error('patchtest: patch is empty')
+ return 1
+
+ logger.info('Testing patch %s' % patch)
+
+ if log_results:
+ log_path = patch + ".testresult"
+ with open(log_path, "a") as f:
+ f.write("Patchtest results for patch '%s':\n\n" % patch)
+
+ try:
+ if log_path:
+ run(patch, log_path)
+ else:
+ run(patch)
+ finally:
+ if tmp_patch:
+ os.remove(patch)
+
+if __name__ == '__main__':
+ ret = 1
+
+ # Parse the command line arguments and store them in the PatchTestInput namespace
+ PatchTestInput.set_namespace()
+
+ # set debugging level
+ if PatchTestInput.debug:
+ logger.setLevel(logging.DEBUG)
+
+ # if topdir not defined, default it to testdir
+ if not PatchTestInput.topdir:
+ PatchTestInput.topdir = PatchTestInput.testdir
+
+ try:
+ ret = main()
+ except Exception:
+ traceback.print_exc(5)
+
+ sys.exit(ret)
diff --git a/scripts/patchtest-get-branch b/scripts/patchtest-get-branch
new file mode 100755
index 0000000000..c6e242f8b6
--- /dev/null
+++ b/scripts/patchtest-get-branch
@@ -0,0 +1,81 @@
+#!/usr/bin/env python3
+
+# Get target branch from the corresponding mbox
+#
+# NOTE: this script was based on patches sent to the openembedded-core
+# mailing list, where the target branch is defined inside brackets as a
+# subject prefix, e.g. [master], [rocko], etc.
+#
+# Copyright (C) 2016 Intel Corporation
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import mailbox
+import argparse
+import re
+import git
+
+
+def get_branch(filepath_repo, filepath_mbox, default_branch):
+ branch = None
+
+ # get all remotes branches
+ gitbranches = git.Git(filepath_repo).branch('-a').splitlines()
+
+ # from gitbranches, just get the names
+ branches = [b.split('/')[-1] for b in gitbranches]
+
+ subject = ' '.join(mailbox.mbox(filepath_mbox)[0]['subject'].splitlines())
+
+ # we expect that patches will have somewhere between one and three
+ # consecutive sets of square brackets with tokens inside, e.g.:
+ # 1. [PATCH]
+ # 2. [OE-core][PATCH]
+ # 3. [OE-core][kirkstone][PATCH]
+ # Some of them may also be part of a series, in which case the PATCH
+ # token will be formatted like:
+ # [PATCH 1/4]
+ # or they will be revisions to previous patches, where it will be:
+ # [PATCH v2]
+ # Or they may contain both:
+ # [PATCH v2 3/4]
+ # In any case, we want mprefix to contain all of these tokens so
+ # that we can search for branch names within them.
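+ # As an illustrative example (hypothetical subject line):
+ #   re.findall(r'\[.*?\]', '[OE-core][kirkstone][PATCH v2 3/4] foo: fix bar')
+ # yields ['[OE-core]', '[kirkstone]', '[PATCH v2 3/4]'].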
+ mprefix = re.findall(r'\[.*?\]', subject)
+ found_branch = None
+ if mprefix:
+ # Iterate over the tokens and compare against the branch list to
+ # figure out which one the patch is targeting
+ for token in mprefix:
+ stripped = token.lower().strip('[]')
+ if default_branch in stripped:
+ found_branch = default_branch
+ break
+ else:
+ for branch in branches:
+ # ignore branches named "core"
+ if branch != "core" and stripped.rfind(branch) != -1:
+ found_branch = token.split(' ')[0].strip('[]')
+ break
+
+ # if there's no mprefix content or no known branches were found in
+ # the tokens, assume the target is master
+ if found_branch is None:
+ found_branch = "master"
+
+ return (subject, found_branch)
+
+if __name__ == '__main__':
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument('repo', metavar='REPO', help='Main repository')
+ parser.add_argument('mbox', metavar='MBOX', help='mbox filename')
+ parser.add_argument('--default-branch', metavar='DEFAULT_BRANCH', default='master', help='Use this branch if none is found')
+ parser.add_argument('--separator', '-s', metavar='SEPARATOR', default=' ', help='Char separator for output data')
+ args = parser.parse_args()
+
+ subject, branch = get_branch(args.repo, args.mbox, args.default_branch)
+ print("branch: %s" % branch)
+
diff --git a/scripts/patchtest-get-series b/scripts/patchtest-get-series
new file mode 100755
index 0000000000..908442089f
--- /dev/null
+++ b/scripts/patchtest-get-series
@@ -0,0 +1,115 @@
+#!/bin/bash -e
+#
+# patchtest-get-series: Download the latest patch series from Patchwork
+#
+# Copyright (C) 2023 BayLibre Inc.
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+# How far into the past to check for new series, in minutes
+INTERVAL_MINUTES=30
+
+# Maximum number of series to retrieve. The Patchwork API can support up to
+# 250 at once
+SERIES_LIMIT=250
+
+# Location to save patches
+DOWNLOAD_PATH="."
+
+# Name of the file to use/check as a log of previously-tested series IDs
+SERIES_TEST_LOG=".series_test.log"
+
+# Patchwork project to pull series patches from
+PROJECT="oe-core"
+
+# The Patchwork server to pull from
+SERVER="https://patchwork.yoctoproject.org/api/1.2/"
+
+help()
+{
+ echo "Usage: patchtest-get-series [ -i | --interval MINUTES ]
+ [ -d | --directory DIRECTORY ]
+ [ -l | --limit COUNT ]
+ [ -h | --help ]
+ [ -t | --tested-series LOGFILE]
+ [ -p | --project PROJECT ]
+ [ -s | --server SERVER ]"
+ exit 2
+}
+
+while [ "$1" != "" ]; do
+ case $1 in
+ -i|--interval)
+ INTERVAL_MINUTES=$2
+ shift 2
+ ;;
+ -l|--limit)
+ SERIES_LIMIT=$2
+ shift 2
+ ;;
+ -d|--directory)
+ DOWNLOAD_PATH=$2
+ shift 2
+ ;;
+ -p|--project)
+ PROJECT=$2
+ shift 2
+ ;;
+ -s|--server)
+ SERVER=$2
+ shift 2
+ ;;
+ -t|--tested-series)
+ SERIES_TEST_LOG=$2
+ shift 2
+ ;;
+ -h|--help)
+ help
+ ;;
+ *)
+ echo "Unknown option $1"
+ help
+ ;;
+ esac
+done
+
+# The time this script is running at
+START_TIME=$(date --date "now" +"%Y-%m-%dT%H:%M:%S")
+
+# the corresponding timestamp we want to check against for new patch series
+SERIES_CHECK_LIMIT=$(date --date "now - ${INTERVAL_MINUTES} minutes" +"%Y-%m-%dT%H:%M:%S")
+
+echo "Start time is $START_TIME"
+echo "Series check limit is $SERIES_CHECK_LIMIT"
+
+# Create DOWNLOAD_PATH if it doesn't exist
+if [ ! -d "$DOWNLOAD_PATH" ]; then
+ mkdir "${DOWNLOAD_PATH}"
+fi
+
+# Create SERIES_TEST_LOG if it doesn't exist
+if [ ! -f "$SERIES_TEST_LOG" ]; then
+ touch "${SERIES_TEST_LOG}"
+fi
+
+# Retrieve a list of series IDs from the 'git-pw series list' output. The API
+# supports a maximum of 250 results, so make sure we allow that when required
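+# In the table printed by 'git-pw series list', the series ID is the second
+# whitespace-separated field, so awk extracts it, xargs joins the IDs onto a
+# single line, and sed strips any stray non-numeric characters (e.g. from
+# table borders or header rows)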
+SERIES_LIST=$(git-pw --project "${PROJECT}" --server "${SERVER}" series list --since "${SERIES_CHECK_LIMIT}" --limit "${SERIES_LIMIT}" | awk '{print $2}' | xargs | sed -e 's/[^0-9 ]//g')
+
+if [ -z "$SERIES_LIST" ]; then
+ echo "No new series for project ${PROJECT} since ${SERIES_CHECK_LIMIT}"
+ exit 0
+fi
+
+# Check each series ID
+for SERIES in $SERIES_LIST; do
+ # Download the series only if it's not found in the SERIES_TEST_LOG
+ if ! grep -w --quiet "${SERIES}" "${SERIES_TEST_LOG}"; then
+ echo "Downloading $SERIES..."
+ git-pw series download --separate "${SERIES}" "${DOWNLOAD_PATH}"
+ echo "${SERIES}" >> "${SERIES_TEST_LOG}"
+ else
+ echo "Already tested ${SERIES}. Skipping..."
+ fi
+done
diff --git a/scripts/patchtest-send-results b/scripts/patchtest-send-results
new file mode 100755
index 0000000000..8a3dadbd11
--- /dev/null
+++ b/scripts/patchtest-send-results
@@ -0,0 +1,110 @@
+#!/usr/bin/env python3
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# patchtest: execute all unittest test cases discovered for a single patch
+# Note that this script is currently under development and has been
+# hard-coded with default values for testing purposes. This script
+# should not be used without changing the default recipient, at minimum.
+#
+# Copyright (C) 2023 BayLibre Inc.
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import argparse
+import boto3
+import configparser
+import mailbox
+import os
+import re
+import sys
+
+greeting = """Thank you for your submission. Patchtest identified one
+or more issues with the patch. Please see the log below for
+more information:\n\n---\n"""
+
+suggestions = """\n---\n\nPlease address the issues identified and
+submit a new revision of the patch, or alternatively, reply to this
+email with an explanation of why the patch should be accepted. If you
+believe these results are due to an error in patchtest, please submit a
+bug at https://bugzilla.yoctoproject.org/ (use the 'Patchtest' category
+under 'Yocto Project Subprojects'). For more information on specific
+failures, see: https://wiki.yoctoproject.org/wiki/Patchtest. Thank
+you!"""
+
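+# patchtest writes .testresult lines in the form
+#   FAIL: <test description>: <issue> (<test id>)
+# (see PatchTestResult.addFailure), so a failed run is any line whose first
+# ':'-separated field is "FAIL".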
+def has_a_failed_test(raw_results):
+ return any(raw_result.split(':')[0] == "FAIL" for raw_result in raw_results.splitlines())
+
+parser = argparse.ArgumentParser(description="Send patchtest results to a submitter for a given patch")
+parser.add_argument("-p", "--patch", dest="patch", required=True, help="The patch file to summarize")
+parser.add_argument("-d", "--debug", dest="debug", required=False, action='store_true', help="Print raw email headers and content, but don't actually send it")
+args = parser.parse_args()
+
+if not os.path.exists(args.patch):
+ print(f"Patch '{args.patch}' not found - did you provide the right path?")
+ sys.exit(1)
+elif not os.path.exists(args.patch + ".testresult"):
+ print(f"Found patch '{args.patch}' but '{args.patch}.testresult' was not present. Have you run patchtest on the patch?")
+ sys.exit(1)
+
+result_file = args.patch + ".testresult"
+testresult = None
+
+with open(result_file, "r") as f:
+ testresult = f.read()
+
+# we know these patch files will only contain a single patch, so only
+# worry about the first element for getting the subject
+mbox = mailbox.mbox(args.patch)
+mbox_subject = mbox[0]['subject']
+subject_line = f"Patchtest results for {mbox_subject}"
+
+# extract the submitter email address and use it as the reply address
+# for the results
+reply_address = mbox[0]['from']
+
+# extract the message ID and use that as the in-reply-to address
+# TODO: This will need to change again when patchtest can handle a whole
+# series at once
+in_reply_to = mbox[0]['Message-ID']
+
+# the address the results email is sent from
+from_address = "patchtest@automation.yoctoproject.org"
+
+# mailing list to CC
+cc_address = "openembedded-core@lists.openembedded.org"
+
+if has_a_failed_test(testresult):
+ reply_contents = None
+ if len(max(testresult.splitlines(), key=len)) > 220:
+ warning = "Tests failed for the patch, but the results log could not be processed due to excessive result line length."
+ reply_contents = greeting + warning + suggestions
+ else:
+ reply_contents = greeting + testresult + suggestions
+
+ ses_client = boto3.client('ses', region_name='us-west-2')
+
+ # Construct the headers for the email. We only want to reply
+ # directly to the tested patch, so make In-Reply-To and References
+ # the same value.
+ raw_data = 'From: ' + from_address + '\nTo: ' + reply_address + \
+ '\nCC: ' + cc_address + '\nSubject:' + subject_line + \
+ '\nIn-Reply-To:' + in_reply_to + \
+ '\nReferences:' + in_reply_to + \
+ '\nMIME-Version: 1.0' + \
+ '\nContent-type: Multipart/Mixed;boundary="NextPart"\n\n--NextPart\nContent-Type: text/plain\n\n' + \
+ reply_contents + '\n\n--NextPart'
+
+ if args.debug:
+ print(f"RawMessage: \n\n{raw_data}")
+ else:
+ response = ses_client.send_raw_email(
+ Source=from_address,
+ RawMessage={
+ "Data": raw_data,
+ },
+ )
+
+else:
+ print(f"No failures identified for {args.patch}.")
diff --git a/scripts/patchtest-setup-sharedir b/scripts/patchtest-setup-sharedir
new file mode 100755
index 0000000000..277677e527
--- /dev/null
+++ b/scripts/patchtest-setup-sharedir
@@ -0,0 +1,83 @@
+#!/bin/bash -e
+#
+# patchtest-setup-sharedir: Setup a directory for storing mboxes and
+# repositories to be shared with the guest machine, including updates to
+# the repos if the directory already exists
+#
+# Copyright (C) 2023 BayLibre Inc.
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+# poky repository
+POKY_REPO="https://git.yoctoproject.org/poky"
+
+# patchtest repository
+PATCHTEST_REPO="https://git.yoctoproject.org/patchtest"
+
+# the name of the directory
+SHAREDIR="patchtest_share"
+
+help()
+{
+ echo "Usage: patchtest-setup-sharedir [ -d | --directory SHAREDIR ]
+ [ -p | --patchtest PATCHTEST_REPO ]
+ [ -y | --poky POKY_REPO ]"
+ exit 2
+}
+
+while [ "$1" != "" ]; do
+ case $1 in
+ -d|--directory)
+ SHAREDIR=$2
+ shift 2
+ ;;
+ -p|--patchtest)
+ PATCHTEST_REPO=$2
+ shift 2
+ ;;
+ -y|--poky)
+ POKY_REPO=$2
+ shift 2
+ ;;
+ -h|--help)
+ help
+ ;;
+ *)
+ echo "Unknown option $1"
+ help
+ ;;
+ esac
+done
+
+# define MBOX_DIR where the patch series will be stored by
+# get-latest-series
+MBOX_DIR="${SHAREDIR}/mboxes"
+
+# Create SHAREDIR if it doesn't exist
+if [ ! -d "$SHAREDIR" ]; then
+ mkdir -p "${SHAREDIR}"
+ echo "Created ${SHAREDIR}"
+fi
+
+# Create the mboxes directory if it doesn't exist
+if [ ! -d "$MBOX_DIR" ]; then
+ mkdir -p "${MBOX_DIR}"
+ echo "Created ${MBOX_DIR}"
+fi
+
+# clone poky into SHAREDIR if it's not already there; otherwise, update it
+BASENAME=$(basename "${POKY_REPO}")
+if [ ! -d "${SHAREDIR}/${BASENAME}" ]; then
+ git clone "${POKY_REPO}" "${SHAREDIR}/${BASENAME}"
+else
+ (cd "${SHAREDIR}/${BASENAME}" && git pull)
+fi
+
+# clone patchtest into SHAREDIR if it's not already there; otherwise, update it
+BASENAME=$(basename "${PATCHTEST_REPO}")
+if [ ! -d "${SHAREDIR}/${BASENAME}" ]; then
+ git clone "${PATCHTEST_REPO}" "${SHAREDIR}/${BASENAME}"
+else
+ (cd "${SHAREDIR}/${BASENAME}" && git pull)
+fi
diff --git a/scripts/patchtest.README b/scripts/patchtest.README
new file mode 100644
index 0000000000..76b5fcdb6d
--- /dev/null
+++ b/scripts/patchtest.README
@@ -0,0 +1,153 @@
+# Patchtest
+
+## Introduction
+
+Patchtest is a test framework for community patches based on the standard
+Python unittest module. As input, it needs three elements to work properly:
+a patch in mbox format (either created with `git format-patch` or fetched
+from Patchwork), a test suite, and a target repository.
+
+The first test suite intended to be used with patchtest is found in the
+openembedded-core repository [1] targeted for patches that get into the
+openembedded-core mailing list [2]. This suite is also intended as a
+baseline for development of similar suites for other layers as needed.
+
+Patchtest can run on either a host or a guest machine, depending on the
+environment in which the tests need to be executed. If you plan to test your
+own patches (a good practice before they are sent to the mailing list), the
+easiest way is to install and execute on your local host; on the other hand,
+if automated testing is intended, the guest method is strongly recommended.
+The guest method requires the use of the patchtest layer, in addition to the
+tools available in oe-core: https://git.yoctoproject.org/patchtest/
+
+## Installation
+
+As a tool for use with the Yocto Project, the [quick start guide](https://docs.yoctoproject.org/brief-yoctoprojectqs/index.html)
+contains the necessary prerequisites for a basic project. In addition,
+patchtest relies on the following Python modules:
+
+- boto3 (for sending automated results emails only)
+- git-pw>=2.5.0
+- jinja2
+- pylint
+- pyparsing>=3.0.9
+- unidiff
+
+These can be installed by running `pip install -r
+meta/lib/patchtest/requirements.txt`. Note that git-pw is not
+automatically added to the user's PATH; by default, it is installed at
+~/.local/bin/git-pw.
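+
+If needed, one way to make git-pw visible is to add that directory to your
+PATH (assuming a standard pip user install):
+
+ export PATH="$HOME/.local/bin:$PATH"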
+
+For git-pw (and therefore scripts such as patchtest-get-series) to work, you need
+to provide a Patchwork instance in your user's .gitconfig, like so (the project
+can be specified using the --project argument):
+
+ git config --global pw.server "https://patchwork.yoctoproject.org/api/1.2/"
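+
+The target project can likewise be stored in .gitconfig rather than passed
+with --project each time, using git-pw's pw.project key:
+
+ git config --global pw.project "oe-core"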
+
+To work with patchtest, you should have the following repositories cloned:
+
+1. https://git.openembedded.org/openembedded-core/ (or https://git.yoctoproject.org/poky/)
+2. https://git.openembedded.org/bitbake/ (if not using poky)
+3. https://git.yoctoproject.org/patchtest (if using guest mode)
+
+## Usage
+
+### Obtaining Patches
+
+Patch files can be obtained directly from cloned repositories using `git
+format-patch -N` (where N is the number of patches starting from HEAD to
+generate). git-pw can also be used with filters for users, patch/series IDs,
+and timeboxes if specific patches are desired. For more information, see the
+git-pw [documentation](https://patchwork.readthedocs.io/projects/git-pw/en/latest/).
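+
+For example, to generate mbox files for the three most recent commits on the
+current branch:
+
+ git format-patch -3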
+
+Alternatively, `scripts/patchtest-get-series` can be used to pull mbox files from
+the Patchwork instance configured previously in .gitconfig. It uses a log file
+called ".series_test.log" to store and compare series IDs so that the same
+versions of a patch are not tested multiple times unintentionally. By default,
+it will pull up to 250 patch series from the last 30 minutes using oe-core as
+the target project, but these parameters can be configured using the `--limit`,
+`--interval`, and `--project` arguments respectively. For more information, run
+`patchtest-get-series -h`.
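+
+As a hypothetical example, to pull up to 10 series from the last hour into
+./mboxes:
+
+ patchtest-get-series --limit 10 --interval 60 --directory ./mboxes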
+
+### Host Mode
+
+To run patchtest on the host, do the following:
+
+1. In openembedded-core/poky, do `source oe-init-build-env`
+2. Generate patch files from the target repository by doing `git format-patch -N`,
+ where N is the number of patches starting at HEAD, or by using git-pw
+ or patchtest-get-series
+3. Run patchtest on a patch file by doing the following:
+
+ patchtest --patch /path/to/patch/file
+
+ or, if you have stored the patch files in a directory, do:
+
+ patchtest --directory /path/to/patch/directory
+
+ For example, to test `master-gcc-Fix--fstack-protector-issue-on-aarch64.patch` against the oe-core test suite:
+
+ patchtest --patch master-gcc-Fix--fstack-protector-issue-on-aarch64.patch
+
+ If you want to use a different test suite or target repository, you can use the --testdir and --repodir flags:
+
+ patchtest --patch /path/to/patch/file --repodir /path/to/repo --testdir /path/to/test/dir
+
+### Guest Mode
+
+Patchtest's guest mode has been refactored to more closely mirror the
+typical Yocto Project image build workflow, but there are still some key
+differences to keep in mind. The primary objective is to provide a level
+of isolation from the host when testing patches pulled automatically
+from the mailing lists. When executed this way, the test process is
+essentially running arbitrary code from the internet, which could be
+catastrophic if malicious patches or even poorly-handled edge cases
+aren't guarded against. In order to use this mode, the
+https://git.yoctoproject.org/patchtest/ repository must be cloned and
+the meta-patchtest layer added to bblayers.conf.
+
+The general flow of guest mode is:
+
+1. Run patchtest-setup-sharedir --directory <dirname> to create a
+ directory for mounting
+2. Collect patches via patchtest-get-series (or other manual step) into the
+ <dirname>/mboxes path
+3. Ensure that a user with ID 1200 has appropriate read/write
+ permissions to <dirname> and <dirname>/mboxes, so that the
+ "patchtest" user in the core-image-patchtest image can function
+4. Build the core-image-patchtest image
+5. Run the core-image-patchtest image with the mounted sharedir, like
+ so:
+ `runqemu kvm nographic qemuparams="-snapshot -fsdev
+ local,id=test_mount,path=/workspace/yocto/poky/build/patchtestdir,security_model=mapped
+ -device virtio-9p-pci,fsdev=test_mount,mount_tag=test_mount -smp 4 -m
+ 2048"`
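+
+For step 3, one simple way to grant the required permissions (assuming UID
+and GID 1200 are otherwise unused on the host) is:
+
+ chown -R 1200:1200 <dirname>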
+
+Patchtest runs as an initscript for the core-image-patchtest image and
+shuts down after completion, so there is no input required from a user
+during operation. Unlike in host mode, the guest is designed to
+automatically generate test result files, in the same directory as the
+targeted patch files but with .testresult as an extension. These contain
+the entire output of the patchtest run for each respective pass,
+including the PASS, FAIL, and SKIP indicators for each test run.
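+
+For example, the collected results for all tested patches (hypothetical
+sharedir layout) can be reviewed with:
+
+ cat <dirname>/mboxes/*.testresult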
+
+## Contributing
+
+The Yocto Project mailing list (openembedded-core@lists.openembedded.org) is
+used for questions, comments and patch review. It is subscriber-only, so
+please register before posting.
+
+When sending single patches, please use something like:
+
+ git send-email -M -1 --to=openembedded-core@lists.openembedded.org --subject-prefix=OE-core][PATCH
+
+## Maintenance
+
+Maintainers:
+ Trevor Gamblin <tgamblin@baylibre.com>
+
+## Links
+[1] https://git.openembedded.org/openembedded-core/
+[2] https://www.yoctoproject.org/community/mailing-lists/
diff --git a/scripts/postinst-intercepts/delay_to_first_boot b/scripts/postinst-intercepts/delay_to_first_boot
index ecdbef95dd..fa8e1caaf5 100644
--- a/scripts/postinst-intercepts/delay_to_first_boot
+++ b/scripts/postinst-intercepts/delay_to_first_boot
@@ -1,2 +1,6 @@
#!/bin/sh
+#
+# SPDX-License-Identifier: MIT
+#
+
exit 1
diff --git a/scripts/postinst-intercepts/postinst_intercept b/scripts/postinst-intercepts/postinst_intercept
index b18e806d43..b91974c885 100755
--- a/scripts/postinst-intercepts/postinst_intercept
+++ b/scripts/postinst-intercepts/postinst_intercept
@@ -1,5 +1,7 @@
#!/bin/sh
#
+# SPDX-License-Identifier: MIT
+#
# This script is called from inside postinstall scriptlets at do_rootfs time. It
# actually adds, at the end, the list of packages for which the intercept script
# is valid. Also, if one wants to pass any variables to the intercept script from
diff --git a/scripts/postinst-intercepts/update_desktop_database b/scripts/postinst-intercepts/update_desktop_database
new file mode 100644
index 0000000000..8903b496f3
--- /dev/null
+++ b/scripts/postinst-intercepts/update_desktop_database
@@ -0,0 +1,8 @@
+#!/bin/sh
+#
+# SPDX-License-Identifier: MIT
+#
+# Post-install intercept for mime-xdg.bbclass
+
+update-desktop-database $D${desktop_dir}
+
diff --git a/scripts/postinst-intercepts/update_font_cache b/scripts/postinst-intercepts/update_font_cache
index 20e9048adf..900db042d6 100644
--- a/scripts/postinst-intercepts/update_font_cache
+++ b/scripts/postinst-intercepts/update_font_cache
@@ -1,6 +1,13 @@
#!/bin/sh
+#
+# SPDX-License-Identifier: MIT
+#
set -e
-PSEUDO_UNLOAD=1 ${binprefix}qemuwrapper -L $D -E ${fontconfigcacheenv} $D${bindir}/fc-cache --sysroot=$D --system-only ${fontconfigcacheparams}
+rm -f $D${fontconfigcachedir}/CACHEDIR.TAG
+
+PSEUDO_UNLOAD=1 ${binprefix}qemuwrapper -L $D -E ${fontconfigcacheenv} $D${libexecdir}/${binprefix}fc-cache --sysroot=$D --system-only ${fontconfigcacheparams}
+
chown -R root:root $D${fontconfigcachedir}
+find $D -type f -name .uuid -exec chown root:root '{}' +
diff --git a/scripts/postinst-intercepts/update_gio_module_cache b/scripts/postinst-intercepts/update_gio_module_cache
index d1f0140947..c87fa85db9 100644
--- a/scripts/postinst-intercepts/update_gio_module_cache
+++ b/scripts/postinst-intercepts/update_gio_module_cache
@@ -1,4 +1,7 @@
#!/bin/sh
+#
+# SPDX-License-Identifier: MIT
+#
set -e
diff --git a/scripts/postinst-intercepts/update_gtk_icon_cache b/scripts/postinst-intercepts/update_gtk_icon_cache
new file mode 100644
index 0000000000..a92bd840c6
--- /dev/null
+++ b/scripts/postinst-intercepts/update_gtk_icon_cache
@@ -0,0 +1,21 @@
+#!/bin/sh
+#
+# SPDX-License-Identifier: MIT
+#
+# Post-install intercept for gtk-icon-cache.bbclass
+
+set -e
+
+# Update native pixbuf loaders
+$STAGING_DIR_NATIVE/${libdir_native}/gdk-pixbuf-2.0/gdk-pixbuf-query-loaders --update-cache
+
+for icondir in $D/usr/share/icons/*/ ; do
+ if [ -d "$icondir" ] ; then
+ for gtkuic_cmd in gtk-update-icon-cache gtk4-update-icon-cache ; do
+ if command -v "$gtkuic_cmd" > /dev/null; then
+ $gtkuic_cmd -fqt "$icondir"
+ fi
+ done
+ fi
+done
+
diff --git a/scripts/postinst-intercepts/update_gtk_immodules_cache b/scripts/postinst-intercepts/update_gtk_immodules_cache
index d85d3622c2..9f07ccca6b 100644
--- a/scripts/postinst-intercepts/update_gtk_immodules_cache
+++ b/scripts/postinst-intercepts/update_gtk_immodules_cache
@@ -1,15 +1,18 @@
#!/bin/sh
+#
+# SPDX-License-Identifier: MIT
+#
set -e
-if [ -x $D${bindir}/gtk-query-immodules-2.0 ]; then
- PSEUDO_UNLOAD=1 ${binprefix}qemuwrapper -L $D $D/${bindir}/gtk-query-immodules-2.0 \
+if [ -x $D${libexecdir}/${binprefix}gtk-query-immodules-2.0 ]; then
+ PSEUDO_UNLOAD=1 ${binprefix}qemuwrapper -L $D $D${libexecdir}/${binprefix}gtk-query-immodules-2.0 \
> $D${libdir}/gtk-2.0/2.10.0/immodules.cache &&
sed -i -e "s:$D::" $D${libdir}/gtk-2.0/2.10.0/immodules.cache
chown root:root $D${libdir}/gtk-2.0/2.10.0/immodules.cache
fi
-if [ -x $D${bindir}/gtk-query-immodules-3.0 ]; then
- PSEUDO_UNLOAD=1 ${binprefix}qemuwrapper -L $D $D/${bindir}/gtk-query-immodules-3.0 \
+if [ -x $D${libexecdir}/${binprefix}gtk-query-immodules-3.0 ]; then
+ PSEUDO_UNLOAD=1 ${binprefix}qemuwrapper -L $D $D${libexecdir}/${binprefix}gtk-query-immodules-3.0 \
> $D${libdir}/gtk-3.0/3.0.0/immodules.cache &&
sed -i -e "s:$D::" $D${libdir}/gtk-3.0/3.0.0/immodules.cache
chown root:root $D${libdir}/gtk-3.0/3.0.0/immodules.cache
diff --git a/scripts/postinst-intercepts/update_icon_cache b/scripts/postinst-intercepts/update_icon_cache
deleted file mode 100644
index 9cf2a72a0c..0000000000
--- a/scripts/postinst-intercepts/update_icon_cache
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/bin/sh
-
-set -e
-
-# update native pixbuf loaders
-$STAGING_DIR_NATIVE/${libdir_native}/gdk-pixbuf-2.0/gdk-pixbuf-query-loaders --update-cache
-
-for icondir in $D/usr/share/icons/*/ ; do
- if [ -d $icondir ] ; then
- gtk-update-icon-cache -fqt $icondir
- fi
-done
-
diff --git a/scripts/postinst-intercepts/update_mandb b/scripts/postinst-intercepts/update_mandb
new file mode 100644
index 0000000000..f91bafdb11
--- /dev/null
+++ b/scripts/postinst-intercepts/update_mandb
@@ -0,0 +1,18 @@
+#!/bin/sh
+#
+# SPDX-License-Identifier: MIT
+#
+
+set -eu
+
+# Create a temporary man_db.conf with paths to the rootfs, as mandb needs absolute paths
+CONFIG=$(mktemp --tmpdir update-mandb.XXXXX)
+sed "s:\(\s\)/:\1$D/:g" $D${sysconfdir}/man_db.conf > $CONFIG
+
+mkdir -p $D${localstatedir}/cache/man/
+
+PSEUDO_UNLOAD=1 ${binprefix}qemuwrapper -L $D $D${bindir}/mandb --config-file $CONFIG --create
+
+rm -f $CONFIG
+
+chown -R man:man $D${localstatedir}/cache/man/
diff --git a/scripts/postinst-intercepts/update_mime_database b/scripts/postinst-intercepts/update_mime_database
new file mode 100644
index 0000000000..582d1e162c
--- /dev/null
+++ b/scripts/postinst-intercepts/update_mime_database
@@ -0,0 +1,9 @@
+#!/bin/sh
+#
+# SPDX-License-Identifier: MIT
+#
+# Post-install intercept for mime.bbclass
+
+echo "Updating MIME database... this may take a while."
+update-mime-database $D${mimedir}
+
diff --git a/scripts/postinst-intercepts/update_pixbuf_cache b/scripts/postinst-intercepts/update_pixbuf_cache
index ebea07c356..ea12814474 100644
--- a/scripts/postinst-intercepts/update_pixbuf_cache
+++ b/scripts/postinst-intercepts/update_pixbuf_cache
@@ -1,4 +1,7 @@
#!/bin/sh
+#
+# SPDX-License-Identifier: MIT
+#
set -e
diff --git a/scripts/postinst-intercepts/update_udev_hwdb b/scripts/postinst-intercepts/update_udev_hwdb
new file mode 100644
index 0000000000..8b3f5de791
--- /dev/null
+++ b/scripts/postinst-intercepts/update_udev_hwdb
@@ -0,0 +1,25 @@
+#!/bin/sh
+#
+# SPDX-License-Identifier: MIT
+#
+
+set -e
+
+case "${PREFERRED_PROVIDER_udev}" in
+ systemd)
+ UDEV_EXTRA_ARGS="--usr"
+ UDEVLIBDIR="${rootlibexecdir}"
+ UDEVADM="${base_bindir}/udevadm"
+ ;;
+
+ *)
+ UDEV_EXTRA_ARGS=""
+ UDEVLIBDIR="${sysconfdir}"
+ UDEVADM="${bindir}/udevadm"
+ ;;
+esac
+
+rm -f $D${UDEVLIBDIR}/udev/hwdb.bin
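+# Try the ${binprefix} qemuwrapper first and fall back to the unprefixed
+# wrapper, since the prefixed variant may not exist in all configurations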
+PSEUDO_UNLOAD=1 ${binprefix}qemuwrapper -L $D $D${UDEVADM} hwdb --update --root $D ${UDEV_EXTRA_ARGS} ||
+ PSEUDO_UNLOAD=1 qemuwrapper -L $D $D${UDEVADM} hwdb --update --root $D ${UDEV_EXTRA_ARGS}
+chown root:root $D${UDEVLIBDIR}/udev/hwdb.bin
diff --git a/scripts/pybootchartgui/pybootchartgui.py b/scripts/pybootchartgui/pybootchartgui.py
index 7ce1a5be40..1c4062b42c 100755
--- a/scripts/pybootchartgui/pybootchartgui.py
+++ b/scripts/pybootchartgui/pybootchartgui.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
#
# This file is part of pybootchartgui.
diff --git a/scripts/pybootchartgui/pybootchartgui/draw.py b/scripts/pybootchartgui/pybootchartgui/draw.py
index 201ce4577f..c6e67833ab 100644
--- a/scripts/pybootchartgui/pybootchartgui/draw.py
+++ b/scripts/pybootchartgui/pybootchartgui/draw.py
@@ -19,22 +19,23 @@ import math
import re
import random
import colorsys
+import functools
from operator import itemgetter
class RenderOptions:
- def __init__(self, app_options):
- # should we render a cumulative CPU time chart
- self.cumulative = True
- self.charts = True
- self.kernel_only = False
- self.app_options = app_options
+ def __init__(self, app_options):
+ # should we render a cumulative CPU time chart
+ self.cumulative = True
+ self.charts = True
+ self.kernel_only = False
+ self.app_options = app_options
- def proc_tree (self, trace):
- if self.kernel_only:
- return trace.kernel_tree
- else:
- return trace.proc_tree
+ def proc_tree (self, trace):
+ if self.kernel_only:
+ return trace.kernel_tree
+ else:
+ return trace.proc_tree
# Process tree background color.
BACK_COLOR = (1.0, 1.0, 1.0, 1.0)
@@ -79,6 +80,22 @@ MEM_BUFFERS_COLOR = (0.4, 0.4, 0.4, 0.3)
# Swap color
MEM_SWAP_COLOR = DISK_TPUT_COLOR
+# avg10 CPU pressure color
+CPU_PRESSURE_AVG10_COLOR = (0.0, 0.0, 0.0, 1.0)
+# delta total CPU pressure color
+CPU_PRESSURE_TOTAL_COLOR = CPU_COLOR
+# avg10 IO pressure color
+IO_PRESSURE_AVG10_COLOR = (0.0, 0.0, 0.0, 1.0)
+# delta total IO pressure color
+IO_PRESSURE_TOTAL_COLOR = IO_COLOR
+# avg10 memory pressure color
+MEM_PRESSURE_AVG10_COLOR = (0.0, 0.0, 0.0, 1.0)
+# delta total memory pressure color
+MEM_PRESSURE_TOTAL_COLOR = DISK_TPUT_COLOR
+
# Process border color.
PROC_BORDER_COLOR = (0.71, 0.71, 0.71, 1.0)
# Waiting process color.
@@ -136,11 +153,11 @@ TASK_COLOR_PACKAGE_WRITE = (0.0, 0.50, 0.50, 1.0)
# Distinct colors used for different disk volumnes.
# If we have more volumns, colors get re-used.
VOLUME_COLORS = [
- (1.0, 1.0, 0.00, 1.0),
- (0.0, 1.00, 0.00, 1.0),
- (1.0, 0.00, 1.00, 1.0),
- (0.0, 0.00, 1.00, 1.0),
- (0.0, 1.00, 1.00, 1.0),
+ (1.0, 1.0, 0.00, 1.0),
+ (0.0, 1.00, 0.00, 1.0),
+ (1.0, 0.00, 1.00, 1.0),
+ (0.0, 0.00, 1.00, 1.0),
+ (0.0, 1.00, 1.00, 1.0),
]
# Process states
@@ -152,7 +169,7 @@ STATE_STOPPED = 4
STATE_ZOMBIE = 5
STATE_COLORS = [(0, 0, 0, 0), PROC_COLOR_R, PROC_COLOR_S, PROC_COLOR_D, \
- PROC_COLOR_T, PROC_COLOR_Z, PROC_COLOR_X, PROC_COLOR_W]
+ PROC_COLOR_T, PROC_COLOR_Z, PROC_COLOR_X, PROC_COLOR_W]
# CumulativeStats Types
STAT_TYPE_CPU = 0
@@ -160,80 +177,80 @@ STAT_TYPE_IO = 1
# Convert ps process state to an int
def get_proc_state(flag):
- return "RSDTZXW".find(flag) + 1
+ return "RSDTZXW".find(flag) + 1
def draw_text(ctx, text, color, x, y):
- ctx.set_source_rgba(*color)
- ctx.move_to(x, y)
- ctx.show_text(text)
+ ctx.set_source_rgba(*color)
+ ctx.move_to(x, y)
+ ctx.show_text(text)
def draw_fill_rect(ctx, color, rect):
- ctx.set_source_rgba(*color)
- ctx.rectangle(*rect)
- ctx.fill()
+ ctx.set_source_rgba(*color)
+ ctx.rectangle(*rect)
+ ctx.fill()
def draw_rect(ctx, color, rect):
- ctx.set_source_rgba(*color)
- ctx.rectangle(*rect)
- ctx.stroke()
+ ctx.set_source_rgba(*color)
+ ctx.rectangle(*rect)
+ ctx.stroke()
def draw_legend_box(ctx, label, fill_color, x, y, s):
- draw_fill_rect(ctx, fill_color, (x, y - s, s, s))
- draw_rect(ctx, PROC_BORDER_COLOR, (x, y - s, s, s))
- draw_text(ctx, label, TEXT_COLOR, x + s + 5, y)
+ draw_fill_rect(ctx, fill_color, (x, y - s, s, s))
+ draw_rect(ctx, PROC_BORDER_COLOR, (x, y - s, s, s))
+ draw_text(ctx, label, TEXT_COLOR, x + s + 5, y)
def draw_legend_line(ctx, label, fill_color, x, y, s):
- draw_fill_rect(ctx, fill_color, (x, y - s/2, s + 1, 3))
- ctx.arc(x + (s + 1)/2.0, y - (s - 3)/2.0, 2.5, 0, 2.0 * math.pi)
- ctx.fill()
- draw_text(ctx, label, TEXT_COLOR, x + s + 5, y)
+ draw_fill_rect(ctx, fill_color, (x, y - s/2, s + 1, 3))
+ ctx.arc(x + (s + 1)/2.0, y - (s - 3)/2.0, 2.5, 0, 2.0 * math.pi)
+ ctx.fill()
+ draw_text(ctx, label, TEXT_COLOR, x + s + 5, y)
def draw_label_in_box(ctx, color, label, x, y, w, maxx):
- label_w = ctx.text_extents(label)[2]
- label_x = x + w / 2 - label_w / 2
- if label_w + 10 > w:
- label_x = x + w + 5
- if label_x + label_w > maxx:
- label_x = x - label_w - 5
- draw_text(ctx, label, color, label_x, y)
+ label_w = ctx.text_extents(label)[2]
+ label_x = x + w / 2 - label_w / 2
+ if label_w + 10 > w:
+ label_x = x + w + 5
+ if label_x + label_w > maxx:
+ label_x = x - label_w - 5
+ draw_text(ctx, label, color, label_x, y)
def draw_sec_labels(ctx, options, rect, sec_w, nsecs):
- ctx.set_font_size(AXIS_FONT_SIZE)
- prev_x = 0
- for i in range(0, rect[2] + 1, sec_w):
- if ((i / sec_w) % nsecs == 0) :
- if options.app_options.as_minutes :
- label = "%.1f" % (i / sec_w / 60.0)
- else :
- label = "%d" % (i / sec_w)
- label_w = ctx.text_extents(label)[2]
- x = rect[0] + i - label_w/2
- if x >= prev_x:
- draw_text(ctx, label, TEXT_COLOR, x, rect[1] - 2)
- prev_x = x + label_w
+ ctx.set_font_size(AXIS_FONT_SIZE)
+ prev_x = 0
+ for i in range(0, rect[2] + 1, sec_w):
+ if ((i / sec_w) % nsecs == 0) :
+ if options.app_options.as_minutes :
+ label = "%.1f" % (i / sec_w / 60.0)
+ else :
+ label = "%d" % (i / sec_w)
+ label_w = ctx.text_extents(label)[2]
+ x = rect[0] + i - label_w/2
+ if x >= prev_x:
+ draw_text(ctx, label, TEXT_COLOR, x, rect[1] - 2)
+ prev_x = x + label_w
def draw_box_ticks(ctx, rect, sec_w):
- draw_rect(ctx, BORDER_COLOR, tuple(rect))
-
- ctx.set_line_cap(cairo.LINE_CAP_SQUARE)
-
- for i in range(sec_w, rect[2] + 1, sec_w):
- if ((i / sec_w) % 10 == 0) :
- ctx.set_line_width(1.5)
- elif sec_w < 5 :
- continue
- else :
- ctx.set_line_width(1.0)
- if ((i / sec_w) % 30 == 0) :
- ctx.set_source_rgba(*TICK_COLOR_BOLD)
- else :
- ctx.set_source_rgba(*TICK_COLOR)
- ctx.move_to(rect[0] + i, rect[1] + 1)
- ctx.line_to(rect[0] + i, rect[1] + rect[3] - 1)
- ctx.stroke()
- ctx.set_line_width(1.0)
-
- ctx.set_line_cap(cairo.LINE_CAP_BUTT)
+ draw_rect(ctx, BORDER_COLOR, tuple(rect))
+
+ ctx.set_line_cap(cairo.LINE_CAP_SQUARE)
+
+ for i in range(sec_w, rect[2] + 1, sec_w):
+ if ((i / sec_w) % 10 == 0) :
+ ctx.set_line_width(1.5)
+ elif sec_w < 5 :
+ continue
+ else :
+ ctx.set_line_width(1.0)
+ if ((i / sec_w) % 30 == 0) :
+ ctx.set_source_rgba(*TICK_COLOR_BOLD)
+ else :
+ ctx.set_source_rgba(*TICK_COLOR)
+ ctx.move_to(rect[0] + i, rect[1] + 1)
+ ctx.line_to(rect[0] + i, rect[1] + rect[3] - 1)
+ ctx.stroke()
+ ctx.set_line_width(1.0)
+
+ ctx.set_line_cap(cairo.LINE_CAP_BUTT)
def draw_annotations(ctx, proc_tree, times, rect):
ctx.set_line_cap(cairo.LINE_CAP_SQUARE)
@@ -252,51 +269,54 @@ def draw_annotations(ctx, proc_tree, times, rect):
ctx.set_dash([])
def draw_chart(ctx, color, fill, chart_bounds, data, proc_tree, data_range):
- ctx.set_line_width(0.5)
- x_shift = proc_tree.start_time
-
- def transform_point_coords(point, x_base, y_base, \
- xscale, yscale, x_trans, y_trans):
- x = (point[0] - x_base) * xscale + x_trans
- y = (point[1] - y_base) * -yscale + y_trans + chart_bounds[3]
- return x, y
-
- max_x = max (x for (x, y) in data)
- max_y = max (y for (x, y) in data)
- # avoid divide by zero
- if max_y == 0:
- max_y = 1.0
- xscale = float (chart_bounds[2]) / (max_x - x_shift)
- # If data_range is given, scale the chart so that the value range in
- # data_range matches the chart bounds exactly.
- # Otherwise, scale so that the actual data matches the chart bounds.
- if data_range:
- yscale = float(chart_bounds[3]) / (data_range[1] - data_range[0])
- ybase = data_range[0]
- else:
- yscale = float(chart_bounds[3]) / max_y
- ybase = 0
-
- first = transform_point_coords (data[0], x_shift, ybase, xscale, yscale, \
- chart_bounds[0], chart_bounds[1])
- last = transform_point_coords (data[-1], x_shift, ybase, xscale, yscale, \
- chart_bounds[0], chart_bounds[1])
-
- ctx.set_source_rgba(*color)
- ctx.move_to(*first)
- for point in data:
- x, y = transform_point_coords (point, x_shift, ybase, xscale, yscale, \
- chart_bounds[0], chart_bounds[1])
- ctx.line_to(x, y)
- if fill:
- ctx.stroke_preserve()
- ctx.line_to(last[0], chart_bounds[1]+chart_bounds[3])
- ctx.line_to(first[0], chart_bounds[1]+chart_bounds[3])
- ctx.line_to(first[0], first[1])
- ctx.fill()
- else:
- ctx.stroke()
- ctx.set_line_width(1.0)
+ ctx.set_line_width(0.5)
+ x_shift = proc_tree.start_time
+
+ def transform_point_coords(point, x_base, y_base, \
+ xscale, yscale, x_trans, y_trans):
+ x = (point[0] - x_base) * xscale + x_trans
+ y = (point[1] - y_base) * -yscale + y_trans + chart_bounds[3]
+ return x, y
+
+ max_x = max (x for (x, y) in data)
+ max_y = max (y for (x, y) in data)
+ # avoid divide by zero
+ if max_y == 0:
+ max_y = 1.0
+ if (max_x - x_shift):
+ xscale = float (chart_bounds[2]) / (max_x - x_shift)
+ else:
+ xscale = float (chart_bounds[2])
+ # If data_range is given, scale the chart so that the value range in
+ # data_range matches the chart bounds exactly.
+ # Otherwise, scale so that the actual data matches the chart bounds.
+ if data_range and (data_range[1] - data_range[0]):
+ yscale = float(chart_bounds[3]) / (data_range[1] - data_range[0])
+ ybase = data_range[0]
+ else:
+ yscale = float(chart_bounds[3]) / max_y
+ ybase = 0
+
+ first = transform_point_coords (data[0], x_shift, ybase, xscale, yscale, \
+ chart_bounds[0], chart_bounds[1])
+ last = transform_point_coords (data[-1], x_shift, ybase, xscale, yscale, \
+ chart_bounds[0], chart_bounds[1])
+
+ ctx.set_source_rgba(*color)
+ ctx.move_to(*first)
+ for point in data:
+ x, y = transform_point_coords (point, x_shift, ybase, xscale, yscale, \
+ chart_bounds[0], chart_bounds[1])
+ ctx.line_to(x, y)
+ if fill:
+ ctx.stroke_preserve()
+ ctx.line_to(last[0], chart_bounds[1]+chart_bounds[3])
+ ctx.line_to(first[0], chart_bounds[1]+chart_bounds[3])
+ ctx.line_to(first[0], first[1])
+ ctx.fill()
+ else:
+ ctx.stroke()
+ ctx.set_line_width(1.0)
bar_h = 55
meminfo_bar_h = 2 * bar_h
@@ -307,342 +327,469 @@ sec_w_base = 1 # the width of a second
proc_h = 16 # the height of a process
leg_s = 10
MIN_IMG_W = 800
-CUML_HEIGHT = 2000 # Increased value to accomodate CPU and I/O Graphs
+CUML_HEIGHT = 2000 # Increased value to accommodate CPU and I/O Graphs
OPTIONS = None
def extents(options, xscale, trace):
- start = min(trace.start.keys())
- end = start
-
- processes = 0
- for proc in trace.processes:
- if not options.app_options.show_all and \
- trace.processes[proc][1] - trace.processes[proc][0] < options.app_options.mintime:
- continue
-
- if trace.processes[proc][1] > end:
- end = trace.processes[proc][1]
- processes += 1
-
- if trace.min is not None and trace.max is not None:
- start = trace.min
- end = trace.max
-
- w = int ((end - start) * sec_w_base * xscale) + 2 * off_x
- h = proc_h * processes + header_h + 2 * off_y
-
- if options.charts:
- if trace.cpu_stats:
- h += 30 + bar_h
- if trace.disk_stats:
- h += 30 + bar_h
- if trace.monitor_disk:
- h += 30 + bar_h
- if trace.mem_stats:
- h += meminfo_bar_h
-
- return (w, h)
+ start = min(trace.start.keys())
+ end = start
+
+ processes = 0
+ for proc in trace.processes:
+ if not options.app_options.show_all and \
+ trace.processes[proc][1] - trace.processes[proc][0] < options.app_options.mintime:
+ continue
+
+ if trace.processes[proc][1] > end:
+ end = trace.processes[proc][1]
+ processes += 1
+
+ if trace.min is not None and trace.max is not None:
+ start = trace.min
+ end = trace.max
+
+ w = int ((end - start) * sec_w_base * xscale) + 2 * off_x
+ h = proc_h * processes + header_h + 2 * off_y
+
+ if options.charts:
+ if trace.cpu_stats:
+ h += 30 + bar_h
+ if trace.disk_stats:
+ h += 30 + bar_h
+ if trace.cpu_pressure:
+ h += 30 + bar_h
+ if trace.io_pressure:
+ h += 30 + bar_h
+ if trace.mem_pressure:
+ h += 30 + bar_h
+ if trace.monitor_disk:
+ h += 30 + bar_h
+ if trace.mem_stats:
+ h += meminfo_bar_h
+
+ # Allow for width of process legend and offset
+ if w < (720 + off_x):
+ w = 720 + off_x
+
+ return (w, h)
def clip_visible(clip, rect):
- xmax = max (clip[0], rect[0])
- ymax = max (clip[1], rect[1])
- xmin = min (clip[0] + clip[2], rect[0] + rect[2])
- ymin = min (clip[1] + clip[3], rect[1] + rect[3])
- return (xmin > xmax and ymin > ymax)
+ xmax = max (clip[0], rect[0])
+ ymax = max (clip[1], rect[1])
+ xmin = min (clip[0] + clip[2], rect[0] + rect[2])
+ ymin = min (clip[1] + clip[3], rect[1] + rect[3])
+ return (xmin > xmax and ymin > ymax)
def render_charts(ctx, options, clip, trace, curr_y, w, h, sec_w):
- proc_tree = options.proc_tree(trace)
-
- # render bar legend
- if trace.cpu_stats:
- ctx.set_font_size(LEGEND_FONT_SIZE)
-
- draw_legend_box(ctx, "CPU (user+sys)", CPU_COLOR, off_x, curr_y+20, leg_s)
- draw_legend_box(ctx, "I/O (wait)", IO_COLOR, off_x + 120, curr_y+20, leg_s)
-
- # render I/O wait
- chart_rect = (off_x, curr_y+30, w, bar_h)
- if clip_visible (clip, chart_rect):
- draw_box_ticks (ctx, chart_rect, sec_w)
- draw_annotations (ctx, proc_tree, trace.times, chart_rect)
- draw_chart (ctx, IO_COLOR, True, chart_rect, \
- [(sample.time, sample.user + sample.sys + sample.io) for sample in trace.cpu_stats], \
- proc_tree, None)
- # render CPU load
- draw_chart (ctx, CPU_COLOR, True, chart_rect, \
- [(sample.time, sample.user + sample.sys) for sample in trace.cpu_stats], \
- proc_tree, None)
-
- curr_y = curr_y + 30 + bar_h
-
- # render second chart
- if trace.disk_stats:
- draw_legend_line(ctx, "Disk throughput", DISK_TPUT_COLOR, off_x, curr_y+20, leg_s)
- draw_legend_box(ctx, "Disk utilization", IO_COLOR, off_x + 120, curr_y+20, leg_s)
-
- # render I/O utilization
- chart_rect = (off_x, curr_y+30, w, bar_h)
- if clip_visible (clip, chart_rect):
- draw_box_ticks (ctx, chart_rect, sec_w)
- draw_annotations (ctx, proc_tree, trace.times, chart_rect)
- draw_chart (ctx, IO_COLOR, True, chart_rect, \
- [(sample.time, sample.util) for sample in trace.disk_stats], \
- proc_tree, None)
-
- # render disk throughput
- max_sample = max (trace.disk_stats, key = lambda s: s.tput)
- if clip_visible (clip, chart_rect):
- draw_chart (ctx, DISK_TPUT_COLOR, False, chart_rect, \
- [(sample.time, sample.tput) for sample in trace.disk_stats], \
- proc_tree, None)
-
- pos_x = off_x + ((max_sample.time - proc_tree.start_time) * w / proc_tree.duration)
-
- shift_x, shift_y = -20, 20
- if (pos_x < off_x + 245):
- shift_x, shift_y = 5, 40
-
- label = "%dMB/s" % round ((max_sample.tput) / 1024.0)
- draw_text (ctx, label, DISK_TPUT_COLOR, pos_x + shift_x, curr_y + shift_y)
-
- curr_y = curr_y + 30 + bar_h
-
- # render disk space usage
- #
- # Draws the amount of disk space used on each volume relative to the
- # lowest recorded amount. The graphs for each volume are stacked above
- # each other so that total disk usage is visible.
- if trace.monitor_disk:
- ctx.set_font_size(LEGEND_FONT_SIZE)
- # Determine set of volumes for which we have
- # information and the minimal amount of used disk
- # space for each. Currently samples are allowed to
- # not have a values for all volumes; drawing could be
- # made more efficient if that wasn't the case.
- volumes = set()
- min_used = {}
- for sample in trace.monitor_disk:
- for volume, used in sample.records.items():
- volumes.add(volume)
- if volume not in min_used or min_used[volume] > used:
- min_used[volume] = used
- volumes = sorted(list(volumes))
- disk_scale = 0
- for i, volume in enumerate(volumes):
- volume_scale = max([sample.records[volume] - min_used[volume]
- for sample in trace.monitor_disk
- if volume in sample.records])
- # Does not take length of volume name into account, but fixed offset
- # works okay in practice.
- draw_legend_box(ctx, '%s (max: %u MiB)' % (volume, volume_scale / 1024 / 1024),
- VOLUME_COLORS[i % len(VOLUME_COLORS)],
- off_x + i * 250, curr_y+20, leg_s)
- disk_scale += volume_scale
-
- # render used amount of disk space
- chart_rect = (off_x, curr_y+30, w, bar_h)
- if clip_visible (clip, chart_rect):
- draw_box_ticks (ctx, chart_rect, sec_w)
- draw_annotations (ctx, proc_tree, trace.times, chart_rect)
- for i in range(len(volumes), 0, -1):
- draw_chart (ctx, VOLUME_COLORS[(i - 1) % len(VOLUME_COLORS)], True, chart_rect, \
- [(sample.time,
- # Sum up used space of all volumes including the current one
- # so that the graphs appear as stacked on top of each other.
- reduce(lambda x,y: x+y,
- [sample.records[volume] - min_used[volume]
- for volume in volumes[0:i]
- if volume in sample.records],
- 0))
- for sample in trace.monitor_disk], \
- proc_tree, [0, disk_scale])
-
- curr_y = curr_y + 30 + bar_h
-
- # render mem usage
- chart_rect = (off_x, curr_y+30, w, meminfo_bar_h)
- mem_stats = trace.mem_stats
- if mem_stats and clip_visible (clip, chart_rect):
- mem_scale = max(sample.buffers for sample in mem_stats)
- draw_legend_box(ctx, "Mem cached (scale: %u MiB)" % (float(mem_scale) / 1024), MEM_CACHED_COLOR, off_x, curr_y+20, leg_s)
- draw_legend_box(ctx, "Used", MEM_USED_COLOR, off_x + 240, curr_y+20, leg_s)
- draw_legend_box(ctx, "Buffers", MEM_BUFFERS_COLOR, off_x + 360, curr_y+20, leg_s)
- draw_legend_line(ctx, "Swap (scale: %u MiB)" % max([(sample.swap)/1024 for sample in mem_stats]), \
- MEM_SWAP_COLOR, off_x + 480, curr_y+20, leg_s)
- draw_box_ticks(ctx, chart_rect, sec_w)
- draw_annotations(ctx, proc_tree, trace.times, chart_rect)
- draw_chart(ctx, MEM_BUFFERS_COLOR, True, chart_rect, \
- [(sample.time, sample.buffers) for sample in trace.mem_stats], \
- proc_tree, [0, mem_scale])
- draw_chart(ctx, MEM_USED_COLOR, True, chart_rect, \
- [(sample.time, sample.used) for sample in mem_stats], \
- proc_tree, [0, mem_scale])
- draw_chart(ctx, MEM_CACHED_COLOR, True, chart_rect, \
- [(sample.time, sample.cached) for sample in mem_stats], \
- proc_tree, [0, mem_scale])
- draw_chart(ctx, MEM_SWAP_COLOR, False, chart_rect, \
- [(sample.time, float(sample.swap)) for sample in mem_stats], \
- proc_tree, None)
-
- curr_y = curr_y + meminfo_bar_h
-
- return curr_y
-
-def render_processes_chart(ctx, options, trace, curr_y, w, h, sec_w):
- chart_rect = [off_x, curr_y+header_h, w, h - 2 * off_y - header_h - leg_s + proc_h]
-
- draw_legend_box (ctx, "Configure", \
- TASK_COLOR_CONFIGURE, off_x , curr_y + 45, leg_s)
- draw_legend_box (ctx, "Compile", \
- TASK_COLOR_COMPILE, off_x+120, curr_y + 45, leg_s)
- draw_legend_box (ctx, "Install", \
- TASK_COLOR_INSTALL, off_x+240, curr_y + 45, leg_s)
- draw_legend_box (ctx, "Populate Sysroot", \
- TASK_COLOR_SYSROOT, off_x+360, curr_y + 45, leg_s)
- draw_legend_box (ctx, "Package", \
- TASK_COLOR_PACKAGE, off_x+480, curr_y + 45, leg_s)
- draw_legend_box (ctx, "Package Write",
- TASK_COLOR_PACKAGE_WRITE, off_x+600, curr_y + 45, leg_s)
-
- ctx.set_font_size(PROC_TEXT_FONT_SIZE)
-
- draw_box_ticks(ctx, chart_rect, sec_w)
- draw_sec_labels(ctx, options, chart_rect, sec_w, 30)
-
- y = curr_y+header_h
-
- offset = trace.min or min(trace.start.keys())
- for s in sorted(trace.start.keys()):
- for val in sorted(trace.start[s]):
- if not options.app_options.show_all and \
- trace.processes[val][1] - s < options.app_options.mintime:
- continue
- task = val.split(":")[1]
- #print val
- #print trace.processes[val][1]
- #print s
- x = chart_rect[0] + (s - offset) * sec_w
- w = ((trace.processes[val][1] - s) * sec_w)
-
- #print "proc at %s %s %s %s" % (x, y, w, proc_h)
- col = None
- if task == "do_compile":
- col = TASK_COLOR_COMPILE
- elif task == "do_configure":
- col = TASK_COLOR_CONFIGURE
- elif task == "do_install":
- col = TASK_COLOR_INSTALL
- elif task == "do_populate_sysroot":
- col = TASK_COLOR_SYSROOT
- elif task == "do_package":
- col = TASK_COLOR_PACKAGE
- elif task == "do_package_write_rpm" or \
+ proc_tree = options.proc_tree(trace)
+
+ # render bar legend
+ if trace.cpu_stats:
+ ctx.set_font_size(LEGEND_FONT_SIZE)
+
+ draw_legend_box(ctx, "CPU (user+sys)", CPU_COLOR, off_x, curr_y+20, leg_s)
+ draw_legend_box(ctx, "I/O (wait)", IO_COLOR, off_x + 120, curr_y+20, leg_s)
+
+ # render I/O wait
+ chart_rect = (off_x, curr_y+30, w, bar_h)
+ if clip_visible (clip, chart_rect):
+ draw_box_ticks (ctx, chart_rect, sec_w)
+ draw_annotations (ctx, proc_tree, trace.times, chart_rect)
+ draw_chart (ctx, IO_COLOR, True, chart_rect, \
+ [(sample.time, sample.user + sample.sys + sample.io) for sample in trace.cpu_stats], \
+ proc_tree, None)
+ # render CPU load
+ draw_chart (ctx, CPU_COLOR, True, chart_rect, \
+ [(sample.time, sample.user + sample.sys) for sample in trace.cpu_stats], \
+ proc_tree, None)
+
+ curr_y = curr_y + 30 + bar_h
+
+ # render second chart
+ if trace.disk_stats:
+ draw_legend_line(ctx, "Disk throughput", DISK_TPUT_COLOR, off_x, curr_y+20, leg_s)
+ draw_legend_box(ctx, "Disk utilization", IO_COLOR, off_x + 120, curr_y+20, leg_s)
+
+ # render I/O utilization
+ chart_rect = (off_x, curr_y+30, w, bar_h)
+ if clip_visible (clip, chart_rect):
+ draw_box_ticks (ctx, chart_rect, sec_w)
+ draw_annotations (ctx, proc_tree, trace.times, chart_rect)
+ draw_chart (ctx, IO_COLOR, True, chart_rect, \
+ [(sample.time, sample.util) for sample in trace.disk_stats], \
+ proc_tree, None)
+
+ # render disk throughput
+ max_sample = max (trace.disk_stats, key = lambda s: s.tput)
+ if clip_visible (clip, chart_rect):
+ draw_chart (ctx, DISK_TPUT_COLOR, False, chart_rect, \
+ [(sample.time, sample.tput) for sample in trace.disk_stats], \
+ proc_tree, None)
+
+ pos_x = off_x + ((max_sample.time - proc_tree.start_time) * w / proc_tree.duration)
+
+ shift_x, shift_y = -20, 20
+ if (pos_x < off_x + 245):
+ shift_x, shift_y = 5, 40
+
+ label = "%dMB/s" % round ((max_sample.tput) / 1024.0)
+ draw_text (ctx, label, DISK_TPUT_COLOR, pos_x + shift_x, curr_y + shift_y)
+
+ curr_y = curr_y + 30 + bar_h
+
+ # render CPU pressure chart
+ if trace.cpu_pressure:
+ max_sample_avg = max (trace.cpu_pressure, key = lambda s: s.avg10)
+ max_sample_total = max (trace.cpu_pressure, key = lambda s: s.deltaTotal)
+ draw_legend_line(ctx, "avg10 CPU Pressure (max %d%%)" % (max_sample_avg.avg10), CPU_PRESSURE_AVG10_COLOR, off_x, curr_y+20, leg_s)
+ draw_legend_box(ctx, "delta total CPU Pressure (max %d)" % (max_sample_total.deltaTotal), CPU_PRESSURE_TOTAL_COLOR, off_x + 240, curr_y+20, leg_s)
+
+ # render delta total cpu
+ chart_rect = (off_x, curr_y+30, w, bar_h)
+ if clip_visible (clip, chart_rect):
+ draw_box_ticks (ctx, chart_rect, sec_w)
+ draw_annotations (ctx, proc_tree, trace.times, chart_rect)
+ draw_chart (ctx, CPU_PRESSURE_TOTAL_COLOR, True, chart_rect, \
+ [(sample.time, sample.deltaTotal) for sample in trace.cpu_pressure], \
+ proc_tree, None)
+
+ # render avg10 cpu
+ if clip_visible (clip, chart_rect):
+ draw_chart (ctx, CPU_PRESSURE_AVG10_COLOR, False, chart_rect, \
+ [(sample.time, sample.avg10) for sample in trace.cpu_pressure], \
+ proc_tree, None)
+
+ pos_x = off_x + ((max_sample_avg.time - proc_tree.start_time) * w / proc_tree.duration)
+
+ shift_x, shift_y = -20, 20
+ if (pos_x < off_x + 245):
+ shift_x, shift_y = 5, 40
+
+
+ label = "%d%%" % (max_sample_avg.avg10)
+ draw_text (ctx, label, CPU_PRESSURE_AVG10_COLOR, pos_x + shift_x, curr_y + shift_y)
+
+ curr_y = curr_y + 30 + bar_h
+
+ # render I/O pressure chart
+ if trace.io_pressure:
+ max_sample_avg = max (trace.io_pressure, key = lambda s: s.avg10)
+ max_sample_total = max (trace.io_pressure, key = lambda s: s.deltaTotal)
+ draw_legend_line(ctx, "avg10 I/O Pressure (max %d%%)" % (max_sample_avg.avg10), IO_PRESSURE_AVG10_COLOR, off_x, curr_y+20, leg_s)
+ draw_legend_box(ctx, "delta total I/O Pressure (max %d)" % (max_sample_total.deltaTotal), IO_PRESSURE_TOTAL_COLOR, off_x + 240, curr_y+20, leg_s)
+
+ # render delta total io
+ chart_rect = (off_x, curr_y+30, w, bar_h)
+ if clip_visible (clip, chart_rect):
+ draw_box_ticks (ctx, chart_rect, sec_w)
+ draw_annotations (ctx, proc_tree, trace.times, chart_rect)
+ draw_chart (ctx, IO_PRESSURE_TOTAL_COLOR, True, chart_rect, \
+ [(sample.time, sample.deltaTotal) for sample in trace.io_pressure], \
+ proc_tree, None)
+
+ # render avg10 io
+ if clip_visible (clip, chart_rect):
+ draw_chart (ctx, IO_PRESSURE_AVG10_COLOR, False, chart_rect, \
+ [(sample.time, sample.avg10) for sample in trace.io_pressure], \
+ proc_tree, None)
+
+ pos_x = off_x + ((max_sample_avg.time - proc_tree.start_time) * w / proc_tree.duration)
+
+ shift_x, shift_y = -20, 20
+ if (pos_x < off_x + 245):
+ shift_x, shift_y = 5, 40
+
+ label = "%d%%" % (max_sample_avg.avg10)
+ draw_text (ctx, label, IO_PRESSURE_AVG10_COLOR, pos_x + shift_x, curr_y + shift_y)
+
+ curr_y = curr_y + 30 + bar_h
+
+ # render MEM pressure chart
+ if trace.mem_pressure:
+ max_sample_avg = max (trace.mem_pressure, key = lambda s: s.avg10)
+ max_sample_total = max (trace.mem_pressure, key = lambda s: s.deltaTotal)
+ draw_legend_line(ctx, "avg10 MEM Pressure (max %d%%)" % (max_sample_avg.avg10), MEM_PRESSURE_AVG10_COLOR, off_x, curr_y+20, leg_s)
+ draw_legend_box(ctx, "delta total MEM Pressure (max %d)" % (max_sample_total.deltaTotal), MEM_PRESSURE_TOTAL_COLOR, off_x + 240, curr_y+20, leg_s)
+
+ # render delta total mem
+ chart_rect = (off_x, curr_y+30, w, bar_h)
+ if clip_visible (clip, chart_rect):
+ draw_box_ticks (ctx, chart_rect, sec_w)
+ draw_annotations (ctx, proc_tree, trace.times, chart_rect)
+ draw_chart (ctx, MEM_PRESSURE_TOTAL_COLOR, True, chart_rect, \
+ [(sample.time, sample.deltaTotal) for sample in trace.mem_pressure], \
+ proc_tree, None)
+
+ # render avg10 mem
+ if clip_visible (clip, chart_rect):
+ draw_chart (ctx, MEM_PRESSURE_AVG10_COLOR, False, chart_rect, \
+ [(sample.time, sample.avg10) for sample in trace.mem_pressure], \
+ proc_tree, None)
+
+ pos_x = off_x + ((max_sample_avg.time - proc_tree.start_time) * w / proc_tree.duration)
+
+ shift_x, shift_y = -20, 20
+ if (pos_x < off_x + 245):
+ shift_x, shift_y = 5, 40
+
+ label = "%d%%" % (max_sample_avg.avg10)
+ draw_text (ctx, label, MEM_PRESSURE_AVG10_COLOR, pos_x + shift_x, curr_y + shift_y)
+
+ curr_y = curr_y + 30 + bar_h
+
+ # render disk space usage
+ #
+ # Draws the amount of disk space used on each volume relative to the
+ # lowest recorded amount. The graphs for each volume are stacked above
+ # each other so that total disk usage is visible.
+ if trace.monitor_disk:
+ ctx.set_font_size(LEGEND_FONT_SIZE)
+ # Determine set of volumes for which we have
+ # information and the minimal amount of used disk
+ # space for each. Currently samples are allowed to
+ # not have values for all volumes; drawing could be
+ # made more efficient if that wasn't the case.
+ volumes = set()
+ min_used = {}
+ for sample in trace.monitor_disk:
+ for volume, used in sample.records.items():
+ volumes.add(volume)
+ if volume not in min_used or min_used[volume] > used:
+ min_used[volume] = used
+ volumes = sorted(list(volumes))
+ disk_scale = 0
+ for i, volume in enumerate(volumes):
+ volume_scale = max([sample.records[volume] - min_used[volume]
+ for sample in trace.monitor_disk
+ if volume in sample.records])
+ # Does not take length of volume name into account, but fixed offset
+ # works okay in practice.
+ draw_legend_box(ctx, '%s (max: %u MiB)' % (volume, volume_scale / 1024 / 1024),
+ VOLUME_COLORS[i % len(VOLUME_COLORS)],
+ off_x + i * 250, curr_y+20, leg_s)
+ disk_scale += volume_scale
+
+ # render used amount of disk space
+ chart_rect = (off_x, curr_y+30, w, bar_h)
+ if clip_visible (clip, chart_rect):
+ draw_box_ticks (ctx, chart_rect, sec_w)
+ draw_annotations (ctx, proc_tree, trace.times, chart_rect)
+ for i in range(len(volumes), 0, -1):
+ draw_chart (ctx, VOLUME_COLORS[(i - 1) % len(VOLUME_COLORS)], True, chart_rect, \
+ [(sample.time,
+ # Sum up used space of all volumes including the current one
+ # so that the graphs appear as stacked on top of each other.
+ functools.reduce(lambda x,y: x+y,
+ [sample.records[volume] - min_used[volume]
+ for volume in volumes[0:i]
+ if volume in sample.records],
+ 0))
+ for sample in trace.monitor_disk], \
+ proc_tree, [0, disk_scale])
+
+ curr_y = curr_y + 30 + bar_h
+
+ # render mem usage
+ chart_rect = (off_x, curr_y+30, w, meminfo_bar_h)
+ mem_stats = trace.mem_stats
+ if mem_stats and clip_visible (clip, chart_rect):
+ mem_scale = max(sample.buffers for sample in mem_stats)
+ draw_legend_box(ctx, "Mem cached (scale: %u MiB)" % (float(mem_scale) / 1024), MEM_CACHED_COLOR, off_x, curr_y+20, leg_s)
+ draw_legend_box(ctx, "Used", MEM_USED_COLOR, off_x + 240, curr_y+20, leg_s)
+ draw_legend_box(ctx, "Buffers", MEM_BUFFERS_COLOR, off_x + 360, curr_y+20, leg_s)
+ draw_legend_line(ctx, "Swap (scale: %u MiB)" % max([(sample.swap)/1024 for sample in mem_stats]), \
+ MEM_SWAP_COLOR, off_x + 480, curr_y+20, leg_s)
+ draw_box_ticks(ctx, chart_rect, sec_w)
+ draw_annotations(ctx, proc_tree, trace.times, chart_rect)
+ draw_chart(ctx, MEM_BUFFERS_COLOR, True, chart_rect, \
+ [(sample.time, sample.buffers) for sample in trace.mem_stats], \
+ proc_tree, [0, mem_scale])
+ draw_chart(ctx, MEM_USED_COLOR, True, chart_rect, \
+ [(sample.time, sample.used) for sample in mem_stats], \
+ proc_tree, [0, mem_scale])
+ draw_chart(ctx, MEM_CACHED_COLOR, True, chart_rect, \
+ [(sample.time, sample.cached) for sample in mem_stats], \
+ proc_tree, [0, mem_scale])
+ draw_chart(ctx, MEM_SWAP_COLOR, False, chart_rect, \
+ [(sample.time, float(sample.swap)) for sample in mem_stats], \
+ proc_tree, None)
+
+ curr_y = curr_y + meminfo_bar_h
+
+ return curr_y
+
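The stacked disk-usage rendering above draws, for each i from the last volume
down to the first, the running sum over volumes[0:i], so the curves appear as
bands stacked on top of each other. A minimal runnable sketch of that summing
step, using hypothetical sample data:

    import functools

    min_used = {"/": 100, "/home": 50}          # hypothetical per-volume minima
    sample_records = {"/": 160, "/home": 80}    # one hypothetical sample
    volumes = sorted(min_used)

    for i in range(len(volumes), 0, -1):
        total = functools.reduce(
            lambda x, y: x + y,
            [sample_records[v] - min_used[v]
             for v in volumes[0:i] if v in sample_records],
            0)
        print(volumes[0:i], total)   # ['/', '/home'] 90, then ['/'] 60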
+def render_processes_chart(ctx, options, trace, curr_y, width, h, sec_w):
+ chart_rect = [off_x, curr_y+header_h, width, h - curr_y - 1 * off_y - header_h ]
+
+ draw_legend_box (ctx, "Configure", \
+ TASK_COLOR_CONFIGURE, off_x , curr_y + 45, leg_s)
+ draw_legend_box (ctx, "Compile", \
+ TASK_COLOR_COMPILE, off_x+120, curr_y + 45, leg_s)
+ draw_legend_box (ctx, "Install", \
+ TASK_COLOR_INSTALL, off_x+240, curr_y + 45, leg_s)
+ draw_legend_box (ctx, "Populate Sysroot", \
+ TASK_COLOR_SYSROOT, off_x+360, curr_y + 45, leg_s)
+ draw_legend_box (ctx, "Package", \
+ TASK_COLOR_PACKAGE, off_x+480, curr_y + 45, leg_s)
+ draw_legend_box (ctx, "Package Write", \
+ TASK_COLOR_PACKAGE_WRITE, off_x+600, curr_y + 45, leg_s)
+
+ ctx.set_font_size(PROC_TEXT_FONT_SIZE)
+
+ draw_box_ticks(ctx, chart_rect, sec_w)
+ draw_sec_labels(ctx, options, chart_rect, sec_w, 30)
+
+ y = curr_y+header_h
+
+ offset = trace.min or min(trace.start.keys())
+ for start in sorted(trace.start.keys()):
+ for process in sorted(trace.start[start]):
+ elapsed_time = trace.processes[process][1] - start
+ if not options.app_options.show_all and \
+ elapsed_time < options.app_options.mintime:
+ continue
+ task = process.split(":")[1]
+
+ #print(process)
+ #print(trace.processes[process][1])
+ #print(s)
+
+ x = chart_rect[0] + (start - offset) * sec_w
+ w = elapsed_time * sec_w
+
+ def set_alpha(color, alpha):
+ clist = list(color)
+ clist[-1] = alpha
+ return tuple(clist)
+
+ #print("proc at %s %s %s %s" % (x, y, w, proc_h))
+ col = None
+ if task == "do_compile":
+ col = TASK_COLOR_COMPILE
+ elif "do_compile" in task:
+ col = set_alpha(TASK_COLOR_COMPILE, 0.25)
+ elif task == "do_configure":
+ col = TASK_COLOR_CONFIGURE
+ elif "do_configure" in task:
+ col = set_alpha(TASK_COLOR_CONFIGURE, 0.25)
+ elif task == "do_install":
+ col = TASK_COLOR_INSTALL
+ elif task == "do_populate_sysroot":
+ col = TASK_COLOR_SYSROOT
+ elif task == "do_package":
+ col = TASK_COLOR_PACKAGE
+ elif task == "do_package_write_rpm" or \
task == "do_package_write_deb" or \
task == "do_package_write_ipk":
- col = TASK_COLOR_PACKAGE_WRITE
- else:
- col = WHITE
+ col = TASK_COLOR_PACKAGE_WRITE
+ else:
+ col = WHITE
+
+ if col:
+ draw_fill_rect(ctx, col, (x, y, w, proc_h))
+ draw_rect(ctx, PROC_BORDER_COLOR, (x, y, w, proc_h))
- if col:
- draw_fill_rect(ctx, col, (x, y, w, proc_h))
- draw_rect(ctx, PROC_BORDER_COLOR, (x, y, w, proc_h))
+ # Show elapsed time for each task
+ process = "%ds %s" % (elapsed_time, process)
+ draw_label_in_box(ctx, PROC_TEXT_COLOR, process, x, y + proc_h - 4, w, width)
- draw_label_in_box(ctx, PROC_TEXT_COLOR, val, x, y + proc_h - 4, w, proc_h)
- y = y + proc_h
+ y = y + proc_h
- return curr_y
+ return curr_y
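For reference, the set_alpha() helper defined above simply replaces the alpha
channel of an RGBA tuple, so that variant tasks (e.g. a hypothetical
"do_compile_ptest_base") render as faded versions of the base task colour. A
standalone sketch with a hypothetical colour value:

    TASK_COLOR_COMPILE = (0.5, 1.0, 0.5, 1.0)   # hypothetical RGBA value

    def set_alpha(color, alpha):
        clist = list(color)
        clist[-1] = alpha
        return tuple(clist)

    print(set_alpha(TASK_COLOR_COMPILE, 0.25))  # (0.5, 1.0, 0.5, 0.25)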
#
# Render the chart.
#
def render(ctx, options, xscale, trace):
- (w, h) = extents (options, xscale, trace)
- global OPTIONS
- OPTIONS = options.app_options
+ (w, h) = extents (options, xscale, trace)
+ global OPTIONS
+ OPTIONS = options.app_options
- # x, y, w, h
- clip = ctx.clip_extents()
+ # x, y, w, h
+ clip = ctx.clip_extents()
- sec_w = int (xscale * sec_w_base)
- ctx.set_line_width(1.0)
- ctx.select_font_face(FONT_NAME)
- draw_fill_rect(ctx, WHITE, (0, 0, max(w, MIN_IMG_W), h))
- w -= 2*off_x
- curr_y = off_y;
+ sec_w = int (xscale * sec_w_base)
+ ctx.set_line_width(1.0)
+ ctx.select_font_face(FONT_NAME)
+ draw_fill_rect(ctx, WHITE, (0, 0, max(w, MIN_IMG_W), h))
+ w -= 2*off_x
+ curr_y = off_y;
- if options.charts:
- curr_y = render_charts (ctx, options, clip, trace, curr_y, w, h, sec_w)
+ if options.charts:
+ curr_y = render_charts (ctx, options, clip, trace, curr_y, w, h, sec_w)
- curr_y = render_processes_chart (ctx, options, trace, curr_y, w, h, sec_w)
+ curr_y = render_processes_chart (ctx, options, trace, curr_y, w, h, sec_w)
- return
+ return
- proc_tree = options.proc_tree (trace)
+ proc_tree = options.proc_tree (trace)
- # draw the title and headers
- if proc_tree.idle:
- duration = proc_tree.idle
- else:
- duration = proc_tree.duration
+ # draw the title and headers
+ if proc_tree.idle:
+ duration = proc_tree.idle
+ else:
+ duration = proc_tree.duration
- if not options.kernel_only:
- curr_y = draw_header (ctx, trace.headers, duration)
- else:
- curr_y = off_y;
+ if not options.kernel_only:
+ curr_y = draw_header (ctx, trace.headers, duration)
+ else:
+ curr_y = off_y;
- # draw process boxes
- proc_height = h
- if proc_tree.taskstats and options.cumulative:
- proc_height -= CUML_HEIGHT
+ # draw process boxes
+ proc_height = h
+ if proc_tree.taskstats and options.cumulative:
+ proc_height -= CUML_HEIGHT
- draw_process_bar_chart(ctx, clip, options, proc_tree, trace.times,
- curr_y, w, proc_height, sec_w)
+ draw_process_bar_chart(ctx, clip, options, proc_tree, trace.times,
+ curr_y, w, proc_height, sec_w)
- curr_y = proc_height
- ctx.set_font_size(SIG_FONT_SIZE)
- draw_text(ctx, SIGNATURE, SIG_COLOR, off_x + 5, proc_height - 8)
+ curr_y = proc_height
+ ctx.set_font_size(SIG_FONT_SIZE)
+ draw_text(ctx, SIGNATURE, SIG_COLOR, off_x + 5, proc_height - 8)
- # draw a cumulative CPU-time-per-process graph
- if proc_tree.taskstats and options.cumulative:
- cuml_rect = (off_x, curr_y + off_y, w, CUML_HEIGHT/2 - off_y * 2)
- if clip_visible (clip, cuml_rect):
- draw_cuml_graph(ctx, proc_tree, cuml_rect, duration, sec_w, STAT_TYPE_CPU)
+ # draw a cumulative CPU-time-per-process graph
+ if proc_tree.taskstats and options.cumulative:
+ cuml_rect = (off_x, curr_y + off_y, w, CUML_HEIGHT/2 - off_y * 2)
+ if clip_visible (clip, cuml_rect):
+ draw_cuml_graph(ctx, proc_tree, cuml_rect, duration, sec_w, STAT_TYPE_CPU)
- # draw a cumulative I/O-time-per-process graph
- if proc_tree.taskstats and options.cumulative:
- cuml_rect = (off_x, curr_y + off_y * 100, w, CUML_HEIGHT/2 - off_y * 2)
- if clip_visible (clip, cuml_rect):
- draw_cuml_graph(ctx, proc_tree, cuml_rect, duration, sec_w, STAT_TYPE_IO)
+ # draw a cumulative I/O-time-per-process graph
+ if proc_tree.taskstats and options.cumulative:
+ cuml_rect = (off_x, curr_y + off_y * 100, w, CUML_HEIGHT/2 - off_y * 2)
+ if clip_visible (clip, cuml_rect):
+ draw_cuml_graph(ctx, proc_tree, cuml_rect, duration, sec_w, STAT_TYPE_IO)
def draw_process_bar_chart(ctx, clip, options, proc_tree, times, curr_y, w, h, sec_w):
- header_size = 0
- if not options.kernel_only:
- draw_legend_box (ctx, "Running (%cpu)",
- PROC_COLOR_R, off_x , curr_y + 45, leg_s)
- draw_legend_box (ctx, "Unint.sleep (I/O)",
- PROC_COLOR_D, off_x+120, curr_y + 45, leg_s)
- draw_legend_box (ctx, "Sleeping",
- PROC_COLOR_S, off_x+240, curr_y + 45, leg_s)
- draw_legend_box (ctx, "Zombie",
- PROC_COLOR_Z, off_x+360, curr_y + 45, leg_s)
- header_size = 45
-
- chart_rect = [off_x, curr_y + header_size + 15,
- w, h - 2 * off_y - (curr_y + header_size + 15) + proc_h]
- ctx.set_font_size (PROC_TEXT_FONT_SIZE)
-
- draw_box_ticks (ctx, chart_rect, sec_w)
- if sec_w > 100:
- nsec = 1
- else:
- nsec = 5
- draw_sec_labels (ctx, options, chart_rect, sec_w, nsec)
- draw_annotations (ctx, proc_tree, times, chart_rect)
-
- y = curr_y + 60
- for root in proc_tree.process_tree:
- draw_processes_recursively(ctx, root, proc_tree, y, proc_h, chart_rect, clip)
- y = y + proc_h * proc_tree.num_nodes([root])
+ header_size = 0
+ if not options.kernel_only:
+ draw_legend_box (ctx, "Running (%cpu)",
+ PROC_COLOR_R, off_x , curr_y + 45, leg_s)
+ draw_legend_box (ctx, "Unint.sleep (I/O)",
+ PROC_COLOR_D, off_x+120, curr_y + 45, leg_s)
+ draw_legend_box (ctx, "Sleeping",
+ PROC_COLOR_S, off_x+240, curr_y + 45, leg_s)
+ draw_legend_box (ctx, "Zombie",
+ PROC_COLOR_Z, off_x+360, curr_y + 45, leg_s)
+ header_size = 45
+
+ chart_rect = [off_x, curr_y + header_size + 15,
+ w, h - 2 * off_y - (curr_y + header_size + 15) + proc_h]
+ ctx.set_font_size (PROC_TEXT_FONT_SIZE)
+
+ draw_box_ticks (ctx, chart_rect, sec_w)
+ if sec_w > 100:
+ nsec = 1
+ else:
+ nsec = 5
+ draw_sec_labels (ctx, options, chart_rect, sec_w, nsec)
+ draw_annotations (ctx, proc_tree, times, chart_rect)
+
+ y = curr_y + 60
+ for root in proc_tree.process_tree:
+ draw_processes_recursively(ctx, root, proc_tree, y, proc_h, chart_rect, clip)
+ y = y + proc_h * proc_tree.num_nodes([root])
def draw_header (ctx, headers, duration):
@@ -678,291 +825,291 @@ def draw_header (ctx, headers, duration):
return header_y
def draw_processes_recursively(ctx, proc, proc_tree, y, proc_h, rect, clip) :
- x = rect[0] + ((proc.start_time - proc_tree.start_time) * rect[2] / proc_tree.duration)
- w = ((proc.duration) * rect[2] / proc_tree.duration)
-
- draw_process_activity_colors(ctx, proc, proc_tree, x, y, w, proc_h, rect, clip)
- draw_rect(ctx, PROC_BORDER_COLOR, (x, y, w, proc_h))
- ipid = int(proc.pid)
- if not OPTIONS.show_all:
- cmdString = proc.cmd
- else:
- cmdString = ''
- if (OPTIONS.show_pid or OPTIONS.show_all) and ipid is not 0:
- cmdString = cmdString + " [" + str(ipid // 1000) + "]"
- if OPTIONS.show_all:
- if proc.args:
- cmdString = cmdString + " '" + "' '".join(proc.args) + "'"
- else:
- cmdString = cmdString + " " + proc.exe
-
- draw_label_in_box(ctx, PROC_TEXT_COLOR, cmdString, x, y + proc_h - 4, w, rect[0] + rect[2])
-
- next_y = y + proc_h
- for child in proc.child_list:
- if next_y > clip[1] + clip[3]:
- break
- child_x, child_y = draw_processes_recursively(ctx, child, proc_tree, next_y, proc_h, rect, clip)
- draw_process_connecting_lines(ctx, x, y, child_x, child_y, proc_h)
- next_y = next_y + proc_h * proc_tree.num_nodes([child])
-
- return x, y
+ x = rect[0] + ((proc.start_time - proc_tree.start_time) * rect[2] / proc_tree.duration)
+ w = ((proc.duration) * rect[2] / proc_tree.duration)
+
+ draw_process_activity_colors(ctx, proc, proc_tree, x, y, w, proc_h, rect, clip)
+ draw_rect(ctx, PROC_BORDER_COLOR, (x, y, w, proc_h))
+ ipid = int(proc.pid)
+ if not OPTIONS.show_all:
+ cmdString = proc.cmd
+ else:
+ cmdString = ''
+ if (OPTIONS.show_pid or OPTIONS.show_all) and ipid != 0:
+ cmdString = cmdString + " [" + str(ipid // 1000) + "]"
+ if OPTIONS.show_all:
+ if proc.args:
+ cmdString = cmdString + " '" + "' '".join(proc.args) + "'"
+ else:
+ cmdString = cmdString + " " + proc.exe
+
+ draw_label_in_box(ctx, PROC_TEXT_COLOR, cmdString, x, y + proc_h - 4, w, rect[0] + rect[2])
+
+ next_y = y + proc_h
+ for child in proc.child_list:
+ if next_y > clip[1] + clip[3]:
+ break
+ child_x, child_y = draw_processes_recursively(ctx, child, proc_tree, next_y, proc_h, rect, clip)
+ draw_process_connecting_lines(ctx, x, y, child_x, child_y, proc_h)
+ next_y = next_y + proc_h * proc_tree.num_nodes([child])
+
+ return x, y
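The coordinate expression used here and elsewhere in draw.py is a plain linear
map from trace time onto the chart rectangle. As a standalone sketch:

    # A time t within [start_time, start_time + duration] lands
    # proportionally within [rect_x, rect_x + rect_w].
    def time_to_x(t, rect_x, rect_w, start_time, duration):
        return rect_x + (t - start_time) * rect_w / duration

    print(time_to_x(150, 10, 800, 100, 1000))   # 50.0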
def draw_process_activity_colors(ctx, proc, proc_tree, x, y, w, proc_h, rect, clip):
- if y > clip[1] + clip[3] or y + proc_h + 2 < clip[1]:
- return
+ if y > clip[1] + clip[3] or y + proc_h + 2 < clip[1]:
+ return
- draw_fill_rect(ctx, PROC_COLOR_S, (x, y, w, proc_h))
+ draw_fill_rect(ctx, PROC_COLOR_S, (x, y, w, proc_h))
- last_tx = -1
- for sample in proc.samples :
- tx = rect[0] + round(((sample.time - proc_tree.start_time) * rect[2] / proc_tree.duration))
+ last_tx = -1
+ for sample in proc.samples :
+ tx = rect[0] + round(((sample.time - proc_tree.start_time) * rect[2] / proc_tree.duration))
- # samples are sorted chronologically
- if tx < clip[0]:
- continue
- if tx > clip[0] + clip[2]:
- break
+ # samples are sorted chronologically
+ if tx < clip[0]:
+ continue
+ if tx > clip[0] + clip[2]:
+ break
- tw = round(proc_tree.sample_period * rect[2] / float(proc_tree.duration))
- if last_tx != -1 and abs(last_tx - tx) <= tw:
- tw -= last_tx - tx
- tx = last_tx
- tw = max (tw, 1) # nice to see at least something
+ tw = round(proc_tree.sample_period * rect[2] / float(proc_tree.duration))
+ if last_tx != -1 and abs(last_tx - tx) <= tw:
+ tw -= last_tx - tx
+ tx = last_tx
+ tw = max (tw, 1) # nice to see at least something
- last_tx = tx + tw
- state = get_proc_state( sample.state )
+ last_tx = tx + tw
+ state = get_proc_state( sample.state )
- color = STATE_COLORS[state]
- if state == STATE_RUNNING:
- alpha = min (sample.cpu_sample.user + sample.cpu_sample.sys, 1.0)
- color = tuple(list(PROC_COLOR_R[0:3]) + [alpha])
-# print "render time %d [ tx %d tw %d ], sample state %s color %s alpha %g" % (sample.time, tx, tw, state, color, alpha)
- elif state == STATE_SLEEPING:
- continue
+ color = STATE_COLORS[state]
+ if state == STATE_RUNNING:
+ alpha = min (sample.cpu_sample.user + sample.cpu_sample.sys, 1.0)
+ color = tuple(list(PROC_COLOR_R[0:3]) + [alpha])
+# print "render time %d [ tx %d tw %d ], sample state %s color %s alpha %g" % (sample.time, tx, tw, state, color, alpha)
+ elif state == STATE_SLEEPING:
+ continue
- draw_fill_rect(ctx, color, (tx, y, tw, proc_h))
+ draw_fill_rect(ctx, color, (tx, y, tw, proc_h))
def draw_process_connecting_lines(ctx, px, py, x, y, proc_h):
- ctx.set_source_rgba(*DEP_COLOR)
- ctx.set_dash([2, 2])
- if abs(px - x) < 3:
- dep_off_x = 3
- dep_off_y = proc_h / 4
- ctx.move_to(x, y + proc_h / 2)
- ctx.line_to(px - dep_off_x, y + proc_h / 2)
- ctx.line_to(px - dep_off_x, py - dep_off_y)
- ctx.line_to(px, py - dep_off_y)
- else:
- ctx.move_to(x, y + proc_h / 2)
- ctx.line_to(px, y + proc_h / 2)
- ctx.line_to(px, py)
- ctx.stroke()
- ctx.set_dash([])
+ ctx.set_source_rgba(*DEP_COLOR)
+ ctx.set_dash([2, 2])
+ if abs(px - x) < 3:
+ dep_off_x = 3
+ dep_off_y = proc_h / 4
+ ctx.move_to(x, y + proc_h / 2)
+ ctx.line_to(px - dep_off_x, y + proc_h / 2)
+ ctx.line_to(px - dep_off_x, py - dep_off_y)
+ ctx.line_to(px, py - dep_off_y)
+ else:
+ ctx.move_to(x, y + proc_h / 2)
+ ctx.line_to(px, y + proc_h / 2)
+ ctx.line_to(px, py)
+ ctx.stroke()
+ ctx.set_dash([])
# elide the bootchart collector - it is quite distorting
def elide_bootchart(proc):
- return proc.cmd == 'bootchartd' or proc.cmd == 'bootchart-colle'
+ return proc.cmd == 'bootchartd' or proc.cmd == 'bootchart-colle'
class CumlSample:
- def __init__(self, proc):
- self.cmd = proc.cmd
- self.samples = []
- self.merge_samples (proc)
- self.color = None
-
- def merge_samples(self, proc):
- self.samples.extend (proc.samples)
- self.samples.sort (key = lambda p: p.time)
-
- def next(self):
- global palette_idx
- palette_idx += HSV_STEP
- return palette_idx
-
- def get_color(self):
- if self.color is None:
- i = self.next() % HSV_MAX_MOD
- h = 0.0
- if i is not 0:
- h = (1.0 * i) / HSV_MAX_MOD
- s = 0.5
- v = 1.0
- c = colorsys.hsv_to_rgb (h, s, v)
- self.color = (c[0], c[1], c[2], 1.0)
- return self.color
+ def __init__(self, proc):
+ self.cmd = proc.cmd
+ self.samples = []
+ self.merge_samples (proc)
+ self.color = None
+
+ def merge_samples(self, proc):
+ self.samples.extend (proc.samples)
+ self.samples.sort (key = lambda p: p.time)
+
+ def next(self):
+ global palette_idx
+ palette_idx += HSV_STEP
+ return palette_idx
+
+ def get_color(self):
+ if self.color is None:
+ i = self.next() % HSV_MAX_MOD
+ h = 0.0
+ if i != 0:
+ h = (1.0 * i) / HSV_MAX_MOD
+ s = 0.5
+ v = 1.0
+ c = colorsys.hsv_to_rgb (h, s, v)
+ self.color = (c[0], c[1], c[2], 1.0)
+ return self.color
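CumlSample.get_color() steps a global hue index so that each merged command
gets a distinct, stable colour across renders. A runnable sketch of the same
scheme, with hypothetical values standing in for the HSV_STEP and HSV_MAX_MOD
module globals:

    import colorsys

    HSV_STEP = 7       # hypothetical step
    HSV_MAX_MOD = 23   # hypothetical modulus

    palette_idx = 0

    def next_color():
        global palette_idx
        palette_idx += HSV_STEP
        i = palette_idx % HSV_MAX_MOD
        h = i / HSV_MAX_MOD if i != 0 else 0.0
        r, g, b = colorsys.hsv_to_rgb(h, 0.5, 1.0)
        return (r, g, b, 1.0)

    print([next_color() for _ in range(3)])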
def draw_cuml_graph(ctx, proc_tree, chart_bounds, duration, sec_w, stat_type):
- global palette_idx
- palette_idx = 0
-
- time_hash = {}
- total_time = 0.0
- m_proc_list = {}
-
- if stat_type is STAT_TYPE_CPU:
- sample_value = 'cpu'
- else:
- sample_value = 'io'
- for proc in proc_tree.process_list:
- if elide_bootchart(proc):
- continue
-
- for sample in proc.samples:
- total_time += getattr(sample.cpu_sample, sample_value)
- if not sample.time in time_hash:
- time_hash[sample.time] = 1
-
- # merge pids with the same cmd
- if not proc.cmd in m_proc_list:
- m_proc_list[proc.cmd] = CumlSample (proc)
- continue
- s = m_proc_list[proc.cmd]
- s.merge_samples (proc)
-
- # all the sample times
- times = sorted(time_hash)
- if len (times) < 2:
- print("degenerate boot chart")
- return
-
- pix_per_ns = chart_bounds[3] / total_time
-# print "total time: %g pix-per-ns %g" % (total_time, pix_per_ns)
-
- # FIXME: we have duplicates in the process list too [!] - why !?
-
- # Render bottom up, left to right
- below = {}
- for time in times:
- below[time] = chart_bounds[1] + chart_bounds[3]
-
- # same colors each time we render
- random.seed (0)
-
- ctx.set_line_width(1)
-
- legends = []
- labels = []
-
- # render each pid in order
- for cs in m_proc_list.values():
- row = {}
- cuml = 0.0
-
- # print "pid : %s -> %g samples %d" % (proc.cmd, cuml, len (cs.samples))
- for sample in cs.samples:
- cuml += getattr(sample.cpu_sample, sample_value)
- row[sample.time] = cuml
-
- process_total_time = cuml
-
- # hide really tiny processes
- if cuml * pix_per_ns <= 2:
- continue
-
- last_time = times[0]
- y = last_below = below[last_time]
- last_cuml = cuml = 0.0
-
- ctx.set_source_rgba(*cs.get_color())
- for time in times:
- render_seg = False
-
- # did the underlying trend increase ?
- if below[time] != last_below:
- last_below = below[last_time]
- last_cuml = cuml
- render_seg = True
-
- # did we move up a pixel increase ?
- if time in row:
- nc = round (row[time] * pix_per_ns)
- if nc != cuml:
- last_cuml = cuml
- cuml = nc
- render_seg = True
-
-# if last_cuml > cuml:
-# assert fail ... - un-sorted process samples
-
- # draw the trailing rectangle from the last time to
- # before now, at the height of the last segment.
- if render_seg:
- w = math.ceil ((time - last_time) * chart_bounds[2] / proc_tree.duration) + 1
- x = chart_bounds[0] + round((last_time - proc_tree.start_time) * chart_bounds[2] / proc_tree.duration)
- ctx.rectangle (x, below[last_time] - last_cuml, w, last_cuml)
- ctx.fill()
-# ctx.stroke()
- last_time = time
- y = below [time] - cuml
-
- row[time] = y
-
- # render the last segment
- x = chart_bounds[0] + round((last_time - proc_tree.start_time) * chart_bounds[2] / proc_tree.duration)
- y = below[last_time] - cuml
- ctx.rectangle (x, y, chart_bounds[2] - x, cuml)
- ctx.fill()
-# ctx.stroke()
-
- # render legend if it will fit
- if cuml > 8:
- label = cs.cmd
- extnts = ctx.text_extents(label)
- label_w = extnts[2]
- label_h = extnts[3]
-# print "Text extents %g by %g" % (label_w, label_h)
- labels.append((label,
- chart_bounds[0] + chart_bounds[2] - label_w - off_x * 2,
- y + (cuml + label_h) / 2))
- if cs in legends:
- print("ARGH - duplicate process in list !")
-
- legends.append ((cs, process_total_time))
-
- below = row
-
- # render grid-lines over the top
- draw_box_ticks(ctx, chart_bounds, sec_w)
-
- # render labels
- for l in labels:
- draw_text(ctx, l[0], TEXT_COLOR, l[1], l[2])
-
- # Render legends
- font_height = 20
- label_width = 300
- LEGENDS_PER_COL = 15
- LEGENDS_TOTAL = 45
- ctx.set_font_size (TITLE_FONT_SIZE)
- dur_secs = duration / 100
- cpu_secs = total_time / 1000000000
-
- # misleading - with multiple CPUs ...
-# idle = ((dur_secs - cpu_secs) / dur_secs) * 100.0
- if stat_type is STAT_TYPE_CPU:
- label = "Cumulative CPU usage, by process; total CPU: " \
- " %.5g(s) time: %.3g(s)" % (cpu_secs, dur_secs)
- else:
- label = "Cumulative I/O usage, by process; total I/O: " \
- " %.5g(s) time: %.3g(s)" % (cpu_secs, dur_secs)
-
- draw_text(ctx, label, TEXT_COLOR, chart_bounds[0] + off_x,
- chart_bounds[1] + font_height)
-
- i = 0
- legends = sorted(legends, key=itemgetter(1), reverse=True)
- ctx.set_font_size(TEXT_FONT_SIZE)
- for t in legends:
- cs = t[0]
- time = t[1]
- x = chart_bounds[0] + off_x + int (i/LEGENDS_PER_COL) * label_width
- y = chart_bounds[1] + font_height * ((i % LEGENDS_PER_COL) + 2)
- str = "%s - %.0f(ms) (%2.2f%%)" % (cs.cmd, time/1000000, (time/total_time) * 100.0)
- draw_legend_box(ctx, str, cs.color, x, y, leg_s)
- i = i + 1
- if i >= LEGENDS_TOTAL:
- break
+ global palette_idx
+ palette_idx = 0
+
+ time_hash = {}
+ total_time = 0.0
+ m_proc_list = {}
+
+ if stat_type == STAT_TYPE_CPU:
+ sample_value = 'cpu'
+ else:
+ sample_value = 'io'
+ for proc in proc_tree.process_list:
+ if elide_bootchart(proc):
+ continue
+
+ for sample in proc.samples:
+ total_time += getattr(sample.cpu_sample, sample_value)
+ if sample.time not in time_hash:
+ time_hash[sample.time] = 1
+
+ # merge pids with the same cmd
+ if proc.cmd not in m_proc_list:
+ m_proc_list[proc.cmd] = CumlSample (proc)
+ continue
+ s = m_proc_list[proc.cmd]
+ s.merge_samples (proc)
+
+ # all the sample times
+ times = sorted(time_hash)
+ if len (times) < 2:
+ print("degenerate boot chart")
+ return
+
+ pix_per_ns = chart_bounds[3] / total_time
+# print "total time: %g pix-per-ns %g" % (total_time, pix_per_ns)
+
+ # FIXME: we have duplicates in the process list too [!] - why !?
+
+ # Render bottom up, left to right
+ below = {}
+ for time in times:
+ below[time] = chart_bounds[1] + chart_bounds[3]
+
+ # same colors each time we render
+ random.seed (0)
+
+ ctx.set_line_width(1)
+
+ legends = []
+ labels = []
+
+ # render each pid in order
+ for cs in m_proc_list.values():
+ row = {}
+ cuml = 0.0
+
+ # print "pid : %s -> %g samples %d" % (proc.cmd, cuml, len (cs.samples))
+ for sample in cs.samples:
+ cuml += getattr(sample.cpu_sample, sample_value)
+ row[sample.time] = cuml
+
+ process_total_time = cuml
+
+ # hide really tiny processes
+ if cuml * pix_per_ns <= 2:
+ continue
+
+ last_time = times[0]
+ y = last_below = below[last_time]
+ last_cuml = cuml = 0.0
+
+ ctx.set_source_rgba(*cs.get_color())
+ for time in times:
+ render_seg = False
+
+ # did the underlying trend increase ?
+ if below[time] != last_below:
+ last_below = below[last_time]
+ last_cuml = cuml
+ render_seg = True
+
+ # did we move up by at least a pixel ?
+ if time in row:
+ nc = round (row[time] * pix_per_ns)
+ if nc != cuml:
+ last_cuml = cuml
+ cuml = nc
+ render_seg = True
+
+# if last_cuml > cuml:
+# assert fail ... - un-sorted process samples
+
+ # draw the trailing rectangle from the last time to
+ # before now, at the height of the last segment.
+ if render_seg:
+ w = math.ceil ((time - last_time) * chart_bounds[2] / proc_tree.duration) + 1
+ x = chart_bounds[0] + round((last_time - proc_tree.start_time) * chart_bounds[2] / proc_tree.duration)
+ ctx.rectangle (x, below[last_time] - last_cuml, w, last_cuml)
+ ctx.fill()
+# ctx.stroke()
+ last_time = time
+ y = below [time] - cuml
+
+ row[time] = y
+
+ # render the last segment
+ x = chart_bounds[0] + round((last_time - proc_tree.start_time) * chart_bounds[2] / proc_tree.duration)
+ y = below[last_time] - cuml
+ ctx.rectangle (x, y, chart_bounds[2] - x, cuml)
+ ctx.fill()
+# ctx.stroke()
+
+ # render legend if it will fit
+ if cuml > 8:
+ label = cs.cmd
+ extnts = ctx.text_extents(label)
+ label_w = extnts[2]
+ label_h = extnts[3]
+# print "Text extents %g by %g" % (label_w, label_h)
+ labels.append((label,
+ chart_bounds[0] + chart_bounds[2] - label_w - off_x * 2,
+ y + (cuml + label_h) / 2))
+ if cs in legends:
+ print("ARGH - duplicate process in list !")
+
+ legends.append ((cs, process_total_time))
+
+ below = row
+
+ # render grid-lines over the top
+ draw_box_ticks(ctx, chart_bounds, sec_w)
+
+ # render labels
+ for l in labels:
+ draw_text(ctx, l[0], TEXT_COLOR, l[1], l[2])
+
+ # Render legends
+ font_height = 20
+ label_width = 300
+ LEGENDS_PER_COL = 15
+ LEGENDS_TOTAL = 45
+ ctx.set_font_size (TITLE_FONT_SIZE)
+ dur_secs = duration / 100
+ cpu_secs = total_time / 1000000000
+
+ # misleading - with multiple CPUs ...
+# idle = ((dur_secs - cpu_secs) / dur_secs) * 100.0
+ if stat_type == STAT_TYPE_CPU:
+ label = "Cumulative CPU usage, by process; total CPU: " \
+ " %.5g(s) time: %.3g(s)" % (cpu_secs, dur_secs)
+ else:
+ label = "Cumulative I/O usage, by process; total I/O: " \
+ " %.5g(s) time: %.3g(s)" % (cpu_secs, dur_secs)
+
+ draw_text(ctx, label, TEXT_COLOR, chart_bounds[0] + off_x,
+ chart_bounds[1] + font_height)
+
+ i = 0
+ legends = sorted(legends, key=itemgetter(1), reverse=True)
+ ctx.set_font_size(TEXT_FONT_SIZE)
+ for t in legends:
+ cs = t[0]
+ time = t[1]
+ x = chart_bounds[0] + off_x + int (i/LEGENDS_PER_COL) * label_width
+ y = chart_bounds[1] + font_height * ((i % LEGENDS_PER_COL) + 2)
+ str = "%s - %.0f(ms) (%2.2f%%)" % (cs.cmd, time/1000000, (time/total_time) * 100.0)
+ draw_legend_box(ctx, str, cs.color, x, y, leg_s)
+ i = i + 1
+ if i >= LEGENDS_TOTAL:
+ break
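draw_cuml_graph() stacks the per-process areas bottom-up: the 'below' dict
holds the current baseline y for every sample time, and each process's
cumulative row is subtracted from it before becoming the next baseline. A
simplified sketch of that bookkeeping, growing upward from zero instead of
downward from the chart bottom:

    times = [0, 1, 2]                             # hypothetical sample times
    series = {"cc1": [1, 2, 4], "sh": [2, 3, 3]}  # hypothetical cumulative rows

    baseline = {t: 0 for t in times}              # the 'below' dict, simplified
    bands = {}
    for name, cumls in series.items():
        # this band sits on top of everything stacked before it
        bands[name] = [(t, baseline[t], c) for t, c in zip(times, cumls)]
        for t, c in zip(times, cumls):
            baseline[t] += c

    print(bands["sh"])   # [(0, 1, 2), (1, 2, 3), (2, 4, 3)]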
diff --git a/scripts/pybootchartgui/pybootchartgui/gui.py b/scripts/pybootchartgui/pybootchartgui/gui.py
index 7fedd232df..e1fe915563 100644
--- a/scripts/pybootchartgui/pybootchartgui/gui.py
+++ b/scripts/pybootchartgui/pybootchartgui/gui.py
@@ -13,64 +13,83 @@
# You should have received a copy of the GNU General Public License
# along with pybootchartgui. If not, see <http://www.gnu.org/licenses/>.
-import gobject
-import gtk
-import gtk.gdk
-import gtk.keysyms
+import gi
+gi.require_version('Gtk', '3.0')
+from gi.repository import Gtk as gtk
+from gi.repository import Gtk
+from gi.repository import Gdk
+from gi.repository import GObject as gobject
+from gi.repository import GObject
+
from . import draw
from .draw import RenderOptions
-class PyBootchartWidget(gtk.DrawingArea):
+class PyBootchartWidget(gtk.DrawingArea, gtk.Scrollable):
__gsignals__ = {
- 'expose-event': 'override',
- 'clicked' : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_STRING, gtk.gdk.Event)),
+ 'clicked' : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_STRING, Gdk.Event)),
'position-changed' : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_INT, gobject.TYPE_INT)),
'set-scroll-adjustments' : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gtk.Adjustment, gtk.Adjustment))
}
+ hadjustment = GObject.property(type=Gtk.Adjustment,
+ default=Gtk.Adjustment(),
+ flags=GObject.PARAM_READWRITE)
+ hscroll_policy = GObject.property(type=Gtk.ScrollablePolicy,
+ default=Gtk.ScrollablePolicy.MINIMUM,
+ flags=GObject.PARAM_READWRITE)
+ vadjustment = GObject.property(type=Gtk.Adjustment,
+ default=Gtk.Adjustment(),
+ flags=GObject.PARAM_READWRITE)
+ vscroll_policy = GObject.property(type=Gtk.ScrollablePolicy,
+ default=Gtk.ScrollablePolicy.MINIMUM,
+ flags=GObject.PARAM_READWRITE)
+
def __init__(self, trace, options, xscale):
gtk.DrawingArea.__init__(self)
self.trace = trace
self.options = options
- self.set_flags(gtk.CAN_FOCUS)
+ self.set_can_focus(True)
- self.add_events(gtk.gdk.BUTTON_PRESS_MASK | gtk.gdk.BUTTON_RELEASE_MASK)
+ self.add_events(Gdk.EventMask.BUTTON_PRESS_MASK | Gdk.EventMask.BUTTON_RELEASE_MASK)
self.connect("button-press-event", self.on_area_button_press)
self.connect("button-release-event", self.on_area_button_release)
- self.add_events(gtk.gdk.POINTER_MOTION_MASK | gtk.gdk.POINTER_MOTION_HINT_MASK | gtk.gdk.BUTTON_RELEASE_MASK)
+ self.add_events(Gdk.EventMask.POINTER_MOTION_MASK | Gdk.EventMask.POINTER_MOTION_HINT_MASK | Gdk.EventMask.BUTTON_RELEASE_MASK)
self.connect("motion-notify-event", self.on_area_motion_notify)
self.connect("scroll-event", self.on_area_scroll_event)
self.connect('key-press-event', self.on_key_press_event)
- self.connect('set-scroll-adjustments', self.on_set_scroll_adjustments)
self.connect("size-allocate", self.on_allocation_size_changed)
self.connect("position-changed", self.on_position_changed)
+ self.connect("draw", self.on_draw)
+
self.zoom_ratio = 1.0
self.xscale = xscale
self.x, self.y = 0.0, 0.0
self.chart_width, self.chart_height = draw.extents(self.options, self.xscale, self.trace)
- self.hadj = None
- self.vadj = None
- self.hadj_changed_signal_id = None
- self.vadj_changed_signal_id = None
-
- def do_expose_event(self, event):
- cr = self.window.cairo_create()
-
- # set a clip region for the expose event
- cr.rectangle(
- event.area.x, event.area.y,
- event.area.width, event.area.height
- )
- cr.clip()
- self.draw(cr, self.get_allocation())
- return False
-
- def draw(self, cr, rect):
+ self.our_width, self.our_height = self.chart_width, self.chart_height
+
+ self.hadj = gtk.Adjustment(0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
+ self.vadj = gtk.Adjustment(0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
+ self.vadj.connect('value-changed', self.on_adjustments_changed)
+ self.hadj.connect('value-changed', self.on_adjustments_changed)
+
+ def bound_vals(self):
+ self.x = max(0, self.x)
+ self.y = max(0, self.y)
+ self.x = min(self.chart_width - self.our_width, self.x)
+ self.y = min(self.chart_height - self.our_height, self.y)
+
+ def on_draw(self, darea, cr):
+ # set a clip region
+ #cr.rectangle(
+ # self.x, self.y,
+ # self.chart_width, self.chart_height
+ #)
+ #cr.clip()
cr.set_source_rgba(1.0, 1.0, 1.0, 1.0)
cr.paint()
cr.scale(self.zoom_ratio, self.zoom_ratio)
@@ -84,7 +103,7 @@ class PyBootchartWidget(gtk.DrawingArea):
def zoom_image (self, zoom_ratio):
self.zoom_ratio = zoom_ratio
- self._set_scroll_adjustments (self.hadj, self.vadj)
+ self._set_scroll_adjustments()
self.queue_draw()
def zoom_to_rect (self, rect):
@@ -122,126 +141,101 @@ class PyBootchartWidget(gtk.DrawingArea):
def show_toggled(self, button):
self.options.app_options.show_all = button.get_property ('active')
self.chart_width, self.chart_height = draw.extents(self.options, self.xscale, self.trace)
- self._set_scroll_adjustments(self.hadj, self.vadj)
+ self._set_scroll_adjustments()
self.queue_draw()
POS_INCREMENT = 100
def on_key_press_event(self, widget, event):
- if event.keyval == gtk.keysyms.Left:
+ if event.keyval == Gdk.keyval_from_name("Left"):
self.x -= self.POS_INCREMENT/self.zoom_ratio
- elif event.keyval == gtk.keysyms.Right:
+ elif event.keyval == Gdk.keyval_from_name("Right"):
self.x += self.POS_INCREMENT/self.zoom_ratio
- elif event.keyval == gtk.keysyms.Up:
+ elif event.keyval == Gdk.keyval_from_name("Up"):
self.y -= self.POS_INCREMENT/self.zoom_ratio
- elif event.keyval == gtk.keysyms.Down:
+ elif event.keyval == Gdk.keyval_from_name("Down"):
self.y += self.POS_INCREMENT/self.zoom_ratio
else:
return False
+ self.bound_vals()
self.queue_draw()
self.position_changed()
return True
def on_area_button_press(self, area, event):
if event.button == 2 or event.button == 1:
- area.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.FLEUR))
+ window = self.get_window()
+ window.set_cursor(Gdk.Cursor(Gdk.CursorType.FLEUR))
self.prevmousex = event.x
self.prevmousey = event.y
- if event.type not in (gtk.gdk.BUTTON_PRESS, gtk.gdk.BUTTON_RELEASE):
+ if event.type not in (Gdk.EventType.BUTTON_PRESS, Gdk.EventType.BUTTON_RELEASE):
return False
return False
def on_area_button_release(self, area, event):
if event.button == 2 or event.button == 1:
- area.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.ARROW))
+ window = self.get_window()
+ window.set_cursor(Gdk.Cursor(Gdk.CursorType.ARROW))
self.prevmousex = None
self.prevmousey = None
return True
return False
def on_area_scroll_event(self, area, event):
- if event.state & gtk.gdk.CONTROL_MASK:
- if event.direction == gtk.gdk.SCROLL_UP:
+ if event.state & Gdk.CONTROL_MASK:
+ if event.direction == Gdk.SCROLL_UP:
self.zoom_image(self.zoom_ratio * self.ZOOM_INCREMENT)
return True
- if event.direction == gtk.gdk.SCROLL_DOWN:
+ if event.direction == Gdk.SCROLL_DOWN:
self.zoom_image(self.zoom_ratio / self.ZOOM_INCREMENT)
return True
return False
def on_area_motion_notify(self, area, event):
state = event.state
- if state & gtk.gdk.BUTTON2_MASK or state & gtk.gdk.BUTTON1_MASK:
+ if state & Gdk.ModifierType.BUTTON2_MASK or state & Gdk.ModifierType.BUTTON1_MASK:
x, y = int(event.x), int(event.y)
# pan the image
self.x += (self.prevmousex - x)/self.zoom_ratio
self.y += (self.prevmousey - y)/self.zoom_ratio
+ self.bound_vals()
self.queue_draw()
self.prevmousex = x
self.prevmousey = y
self.position_changed()
return True
- def on_set_scroll_adjustments(self, area, hadj, vadj):
- self._set_scroll_adjustments (hadj, vadj)
-
def on_allocation_size_changed(self, widget, allocation):
self.hadj.page_size = allocation.width
self.hadj.page_increment = allocation.width * 0.9
self.vadj.page_size = allocation.height
self.vadj.page_increment = allocation.height * 0.9
+ self.our_width = allocation.width
+ if self.chart_width < self.our_width:
+ self.our_width = self.chart_width
+ self.our_height = allocation.height
+ if self.chart_height < self.our_height:
+ self.our_height = self.chart_height
+ self._set_scroll_adjustments()
def _set_adj_upper(self, adj, upper):
- changed = False
- value_changed = False
-
- if adj.upper != upper:
- adj.upper = upper
- changed = True
-
- max_value = max(0.0, upper - adj.page_size)
- if adj.value > max_value:
- adj.value = max_value
- value_changed = True
-
- if changed:
- adj.changed()
- if value_changed:
- adj.value_changed()
-
- def _set_scroll_adjustments(self, hadj, vadj):
- if hadj == None:
- hadj = gtk.Adjustment(0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
- if vadj == None:
- vadj = gtk.Adjustment(0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
-
- if self.hadj_changed_signal_id != None and \
- self.hadj != None and hadj != self.hadj:
- self.hadj.disconnect (self.hadj_changed_signal_id)
- if self.vadj_changed_signal_id != None and \
- self.vadj != None and vadj != self.vadj:
- self.vadj.disconnect (self.vadj_changed_signal_id)
-
- if hadj != None:
- self.hadj = hadj
- self._set_adj_upper (self.hadj, self.zoom_ratio * self.chart_width)
- self.hadj_changed_signal_id = self.hadj.connect('value-changed', self.on_adjustments_changed)
-
- if vadj != None:
- self.vadj = vadj
- self._set_adj_upper (self.vadj, self.zoom_ratio * self.chart_height)
- self.vadj_changed_signal_id = self.vadj.connect('value-changed', self.on_adjustments_changed)
+
+ if adj.get_upper() != upper:
+ adj.set_upper(upper)
+
+ def _set_scroll_adjustments(self):
+ self._set_adj_upper (self.hadj, self.zoom_ratio * (self.chart_width - self.our_width))
+ self._set_adj_upper (self.vadj, self.zoom_ratio * (self.chart_height - self.our_height))
def on_adjustments_changed(self, adj):
- self.x = self.hadj.value / self.zoom_ratio
- self.y = self.vadj.value / self.zoom_ratio
+ self.x = self.hadj.get_value() / self.zoom_ratio
+ self.y = self.vadj.get_value() / self.zoom_ratio
self.queue_draw()
def on_position_changed(self, widget, x, y):
- self.hadj.value = x * self.zoom_ratio
- self.vadj.value = y * self.zoom_ratio
-
-PyBootchartWidget.set_set_scroll_adjustments_signal('set-scroll-adjustments')
+ self.hadj.set_value(x * self.zoom_ratio)
+ #self.hadj.value_changed()
+ self.vadj.set_value(y * self.zoom_ratio)
class PyBootchartShell(gtk.VBox):
ui = '''
@@ -260,7 +254,7 @@ class PyBootchartShell(gtk.VBox):
def __init__(self, window, trace, options, xscale):
gtk.VBox.__init__(self)
- self.widget = PyBootchartWidget(trace, options, xscale)
+ self.widget2 = PyBootchartWidget(trace, options, xscale)
# Create a UIManager instance
uimanager = self.uimanager = gtk.UIManager()
@@ -275,12 +269,12 @@ class PyBootchartShell(gtk.VBox):
# Create actions
actiongroup.add_actions((
- ('Expand', gtk.STOCK_ADD, None, None, None, self.widget.on_expand),
- ('Contract', gtk.STOCK_REMOVE, None, None, None, self.widget.on_contract),
- ('ZoomIn', gtk.STOCK_ZOOM_IN, None, None, None, self.widget.on_zoom_in),
- ('ZoomOut', gtk.STOCK_ZOOM_OUT, None, None, None, self.widget.on_zoom_out),
- ('ZoomFit', gtk.STOCK_ZOOM_FIT, 'Fit Width', None, None, self.widget.on_zoom_fit),
- ('Zoom100', gtk.STOCK_ZOOM_100, None, None, None, self.widget.on_zoom_100),
+ ('Expand', gtk.STOCK_ADD, None, None, None, self.widget2.on_expand),
+ ('Contract', gtk.STOCK_REMOVE, None, None, None, self.widget2.on_contract),
+ ('ZoomIn', gtk.STOCK_ZOOM_IN, None, None, None, self.widget2.on_zoom_in),
+ ('ZoomOut', gtk.STOCK_ZOOM_OUT, None, None, None, self.widget2.on_zoom_out),
+ ('ZoomFit', gtk.STOCK_ZOOM_FIT, 'Fit Width', None, None, self.widget2.on_zoom_fit),
+ ('Zoom100', gtk.STOCK_ZOOM_100, None, None, None, self.widget2.on_zoom_100),
))
# Add the actiongroup to the uimanager
@@ -290,29 +284,33 @@ class PyBootchartShell(gtk.VBox):
uimanager.add_ui_from_string(self.ui)
# Scrolled window
- scrolled = gtk.ScrolledWindow()
- scrolled.add(self.widget)
+ scrolled = gtk.ScrolledWindow(self.widget2.hadj, self.widget2.vadj)
+ scrolled.add(self.widget2)
+
+ #scrolled.set_hadjustment()
+ #scrolled.set_vadjustment(self.widget2.vadj)
+ scrolled.set_policy(gtk.PolicyType.ALWAYS, gtk.PolicyType.ALWAYS)
# toolbar / h-box
hbox = gtk.HBox(False, 8)
# Create a Toolbar
toolbar = uimanager.get_widget('/ToolBar')
- hbox.pack_start(toolbar, True, True)
+ hbox.pack_start(toolbar, True, True, 0)
if not options.kernel_only:
# Misc. options
button = gtk.CheckButton("Show more")
- button.connect ('toggled', self.widget.show_toggled)
+ button.connect ('toggled', self.widget2.show_toggled)
button.set_active(options.app_options.show_all)
- hbox.pack_start (button, False, True)
+ hbox.pack_start (button, False, True, 0)
- self.pack_start(hbox, False)
- self.pack_start(scrolled)
+ self.pack_start(hbox, False, True, 0)
+ self.pack_start(scrolled, True, True, 0)
self.show_all()
def grab_focus(self, window):
- window.set_focus(self.widget)
+ window.set_focus(self.widget2)
class PyBootchartWindow(gtk.Window):
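The GTK 3 port above makes the widget implement Gtk.Scrollable, which in
PyGObject amounts to exposing the interface's four overridable properties on
the class. A rough, untested sketch of the pattern (property spellings as
understood for PyGObject 3.x; treat this as an assumption, not the patch's
code):

    import gi
    gi.require_version('Gtk', '3.0')
    from gi.repository import Gtk, GObject

    class ScrollableArea(Gtk.DrawingArea, Gtk.Scrollable):
        # Gtk.Scrollable requires these four properties to be overridable.
        hadjustment = GObject.Property(type=Gtk.Adjustment)
        vadjustment = GObject.Property(type=Gtk.Adjustment)
        hscroll_policy = GObject.Property(type=Gtk.ScrollablePolicy,
                                          default=Gtk.ScrollablePolicy.MINIMUM)
        vscroll_policy = GObject.Property(type=Gtk.ScrollablePolicy,
                                          default=Gtk.ScrollablePolicy.MINIMUM)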
diff --git a/scripts/pybootchartgui/pybootchartgui/parsing.py b/scripts/pybootchartgui/pybootchartgui/parsing.py
index bcfb2da569..63a53b6b88 100644
--- a/scripts/pybootchartgui/pybootchartgui/parsing.py
+++ b/scripts/pybootchartgui/pybootchartgui/parsing.py
@@ -18,7 +18,7 @@ import string
import re
import sys
import tarfile
-from time import clock
+import time
from collections import defaultdict
from functools import reduce
@@ -49,6 +49,9 @@ class Trace:
self.parent_map = None
self.mem_stats = []
self.monitor_disk = None
+ self.cpu_pressure = []
+ self.io_pressure = []
+ self.mem_pressure = []
self.times = [] # Always empty, but expected by draw.py when drawing system charts.
if len(paths):
@@ -128,7 +131,7 @@ class Trace:
def compile(self, writer):
def find_parent_id_for(pid):
- if pid is 0:
+ if pid == 0:
return 0
ppid = self.parent_map.get(pid)
if ppid:
@@ -267,7 +270,7 @@ def _parse_headers(file):
value = line.strip()
headers[last] += value
return headers, last
- return reduce(parse, file.read().decode('utf-8').split('\n'), (defaultdict(str),''))[0]
+ return reduce(parse, file.read().split('\n'), (defaultdict(str),''))[0]
def _parse_timed_blocks(file):
"""Parses (ie., splits) a file into so-called timed-blocks. A
@@ -281,7 +284,7 @@ def _parse_timed_blocks(file):
return (int(lines[0]), lines[1:])
except ValueError:
raise ParseError("expected a timed-block, but timestamp '%s' is not an integer" % lines[0])
- blocks = file.read().decode('utf-8').split('\n\n')
+ blocks = file.read().split('\n\n')
return [parse(block) for block in blocks if block.strip() and not block.endswith(' not running\n')]
def _parse_proc_ps_log(writer, file):
@@ -554,6 +557,29 @@ def _parse_monitor_disk_log(file):
return disk_stats
+def _parse_pressure_logs(file, filename):
+ """
+ Parse file for "some" pressure with 'avg10', 'avg60' 'avg300' and delta total values
+ (in that order) directly stored on one line for both CPU and IO, based on filename.
+ """
+ pressure_stats = []
+ if filename == "cpu.log":
+ SamplingClass = CPUPressureSample
+ elif filename == "memory.log":
+ SamplingClass = MemPressureSample
+ else:
+ SamplingClass = IOPressureSample
+ for time, lines in _parse_timed_blocks(file):
+ for line in lines:
+ if not line: continue
+ tokens = line.split()
+ avg10 = float(tokens[0])
+ avg60 = float(tokens[1])
+ avg300 = float(tokens[2])
+ delta = float(tokens[3])
+ pressure_stats.append(SamplingClass(time, avg10, avg60, avg300, delta))
+
+ return pressure_stats
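The pressure logs are assumed to arrive as timed blocks whose lines carry the
four numbers in a fixed order, so each line tokenizes directly into one
sample. A sketch with a hypothetical block:

    # Hypothetical timed block as written by the collector:
    #
    #   1234
    #   0.00 1.20 0.80 4500.0
    #
    line = "0.00 1.20 0.80 4500.0"
    avg10, avg60, avg300, delta_total = (float(tok) for tok in line.split())
    print(avg10, avg60, avg300, delta_total)   # 0.0 1.2 0.8 4500.0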
# if we boot the kernel with: initcall_debug printk.time=1 we can
# get all manner of interesting data from the dmesg output
@@ -577,7 +603,7 @@ def _parse_dmesg(writer, file):
processMap['k-boot'] = kernel
base_ts = False
max_ts = 0
- for line in file.read().decode('utf-8').split('\n'):
+ for line in file.read().split('\n'):
t = timestamp_re.match (line)
if t is None:
# print "duff timestamp " + line
@@ -665,7 +691,7 @@ def _parse_pacct(writer, file):
def _parse_paternity_log(writer, file):
parent_map = {}
parent_map[0] = 0
- for line in file.read().decode('utf-8').split('\n'):
+ for line in file.read().split('\n'):
if not line:
continue
elems = line.split(' ') # <Child> <Parent>
@@ -678,7 +704,7 @@ def _parse_paternity_log(writer, file):
def _parse_cmdline_log(writer, file):
cmdLines = {}
- for block in file.read().decode('utf-8').split('\n\n'):
+ for block in file.read().split('\n\n'):
lines = block.split('\n')
if len (lines) >= 3:
# print "Lines '%s'" % (lines[0])
@@ -723,7 +749,7 @@ def get_num_cpus(headers):
def _do_parse(writer, state, filename, file):
writer.info("parsing '%s'" % filename)
- t1 = clock()
+ t1 = time.process_time()
name = os.path.basename(filename)
if name == "proc_diskstats.log":
state.disk_stats = _parse_proc_disk_stat_log(file)
@@ -741,9 +767,16 @@ def _do_parse(writer, state, filename, file):
state.cmdline = _parse_cmdline_log(writer, file)
elif name == "monitor_disk.log":
state.monitor_disk = _parse_monitor_disk_log(file)
+ # pressure logs are in a subdirectory
+ elif name == "cpu.log":
+ state.cpu_pressure = _parse_pressure_logs(file, name)
+ elif name == "io.log":
+ state.io_pressure = _parse_pressure_logs(file, name)
+ elif name == "memory.log":
+ state.mem_pressure = _parse_pressure_logs(file, name)
elif not filename.endswith('.log'):
_parse_bitbake_buildstats(writer, state, filename, file)
- t2 = clock()
+ t2 = time.process_time()
writer.info(" %s seconds" % str(t2-t1))
return state
@@ -751,7 +784,7 @@ def parse_file(writer, state, filename):
if state.filename is None:
state.filename = filename
basename = os.path.basename(filename)
- with open(filename, "rb") as file:
+ with open(filename, "r") as file:
return _do_parse(writer, state, filename, file)
def parse_paths(writer, state, paths):
diff --git a/scripts/pybootchartgui/pybootchartgui/samples.py b/scripts/pybootchartgui/pybootchartgui/samples.py
index 9fc309b3ab..a70d8a5a28 100644
--- a/scripts/pybootchartgui/pybootchartgui/samples.py
+++ b/scripts/pybootchartgui/pybootchartgui/samples.py
@@ -37,6 +37,31 @@ class CPUSample:
return str(self.time) + "\t" + str(self.user) + "\t" + \
str(self.sys) + "\t" + str(self.io) + "\t" + str (self.swap)
+class CPUPressureSample:
+ def __init__(self, time, avg10, avg60, avg300, deltaTotal):
+ self.time = time
+ self.avg10 = avg10
+ self.avg60 = avg60
+ self.avg300 = avg300
+ self.deltaTotal = deltaTotal
+
+class IOPressureSample:
+ def __init__(self, time, avg10, avg60, avg300, deltaTotal):
+ self.time = time
+ self.avg10 = avg10
+ self.avg60 = avg60
+ self.avg300 = avg300
+ self.deltaTotal = deltaTotal
+
+class MemPressureSample:
+ def __init__(self, time, avg10, avg60, avg300, deltaTotal):
+ self.time = time
+ self.avg10 = avg10
+ self.avg60 = avg60
+ self.avg300 = avg300
+ self.deltaTotal = deltaTotal
+
+
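The three pressure sample classes are structurally identical; if they stay
that way, a shared base class could express the same thing while keeping the
distinct type names that parsing.py selects on (a sketch, not part of the
patch):

    class PressureSample:
        def __init__(self, time, avg10, avg60, avg300, deltaTotal):
            self.time = time
            self.avg10 = avg10
            self.avg60 = avg60
            self.avg300 = avg300
            self.deltaTotal = deltaTotal

    class CPUPressureSample(PressureSample): pass
    class IOPressureSample(PressureSample): pass
    class MemPressureSample(PressureSample): pass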
class MemSample:
used_values = ('MemTotal', 'MemFree', 'Buffers', 'Cached', 'SwapTotal', 'SwapFree',)
diff --git a/scripts/pythondeps b/scripts/pythondeps
index 590b9769e7..48277ec28a 100755
--- a/scripts/pythondeps
+++ b/scripts/pythondeps
@@ -1,5 +1,9 @@
#!/usr/bin/env python3
#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
# Determine dependencies of python scripts or available python modules in a search path.
#
# Given the -d argument and a filename/filenames, returns the modules imported by those files.
@@ -9,7 +13,8 @@
import argparse
import ast
-import imp
+import importlib
+from importlib import machinery
import logging
import os.path
import sys
@@ -17,10 +22,7 @@ import sys
logger = logging.getLogger('pythondeps')
-suffixes = []
-for triple in imp.get_suffixes():
- suffixes.append(triple[0])
-
+suffixes = importlib.machinery.all_suffixes()
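importlib.machinery.all_suffixes() yields the same information the removed
imp.get_suffixes() loop collected: every file suffix Python recognizes as a
module. For example:

    import importlib.machinery

    # Source, bytecode and extension-module suffixes; on CPython/Linux this
    # prints something like ['.py', '.pyc', '.so'] (platform-dependent).
    print(importlib.machinery.all_suffixes())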
class PythonDepError(Exception):
pass
diff --git a/scripts/recipetool b/scripts/recipetool
index 3a3c9b7445..e2d585d2c5 100755
--- a/scripts/recipetool
+++ b/scripts/recipetool
@@ -4,18 +4,8 @@
#
# Copyright (C) 2014 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import sys
import os
diff --git a/scripts/relocate_sdk.py b/scripts/relocate_sdk.py
index c752fa2c61..8a728720ba 100755
--- a/scripts/relocate_sdk.py
+++ b/scripts/relocate_sdk.py
@@ -2,18 +2,7 @@
#
# Copyright (c) 2012 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-# See the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+# SPDX-License-Identifier: GPL-2.0-only
#
# DESCRIPTION
# This script is called by the SDK installer script. It replaces the dynamic
@@ -41,9 +30,16 @@ else:
old_prefix = re.compile(b("##DEFAULT_INSTALL_DIR##"))
def get_arch():
+ global endian_prefix
f.seek(0)
e_ident =f.read(16)
- ei_mag0,ei_mag1_3,ei_class = struct.unpack("<B3sB11x", e_ident)
+ ei_mag0,ei_mag1_3,ei_class,ei_data,ei_version = struct.unpack("<B3sBBB9x", e_ident)
+
+ # ei_data = 1 for little-endian, 2 for big-endian
+ if ei_data == 1:
+ endian_prefix = '<'
+ else:
+ endian_prefix = '>'
if (ei_mag0 != 0x7f and ei_mag1_3 != "ELF") or ei_class == 0:
return 0
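For reference, the widened unpack reads EI_CLASS, EI_DATA and EI_VERSION out
of the 16-byte e_ident block, and EI_DATA then selects the struct byte-order
prefix used for every later header read. A standalone sketch with a
hypothetical little-endian 64-bit ident:

    import struct

    # Hypothetical e_ident: ELF magic, EI_CLASS=2 (64-bit), EI_DATA=1
    # (little-endian), EI_VERSION=1, then nine padding bytes.
    e_ident = b"\x7fELF" + bytes([2, 1, 1]) + b"\x00" * 9

    ei_mag0, ei_mag1_3, ei_class, ei_data, ei_version = \
        struct.unpack("<B3sBBB9x", e_ident)
    endian_prefix = '<' if ei_data == 1 else '>'
    print(ei_class, ei_data, endian_prefix)   # 2 1 <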
@@ -62,11 +58,11 @@ def parse_elf_header():
if arch == 32:
# 32bit
- hdr_fmt = "<HHILLLIHHHHHH"
+ hdr_fmt = endian_prefix + "HHILLLIHHHHHH"
hdr_size = 52
else:
# 64bit
- hdr_fmt = "<HHIQQQIHHHHHH"
+ hdr_fmt = endian_prefix + "HHIQQQIHHHHHH"
hdr_size = 64
e_type, e_machine, e_version, e_entry, e_phoff, e_shoff, e_flags,\
@@ -75,9 +71,9 @@ def parse_elf_header():
def change_interpreter(elf_file_name):
if arch == 32:
- ph_fmt = "<IIIIIIII"
+ ph_fmt = endian_prefix + "IIIIIIII"
else:
- ph_fmt = "<IIQQQQQQ"
+ ph_fmt = endian_prefix + "IIQQQQQQ"
""" look for PT_INTERP section """
for i in range(0,e_phnum):
@@ -108,25 +104,26 @@ def change_interpreter(elf_file_name):
if (len(new_dl_path) >= p_filesz):
print("ERROR: could not relocate %s, interp size = %i and %i is needed." \
% (elf_file_name, p_memsz, len(new_dl_path) + 1))
- break
+ return False
dl_path = new_dl_path + b("\0") * (p_filesz - len(new_dl_path))
f.seek(p_offset)
f.write(dl_path)
break
+ return True
def change_dl_sysdirs(elf_file_name):
if arch == 32:
- sh_fmt = "<IIIIIIIIII"
+ sh_fmt = endian_prefix + "IIIIIIIIII"
else:
- sh_fmt = "<IIQQQQIIQQ"
+ sh_fmt = endian_prefix + "IIQQQQIIQQ"
""" read section string table """
f.seek(e_shoff + e_shstrndx * e_shentsize)
sh_hdr = f.read(e_shentsize)
if arch == 32:
- sh_offset, sh_size = struct.unpack("<16xII16x", sh_hdr)
+ sh_offset, sh_size = struct.unpack(endian_prefix + "16xII16x", sh_hdr)
else:
- sh_offset, sh_size = struct.unpack("<24xQQ24x", sh_hdr)
+ sh_offset, sh_size = struct.unpack(endian_prefix + "24xQQ24x", sh_hdr)
f.seek(sh_offset)
sh_strtab = f.read(sh_size)
@@ -226,6 +223,7 @@ else:
executables_list = sys.argv[3:]
+errors = False
for e in executables_list:
perms = os.stat(e)[stat.ST_MODE]
if os.access(e, os.W_OK|os.R_OK):
@@ -251,7 +249,8 @@ for e in executables_list:
arch = get_arch()
if arch:
parse_elf_header()
- change_interpreter(e)
+ if not change_interpreter(e):
+ errors = True
change_dl_sysdirs(e)
""" change permissions back """
@@ -264,3 +263,6 @@ for e in executables_list:
print("New file size for %s is different. Looks like a relocation error!", e)
sys.exit(-1)
+if errors:
+ print("Relocation of one or more executables failed.")
+ sys.exit(-1)
diff --git a/scripts/resulttool b/scripts/resulttool
new file mode 100755
index 0000000000..fc282bda6c
--- /dev/null
+++ b/scripts/resulttool
@@ -0,0 +1,78 @@
+#!/usr/bin/env python3
+#
+# test results tool - tool for manipulating OEQA test result json files
+# (merge results, summarise results, regression analysis, generate manual test results file)
+#
+# To view help information, execute the below
+# $ resulttool
+#
+# To store test results from oeqa automated tests, execute the below
+# $ resulttool store <source_dir> <git_branch>
+#
+# To merge test results, execute the below
+# $ resulttool merge <base_result_file> <target_result_file>
+#
+# To generate a test report, execute the below
+# $ resulttool report <source_dir>
+#
+# To perform regression file analysis, execute the below
+# $ resulttool regression-file <base_result_file> <target_result_file>
+#
+# To execute manual test cases, execute the below
+# $ resulttool manualexecution <manualjsonfile>
+#
+# By default, testresults.json for manualexecution is stored in <build>/tmp/log/manual/
+#
+# Copyright (c) 2019, Intel Corporation.
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
+import os
+import sys
+import argparse
+import logging
+script_path = os.path.dirname(os.path.realpath(__file__))
+lib_path = script_path + '/lib'
+sys.path = sys.path + [lib_path]
+import argparse_oe
+import scriptutils
+import resulttool.merge
+import resulttool.store
+import resulttool.regression
+import resulttool.report
+import resulttool.manualexecution
+import resulttool.log
+logger = scriptutils.logger_create('resulttool')
+
+def main():
+ parser = argparse_oe.ArgumentParser(description="OEQA test result manipulation tool.",
+ epilog="Use %(prog)s <subcommand> --help to get help on a specific command")
+ parser.add_argument('-d', '--debug', help='enable debug output', action='store_true')
+ parser.add_argument('-q', '--quiet', help='print only errors', action='store_true')
+ subparsers = parser.add_subparsers(dest="subparser_name", title='subcommands', metavar='<subcommand>')
+ subparsers.required = True
+ subparsers.add_subparser_group('manualexecution', 'manual testcases', 300)
+ resulttool.manualexecution.register_commands(subparsers)
+ subparsers.add_subparser_group('setup', 'setup', 200)
+ resulttool.merge.register_commands(subparsers)
+ resulttool.store.register_commands(subparsers)
+ subparsers.add_subparser_group('analysis', 'analysis', 100)
+ resulttool.regression.register_commands(subparsers)
+ resulttool.report.register_commands(subparsers)
+ resulttool.log.register_commands(subparsers)
+
+ args = parser.parse_args()
+ if args.debug:
+ logger.setLevel(logging.DEBUG)
+ elif args.quiet:
+ logger.setLevel(logging.ERROR)
+
+ try:
+ ret = args.func(args, logger)
+ except argparse_oe.ArgumentUsageError as ae:
+ parser.error_subcommand(ae.message, ae.subcommand)
+ return ret
+
+if __name__ == "__main__":
+ sys.exit(main())
diff --git a/scripts/rpm2cpio.sh b/scripts/rpm2cpio.sh
index cf23472ba9..8199b43784 100755
--- a/scripts/rpm2cpio.sh
+++ b/scripts/rpm2cpio.sh
@@ -1,5 +1,4 @@
#!/bin/sh -efu
-
# This file comes from rpm 4.x distribution
fatal() {
@@ -8,7 +7,7 @@ fatal() {
}
pkg="$1"
-[ -n "$pkg" -a -e "$pkg" ] ||
+[ -n "$pkg" ] && [ -e "$pkg" ] ||
fatal "No package supplied"
_dd() {
@@ -17,13 +16,23 @@ _dd() {
}
calcsize() {
+
+ case "$(_dd $1 bs=4 count=1 | tr -d '\0')" in
+ "$(printf '\216\255\350')"*) ;; # '\x8e\xad\xe8'
+ *) fatal "File doesn't look like rpm: $pkg" ;;
+ esac
+
offset=$(($1 + 8))
local i b b0 b1 b2 b3 b4 b5 b6 b7
i=0
while [ $i -lt 8 ]; do
- b="$(_dd $(($offset + $i)) bs=1 count=1)"
+ # append . so the trailing \n is not lost
+ # strip \0 since command substitution drops it with a warning otherwise
+ b="$(_dd $(($offset + $i)) bs=1 count=1 | tr -d '\0' ; echo .)"
+ b=${b%.} # strip . again
+
[ -z "$b" ] &&
b="0" ||
b="$(exec printf '%u\n' "'$b")"
@@ -35,7 +44,7 @@ calcsize() {
offset=$(($offset + $rsize))
}
-case "$(_dd 0 bs=8 count=1)" in
+case "$(_dd 0 bs=4 count=1 | tr -d '\0')" in
"$(printf '\355\253\356\333')"*) ;; # '\xed\xab\xee\xdb'
*) fatal "File doesn't look like rpm: $pkg" ;;
esac
@@ -46,10 +55,11 @@ sigsize=$rsize
calcsize $(($offset + (8 - ($sigsize % 8)) % 8))
hdrsize=$rsize
-case "$(_dd $offset bs=3 count=1)" in
- "$(printf '\102\132')"*) _dd $offset | bunzip2 ;; # '\x42\x5a'
- "$(printf '\037\213')"*) _dd $offset | gunzip ;; # '\x1f\x8b'
- "$(printf '\375\067')"*) _dd $offset | xzcat ;; # '\xfd\x37'
- "$(printf '\135\000')"*) _dd $offset | unlzma ;; # '\x5d\x00'
- *) fatal "Unrecognized rpm file: $pkg" ;;
+case "$(_dd $offset bs=2 count=1 | tr -d '\0')" in
+ "$(printf '\102\132')") _dd $offset | bunzip2 ;; # '\x42\x5a'
+ "$(printf '\037\213')") _dd $offset | gunzip ;; # '\x1f\x8b'
+ "$(printf '\375\067')") _dd $offset | xzcat ;; # '\xfd\x37'
+ "$(printf '\135')") _dd $offset | unlzma ;; # '\x5d\x00'
+ "$(printf '\050\265')") _dd $offset | unzstd ;; # '\x28\xb5'
+ *) fatal "Unrecognized payload compression format in rpm file: $pkg" ;;
esac
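The calcsize() helper walks an RPM header section byte by byte in shell; the same arithmetic is easier to read in Python. A sketch assuming the standard RPM header layout (3-byte magic plus version and reserved bytes, then big-endian counts of 16-byte index entries and data bytes):

    import struct

    def rpm_section_size(f, offset):
        # Header sections start with the magic 8e ad e8, a version byte
        # and 4 reserved bytes; two big-endian 32-bit counts follow: the
        # number of 16-byte index entries and the size of the data blob.
        f.seek(offset)
        if f.read(3) != b'\x8e\xad\xe8':
            raise ValueError('not an rpm header section')
        f.seek(offset + 8)
        il, dl = struct.unpack('>II', f.read(8))
        return 8 + 8 + 16 * il + dl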
diff --git a/scripts/runqemu b/scripts/runqemu
index 73d7d5818b..69cd44864e 100755
--- a/scripts/runqemu
+++ b/scripts/runqemu
@@ -5,18 +5,8 @@
# Copyright (C) 2006-2011 Linux Foundation
# Copyright (c) 2016 Wind River Systems, Inc.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import os
import sys
@@ -28,6 +18,7 @@ import shutil
import glob
import configparser
import signal
+import time
class RunQemuError(Exception):
"""Custom exception to raise on known errors."""
@@ -69,26 +60,38 @@ def print_usage():
Usage: you can run this script with any valid combination
of the following environment variables (in any order):
KERNEL - the kernel image file to use
+ BIOS - the bios image file to use
ROOTFS - the rootfs image file or nfsroot directory to use
DEVICE_TREE - the device tree blob to use
MACHINE - the machine name (optional, autodetected from KERNEL filename if unspecified)
Simplified QEMU command-line options can be passed with:
nographic - disable video console
+ nonetwork - disable network connectivity
+ novga - disable VGA emulation completely
+ sdl - choose the SDL UI frontend
+ gtk - choose the Gtk UI frontend
+ gl - enable virgl-based GL acceleration (also needs gtk or sdl options)
+ gl-es - enable virgl-based GL acceleration, using OpenGL ES (also needs gtk or sdl options)
+ egl-headless - enable headless EGL output; use vnc (via publicvnc option) or spice to see it
+ (hint: if /dev/dri/renderD* is absent due to lack of suitable GPU, 'modprobe vgem' will create
+ one suitable for mesa llvmpipe software renderer)
serial - enable a serial console on /dev/ttyS0
- slirp - enable user networking, no root privileges is required
+ serialstdio - enable a serial console on stdio (regardless of graphics mode)
+ slirp - enable user networking, no root privilege is required
+ snapshot - don't write changes back to images
kvm - enable KVM when running x86/x86_64 (VT-capable CPU required)
kvm-vhost - enable KVM with vhost when running x86/x86_64 (VT-capable CPU required)
publicvnc - enable a VNC server open to all hosts
audio - enable audio
+ guestagent - enable guest agent communication
+ qmp=<path> - create a QMP socket (defaults to unix:qmp.sock if unspecified)
[*/]ovmf* - OVMF firmware file or base name for booting with UEFI
tcpserial=<port> - specify tcp serial port number
- biosdir=<dir> - specify custom bios dir
- biosfilename=<filename> - specify bios filename
qemuparams=<xyz> - specify custom parameters to QEMU
bootparams=<xyz> - specify custom kernel parameters during boot
help, -h, --help: print this text
-d, --debug: Enable debug output
- -q, --quite: Hide most output except error messages
+ -q, --quiet: Hide most output except error messages
Examples:
runqemu
@@ -98,11 +101,13 @@ Examples:
runqemu qemux86-64 core-image-sato ext4
runqemu qemux86-64 wic-image-minimal wic
runqemu path/to/bzImage-qemux86.bin path/to/nfsrootdir/ serial
- runqemu qemux86 iso/hddimg/wic.vmdk/wic.qcow2/wic.vdi/ramfs/cpio.gz...
+ runqemu qemux86 iso/hddimg/wic.vmdk/wic.vhd/wic.vhdx/wic.qcow2/wic.vdi/ramfs/cpio.gz...
runqemu qemux86 qemuparams="-m 256"
runqemu qemux86 bootparams="psplash=false"
runqemu path/to/<image>-<machine>.wic
runqemu path/to/<image>-<machine>.wic.vmdk
+ runqemu path/to/<image>-<machine>.wic.vhdx
+ runqemu path/to/<image>-<machine>.wic.vhd
""")
def check_tun():
@@ -114,62 +119,16 @@ def check_tun():
if not os.access(dev_tun, os.W_OK):
raise RunQemuError("TUN control device %s is not writable, please fix (e.g. sudo chmod 666 %s)" % (dev_tun, dev_tun))
-def check_libgl(qemu_bin):
- cmd = 'ldd %s' % qemu_bin
- logger.debug('Running %s...' % cmd)
- need_gl = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout.read().decode('utf-8')
- if re.search('libGLU', need_gl):
- # We can't run without a libGL.so
- libgl = False
- check_files = (('/usr/lib/libGL.so', '/usr/lib/libGLU.so'), \
- ('/usr/lib64/libGL.so', '/usr/lib64/libGLU.so'), \
- ('/usr/lib/*-linux-gnu/libGL.so', '/usr/lib/*-linux-gnu/libGLU.so'))
-
- for (f1, f2) in check_files:
- if re.search('\*', f1):
- for g1 in glob.glob(f1):
- if libgl:
- break
- if os.path.exists(g1):
- for g2 in glob.glob(f2):
- if os.path.exists(g2):
- libgl = True
- break
- if libgl:
- break
- else:
- if os.path.exists(f1) and os.path.exists(f2):
- libgl = True
- break
- if not libgl:
- logger.error("You need libGL.so and libGLU.so to exist in your library path to run the QEMU emulator.")
- logger.error("Ubuntu package names are: libgl1-mesa-dev and libglu1-mesa-dev.")
- logger.error("Fedora package names are: mesa-libGL-devel mesa-libGLU-devel.")
- raise RunQemuError('%s requires libGLU, but not found' % qemu_bin)
-
-def get_first_file(cmds):
- """Return first file found in wildcard cmds"""
- for cmd in cmds:
- all_files = glob.glob(cmd)
+def get_first_file(globs):
+ """Return first file found in wildcard globs"""
+ for g in globs:
+ all_files = glob.glob(g)
if all_files:
for f in all_files:
if not os.path.isdir(f):
return f
return ''
-def check_free_port(host, port):
- """ Check whether the port is free or not """
- import socket
- from contextlib import closing
-
- with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
- if sock.connect_ex((host, port)) == 0:
- # Port is open, so not free
- return False
- else:
- # Port is not open, so free
- return True
-
class BaseConfig(object):
def __init__(self):
# The self.d saved vars from self.set(), part of them are from qemuboot.conf
@@ -180,27 +139,33 @@ class BaseConfig(object):
self.env_vars = ('MACHINE',
'ROOTFS',
'KERNEL',
+ 'BIOS',
'DEVICE_TREE',
'DEPLOY_DIR_IMAGE',
'OE_TMPDIR',
'OECORE_NATIVE_SYSROOT',
+ 'MULTICONFIG',
+ 'SERIAL_CONSOLES',
)
self.qemu_opt = ''
self.qemu_opt_script = ''
- self.clean_nfs_dir = False
+ self.qemuparams = ''
self.nfs_server = ''
self.rootfs = ''
# File name(s) of a OVMF firmware file or variable store,
# to be added with -drive if=pflash.
# Found in the same places as the rootfs, with or without one of
# these suffices: qcow2, bin.
- # Setting one also adds "-vga std" because that is all that
- # OVMF supports.
self.ovmf_bios = []
+ # When enrolling default Secure Boot keys, the hypervisor
+ # must provide the Platform Key and the first Key Exchange Key
+ # certificate in the Type 11 SMBIOS table.
+ self.ovmf_secboot_pkkek1 = ''
self.qemuboot = ''
self.qbconfload = False
self.kernel = ''
+ self.bios = ''
self.kernel_cmdline = ''
self.kernel_cmdline_script = ''
self.bootparams = ''
@@ -209,23 +174,39 @@ class BaseConfig(object):
self.kvm_enabled = False
self.vhost_enabled = False
self.slirp_enabled = False
+ self.net_bridge = None
self.nfs_instance = 0
self.nfs_running = False
+ self.serialconsole = False
self.serialstdio = False
+ self.nographic = False
+ self.nonetwork = False
+ self.sdl = False
+ self.gtk = False
+ self.gl = False
+ self.gl_es = False
+ self.egl_headless = False
+ self.publicvnc = False
+ self.novga = False
self.cleantap = False
self.saved_stty = ''
self.audio_enabled = False
self.tcpserial_portnum = ''
- self.custombiosdir = ''
- self.lock = ''
- self.lock_descriptor = ''
+ self.taplock = ''
+ self.taplock_descriptor = None
+ self.portlocks = {}
self.bitbake_e = ''
self.snapshot = False
+ self.wictypes = ('wic', 'wic.vmdk', 'wic.qcow2', 'wic.vdi', "wic.vhd", "wic.vhdx")
self.fstypes = ('ext2', 'ext3', 'ext4', 'jffs2', 'nfs', 'btrfs',
- 'cpio.gz', 'cpio', 'ramfs', 'tar.bz2', 'tar.gz')
- self.vmtypes = ('hddimg', 'hdddirect', 'wic', 'wic.vmdk',
- 'wic.qcow2', 'wic.vdi', 'iso')
+ 'cpio.gz', 'cpio', 'ramfs', 'tar.bz2', 'tar.gz',
+ 'squashfs', 'squashfs-xz', 'squashfs-lzo',
+ 'squashfs-lz4', 'squashfs-zst')
+ self.vmtypes = ('hddimg', 'iso')
+ self.fsinfo = {}
self.network_device = "-device e1000,netdev=net0,mac=@MAC@"
+ self.cmdline_ip_slirp = "ip=dhcp"
+ self.cmdline_ip_tap = "ip=192.168.7.@CLIENT@::192.168.7.@GATEWAY@:255.255.255.0::eth0:off:8.8.8.8 net.ifnames=0"
# Use different mac section for tap and slirp to avoid
# conflicts, e.g., when one is running with tap, the other is
# running with slirp.
@@ -235,30 +216,97 @@ class BaseConfig(object):
self.mac_tap = "52:54:00:12:34:"
self.mac_slirp = "52:54:00:12:35:"
# pid of the actual qemu process
- self.qemupid = None
+ self.qemu_environ = os.environ.copy()
+ self.qemuprocess = None
# avoid cleanup twice
self.cleaned = False
-
- def acquire_lock(self, error=True):
- logger.debug("Acquiring lockfile %s..." % self.lock)
+ # Files to cleanup after run
+ self.cleanup_files = []
+ self.qmp = None
+ self.guest_agent = False
+ self.guest_agent_sockpath = '/tmp/qga.sock'
+
+ def acquire_taplock(self, error=True):
+ logger.debug("Acquiring lockfile %s..." % self.taplock)
try:
- self.lock_descriptor = open(self.lock, 'w')
- fcntl.flock(self.lock_descriptor, fcntl.LOCK_EX|fcntl.LOCK_NB)
+ self.taplock_descriptor = open(self.taplock, 'w')
+ fcntl.flock(self.taplock_descriptor, fcntl.LOCK_EX|fcntl.LOCK_NB)
except Exception as e:
- msg = "Acquiring lockfile %s failed: %s" % (self.lock, e)
+ msg = "Acquiring lockfile %s failed: %s" % (self.taplock, e)
if error:
logger.error(msg)
else:
logger.info(msg)
- if self.lock_descriptor:
- self.lock_descriptor.close()
+ if self.taplock_descriptor:
+ self.taplock_descriptor.close()
+ self.taplock_descriptor = None
return False
return True
- def release_lock(self):
- fcntl.flock(self.lock_descriptor, fcntl.LOCK_UN)
- self.lock_descriptor.close()
- os.remove(self.lock)
+ def release_taplock(self):
+ if self.taplock_descriptor:
+ logger.debug("Releasing lockfile for tap device '%s'" % self.tap)
+ # We pass the fd to the qemu process and if we unlock here, it would unlock for
+ # that too. Therefore don't unlock, just close
+ # fcntl.flock(self.taplock_descriptor, fcntl.LOCK_UN)
+ self.taplock_descriptor.close()
+ # Removing the file is a potential race, don't do that either
+ # os.remove(self.taplock)
+ self.taplock_descriptor = None
+
+ def check_free_port(self, host, port, lockdir):
+ """ Check whether the port is free or not """
+ import socket
+ from contextlib import closing
+
+ lockfile = os.path.join(lockdir, str(port) + '.lock')
+ if self.acquire_portlock(lockfile):
+ with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
+ if sock.connect_ex((host, port)) == 0:
+ # Port is open, so not free
+ self.release_portlock(lockfile)
+ return False
+ else:
+ # Port is not open, so free
+ return True
+ else:
+ return False
+
+ def acquire_portlock(self, lockfile):
+ logger.debug("Acquiring lockfile %s..." % lockfile)
+ try:
+ portlock_descriptor = open(lockfile, 'w')
+ self.portlocks.update({lockfile: portlock_descriptor})
+ fcntl.flock(self.portlocks[lockfile], fcntl.LOCK_EX|fcntl.LOCK_NB)
+ except Exception as e:
+ msg = "Acquiring lockfile %s failed: %s" % (lockfile, e)
+ logger.info(msg)
+ if lockfile in self.portlocks.keys() and self.portlocks[lockfile]:
+ self.portlocks[lockfile].close()
+ del self.portlocks[lockfile]
+ return False
+ return True
+
+ def release_portlock(self, lockfile=None):
+ if lockfile != None:
+ logger.debug("Releasing lockfile '%s'" % lockfile)
+ # We pass the fd to the qemu process and if we unlock here, it would unlock for
+ # that too. Therefore don't unlock, just close
+ # fcntl.flock(self.portlocks[lockfile], fcntl.LOCK_UN)
+ self.portlocks[lockfile].close()
+ # Removing the file is a potential race, don't do that either
+ # os.remove(lockfile)
+ del self.portlocks[lockfile]
+ elif len(self.portlocks):
+ for lockfile, descriptor in self.portlocks.items():
+ logger.debug("Releasing lockfile '%s'" % lockfile)
+ # We pass the fd to the qemu process and if we unlock here, it would unlock for
+ # that too. Therefore don't unlock, just close
+ # fcntl.flock(descriptor, fcntl.LOCK_UN)
+ descriptor.close()
+ # Removing the file is a potential race, don't do that either
+ # os.remove(lockfile)
+ self.portlocks = {}
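The close-but-never-unlock pattern in both lock helpers is deliberate: the descriptors are later handed to the qemu child via pass_fds, and an flock travels with the open file description, so the child keeps holding the lock for as long as it runs. A condensed sketch of the mechanism (lock path illustrative):

    import fcntl
    import subprocess

    lock = open('/tmp/qemu-port-locks/2222.lock', 'w')   # path illustrative
    fcntl.flock(lock, fcntl.LOCK_EX | fcntl.LOCK_NB)     # raises OSError if already held

    # The child inherits the descriptor and, with it, the flock; closing
    # our copy in the parent does not drop the lock while the child lives.
    child = subprocess.Popen(['sleep', '60'], pass_fds=[lock.fileno()])
    lock.close()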
def get(self, key):
if key in self.d:
@@ -285,8 +333,8 @@ class BaseConfig(object):
def check_arg_fstype(self, fst):
"""Check and set FSTYPE"""
- if fst not in self.fstypes + self.vmtypes:
- logger.warn("Maybe unsupported FSTYPE: %s" % fst)
+ if fst not in self.fstypes + self.vmtypes + self.wictypes:
+ logger.warning("Maybe unsupported FSTYPE: %s" % fst)
if not self.fstype or self.fstype == fst:
if fst == 'ramfs':
fst = 'cpio.gz'
@@ -315,21 +363,21 @@ class BaseConfig(object):
def check_arg_path(self, p):
"""
- Check whether it is <image>.qemuboot.conf or contains <image>.qemuboot.conf
- - Check whether is a kernel file
- - Check whether is a image file
- - Check whether it is a nfs dir
- - Check whether it is a OVMF flash file
+ - Check whether it is a kernel file
+ - Check whether it is an image file
+ - Check whether it is an NFS dir
+ - Check whether it is an OVMF flash file
"""
if p.endswith('.qemuboot.conf'):
self.qemuboot = p
self.qbconfload = True
- elif re.search('\.bin$', p) or re.search('bzImage', p) or \
+ elif re.search('\\.bin$', p) or re.search('bzImage', p) or \
re.search('zImage', p) or re.search('vmlinux', p) or \
re.search('fitImage', p) or re.search('uImage', p):
self.kernel = p
- elif os.path.exists(p) and (not os.path.isdir(p)) and '-image-' in os.path.basename(p):
+ elif os.path.isfile(p) and ('-image-' in os.path.basename(p) or '.rootfs.' in os.path.basename(p)):
self.rootfs = p
- # Check filename against self.fstypes can hanlde <file>.cpio.gz,
+ # Check filename against self.fstypes can handle <file>.cpio.gz,
# otherwise, its type would be "gz", which is incorrect.
fst = ""
for t in self.fstypes:
@@ -337,18 +385,24 @@ class BaseConfig(object):
fst = t
break
if not fst:
- m = re.search('.*\.(.*)$', self.rootfs)
+ m = re.search('.*\\.(.*)$', self.rootfs)
if m:
fst = m.group(1)
if fst:
self.check_arg_fstype(fst)
- qb = re.sub('\.' + fst + "$", '', self.rootfs)
- qb = '%s%s' % (re.sub('\.rootfs$', '', qb), '.qemuboot.conf')
+ qb = re.sub('\\.' + fst + "$", '.qemuboot.conf', self.rootfs)
if os.path.exists(qb):
self.qemuboot = qb
self.qbconfload = True
else:
- logger.warn("%s doesn't exist" % qb)
+ logger.warning("%s doesn't exist, will try to remove '.rootfs' from filename" % qb)
+ # Try to remove .rootfs (IMAGE_NAME_SUFFIX) as well
+ qb = re.sub('\\.rootfs.qemuboot.conf$', '.qemuboot.conf', qb)
+ if os.path.exists(qb):
+ self.qemuboot = qb
+ self.qbconfload = True
+ else:
+ logger.warning("%s doesn't exist" % qb)
else:
raise RunQemuError("Can't find FSTYPE from: %s" % p)
@@ -382,6 +436,7 @@ class BaseConfig(object):
# are there other scenarios in which we need to support being
# invoked by bitbake?
deploy = self.get('DEPLOY_DIR_IMAGE')
+ image_link_name = self.get('IMAGE_LINK_NAME')
bbchild = deploy and self.get('OE_TMPDIR')
if bbchild:
self.set_machine_deploy_dir(arg, deploy)
@@ -392,9 +447,7 @@ class BaseConfig(object):
self.set("MACHINE", arg)
return
- cmd = 'MACHINE=%s bitbake -e' % arg
- logger.info('Running %s...' % cmd)
- self.bitbake_e = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout.read().decode('utf-8')
+ self.bitbake_e = self.run_bitbake_env(arg)
# bitbake -e doesn't report invalid MACHINE as an error, so
# let's check DEPLOY_DIR_IMAGE to make sure that it is a valid
# MACHINE.
@@ -408,6 +461,24 @@ class BaseConfig(object):
else:
logger.error("%s not a directory valid DEPLOY_DIR_IMAGE" % deploy_dir_image)
self.set("MACHINE", arg)
+ if not image_link_name:
+ s = re.search('^IMAGE_LINK_NAME="(.*)"', self.bitbake_e, re.M)
+ if s:
+ image_link_name = s.group(1)
+ self.set("IMAGE_LINK_NAME", image_link_name)
+ logger.debug('Using IMAGE_LINK_NAME = "%s"' % image_link_name)
+
+ def set_dri_path(self):
+ drivers_path = os.path.join(self.bindir_native, '../lib/dri')
+ if not os.path.exists(drivers_path) or not os.listdir(drivers_path):
+ raise RunQemuError("""
+qemu has been built without opengl support and accelerated graphics support is not available.
+To enable it, add:
+DISTRO_FEATURES_NATIVE:append = " opengl"
+DISTRO_FEATURES_NATIVESDK:append = " opengl"
+to your build configuration.
+""")
+ self.qemu_environ['LIBGL_DRIVERS_PATH'] = drivers_path
def check_args(self):
for debug in ("-d", "--debug"):
@@ -420,15 +491,33 @@ class BaseConfig(object):
logger.setLevel(logging.ERROR)
sys.argv.remove(quiet)
+ if 'gl' not in sys.argv[1:] and 'gl-es' not in sys.argv[1:]:
+ self.qemu_environ['SDL_RENDER_DRIVER'] = 'software'
+ self.qemu_environ['SDL_FRAMEBUFFER_ACCELERATION'] = 'false'
+
unknown_arg = ""
for arg in sys.argv[1:]:
- if arg in self.fstypes + self.vmtypes:
+ if arg in self.fstypes + self.vmtypes + self.wictypes:
self.check_arg_fstype(arg)
elif arg == 'nographic':
- self.qemu_opt_script += ' -nographic'
- self.kernel_cmdline_script += ' console=ttyS0'
+ self.nographic = True
+ elif arg == "nonetwork":
+ self.nonetwork = True
+ elif arg == 'sdl':
+ self.sdl = True
+ elif arg == 'gtk':
+ self.gtk = True
+ elif arg == 'gl':
+ self.gl = True
+ elif arg == 'gl-es':
+ self.gl_es = True
+ elif arg == 'egl-headless':
+ self.egl_headless = True
+ elif arg == 'novga':
+ self.novga = True
elif arg == 'serial':
- self.kernel_cmdline_script += ' console=ttyS0'
+ self.serialconsole = True
+ elif arg == "serialstdio":
self.serialstdio = True
elif arg == 'audio':
logger.info("Enabling audio in qemu")
@@ -440,18 +529,25 @@ class BaseConfig(object):
self.vhost_enabled = True
elif arg == 'slirp':
self.slirp_enabled = True
+ elif arg.startswith('bridge='):
+ self.net_bridge = '%s' % arg[len('bridge='):]
elif arg == 'snapshot':
self.snapshot = True
elif arg == 'publicvnc':
+ self.publicvnc = True
self.qemu_opt_script += ' -vnc :0'
+ elif arg == 'guestagent':
+ self.guest_agent = True
+ elif arg == "qmp":
+ self.qmp = "unix:qmp.sock"
+ elif arg.startswith("qmp="):
+ self.qmp = arg[len('qmp='):]
+ elif arg.startswith('guestagent-sockpath='):
+ self.guest_agent_sockpath = '%s' % arg[len('guestagent-sockpath='):]
elif arg.startswith('tcpserial='):
- self.tcpserial_portnum = arg[len('tcpserial='):]
- elif arg.startswith('biosdir='):
- self.custombiosdir = arg[len('biosdir='):]
- elif arg.startswith('biosfilename='):
- self.qemu_opt_script += ' -bios %s' % arg[len('biosfilename='):]
+ self.tcpserial_portnum = '%s' % arg[len('tcpserial='):]
elif arg.startswith('qemuparams='):
- self.qemu_opt_script += ' %s' % arg[len('qemuparams='):]
+ self.qemuparams = ' %s' % arg[len('qemuparams='):]
elif arg.startswith('bootparams='):
self.bootparams = arg[len('bootparams='):]
elif os.path.exists(arg) or (re.search(':', arg) and re.search('/', arg)):
@@ -470,43 +566,48 @@ class BaseConfig(object):
"Try 'runqemu help' on how to use it" % \
(unknown_arg, arg))
# Check to make sure it is a valid machine
- if unknown_arg:
- if self.get('MACHINE') == unknown_arg:
- return
+ if unknown_arg and self.get('MACHINE') != unknown_arg:
if self.get('DEPLOY_DIR_IMAGE'):
machine = os.path.basename(self.get('DEPLOY_DIR_IMAGE'))
if unknown_arg == machine:
self.set("MACHINE", machine)
- return
self.check_arg_machine(unknown_arg)
if not (self.get('DEPLOY_DIR_IMAGE') or self.qbconfload):
- self.load_bitbake_env()
+ self.load_bitbake_env(target=self.rootfs)
s = re.search('^DEPLOY_DIR_IMAGE="(.*)"', self.bitbake_e, re.M)
if s:
self.set("DEPLOY_DIR_IMAGE", s.group(1))
+ if not self.get('IMAGE_LINK_NAME') and self.rootfs:
+ s = re.search('^IMAGE_LINK_NAME="(.*)"', self.bitbake_e, re.M)
+ if s:
+ image_link_name = s.group(1)
+ self.set("IMAGE_LINK_NAME", image_link_name)
+ logger.debug('Using IMAGE_LINK_NAME = "%s"' % image_link_name)
+
def check_kvm(self):
"""Check kvm and kvm-host"""
if not (self.kvm_enabled or self.vhost_enabled):
- self.qemu_opt_script += ' %s %s' % (self.get('QB_MACHINE'), self.get('QB_CPU'))
+ self.qemu_opt_script += ' %s %s %s' % (self.get('QB_MACHINE'), self.get('QB_CPU'), self.get('QB_SMP'))
return
if not self.get('QB_CPU_KVM'):
raise RunQemuError("QB_CPU_KVM is NULL, this board doesn't support kvm")
- self.qemu_opt_script += ' %s %s' % (self.get('QB_MACHINE'), self.get('QB_CPU_KVM'))
+ self.qemu_opt_script += ' %s %s %s' % (self.get('QB_MACHINE'), self.get('QB_CPU_KVM'), self.get('QB_SMP'))
yocto_kvm_wiki = "https://wiki.yoctoproject.org/wiki/How_to_enable_KVM_for_Poky_qemu"
yocto_paravirt_kvm_wiki = "https://wiki.yoctoproject.org/wiki/Running_an_x86_Yocto_Linux_image_under_QEMU_KVM"
dev_kvm = '/dev/kvm'
dev_vhost = '/dev/vhost-net'
- with open('/proc/cpuinfo', 'r') as f:
- kvm_cap = re.search('vmx|svm', "".join(f.readlines()))
- if not kvm_cap:
- logger.error("You are trying to enable KVM on a cpu without VT support.")
- logger.error("Remove kvm from the command-line, or refer:")
- raise RunQemuError(yocto_kvm_wiki)
+ if self.qemu_system.endswith(('i386', 'x86_64')):
+ with open('/proc/cpuinfo', 'r') as f:
+ kvm_cap = re.search('vmx|svm', "".join(f.readlines()))
+ if not kvm_cap:
+ logger.error("You are trying to enable KVM on a cpu without VT support.")
+ logger.error("Remove kvm from the command-line, or refer:")
+ raise RunQemuError(yocto_kvm_wiki)
if not os.path.exists(dev_kvm):
logger.error("Missing KVM device. Have you inserted kvm modules?")
@@ -515,11 +616,6 @@ class BaseConfig(object):
if os.access(dev_kvm, os.W_OK|os.R_OK):
self.qemu_opt_script += ' -enable-kvm'
- if self.get('MACHINE') == "qemux86":
- # Workaround for broken APIC window on pre 4.15 host kernels which causes boot hangs
- # See YOCTO #12301
- # On 64 bit we use x2apic
- self.kernel_cmdline_script += " clocksource=kvm-clock hpet=disable noapic nolapic"
else:
logger.error("You have no read or write permission on /dev/kvm.")
logger.error("Please change the ownership of this file as described at:")
@@ -531,10 +627,10 @@ class BaseConfig(object):
logger.error("For further help see:")
raise RunQemuError(yocto_paravirt_kvm_wiki)
- if not os.access(dev_kvm, os.W_OK|os.R_OK):
+ if not os.access(dev_vhost, os.W_OK|os.R_OK):
logger.error("You have no read or write permission on /dev/vhost-net.")
logger.error("Please change the ownership of this file as described at:")
- raise RunQemuError(yocto_kvm_wiki)
+ raise RunQemuError(yocto_paravirt_kvm_wiki)
def check_fstype(self):
"""Check and setup FSTYPE"""
@@ -545,6 +641,40 @@ class BaseConfig(object):
else:
raise RunQemuError("FSTYPE is NULL!")
+ # parse QB_FSINFO into dict, e.g. { 'wic': ['no-kernel-in-fs', 'a-flag'], 'ext4': ['another-flag']}
+ wic_fs = False
+ qb_fsinfo = self.get('QB_FSINFO')
+ if qb_fsinfo:
+ qb_fsinfo = qb_fsinfo.split()
+ for fsinfo in qb_fsinfo:
+ try:
+ fstype, fsflag = fsinfo.split(':')
+
+ if fstype == 'wic':
+ if fsflag == 'no-kernel-in-fs':
+ wic_fs = True
+ elif fsflag == 'kernel-in-fs':
+ wic_fs = False
+ else:
+ logger.warning('Unknown flag "%s:%s" in QB_FSINFO', fstype, fsflag)
+ continue
+ else:
+ logger.warning('QB_FSINFO is not supported for image type "%s"', fstype)
+ continue
+
+ if fstype in self.fsinfo:
+ self.fsinfo[fstype].append(fsflag)
+ else:
+ self.fsinfo[fstype] = [fsflag]
+ except Exception:
+ logger.error('Invalid parameter "%s" in QB_FSINFO', fsinfo)
+
+ # treat wic images as vmimages (with kernel) or as fsimages (rootfs only)
+ if wic_fs:
+ self.fstypes = self.fstypes + self.wictypes
+ else:
+ self.vmtypes = self.vmtypes + self.wictypes
+
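To make the QB_FSINFO parsing concrete, a value of "wic:no-kernel-in-fs" produces the following (stand-alone sketch of the same logic):

    qb_fsinfo = 'wic:no-kernel-in-fs'    # illustrative value
    fsinfo = {}
    for entry in qb_fsinfo.split():
        fstype, fsflag = entry.split(':')
        fsinfo.setdefault(fstype, []).append(fsflag)
    assert fsinfo == {'wic': ['no-kernel-in-fs']}
    # With this flag set, wic images join self.fstypes (rootfs-only boot)
    # instead of self.vmtypes (kernel inside the image).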
def check_rootfs(self):
"""Check and set rootfs"""
@@ -562,20 +692,37 @@ class BaseConfig(object):
if self.rootfs and not os.path.exists(self.rootfs):
# Lazy rootfs
- self.rootfs = "%s/%s-%s.%s" % (self.get('DEPLOY_DIR_IMAGE'),
- self.rootfs, self.get('MACHINE'),
+ self.rootfs = "%s/%s.%s" % (self.get('DEPLOY_DIR_IMAGE'),
+ self.get('IMAGE_LINK_NAME'),
self.fstype)
elif not self.rootfs:
- cmd_name = '%s/%s*.%s' % (self.get('DEPLOY_DIR_IMAGE'), self.get('IMAGE_NAME'), self.fstype)
- cmd_link = '%s/%s*.%s' % (self.get('DEPLOY_DIR_IMAGE'), self.get('IMAGE_LINK_NAME'), self.fstype)
- cmds = (cmd_name, cmd_link)
- self.rootfs = get_first_file(cmds)
+ glob_name = '%s/%s*.%s' % (self.get('DEPLOY_DIR_IMAGE'), self.get('IMAGE_NAME'), self.fstype)
+ glob_link = '%s/%s*.%s' % (self.get('DEPLOY_DIR_IMAGE'), self.get('IMAGE_LINK_NAME'), self.fstype)
+ globs = (glob_name, glob_link)
+ self.rootfs = get_first_file(globs)
if not self.rootfs:
- raise RunQemuError("Failed to find rootfs: %s or %s" % cmds)
+ raise RunQemuError("Failed to find rootfs: %s or %s" % globs)
if not os.path.exists(self.rootfs):
raise RunQemuError("Can't find rootfs: %s" % self.rootfs)
+ def setup_pkkek1(self):
+ """
+ Extract from PEM certificate the Platform Key and first Key
+ Exchange Key certificate string. The hypervisor needs to provide
+ it in the Type 11 SMBIOS table
+ """
+ pemcert = '%s/%s' % (self.get('DEPLOY_DIR_IMAGE'), 'OvmfPkKek1.pem')
+ try:
+ with open(pemcert, 'r') as pemfile:
+ key = pemfile.read().replace('\n', ''). \
+ replace('-----BEGIN CERTIFICATE-----', ''). \
+ replace('-----END CERTIFICATE-----', '')
+ self.ovmf_secboot_pkkek1 = key
+
+ except FileNotFoundError:
+ raise RunQemuError("Can't open PEM certificate %s " % pemcert)
+
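setup_pkkek1() simply strips the PEM armor so only the base64 body is left. Stand-alone illustration with a hypothetical, truncated certificate:

    pem = ('-----BEGIN CERTIFICATE-----\n'
           'MIIBmTCCAUeg\n'                 # illustrative, truncated body
           '-----END CERTIFICATE-----\n')
    key = pem.replace('\n', '') \
             .replace('-----BEGIN CERTIFICATE-----', '') \
             .replace('-----END CERTIFICATE-----', '')
    assert key == 'MIIBmTCCAUeg'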
def check_ovmf(self):
"""Check and set full path for OVMF firmware and variable file(s)."""
@@ -586,6 +733,8 @@ class BaseConfig(object):
path = '%s/%s.%s' % (self.get('DEPLOY_DIR_IMAGE'), ovmf, suffix)
if os.path.exists(path):
self.ovmf_bios[index] = path
+ if ovmf.endswith('secboot'):
+ self.setup_pkkek1()
break
else:
raise RunQemuError("Can't find OVMF firmware: %s" % ovmf)
@@ -612,10 +761,10 @@ class BaseConfig(object):
kernel_match_name = "%s/%s" % (deploy_dir_image, kernel_name)
kernel_match_link = "%s/%s" % (deploy_dir_image, self.get('KERNEL_IMAGETYPE'))
kernel_startswith = "%s/%s*" % (deploy_dir_image, self.get('KERNEL_IMAGETYPE'))
- cmds = (kernel_match_name, kernel_match_link, kernel_startswith)
- self.kernel = get_first_file(cmds)
+ globs = (kernel_match_name, kernel_match_link, kernel_startswith)
+ self.kernel = get_first_file(globs)
if not self.kernel:
- raise RunQemuError('KERNEL not found: %s, %s or %s' % cmds)
+ raise RunQemuError('KERNEL not found: %s, %s or %s' % globs)
if not os.path.exists(self.kernel):
raise RunQemuError("KERNEL %s not found" % self.kernel)
@@ -632,79 +781,106 @@ class BaseConfig(object):
dtb = self.get('QB_DTB')
if dtb:
deploy_dir_image = self.get('DEPLOY_DIR_IMAGE')
- cmd_match = "%s/%s" % (deploy_dir_image, dtb)
- cmd_startswith = "%s/%s*" % (deploy_dir_image, dtb)
- cmd_wild = "%s/*.dtb" % deploy_dir_image
- cmds = (cmd_match, cmd_startswith, cmd_wild)
- self.dtb = get_first_file(cmds)
+ glob_match = "%s/%s" % (deploy_dir_image, dtb)
+ glob_startswith = "%s/%s*" % (deploy_dir_image, dtb)
+ glob_wild = "%s/*.dtb" % deploy_dir_image
+ globs = (glob_match, glob_startswith, glob_wild)
+ self.dtb = get_first_file(globs)
if not os.path.exists(self.dtb):
- raise RunQemuError('DTB not found: %s, %s or %s' % cmds)
+ raise RunQemuError('DTB not found: %s, %s or %s' % globs)
+
+ def check_bios(self):
+ """Check and set bios"""
+
+ # See if the user supplied a BIOS option
+ if self.get('BIOS'):
+ self.bios = self.get('BIOS')
- def check_biosdir(self):
- """Check custombiosdir"""
- if not self.custombiosdir:
+ # QB_DEFAULT_BIOS is always a full file path
+ bios_name = os.path.basename(self.get('QB_DEFAULT_BIOS'))
+
+ # The user didn't want a bios to be loaded
+ if (bios_name == "" or bios_name == "none") and not self.bios:
return
- biosdir = ""
- biosdir_native = "%s/%s" % (self.get('STAGING_DIR_NATIVE'), self.custombiosdir)
- biosdir_host = "%s/%s" % (self.get('STAGING_DIR_HOST'), self.custombiosdir)
- for i in (self.custombiosdir, biosdir_native, biosdir_host):
- if os.path.isdir(i):
- biosdir = i
- break
+ if not self.bios:
+ deploy_dir_image = self.get('DEPLOY_DIR_IMAGE')
+ self.bios = "%s/%s" % (deploy_dir_image, bios_name)
+
+ if not self.bios:
+ raise RunQemuError('BIOS not found: %s' % bios_name)
+
+ if not os.path.exists(self.bios):
+ raise RunQemuError("BIOS %s not found" % self.bios)
- if biosdir:
- logger.debug("Assuming biosdir is: %s" % biosdir)
- self.qemu_opt_script += ' -L %s' % biosdir
- else:
- logger.error("Custom BIOS directory not found. Tried: %s, %s, and %s" % (self.custombiosdir, biosdir_native, biosdir_host))
- raise RunQemuError("Invalid custombiosdir: %s" % self.custombiosdir)
def check_mem(self):
- s = re.search('-m +([0-9]+)', self.qemu_opt_script)
+ """
+ Both qemu and the kernel need memory settings, so check QB_MEM and set it
+ for both.
+ """
+ s = re.search('-m +([0-9]+)', self.qemuparams)
if s:
self.set('QB_MEM', '-m %s' % s.group(1))
elif not self.get('QB_MEM'):
- logger.info('QB_MEM is not set, use 512M by default')
- self.set('QB_MEM', '-m 512')
+ logger.info('QB_MEM is not set, use 256M by default')
+ self.set('QB_MEM', '-m 256')
+
+ # Check and remove M or m suffix
+ qb_mem = self.get('QB_MEM')
+ if qb_mem.endswith('M') or qb_mem.endswith('m'):
+ qb_mem = qb_mem[:-1]
+
+ # Add the -m prefix if not present
+ if not qb_mem.startswith('-m'):
+ qb_mem = '-m %s' % qb_mem
+
+ self.set('QB_MEM', qb_mem)
mach = self.get('MACHINE')
- if not mach.startswith('qemumips'):
+ if not mach.startswith(('qemumips', 'qemux86', 'qemuloongarch64')):
self.kernel_cmdline_script += ' mem=%s' % self.get('QB_MEM').replace('-m','').strip() + 'M'
self.qemu_opt_script += ' %s' % self.get('QB_MEM')
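The normalization above accepts QB_MEM with or without an -m prefix or a trailing M/m suffix. A few worked inputs (same logic, stand-alone):

    for raw in ('512', '512M', '-m 256'):
        qb_mem = raw[:-1] if raw.endswith(('M', 'm')) else raw
        if not qb_mem.startswith('-m'):
            qb_mem = '-m %s' % qb_mem
        print('%s -> %s' % (raw, qb_mem))
    # 512 -> -m 512, 512M -> -m 512, -m 256 -> -m 256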
def check_tcpserial(self):
if self.tcpserial_portnum:
+ ports = self.tcpserial_portnum.split(':')
+ port = ports[0]
if self.get('QB_TCPSERIAL_OPT'):
- self.qemu_opt_script += ' ' + self.get('QB_TCPSERIAL_OPT').replace('@PORT@', self.tcpserial_portnum)
+ self.qemu_opt_script += ' ' + self.get('QB_TCPSERIAL_OPT').replace('@PORT@', port)
else:
- self.qemu_opt_script += ' -serial tcp:127.0.0.1:%s' % self.tcpserial_portnum
+ self.qemu_opt_script += ' -serial tcp:127.0.0.1:%s,nodelay=on' % port
+
+ if len(ports) > 1:
+ for port in ports[1:]:
+ self.qemu_opt_script += ' -serial tcp:127.0.0.1:%s,nodelay=on' % port
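With the colon-separated form, tcpserial=1234:1235 now creates one listener per port; in the default branch (no QB_TCPSERIAL_OPT) the appended options come out as below (stand-alone sketch):

    portnum = '1234:1235'
    opts = ''.join(' -serial tcp:127.0.0.1:%s,nodelay=on' % p
                   for p in portnum.split(':'))
    # ' -serial tcp:127.0.0.1:1234,nodelay=on -serial tcp:127.0.0.1:1235,nodelay=on'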
def check_and_set(self):
"""Check configs sanity and set when needed"""
self.validate_paths()
- if not self.slirp_enabled:
+ if not self.slirp_enabled and not self.net_bridge:
check_tun()
# Check audio
if self.audio_enabled:
if not self.get('QB_AUDIO_DRV'):
raise RunQemuError("QB_AUDIO_DRV is NULL, this board doesn't support audio")
if not self.get('QB_AUDIO_OPT'):
- logger.warn('QB_AUDIO_OPT is NULL, you may need define it to make audio work')
+ logger.warning('QB_AUDIO_OPT is NULL, you may need to define it to make audio work')
else:
self.qemu_opt_script += ' %s' % self.get('QB_AUDIO_OPT')
os.putenv('QEMU_AUDIO_DRV', self.get('QB_AUDIO_DRV'))
else:
os.putenv('QEMU_AUDIO_DRV', 'none')
+ self.check_qemu_system()
self.check_kvm()
self.check_fstype()
self.check_rootfs()
self.check_ovmf()
self.check_kernel()
self.check_dtb()
- self.check_biosdir()
+ self.check_bios()
self.check_mem()
self.check_tcpserial()
@@ -713,7 +889,7 @@ class BaseConfig(object):
if self.get('DEPLOY_DIR_IMAGE'):
deploy_dir_image = self.get('DEPLOY_DIR_IMAGE')
else:
- logger.warn("Can't find qemuboot conf file, DEPLOY_DIR_IMAGE is NULL!")
+ logger.warning("Can't find qemuboot conf file, DEPLOY_DIR_IMAGE is NULL!")
return
if self.rootfs and not os.path.exists(self.rootfs):
@@ -721,8 +897,10 @@ class BaseConfig(object):
machine = self.get('MACHINE')
if not machine:
machine = os.path.basename(deploy_dir_image)
- self.qemuboot = "%s/%s-%s.qemuboot.conf" % (deploy_dir_image,
- self.rootfs, machine)
+ if not self.get('IMAGE_LINK_NAME'):
+ raise RunQemuError("IMAGE_LINK_NAME wasn't set to find corresponding .qemuboot.conf file")
+ self.qemuboot = "%s/%s.qemuboot.conf" % (deploy_dir_image,
+ self.get('IMAGE_LINK_NAME'))
else:
cmd = 'ls -t %s/*.qemuboot.conf' % deploy_dir_image
logger.debug('Running %s...' % cmd)
@@ -813,43 +991,46 @@ class BaseConfig(object):
self.set('STAGING_BINDIR_NATIVE', '%s/usr/bin' % self.get('STAGING_DIR_NATIVE'))
def print_config(self):
- logger.info('Continuing with the following parameters:\n')
+ logoutput = ['Continuing with the following parameters:']
if not self.fstype in self.vmtypes:
- print('KERNEL: [%s]' % self.kernel)
+ logoutput.append('KERNEL: [%s]' % self.kernel)
+ if self.bios:
+ logoutput.append('BIOS: [%s]' % self.bios)
if self.dtb:
- print('DTB: [%s]' % self.dtb)
- print('MACHINE: [%s]' % self.get('MACHINE'))
- print('FSTYPE: [%s]' % self.fstype)
+ logoutput.append('DTB: [%s]' % self.dtb)
+ logoutput.append('MACHINE: [%s]' % self.get('MACHINE'))
+ try:
+ fstype_flags = ' (' + ', '.join(self.fsinfo[self.fstype]) + ')'
+ except KeyError:
+ fstype_flags = ''
+ logoutput.append('FSTYPE: [%s%s]' % (self.fstype, fstype_flags))
if self.fstype == 'nfs':
- print('NFS_DIR: [%s]' % self.rootfs)
+ logoutput.append('NFS_DIR: [%s]' % self.rootfs)
else:
- print('ROOTFS: [%s]' % self.rootfs)
+ logoutput.append('ROOTFS: [%s]' % self.rootfs)
if self.ovmf_bios:
- print('OVMF: %s' % self.ovmf_bios)
- print('CONFFILE: [%s]' % self.qemuboot)
- print('')
+ logoutput.append('OVMF: %s' % self.ovmf_bios)
+ if (self.ovmf_secboot_pkkek1):
+ logoutput.append('SECBOOT PKKEK1: [%s...]' % self.ovmf_secboot_pkkek1[0:100])
+ logoutput.append('CONFFILE: [%s]' % self.qemuboot)
+ logoutput.append('')
+ logger.info('\n'.join(logoutput))
def setup_nfs(self):
if not self.nfs_server:
if self.slirp_enabled:
self.nfs_server = '10.0.2.2'
else:
- self.nfs_server = '192.168.7.1'
-
- # Figure out a new nfs_instance to allow multiple qemus running.
- # CentOS 7.1's ps doesn't print full command line without "ww"
- # when invoke by subprocess.Popen().
- cmd = "ps auxww"
- ps = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout.read().decode('utf-8')
- pattern = '/bin/unfsd .* -i .*\.pid -e .*/exports([0-9]+) '
- all_instances = re.findall(pattern, ps, re.M)
- if all_instances:
- all_instances.sort(key=int)
- self.nfs_instance = int(all_instances.pop()) + 1
-
- nfsd_port = 3049 + 2 * self.nfs_instance
- mountd_port = 3048 + 2 * self.nfs_instance
+ self.nfs_server = '192.168.7.@GATEWAY@'
+
+ nfsd_port = 3048 + self.nfs_instance
+ lockdir = "/tmp/qemu-port-locks"
+ self.make_lock_dir(lockdir)
+ while not self.check_free_port('localhost', nfsd_port, lockdir):
+ self.nfs_instance += 1
+ nfsd_port += 1
+ mountd_port = nfsd_port
# Export vars for runqemu-export-rootfs
export_dict = {
'NFS_INSTANCE': self.nfs_instance,
@@ -860,7 +1041,11 @@ class BaseConfig(object):
# Use '%s' since they are integers
os.putenv(k, '%s' % v)
- self.unfs_opts="nfsvers=3,port=%s,udp,mountport=%s" % (nfsd_port, mountd_port)
+ qb_nfsrootfs_extra_opt = self.get("QB_NFSROOTFS_EXTRA_OPT")
+ if qb_nfsrootfs_extra_opt and not qb_nfsrootfs_extra_opt.startswith(","):
+ qb_nfsrootfs_extra_opt = "," + qb_nfsrootfs_extra_opt
+
+ self.unfs_opts="nfsvers=3,port=%s,tcp,mountport=%s%s" % (nfsd_port, mountd_port, qb_nfsrootfs_extra_opt)
# Extract .tar.bz2 or .tar.bz if no nfs dir
if not (self.rootfs and os.path.isdir(self.rootfs)):
@@ -880,39 +1065,68 @@ class BaseConfig(object):
if not src:
raise RunQemuError("No NFS_DIR is set, and can't find %s or %s to extract" % (src1, src2))
logger.info('NFS_DIR not found, extracting %s to %s' % (src, dest))
- cmd = 'runqemu-extract-sdk %s %s' % (src, dest)
- logger.info('Running %s...' % cmd)
- if subprocess.call(cmd, shell=True) != 0:
- raise RunQemuError('Failed to run %s' % cmd)
- self.clean_nfs_dir = True
+ cmd = ('runqemu-extract-sdk', src, dest)
+ logger.info('Running %s...' % str(cmd))
+ if subprocess.call(cmd) != 0:
+ raise RunQemuError('Failed to run %s' % str(cmd))
self.rootfs = dest
+ self.cleanup_files.append(self.rootfs)
+ self.cleanup_files.append('%s.pseudo_state' % self.rootfs)
# Start the userspace NFS server
- cmd = 'runqemu-export-rootfs start %s' % self.rootfs
- logger.info('Running %s...' % cmd)
- if subprocess.call(cmd, shell=True) != 0:
- raise RunQemuError('Failed to run %s' % cmd)
+ cmd = ('runqemu-export-rootfs', 'start', self.rootfs)
+ logger.info('Running %s...' % str(cmd))
+ if subprocess.call(cmd) != 0:
+ raise RunQemuError('Failed to run %s' % str(cmd))
self.nfs_running = True
+ def setup_cmd(self):
+ cmd = self.get('QB_SETUP_CMD')
+ if cmd != '':
+ logger.info('Running setup command %s' % str(cmd))
+ if subprocess.call(cmd, shell=True) != 0:
+ raise RunQemuError('Failed to run %s' % str(cmd))
+
+ def setup_net_bridge(self):
+ self.set('NETWORK_CMD', '-netdev bridge,br=%s,id=net0,helper=%s -device virtio-net-pci,netdev=net0 ' % (
+ self.net_bridge, os.path.join(self.bindir_native, 'qemu-oe-bridge-helper')))
+
+ def make_lock_dir(self, lockdir):
+ if not os.path.exists(lockdir):
+ # There might be a race issue when multiple runqemu processes are
+ # running at the same time.
+ try:
+ os.mkdir(lockdir)
+ os.chmod(lockdir, 0o777)
+ except FileExistsError:
+ pass
+ return
+
def setup_slirp(self):
"""Setup user networking"""
if self.fstype == 'nfs':
self.setup_nfs()
- self.kernel_cmdline_script += ' ip=dhcp'
+ netconf = " " + self.cmdline_ip_slirp
+ logger.info("Network configuration:%s", netconf)
+ self.kernel_cmdline_script += netconf
# Port mapping
- hostfwd = ",hostfwd=tcp::2222-:22,hostfwd=tcp::2323-:23"
+ hostfwd = ",hostfwd=tcp:127.0.0.1:2222-:22,hostfwd=tcp:127.0.0.1:2323-:23"
qb_slirp_opt_default = "-netdev user,id=net0%s,tftp=%s" % (hostfwd, self.get('DEPLOY_DIR_IMAGE'))
qb_slirp_opt = self.get('QB_SLIRP_OPT') or qb_slirp_opt_default
# Figure out the port
ports = re.findall('hostfwd=[^-]*:([0-9]+)-[^,-]*', qb_slirp_opt)
ports = [int(i) for i in ports]
mac = 2
+
+ lockdir = "/tmp/qemu-port-locks"
+ self.make_lock_dir(lockdir)
+
# Find a free port to avoid conflicts
for p in ports[:]:
p_new = p
- while not check_free_port('localhost', p_new):
+ while not self.check_free_port('localhost', p_new, lockdir):
p_new += 1
mac += 1
while p_new in ports:
@@ -947,28 +1161,25 @@ class BaseConfig(object):
logger.error("ip: %s" % ip)
raise OEPathError("runqemu-ifup, runqemu-ifdown or ip not found")
- if not os.path.exists(lockdir):
- # There might be a race issue when multi runqemu processess are
- # running at the same time.
- try:
- os.mkdir(lockdir)
- os.chmod(lockdir, 0o777)
- except FileExistsError:
- pass
+ self.make_lock_dir(lockdir)
- cmd = '%s link' % ip
- logger.debug('Running %s...' % cmd)
- ip_link = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout.read().decode('utf-8')
+ cmd = (ip, 'link')
+ logger.debug('Running %s...' % str(cmd))
+ ip_link = subprocess.check_output(cmd).decode('utf-8')
# Matches line like: 6: tap0: <foo>
- possibles = re.findall('^[0-9]+: +(tap[0-9]+): <.*', ip_link, re.M)
+ oe_tap_name = 'tap'
+ if 'OE_TAP_NAME' in os.environ:
+ oe_tap_name = os.environ['OE_TAP_NAME']
+ tap_re = '^[0-9]+: +(' + oe_tap_name + '[0-9]+): <.*'
+ possibles = re.findall(tap_re, ip_link, re.M)
tap = ""
for p in possibles:
lockfile = os.path.join(lockdir, p)
if os.path.exists('%s.skip' % lockfile):
logger.info('Found %s.skip, skipping %s' % (lockfile, p))
continue
- self.lock = lockfile + '.lock'
- if self.acquire_lock(error=False):
+ self.taplock = lockfile + '.lock'
+ if self.acquire_taplock(error=False):
tap = p
logger.info("Using preconfigured tap device %s" % tap)
logger.info("If this is not intended, touch %s.skip to make runqemu skip %s." %(lockfile, tap))
@@ -983,26 +1194,33 @@ class BaseConfig(object):
gid = os.getgid()
uid = os.getuid()
logger.info("Setting up tap interface under sudo")
- cmd = 'sudo %s %s %s %s' % (self.qemuifup, uid, gid, self.bindir_native)
- tap = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout.read().decode('utf-8').rstrip('\n')
+ cmd = ('sudo', self.qemuifup, str(gid))
+ try:
+ tap = subprocess.check_output(cmd).decode('utf-8').strip()
+ except subprocess.CalledProcessError as e:
+ logger.error('Setting up tap device failed:\n%s\nRun runqemu-gen-tapdevs to manually create one.' % str(e))
+ sys.exit(1)
lockfile = os.path.join(lockdir, tap)
- self.lock = lockfile + '.lock'
- self.acquire_lock()
+ self.taplock = lockfile + '.lock'
+ self.acquire_taplock()
self.cleantap = True
logger.debug('Created tap: %s' % tap)
if not tap:
logger.error("Failed to setup tap device. Run runqemu-gen-tapdevs to manually create.")
- return 1
+ sys.exit(1)
self.tap = tap
- tapnum = int(tap[3:])
+ tapnum = int(tap[len(oe_tap_name):])
gateway = tapnum * 2 + 1
client = gateway + 1
if self.fstype == 'nfs':
self.setup_nfs()
- netconf = "192.168.7.%s::192.168.7.%s:255.255.255.0" % (client, gateway)
- logger.info("Network configuration: %s", netconf)
- self.kernel_cmdline_script += " ip=%s" % netconf
+ netconf = " " + self.cmdline_ip_tap
+ netconf = netconf.replace('@CLIENT@', str(client))
+ netconf = netconf.replace('@GATEWAY@', str(gateway))
+ self.nfs_server = self.nfs_server.replace('@GATEWAY@', str(gateway))
+ logger.info("Network configuration:%s", netconf)
+ self.kernel_cmdline_script += netconf
mac = "%s%02x" % (self.mac_tap, client)
qb_tap_opt = self.get('QB_TAP_OPT')
if qb_tap_opt:
@@ -1016,14 +1234,19 @@ class BaseConfig(object):
self.set('NETWORK_CMD', '%s %s' % (self.network_device.replace('@MAC@', mac), qemu_tap_opt))
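Worked example of the template substitution above: tap2 gives tapnum 2, hence gateway 5 and client 6, and the default cmdline_ip_tap template expands as follows:

    tap = 'tap2'
    tapnum = int(tap[len('tap'):])
    gateway = tapnum * 2 + 1                 # 5
    client = gateway + 1                     # 6
    netconf = ('ip=192.168.7.@CLIENT@::192.168.7.@GATEWAY@:'
               '255.255.255.0::eth0:off:8.8.8.8 net.ifnames=0')
    netconf = netconf.replace('@CLIENT@', str(client)).replace('@GATEWAY@', str(gateway))
    assert netconf.startswith('ip=192.168.7.6::192.168.7.5:')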
def setup_network(self):
- if self.get('QB_NET') == 'none':
+ if self.nonetwork or self.get('QB_NET') == 'none':
+ self.set('NETWORK_CMD', '-nic none')
return
if sys.stdin.isatty():
- self.saved_stty = subprocess.check_output("stty -g", shell=True).decode('utf-8')
+ self.saved_stty = subprocess.check_output(("stty", "-g")).decode('utf-8').strip()
self.network_device = self.get('QB_NETWORK_DEVICE') or self.network_device
- if self.slirp_enabled:
+ if self.net_bridge:
+ self.setup_net_bridge()
+ elif self.slirp_enabled:
+ self.cmdline_ip_slirp = self.get('QB_CMDLINE_IP_SLIRP') or self.cmdline_ip_slirp
self.setup_slirp()
else:
+ self.cmdline_ip_tap = self.get('QB_CMDLINE_IP_TAP') or self.cmdline_ip_tap
self.setup_tap()
def setup_rootfs(self):
@@ -1031,7 +1254,19 @@ class BaseConfig(object):
return
if 'wic.' in self.fstype:
self.fstype = self.fstype[4:]
- rootfs_format = self.fstype if self.fstype in ('vmdk', 'qcow2', 'vdi') else 'raw'
+ rootfs_format = self.fstype if self.fstype in ('vmdk', 'vhd', 'vhdx', 'qcow2', 'vdi') else 'raw'
+
+ tmpfsdir = os.environ.get("RUNQEMU_TMPFS_DIR", None)
+ if self.snapshot and tmpfsdir:
+ newrootfs = os.path.join(tmpfsdir, os.path.basename(self.rootfs)) + "." + str(os.getpid())
+ logger.info("Copying rootfs to %s" % newrootfs)
+ copy_start = time.time()
+ shutil.copyfile(self.rootfs, newrootfs)
+ logger.info("Copy done in %s seconds" % (time.time() - copy_start))
+ self.rootfs = newrootfs
+ # Don't need a second copy now!
+ self.snapshot = False
+ self.cleanup_files.append(newrootfs)
qb_rootfs_opt = self.get('QB_ROOTFS_OPT')
if qb_rootfs_opt:
@@ -1039,6 +1274,10 @@ class BaseConfig(object):
else:
self.rootfs_options = '-drive file=%s,if=virtio,format=%s' % (self.rootfs, rootfs_format)
+ qb_rootfs_extra_opt = self.get("QB_ROOTFS_EXTRA_OPT")
+ if qb_rootfs_extra_opt and not qb_rootfs_extra_opt.startswith(","):
+ qb_rootfs_extra_opt = "," + qb_rootfs_extra_opt
+
if self.fstype in ('cpio.gz', 'cpio'):
self.kernel_cmdline = 'root=/dev/ram0 rw debugshell'
self.rootfs_options = '-initrd %s' % self.rootfs
@@ -1051,28 +1290,40 @@ class BaseConfig(object):
drive_type = self.get('QB_DRIVE_TYPE')
if drive_type.startswith("/dev/sd"):
logger.info('Using scsi drive')
- vm_drive = '-drive if=none,id=hd,file=%s,format=%s -device virtio-scsi-pci,id=scsi -device scsi-hd,drive=hd' \
- % (self.rootfs, rootfs_format)
+ vm_drive = '-drive if=none,id=hd,file=%s,format=%s -device virtio-scsi-pci,id=scsi -device scsi-hd,drive=hd%s' \
+ % (self.rootfs, rootfs_format, qb_rootfs_extra_opt)
elif drive_type.startswith("/dev/hd"):
logger.info('Using ide drive')
vm_drive = "-drive file=%s,format=%s" % (self.rootfs, rootfs_format)
+ elif drive_type.startswith("/dev/vdb"):
+ logger.info('Using block virtio drive')
+ vm_drive = '-drive id=disk0,file=%s,if=none,format=%s -device virtio-blk-device,drive=disk0%s' \
+ % (self.rootfs, rootfs_format, qb_rootfs_extra_opt)
else:
# virtio might have been selected explicitly (just use it), or
# is used as fallback (then warn about that).
if not drive_type.startswith("/dev/vd"):
- logger.warn("Unknown QB_DRIVE_TYPE: %s" % drive_type)
- logger.warn("Failed to figure out drive type, consider define or fix QB_DRIVE_TYPE")
- logger.warn('Trying to use virtio block drive')
+ logger.warning("Unknown QB_DRIVE_TYPE: %s" % drive_type)
+ logger.warning("Failed to figure out drive type, consider define or fix QB_DRIVE_TYPE")
+ logger.warning('Trying to use virtio block drive')
vm_drive = '-drive if=virtio,file=%s,format=%s' % (self.rootfs, rootfs_format)
# All branches above set vm_drive.
- self.rootfs_options = '%s -no-reboot' % vm_drive
- self.kernel_cmdline = 'root=%s rw highres=off' % (self.get('QB_KERNEL_ROOT'))
+ self.rootfs_options = vm_drive
+ if not self.fstype in self.vmtypes:
+ self.rootfs_options += ' -no-reboot'
+
+ # By default, ' rw' is appended to QB_KERNEL_ROOT unless either ro or rw is explicitly passed.
+ qb_kernel_root = self.get('QB_KERNEL_ROOT')
+ qb_kernel_root_l = qb_kernel_root.split()
+ if not ('ro' in qb_kernel_root_l or 'rw' in qb_kernel_root_l):
+ qb_kernel_root += ' rw'
+ self.kernel_cmdline = 'root=%s' % qb_kernel_root
if self.fstype == 'nfs':
self.rootfs_options = ''
k_root = '/dev/nfs nfsroot=%s:%s,%s' % (self.nfs_server, os.path.abspath(self.rootfs), self.unfs_opts)
- self.kernel_cmdline = 'root=%s rw highres=off' % k_root
+ self.kernel_cmdline = 'root=%s rw' % k_root
if self.fstype == 'none':
self.rootfs_options = ''
@@ -1083,7 +1334,7 @@ class BaseConfig(object):
"""attempt to determine the appropriate qemu-system binary"""
mach = self.get('MACHINE')
if not mach:
- search = '.*(qemux86-64|qemux86|qemuarm64|qemuarm|qemumips64|qemumips64el|qemumipsel|qemumips|qemuppc).*'
+ search = '.*(qemux86-64|qemux86|qemuarm64|qemuarm|qemuloongarch64|qemumips64|qemumips64el|qemumipsel|qemumips|qemuppc).*'
if self.rootfs:
match = re.match(search, self.rootfs)
if match:
@@ -1106,6 +1357,8 @@ class BaseConfig(object):
qbsys = 'x86_64'
elif mach == 'qemuppc':
qbsys = 'ppc'
+ elif mach == 'qemuloongarch64':
+ qbsys = 'loongarch64'
elif mach == 'qemumips':
qbsys = 'mips'
elif mach == 'qemumips64':
@@ -1118,24 +1371,151 @@ class BaseConfig(object):
qbsys = 'riscv64'
elif mach == 'qemuriscv32':
qbsys = 'riscv32'
+ else:
+ logger.error("Unable to determine QEMU PC System emulator for %s machine." % mach)
+ logger.error("As %s is not among valid QEMU machines such as," % mach)
+ logger.error("qemux86-64, qemux86, qemuarm64, qemuarm, qemumips64, qemumips64el, qemumipsel, qemumips, qemuppc")
+ raise RunQemuError("Set qb_system_name with suitable QEMU PC System emulator in .*qemuboot.conf.")
return 'qemu-system-%s' % qbsys
- def setup_final(self):
+ def check_qemu_system(self):
qemu_system = self.get('QB_SYSTEM_NAME')
if not qemu_system:
qemu_system = self.guess_qb_system()
if not qemu_system:
raise RunQemuError("Failed to boot, QB_SYSTEM_NAME is NULL!")
+ self.qemu_system = qemu_system
+
+ def check_render_nodes(self):
+ render_hint = """If /dev/dri/renderD* is absent due to lack of suitable GPU, 'modprobe vgem' will create one suitable for mesa llvmpipe software renderer."""
+ try:
+ content = os.listdir("/dev/dri")
+ nodes = [i for i in content if i.startswith('renderD')]
+ if len(nodes) == 0:
+ raise RunQemuError("No render nodes found in /dev/dri/: %s. %s" %(content, render_hint))
+ for n in nodes:
+ try:
+ with open(os.path.join("/dev/dri", n), "w") as f:
+ f.close()
+ break
+ except IOError:
+ pass
+ else:
+ raise RunQemuError("None of the render nodes in /dev/dri/ are accessible: %s; you may need to add yourself to 'render' group or otherwise ensure you have read-write permissions on one of them." %(nodes))
+ except FileNotFoundError:
+ raise RunQemuError("/dev/dri directory does not exist; no render nodes available on this machine. %s" %(render_hint))
+
+ def setup_guest_agent(self):
+ if self.guest_agent == True:
+ self.qemu_opt += ' -chardev socket,path=' + self.guest_agent_sockpath + ',server,nowait,id=qga0 '
+ self.qemu_opt += ' -device virtio-serial '
+ self.qemu_opt += ' -device virtserialport,chardev=qga0,name=org.qemu.guest_agent.0 '
+
+ def setup_qmp(self):
+ if self.qmp:
+ self.qemu_opt += " -qmp %s,server,nowait" % self.qmp
+
+ def setup_vga(self):
+ if self.nographic == True:
+ if self.sdl == True:
+ raise RunQemuError('Option nographic makes no sense alongside the sdl option.')
+ if self.gtk == True:
+ raise RunQemuError('Option nographic makes no sense alongside the gtk option.')
+ self.qemu_opt += ' -nographic'
+
+ if self.novga == True:
+ self.qemu_opt += ' -vga none'
+ return
+
+ if (self.gl_es == True or self.gl == True) and (self.sdl == False and self.gtk == False):
+ raise RunQemuError('Option gl/gl-es needs gtk or sdl option.')
+
+ # If we have no display option, we autodetect based upon what qemu supports. We
+ # need our font setup and show-cursor below, so we need to see what qemu --help says
+ # is supported so we can pass the correct config in.
+ if not self.nographic and not self.sdl and not self.gtk and not self.publicvnc and not self.egl_headless:
+ output = subprocess.check_output([self.qemu_bin, "--help"], universal_newlines=True, env=self.qemu_environ)
+ if "-display gtk" in output:
+ self.gtk = True
+ elif "-display sdl" in output:
+ self.sdl = True
+ else:
+ self.qemu_opt += ' -display none'
+
+ if self.sdl == True or self.gtk == True or self.egl_headless == True:
+
+ if self.qemu_system.endswith(('i386', 'x86_64')):
+ if self.gl or self.gl_es or self.egl_headless:
+ self.qemu_opt += ' -device virtio-vga-gl '
+ else:
+ self.qemu_opt += ' -device virtio-vga '
+
+ self.qemu_opt += ' -display '
+ if self.egl_headless == True:
+ self.check_render_nodes()
+ self.set_dri_path()
+ self.qemu_opt += 'egl-headless,'
+ else:
+ if self.sdl == True:
+ self.qemu_opt += 'sdl,'
+ elif self.gtk == True:
+ self.qemu_environ['FONTCONFIG_PATH'] = '/etc/fonts'
+ self.qemu_opt += 'gtk,'
+
+ if self.gl == True:
+ self.set_dri_path()
+ self.qemu_opt += 'gl=on,'
+ elif self.gl_es == True:
+ self.set_dri_path()
+ self.qemu_opt += 'gl=es,'
+ self.qemu_opt += 'show-cursor=on'
+
+ self.qemu_opt += ' %s' %self.get('QB_GRAPHICS')
+
+ def setup_serial(self):
+ # Setup correct kernel command line for serial
+ if self.get('SERIAL_CONSOLES') and (self.serialstdio == True or self.serialconsole == True or self.nographic == True or self.tcpserial_portnum):
+ for entry in self.get('SERIAL_CONSOLES').split(' '):
+ self.kernel_cmdline_script += ' console=%s' %entry.split(';')[1]
+
+ # We always want ttyS0 and ttyS1 in qemu machines (see SERIAL_CONSOLES).
+ # If no serial or serialtcp options were specified, only ttyS0 is created
+ # and sysvinit shows an error trying to enable ttyS1:
+ # INIT: Id "S1" respawning too fast: disabled for 5 minutes
+ serial_num = len(re.findall("-serial", self.qemu_opt))
+
+ # Assume if the user passed serial options, they know what they want
+ # and pad to two devices
+ if serial_num == 1:
+ self.qemu_opt += " -serial null"
+ elif serial_num >= 2:
+ return
+
+ if self.serialstdio == True or self.nographic == True:
+ self.qemu_opt += " -serial mon:stdio"
+ else:
+ self.qemu_opt += " -serial mon:vc"
+ if self.serialconsole:
+ if sys.stdin.isatty():
+ subprocess.check_call(("stty", "intr", "^]"))
+ logger.info("Interrupt character is '^]'")
+
+ self.qemu_opt += " %s" % self.get("QB_SERIAL_OPT")
- qemu_bin = '%s/%s' % (self.bindir_native, qemu_system)
+ serial_num = len(re.findall("-serial", self.qemu_opt))
+ if serial_num < 2:
+ self.qemu_opt += " -serial null"
+
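Tracing setup_serial for a nographic boot with an illustrative SERIAL_CONSOLES value shows how the two guaranteed serial ports come about:

    serial_consoles = '115200;ttyS0 115200;ttyS1'   # illustrative value
    cmdline = ''.join(' console=%s' % e.split(';')[1]
                      for e in serial_consoles.split(' '))
    assert cmdline == ' console=ttyS0 console=ttyS1'
    # qemu side: nographic adds '-serial mon:stdio', then '-serial null'
    # pads to two ports so ttyS1 exists and sysvinit does not respawn-loop.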
+ def find_qemu(self):
+ qemu_bin = os.path.join(self.bindir_native, self.qemu_system)
# It is possible to have qemu-native in ASSUME_PROVIDED, and it won't
# find QEMU in sysroot, it needs to use host's qemu.
if not os.path.exists(qemu_bin):
logger.info("QEMU binary not found in %s, trying host's QEMU" % qemu_bin)
for path in (os.environ['PATH'] or '').split(':'):
- qemu_bin_tmp = os.path.join(path, qemu_system)
+ qemu_bin_tmp = os.path.join(path, self.qemu_system)
logger.info("Trying: %s" % qemu_bin_tmp)
if os.path.exists(qemu_bin_tmp):
qemu_bin = qemu_bin_tmp
@@ -1146,71 +1526,87 @@ class BaseConfig(object):
if not os.access(qemu_bin, os.X_OK):
raise OEPathError("No QEMU binary '%s' could be found" % qemu_bin)
+ self.qemu_bin = qemu_bin
- check_libgl(qemu_bin)
+ def setup_final(self):
- self.qemu_opt = "%s %s %s %s" % (qemu_bin, self.get('NETWORK_CMD'), self.get('ROOTFS_OPTIONS'), self.get('QB_OPT_APPEND'))
+ self.find_qemu()
+
+ self.qemu_opt = "%s %s %s %s %s" % (self.qemu_bin, self.get('NETWORK_CMD'), self.get('QB_RNG'), self.get('ROOTFS_OPTIONS'), self.get('QB_OPT_APPEND').replace('@DEPLOY_DIR_IMAGE@', self.get('DEPLOY_DIR_IMAGE')))
for ovmf in self.ovmf_bios:
format = ovmf.rsplit('.', 1)[-1]
+ if format == "bin":
+ format = "raw"
self.qemu_opt += ' -drive if=pflash,format=%s,file=%s' % (format, ovmf)
- if self.ovmf_bios:
- # OVMF only supports normal VGA, i.e. we need to override a -vga vmware
- # that gets added for example for normal qemux86.
- self.qemu_opt += ' -vga std'
self.qemu_opt += ' ' + self.qemu_opt_script
+ if self.ovmf_secboot_pkkek1:
+ # Provide the Platform Key and first Key Exchange Key certificate as an
+ # OEM string in the SMBIOS Type 11 table. Prepend the certificate string
+ # with "application prefix" of the EnrollDefaultKeys.efi application
+ self.qemu_opt += ' -smbios type=11,value=4e32566d-8e9e-4f52-81d3-5bb9715f9727:' \
+ + self.ovmf_secboot_pkkek1
+
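The resulting qemu option is a single Type 11 OEM string: the fixed GUID application prefix that EnrollDefaultKeys.efi scans for, a colon, then the bare certificate body (key truncated and illustrative):

    ovmf_secboot_pkkek1 = 'MIIBmTCCAUeg'   # illustrative, truncated
    opt = (' -smbios type=11,value=4e32566d-8e9e-4f52-81d3-5bb9715f9727:'
           + ovmf_secboot_pkkek1)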
+ # Append qemuparams to override previous settings
+ if self.qemuparams:
+ self.qemu_opt += ' ' + self.qemuparams
+
if self.snapshot:
self.qemu_opt += " -snapshot"
- if self.serialstdio:
- if sys.stdin.isatty():
- subprocess.check_call("stty intr ^]", shell=True)
- logger.info("Interrupt character is '^]'")
-
- first_serial = ""
- if not re.search("-nographic", self.qemu_opt):
- first_serial = "-serial mon:vc"
- # We always want a ttyS1. Since qemu by default adds a serial
- # port when nodefaults is not specified, it seems that all that
- # would be needed is to make sure a "-serial" is there. However,
- # it appears that when "-serial" is specified, it ignores the
- # default serial port that is normally added. So here we make
- # sure to add two -serial if there are none. And only one if
- # there is one -serial already.
- serial_num = len(re.findall("-serial", self.qemu_opt))
- if serial_num == 0:
- self.qemu_opt += " %s %s" % (first_serial, self.get("QB_SERIAL_OPT"))
- elif serial_num == 1:
- self.qemu_opt += " %s" % self.get("QB_SERIAL_OPT")
-
- # We always wants ttyS0 and ttyS1 in qemu machines (see SERIAL_CONSOLES),
- # if not serial or serialtcp options was specified only ttyS0 is created
- # and sysvinit shows an error trying to enable ttyS1:
- # INIT: Id "S1" respawning too fast: disabled for 5 minutes
- serial_num = len(re.findall("-serial", self.qemu_opt))
- if serial_num == 0:
- if re.search("-nographic", self.qemu_opt):
- self.qemu_opt += " -serial mon:stdio -serial null"
- else:
- self.qemu_opt += " -serial mon:vc -serial null"
+ self.setup_guest_agent()
+ self.setup_qmp()
+ self.setup_serial()
+ self.setup_vga()
def start_qemu(self):
+ import shlex
if self.kernel:
- kernel_opts = "-kernel %s -append '%s %s %s %s'" % (self.kernel, self.kernel_cmdline,
+ kernel_opts = "-kernel %s" % (self.kernel)
+ if self.get('QB_KERNEL_CMDLINE') == "none":
+ if self.bootparams:
+ kernel_opts += " -append '%s'" % (self.bootparams)
+ else:
+ kernel_opts += " -append '%s %s %s %s'" % (self.kernel_cmdline,
self.kernel_cmdline_script, self.get('QB_KERNEL_CMDLINE_APPEND'),
self.bootparams)
if self.dtb:
kernel_opts += " -dtb %s" % self.dtb
else:
kernel_opts = ""
+
+ if self.bios:
+ self.qemu_opt += " -bios %s" % self.bios
+
cmd = "%s %s" % (self.qemu_opt, kernel_opts)
+ cmds = shlex.split(cmd)
logger.info('Running %s\n' % cmd)
- process = subprocess.Popen(cmd, shell=True, stderr=subprocess.PIPE)
- self.qemupid = process.pid
- if process.wait():
- logger.error("Failed to run qemu: %s", process.stderr.read().decode())
+ with open('/proc/uptime', 'r') as f:
+ uptime_seconds = f.readline().split()[0]
+ logger.info('Host uptime: %s\n' % uptime_seconds)
+ pass_fds = []
+ if self.taplock_descriptor:
+ pass_fds = [self.taplock_descriptor.fileno()]
+ if len(self.portlocks):
+ for descriptor in self.portlocks.values():
+ pass_fds.append(descriptor.fileno())
+ process = subprocess.Popen(cmds, stderr=subprocess.PIPE, pass_fds=pass_fds, env=self.qemu_environ)
+ self.qemuprocess = process
+ retcode = process.wait()
+ if retcode:
+ if retcode == -signal.SIGTERM:
+ logger.info("Qemu terminated by SIGTERM")
+ else:
+ logger.error("Failed to run qemu: %s", process.stderr.read().decode())
+
+ def cleanup_cmd(self):
+ cmd = self.get('QB_CLEANUP_CMD')
+ if cmd != '':
+ logger.info('Running cleanup command %s' % str(cmd))
+ if subprocess.call(cmd, shell=True) != 0:
+ raise RunQemuError('Failed to run %s' % str(cmd))
def cleanup(self):
if self.cleaned:
@@ -1220,35 +1616,48 @@ class BaseConfig(object):
signal.signal(signal.SIGTERM, signal.SIG_IGN)
logger.info("Cleaning up")
+
+ if self.qemuprocess:
+ try:
+ # give it some time to shut down, ignore return values and output
+ self.qemuprocess.send_signal(signal.SIGTERM)
+ self.qemuprocess.communicate(timeout=5)
+ except subprocess.TimeoutExpired:
+ self.qemuprocess.kill()
+
+ with open('/proc/uptime', 'r') as f:
+ uptime_seconds = f.readline().split()[0]
+ logger.info('Host uptime: %s\n' % uptime_seconds)
if self.cleantap:
- cmd = 'sudo %s %s %s' % (self.qemuifdown, self.tap, self.bindir_native)
- logger.debug('Running %s' % cmd)
- subprocess.check_call(cmd, shell=True)
- if self.lock_descriptor:
- logger.info("Releasing lockfile for tap device '%s'" % self.tap)
- self.release_lock()
+ cmd = ('sudo', self.qemuifdown, self.tap)
+ logger.debug('Running %s' % str(cmd))
+ subprocess.check_call(cmd)
+ self.release_taplock()
if self.nfs_running:
logger.info("Shutting down the userspace NFS server...")
- cmd = "runqemu-export-rootfs stop %s" % self.rootfs
- logger.debug('Running %s' % cmd)
- subprocess.check_call(cmd, shell=True)
+ cmd = ("runqemu-export-rootfs", "stop", self.rootfs)
+ logger.debug('Running %s' % str(cmd))
+ subprocess.check_call(cmd)
+ self.release_portlock()
if self.saved_stty:
- cmd = "stty %s" % self.saved_stty
- subprocess.check_call(cmd, shell=True)
+ subprocess.check_call(("stty", self.saved_stty))
- if self.clean_nfs_dir:
- logger.info('Removing %s' % self.rootfs)
- shutil.rmtree(self.rootfs)
- shutil.rmtree('%s.pseudo_state' % self.rootfs)
+ if self.cleanup_files:
+ for ent in self.cleanup_files:
+ logger.info('Removing %s' % ent)
+ if os.path.isfile(ent):
+ os.remove(ent)
+ else:
+ shutil.rmtree(ent)
- self.cleaned = True
+ # Deliberately ignore the return code of 'tput smam'.
+ subprocess.call(["tput", "smam"])
- def load_bitbake_env(self, mach=None):
- if self.bitbake_e:
- return
+ self.cleaned = True
+ def run_bitbake_env(self, mach=None, target=''):
bitbake = shutil.which('bitbake')
if not bitbake:
return
@@ -1256,17 +1665,38 @@ class BaseConfig(object):
if not mach:
mach = self.get('MACHINE')
+ multiconfig = self.get('MULTICONFIG')
+ if multiconfig:
+ multiconfig = "mc:%s" % multiconfig
+
if mach:
- cmd = 'MACHINE=%s bitbake -e' % mach
+ cmd = 'MACHINE=%s bitbake -e %s %s' % (mach, multiconfig, target)
else:
- cmd = 'bitbake -e'
+ cmd = 'bitbake -e %s %s' % (multiconfig, target)
logger.info('Running %s...' % cmd)
try:
- self.bitbake_e = subprocess.check_output(cmd, shell=True).decode('utf-8')
+ return subprocess.check_output(cmd, shell=True).decode('utf-8')
except subprocess.CalledProcessError as err:
- self.bitbake_e = ''
- logger.warn("Couldn't run 'bitbake -e' to gather environment information:\n%s" % err.output.decode('utf-8'))
+ logger.warning("Couldn't run '%s' to gather environment information, maybe the target wasn't an image name, will retry with virtual/kernel as a target:\n%s" % (cmd, err.output.decode('utf-8')))
+ # need something with IMAGE_NAME_SUFFIX/IMAGE_LINK_NAME defined (kernel also inherits image-artifact-names.bbclass)
+ target = 'virtual/kernel'
+ if mach:
+ cmd = 'MACHINE=%s bitbake -e %s %s' % (mach, multiconfig, target)
+ else:
+ cmd = 'bitbake -e %s %s' % (multiconfig, target)
+ try:
+ return subprocess.check_output(cmd, shell=True).decode('utf-8')
+ except subprocess.CalledProcessError as err:
+ logger.warning("Couldn't run '%s' to gather environment information, giving up with 'bitbake -e':\n%s" % (cmd, err.output.decode('utf-8')))
+ return ''
+
+
+ def load_bitbake_env(self, mach=None, target=None):
+ if self.bitbake_e:
+ return
+
+ self.bitbake_e = self.run_bitbake_env(mach=mach, target=target)
def validate_combos(self):
if (self.fstype in self.vmtypes) and self.kernel:
@@ -1278,10 +1708,15 @@ class BaseConfig(object):
if result and os.path.exists(result):
return result
- cmd = 'bitbake qemu-helper-native -e'
- logger.info('Running %s...' % cmd)
- out = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
- out = out.stdout.read().decode('utf-8')
+ cmd = ['bitbake', '-e']
+ multiconfig = self.get('MULTICONFIG')
+ if multiconfig:
+ cmd.append('mc:%s:qemu-helper-native' % multiconfig)
+ else:
+ cmd.append('qemu-helper-native')
+
+ logger.info('Running %s...' % str(cmd))
+ out = subprocess.check_output(cmd).decode('utf-8')
match = re.search('^STAGING_BINDIR_NATIVE="(.*)"', out, re.M)
if match:
@@ -1291,7 +1726,7 @@ class BaseConfig(object):
return result
raise RunQemuError("Native sysroot directory %s doesn't exist" % result)
else:
- raise RunQemuError("Can't find STAGING_BINDIR_NATIVE in '%s' output" % cmd)
+ raise RunQemuError("Can't find STAGING_BINDIR_NATIVE in '%s' output" % str(cmd))
def main():
@@ -1301,9 +1736,13 @@ def main():
try:
config = BaseConfig()
+ renice = os.path.expanduser("~/bin/runqemu-renice")
+ if os.path.exists(renice):
+ logger.info('Using %s to renice' % renice)
+ subprocess.check_call([renice, str(os.getpid())])
+
def sigterm_handler(signum, frame):
- logger.info("SIGTERM received")
- os.kill(config.qemupid, signal.SIGTERM)
+ logger.info("Received signal: %s" % (signum))
config.cleanup()
signal.signal(signal.SIGTERM, sigterm_handler)
@@ -1316,6 +1755,7 @@ def main():
config.setup_network()
config.setup_rootfs()
config.setup_final()
+ config.setup_cmd()
config.start_qemu()
except RunQemuError as err:
logger.error(err)
@@ -1325,6 +1765,7 @@ def main():
traceback.print_exc()
return 1
finally:
+ config.cleanup_cmd()
config.cleanup()
if __name__ == "__main__":
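
The Popen change in start_qemu above hands the tap and port lock descriptors to the child via pass_fds, so the advisory locks stay held while QEMU runs instead of being closed in the child (close_fds is the default). A minimal sketch of the mechanism, assuming flock-style locking (the lock-acquisition helpers sit outside this hunk) and a hypothetical lock path:

    # Minimal sketch, not runqemu itself: the lock path and the child
    # command are illustrative assumptions.
    import fcntl
    import subprocess

    lock = open("/tmp/example-tap0.lock", "w")        # hypothetical lock file
    # flock lives on the open file description, so an inherited fd keeps
    # the lock held for the child's lifetime as well.
    fcntl.flock(lock, fcntl.LOCK_EX | fcntl.LOCK_NB)
    child = subprocess.Popen(["sleep", "1"], pass_fds=[lock.fileno()])
    child.wait()
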
diff --git a/scripts/runqemu-addptable2image b/scripts/runqemu-addptable2image
index f0195ad8a3..87a8da3a63 100755
--- a/scripts/runqemu-addptable2image
+++ b/scripts/runqemu-addptable2image
@@ -1,23 +1,11 @@
#!/bin/sh
-# Add a partion table to an ext2 image file
+# Add a partition table to an ext2 image file
#
# Copyright (C) 2006-2007 OpenedHand Ltd.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
+# SPDX-License-Identifier: GPL-2.0-or-later
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
IMAGE=$1
IMAGEOUT=$2
diff --git a/scripts/runqemu-export-rootfs b/scripts/runqemu-export-rootfs
index 70cdcdbb13..6a8acd0d5a 100755
--- a/scripts/runqemu-export-rootfs
+++ b/scripts/runqemu-export-rootfs
@@ -2,18 +2,8 @@
#
# Copyright (c) 2005-2009 Wind River Systems, Inc.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-# See the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
usage() {
echo "Usage: $0 {start|stop|restart} <nfs-export-dir>"
@@ -44,16 +34,12 @@ if [ -z "$SYSROOT_SETUP_SCRIPT" ]; then
echo "Did you forget to source your build environment setup script?"
exit 1
fi
-. $SYSROOT_SETUP_SCRIPT meta-ide-support
+. $SYSROOT_SETUP_SCRIPT qemu-helper-native
if [ ! -e "$OECORE_NATIVE_SYSROOT/usr/bin/unfsd" ]; then
echo "Error: Unable to find unfsd binary in $OECORE_NATIVE_SYSROOT/usr/bin/"
- if [ "x$OECORE_DISTRO_VERSION" = "x" ]; then
- echo "Have you run 'bitbake meta-ide-support'?"
- else
- echo "This shouldn't happen - something is missing from your toolchain installation"
- fi
+ echo "This shouldn't happen - something is missing from your toolchain installation"
exit 1
fi
@@ -84,26 +70,11 @@ MOUNTD_PORT=${MOUNTD_PORT:=$[ 3048 + 2 * $NFS_INSTANCE ]}
## For debugging you would additionally add
## --debug all
-UNFSD_OPTS="-p -N -i $NFSPID -e $EXPORTS -n $NFSD_PORT -m $MOUNTD_PORT"
+UNFSD_OPTS="-p -i $NFSPID -e $EXPORTS -n $NFSD_PORT -m $MOUNTD_PORT"
# See how we were called.
case "$1" in
start)
- PORTMAP_RUNNING=`ps -ef | grep portmap | grep -v grep`
- RPCBIND_RUNNING=`ps -ef | grep rpcbind | grep -v grep`
- if [[ "x$PORTMAP_RUNNING" = "x" && "x$RPCBIND_RUNNING" = "x" ]]; then
- echo "======================================================="
- echo "Error: neither rpcbind nor portmap appear to be running"
- echo "Please install and start one of these services first"
- echo "======================================================="
- echo "Tip: for recent Ubuntu hosts, run:"
- echo " sudo apt-get install rpcbind"
- echo "Then add OPTIONS=\"-i -w\" to /etc/default/rpcbind and run"
- echo " sudo service portmap restart"
-
- exit 1
- fi
-
echo "Creating exports file..."
echo "$NFS_EXPORT_DIR (rw,no_root_squash,no_all_squash,insecure)" > $EXPORTS
diff --git a/scripts/runqemu-extract-sdk b/scripts/runqemu-extract-sdk
index 4da3eb10a7..db05da25f2 100755
--- a/scripts/runqemu-extract-sdk
+++ b/scripts/runqemu-extract-sdk
@@ -7,18 +7,8 @@
#
# Copyright (c) 2010 Intel Corp.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-# See the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
function usage() {
echo "Usage: $0 <image-tarball> <extract-dir>"
@@ -35,7 +25,7 @@ if [ -z "$SYSROOT_SETUP_SCRIPT" ]; then
echo "Did you forget to source your build system environment setup script?"
exit 1
fi
-. $SYSROOT_SETUP_SCRIPT meta-ide-support
+. $SYSROOT_SETUP_SCRIPT qemu-helper-native
PSEUDO_OPTS="-P $OECORE_NATIVE_SYSROOT/usr"
ROOTFS_TARBALL=$1
@@ -79,7 +69,7 @@ fi
pseudo_state_dir="$SDK_ROOTFS_DIR/../$(basename "$SDK_ROOTFS_DIR").pseudo_state"
pseudo_state_dir="$(readlink -f $pseudo_state_dir)"
-debug_image="`echo $ROOTFS_TARBALL | grep '\-dbg\.tar\.'`"
+debug_image="`echo $ROOTFS_TARBALL | grep '\-dbg\.rootfs\.tar'`"
if [ -e "$pseudo_state_dir" -a -z "$debug_image" ]; then
echo "Error: $pseudo_state_dir already exists!"
diff --git a/scripts/runqemu-gen-tapdevs b/scripts/runqemu-gen-tapdevs
index 11de318c1a..a00c79c442 100755
--- a/scripts/runqemu-gen-tapdevs
+++ b/scripts/runqemu-gen-tapdevs
@@ -1,57 +1,63 @@
#!/bin/bash
#
# Create a "bank" of tap network devices that can be used by the
-# runqemu script. This script needs to be run as root, and will
-# use the tunctl binary from the build system sysroot. Note: many Linux
-# distros these days still use an older version of tunctl which does not
-# support the group permissions option, hence the need to use the build
-# system provided version.
+# runqemu script. This script needs to be run as root.
#
# Copyright (C) 2010 Intel Corp.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+gid=`id -g`
+if [ -n "$SUDO_GID" ]; then
+ gid=$SUDO_GID
+fi
usage() {
- echo "Usage: sudo $0 <uid> <gid> <num> <staging_bindir_native>"
- echo "Where <uid> is the numeric user id the tap devices will be owned by"
+ echo "Usage: sudo $0 <gid> <num>"
echo "Where <gid> is the numeric group id the tap devices will be owned by"
echo "<num> is the number of tap devices to create (0 to remove all)"
- echo "<native-sysroot-basedir> is the path to the build system's native sysroot"
- echo "e.g. $ bitbake qemu-helper-native"
- echo "$ sudo $0 1000 1000 4 tmp/sysroots-components/x86_64/qemu-helper-native/usr/bin"
+ echo "For example:"
+ echo "$ bitbake qemu-helper-native"
+ echo "$ sudo $0 $gid 4"
+ echo ""
exit 1
}
-if [ $EUID -ne 0 ]; then
- echo "Error: This script must be run with root privileges"
- exit
+# Accept the old 4-argument form for backward compatibility, with a warning
+if [ $# -gt 4 ]; then
+ echo "Error: Incorrect number of arguments"
+ usage
fi
-
-if [ $# -ne 4 ]; then
+if [ $# -gt 3 ]; then
+ echo "Warning: Ignoring the <native-sysroot-basedir> parameter. It is no longer needed."
+fi
+if [ $# -gt 2 ]; then
+ echo "Warning: Ignoring the <uid> parameter. It is no longer needed."
+ GID=$2
+ COUNT=$3
+elif [ $# -eq 2 ]; then
+ GID=$1
+ COUNT=$2
+else
echo "Error: Incorrect number of arguments"
usage
fi
-TUID=$1
-GID=$2
-COUNT=$3
-STAGING_BINDIR_NATIVE=$4
-TUNCTL=$STAGING_BINDIR_NATIVE/tunctl
-if [[ ! -x "$TUNCTL" || -d "$TUNCTL" ]]; then
- echo "Error: $TUNCTL is not an executable"
- usage
+if [ -z "$OE_TAP_NAME" ]; then
+ OE_TAP_NAME=tap
+fi
+
+# check if COUNT is a number and >= 0
+if ! [ $COUNT -ge 0 ]; then
+ echo "Error: Incorrect count: $COUNT"
+ exit 1
+fi
+
+if [ $EUID -ne 0 ]; then
+ echo "Error: This script must be run with root privileges"
+ exit
fi
SCRIPT_DIR=`dirname $0`
@@ -61,48 +67,41 @@ if [ ! -x "$RUNQEMU_IFUP" ]; then
exit 1
fi
-IFCONFIG=`which ip 2> /dev/null`
-if [ -z "$IFCONFIG" ]; then
- # Is it ever anywhere else?
- IFCONFIG=/sbin/ip
-fi
-if [ ! -x "$IFCONFIG" ]; then
- echo "$IFCONFIG cannot be executed"
- exit 1
-fi
-
-if [ $COUNT -ge 0 ]; then
- # Ensure we start with a clean slate
- for tap in `$IFCONFIG link | grep tap | awk '{ print \$2 }' | sed s/://`; do
- echo "Note: Destroying pre-existing tap interface $tap..."
- $TUNCTL -d $tap
- done
- rm -f /etc/runqemu-nosudo
+if interfaces=`ip tuntap list` 2>/dev/null; then
+ interfaces=`echo "$interfaces" |cut -f1 -d: |grep -E "^$OE_TAP_NAME.*"`
else
- echo "Error: Incorrect count: $COUNT"
+ echo "Failed to call 'ip tuntap list'" >&2
exit 1
fi
-if [ $COUNT -gt 0 ]; then
- echo "Creating $COUNT tap devices for UID: $TUID GID: $GID..."
- for ((index=0; index < $COUNT; index++)); do
- echo "Creating tap$index"
- ifup=`$RUNQEMU_IFUP $TUID $GID $STAGING_BINDIR_NATIVE 2>&1`
- if [ $? -ne 0 ]; then
- echo "Error running tunctl: $ifup"
- exit 1
- fi
- done
-
- echo "Note: For systems running NetworkManager, it's recommended"
- echo "Note: that the tap devices be set as unmanaged in the"
- echo "Note: NetworkManager.conf file. Add the following lines to"
- echo "Note: /etc/NetworkManager/NetworkManager.conf"
- echo "[keyfile]"
- echo "unmanaged-devices=interface-name:tap*"
+# Ensure we start with a clean slate
+for tap in $interfaces; do
+ echo "Note: Destroying pre-existing tap interface $tap..."
+ ip tuntap del $tap mode tap
+done
+rm -f /etc/runqemu-nosudo
- # The runqemu script will check for this file, and if it exists,
- # will use the existing bank of tap devices without creating
- # additional ones via sudo.
- touch /etc/runqemu-nosudo
+if [ $COUNT -eq 0 ]; then
+ exit 0
fi
+
+echo "Creating $COUNT tap devices for GID: $GID..."
+for ((index=0; index < $COUNT; index++)); do
+ echo "Creating $OE_TAP_NAME$index"
+ if ! ifup=`$RUNQEMU_IFUP $GID 2>&1`; then
+ echo "Error bringing up interface: $ifup"
+ exit 1
+ fi
+done
+
+echo "Note: For systems running NetworkManager, it's recommended"
+echo "Note: that the tap devices be set as unmanaged in the"
+echo "Note: NetworkManager.conf file. Add the following lines to"
+echo "Note: /etc/NetworkManager/NetworkManager.conf"
+echo "[keyfile]"
+echo "unmanaged-devices=interface-name:$OE_TAP_NAME*"
+
+# The runqemu script will check for this file, and if it exists,
+# will use the existing bank of tap devices without creating
+# additional ones via sudo.
+touch /etc/runqemu-nosudo
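
The rewritten script discovers existing tap devices with 'ip tuntap list' and filters on the OE_TAP_NAME prefix instead of shelling out to tunctl. A sketch of that discovery step (the sample output line in the comment is an assumption; the iproute2 ip tool must be on the host):

    import os
    import subprocess

    tap_name = os.environ.get("OE_TAP_NAME", "tap")

    def existing_oe_taps():
        out = subprocess.check_output(["ip", "tuntap", "list"], text=True)
        # each line looks roughly like "tap0: tap persist group 1000"
        names = (line.split(":", 1)[0] for line in out.splitlines())
        return [n for n in names if n.startswith(tap_name)]

    print(existing_oe_taps())  # e.g. ['tap0', 'tap1'] on a prepared host
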
diff --git a/scripts/runqemu-ifdown b/scripts/runqemu-ifdown
index 2486968588..822a2a39b9 100755
--- a/scripts/runqemu-ifdown
+++ b/scripts/runqemu-ifdown
@@ -1,8 +1,7 @@
#!/bin/bash
#
# QEMU network configuration script to bring down tap devices. This
-# utility needs to be run as root, and will use the tunctl binary
-# from the native sysroot.
+# utility needs to be run as root, and will use the ip utility.
#
# If you find yourself calling this script a lot, you can add the
# following to your /etc/sudoers file to be able to run this
@@ -13,21 +12,11 @@
#
# Copyright (c) 2006-2011 Linux Foundation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
usage() {
- echo "sudo $(basename $0) <tap-dev> <native-sysroot-basedir>"
+ echo "sudo $(basename $0) <tap-dev>"
}
if [ $EUID -ne 0 ]; then
@@ -35,30 +24,31 @@ if [ $EUID -ne 0 ]; then
exit 1
fi
-if [ $# -ne 2 ]; then
+if [ $# -gt 2 ] || [ $# -lt 1 ]; then
usage
exit 1
fi
+# backward compatibility
+if [ $# -eq 2 ] ; then
+ echo "Warning: native-sysroot-basedir parameter is ignored. It is no longer needed." >&2
+fi
+
TAP=$1
-STAGING_BINDIR_NATIVE=$2
-TUNCTL=$STAGING_BINDIR_NATIVE/tunctl
-if [ ! -e "$TUNCTL" ]; then
- echo "Error: Unable to find tunctl binary in '$STAGING_BINDIR_NATIVE', please bitbake qemu-helper-native"
+if ! ip tuntap del $TAP mode tap 2>/dev/null; then
+	echo "Error: Unable to run 'ip tuntap del'"
exit 1
fi
-$TUNCTL -d $TAP
-
-IFCONFIG=`which ip 2> /dev/null`
-if [ "x$IFCONFIG" = "x" ]; then
+IPTOOL=`which ip 2> /dev/null`
+if [ "x$IPTOOL" = "x" ]; then
# better than nothing...
- IFCONFIG=/sbin/ip
+ IPTOOL=/sbin/ip
fi
-if [ -x "$IFCONFIG" ]; then
- if `$IFCONFIG link show $TAP > /dev/null 2>&1`; then
- $IFCONFIG link del $TAP
+if [ -x "$IPTOOL" ]; then
+ if `$IPTOOL link show $TAP > /dev/null 2>&1`; then
+ $IPTOOL link del $TAP
fi
fi
# cleanup the remaining iptables rules
@@ -70,7 +60,13 @@ if [ ! -x "$IPTABLES" ]; then
echo "$IPTABLES cannot be executed"
exit 1
fi
-n=$[ (`echo $TAP | sed 's/tap//'` * 2) + 1 ]
-dest=$[ (`echo $TAP | sed 's/tap//'` * 2) + 2 ]
+
+if [ -z "$OE_TAP_NAME" ]; then
+ OE_TAP_NAME=tap
+fi
+
+n=$[ (`echo $TAP | sed "s/$OE_TAP_NAME//"` * 2) + 1 ]
+dest=$[ (`echo $TAP | sed "s/$OE_TAP_NAME//"` * 2) + 2 ]
$IPTABLES -D POSTROUTING -t nat -j MASQUERADE -s 192.168.7.$n/32
$IPTABLES -D POSTROUTING -t nat -j MASQUERADE -s 192.168.7.$dest/32
+true
diff --git a/scripts/runqemu-ifup b/scripts/runqemu-ifup
index 59a15eaa2e..05c9325b6b 100755
--- a/scripts/runqemu-ifup
+++ b/scripts/runqemu-ifup
@@ -1,10 +1,7 @@
#!/bin/bash
#
# QEMU network interface configuration script. This utility needs to
-# be run as root, and will use the tunctl binary from a native sysroot.
-# Note: many Linux distros these days still use an older version of
-# tunctl which does not support the group permissions option, hence
-# the need to use build system's version.
+# be run as root, and will use the ip utility.
#
# If you find yourself calling this script a lot, you can add the
# following to your /etc/sudoers file to be able to run this
@@ -20,21 +17,11 @@
#
# Copyright (c) 2006-2011 Linux Foundation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
usage() {
- echo "sudo $(basename $0) <uid> <gid> <native-sysroot-basedir>"
+ echo "sudo $(basename $0) <gid>"
}
if [ $EUID -ne 0 ]; then
@@ -42,41 +29,43 @@ if [ $EUID -ne 0 ]; then
exit 1
fi
-if [ $# -ne 3 ]; then
+if [ $# -eq 2 ]; then
+ echo "Warning: uid parameter is ignored. It is no longer needed." >&2
+ GROUP="$2"
+elif [ $# -eq 1 ]; then
+ GROUP="$1"
+else
usage
exit 1
fi
-USERID="-u $1"
-GROUP="-g $2"
-STAGING_BINDIR_NATIVE=$3
-TUNCTL=$STAGING_BINDIR_NATIVE/tunctl
-if [ ! -x "$TUNCTL" ]; then
- echo "Error: Unable to find tunctl binary in '$STAGING_BINDIR_NATIVE', please bitbake qemu-helper-native"
- exit 1
+if [ -z "$OE_TAP_NAME" ]; then
+ OE_TAP_NAME=tap
fi
-TAP=`$TUNCTL -b $GROUP 2>&1`
-STATUS=$?
-if [ $STATUS -ne 0 ]; then
-# If tunctl -g fails, try using tunctl -u, for older host kernels
-# which do not support the TUNSETGROUP ioctl
- TAP=`$TUNCTL -b $USERID 2>&1`
- STATUS=$?
- if [ $STATUS -ne 0 ]; then
- echo "tunctl failed:"
- exit 1
+if taps=$(ip tuntap list 2>/dev/null); then
+ tap_no_last=$(echo "$taps" |cut -f 1 -d ":" |grep -E "^$OE_TAP_NAME.*" |sed "s/$OE_TAP_NAME//g" | sort -rn | head -n 1)
+ if [ -z "$tap_no_last" ]; then
+ tap_no=0
+ else
+ tap_no=$(("$tap_no_last" + 1))
fi
+ ip tuntap add "$OE_TAP_NAME$tap_no" mode tap group "$GROUP" && TAP=$OE_TAP_NAME$tap_no
+fi
+
+if [ -z "$TAP" ]; then
+ echo "Error: Unable to find a tap device to use"
+ exit 1
fi
-IFCONFIG=`which ip 2> /dev/null`
-if [ "x$IFCONFIG" = "x" ]; then
+IPTOOL=`which ip 2> /dev/null`
+if [ "x$IPTOOL" = "x" ]; then
# better than nothing...
- IFCONFIG=/sbin/ip
+ IPTOOL=/sbin/ip
fi
-if [ ! -x "$IFCONFIG" ]; then
- echo "$IFCONFIG cannot be executed"
+if [ ! -x "$IPTOOL" ]; then
+ echo "$IPTOOL cannot be executed"
exit 1
fi
@@ -89,22 +78,22 @@ if [ ! -x "$IPTABLES" ]; then
exit 1
fi
-n=$[ (`echo $TAP | sed 's/tap//'` * 2) + 1 ]
-$IFCONFIG addr add 192.168.7.$n/32 broadcast 192.168.7.255 dev $TAP
+n=$[ (`echo $TAP | sed "s/$OE_TAP_NAME//"` * 2) + 1 ]
+$IPTOOL addr add 192.168.7.$n/32 broadcast 192.168.7.255 dev $TAP
STATUS=$?
if [ $STATUS -ne 0 ]; then
echo "Failed to set up IP addressing on $TAP"
exit 1
fi
-$IFCONFIG link set dev $TAP up
+$IPTOOL link set dev $TAP up
STATUS=$?
if [ $STATUS -ne 0 ]; then
echo "Failed to bring up $TAP"
exit 1
fi
-dest=$[ (`echo $TAP | sed 's/tap//'` * 2) + 2 ]
-$IFCONFIG route add to 192.168.7.$dest dev $TAP
+dest=$[ (`echo $TAP | sed "s/$OE_TAP_NAME//"` * 2) + 2 ]
+$IPTOOL route add to 192.168.7.$dest dev $TAP
STATUS=$?
if [ $STATUS -ne 0 ]; then
echo "Failed to add route to 192.168.7.$dest using $TAP"
diff --git a/scripts/runqemu.README b/scripts/runqemu.README
index da9abd7dfb..e5f4b4634c 100644
--- a/scripts/runqemu.README
+++ b/scripts/runqemu.README
@@ -1,12 +1,12 @@
Using OE images with QEMU
=========================
-OE-Core can generate qemu bootable kernels and images with can be used
+OE-Core can generate qemu bootable kernels and images which can be used
on a desktop system. The scripts currently support booting ARM, MIPS, PowerPC
-and x86 (32 and 64 bit) images. The scripts can be used within the OE build
-system or externaly.
+and x86 (32 and 64 bit) images. The scripts can be used within the OE build
+system or externally.
-The runqemu script is run as:
+The runqemu script is run as:
runqemu <machine> <zimage> <filesystem>
@@ -15,13 +15,13 @@ where:
<machine> is the machine/architecture to use (qemuarm/qemumips/qemuppc/qemux86/qemux86-64)
<zimage> is the path to a kernel (e.g. zimage-qemuarm.bin)
<filesystem> is the path to an ext2 image (e.g. filesystem-qemuarm.ext2) or an nfs directory
-
-If <machine> isn't specified, the script will try to detect the machine name
+
+If <machine> isn't specified, the script will try to detect the machine name
from the name of the <zimage> file.
If <filesystem> isn't specified, nfs booting will be assumed.
-When used within the build system, it will default to qemuarm, ext2 and the last kernel and
+When used within the build system, it will default to qemuarm, ext2 and the last kernel and
core-image-sato-sdk image built by the build system. If an sdk image isn't present it will look
for sato and minimal images.
@@ -31,7 +31,7 @@ Full usage instructions can be seen by running the command with no options speci
Notes
=====
- - The scripts run qemu using sudo. Change perms on /dev/net/tun to
+ - The scripts run qemu using sudo. Change perms on /dev/net/tun to
run as non root. The runqemu-gen-tapdevs script can also be used by
root to prepopulate the appropriate network devices.
- You can access the host computer at 192.168.7.1 within the image.
diff --git a/scripts/send-error-report b/scripts/send-error-report
index 8939f5f594..cfbcaa52cb 100755
--- a/scripts/send-error-report
+++ b/scripts/send-error-report
@@ -6,6 +6,9 @@
# Copyright (C) 2013 Intel Corporation
# Author: Andreea Proca <andreea.b.proca@intel.com>
# Author: Michael Wood <michael.g.wood@intel.com>
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
import urllib.request, urllib.error
import sys
@@ -62,7 +65,7 @@ def edit_content(json_file_path):
def prepare_data(args):
# attempt to get the max_log_size from the server's settings
- max_log_size = getPayloadLimit("http://"+args.server+"/ClientPost/JSON")
+ max_log_size = getPayloadLimit(args.protocol+args.server+"/ClientPost/JSON")
if not os.path.isfile(args.error_file):
log.error("No data file found.")
@@ -132,9 +135,9 @@ def send_data(data, args):
headers={'Content-type': 'application/json', 'User-Agent': "send-error-report/"+version}
if args.json:
- url = "http://"+args.server+"/ClientPost/JSON/"
+ url = args.protocol+args.server+"/ClientPost/JSON/"
else:
- url = "http://"+args.server+"/ClientPost/"
+ url = args.protocol+args.server+"/ClientPost/"
req = urllib.request.Request(url, data=data, headers=headers)
try:
@@ -187,6 +190,11 @@ if __name__ == '__main__':
help="Return the result in json format, silences all other output",
action="store_true")
+ arg_parse.add_argument("--no-ssl",
+ help="Use http instead of https protocol",
+ dest="protocol",
+ action="store_const", const="http://", default="https://")
+
args = arg_parse.parse_args()
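
The new --no-ssl flag works by storing a protocol constant: argparse writes "http://" into args.protocol when the flag is given and falls back to the "https://" default otherwise, and the URLs above are built by plain concatenation. A minimal reproduction (the server name is a placeholder):

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--no-ssl", dest="protocol", action="store_const",
                        const="http://", default="https://")

    args = parser.parse_args(["--no-ssl"])
    print(args.protocol + "errors.example.com" + "/ClientPost/JSON/")
    # -> http://errors.example.com/ClientPost/JSON/
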
diff --git a/scripts/send-pull-request b/scripts/send-pull-request
index 883deacb07..70b5a5cfb2 100755
--- a/scripts/send-pull-request
+++ b/scripts/send-pull-request
@@ -1,21 +1,8 @@
#!/bin/bash
#
# Copyright (c) 2010-2011, Intel Corporation.
-# All Rights Reserved
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
-# the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+# SPDX-License-Identifier: GPL-2.0-or-later
#
#
diff --git a/scripts/sstate-cache-management.py b/scripts/sstate-cache-management.py
new file mode 100755
index 0000000000..d3f600bd28
--- /dev/null
+++ b/scripts/sstate-cache-management.py
@@ -0,0 +1,329 @@
+#!/usr/bin/env python3
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
+import argparse
+import os
+import re
+import sys
+
+from collections import defaultdict
+from concurrent.futures import ThreadPoolExecutor
+from dataclasses import dataclass
+from pathlib import Path
+
+if sys.version_info < (3, 8, 0):
+ raise RuntimeError("Sorry, python 3.8.0 or later is required for this script.")
+
+SSTATE_PREFIX = "sstate:"
+SSTATE_EXTENSION = ".tar.zst"
+# SSTATE_EXTENSION = ".tgz"
+# .siginfo.done files are mentioned in the original script?
+SSTATE_SUFFIXES = (
+ SSTATE_EXTENSION,
+ f"{SSTATE_EXTENSION}.siginfo",
+ f"{SSTATE_EXTENSION}.done",
+)
+
+RE_SSTATE_PKGSPEC = re.compile(
+ rf"""sstate:(?P<pn>[^:]*):
+ (?P<package_target>[^:]*):
+ (?P<pv>[^:]*):
+ (?P<pr>[^:]*):
+ (?P<sstate_pkgarch>[^:]*):
+ (?P<sstate_version>[^_]*):
+ (?P<bb_unihash>[^_]*)_
+ (?P<bb_task>[^:]*)
+ (?P<ext>({"|".join([re.escape(s) for s in SSTATE_SUFFIXES])}))$""",
+ re.X,
+)
+
+
+# Really we'd like something like a Path subclass which implements a stat
+# cache here, unfortunately there's no good way to do that transparently
+# (yet); see:
+#
+# https://github.com/python/cpython/issues/70219
+# https://discuss.python.org/t/make-pathlib-extensible/3428/77
+@dataclass
+class SstateEntry:
+ """Class for keeping track of an entry in sstate-cache."""
+
+ path: Path
+ match: re.Match
+ stat_result: os.stat_result = None
+
+ def __hash__(self):
+ return self.path.__hash__()
+
+ def __getattr__(self, name):
+ return self.match.group(name)
+
+
+# this is what's in the original script; as far as I can tell, it's an
+# implementation artefact which we don't need?
+def find_archs():
+ # all_archs
+ builder_arch = os.uname().machine
+
+ # FIXME
+ layer_paths = [Path("../..")]
+
+ tune_archs = set()
+ re_tune = re.compile(r'AVAILTUNES .*=.*"(.*)"')
+ for path in layer_paths:
+ for tunefile in [
+ p for p in path.glob("meta*/conf/machine/include/**/*") if p.is_file()
+ ]:
+ with open(tunefile) as f:
+ for line in f:
+ m = re_tune.match(line)
+ if m:
+ tune_archs.update(m.group(1).split())
+
+ # all_machines
+ machine_archs = set()
+ for path in layer_paths:
+ for machine_file in path.glob("meta*/conf/machine/*.conf"):
+ machine_archs.add(machine_file.parts[-1][:-5])
+
+ extra_archs = set()
+ all_archs = (
+ set(
+ arch.replace("-", "_")
+ for arch in machine_archs | tune_archs | set(["allarch", builder_arch])
+ )
+ | extra_archs
+ )
+
+ print(all_archs)
+
+
+# again, not needed?
+def find_tasks():
+ print(set([p.bb_task for p in paths]))
+
+
+def collect_sstate_paths(args):
+ def scandir(path, paths):
+ # Assume everything is a directory; by not checking we avoid needing an
+ # additional stat which is potentially a synchronous roundtrip over NFS
+ try:
+ for p in path.iterdir():
+ filename = p.parts[-1]
+ if filename.startswith(SSTATE_PREFIX):
+ if filename.endswith(SSTATE_SUFFIXES):
+ m = RE_SSTATE_PKGSPEC.match(p.parts[-1])
+ assert m
+ paths.add(SstateEntry(p, m))
+ # ignore other things (includes things like lockfiles)
+ else:
+ scandir(p, paths)
+
+ except NotADirectoryError:
+ pass
+
+ paths = set()
+    # TODO: parallelise scandir
+ scandir(Path(args.cache_dir), paths)
+
+ def path_stat(p):
+ p.stat_result = p.path.lstat()
+
+ if args.remove_duplicated:
+ # This is probably slightly performance negative on a local filesystem
+ # when we interact with the GIL; over NFS it's a massive win.
+ with ThreadPoolExecutor(max_workers=args.jobs) as executor:
+ executor.map(path_stat, paths)
+
+ return paths
+
+
+def remove_by_stamps(args, paths):
+ all_sums = set()
+ for stamps_dir in args.stamps_dir:
+ stamps_path = Path(stamps_dir)
+ assert stamps_path.is_dir()
+ re_sigdata = re.compile(r"do_.*\.sigdata\.([^.]*)")
+ all_sums |= set(
+ [
+ re_sigdata.search(x.parts[-1]).group(1)
+ for x in stamps_path.glob("*/*/*.do_*.sigdata.*")
+ ]
+ )
+ re_setscene = re.compile(r"do_.*_setscene\.([^.]*)")
+ all_sums |= set(
+ [
+ re_setscene.search(x.parts[-1]).group(1)
+ for x in stamps_path.glob("*/*/*.do_*_setscene.*")
+ ]
+ )
+ return [p for p in paths if p.bb_unihash not in all_sums]
+
+
+def remove_duplicated(args, paths):
+ # Skip populate_lic as it produces duplicates in a normal build
+ #
+ # 9ae16469e707 sstate-cache-management: skip populate_lic archives when removing duplicates
+ valid_paths = [p for p in paths if p.bb_task != "populate_lic"]
+
+ keep = dict()
+ remove = list()
+ for p in valid_paths:
+ sstate_sig = ":".join([p.pn, p.sstate_pkgarch, p.bb_task, p.ext])
+ if sstate_sig not in keep:
+ keep[sstate_sig] = p
+ elif p.stat_result.st_mtime > keep[sstate_sig].stat_result.st_mtime:
+ remove.append(keep[sstate_sig])
+ keep[sstate_sig] = p
+ else:
+ remove.append(p)
+
+ return remove
+
+
+def remove_orphans(args, paths):
+ remove = list()
+ pathsigs = defaultdict(list)
+ for p in paths:
+ sstate_sig = ":".join([p.pn, p.sstate_pkgarch, p.bb_task])
+ pathsigs[sstate_sig].append(p)
+ for k, v in pathsigs.items():
+ if len([p for p in v if p.ext == SSTATE_EXTENSION]) == 0:
+ remove.extend(v)
+ return remove
+
+
+def parse_arguments():
+ parser = argparse.ArgumentParser(description="sstate cache management utility.")
+
+ parser.add_argument(
+ "--cache-dir",
+ default=os.environ.get("SSTATE_CACHE_DIR"),
+        help="""Specify the sstate cache directory; the environment
+            variable SSTATE_CACHE_DIR is used if it is not specified.""",
+ )
+
+ # parser.add_argument(
+ # "--extra-archs",
+ # help="""Specify list of architectures which should be tested, this list
+ # will be extended with native arch, allarch and empty arch. The
+ # script won't be trying to generate list of available archs from
+ # AVAILTUNES in tune files.""",
+ # )
+
+ # parser.add_argument(
+ # "--extra-layer",
+ # help="""Specify the layer which will be used for searching the archs,
+ # it will search the meta and meta-* layers in the top dir by
+ # default, and will search meta, meta-*, <layer1>, <layer2>,
+ # ...<layern> when specified. Use "," as the separator.
+ #
+ # This is useless for --stamps-dir or when --extra-archs is used.""",
+ # )
+
+ parser.add_argument(
+ "-d",
+ "--remove-duplicated",
+ action="store_true",
+        help="""Remove the duplicated sstate cache files of one package; only
+ the newest one will be kept. The duplicated sstate cache files
+ of one package must have the same arch, which means sstate cache
+ files with multiple archs are not considered duplicate.
+
+ Conflicts with --stamps-dir.""",
+ )
+
+ parser.add_argument(
+ "--remove-orphans",
+ action="store_true",
+ help=f"""Remove orphan siginfo files from the sstate cache, i.e. those
+            where there is no {SSTATE_EXTENSION} file but there are associated
+ tracking files.""",
+ )
+
+ parser.add_argument(
+ "--stamps-dir",
+ action="append",
+        help="""Specify the build directory's stamps directories; the sstate
+            cache files which ARE USED by these build directories will be KEPT,
+ other sstate cache files in cache-dir will be removed. Can be
+ specified multiple times for several directories.
+
+ Conflicts with --remove-duplicated.""",
+ )
+
+ parser.add_argument(
+ "-j", "--jobs", default=8, type=int, help="Run JOBS jobs in parallel."
+ )
+
+ # parser.add_argument(
+ # "-L",
+ # "--follow-symlink",
+ # action="store_true",
+ # help="Remove both the symbol link and the destination file, default: no.",
+ # )
+
+ parser.add_argument(
+ "-y",
+ "--yes",
+ action="store_true",
+ help="""Automatic yes to prompts; assume "yes" as answer to all prompts
+ and run non-interactively.""",
+ )
+
+ parser.add_argument(
+ "-v", "--verbose", action="store_true", help="Explain what is being done."
+ )
+
+ parser.add_argument(
+ "-D",
+ "--debug",
+ action="count",
+ default=0,
+ help="Show debug info, repeat for more debug info.",
+ )
+
+ args = parser.parse_args()
+ if args.cache_dir is None or (
+ not args.remove_duplicated and not args.stamps_dir and not args.remove_orphans
+ ):
+ parser.print_usage()
+ sys.exit(1)
+
+ return args
+
+
+def main():
+ args = parse_arguments()
+
+ paths = collect_sstate_paths(args)
+ if args.remove_duplicated:
+ remove = remove_duplicated(args, paths)
+ elif args.stamps_dir:
+ remove = remove_by_stamps(args, paths)
+ else:
+ remove = list()
+
+ if args.remove_orphans:
+ remove = set(remove) | set(remove_orphans(args, paths))
+
+ if args.debug >= 1:
+ print("\n".join([str(p.path) for p in remove]))
+ print(f"{len(remove)} out of {len(paths)} files will be removed!")
+ if not args.yes:
+ print("Do you want to continue (y/n)?")
+ confirm = input() in ("y", "Y")
+ else:
+ confirm = True
+ if confirm:
+ # TODO: parallelise remove
+ for p in remove:
+ p.path.unlink()
+
+
+if __name__ == "__main__":
+ main()
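
RE_SSTATE_PKGSPEC splits a cache file name on the colon-separated SSTATE_PKGSPEC fields, with the unihash and task name joined by an underscore. A demonstration against a made-up (but correctly shaped) name; the hash and version numbers are illustrative, and the regex is restated with %-formatting so the snippet runs on its own:

    import re

    SSTATE_EXTENSION = ".tar.zst"
    SSTATE_SUFFIXES = (SSTATE_EXTENSION,
                       SSTATE_EXTENSION + ".siginfo",
                       SSTATE_EXTENSION + ".done")

    RE_SSTATE_PKGSPEC = re.compile(
        r"""sstate:(?P<pn>[^:]*):
            (?P<package_target>[^:]*):
            (?P<pv>[^:]*):
            (?P<pr>[^:]*):
            (?P<sstate_pkgarch>[^:]*):
            (?P<sstate_version>[^_]*):
            (?P<bb_unihash>[^_]*)_
            (?P<bb_task>[^:]*)
            (?P<ext>(%s))$""" % "|".join(re.escape(s) for s in SSTATE_SUFFIXES),
        re.X,
    )

    name = ("sstate:zlib:core2-64-poky-linux:1.3.1:r0:core2_64:11:"
            "0123456789abcdef_populate_sysroot.tar.zst")
    m = RE_SSTATE_PKGSPEC.match(name)
    print(m.group("pn"), m.group("bb_task"), m.group("ext"))
    # -> zlib populate_sysroot .tar.zst
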
diff --git a/scripts/sstate-cache-management.sh b/scripts/sstate-cache-management.sh
deleted file mode 100755
index 2ab450ab59..0000000000
--- a/scripts/sstate-cache-management.sh
+++ /dev/null
@@ -1,469 +0,0 @@
-#!/bin/bash
-
-# Copyright (c) 2012 Wind River Systems, Inc.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-# See the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-#
-
-# Global vars
-cache_dir=
-confirm=
-fsym=
-total_deleted=0
-verbose=
-debug=0
-
-usage () {
- cat << EOF
-Welcome to sstate cache management utilities.
-sstate-cache-management.sh <OPTION>
-
-Options:
- -h, --help
- Display this help and exit.
-
- --cache-dir=<sstate cache dir>
- Specify sstate cache directory, will use the environment
- variable SSTATE_CACHE_DIR if it is not specified.
-
- --extra-archs=<arch1>,<arch2>...<archn>
- Specify list of architectures which should be tested, this list
- will be extended with native arch, allarch and empty arch. The
- script won't be trying to generate list of available archs from
- AVAILTUNES in tune files.
-
- --extra-layer=<layer1>,<layer2>...<layern>
- Specify the layer which will be used for searching the archs,
- it will search the meta and meta-* layers in the top dir by
- default, and will search meta, meta-*, <layer1>, <layer2>,
- ...<layern> when specified. Use "," as the separator.
-
- This is useless for --stamps-dir or when --extra-archs is used.
-
- -d, --remove-duplicated
- Remove the duplicated sstate cache files of one package, only
- the newest one will be kept. The duplicated sstate cache files
- of one package must have the same arch, which means sstate cache
- files with multiple archs are not considered duplicate.
-
- Conflicts with --stamps-dir.
-
- --stamps-dir=<dir1>,<dir2>...<dirn>
- Specify the build directory's stamps directories, the sstate
- cache file which IS USED by these build diretories will be KEPT,
- other sstate cache files in cache-dir will be removed. Use ","
- as the separator. For example:
- --stamps-dir=build1/tmp/stamps,build2/tmp/stamps
-
- Conflicts with --remove-duplicated.
-
- -L, --follow-symlink
- Remove both the symbol link and the destination file, default: no.
-
- -y, --yes
- Automatic yes to prompts; assume "yes" as answer to all prompts
- and run non-interactively.
-
- -v, --verbose
- Explain what is being done.
-
- -D, --debug
- Show debug info, repeat for more debug info.
-
-EOF
-}
-
-if [ $# -lt 1 ]; then
- usage
- exit 0
-fi
-
-# Echo no files to remove
-no_files () {
- echo No files to remove
-}
-
-# Echo nothing to do
-do_nothing () {
- echo Nothing to do
-}
-
-# Read the input "y"
-read_confirm () {
- echo "$total_deleted out of $total_files files will be removed! "
- if [ "$confirm" != "y" ]; then
- echo "Do you want to continue (y/n)? "
- while read confirm; do
- [ "$confirm" = "Y" -o "$confirm" = "y" -o "$confirm" = "n" \
- -o "$confirm" = "N" ] && break
- echo "Invalid input \"$confirm\", please input 'y' or 'n': "
- done
- else
- echo
- fi
-}
-
-# Print error information and exit.
-echo_error () {
- echo "ERROR: $1" >&2
- exit 1
-}
-
-# Generate the remove list:
-#
-# * Add .done/.siginfo to the remove list
-# * Add destination of symlink to the remove list
-#
-# $1: output file, others: sstate cache file (.tgz)
-gen_rmlist (){
- local rmlist_file="$1"
- shift
- local files="$@"
- for i in $files; do
- echo $i >> $rmlist_file
- # Add the ".siginfo"
- if [ -e $i.siginfo ]; then
- echo $i.siginfo >> $rmlist_file
- fi
- # Add the destination of symlink
- if [ -L "$i" ]; then
- if [ "$fsym" = "y" ]; then
- dest="`readlink -e $i`"
- if [ -n "$dest" ]; then
- echo $dest >> $rmlist_file
- # Remove the .siginfo when .tgz is removed
- if [ -f "$dest.siginfo" ]; then
- echo $dest.siginfo >> $rmlist_file
- fi
- fi
- fi
- # Add the ".tgz.done" and ".siginfo.done" (may exist in the future)
- base_fn="${i##/*/}"
- t_fn="$base_fn.done"
- s_fn="$base_fn.siginfo.done"
- for d in $t_fn $s_fn; do
- if [ -f $cache_dir/$d ]; then
- echo $cache_dir/$d >> $rmlist_file
- fi
- done
- fi
- done
-}
-
-# Remove the duplicated cache files for the pkg, keep the newest one
-remove_duplicated () {
-
- local topdir
- local oe_core_dir
- local tunedirs
- local all_archs
- local all_machines
- local ava_archs
- local arch
- local file_names
- local sstate_files_list
- local fn_tmp
- local list_suffix=`mktemp` || exit 1
-
- if [ -z "$extra_archs" ] ; then
- # Find out the archs in all the layers
- echo "Figuring out the archs in the layers ... "
- oe_core_dir=$(dirname $(dirname $(readlink -e $0)))
- topdir=$(dirname $oe_core_dir)
- tunedirs="`find $topdir/meta* ${oe_core_dir}/meta* $layers -path '*/meta*/conf/machine/include' 2>/dev/null`"
- [ -n "$tunedirs" ] || echo_error "Can't find the tune directory"
- all_machines="`find $topdir/meta* ${oe_core_dir}/meta* $layers -path '*/meta*/conf/machine/*' -name '*.conf' 2>/dev/null | sed -e 's/.*\///' -e 's/.conf$//'`"
- all_archs=`grep -r -h "^AVAILTUNES .*=" $tunedirs | sed -e 's/.*=//' -e 's/\"//g'`
- fi
-
- # Use the "_" to substitute "-", e.g., x86-64 to x86_64, but not for extra_archs which can be something like cortexa9t2-vfp-neon
- # Sort to remove the duplicated ones
- # Add allarch and builder arch (native)
- builder_arch=$(uname -m)
- all_archs="$(echo allarch $all_archs $all_machines $builder_arch \
- | sed -e 's/-/_/g' -e 's/ /\n/g' | sort -u) $extra_archs"
- echo "Done"
-
- # Total number of files including sstate-, .siginfo and .done files
- total_files=`find $cache_dir -name 'sstate*' | wc -l`
- # Save all the sstate files in a file
- sstate_files_list=`mktemp` || exit 1
- find $cache_dir -name 'sstate:*:*:*:*:*:*:*.tgz*' >$sstate_files_list
-
- echo "Figuring out the suffixes in the sstate cache dir ... "
- sstate_suffixes="`sed 's%.*/sstate:[^:]*:[^:]*:[^:]*:[^:]*:[^:]*:[^:]*:[^_]*_\([^:]*\)\.tgz.*%\1%g' $sstate_files_list | sort -u`"
- echo "Done"
- echo "The following suffixes have been found in the cache dir:"
- echo $sstate_suffixes
-
- echo "Figuring out the archs in the sstate cache dir ... "
- # Using this SSTATE_PKGSPEC definition it's 6th colon separated field
- # SSTATE_PKGSPEC = "sstate:${PN}:${PACKAGE_ARCH}${TARGET_VENDOR}-${TARGET_OS}:${PV}:${PR}:${SSTATE_PKGARCH}:${SSTATE_VERSION}:"
- for arch in $all_archs; do
- grep -q ".*/sstate:[^:]*:[^:]*:[^:]*:[^:]*:$arch:[^:]*:[^:]*\.tgz$" $sstate_files_list
- [ $? -eq 0 ] && ava_archs="$ava_archs $arch"
- # ${builder_arch}_$arch used by toolchain sstate
- grep -q ".*/sstate:[^:]*:[^:]*:[^:]*:[^:]*:${builder_arch}_$arch:[^:]*:[^:]*\.tgz$" $sstate_files_list
- [ $? -eq 0 ] && ava_archs="$ava_archs ${builder_arch}_$arch"
- done
- echo "Done"
- echo "The following archs have been found in the cache dir:"
- echo $ava_archs
- echo ""
-
- # Save the file list which needs to be removed
- local remove_listdir=`mktemp -d` || exit 1
- for suffix in $sstate_suffixes; do
- if [ "$suffix" = "populate_lic" ] ; then
- echo "Skipping populate_lic, because removing duplicates doesn't work correctly for them (use --stamps-dir instead)"
- continue
- fi
- # Total number of files including .siginfo and .done files
- total_files_suffix=`grep ".*/sstate:[^:]*:[^:]*:[^:]*:[^:]*:[^:]*:[^:]*:[^:_]*_$suffix\.tgz.*" $sstate_files_list | wc -l 2>/dev/null`
- total_tgz_suffix=`grep ".*/sstate:[^:]*:[^:]*:[^:]*:[^:]*:[^:]*:[^:]*:[^:_]*_$suffix\.tgz$" $sstate_files_list | wc -l 2>/dev/null`
- # Save the file list to a file, some suffix's file may not exist
- grep ".*/sstate:[^:]*:[^:]*:[^:]*:[^:]*:[^:]*:[^:]*:[^:_]*_$suffix\.tgz.*" $sstate_files_list >$list_suffix 2>/dev/null
- local deleted_tgz=0
- local deleted_files=0
- for ext in tgz tgz.siginfo tgz.done; do
- echo "Figuring out the sstate:xxx_$suffix.$ext ... "
- # Uniq BPNs
- file_names=`for arch in $ava_archs ""; do
- sed -ne "s%.*/sstate:\([^:]*\):[^:]*:[^:]*:[^:]*:$arch:[^:]*:[^:]*\.${ext}$%\1%p" $list_suffix
- done | sort -u`
-
- fn_tmp=`mktemp` || exit 1
- rm_list="$remove_listdir/sstate:xxx_$suffix"
- for fn in $file_names; do
- [ -z "$verbose" ] || echo "Analyzing sstate:$fn-xxx_$suffix.${ext}"
- for arch in $ava_archs ""; do
- grep -h ".*/sstate:$fn:[^:]*:[^:]*:[^:]*:$arch:[^:]*:[^:]*\.${ext}$" $list_suffix >$fn_tmp
- if [ -s $fn_tmp ] ; then
- [ $debug -gt 1 ] && echo "Available files for $fn-$arch- with suffix $suffix.${ext}:" && cat $fn_tmp
- # Use the modification time
- to_del=$(ls -t $(cat $fn_tmp) | sed -n '1!p')
- [ $debug -gt 2 ] && echo "Considering to delete: $to_del"
- # The sstate file which is downloaded from the SSTATE_MIRROR is
- # put in SSTATE_DIR, and there is a symlink in SSTATE_DIR/??/ to
- # it, so filter it out from the remove list if it should not be
- # removed.
- to_keep=$(ls -t $(cat $fn_tmp) | sed -n '1p')
- [ $debug -gt 2 ] && echo "Considering to keep: $to_keep"
- for k in $to_keep; do
- if [ -L "$k" ]; then
- # The symlink's destination
- k_dest="`readlink -e $k`"
- # Maybe it is the one in cache_dir
- k_maybe="$cache_dir/${k##/*/}"
- # Remove it from the remove list if they are the same.
- if [ "$k_dest" = "$k_maybe" ]; then
- to_del="`echo $to_del | sed 's#'\"$k_maybe\"'##g'`"
- fi
- fi
- done
- rm -f $fn_tmp
- [ $debug -gt 2 ] && echo "Decided to delete: $to_del"
- gen_rmlist $rm_list.$ext "$to_del"
- fi
- done
- done
- done
- deleted_tgz=`cat $rm_list.* 2>/dev/null | grep ".tgz$" | wc -l`
- deleted_files=`cat $rm_list.* 2>/dev/null | wc -l`
- [ "$deleted_files" -gt 0 -a $debug -gt 0 ] && cat $rm_list.*
- echo "($deleted_tgz out of $total_tgz_suffix .tgz files for $suffix suffix will be removed or $deleted_files out of $total_files_suffix when counting also .siginfo and .done files)"
- let total_deleted=$total_deleted+$deleted_files
- done
- deleted_tgz=0
- rm_old_list=$remove_listdir/sstate-old-filenames
- find $cache_dir -name 'sstate-*.tgz' >$rm_old_list
- [ -s "$rm_old_list" ] && deleted_tgz=`cat $rm_old_list | grep ".tgz$" | wc -l`
- [ -s "$rm_old_list" ] && deleted_files=`cat $rm_old_list | wc -l`
- [ -s "$rm_old_list" -a $debug -gt 0 ] && cat $rm_old_list
- echo "($deleted_tgz .tgz files with old sstate-* filenames will be removed or $deleted_files when counting also .siginfo and .done files)"
- let total_deleted=$total_deleted+$deleted_files
-
- rm -f $list_suffix
- rm -f $sstate_files_list
- if [ $total_deleted -gt 0 ]; then
- read_confirm
- if [ "$confirm" = "y" -o "$confirm" = "Y" ]; then
- for list in `ls $remove_listdir/`; do
- echo "Removing $list.tgz (`cat $remove_listdir/$list | wc -w` files) ... "
- # Remove them one by one to avoid the argument list too long error
- for i in `cat $remove_listdir/$list`; do
- rm -f $verbose $i
- done
- echo "Done"
- done
- echo "$total_deleted files have been removed!"
- else
- do_nothing
- fi
- else
- no_files
- fi
- [ -d $remove_listdir ] && rm -fr $remove_listdir
-}
-
-# Remove the sstate file by stamps dir, the file not used by the stamps dir
-# will be removed.
-rm_by_stamps (){
-
- local cache_list=`mktemp` || exit 1
- local keep_list=`mktemp` || exit 1
- local rm_list=`mktemp` || exit 1
- local sums
- local all_sums
-
- # Total number of files including sstate-, .siginfo and .done files
- total_files=`find $cache_dir -type f -name 'sstate*' | wc -l`
- # Save all the state file list to a file
- find $cache_dir -type f -name 'sstate*' | sort -u -o $cache_list
-
- echo "Figuring out the suffixes in the sstate cache dir ... "
- local sstate_suffixes="`sed 's%.*/sstate:[^:]*:[^:]*:[^:]*:[^:]*:[^:]*:[^:]*:[^_]*_\([^:]*\)\.tgz.*%\1%g' $cache_list | sort -u`"
- echo "Done"
- echo "The following suffixes have been found in the cache dir:"
- echo $sstate_suffixes
-
- # Figure out all the md5sums in the stamps dir.
- echo "Figuring out all the md5sums in stamps dir ... "
- for i in $sstate_suffixes; do
- # There is no "\.sigdata" but "_setcene" when it is mirrored
- # from the SSTATE_MIRRORS, use them to figure out the sum.
- sums=`find $stamps -maxdepth 3 -name "*.do_$i.*" \
- -o -name "*.do_${i}_setscene.*" | \
- sed -ne 's#.*_setscene\.##p' -e 's#.*\.sigdata\.##p' | \
- sed -e 's#\..*##' | sort -u`
- all_sums="$all_sums $sums"
- done
- echo "Done"
-
- echo "Figuring out the files which will be removed ... "
- for i in $all_sums; do
- grep ".*/sstate:[^:]*:[^:]*:[^:]*:[^:]*:[^:]*:[^:]*:${i}_.*" $cache_list >>$keep_list
- done
- echo "Done"
-
- if [ -s $keep_list ]; then
- sort -u $keep_list -o $keep_list
- to_del=`comm -1 -3 $keep_list $cache_list`
- gen_rmlist $rm_list "$to_del"
- let total_deleted=`cat $rm_list | sort -u | wc -w`
- if [ $total_deleted -gt 0 ]; then
- [ $debug -gt 0 ] && cat $rm_list | sort -u
- read_confirm
- if [ "$confirm" = "y" -o "$confirm" = "Y" ]; then
- echo "Removing sstate cache files ... ($total_deleted files)"
- # Remove them one by one to avoid the argument list too long error
- for i in `cat $rm_list | sort -u`; do
- rm -f $verbose $i
- done
- echo "$total_deleted files have been removed"
- else
- do_nothing
- fi
- else
- no_files
- fi
- else
- echo_error "All files in cache dir will be removed! Abort!"
- fi
-
- rm -f $cache_list
- rm -f $keep_list
- rm -f $rm_list
-}
-
-# Parse arguments
-while [ -n "$1" ]; do
- case $1 in
- --cache-dir=*)
- cache_dir=`echo $1 | sed -e 's#^--cache-dir=##' | xargs readlink -e`
- [ -d "$cache_dir" ] || echo_error "Invalid argument to --cache-dir"
- shift
- ;;
- --remove-duplicated|-d)
- rm_duplicated="y"
- shift
- ;;
- --yes|-y)
- confirm="y"
- shift
- ;;
- --follow-symlink|-L)
- fsym="y"
- shift
- ;;
- --extra-archs=*)
- extra_archs=`echo $1 | sed -e 's#^--extra-archs=##' -e 's#,# #g'`
- [ -n "$extra_archs" ] || echo_error "Invalid extra arch parameter"
- shift
- ;;
- --extra-layer=*)
- extra_layers=`echo $1 | sed -e 's#^--extra-layer=##' -e 's#,# #g'`
- [ -n "$extra_layers" ] || echo_error "Invalid extra layer parameter"
- for i in $extra_layers; do
- l=`readlink -e $i`
- if [ -d "$l" ]; then
- layers="$layers $l"
- else
- echo_error "Can't find layer $i"
- fi
- done
- shift
- ;;
- --stamps-dir=*)
- stamps=`echo $1 | sed -e 's#^--stamps-dir=##' -e 's#,# #g'`
- [ -n "$stamps" ] || echo_error "Invalid stamps dir $i"
- for i in $stamps; do
- [ -d "$i" ] || echo_error "Invalid stamps dir $i"
- done
- shift
- ;;
- --verbose|-v)
- verbose="-v"
- shift
- ;;
- --debug|-D)
- debug=`expr $debug + 1`
- echo "Debug level $debug"
- shift
- ;;
- --help|-h)
- usage
- exit 0
- ;;
- *)
- echo "Invalid arguments $*"
- echo_error "Try 'sstate-cache-management.sh -h' for more information."
- ;;
- esac
-done
-
-# sstate cache directory, use environment variable SSTATE_CACHE_DIR
-# if it was not specified, otherwise, error.
-[ -n "$cache_dir" ] || cache_dir=$SSTATE_CACHE_DIR
-[ -n "$cache_dir" ] || echo_error "No cache dir found!"
-[ -d "$cache_dir" ] || echo_error "Invalid cache directory \"$cache_dir\""
-
-[ -n "$rm_duplicated" -a -n "$stamps" ] && \
- echo_error "Can not use both --remove-duplicated and --stamps-dir"
-
-[ "$rm_duplicated" = "y" ] && remove_duplicated
-[ -n "$stamps" ] && rm_by_stamps
-[ -z "$rm_duplicated" -a -z "$stamps" ] && \
- echo "What do you want to do?"
-exit 0
diff --git a/scripts/sstate-diff-machines.sh b/scripts/sstate-diff-machines.sh
index 27c6a33006..5ed413b2ee 100755
--- a/scripts/sstate-diff-machines.sh
+++ b/scripts/sstate-diff-machines.sh
@@ -1,5 +1,9 @@
#!/bin/bash
-
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
# Used to compare sstate checksums between MACHINES.
# Execute script and compare generated list.M files.
# Using bash to have PIPESTATUS variable.
@@ -125,6 +129,8 @@ for M in ${machines}; do
fi
done
+COMPARE_TASKS="do_configure.sigdata do_populate_sysroot.sigdata do_package_write_ipk.sigdata do_package_write_rpm.sigdata do_package_write_deb.sigdata do_package_write_tar.sigdata"
+
function compareSignatures() {
MACHINE1=$1
MACHINE2=$2
@@ -132,7 +138,7 @@ function compareSignatures() {
PRE_PATTERN=""
[ -n "${PATTERN}" ] || PRE_PATTERN="-v"
[ -n "${PATTERN}" ] || PATTERN="MACHINE"
- for TASK in do_configure.sigdata do_populate_sysroot.sigdata do_package_write_ipk.sigdata; do
+ for TASK in $COMPARE_TASKS; do
printf "\n\n === Comparing signatures for task ${TASK} between ${MACHINE1} and ${MACHINE2} ===\n" | tee -a ${OUTPUT}/signatures.${MACHINE2}.${TASK}.log
diff ${OUTPUT}/${MACHINE1}/list.M ${OUTPUT}/${MACHINE2}/list.M | grep ${PRE_PATTERN} "${PATTERN}" | grep ${TASK} > ${OUTPUT}/signatures.${MACHINE2}.${TASK}
for i in `cat ${OUTPUT}/signatures.${MACHINE2}.${TASK} | sed 's#[^/]*/\([^/]*\)/.*#\1#g' | sort -u | xargs`; do
diff --git a/scripts/sstate-sysroot-cruft.sh b/scripts/sstate-sysroot-cruft.sh
index d9917f5152..b2002badfb 100755
--- a/scripts/sstate-sysroot-cruft.sh
+++ b/scripts/sstate-sysroot-cruft.sh
@@ -1,5 +1,9 @@
#!/bin/sh
-
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
# Used to find files installed in sysroot which are not tracked by sstate manifest
# Global vars
@@ -143,18 +147,6 @@ WHITELIST="${WHITELIST} \
.*/var/cache/fontconfig/ \
"
-# created by oe.utils.write_ld_so_conf which is used from few bbclasses and recipes:
-# meta/classes/image-prelink.bbclass: oe.utils.write_ld_so_conf(d)
-# meta/classes/insane.bbclass: oe.utils.write_ld_so_conf(d)
-# meta/classes/insane.bbclass: oe.utils.write_ld_so_conf(d)
-# meta/recipes-gnome/gobject-introspection/gobject-introspection_1.48.0.bb: oe.utils.write_ld_so_conf(d)
-# meta/recipes-gnome/gobject-introspection/gobject-introspection_1.48.0.bb: oe.utils.write_ld_so_conf(d)
-# introduced in oe-core commit 7fd1d7e639c2ed7e0699937a5cb245c187b7c811
-# and more visible since added to gobject-introspection in 10e0c1a3a452baa05d160a92a54b2e33cf0fd061
-WHITELIST="${WHITELIST} \
- [^/]*/etc/ld.so.conf \
-"
-
SYSROOTS="`readlink -f ${tmpdir}`/sysroots/"
mkdir ${OUTPUT}
diff --git a/scripts/sysroot-relativelinks.py b/scripts/sysroot-relativelinks.py
index ffe254728b..ccb3c867f0 100755
--- a/scripts/sysroot-relativelinks.py
+++ b/scripts/sysroot-relativelinks.py
@@ -1,4 +1,10 @@
#!/usr/bin/env python3
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
+
import sys
import os
diff --git a/scripts/task-time b/scripts/task-time
index e58040a9b9..8f71b29b77 100755
--- a/scripts/task-time
+++ b/scripts/task-time
@@ -1,4 +1,9 @@
#!/usr/bin/env python3
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
import argparse
import os
diff --git a/scripts/test-reexec b/scripts/test-reexec
index 30e792c7d9..fccdac4da6 100755
--- a/scripts/test-reexec
+++ b/scripts/test-reexec
@@ -3,21 +3,8 @@
# Test Script for task re-execution
#
# Copyright 2012 Intel Corporation
-# All rights reserved.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+# SPDX-License-Identifier: GPL-2.0-or-later
#
# DESCRIPTION
# This script is intended to address issues for re-execution of
diff --git a/scripts/test-remote-image b/scripts/test-remote-image
index 27b1cae38f..d209d22854 100755
--- a/scripts/test-remote-image
+++ b/scripts/test-remote-image
@@ -1,19 +1,9 @@
#!/usr/bin/env python3
-
-# Copyright (c) 2014 Intel Corporation
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
+# Copyright (c) 2014 Intel Corporation
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
+# SPDX-License-Identifier: GPL-2.0-only
#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# DESCRIPTION
# This script is used to test public autobuilder images on remote hardware.
diff --git a/scripts/tiny/dirsize.py b/scripts/tiny/dirsize.py
index ddccc5a8c8..501516b0d4 100755
--- a/scripts/tiny/dirsize.py
+++ b/scripts/tiny/dirsize.py
@@ -1,22 +1,8 @@
#!/usr/bin/env python3
#
# Copyright (c) 2011, Intel Corporation.
-# All rights reserved.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
+# SPDX-License-Identifier: GPL-2.0-or-later
#
# Display details of the root filesystem size, broken up by directory.
# Allows for limiting by size to focus on the larger files.
diff --git a/scripts/tiny/ksize.py b/scripts/tiny/ksize.py
index ea1ca7ff23..db2b9ec39f 100755
--- a/scripts/tiny/ksize.py
+++ b/scripts/tiny/ksize.py
@@ -1,24 +1,10 @@
#!/usr/bin/env python3
#
# Copyright (c) 2011, Intel Corporation.
-# All rights reserved.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
+# SPDX-License-Identifier: GPL-2.0-or-later
#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-#
-#
-# Display details of the kernel build size, broken up by built-in.o. Sort
+# Display details of the kernel build size, broken up by built-in.[o,a]. Sort
# the objects by size. Run from the top level kernel build directory.
#
# Author: Darren Hart <dvhart@linux.intel.com>
@@ -41,7 +27,7 @@ def usage():
class Sizes:
def __init__(self, glob):
self.title = glob
- p = Popen("size -t " + str(glob), shell=True, stdout=PIPE, stderr=PIPE)
+ p = Popen("size -t " + str(glob), shell=True, stdout=PIPE, stderr=PIPE, universal_newlines=True)
output = p.communicate()[0].splitlines()
if len(output) > 2:
sizes = output[-1].split()[0:4]
@@ -63,17 +49,17 @@ class Report:
path = os.path.dirname(filename)
p = Popen("ls " + str(path) + "/*.o | grep -v built-in.o",
- shell=True, stdout=PIPE, stderr=PIPE)
+ shell=True, stdout=PIPE, stderr=PIPE, universal_newlines=True)
glob = ' '.join(p.communicate()[0].splitlines())
oreport = Report(glob, str(path) + "/*.o")
oreport.sizes.title = str(path) + "/*.o"
r.parts.append(oreport)
if subglob:
- p = Popen("ls " + subglob, shell=True, stdout=PIPE, stderr=PIPE)
+ p = Popen("ls " + subglob, shell=True, stdout=PIPE, stderr=PIPE, universal_newlines=True)
for f in p.communicate()[0].splitlines():
path = os.path.dirname(f)
- r.parts.append(Report.create(f, path, str(path) + "/*/built-in.o"))
+ r.parts.append(Report.create(f, path, str(path) + "/*/built-in.[o,a]"))
r.parts.sort(reverse=True)
for b in r.parts:
@@ -153,7 +139,7 @@ def main():
else:
assert False, "unhandled option"
- glob = "arch/*/built-in.o */built-in.o"
+ glob = "arch/*/built-in.[o,a] */built-in.[o,a]"
vmlinux = Report.create("vmlinux", "Linux Kernel", glob)
vmlinux.show()
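The recurring universal_newlines=True addition in this file is what keeps the int() conversions working under Python 3: it makes communicate() return str instead of bytes. A minimal standalone sketch (assuming GNU binutils' size is on PATH; /bin/ls is just a convenient test binary):

    from subprocess import Popen, PIPE

    p = Popen("size -t /bin/ls", shell=True, stdout=PIPE, stderr=PIPE,
              universal_newlines=True)   # decode stdout to str
    output = p.communicate()[0].splitlines()
    if len(output) > 2:
        # Without universal_newlines=True these fields would be bytes,
        # and int(b'...') raises TypeError under Python 3.
        text, data, bss, total = (int(v) for v in output[-1].split()[0:4])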
diff --git a/scripts/tiny/ksum.py b/scripts/tiny/ksum.py
index d4f3892156..8f0e4c0517 100755
--- a/scripts/tiny/ksum.py
+++ b/scripts/tiny/ksum.py
@@ -1,22 +1,8 @@
-#!/usr/bin/env python
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#!/usr/bin/env python3
#
# Copyright (c) 2016, Intel Corporation.
-# All rights reserved.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# SPDX-License-Identifier: GPL-2.0-only
#
# DESCRIPTION 'ksum.py' generates a combined summary of vmlinux and
# module sizes for a built kernel, as a quick tool for comparing the
@@ -76,7 +62,7 @@ def is_ko_file(filename):
return False
def collect_object_files():
- print "Collecting object files recursively from %s..." % os.getcwd()
+ print("Collecting object files recursively from %s..." % os.getcwd())
for dirpath, dirs, files in os.walk(os.getcwd()):
for filename in files:
if is_ko_file(filename):
@@ -84,7 +70,7 @@ def collect_object_files():
elif is_vmlinux_file(filename):
global vmlinux_file
vmlinux_file = os.path.join(dirpath, filename)
- print "Collecting object files [DONE]"
+ print("Collecting object files [DONE]")
def add_ko_file(filename):
p = Popen("size -t " + filename, shell=True, stdout=PIPE, stderr=PIPE)
@@ -92,9 +78,9 @@ def add_ko_file(filename):
if len(output) > 2:
sizes = output[-1].split()[0:4]
if verbose:
- print " %10d %10d %10d %10d\t" % \
- (int(sizes[0]), int(sizes[1]), int(sizes[2]), int(sizes[3])),
- print "%s" % filename[len(os.getcwd()) + 1:]
+ print(" %10d %10d %10d %10d\t" % \
+ (int(sizes[0]), int(sizes[1]), int(sizes[2]), int(sizes[3])), end=' ')
+ print("%s" % filename[len(os.getcwd()) + 1:])
global n_ko_files, ko_text, ko_data, ko_bss, ko_total
ko_text += int(sizes[0])
ko_data += int(sizes[1])
@@ -108,9 +94,9 @@ def get_vmlinux_totals():
if len(output) > 2:
sizes = output[-1].split()[0:4]
if verbose:
- print " %10d %10d %10d %10d\t" % \
- (int(sizes[0]), int(sizes[1]), int(sizes[2]), int(sizes[3])),
- print "%s" % vmlinux_file[len(os.getcwd()) + 1:]
+ print(" %10d %10d %10d %10d\t" % \
+ (int(sizes[0]), int(sizes[1]), int(sizes[2]), int(sizes[3])), end=' ')
+ print("%s" % vmlinux_file[len(os.getcwd()) + 1:])
global vmlinux_text, vmlinux_data, vmlinux_bss, vmlinux_total
vmlinux_text += int(sizes[0])
vmlinux_data += int(sizes[1])
@@ -143,20 +129,20 @@ def main():
sum_ko_files()
get_vmlinux_totals()
- print "\nTotals:"
- print "\nvmlinux:"
- print " text\tdata\t\tbss\t\ttotal"
- print " %-10d\t%-10d\t%-10d\t%-10d" % \
- (vmlinux_text, vmlinux_data, vmlinux_bss, vmlinux_total)
- print "\nmodules (%d):" % n_ko_files
- print " text\tdata\t\tbss\t\ttotal"
- print " %-10d\t%-10d\t%-10d\t%-10d" % \
- (ko_text, ko_data, ko_bss, ko_total)
- print "\nvmlinux + modules:"
- print " text\tdata\t\tbss\t\ttotal"
- print " %-10d\t%-10d\t%-10d\t%-10d" % \
+ print("\nTotals:")
+ print("\nvmlinux:")
+ print(" text\tdata\t\tbss\t\ttotal")
+ print(" %-10d\t%-10d\t%-10d\t%-10d" % \
+ (vmlinux_text, vmlinux_data, vmlinux_bss, vmlinux_total))
+ print("\nmodules (%d):" % n_ko_files)
+ print(" text\tdata\t\tbss\t\ttotal")
+ print(" %-10d\t%-10d\t%-10d\t%-10d" % \
+ (ko_text, ko_data, ko_bss, ko_total))
+ print("\nvmlinux + modules:")
+ print(" text\tdata\t\tbss\t\ttotal")
+ print(" %-10d\t%-10d\t%-10d\t%-10d" % \
(vmlinux_text + ko_text, vmlinux_data + ko_data, \
- vmlinux_bss + ko_bss, vmlinux_total + ko_total)
+ vmlinux_bss + ko_bss, vmlinux_total + ko_total))
if __name__ == "__main__":
try:
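The bulk of this file's hunks apply one mechanical pattern: Python 2's trailing-comma print, which suppressed the newline, becomes print(..., end=' '). A two-line sketch with made-up values:

    size, name = 4096, "hello.ko"       # hypothetical module
    print(" %10d\t" % size, end=' ')    # was: print " %10d\t" % size,
    print("%s" % name)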
diff --git a/scripts/verify-bashisms b/scripts/verify-bashisms
index a979bd2965..fc3677c6ed 100755
--- a/scripts/verify-bashisms
+++ b/scripts/verify-bashisms
@@ -1,8 +1,13 @@
#!/usr/bin/env python3
+#
+# Copyright OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: GPL-2.0-only
+#
import sys, os, subprocess, re, shutil
-whitelist = (
+allowed = (
# type is supported by dash
'if type systemctl >/dev/null 2>/dev/null; then',
'if type systemd-tmpfiles >/dev/null 2>/dev/null; then',
@@ -16,8 +21,8 @@ whitelist = (
'. $target_sdk_dir/${oe_init_build_env_path} $target_sdk_dir >> $LOGFILE'
)
-def is_whitelisted(s):
- for w in whitelist:
+def is_allowed(s):
+ for w in allowed:
if w in s:
return True
return False
@@ -46,7 +51,7 @@ def process(filename, function, lineno, script):
output = e.output.replace(fn.name, function)
if not output or not output.startswith('possible bashism'):
# Probably starts with or contains only warnings. Dump verbatim
- # with one space indention. Can't do the splitting and whitelist
+ # with one space indentation. Can't do the splitting and allowed-list
# checking below.
return '\n'.join([filename,
' Unexpected output from checkbashisms.pl'] +
@@ -62,7 +67,7 @@ def process(filename, function, lineno, script):
# ...
# ...
result = []
- # Check the results against the whitelist
+ # Check the results against the allowed list
for message, source in zip(output[0::2], output[1::2]):
- if not is_whitelisted(source):
+ if not is_allowed(source):
if lineno is not None:
@@ -97,7 +102,7 @@ if __name__=='__main__':
args = parser.parse_args()
if shutil.which("checkbashisms.pl") is None:
- print("Cannot find checkbashisms.pl on $PATH, get it from https://anonscm.debian.org/cgit/collab-maint/devscripts.git/plain/scripts/checkbashisms.pl")
+ print("Cannot find checkbashisms.pl on $PATH, get it from https://salsa.debian.org/debian/devscripts/raw/master/scripts/checkbashisms.pl")
sys.exit(1)
# The order of defining the worker function,
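Illustrative only: the renamed helper is a plain substring match, so any checkbashisms report whose source line contains one of the allowed snippets is suppressed. A condensed equivalent of the logic above:

    allowed = ('if type systemctl >/dev/null 2>/dev/null; then',)

    def is_allowed(s):
        return any(w in s for w in allowed)

    print(is_allowed('  if type systemctl >/dev/null 2>/dev/null; then'))  # True
    print(is_allowed('echo ${var//foo/bar}'))                              # False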
diff --git a/scripts/wic b/scripts/wic
index 7392bc4e7f..06e0b48db0 100755
--- a/scripts/wic
+++ b/scripts/wic
@@ -1,22 +1,8 @@
#!/usr/bin/env python3
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# Copyright (c) 2013, Intel Corporation.
-# All rights reserved.
#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+# SPDX-License-Identifier: GPL-2.0-only
#
# DESCRIPTION 'wic' is the OpenEmbedded Image Creator that users can
# use to generate bootable images. Invoking it without any arguments
@@ -35,9 +21,10 @@ import os
import sys
import argparse
import logging
+import subprocess
+import shutil
from collections import namedtuple
-from distutils import spawn
# External modules
scripts_path = os.path.dirname(os.path.realpath(__file__))
@@ -60,13 +47,10 @@ if os.environ.get('SDKTARGETSYSROOT'):
break
sdkroot = os.path.dirname(sdkroot)
-bitbake_exe = spawn.find_executable('bitbake')
+bitbake_exe = shutil.which('bitbake')
if bitbake_exe:
bitbake_path = scriptpath.add_bitbake_lib_path()
- from bb import cookerdata
- from bb.main import bitbake_main, BitBakeConfigParameters
-else:
- bitbake_main = None
+ import bb
from wic import WicError
from wic.misc import get_bitbake_var, BB_VARS
@@ -124,7 +108,7 @@ def wic_create_subcommand(options, usage_str):
Command-line handling for image creation. The real work is done
by image.engine.wic_create()
"""
- if options.build_rootfs and not bitbake_main:
+ if options.build_rootfs and not bitbake_exe:
raise WicError("Can't build rootfs as bitbake is not in the $PATH")
if not options.image_name:
@@ -160,9 +144,7 @@ def wic_create_subcommand(options, usage_str):
argv.append("--debug")
logger.info("Building rootfs...\n")
- if bitbake_main(BitBakeConfigParameters(argv),
- cookerdata.CookerConfiguration()):
- raise WicError("bitbake exited with error")
+ subprocess.check_call(argv)
rootfs_dir = get_bitbake_var("IMAGE_ROOTFS", options.image_name)
kernel_dir = get_bitbake_var("DEPLOY_DIR_IMAGE", options.image_name)
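The switch from the in-process bitbake_main() call to subprocess.check_call() also changes how failures surface: check_call() raises CalledProcessError on a nonzero exit, so no explicit "bitbake exited with error" test is needed. A hedged sketch (a generic RuntimeError stands in for wic's WicError; the build target is hypothetical):

    import subprocess

    argv = ["bitbake", "core-image-minimal"]
    try:
        subprocess.check_call(argv)
    except subprocess.CalledProcessError as err:
        raise RuntimeError("bitbake exited with status %d" % err.returncode)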
@@ -177,11 +159,12 @@ def wic_create_subcommand(options, usage_str):
"(Use -e/--image-name to specify it)")
native_sysroot = options.native_sysroot
+ if options.kernel_dir:
+ kernel_dir = options.kernel_dir
+
if not options.vars_dir and (not native_sysroot or not os.path.isdir(native_sysroot)):
logger.info("Building wic-tools...\n")
- if bitbake_main(BitBakeConfigParameters("bitbake wic-tools".split()),
- cookerdata.CookerConfiguration()):
- raise WicError("bitbake wic-tools failed")
+ subprocess.check_call(["bitbake", "wic-tools"])
native_sysroot = get_bitbake_var("RECIPE_SYSROOT_NATIVE", "wic-tools")
if not native_sysroot:
@@ -226,7 +209,7 @@ def wic_create_subcommand(options, usage_str):
logger.info(" (Please check that the build artifacts for the machine")
logger.info(" selected in local.conf actually exist and that they")
logger.info(" are the correct artifacts for the image (.wks file)).\n")
- raise WicError("The artifact that couldn't be found was %s:\n %s", not_found, not_found_dir)
+ raise WicError("The artifact that couldn't be found was %s:\n %s" % (not_found, not_found_dir))
krootfs_dir = options.rootfs_dir
if krootfs_dir is None:
@@ -332,6 +315,8 @@ def wic_init_parser_create(subparser):
subparser.add_argument("-o", "--outdir", dest="outdir", default='.',
help="name of directory to create image in")
+ subparser.add_argument("-w", "--workdir",
+ help="temporary workdir to use for intermediate files")
subparser.add_argument("-e", "--image-name", dest="image_name",
help="name of the image to use the artifacts from "
"e.g. core-image-sato")
@@ -362,6 +347,10 @@ def wic_init_parser_create(subparser):
"bitbake variables")
subparser.add_argument("-D", "--debug", dest="debug", action="store_true",
default=False, help="output debug information")
+ subparser.add_argument("-i", "--imager", dest="imager",
+ default="direct", help="the wic imager plugin")
+ subparser.add_argument("--extra-space", type=int, dest="extra_space",
+ default=0, help="additional free disk space to add to the image")
return
@@ -410,9 +399,9 @@ def imgpathtype(arg):
def wic_init_parser_cp(subparser):
subparser.add_argument("src",
- help="source spec")
- subparser.add_argument("dest", type=imgpathtype,
- help="image spec: <image>:<vfat partition>[<path>]")
+ help="image spec: <image>:<vfat partition>[<path>] or <file>")
+ subparser.add_argument("dest",
+ help="image spec: <image>:<vfat partition>[<path>] or <file>")
subparser.add_argument("-n", "--native-sysroot",
help="path to the native sysroot containing the tools")
@@ -421,6 +410,9 @@ def wic_init_parser_rm(subparser):
help="path: <image>:<vfat partition><path>")
subparser.add_argument("-n", "--native-sysroot",
help="path to the native sysroot containing the tools")
+ subparser.add_argument("-r", dest="recursive_delete", action="store_true", default=False,
+ help="remove directories and their contents recursively, "
+ " this only applies to ext* partition")
def expandtype(rules):
"""
@@ -430,7 +422,7 @@ def expandtype(rules):
if rules == 'auto':
return {}
result = {}
- for rule in rules.split('-'):
+ for rule in rules.split(','):
try:
part, size = rule.split(':')
except ValueError:
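With the separator switched from '-' to ',', an expand specification now reads e.g. '1:200M,3:1G' (partition:size pairs). A simplified parse assuming that format (the real expandtype goes on to validate and convert the size values):

    rules = "1:200M,3:1G"            # hypothetical --expand argument
    result = {}
    for rule in rules.split(','):
        part, size = rule.split(':')
        result[int(part)] = size
    print(result)                    # {1: '200M', 3: '1G'}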
@@ -505,31 +497,48 @@ subcommands = {
def init_parser(parser):
parser.add_argument("--version", action="version",
version="%(prog)s {version}".format(version=__version__))
+ parser.add_argument("-D", "--debug", dest="debug", action="store_true",
+ default=False, help="output debug information")
+
subparsers = parser.add_subparsers(dest='command', help=hlp.wic_usage)
for subcmd in subcommands:
subparser = subparsers.add_parser(subcmd, help=subcommands[subcmd][2])
subcommands[subcmd][3](subparser)
+class WicArgumentParser(argparse.ArgumentParser):
+ def format_help(self):
+ return hlp.wic_help
def main(argv):
- parser = argparse.ArgumentParser(
+ parser = WicArgumentParser(
description="wic version %s" % __version__)
init_parser(parser)
args = parser.parse_args(argv)
+ if args.debug:
+ logger.setLevel(logging.DEBUG)
+
if "command" in vars(args):
if args.command == "help":
if args.help_topic is None:
parser.print_help()
- print()
- print("Please specify a help topic")
elif args.help_topic in helptopics:
hlpt = helptopics[args.help_topic]
hlpt[0](hlpt[1], hlpt[2])
return 0
+ # Validate the wic cp src and dest parameters to identify which of them
+ # is the image, and convert that one with imgtype
+ if args.command == "cp":
+ if ":" in args.dest:
+ args.dest = imgtype(args.dest)
+ elif ":" in args.src:
+ args.src = imgtype(args.src)
+ else:
+ raise argparse.ArgumentTypeError("no image or partition number specified.")
+
return hlp.invoke_subcommand(args, parser, hlp.wic_help_usage, subcommands)
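Usage illustration for the now-bidirectional cp validated above (paths are hypothetical): whichever argument contains ':' is treated as the image spec and converted with imgtype.

    $ wic cp ./extra.conf core-image.wic:1/etc/        # host -> image (':' in dest)
    $ wic cp core-image.wic:1/etc/fstab ./fstab        # image -> host (':' in src)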
diff --git a/scripts/yocto-check-layer b/scripts/yocto-check-layer
index 5a4fd752ca..67cc71950f 100755
--- a/scripts/yocto-check-layer
+++ b/scripts/yocto-check-layer
@@ -3,7 +3,9 @@
# Yocto Project layer checking tool
#
# Copyright (C) 2017 Intel Corporation
-# Released under the MIT license (see COPYING.MIT)
+#
+# SPDX-License-Identifier: MIT
+#
import os
import sys
@@ -22,7 +24,7 @@ import scriptpath
scriptpath.add_oe_lib_path()
scriptpath.add_bitbake_lib_path()
-from checklayer import LayerType, detect_layers, add_layer, add_layer_dependencies, get_signatures
+from checklayer import LayerType, detect_layers, add_layers, add_layer_dependencies, get_layer_dependencies, get_signatures, check_bblayers, sanity_check_layers
from oeqa.utils.commands import get_bb_vars
PROGNAME = 'yocto-check-layer'
@@ -39,6 +41,12 @@ def test_layer(td, layer, test_software_layer_signatures):
tc.loadTests(CASES_PATHS)
return tc.runTests()
+def dump_layer_debug(layer):
+ logger.debug("Found layer %s (%s)" % (layer["name"], layer["path"]))
+ collections = layer.get("collections", {})
+ if collections:
+ logger.debug("%s collections: %s" % (layer["name"], ", ".join(collections)))
+
def main():
parser = argparse.ArgumentParser(
description="Yocto Project layer checking tool",
@@ -49,6 +57,8 @@ def main():
help='File to output log (optional)', action='store')
parser.add_argument('--dependency', nargs="+",
help='Layers to process for dependencies', action='store')
+ parser.add_argument('--no-auto-dependency', help='Disable automatic testing of dependencies',
+ action='store_true')
parser.add_argument('--machines', nargs="+",
help='List of MACHINEs to be used during testing', action='store')
parser.add_argument('--additional-layers', nargs="+",
@@ -82,7 +92,7 @@ def main():
logger.setLevel(logging.ERROR)
if not 'BUILDDIR' in os.environ:
- logger.error("You must source the environment before run this script.")
+ logger.error("You must source the environment before running this script.")
logger.error("$ source oe-init-build-env")
return 1
builddir = os.environ['BUILDDIR']
@@ -90,7 +100,7 @@ def main():
layers = detect_layers(args.layers, args.no_auto)
if not layers:
- logger.error("Fail to detect layers")
+ logger.error("Failed to detect layers")
return 1
if args.additional_layers:
additional_layers = detect_layers(args.additional_layers, args.no_auto)
@@ -102,15 +112,26 @@ def main():
else:
dep_layers = layers
+ logger.debug("Found additional layers:")
+ for l in additional_layers:
+ dump_layer_debug(l)
+ logger.debug("Found dependency layers:")
+ for l in dep_layers:
+ dump_layer_debug(l)
+
+ if not sanity_check_layers(additional_layers + dep_layers, logger):
+ logger.error("Failed layer validation")
+ return 1
+
logger.info("Detected layers:")
for layer in layers:
if layer['type'] == LayerType.ERROR_BSP_DISTRO:
logger.error("%s: Can't be DISTRO and BSP type at the same time."\
- " The conf/distro and conf/machine folders was found."\
+ " Both conf/distro and conf/machine folders were found."\
% layer['name'])
layers.remove(layer)
elif layer['type'] == LayerType.ERROR_NO_LAYER_CONF:
- logger.error("%s: Don't have conf/layer.conf file."\
+ logger.info("%s: Doesn't have conf/layer.conf file, so ignoring"\
% layer['name'])
layers.remove(layer)
else:
@@ -119,6 +140,21 @@ def main():
if not layers:
return 1
+ # Find all dependencies, and get them checked too
+ if not args.no_auto_dependency:
+ depends = []
+ for layer in layers:
+ layer_depends = get_layer_dependencies(layer, dep_layers, logger)
+ if layer_depends:
+ for d in layer_depends:
+ if d not in depends:
+ depends.append(d)
+
+ for d in depends:
+ if d not in layers:
+ logger.info("Adding %s to the list of layers to test, as a dependency", d['name'])
+ layers.append(d)
+
shutil.copyfile(bblayersconf, bblayersconf + '.backup')
def cleanup_bblayers(signum, frame):
shutil.copyfile(bblayersconf + '.backup', bblayersconf)
@@ -132,32 +168,37 @@ def main():
layers_tested = 0
for layer in layers:
- if layer['type'] == LayerType.ERROR_NO_LAYER_CONF or \
- layer['type'] == LayerType.ERROR_BSP_DISTRO:
+ if layer['type'] in (LayerType.ERROR_NO_LAYER_CONF, LayerType.ERROR_BSP_DISTRO):
+ continue
+
+ # Reset to a clean backup copy for each run
+ shutil.copyfile(bblayersconf + '.backup', bblayersconf)
+
+ if layer['type'] not in (LayerType.CORE, ) and check_bblayers(bblayersconf, layer['path'], logger):
+ logger.info("%s already in %s. To capture initial signatures, layer under test should not present "
+ "in BBLAYERS. Please remove %s from BBLAYERS." % (layer['name'], bblayersconf, layer['name']))
+ results[layer['name']] = None
+ results_status[layer['name']] = 'SKIPPED (Layer under test must not be present in BBLAYERS)'
continue
logger.info('')
logger.info("Setting up for %s(%s), %s" % (layer['name'], layer['type'],
layer['path']))
- shutil.copyfile(bblayersconf + '.backup', bblayersconf)
-
missing_dependencies = not add_layer_dependencies(bblayersconf, layer, dep_layers, logger)
if not missing_dependencies:
for additional_layer in additional_layers:
if not add_layer_dependencies(bblayersconf, additional_layer, dep_layers, logger):
missing_dependencies = True
break
- if not add_layer_dependencies(bblayersconf, layer, dep_layers, logger) or \
- any(map(lambda additional_layer: not add_layer_dependencies(bblayersconf, additional_layer, dep_layers, logger),
- additional_layers)):
+ if missing_dependencies:
logger.info('Skipping %s due to missing dependencies.' % layer['name'])
results[layer['name']] = None
results_status[layer['name']] = 'SKIPPED (Missing dependencies)'
layers_tested = layers_tested + 1
continue
- if any(map(lambda additional_layer: not add_layer(bblayersconf, additional_layer, dep_layers, logger),
+ if any(map(lambda additional_layer: not add_layers(bblayersconf, [additional_layer], logger),
additional_layers)):
logger.info('Skipping %s due to missing additional layers.' % layer['name'])
results[layer['name']] = None
@@ -169,10 +210,17 @@ def main():
td['bbvars'] = get_bb_vars()
logger.info('Getting initial signatures ...')
td['builddir'] = builddir
- td['sigs'], td['tunetasks'] = get_signatures(td['builddir'])
+ try:
+ td['sigs'], td['tunetasks'] = get_signatures(td['builddir'])
+ except RuntimeError as e:
+ logger.info(str(e))
+ results[layer['name']] = None
+ results_status[layer['name']] = 'FAIL (Generating world signatures)'
+ layers_tested = layers_tested + 1
+ continue
td['machines'] = args.machines
- if not add_layer(bblayersconf, layer, dep_layers, logger):
+ if not add_layers(bblayersconf, [layer], logger):
logger.info('Skipping %s ???.' % layer['name'])
results[layer['name']] = None
results_status[layer['name']] = 'SKIPPED (Unknown)'
diff --git a/scripts/yocto-check-layer-wrapper b/scripts/yocto-check-layer-wrapper
index bbf6ee176d..2e3b699031 100755
--- a/scripts/yocto-check-layer-wrapper
+++ b/scripts/yocto-check-layer-wrapper
@@ -6,7 +6,9 @@
# script to avoid a contaminated environment.
#
# Copyright (C) 2017 Intel Corporation
-# Released under the MIT license (see COPYING.MIT)
+#
+# SPDX-License-Identifier: MIT
+#
if [ -z "$BUILDDIR" ]; then
echo "Please source oe-init-build-env before run this script."
@@ -30,7 +32,9 @@ cd $base_dir
build_dir=$(mktemp -p $base_dir -d -t build-XXXX)
-source oe-init-build-env $build_dir
+this_dir=$(dirname $(readlink -f $0))
+
+source $this_dir/../oe-init-build-env $build_dir
if [[ $output_log != '' ]]; then
yocto-check-layer -o "$output_log" "$*"
else
diff --git a/scripts/yocto_testresults_query.py b/scripts/yocto_testresults_query.py
new file mode 100755
index 0000000000..521ead8473
--- /dev/null
+++ b/scripts/yocto_testresults_query.py
@@ -0,0 +1,131 @@
+#!/usr/bin/env python3
+
+# Yocto Project test results management tool
+# This script is a thin layer over resulttool for managing test results and regression reports.
+# Its main feature is to translate tags or branch names to SHA-1 revisions, and then to run resulttool
+# with those computed revisions
+#
+# Copyright (C) 2023 OpenEmbedded Contributors
+#
+# SPDX-License-Identifier: MIT
+#
+
+import sys
+import os
+import argparse
+import subprocess
+import tempfile
+import lib.scriptutils as scriptutils
+
+script_path = os.path.dirname(os.path.realpath(__file__))
+poky_path = os.path.abspath(os.path.join(script_path, ".."))
+resulttool = os.path.abspath(os.path.join(script_path, "resulttool"))
+logger = scriptutils.logger_create(sys.argv[0])
+testresults_default_url="git://git.yoctoproject.org/yocto-testresults"
+
+def create_workdir():
+ workdir = tempfile.mkdtemp(prefix='yocto-testresults-query.')
+ logger.info(f"Shallow-cloning testresults in {workdir}")
+ subprocess.check_call(["git", "clone", testresults_default_url, workdir, "--depth", "1"])
+ return workdir
+
+def get_sha1(pokydir, revision):
+ try:
+ rev = subprocess.check_output(["git", "rev-list", "-n", "1", revision], cwd=pokydir).decode('utf-8').strip()
+ logger.info(f"SHA-1 revision for {revision} in {pokydir} is {rev}")
+ return rev
+ except subprocess.CalledProcessError:
+ logger.error(f"Can not find SHA-1 for {revision} in {pokydir}")
+ return None
+
+def get_branch(tag):
+ # The tags in test results repository, as returned by git rev-list, have the following form:
+ # refs/tags/<branch>/<count>-g<sha1>/<num>
+ return '/'.join(tag.split("/")[2:-2])
+
+def fetch_testresults(workdir, sha1):
+ logger.info(f"Fetching test results for {sha1} in {workdir}")
+ rawtags = subprocess.check_output(["git", "ls-remote", "--refs", "--tags", "origin", f"*{sha1}*"], cwd=workdir).decode('utf-8').strip()
+ if not rawtags:
+ raise Exception(f"No reference found for commit {sha1} in {workdir}")
+ branch = ""
+ for rev in [rawtag.split()[1] for rawtag in rawtags.splitlines()]:
+ if not branch:
+ branch = get_branch(rev)
+ logger.info(f"Fetching matching revision: {rev}")
+ subprocess.check_call(["git", "fetch", "--depth", "1", "origin", f"{rev}:{rev}"], cwd=workdir)
+ return branch
+
+def compute_regression_report(workdir, basebranch, baserevision, targetbranch, targetrevision, args):
+ logger.info(f"Running resulttool regression between SHA1 {baserevision} and {targetrevision}")
+ command = [resulttool, "regression-git", "--branch", basebranch, "--commit", baserevision, "--branch2", targetbranch, "--commit2", targetrevision, workdir]
+ if args.limit:
+ command.extend(["-l", args.limit])
+ report = subprocess.check_output(command).decode("utf-8")
+ return report
+
+def print_report_with_header(report, baseversion, baserevision, targetversion, targetrevision):
+ print("========================== Regression report ==============================")
+ print(f'{"=> Target:": <16}{targetversion: <16}({targetrevision})')
+ print(f'{"=> Base:": <16}{baseversion: <16}({baserevision})')
+ print("===========================================================================\n")
+ print(report, end='')
+
+def regression(args):
+ logger.info(f"Compute regression report between {args.base} and {args.target}")
+ if args.testresultsdir:
+ workdir = args.testresultsdir
+ else:
+ workdir = create_workdir()
+
+ try:
+ baserevision = get_sha1(poky_path, args.base)
+ targetrevision = get_sha1(poky_path, args.target)
+ if not baserevision or not targetrevision:
+ logger.error("One or more revision(s) missing. You might be targeting nonexistant tags/branches, or are in wrong repository (you must use Poky and not oe-core)")
+ if not args.testresultsdir:
+ subprocess.check_call(["rm", "-rf", workdir])
+ sys.exit(1)
+ basebranch = fetch_testresults(workdir, baserevision)
+ targetbranch = fetch_testresults(workdir, targetrevision)
+ report = compute_regression_report(workdir, basebranch, baserevision, targetbranch, targetrevision, args)
+ print_report_with_header(report, args.base, baserevision, args.target, targetrevision)
+ finally:
+ if not args.testresultsdir:
+ subprocess.check_call(["rm", "-rf", workdir])
+
+def main():
+ parser = argparse.ArgumentParser(description="Yocto Project test results helper")
+ subparsers = parser.add_subparsers(
+ help="Supported commands for test results helper",
+ required=True)
+ parser_regression_report = subparsers.add_parser(
+ "regression-report",
+ help="Generate regression report between two fixed revisions. Revisions can be branch name or tag")
+ parser_regression_report.add_argument(
+ 'base',
+ help="Revision or tag against which to compare results (i.e: the older)")
+ parser_regression_report.add_argument(
+ 'target',
+ help="Revision or tag to compare against the base (i.e: the newer)")
+ parser_regression_report.add_argument(
+ '-t',
+ '--testresultsdir',
+ help=f"An existing test results directory. {sys.argv[0]} will automatically clone it and use default branch if not provided")
+ parser_regression_report.add_argument(
+ '-l',
+ '--limit',
+ help=f"Maximum number of changes to display per test. Can be set to 0 to print all changes")
+ parser_regression_report.set_defaults(func=regression)
+
+ args = parser.parse_args()
+ args.func(args)
+
+if __name__ == '__main__':
+ try:
+ ret = main()
+ except Exception:
+ ret = 1
+ import traceback
+ traceback.print_exc()
+ sys.exit(ret)
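To make the tag parsing in get_branch() concrete (the tag name below is hypothetical): branch names may themselves contain '/', which is why the middle components are re-joined rather than indexed.

    tag = "refs/tags/abelloni/master-next/1234-gdeadbee/3"
    print('/'.join(tag.split("/")[2:-2]))   # -> abelloni/master-next

And a typical invocation, using two release tags assumed to exist in the Poky clone:

    $ scripts/yocto_testresults_query.py regression-report yocto-4.1 yocto-4.2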